repo_name
stringlengths
5
108
path
stringlengths
6
333
size
stringlengths
1
6
content
stringlengths
4
977k
license
stringclasses
15 values
tobyweston/simple-excel
src/main/java/bad/robot/excel/style/Alignment.java
942
/* * Copyright (c) 2012-2013, bad robot (london) ltd. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package bad.robot.excel.style; import bad.robot.excel.AbstractValueType; public class Alignment extends AbstractValueType<AlignmentStyle> { public static Alignment alignment(AlignmentStyle value) { return new Alignment(value); } public Alignment(AlignmentStyle value) { super(value); } }
apache-2.0
BruceHurrican/asstudydemo
app/src/blue/java/com/bruce/demo/utils/KKReflect.java
25732
/* * Copyright (c) 2011-2016, Data Geekery GmbH (http://www.datageekery.com) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.bruce.demo.utils; import java.lang.reflect.AccessibleObject; import java.lang.reflect.Constructor; import java.lang.reflect.Field; import java.lang.reflect.InvocationHandler; import java.lang.reflect.Member; import java.lang.reflect.Method; import java.lang.reflect.Modifier; import java.lang.reflect.Proxy; import java.util.Arrays; import java.util.LinkedHashMap; import java.util.Map; /** * 自定义反射工具类 * Created by BruceHurrican on 17/1/16. */ public class KKReflect { // --------------------------------------------------------------------- // Static API used as entrance points to the fluent API // --------------------------------------------------------------------- /** * The wrapped object */ private final Object object; /** * A flag indicating whether the wrapped object is a {@link Class} (for * accessing static fields and methods), or any other type of {@link Object} * (for accessing instance fields and methods). */ private final boolean isClass; private KKReflect(Class<?> type) { this.object = type; this.isClass = true; } private KKReflect(Object object) { this.object = object; this.isClass = false; } /** * Wrap a class name. * <p> * This is the same as calling <code>on(Class.forName(name))</code> * * @param name A fully qualified class name * @return A wrapped class object, to be used for further reflection. 
* @throws KKReflectException If any reflection exception occurred. * @see #on(Class) */ public static KKReflect on(String name) throws KKReflectException { return on(forName(name)); } // --------------------------------------------------------------------- // Members // --------------------------------------------------------------------- /** * Wrap a class name, loading it via a given class loader. * <p> * This is the same as calling * <code>on(Class.forName(name, classLoader))</code> * * @param name A fully qualified class name. * @param classLoader The class loader in whose context the class should be * loaded. * @return A wrapped class object, to be used for further reflection. * @throws KKReflectException If any reflection exception occurred. * @see #on(Class) */ public static KKReflect on(String name, ClassLoader classLoader) throws KKReflectException { return on(forName(name, classLoader)); } /** * Wrap a class. * <p> * Use this when you want to access static fields and methods on a * {@link Class} object, or as a basis for constructing objects of that * class using {@link #create(Object...)} * * @param clazz The class to be wrapped * @return A wrapped class object, to be used for further reflection. */ public static KKReflect on(Class<?> clazz) { return new KKReflect(clazz); } // --------------------------------------------------------------------- // Constructors // --------------------------------------------------------------------- /** * Wrap an object. * <p> * Use this when you want to access instance fields and methods on any * {@link Object} * * @param object The object to be wrapped * @return A wrapped object, to be used for further reflection. */ public static KKReflect on(Object object) { return new KKReflect(object); } /** * Conveniently render an {@link AccessibleObject} accessible. * <p> * To prevent {@link SecurityException}, this is only done if the argument * object and its declaring class are non-public. 
* * @param accessible The object to render accessible * @return The argument object rendered accessible */ public static <T extends AccessibleObject> T accessible(T accessible) { if (accessible == null) { return null; } if (accessible instanceof Member) { Member member = (Member) accessible; if (Modifier.isPublic(member.getModifiers()) && Modifier.isPublic(member .getDeclaringClass().getModifiers())) { return accessible; } } // [jOOQ #3392] The accessible flag is set to false by default, also for public members. if (!accessible.isAccessible()) { accessible.setAccessible(true); } return accessible; } // --------------------------------------------------------------------- // Fluent KKReflection API // --------------------------------------------------------------------- /** * Get the POJO property name of an getter/setter */ private static String property(String string) { int length = string.length(); if (length == 0) { return ""; } else if (length == 1) { return string.toLowerCase(); } else { return string.substring(0, 1).toLowerCase() + string.substring(1); } } /** * Wrap an object created from a constructor */ private static KKReflect on(Constructor<?> constructor, Object... args) throws KKReflectException { try { return on(accessible(constructor).newInstance(args)); } catch (Exception e) { throw new KKReflectException(e); } } /** * Wrap an object returned from a method */ private static KKReflect on(Method method, Object object, Object... 
args) throws KKReflectException { try { accessible(method); if (method.getReturnType() == void.class) { method.invoke(object, args); return on(object); } else { return on(method.invoke(object, args)); } } catch (Exception e) { throw new KKReflectException(e); } } /** * Unwrap an object */ private static Object unwrap(Object object) { if (object instanceof KKReflect) { return ((KKReflect) object).get(); } return object; } /** * Get an array of types for an array of objects * * @see Object#getClass() */ private static Class<?>[] types(Object... values) { if (values == null) { return new Class[0]; } Class<?>[] result = new Class[values.length]; for (int i = 0; i < values.length; i++) { Object value = values[i]; result[i] = value == null ? NULL.class : value.getClass(); } return result; } /** * Load a class * * @see Class#forName(String) */ private static Class<?> forName(String name) throws KKReflectException { try { return Class.forName(name); } catch (Exception e) { throw new KKReflectException(e); } } private static Class<?> forName(String name, ClassLoader classLoader) throws KKReflectException { try { return Class.forName(name, true, classLoader); } catch (Exception e) { throw new KKReflectException(e); } } /** * Get a wrapper type for a primitive type, or the argument type itself, if * it is not a primitive type. 
*/ public static Class<?> wrapper(Class<?> type) { if (type == null) { return null; } else if (type.isPrimitive()) { if (boolean.class == type) { return Boolean.class; } else if (int.class == type) { return Integer.class; } else if (long.class == type) { return Long.class; } else if (short.class == type) { return Short.class; } else if (byte.class == type) { return Byte.class; } else if (double.class == type) { return Double.class; } else if (float.class == type) { return Float.class; } else if (char.class == type) { return Character.class; } else if (void.class == type) { return Void.class; } } return type; } /** * Get the wrapped object * * @param <T> A convenience generic parameter for automatic unsafe casting */ @SuppressWarnings("unchecked") public <T> T get() { return (T) object; } /** * Set a field value. * <p> * This is roughly equivalent to {@link Field#set(Object, Object)}. If the * wrapped object is a {@link Class}, then this will set a value to a static * member field. If the wrapped object is any other {@link Object}, then * this will set a value to an instance member field. * * @param name The field name * @param value The new field value * @return The same wrapped object, to be used for further reflection. * @throws KKReflectException If any reflection exception occurred. */ public KKReflect set(String name, Object value) throws KKReflectException { try { Field field = field0(name); field.set(object, unwrap(value)); return this; } catch (Exception e) { throw new KKReflectException(e); } } /** * Get a field value. * <p> * This is roughly equivalent to {@link Field#get(Object)}. If the wrapped * object is a {@link Class}, then this will get a value from a static * member field. If the wrapped object is any other {@link Object}, then * this will get a value from an instance member field. * <p> * If you want to "navigate" to a wrapped version of the field, use * {@link #field(String)} instead. 
* * @param name The field name * @return The field value * @throws KKReflectException If any reflection exception occurred. * @see #field(String) */ public <T> T get(String name) throws KKReflectException { return field(name).get(); } /** * Get a wrapped field. * <p> * This is roughly equivalent to {@link Field#get(Object)}. If the wrapped * object is a {@link Class}, then this will wrap a static member field. If * the wrapped object is any other {@link Object}, then this wrap an * instance member field. * * @param name The field name * @return The wrapped field * @throws KKReflectException If any reflection exception occurred. */ public KKReflect field(String name) throws KKReflectException { try { Field field = field0(name); return on(field.get(object)); } catch (Exception e) { throw new KKReflectException(e); } } private Field field0(String name) throws KKReflectException { Class<?> type = type(); // Try getting a public field try { return accessible(type.getField(name)); } // Try again, getting a non-public field catch (NoSuchFieldException e) { do { try { return accessible(type.getDeclaredField(name)); } catch (NoSuchFieldException ignore) { } type = type.getSuperclass(); } while (type != null); throw new KKReflectException(e); } } /** * Get a Map containing field names and wrapped values for the fields' * values. * <p> * If the wrapped object is a {@link Class}, then this will return static * fields. If the wrapped object is any other {@link Object}, then this will * return instance fields. * <p> * These two calls are equivalent <code><pre> * on(object).field("myField"); * on(object).fields().get("myField"); * </pre></code> * * @return A map containing field names and wrapped values. 
*/ public Map<String, KKReflect> fields() { Map<String, KKReflect> result = new LinkedHashMap<String, KKReflect>(); Class<?> type = type(); do { for (Field field : type.getDeclaredFields()) { if (!isClass ^ Modifier.isStatic(field.getModifiers())) { String name = field.getName(); if (!result.containsKey(name)) result.put(name, field(name)); } } type = type.getSuperclass(); } while (type != null); return result; } /** * Call a method by its name. * <p> * This is a convenience method for calling * <code>call(name, new Object[0])</code> * * @param name The method name * @return The wrapped method result or the same wrapped object if the * method returns <code>void</code>, to be used for further * reflection. * @throws KKReflectException If any reflection exception occurred. * @see #call(String, Object...) */ public KKReflect call(String name) throws KKReflectException { return call(name, new Object[0]); } // --------------------------------------------------------------------- // Object API // --------------------------------------------------------------------- /** * Call a method by its name. * <p> * This is roughly equivalent to {@link Method#invoke(Object, Object...)}. * If the wrapped object is a {@link Class}, then this will invoke a static * method. If the wrapped object is any other {@link Object}, then this will * invoke an instance method. * <p> * Just like {@link Method#invoke(Object, Object...)}, this will try to wrap * primitive types or unwrap primitive type wrappers if applicable. If * several methods are applicable, by that rule, the first one encountered * is called. i.e. 
when calling <code><pre> * on(...).call("method", 1, 1); * </pre></code> The first of the following methods will be called: * <code><pre> * public void method(int param1, Integer param2); * public void method(Integer param1, int param2); * public void method(Number param1, Number param2); * public void method(Number param1, Object param2); * public void method(int param1, Object param2); * </pre></code> * <p> * The best matching method is searched for with the following strategy: * <ol> * <li>public method with exact signature match in class hierarchy</li> * <li>non-public method with exact signature match on declaring class</li> * <li>public method with similar signature in class hierarchy</li> * <li>non-public method with similar signature on declaring class</li> * </ol> * * @param name The method name * @param args The method arguments * @return The wrapped method result or the same wrapped object if the * method returns <code>void</code>, to be used for further * reflection. * @throws KKReflectException If any reflection exception occurred. */ public KKReflect call(String name, Object... args) throws KKReflectException { Class<?>[] types = types(args); // Try invoking the "canonical" method, i.e. the one with exact // matching argument types try { Method method = exactMethod(name, types); return on(method, object, args); } // If there is no exact match, try to find a method that has a "similar" // signature if primitive argument types are converted to their wrappers catch (NoSuchMethodException e) { try { Method method = similarMethod(name, types); return on(method, object, args); } catch (NoSuchMethodException e1) { throw new KKReflectException(e1); } } } /** * Searches a method with the exact same signature as desired. * <p> * If a public method is found in the class hierarchy, this method is returned. * Otherwise a private method with the exact same signature is returned. 
* If no exact match could be found, we let the {@code NoSuchMethodException} pass through. */ private Method exactMethod(String name, Class<?>[] types) throws NoSuchMethodException { Class<?> type = type(); // first priority: find a public method with exact signature match in class hierarchy try { return type.getMethod(name, types); } // second priority: find a private method with exact signature match on declaring class catch (NoSuchMethodException e) { do { try { return type.getDeclaredMethod(name, types); } catch (NoSuchMethodException ignore) { } type = type.getSuperclass(); } while (type != null); throw new NoSuchMethodException(); } } /** * Searches a method with a similar signature as desired using * {@link #isSimilarSignature(java.lang.reflect.Method, String, Class[])}. * <p> * First public methods are searched in the class hierarchy, then private * methods on the declaring class. If a method could be found, it is * returned, otherwise a {@code NoSuchMethodException} is thrown. */ private Method similarMethod(String name, Class<?>[] types) throws NoSuchMethodException { Class<?> type = type(); // first priority: find a public method with a "similar" signature in class hierarchy // similar interpreted in when primitive argument types are converted to their wrappers for (Method method : type.getMethods()) { if (isSimilarSignature(method, name, types)) { return method; } } // second priority: find a non-public method with a "similar" signature on declaring class do { for (Method method : type.getDeclaredMethods()) { if (isSimilarSignature(method, name, types)) { return method; } } type = type.getSuperclass(); } while (type != null); throw new NoSuchMethodException("No similar method " + name + " with params " + Arrays .toString(types) + " could be found on type " + type() + "."); } /** * Determines if a method has a "similar" signature, especially if wrapping * primitive argument types would result in an exactly matching signature. 
*/ private boolean isSimilarSignature(Method possiblyMatchingMethod, String desiredMethodName, Class<?>[] desiredParamTypes) { return possiblyMatchingMethod.getName().equals(desiredMethodName) && match (possiblyMatchingMethod.getParameterTypes(), desiredParamTypes); } // --------------------------------------------------------------------- // Utility methods // --------------------------------------------------------------------- /** * Call a constructor. * <p> * This is a convenience method for calling * <code>create(new Object[0])</code> * * @return The wrapped new object, to be used for further reflection. * @throws KKReflectException If any reflection exception occurred. * @see #create(Object...) */ public KKReflect create() throws KKReflectException { return create(new Object[0]); } /** * Call a constructor. * <p> * This is roughly equivalent to {@link Constructor#newInstance(Object...)}. * If the wrapped object is a {@link Class}, then this will create a new * object of that class. If the wrapped object is any other {@link Object}, * then this will create a new object of the same type. * <p> * Just like {@link Constructor#newInstance(Object...)}, this will try to * wrap primitive types or unwrap primitive type wrappers if applicable. If * several constructors are applicable, by that rule, the first one * encountered is called. i.e. when calling <code><pre> * on(C.class).create(1, 1); * </pre></code> The first of the following constructors will be applied: * <code><pre> * public C(int param1, Integer param2); * public C(Integer param1, int param2); * public C(Number param1, Number param2); * public C(Number param1, Object param2); * public C(int param1, Object param2); * </pre></code> * * @param args The constructor arguments * @return The wrapped new object, to be used for further reflection. * @throws KKReflectException If any reflection exception occurred. */ public KKReflect create(Object... 
args) throws KKReflectException { Class<?>[] types = types(args); // Try invoking the "canonical" constructor, i.e. the one with exact // matching argument types try { Constructor<?> constructor = type().getDeclaredConstructor(types); return on(constructor, args); } // If there is no exact match, try to find one that has a "similar" // signature if primitive argument types are converted to their wrappers catch (NoSuchMethodException e) { for (Constructor<?> constructor : type().getDeclaredConstructors()) { if (match(constructor.getParameterTypes(), types)) { return on(constructor, args); } } throw new KKReflectException(e); } } /** * Create a proxy for the wrapped object allowing to typesafely invoke * methods on it using a custom interface * * @param proxyType The interface type that is implemented by the proxy * @return A proxy for the wrapped object */ @SuppressWarnings("unchecked") public <P> P as(Class<P> proxyType) { final boolean isMap = (object instanceof Map); final InvocationHandler handler = new InvocationHandler() { @SuppressWarnings("null") @Override public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { String name = method.getName(); // Actual method name matches always come first try { return on(object).call(name, args).get(); } // [#14] Emulate POJO behaviour on wrapped map objects catch (KKReflectException e) { if (isMap) { Map<String, Object> map = (Map<String, Object>) object; int length = (args == null ? 
0 : args.length); if (length == 0 && name.startsWith("get")) { return map.get(property(name.substring(3))); } else if (length == 0 && name.startsWith("is")) { return map.get(property(name.substring(2))); } else if (length == 1 && name.startsWith("set")) { map.put(property(name.substring(3)), args[0]); return null; } } throw e; } } }; return (P) Proxy.newProxyInstance(proxyType.getClassLoader(), new Class[]{proxyType}, handler); } /** * Check whether two arrays of types match, converting primitive types to * their corresponding wrappers. */ private boolean match(Class<?>[] declaredTypes, Class<?>[] actualTypes) { if (declaredTypes.length == actualTypes.length) { for (int i = 0; i < actualTypes.length; i++) { if (actualTypes[i] == NULL.class) continue; if (wrapper(declaredTypes[i]).isAssignableFrom(wrapper(actualTypes[i]))) continue; return false; } return true; } else { return false; } } /** * {@inheritDoc} */ @Override public boolean equals(Object obj) { if (obj instanceof KKReflect) { return object.equals(((KKReflect) obj).get()); } return false; } /** * {@inheritDoc} */ @Override public int hashCode() { return object.hashCode(); } /** * {@inheritDoc} */ @Override public String toString() { return object.toString(); } /** * Get the type of the wrapped object. * * @see Object#getClass() */ public Class<?> type() { if (isClass) { return (Class<?>) object; } else { return object.getClass(); } } private static class NULL { } }
apache-2.0
nitinmotgi/hydrator-plugins
hydrator-transforms-lib/src/test/java/co/cask/hydrator/transforms/ParseCSVTest.java
8888
/* * Copyright © 2015 Cask Data, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package co.cask.hydrator.transforms; import co.cask.cdap.api.data.format.StructuredRecord; import co.cask.cdap.api.data.schema.Schema; import co.cask.cdap.etl.api.Transform; import org.junit.Assert; import org.junit.Test; public class ParseCSVTest { private static final Schema INPUT1 = Schema.recordOf("input1", Schema.Field.of("body", Schema.of(Schema.Type.STRING))); private static final Schema OUTPUT1 = Schema.recordOf("output1", Schema.Field.of("a", Schema.of(Schema.Type.STRING)), Schema.Field.of("b", Schema.of(Schema.Type.STRING)), Schema.Field.of("c", Schema.of(Schema.Type.STRING)), Schema.Field.of("d", Schema.of(Schema.Type.STRING)), Schema.Field.of("e", Schema.of(Schema.Type.STRING))); private static final Schema OUTPUT2 = Schema.recordOf("output2", Schema.Field.of("a", Schema.of(Schema.Type.LONG)), Schema.Field.of("b", Schema.of(Schema.Type.STRING)), Schema.Field.of("c", Schema.of(Schema.Type.INT)), Schema.Field.of("d", Schema.of(Schema.Type.DOUBLE)), Schema.Field.of("e", Schema.of(Schema.Type.BOOLEAN))); @Test public void testDefaultCSVParser() throws Exception { String s = OUTPUT1.toString(); ParseCSV.Config config = new ParseCSV.Config("DEFAULT", "body", OUTPUT1.toString()); Transform<StructuredRecord, StructuredRecord> transform = new ParseCSV(config); transform.initialize(null); MockEmitter<StructuredRecord> emitter = new MockEmitter<>(); // Test missing field. 
emitter.clear(); transform.transform(StructuredRecord.builder(INPUT1) .set("body", "1,2,3,4,").build(), emitter); Assert.assertEquals("1", emitter.getEmitted().get(0).get("a")); Assert.assertEquals("2", emitter.getEmitted().get(0).get("b")); Assert.assertEquals("3", emitter.getEmitted().get(0).get("c")); Assert.assertEquals("4", emitter.getEmitted().get(0).get("d")); Assert.assertEquals("", emitter.getEmitted().get(0).get("e")); // Test adding quote to field value. emitter.clear(); transform.transform(StructuredRecord.builder(INPUT1) .set("body", "1,2,3,'4',5").build(), emitter); Assert.assertEquals("1", emitter.getEmitted().get(0).get("a")); Assert.assertEquals("2", emitter.getEmitted().get(0).get("b")); Assert.assertEquals("3", emitter.getEmitted().get(0).get("c")); Assert.assertEquals("'4'", emitter.getEmitted().get(0).get("d")); Assert.assertEquals("5", emitter.getEmitted().get(0).get("e")); // Test adding spaces in a field and quoted field value. emitter.clear(); transform.transform(StructuredRecord.builder(INPUT1) .set("body", "1,2, 3 ,'4',5").build(), emitter); Assert.assertEquals("1", emitter.getEmitted().get(0).get("a")); Assert.assertEquals("2", emitter.getEmitted().get(0).get("b")); Assert.assertEquals(" 3 ", emitter.getEmitted().get(0).get("c")); Assert.assertEquals("'4'", emitter.getEmitted().get(0).get("d")); Assert.assertEquals("5", emitter.getEmitted().get(0).get("e")); // Test Skipping empty lines. 
emitter.clear(); transform.transform(StructuredRecord.builder(INPUT1) .set("body", "1,2,3,4,5\n\n").build(), emitter); Assert.assertEquals("1", emitter.getEmitted().get(0).get("a")); Assert.assertEquals("2", emitter.getEmitted().get(0).get("b")); Assert.assertEquals("3", emitter.getEmitted().get(0).get("c")); Assert.assertEquals("4", emitter.getEmitted().get(0).get("d")); Assert.assertEquals("5", emitter.getEmitted().get(0).get("e")); Assert.assertEquals(1, emitter.getEmitted().size()); // Test multiple records emitter.clear(); transform.transform(StructuredRecord.builder(INPUT1) .set("body", "1,2,3,4,5\n6,7,8,9,10").build(), emitter); Assert.assertEquals("1", emitter.getEmitted().get(0).get("a")); Assert.assertEquals("2", emitter.getEmitted().get(0).get("b")); Assert.assertEquals("3", emitter.getEmitted().get(0).get("c")); Assert.assertEquals("4", emitter.getEmitted().get(0).get("d")); Assert.assertEquals("5", emitter.getEmitted().get(0).get("e")); Assert.assertEquals("6", emitter.getEmitted().get(1).get("a")); Assert.assertEquals("7", emitter.getEmitted().get(1).get("b")); Assert.assertEquals("8", emitter.getEmitted().get(1).get("c")); Assert.assertEquals("9", emitter.getEmitted().get(1).get("d")); Assert.assertEquals("10", emitter.getEmitted().get(1).get("e")); // Test with records supporting different types. 
emitter.clear(); ParseCSV.Config config1 = new ParseCSV.Config("DEFAULT", "body", OUTPUT2.toString()); Transform<StructuredRecord, StructuredRecord> transform1 = new ParseCSV(config1); transform1.initialize(null); transform1.transform(StructuredRecord.builder(INPUT1) .set("body", "10,stringA,3,4.32,true").build(), emitter); Assert.assertEquals(10L, emitter.getEmitted().get(0).get("a")); Assert.assertEquals("stringA", emitter.getEmitted().get(0).get("b")); Assert.assertEquals(3, emitter.getEmitted().get(0).get("c")); Assert.assertEquals(4.32, emitter.getEmitted().get(0).get("d")); Assert.assertEquals(true, emitter.getEmitted().get(0).get("e")); } @Test(expected=RuntimeException.class) public void testDoubleException() throws Exception { MockEmitter<StructuredRecord> emitter = new MockEmitter<>(); ParseCSV.Config config = new ParseCSV.Config("DEFAULT", "body", OUTPUT2.toString()); Transform<StructuredRecord, StructuredRecord> transform = new ParseCSV(config); transform.initialize(null); transform.transform(StructuredRecord.builder(INPUT1) .set("body", "10,stringA,3,,true").build(), emitter); } @Test(expected=RuntimeException.class) public void testIntException() throws Exception { MockEmitter<StructuredRecord> emitter = new MockEmitter<>(); ParseCSV.Config config = new ParseCSV.Config("DEFAULT", "body", OUTPUT2.toString()); Transform<StructuredRecord, StructuredRecord> transform = new ParseCSV(config); transform.initialize(null); transform.transform(StructuredRecord.builder(INPUT1) .set("body", "10,stringA,,4.32,true").build(), emitter); } @Test(expected=RuntimeException.class) public void testLongException() throws Exception { MockEmitter<StructuredRecord> emitter = new MockEmitter<>(); ParseCSV.Config config = new ParseCSV.Config("DEFAULT", "body", OUTPUT2.toString()); Transform<StructuredRecord, StructuredRecord> transform = new ParseCSV(config); transform.initialize(null); transform.transform(StructuredRecord.builder(INPUT1) .set("body", 
",stringA,3,4.32,true").build(), emitter); } // // @Test // public void testFoo() throws Exception { // List<String> words = Lists.newArrayList(); // words.add("this"); // words.add("the"); // words.add("is"); // words.add("a"); // words.add("of"); // CharArraySet stopWords = StopFilter.makeStopSet(words); // // List<String> result = Lists.newArrayList(); // Analyzer analyzer = new SimpleAnalyzer(); // TokenStream stream = analyzer.tokenStream(null, "This is a great example of text analysis"); // stream = new StandardFilter(stream); // stream = new StopFilter(stream, stopWords); // stream = new PorterStemFilter(stream); // // stream.reset(); // while(stream.incrementToken()) { // result.add(stream.getAttribute(CharTermAttribute.class).toString()); // } // stream.end(); // stream.close(); // Assert.assertEquals(1, result.size()); // } }
apache-2.0
dingwpmz/Mycat-Demo
src/main/java/persistent/prestige/modules/eshop/model/OrderItem.java
1801
/* * Powered By agile * Web Site: http://www.agile.com * Since 2008 - 2016 */ package persistent.prestige.modules.eshop.model; import persistent.prestige.platform.base.model.AuditableModel; /** * OrderItem 实体类 * @author 雅居乐 2016-8-27 10:31:06 * @version 1.0 */ public class OrderItem extends AuditableModel{ //alias public static final String TABLE_ALIAS = "OrderItem"; //columns START /**订单ID*/ private java.lang.Integer orderId; /**商品id*/ private java.lang.Integer goodsId; private Integer skuId; /**商品单价*/ private java.lang.Long price; /**购买数量*/ private java.lang.Integer num; /**总价格*/ private java.lang.Long totalPrice; //columns END private String buyUid; public Integer getSkuId() { return skuId; } public void setSkuId(Integer skuId) { this.skuId = skuId; } public java.lang.Integer getOrderId() { return this.orderId; } public void setOrderId(java.lang.Integer value) { this.orderId = value; } public java.lang.Integer getGoodsId() { return this.goodsId; } public void setGoodsId(java.lang.Integer value) { this.goodsId = value; } public java.lang.Long getPrice() { return this.price; } public void setPrice(java.lang.Long value) { this.price = value; } public java.lang.Integer getNum() { return this.num; } public void setNum(java.lang.Integer value) { this.num = value; } public java.lang.Long getTotalPrice() { return this.totalPrice; } public void setTotalPrice(java.lang.Long value) { this.totalPrice = value; } public String getBuyUid() { return buyUid; } public void setBuyUid(String buyUid) { this.buyUid = buyUid; } }
apache-2.0
oehme/analysing-gradle-performance
my-app/src/test/java/org/gradle/test/performance/mediummonolithicjavaproject/p131/Test2637.java
2174
package org.gradle.test.performance.mediummonolithicjavaproject.p131;

import org.junit.Test;

import static org.junit.Assert.*;

/**
 * Accessor round-trip tests for {@link Production2637}: each case writes a
 * value through a setter and asserts the matching getter returns it.
 */
public class Test2637 {

    Production2637 objectUnderTest = new Production2637();

    @Test
    public void testProperty0() {
        // Object-typed property round-trip.
        Production2634 expected = new Production2634();
        objectUnderTest.setProperty0(expected);
        assertEquals(expected, objectUnderTest.getProperty0());
    }

    @Test
    public void testProperty1() {
        Production2635 expected = new Production2635();
        objectUnderTest.setProperty1(expected);
        assertEquals(expected, objectUnderTest.getProperty1());
    }

    @Test
    public void testProperty2() {
        Production2636 expected = new Production2636();
        objectUnderTest.setProperty2(expected);
        assertEquals(expected, objectUnderTest.getProperty2());
    }

    @Test
    public void testProperty3() {
        // String-typed property round-trip.
        String expected = "value";
        objectUnderTest.setProperty3(expected);
        assertEquals(expected, objectUnderTest.getProperty3());
    }

    @Test
    public void testProperty4() {
        String expected = "value";
        objectUnderTest.setProperty4(expected);
        assertEquals(expected, objectUnderTest.getProperty4());
    }

    @Test
    public void testProperty5() {
        String expected = "value";
        objectUnderTest.setProperty5(expected);
        assertEquals(expected, objectUnderTest.getProperty5());
    }

    @Test
    public void testProperty6() {
        String expected = "value";
        objectUnderTest.setProperty6(expected);
        assertEquals(expected, objectUnderTest.getProperty6());
    }

    @Test
    public void testProperty7() {
        String expected = "value";
        objectUnderTest.setProperty7(expected);
        assertEquals(expected, objectUnderTest.getProperty7());
    }

    @Test
    public void testProperty8() {
        String expected = "value";
        objectUnderTest.setProperty8(expected);
        assertEquals(expected, objectUnderTest.getProperty8());
    }

    @Test
    public void testProperty9() {
        String expected = "value";
        objectUnderTest.setProperty9(expected);
        assertEquals(expected, objectUnderTest.getProperty9());
    }
}
apache-2.0
zhujainxipan/colorweibo
weibo/src/com/ht/jellybean/ui/fragment/MyFragment.java
8865
package com.ht.jellybean.ui.fragment;

import android.content.Intent;
import android.os.Bundle;
import android.support.v4.app.Fragment;
import android.text.TextUtils;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.*;
import com.ht.jellybean.Constants;
import com.ht.jellybean.R;
import com.ht.jellybean.ui.activity.StatusActivity;
import com.ht.jellybean.ui.adapter.StatusContentListAdapter;
import com.ht.jellybean.ui.activity.FollowersActivity;
import com.ht.jellybean.ui.activity.FriendsActivity;
import com.ht.jellybean.ui.widget.RoundProgressBar;
import com.ht.jellybean.util.AccessTokenKeeper;
import com.ht.jellybean.thread.CacheImageAsyncTask;
import com.ht.jellybean.util.UserInfoKeeper;
import com.sina.weibo.sdk.auth.Oauth2AccessToken;
import com.sina.weibo.sdk.exception.WeiboException;
import com.sina.weibo.sdk.net.RequestListener;
import com.sina.weibo.sdk.openapi.UsersAPI;
import com.sina.weibo.sdk.openapi.legacy.StatusesAPI;
import com.sina.weibo.sdk.openapi.models.Status;
import com.sina.weibo.sdk.openapi.models.StatusList;
import com.sina.weibo.sdk.openapi.models.User;

import java.util.ArrayList;

/**
 * Created by annuo on 2015/6/3.
 *
 * "My profile" tab fragment. Shows a header with the signed-in user's cached
 * profile (name, gender, location, counts, avatar) above a list of the user's
 * most recent statuses fetched from the Sina Weibo open API. A background
 * thread animates a round progress bar until the timeline request completes.
 */
public class MyFragment extends Fragment implements Runnable{
    // Current user's uid, parsed from the stored OAuth token in onActivityCreated().
    private long uid;
    private Oauth2AccessToken token;
    private ListView listView;
    // Statuses backing the list; populated by the userTimeline onComplete callback.
    private ArrayList<Status> list;
    private RoundProgressBar roundProgressBar;
    // Cleared by the timeline callback; run() polls it to stop the animation loop.
    private boolean isRoundProgressBarShown = true;
    // Remembers the first visible list position across onPause()/onResume().
    private int CURRENT_LISTVIEW_ITEM_POSITION = 0;

    @Override
    public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) {
        // Just inflate the layout; all wiring happens in onActivityCreated().
        View view = inflater.inflate(R.layout.fragment_my, container, false);
        return view;
    }

    @Override
    public void onActivityCreated(Bundle savedInstanceState) {
        super.onActivityCreated(savedInstanceState);
        roundProgressBar = (RoundProgressBar) getView().findViewById(R.id.roundProgressBar);
        roundProgressBar.setMax(100);
        // Start the progress-bar animation loop (see run()).
        Thread thread = new Thread(this);
        thread.start();
        // Read the stored OAuth token (SharedPreferences) and derive the user id.
        token = AccessTokenKeeper.readAccessToken(getActivity());
        uid = Long.parseLong(token.getUid());
        View myHead = LayoutInflater.from(getActivity()).inflate(R.layout.user_info_head, null);
        listView = (ListView) getView().findViewById(R.id.my_listview);
        listView.addHeaderView(myHead, null, false);
        // Tapping the "following" count opens the full following list.
        TextView friendsCountTextView = (TextView) myHead.findViewById(R.id.user_info_head_guanzhu_count);
        friendsCountTextView.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) {
                Intent intent = new Intent(getActivity(), FriendsActivity.class);
                startActivity(intent);
            }
        });
        // Tapping the "followers" count opens the full follower list.
        TextView fellowersCountTextView = (TextView) myHead.findViewById(R.id.user_info_head_fensi_count);
        fellowersCountTextView.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) {
                Intent intent = new Intent(getActivity(), FollowersActivity.class);
                startActivity(intent);
            }
        });
        listView.setOnItemClickListener(new AdapterView.OnItemClickListener() {
            @Override
            public void onItemClick(AdapterView<?> parent, View view, int position, long id) {
                // Adjust for the header view so the index matches `list`.
                int position1 = position - listView.getHeaderViewsCount();
                Intent intent = new Intent(getActivity(), StatusActivity.class);
                // FIXME(review): leftover debug logging.
                Log.d("111111111111111111111", "11111111111111");
                // Pass the tapped status on to StatusActivity.
                Status status = list.get(position1);
                if (status != null) {
                    intent.putExtra("status", status);
                }
                getActivity().startActivity(intent);
            }
        });
        // Users API client for profile data.
        // NOTE(review): muserAPI is created but never used in this method — confirm whether it can be removed.
        UsersAPI muserAPI = new UsersAPI(getActivity(), Constants.APP_KEY, token);
        // Show the locally cached profile info, if any.
        User user = UserInfoKeeper.readUserInfo(getActivity());
        if (user != null) {
            // Screen name.
            TextView screenName = (TextView) getActivity().findViewById(R.id.user_info_head_name);
            screenName.setText(user.screen_name);
            // Gender: API codes "m"/"f"/"n" mapped to display labels.
            TextView sexTextView = (TextView) getActivity().findViewById(R.id.user_info_head_sex);
            String sex = null;
            switch (user.gender) {
                case "m":
                    sex = "男";
                    break;
                case "f":
                    sex = "女";
                    break;
                case "n":
                    sex = "未知";
                    break;
            }
            sexTextView.setText(sex);
            // Location.
            TextView addressTextView = (TextView) getActivity().findViewById(R.id.user_info_head_adress);
            addressTextView.setText(user.location);
            // Number of statuses the user has posted.
            TextView weiboCountTextView = (TextView) getActivity().findViewById(R.id.user_info_head_weibo_count);
            weiboCountTextView.setText(user.statuses_count + "\n微博");
            // Follower count.
            TextView fensiCountTextView = (TextView) getActivity().findViewById(R.id.user_info_head_fensi_count);
            fensiCountTextView.setText(user.followers_count + "\n粉丝");
            // Following count.
            TextView guanzhuCountTextView = (TextView) getActivity().findViewById(R.id.user_info_head_guanzhu_count);
            guanzhuCountTextView.setText(user.friends_count + "\n关注");
            // Avatar, loaded and cached asynchronously.
            ImageView imageView = (ImageView) getActivity().findViewById(R.id.user_info_head_avatar);
            imageView.setTag(user.avatar_large);
            CacheImageAsyncTask myTask = new CacheImageAsyncTask(imageView, "userico");
            myTask.execute(user.avatar_large);
        }
        // Fetch this user's latest statuses; per the comment below, third-party
        // apps only receive the 5 most recent ones.
        StatusesAPI mstatusesAPI = new StatusesAPI(getActivity(), Constants.APP_KEY, token);
        mstatusesAPI.userTimeline(uid, 0, 0, 5, 1, false, 0, false, new RequestListener() {
            @Override
            public void onComplete(String response) {
                // Stop the animation loop and hide the progress bar.
                isRoundProgressBarShown = false;
                roundProgressBar.setVisibility(View.GONE);
                if (!TextUtils.isEmpty(response)) {
                    StatusList statusList = StatusList.parse(response);
                    if (statusList != null) {
                        list = statusList.statusList;
                        if (list != null) {
                            StatusContentListAdapter statusContentListAdapter = new StatusContentListAdapter(getActivity(), list);
                            listView.setAdapter(statusContentListAdapter);
                        } else
                            Toast.makeText(getActivity(), "没有更多微博了", Toast.LENGTH_LONG).show();
                    }
                }
            }

            @Override
            public void onWeiboException(WeiboException e) {
                Toast.makeText(getActivity(), e.getMessage(), Toast.LENGTH_LONG).show();
            }
        });
    }

    /**
     * Progress-bar animation loop, run on the background thread started in
     * onActivityCreated(). Cycles the bar value from 1 to 100 in steps of 5
     * every 50 ms until isRoundProgressBarShown is cleared by the timeline
     * callback.
     *
     * NOTE(review): if the fragment is destroyed before the network callback
     * fires, this thread keeps running and referencing the view — confirm
     * lifecycle handling (e.g. stopping it in onDestroyView).
     */
    @Override
    public void run() {
        boolean running = true;
        int i=1;
        while(running){
            i+=5;
            if(i > 100){
                i=1;
            }
            roundProgressBar.setProgress(i);
            if(!isRoundProgressBarShown){
                running = isRoundProgressBarShown;
            }
            try {
                Thread.sleep(50);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
    }

    @Override
    public void onPause() {
        super.onPause();
        CURRENT_LISTVIEW_ITEM_POSITION = listView.getFirstVisiblePosition();// remember the first visible item
        Log.d("onPause记住当前位置:", CURRENT_LISTVIEW_ITEM_POSITION + "");
    }

    @Override
    public void onResume() {
        super.onResume();
        // When returning from another page, scroll the list back to where it was.
        listView.setSelection(CURRENT_LISTVIEW_ITEM_POSITION);// restore previous position
        Log.d("滚动到原来的位置:", CURRENT_LISTVIEW_ITEM_POSITION + "");
    }
}
apache-2.0
aws/aws-sdk-java
aws-java-sdk-inspector2/src/main/java/com/amazonaws/services/inspector2/model/ScanStatus.java
6197
/*
 * Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.inspector2.model;

import java.io.Serializable;
import javax.annotation.Generated;

import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;

/**
 * <p>
 * The status of the scan.
 * </p>
 *
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/inspector2-2020-06-08/ScanStatus" target="_top">AWS API
 *      Documentation</a>
 */
// NOTE(review): produced by the AWS SDK code generator (see @Generated below); avoid hand
// edits — presumably they would be overwritten on the next regeneration.
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class ScanStatus implements Serializable, Cloneable, StructuredPojo {

    /**
     * <p>
     * The reason for the scan.
     * </p>
     */
    private String reason;
    /**
     * <p>
     * The status code of the scan.
     * </p>
     */
    private String statusCode;

    /**
     * <p>
     * The reason for the scan.
     * </p>
     *
     * @param reason
     *        The reason for the scan.
     * @see ScanStatusReason
     */
    public void setReason(String reason) {
        this.reason = reason;
    }

    /**
     * <p>
     * The reason for the scan.
     * </p>
     *
     * @return The reason for the scan.
     * @see ScanStatusReason
     */
    public String getReason() {
        return this.reason;
    }

    /**
     * <p>
     * The reason for the scan.
     * </p>
     *
     * @param reason
     *        The reason for the scan.
     * @return Returns a reference to this object so that method calls can be chained together.
     * @see ScanStatusReason
     */
    public ScanStatus withReason(String reason) {
        setReason(reason);
        return this;
    }

    /**
     * <p>
     * The reason for the scan.
     * </p>
     *
     * @param reason
     *        The reason for the scan.
     * @return Returns a reference to this object so that method calls can be chained together.
     * @see ScanStatusReason
     */
    public ScanStatus withReason(ScanStatusReason reason) {
        // Enum overload stores the enum's string form; only the String field is serialized.
        this.reason = reason.toString();
        return this;
    }

    /**
     * <p>
     * The status code of the scan.
     * </p>
     *
     * @param statusCode
     *        The status code of the scan.
     * @see ScanStatusCode
     */
    public void setStatusCode(String statusCode) {
        this.statusCode = statusCode;
    }

    /**
     * <p>
     * The status code of the scan.
     * </p>
     *
     * @return The status code of the scan.
     * @see ScanStatusCode
     */
    public String getStatusCode() {
        return this.statusCode;
    }

    /**
     * <p>
     * The status code of the scan.
     * </p>
     *
     * @param statusCode
     *        The status code of the scan.
     * @return Returns a reference to this object so that method calls can be chained together.
     * @see ScanStatusCode
     */
    public ScanStatus withStatusCode(String statusCode) {
        setStatusCode(statusCode);
        return this;
    }

    /**
     * <p>
     * The status code of the scan.
     * </p>
     *
     * @param statusCode
     *        The status code of the scan.
     * @return Returns a reference to this object so that method calls can be chained together.
     * @see ScanStatusCode
     */
    public ScanStatus withStatusCode(ScanStatusCode statusCode) {
        this.statusCode = statusCode.toString();
        return this;
    }

    /**
     * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
     * redacted from this string using a placeholder value.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("{");
        if (getReason() != null)
            sb.append("Reason: ").append(getReason()).append(",");
        if (getStatusCode() != null)
            sb.append("StatusCode: ").append(getStatusCode());
        sb.append("}");
        return sb.toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;

        if (obj instanceof ScanStatus == false)
            return false;
        ScanStatus other = (ScanStatus) obj;
        // The XOR check catches "exactly one side null"; the follow-up check
        // compares values when both sides are non-null.
        if (other.getReason() == null ^ this.getReason() == null)
            return false;
        if (other.getReason() != null && other.getReason().equals(this.getReason()) == false)
            return false;
        if (other.getStatusCode() == null ^ this.getStatusCode() == null)
            return false;
        if (other.getStatusCode() != null && other.getStatusCode().equals(this.getStatusCode()) == false)
            return false;
        return true;
    }

    @Override
    public int hashCode() {
        // Standard 31-multiplier accumulation; null fields contribute 0,
        // keeping hashCode consistent with equals above.
        final int prime = 31;
        int hashCode = 1;

        hashCode = prime * hashCode + ((getReason() == null) ? 0 : getReason().hashCode());
        hashCode = prime * hashCode + ((getStatusCode() == null) ? 0 : getStatusCode().hashCode());
        return hashCode;
    }

    @Override
    public ScanStatus clone() {
        try {
            return (ScanStatus) super.clone();
        } catch (CloneNotSupportedException e) {
            // Unreachable: the class implements Cloneable.
            throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
        }
    }

    @com.amazonaws.annotation.SdkInternalApi
    @Override
    public void marshall(ProtocolMarshaller protocolMarshaller) {
        // Delegates wire serialization to the generated marshaller.
        com.amazonaws.services.inspector2.model.transform.ScanStatusMarshaller.getInstance().marshall(this, protocolMarshaller);
    }
}
apache-2.0
XClouded/t4f-core
java/class/src/test/java/io/aos/reflect/type/Examples.java
1684
package io.aos.reflect.type;

import io.aos.reflect.type.TypeResolver;

import java.io.Serializable;
import java.lang.reflect.Type;

import org.testng.annotations.Test;

/**
 * Usage examples for {@link TypeResolver}: resolving concrete type arguments
 * through a "layer supertype" hierarchy and from field/method declarations.
 */
@Test
public class Examples {

    static class Device {
    }

    static class Router extends Device {
    }

    /** Generic DAO whose constructor self-discovers its concrete type arguments. */
    static class GenericDAO<T, ID extends Serializable> {
        protected Class<T> persistentClass;
        protected Class<ID> idClass;

        @SuppressWarnings("unchecked")
        private GenericDAO() {
            // Resolve <T, ID> as bound by the concrete subclass being constructed.
            Class<?>[] resolved = TypeResolver.resolveArguments(getClass(), GenericDAO.class);
            this.persistentClass = (Class<T>) resolved[0];
            this.idClass = (Class<ID>) resolved[1];
        }
    }

    static class DeviceDAO<T extends Device> extends GenericDAO<T, Long> {
    }

    static class RouterDAO extends DeviceDAO<Router> {
    }

    public void shouldResolveLayerSuperTypeInfo() {
        // RouterDAO fixes T = Router (via DeviceDAO) and ID = Long.
        RouterDAO dao = new RouterDAO();
        assert dao.persistentClass == Router.class;
        assert dao.idClass == Long.class;
    }

    static class Entity<ID extends Serializable> {
        ID id;

        void setId(ID id) {
        }
    }

    static class SomeEntity extends Entity<Long> {
    }

    public void shouldResolveTypeFromFieldDeclaration() throws Exception {
        // The generic type of Entity.id, resolved against SomeEntity, is Long.
        Type idFieldType = Entity.class.getDeclaredField("id").getGenericType();
        assert TypeResolver.resolveClass(idFieldType, SomeEntity.class) == Long.class;
    }

    public void shouldResolveTypeFromMethodDeclaration() throws Exception {
        // The sole parameter of Entity.setId, resolved against SomeEntity, is Long.
        Type mutatorParamType = Entity.class.getDeclaredMethod("setId", Serializable.class)
                .getGenericParameterTypes()[0];
        assert TypeResolver.resolveClass(mutatorParamType, SomeEntity.class) == Long.class;
    }
}
apache-2.0
ryansgot/forsuredbcompiler
forsuredbapi/src/test/java/com/fsryan/forsuredb/api/FinderPaginationTest.java
9911
package com.fsryan.forsuredb.api;

import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

import java.util.Arrays;

import static org.junit.Assert.assertEquals;

/**
 * Parameterized tests for Finder pagination. {@code first(n, offset)} selects
 * from the top of the result set, {@code last(n, offset)} from the bottom; the
 * tests assert the resulting limit count, offset, and from-bottom flag exposed
 * via {@code selection().limits()}. Each row in the nested classes' data tables
 * is one scenario (see the per-row comments).
 */
public abstract class FinderPaginationTest extends FinderTest {

    // Inputs applied to the finder under test.
    private final int fromTop;
    private final int offsetFromTop;
    private final int fromBottom;
    private final int offsetFromBottom;
    // Expected outcomes read back from selection().limits().
    private final int expectedLimit;
    private final int expectedOffset;
    private final boolean expectedFromBottom;

    protected Finder finderUnderTest;

    public FinderPaginationTest(int fromTop, int offsetFromTop, int fromBottom, int offsetFromBottom, int expectedLimit, int expectedOffset, boolean expectedFromBottom) {
        this.fromTop = fromTop;
        this.offsetFromTop = offsetFromTop;
        this.fromBottom = fromBottom;
        this.offsetFromBottom = offsetFromBottom;
        this.expectedLimit = expectedLimit;
        this.expectedOffset = expectedOffset;
        this.expectedFromBottom = expectedFromBottom;
    }

    @Before
    public void setUpFinder() {
        // Apply both pagination directions; zero arguments are effectively no-ops.
        finderUnderTest = new Finder(mockResolver) {}
                .first(fromTop, offsetFromTop)
                .last(fromBottom, offsetFromBottom);
    }

    @Test
    public void shouldHaveExpectedLimit() {
        assertEquals(expectedLimit, finderUnderTest.selection().limits().count());
    }

    @Test
    public void shouldHaveExpectedOffset() {
        assertEquals(expectedOffset, finderUnderTest.selection().limits().offset());
    }

    @Test
    public void shouldHaveExpectedFromBottom() {
        assertEquals(expectedFromBottom, finderUnderTest.selection().limits().isBottom());
    }

    /** Direct first()/last() scenarios without any incorporated finder. */
    @RunWith(Parameterized.class)
    public static class LimitsTest extends FinderPaginationTest {

        public LimitsTest(int fromTop, int offsetFromTop, int fromBottom, int offsetFromBottom, int expectedLimit, int expectedOffset, boolean expectedFromBottom) {
            super(fromTop, offsetFromTop, fromBottom, offsetFromBottom, expectedLimit, expectedOffset, expectedFromBottom);
        }

        // Columns: fromTop, offsetFromTop, fromBottom, offsetFromBottom,
        //          expectedLimit, expectedOffset, expectedFromBottom
        @Parameterized.Parameters
        public static Iterable<Object[]> data() {
            return Arrays.asList(new Object[][] {
                    {-1, 0, 0, 0, 0, 0, false},         // 00: negative number for first results in zero count
                    {0, 0, -1, 0, 0, 0, false},         // 01: negative number for last results in zero count
                    {-1, 0, -1, 0, 0, 0, false},        // 02: negative number for both first and last results in zero count
                    {1, 0, 0, 0, 1, 0, false},          // 03: positive number for first results in that number offset--last false
                    {0, 0, 1, 0, 1, 0, true},           // 04: positive number for last results in that number offset--fromBottom true
                    {0, 1, 0, 0, 0, 0, false},          // 05: positive offset without first results in zero offset
                    {0, 0, 0, 1, 0, 0, false},          // 06: positive offset without last results in zero offset
                    {20, 10, 0, 0, 20, 10, false},      // 07: non-one numbers should be accurate first and offset
                    {0, 0, 20, 10, 20, 10, true},       // 08: non-one numbers should be accurate last and offset
            });
        }
    }

    /** Scenarios where a second finder's pagination is merged via incorporate(). */
    @RunWith(Parameterized.class)
    public static class Incorporate extends FinderPaginationTest {

        private Finder toIncorporate;
        // Pagination applied to the finder that gets incorporated into the parent.
        private final int fromTopToIncorporate;
        private final int offsetFromTopToIncorporate;
        private final int fromBottomToIncorporate;
        private final int offsetFromBottomToIncorporate;

        public Incorporate(int fromTop, int offsetFromTop, int fromBottom, int offsetFromBottom,
                           int fromTopToIncorporate, int offsetFromTopToIncorporate, int fromBottomToIncorporate, int offsetFromBottomToIncorporate,
                           int expectedLimit, int expectedOffset, boolean expectedFromBottom) {
            super(fromTop, offsetFromTop, fromBottom, offsetFromBottom, expectedLimit, expectedOffset, expectedFromBottom);
            this.fromTopToIncorporate = fromTopToIncorporate;
            this.offsetFromTopToIncorporate = offsetFromTopToIncorporate;
            this.fromBottomToIncorporate = fromBottomToIncorporate;
            this.offsetFromBottomToIncorporate = offsetFromBottomToIncorporate;
        }

        // Columns: parent fromTop, parent offsetFromTop, parent fromBottom, parent offsetFromBottom,
        //          incorporated fromTop, incorporated offsetFromTop, incorporated fromBottom, incorporated offsetFromBottom,
        //          expectedLimit, expectedOffset, expectedFromBottom
        @Parameterized.Parameters
        public static Iterable<Object[]> data() {
            return Arrays.asList(new Object[][] {
                    // The test cases where the finder to incorporate has all zeroes
                    {-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, false},         // 00: negative number for first results in zero count
                    {0, 0, -1, 0, 0, 0, 0, 0, 0, 0, false},         // 01: negative number for last results in zero count
                    {-1, 0, -1, 0, 0, 0, 0, 0, 0, 0, false},        // 02: negative number for both first and last results in zero count
                    {1, 0, 0, 0, 0, 0, 0, 0, 1, 0, false},          // 03: positive number for first results in that number offset--last false
                    {0, 0, 1, 0, 0, 0, 0, 0, 1, 0, true},           // 04: positive number for last results in that number offset--fromBottom true
                    {0, 1, 0, 0, 0, 0, 0, 0, 0, 0, false},          // 05: positive offset without first results in zero offset
                    {0, 0, 0, 1, 0, 0, 0, 0, 0, 0, false},          // 06: positive offset without last results in zero offset
                    {20, 10, 0, 0, 0, 0, 0, 0, 20, 10, false},      // 07: non-one numbers should be accurate first and offset
                    {0, 0, 20, 10, 0, 0, 0, 0, 20, 10, true},       // 08: non-one numbers should be accurate last and offset
                    // The reverse cases--where the parent finder has all zeroes
                    {0, 0, 0, 0, -1, 0, 0, 0, 0, 0, false},         // 09: negative number for first results in zero count
                    {0, 0, 0, 0, 0, 0, -1, 0, 0, 0, false},         // 10: negative number for last results in zero count
                    {0, 0, 0, 0, -1, 0, -1, 0, 0, 0, false},        // 11: negative number for both first and last results in zero count
                    {0, 0, 0, 0, 1, 0, 0, 0, 1, 0, false},          // 12: positive number for first results in that number offset--last false
                    {0, 0, 0, 0, 0, 0, 1, 0, 1, 0, true},           // 13: positive number for last results in that number offset--fromBottom true
                    {0, 0, 0, 0, 0, 1, 0, 0, 0, 0, false},          // 14: positive offset without first results in zero offset
                    {0, 0, 0, 0, 0, 0, 0, 1, 0, 0, false},          // 15: positive offset without last results in zero offset
                    {0, 0, 0, 0, 20, 10, 0, 0, 20, 10, false},      // 16: non-one numbers should be accurate first and offset
                    {0, 0, 0, 0, 0, 0, 20, 10, 20, 10, true},       // 17: non-one numbers should be accurate last and offset
                    // same offset and count
                    {3, 9, 0, 0, 3, 9, 0, 0, 3, 9, false},          // 18: same offset and first should not throw
                    {0, 0, 4, 5, 0, 0, 4, 5, 4, 5, true},           // 19: same offset and last should not throw
            });
        }

        @Before
        public void setUpFinderToIncorporateAndIncorporate() {
            // Build the second finder, then merge its pagination into the parent.
            toIncorporate = new Finder(mockResolver) {}
                    .first(fromTopToIncorporate, offsetFromTopToIncorporate)
                    .last(fromBottomToIncorporate, offsetFromBottomToIncorporate);
            finderUnderTest.incorporate(toIncorporate);
        }
    }

    /** Conflicting pagination combinations that must throw IllegalStateException. */
    public static class ExceptionCases extends FinderTest {

        @Test(expected = IllegalStateException.class)
        public void shouldThrowWhenCallingFromTopThenFromBottomWithPositiveIntegers() {
            new Finder(mockResolver) {}.first(1).last(1);
        }

        @Test(expected = IllegalStateException.class)
        public void shouldThrowWhenCallingBottomThenTopPositiveIntegers() {
            new Finder(mockResolver) {}.last(1).first(1);
        }

        @Test(expected = IllegalStateException.class)
        public void shouldThrowWhenIncorporatedFinderSpecifiesFromBottomAndTParentFinderFromTop() {
            Finder parent = new Finder(mockResolver) {}.last(1);
            Finder toIncorporate = new Finder(mockResolver) {}.first(1);
            parent.incorporate(toIncorporate);
        }

        @Test(expected = IllegalStateException.class)
        public void shouldThrowWhenIncorporatedFinderSpecifiesFromTopAndTParentFinderFromBottom() {
            Finder parent = new Finder(mockResolver) {}.first(1);
            Finder toIncorporate = new Finder(mockResolver) {}.last(1);
            parent.incorporate(toIncorporate);
        }

        @Test(expected = IllegalStateException.class)
        public void shouldThrowWhenIncorporatedFinderSpecifiesDifferentPositiveFromTop() {
            Finder parent = new Finder(mockResolver) {}.first(1);
            Finder toIncorporate = new Finder(mockResolver) {}.first(2);
            parent.incorporate(toIncorporate);
        }

        @Test(expected = IllegalStateException.class)
        public void shouldThrowWhenIncorporatedFinderSpecifiesDifferentPositiveFromBottom() {
            Finder parent = new Finder(mockResolver) {}.last(1);
            Finder toIncorporate = new Finder(mockResolver) {}.last(2);
            parent.incorporate(toIncorporate);
        }

        @Test(expected = IllegalStateException.class)
        public void shouldThrowWhenIncorporatedFinderSpecifiesDifferentPositiveOffset() {
            Finder parent = new Finder(mockResolver) {}.last(1, 5);
            Finder toIncorporate = new Finder(mockResolver) {}.last(1, 4);
            parent.incorporate(toIncorporate);
        }
    }
}
apache-2.0
DracoAnimus/Coding
src/main/java/net/wildbill22/draco/entities/ai/EntityAINearestAttackableDragon.java
3929
package net.wildbill22.draco.entities.ai; import java.util.Collections; import java.util.List; import net.minecraft.command.IEntitySelector; import net.minecraft.entity.Entity; import net.minecraft.entity.EntityCreature; import net.minecraft.entity.EntityLivingBase; import net.minecraft.entity.ai.EntityAINearestAttackableTarget; import net.minecraft.entity.ai.EntityAITarget; import net.minecraft.entity.player.EntityPlayer; import net.wildbill22.draco.entities.player.DragonPlayer; public class EntityAINearestAttackableDragon extends EntityAITarget { @SuppressWarnings("rawtypes") private final Class targetClass; private final int targetChance; /** Instance of EntityAINearestAttackableTargetSorter. */ private final EntityAINearestAttackableTarget.Sorter theNearestAttackableTargetSorter; /** * This filter is applied to the Entity search. Only matching entities will be targetted. (null -> no * restrictions) */ private final IEntitySelector targetEntitySelector; private EntityLivingBase targetEntity; // private static final String __OBFID = "CL_00001620"; public EntityAINearestAttackableDragon(EntityCreature p_i1663_1_, int p_i1663_3_, boolean p_i1663_4_) { this(p_i1663_1_, p_i1663_3_, p_i1663_4_, false); } public EntityAINearestAttackableDragon(EntityCreature p_i1664_1_, int p_i1664_3_, boolean p_i1664_4_, boolean p_i1664_5_) { this(p_i1664_1_, p_i1664_3_, p_i1664_4_, p_i1664_5_, (IEntitySelector)null); } public EntityAINearestAttackableDragon(EntityCreature attacker, int p_i1665_3_, boolean p_i1665_4_, boolean p_i1665_5_, final IEntitySelector p_i1665_6_) { super(attacker, p_i1665_4_, p_i1665_5_); this.targetClass = EntityPlayer.class; this.targetChance = p_i1665_3_; this.theNearestAttackableTargetSorter = new EntityAINearestAttackableTarget.Sorter(attacker); this.setMutexBits(1); this.targetEntitySelector = new IEntitySelector() { // private static final String __OBFID = "CL_00001621"; /** * Return whether the specified entity is applicable to this filter. 
*/ public boolean isEntityApplicable(Entity p_82704_1_) { return !(p_82704_1_ instanceof EntityLivingBase) ? false : (p_i1665_6_ != null && !p_i1665_6_.isEntityApplicable(p_82704_1_) ? false : EntityAINearestAttackableDragon.this.isSuitableTarget((EntityLivingBase)p_82704_1_, false)); } }; } /** * Returns whether the EntityAIBase should begin execution. */ @SuppressWarnings("unchecked") public boolean shouldExecute() { if (this.targetChance > 0 && this.taskOwner.getRNG().nextInt(this.targetChance) != 0) { return false; } else { double d0 = this.getTargetDistance(); List<?> list = this.taskOwner.worldObj.selectEntitiesWithinAABB(this.targetClass, this.taskOwner.boundingBox.expand(d0, 4.0D, d0), this.targetEntitySelector); Collections.sort(list, this.theNearestAttackableTargetSorter); // Don't attack humans players for (int i = 0; i < list.size(); i++) { this.targetEntity = (EntityLivingBase)list.get(i); if (this.targetEntity instanceof EntityPlayer) { if (DragonPlayer.get((EntityPlayer) this.targetEntity).isDragon() && !this.targetEntity.isInvisible()) { return true; } } else { return true; } } return false; } } /** * Execute a one shot task or start executing a continuous task */ public void startExecuting() { this.taskOwner.setAttackTarget(this.targetEntity); super.startExecuting(); } }
apache-2.0
ystromm/learn-selenium
webapp-angular1/src/test/java/com/github/ystromm/learn_selenium/webapp_angular1/spring/TodosSpringTest.java
7328
package com.github.ystromm.learn_selenium.webapp_angular1.spring;

import com.github.ystromm.learn_selenium.backend_api.Todo;
import com.github.ystromm.learn_selenium.backend_impl.BackendMain;
import com.github.ystromm.learn_selenium.backend_impl.TodoRepository;
import com.github.ystromm.learn_selenium.webapp_angular1.PropertyNames;
import com.github.ystromm.learn_selenium.webapp_angular1.WebappMain;
import com.github.ystromm.learn_selenium.webapp_angular1.pages.TodosPage;
import com.github.ystromm.learn_selenium.webapp_angular1.webdriver.Firefox;
import org.junit.*;
import org.junit.rules.TestName;
import org.junit.runner.RunWith;
import org.mockito.Mockito;
import org.openqa.selenium.WebDriver;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.context.embedded.LocalServerPort;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ActiveProfiles;
import org.springframework.test.context.junit4.SpringRunner;

import java.io.IOException;
import java.util.Optional;

import static com.github.ystromm.learn_selenium.webapp_angular1.spring.Todos.checkedTodo;
import static com.github.ystromm.learn_selenium.webapp_angular1.spring.Todos.todo;
import static com.github.ystromm.learn_selenium.webapp_angular1.webdriver.Screenshot.screenshot;
import static com.github.ystromm.learn_selenium.webapp_angular1.webdriver.WebElementMatchers.isDisplayed;
import static java.util.Collections.emptyList;
import static java.util.Collections.singletonList;
import static org.awaitility.Awaitility.await;
import static org.awaitility.Duration.ONE_SECOND;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.*;
import static org.mockito.Mockito.*;
import static org.springframework.boot.test.context.SpringBootTest.WebEnvironment.RANDOM_PORT;

/**
 * Browser-driven (Firefox/Selenium) tests of the AngularJS todos page, run
 * against the webapp and backend booted on a random port. The
 * {@code TodoRepository} is stubbed with Mockito (see {@code reset(...)} in
 * setup), so each test scripts the backend data and then verifies the page's
 * behavior and the repository interactions.
 */
@ActiveProfiles("test")
@RunWith(SpringRunner.class)
@SpringBootTest(classes = {WebappMain.class, BackendMain.class}, webEnvironment = RANDOM_PORT)
public class TodosSpringTest {

    // Exposes the current test method name; used to name the screenshot in teardown.
    @Rule
    public TestName testName = new TestName();

    // Random port assigned to the embedded server for this test run.
    @LocalServerPort
    private int localServerPort;

    private WebDriver webDriver;
    private TodosPage todosPage;

    // Injected repository; stubbed/verified via Mockito in the tests below.
    @Autowired
    private TodoRepository todoRepository;

    /** Starts a Firefox driver and points the page object at the local server. */
    @Before
    public void setUpWebDriver() throws IOException {
        // System.setProperty("webdriver.chrome.driver", "./chromedriver.exe");
        webDriver = Firefox.firefoxDriver();
        todosPage = new TodosPage(webDriver, "http://localhost:" + localServerPort);
    }

    /** Propagates the random server port to the properties the webapp reads. */
    @Before
    public void setUpProperties() {
        System.setProperty(PropertyNames.BACKEND_SERVER_PORT, Integer.toString(localServerPort));
        System.setProperty(PropertyNames.TODOS_URL, "http://localhost:" + localServerPort);
    }

    @Before
    public void setUpTodoRepository() {
        // the mock gets re-used between test cases, kind of nasty
        reset(todoRepository);
    }

    /** Captures a screenshot named after the test, then quits the browser. */
    @After
    public void tearDownWebDriver() throws IOException {
        screenshot(webDriver, getClass().getSimpleName() + "_" + testName.getMethodName());
        webDriver.quit();
    }

    /** Opening the page must fetch the todos from the repository. */
    @Test
    public void open_should_get_todos() {
        when(todoRepository.getAll()).thenReturn(emptyList());
        todosPage.open();
        await().atMost(ONE_SECOND).until(() ->
                verify(todoRepository).getAll()
        );
    }

    /** A successful load must not show the error element. */
    @Test
    public void open_should_not_show_error() {
        when(todoRepository.getAll()).thenReturn(emptyList());
        todosPage.open();
        assertThat(todosPage.getError(), not(isDisplayed()));
    }

    /** An empty repository renders an empty todo list. */
    @Test
    public void open_should_show_no_todos() {
        when(todoRepository.getAll()).thenReturn(emptyList());
        todosPage.open();
        await().atMost(ONE_SECOND).until(() ->
                assertThat(todosPage.getTodoItems(), empty())
        );
    }

    /** A single stored todo renders a single list item. */
    @Test
    public void open_should_show_todos() {
        when(todoRepository.getAll()).thenReturn(singletonList(todo()));
        todosPage.open();
        await().atMost(ONE_SECOND).until(() ->
                assertThat(todosPage.getTodoItems(), hasSize(1))
        );
    }

    /** A repository failure surfaces as the page's error element. */
    @Test
    public void open_should_show_error() {
        when(todoRepository.getAll()).thenThrow(new IllegalStateException("Testing internal error!"));
        todosPage.open();
        assertThat(todosPage.getError(), isDisplayed());
    }

    /** Adding an item via the page must call repository create with that text. */
    @Test
    public void add_should_create() {
        final Todo todo = Todo.builder().text("To do!").build();
        when(todoRepository.getAll()).thenReturn(emptyList());
        when(todoRepository.create(Mockito.any(Todo.class))).thenReturn(todo());
        todosPage.open();
        todosPage.addTodoItem(todo.getText());
        await().atMost(ONE_SECOND).until(() ->
                verify(todoRepository).create(todo)
        );
    }

    /** After adding, the page reloads the list (getAll called twice). */
    @Ignore("Fails on travis")
    @Test
    public void add_should_getAll_twice() {
        final Todo todo = Todo.builder().text("To do!").build();
        when(todoRepository.getAll()).thenReturn(emptyList());
        when(todoRepository.create(Mockito.any(Todo.class))).thenReturn(todo());
        todosPage.open();
        todosPage.addTodoItem(todo.getText());
        await().atMost(ONE_SECOND).until(() ->
                verify(todoRepository, times(2)).getAll()
        );
    }

    /** Checking the first item's checkbox must update the todo as checked. */
    @Ignore("Fails on purpose")
    @Test
    public void click_checkbox_should_update() {
        when(todoRepository.getAll()).thenReturn(singletonList(todo()));
        when(todoRepository.update(checkedTodo())).thenReturn(Optional.of(checkedTodo()));
        todosPage.open();
        await().atMost(ONE_SECOND).until(() -> assertThat(todosPage.getTodoItems(), hasSize(1)));
        todosPage.clickFirstCheckbox();
        verify(todoRepository).update(checkedTodo());
    }

    /** After checking an item, the page reloads the list (getAll called twice). */
    @Test
    public void click_checkbox_should_getAll_twice() {
        when(todoRepository.getAll()).thenReturn(singletonList(todo()));
        when(todoRepository.update(checkedTodo())).thenReturn(Optional.of(checkedTodo()));
        todosPage.open();
        await().atMost(ONE_SECOND).until(() -> assertThat(todosPage.getTodoItems(), hasSize(1)));
        todosPage.clickFirstCheckbox();
        verify(todoRepository, times(2)).getAll();
    }

    /** Clicking delete on the first item must remove it by id. */
    @Test
    public void click_delete_should_remove() {
        final Todo todo = todo();
        when(todoRepository.getAll()).thenReturn(singletonList(todo));
        when(todoRepository.remove(todo.getId())).thenReturn(Optional.of(todo));
        todosPage.open();
        await().atMost(ONE_SECOND).until(() -> assertThat(todosPage.getTodoItems(), hasSize(1)));
        todosPage.clickFirstDelete();
        verify(todoRepository).remove(todo.getId());
    }

    /** After deleting, the page reloads the list (getAll called twice). */
    @Ignore("Fails on purpose")
    @Test
    public void click_delete_should_getAll_twice() {
        final Todo todo = todo();
        when(todoRepository.getAll()).thenReturn(singletonList(todo));
        when(todoRepository.remove(todo.getId())).thenReturn(Optional.of(todo));
        todosPage.open();
        await().atMost(ONE_SECOND).until(() -> assertThat(todosPage.getTodoItems(), hasSize(1)));
        todosPage.clickFirstDelete();
        verify(todoRepository, times(2)).getAll();
    }
}
apache-2.0
PengGeTeach/Tea
app/src/main/java/com/phone1000/chayu/Interface/FragmentChangeInterface.java
225
package com.phone1000.chayu.Interface; /** * Created by Administrator on 2016/12/2 0002. */ public interface FragmentChangeInterface { void shijiMoreClick(); void quanziMoreClick(); void wenzhangMoreClick(); }
apache-2.0
twang2218/material-design-toolkit
lib/src/main/java/org/lab99/mdt/drawable/ProxyDrawable.java
6060
package org.lab99.mdt.drawable; import android.annotation.TargetApi; import android.content.res.Resources; import android.graphics.Canvas; import android.graphics.ColorFilter; import android.graphics.PixelFormat; import android.graphics.Rect; import android.graphics.drawable.Drawable; import android.os.Build; /** * A drawable will act as a proxy */ class ProxyDrawable extends Drawable implements Drawable.Callback { private ProxyState mState; public ProxyDrawable(Drawable original) { this(original, null); } ProxyDrawable(Drawable original, ProxyState state) { this(state, null); mState.setOriginal(original); mState.setCallback(this); if (original != null && !original.getBounds().isEmpty()) { super.setBounds(original.getBounds()); } } ProxyDrawable(ProxyState state, Resources res) { mState = createConstantState(state, res); } // Overrides of Drawable @Override public void draw(Canvas canvas) { if (getOriginal() != null) getOriginal().draw(canvas); } @Override public void setColorFilter(ColorFilter cf) { if (getOriginal() != null) getOriginal().setColorFilter(cf); } @Override public int getOpacity() { if (getOriginal() != null) return getOriginal().getOpacity(); else return PixelFormat.TRANSPARENT; } @TargetApi(Build.VERSION_CODES.KITKAT) @Override public int getAlpha() { if (getOriginal() != null) return getOriginal().getAlpha(); else return super.getAlpha(); } @Override public void setAlpha(int alpha) { if (getOriginal() != null) getOriginal().setAlpha(alpha); } @Override public boolean setVisible(boolean visible, boolean restart) { if (getOriginal() != null) return getOriginal().setVisible(visible, restart); else return super.setVisible(visible, restart); } @Override public boolean isStateful() { return getOriginal() != null && getOriginal().isStateful(); } @Override protected boolean onStateChange(int[] state) { if (getOriginal() != null) { boolean changed = getOriginal().setState(state); onBoundsChange(getBounds()); return changed; } else { return 
super.onStateChange(state); } } @Override protected void onBoundsChange(Rect bounds) { if (getOriginal() != null) getOriginal().setBounds(bounds); } @Override public int getIntrinsicWidth() { if (getOriginal() != null) return getOriginal().getIntrinsicWidth(); else return super.getIntrinsicWidth(); } @Override public int getIntrinsicHeight() { if (getOriginal() != null) return getOriginal().getIntrinsicHeight(); else return super.getIntrinsicHeight(); } @Override public int getMinimumWidth() { if (getOriginal() != null) return getOriginal().getMinimumWidth(); else return super.getMinimumWidth(); } @Override public int getMinimumHeight() { if (getOriginal() != null) return getOriginal().getMinimumHeight(); else return super.getMinimumHeight(); } @Override public ConstantState getConstantState() { return mState; } // Overrides of Drawable.Callback @Override public void invalidateDrawable(Drawable who) { invalidateSelf(); } @Override public void scheduleDrawable(Drawable who, Runnable what, long when) { scheduleSelf(what, when); } @Override public void unscheduleDrawable(Drawable who, Runnable what) { unscheduleSelf(what); } // Getters/Setters public Drawable getOriginal() { ProxyState state = (ProxyState) getConstantState(); return state.getOriginal(); } public void setOriginal(Drawable drawable) { if (drawable != null) { drawable.setBounds(getBounds()); } ProxyState state = (ProxyState) getConstantState(); state.setOriginal(drawable); state.setCallback(this); } protected ProxyState createConstantState(ProxyState orig, Resources res) { return new ProxyState(orig, res); } // ProxyState static class ProxyState extends Drawable.ConstantState { private Drawable mOriginal; ProxyState(ProxyState orig, Resources res) { if (orig != null) { initWithState(orig, res); } else { initWithoutState(res); } } @Override public Drawable newDrawable(Resources res) { return new ProxyDrawable(this, res); } @Override public Drawable newDrawable() { return new ProxyDrawable(this, null); } 
@Override public int getChangingConfigurations() { return mOriginal != null ? mOriginal.getChangingConfigurations() : 0; } public Drawable getOriginal() { return mOriginal; } public void setOriginal(Drawable original) { if (original != mOriginal) { mOriginal = original; } } protected void setCallback(Drawable.Callback callback) { if (mOriginal != null) { mOriginal.setCallback(callback); } } protected void initWithState(ProxyState orig, Resources res) { if (orig.mOriginal != null) { mOriginal = orig.mOriginal.getConstantState().newDrawable(res); } } protected void initWithoutState(Resources res) { } protected boolean verifyDrawable(Drawable who) { return who == getOriginal(); } } }
apache-2.0
shakamunyi/drill
exec/jdbc/src/test/java/org/apache/drill/jdbc/test/Drill2769UnsupportedReportsUseSqlExceptionTest.java
15958
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.drill.jdbc.test;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.fail;
import static org.slf4j.LoggerFactory.getLogger;

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;

import org.apache.drill.common.util.TestTools;
import org.apache.drill.jdbc.AlreadyClosedSqlException;
import org.apache.drill.jdbc.Driver;
import org.apache.drill.jdbc.JdbcTestBase;
import org.apache.drill.categories.JdbcTest;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestRule;
import org.slf4j.Logger;

/**
 * Test that non-SQLException exceptions used by Drill's current version of
 * Avatica to indicate unsupported features are wrapped in or mapped to
 * SQLException exceptions.
 *
 * <p>
 * As of 2015-08-24, Drill's version of Avatica used non-SQLException exception
 * class to report that methods/features were not implemented.
 * </p>
 * <pre>
 *   5 UnsupportedOperationException in ArrayImpl
 *  29 UnsupportedOperationException in AvaticaConnection
 *  10 Helper.todo() (RuntimeException) in AvaticaDatabaseMetaData
 *  21 UnsupportedOperationException in AvaticaStatement
 *   4 UnsupportedOperationException in AvaticaPreparedStatement
 * 103 UnsupportedOperationException in AvaticaResultSet
 * </pre>
 */
@Category(JdbcTest.class)
public class Drill2769UnsupportedReportsUseSqlExceptionTest extends JdbcTestBase {
  private static final Logger logger = getLogger(Drill2769UnsupportedReportsUseSqlExceptionTest.class);

  // Per-test timeout; reflective sweeps over DatabaseMetaData can be slow.
  @Rule
  public TestRule TIMEOUT = TestTools.getTimeoutRule(180_000 /* ms */);

  // Shared JDBC fixtures, created once in setUpObjects() and swept by the tests.
  private static Connection connection;
  private static Statement plainStatement;
  private static PreparedStatement preparedStatement;
  // No CallableStatement.
  private static ResultSet resultSet;
  private static ResultSetMetaData resultSetMetaData;
  private static DatabaseMetaData databaseMetaData;

  /**
   * Opens a local Drill connection and builds every JDBC object the reflective
   * checkers exercise; also self-checks that nothing is unexpectedly closed.
   */
  @BeforeClass
  public static void setUpObjects() throws Exception {
    // (Note: Can't use JdbcTest's connect(...) for this test class.)
    connection = new Driver().connect("jdbc:drill:zk=local", JdbcAssert.getDefaultProperties());

    plainStatement = connection.createStatement();
    preparedStatement = connection.prepareStatement("VALUES 'PreparedStatement query'");

    try {
      connection.prepareCall("VALUES 'CallableStatement query'");
      fail("Test seems to be out of date. Was prepareCall(...) implemented?");
    } catch (SQLException | UnsupportedOperationException e) {
      // Expected.
    }
    try {
      connection.createArrayOf("INTEGER", new Object[0]);
      fail("Test seems to be out of date. Were arrays implemented?");
    } catch (SQLException | UnsupportedOperationException e) {
      // Expected.
    }

    resultSet = plainStatement.executeQuery("VALUES 'plain Statement query'");
    resultSet.next();

    resultSetMetaData = resultSet.getMetaData();
    databaseMetaData = connection.getMetaData();

    // Self-check that member variables are set:
    assertFalse("Test setup error", connection.isClosed());
    assertFalse("Test setup error", plainStatement.isClosed());
    assertFalse("Test setup error", preparedStatement.isClosed());
    assertFalse("Test setup error", resultSet.isClosed());
    // (No ResultSetMetaData.isClosed() or DatabaseMetaData.isClosed():)
    assertNotNull("Test setup error", resultSetMetaData);
    assertNotNull("Test setup error", databaseMetaData);
  }

  @AfterClass
  public static void tearDownConnection() throws Exception {
    connection.close();
  }

  /**
   * Reflection-based checker that exceptions thrown by JDBC interfaces'
   * implementation methods for unsupported-operation cases are SQLExceptions
   * (not UnsupportedOperationExceptions).
   *
   * @param <INTF> JDBC interface type
   */
  private static class NoNonSqlExceptionsChecker<INTF> {
    private final Class<INTF> jdbcIntf;
    private final INTF jdbcObject;

    // Human-readable result lines accumulated by testOneMethod().
    private final StringBuilder failureLinesBuf = new StringBuilder();
    private final StringBuilder successLinesBuf = new StringBuilder();

    NoNonSqlExceptionsChecker(final Class<INTF> jdbcIntf, final INTF jdbcObject) {
      this.jdbcIntf = jdbcIntf;
      this.jdbcObject = jdbcObject;
    }

    /**
     * Hook/factory method to allow context to provide fresh object for each
     * method. Needed for Statement and PrepareStatement, whose execute...
     * methods can close the statement (at least given our minimal dummy
     * argument values).
     */
    protected INTF getJdbcObject() throws SQLException {
      return jdbcObject;
    }

    /**
     * Gets minimal value suitable for use as actual parameter value for given
     * formal parameter type.
     */
    private static Object getDummyValueForType(Class<?> type) {
      final Object result;
      if (!type.isPrimitive()) {
        result = null;
      } else {
        if (type == boolean.class) {
          result = false;
        } else if (type == byte.class) {
          result = (byte) 0;
        } else if (type == short.class) {
          result = (short) 0;
        } else if (type == int.class) {
          result = 0;
        } else if (type == long.class) {
          result = (long) 0L;
        } else if (type == float.class) {
          result = 0F;
        } else if (type == double.class) {
          result = 0.0;
        } else {
          fail("Test needs to be updated to handle type " + type);
          result = null;  // Not executed; for "final".
        }
      }
      return result;
    }

    /**
     * Assembles method signature text for given method.
     */
    private String makeLabel(Method method) {
      String methodLabel;
      methodLabel = jdbcIntf.getSimpleName() + "." + method.getName() + "(";
      boolean first = true;
      for (Class<?> paramType : method.getParameterTypes()) {
        if (! first) {
          methodLabel += ", ";
        }
        first = false;
        methodLabel += paramType.getSimpleName();
      }
      methodLabel += ")";
      return methodLabel;
    }

    /**
     * Assembles (minimal) arguments array for given method.
     */
    private Object[] makeArgs(Method method) {
      final List<Object> argsList = new ArrayList<>();
      for (Class<?> paramType : method.getParameterTypes()) {
        argsList.add(getDummyValueForType(paramType));
      }
      Object[] argsArray = argsList.toArray();
      return argsArray;
    }

    /**
     * Tests one method.
     * (Disturbs members set by makeArgsAndLabel, but those shouldn't be used
     * except by this method.)
     */
    private void testOneMethod(Method method) {
      final String methodLabel = makeLabel(method);

      try {
        final INTF jdbcObject;
        try {
          jdbcObject = getJdbcObject();
        } catch (SQLException e) {
          fail("Unexpected exception: " + e + " from getJdbcObject()");
          throw new RuntimeException("DUMMY; so compiler know block throws");
        }

        // See if method throws exception:
        method.invoke(jdbcObject, makeArgs(method));

        // If here, method didn't throw--check if it's an expected non-throwing
        // method (e.g., an isClosed). (If not, report error.)
        final String resultLine = "- " + methodLabel + " didn't throw\n";
        successLinesBuf.append(resultLine);
      } catch (InvocationTargetException wrapperEx) {
        final Throwable cause = wrapperEx.getCause();
        final String resultLine = "- " + methodLabel + " threw <" + cause + ">\n";
        if (SQLException.class.isAssignableFrom(cause.getClass())
            && ! AlreadyClosedSqlException.class.isAssignableFrom(cause.getClass())) {
          // Good case--almost any exception should be SQLException or subclass
          // (but make sure not accidentally closed).
          successLinesBuf.append(resultLine);
        } else if (NullPointerException.class == cause.getClass()
                   && (method.getName().equals("isWrapperFor")
                       || method.getName().equals("unwrap"))) {
          // Known good-enough case--these methods throw NullPointerException
          // because of the way we call them (with null) and the way Avatica
          // code implements them.
          successLinesBuf.append(resultLine);
        } else if (isOkaySpecialCaseException(method, cause)) {
          successLinesBuf.append(resultLine);
        } else {
          final String badResultLine =
              "- " + methodLabel + " threw <" + cause + "> instead"
              + " of a " + SQLException.class.getSimpleName() + "\n";
          logger.trace("Failure: " + resultLine);
          failureLinesBuf.append(badResultLine);
        }
      } catch (IllegalAccessException | IllegalArgumentException e) {
        fail("Unexpected exception: " + e + ", cause = " + e.getCause() + " from " + method);
      }
    }

    /** Sweeps every public method of the interface (except close()). */
    public void testMethods() {
      for (Method method : jdbcIntf.getMethods()) {
        final String methodLabel = makeLabel(method);
        if ("close".equals(method.getName())) {
          logger.debug("Skipping (because closes): " + methodLabel);
        }
        /* Uncomment to suppress calling DatabaseMetaData.getColumns(...), which
           sometimes takes about 2 minutes, and other DatabaseMetaData methods
           that query, collectively taking a while too:
        else if (DatabaseMetaData.class == jdbcIntf
                 && "getColumns".equals(method.getName())) {
          logger.debug("Skipping (because really slow): " + methodLabel);
        }
        else if (DatabaseMetaData.class == jdbcIntf
                 && ResultSet.class == method.getReturnType()) {
          logger.debug("Skipping (because a bit slow): " + methodLabel);
        }
        */
        else {
          logger.debug("Testing method " + methodLabel);
          testOneMethod(method);
        }
      }
    }

    /**
     * Reports whether it's okay if given method throw given exception (that is
     * not preferred AlreadyClosedException with regular message).
     */
    protected boolean isOkaySpecialCaseException(Method method, Throwable cause) {
      return false;
    }

    public boolean hadAnyFailures() {
      return 0 != failureLinesBuf.length();
    }

    public String getFailureLines() {
      return failureLinesBuf.toString();
    }

    public String getSuccessLines() {
      return successLinesBuf.toString();
    }

    public String getReport() {
      final String report =
          "Failures:\n"
          + getFailureLines()
          + "(Successes:\n"
          + getSuccessLines()
          + ")";
      return report;
    }
  } // class NoNonSqlExceptionsChecker<INTF>

  @Test
  public void testConnectionMethodsThrowRight() {
    NoNonSqlExceptionsChecker<Connection> checker =
        new NoNonSqlExceptionsChecker<Connection>(Connection.class, connection);
    checker.testMethods();
    if (checker.hadAnyFailures()) {
      System.err.println(checker.getReport());
      fail("Non-SQLException exception error(s): \n" + checker.getReport());
    }
  }

  /** Checker that makes a fresh Statement per method (execute... can close it). */
  private static class PlainStatementChecker extends NoNonSqlExceptionsChecker<Statement> {
    private final Connection factoryConnection;

    PlainStatementChecker(Connection factoryConnection) {
      super(Statement.class, null);
      this.factoryConnection = factoryConnection;
    }

    @Override
    protected Statement getJdbcObject() throws SQLException {
      return factoryConnection.createStatement();
    }

    @Override
    protected boolean isOkaySpecialCaseException(Method method, Throwable cause) {
      // New Java 8 method not supported by Avatica
      return method.getName().equals( "executeLargeBatch" );
    }
  } // class PlainStatementChecker

  @Test
  public void testPlainStatementMethodsThrowRight() {
    NoNonSqlExceptionsChecker<Statement> checker = new PlainStatementChecker(connection);
    checker.testMethods();
    if (checker.hadAnyFailures()) {
      fail("Non-SQLException exception error(s): \n" + checker.getReport());
    }
  }

  /** Checker that makes a fresh PreparedStatement per method. */
  private static class PreparedStatementChecker extends NoNonSqlExceptionsChecker<PreparedStatement> {
    private final Connection factoryConnection;

    PreparedStatementChecker(Connection factoryConnection) {
      super(PreparedStatement.class, null);
      this.factoryConnection = factoryConnection;
    }

    @Override
    protected PreparedStatement getJdbcObject() throws SQLException {
      return factoryConnection.prepareStatement("VALUES 1");
    }

    @Override
    protected boolean isOkaySpecialCaseException(Method method, Throwable cause) {
      // New Java 8 method not supported by Avatica
      return method.getName().equals( "executeLargeBatch" );
    }
  } // class PreparedStatementChecker

  @Test
  public void testPreparedStatementMethodsThrowRight() {
    NoNonSqlExceptionsChecker<PreparedStatement> checker =
        new PreparedStatementChecker(connection);
    checker.testMethods();
    if (checker.hadAnyFailures()) {
      fail("Non-SQLException exception error(s): \n" + checker.getReport());
    }
  }

  @Test
  public void testResultSetMethodsThrowRight() {
    NoNonSqlExceptionsChecker<ResultSet> checker =
        new NoNonSqlExceptionsChecker<ResultSet>(ResultSet.class, resultSet);
    checker.testMethods();
    if (checker.hadAnyFailures()) {
      fail("Non-SQLException exception error(s): \n" + checker.getReport());
    }
  }

  @Test
  public void testResultSetMetaDataMethodsThrowRight() {
    NoNonSqlExceptionsChecker<ResultSetMetaData> checker =
        new NoNonSqlExceptionsChecker<ResultSetMetaData>(ResultSetMetaData.class, resultSetMetaData);
    checker.testMethods();
    if (checker.hadAnyFailures()) {
      fail("Non-SQLException exception error(s): \n" + checker.getReport());
    }
  }

  @Test
  public void testDatabaseMetaDataMethodsThrowRight() {
    NoNonSqlExceptionsChecker<DatabaseMetaData> checker =
        new NoNonSqlExceptionsChecker<DatabaseMetaData>(DatabaseMetaData.class, databaseMetaData);
    checker.testMethods();
    if (checker.hadAnyFailures()) {
      fail("Non-SQLException exception error(s): \n" + checker.getReport());
    }
  }
}
apache-2.0
jentfoo/aws-sdk-java
aws-java-sdk-ssm/src/main/java/com/amazonaws/services/simplesystemsmanagement/model/transform/UpdateOpsItemRequestMarshaller.java
4662
/*
 * Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.simplesystemsmanagement.model.transform;

import java.util.Map;
import java.util.List;
import javax.annotation.Generated;

import com.amazonaws.SdkClientException;
import com.amazonaws.services.simplesystemsmanagement.model.*;

import com.amazonaws.protocol.*;
import com.amazonaws.annotation.SdkInternalApi;

/**
 * UpdateOpsItemRequestMarshaller
 *
 * Code-generated marshaller: maps each UpdateOpsItemRequest member to its JSON
 * payload location via a MarshallingInfo binding. Do not hand-edit logic here;
 * changes will be lost on SDK regeneration.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
@SdkInternalApi
public class UpdateOpsItemRequestMarshaller {

    // One binding per request member; each names the JSON payload field it maps to.
    private static final MarshallingInfo<String> DESCRIPTION_BINDING = MarshallingInfo.builder(MarshallingType.STRING)
            .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("Description").build();
    private static final MarshallingInfo<Map> OPERATIONALDATA_BINDING = MarshallingInfo.builder(MarshallingType.MAP).marshallLocation(MarshallLocation.PAYLOAD)
            .marshallLocationName("OperationalData").build();
    private static final MarshallingInfo<List> OPERATIONALDATATODELETE_BINDING = MarshallingInfo.builder(MarshallingType.LIST)
            .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("OperationalDataToDelete").build();
    private static final MarshallingInfo<List> NOTIFICATIONS_BINDING = MarshallingInfo.builder(MarshallingType.LIST).marshallLocation(MarshallLocation.PAYLOAD)
            .marshallLocationName("Notifications").build();
    private static final MarshallingInfo<Integer> PRIORITY_BINDING = MarshallingInfo.builder(MarshallingType.INTEGER)
            .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("Priority").build();
    private static final MarshallingInfo<List> RELATEDOPSITEMS_BINDING = MarshallingInfo.builder(MarshallingType.LIST)
            .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("RelatedOpsItems").build();
    private static final MarshallingInfo<String> STATUS_BINDING = MarshallingInfo.builder(MarshallingType.STRING).marshallLocation(MarshallLocation.PAYLOAD)
            .marshallLocationName("Status").build();
    private static final MarshallingInfo<String> OPSITEMID_BINDING = MarshallingInfo.builder(MarshallingType.STRING).marshallLocation(MarshallLocation.PAYLOAD)
            .marshallLocationName("OpsItemId").build();
    private static final MarshallingInfo<String> TITLE_BINDING = MarshallingInfo.builder(MarshallingType.STRING).marshallLocation(MarshallLocation.PAYLOAD)
            .marshallLocationName("Title").build();

    // Stateless, hence safely shared as a singleton.
    private static final UpdateOpsItemRequestMarshaller instance = new UpdateOpsItemRequestMarshaller();

    public static UpdateOpsItemRequestMarshaller getInstance() {
        return instance;
    }

    /**
     * Marshall the given parameter object.
     *
     * @throws SdkClientException if the request is null or marshalling fails.
     */
    public void marshall(UpdateOpsItemRequest updateOpsItemRequest, ProtocolMarshaller protocolMarshaller) {

        if (updateOpsItemRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }

        try {
            protocolMarshaller.marshall(updateOpsItemRequest.getDescription(), DESCRIPTION_BINDING);
            protocolMarshaller.marshall(updateOpsItemRequest.getOperationalData(), OPERATIONALDATA_BINDING);
            protocolMarshaller.marshall(updateOpsItemRequest.getOperationalDataToDelete(), OPERATIONALDATATODELETE_BINDING);
            protocolMarshaller.marshall(updateOpsItemRequest.getNotifications(), NOTIFICATIONS_BINDING);
            protocolMarshaller.marshall(updateOpsItemRequest.getPriority(), PRIORITY_BINDING);
            protocolMarshaller.marshall(updateOpsItemRequest.getRelatedOpsItems(), RELATEDOPSITEMS_BINDING);
            protocolMarshaller.marshall(updateOpsItemRequest.getStatus(), STATUS_BINDING);
            protocolMarshaller.marshall(updateOpsItemRequest.getOpsItemId(), OPSITEMID_BINDING);
            protocolMarshaller.marshall(updateOpsItemRequest.getTitle(), TITLE_BINDING);
        } catch (Exception e) {
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
apache-2.0
kingsmiler/trainings
jfinal/jfinal-simple/src/main/java/com/demo/blog/Blog.java
1263
package com.demo.blog; import com.jfinal.plugin.activerecord.Model; import com.jfinal.plugin.activerecord.Page; /** * Blog model. * <p> * 将表结构放在此,消除记忆负担 * mysql> desc blog; * +---------+--------------+------+-----+---------+----------------+ * | Field | Type | Null | Key | Default | Extra | * +---------+--------------+------+-----+---------+----------------+ * | id | int(11) | NO | PRI | NULL | auto_increment | * | title | varchar(200) | NO | | NULL | | * | content | mediumtext | NO | | NULL | | * +---------+--------------+------+-----+---------+----------------+ * <p> * 数据库字段名建议使用驼峰命名规则,便于与 java 代码保持一致,如字段名: userId */ @SuppressWarnings("serial") public class Blog extends Model<Blog> { public static final Blog me = new Blog(); /** * 所有 sql 与业务逻辑写在 Model 或 Service 中,不要写在 Controller 中,养成好习惯,有利于大型项目的开发与维护 */ public Page<Blog> paginate(int pageNumber, int pageSize) { return paginate(pageNumber, pageSize, "select *", "from blog order by id asc"); } }
apache-2.0
GillesMoris/OSS
src/org/parosproxy/paros/view/View.java
41414
/* * * Paros and its related class files. * * Paros is an HTTP/HTTPS proxy for assessing web application security. * Copyright (C) 2003-2004 Chinotec Technologies Company * * This program is free software; you can redistribute it and/or * modify it under the terms of the Clarified Artistic License * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * Clarified Artistic License for more details. * * You should have received a copy of the Clarified Artistic License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ // ZAP: 2011/08/04 Changed to support new HttpPanel interface // ZAP: 2011/05/15 Support for exclusions // ZAP: 2011/05/31 Added option to dynamically change the display // ZAP: 2012/02/18 Changed default to be 'bottom full' // ZAP: 2012/03/15 Changed to set the configuration key to the HttpPanels, load // the configuration and disable the response panel. // ZAP: 2012/04/23 Added @Override annotation to all appropriate methods. // ZAP: 2012/04/26 Removed the method setStatus(String), no longer used. // ZAP: 2012/07/02 HttpPanelRequest and -Response constructor changed. // ZAP: 2012/07/23 Removed title parameter in method getSessionDialog(). // Added @Override to getSessionDialog() as exposed in ViewDelegate interface. 
// ZAP: 2012/07/29 Issue 43: Added support for Scope // ZAP: 2012/08/01 Issue 332: added support for Modes // ZAP: 2012/08/07 Removed the unused method changeDisplayOption(int) // ZAP: 2012/10/02 Issue 385: Added support for Contexts // ZAP: 2012/10/03 Issue 388: Added support for technologies // ZAP: 2012/12/18 Issue 441: Prevent view being initialised in daemon mode // ZAP: 2013/01/16 Issue 453: Dynamic loading and unloading of add-ons - added helper methods // ZAP: 2013/02/17 Issue 496: Allow to see the request and response at the same // time in the main window // ZAP: 2013/02/26 Issue 540: Maximised work tabs hidden when response tab // position changed // ZAP: 2013/04/15 Issue 627: Allow add-ons to remove main tool bar buttons/separators // ZAP: 2013/07/23 Issue 738: Options to hide tabs // ZAP: 2013/08/21 Support for shared context for Context Properties Panels. // ZAP: 2013/12/13 Disabled the updating of 'Sites' tab, because it has been added elsewhere to accomodate the 'Full Layout' functionality. 
// ZAP: 2014/01/06 Issue 965: Support 'single page' apps and 'non standard' parameter separators // ZAP: 2014/01/19 Added option to execute code after init of the panels when showing the session dialog // ZAP: 2014/01/28 Issue 207: Support keyboard shortcuts // ZAP: 2014/03/23 Issue 1085: Do not add/remove pop up menu items through the method View#getPopupMenu() // ZAP: 2014/04/17 Issue 1155: Historical Request Tab Doesn't allow formatting changes // ZAP: 2014/07/15 Issue 1265: Context import and export // ZAP: 2014/09/22 Issue 1345: Support Attack mode // ZAP: 2014/10/07 Issue 1357: Hide unused tabs // ZAP: 2014/10/24 Issue 1378: Revamp active scan panel // ZAP: 2014/10/31 Issue 1176: Changed parents to Window as part of spider advanced dialog changes // ZAP: 2014/11/23 Added Splash Screen management // ZAP: 2014/12/22 Issue 1476: Display contexts in the Sites tree // ZAP: 2015/01/19 Expose splash screen as Component // ZAP: 2015/02/02 Move output panel help key registration to prevent NPE // ZAP: 2015/03/04 Added no prompt warning methods // ZAP: 2015/04/13 Add default editor and renderer for TextMessageLocationHighlight // ZAP: 2015/08/11 Fix the removal of context panels // ZAP: 2015/09/07 Start GUI on EDT // ZAP: 2015/11/26 Issue 2084: Warn users if they are probably using out of date versions // ZAP: 2016/03/16 Add StatusUI handling // ZAP: 2016/03/22 Allow to remove ContextPanelFactory // ZAP: 2016/03/23 Issue 2331: Custom Context Panels not show in existing contexts after installation of add-on // ZAP: 2016/04/04 Do not require a restart to show/hide the tool bar // ZAP: 2016/04/06 Fix layouts' issues // ZAP: 2016/04/14 Allow to display a message // ZAP: 2016/10/26 Create UI shared context in the session dialogue when adding a context package org.parosproxy.paros.view; import java.awt.Component; import java.awt.Event; import java.awt.Toolkit; import java.awt.Window; import java.awt.event.ActionEvent; import java.awt.event.ActionListener; import 
java.awt.event.KeyEvent; import java.text.MessageFormat; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.MissingResourceException; import java.util.Vector; import javax.swing.ImageIcon; import javax.swing.JButton; import javax.swing.JCheckBox; import javax.swing.JFrame; import javax.swing.JMenu; import javax.swing.JMenuItem; import javax.swing.JOptionPane; import javax.swing.JPanel; import javax.swing.JToggleButton; import javax.swing.JToolBar; import javax.swing.KeyStroke; import org.apache.log4j.Logger; import org.parosproxy.paros.Constant; import org.parosproxy.paros.control.Control; import org.parosproxy.paros.extension.AbstractPanel; import org.parosproxy.paros.extension.ExtensionHookMenu; import org.parosproxy.paros.extension.ExtensionPopupMenuItem; import org.parosproxy.paros.extension.ViewDelegate; import org.parosproxy.paros.extension.option.OptionsParamView; import org.parosproxy.paros.model.Model; import org.parosproxy.paros.model.OptionsParam; import org.parosproxy.paros.model.Session; import org.parosproxy.paros.network.HttpMessage; import org.zaproxy.zap.control.AddOn; import org.zaproxy.zap.control.AddOn.Status; import org.zaproxy.zap.extension.ExtensionPopupMenu; import org.zaproxy.zap.extension.help.ExtensionHelp; import org.zaproxy.zap.extension.httppanel.HttpPanelRequest; import org.zaproxy.zap.extension.httppanel.HttpPanelResponse; import org.zaproxy.zap.extension.httppanel.Message; import org.zaproxy.zap.extension.keyboard.ExtensionKeyboard; import org.zaproxy.zap.model.Context; import org.zaproxy.zap.view.AbstractContextPropertiesPanel; import org.zaproxy.zap.view.ContextExcludePanel; import org.zaproxy.zap.view.ContextGeneralPanel; import org.zaproxy.zap.view.ContextIncludePanel; import org.zaproxy.zap.view.ContextListPanel; import org.zaproxy.zap.view.ContextPanelFactory; import org.zaproxy.zap.view.ContextStructurePanel; import 
org.zaproxy.zap.view.ContextTechnologyPanel; import org.zaproxy.zap.view.SessionExcludeFromProxyPanel; import org.zaproxy.zap.view.SessionExcludeFromScanPanel; import org.zaproxy.zap.view.SessionExcludeFromSpiderPanel; import org.zaproxy.zap.view.SplashScreen; import org.zaproxy.zap.view.StatusUI; import org.zaproxy.zap.view.ZapMenuItem; import org.zaproxy.zap.view.messagelocation.MessageLocationHighlightRenderersEditors; import org.zaproxy.zap.view.messagelocation.TextMessageLocationHighlight; import org.zaproxy.zap.view.messagelocation.TextMessageLocationHighlightEditor; import org.zaproxy.zap.view.messagelocation.TextMessageLocationHighlightRenderer; public class View implements ViewDelegate { /** * @deprecated (2.5.0) Use {@link WorkbenchPanel.Layout#EXPAND_SELECT} instead. * @see #getMainFrame() * @see MainFrame#setWorkbenchLayout(org.parosproxy.paros.view.WorkbenchPanel.Layout) */ @Deprecated public static final int DISPLAY_OPTION_LEFT_FULL = 0; /** * @deprecated (2.5.0) Use {@link WorkbenchPanel.Layout#EXPAND_STATUS} instead. * @see #getMainFrame() * @see MainFrame#setWorkbenchLayout(org.parosproxy.paros.view.WorkbenchPanel.Layout) */ @Deprecated public static final int DISPLAY_OPTION_BOTTOM_FULL = 1; /** * @deprecated (2.5.0) Use {@link WorkbenchPanel.Layout#FULL} instead. 
* @see #getMainFrame() * @see MainFrame#setWorkbenchLayout(org.parosproxy.paros.view.WorkbenchPanel.Layout) */ @Deprecated public static final int DISPLAY_OPTION_TOP_FULL = 2; public static final int DISPLAY_OPTION_ICONNAMES = 0; public static final int DISPLAY_OPTION_ONLYICONS = 1; private static View view = null; private static boolean daemon = false; // private FindDialog findDialog = null; private SessionDialog sessionDialog = null; private OptionsDialog optionsDialog = null; //private LogPanel logPanel = null; private MainFrame mainFrame = null; private HttpPanelRequest requestPanel = null; private HttpPanelResponse responsePanel = null; private SiteMapPanel siteMapPanel = null; private OutputPanel outputPanel = null; private Vector<JMenuItem> popupList = new Vector<>(); private JMenu menuShowTabs = null; private JCheckBox rememberCheckbox = null; private JCheckBox dontPromptCheckbox = null; private List<AbstractContextPropertiesPanel> contextPanels = new ArrayList<>(); private List<ContextPanelFactory> contextPanelFactories = new ArrayList<>(); /** * A map containing the {@link AbstractContextPropertiesPanel context panels} created by a {@link ContextPanelFactory * context panel factory}, being the latter the key and the former the value (a {@code List} with the panels). * <p> * The map is used to remove the panels created when the factory is removed. */ private Map<ContextPanelFactory, List<AbstractContextPropertiesPanel>> contextPanelFactoriesPanels = new HashMap<>(); private static final Logger logger = Logger.getLogger(View.class); // ZAP: splash screen private SplashScreen splashScreen = null; private Map<AddOn.Status, StatusUI> statusMap = new HashMap<>(); private boolean postInitialisation; /** * @return Returns the mainFrame. */ @Override public MainFrame getMainFrame() { return mainFrame; } ///** // * @return Returns the requestPanel. 
// */ //public HttpPanel getRequestPanel() { // return requestPanel; //} ///** // * @return Returns the responsePanel. // */ //public HttpPanel getResponsePanel() { // return responsePanel; //} /** * @deprecated (2.5.0) Use {@link MainFrame#setWorkbenchLayout(org.parosproxy.paros.view.WorkbenchPanel.Layout)} * instead. * @see #getMainFrame() */ @Deprecated @SuppressWarnings("javadoc") public static void setDisplayOption(int displayOption) { View.getSingleton().getMainFrame().setWorkbenchLayout(WorkbenchPanel.Layout.getLayout(displayOption)); } /** * @deprecated (2.5.0) Use {@link MainFrame#getWorkbenchLayout()} instead. * @see #getMainFrame() */ @Deprecated @SuppressWarnings("javadoc") public static int getDisplayOption() { return View.getSingleton().getMainFrame().getWorkbenchLayout().getId(); } // ZAP: Removed method changeDisplayOption(int) public void init() { OptionsParam options = Model.getSingleton().getOptionsParam(); mainFrame = new MainFrame(options, getRequestPanel(), getResponsePanel()); mainFrame.getWorkbench().addPanel(View.getSingleton().getSiteTreePanel(), WorkbenchPanel.PanelType.SELECT); // Install default editor and renderer for TextMessageLocationHighlight MessageLocationHighlightRenderersEditors.getInstance().addEditor( TextMessageLocationHighlight.class, new TextMessageLocationHighlightEditor()); MessageLocationHighlightRenderersEditors.getInstance().addRenderer( TextMessageLocationHighlight.class, new TextMessageLocationHighlightRenderer()); String statusString; for(Status status : AddOn.Status.values()) { //Try/catch in case AddOn.Status gets out of sync with cfu.status i18n entries try { statusString = Constant.messages.getString("cfu.status." + status.toString()); } catch (MissingResourceException mre) { statusString = status.toString(); String errString="Caught " + mre.getClass().getName() + " " + mre.getMessage() + " when looking for i18n string: cfu.status." 
+ statusString; if (Constant.isDevBuild()) { logger.error(errString); } else { logger.warn(errString); } } statusMap.put(status, new StatusUI(status, statusString)); } } public void postInit() { mainFrame.getWorkbench().addPanel(getOutputPanel(), WorkbenchPanel.PanelType.STATUS); refreshTabViewMenus(); // Add the 'tab' menu items JMenuItem showAllMenu = new JMenuItem(Constant.messages.getString("menu.view.tabs.show")); showAllMenu.addActionListener(new ActionListener() { @Override public void actionPerformed(ActionEvent e) { showAllTabs(); } }); mainFrame.getMainMenuBar().getMenuView().add(showAllMenu); JMenuItem hideAllMenu = new JMenuItem(Constant.messages.getString("menu.view.tabs.hide")); hideAllMenu.addActionListener(new ActionListener() { @Override public void actionPerformed(ActionEvent e) { hideAllTabs(); } }); mainFrame.getMainMenuBar().getMenuView().add(hideAllMenu); JMenuItem pinAllMenu = new JMenuItem(Constant.messages.getString("menu.view.tabs.pin")); pinAllMenu.addActionListener(new ActionListener() { @Override public void actionPerformed(ActionEvent e) { pinAllTabs(); } }); mainFrame.getMainMenuBar().getMenuView().add(pinAllMenu); JMenuItem unpinAllMenu = new JMenuItem(Constant.messages.getString("menu.view.tabs.unpin")); unpinAllMenu.addActionListener(new ActionListener() { @Override public void actionPerformed(ActionEvent e) { unpinAllTabs(); } }); mainFrame.getMainMenuBar().getMenuView().add(unpinAllMenu); postInitialisation = true; } /** * @deprecated (2.5.0) No longer in use/working, use * {@link MainFrame#setResponsePanelPosition(org.parosproxy.paros.view.WorkbenchPanel.ResponsePanelPosition)} * instead. 
* @since 2.1.0 * @see #getMainFrame() */ @Deprecated @SuppressWarnings("javadoc") public org.zaproxy.zap.view.MessagePanelsPositionController getMessagePanelsPositionController() { return new org.zaproxy.zap.view.MessagePanelsPositionController(null, null, null, null); } public void refreshTabViewMenus() { if (menuShowTabs != null) { // Remove the old ones mainFrame.getMainMenuBar().getMenuView().remove(menuShowTabs); } menuShowTabs = new JMenu(Constant.messages.getString("menu.view.showtab")); mainFrame.getMainMenuBar().getMenuView().add(menuShowTabs); ExtensionKeyboard extKey = (ExtensionKeyboard) Control.getSingleton().getExtensionLoader().getExtension(ExtensionKeyboard.NAME); for (AbstractPanel panel : getWorkbench().getSortedPanels(WorkbenchPanel.PanelType.SELECT)) { registerMenu(extKey, panel); } menuShowTabs.addSeparator(); for (AbstractPanel panel : getWorkbench().getSortedPanels(WorkbenchPanel.PanelType.WORK)) { registerMenu(extKey, panel); } menuShowTabs.addSeparator(); for (AbstractPanel panel : getWorkbench().getSortedPanels(WorkbenchPanel.PanelType.STATUS)) { registerMenu(extKey, panel); } } private void registerMenu(ExtensionKeyboard extKey, final AbstractPanel ap) { ZapMenuItem tabMenu = new ZapMenuItem( ap.getClass().getName(), MessageFormat.format(Constant.messages.getString("menu.view.tab"), ap.getName()), ap.getDefaultAccelerator()); tabMenu.setMnemonic(ap.getMnemonic()); if (ap.getIcon() != null) { tabMenu.setIcon(ap.getIcon()); } tabMenu.addActionListener(new ActionListener() { @Override public void actionPerformed(ActionEvent e) { getWorkbench().showPanel(ap); } }); menuShowTabs.add(tabMenu); if (extKey != null) { extKey.registerMenuItem(tabMenu); } } public void showAllTabs() { getWorkbench().setPanelsVisible(true); } public void hideAllTabs() { getWorkbench().setPanelsVisible(false); } public void pinAllTabs() { getWorkbench().pinVisiblePanels(); } public void unpinAllTabs() { getWorkbench().unpinVisiblePanels(); } /** * Open the splash 
screen */ public void showSplashScreen() { // Show the splashscreen only if it's been enabled by the user if (Model.getSingleton().getOptionsParam().getViewParam().isShowSplashScreen()) { // Show the splash screen to show the user something is happening.. splashScreen = new SplashScreen(); } } /** * Close the curren splash screen and remove all resources */ public void hideSplashScreen() { if (splashScreen != null) { splashScreen.close(); splashScreen = null; } } /** * Set the curent loading completion * @param percentage the percentage of completion from 0 to 100 */ public void setSplashScreenLoadingCompletion(double percentage) { if (splashScreen != null) { splashScreen.setLoadingCompletion(percentage); } } /** * Add the curent loading completion * @param percentage the percentage of completion from 0 to 100 that need to be added */ public void addSplashScreenLoadingCompletion(double percentage) { if (splashScreen != null) { splashScreen.addLoadingCompletion(percentage); } } @Override public int showConfirmDialog(String msg) { return showConfirmDialog(getMainFrame(), msg); } public int showConfirmDialog(JPanel parent, String msg) { return JOptionPane.showConfirmDialog(parent, msg, Constant.PROGRAM_NAME, JOptionPane.OK_CANCEL_OPTION); } public int showConfirmDialog(Window parent, String msg) { return JOptionPane.showConfirmDialog(parent, msg, Constant.PROGRAM_NAME, JOptionPane.OK_CANCEL_OPTION); } @Override public int showYesNoCancelDialog(String msg) { return showYesNoCancelDialog(getMainFrame(), msg); } public int showYesNoCancelDialog(JPanel parent, String msg) { return JOptionPane.showConfirmDialog(parent, msg, Constant.PROGRAM_NAME, JOptionPane.YES_NO_CANCEL_OPTION); } public int showYesNoCancelDialog(Window parent, String msg) { return JOptionPane.showConfirmDialog(parent, msg, Constant.PROGRAM_NAME, JOptionPane.YES_NO_CANCEL_OPTION); } @Override public void showWarningDialog(String msg) { showWarningDialog(getMainFrame(), msg); } public void 
showWarningDialog(JPanel parent, String msg) { JOptionPane.showMessageDialog(parent, msg, Constant.PROGRAM_NAME, JOptionPane.WARNING_MESSAGE); } public void showWarningDialog(Window parent, String msg) { JOptionPane.showMessageDialog(parent, msg, Constant.PROGRAM_NAME, JOptionPane.WARNING_MESSAGE); } @Override public void showMessageDialog(String msg) { showMessageDialog(getMainFrame(), msg); } public void showMessageDialog(JPanel parent, String msg) { JOptionPane.showMessageDialog(parent, msg, Constant.PROGRAM_NAME, JOptionPane.INFORMATION_MESSAGE); } public void showMessageDialog(Window parent, String msg) { JOptionPane.showMessageDialog(parent, msg, Constant.PROGRAM_NAME, JOptionPane.INFORMATION_MESSAGE); } private JCheckBox getRememberCheckbox() { if (rememberCheckbox == null) { rememberCheckbox = new JCheckBox(Constant.messages.getString("view.dialog.remember")); } return rememberCheckbox; } public boolean isRememberLastDialogChosen() { return this.getRememberCheckbox().isSelected(); } public int showYesNoRememberDialog(Window parent, String msg) { // The checkbox is used for all related dialogs, so always reset this.getRememberCheckbox().setSelected(false); return JOptionPane.showConfirmDialog(parent, new Object[]{msg + "\n", this.getRememberCheckbox()}, Constant.PROGRAM_NAME, JOptionPane.YES_NO_OPTION); } public int showYesNoDialog(Window parent, Object[] objs) { return JOptionPane.showConfirmDialog(parent, objs, Constant.PROGRAM_NAME, JOptionPane.YES_NO_OPTION); } private JCheckBox getDontPromptCheckbox() { if (dontPromptCheckbox == null) { dontPromptCheckbox = new JCheckBox(Constant.messages.getString("view.dialog.dontPrompt")); } return dontPromptCheckbox; } public boolean isDontPromptLastDialogChosen() { return this.getDontPromptCheckbox().isSelected(); } public int showConfirmDontPromptDialog(Window parent, String msg) { // The checkbox is used for all related dialogs, so always reset this.getDontPromptCheckbox().setSelected(false); return 
JOptionPane.showConfirmDialog(parent, new Object[]{msg + "\n", this.getDontPromptCheckbox()}, Constant.PROGRAM_NAME, JOptionPane.OK_CANCEL_OPTION); } public void showWarningDontPromptDialog(Window parent, String msg) { // The checkbox is used for all related dialogs, so always reset this.getDontPromptCheckbox().setSelected(false); JOptionPane.showMessageDialog(parent, new Object[]{msg + "\n", this.getDontPromptCheckbox()}, Constant.PROGRAM_NAME, JOptionPane.WARNING_MESSAGE); } public void showWarningDontPromptDialog(String msg) { showWarningDontPromptDialog(getMainFrame(), msg); } // ZAP: FindBugs fix - make method synchronised public static synchronized View getSingleton() { if (view == null) { if (daemon) { Exception e = new Exception("Attempting to initialise View in daemon mode"); logger.error(e.getMessage(), e); return null; } logger.info("Initialising View"); view = new View(); view.init(); } return view; } public static boolean isInitialised() { return view != null; } public static void setDaemon(boolean daemon) { View.daemon = daemon; } // public void showFindDialog() { // if (findDialog == null) { // findDialog = new FindDialog(mainFrame, false); // } // // findDialog.setVisible(true); // } /** * @return Returns the siteTreePanel. 
*/ @Override public SiteMapPanel getSiteTreePanel() { if (siteMapPanel == null) { siteMapPanel = new SiteMapPanel(); } return siteMapPanel; } @Override public OutputPanel getOutputPanel() { if (outputPanel == null) { outputPanel = new OutputPanel(); ExtensionHelp.enableHelpKey(outputPanel, "ui.tabs.output"); } return outputPanel; } @Override public HttpPanelRequest getRequestPanel() { if (requestPanel == null) { // ZAP: constructor changed requestPanel = new HttpPanelRequest(false, OptionsParamView.BASE_VIEW_KEY + ".main."); // ZAP: Added 'right arrow' icon requestPanel.setIcon(new ImageIcon(View.class.getResource("/resource/icon/16/105.png"))); requestPanel.setName(Constant.messages.getString("http.panel.request.title")); // ZAP: i18n requestPanel.setEnableViewSelect(true); requestPanel.loadConfig(Model.getSingleton().getOptionsParam().getConfig()); requestPanel.setDefaultAccelerator(KeyStroke.getKeyStroke( KeyEvent.VK_R, Toolkit.getDefaultToolkit().getMenuShortcutKeyMask() | Event.SHIFT_MASK, false)); requestPanel.setMnemonic(Constant.messages.getChar("http.panel.request.mnemonic")); } return requestPanel; } @Override public HttpPanelResponse getResponsePanel() { if (responsePanel == null) { // ZAP: constructor changed responsePanel = new HttpPanelResponse(false, OptionsParamView.BASE_VIEW_KEY + ".main."); // ZAP: Added 'left arrow' icon responsePanel.setIcon(new ImageIcon(View.class.getResource("/resource/icon/16/106.png"))); responsePanel.setName(Constant.messages.getString("http.panel.response.title")); // ZAP: i18n responsePanel.setEnableViewSelect(false); responsePanel.loadConfig(Model.getSingleton().getOptionsParam().getConfig()); responsePanel.setDefaultAccelerator(KeyStroke.getKeyStroke( KeyEvent.VK_R, Toolkit.getDefaultToolkit().getMenuShortcutKeyMask() | Event.ALT_MASK | Event.SHIFT_MASK, false)); responsePanel.setMnemonic(Constant.messages.getChar("http.panel.response.mnemonic")); } return responsePanel; } @Override public SessionDialog 
getSessionDialog() { if (sessionDialog == null) { String[] ROOT = {}; // ZAP: i18n, plus in-lined title parameter String propertiesTitle = Constant.messages.getString("session.properties.title"); String dialogTitle = Constant.messages.getString("session.dialog.title"); sessionDialog = new SessionDialog(getMainFrame(), true, propertiesTitle, dialogTitle); sessionDialog.addParamPanel(ROOT, new SessionGeneralPanel(), false); sessionDialog.addParamPanel(ROOT, new SessionExcludeFromProxyPanel(), false); sessionDialog.addParamPanel(ROOT, new SessionExcludeFromScanPanel(), false); sessionDialog.addParamPanel(ROOT, new SessionExcludeFromSpiderPanel(), false); sessionDialog.addParamPanel(ROOT, new ContextListPanel(), false); } return sessionDialog; } public void showSessionDialog(Session session, String panel) { showSessionDialog(session, panel, true, null); } public void showSessionDialog(Session session, String panel, boolean recreateUISharedContexts) { showSessionDialog(session, panel, recreateUISharedContexts, null); } /** * Shows the session properties dialog. If a panel is specified, the dialog * is opened showing that panel. If {@code recreateUISharedContexts} is * {@code true}, any old UI shared contexts are discarded and new ones are * created as copies of the contexts. If a {@code postInitRunnable} is * provided, its {@link Runnable#run} method is called after the * initialization of all the panels of the session properties dialog. * * @param session the session * @param panel the panel name to be shown * @param recreateUISharedContexts if true, any old UI shared contexts are * discarded and new ones are created as copies of the contexts * @param postInitRunnable if provided, its {@link Runnable#run} method is * called after the initialization of all the panels of the session * properties dialog. 
*/ public void showSessionDialog(Session session, String panel, boolean recreateUISharedContexts, Runnable postInitRunnable) { if (sessionDialog == null) { this.getSessionDialog(); } if (recreateUISharedContexts) { sessionDialog.recreateUISharedContexts(session); } sessionDialog.initParam(session); if (postInitRunnable != null) { postInitRunnable.run(); } sessionDialog.setTitle(Constant.messages.getString("session.properties.title")); sessionDialog.showDialog(false, panel); } public void addContext(Context c) { getSessionDialog().createUISharedContext(c); String contextsNodeName = Constant.messages.getString("context.list"); ContextGeneralPanel contextGenPanel = new ContextGeneralPanel(c.getName(), c.getIndex()); contextGenPanel.setSessionDialog(getSessionDialog()); getSessionDialog().addParamPanel(new String[]{ contextsNodeName }, contextGenPanel, false); this.contextPanels.add(contextGenPanel); String[] contextPanelPath = new String[] { contextsNodeName, contextGenPanel.getName() }; ContextIncludePanel contextIncPanel = new ContextIncludePanel(c); contextIncPanel.setSessionDialog(getSessionDialog()); getSessionDialog().addParamPanel(contextPanelPath, contextIncPanel, false); this.contextPanels.add(contextIncPanel); ContextExcludePanel contextExcPanel = new ContextExcludePanel(c); contextExcPanel.setSessionDialog(getSessionDialog()); getSessionDialog().addParamPanel(contextPanelPath, contextExcPanel, false); this.contextPanels.add(contextExcPanel); ContextStructurePanel contextStructPanel = new ContextStructurePanel(c); contextStructPanel.setSessionDialog(getSessionDialog()); getSessionDialog().addParamPanel(contextPanelPath, contextStructPanel, false); this.contextPanels.add(contextStructPanel); ContextTechnologyPanel contextTechPanel = new ContextTechnologyPanel(c); contextTechPanel.setSessionDialog(getSessionDialog()); getSessionDialog().addParamPanel(contextPanelPath, contextTechPanel, false); this.contextPanels.add(contextTechPanel); for (ContextPanelFactory 
cpf : this.contextPanelFactories) { addPanelForContext(c, cpf, contextPanelPath); } this.getSiteTreePanel().reloadContextTree(); } /** * Adds a custom context panel for the given context, created form the given context panel factory and placed under the * given path. * * @param contextPanelFactory context panel factory used to create the panel, must not be {@code null} * @param panelPath the path where to add the created panel, must not be {@code null} * @param context the target context, must not be {@code null} */ private void addPanelForContext(Context context, ContextPanelFactory contextPanelFactory, String[] panelPath) { AbstractContextPropertiesPanel panel = contextPanelFactory.getContextPanel(context); panel.setSessionDialog(getSessionDialog()); getSessionDialog().addParamPanel(panelPath, panel, false); this.contextPanels.add(panel); List<AbstractContextPropertiesPanel> panels = contextPanelFactoriesPanels.get(contextPanelFactory); if (panels == null) { panels = new ArrayList<>(); contextPanelFactoriesPanels.put(contextPanelFactory, panels); } panels.add(panel); } public void renameContext(Context c) { ContextGeneralPanel ctxPanel = getContextGeneralPanel(c); if (ctxPanel != null) { getSessionDialog().renamePanel(ctxPanel, c.getIndex() + ":" + c.getName()); } this.getSiteTreePanel().reloadContextTree(); } /** * Gets the context general panel of the given {@code context}. 
* * @param context the context whose context general panel will be returned * @return the {@code ContextGeneralPanel} of the given context, {@code null} if not found */ private ContextGeneralPanel getContextGeneralPanel(Context context) { for (AbstractParamPanel panel : contextPanels) { if (panel instanceof ContextGeneralPanel) { ContextGeneralPanel contextGeneralPanel = (ContextGeneralPanel) panel; if (contextGeneralPanel.getContextIndex() == context.getIndex()) { return contextGeneralPanel; } } } return null; } public void changeContext(Context c) { this.getSiteTreePanel().contextChanged(c); } @Override public void addContextPanelFactory(ContextPanelFactory contextPanelFactory) { if (contextPanelFactory == null) { throw new IllegalArgumentException("Parameter contextPanelFactory must not be null."); } this.contextPanelFactories.add(contextPanelFactory); if (postInitialisation) { String contextsNodeName = Constant.messages.getString("context.list"); for (Context context : Model.getSingleton().getSession().getContexts()) { ContextGeneralPanel contextGeneralPanel = getContextGeneralPanel(context); if (contextGeneralPanel != null) { addPanelForContext(context, contextPanelFactory, new String[] { contextsNodeName, contextGeneralPanel.getName() }); } } } } @Override public void removeContextPanelFactory(ContextPanelFactory contextPanelFactory) { if (contextPanelFactory == null) { throw new IllegalArgumentException("Parameter contextPanelFactory must not be null."); } if (contextPanelFactories.remove(contextPanelFactory)) { contextPanelFactory.discardContexts(); List<AbstractContextPropertiesPanel> panels = contextPanelFactoriesPanels.remove(contextPanelFactory); if (panels != null) { for (AbstractContextPropertiesPanel panel : panels) { getSessionDialog().removeParamPanel(panel); } contextPanels.removeAll(panels); } } } public void deleteContext(Context c) { List<AbstractContextPropertiesPanel> removedPanels = new ArrayList<>(); for 
(Iterator<AbstractContextPropertiesPanel> it = contextPanels.iterator(); it.hasNext();) { AbstractContextPropertiesPanel panel = it.next(); if (panel.getContextIndex() == c.getIndex()) { getSessionDialog().removeParamPanel(panel); it.remove(); removedPanels.add(panel); } } for (ContextPanelFactory cpf : this.contextPanelFactories) { cpf.discardContext(c); List<AbstractContextPropertiesPanel> panels = contextPanelFactoriesPanels.get(cpf); if (panels != null) { panels.removeAll(removedPanels); } } this.getSiteTreePanel().reloadContextTree(); } public void discardContexts() { for (AbstractParamPanel panel : contextPanels) { getSessionDialog().removeParamPanel(panel); } for (ContextPanelFactory cpf : this.contextPanelFactories) { cpf.discardContexts(); contextPanelFactoriesPanels.remove(cpf); } contextPanels.clear(); this.getSiteTreePanel().reloadContextTree(); } public OptionsDialog getOptionsDialog(String title) { // ZAP: FindBugs fix - dont need ROOT //String[] ROOT = {}; if (optionsDialog == null) { optionsDialog = new OptionsDialog(getMainFrame(), true, title); } optionsDialog.setTitle(title); return optionsDialog; } public WorkbenchPanel getWorkbench() { return mainFrame.getWorkbench(); } // ZAP: Removed the method setStatus(String), no longer used. /** * Returns a new {@code MainPopupMenu} instance with the pop pup menu items * returned by the method {@code getPopupList()}. * <p> * <strong>Note:</strong> Pop up menu items ({@code JMenuItem}, * {@code JMenu}, {@code ExtensionPopupMenuItem} and * {@code ExtensionPopupMenu}) should be added/removed to/from the list * returned by the method {@code getPopupList()} not by calling the methods * {@code MainPopupMenu#addMenu(...)} on the returned {@code MainPopupMenu} * instance. Adding pop up menu items to the returned {@code MainPopupMenu} * instance relies on current implementation of {@code MainPopupMenu} which * may change without notice (moreover a new instance is created each time * the method is called). 
* </p> * * @return a {@code MainPopupMenu} containing the pop up menu items that are * in the list returned by the method {@code getPopupList()}. * @see #getPopupList() * @see MainPopupMenu * @see ExtensionPopupMenu * @see ExtensionPopupMenuItem */ @Override public MainPopupMenu getPopupMenu() { MainPopupMenu popup = new MainPopupMenu(popupList, this); return popup; } /** * Returns the list of pop up menu items that will have the * {@code MainPopupMenu} instance returned by the method * {@code getPopupMenu()}. * <p> * Should be used to dynamically add/remove pop up menu items * ({@code JMenuItem}, {@code JMenu}, {@code ExtensionPopupMenuItem} and * {@code ExtensionPopupMenu}) to the main pop up menu at runtime. * </p> * * @return the list of pop up menu items that will have the main pop up * menu. * @see #getPopupMenu() * @see ExtensionHookMenu#addPopupMenuItem(ExtensionPopupMenu) * @see ExtensionHookMenu#addPopupMenuItem(ExtensionPopupMenuItem) * @see MainPopupMenu * @see ExtensionPopupMenu * @see ExtensionPopupMenuItem */ public Vector<JMenuItem> getPopupList() { return popupList; } @Override public WaitMessageDialog getWaitMessageDialog(String s) { WaitMessageDialog dialog = new WaitMessageDialog(getMainFrame(), true); dialog.setText(s); dialog.centreDialog(); return dialog; } public WaitMessageDialog getWaitMessageDialog(JFrame parent, String s) { WaitMessageDialog dialog = new WaitMessageDialog(parent, true); dialog.setText(s); dialog.centreDialog(); return dialog; } // ZAP: Added main toolbar mathods public void addMainToolbarButton(JButton button) { this.getMainFrame().getMainToolbarPanel().addButton(button); } public void addMainToolbarSeparator() { this.getMainFrame().getMainToolbarPanel().addSeparator(); } public void addMainToolbarButton(JToggleButton button) { this.getMainFrame().getMainToolbarPanel().addButton(button); } public void removeMainToolbarButton(JButton button) { this.getMainFrame().getMainToolbarPanel().removeButton(button); } public void 
removeMainToolbarButton(JToggleButton button) { this.getMainFrame().getMainToolbarPanel().removeButton(button); } public void addMainToolbarSeparator(JToolBar.Separator separator) { this.getMainFrame().getMainToolbarPanel().addSeparator(separator); } public void removeMainToolbarSeparator(JToolBar.Separator separator) { this.getMainFrame().getMainToolbarPanel().removeSeparator(separator); } /** * Gets the splash screen as {@code Component}. It should be used only as a parent for error/warning dialogues shown during * initialisation. * * @return the splash screen, {@code null} when the splash screen is/was not displayed. * @since 2.4.0 */ public Component getSplashScreen() { return splashScreen; } /** * Returns a StatusUI for the given AddOn.Status * @param status the Status for which a StatusUI is wanted * @return a StatusUI * @since 2.5.0 */ public StatusUI getStatusUI(AddOn.Status status) { return statusMap.get(status); } /** * Sets whether or not the main tool bar should be visible. * * @param visible {@code true} if the main tool bar should be visible, {@code false} otherwise. * @since 2.5.0 */ public void setMainToolbarVisible(boolean visible) { getMainFrame().setMainToolbarVisible(visible); } /** * {@inheritDoc} * <p> * <strong>Note:</strong> Current implementation just supports {@link HttpMessage HTTP messages}. Attempting to display * other message types has no effect. 
*/ @Override public void displayMessage(Message message) { if (message == null) { getRequestPanel().clearView(true); getResponsePanel().clearView(false); return; } if (!(message instanceof HttpMessage)) { logger.warn("Unable to display message: " + message.getClass().getCanonicalName()); return; } HttpMessage httpMessage = (HttpMessage) message; if (httpMessage.getRequestHeader().isEmpty()) { getRequestPanel().clearView(true); } else { getRequestPanel().setMessage(httpMessage); } if (httpMessage.getResponseHeader().isEmpty()) { getResponsePanel().clearView(false); } else { getResponsePanel().setMessage(httpMessage, true); } } }
apache-2.0
dayatang/opencis
opencis-application/src/main/java/org/tmatesoft/svn/examples/wc/test/WCEventHandler.java
3182
/* * ==================================================================== * Copyright (c) 2004-2011 TMate Software Ltd. All rights reserved. * * This software is licensed as described in the file COPYING, which * you should have received as part of this distribution. The terms * are also available at http://svnkit.com/license.html * If newer versions of this license are posted there, you may use a * newer version instead, at your option. * ==================================================================== */ package org.tmatesoft.svn.examples.wc.test; import org.tmatesoft.svn.core.SVNCancelException; import org.tmatesoft.svn.core.wc.ISVNEventHandler; import org.tmatesoft.svn.core.wc.SVNEvent; import org.tmatesoft.svn.core.wc.SVNEventAction; /* * This class is an implementation of ISVNEventHandler intended for processing * events generated by do*() methods of an SVNWCClient object. An instance of * this handler will be provided to an SVNWCClient. When calling, for example, * SVNWCClient.doDelete(..) on some path, that method will generate an event * for each 'delete' action it will perform upon every path being deleted. And * this event is passed to * * ISVNEventHandler.handleEvent(SVNEvent event, double progress) * * to notify the handler. The event contains detailed information about the * path, action performed upon the path and some other. */ public class WCEventHandler implements ISVNEventHandler { /* * progress is currently reserved for future purposes and now is always * ISVNEventHandler.UNKNOWN */ public void handleEvent(SVNEvent event, double progress) { /* * Gets the current action. An action is represented by SVNEventAction. */ SVNEventAction action = event.getAction(); if (action == SVNEventAction.ADD){ /* * The item is scheduled for addition. */ System.out.println("A " + event.getFile()); return; }else if (action == SVNEventAction.COPY){ /* * The item is scheduled for addition with history (copied, in * other words). 
*/ System.out.println("A + " + event.getFile()); return; }else if (action == SVNEventAction.DELETE){ /* * The item is scheduled for deletion. */ System.out.println("D " + event.getFile()); return; } else if (action == SVNEventAction.LOCKED){ /* * The item is locked. */ System.out.println("L " + event.getFile()); return; } else if (action == SVNEventAction.LOCK_FAILED){ /* * Locking operation failed. */ System.out.println("failed to lock " + event.getFile()); return; } } /* * Should be implemented to check if the current operation is cancelled. If * it is, this method should throw an SVNCancelException. */ public void checkCancelled() throws SVNCancelException { } }
apache-2.0
Z-starts/hutool
src/main/java/com/xiaoleilu/hutool/db/ds/SimpleDataSource.java
3706
package com.xiaoleilu.hutool.db.ds;

import java.io.IOException;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

import com.xiaoleilu.hutool.db.DbUtil;
import com.xiaoleilu.hutool.exceptions.DbRuntimeException;
import com.xiaoleilu.hutool.setting.Setting;

/***
 * Simple data source that does NOT use a connection pool: every call to
 * {@link #getConnection()} opens a brand-new JDBC connection. Intended only
 * for tests or for scenarios that open and close very few connections.
 *
 * @author loolly
 */
public class SimpleDataSource extends AbstractDataSource{

	/** Default path of the database connection configuration file */
	public final static String DEFAULT_DB_CONFIG_PATH = "config/db.setting";

	//-------------------------------------------------------------------- Fields start
	private String driver;		// JDBC driver class name
	private String url;		// JDBC url
	private String user;		// user name
	private String pass;		// password
	//-------------------------------------------------------------------- Fields end

	/**
	 * Obtains a data source configured from the given group of the default
	 * configuration file. Note: despite the name, a NEW instance is created on
	 * every call — nothing is cached.
	 *
	 * @param group setting group inside the database configuration file
	 */
	synchronized public static SimpleDataSource getDataSource(String group) {
		return new SimpleDataSource(group);
	}

	/**
	 * Obtains a data source configured from the default (unnamed) group of the
	 * default configuration file. A new instance is created on every call.
	 */
	synchronized public static SimpleDataSource getDataSource() {
		return new SimpleDataSource();
	}

	//-------------------------------------------------------------------- Constructor start
	/**
	 * Constructor: reads the default group of the default configuration file.
	 */
	public SimpleDataSource() {
		this(null);
	}

	/**
	 * Constructor
	 * @param group setting group inside the database configuration file
	 */
	public SimpleDataSource(String group) {
		this(null, group);
	}

	/**
	 * Constructor
	 * @param setting database configuration; when {@code null}, the default
	 *                file {@link #DEFAULT_DB_CONFIG_PATH} is loaded
	 * @param group setting group inside the database configuration file
	 */
	public SimpleDataSource(Setting setting, String group) {
		if(null == setting) {
			setting = new Setting(DEFAULT_DB_CONFIG_PATH);
		}
		init(
				setting.getString("url", group),
				setting.getString("user", group),
				setting.getString("pass", group)
		);
	}

	/**
	 * Constructor
	 * @param url JDBC url
	 * @param user user name
	 * @param pass password
	 */
	public SimpleDataSource(String url, String user, String pass) {
		init(url, user, pass);
	}
	//-------------------------------------------------------------------- Constructor end

	/**
	 * Initializes connection parameters and loads the JDBC driver class.
	 * The driver class name is derived from the url via DbUtil — presumably by
	 * matching known url prefixes; TODO confirm against DbUtil.identifyDriver.
	 *
	 * @param url JDBC url
	 * @param user user name
	 * @param pass password
	 * @throws DbRuntimeException if the derived driver class cannot be loaded
	 */
	public void init(String url, String user, String pass) {
		this.url = url;
		this.user = user;
		this.pass = pass;
		this.driver = DbUtil.identifyDriver(url);
		try {
			Class.forName(this.driver);
		} catch (ClassNotFoundException e) {
			throw new DbRuntimeException(e, "Get jdbc driver from [{}] error!", url);
		}
	}

	//-------------------------------------------------------------------- Getters and Setters start
	public String getDriver() {
		return driver;
	}
	public void setDriver(String driver) {
		this.driver = driver;
	}
	public String getUrl() {
		return url;
	}
	public void setUrl(String url) {
		this.url = url;
	}
	public String getUser() {
		return user;
	}
	public void setUser(String user) {
		this.user = user;
	}
	public String getPass() {
		return pass;
	}
	public void setPass(String pass) {
		this.pass = pass;
	}
	//-------------------------------------------------------------------- Getters and Setters end

	@Override
	public Connection getConnection() throws SQLException {
		// A fresh physical connection per call; callers are responsible for closing it.
		return DriverManager.getConnection(this.url, this.user, this.pass);
	}

	@Override
	public Connection getConnection(String username, String password) throws SQLException {
		return DriverManager.getConnection(this.url, username, password);
	}

	@Override
	public void close() throws IOException {
		// Nothing to close: no pool is held; connections are owned by callers.
	}
}
apache-2.0
warlock-china/wisp
wisp-core/src/main/java/cn/com/warlock/wisp/core/plugin/router/IWispRouter.java
286
package cn.com.warlock.wisp.core.plugin.router;

import cn.com.warlock.wisp.core.exception.WispRouterException;

/**
 * Lifecycle contract for a Wisp router plugin.
 * <p>
 * Implementations are driven through three phases: {@link #init()},
 * {@link #start()} and {@link #shutdown()}. The expected calling order is
 * presumably init → start → shutdown — TODO confirm against the plugin
 * container that drives these callbacks.
 */
public interface IWispRouter {

    /** Starts the router; called after initialization. */
    void start() throws WispRouterException;

    /** Initializes the router before it is started. */
    void init() throws WispRouterException;

    /** Stops the router and releases its resources. */
    void shutdown() throws WispRouterException;
}
apache-2.0
Servoy/wicket
wicket/src/test/java/org/apache/wicket/resource/ComponentStringResourceLoaderTest.java
3843
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.wicket.resource; import java.util.Locale; import junit.framework.Assert; import org.apache.wicket.Component; import org.apache.wicket.markup.html.panel.Panel; import org.apache.wicket.resource.loader.ComponentStringResourceLoader; import org.apache.wicket.resource.loader.IStringResourceLoader; /** * Test case for the <code>ComponentStringResourceLoader</code> class. * * @author Chris Turner */ public class ComponentStringResourceLoaderTest extends StringResourceLoaderTestBase { /** * Create the test case. 
* * @param message * The test name */ public ComponentStringResourceLoaderTest(String message) { super(message); } /** * Create and return the loader instance * * @return The loader instance to test */ @Override protected IStringResourceLoader createLoader() { return new ComponentStringResourceLoader(); } /** * @see org.apache.wicket.resource.StringResourceLoaderTestBase#testLoaderUnknownResources() */ @Override public void testLoaderUnknownResources() { Component c = new DummyComponent("hello", application) { private static final long serialVersionUID = 1L; }; DummyPage page = new DummyPage(); page.add(c); IStringResourceLoader loader = new ComponentStringResourceLoader(); Assert.assertNull("Missing resource should return null", loader.loadStringResource( c.getClass(), "test.string.bad", Locale.getDefault(), null)); } /** * */ public void testNullComponent() { Assert.assertNull("Null component should skip resource load", loader.loadStringResource( null, "test.string", Locale.getDefault(), null)); } /** * */ public void testMultiLevelEmbeddedComponentLoadFromComponent() { DummyPage p = new DummyPage(); Panel panel = new Panel("panel"); p.add(panel); DummyComponent c = new DummyComponent("hello", application); panel.add(c); IStringResourceLoader loader = new ComponentStringResourceLoader(); Assert.assertEquals("Valid resourse string should be found", "Component string", loader.loadStringResource(c.getClass(), "component.string", Locale.getDefault(), null)); } /** * */ public void testLoadDirectFromPage() { DummyPage p = new DummyPage(); IStringResourceLoader loader = new ComponentStringResourceLoader(); Assert.assertEquals("Valid resourse string should be found", "Another string", loader.loadStringResource(p.getClass(), "another.test.string", Locale.getDefault(), null)); } /** * */ public void testSearchClassHierarchyFromPage() { DummySubClassPage p = new DummySubClassPage(); IStringResourceLoader loader = new ComponentStringResourceLoader(); 
Assert.assertEquals("Valid resource string should be found", "SubClass Test String", loader.loadStringResource(p.getClass(), "subclass.test.string", Locale.getDefault(), null)); Assert.assertEquals("Valid resource string should be found", "Another string", loader.loadStringResource(p.getClass(), "another.test.string", Locale.getDefault(), null)); } }
apache-2.0
googleads/google-ads-java
google-ads-stubs-v10/src/test/java/com/google/ads/googleads/v10/services/CustomConversionGoalServiceClientTest.java
4597
/*
 * Copyright 2021 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.google.ads.googleads.v10.services;

import com.google.api.gax.core.NoCredentialsProvider;
import com.google.api.gax.grpc.GaxGrpcProperties;
import com.google.api.gax.grpc.testing.LocalChannelProvider;
import com.google.api.gax.grpc.testing.MockGrpcService;
import com.google.api.gax.grpc.testing.MockServiceHelper;
import com.google.api.gax.rpc.ApiClientHeaderProvider;
import com.google.api.gax.rpc.InvalidArgumentException;
import com.google.protobuf.AbstractMessage;
import io.grpc.StatusRuntimeException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.UUID;
import javax.annotation.Generated;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;

// Generated gapic test: exercises CustomConversionGoalServiceClient against an
// in-process mock gRPC service. Do not hand-edit logic; it is regenerated.
@Generated("by gapic-generator-java")
public class CustomConversionGoalServiceClientTest {
  private static MockCustomConversionGoalService mockCustomConversionGoalService;
  private static MockServiceHelper mockServiceHelper;
  private LocalChannelProvider channelProvider;
  private CustomConversionGoalServiceClient client;

  // Starts one shared in-process server for the whole test class.
  @BeforeClass
  public static void startStaticServer() {
    mockCustomConversionGoalService = new MockCustomConversionGoalService();
    mockServiceHelper =
        new MockServiceHelper(
            UUID.randomUUID().toString(),
            Arrays.<MockGrpcService>asList(mockCustomConversionGoalService));
    mockServiceHelper.start();
  }

  @AfterClass
  public static void stopServer() {
    mockServiceHelper.stop();
  }

  // Builds a fresh client per test over a local channel with no credentials.
  @Before
  public void setUp() throws IOException {
    mockServiceHelper.reset();
    channelProvider = mockServiceHelper.createChannelProvider();
    CustomConversionGoalServiceSettings settings =
        CustomConversionGoalServiceSettings.newBuilder()
            .setTransportChannelProvider(channelProvider)
            .setCredentialsProvider(NoCredentialsProvider.create())
            .build();
    client = CustomConversionGoalServiceClient.create(settings);
  }

  @After
  public void tearDown() throws Exception {
    client.close();
  }

  // Happy path: verifies the request fields and headers of a mutate round-trip.
  @Test
  public void mutateCustomConversionGoalsTest() throws Exception {
    MutateCustomConversionGoalsResponse expectedResponse =
        MutateCustomConversionGoalsResponse.newBuilder()
            .addAllResults(new ArrayList<MutateCustomConversionGoalResult>())
            .build();
    mockCustomConversionGoalService.addResponse(expectedResponse);

    String customerId = "customerId-1581184615";
    List<CustomConversionGoalOperation> operations = new ArrayList<>();

    MutateCustomConversionGoalsResponse actualResponse =
        client.mutateCustomConversionGoals(customerId, operations);
    Assert.assertEquals(expectedResponse, actualResponse);

    List<AbstractMessage> actualRequests = mockCustomConversionGoalService.getRequests();
    Assert.assertEquals(1, actualRequests.size());
    MutateCustomConversionGoalsRequest actualRequest =
        ((MutateCustomConversionGoalsRequest) actualRequests.get(0));

    Assert.assertEquals(customerId, actualRequest.getCustomerId());
    Assert.assertEquals(operations, actualRequest.getOperationsList());
    Assert.assertTrue(
        channelProvider.isHeaderSent(
            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
  }

  // Error path: a server-side INVALID_ARGUMENT must surface as InvalidArgumentException.
  @Test
  public void mutateCustomConversionGoalsExceptionTest() throws Exception {
    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
    mockCustomConversionGoalService.addException(exception);

    try {
      String customerId = "customerId-1581184615";
      List<CustomConversionGoalOperation> operations = new ArrayList<>();
      client.mutateCustomConversionGoals(customerId, operations);
      Assert.fail("No exception raised");
    } catch (InvalidArgumentException e) {
      // Expected exception.
    }
  }
}
apache-2.0
sladeware/friesian
src/main/java/org/arbeitspferde/friesian/WorkEngine.java
2963
/* Copyright 2012 Google, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package org.arbeitspferde.friesian; import com.google.common.base.Stopwatch; import java.util.Random; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.TimeUnit; import java.util.logging.Level; import java.util.logging.Logger; /** * This abstract class represents units of work performed by worker threads * * Each worker partitions a list and sorts each partition sequentially. Partitions are equally size, * except possibly the last one. The number of partitions is chosen randomly. A worker may randomly * sleep between partition sorts (mimicking blocking on I/O). One of two caches is potentially * updated when the workers are done sorting partitions of the list. 
*/ abstract class WorkEngine implements Runnable { private static final Logger log = Logger.getLogger(WorkEngine.class.getCanonicalName()); private final Random rng; private final int sleepProbability; private final int workerSleepTime; private final AtomicLong jtaWorkerWorkTime; private final AtomicLong jtaWorkerSleepTime; WorkEngine(Random rng, int sleepProbability, int workerSleepTime, AtomicLong jtaWorkerWorkTime, AtomicLong jtaWorkerSleepTime) { this.rng = rng; this.sleepProbability = sleepProbability; this.workerSleepTime = workerSleepTime; this.jtaWorkerWorkTime = jtaWorkerWorkTime; this.jtaWorkerSleepTime = jtaWorkerSleepTime; } public void run() { long startMillis; init(); while (workNotFinished()) { // TODO(mtp): This introduces non-determinism for testing; fix. final Stopwatch timer = new Stopwatch().start(); doWork(); timer.stop(); jtaWorkerWorkTime.addAndGet(timer.elapsed(TimeUnit.MILLISECONDS)); if (sleepProbability >= RandomNumber.generatePercentage(rng)) { try { Thread.sleep(workerSleepTime); jtaWorkerSleepTime.addAndGet(workerSleepTime); } catch (InterruptedException e) { log.log(Level.WARNING, "Worker is unable to sleep", e); } } } cache(); } /** Initialize the work engine */ abstract void init(); /** Returns true when all work is done, otherwise returns false. */ abstract boolean workNotFinished(); /** Performs a unit of work */ abstract void doWork(); /** Possibly caches the work performed in the hot or cold cache */ abstract void cache(); }
apache-2.0
aws/aws-sdk-java
aws-java-sdk-backupgateway/src/main/java/com/amazonaws/services/backupgateway/model/transform/VirtualMachineMarshaller.java
3529
/*
 * Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.backupgateway.model.transform;

import javax.annotation.Generated;

import com.amazonaws.SdkClientException;
import com.amazonaws.services.backupgateway.model.*;

import com.amazonaws.protocol.*;
import com.amazonaws.annotation.SdkInternalApi;

/**
 * VirtualMachineMarshaller
 *
 * Generated marshaller that writes a {@code VirtualMachine} POJO into a protocol payload,
 * one field binding per member. Do not hand-edit; this class is regenerated.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
@SdkInternalApi
public class VirtualMachineMarshaller {

    // One static binding per marshalled field: payload location name plus marshalling type.
    private static final MarshallingInfo<String> HOSTNAME_BINDING = MarshallingInfo.builder(MarshallingType.STRING).marshallLocation(MarshallLocation.PAYLOAD)
            .marshallLocationName("HostName").build();
    private static final MarshallingInfo<String> HYPERVISORID_BINDING = MarshallingInfo.builder(MarshallingType.STRING)
            .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("HypervisorId").build();
    // Dates are serialized as Unix timestamps.
    private static final MarshallingInfo<java.util.Date> LASTBACKUPDATE_BINDING = MarshallingInfo.builder(MarshallingType.DATE)
            .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("LastBackupDate").timestampFormat("unixTimestamp").build();
    private static final MarshallingInfo<String> NAME_BINDING = MarshallingInfo.builder(MarshallingType.STRING).marshallLocation(MarshallLocation.PAYLOAD)
            .marshallLocationName("Name").build();
    private static final MarshallingInfo<String> PATH_BINDING = MarshallingInfo.builder(MarshallingType.STRING).marshallLocation(MarshallLocation.PAYLOAD)
            .marshallLocationName("Path").build();
    private static final MarshallingInfo<String> RESOURCEARN_BINDING = MarshallingInfo.builder(MarshallingType.STRING)
            .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("ResourceArn").build();

    // Stateless singleton instance.
    private static final VirtualMachineMarshaller instance = new VirtualMachineMarshaller();

    public static VirtualMachineMarshaller getInstance() {
        return instance;
    }

    /**
     * Marshall the given parameter object.
     *
     * @throws SdkClientException if the argument is null or any field fails to marshall
     */
    public void marshall(VirtualMachine virtualMachine, ProtocolMarshaller protocolMarshaller) {

        if (virtualMachine == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }

        try {
            protocolMarshaller.marshall(virtualMachine.getHostName(), HOSTNAME_BINDING);
            protocolMarshaller.marshall(virtualMachine.getHypervisorId(), HYPERVISORID_BINDING);
            protocolMarshaller.marshall(virtualMachine.getLastBackupDate(), LASTBACKUPDATE_BINDING);
            protocolMarshaller.marshall(virtualMachine.getName(), NAME_BINDING);
            protocolMarshaller.marshall(virtualMachine.getPath(), PATH_BINDING);
            protocolMarshaller.marshall(virtualMachine.getResourceArn(), RESOURCEARN_BINDING);
        } catch (Exception e) {
            // Wrap any marshalling failure in the SDK's client exception, preserving the cause.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }

}
apache-2.0
Longri/cachebox3.0
core/src/de/longri/cachebox3/settings/types/SettingDouble.java
2247
/*
 * Copyright (C) 2011-2020 team-cachebox.de
 *
 * Licensed under the : GNU General Public License (GPL);
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.gnu.org/licenses/gpl.html
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package de.longri.cachebox3.settings.types;

import java.util.Objects;

/**
 * A double-valued setting. The value is persisted as its {@code String}
 * representation and parsed back on load; parse failures fall back to the
 * default value.
 */
public class SettingDouble extends SettingBase<Double> {

    public SettingDouble(String name, SettingCategory category, SettingMode modus, double defaultValue, SettingStoreType StoreType, SettingUsage usage) {
        this(name, category, modus, defaultValue, StoreType, usage, false);
    }

    public SettingDouble(String name, SettingCategory category, SettingMode modus, double defaultValue, SettingStoreType StoreType, SettingUsage usage, boolean desired) {
        super(name, category, modus, StoreType, usage, desired);
        this.defaultValue = defaultValue;
        this.value = defaultValue;
    }

    /** Serializes the current value as a String for database storage. */
    @Override
    public Object toDbValue() {
        return String.valueOf(value);
    }

    /**
     * Parses the stored String back into a double.
     *
     * @return {@code true} on success; {@code false} when parsing failed, in
     *         which case the value is reset to the default
     */
    @Override
    public boolean fromDbvalue(Object dbString) {
        try {
            value = Double.valueOf((String) dbString);
            return true;
        } catch (Exception ex) {
            value = defaultValue;
            return false;
        }
    }

    /**
     * Returns a copy carrying over value and lastValue.
     * NOTE(review): the 'desired' flag is not propagated to the copy (the
     * non-desired constructor is used) — presumably intentional, TODO confirm.
     */
    @Override
    public SettingBase<Double> copy() {
        SettingBase<Double> ret = new SettingDouble(this.name, this.category, this.mode, this.defaultValue, this.storeType, this.usage);
        ret.value = this.value;
        ret.lastValue = this.lastValue;
        return ret;
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof SettingDouble))
            return false;
        SettingDouble inst = (SettingDouble) obj;
        if (!(inst.name.equals(this.name)))
            return false;
        // BUGFIX: 'value' is a boxed Double, so the previous '!=' comparison
        // checked reference identity and reported equal-valued settings as
        // unequal. Compare by value (null-safe) instead.
        if (!Objects.equals(inst.value, this.value))
            return false;
        return true;
    }

    // Keep hashCode consistent with equals (name + value), as the contract requires.
    @Override
    public int hashCode() {
        return Objects.hash(name, value);
    }
}
apache-2.0
jentfoo/aws-sdk-java
aws-java-sdk-simpleworkflow/src/main/java/com/amazonaws/services/simpleworkflow/model/DecisionTaskCompletedEventAttributes.java
10497
/*
 * Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.simpleworkflow.model;

import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;

/**
 * <p>
 * Provides the details of the <code>DecisionTaskCompleted</code> event.
 * </p>
 *
 * Generated SDK model POJO; do not hand-edit — it is regenerated by the code generator.
 *
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/swf-2012-01-25/DecisionTaskCompletedEventAttributes"
 *      target="_top">AWS API Documentation</a>
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class DecisionTaskCompletedEventAttributes implements Serializable, Cloneable, StructuredPojo {

    /**
     * <p>
     * User defined context for the workflow execution.
     * </p>
     */
    private String executionContext;
    /**
     * <p>
     * The ID of the <code>DecisionTaskScheduled</code> event that was recorded when this decision task was scheduled.
     * This information can be useful for diagnosing problems by tracing back the chain of events leading up to this
     * event.
     * </p>
     */
    private Long scheduledEventId;
    /**
     * <p>
     * The ID of the <code>DecisionTaskStarted</code> event recorded when this decision task was started. This
     * information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.
     * </p>
     */
    private Long startedEventId;

    /**
     * <p>
     * User defined context for the workflow execution.
     * </p>
     *
     * @param executionContext
     *        User defined context for the workflow execution.
     */
    public void setExecutionContext(String executionContext) {
        this.executionContext = executionContext;
    }

    /**
     * <p>
     * User defined context for the workflow execution.
     * </p>
     *
     * @return User defined context for the workflow execution.
     */
    public String getExecutionContext() {
        return this.executionContext;
    }

    /**
     * <p>
     * User defined context for the workflow execution.
     * </p>
     *
     * @param executionContext
     *        User defined context for the workflow execution.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public DecisionTaskCompletedEventAttributes withExecutionContext(String executionContext) {
        setExecutionContext(executionContext);
        return this;
    }

    /**
     * <p>
     * The ID of the <code>DecisionTaskScheduled</code> event that was recorded when this decision task was scheduled.
     * This information can be useful for diagnosing problems by tracing back the chain of events leading up to this
     * event.
     * </p>
     *
     * @param scheduledEventId
     *        The ID of the <code>DecisionTaskScheduled</code> event that was recorded when this decision task was
     *        scheduled. This information can be useful for diagnosing problems by tracing back the chain of events
     *        leading up to this event.
     */
    public void setScheduledEventId(Long scheduledEventId) {
        this.scheduledEventId = scheduledEventId;
    }

    /**
     * <p>
     * The ID of the <code>DecisionTaskScheduled</code> event that was recorded when this decision task was scheduled.
     * This information can be useful for diagnosing problems by tracing back the chain of events leading up to this
     * event.
     * </p>
     *
     * @return The ID of the <code>DecisionTaskScheduled</code> event that was recorded when this decision task was
     *         scheduled. This information can be useful for diagnosing problems by tracing back the chain of events
     *         leading up to this event.
     */
    public Long getScheduledEventId() {
        return this.scheduledEventId;
    }

    /**
     * <p>
     * The ID of the <code>DecisionTaskScheduled</code> event that was recorded when this decision task was scheduled.
     * This information can be useful for diagnosing problems by tracing back the chain of events leading up to this
     * event.
     * </p>
     *
     * @param scheduledEventId
     *        The ID of the <code>DecisionTaskScheduled</code> event that was recorded when this decision task was
     *        scheduled. This information can be useful for diagnosing problems by tracing back the chain of events
     *        leading up to this event.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public DecisionTaskCompletedEventAttributes withScheduledEventId(Long scheduledEventId) {
        setScheduledEventId(scheduledEventId);
        return this;
    }

    /**
     * <p>
     * The ID of the <code>DecisionTaskStarted</code> event recorded when this decision task was started. This
     * information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.
     * </p>
     *
     * @param startedEventId
     *        The ID of the <code>DecisionTaskStarted</code> event recorded when this decision task was started. This
     *        information can be useful for diagnosing problems by tracing back the chain of events leading up to this
     *        event.
     */
    public void setStartedEventId(Long startedEventId) {
        this.startedEventId = startedEventId;
    }

    /**
     * <p>
     * The ID of the <code>DecisionTaskStarted</code> event recorded when this decision task was started. This
     * information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.
     * </p>
     *
     * @return The ID of the <code>DecisionTaskStarted</code> event recorded when this decision task was started. This
     *         information can be useful for diagnosing problems by tracing back the chain of events leading up to this
     *         event.
     */
    public Long getStartedEventId() {
        return this.startedEventId;
    }

    /**
     * <p>
     * The ID of the <code>DecisionTaskStarted</code> event recorded when this decision task was started. This
     * information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.
     * </p>
     *
     * @param startedEventId
     *        The ID of the <code>DecisionTaskStarted</code> event recorded when this decision task was started. This
     *        information can be useful for diagnosing problems by tracing back the chain of events leading up to this
     *        event.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public DecisionTaskCompletedEventAttributes withStartedEventId(Long startedEventId) {
        setStartedEventId(startedEventId);
        return this;
    }

    /**
     * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
     * redacted from this string using a placeholder value.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("{");
        if (getExecutionContext() != null)
            sb.append("ExecutionContext: ").append(getExecutionContext()).append(",");
        if (getScheduledEventId() != null)
            sb.append("ScheduledEventId: ").append(getScheduledEventId()).append(",");
        if (getStartedEventId() != null)
            sb.append("StartedEventId: ").append(getStartedEventId());
        sb.append("}");
        return sb.toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;

        if (obj instanceof DecisionTaskCompletedEventAttributes == false)
            return false;
        DecisionTaskCompletedEventAttributes other = (DecisionTaskCompletedEventAttributes) obj;
        // Generated null-safe field comparison: XOR catches the exactly-one-null case.
        if (other.getExecutionContext() == null ^ this.getExecutionContext() == null)
            return false;
        if (other.getExecutionContext() != null && other.getExecutionContext().equals(this.getExecutionContext()) == false)
            return false;
        if (other.getScheduledEventId() == null ^ this.getScheduledEventId() == null)
            return false;
        if (other.getScheduledEventId() != null && other.getScheduledEventId().equals(this.getScheduledEventId()) == false)
            return false;
        if (other.getStartedEventId() == null ^ this.getStartedEventId() == null)
            return false;
        if (other.getStartedEventId() != null && other.getStartedEventId().equals(this.getStartedEventId()) == false)
            return false;
        return true;
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int hashCode = 1;

        hashCode = prime * hashCode + ((getExecutionContext() == null) ? 0 : getExecutionContext().hashCode());
        hashCode = prime * hashCode + ((getScheduledEventId() == null) ? 0 : getScheduledEventId().hashCode());
        hashCode = prime * hashCode + ((getStartedEventId() == null) ? 0 : getStartedEventId().hashCode());
        return hashCode;
    }

    @Override
    public DecisionTaskCompletedEventAttributes clone() {
        try {
            return (DecisionTaskCompletedEventAttributes) super.clone();
        } catch (CloneNotSupportedException e) {
            // Cannot happen: the class implements Cloneable.
            throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
        }
    }

    @com.amazonaws.annotation.SdkInternalApi
    @Override
    public void marshall(ProtocolMarshaller protocolMarshaller) {
        com.amazonaws.services.simpleworkflow.model.transform.DecisionTaskCompletedEventAttributesMarshaller.getInstance().marshall(this, protocolMarshaller);
    }

}
apache-2.0
rterp/GMapsFX
GMapsFX/src/main/java/com/dlsc/gmapsfx/shapes/PolylineOptions.java
1182
/* * Copyright 2014 Geoff Capper. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.dlsc.gmapsfx.shapes; import com.dlsc.gmapsfx.javascript.object.MVCArray; /** * * @author Geoff Capper */ public class PolylineOptions extends MapShapeOptions<PolylineOptions> { // icons Array.<IconSequence> The icons to be rendered along the polyline. private MVCArray path; public PolylineOptions() { } public PolylineOptions path(MVCArray path) { setProperty("path", path); this.path = path; return this; } @Override protected PolylineOptions getMe() { return this; } }
apache-2.0
FJplant/AntIDE
src/com/antsoft/ant/browser/sourcebrowser/SourceBrowser.java
22202
/* * Ant ( JDK wrapper Java IDE ) * Version 1.0 * Copyright (c) 1998-1999 Antsoft Co. * All rights reserved. * This program and source file is protected by Korea and international * Copyright laws. * * $Header: /usr/cvsroot/AntIDE/source/com/antsoft/ant/browser/sourcebrowser/SourceBrowser.java,v 1.26 1999/08/31 12:25:33 multipia Exp $ * $Revision: 1.26 $ * $History: SourceBrowser.java $ * * */ package com.antsoft.ant.browser.sourcebrowser; import java.awt.*; import java.awt.event.*; import javax.swing.*; import javax.swing.event.*; import javax.swing.tree.*; import java.util.Vector; import java.util.Hashtable; import com.antsoft.ant.manager.projectmanager.*; import com.antsoft.ant.util.*; import com.antsoft.ant.codecontext.codeeditor.EventContent; /** * class SourceBrowser * * @author Jinwoo Baek * @author kim sang kyun */ public class SourceBrowser extends JPanel implements ActionListener { /** TreeÇüÅ·ΠºÐ¼®µÈ ¼Ò½º¸¦ º¸¿©ÁØ´Ù. */ private JTree tree = null; /** Project Explorer¿¡ Embed µÈ´Ù. 
*/ private ProjectExplorer pe = null; /** tree's root node */ private DefaultMutableTreeNode rootNode; private DefaultMutableTreeNode importRootNode; /** manager tree model */ private DefaultTreeModel treeModel; /** tree selection model */ private TreeSelectionModel treeSelectionModel; private JComboBox combo; private DefaultComboBoxModel comboM; /** TreeCell Renderer */ private SourceBrowserTreeCellRenderer renderer = new SourceBrowserTreeCellRenderer(); /** ÇöÀç º¸¿©Á٠Ŭ·¡½º ³ëµå */ private DefaultMutableTreeNode currentNode = null; /** scroller */ private JScrollPane scrollPane = null; private JViewport vp = null; /** Popup mene */ private JPopupMenu classPopup; private JMenuItem addField; private JMenuItem addMain; private JMenuItem addMethod; private JMenuItem addGetSet; private JMenuItem addHandler; private JLabel sourceNameLbl = new JLabel(" "); private JButton addFieldBtn, addMainBtn, addMethodBtn, addGetSetBtn, addHandlerBtn; private boolean isTreeUpdate = false; private boolean isComboUpdate = false; private int lastDividerLocation = 0; private Hashtable nodeHash =new Hashtable(); /** * Constructor */ public SourceBrowser(ProjectExplorer pe) { this.pe = pe; this.setPreferredSize(new Dimension(250, 0)); this.setMaximumSize(new Dimension(2000, 0)); this.setMinimumSize(new Dimension(0, 0)); this.setLayout(new BorderLayout()); setBorder(BorderList.unselLineBorder); if(!pe.isFilesTab()){ classPopup = new JPopupMenu(); addField = new JMenuItem("Add Field", ImageList.addfieldIcon); addMain = new JMenuItem("Add Main Method", ImageList.addmainIcon); addMethod = new JMenuItem("Add Method", ImageList.addmethodIcon); addGetSet= new JMenuItem("Add GetSet Method", ImageList.addgetsetIcon); addHandler = new JMenuItem("Add EventHandler", ImageList.addeventIcon); comboM = new DefaultComboBoxModel(); combo = new JComboBox(comboM); combo.setMinimumSize(new Dimension(1,1)); combo.setToolTipText("Selected Class"); combo.addItemListener(new ComboHandler()); 
combo.setRenderer(new ComboRenderer()); addFieldBtn = new JButton(ImageList.addfieldIcon); addFieldBtn.setMargin(new Insets(0,0,0,0)); addFieldBtn.setActionCommand("ADD_FIELD_B"); addFieldBtn.addActionListener(this); addFieldBtn.setToolTipText("Add Field"); addMainBtn = new JButton(ImageList.addmainIcon); addMainBtn.setMargin(new Insets(0,0,0,0)); addMainBtn.setActionCommand("ADD_MAIN_B"); addMainBtn.addActionListener(this); addMainBtn.setToolTipText("Add Main Method"); addMethodBtn = new JButton(ImageList.addmethodIcon); addMethodBtn.setMargin(new Insets(0,0,0,0)); addMethodBtn.setActionCommand("ADD_METHOD_B"); addMethodBtn.addActionListener(this); addMethodBtn.setToolTipText("Add Method"); addGetSetBtn = new JButton(ImageList.addgetsetIcon); addGetSetBtn.setMargin(new Insets(0,0,0,0)); addGetSetBtn.setActionCommand("ADD_GETSET_B"); addGetSetBtn.addActionListener(this); addGetSetBtn.setToolTipText("Add Getter, Setter Method"); addHandlerBtn = new JButton(ImageList.addeventIcon); addHandlerBtn.setMargin(new Insets(0,0,0,0)); addHandlerBtn.setActionCommand("ADD_HANDLER_B"); addHandlerBtn.addActionListener(this); addHandlerBtn.setToolTipText("Add Event Handler Inner Class"); Box btnBox = Box.createHorizontalBox(); btnBox.add(addFieldBtn); btnBox.add(addMainBtn); btnBox.add(addMethodBtn); btnBox.add(addGetSetBtn); btnBox.add(addHandlerBtn); enableBtns(false); JPanel comboP = new JPanel(); comboP.add(combo); JPanel topP = new JPanel(new BorderLayout(0,2)); topP.add(comboP,BorderLayout.WEST); //topP.add(btnBoxP,BorderLayout.CENTER); topP.add(btnBox,BorderLayout.CENTER); add(topP, BorderLayout.NORTH); addField.setActionCommand("ADD_FIELD"); addField.addActionListener(this); addMain.setActionCommand("ADD_MAIN"); addMain.addActionListener(this); addMethod.setActionCommand("ADD_METHOD"); addMethod.addActionListener(this); addGetSet.setActionCommand("ADD_GETSET"); addGetSet.addActionListener(this); addHandler.setActionCommand("ADD_HANDLER"); 
addHandler.addActionListener(this); classPopup.add(addField); classPopup.add(addMain); classPopup.add(addMethod); classPopup.add(addGetSet); classPopup.add(addHandler); } rootNode = new DefaultMutableTreeNode( new EventContent("", EventContent.FILEROOTNODE)); importRootNode = new DefaultMutableTreeNode( new EventContent("imports", EventContent.IMPORTROOTNODE)); rootNode.add(importRootNode); treeModel = new DefaultTreeModel(rootNode); tree = new JTree(treeModel); tree.setDoubleBuffered(true); tree.putClientProperty("JTree.lineStyle", "Angled"); tree.setEditable(false); treeSelectionModel = tree.getSelectionModel(); treeSelectionModel.setSelectionMode(TreeSelectionModel.SINGLE_TREE_SELECTION); tree.setCellRenderer(renderer); tree.setFont(FontList.treeFont); // tree¿¡¼­ itemÀ» ¼±ÅÃÇÏ¸é ¼Ò½º¿¡ Àû´çÇÑ À§Ä¡·Î À̵¿Çϵµ·Ï ÇÏ´Â °ÍÀÌ ÁÁ°ÚÁã? tree.addMouseListener(new TreeMouseEventHandler()); tree.addMouseMotionListener( new MouseMotionHandler() ); tree.addKeyListener(new KeyHandler()); // Java File NameÀÎ ·çÆ®¸¦ ¾Èº¸ÀÌ°Ô ÇÏ·Á¸é ¾Æ·¡¸¦ Uncomment tree.setRootVisible(false); tree.setShowsRootHandles( true ); scrollPane = new JScrollPane(tree); vp = scrollPane.getViewport(); add(scrollPane, BorderLayout.CENTER); } public void setSelLineBorder(){ setBorder(BorderList.selLineBorder); } public void clearBorder(){ setBorder(BorderList.unselLineBorder); } public void setLastDividerLocation(int newLoc){ lastDividerLocation = newLoc; } public int getLastDividerLocation(){ return lastDividerLocation; } private void enableBtns(boolean flag){ if(pe.isFilesTab()) return; addFieldBtn.setEnabled(flag); addMainBtn.setEnabled(flag); addMethodBtn.setEnabled(flag); addGetSetBtn.setEnabled(flag); addHandlerBtn.setEnabled(flag); } private void remindPE(){ pe.setFocusedComponent(ProjectExplorer.SOURCE_BROWSER); } //treeÀÇ ¿ÞÂÊ À§°¡ º¸À̵µ·Ï ÇÑ´Ù public void setPositionToLeftTop(){ vp.setViewPosition(new Point(0, 0)); } public void actionPerformed(ActionEvent evt) { String cmd = 
evt.getActionCommand(); if(evt.getSource() instanceof JButton){ if( pe == null || comboM.getSelectedItem() == null) return; } else { if( pe == null || currentNode == null) return; } boolean isInner = false; String key = ""; if(currentNode != null){ EventContent sbte = (EventContent)currentNode.getUserObject(); isInner = ( sbte.getContentType() == EventContent.INNER ); key = sbte.getHashtableKey(); } if (cmd.equals("ADD_FIELD")) pe.addField(key, isInner ); else if (cmd.equals("ADD_MAIN")) pe.addField(key, isInner ); else if (cmd.equals("ADD_METHOD")) pe.addMethod(key, isInner ); else if (cmd.equals("ADD_GETSET")) pe.addGetSetMethod(tree, currentNode, isInner ); else if (cmd.equals("ADD_HANDLER")) pe.addHandler(key, isInner ); else if (cmd.equals("ADD_FIELD_B")) pe.addField((String)comboM.getSelectedItem(), isInner); else if (cmd.equals("ADD_MAIN_B")) pe.addMain((String)comboM.getSelectedItem(), isInner ); else if (cmd.equals("ADD_METHOD_B")) pe.addMethod((String)comboM.getSelectedItem(), isInner ); else if (cmd.equals("ADD_HANDLER_B")) pe.addHandler((String)comboM.getSelectedItem(), isInner); else if (cmd.equals("ADD_GETSET_B")) { for(int i=0; i<rootNode.getChildCount(); i++){ DefaultMutableTreeNode child = (DefaultMutableTreeNode)rootNode.getChildAt(i); EventContent ec = (EventContent)child.getUserObject(); if(ec.getHashtableKey().equals((String)comboM.getSelectedItem())){ pe.addGetSetMethod(tree, child, isInner ); break; } } } } Vector classNodes = new Vector(); private void addTreeNode(DefaultMutableTreeNode parent, EventContent childObj){ int contentType = childObj.getContentType(); int childCount = parent.getChildCount(); int i=0; OUT: for(; i<childCount; i++){ DefaultMutableTreeNode childAt = (DefaultMutableTreeNode)parent.getChildAt(i); EventContent childAtObj = (EventContent)childAt.getUserObject(); int childContentType = childAtObj.getContentType(); switch(contentType){ case EventContent.OPER : if(childContentType == EventContent.ATTR) break OUT; else 
if(childContentType == EventContent.INNER) continue; break; case EventContent.ATTR : if(childContentType == EventContent.OPER) continue; else if(childContentType == EventContent.INNER) continue; break; case EventContent.INNER : if(childContentType == EventContent.CLASS) continue; else if(childContentType == EventContent.OPER || childContentType == EventContent.ATTR) break OUT; break; case EventContent.PACKAGE : if(childContentType != EventContent.IMPORTROOTNODE) continue; else break OUT; } if(childAtObj.getContent().compareTo(childObj.getContent()) < 0) continue; else if(childAtObj.getContent().compareTo(childObj.getContent()) == 0) return; else break; } DefaultMutableTreeNode toInsert = new DefaultMutableTreeNode(childObj); treeModel.insertNodeInto(toInsert, parent, i); if(contentType==EventContent.CLASS){ nodeHash.put(childObj.getHashtableKey() , toInsert); classNodes.addElement(toInsert); } else if(contentType==EventContent.INNER){ nodeHash.put(childObj.getHashtableKey(), toInsert); } } /** * package ¸¦ ³ªÅ¸³»´Â node¸¦ Ãß°¡ÇÑ´Ù. * * @param packageNode Ãß°¡ÇÒ ³ëµå */ public void addPackageNode(EventContent packageNode) { if( packageNode == null ) return; addTreeNode(rootNode, packageNode); } /** * class ¸¦ ³ªÅ¸³»´Â node¸¦ Ãß°¡ÇÑ´Ù. * * @param classNode Ãß°¡ÇÒ ³ëµå */ public void addClassNode(EventContent classNode) { if (classNode == null) return; if( !pe.isFilesTab() && comboM.getIndexOf(classNode.getHashtableKey())==-1) comboM.addElement(classNode.getHashtableKey()); addTreeNode(rootNode, classNode); } /** * interface ¸¦ ³ªÅ¸³»´Â node¸¦ Ãß°¡ÇÑ´Ù. * * @param interfaceNode Ãß°¡ÇÒ ³ëµå */ public void addInterfaceNode(EventContent interfaceNode) { if (interfaceNode == null) return; addTreeNode(rootNode, interfaceNode); } /** * attribute ¸¦ ³ªÅ¸³»´Â node¸¦ Ãß°¡ÇÑ´Ù. 
* * @param attNode Ãß°¡ÇÒ ³ëµå */ public void addAttributeNode(EventContent attrNode) { if (attrNode == null) return; DefaultMutableTreeNode parent = (DefaultMutableTreeNode)nodeHash.get(attrNode.getParent().getHashtableKey()); if(parent == null) { EventContent pp = attrNode.getParent(); if(pp.getContentType() == EventContent.CLASS) addClassNode(pp); else if(pp.getContentType() == EventContent.INNER) addInnerClassNode(pp); else addInterfaceNode(pp); parent = (DefaultMutableTreeNode)nodeHash.get(attrNode.getParent().getHashtableKey()); } if(parent != null) addTreeNode(parent, attrNode); } /** * operation À» ³ªÅ¸³»´Â node¸¦ Ãß°¡ÇÑ´Ù. * * @param operNode Ãß°¡ÇÒ ³ëµå */ public void addOperationNode(EventContent operNode) { if (operNode == null) return; DefaultMutableTreeNode parent = (DefaultMutableTreeNode) nodeHash.get(operNode.getParent().getHashtableKey()); if(parent == null) { EventContent pp = operNode.getParent(); if(pp.getContentType() == EventContent.CLASS) addClassNode(pp); else if(pp.getContentType() == EventContent.INNER) addInnerClassNode(pp); else addInterfaceNode(pp); parent = (DefaultMutableTreeNode)nodeHash.get(operNode.getParent().getHashtableKey()); } if(parent != null) addTreeNode(parent, operNode); } /** * inner class ¸¦ ³ªÅ¸³»´Â node¸¦ Ãß°¡ÇÑ´Ù. 
* * @param innerNode Ãß°¡ÇÒ ³ëµå */ public void addInnerClassNode(EventContent innerNode) { if (innerNode == null) return; DefaultMutableTreeNode parent = (DefaultMutableTreeNode)nodeHash.get(innerNode.getParent().getHashtableKey()); if(parent == null) { EventContent pp = innerNode.getParent(); if(pp.getContentType() == EventContent.CLASS) addClassNode(pp); else if(pp.getContentType() == EventContent.INNER) addInnerClassNode(pp); else addInterfaceNode(pp); parent = (DefaultMutableTreeNode)nodeHash.get(innerNode.getParent().getHashtableKey()); } if( !pe.isFilesTab() && comboM.getIndexOf(innerNode.getHashtableKey())==-1) comboM.addElement(innerNode.getHashtableKey()); if(parent != null) addTreeNode(parent, innerNode); } /** * import ¸¦ ³ªÅ¸³»´Â node¸¦ Ãß°¡ÇÑ´Ù. * * @param importNode Ãß°¡ÇÒ ³ëµå */ public void addImportNode(EventContent importNode) { if (importNode == null) return; addTreeNode(importRootNode, importNode); } public void clearClassNodes(){ if(!pe.isFilesTab()) comboM.removeAllElements(); } public void initViewport() { vp.setViewPosition(new Point(0, 0)); } /** * Tree¿¡¼­ ³ëµå¸¦ »èÁ¦ÇÑ´Ù. 
* * @param item »èÁ¦ÇÒ item */ public void removeNode(int type, EventContent content) { for (int i = 0; i < tree.getRowCount(); i++) { TreePath tp = tree.getPathForRow(i); DefaultMutableTreeNode tn = (DefaultMutableTreeNode)tp.getLastPathComponent(); Object obj = tn.getUserObject(); if (obj instanceof EventContent) { EventContent c = (EventContent)obj; if (content.equals(c)) { treeModel.removeNodeFromParent(tn); int entryType = content.getContentType(); if(entryType == EventContent.CLASS || entryType == EventContent.INNER){ if(!pe.isFilesTab()) comboM.removeElement(content); nodeHash.remove(content.getHashtableKey()); } break; } } } } /** * reload */ public void reload(){ int width = getSize().width; if(pe.isFilesTab()) return; int btnWidth = addFieldBtn.getWidth() * 5; combo.setPreferredSize(new Dimension(width - btnWidth - 12, combo.getHeight())); enableBtns(true); } /** * Tree»óÀÇ classNodeµéÀ» expand ½ÃŲ´Ù. sourcebrowserÀÇ ³»¿ëÀÌ ¹Ù²ð¶§ ¹Ù´Ù * È£Ã⠵Ǿî¾ß ÇÑ´Ù */ public void expandClassNodes(){ for(int i=0; i<classNodes.size(); i++){ DefaultMutableTreeNode classNode = (DefaultMutableTreeNode)classNodes.elementAt(i); tree.expandPath(new TreePath(classNode.getPath())); } tree.expandPath(new TreePath( importRootNode.getPath() )); classNodes.removeAllElements(); } /** * sourceBrowserÀÇ ³»¿ëÀÌ ÀüºÎ ¹Ù²ð¶§ (´Ù¸¥ ÆÄÀÏ ¼±ÅÃ) È£ÃâµÈ´Ù. * ¸¸µé¾îÁø treeModelÀ» tree¿¡ ¼³Á¤Çϰí, class nodeµéÀ» expand ½ÃŲ´Ù */ public void setTreeModel(){ tree.setModel(treeModel); expandClassNodes(); } public void clear(){ removeAll(); tree.setModel(null); if(pe.isFilesTab()) return; if(comboM.getSize() > 0) comboM.removeAllElements(); enableBtns(false); } /** * Tree¿¡¼­ ¸ðµç ³ëµå¸¦ »èÁ¦ÇÑ´Ù. 
*/ public void removeAll() { if(rootNode.getChildCount() > 0){ importRootNode.removeAllChildren(); rootNode.removeAllChildren(); rootNode.add(importRootNode); nodeHash.clear(); treeModel.reload(); } } class TreeMouseEventHandler extends MouseAdapter { public void mouseClicked(MouseEvent evt) { remindPE(); } public void mouseReleased(MouseEvent evt) { if (evt.isPopupTrigger()) { tree.setSelectionPath(tree.getPathForLocation(evt.getX(), evt.getY())); TreePath path = tree.getSelectionPath(); if (path != null) { DefaultMutableTreeNode dmtn = (DefaultMutableTreeNode)path.getLastPathComponent(); currentNode = dmtn; Object obj = dmtn.getUserObject(); if (obj instanceof EventContent) { EventContent sbte = (EventContent)obj; int type = sbte.getContentType(); if (type == EventContent.CLASS || type == EventContent.INNER) { classPopup.show(tree, evt.getX(), evt.getY()); } } } } } public void mousePressed(MouseEvent evt){ if(isComboUpdate) return; if(tree.hasFocus()){ TreePath tp = tree.getPathForLocation(evt.getX(), evt.getY()); if(tp == null) return; currentNode = (DefaultMutableTreeNode)tp.getLastPathComponent(); EventContent sbte = (EventContent)currentNode.getUserObject(); if (!currentNode.isRoot() && currentNode != importRootNode) { // »óÀ§ÀÇ class name°ú ÇÔ²² º¸³½´Ù. String parentContent = null; String childContent = sbte.getContent(); String key = sbte.getHashtableKey(); if(sbte.getContentType() == EventContent.IMPORT) parentContent = null; else if(sbte.getContentType() == EventContent.PACKAGE) parentContent = null; else if(key.indexOf(".") != -1){ parentContent = key.substring(0, key.lastIndexOf(".")); } pe.sourceBrowserSelection(parentContent, childContent); if(!pe.isFilesTab() && comboM.getIndexOf(sbte.getHashtableKey()) != -1) { isTreeUpdate = true; comboM.setSelectedItem(sbte.getHashtableKey()); isTreeUpdate = false; } } } } } /** * Source Browser³»¿¡¼­ È­¸é Æø ¶§¹®¿¡ º¸ÀÌÁö ¾Ê´Â ¸â¹öµéÀ» º¸¿©ÁÖ±â À§Çؼ­ ÅøÆÁÀ» * ÀÌ¿ëÇÑ´Ù. 
* Á»´õ ¼¼·ÃµÇ°Ô º¸À̱â À§Çؼ­´Â ¿ø·¡ÀÇ À§Ä¡¿¡ µ¤¾î ¾²´Â °ÍÀÌ ÁÁÀ» °ÍÀÌ´Ù. */ class MouseMotionHandler extends MouseMotionAdapter { public void mouseMoved( MouseEvent e ) { TreePath path = tree.getClosestPathForLocation( e.getX(), e.getY() ); if ( path != null ) { DefaultMutableTreeNode tn = (DefaultMutableTreeNode)path.getLastPathComponent(); Object obj = tn.getUserObject(); if (obj instanceof EventContent) { EventContent sbte = (EventContent)obj; tree.setToolTipText( sbte.getContent() ); } } else tree.setToolTipText( "" ); } } class ComboHandler implements ItemListener { private String prevSel = ""; public void itemStateChanged(ItemEvent e){ if(isTreeUpdate) return; String selItem = (String)comboM.getSelectedItem(); if(selItem == null) return; if(prevSel.equals(selItem)) return; else prevSel = selItem; DefaultMutableTreeNode sameNode = (DefaultMutableTreeNode)nodeHash.get(selItem); if(sameNode != null){ isComboUpdate = true; tree.setSelectionPath(new TreePath(sameNode.getPath())); isComboUpdate = false; pe.sourceBrowserSelection(null, sameNode.toString()); } } } class KeyHandler extends KeyAdapter{ public void keyPressed(KeyEvent e){ if(e.getKeyCode() == KeyEvent.VK_ESCAPE){ if(classPopup != null && classPopup.isVisible()) classPopup.setVisible(false); } } } class ComboRenderer extends DefaultListCellRenderer { private JList list; public ComboRenderer(){ setOpaque(true); setFont(FontList.treeFont); } public Component getListCellRendererComponent( JList list, Object value, int index, boolean isSelected, boolean cellHasFocus) { if(list == null) this.list = list; if (isSelected) { setBackground(ColorList.darkBlue); setForeground(Color.white); } else { setBackground(Color.white); setForeground(Color.black); } if (value instanceof Icon) { setIcon((Icon)value); } else { setText((value == null) ? "" : value.toString()); } setEnabled(list.isEnabled()); setBorder((cellHasFocus) ? 
UIManager.getBorder("List.focusCellHighlightBorder") : noFocusBorder); if(list.getSelectedValue() != null) list.setToolTipText(list.getSelectedValue().toString()); return this; } } }
apache-2.0
jentfoo/aws-sdk-java
aws-java-sdk-lightsail/src/main/java/com/amazonaws/services/lightsail/model/transform/StopInstanceRequestProtocolMarshaller.java
2662
/*
 * Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.lightsail.model.transform;

import javax.annotation.Generated;

import com.amazonaws.SdkClientException;
import com.amazonaws.Request;

import com.amazonaws.http.HttpMethodName;
import com.amazonaws.services.lightsail.model.*;
import com.amazonaws.transform.Marshaller;

import com.amazonaws.protocol.*;
import com.amazonaws.protocol.Protocol;
import com.amazonaws.annotation.SdkInternalApi;

/**
 * StopInstanceRequest Marshaller
 *
 * <p>
 * Generated code: converts a {@link StopInstanceRequest} into an AWS-JSON HTTP request for the Lightsail
 * {@code StopInstance} operation.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
@SdkInternalApi
public class StopInstanceRequestProtocolMarshaller implements Marshaller<Request<StopInstanceRequest>, StopInstanceRequest> {

    // Wire-level description of the operation: AWS_JSON protocol, POST to "/",
    // operation identifier "Lightsail_20161128.StopInstance".
    private static final OperationInfo SDK_OPERATION_BINDING = OperationInfo.builder().protocol(Protocol.AWS_JSON).requestUri("/")
            .httpMethodName(HttpMethodName.POST).hasExplicitPayloadMember(false).hasPayloadMembers(true).operationIdentifier("Lightsail_20161128.StopInstance")
            .serviceName("AmazonLightsail").build();

    private final com.amazonaws.protocol.json.SdkJsonProtocolFactory protocolFactory;

    public StopInstanceRequestProtocolMarshaller(com.amazonaws.protocol.json.SdkJsonProtocolFactory protocolFactory) {
        this.protocolFactory = protocolFactory;
    }

    /**
     * Marshalls the given request into an HTTP {@link Request}.
     *
     * @param stopInstanceRequest the request to marshall; must not be null
     * @throws SdkClientException if the argument is null or marshalling fails (the cause is preserved)
     */
    public Request<StopInstanceRequest> marshall(StopInstanceRequest stopInstanceRequest) {

        if (stopInstanceRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }

        try {
            final ProtocolRequestMarshaller<StopInstanceRequest> protocolMarshaller = protocolFactory.createProtocolMarshaller(SDK_OPERATION_BINDING,
                    stopInstanceRequest);

            protocolMarshaller.startMarshalling();
            // Field-level marshalling is delegated to the generated singleton.
            StopInstanceRequestMarshaller.getInstance().marshall(stopInstanceRequest, protocolMarshaller);
            return protocolMarshaller.finishMarshalling();
        } catch (Exception e) {
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }

}
apache-2.0
aws/aws-sdk-java
aws-java-sdk-ssm/src/main/java/com/amazonaws/services/simplesystemsmanagement/model/DescribePatchBaselinesResult.java
7760
/*
 * Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.simplesystemsmanagement.model;

import java.io.Serializable;
import javax.annotation.Generated;

/**
 * Generated result model for the SSM <code>DescribePatchBaselines</code> operation: a page of patch-baseline
 * identities plus a pagination token.
 *
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribePatchBaselines" target="_top">AWS API
 *      Documentation</a>
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class DescribePatchBaselinesResult extends com.amazonaws.AmazonWebServiceResult<com.amazonaws.ResponseMetadata> implements Serializable, Cloneable {

    /**
     * <p>
     * An array of <code>PatchBaselineIdentity</code> elements.
     * </p>
     */
    private com.amazonaws.internal.SdkInternalList<PatchBaselineIdentity> baselineIdentities;
    /**
     * <p>
     * The token to use when requesting the next set of items. If there are no additional items to return, the string is
     * empty.
     * </p>
     */
    private String nextToken;

    /**
     * <p>
     * An array of <code>PatchBaselineIdentity</code> elements.
     * </p>
     *
     * @return An array of <code>PatchBaselineIdentity</code> elements.
     */
    public java.util.List<PatchBaselineIdentity> getBaselineIdentities() {
        // Lazily initialized so the getter never returns null.
        if (baselineIdentities == null) {
            baselineIdentities = new com.amazonaws.internal.SdkInternalList<PatchBaselineIdentity>();
        }
        return baselineIdentities;
    }

    /**
     * <p>
     * An array of <code>PatchBaselineIdentity</code> elements.
     * </p>
     *
     * @param baselineIdentities
     *        An array of <code>PatchBaselineIdentity</code> elements.
     */
    public void setBaselineIdentities(java.util.Collection<PatchBaselineIdentity> baselineIdentities) {
        if (baselineIdentities == null) {
            this.baselineIdentities = null;
            return;
        }

        // Defensive copy into the SDK's internal list type.
        this.baselineIdentities = new com.amazonaws.internal.SdkInternalList<PatchBaselineIdentity>(baselineIdentities);
    }

    /**
     * <p>
     * An array of <code>PatchBaselineIdentity</code> elements.
     * </p>
     * <p>
     * <b>NOTE:</b> This method appends the values to the existing list (if any). Use
     * {@link #setBaselineIdentities(java.util.Collection)} or {@link #withBaselineIdentities(java.util.Collection)} if
     * you want to override the existing values.
     * </p>
     *
     * @param baselineIdentities
     *        An array of <code>PatchBaselineIdentity</code> elements.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public DescribePatchBaselinesResult withBaselineIdentities(PatchBaselineIdentity... baselineIdentities) {
        if (this.baselineIdentities == null) {
            setBaselineIdentities(new com.amazonaws.internal.SdkInternalList<PatchBaselineIdentity>(baselineIdentities.length));
        }
        for (PatchBaselineIdentity ele : baselineIdentities) {
            this.baselineIdentities.add(ele);
        }
        return this;
    }

    /**
     * <p>
     * An array of <code>PatchBaselineIdentity</code> elements.
     * </p>
     *
     * @param baselineIdentities
     *        An array of <code>PatchBaselineIdentity</code> elements.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public DescribePatchBaselinesResult withBaselineIdentities(java.util.Collection<PatchBaselineIdentity> baselineIdentities) {
        setBaselineIdentities(baselineIdentities);
        return this;
    }

    /**
     * <p>
     * The token to use when requesting the next set of items. If there are no additional items to return, the string is
     * empty.
     * </p>
     *
     * @param nextToken
     *        The token to use when requesting the next set of items. If there are no additional items to return, the
     *        string is empty.
     */
    public void setNextToken(String nextToken) {
        this.nextToken = nextToken;
    }

    /**
     * <p>
     * The token to use when requesting the next set of items. If there are no additional items to return, the string is
     * empty.
     * </p>
     *
     * @return The token to use when requesting the next set of items. If there are no additional items to return, the
     *         string is empty.
     */
    public String getNextToken() {
        return this.nextToken;
    }

    /**
     * <p>
     * The token to use when requesting the next set of items. If there are no additional items to return, the string is
     * empty.
     * </p>
     *
     * @param nextToken
     *        The token to use when requesting the next set of items. If there are no additional items to return, the
     *        string is empty.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public DescribePatchBaselinesResult withNextToken(String nextToken) {
        setNextToken(nextToken);
        return this;
    }

    /**
     * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
     * redacted from this string using a placeholder value.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("{");
        if (getBaselineIdentities() != null)
            sb.append("BaselineIdentities: ").append(getBaselineIdentities()).append(",");
        if (getNextToken() != null)
            sb.append("NextToken: ").append(getNextToken());
        sb.append("}");
        return sb.toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;

        if (obj instanceof DescribePatchBaselinesResult == false)
            return false;
        DescribePatchBaselinesResult other = (DescribePatchBaselinesResult) obj;
        // XOR check rejects "one side null, other non-null"; the second
        // check compares values when both sides are non-null.
        if (other.getBaselineIdentities() == null ^ this.getBaselineIdentities() == null)
            return false;
        if (other.getBaselineIdentities() != null && other.getBaselineIdentities().equals(this.getBaselineIdentities()) == false)
            return false;
        if (other.getNextToken() == null ^ this.getNextToken() == null)
            return false;
        if (other.getNextToken() != null && other.getNextToken().equals(this.getNextToken()) == false)
            return false;
        return true;
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int hashCode = 1;

        hashCode = prime * hashCode + ((getBaselineIdentities() == null) ? 0 : getBaselineIdentities().hashCode());
        hashCode = prime * hashCode + ((getNextToken() == null) ? 0 : getNextToken().hashCode());
        return hashCode;
    }

    @Override
    public DescribePatchBaselinesResult clone() {
        try {
            return (DescribePatchBaselinesResult) super.clone();
        } catch (CloneNotSupportedException e) {
            // The class is Cloneable, so this is unreachable in practice.
            throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
        }
    }

}
apache-2.0
chanil1218/elasticsearch
src/main/java/org/elasticsearch/index/query/BoolFilterParser.java
5008
/*
 * Licensed to ElasticSearch and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. ElasticSearch licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.query;

import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.FilterClause;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lucene.search.XBooleanFilter;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;

import java.io.IOException;

/**
 * Parses the {@code bool} filter: a compound filter combining inner filters
 * with {@code must}, {@code must_not}/{@code mustNot} and {@code should}
 * occurrence semantics. Each clause may be given either as a single object or
 * as an array of objects. Also honours the generic {@code _cache},
 * {@code _cache_key}/{@code _cacheKey} and {@code _name} options.
 */
public class BoolFilterParser implements FilterParser {

    public static final String NAME = "bool";

    @Inject
    public BoolFilterParser() {
    }

    @Override
    public String[] names() {
        return new String[]{NAME};
    }

    /**
     * Builds an {@link XBooleanFilter} from the current position of the
     * context's parser (expected to be inside the {@code bool} object).
     *
     * @throws QueryParsingException if an unknown field name is encountered
     */
    @Override
    public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
        XContentParser parser = parseContext.parser();

        XBooleanFilter boolFilter = new XBooleanFilter();

        boolean cache = false;
        CacheKeyFilter.Key cacheKey = null;
        String filterName = null;
        // tracks the most recently seen field name; clause tokens that follow
        // are interpreted against it
        String currentFieldName = null;
        XContentParser.Token token;
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
            } else if (token == XContentParser.Token.START_OBJECT) {
                // single-object clause form, e.g. "must": { ... }
                if ("must".equals(currentFieldName)) {
                    boolFilter.add(new FilterClause(parseContext.parseInnerFilter(), BooleanClause.Occur.MUST));
                } else if ("must_not".equals(currentFieldName) || "mustNot".equals(currentFieldName)) {
                    boolFilter.add(new FilterClause(parseContext.parseInnerFilter(), BooleanClause.Occur.MUST_NOT));
                } else if ("should".equals(currentFieldName)) {
                    boolFilter.add(new FilterClause(parseContext.parseInnerFilter(), BooleanClause.Occur.SHOULD));
                } else {
                    throw new QueryParsingException(parseContext.index(), "[bool] filter does not support [" + currentFieldName + "]");
                }
            } else if (token == XContentParser.Token.START_ARRAY) {
                // array clause form, e.g. "must": [ { ... }, { ... } ]
                if ("must".equals(currentFieldName)) {
                    while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                        boolFilter.add(new FilterClause(parseContext.parseInnerFilter(), BooleanClause.Occur.MUST));
                    }
                } else if ("must_not".equals(currentFieldName) || "mustNot".equals(currentFieldName)) {
                    while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                        boolFilter.add(new FilterClause(parseContext.parseInnerFilter(), BooleanClause.Occur.MUST_NOT));
                    }
                } else if ("should".equals(currentFieldName)) {
                    while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                        boolFilter.add(new FilterClause(parseContext.parseInnerFilter(), BooleanClause.Occur.SHOULD));
                    }
                } else {
                    throw new QueryParsingException(parseContext.index(), "[bool] filter does not support [" + currentFieldName + "]");
                }
            } else if (token.isValue()) {
                // scalar options shared by many filters
                if ("_cache".equals(currentFieldName)) {
                    cache = parser.booleanValue();
                } else if ("_name".equals(currentFieldName)) {
                    filterName = parser.text();
                } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
                    cacheKey = new CacheKeyFilter.Key(parser.text());
                } else {
                    throw new QueryParsingException(parseContext.index(), "[bool] filter does not support [" + currentFieldName + "]");
                }
            }
        }

        Filter filter = boolFilter;
        // wrap with the context's filter cache when requested
        if (cache) {
            filter = parseContext.cacheFilter(filter, cacheKey);
        }
        if (filterName != null) {
            parseContext.addNamedFilter(filterName, filter);
        }
        return filter;
    }
}
apache-2.0
MrBin99/spaceInvadersRebirth
src/iut/info1/spaceInvadersRebirth/gameStates/MenuState.java
1263
/* * MenuState.java */ package iut.info1.spaceInvadersRebirth.gameStates; /** * Représente un état de menu de jeu qui peut être affiché à l'écran. * @author * @version 1.0 */ public abstract class MenuState extends GameState { /** Les options du menu affiché. */ protected String[] menuOptions; /** * L'option courante du menu selectionnée * (indice dans le tableau des options). */ protected int currentOptionSelected; /** * Construit un nouvel état de menu. * @param gameStateManager le manager qui gère cet état. * @param menuOptions les options du menu. * @throws NullPointerException si <code>gameStateManager == null</code> * ou si <code>menuOptions == null</code>. */ protected MenuState(GameStateManager gameStateManager, String[] menuOptions) throws NullPointerException { super(gameStateManager); if (menuOptions == null) { throw new NullPointerException(); } // Initialise les champs this.currentOptionSelected = 0; this.menuOptions = menuOptions; } /** Exécute l'action sélectionnée dans le menu. */ public abstract void doAction(); }
apache-2.0
aws/aws-sdk-java
aws-java-sdk-kinesis/src/main/java/com/amazonaws/services/kinesisanalytics/model/DescribeApplicationRequest.java
3659
/*
 * Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.kinesisanalytics.model;

import java.io.Serializable;
import javax.annotation.Generated;

import com.amazonaws.AmazonWebServiceRequest;

/**
 * <p/>
 *
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/DescribeApplication"
 *      target="_top">AWS API Documentation</a>
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class DescribeApplicationRequest extends com.amazonaws.AmazonWebServiceRequest implements Serializable, Cloneable {

    /**
     * <p>
     * Name of the application.
     * </p>
     */
    private String applicationName;

    /**
     * <p>
     * Name of the application.
     * </p>
     *
     * @param applicationName
     *        Name of the application.
     */
    public void setApplicationName(String applicationName) {
        this.applicationName = applicationName;
    }

    /**
     * <p>
     * Name of the application.
     * </p>
     *
     * @return Name of the application.
     */
    public String getApplicationName() {
        return this.applicationName;
    }

    /**
     * <p>
     * Name of the application.
     * </p>
     *
     * @param applicationName
     *        Name of the application.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public DescribeApplicationRequest withApplicationName(String applicationName) {
        setApplicationName(applicationName);
        return this;
    }

    /**
     * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
     * redacted from this string using a placeholder value.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("{");
        if (getApplicationName() != null) {
            sb.append("ApplicationName: ").append(getApplicationName());
        }
        return sb.append("}").toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        // a null argument also fails this check, matching Object.equals contract
        if (!(obj instanceof DescribeApplicationRequest)) {
            return false;
        }
        DescribeApplicationRequest other = (DescribeApplicationRequest) obj;
        if (this.getApplicationName() == null) {
            return other.getApplicationName() == null;
        }
        return this.getApplicationName().equals(other.getApplicationName());
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + (getApplicationName() == null ? 0 : getApplicationName().hashCode());
        return result;
    }

    @Override
    public DescribeApplicationRequest clone() {
        return (DescribeApplicationRequest) super.clone();
    }

}
apache-2.0
springbootbuch/webmvc
src/main/java/de/springbootbuch/webmvc/FilmsAtomView.java
990
package de.springbootbuch.webmvc; import com.rometools.rome.feed.atom.Entry; import java.util.ArrayList; import java.util.List; import java.util.Map; import static java.util.stream.Collectors.toList; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.springframework.stereotype.Component; import org.springframework.web.servlet.view.feed.AbstractAtomFeedView; /** * Part of springbootbuch.de. * * @author Michael J. Simons * @author @rotnroll666 */ @Component("films.atom") public class FilmsAtomView extends AbstractAtomFeedView { @Override protected List<Entry> buildFeedEntries(Map<String, Object> model, HttpServletRequest request, HttpServletResponse response) throws Exception { final List<Film> films = (List<Film>) model.getOrDefault("films", new ArrayList<>()); return films.stream().map(film -> { final Entry entry = new Entry(); entry.setTitle(film.getTitle()); return entry; }).collect(toList()); } }
apache-2.0
firejake308/jeopardy-engine
src/serverside/Main.java
231
package serverside;

import java.net.SocketException;

/**
 * Application entry point: starts the Jeopardy server thread.
 */
public class Main {

    public static void main(String[] args) {
        try {
            JeopardyServerThread server = new JeopardyServerThread();
            server.start();
        } catch (SocketException e) {
            // Socket could not be opened; report and exit.
            e.printStackTrace();
        }
    }
}
apache-2.0
kingargyle/turmeric-bot
camel-core/src/test/java/org/apache/camel/processor/aggregator/AggregatorTest.java
4112
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.camel.processor.aggregator;

import java.util.HashMap;
import java.util.Map;

import org.apache.camel.ContextTestSupport;
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.component.mock.MockEndpoint;
import org.apache.camel.processor.aggregate.UseLatestAggregationStrategy;

/**
 * Tests the aggregator EIP using {@link UseLatestAggregationStrategy}:
 * batches correlated by the "cheese" header collapse to the latest exchange.
 *
 * @version $Revision$
 */
public class AggregatorTest extends ContextTestSupport {
    // number of messages sent in the batch test
    protected int messageCount = 100;

    public void testSendingLotsOfMessagesGetAggregatedToTheLatestMessage() throws Exception {
        MockEndpoint resultEndpoint = resolveMandatoryEndpoint("mock:result", MockEndpoint.class);

        // only the last message of the batch should survive aggregation
        resultEndpoint.expectedBodiesReceived("message:" + messageCount);

        // lets send a large batch of messages
        for (int i = 1; i <= messageCount; i++) {
            String body = "message:" + i;
            template.sendBodyAndHeader("direct:start", body, "cheese", 123);
        }

        resultEndpoint.assertIsSatisfied();
    }

    public void testOneMessage() throws Exception {
        MockEndpoint resultEndpoint = resolveMandatoryEndpoint("mock:result", MockEndpoint.class);
        resultEndpoint.expectedMessageCount(1);

        // "cheese" = 123 matches the completion predicate of direct:predicate
        Map<String, Object> headers = new HashMap<String, Object>();
        headers.put("cheese", 123);
        headers.put("bar", "viper bar");

        template.sendBodyAndHeaders("direct:predicate", "test", headers);

        resultEndpoint.assertIsSatisfied();
    }

    public void testBatchTimeoutExpiry() throws Exception {
        // a single message should still be emitted once the completion timeout fires
        MockEndpoint resultEndpoint = resolveMandatoryEndpoint("mock:result", MockEndpoint.class);
        resultEndpoint.expectedMessageCount(1);

        template.sendBodyAndHeader("direct:start", "message:1", "cheese", 123);

        resultEndpoint.assertIsSatisfied();
    }

    public void testAggregatorNotAtStart() throws Exception {
        // the setHeader step before the aggregator must still be applied
        MockEndpoint resultEndpoint = resolveMandatoryEndpoint("mock:result", MockEndpoint.class);
        resultEndpoint.expectedMessageCount(1);
        resultEndpoint.message(0).header("visited").isNotNull();

        template.sendBodyAndHeader("seda:header", "message:1", "cheese", 123);

        resultEndpoint.assertIsSatisfied();
    }

    protected RouteBuilder createRouteBuilder() {
        return new RouteBuilder() {
            public void configure() {
                // START SNIPPET: ex
                // in this route we aggregate all from direct:state based on the header id cheese
                from("direct:start")
                    .aggregate(header("cheese"), new UseLatestAggregationStrategy()).completionTimeout(1000L)
                    .to("mock:result");

                from("seda:header").setHeader("visited", constant(true))
                    .aggregate(header("cheese"), new UseLatestAggregationStrategy()).completionTimeout(1000L)
                    .to("mock:result");

                // in this sample we aggregate with a completion predicate
                from("direct:predicate")
                    .aggregate(header("cheese"), new UseLatestAggregationStrategy()).completionTimeout(1000L)
                    .completionPredicate(header("cheese").isEqualTo(123))
                    .to("mock:result");
                // END SNIPPET: ex
            }
        };
    }
}
apache-2.0
googleapis/google-api-java-client-services
clients/google-api-services-cloudresourcemanager/v3/1.31.0/com/google/api/services/cloudresourcemanager/v3/model/UpdateTagKeyMetadata.java
1616
/*
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
 * in compliance with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License
 * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing permissions and limitations under
 * the License.
 */
/*
 * This code was generated by https://github.com/googleapis/google-api-java-client-services/
 * Modify at your own risk.
 */

package com.google.api.services.cloudresourcemanager.v3.model;

/**
 * Runtime operation information for updating a TagKey.
 *
 * <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is
 * transmitted over HTTP when working with the Cloud Resource Manager API. For a detailed
 * explanation see:
 * <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a>
 * </p>
 *
 * <p>This message declares no fields of its own; all values flow through the generic
 * {@code set}/{@code clone} overrides below, which narrow the return type.</p>
 *
 * @author Google, Inc.
 */
@SuppressWarnings("javadoc")
public final class UpdateTagKeyMetadata extends com.google.api.client.json.GenericJson {

  @Override
  public UpdateTagKeyMetadata set(String fieldName, Object value) {
    return (UpdateTagKeyMetadata) super.set(fieldName, value);
  }

  @Override
  public UpdateTagKeyMetadata clone() {
    return (UpdateTagKeyMetadata) super.clone();
  }

}
apache-2.0
fogaiht/Ajude-a-Joice
app/src/main/java/fogaiht/ajudeajoice/DataBase/_Default.java
525
package fogaiht.ajudeajoice.DataBase;

/**
 * Base result holder pairing a status flag with a message. New instances
 * start out successful ({@code _status == true}) with an empty message.
 */
public class _Default {

    // Message associated with the last operation; empty by default.
    protected String _mensagem;
    // Whether the last operation succeeded; true by default.
    protected boolean _status;

    public _Default() {
        this._mensagem = "";
        this._status = true;
    }

    public String get_mensagem() {
        return _mensagem;
    }

    public void set_mensagem(String _mensagem) {
        this._mensagem = _mensagem;
    }

    public boolean is_status() {
        return _status;
    }

    public void set_status(boolean _status) {
        this._status = _status;
    }
}
apache-2.0
mikewalch/accumulo
core/src/main/java/org/apache/accumulo/core/metadata/schema/TabletMetadata.java
6432
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.accumulo.core.metadata.schema;

import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN;

import java.util.EnumSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map.Entry;
import java.util.Objects;

import org.apache.accumulo.core.client.RowIterator;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.impl.Table;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.data.impl.KeyExtent;
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.CurrentLocationColumnFamily;
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.FutureLocationColumnFamily;
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LastLocationColumnFamily;
import org.apache.accumulo.core.util.HostAndPort;
import org.apache.hadoop.io.Text;

import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableList.Builder;
import com.google.common.collect.Iterators;

/**
 * An in-memory view of a single tablet's metadata row. Instances are built from
 * metadata-table entries by {@link #convertRow(Iterator, EnumSet)}; only the
 * columns listed in the supplied {@link FetchedColumns} set are considered
 * valid, and accessors for unfetched columns fail fast.
 */
public class TabletMetadata {

  private Table.ID tableId;
  private Text prevEndRow;
  private Text endRow;
  private Location location;
  private List<String> files;
  // records which column groups were fetched; guards the accessors below
  private EnumSet<FetchedColumns> fetchedColumns;
  // lazily derived from tableId/endRow/prevEndRow
  private KeyExtent extent;
  private Location last;

  public static enum LocationType {
    CURRENT, FUTURE, LAST
  }

  public static enum FetchedColumns {
    LOCATION, PREV_ROW, FILES, LAST
  }

  /**
   * A tablet server location: "host:port" string, session id, and which kind of
   * location column it came from.
   */
  public static class Location {
    private final String server;
    private final String session;
    private final LocationType lt;

    Location(String server, String session, LocationType lt) {
      this.server = server;
      this.session = session;
      this.lt = lt;
    }

    public HostAndPort getHostAndPort() {
      return HostAndPort.fromString(server);
    }

    public String getSession() {
      return session;
    }

    public LocationType getLocationType() {
      return lt;
    }
  }

  public Table.ID getTableId() {
    return tableId;
  }

  /** Returns the tablet's extent, constructing and caching it on first use. */
  public KeyExtent getExtent() {
    if (extent == null) {
      extent = new KeyExtent(getTableId(), getEndRow(), getPrevEndRow());
    }
    return extent;
  }

  public Text getPrevEndRow() {
    Preconditions.checkState(fetchedColumns.contains(FetchedColumns.PREV_ROW),
        "Requested prev row when it was not fetched");
    return prevEndRow;
  }

  public Text getEndRow() {
    return endRow;
  }

  public Location getLocation() {
    Preconditions.checkState(fetchedColumns.contains(FetchedColumns.LOCATION),
        "Requested location when it was not fetched");
    return location;
  }

  public Location getLast() {
    Preconditions.checkState(fetchedColumns.contains(FetchedColumns.LAST),
        "Requested last when it was not fetched");
    return last;
  }

  public List<String> getFiles() {
    Preconditions.checkState(fetchedColumns.contains(FetchedColumns.FILES),
        "Requested files when it was not fetched");
    return files;
  }

  /**
   * Builds a {@code TabletMetadata} from the key/value entries of exactly one
   * metadata row.
   *
   * @throws IllegalArgumentException if the iterator spans more than one row or
   *         contains more than one current/future location
   */
  public static TabletMetadata convertRow(Iterator<Entry<Key,Value>> rowIter,
      EnumSet<FetchedColumns> fetchedColumns) {
    Objects.requireNonNull(rowIter);

    TabletMetadata te = new TabletMetadata();
    Builder<String> filesBuilder = ImmutableList.builder();
    ByteSequence row = null;

    while (rowIter.hasNext()) {
      Entry<Key,Value> kv = rowIter.next();
      Key k = kv.getKey();
      Value v = kv.getValue();
      Text fam = k.getColumnFamily();

      if (row == null) {
        // first entry determines the row; table id and end row are encoded in it
        row = k.getRowData();
        KeyExtent ke = new KeyExtent(k.getRow(), (Text) null);
        te.endRow = ke.getEndRow();
        te.tableId = ke.getTableId();
      } else if (!row.equals(k.getRowData())) {
        throw new IllegalArgumentException(
            "Input contains more than one row : " + row + " " + k.getRowData());
      }

      if (PREV_ROW_COLUMN.hasColumns(k)) {
        te.prevEndRow = KeyExtent.decodePrevEndRow(v);
      }

      if (fam.equals(DataFileColumnFamily.NAME)) {
        filesBuilder.add(k.getColumnQualifier().toString());
      } else if (fam.equals(CurrentLocationColumnFamily.NAME)) {
        // a tablet may have at most one current or future location
        if (te.location != null) {
          throw new IllegalArgumentException(
              "Input contains more than one location " + te.location + " " + v);
        }
        te.location = new Location(v.toString(), k.getColumnQualifierData().toString(),
            LocationType.CURRENT);
      } else if (fam.equals(FutureLocationColumnFamily.NAME)) {
        if (te.location != null) {
          throw new IllegalArgumentException(
              "Input contains more than one location " + te.location + " " + v);
        }
        te.location = new Location(v.toString(), k.getColumnQualifierData().toString(),
            LocationType.FUTURE);
      } else if (fam.equals(LastLocationColumnFamily.NAME)) {
        te.last = new Location(v.toString(), k.getColumnQualifierData().toString(),
            LocationType.LAST);
      }
    }

    te.files = filesBuilder.build();
    te.fetchedColumns = fetchedColumns;
    return te;
  }

  /**
   * Lazily converts the rows produced by the scanner into
   * {@code TabletMetadata} objects, one per metadata row.
   */
  public static Iterable<TabletMetadata> convert(Scanner input,
      EnumSet<FetchedColumns> fetchedColumns) {
    return new Iterable<TabletMetadata>() {
      @Override
      public Iterator<TabletMetadata> iterator() {
        RowIterator rowIter = new RowIterator(input);
        return Iterators.transform(rowIter, ri -> convertRow(ri, fetchedColumns));
      }
    };
  }
}
apache-2.0
vgubskiy/vgubskiy
chapter_002/src/test/java/ru/job4j/tracker/package-info.java
132
/**
 * Package for tests of the tracker task.
 *
 * @author Valeriy Gubskiy
 * @version 1.0
 * @since 10.08.2017
 */
package ru.job4j.tracker;
apache-2.0
dmyersturnbull/network_merge
src/main/java/org/structnetalign/cross/SimpleCrossingManager.java
5757
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE
 * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 * @author dmyersturnbull
 */
package org.structnetalign.cross;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.WeakHashMap;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

import javax.xml.parsers.ParserConfigurationException;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.structnetalign.CleverGraph;
import org.structnetalign.InteractionEdge;
import org.structnetalign.PipelineProperties;
import org.structnetalign.ReportGenerator;
import org.structnetalign.util.GraphMLAdaptor;
import org.xml.sax.SAXException;

/**
 * A simple multithreaded {@link CrossingManager} that delegates each interaction to a job in a thread pool.
 * Each interaction edge gets its own {@link HomologySearchJob}; the resulting
 * {@link InteractionEdgeUpdate}s are collected and applied single-threaded afterwards.
 *
 * @author dmyersturnbull
 * @see HomologySearchJob
 * @see InteractionEdgeUpdate
 */
public class SimpleCrossingManager implements CrossingManager {

	private static final Logger logger = LogManager.getLogger("org.structnetalign");

	// maximum homology-graph search depth passed to each job
	private int maxDepth;

	// size of the fixed thread pool used in cross()
	private int nCores;

	/**
	 * Command-line entry point: reads interaction and homology graphs, crosses
	 * them, and writes the updated interaction graph.
	 */
	public static void main(String[] args) throws ParserConfigurationException, SAXException, IOException {
		if (args.length != 3) {
			System.err.println("Usage: " + SimpleCrossingManager.class.getSimpleName()
					+ " interaction-graph-file homology-graph-file output-file");
			return;
		}
		File interactionFile = new File(args[0]);
		File homologyFile = new File(args[1]);
		File output = new File(args[2]);
		CleverGraph graph = GraphMLAdaptor.readGraph(interactionFile, homologyFile);
		SimpleCrossingManager cross = new SimpleCrossingManager(2, 1000);
		cross.cross(graph);
		GraphMLAdaptor.writeInteractionGraph(graph.getInteraction(), output);
	}

	/**
	 * @param nCores number of worker threads to use
	 * @param maxDepth maximum search depth per interaction
	 */
	public SimpleCrossingManager(int nCores, int maxDepth) {
		this.nCores = nCores;
		this.maxDepth = maxDepth;
	}

	/**
	 * Updates the weight of every interaction edge in {@code graph} based on
	 * homology search results, running one job per edge on a fixed-size pool.
	 */
	@Override
	public void cross(CleverGraph graph) {
		ExecutorService pool = Executors.newFixedThreadPool(nCores);
		try {

			// depressingly, this used to be List<Future<Pair<Map<Integer,Double>>>>
			// I'm glad that's no longer the case
			CompletionService<InteractionEdgeUpdate> completion = new ExecutorCompletionService<>(pool);

			List<Future<InteractionEdgeUpdate>> futures = new ArrayList<>();

			// submit the jobs
			for (InteractionEdge interaction : graph.getInteraction().getEdges()) {
				HomologySearchJob job = new HomologySearchJob(interaction, graph);
				job.setMaxDepth(maxDepth);
				Future<InteractionEdgeUpdate> result = completion.submit(job);
				futures.add(result);
			}

			/*
			 * We'll make a list of updates to do when we're finished.
			 * Otherwise, we can run into some ugly concurrency issues and get the wrong answer.
			 */
			int nUpdates = 0;
			int nEdgesUpdated = 0;
			WeakHashMap<InteractionEdge, Double> edgesToUpdate = new WeakHashMap<>(futures.size());

			for (Future<InteractionEdgeUpdate> future : futures) {

				// now wait for completion
				InteractionEdgeUpdate update = null;
				try {
					// We should do this in case the job gets interrupted
					// Sometimes the OS or JVM might do this
					// Use the flag instead of future == null because future.get() may actually return null
					while (update == null) {
						try {
							update = future.get();
						} catch (InterruptedException e) {
							logger.warn("A thread was interrupted while waiting to get interaction udpate. Retrying.", e);
							continue;
						}
					}
				} catch (ExecutionException e) {
					logger.error("Encountered an error trying to update an interaction. Skipping interaction.", e);
					continue;
				}

				// we have an update to make!
				nUpdates += update.getnUpdates();
				if (update.getnUpdates() > 0) { // don't bother if we didn't change anything
					nEdgesUpdated++;
					InteractionEdge edge = update.getRootInteraction(); // don't make a copy here!!
					// probabilistic OR of the existing weight and the new score
					edgesToUpdate.put(edge,
							edge.getWeight() + update.getScore() - edge.getWeight() * update.getScore());
					logger.debug("Updated interaction " + edge.getId() + " to "
							+ PipelineProperties.getInstance().getDisplayFormatter().format(edge.getWeight()));
				}
			}

			/*
			 * Now that the multithreaded part has finished, we can update the interactions.
			 */
			for (InteractionEdge edge : edgesToUpdate.keySet()) {
				edge.setWeight(edgesToUpdate.get(edge));
			}

			if (ReportGenerator.getInstance() != null) {
				ReportGenerator.getInstance().putInCrossed("manager", this.getClass().getSimpleName());
				ReportGenerator.getInstance().putInCrossed("n_updates", nUpdates);
				ReportGenerator.getInstance().putInCrossed("n_updated", nEdgesUpdated);
			}

		} finally {
			pool.shutdownNow();
			int count = Thread.activeCount()-1;
			if (count > 0) {
				logger.warn("There are " + count + " lingering threads");
			}
		}
	}

}
apache-2.0
ahammer/ModelViewLib
ModelViewLib/src/main/java/com/mysaasa/modelviews/ModelFrameLayout.java
1098
package com.mysaasa.modelviews;

import android.content.Context;
import android.util.AttributeSet;
import android.view.ViewGroup;
import android.widget.FrameLayout;

/**
 * A {@link FrameLayout} that can be bound to a model of type {@code T}.
 * All model handling is forwarded to a shared {@link ModelViewDelegate}.
 *
 * Created by adamhammer2 on 2016-06-25.
 */
public abstract class ModelFrameLayout<T> extends FrameLayout implements IModelView<T> {

    // Delegate implementing the common model-binding behaviour.
    ModelViewDelegate<T> modelViewDelegate = new ModelViewDelegate(this);

    public ModelFrameLayout(Context context) {
        super(context);
        modelViewDelegate.initialize();
    }

    public ModelFrameLayout(Context context, AttributeSet attrs) {
        super(context, attrs);
        modelViewDelegate.initialize();
    }

    public ModelFrameLayout(Context context, AttributeSet attrs, int defStyleAttr) {
        super(context, attrs, defStyleAttr);
        modelViewDelegate.initialize();
    }

    /** The delegate attaches child views to this layout itself. */
    @Override
    public ViewGroup getTargetViewGroup() {
        return this;
    }

    @Override
    public void setModel(T model) {
        modelViewDelegate.setModel(model);
    }

    @Override
    public T getModel() {
        return modelViewDelegate.getModel();
    }
}
apache-2.0
bozimmerman/CoffeeMud
com/planet_ink/coffee_mud/Abilities/Skills/Skill_HandCuff.java
10345
package com.planet_ink.coffee_mud.Abilities.Skills;
import com.planet_ink.coffee_mud.core.interfaces.*;
import com.planet_ink.coffee_mud.core.*;
import com.planet_ink.coffee_mud.core.collections.*;
import com.planet_ink.coffee_mud.Abilities.interfaces.*;
import com.planet_ink.coffee_mud.Areas.interfaces.*;
import com.planet_ink.coffee_mud.Behaviors.interfaces.*;
import com.planet_ink.coffee_mud.CharClasses.interfaces.*;
import com.planet_ink.coffee_mud.Commands.interfaces.*;
import com.planet_ink.coffee_mud.Common.interfaces.*;
import com.planet_ink.coffee_mud.Exits.interfaces.*;
import com.planet_ink.coffee_mud.Items.interfaces.*;
import com.planet_ink.coffee_mud.Libraries.interfaces.*;
import com.planet_ink.coffee_mud.Locales.interfaces.*;
import com.planet_ink.coffee_mud.MOBS.interfaces.*;
import com.planet_ink.coffee_mud.Races.interfaces.*;

import java.util.*;

/*
   Copyright 2003-2022 Bo Zimmerman

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

	   http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

/**
 * Law-enforcement binding skill: handcuffs a warranted (or subdued) target,
 * forcing the target to follow the officer and preventing recall, movement
 * away from the officer, and hand-based actions until the cuffs are removed
 * or struggled out of.
 */
public class Skill_HandCuff extends StdSkill
{
	@Override
	public String ID()
	{
		return "Skill_HandCuff";
	}

	private final static String localizedName = CMLib.lang().L("Handcuff");

	@Override
	public String name()
	{
		return localizedName;
	}

	private final static String localizedStaticDisplay = CMLib.lang().L("(Handcuffed)");

	@Override
	public String displayText()
	{
		return localizedStaticDisplay;
	}

	@Override
	protected int canAffectCode()
	{
		return CAN_MOBS;
	}

	@Override
	protected int canTargetCode()
	{
		return CAN_MOBS;
	}

	@Override
	public int abstractQuality()
	{
		return Ability.QUALITY_MALICIOUS;
	}

	private static final String[] triggerStrings = I(new String[] { "HANDCUFF", "CUFF" });

	@Override
	public String[] triggerStrings()
	{
		return triggerStrings;
	}

	@Override
	public int classificationCode()
	{
		return Ability.ACODE_SKILL | Ability.DOMAIN_BINDING;
	}

	@Override
	public long flags()
	{
		return Ability.FLAG_BINDING;
	}

	@Override
	public int usageType()
	{
		return USAGE_MOVEMENT;
	}

	// Struggle "hit points" left before the cuffs break loose; decremented each
	// time the wearer struggles (see okMessage).
	public int amountRemaining = 0;
	// Pre-cuff AUTOASSIST/AUTOGUARD settings of the target, captured so that
	// unInvoke() can restore them when the cuffs come off.
	public boolean oldAssist = false;
	public boolean oldGuard = false;

	@Override
	public void affectPhyStats(final Physical affected, final PhyStats affectableStats)
	{
		super.affectPhyStats(affected,affectableStats);
		// Mark the wearer as bound so movement/combat code treats them as restrained.
		affectableStats.setDisposition(affectableStats.disposition()|PhyStats.IS_BOUND);
	}

	@Override
	public boolean okMessage(final Environmental myHost, final CMMsg msg)
	{
		if(!(affected instanceof MOB))
			return true;
		final MOB mob=(MOB)affected;
		// when this spell is on a MOBs Affected list,
		// it should consistantly prevent the mob
		// from trying to do ANYTHING except sleep
		if(msg.amISource(mob))
		{
			if(msg.sourceMinor()==CMMsg.TYP_RECALL)
			{
				// Recall would let the prisoner escape entirely; always blocked.
				if(msg.source().location()!=null)
					msg.source().location().show(msg.source(),null,CMMsg.MSG_OK_ACTION,L("<S-NAME> attempt(s) to recall, but the handcuffs prevent <S-HIM-HER>."));
				return false;
			}
			else
			if(((msg.sourceMinor()==CMMsg.TYP_FOLLOW)&&(msg.target()!=invoker()))
			||((msg.sourceMinor()==CMMsg.TYP_NOFOLLOW)&&(msg.source().amFollowing()==invoker())))
			{
				// Trying to follow someone else, or stop following the officer,
				// counts as a struggle attempt (strength + level worth of effort).
				mob.location().show(mob,null,CMMsg.MSG_OK_ACTION,L("<S-NAME> struggle(s) against <S-HIS-HER> cuffs."));
				amountRemaining-=(mob.charStats().getStat(CharStats.STAT_STRENGTH)+mob.phyStats().level());
				if(amountRemaining<0)
					unInvoke();
				else
					return false;
			}
			else
			if((msg.sourceMinor()==CMMsg.TYP_LEAVE)
			||(msg.sourceMinor()==CMMsg.TYP_SIT)
			||(msg.sourceMinor()==CMMsg.TYP_STAND))
				return true;
			else
			if(((msg.sourceMinor()==CMMsg.TYP_ENTER)
				&&(msg.target() instanceof Room)
				&&(!((Room)msg.target()).isInhabitant(invoker))))
			{
				// Entering a room away from the arresting officer is also a struggle.
				mob.location().show(mob,null,CMMsg.MSG_OK_ACTION,L("<S-NAME> struggle(s) against <S-HIS-HER> cuffs."));
				amountRemaining-=(mob.charStats().getStat(CharStats.STAT_STRENGTH)+mob.phyStats().level());
				if(amountRemaining<0)
					unInvoke();
				else
					return false;
			}
			else
			if(msg.sourceMinor()==CMMsg.TYP_ENTER)
				return true;
			else
			if((!msg.sourceMajor(CMMsg.MASK_ALWAYS))
			&&(amountRemaining>0)
			&&((msg.sourceMajor(CMMsg.MASK_HANDS))
				||(msg.sourceMajor(CMMsg.MASK_MOVE))))
			{
				// Any other hand or movement action: struggle (strength only).
				mob.location().show(mob,null,CMMsg.MSG_OK_ACTION,L("<S-NAME> struggle(s) against <S-HIS-HER> cuffs."));
				amountRemaining-=mob.charStats().getStat(CharStats.STAT_STRENGTH);
				if(amountRemaining<0)
					unInvoke();
				else
					return false;
			}
		}
		else
		if(((msg.targetMajor()&CMMsg.MASK_MALICIOUS)>0)
		&&(msg.amITarget(affected))
		&&(!mob.isInCombat())
		&&(mob.amFollowing()!=null)
		&&(msg.source().isMonster())
		&&(msg.source().getVictim()!=mob))
		{
			// Protect the restrained prisoner from opportunistic NPC attacks.
			msg.source().tell(L("You may not assault this prisoner."));
			if(mob.getVictim()==msg.source())
			{
				mob.makePeace(true);
				mob.setVictim(null);
			}
			return false;
		}
		return super.okMessage(myHost,msg);
	}

	@Override
	public void unInvoke()
	{
		// undo the affects of this spell
		if(!(affected instanceof MOB))
			return;
		final MOB mob=(MOB)affected;

		super.unInvoke();
		if(canBeUninvoked())
		{
			mob.setFollowing(null);
			if(!mob.amDead())
				mob.location().show(mob,null,CMMsg.MSG_NOISYMOVEMENT,L("<S-NAME> <S-IS-ARE> released from the handcuffs."));
			// Restore the pre-cuff attribute settings captured at invoke() time.
			if(!oldAssist)
				mob.setAttribute(MOB.Attrib.AUTOASSIST,false);
			if(oldGuard)
				mob.setAttribute(MOB.Attrib.AUTOGUARD,true); // FIX: was set to false, which failed to restore a previously-enabled AUTOGUARD
			CMLib.commands().postStand(mob,true, false);
		}
	}

	@Override
	public int castingQuality(final MOB mob, final Physical target)
	{
		if((mob!=null)&&(target instanceof MOB))
		{
			if(mob.isInCombat())
				return Ability.QUALITY_INDIFFERENT;
			if(Skill_Arrest.getWarrantsOf((MOB)target, CMLib.law().getLegalObject(mob.location().getArea())).size()==0)
				return Ability.QUALITY_INDIFFERENT;
			if(CMLib.flags().isStanding((MOB)target))
				return Ability.QUALITY_INDIFFERENT;
			if(target.fetchEffect(ID())!=null)
				return Ability.QUALITY_INDIFFERENT;
		}
		return super.castingQuality(mob,target);
	}

	@Override
	public boolean invoke(final MOB mob, final List<String> commands, final Physical givenTarget, final boolean auto, final int asLevel)
	{
		if(mob.isInCombat()&&(!auto))
		{
			mob.tell(L("Not while you are fighting!"));
			return false;
		}
		// "HANDCUFF UNTIE <target>" removes existing cuffs instead of applying them.
		if((commands.size()>0)&&(commands.get(0)).equalsIgnoreCase("UNTIE"))
		{
			commands.remove(0);
			final MOB target=super.getTarget(mob,commands,givenTarget,false,true);
			if(target==null)
				return false;
			final Ability A=target.fetchEffect(ID());
			if(A!=null)
			{
				if(mob.location().show(mob,target,null,CMMsg.MSG_HANDS,L("<S-NAME> attempt(s) to unbind <T-NAMESELF>.")))
				{
					A.unInvoke();
					return true;
				}
				return false;
			}
			mob.tell(L("@x1 doesn't appear to be handcuffed.",target.name(mob)));
			return false;
		}
		final MOB target=getTarget(mob,commands,givenTarget);
		if(target==null)
			return false;
		// Only warranted targets may be cuffed, unless the caster is a non-eligible officer.
		if(Skill_Arrest.getWarrantsOf(target, CMLib.law().getLegalObject(mob.location().getArea())).size()==0)
		{
			final Area A=CMLib.law().getLegalObject(mob.location());
			final LegalBehavior B=CMLib.law().getLegalBehavior(A);
			if((B==null)||(!B.isAnyOfficer(A, mob))||(B.isEligibleOfficer(A, mob)))
			{
				mob.tell(L("@x1 has no warrants out here.",target.name(mob)));
				return false;
			}
		}
		// A standing (non-subdued) target will not submit to cuffing.
		if((CMLib.flags().isStanding(target))&&(!auto))
		{
			mob.tell(L("@x1 doesn't look willing to cooperate.",target.name(mob)));
			return false;
		}
		if(!super.invoke(mob,commands,givenTarget,auto,asLevel))
			return false;

		boolean success=proficiencyCheck(mob,0,auto);
		if(success)
		{
			final CMMsg msg=CMClass.getMsg(mob,target,this,CMMsg.MSG_NOISYMOVEMENT|(auto?CMMsg.MASK_ALWAYS:CMMsg.MASK_MALICIOUS),L("<S-NAME> handcuff(s) <T-NAME>."));
			if((mob.location().okMessage(mob,msg))&&(target.fetchEffect(this.ID())==null))
			{
				mob.location().send(mob,msg);
				if(msg.value()<=0)
				{
					// Struggle points scale with the effective skill level.
					final int amountToRemain=adjustedLevel(mob,asLevel)*300;
					amountRemaining=amountToRemain;
					if(target.location()==mob.location())
					{
						success=maliciousAffect(mob,target,asLevel,Ability.TICKS_ALMOST_FOREVER,-1)!=null;
						if(success)
						{
							Skill_HandCuff A = (Skill_HandCuff)target.fetchEffect(ID());
							if(A!=null)
							{
								A.amountRemaining = amountToRemain;
								if(auto)
									A.makeLongLasting();
							}
							// Capture pre-cuff settings so they can be restored on release.
							oldAssist=target.isAttributeSet(MOB.Attrib.AUTOASSIST);
							if(!oldAssist)
								target.setAttribute(MOB.Attrib.AUTOASSIST,true);
							oldGuard=target.isAttributeSet(MOB.Attrib.AUTOGUARD); // FIX: read AUTOGUARD (was AUTOASSIST, a copy-paste error)
							if(oldGuard)
								target.setAttribute(MOB.Attrib.AUTOGUARD,false);
							final boolean oldNOFOL=target.isAttributeSet(MOB.Attrib.NOFOLLOW);
							if(target.numFollowers()>0)
								CMLib.commands().forceStandardCommand(target,"NoFollow",new XVector<String>("UNFOLLOW","QUIETLY"));
							target.setAttribute(MOB.Attrib.NOFOLLOW,false);
							CMLib.commands().postFollow(target,mob,true);
							if(oldNOFOL)
								target.setAttribute(MOB.Attrib.NOFOLLOW,true);
							else
								target.setAttribute(MOB.Attrib.NOFOLLOW,false);
							target.setFollowing(mob);
							A = (Skill_HandCuff)target.fetchEffect(ID());
							if(A!=null)
							{
								A.amountRemaining = amountToRemain;
								// FIX: propagate the captured settings onto the copy of this
								// ability that actually lives on the target -- it is that
								// copy's unInvoke() which restores them later.
								A.oldAssist = oldAssist;
								A.oldGuard = oldGuard;
							}
						}
					}
				}
				if(mob.getVictim()==target)
					mob.setVictim(null);
			}
		}
		else
			return maliciousFizzle(mob,target,L("<S-NAME> attempt(s) to bind <T-NAME> and fail(s)."));

		// return whether it worked
		return success;
	}
}
apache-2.0
yunhai281/baseProject
src/test/java/com/boyuyun/base/sso/RoleUserTypeBizTest.java
1735
package com.boyuyun.base.sso; import static org.junit.Assert.*; import java.util.List; import javax.annotation.Resource; import org.apache.log4j.Logger; import org.junit.Assert; import org.junit.FixMethodOrder; import org.junit.Test; import org.junit.runners.MethodSorters; import com.boyuyun.base.sso.biz.RoleUserTypeBiz; import com.boyuyun.base.sso.entity.Role; import com.boyuyun.base.sso.entity.RoleScope; import com.boyuyun.common.junit.SpringJunitTest; @FixMethodOrder(MethodSorters.NAME_ASCENDING) public class RoleUserTypeBizTest extends SpringJunitTest { private static Logger logger = Logger.getLogger(RoleUserTypeBizTest.class); @Resource private RoleUserTypeBiz roleUserTypeService; /** * 这个方法关联到4个表而且没有删除方法,不建议使用此测试 * sso_role_application, * sso_role_user, * sso_role_user_government_school, * sso_role_user_type */ // @Test // public void testSave() { // RoleScope rs = new RoleScope(); // rs.setAddOrUpdate("add"); // rs.setRoleId("bbeddc6f93fc498cbaf190ed80c4df87"); // try { // roleUserTypeService.save(rs); // } catch (Exception e) { // e.printStackTrace(); // logger.error(e); // } // } @Test public void testA_GetRoleScope() { RoleScope po = null; try { po = roleUserTypeService.getRoleScope("e327d216d8d6459298b8867eb26e0de4"); } catch (Exception e) { e.printStackTrace(); logger.error(e); } Assert.assertTrue(po != null); } @Test public void testB_GetUserRoles() { List<Role> list = null; try { list = roleUserTypeService.getUserRoles("540fed39-633f-41b5-a181-527d71e94d73"); } catch (Exception e) { e.printStackTrace(); logger.error(e); } Assert.assertTrue(list != null); } }
apache-2.0
ST-DDT/CrazyCore
src/main/java/de/st_ddt/crazyutil/modules/permissiongroups/VaultPermissionSystem.java
2233
package de.st_ddt.crazyutil.modules.permissiongroups;

import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.Set;

import net.milkbowl.vault.chat.Chat;
import net.milkbowl.vault.permission.Permission;

import org.bukkit.Bukkit;
import org.bukkit.entity.Player;
import org.bukkit.plugin.RegisteredServiceProvider;

import de.st_ddt.crazyutil.ChatHelper;

/**
 * Permission system backed by Vault's {@link Permission} and {@link Chat}
 * services. Either service may be absent at runtime; every accessor degrades
 * gracefully (false/null) in that case.
 */
class VaultPermissionSystem extends NoPermissionSystem
{

	private final Permission permission;
	private final Chat chat;

	public VaultPermissionSystem()
	{
		super();
		// Look each service registration up once and reuse it.
		// (FIX: the Permission registration used to be queried a second time
		// after the null check instead of reusing permissionProvider.)
		final RegisteredServiceProvider<Permission> permissionProvider = Bukkit.getServicesManager().getRegistration(Permission.class);
		if (permissionProvider == null)
			permission = null;
		else
			permission = permissionProvider.getProvider();
		final RegisteredServiceProvider<Chat> chatProvider = Bukkit.getServicesManager().getRegistration(Chat.class);
		if (chatProvider == null)
			chat = null;
		else
			chat = chatProvider.getProvider();
	}

	@Override
	public String getName()
	{
		return "Vault";
	}

	/** @return true if the player belongs to the named group; false when Vault permissions are unavailable. */
	@Override
	public boolean hasGroup(final Player player, final String name)
	{
		if (permission == null)
			return false;
		final String[] groups = permission.getPlayerGroups(player);
		if (groups == null)
			return false;
		for (final String group : groups)
			if (group.equals(name))
				return true;
		return false;
	}

	/** @return the player's primary group, or null when Vault permissions are unavailable. */
	@Override
	public String getGroup(final Player player)
	{
		if (permission == null)
			return null;
		else
			return permission.getPrimaryGroup(player);
	}

	/** @return the colorised chat prefix, or null when Vault chat is unavailable. */
	@Override
	public String getGroupPrefix(final Player player)
	{
		if (chat == null)
			return null;
		else
			return ChatHelper.colorise(chat.getPlayerPrefix(player));
	}

	/** @return the colorised chat suffix, or null when Vault chat is unavailable. */
	@Override
	public String getGroupSuffix(final Player player)
	{
		if (chat == null)
			return null;
		else
			return ChatHelper.colorise(chat.getPlayerSuffix(player));
	}

	/** @return the player's groups in provider order (duplicates removed), or null when unavailable. */
	@Override
	public Set<String> getGroups(final Player player)
	{
		if (permission == null)
			return null;
		final String[] groups = permission.getPlayerGroups(player);
		if (groups == null)
			return null;
		// LinkedHashSet preserves the provider's ordering while deduplicating.
		return new LinkedHashSet<String>(Arrays.asList(groups));
	}
}
apache-2.0
nivanov/ignite
modules/math/src/main/java/org/apache/ignite/math/impls/matrix/MatrixView.java
2926
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.ignite.math.impls.matrix;

import java.io.Externalizable;
import org.apache.ignite.math.Matrix;
import org.apache.ignite.math.MatrixStorage;
import org.apache.ignite.math.Vector;
import org.apache.ignite.math.exceptions.UnsupportedOperationException;
import org.apache.ignite.math.impls.storage.matrix.MatrixDelegateStorage;

/**
 * A rectangular window into a parent {@link Matrix}: reads and writes go
 * straight through to the parent's storage, offset by the view's origin.
 */
public class MatrixView extends AbstractMatrix {
    /**
     * No-arg constructor required by the {@link Externalizable} contract.
     */
    public MatrixView() {
        // No-op.
    }

    /**
     * @param parent Backing parent {@link Matrix}.
     * @param rowOff Row offset to parent matrix.
     * @param colOff Column offset to parent matrix.
     * @param rows Amount of rows in the view.
     * @param cols Amount of columns in the view.
     */
    public MatrixView(Matrix parent, int rowOff, int colOff, int rows, int cols) {
        this(parent == null ? null : parent.getStorage(), rowOff, colOff, rows, cols);
    }

    /**
     * @param sto Backing parent {@link MatrixStorage}.
     * @param rowOff Row offset to parent storage.
     * @param colOff Column offset to parent storage.
     * @param rows Amount of rows in the view.
     * @param cols Amount of columns in the view.
     */
    public MatrixView(MatrixStorage sto, int rowOff, int colOff, int rows, int cols) {
        super(new MatrixDelegateStorage(sto, rowOff, colOff, rows, cols));
    }

    /** Narrows this view's storage to its concrete delegate type. */
    private MatrixDelegateStorage storage() {
        return (MatrixDelegateStorage)getStorage();
    }

    /** {@inheritDoc} */
    @Override public Matrix copy() {
        MatrixDelegateStorage delegateSto = storage();

        // The copy is another view over the very same parent storage,
        // with identical origin and dimensions.
        return new MatrixView(
            delegateSto.delegate(),
            delegateSto.rowOffset(),
            delegateSto.columnOffset(),
            delegateSto.rowSize(),
            delegateSto.columnSize());
    }

    /** {@inheritDoc} */
    @Override public Matrix like(int rows, int cols) {
        throw new UnsupportedOperationException();
    }

    /** {@inheritDoc} */
    @Override public Vector likeVector(int crd) {
        throw new UnsupportedOperationException();
    }
}
apache-2.0
ruspl-afed/dbeaver
plugins/org.jkiss.dbeaver.model/src/org/jkiss/dbeaver/model/navigator/DBNRoot.java
5208
/*
 * DBeaver - Universal Database Manager
 * Copyright (C) 2010-2017 Serge Rider (serge@jkiss.org)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jkiss.dbeaver.model.navigator;

import org.eclipse.core.resources.IProject;
import org.jkiss.code.NotNull;
import org.jkiss.dbeaver.model.DBPImage;
import org.jkiss.dbeaver.model.app.DBPProjectListener;
import org.jkiss.dbeaver.model.messages.ModelMessages;
import org.jkiss.dbeaver.model.meta.Property;
import org.jkiss.dbeaver.model.runtime.DBRProgressMonitor;
import org.jkiss.utils.ArrayUtils;

import java.util.Arrays;
import java.util.Comparator;

/**
 * Root node of the navigator tree. Its children are the project nodes; it
 * listens for project changes and keeps the child array sorted by name.
 */
public class DBNRoot extends DBNNode implements DBNContainer, DBPProjectListener {
    private final DBNModel model;
    // Always kept sorted alphabetically by node name; never null.
    private DBNProject[] projects = new DBNProject[0];

    public DBNRoot(DBNModel model) {
        super();
        this.model = model;
        // Stay informed about project add/remove/activation for our lifetime.
        model.getPlatform().getProjectManager().addProjectListener(this);
    }

    @Override
    void dispose(boolean reflect) {
        for (DBNProject projectNode : projects) {
            projectNode.dispose(reflect);
        }
        projects = new DBNProject[0];
        model.getPlatform().getProjectManager().removeProjectListener(this);
    }

    @Override
    public DBNModel getModel() {
        return model;
    }

    @Override
    public String getNodeType() {
        return ModelMessages.model_navigator_Root;
    }

    @Override
    public Object getValueObject() {
        return this;
    }

    @Override
    public String getChildrenType() {
        return ModelMessages.model_navigator_Project;
    }

    @Override
    public Class<IProject> getChildrenClass() {
        return IProject.class;
    }

    @NotNull
    @Override
    @Property(viewable = true, order = 1)
    public String getName() {
        return super.getName();
    }

    @Override
    public String getNodeName() {
        return "#root"; //$NON-NLS-1$
    }

    @Override
    public String getNodeDescription() {
        return ModelMessages.model_navigator_Model_root;
    }

    @Override
    public DBPImage getNodeIcon() {
        return null;
    }

    @Override
    public boolean allowsChildren() {
        return projects.length > 0;
    }

    @Override
    public DBNProject[] getChildren(DBRProgressMonitor monitor) {
        return projects;
    }

    public DBNProject[] getProjects() {
        return projects;
    }

    @Override
    public boolean allowsOpen() {
        return true;
    }

    @Override
    public String getNodeItemPath() {
        return null;
    }

    /** Looks up the node wrapping the given workspace project, or null. */
    public DBNProject getProject(IProject project) {
        for (DBNProject candidate : projects) {
            if (candidate.getProject() == project) {
                return candidate;
            }
        }
        return null;
    }

    DBNProject addProject(IProject project, boolean reflect) {
        DBNProject newNode = new DBNProject(
            this,
            project,
            model.getPlatform().getProjectManager().getResourceHandler(project));
        projects = ArrayUtils.add(DBNProject.class, projects, newNode);
        // Re-sort so the navigator always shows projects alphabetically.
        Arrays.sort(projects, new Comparator<DBNProject>() {
            @Override
            public int compare(DBNProject left, DBNProject right) {
                return left.getNodeName().compareTo(right.getNodeName());
            }
        });
        model.fireNodeEvent(new DBNEvent(this, DBNEvent.Action.ADD, newNode));
        return newNode;
    }

    void removeProject(IProject project) {
        for (int idx = 0; idx < projects.length; idx++) {
            DBNProject projectNode = projects[idx];
            if (projectNode.getProject() != project) {
                continue;
            }
            projects = ArrayUtils.remove(DBNProject.class, projects, idx);
            model.fireNodeEvent(new DBNEvent(this, DBNEvent.Action.REMOVE, projectNode));
            projectNode.dispose(true);
            break;
        }
    }

    @Override
    public void handleActiveProjectChange(IProject oldValue, IProject newValue) {
        DBNProject newNode = getProject(newValue);
        DBNProject oldNode = getProject(oldValue);
        // Refresh both the newly-active and previously-active project nodes.
        if (newNode != null) {
            model.fireNodeEvent(new DBNEvent(this, DBNEvent.Action.UPDATE, newNode));
        }
        if (oldNode != null) {
            model.fireNodeEvent(new DBNEvent(this, DBNEvent.Action.UPDATE, oldNode));
        }
    }
}
apache-2.0
ilyashik/hzcpoc
src/main/java/com/hzcpoc/IncomingMessage.java
950
package com.hzcpoc; import java.io.Serializable; import java.util.Date; import java.util.UUID; /** * Created by Ilya on 29.01.2017. */ public class IncomingMessage implements Serializable{ Integer id; byte[] body; Date received; public IncomingMessage(Integer id, byte[] body) { this.id = id; this.body = body; this.received = new Date(); } public IncomingMessage(Integer id, byte[] body, Date received) { this.id = id; this.body = body; this.received = received; } public Integer getId() { return id; } public void setId(Integer id) { this.id = id; } public byte[] getBody() { return body; } public void setBody(byte[] body) { this.body = body; } public Date getReceived() { return received; } public void setReceived(Date received) { this.received = received; } }
apache-2.0
nhaarman/trinity
trinity-compiler/src/main/java/com/nhaarman/trinity/internal/codegen/step/ValidateTableElementsStep.java
1208
package com.nhaarman.trinity.internal.codegen.step; import com.nhaarman.trinity.annotations.Table; import com.nhaarman.trinity.internal.codegen.ProcessingStepResult; import com.nhaarman.trinity.internal.codegen.validator.TableTypeValidator; import com.nhaarman.trinity.internal.codegen.validator.ValidationHandler; import java.io.IOException; import java.util.Set; import javax.annotation.processing.RoundEnvironment; import javax.lang.model.element.Element; import org.jetbrains.annotations.NotNull; /** * Processing step that gathers every element annotated with {@link Table} in * the current round and runs table-type validation over the whole set. */ public class ValidateTableElementsStep implements ProcessingStep { @NotNull private final ValidationHandler mValidationHandler; // The validator is stateless, so a single instance serves every round. @NotNull private final TableTypeValidator mTableTypeValidator = new TableTypeValidator(); public ValidateTableElementsStep(@NotNull final ValidationHandler validationHandler) { mValidationHandler = validationHandler; } /** Validates all {@code @Table}-annotated elements found in this round. */ @NotNull @Override public ProcessingStepResult process(@NotNull final RoundEnvironment roundEnvironment) throws IOException { final Set<? extends Element> annotatedTables = roundEnvironment.getElementsAnnotatedWith(Table.class); return mTableTypeValidator.validate(annotatedTables, mValidationHandler); } }
apache-2.0
rkistner/binarization
java/src/rkistner/algorithms/KittlerThresholder.java
2355
/* * Copyright 2010 Ralf Kistner * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package rkistner.algorithms; /** * Algorithm from: * <p/> * Lecture 4: Thresholding * c Bryan S. Morse, Brigham Young University, 1998–2000 * Last modified on Wednesday, January 12, 2000 at 10:00 AM. */ public class KittlerThresholder implements ThresholdFinder { public int findThreshold(int[] h) { int N = h.length; int H = 0; int M = 0; for (int i = 0; i < N; i++) { H += h[i]; M += i * h[i]; } double min = Float.MAX_VALUE; int best = 0; int Hf = 0; int Mf = 0; for (int T = 1; T < N; T++) { // Separate into two clusters. // First is from 0 to T-1 // Second is from T to n-1 Hf += h[T - 1]; int Hb = H - Hf; Mf += (T - 1) * h[T - 1]; int Mb = M - Mf; if (Hf == 0 || Hb == 0) continue; double mu_f = (double) Mf / Hf; double mu_b = (double) Mb / Hb; double Pf = (double) Hf / H; double Pb = (double) Hb / H; double var_f = 0; for(int i = 0; i < T; i++) { var_f += (i - mu_f)*(i - mu_f) * h[i]; } var_f /= Hf; double var_b = 0; for(int i = T; i < N; i++) { var_b += (i - mu_b)*(i - mu_b) * h[i]; } var_b /= Hb; double J = 1 + Pf *Math.log(var_f) + Pb *Math.log(var_b) - 2* Pf *Math.log(Pf) - 2* Pb *Math.log(Pb); if (J > 0 && J < min) { min = J; best = T; } } return best; } public String toString() { return "Kittler"; } }
apache-2.0
dbarentine/keycloak
services/src/main/java/org/keycloak/social/twitter/TwitterIdentityProvider.java
7933
/*
 * Copyright 2016 Red Hat, Inc. and/or its affiliates
 * and other contributors as indicated by the @author tags.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.keycloak.social.twitter;

import org.jboss.logging.Logger;
import org.keycloak.common.ClientConnection;
import org.keycloak.broker.oidc.OAuth2IdentityProviderConfig;
import org.keycloak.broker.provider.AbstractIdentityProvider;
import org.keycloak.broker.provider.AuthenticationRequest;
import org.keycloak.broker.provider.BrokeredIdentityContext;
import org.keycloak.broker.provider.IdentityBrokerException;
import org.keycloak.events.EventBuilder;
import org.keycloak.events.EventType;
import org.keycloak.models.ClientModel;
import org.keycloak.models.ClientSessionModel;
import org.keycloak.models.FederatedIdentityModel;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.RealmModel;
import org.keycloak.services.managers.ClientSessionCode;
import org.keycloak.services.messages.Messages;
import org.keycloak.services.ErrorPage;
import org.keycloak.broker.social.SocialIdentityProvider;
import twitter4j.Twitter;
import twitter4j.TwitterFactory;
import twitter4j.auth.AccessToken;
import twitter4j.auth.RequestToken;

import javax.ws.rs.GET;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import java.net.URI;

import static org.keycloak.models.ClientSessionModel.Action.AUTHENTICATE;

/**
 * Social identity provider that brokers logins through Twitter's OAuth 1.0a
 * flow (request token -> user authentication -> access token).
 *
 * @author <a href="mailto:sthorger@redhat.com">Stian Thorgersen</a>
 */
public class TwitterIdentityProvider extends AbstractIdentityProvider<OAuth2IdentityProviderConfig> implements
        SocialIdentityProvider<OAuth2IdentityProviderConfig> {

    protected static final Logger logger = Logger.getLogger(TwitterIdentityProvider.class);

    public TwitterIdentityProvider(OAuth2IdentityProviderConfig config) {
        super(config);
    }

    @Override
    public Object callback(RealmModel realm, AuthenticationCallback callback, EventBuilder event) {
        return new Endpoint(realm, callback);
    }

    /**
     * Starts the OAuth dance: obtains a request token from Twitter, stores the
     * token pair on the client session (the callback endpoint needs it to
     * rebuild the {@link RequestToken}) and redirects the browser to Twitter.
     */
    @Override
    public Response performLogin(AuthenticationRequest request) {
        try {
            Twitter twitter = new TwitterFactory().getInstance();
            twitter.setOAuthConsumer(getConfig().getClientId(), getConfig().getClientSecret());

            URI uri = new URI(request.getRedirectUri() + "?state=" + request.getState());

            RequestToken requestToken = twitter.getOAuthRequestToken(uri.toString());
            ClientSessionModel clientSession = request.getClientSession();

            clientSession.setNote("twitter_token", requestToken.getToken());
            clientSession.setNote("twitter_tokenSecret", requestToken.getTokenSecret());

            URI authenticationUrl = URI.create(requestToken.getAuthenticationURL());

            return Response.temporaryRedirect(authenticationUrl).build();
        } catch (Exception e) {
            // FIX: message previously read "Could send ...", stating the opposite of the failure.
            throw new IdentityBrokerException("Could not send authentication request to twitter.", e);
        }
    }

    /**
     * JAX-RS endpoint that Twitter redirects back to after the user
     * authenticates (or denies) the request.
     */
    protected class Endpoint {
        protected RealmModel realm;
        protected AuthenticationCallback callback;

        @Context
        protected KeycloakSession session;

        @Context
        protected ClientConnection clientConnection;

        @Context
        protected HttpHeaders headers;

        @Context
        protected UriInfo uriInfo;

        public Endpoint(RealmModel realm, AuthenticationCallback callback) {
            this.realm = realm;
            this.callback = callback;
        }

        @GET
        public Response authResponse(@QueryParam("state") String state,
                                     @QueryParam("denied") String denied,
                                     @QueryParam("oauth_verifier") String verifier) {
            // NOTE(review): the "denied" parameter (user refused access) is currently
            // ignored; a refusal falls through to the generic error path below.
            try {
                Twitter twitter = new TwitterFactory().getInstance();
                twitter.setOAuthConsumer(getConfig().getClientId(), getConfig().getClientSecret());

                ClientSessionModel clientSession = parseClientSessionCode(state).getClientSession();

                // Rebuild the request token stashed by performLogin() and trade
                // it (plus the verifier) for an access token.
                String twitterToken = clientSession.getNote("twitter_token");
                String twitterSecret = clientSession.getNote("twitter_tokenSecret");

                RequestToken requestToken = new RequestToken(twitterToken, twitterSecret);

                AccessToken oAuthAccessToken = twitter.getOAuthAccessToken(requestToken, verifier);
                twitter4j.User twitterUser = twitter.verifyCredentials();

                BrokeredIdentityContext identity = new BrokeredIdentityContext(Long.toString(twitterUser.getId()));
                identity.setIdp(TwitterIdentityProvider.this);

                identity.setUsername(twitterUser.getScreenName());
                identity.setName(twitterUser.getName());

                // Store the raw token data as a small JSON document.
                StringBuilder tokenBuilder = new StringBuilder();

                tokenBuilder.append("{");
                tokenBuilder.append("\"oauth_token\":").append("\"").append(oAuthAccessToken.getToken()).append("\"").append(",");
                tokenBuilder.append("\"oauth_token_secret\":").append("\"").append(oAuthAccessToken.getTokenSecret()).append("\"").append(",");
                tokenBuilder.append("\"screen_name\":").append("\"").append(oAuthAccessToken.getScreenName()).append("\"").append(",");
                tokenBuilder.append("\"user_id\":").append("\"").append(oAuthAccessToken.getUserId()).append("\"");
                tokenBuilder.append("}");

                identity.setToken(tokenBuilder.toString());
                identity.setCode(state);
                identity.setIdpConfig(getConfig());

                return callback.authenticated(identity);
            } catch (Exception e) {
                // FIX: message previously read "Could get ...", stating the opposite of the failure.
                logger.error("Could not get user profile from twitter.", e);
            }
            EventBuilder event = new EventBuilder(realm, session, clientConnection);
            event.event(EventType.LOGIN);
            event.error("twitter_login_failed");
            return ErrorPage.error(session, Messages.UNEXPECTED_ERROR_HANDLING_RESPONSE);
        }

        /**
         * Resolves and validates the state parameter back into a client
         * session code; throws if the code is stale or malformed.
         */
        private ClientSessionCode parseClientSessionCode(String code) {
            ClientSessionCode clientCode = ClientSessionCode.parse(code, this.session, this.realm);

            if (clientCode != null && clientCode.isValid(AUTHENTICATE.name(), ClientSessionCode.ActionType.LOGIN)) {
                ClientSessionModel clientSession = clientCode.getClientSession();

                if (clientSession != null) {
                    ClientModel client = clientSession.getClient();

                    if (client == null) {
                        throw new IdentityBrokerException("Invalid client");
                    }

                    logger.debugf("Got authorization code from client [%s].", client.getClientId());
                }

                logger.debugf("Authorization code is valid.");

                return clientCode;
            }

            throw new IdentityBrokerException("Invalid code, please login again through your application.");
        }
    }

    @Override
    public Response retrieveToken(KeycloakSession session, FederatedIdentityModel identity) {
        return Response.ok(identity.getToken()).type(MediaType.APPLICATION_JSON).build();
    }
}
apache-2.0
msteindorfer/disl-svn-2013-10-01
src-test/ch/usi/dag/disl/test/dispatch2/TargetClass.java
467
package ch.usi.dag.disl.test.dispatch2; /** * Allocation workload used as an instrumentation target: creates ten million * instances and reports how long the allocations took. */ public class TargetClass { public static void main(String[] args) throws InterruptedException { final long startNanos = System.nanoTime(); final int count = 10000000; final TargetClass[] instances = new TargetClass[count]; int created = 0; while (created < count) { instances[created] = new TargetClass(); ++created; } final long elapsedMillis = (System.nanoTime() - startNanos) / 1000000; System.out.println("Allocated " + created + " objects in " + elapsedMillis + " ms"); } }
apache-2.0
marcosperanza/logo
src/main/java/org/logo/viewer/logo/Procedure.java
1260
/**
 *
 * Copyright 2003-2011 Simple Logo Viewer
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package org.logo.viewer.logo;

import java.util.List;

import org.logo.viewer.exp.Exp;
import org.logo.viewer.logo.exception.ParameterException;
import org.logo.viewer.logo.exception.ProcedureNotFoundException;
import org.logo.viewer.logo.exception.VariableNotFoundException;

/**
 * A named, invocable Logo procedure: a name plus a body of {@link Statement}s
 * that can be executed with a list of actual parameter expressions.
 */
public interface Procedure {

	/**
	 * Sets the procedure's name (the identifier it is invoked by).
	 */
	public void setName(String name);

	/**
	 * @return the procedure's name
	 */
	public String getName();

	/**
	 * @return the statements making up the procedure body
	 */
	public List<Statement> getBody();

	/**
	 * Executes this procedure's body.
	 *
	 * @param realParams the actual argument expressions, in declaration order
	 * @param invoker the scoped statement from which the call originates
	 *        (presumably supplies the enclosing variable scope -- confirm with
	 *        the implementation)
	 * @throws ProcedureNotFoundException if the body invokes an undefined procedure
	 * @throws ParameterException if the arguments do not match the formals
	 * @throws VariableNotFoundException if the body references an unbound variable
	 */
	public void invoke(List<Exp> realParams, ScopedStatement invoker)
			throws ProcedureNotFoundException, ParameterException,
			VariableNotFoundException;

	/**
	 * Replaces the procedure's body statements.
	 */
	public void setBody(List<Statement> body);
}
apache-2.0
jinyneu/jiweather
src/com/jiweather/app/model/County.java
640
package com.jiweather.app.model; public class County { private int id; private String countyName; private String countyCode; private int cityId; public int getId() { return id; } public void setId(int id) { this.id = id; } public String getCountyName() { return countyName; } public void setCountyName(String countyName) { this.countyName = countyName; } public String getCountyCode() { return countyCode; } public void setCountyCode(String countyCode) { this.countyCode = countyCode; } public int getCityId() { return cityId; } public void setCityId(int cityId) { this.cityId = cityId; } }
apache-2.0
pister/wint
wint-framework/src/main/java/wint/mvc/form/DefaultForm.java
7671
package wint.mvc.form; import java.util.Collection; import java.util.List; import java.util.Map; import wint.core.config.Constants; import wint.lang.convert.ConvertUtil; import wint.lang.magic.MagicList; import wint.lang.magic.MagicObject; import wint.lang.magic.Property; import wint.lang.utils.CollectionUtil; import wint.lang.utils.MapUtil; import wint.mvc.flow.InnerFlowData; import wint.mvc.form.config.FieldConfig; import wint.mvc.form.config.FormConfig; import wint.mvc.form.config.ValidatorConfig; import wint.mvc.form.runtime.FormFactory; import wint.mvc.form.runtime.ResultRunTimeForm; import wint.mvc.form.runtime.RunTimeForm; import wint.mvc.form.validator.ValidateResult; import wint.mvc.form.validator.Validator; import wint.mvc.parameters.MapParameters; import wint.mvc.parameters.Parameters; import wint.mvc.view.types.ViewTypes; /** * @author pister * 2012-2-12 01:29:11 */ public class DefaultForm implements Form { private FormConfig formConfig; private InnerFlowData flowData; private ValidateResult validateResult; private Map<String, Field> fields; private Parameters parameters; private boolean isHeld = false; public DefaultForm(FormConfig formConfig, InnerFlowData flowData) { super(); this.formConfig = formConfig; this.flowData = flowData; this.fields = MapUtil.newHashMap(); initFields(); // 记录最后一次表单名称 flowData.setAttribute(Constants.Form.LAST_FORM_NAME, getName()); } private void initFields() { Map<String, FieldConfig> fieldConfigs = formConfig.getFieldConfigs(); for (Map.Entry<String, FieldConfig> entry : fieldConfigs.entrySet()) { FieldConfig fieldConfig = entry.getValue(); DefaultField defaultField = new DefaultField(fieldConfig, this); fields.put(fieldConfig.getName(), defaultField); } } public void hold(Object object) { if (isHeld) { return; } FormFactory formFactory = (FormFactory)flowData.getInnerContext().get(Constants.Form.TEMPLATE_FORM_FACTORY_NAME); if (formFactory.getResultForm(getName()) != null) { // 已经有form设置了,不再进行覆盖 isHeld = true; return; } 
ResultRunTimeForm resultFormFactory = new ResultRunTimeForm(this, object); formFactory.addResultForm(getName(), resultFormFactory); isHeld = true; } @Override public Form holdValue(String name, Object value) { FormFactory formFactory = (FormFactory)flowData.getInnerContext().get(Constants.Form.TEMPLATE_FORM_FACTORY_NAME); RunTimeForm runTimeForm = formFactory.getResultForm(getName()); if (runTimeForm == null) { throw new RuntimeException("you must call holdValue() after call hold()"); } runTimeForm.setValue(name, value); return this; } public boolean apply(Object target) { if (validateResult == null) { validate(); } if (!validateResult.isSuccess()) { return false; } MagicObject magicObject = MagicObject.wrap(target); Map<String, Property> properties = magicObject.getMagicClass().getProperties(); for (Map.Entry<String, Field> entry: fields.entrySet()) { String name = entry.getKey(); Field field = entry.getValue(); FieldConfig fieldConfig = field.getFieldConfig(); Property property = properties.get(name); if (property == null) { continue; } if (!property.isWritable()) { continue; } String value = entry.getValue().getValue(); String[] values = entry.getValue().getValues(); if (property.getPropertyClass().isArray()) { property.setValueExt(target, values); continue; } if (property.getPropertyClass().isCollectionLike()) { property.setValueExt(target, toTargetCollection(values, fieldConfig.getMultipleValueType())); continue; } if (fieldConfig.isMultipleValue()) { String stringValue = valuesToString(values, fieldConfig); property.setValueExt(target, stringValue); continue; } property.setValueExt(target, value); } return true; } protected String valuesToString(String[] values, FieldConfig fieldConfig) { return MagicList.wrap(values).join(fieldConfig.getMultipleValueSeparator()); } protected Collection toTargetCollection(String[] values, String multipleValueType) { if (values == null) { return null; } Collection ret = CollectionUtil.newArrayList(values.length); for (String 
value : values) { Object newValue = ConvertUtil.convertTo(value, multipleValueType); ret.add(newValue); } return ret; } public String getName() { return formConfig.getName(); } public boolean validate() { if (validateResult != null) { return validateResult.isSuccess(); } ValidateResult theResult = new ValidateResult(); Parameters parameters = flowData.getParameters(); Map<String, FieldConfig> fieldConfigs = formConfig.getFieldConfigs(); for (Map.Entry<String, FieldConfig> entry : fieldConfigs.entrySet()) { String fieldName = entry.getKey(); FieldConfig fieldConfig = entry.getValue(); MagicList<ValidatorConfig> validatorConfigs = fieldConfig.getValidatorConfigs(); List<String> messages = CollectionUtil.newArrayList(); theResult.setFieldMessages(fieldName, messages); String fieldValue = parameters.getString(fieldName); Field field = fields.get(fieldName); field.setValue(fieldValue); String[] values = parameters.getStringArray(fieldName); field.setValues(values); for (ValidatorConfig validatorConfig : validatorConfigs) { Validator validator = validatorConfig.getValidator(); boolean result = validator.validate(flowData, formConfig, fieldName, fieldValue); if (!result) { field.setMessage(validatorConfig.getMessage()); messages.add(validatorConfig.getMessage()); break; } } } validateResult = theResult; boolean ret = validateResult.isSuccess(); buildParameters(); holdRequest(); return ret; } private void buildParameters() { Map<String, String[]> values = MapUtil.newHashMap(); for(Map.Entry<String, Field> entry : fields.entrySet()) { String name = entry.getKey(); Field field = entry.getValue(); if (field == null) { continue; } values.put(name, field.getValues()); } parameters = new MapParameters(values); } public void holdRequest() { ResultRunTimeForm resultFormFactory = new ResultRunTimeForm(this); FormFactory formFactory = (FormFactory)flowData.getInnerContext().get(Constants.Form.TEMPLATE_FORM_FACTORY_NAME); formFactory.addResultForm(getName(), resultFormFactory); 
isHeld = true; } public void clearHold() { FormFactory formFactory = (FormFactory)flowData.getInnerContext().get(Constants.Form.TEMPLATE_FORM_FACTORY_NAME); formFactory.removeResultForm(getName()); isHeld = false; } public ValidateResult getValidateResult() { return validateResult; } public Map<String, Field> getFields() { return fields; } public Parameters getValues() { return parameters; } public void setFields(Map<String, Field> fields) { this.fields = fields; } public boolean isHeld() { return isHeld; } }
apache-2.0
superspeedone/youyou
src/com/youno/view/LoadingView.java
1014
package com.youno.view; import com.youno.R; import android.content.Context; import android.util.AttributeSet; import android.view.LayoutInflater; import android.widget.ImageView; import android.widget.RelativeLayout; import android.widget.TextView; public class LoadingView extends RelativeLayout { private Context mContext; private ImageView mImageView; private TextView mTextView; public LoadingView(Context context) { super(context); mContext=context; initView(); } public LoadingView(Context context, AttributeSet attrs) { super(context, attrs); mContext=context; initView(); } private void initView(){ LayoutInflater.from(mContext).inflate(R.layout.common_loading_view, this); mImageView=(ImageView) findViewById(R.id.iv_loading); mTextView=(TextView) findViewById(R.id.tv_loading); } public void setImgOnClickListener(OnClickListener listener){ mImageView.setOnClickListener(listener); } public void setText(int txtRes){ mTextView.setText(txtRes); } }
apache-2.0
JamesBarnes88/CleanTorrent
app/src/test/java/com/castle/cleantorrent/ExampleUnitTest.java
316
package com.castle.cleantorrent; import org.junit.Test; import static org.junit.Assert.*; /** * To work on unit tests, switch the Test Artifact in the Build Variants view. */ public class ExampleUnitTest { @Test public void addition_isCorrect() throws Exception { assertEquals(4, 2 + 2); } }
apache-2.0
kantega/respiro
plugins/core/jersey-metrics/src/main/java/org/kantega/respiro/jerseymetrics/TimerBeforeFilter.java
1591
/* * Copyright 2019 Kantega AS * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.kantega.respiro.jerseymetrics; import com.codahale.metrics.MetricRegistry; import com.codahale.metrics.Timer; import javax.ws.rs.container.ContainerRequestContext; import javax.ws.rs.container.ContainerRequestFilter; import javax.ws.rs.ext.Provider; import java.io.IOException; import static com.codahale.metrics.MetricRegistry.name; /** * */ @Provider public class TimerBeforeFilter implements ContainerRequestFilter { private final String path; public TimerBeforeFilter(String path) { this.path = path; } @Override public void filter(ContainerRequestContext requestContext) throws IOException { MetricRegistry registry = JerseyMetricsPlugin.getMetricRegistry(); String name = name("REST", requestContext.getMethod(), path); Timer.Context context = registry.timer(name).time(); requestContext.setProperty("metrics.timeContext", context); requestContext.setProperty("metrics.path", path); } }
apache-2.0
realityforge/arez
doc-examples/src/main/java/arez/doc/examples/at_observe3/CurrencyView.java
609
package arez.doc.examples.at_observe3; import arez.annotations.ArezComponent; import arez.annotations.Executor; import arez.annotations.Observe; @ArezComponent public abstract class CurrencyView { // A read-only observer that renders @Observe( executor = Executor.EXTERNAL ) public ReactNode render() { //Render component here //DOC ELIDE START return null; //DOC ELIDE END } void onRenderDepsChange() { // Schedule this component scheduleRender(); } //DOC ELIDE START static class ReactNode { } private void scheduleRender() { } //DOC ELIDE END }
apache-2.0
Kevin-Lee/kommonlee-core
src/main/java/org/elixirian/kommonlee/functional/collect/ForEachInArray.java
2488
/** * This project is licensed under the Apache License, Version 2.0 * if the following condition is met: * (otherwise it cannot be used by anyone but the author, Kevin, only) * * The original KommonLee project is owned by Lee, Seong Hyun (Kevin). * * -What does it mean to you? * Nothing, unless you want to take the ownership of * "the original project" (not yours or forked & modified one). * You are free to use it for both non-commercial and commercial projects * and free to modify it as the Apache License allows. * * -So why is this condition necessary? * It is only to protect the original project (See the case of Java). * * * Copyright 2009 Lee, Seong Hyun (Kevin) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.elixirian.kommonlee.functional.collect; import org.elixirian.kommonlee.functional.VoidFunction1; /** * <pre> * ___ _____ _____ * / \/ / ______ __________________ ______ __ ______ / / ______ ______ * / / _/ __ // / / / / / /_/ __ // // // / / ___ \/ ___ \ * / \ / /_/ _/ _ _ / _ _ // /_/ _/ __ // /___/ _____/ _____/ * /____/\____\/_____//__//_//_/__//_//_/ /_____//___/ /__//________/\_____/ \_____/ * </pre> * * <pre> * ___ _____ _____ * / \/ /_________ ___ ____ __ ______ / / ______ ______ * / / / ___ \ \/ //___// // / / / / ___ \/ ___ \ * / \ / _____/\ // // __ / / /___/ _____/ _____/ * /____/\____\\_____/ \__//___//___/ /__/ /________/\_____/ \_____/ * </pre> * * @author Lee, SeongHyun (Kevin) * @version 0.0.1 (2011-07-23) * @param <E> * @param <F> */ public class ForEachInArray<E, F extends VoidFunction1<? super E>> { public void forEach(final E[] source, final F function) { for (final E element : source) { function.apply(element); } } }
apache-2.0
mdanielwork/intellij-community
java/java-analysis-impl/src/com/intellij/codeInspection/RedundantBackticksAroundRawStringLiteralInspection.java
2855
// Copyright 2000-2018 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file. package com.intellij.codeInspection; import com.intellij.openapi.project.Project; import com.intellij.openapi.util.TextRange; import com.intellij.openapi.util.text.StringUtil; import com.intellij.pom.java.LanguageLevel; import com.intellij.psi.*; import com.intellij.psi.impl.source.tree.java.PsiLiteralExpressionImpl; import com.intellij.psi.util.PsiUtil; import org.jetbrains.annotations.Nls; import org.jetbrains.annotations.NotNull; public class RedundantBackticksAroundRawStringLiteralInspection extends AbstractBaseJavaLocalInspectionTool implements CleanupLocalInspectionTool { @NotNull @Override public PsiElementVisitor buildVisitor(@NotNull final ProblemsHolder holder, boolean isOnTheFly) { if (PsiUtil.getLanguageLevel(holder.getFile()) != LanguageLevel.JDK_12_PREVIEW) { return PsiElementVisitor.EMPTY_VISITOR; } return new JavaElementVisitor() { @Override public void visitLiteralExpression(PsiLiteralExpression expression) { if (((PsiLiteralExpressionImpl)expression).getLiteralElementType() == JavaTokenType.RAW_STRING_LITERAL) { String text = expression.getText(); String rawString = ((PsiLiteralExpressionImpl)expression).getRawString(); int reducedNumberOfBackTicks = PsiRawStringLiteralUtil.getReducedNumberOfBackticks(text); if (reducedNumberOfBackTicks > 0) { String newBackticksSequence = StringUtil.repeat("`", reducedNumberOfBackTicks); int redundantTicksLength = (text.length() - rawString.length()) / 2 - reducedNumberOfBackTicks; holder.registerProblem(expression, "Number of backticks may be reduced by " + redundantTicksLength, ProblemHighlightType.LIKE_UNKNOWN_SYMBOL, new TextRange(0, redundantTicksLength), new LocalQuickFix() { @Nls @NotNull @Override public String getFamilyName() { return "Reduce number of backticks"; } @Override public void applyFix(@NotNull Project project, @NotNull ProblemDescriptor descriptor) { 
PsiElement element = descriptor.getPsiElement(); PsiExpression newRawStringLiteral = JavaPsiFacade.getElementFactory(project) .createExpressionFromText( newBackticksSequence + rawString + newBackticksSequence, element); element.replace(newRawStringLiteral); } }); } } } }; } }
apache-2.0
zengjingfang/AndroidBox
box/src/main/java/cn/zengjingfang/box/android/rxhandler/rx/Subscriber.java
2479
package cn.zengjingfang.box.android.rxhandler.rx; import cn.zengjingfang.box.android.rxhandler.rx.observer.Observer; import cn.zengjingfang.box.android.rxhandler.rx.util.SubscriptionList; /** * * Created by ZengJingFang on 2018/4/25. */ public abstract class Subscriber<T> implements Observer<T>, Subscription { // represents requested not set yet private static final long NOT_SET = Long.MIN_VALUE; private final SubscriptionList subscriptions; private final Subscriber<?> subscriber; /* protected by `this` */ private Producer producer; /* protected by `this` */ private long requested = NOT_SET; // default to not set protected Subscriber() { this(null); } protected Subscriber(Subscriber<?> subscriber) { this.subscriber = subscriber; this.subscriptions = subscriber != null ? subscriber.subscriptions : new SubscriptionList(); } public final void add(Subscription s) { subscriptions.add(s); } public void onStart() { // do nothing by default } @Override public final void unsubscribe() { // subscriptions.unsubscribe(); } /** * Indicates whether this Subscriber has unsubscribed from its list of subscriptions. * * @return {@code true} if this Subscriber has unsubscribed from its subscriptions, {@code false} otherwise */ @Override public final boolean isUnsubscribed() { // return subscriptions.isUnsubscribed(); return false; } public void setProducer(Producer p) { long toRequest; boolean passToSubscriber = false; synchronized (this) { toRequest = requested; producer = p; if (subscriber != null) { // middle operator ... we pass through unless a request has been made if (toRequest == NOT_SET) { // we pass through to the next producer as nothing has been requested passToSubscriber = true; } } } // do after releasing lock if (passToSubscriber) { subscriber.setProducer(producer); } else { // we execute the request with whatever has been requested (or Long.MAX_VALUE) if (toRequest == NOT_SET) { producer.request(Long.MAX_VALUE); } else { producer.request(toRequest); } } } }
apache-2.0
igor-sfdc/aura
aura-osgi-extensions/src/main/java/org/auraframework/ds/http/whiteboard/providers/AuraResourceRewriteFilterProxy.java
1500
/* * Copyright (C) 2013 salesforce.com, inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.auraframework.ds.http.whiteboard.providers; import org.auraframework.ds.http.whiteboard.proxy.HttpServiceProviderProxy; import org.auraframework.ds.http.whiteboard.proxy.impl.FilterProxyImpl; import org.auraframework.http.AuraResourceRewriteFilter; import aQute.bnd.annotation.component.Component; @Component (provide=HttpServiceProviderProxy.class) public class AuraResourceRewriteFilterProxy extends FilterProxyImpl<AuraResourceRewriteFilter> { private static final int AURA_RESOURCE_REWRITE_FILTER_RANKING = 200; private static final String AURA_RESOURCE_REWRITE_FILTER_PATTERN = "/l/.*"; public AuraResourceRewriteFilterProxy() { super(AURA_RESOURCE_REWRITE_FILTER_PATTERN, AURA_RESOURCE_REWRITE_FILTER_RANKING); } @Override protected AuraResourceRewriteFilter newInstance() { return new AuraResourceRewriteFilter(); } }
apache-2.0
CloudSlang/score
engine/orchestrator/score-orchestrator-impl/src/main/java/io/cloudslang/orchestrator/services/EngineVersionServiceImpl.java
972
/* * Copyright © 2014-2017 EntIT Software LLC, a Micro Focus company (L.P.) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.cloudslang.orchestrator.services; import org.springframework.transaction.annotation.Transactional; /** * Created by kravtsov on 03/01/2016 */ public class EngineVersionServiceImpl implements EngineVersionService { @Override @Transactional public String getEngineVersionId() { return ""; } }
apache-2.0
ilyashik/hzcpoc
src/main/java/com/hzcpoc/runnable/ConsumePersist.java
2489
package com.hzcpoc.runnable; import com.hazelcast.client.HazelcastClient; import com.hazelcast.client.config.ClientConfig; import com.hazelcast.core.HazelcastInstance; import com.hazelcast.core.IMap; import com.hzcpoc.IncomingMessage; import com.rabbitmq.client.Channel; import com.rabbitmq.client.Connection; import com.rabbitmq.client.ConnectionFactory; import com.rabbitmq.client.QueueingConsumer; import java.io.IOException; import java.io.Serializable; import java.util.concurrent.TimeoutException; import static com.hzcpoc.Constants.HAZELCAST_NODE1; import static com.rmq.Producer.*; /** * Created by Ilya on 07.02.2017. */ public class ConsumePersist implements Runnable, Serializable { @Override public void run() { ConnectionFactory cf = new ConnectionFactory(); cf.setHost(RABBITMQ_HOST); cf.setUsername(RABBIT_USERNAME); cf.setPassword(RABBIT_USER_PWD); Connection connection = null; try { connection = cf.newConnection(); Channel channel = connection.createChannel(); channel.basicQos(0); channel.queueDeclare(RABBITMQ_QUEUE, true /*durable*/, false, false, null); System.out.println("Consumer Queue declared"); ClientConfig config = new ClientConfig(); config.getNetworkConfig().addAddress(HAZELCAST_NODE1/*, HAZELCAST_NODE2, HAZELCAST_NODE3*/); HazelcastInstance instance = HazelcastClient.newHazelcastClient(config); System.out.println("Instance name : " + instance.getName()); IMap defaultMap = instance.getMap("incomingMessages"); QueueingConsumer qConsumer = new QueueingConsumer(channel); channel.basicConsume(RABBITMQ_QUEUE, false, qConsumer); QueueingConsumer.Delivery delivery = null; delivery = qConsumer.nextDelivery(); byte[] body = delivery.getBody(); IncomingMessage im = new IncomingMessage(1, body); defaultMap.put(3, im); System.out.println(body.length); //channel.basicAck(delivery.getEnvelope().getDeliveryTag(), true); System.out.println("Exiting ConsumePersist runnable"); connection.close(); } catch (IOException e) { e.printStackTrace(); } catch (TimeoutException 
e) { e.printStackTrace(); } catch (InterruptedException e) { e.printStackTrace(); } } }
apache-2.0
heriram/incubator-asterixdb
asterixdb/asterix-external-data/src/main/java/org/apache/asterix/external/api/IDataParserFactory.java
2518
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.asterix.external.api; import java.io.Serializable; import java.util.List; import java.util.Map; import org.apache.asterix.common.exceptions.AsterixException; import org.apache.asterix.external.api.IExternalDataSourceFactory.DataSourceType; import org.apache.asterix.om.types.ARecordType; public interface IDataParserFactory extends Serializable { /** * @return The expected data source type {STREAM or RECORDS} * The data source type for a parser and a data source must match. * an instance of IDataParserFactory with RECORDS data source type must implement IRecordDataParserFactory * <T> * an instance of IDataParserFactory with STREAM data source type must implement IStreamDataParserFactory * @throws AsterixException */ public DataSourceType getDataSourceType(); /** * Configure the data parser factory. 
The passed map contains key value pairs from the * submitted AQL statement and any additional pairs added by the compiler * * @param configuration */ public void configure(Map<String, String> configuration) throws AsterixException; /** * Set the record type expected to be produced by parsers created by this factory * * @param recordType */ public void setRecordType(ARecordType recordType); /** * Set the meta record type expected to be produced by parsers created by this factory * * @param metaType */ public void setMetaType(ARecordType metaType); /** * Get the formats that are handled by this parser. * * @return A list of formats */ public List<String> getParserFormats(); }
apache-2.0
chtyim/cdap
cdap-proto/src/main/java/co/cask/cdap/proto/metadata/MetadataSearchTargetType.java
1597
/* * Copyright © 2015 Cask Data, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package co.cask.cdap.proto.metadata; /** * Supported types for metadata search. */ public enum MetadataSearchTargetType { // the custom values are required because these value match the entity-type stored as // a part of MDS key. ALL("All"), ARTIFACT("Artifact"), APP("Application"), PROGRAM("Program"), DATASET("DatasetInstance"), STREAM("Stream"), VIEW("View"); private final String serializedForm; MetadataSearchTargetType(String serializedForm) { this.serializedForm = serializedForm; } /** * @return {@link MetadataSearchTargetType} of the given value. */ public static MetadataSearchTargetType valueOfSerializedForm(String value) { for (MetadataSearchTargetType metadataSearchTargetType : values()) { if (metadataSearchTargetType.serializedForm.equalsIgnoreCase(value)) { return metadataSearchTargetType; } } throw new IllegalArgumentException(String.format("No enum constant for serialized form: %s", value)); } }
apache-2.0
HazelChen/hadoop
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
60167
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.net.InetAddresses; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.protocol.*; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList; import org.apache.hadoop.hdfs.server.namenode.CachedBlock; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.Namesystem; import org.apache.hadoop.hdfs.server.protocol.*; import 
org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; import org.apache.hadoop.hdfs.util.CyclicIteration; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.net.*; import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Time; import java.io.IOException; import java.io.PrintWriter; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.UnknownHostException; import java.util.*; import static org.apache.hadoop.util.Time.now; /** * Manage datanodes, include decommission and other activities. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class DatanodeManager { static final Log LOG = LogFactory.getLog(DatanodeManager.class); private final Namesystem namesystem; private final BlockManager blockManager; private final HeartbeatManager heartbeatManager; private Daemon decommissionthread = null; /** * Stores the datanode -> block map. * <p> * Done by storing a set of {@link DatanodeDescriptor} objects, sorted by * storage id. In order to keep the storage map consistent it tracks * all storages ever registered with the namenode. * A descriptor corresponding to a specific storage id can be * <ul> * <li>added to the map if it is a new storage id;</li> * <li>updated with a new datanode started as a replacement for the old one * with the same storage id; and </li> * <li>removed if and only if an existing datanode is restarted to serve a * different storage id.</li> * </ul> <br> * <p> * Mapping: StorageID -> DatanodeDescriptor */ private final NavigableMap<String, DatanodeDescriptor> datanodeMap = new TreeMap<String, DatanodeDescriptor>(); /** Cluster network topology */ private final NetworkTopology networktopology; /** Host names to datanode descriptors mapping. 
*/ private final Host2NodesMap host2DatanodeMap = new Host2NodesMap(); private final DNSToSwitchMapping dnsToSwitchMapping; private final boolean rejectUnresolvedTopologyDN; private final int defaultXferPort; private final int defaultInfoPort; private final int defaultInfoSecurePort; private final int defaultIpcPort; /** Read include/exclude files*/ private final HostFileManager hostFileManager = new HostFileManager(); /** The period to wait for datanode heartbeat.*/ private final long heartbeatExpireInterval; /** Ask Datanode only up to this many blocks to delete. */ final int blockInvalidateLimit; /** The interval for judging stale DataNodes for read/write */ private final long staleInterval; /** Whether or not to avoid using stale DataNodes for reading */ private final boolean avoidStaleDataNodesForRead; /** * Whether or not to avoid using stale DataNodes for writing. * Note that, even if this is configured, the policy may be * temporarily disabled when a high percentage of the nodes * are marked as stale. */ private final boolean avoidStaleDataNodesForWrite; /** * When the ratio of stale datanodes reaches this number, stop avoiding * writing to stale datanodes, i.e., continue using stale nodes for writing. */ private final float ratioUseStaleDataNodesForWrite; /** The number of stale DataNodes */ private volatile int numStaleNodes; /** The number of stale storages */ private volatile int numStaleStorages; /** * Whether or not this cluster has ever consisted of more than 1 rack, * according to the NetworkTopology. */ private boolean hasClusterEverBeenMultiRack = false; private final boolean checkIpHostnameInRegistration; /** * Whether we should tell datanodes what to cache in replies to * heartbeat messages. */ private boolean shouldSendCachingCommands = false; /** * The number of datanodes for each software version. This list should change * during rolling upgrades. 
* Software version -> Number of datanodes with this version
   */
  private HashMap<String, Integer> datanodesSoftwareVersions =
    new HashMap<String, Integer>(4, 0.75f);

  /**
   * The minimum time between resending caching directives to Datanodes,
   * in milliseconds.
   *
   * Note that when a rescan happens, we will send the new directives
   * as soon as possible. This timeout only applies to resending
   * directives that we've already sent.
   */
  private final long timeBetweenResendingCachingDirectivesMs;

  /**
   * Builds the manager from configuration: reads the default datanode ports,
   * the include/exclude host files, the topology resolver, heartbeat/staleness
   * timings and caching-retry interval. Does not start any threads; see
   * {@code activate(Configuration)} for that.
   */
  DatanodeManager(final BlockManager blockManager, final Namesystem namesystem,
      final Configuration conf) throws IOException {
    this.namesystem = namesystem;
    this.blockManager = blockManager;

    networktopology = NetworkTopology.getInstance(conf);

    this.heartbeatManager = new HeartbeatManager(namesystem, blockManager, conf);

    this.defaultXferPort = NetUtils.createSocketAddr(
        conf.get(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY,
            DFSConfigKeys.DFS_DATANODE_ADDRESS_DEFAULT)).getPort();
    this.defaultInfoPort = NetUtils.createSocketAddr(
        conf.get(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY,
            DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_DEFAULT)).getPort();
    this.defaultInfoSecurePort = NetUtils.createSocketAddr(
        conf.get(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY,
            DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_DEFAULT)).getPort();
    this.defaultIpcPort = NetUtils.createSocketAddr(
        conf.get(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
            DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort();
    // A bad hosts file is logged but not fatal: the manager starts with the
    // previous (empty) include/exclude state.
    try {
      this.hostFileManager.refresh(conf.get(DFSConfigKeys.DFS_HOSTS, ""),
          conf.get(DFSConfigKeys.DFS_HOSTS_EXCLUDE, ""));
    } catch (IOException e) {
      LOG.error("error reading hosts files: ", e);
    }

    this.dnsToSwitchMapping = ReflectionUtils.newInstance(
        conf.getClass(DFSConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
            ScriptBasedMapping.class, DNSToSwitchMapping.class), conf);

    this.rejectUnresolvedTopologyDN = conf.getBoolean(
        DFSConfigKeys.DFS_REJECT_UNRESOLVED_DN_TOPOLOGY_MAPPING_KEY,
        DFSConfigKeys.DFS_REJECT_UNRESOLVED_DN_TOPOLOGY_MAPPING_DEFAULT);

    // If the dns to switch mapping supports cache, resolve network
    // locations of those hosts in the include list and store the mapping
    // in the cache; so future calls to resolve will be fast.
    if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
      final ArrayList<String> locations = new ArrayList<String>();
      for (InetSocketAddress addr : hostFileManager.getIncludes()) {
        locations.add(addr.getAddress().getHostAddress());
      }
      dnsToSwitchMapping.resolve(locations);
    }

    final long heartbeatIntervalSeconds = conf.getLong(
        DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
        DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT);
    final int heartbeatRecheckInterval = conf.getInt(
        DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
        DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 minutes
    // A node is declared dead after 2 recheck intervals plus 10 missed
    // heartbeats (heartbeatIntervalSeconds is in seconds, hence * 1000).
    this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval
        + 10 * 1000 * heartbeatIntervalSeconds;
    final int blockInvalidateLimit = Math.max(20*(int)(heartbeatIntervalSeconds),
        DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT);
    this.blockInvalidateLimit = conf.getInt(
        DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY, blockInvalidateLimit);
    LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY
        + "=" + this.blockInvalidateLimit);

    this.checkIpHostnameInRegistration = conf.getBoolean(
        DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY,
        DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_DEFAULT);
    LOG.info(DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY
        + "=" + checkIpHostnameInRegistration);

    this.avoidStaleDataNodesForRead = conf.getBoolean(
        DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY,
        DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_DEFAULT);
    this.avoidStaleDataNodesForWrite = conf.getBoolean(
        DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY,
        DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_DEFAULT);
    this.staleInterval = getStaleIntervalFromConf(conf, heartbeatExpireInterval);
    this.ratioUseStaleDataNodesForWrite = conf.getFloat(
        DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY,
        DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_DEFAULT);
    Preconditions.checkArgument(
        (ratioUseStaleDataNodesForWrite > 0 &&
            ratioUseStaleDataNodesForWrite <= 1.0f),
        DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY +
        " = '" + ratioUseStaleDataNodesForWrite + "' is invalid. " +
        "It should be a positive non-zero float value, not greater than 1.0f.");
    this.timeBetweenResendingCachingDirectivesMs = conf.getLong(
        DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_RETRY_INTERVAL_MS,
        DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_RETRY_INTERVAL_MS_DEFAULT);
  }

  /**
   * Reads and sanity-checks the stale-datanode interval from configuration.
   * The value is clamped up to a configurable multiple of the heartbeat
   * interval, and a warning is logged if it exceeds the heartbeat expiry
   * (in which case nodes would be declared dead before ever being stale).
   *
   * @param conf configuration to read from
   * @param heartbeatExpireInterval ms after which a silent node is dead
   * @return the validated stale interval in milliseconds
   */
  private static long getStaleIntervalFromConf(Configuration conf,
      long heartbeatExpireInterval) {
    long staleInterval = conf.getLong(
        DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
        DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT);
    Preconditions.checkArgument(staleInterval > 0,
        DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY +
        " = '" + staleInterval + "' is invalid. " +
        "It should be a positive non-zero value.");

    final long heartbeatIntervalSeconds = conf.getLong(
        DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
        DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT);
    // The stale interval value cannot be smaller than
    // 3 times of heartbeat interval
    final long minStaleInterval = conf.getInt(
        DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_MINIMUM_INTERVAL_KEY,
        DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_MINIMUM_INTERVAL_DEFAULT)
        * heartbeatIntervalSeconds * 1000;
    if (staleInterval < minStaleInterval) {
      LOG.warn("The given interval for marking stale datanode = "
          + staleInterval + ", which is less than "
          + DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_MINIMUM_INTERVAL_DEFAULT
          + " heartbeat intervals. This may cause too frequent changes of "
          + "stale states of DataNodes since a heartbeat msg may be missing "
          + "due to temporary short-term failures. Reset stale interval to "
          + minStaleInterval + ".");
      staleInterval = minStaleInterval;
    }
    if (staleInterval > heartbeatExpireInterval) {
      LOG.warn("The given interval for marking stale datanode = "
          + staleInterval + ", which is larger than heartbeat expire interval "
          + heartbeatExpireInterval + ".");
    }
    return staleInterval;
  }

  /**
   * Starts the background machinery: the decommission monitor thread and the
   * heartbeat manager. Call once after construction.
   */
  void activate(final Configuration conf) {
    final DecommissionManager dm = new DecommissionManager(namesystem, blockManager);
    this.decommissionthread = new Daemon(dm.new Monitor(
        conf.getInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY,
            DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_DEFAULT),
        conf.getInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_KEY,
            DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_DEFAULT)));
    decommissionthread.start();

    heartbeatManager.activate(conf);
  }

  /** Stops the decommission thread (bounded 3s join) and heartbeat manager. */
  void close() {
    if (decommissionthread != null) {
      decommissionthread.interrupt();
      try {
        decommissionthread.join(3000);
      } catch (InterruptedException e) {
        // best-effort shutdown; fall through without re-waiting
      }
    }
    heartbeatManager.close();
  }

  /** @return the network topology. */
  public NetworkTopology getNetworkTopology() {
    return networktopology;
  }

  /** @return the heartbeat manager. */
  HeartbeatManager getHeartbeatManager() {
    return heartbeatManager;
  }

  /** @return the datanode statistics. */
  public DatanodeStatistics getDatanodeStatistics() {
    return heartbeatManager;
  }

  // True when the node should be ranked last for reads: decommissioned, or
  // (if configured) stale.
  private boolean isInactive(DatanodeInfo datanode) {
    if (datanode.isDecommissioned()) {
      return true;
    }

    if (avoidStaleDataNodesForRead) {
      return datanode.isStale(staleInterval);
    }

    return false;
  }

  /** Sort the located blocks by the distance to the target host.
*/
  public void sortLocatedBlocks(final String targethost,
      final List<LocatedBlock> locatedblocks) {
    //sort the blocks
    // As it is possible for the separation of node manager and datanode,
    // here we should get node but not datanode only .
    Node client = getDatanodeByHost(targethost);
    if (client == null) {
      // The reader is not a datanode; place it in the topology by resolving
      // its rack so distance sorting still works.
      List<String> hosts = new ArrayList<String> (1);
      hosts.add(targethost);
      String rName = dnsToSwitchMapping.resolve(hosts).get(0);
      if (rName != null)
        client = new NodeBase(rName + NodeBase.PATH_SEPARATOR_STR + targethost);
    }

    Comparator<DatanodeInfo> comparator = avoidStaleDataNodesForRead ?
        new DFSUtil.DecomStaleComparator(staleInterval) :
        DFSUtil.DECOM_COMPARATOR;

    for (LocatedBlock b : locatedblocks) {
      DatanodeInfo[] di = b.getLocations();
      // Move decommissioned/stale datanodes to the bottom
      Arrays.sort(di, comparator);

      // Only the leading "active" prefix is distance-sorted; inactive
      // replicas stay at the tail in comparator order.
      int lastActiveIndex = di.length - 1;
      while (lastActiveIndex > 0 && isInactive(di[lastActiveIndex])) {
          --lastActiveIndex;
      }
      int activeLen = lastActiveIndex + 1;
      networktopology.sortByDistance(client, b.getLocations(), activeLen);
    }
  }

  /** Cyclic iteration over the datanode map starting after the given key. */
  CyclicIteration<String, DatanodeDescriptor> getDatanodeCyclicIteration(
      final String firstkey) {
    return new CyclicIteration<String, DatanodeDescriptor>(
        datanodeMap, firstkey);
  }

  /** @return the datanode descriptor for the host. */
  public DatanodeDescriptor getDatanodeByHost(final String host) {
    return host2DatanodeMap.getDatanodeByHost(host);
  }

  /** @return the datanode descriptor for the host and transfer port. */
  public DatanodeDescriptor getDatanodeByXferAddr(String host, int xferPort) {
    return host2DatanodeMap.getDatanodeByXferAddr(host, xferPort);
  }

  /** @return the Host2NodesMap */
  public Host2NodesMap getHost2DatanodeMap() {
    return this.host2DatanodeMap;
  }

  /**
   * Given datanode address or host name, returns the DatanodeDescriptor for the
   * same, or if it doesn't find the datanode, it looks for a machine local and
   * then rack local datanode, if a rack local datanode is not possible either,
   * it returns the DatanodeDescriptor of any random node in the cluster.
   *
   * @param address hostaddress:transfer address
   * @return the best match for the given datanode
   */
  DatanodeDescriptor getDatanodeDescriptor(String address) {
    DatanodeID dnId = parseDNFromHostsEntry(address);
    String host = dnId.getIpAddr();
    int xferPort = dnId.getXferPort();
    DatanodeDescriptor node = getDatanodeByXferAddr(host, xferPort);
    if (node == null) {
      node = getDatanodeByHost(host);
    }
    if (node == null) {
      String networkLocation =
        resolveNetworkLocationWithFallBackToDefaultLocation(dnId);

      // If the current cluster doesn't contain the node, fallback to
      // something machine local and then rack local.
      List<Node> rackNodes = getNetworkTopology()
                                   .getDatanodesInRack(networkLocation);
      if (rackNodes != null) {
        // Try something machine local.
        for (Node rackNode : rackNodes) {
          if (((DatanodeDescriptor) rackNode).getIpAddr().equals(host)) {
            node = (DatanodeDescriptor) rackNode;
            break;
          }
        }

        // Try something rack local.
        if (node == null && !rackNodes.isEmpty()) {
          node = (DatanodeDescriptor) (rackNodes
              .get(DFSUtil.getRandom().nextInt(rackNodes.size())));
        }
      }

      // If we can't even choose rack local, just choose any node in the
      // cluster.
      if (node == null) {
        node = (DatanodeDescriptor)getNetworkTopology()
                                   .chooseRandom(NodeBase.ROOT);
      }
    }
    return node;
  }

  /** Get a datanode descriptor given corresponding DatanodeUUID */
  DatanodeDescriptor getDatanode(final String datanodeUuid) {
    if (datanodeUuid == null) {
      return null;
    }

    return datanodeMap.get(datanodeUuid);
  }

  /**
   * Get data node by datanode ID.
   *
   * @param nodeID datanode ID
   * @return DatanodeDescriptor or null if the node is not found.
   * @throws UnregisteredNodeException if the UUID is known but the transfer
   *         address does not match the registered descriptor
   */
  public DatanodeDescriptor getDatanode(DatanodeID nodeID
      ) throws UnregisteredNodeException {
    final DatanodeDescriptor node = getDatanode(nodeID.getDatanodeUuid());
    if (node == null)
      return null;
    if (!node.getXferAddr().equals(nodeID.getXferAddr())) {
      final UnregisteredNodeException e = new UnregisteredNodeException(
          nodeID, node);
      NameNode.stateChangeLog.fatal("BLOCK* NameSystem.getDatanode: "
                                    + e.getLocalizedMessage());
      throw e;
    }
    return node;
  }

  /**
   * Map parallel arrays of datanode IDs and storage IDs to their
   * {@link DatanodeStorageInfo}s; returns null for an empty input.
   */
  public DatanodeStorageInfo[] getDatanodeStorageInfos(
      DatanodeID[] datanodeID, String[] storageIDs)
          throws UnregisteredNodeException {
    if (datanodeID.length == 0) {
      return null;
    }
    final DatanodeStorageInfo[] storages = new DatanodeStorageInfo[datanodeID.length];
    for(int i = 0; i < datanodeID.length; i++) {
      final DatanodeDescriptor dd = getDatanode(datanodeID[i]);
      // NOTE(review): getDatanode() may return null for an unknown UUID, which
      // would NPE here — presumably callers only pass registered IDs; confirm.
      storages[i] = dd.getStorageInfo(storageIDs[i]);
    }
    return storages;
  }

  /** Prints information about all datanodes. */
  void datanodeDump(final PrintWriter out) {
    synchronized (datanodeMap) {
      out.println("Metasave: Number of datanodes: " + datanodeMap.size());
      for(Iterator<DatanodeDescriptor> it = datanodeMap.values().iterator(); it.hasNext();) {
        DatanodeDescriptor node = it.next();
        out.println(node.dumpDatanode());
      }
    }
  }

  /**
   * Remove a datanode descriptor.
   * @param nodeInfo datanode descriptor.
*/
  private void removeDatanode(DatanodeDescriptor nodeInfo) {
    // Caller must already hold the namesystem write lock.
    assert namesystem.hasWriteLock();
    heartbeatManager.removeDatanode(nodeInfo);
    blockManager.removeBlocksAssociatedTo(nodeInfo);
    networktopology.remove(nodeInfo);
    decrementVersionCount(nodeInfo.getSoftwareVersion());

    if (LOG.isDebugEnabled()) {
      LOG.debug("remove datanode " + nodeInfo);
    }
    // Removing a node changes live-node counts, which can flip safe mode.
    namesystem.checkSafeMode();
  }

  /**
   * Remove a datanode. Acquires the namesystem write lock; a node that is
   * not registered is only logged, not an error.
   * @throws UnregisteredNodeException
   */
  public void removeDatanode(final DatanodeID node
      ) throws UnregisteredNodeException {
    namesystem.writeLock();
    try {
      final DatanodeDescriptor descriptor = getDatanode(node);
      if (descriptor != null) {
        removeDatanode(descriptor);
      } else {
        NameNode.stateChangeLog.warn("BLOCK* removeDatanode: "
            + node + " does not exist");
      }
    } finally {
      namesystem.writeUnlock();
    }
  }

  /** Remove a dead datanode. */
  void removeDeadDatanode(final DatanodeID nodeID) {
      synchronized(datanodeMap) {
        DatanodeDescriptor d;
        try {
          d = getDatanode(nodeID);
        } catch(IOException e) {
          // Mismatched registration (UnregisteredNodeException); treat as
          // "not present" and skip removal.
          d = null;
        }
        if (d != null && isDatanodeDead(d)) {
          NameNode.stateChangeLog.info(
              "BLOCK* removeDeadDatanode: lost heartbeat from " + d);
          removeDatanode(d);
        }
      }
  }

  /** Is the datanode dead? */
  boolean isDatanodeDead(DatanodeDescriptor node) {
    // Dead == no heartbeat within heartbeatExpireInterval ms.
    return (node.getLastUpdate() <
            (Time.now() - heartbeatExpireInterval));
  }

  /** Add a datanode. */
  void addDatanode(final DatanodeDescriptor node) {
    // To keep host2DatanodeMap consistent with datanodeMap,
    // remove from host2DatanodeMap the datanodeDescriptor removed
    // from datanodeMap before adding node to host2DatanodeMap.
    synchronized(datanodeMap) {
      host2DatanodeMap.remove(datanodeMap.put(node.getDatanodeUuid(), node));
    }

    networktopology.add(node); // may throw InvalidTopologyException
    host2DatanodeMap.add(node);
    checkIfClusterIsNowMultiRack(node);

    if (LOG.isDebugEnabled()) {
      LOG.debug(getClass().getSimpleName() + ".addDatanode: "
          + "node " + node + " is added to datanodeMap.");
    }
  }

  /** Physically remove node from datanodeMap.
*/ private void wipeDatanode(final DatanodeID node) { final String key = node.getDatanodeUuid(); synchronized (datanodeMap) { host2DatanodeMap.remove(datanodeMap.remove(key)); } if (LOG.isDebugEnabled()) { LOG.debug(getClass().getSimpleName() + ".wipeDatanode(" + node + "): storage " + key + " is removed from datanodeMap."); } } private void incrementVersionCount(String version) { if (version == null) { return; } synchronized(datanodeMap) { Integer count = this.datanodesSoftwareVersions.get(version); count = count == null ? 1 : count + 1; this.datanodesSoftwareVersions.put(version, count); } } private void decrementVersionCount(String version) { if (version == null) { return; } synchronized(datanodeMap) { Integer count = this.datanodesSoftwareVersions.get(version); if(count != null) { if(count > 1) { this.datanodesSoftwareVersions.put(version, count-1); } else { this.datanodesSoftwareVersions.remove(version); } } } } private boolean shouldCountVersion(DatanodeDescriptor node) { return node.getSoftwareVersion() != null && node.isAlive && !isDatanodeDead(node); } private void countSoftwareVersions() { synchronized(datanodeMap) { HashMap<String, Integer> versionCount = new HashMap<String, Integer>(); for(DatanodeDescriptor dn: datanodeMap.values()) { // Check isAlive too because right after removeDatanode(), // isDatanodeDead() is still true if(shouldCountVersion(dn)) { Integer num = versionCount.get(dn.getSoftwareVersion()); num = num == null ? 1 : num+1; versionCount.put(dn.getSoftwareVersion(), num); } } this.datanodesSoftwareVersions = versionCount; } } public HashMap<String, Integer> getDatanodesSoftwareVersions() { synchronized(datanodeMap) { return new HashMap<String, Integer> (this.datanodesSoftwareVersions); } } /** * Resolve a node's network location. If the DNS to switch mapping fails * then this method guarantees default rack location. 
* @param node to resolve to network location
   * @return network location path
   */
  private String resolveNetworkLocationWithFallBackToDefaultLocation (
      DatanodeID node) {
    String networkLocation;
    try {
      networkLocation = resolveNetworkLocation(node);
    } catch (UnresolvedTopologyException e) {
      // Fall back rather than fail: place the node in the default rack.
      LOG.error("Unresolved topology mapping. Using " +
          NetworkTopology.DEFAULT_RACK + " for host " + node.getHostName());
      networkLocation = NetworkTopology.DEFAULT_RACK;
    }
    return networkLocation;
  }

  /**
   * Resolve a node's network location. If the DNS to switch mapping fails,
   * then this method throws UnresolvedTopologyException.
   * @param node to resolve to network location
   * @return network location path.
   * @throws UnresolvedTopologyException if the DNS to switch mapping fails
   *    to resolve network location.
   */
  private String resolveNetworkLocation (DatanodeID node)
      throws UnresolvedTopologyException {
    List<String> names = new ArrayList<String>(1);
    // Cached mappers are keyed by IP (matching the pre-warm done in the
    // constructor); otherwise resolve by hostname.
    if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
      names.add(node.getIpAddr());
    } else {
      names.add(node.getHostName());
    }

    List<String> rName = resolveNetworkLocation(names);
    String networkLocation;
    if (rName == null) {
      LOG.error("The resolve call returned null!");
      throw new UnresolvedTopologyException(
          "Unresolved topology mapping for host " + node.getHostName());
    } else {
      networkLocation = rName.get(0);
    }
    return networkLocation;
  }

  /**
   * Resolve network locations for specified hosts
   *
   * @param names host names or IPs to resolve
   * @return Network locations if available, Else returns null
   */
  public List<String> resolveNetworkLocation(List<String> names) {
    // resolve its network location
    List<String> rName = dnsToSwitchMapping.resolve(names);
    return rName;
  }

  /**
   * Resolve a node's dependencies in the network. If the DNS to switch
   * mapping fails then this method returns empty list of dependencies
   * @param node to get dependencies for
   * @return List of dependent host names
   */
  private List<String> getNetworkDependenciesWithDefault(DatanodeInfo node) {
    List<String> dependencies;
    try {
      dependencies = getNetworkDependencies(node);
    } catch (UnresolvedTopologyException e) {
      // Degrade gracefully: no dependencies rather than failing the caller.
      LOG.error("Unresolved dependency mapping for host " +
          node.getHostName() +". Continuing with an empty dependency list");
      dependencies = Collections.emptyList();
    }
    return dependencies;
  }

  /**
   * Resolves a node's dependencies in the network. If the DNS to switch
   * mapping fails to get dependencies, then this method throws
   * UnresolvedTopologyException.
   * @param node to get dependencies for
   * @return List of dependent host names
   * @throws UnresolvedTopologyException if the DNS to switch mapping fails
   */
  private List<String> getNetworkDependencies(DatanodeInfo node)
      throws UnresolvedTopologyException {
    List<String> dependencies = Collections.emptyList();

    // Only mappers implementing DNSToSwitchMappingWithDependency can supply
    // dependencies; for all others the list stays empty.
    if (dnsToSwitchMapping instanceof DNSToSwitchMappingWithDependency) {
      //Get dependencies
      dependencies =
          ((DNSToSwitchMappingWithDependency)dnsToSwitchMapping).getDependency(
              node.getHostName());
      if(dependencies == null) {
        LOG.error("The dependency call returned null for host " +
            node.getHostName());
        throw new UnresolvedTopologyException("The dependency call returned " +
            "null for host " + node.getHostName());
      }
    }

    return dependencies;
  }

  /**
   * Remove an already decommissioned data node who is neither in include nor
   * exclude hosts lists from the list of live or dead nodes. This is used
   * to not display an already decommissioned data node to the operators.
   * The operation procedure of making an already decommissioned data node not
   * to be displayed is as following:
   * <ol>
   *   <li>
   *   Host must have been in the include hosts list and the include hosts list
   *   must not be empty.
*   </li>
   *   <li>
   *   Host is decommissioned by remaining in the include hosts list and added
   *   into the exclude hosts list. Name node is updated with the new
   *   information by issuing dfsadmin -refreshNodes command.
   *   </li>
   *   <li>
   *   Host is removed from both include hosts and exclude hosts lists. Name
   *   node is updated with the new information by issuing dfsadmin
   *   -refreshNodes command.
   *   <li>
   * </ol>
   *
   * @param nodeList
   *          , array list of live or dead nodes.
   */
  private void removeDecomNodeFromList(final List<DatanodeDescriptor> nodeList) {
    // If the include list is empty, any nodes are welcomed and it does not
    // make sense to exclude any nodes from the cluster. Therefore, no remove.
    if (!hostFileManager.hasIncludes()) {
      return;
    }
    for (Iterator<DatanodeDescriptor> it = nodeList.iterator(); it.hasNext();) {
      DatanodeDescriptor node = it.next();
      if ((!hostFileManager.isIncluded(node)) && (!hostFileManager.isExcluded(node))
          && node.isDecommissioned()) {
        // Include list is not empty, an existing datanode does not appear
        // in both include or exclude lists and it has been decommissioned.
        // Remove it from the node list.
        it.remove();
      }
    }
  }

  /**
   * Decommission the node if it is in exclude list.
   */
  private void checkDecommissioning(DatanodeDescriptor nodeReg) {
    // If the registered node is in exclude list, then decommission it
    if (hostFileManager.isExcluded(nodeReg)) {
      startDecommission(nodeReg);
    }
  }

  /**
   * Change, if appropriate, the admin state of a datanode to
   * decommission completed. Return true if decommission is complete.
   */
  boolean checkDecommissionState(DatanodeDescriptor node) {
    // Check to see if all blocks in this decommissioned
    // node has reached their target replication factor.
    if (node.isDecommissionInProgress() && node.checkBlockReportReceived()) {
      if (!blockManager.isReplicationInProgress(node)) {
        node.setDecommissioned();
        LOG.info("Decommission complete for " + node);
      }
    }
    return node.isDecommissioned();
  }

  /** Start decommissioning the specified datanode.
*/
  @InterfaceAudience.Private
  @VisibleForTesting
  public void startDecommission(DatanodeDescriptor node) {
    // Idempotent: a node already decommissioning/decommissioned is untouched.
    if (!node.isDecommissionInProgress() && !node.isDecommissioned()) {
      for (DatanodeStorageInfo storage : node.getStorageInfos()) {
        LOG.info("Start Decommissioning " + node + " " + storage
            + " with " + storage.numBlocks() + " blocks");
      }
      heartbeatManager.startDecommission(node);
      node.decommissioningStatus.setStartTime(now());

      // all the blocks that reside on this node have to be replicated.
      checkDecommissionState(node);
    }
  }

  /** Stop decommissioning the specified datanodes. */
  void stopDecommission(DatanodeDescriptor node) {
    if (node.isDecommissionInProgress() || node.isDecommissioned()) {
      LOG.info("Stop Decommissioning " + node);
      heartbeatManager.stopDecommission(node);
      // Over-replicated blocks will be detected and processed when
      // the dead node comes back and send in its full block report.
      if (node.isAlive) {
        blockManager.processOverReplicatedBlocksOnReCommission(node);
      }
    }
  }

  /**
   * Register the given datanode with the namenode. NB: the given
   * registration is mutated and given back to the datanode.
   *
   * @param nodeReg the datanode registration
   * @throws DisallowedDatanodeException if the registration request is
   *    denied because the datanode does not match includes/excludes
   * @throws UnresolvedTopologyException if the registration request is
   *    denied because resolving datanode network location fails.
   */
  public void registerDatanode(DatanodeRegistration nodeReg)
      throws DisallowedDatanodeException, UnresolvedTopologyException {
    InetAddress dnAddress = Server.getRemoteIp();
    if (dnAddress != null) {
      // Mostly called inside an RPC, update ip and peer hostname
      String hostname = dnAddress.getHostName();
      String ip = dnAddress.getHostAddress();
      if (checkIpHostnameInRegistration && !isNameResolved(dnAddress)) {
        // Reject registration of unresolved datanode to prevent performance
        // impact of repetitive DNS lookups later.
        final String message = "hostname cannot be resolved (ip="
            + ip + ", hostname=" + hostname + ")";
        LOG.warn("Unresolved datanode registration: " + message);
        throw new DisallowedDatanodeException(nodeReg, message);
      }
      // update node registration with the ip and hostname from rpc request
      nodeReg.setIpAddr(ip);
      nodeReg.setPeerHostName(hostname);
    }

    try {
      nodeReg.setExportedKeys(blockManager.getBlockKeys());

      // Checks if the node is not on the hosts list. If it is not, then
      // it will be disallowed from registering.
      if (!hostFileManager.isIncluded(nodeReg)) {
        throw new DisallowedDatanodeException(nodeReg);
      }

      NameNode.stateChangeLog.info("BLOCK* registerDatanode: from "
          + nodeReg + " storage " + nodeReg.getDatanodeUuid());

      // nodeS: any existing descriptor with the same storage UUID.
      // nodeN: any existing descriptor at the same ip:xferPort.
      DatanodeDescriptor nodeS = getDatanode(nodeReg.getDatanodeUuid());
      DatanodeDescriptor nodeN = host2DatanodeMap.getDatanodeByXferAddr(
          nodeReg.getIpAddr(), nodeReg.getXferPort());

      if (nodeN != null && nodeN != nodeS) {
        NameNode.LOG.info("BLOCK* registerDatanode: " + nodeN);
        // nodeN previously served a different data storage,
        // which is not served by anybody anymore.
        removeDatanode(nodeN);
        // physically remove node from datanodeMap
        wipeDatanode(nodeN);
        nodeN = null;
      }

      if (nodeS != null) {
        if (nodeN == nodeS) {
          // The same datanode has been just restarted to serve the same data
          // storage. We do not need to remove old data blocks, the delta will
          // be calculated on the next block report from the datanode
          if(NameNode.stateChangeLog.isDebugEnabled()) {
            NameNode.stateChangeLog.debug("BLOCK* registerDatanode: "
                + "node restarted.");
          }
        } else {
          // nodeS is found
          /* The registering datanode is a replacement node for the existing
            data storage, which from now on will be served by a new node.
            If this message repeats, both nodes might have same storageID
            by (insanely rare) random chance. User needs to restart one of the
            nodes with its data cleared (or user can just remove the StorageID
            value in "VERSION" file under the data directory of the datanode,
            but this is might not work if VERSION file format has changed
          */
          NameNode.stateChangeLog.info("BLOCK* registerDatanode: " + nodeS
              + " is replaced by " + nodeReg + " with the same storageID "
              + nodeReg.getDatanodeUuid());
        }

        // On any failure below, undo the partial re-registration so the maps
        // and version counts stay consistent.
        boolean success = false;
        try {
          // update cluster map
          getNetworkTopology().remove(nodeS);
          if(shouldCountVersion(nodeS)) {
            decrementVersionCount(nodeS.getSoftwareVersion());
          }
          nodeS.updateRegInfo(nodeReg);

          nodeS.setSoftwareVersion(nodeReg.getSoftwareVersion());
          nodeS.setDisallowed(false); // Node is in the include list

          // resolve network location
          if(this.rejectUnresolvedTopologyDN) {
            nodeS.setNetworkLocation(resolveNetworkLocation(nodeS));
            nodeS.setDependentHostNames(getNetworkDependencies(nodeS));
          } else {
            nodeS.setNetworkLocation(
                resolveNetworkLocationWithFallBackToDefaultLocation(nodeS));
            nodeS.setDependentHostNames(
                getNetworkDependenciesWithDefault(nodeS));
          }
          getNetworkTopology().add(nodeS);

          // also treat the registration message as a heartbeat
          heartbeatManager.register(nodeS);
          incrementVersionCount(nodeS.getSoftwareVersion());
          checkDecommissioning(nodeS);
          success = true;
        } finally {
          if (!success) {
            removeDatanode(nodeS);
            wipeDatanode(nodeS);
            countSoftwareVersions();
          }
        }
        return;
      }

      // Brand-new storage UUID: create and register a fresh descriptor.
      DatanodeDescriptor nodeDescr
          = new DatanodeDescriptor(nodeReg, NetworkTopology.DEFAULT_RACK);
      boolean success = false;
      try {
        // resolve network location
        if(this.rejectUnresolvedTopologyDN) {
          nodeDescr.setNetworkLocation(resolveNetworkLocation(nodeDescr));
          nodeDescr.setDependentHostNames(getNetworkDependencies(nodeDescr));
        } else {
          nodeDescr.setNetworkLocation(
              resolveNetworkLocationWithFallBackToDefaultLocation(nodeDescr));
          nodeDescr.setDependentHostNames(
              getNetworkDependenciesWithDefault(nodeDescr));
        }
        networktopology.add(nodeDescr);
        nodeDescr.setSoftwareVersion(nodeReg.getSoftwareVersion());

        // register new datanode
        addDatanode(nodeDescr);
        checkDecommissioning(nodeDescr);

        // also treat the registration message as a heartbeat
        // no need to update its timestamp
        // because its is done when the descriptor is created
        heartbeatManager.addDatanode(nodeDescr);
        success = true;
        incrementVersionCount(nodeReg.getSoftwareVersion());
      } finally {
        if (!success) {
          removeDatanode(nodeDescr);
          wipeDatanode(nodeDescr);
          countSoftwareVersions();
        }
      }
    } catch (InvalidTopologyException e) {
      // If the network location is invalid, clear the cached mappings
      // so that we have a chance to re-add this DataNode with the
      // correct network location later.
      List<String> invalidNodeNames = new ArrayList<String>(3);
      // clear cache for nodes in IP or Hostname
      invalidNodeNames.add(nodeReg.getIpAddr());
      invalidNodeNames.add(nodeReg.getHostName());
      invalidNodeNames.add(nodeReg.getPeerHostName());
      dnsToSwitchMapping.reloadCachedMappings(invalidNodeNames);
      throw e;
    }
  }

  /**
   * Rereads conf to get hosts and exclude list file names.
   * Rereads the files to update the hosts and exclude lists. It
   * checks if any of the hosts have changed states:
   */
  public void refreshNodes(final Configuration conf) throws IOException {
    refreshHostsReader(conf);
    namesystem.writeLock();
    try {
      refreshDatanodes();
      countSoftwareVersions();
    } finally {
      namesystem.writeUnlock();
    }
  }

  /** Reread include/exclude files. */
  private void refreshHostsReader(Configuration conf) throws IOException {
    // Reread the conf to get dfs.hosts and dfs.hosts.exclude filenames.
    // Update the file names and refresh internal includes and excludes list.
    if (conf == null) {
      conf = new HdfsConfiguration();
    }
    this.hostFileManager.refresh(conf.get(DFSConfigKeys.DFS_HOSTS, ""),
        conf.get(DFSConfigKeys.DFS_HOSTS_EXCLUDE, ""));
  }

  /**
   * 1. Added to hosts --> no further work needed here.
   * 2. Removed from hosts --> mark AdminState as decommissioned.
   * 3. Added to exclude --> start decommission.
   * 4. Removed from exclude --> stop decommission.
*/
  private void refreshDatanodes() {
    for(DatanodeDescriptor node : datanodeMap.values()) {
      // Check if not include.
      if (!hostFileManager.isIncluded(node)) {
        node.setDisallowed(true); // case 2.
      } else {
        if (hostFileManager.isExcluded(node)) {
          startDecommission(node); // case 3.
        } else {
          stopDecommission(node); // case 4.
        }
      }
    }
  }

  /** @return the number of live datanodes. */
  public int getNumLiveDataNodes() {
    int numLive = 0;
    synchronized (datanodeMap) {
      for(DatanodeDescriptor dn : datanodeMap.values()) {
        if (!isDatanodeDead(dn) ) {
          numLive++;
        }
      }
    }
    return numLive;
  }

  /** @return the number of dead datanodes. */
  public int getNumDeadDataNodes() {
    return getDatanodeListForReport(DatanodeReportType.DEAD).size();
  }

  /** @return list of datanodes where decommissioning is in progress. */
  public List<DatanodeDescriptor> getDecommissioningNodes() {
    // There is no need to take namesystem reader lock as
    // getDatanodeListForReport will synchronize on datanodeMap
    final List<DatanodeDescriptor> decommissioningNodes
        = new ArrayList<DatanodeDescriptor>();
    final List<DatanodeDescriptor> results = getDatanodeListForReport(
        DatanodeReportType.LIVE);
    for(DatanodeDescriptor node : results) {
      if (node.isDecommissionInProgress()) {
        decommissioningNodes.add(node);
      }
    }
    return decommissioningNodes;
  }

  /* Getter and Setter for stale DataNodes related attributes */

  /**
   * Whether stale datanodes should be avoided as targets on the write path.
   * The result of this function may change if the number of stale datanodes
   * eclipses a configurable threshold.
   *
   * @return whether stale datanodes should be avoided on the write path
   */
  public boolean shouldAvoidStaleDataNodesForWrite() {
    // If # stale exceeds maximum staleness ratio, disable stale
    // datanode avoidance on the write path
    return avoidStaleDataNodesForWrite &&
        (numStaleNodes <= heartbeatManager.getLiveDatanodeCount()
            * ratioUseStaleDataNodesForWrite);
  }

  /**
   * @return The time interval used to mark DataNodes as stale.
   */
  long getStaleInterval() {
    return staleInterval;
  }

  /**
   * Set the number of current stale DataNodes. The HeartbeatManager got this
   * number based on DataNodes' heartbeats.
   *
   * @param numStaleNodes
   *          The number of stale DataNodes to be set.
   */
  void setNumStaleNodes(int numStaleNodes) {
    this.numStaleNodes = numStaleNodes;
  }

  /**
   * @return Return the current number of stale DataNodes (detected by
   * HeartbeatManager).
   */
  public int getNumStaleNodes() {
    return this.numStaleNodes;
  }

  /**
   * Get the number of content stale storages.
   */
  public int getNumStaleStorages() {
    return numStaleStorages;
  }

  /**
   * Set the number of content stale storages.
   *
   * @param numStaleStorages The number of content stale storages.
   */
  void setNumStaleStorages(int numStaleStorages) {
    this.numStaleStorages = numStaleStorages;
  }

  /** Fetch live and dead datanodes. */
  public void fetchDatanodes(final List<DatanodeDescriptor> live,
      final List<DatanodeDescriptor> dead, final boolean removeDecommissionNode) {
    if (live == null && dead == null) {
      throw new HadoopIllegalArgumentException("Both live and dead lists are null");
    }

    // There is no need to take namesystem reader lock as
    // getDatanodeListForReport will synchronize on datanodeMap
    final List<DatanodeDescriptor> results =
        getDatanodeListForReport(DatanodeReportType.ALL);
    for(DatanodeDescriptor node : results) {
      if (isDatanodeDead(node)) {
        if (dead != null) {
          dead.add(node);
        }
      } else {
        if (live != null) {
          live.add(node);
        }
      }
    }

    // Optionally hide fully decommissioned nodes no longer referenced by the
    // include/exclude files from both lists.
    if (removeDecommissionNode) {
      if (live != null) {
        removeDecomNodeFromList(live);
      }
      if (dead != null) {
        removeDecomNodeFromList(dead);
      }
    }
  }

  /**
   * @return true if this cluster has ever consisted of multiple racks, even if
   *         it is not now a multi-rack cluster.
   */
  boolean hasClusterEverBeenMultiRack() {
    return hasClusterEverBeenMultiRack;
  }

  /**
   * Check if the cluster now consists of multiple racks. If it does, and this
   * is the first time it's consisted of multiple racks, then process blocks
   * that may now be misreplicated.
* * @param node DN which caused cluster to become multi-rack. Used for logging. */ @VisibleForTesting void checkIfClusterIsNowMultiRack(DatanodeDescriptor node) { if (!hasClusterEverBeenMultiRack && networktopology.getNumOfRacks() > 1) { String message = "DN " + node + " joining cluster has expanded a formerly " + "single-rack cluster to be multi-rack. "; if (namesystem.isPopulatingReplQueues()) { message += "Re-checking all blocks for replication, since they should " + "now be replicated cross-rack"; LOG.info(message); } else { message += "Not checking for mis-replicated blocks because this NN is " + "not yet processing repl queues."; LOG.debug(message); } hasClusterEverBeenMultiRack = true; if (namesystem.isPopulatingReplQueues()) { blockManager.processMisReplicatedBlocks(); } } } /** * Parse a DatanodeID from a hosts file entry * @param hostLine of form [hostname|ip][:port]? * @return DatanodeID constructed from the given string */ private DatanodeID parseDNFromHostsEntry(String hostLine) { DatanodeID dnId; String hostStr; int port; int idx = hostLine.indexOf(':'); if (-1 == idx) { hostStr = hostLine; port = DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT; } else { hostStr = hostLine.substring(0, idx); port = Integer.parseInt(hostLine.substring(idx+1)); } if (InetAddresses.isInetAddress(hostStr)) { // The IP:port is sufficient for listing in a report dnId = new DatanodeID(hostStr, "", "", port, DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT, DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT, DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT); } else { String ipAddr = ""; try { ipAddr = InetAddress.getByName(hostStr).getHostAddress(); } catch (UnknownHostException e) { LOG.warn("Invalid hostname " + hostStr + " in hosts file"); } dnId = new DatanodeID(ipAddr, hostStr, "", port, DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT, DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT, DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT); } return dnId; } /** For generating datanode reports */ public 
List<DatanodeDescriptor> getDatanodeListForReport( final DatanodeReportType type) { final boolean listLiveNodes = type == DatanodeReportType.ALL || type == DatanodeReportType.LIVE; final boolean listDeadNodes = type == DatanodeReportType.ALL || type == DatanodeReportType.DEAD; final boolean listDecommissioningNodes = type == DatanodeReportType.ALL || type == DatanodeReportType.DECOMMISSIONING; ArrayList<DatanodeDescriptor> nodes; final HostFileManager.HostSet foundNodes = new HostFileManager.HostSet(); final HostFileManager.HostSet includedNodes = hostFileManager.getIncludes(); final HostFileManager.HostSet excludedNodes = hostFileManager.getExcludes(); synchronized(datanodeMap) { nodes = new ArrayList<DatanodeDescriptor>(datanodeMap.size()); for (DatanodeDescriptor dn : datanodeMap.values()) { final boolean isDead = isDatanodeDead(dn); final boolean isDecommissioning = dn.isDecommissionInProgress(); if ((listLiveNodes && !isDead) || (listDeadNodes && isDead) || (listDecommissioningNodes && isDecommissioning)) { nodes.add(dn); } foundNodes.add(HostFileManager.resolvedAddressFromDatanodeID(dn)); } } if (listDeadNodes) { for (InetSocketAddress addr : includedNodes) { if (foundNodes.matchedBy(addr) || excludedNodes.match(addr)) { continue; } // The remaining nodes are ones that are referenced by the hosts // files but that we do not know about, ie that we have never // head from. Eg. an entry that is no longer part of the cluster // or a bogus entry was given in the hosts files // // If the host file entry specified the xferPort, we use that. // Otherwise, we guess that it is the default xfer port. // We can't ask the DataNode what it had configured, because it's // dead. DatanodeDescriptor dn = new DatanodeDescriptor(new DatanodeID(addr .getAddress().getHostAddress(), addr.getHostName(), "", addr.getPort() == 0 ? 
defaultXferPort : addr.getPort(), defaultInfoPort, defaultInfoSecurePort, defaultIpcPort)); dn.setLastUpdate(0); // Consider this node dead for reporting nodes.add(dn); } } if (LOG.isDebugEnabled()) { LOG.debug("getDatanodeListForReport with " + "includedNodes = " + hostFileManager.getIncludes() + ", excludedNodes = " + hostFileManager.getExcludes() + ", foundNodes = " + foundNodes + ", nodes = " + nodes); } return nodes; } /** * Checks if name resolution was successful for the given address. If IP * address and host name are the same, then it means name resolution has * failed. As a special case, local addresses are also considered * acceptable. This is particularly important on Windows, where 127.0.0.1 does * not resolve to "localhost". * * @param address InetAddress to check * @return boolean true if name resolution successful or address is local */ private static boolean isNameResolved(InetAddress address) { String hostname = address.getHostName(); String ip = address.getHostAddress(); return !hostname.equals(ip) || NetUtils.isLocalAddress(address); } private void setDatanodeDead(DatanodeDescriptor node) { node.setLastUpdate(0); } /** Handle heartbeat from datanodes. */ public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg, StorageReport[] reports, final String blockPoolId, long cacheCapacity, long cacheUsed, int xceiverCount, int maxTransfers, int failedVolumes ) throws IOException { synchronized (heartbeatManager) { synchronized (datanodeMap) { DatanodeDescriptor nodeinfo = null; try { nodeinfo = getDatanode(nodeReg); } catch(UnregisteredNodeException e) { return new DatanodeCommand[]{RegisterCommand.REGISTER}; } // Check if this datanode should actually be shutdown instead. 
if (nodeinfo != null && nodeinfo.isDisallowed()) { setDatanodeDead(nodeinfo); throw new DisallowedDatanodeException(nodeinfo); } if (nodeinfo == null || !nodeinfo.isAlive) { return new DatanodeCommand[]{RegisterCommand.REGISTER}; } heartbeatManager.updateHeartbeat(nodeinfo, reports, cacheCapacity, cacheUsed, xceiverCount, failedVolumes); // If we are in safemode, do not send back any recovery / replication // requests. Don't even drain the existing queue of work. if(namesystem.isInSafeMode()) { return new DatanodeCommand[0]; } //check lease recovery BlockInfoUnderConstruction[] blocks = nodeinfo .getLeaseRecoveryCommand(Integer.MAX_VALUE); if (blocks != null) { BlockRecoveryCommand brCommand = new BlockRecoveryCommand( blocks.length); for (BlockInfoUnderConstruction b : blocks) { final DatanodeStorageInfo[] storages = b.getExpectedStorageLocations(); // Skip stale nodes during recovery - not heart beated for some time (30s by default). final List<DatanodeStorageInfo> recoveryLocations = new ArrayList<DatanodeStorageInfo>(storages.length); for (int i = 0; i < storages.length; i++) { if (!storages[i].getDatanodeDescriptor().isStale(staleInterval)) { recoveryLocations.add(storages[i]); } } // If we only get 1 replica after eliminating stale nodes, then choose all // replicas for recovery and let the primary data node handle failures. if (recoveryLocations.size() > 1) { if (recoveryLocations.size() != storages.length) { LOG.info("Skipped stale nodes for recovery : " + (storages.length - recoveryLocations.size())); } brCommand.add(new RecoveringBlock( new ExtendedBlock(blockPoolId, b), DatanodeStorageInfo.toDatanodeInfos(recoveryLocations), b.getBlockRecoveryId())); } else { // If too many replicas are stale, then choose all replicas to participate // in block recovery. 
brCommand.add(new RecoveringBlock( new ExtendedBlock(blockPoolId, b), DatanodeStorageInfo.toDatanodeInfos(storages), b.getBlockRecoveryId())); } } return new DatanodeCommand[] { brCommand }; } final List<DatanodeCommand> cmds = new ArrayList<DatanodeCommand>(); //check pending replication List<BlockTargetPair> pendingList = nodeinfo.getReplicationCommand( maxTransfers); if (pendingList != null) { cmds.add(new BlockCommand(DatanodeProtocol.DNA_TRANSFER, blockPoolId, pendingList)); } //check block invalidation Block[] blks = nodeinfo.getInvalidateBlocks(blockInvalidateLimit); if (blks != null) { cmds.add(new BlockCommand(DatanodeProtocol.DNA_INVALIDATE, blockPoolId, blks)); } boolean sendingCachingCommands = false; long nowMs = Time.monotonicNow(); if (shouldSendCachingCommands && ((nowMs - nodeinfo.getLastCachingDirectiveSentTimeMs()) >= timeBetweenResendingCachingDirectivesMs)) { DatanodeCommand pendingCacheCommand = getCacheCommand(nodeinfo.getPendingCached(), nodeinfo, DatanodeProtocol.DNA_CACHE, blockPoolId); if (pendingCacheCommand != null) { cmds.add(pendingCacheCommand); sendingCachingCommands = true; } DatanodeCommand pendingUncacheCommand = getCacheCommand(nodeinfo.getPendingUncached(), nodeinfo, DatanodeProtocol.DNA_UNCACHE, blockPoolId); if (pendingUncacheCommand != null) { cmds.add(pendingUncacheCommand); sendingCachingCommands = true; } if (sendingCachingCommands) { nodeinfo.setLastCachingDirectiveSentTimeMs(nowMs); } } blockManager.addKeyUpdateCommand(cmds, nodeinfo); // check for balancer bandwidth update if (nodeinfo.getBalancerBandwidth() > 0) { cmds.add(new BalancerBandwidthCommand(nodeinfo.getBalancerBandwidth())); // set back to 0 to indicate that datanode has been sent the new value nodeinfo.setBalancerBandwidth(0); } if (!cmds.isEmpty()) { return cmds.toArray(new DatanodeCommand[cmds.size()]); } } } return new DatanodeCommand[0]; } /** * Convert a CachedBlockList into a DatanodeCommand with a list of blocks. 
* * @param list The {@link CachedBlocksList}. This function * clears the list. * @param datanode The datanode. * @param action The action to perform in the command. * @param poolId The block pool id. * @return A DatanodeCommand to be sent back to the DN, or null if * there is nothing to be done. */ private DatanodeCommand getCacheCommand(CachedBlocksList list, DatanodeDescriptor datanode, int action, String poolId) { int length = list.size(); if (length == 0) { return null; } // Read the existing cache commands. long[] blockIds = new long[length]; int i = 0; for (Iterator<CachedBlock> iter = list.iterator(); iter.hasNext(); ) { CachedBlock cachedBlock = iter.next(); blockIds[i++] = cachedBlock.getBlockId(); } return new BlockIdCommand(action, poolId, blockIds); } /** * Tell all datanodes to use a new, non-persistent bandwidth value for * dfs.balance.bandwidthPerSec. * * A system administrator can tune the balancer bandwidth parameter * (dfs.datanode.balance.bandwidthPerSec) dynamically by calling * "dfsadmin -setBalanacerBandwidth newbandwidth", at which point the * following 'bandwidth' variable gets updated with the new value for each * node. Once the heartbeat command is issued to update the value on the * specified datanode, this value will be set back to 0. * * @param bandwidth Blanacer bandwidth in bytes per second for all datanodes. * @throws IOException */ public void setBalancerBandwidth(long bandwidth) throws IOException { synchronized(datanodeMap) { for (DatanodeDescriptor nodeInfo : datanodeMap.values()) { nodeInfo.setBalancerBandwidth(bandwidth); } } } public void markAllDatanodesStale() { LOG.info("Marking all datandoes as stale"); synchronized (datanodeMap) { for (DatanodeDescriptor dn : datanodeMap.values()) { for(DatanodeStorageInfo storage : dn.getStorageInfos()) { storage.markStaleAfterFailover(); } } } } /** * Clear any actions that are queued up to be sent to the DNs * on their next heartbeats. 
This includes block invalidations, * recoveries, and replication requests. */ public void clearPendingQueues() { synchronized (datanodeMap) { for (DatanodeDescriptor dn : datanodeMap.values()) { dn.clearBlockQueues(); } } } /** * Reset the lastCachingDirectiveSentTimeMs field of all the DataNodes we * know about. */ public void resetLastCachingDirectiveSentTime() { synchronized (datanodeMap) { for (DatanodeDescriptor dn : datanodeMap.values()) { dn.setLastCachingDirectiveSentTimeMs(0L); } } } @Override public String toString() { return getClass().getSimpleName() + ": " + host2DatanodeMap; } public void clearPendingCachingCommands() { for (DatanodeDescriptor dn : datanodeMap.values()) { dn.getPendingCached().clear(); dn.getPendingUncached().clear(); } } public void setShouldSendCachingCommands(boolean shouldSendCachingCommands) { this.shouldSendCachingCommands = shouldSendCachingCommands; } }
apache-2.0
TheGenuine/ThirdFloorCoffee
src/de/reneruck/thirdfeCoffee/PlacesSet.java
502
package de.reneruck.thirdfeCoffee;

import java.util.List;

/**
 * Mutable holder pairing a list of {@link Place} entries with the timestamp
 * of the moment that list was last refreshed.
 */
public class PlacesSet {

    // Timestamp of the last refresh; units/epoch are defined by the caller.
    private long lastUpdate;

    // The places captured at {@code lastUpdate}; may be null until set.
    private List<Place> places;

    /** @return the timestamp recorded for the last update */
    public long getLastUpdate() {
        return lastUpdate;
    }

    /** @param lastUpdate timestamp to record for this snapshot */
    public void setLastUpdate(long lastUpdate) {
        this.lastUpdate = lastUpdate;
    }

    /** @return the places in this snapshot; {@code null} if never assigned */
    public List<Place> getPlaces() {
        return places;
    }

    /** @param places the places belonging to this snapshot */
    public void setPlaces(List<Place> places) {
        this.places = places;
    }
}
apache-2.0
vam-google/google-cloud-java
google-api-grpc/proto-google-cloud-vision-v1/src/main/java/com/google/cloud/vision/v1/ImportProductSetsRequestOrBuilder.java
1623
// Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/cloud/vision/v1/product_search_service.proto package com.google.cloud.vision.v1; public interface ImportProductSetsRequestOrBuilder extends // @@protoc_insertion_point(interface_extends:google.cloud.vision.v1.ImportProductSetsRequest) com.google.protobuf.MessageOrBuilder { /** * * * <pre> * The project in which the ProductSets should be imported. * Format is `projects/PROJECT_ID/locations/LOC_ID`. * </pre> * * <code>string parent = 1;</code> */ java.lang.String getParent(); /** * * * <pre> * The project in which the ProductSets should be imported. * Format is `projects/PROJECT_ID/locations/LOC_ID`. * </pre> * * <code>string parent = 1;</code> */ com.google.protobuf.ByteString getParentBytes(); /** * * * <pre> * The input content for the list of requests. * </pre> * * <code>.google.cloud.vision.v1.ImportProductSetsInputConfig input_config = 2;</code> */ boolean hasInputConfig(); /** * * * <pre> * The input content for the list of requests. * </pre> * * <code>.google.cloud.vision.v1.ImportProductSetsInputConfig input_config = 2;</code> */ com.google.cloud.vision.v1.ImportProductSetsInputConfig getInputConfig(); /** * * * <pre> * The input content for the list of requests. * </pre> * * <code>.google.cloud.vision.v1.ImportProductSetsInputConfig input_config = 2;</code> */ com.google.cloud.vision.v1.ImportProductSetsInputConfigOrBuilder getInputConfigOrBuilder(); }
apache-2.0
ofg-dmitrys/micro-infra-spring
swagger/micro-infra-spring-swagger/src/main/java/repackaged/com/mangofactory/swagger/readers/operation/OperationHttpMethodReader.java
1433
package repackaged.com.mangofactory.swagger.readers.operation;

import repackaged.com.mangofactory.swagger.scanners.RequestMappingContext;
import com.wordnik.swagger.annotations.ApiOperation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.util.StringUtils;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.method.HandlerMethod;

import java.util.Arrays;

/**
 * Resolves the HTTP method to advertise for a Swagger operation.
 * <p>
 * Defaults to the {@code currentHttpMethod} stored in the context; an
 * {@link ApiOperation#httpMethod()} override is honoured only when it names a
 * valid {@link RequestMethod}, otherwise the override is logged and ignored.
 * The chosen value is stored under the {@code "httpRequestMethod"} context key.
 */
public class OperationHttpMethodReader implements RequestMappingReader {
  private static final Logger log = LoggerFactory.getLogger(OperationHttpMethodReader.class);

  @Override
  public void execute(RequestMappingContext context) {
    RequestMethod currentHttpMethod = (RequestMethod) context.get("currentHttpMethod");
    HandlerMethod handlerMethod = context.getHandlerMethod();
    // Fall back to the request method currently being processed.
    String requestMethod = currentHttpMethod.toString();
    ApiOperation apiOperationAnnotation = handlerMethod.getMethodAnnotation(ApiOperation.class);
    if (apiOperationAnnotation != null && StringUtils.hasText(apiOperationAnnotation.httpMethod())) {
      String apiMethod = apiOperationAnnotation.httpMethod();
      try {
        // valueOf doubles as validation: an unknown method name throws.
        RequestMethod.valueOf(apiMethod);
        requestMethod = apiMethod;
      } catch (IllegalArgumentException e) {
        // Fix: the original concatenated the RequestMethod[] directly, which
        // printed "[Lorg.springframework...@hash" and also ran the two
        // sentences together. Arrays.toString renders the actual values.
        log.error("Invalid http method: " + apiMethod
            + ". Valid ones are " + Arrays.toString(RequestMethod.values()), e);
      }
    }
    context.put("httpRequestMethod", requestMethod);
  }
}
apache-2.0
dagnir/aws-sdk-java
aws-java-sdk-waf/src/main/java/com/amazonaws/services/waf/model/waf_regional/transform/GetSizeConstraintSetRequestProtocolMarshaller.java
2740
/*
 * Copyright 2012-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.waf.model.waf_regional.transform;

import javax.annotation.Generated;

import com.amazonaws.SdkClientException;
import com.amazonaws.Request;
import com.amazonaws.http.HttpMethodName;
import com.amazonaws.services.waf.model.*;
import com.amazonaws.transform.Marshaller;
import com.amazonaws.protocol.*;
import com.amazonaws.annotation.SdkInternalApi;

/**
 * GetSizeConstraintSetRequest Marshaller.
 * <p>
 * Generated code (aws-java-sdk-code-generator) — prefer regenerating over hand edits.
 * Translates a {@code GetSizeConstraintSetRequest} into an AWS-JSON HTTP request
 * for the WAF Regional {@code GetSizeConstraintSet} operation.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
@SdkInternalApi
public class GetSizeConstraintSetRequestProtocolMarshaller implements Marshaller<Request<GetSizeConstraintSetRequest>, GetSizeConstraintSetRequest> {

    // Static description of the wire binding: AWS-JSON protocol, POST to "/",
    // with the operation identifier carried in the payload header.
    private static final OperationInfo SDK_OPERATION_BINDING = OperationInfo.builder().protocol(Protocol.AWS_JSON).requestUri("/")
            .httpMethodName(HttpMethodName.POST).hasExplicitPayloadMember(false).hasPayloadMembers(true)
            .operationIdentifier("AWSWAF_Regional_20161128.GetSizeConstraintSet").serviceName("AWSWAFRegional").build();

    private final com.amazonaws.protocol.json.SdkJsonProtocolFactory protocolFactory;

    /** @param protocolFactory factory used to create the JSON protocol marshaller */
    public GetSizeConstraintSetRequestProtocolMarshaller(com.amazonaws.protocol.json.SdkJsonProtocolFactory protocolFactory) {
        this.protocolFactory = protocolFactory;
    }

    /**
     * Marshalls the given request into an HTTP {@link Request}.
     *
     * @param getSizeConstraintSetRequest the request to marshall; must not be null
     * @return the marshalled HTTP request
     * @throws SdkClientException if the argument is null or marshalling fails
     */
    public Request<GetSizeConstraintSetRequest> marshall(GetSizeConstraintSetRequest getSizeConstraintSetRequest) {

        if (getSizeConstraintSetRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }

        try {
            final ProtocolRequestMarshaller<GetSizeConstraintSetRequest> protocolMarshaller = protocolFactory.createProtocolMarshaller(SDK_OPERATION_BINDING,
                    getSizeConstraintSetRequest);
            // Three-phase marshalling: open, write members, finish/build.
            protocolMarshaller.startMarshalling();
            GetSizeConstraintSetRequestMarshaller.getInstance().marshall(getSizeConstraintSetRequest, protocolMarshaller);
            return protocolMarshaller.finishMarshalling();
        } catch (Exception e) {
            // Wrap any failure, preserving the cause for callers.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
apache-2.0
camilesing/zstack
sdk/src/main/java/org/zstack/sdk/GetEcsInstanceVncUrlAction.java
2453
package org.zstack.sdk;

import java.util.HashMap;
import java.util.Map;

import org.zstack.sdk.*;

/**
 * SDK action that fetches the VNC URL of an Aliyun ECS instance via
 * {@code GET /hybrid/aliyun/ecs-vnc/{uuid}}. Supports both blocking
 * ({@link #call()}) and asynchronous ({@link #call(Completion)}) invocation.
 * NOTE(review): follows the auto-generated ZStack SDK action pattern — keep
 * edits in sync with the generator if one is in use.
 */
public class GetEcsInstanceVncUrlAction extends AbstractAction {

    // Parameter metadata maps; empty here but required by AbstractAction's contract.
    private static final HashMap<String, Parameter> parameterMap = new HashMap<>();

    private static final HashMap<String, Parameter> nonAPIParameterMap = new HashMap<>();

    /** Outcome of the action: exactly one of {@link #error} or {@link #value} is meaningful. */
    public static class Result {
        public ErrorCode error;
        public org.zstack.sdk.GetEcsInstanceVncUrlResult value;

        /**
         * Converts an error outcome into an {@link ApiException}; returns
         * {@code this} unchanged on success, allowing fluent chaining.
         */
        public Result throwExceptionIfError() {
            if (error != null) {
                throw new ApiException(
                    String.format("error[code: %s, description: %s, details: %s]", error.code, error.description, error.details)
                );
            }

            return this;
        }
    }

    // UUID of the target ECS instance (substituted into the REST path).
    @Param(required = true, nonempty = false, nullElements = false, emptyString = true, noTrim = false)
    public java.lang.String uuid;

    @Param(required = false)
    public java.util.List systemTags;

    @Param(required = false)
    public java.util.List userTags;

    // Session token obtained from a prior login; required by the endpoint.
    @Param(required = true)
    public String sessionId;

    // Translates the raw ApiResult into a Result, substituting an empty
    // result object when the call succeeded but returned no payload.
    private Result makeResult(ApiResult res) {
        Result ret = new Result();
        if (res.error != null) {
            ret.error = res.error;
            return ret;
        }

        org.zstack.sdk.GetEcsInstanceVncUrlResult value = res.getResult(org.zstack.sdk.GetEcsInstanceVncUrlResult.class);
        ret.value = value == null ? new org.zstack.sdk.GetEcsInstanceVncUrlResult() : value;

        return ret;
    }

    /** Executes the action synchronously, blocking until a result is available. */
    public Result call() {
        ApiResult res = ZSClient.call(this);
        return makeResult(res);
    }

    /** Executes the action asynchronously; the completion receives the converted result. */
    public void call(final Completion<Result> completion) {
        ZSClient.call(this, new InternalCompletion() {
            @Override
            public void complete(ApiResult res) {
                completion.complete(makeResult(res));
            }
        });
    }

    protected Map<String, Parameter> getParameterMap() {
        return parameterMap;
    }

    protected Map<String, Parameter> getNonAPIParameterMap() {
        return nonAPIParameterMap;
    }

    // Describes the REST binding used by ZSClient: a session-scoped GET with
    // no polling and no wrapping parameter name.
    protected RestInfo getRestInfo() {
        RestInfo info = new RestInfo();
        info.httpMethod = "GET";
        info.path = "/hybrid/aliyun/ecs-vnc/{uuid}";
        info.needSession = true;
        info.needPoll = false;
        info.parameterName = "";
        return info;
    }
}
apache-2.0
francisliu/hbase
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
113639
/* * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; import java.io.InterruptedIOException; import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.NavigableSet; import java.util.Optional; import java.util.OptionalDouble; import java.util.OptionalInt; import java.util.OptionalLong; import java.util.Set; import java.util.concurrent.Callable; import java.util.concurrent.CompletionService; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorCompletionService; import java.util.concurrent.Future; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Predicate; import java.util.function.ToLongFunction; 
import java.util.stream.Collectors; import java.util.stream.LongStream; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CompoundConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MemoryCompactionPolicy; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.FailedArchiveException; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.conf.ConfigurationManager; import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.crypto.Encryption; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder; import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl; import org.apache.hadoop.hbase.io.hfile.HFileScanner; import org.apache.hadoop.hbase.io.hfile.InvalidHFileException; import org.apache.hadoop.hbase.log.HBaseMarkers; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.quotas.RegionSizeStore; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker; import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl; 
import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor; import org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours; import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher; import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController; import org.apache.hadoop.hbase.regionserver.wal.WALUtil; import org.apache.hadoop.hbase.security.EncryptionUtil; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ChecksumType; import org.apache.hadoop.hbase.util.ClassSize; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableCollection; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; import org.apache.hbase.thirdparty.org.apache.commons.collections4.IterableUtils; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor; /** * A Store holds a column family in a Region. Its a memstore and a set of zero * or more StoreFiles, which stretch backwards over time. 
* * <p>There's no reason to consider append-logging at this level; all logging * and locking is handled at the HRegion level. Store just provides * services to manage sets of StoreFiles. One of the most important of those * services is compaction services where files are aggregated once they pass * a configurable threshold. * * <p>Locking and transactions are handled at a higher level. This API should * not be called directly but by an HRegion manager. */ @InterfaceAudience.Private public class HStore implements Store, HeapSize, StoreConfigInformation, PropagatingConfigurationObserver { public static final String MEMSTORE_CLASS_NAME = "hbase.regionserver.memstore.class"; public static final String COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY = "hbase.server.compactchecker.interval.multiplier"; public static final String BLOCKING_STOREFILES_KEY = "hbase.hstore.blockingStoreFiles"; public static final String BLOCK_STORAGE_POLICY_KEY = "hbase.hstore.block.storage.policy"; // keep in accordance with HDFS default storage policy public static final String DEFAULT_BLOCK_STORAGE_POLICY = "HOT"; public static final int DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER = 1000; public static final int DEFAULT_BLOCKING_STOREFILE_COUNT = 16; private static final Logger LOG = LoggerFactory.getLogger(HStore.class); protected final MemStore memstore; // This stores directory in the filesystem. protected final HRegion region; private final ColumnFamilyDescriptor family; private final HRegionFileSystem fs; protected Configuration conf; protected CacheConfig cacheConf; private long lastCompactSize = 0; volatile boolean forceMajor = false; /* how many bytes to write between status checks */ static int closeCheckInterval = 0; private AtomicLong storeSize = new AtomicLong(); private AtomicLong totalUncompressedBytes = new AtomicLong(); private boolean cacheOnWriteLogged; /** * RWLock for store operations. 
* Locked in shared mode when the list of component stores is looked at: * - all reads/writes to table data * - checking for split * Locked in exclusive mode when the list of component stores is modified: * - closing * - completing a compaction */ final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); /** * Lock specific to archiving compacted store files. This avoids races around * the combination of retrieving the list of compacted files and moving them to * the archive directory. Since this is usually a background process (other than * on close), we don't want to handle this with the store write lock, which would * block readers and degrade performance. * * Locked by: * - CompactedHFilesDispatchHandler via closeAndArchiveCompactedFiles() * - close() */ final ReentrantLock archiveLock = new ReentrantLock(); private final boolean verifyBulkLoads; /** * Use this counter to track concurrent puts. If TRACE-log is enabled, if we are over the * threshold set by hbase.region.store.parallel.put.print.threshold (Default is 50) we will * log a message that identifies the Store experience this high-level of concurrency. */ private final AtomicInteger currentParallelPutCount = new AtomicInteger(0); private final int parallelPutCountPrintThreshold; private ScanInfo scanInfo; // All access must be synchronized. // TODO: ideally, this should be part of storeFileManager, as we keep passing this to it. private final List<HStoreFile> filesCompacting = Lists.newArrayList(); // All access must be synchronized. 
private final Set<ChangedReadersObserver> changedReaderObservers = Collections.newSetFromMap(new ConcurrentHashMap<ChangedReadersObserver, Boolean>()); protected final int blocksize; private HFileDataBlockEncoder dataBlockEncoder; /** Checksum configuration */ protected ChecksumType checksumType; protected int bytesPerChecksum; // Comparing KeyValues protected final CellComparator comparator; final StoreEngine<?, ?, ?, ?> storeEngine; private static final AtomicBoolean offPeakCompactionTracker = new AtomicBoolean(); private volatile OffPeakHours offPeakHours; private static final int DEFAULT_FLUSH_RETRIES_NUMBER = 10; private int flushRetriesNumber; private int pauseTime; private long blockingFileCount; private int compactionCheckMultiplier; protected Encryption.Context cryptoContext = Encryption.Context.NONE; private AtomicLong flushedCellsCount = new AtomicLong(); private AtomicLong compactedCellsCount = new AtomicLong(); private AtomicLong majorCompactedCellsCount = new AtomicLong(); private AtomicLong flushedCellsSize = new AtomicLong(); private AtomicLong flushedOutputFileSize = new AtomicLong(); private AtomicLong compactedCellsSize = new AtomicLong(); private AtomicLong majorCompactedCellsSize = new AtomicLong(); /** * Constructor * @param family HColumnDescriptor for this column * @param confParam configuration object failed. Can be null. */ protected HStore(final HRegion region, final ColumnFamilyDescriptor family, final Configuration confParam, boolean warmup) throws IOException { this.fs = region.getRegionFileSystem(); // Assemble the store's home directory and Ensure it exists. fs.createStoreDir(family.getNameAsString()); this.region = region; this.family = family; // 'conf' renamed to 'confParam' b/c we use this.conf in the constructor // CompoundConfiguration will look for keys in reverse order of addition, so we'd // add global config first, then table and cf overrides, then cf metadata. 
this.conf = new CompoundConfiguration() .add(confParam) .addBytesMap(region.getTableDescriptor().getValues()) .addStringMap(family.getConfiguration()) .addBytesMap(family.getValues()); this.blocksize = family.getBlocksize(); // set block storage policy for store directory String policyName = family.getStoragePolicy(); if (null == policyName) { policyName = this.conf.get(BLOCK_STORAGE_POLICY_KEY, DEFAULT_BLOCK_STORAGE_POLICY); } this.fs.setStoragePolicy(family.getNameAsString(), policyName.trim()); this.dataBlockEncoder = new HFileDataBlockEncoderImpl(family.getDataBlockEncoding()); this.comparator = region.getCellComparator(); // used by ScanQueryMatcher long timeToPurgeDeletes = Math.max(conf.getLong("hbase.hstore.time.to.purge.deletes", 0), 0); LOG.trace("Time to purge deletes set to {}ms in store {}", timeToPurgeDeletes, this); // Get TTL long ttl = determineTTLFromFamily(family); // Why not just pass a HColumnDescriptor in here altogether? Even if have // to clone it? scanInfo = new ScanInfo(conf, family, ttl, timeToPurgeDeletes, this.comparator); this.memstore = getMemstore(); this.offPeakHours = OffPeakHours.getInstance(conf); // Setting up cache configuration for this family createCacheConf(family); this.verifyBulkLoads = conf.getBoolean("hbase.hstore.bulkload.verify", false); this.blockingFileCount = conf.getInt(BLOCKING_STOREFILES_KEY, DEFAULT_BLOCKING_STOREFILE_COUNT); this.compactionCheckMultiplier = conf.getInt( COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY, DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER); if (this.compactionCheckMultiplier <= 0) { LOG.error("Compaction check period multiplier must be positive, setting default: {}", DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER); this.compactionCheckMultiplier = DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER; } if (HStore.closeCheckInterval == 0) { HStore.closeCheckInterval = conf.getInt( "hbase.hstore.close.check.interval", 10*1000*1000 /* 10 MB */); } this.storeEngine = createStoreEngine(this, this.conf, 
this.comparator); List<HStoreFile> hStoreFiles = loadStoreFiles(warmup); // Move the storeSize calculation out of loadStoreFiles() method, because the secondary read // replica's refreshStoreFiles() will also use loadStoreFiles() to refresh its store files and // update the storeSize in the completeCompaction(..) finally (just like compaction) , so // no need calculate the storeSize twice. this.storeSize.addAndGet(getStorefilesSize(hStoreFiles, sf -> true)); this.totalUncompressedBytes.addAndGet(getTotalUncompressedBytes(hStoreFiles)); this.storeEngine.getStoreFileManager().loadFiles(hStoreFiles); // Initialize checksum type from name. The names are CRC32, CRC32C, etc. this.checksumType = getChecksumType(conf); // Initialize bytes per checksum this.bytesPerChecksum = getBytesPerChecksum(conf); flushRetriesNumber = conf.getInt( "hbase.hstore.flush.retries.number", DEFAULT_FLUSH_RETRIES_NUMBER); pauseTime = conf.getInt(HConstants.HBASE_SERVER_PAUSE, HConstants.DEFAULT_HBASE_SERVER_PAUSE); if (flushRetriesNumber <= 0) { throw new IllegalArgumentException( "hbase.hstore.flush.retries.number must be > 0, not " + flushRetriesNumber); } cryptoContext = EncryptionUtil.createEncryptionContext(conf, family); int confPrintThreshold = this.conf.getInt("hbase.region.store.parallel.put.print.threshold", 50); if (confPrintThreshold < 10) { confPrintThreshold = 10; } this.parallelPutCountPrintThreshold = confPrintThreshold; LOG.info("Store={}, memstore type={}, storagePolicy={}, verifyBulkLoads={}, " + "parallelPutCountPrintThreshold={}, encoding={}, compression={}", getColumnFamilyName(), memstore.getClass().getSimpleName(), policyName, verifyBulkLoads, parallelPutCountPrintThreshold, family.getDataBlockEncoding(), family.getCompressionType()); cacheOnWriteLogged = false; } /** * @return MemStore Instance to use in this store. */ private MemStore getMemstore() { MemStore ms = null; // Check if in-memory-compaction configured. Note MemoryCompactionPolicy is an enum! 
MemoryCompactionPolicy inMemoryCompaction = null; if (this.getTableName().isSystemTable()) { inMemoryCompaction = MemoryCompactionPolicy.valueOf( conf.get("hbase.systemtables.compacting.memstore.type", "NONE")); } else { inMemoryCompaction = family.getInMemoryCompaction(); } if (inMemoryCompaction == null) { inMemoryCompaction = MemoryCompactionPolicy.valueOf(conf.get(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT).toUpperCase()); } switch (inMemoryCompaction) { case NONE: ms = ReflectionUtils.newInstance(DefaultMemStore.class, new Object[] { conf, this.comparator, this.getHRegion().getRegionServicesForStores()}); break; default: Class<? extends CompactingMemStore> clz = conf.getClass(MEMSTORE_CLASS_NAME, CompactingMemStore.class, CompactingMemStore.class); ms = ReflectionUtils.newInstance(clz, new Object[]{conf, this.comparator, this, this.getHRegion().getRegionServicesForStores(), inMemoryCompaction}); } return ms; } /** * Creates the cache config. * @param family The current column family. */ protected void createCacheConf(final ColumnFamilyDescriptor family) { this.cacheConf = new CacheConfig(conf, family, region.getBlockCache(), region.getRegionServicesForStores().getByteBuffAllocator()); } /** * Creates the store engine configured for the given Store. * @param store The store. An unfortunate dependency needed due to it * being passed to coprocessors via the compactor. * @param conf Store configuration. * @param kvComparator KVComparator for storeFileManager. * @return StoreEngine to use. */ protected StoreEngine<?, ?, ?, ?> createStoreEngine(HStore store, Configuration conf, CellComparator kvComparator) throws IOException { return StoreEngine.create(store, conf, comparator); } /** * @return TTL in seconds of the specified family */ public static long determineTTLFromFamily(final ColumnFamilyDescriptor family) { // HCD.getTimeToLive returns ttl in seconds. Convert to milliseconds. 
long ttl = family.getTimeToLive(); if (ttl == HConstants.FOREVER) { // Default is unlimited ttl. ttl = Long.MAX_VALUE; } else if (ttl == -1) { ttl = Long.MAX_VALUE; } else { // Second -> ms adjust for user data ttl *= 1000; } return ttl; } @Override public String getColumnFamilyName() { return this.family.getNameAsString(); } @Override public TableName getTableName() { return this.getRegionInfo().getTable(); } @Override public FileSystem getFileSystem() { return this.fs.getFileSystem(); } public HRegionFileSystem getRegionFileSystem() { return this.fs; } /* Implementation of StoreConfigInformation */ @Override public long getStoreFileTtl() { // TTL only applies if there's no MIN_VERSIONs setting on the column. return (this.scanInfo.getMinVersions() == 0) ? this.scanInfo.getTtl() : Long.MAX_VALUE; } @Override public long getMemStoreFlushSize() { // TODO: Why is this in here? The flushsize of the region rather than the store? St.Ack return this.region.memstoreFlushSize; } @Override public MemStoreSize getFlushableSize() { return this.memstore.getFlushableSize(); } @Override public MemStoreSize getSnapshotSize() { return this.memstore.getSnapshotSize(); } @Override public long getCompactionCheckMultiplier() { return this.compactionCheckMultiplier; } @Override public long getBlockingFileCount() { return blockingFileCount; } /* End implementation of StoreConfigInformation */ /** * Returns the configured bytesPerChecksum value. * @param conf The configuration * @return The bytesPerChecksum that is set in the configuration */ public static int getBytesPerChecksum(Configuration conf) { return conf.getInt(HConstants.BYTES_PER_CHECKSUM, HFile.DEFAULT_BYTES_PER_CHECKSUM); } /** * Returns the configured checksum algorithm. 
* @param conf The configuration * @return The checksum algorithm that is set in the configuration */ public static ChecksumType getChecksumType(Configuration conf) { String checksumName = conf.get(HConstants.CHECKSUM_TYPE_NAME); if (checksumName == null) { return ChecksumType.getDefaultChecksumType(); } else { return ChecksumType.nameToType(checksumName); } } /** * @return how many bytes to write between status checks */ public static int getCloseCheckInterval() { return closeCheckInterval; } @Override public ColumnFamilyDescriptor getColumnFamilyDescriptor() { return this.family; } @Override public OptionalLong getMaxSequenceId() { return StoreUtils.getMaxSequenceIdInList(this.getStorefiles()); } @Override public OptionalLong getMaxMemStoreTS() { return StoreUtils.getMaxMemStoreTSInList(this.getStorefiles()); } /** * @param tabledir {@link Path} to where the table is being stored * @param hri {@link RegionInfo} for the region. * @param family {@link ColumnFamilyDescriptor} describing the column family * @return Path to family/Store home directory. * @deprecated Since 05/05/2013, HBase-7808, hbase-1.0.0 */ @Deprecated public static Path getStoreHomedir(final Path tabledir, final RegionInfo hri, final byte[] family) { return getStoreHomedir(tabledir, hri.getEncodedName(), family); } /** * @param tabledir {@link Path} to where the table is being stored * @param encodedName Encoded region name. * @param family {@link ColumnFamilyDescriptor} describing the column family * @return Path to family/Store home directory. * @deprecated Since 05/05/2013, HBase-7808, hbase-1.0.0 */ @Deprecated public static Path getStoreHomedir(final Path tabledir, final String encodedName, final byte[] family) { return new Path(tabledir, new Path(encodedName, Bytes.toString(family))); } /** * @return the data block encoder */ public HFileDataBlockEncoder getDataBlockEncoder() { return dataBlockEncoder; } /** * Should be used only in tests. 
* @param blockEncoder the block delta encoder to use */ void setDataBlockEncoderInTest(HFileDataBlockEncoder blockEncoder) { this.dataBlockEncoder = blockEncoder; } /** * Creates an unsorted list of StoreFile loaded in parallel * from the given directory. */ private List<HStoreFile> loadStoreFiles(boolean warmup) throws IOException { Collection<StoreFileInfo> files = fs.getStoreFiles(getColumnFamilyName()); return openStoreFiles(files, warmup); } private List<HStoreFile> openStoreFiles(Collection<StoreFileInfo> files, boolean warmup) throws IOException { if (CollectionUtils.isEmpty(files)) { return Collections.emptyList(); } // initialize the thread pool for opening store files in parallel.. ThreadPoolExecutor storeFileOpenerThreadPool = this.region.getStoreFileOpenAndCloseThreadPool("StoreFileOpener-" + this.region.getRegionInfo().getEncodedName() + "-" + this.getColumnFamilyName()); CompletionService<HStoreFile> completionService = new ExecutorCompletionService<>(storeFileOpenerThreadPool); int totalValidStoreFile = 0; for (StoreFileInfo storeFileInfo : files) { // open each store file in parallel completionService.submit(() -> this.createStoreFileAndReader(storeFileInfo)); totalValidStoreFile++; } Set<String> compactedStoreFiles = new HashSet<>(); ArrayList<HStoreFile> results = new ArrayList<>(files.size()); IOException ioe = null; try { for (int i = 0; i < totalValidStoreFile; i++) { try { HStoreFile storeFile = completionService.take().get(); if (storeFile != null) { LOG.debug("loaded {}", storeFile); results.add(storeFile); compactedStoreFiles.addAll(storeFile.getCompactedStoreFiles()); } } catch (InterruptedException e) { if (ioe == null) { ioe = new InterruptedIOException(e.getMessage()); } } catch (ExecutionException e) { if (ioe == null) { ioe = new IOException(e.getCause()); } } } } finally { storeFileOpenerThreadPool.shutdownNow(); } if (ioe != null) { // close StoreFile readers boolean evictOnClose = cacheConf != null? 
cacheConf.shouldEvictOnClose(): true; for (HStoreFile file : results) { try { if (file != null) { file.closeStoreFile(evictOnClose); } } catch (IOException e) { LOG.warn("Could not close store file", e); } } throw ioe; } // Should not archive the compacted store files when region warmup. See HBASE-22163. if (!warmup) { // Remove the compacted files from result List<HStoreFile> filesToRemove = new ArrayList<>(compactedStoreFiles.size()); for (HStoreFile storeFile : results) { if (compactedStoreFiles.contains(storeFile.getPath().getName())) { LOG.warn("Clearing the compacted storefile {} from this store", storeFile); storeFile.getReader().close(true); filesToRemove.add(storeFile); } } results.removeAll(filesToRemove); if (!filesToRemove.isEmpty() && this.isPrimaryReplicaStore()) { LOG.debug("Moving the files {} to archive", filesToRemove); this.fs.removeStoreFiles(this.getColumnFamilyDescriptor().getNameAsString(), filesToRemove); } } return results; } @Override public void refreshStoreFiles() throws IOException { Collection<StoreFileInfo> newFiles = fs.getStoreFiles(getColumnFamilyName()); refreshStoreFilesInternal(newFiles); } /** * Replaces the store files that the store has with the given files. Mainly used by secondary * region replicas to keep up to date with the primary region files. */ public void refreshStoreFiles(Collection<String> newFiles) throws IOException { List<StoreFileInfo> storeFiles = new ArrayList<>(newFiles.size()); for (String file : newFiles) { storeFiles.add(fs.getStoreFileInfo(getColumnFamilyName(), file)); } refreshStoreFilesInternal(storeFiles); } /** * Checks the underlying store files, and opens the files that have not * been opened, and removes the store file readers for store files no longer * available. Mainly used by secondary region replicas to keep up to date with * the primary region files. 
*/ private void refreshStoreFilesInternal(Collection<StoreFileInfo> newFiles) throws IOException { StoreFileManager sfm = storeEngine.getStoreFileManager(); Collection<HStoreFile> currentFiles = sfm.getStorefiles(); Collection<HStoreFile> compactedFiles = sfm.getCompactedfiles(); if (currentFiles == null) { currentFiles = Collections.emptySet(); } if (newFiles == null) { newFiles = Collections.emptySet(); } if (compactedFiles == null) { compactedFiles = Collections.emptySet(); } HashMap<StoreFileInfo, HStoreFile> currentFilesSet = new HashMap<>(currentFiles.size()); for (HStoreFile sf : currentFiles) { currentFilesSet.put(sf.getFileInfo(), sf); } HashMap<StoreFileInfo, HStoreFile> compactedFilesSet = new HashMap<>(compactedFiles.size()); for (HStoreFile sf : compactedFiles) { compactedFilesSet.put(sf.getFileInfo(), sf); } Set<StoreFileInfo> newFilesSet = new HashSet<StoreFileInfo>(newFiles); // Exclude the files that have already been compacted newFilesSet = Sets.difference(newFilesSet, compactedFilesSet.keySet()); Set<StoreFileInfo> toBeAddedFiles = Sets.difference(newFilesSet, currentFilesSet.keySet()); Set<StoreFileInfo> toBeRemovedFiles = Sets.difference(currentFilesSet.keySet(), newFilesSet); if (toBeAddedFiles.isEmpty() && toBeRemovedFiles.isEmpty()) { return; } LOG.info("Refreshing store files for region " + this.getRegionInfo().getRegionNameAsString() + " files to add: " + toBeAddedFiles + " files to remove: " + toBeRemovedFiles); Set<HStoreFile> toBeRemovedStoreFiles = new HashSet<>(toBeRemovedFiles.size()); for (StoreFileInfo sfi : toBeRemovedFiles) { toBeRemovedStoreFiles.add(currentFilesSet.get(sfi)); } // try to open the files List<HStoreFile> openedFiles = openStoreFiles(toBeAddedFiles, false); // propogate the file changes to the underlying store file manager replaceStoreFiles(toBeRemovedStoreFiles, openedFiles); //won't throw an exception // Advance the memstore read point to be at least the new store files seqIds so that // readers might pick it 
up. This assumes that the store is not getting any writes (otherwise // in-flight transactions might be made visible) if (!toBeAddedFiles.isEmpty()) { // we must have the max sequence id here as we do have several store files region.getMVCC().advanceTo(this.getMaxSequenceId().getAsLong()); } completeCompaction(toBeRemovedStoreFiles); } @VisibleForTesting protected HStoreFile createStoreFileAndReader(final Path p) throws IOException { StoreFileInfo info = new StoreFileInfo(conf, this.getFileSystem(), p, isPrimaryReplicaStore()); return createStoreFileAndReader(info); } private HStoreFile createStoreFileAndReader(StoreFileInfo info) throws IOException { info.setRegionCoprocessorHost(this.region.getCoprocessorHost()); HStoreFile storeFile = new HStoreFile(info, this.family.getBloomFilterType(), this.cacheConf); storeFile.initReader(); return storeFile; } /** * This message intends to inform the MemStore that next coming updates * are going to be part of the replaying edits from WAL */ public void startReplayingFromWAL(){ this.memstore.startReplayingFromWAL(); } /** * This message intends to inform the MemStore that the replaying edits from WAL * are done */ public void stopReplayingFromWAL(){ this.memstore.stopReplayingFromWAL(); } /** * Adds a value to the memstore */ public void add(final Cell cell, MemStoreSizing memstoreSizing) { lock.readLock().lock(); try { if (this.currentParallelPutCount.getAndIncrement() > this.parallelPutCountPrintThreshold) { LOG.trace(this.getTableName() + "tableName={}, encodedName={}, columnFamilyName={} is " + "too busy!", this.getRegionInfo().getEncodedName(), this .getColumnFamilyName()); } this.memstore.add(cell, memstoreSizing); } finally { lock.readLock().unlock(); currentParallelPutCount.decrementAndGet(); } } /** * Adds the specified value to the memstore */ public void add(final Iterable<Cell> cells, MemStoreSizing memstoreSizing) { lock.readLock().lock(); try { if (this.currentParallelPutCount.getAndIncrement() > 
this.parallelPutCountPrintThreshold) { LOG.trace(this.getTableName() + "tableName={}, encodedName={}, columnFamilyName={} is " + "too busy!", this.getRegionInfo().getEncodedName(), this .getColumnFamilyName()); } memstore.add(cells, memstoreSizing); } finally { lock.readLock().unlock(); currentParallelPutCount.decrementAndGet(); } } @Override public long timeOfOldestEdit() { return memstore.timeOfOldestEdit(); } /** * @return All store files. */ @Override public Collection<HStoreFile> getStorefiles() { return this.storeEngine.getStoreFileManager().getStorefiles(); } @Override public Collection<HStoreFile> getCompactedFiles() { return this.storeEngine.getStoreFileManager().getCompactedfiles(); } /** * This throws a WrongRegionException if the HFile does not fit in this region, or an * InvalidHFileException if the HFile is not valid. */ public void assertBulkLoadHFileOk(Path srcPath) throws IOException { HFile.Reader reader = null; try { LOG.info("Validating hfile at " + srcPath + " for inclusion in " + "store " + this + " region " + this.getRegionInfo().getRegionNameAsString()); FileSystem srcFs = srcPath.getFileSystem(conf); srcFs.access(srcPath, FsAction.READ_WRITE); reader = HFile.createReader(srcFs, srcPath, cacheConf, isPrimaryReplicaStore(), conf); Optional<byte[]> firstKey = reader.getFirstRowKey(); Preconditions.checkState(firstKey.isPresent(), "First key can not be null"); Optional<Cell> lk = reader.getLastKey(); Preconditions.checkState(lk.isPresent(), "Last key can not be null"); byte[] lastKey = CellUtil.cloneRow(lk.get()); if (LOG.isDebugEnabled()) { LOG.debug("HFile bounds: first=" + Bytes.toStringBinary(firstKey.get()) + " last=" + Bytes.toStringBinary(lastKey)); LOG.debug("Region bounds: first=" + Bytes.toStringBinary(getRegionInfo().getStartKey()) + " last=" + Bytes.toStringBinary(getRegionInfo().getEndKey())); } if (!this.getRegionInfo().containsRange(firstKey.get(), lastKey)) { throw new WrongRegionException( "Bulk load file " + srcPath.toString() 
+ " does not fit inside region " + this.getRegionInfo().getRegionNameAsString()); } if(reader.length() > conf.getLong(HConstants.HREGION_MAX_FILESIZE, HConstants.DEFAULT_MAX_FILE_SIZE)) { LOG.warn("Trying to bulk load hfile " + srcPath + " with size: " + reader.length() + " bytes can be problematic as it may lead to oversplitting."); } if (verifyBulkLoads) { long verificationStartTime = EnvironmentEdgeManager.currentTime(); LOG.info("Full verification started for bulk load hfile: {}", srcPath); Cell prevCell = null; HFileScanner scanner = reader.getScanner(false, false, false); scanner.seekTo(); do { Cell cell = scanner.getCell(); if (prevCell != null) { if (comparator.compareRows(prevCell, cell) > 0) { throw new InvalidHFileException("Previous row is greater than" + " current row: path=" + srcPath + " previous=" + CellUtil.getCellKeyAsString(prevCell) + " current=" + CellUtil.getCellKeyAsString(cell)); } if (CellComparator.getInstance().compareFamilies(prevCell, cell) != 0) { throw new InvalidHFileException("Previous key had different" + " family compared to current key: path=" + srcPath + " previous=" + Bytes.toStringBinary(prevCell.getFamilyArray(), prevCell.getFamilyOffset(), prevCell.getFamilyLength()) + " current=" + Bytes.toStringBinary(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())); } } prevCell = cell; } while (scanner.next()); LOG.info("Full verification complete for bulk load hfile: " + srcPath.toString() + " took " + (EnvironmentEdgeManager.currentTime() - verificationStartTime) + " ms"); } } finally { if (reader != null) { reader.close(); } } } /** * This method should only be called from Region. It is assumed that the ranges of values in the * HFile fit within the stores assigned region. 
(assertBulkLoadHFileOk checks this)
   *
   * @param seqNum sequence Id associated with the HFile
   */
  public Pair<Path, Path> preBulkLoadHFile(String srcPathStr, long seqNum) throws IOException {
    Path srcPath = new Path(srcPathStr);
    return fs.bulkLoadStoreFile(getColumnFamilyName(), srcPath, seqNum);
  }

  /**
   * Commits a bulk-loaded HFile into this store's directory and adds it to the live store
   * file list.
   * <p>The coprocessor post-commit hook, when present, is invoked in a finally block, i.e.
   * even if the commit itself throws.
   * @param family column family name the file belongs to (forwarded to the coprocessor hook)
   * @param srcPathStr source path of the HFile being loaded
   * @param dstPath destination path the file is committed to
   * @return the destination path the file now lives at
   */
  public Path bulkLoadHFile(byte[] family, String srcPathStr, Path dstPath) throws IOException {
    Path srcPath = new Path(srcPathStr);
    try {
      fs.commitStoreFile(srcPath, dstPath);
    } finally {
      if (this.getCoprocessorHost() != null) {
        this.getCoprocessorHost().postCommitStoreFile(family, srcPath, dstPath);
      }
    }

    LOG.info("Loaded HFile " + srcPath + " into store '" + getColumnFamilyName() + "' as "
        + dstPath + " - updating store file list.");

    HStoreFile sf = createStoreFileAndReader(dstPath);
    bulkLoadHFile(sf);

    LOG.info("Successfully loaded store file {} into store {} (new location: {})", srcPath, this,
        dstPath);

    return dstPath;
  }

  /**
   * Opens a reader for an already-committed bulk-load file and adds it to this store's
   * live store file list.
   */
  public void bulkLoadHFile(StoreFileInfo fileInfo) throws IOException {
    HStoreFile sf = createStoreFileAndReader(fileInfo);
    bulkLoadHFile(sf);
  }

  // Accounts the new file's size against the store metrics, then inserts it into the store
  // file manager under the store write lock.
  private void bulkLoadHFile(HStoreFile sf) throws IOException {
    StoreFileReader r = sf.getReader();
    this.storeSize.addAndGet(r.length());
    this.totalUncompressedBytes.addAndGet(r.getTotalUncompressedBytes());

    // Append the new storefile into the list
    this.lock.writeLock().lock();
    try {
      this.storeEngine.getStoreFileManager().insertNewFiles(Lists.newArrayList(sf));
    } finally {
      // We need the lock, as long as we are updating the storeFiles
      // or changing the memstore. Let us release it before calling
      // notifyChangeReadersObservers. See HBASE-4485 for a possible
      // deadlock scenario that could have happened if continue to hold
      // the lock.
this.lock.writeLock().unlock(); } LOG.info("Loaded HFile " + sf.getFileInfo() + " into store '" + getColumnFamilyName()); if (LOG.isTraceEnabled()) { String traceMessage = "BULK LOAD time,size,store size,store files [" + EnvironmentEdgeManager.currentTime() + "," + r.length() + "," + storeSize + "," + storeEngine.getStoreFileManager().getStorefileCount() + "]"; LOG.trace(traceMessage); } } /** * Close all the readers We don't need to worry about subsequent requests because the Region holds * a write lock that will prevent any more reads or writes. * @return the {@link StoreFile StoreFiles} that were previously being used. * @throws IOException on failure */ public ImmutableCollection<HStoreFile> close() throws IOException { this.archiveLock.lock(); this.lock.writeLock().lock(); try { // Clear so metrics doesn't find them. ImmutableCollection<HStoreFile> result = storeEngine.getStoreFileManager().clearFiles(); Collection<HStoreFile> compactedfiles = storeEngine.getStoreFileManager().clearCompactedFiles(); // clear the compacted files if (CollectionUtils.isNotEmpty(compactedfiles)) { removeCompactedfiles(compactedfiles); } if (!result.isEmpty()) { // initialize the thread pool for closing store files in parallel. ThreadPoolExecutor storeFileCloserThreadPool = this.region .getStoreFileOpenAndCloseThreadPool("StoreFileCloser-" + this.region.getRegionInfo().getEncodedName() + "-" + this.getColumnFamilyName()); // close each store file in parallel CompletionService<Void> completionService = new ExecutorCompletionService<>(storeFileCloserThreadPool); for (HStoreFile f : result) { completionService.submit(new Callable<Void>() { @Override public Void call() throws IOException { boolean evictOnClose = cacheConf != null? 
cacheConf.shouldEvictOnClose(): true; f.closeStoreFile(evictOnClose); return null; } }); } IOException ioe = null; try { for (int i = 0; i < result.size(); i++) { try { Future<Void> future = completionService.take(); future.get(); } catch (InterruptedException e) { if (ioe == null) { ioe = new InterruptedIOException(); ioe.initCause(e); } } catch (ExecutionException e) { if (ioe == null) { ioe = new IOException(e.getCause()); } } } } finally { storeFileCloserThreadPool.shutdownNow(); } if (ioe != null) { throw ioe; } } LOG.trace("Closed {}", this); return result; } finally { this.lock.writeLock().unlock(); this.archiveLock.unlock(); } } /** * Snapshot this stores memstore. Call before running * {@link #flushCache(long, MemStoreSnapshot, MonitoredTask, ThroughputController, * FlushLifeCycleTracker)} * so it has some work to do. */ void snapshot() { this.lock.writeLock().lock(); try { this.memstore.snapshot(); } finally { this.lock.writeLock().unlock(); } } /** * Write out current snapshot. Presumes {@link #snapshot()} has been called previously. * @param logCacheFlushId flush sequence number * @return The path name of the tmp file to which the store was flushed * @throws IOException if exception occurs during process */ protected List<Path> flushCache(final long logCacheFlushId, MemStoreSnapshot snapshot, MonitoredTask status, ThroughputController throughputController, FlushLifeCycleTracker tracker) throws IOException { // If an exception happens flushing, we let it out without clearing // the memstore snapshot. The old snapshot will be returned when we say // 'snapshot', the next time flush comes around. 
    // Retry after catching exception when flushing, otherwise server will abort
    // itself
    StoreFlusher flusher = storeEngine.getStoreFlusher();
    IOException lastException = null;
    for (int i = 0; i < flushRetriesNumber; i++) {
      try {
        List<Path> pathNames =
          flusher.flushSnapshot(snapshot, logCacheFlushId, status, throughputController, tracker);
        Path lastPathName = null;
        try {
          // Validate every flushed file before returning; a failure here triggers a retry.
          for (Path pathName : pathNames) {
            lastPathName = pathName;
            validateStoreFile(pathName);
          }
          return pathNames;
        } catch (Exception e) {
          LOG.warn("Failed validating store file {}, retrying num={}", lastPathName, i, e);
          if (e instanceof IOException) {
            lastException = (IOException) e;
          } else {
            lastException = new IOException(e);
          }
        }
      } catch (IOException e) {
        LOG.warn("Failed flushing store file, retrying num={}", i, e);
        lastException = e;
      }
      // Back off between attempts, but not after the final one.
      if (lastException != null && i < (flushRetriesNumber - 1)) {
        try {
          Thread.sleep(pauseTime);
        } catch (InterruptedException e) {
          IOException iie = new InterruptedIOException();
          iie.initCause(e);
          throw iie;
        }
      }
    }
    throw lastException;
  }

  /**
   * Validate a recovered hfile and, if it belongs to this region, commit it into this store.
   * @param path location of the recovered hfile
   * @return the store file created from the recovered hfile
   * @throws IOException if the file is unreadable, lies outside this region's key range, or the
   *           commit fails
   */
  public HStoreFile tryCommitRecoveredHFile(Path path) throws IOException {
    LOG.info("Validating recovered hfile at {} for inclusion in store {} region {}", path, this,
      getRegionInfo().getRegionNameAsString());
    FileSystem srcFs = path.getFileSystem(conf);
    srcFs.access(path, FsAction.READ_WRITE);
    try (HFile.Reader reader =
        HFile.createReader(srcFs, path, cacheConf, isPrimaryReplicaStore(), conf)) {
      Optional<byte[]> firstKey = reader.getFirstRowKey();
      Preconditions.checkState(firstKey.isPresent(), "First key can not be null");
      Optional<Cell> lk = reader.getLastKey();
      Preconditions.checkState(lk.isPresent(), "Last key can not be null");
      byte[] lastKey = CellUtil.cloneRow(lk.get());
      // Reject files whose key range does not fit entirely inside this region.
      if (!this.getRegionInfo().containsRange(firstKey.get(), lastKey)) {
        throw new WrongRegionException("Recovered hfile " + path.toString()
          + " does not fit inside region " + this.getRegionInfo().getRegionNameAsString());
      }
    }

    Path dstPath = fs.commitStoreFile(getColumnFamilyName(), path);
    HStoreFile sf = createStoreFileAndReader(dstPath);
    StoreFileReader r = sf.getReader();
    this.storeSize.addAndGet(r.length());
    this.totalUncompressedBytes.addAndGet(r.getTotalUncompressedBytes());

    this.lock.writeLock().lock();
    try {
      this.storeEngine.getStoreFileManager().insertNewFiles(Lists.newArrayList(sf));
    } finally {
      this.lock.writeLock().unlock();
    }

    LOG.info("Loaded recovered hfile to {}, entries={}, sequenceid={}, filesize={}", sf,
      r.getEntries(), r.getSequenceID(), TraditionalBinaryPrefix.long2String(r.length(), "B", 1));
    return sf;
  }

  /**
   * Commit a flushed tmp file into this store and reopen it for reading.
   * @param path The pathname of the tmp file into which the store was flushed
   * @param logCacheFlushId flush sequence number (used for logging)
   * @param status task monitor updated while reopening the flushed file
   * @return store file created.
   */
  private HStoreFile commitFile(Path path, long logCacheFlushId, MonitoredTask status)
      throws IOException {
    // Write-out finished successfully, move into the right spot
    Path dstPath = fs.commitStoreFile(getColumnFamilyName(), path);

    status.setStatus("Flushing " + this + ": reopening flushed file");
    HStoreFile sf = createStoreFileAndReader(dstPath);

    StoreFileReader r = sf.getReader();
    this.storeSize.addAndGet(r.length());
    this.totalUncompressedBytes.addAndGet(r.getTotalUncompressedBytes());

    if (LOG.isInfoEnabled()) {
      LOG.info("Added " + sf + ", entries=" + r.getEntries() + ", sequenceid=" + logCacheFlushId
        + ", filesize=" + TraditionalBinaryPrefix.long2String(r.length(), "", 1));
    }
    return sf;
  }

  public StoreFileWriter createWriterInTmp(long maxKeyCount, Compression.Algorithm compression,
      boolean isCompaction, boolean includeMVCCReadpoint, boolean includesTag,
      boolean shouldDropBehind) throws IOException {
    // Convenience overload: delegates with totalCompactedFilesSize = -1.
    return createWriterInTmp(maxKeyCount, compression, isCompaction, includeMVCCReadpoint,
      includesTag, shouldDropBehind, -1);
  }

  /**
   * @param compression Compression algorithm to use
   * @param isCompaction whether we are creating a new file in a compaction
   * @param includeMVCCReadpoint - whether to include MVCC or not
   * @param includesTag - includesTag or not
   * @return Writer
for a new StoreFile in the tmp dir.
   */
  // TODO : allow the Writer factory to create Writers of ShipperListener type only in case of
  // compaction
  public StoreFileWriter createWriterInTmp(long maxKeyCount, Compression.Algorithm compression,
      boolean isCompaction, boolean includeMVCCReadpoint, boolean includesTag,
      boolean shouldDropBehind, long totalCompactedFilesSize) throws IOException {
    // creating new cache config for each new writer
    final CacheConfig writerCacheConf = new CacheConfig(cacheConf);
    if (isCompaction) {
      // Don't cache data on write on compactions, unless specifically configured to do so
      // Cache only when total file size remains lower than configured threshold
      final boolean cacheCompactedBlocksOnWrite =
        cacheConf.shouldCacheCompactedBlocksOnWrite();
      // if data blocks are to be cached on write
      // during compaction, we should forcefully
      // cache index and bloom blocks as well
      if (cacheCompactedBlocksOnWrite && totalCompactedFilesSize <= cacheConf
        .getCacheCompactedBlocksOnWriteThreshold()) {
        writerCacheConf.enableCacheOnWrite();
        if (!cacheOnWriteLogged) {
          // Log the cache-on-write decision only once per store.
          LOG.info("For Store {} , cacheCompactedBlocksOnWrite is true, hence enabled "
            + "cacheOnWrite for Data blocks, Index blocks and Bloom filter blocks",
            getColumnFamilyName());
          cacheOnWriteLogged = true;
        }
      } else {
        writerCacheConf.setCacheDataOnWrite(false);
        if (totalCompactedFilesSize > cacheConf.getCacheCompactedBlocksOnWriteThreshold()) {
          // checking condition once again for logging
          LOG.debug(
            "For Store {}, setting cacheCompactedBlocksOnWrite as false as total size of compacted "
              + "files - {}, is greater than cacheCompactedBlocksOnWriteThreshold - {}",
            getColumnFamilyName(), totalCompactedFilesSize,
            cacheConf.getCacheCompactedBlocksOnWriteThreshold());
        }
      }
    } else {
      final boolean shouldCacheDataOnWrite = cacheConf.shouldCacheDataOnWrite();
      if (shouldCacheDataOnWrite) {
        writerCacheConf.enableCacheOnWrite();
        if (!cacheOnWriteLogged) {
          LOG.info("For Store {} , cacheDataOnWrite is true, hence enabled cacheOnWrite for "
            + "Index blocks and Bloom filter blocks", getColumnFamilyName());
          cacheOnWriteLogged = true;
        }
      }
    }
    // Favored nodes are only available when running inside a region server.
    InetSocketAddress[] favoredNodes = null;
    if (region.getRegionServerServices() != null) {
      favoredNodes = region.getRegionServerServices().getFavoredNodesForRegion(
        region.getRegionInfo().getEncodedName());
    }
    HFileContext hFileContext = createFileContext(compression, includeMVCCReadpoint, includesTag,
      cryptoContext);
    Path familyTempDir = new Path(fs.getTempDir(), family.getNameAsString());
    StoreFileWriter.Builder builder =
      new StoreFileWriter.Builder(conf, writerCacheConf, this.getFileSystem())
        .withOutputDir(familyTempDir)
        .withBloomType(family.getBloomFilterType())
        .withMaxKeyCount(maxKeyCount)
        .withFavoredNodes(favoredNodes)
        .withFileContext(hFileContext)
        .withShouldDropCacheBehind(shouldDropBehind)
        .withCompactedFilesSupplier(this::getCompactedFiles);
    return builder.build();
  }

  /**
   * Build the {@link HFileContext} used for new writers, pulling most settings from the column
   * family descriptor and this store's configuration.
   * @param compression compression algorithm; defaults to
   *          {@code HFile.DEFAULT_COMPRESSION_ALGORITHM} when null
   * @param includeMVCCReadpoint whether to include MVCC read points in the file
   * @param includesTag whether cells in the file carry tags
   * @param cryptoContext encryption context for the file
   */
  private HFileContext createFileContext(Compression.Algorithm compression,
      boolean includeMVCCReadpoint, boolean includesTag, Encryption.Context cryptoContext) {
    if (compression == null) {
      compression = HFile.DEFAULT_COMPRESSION_ALGORITHM;
    }
    HFileContext hFileContext = new HFileContextBuilder()
      .withIncludesMvcc(includeMVCCReadpoint)
      .withIncludesTags(includesTag)
      .withCompression(compression)
      .withCompressTags(family.isCompressTags())
      .withChecksumType(checksumType)
      .withBytesPerCheckSum(bytesPerChecksum)
      .withBlockSize(blocksize)
      .withHBaseCheckSum(true)
      .withDataBlockEncoding(family.getDataBlockEncoding())
      .withEncryptionContext(cryptoContext)
      .withCreateTime(EnvironmentEdgeManager.currentTime())
      .withColumnFamily(family.getName())
      .withTableName(region.getTableDescriptor()
        .getTableName().getName())
      .withCellComparator(this.comparator)
      .build();
    return hFileContext;
  }

  // Sum of on-disk lengths of the given store files.
  private long getTotalSize(Collection<HStoreFile> sfs) {
    return sfs.stream().mapToLong(sf -> sf.getReader().length()).sum();
  }

  /**
   * Change storeFiles adding into place the Reader produced by this new flush.
   * @param sfs Store files
   * @param snapshotId id of the memstore snapshot to clear once the files are in place;
   *          values {@code <= 0} skip the clear
   * @return Whether compaction is required.
   */
  private boolean updateStorefiles(List<HStoreFile> sfs, long snapshotId) throws IOException {
    this.lock.writeLock().lock();
    try {
      this.storeEngine.getStoreFileManager().insertNewFiles(sfs);
      if (snapshotId > 0) {
        this.memstore.clearSnapshot(snapshotId);
      }
    } finally {
      // We need the lock, as long as we are updating the storeFiles
      // or changing the memstore. Let us release it before calling
      // notifyChangeReadersObservers. See HBASE-4485 for a possible
      // deadlock scenario that could have happened if continue to hold
      // the lock.
      this.lock.writeLock().unlock();
    }
    // notify to be called here - only in case of flushes
    notifyChangedReadersObservers(sfs);
    if (LOG.isTraceEnabled()) {
      long totalSize = getTotalSize(sfs);
      // Comma-separated trace line; field order is time,count,size,store size,store file count.
      String traceMessage = "FLUSH time,count,size,store size,store files ["
        + EnvironmentEdgeManager.currentTime() + "," + sfs.size() + "," + totalSize
        + "," + storeSize + "," + storeEngine.getStoreFileManager().getStorefileCount() + "]";
      LOG.trace(traceMessage);
    }
    return needsCompaction();
  }

  /**
   * Notify all observers that set of Readers has changed.
   * @param sfs the store files just added by a flush
   */
  private void notifyChangedReadersObservers(List<HStoreFile> sfs) throws IOException {
    for (ChangedReadersObserver o : this.changedReaderObservers) {
      List<KeyValueScanner> memStoreScanners;
      this.lock.readLock().lock();
      try {
        // Create memstore scanners under the read lock, at the observer's own read point.
        memStoreScanners = this.memstore.getScanners(o.getReadPoint());
      } finally {
        this.lock.readLock().unlock();
      }
      o.updateReaders(sfs, memStoreScanners);
    }
  }

  /**
   * Get all scanners with no filtering based on TTL (that happens further down the line).
   * @param cacheBlocks cache the blocks or not
   * @param isGet true for a Get; note this overload does not forward it to the delegate
   * @param usePread true to use pread, false if not
   * @param isCompaction true if the scanner is created for compaction
   * @param matcher the scan query matcher
   * @param startRow the start row
   * @param stopRow the stop row
   * @param readPt the read point of the current scan
   * @return all scanners for this store
   */
  public List<KeyValueScanner> getScanners(boolean cacheBlocks, boolean isGet, boolean usePread,
      boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow, byte[] stopRow, long readPt)
      throws IOException {
    // Delegate with includeStartRow=true and includeStopRow=false.
    return getScanners(cacheBlocks, usePread, isCompaction, matcher, startRow, true, stopRow, false,
      readPt);
  }

  /**
   * Get all scanners with no filtering based on TTL (that happens further down the line).
   * @param cacheBlocks cache the blocks or not
   * @param usePread true to use pread, false if not
   * @param isCompaction true if the scanner is created for compaction
   * @param matcher the scan query matcher
   * @param startRow the start row
   * @param includeStartRow true to include start row, false if not
   * @param stopRow the stop row
   * @param includeStopRow true to include stop row, false if not
   * @param readPt the read point of the current scan
   * @return all scanners for this store
   */
  public List<KeyValueScanner> getScanners(boolean cacheBlocks, boolean usePread,
      boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow, boolean includeStartRow,
      byte[] stopRow, boolean includeStopRow, long readPt) throws IOException {
    Collection<HStoreFile> storeFilesToScan;
    List<KeyValueScanner> memStoreScanners;
    this.lock.readLock().lock();
    try {
      // Select store files and create memstore scanners under the same read lock so the two
      // views are consistent with each other.
      storeFilesToScan = this.storeEngine.getStoreFileManager().getFilesForScan(startRow,
        includeStartRow, stopRow, includeStopRow);
      memStoreScanners = this.memstore.getScanners(readPt);
    } finally {
      this.lock.readLock().unlock();
    }

    try {
      // First the store file scanners
      // TODO this used to get the store files in descending order,
      // but now we get them in ascending order, which I think is
      // actually more correct, since memstore get put at the end.
      List<StoreFileScanner> sfScanners = StoreFileScanner
        .getScannersForStoreFiles(storeFilesToScan, cacheBlocks, usePread, isCompaction, false,
          matcher, readPt);
      List<KeyValueScanner> scanners = new ArrayList<>(sfScanners.size() + 1);
      scanners.addAll(sfScanners);
      // Then the memstore scanners
      scanners.addAll(memStoreScanners);
      return scanners;
    } catch (Throwable t) {
      // Release the memstore scanners created above before propagating the failure.
      clearAndClose(memStoreScanners);
      throw t instanceof IOException ? (IOException) t : new IOException(t);
    }
  }

  // Close and drop every scanner in the list; a null list is a no-op.
  private static void clearAndClose(List<KeyValueScanner> scanners) {
    if (scanners == null) {
      return;
    }
    for (KeyValueScanner s : scanners) {
      s.close();
    }
    scanners.clear();
  }

  /**
   * Create scanners on the given files and if needed on the memstore with no filtering based on TTL
   * (that happens further down the line).
   * @param files the list of files on which the scanners has to be created
   * @param cacheBlocks cache the blocks or not
   * @param isGet true for a Get; note this overload does not forward it to the delegate
   * @param usePread true to use pread, false if not
   * @param isCompaction true if the scanner is created for compaction
   * @param matcher the scan query matcher
   * @param startRow the start row
   * @param stopRow the stop row
   * @param readPt the read point of the current scan
   * @param includeMemstoreScanner true if memstore has to be included
   * @return scanners on the given files and on the memstore if specified
   */
  public List<KeyValueScanner> getScanners(List<HStoreFile> files, boolean cacheBlocks,
      boolean isGet, boolean usePread, boolean isCompaction, ScanQueryMatcher matcher,
      byte[] startRow, byte[] stopRow, long readPt, boolean includeMemstoreScanner)
      throws IOException {
    // Delegate with includeStartRow=true and includeStopRow=false.
    return getScanners(files, cacheBlocks, usePread, isCompaction, matcher, startRow, true, stopRow,
      false, readPt, includeMemstoreScanner);
  }

  /**
   * Create scanners on the given files and if needed on the memstore with no filtering based on TTL
   * (that happens further down the line).
   * @param files the list of files on which the scanners has to be created
   * @param cacheBlocks cache the blocks or not
   * @param usePread true to use pread, false if not
   * @param isCompaction true if the scanner is created for compaction
   * @param matcher the scan query matcher
   * @param startRow the start row
   * @param includeStartRow true to include start row, false if not
   * @param stopRow the stop row
   * @param includeStopRow true to include stop row, false if not
   * @param readPt the read point of the current scan
   * @param includeMemstoreScanner true if memstore has to be included
   * @return scanners on the given files and on the memstore if specified
   */
  public List<KeyValueScanner> getScanners(List<HStoreFile> files, boolean cacheBlocks,
      boolean usePread, boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow,
      boolean includeStartRow, byte[] stopRow, boolean includeStopRow, long readPt,
      boolean includeMemstoreScanner) throws IOException {
    List<KeyValueScanner> memStoreScanners = null;
    if (includeMemstoreScanner) {
      this.lock.readLock().lock();
      try {
        memStoreScanners = this.memstore.getScanners(readPt);
      } finally {
        this.lock.readLock().unlock();
      }
    }
    try {
      List<StoreFileScanner> sfScanners = StoreFileScanner
        .getScannersForStoreFiles(files, cacheBlocks, usePread, isCompaction, false, matcher,
          readPt);
      List<KeyValueScanner> scanners = new ArrayList<>(sfScanners.size() + 1);
      scanners.addAll(sfScanners);
      // Then the memstore scanners
      if (memStoreScanners != null) {
        scanners.addAll(memStoreScanners);
      }
      return scanners;
    } catch (Throwable t) {
      // Release the memstore scanners created above before propagating the failure.
      clearAndClose(memStoreScanners);
      throw t instanceof IOException ? (IOException) t : new IOException(t);
    }
  }

  /**
   * @param o Observer who wants to know about changes in set of Readers
   */
  public void addChangedReaderObserver(ChangedReadersObserver o) {
    this.changedReaderObservers.add(o);
  }

  /**
   * @param o Observer no longer interested in changes in set of Readers.
   */
  public void deleteChangedReaderObserver(ChangedReadersObserver o) {
    // We don't check if observer present; it may not be (legitimately)
    this.changedReaderObservers.remove(o);
  }

  //////////////////////////////////////////////////////////////////////////////
  // Compaction
  //////////////////////////////////////////////////////////////////////////////

  /**
   * Compact the StoreFiles. This method may take some time, so the calling
   * thread must be able to block for long periods.
   *
   * <p>During this time, the Store can work as usual, getting values from
   * StoreFiles and writing new StoreFiles from the memstore.
   *
   * Existing StoreFiles are not destroyed until the new compacted StoreFile is
   * completely written-out to disk.
   *
   * <p>The compactLock prevents multiple simultaneous compactions.
   * The structureLock prevents us from interfering with other write operations.
   *
   * <p>We don't want to hold the structureLock for the whole time, as a compact()
   * can be lengthy and we want to allow cache-flushes during this period.
   *
   * <p> Compaction event should be idempotent, since there is no IO Fencing for
   * the region directory in hdfs. A region server might still try to complete the
   * compaction after it lost the region. That is why the following events are carefully
   * ordered for a compaction:
   * 1. Compaction writes new files under region/.tmp directory (compaction output)
   * 2. Compaction atomically moves the temporary file under region directory
   * 3. Compaction appends a WAL edit containing the compaction input and output files.
   * Forces sync on WAL.
   * 4. Compaction deletes the input files from the region directory.
   *
   * Failure conditions are handled like this:
   * - If RS fails before 2, compaction won't complete. Even if RS lives on and finishes
   * the compaction later, it will only write the new data file to the region directory.
   * Since we already have this data, this will be idempotent but we will have a redundant
   * copy of the data.
   * - If RS fails between 2 and 3, the region will have a redundant copy of the data. The
   * RS that failed won't be able to finish sync() for WAL because of lease recovery in WAL.
   * - If RS fails after 3, the region server who opens the region will pick up the
   * compaction marker from the WAL and replay it by removing the compaction input files.
   * Failed RS can also attempt to delete those files, but the operation will be idempotent
   *
   * See HBASE-2231 for details.
   *
   * @param compaction compaction details obtained from requestCompaction()
   * @param throughputController used to throttle compaction I/O
   * @param user the user driving the compaction, passed through to coprocessor hooks
   * @return Storefile we compacted into or null if we failed or opted out early.
   */
  public List<HStoreFile> compact(CompactionContext compaction,
      ThroughputController throughputController, User user) throws IOException {
    assert compaction != null;
    CompactionRequestImpl cr = compaction.getRequest();
    try {
      // Do all sanity checking in here if we have a valid CompactionRequestImpl
      // because we need to clean up after it on the way out in a finally
      // block below
      long compactionStartTime = EnvironmentEdgeManager.currentTime();
      assert compaction.hasSelection();
      Collection<HStoreFile> filesToCompact = cr.getFiles();
      assert !filesToCompact.isEmpty();
      synchronized (filesCompacting) {
        // sanity check: we're compacting files that this store knows about
        // TODO: change this to LOG.error() after more debugging
        Preconditions.checkArgument(filesCompacting.containsAll(filesToCompact));
      }

      // Ready to go. Have list of files to compact.
      LOG.info("Starting compaction of " + filesToCompact + " into tmpdir=" + fs.getTempDir()
        + ", totalSize=" + TraditionalBinaryPrefix.long2String(cr.getSize(), "", 1));

      return doCompaction(cr, filesToCompact, user, compactionStartTime,
        compaction.compact(throughputController, user));
    } finally {
      // Always release the request, whether the compaction succeeded or threw.
      finishCompactionRequest(cr);
    }
  }

  /**
   * Complete a compaction whose output files have already been written: move the outputs into
   * place, record the event in the WAL, swap the store's file set, and update metrics.
   * @param cr the compaction request being completed
   * @param filesToCompact the input files that were compacted
   * @param user the user driving the compaction, passed to coprocessor hooks
   * @param compactionStartTime when the compaction began, for metrics
   * @param newFiles the compaction output paths still in the tmp dir
   * @return the store files now in place for the compaction output
   */
  @VisibleForTesting
  protected List<HStoreFile> doCompaction(CompactionRequestImpl cr,
      Collection<HStoreFile> filesToCompact, User user, long compactionStartTime,
      List<Path> newFiles) throws IOException {
    // Do the steps necessary to complete the compaction.
    List<HStoreFile> sfs = moveCompactedFilesIntoPlace(cr, newFiles, user);
    writeCompactionWalRecord(filesToCompact, sfs);
    replaceStoreFiles(filesToCompact, sfs);
    if (cr.isMajor()) {
      majorCompactedCellsCount.addAndGet(getCompactionProgress().getTotalCompactingKVs());
      majorCompactedCellsSize.addAndGet(getCompactionProgress().totalCompactedSize);
    } else {
      compactedCellsCount.addAndGet(getCompactionProgress().getTotalCompactingKVs());
      compactedCellsSize.addAndGet(getCompactionProgress().totalCompactedSize);
    }
    long outputBytes = getTotalSize(sfs);

    // At this point the store will use new files for all new scanners.
    completeCompaction(filesToCompact); // update store size.

    long now = EnvironmentEdgeManager.currentTime();
    // Region server services/metrics may be absent (e.g. in tests); skip metrics then.
    if (region.getRegionServerServices() != null
      && region.getRegionServerServices().getMetrics() != null) {
      region.getRegionServerServices().getMetrics().updateCompaction(
        region.getTableDescriptor().getTableName().getNameAsString(), cr.isMajor(),
        now - compactionStartTime, cr.getFiles().size(), newFiles.size(), cr.getSize(),
        outputBytes);
    }

    logCompactionEndMessage(cr, sfs, now, compactionStartTime);
    return sfs;
  }

  /**
   * Commit each compaction output into the store directory, invoking the postCompact coprocessor
   * hook for every file.
   */
  private List<HStoreFile> moveCompactedFilesIntoPlace(CompactionRequestImpl cr,
      List<Path> newFiles, User user) throws IOException {
    List<HStoreFile> sfs = new ArrayList<>(newFiles.size());
    for (Path newFile : newFiles) {
      assert newFile != null;
      HStoreFile sf = moveFileIntoPlace(newFile);
      if (this.getCoprocessorHost() != null) {
        getCoprocessorHost().postCompact(this, sf, cr.getTracker(), cr, user);
      }
      assert sf != null;
      sfs.add(sf);
    }
    return sfs;
  }

  // Package-visible for tests
  HStoreFile moveFileIntoPlace(Path newFile) throws IOException {
    validateStoreFile(newFile);
    // Move the file into the right spot
    Path destPath = fs.commitStoreFile(getColumnFamilyName(), newFile);
    return createStoreFileAndReader(destPath);
  }

  /**
   * Writes the compaction WAL record. No-op when the region has no WAL.
   * @param filesCompacted Files compacted (input).
   * @param newFiles Files from compaction.
   */
  private void writeCompactionWalRecord(Collection<HStoreFile> filesCompacted,
      Collection<HStoreFile> newFiles) throws IOException {
    if (region.getWAL() == null) {
      return;
    }
    List<Path> inputPaths =
      filesCompacted.stream().map(HStoreFile::getPath).collect(Collectors.toList());
    List<Path> outputPaths =
      newFiles.stream().map(HStoreFile::getPath).collect(Collectors.toList());
    RegionInfo info = this.region.getRegionInfo();
    CompactionDescriptor compactionDescriptor = ProtobufUtil.toCompactionDescriptor(info,
      family.getName(), inputPaths, outputPaths,
      fs.getStoreDir(getColumnFamilyDescriptor().getNameAsString()));
    // Fix reaching into Region to get the maxWaitForSeqId.
// Does this method belong in Region altogether given it is making so many references up there? // Could be Region#writeCompactionMarker(compactionDescriptor); WALUtil.writeCompactionMarker(this.region.getWAL(), this.region.getReplicationScope(), this.region.getRegionInfo(), compactionDescriptor, this.region.getMVCC()); } @VisibleForTesting void replaceStoreFiles(Collection<HStoreFile> compactedFiles, Collection<HStoreFile> result) throws IOException { this.lock.writeLock().lock(); try { this.storeEngine.getStoreFileManager().addCompactionResults(compactedFiles, result); synchronized (filesCompacting) { filesCompacting.removeAll(compactedFiles); } // These may be null when the RS is shutting down. The space quota Chores will fix the Region // sizes later so it's not super-critical if we miss these. RegionServerServices rsServices = region.getRegionServerServices(); if (rsServices != null && rsServices.getRegionServerSpaceQuotaManager() != null) { updateSpaceQuotaAfterFileReplacement( rsServices.getRegionServerSpaceQuotaManager().getRegionSizeStore(), getRegionInfo(), compactedFiles, result); } } finally { this.lock.writeLock().unlock(); } } /** * Updates the space quota usage for this region, removing the size for files compacted away * and adding in the size for new files. * * @param sizeStore The object tracking changes in region size for space quotas. * @param regionInfo The identifier for the region whose size is being updated. * @param oldFiles Files removed from this store's region. * @param newFiles Files added to this store's region. 
*/
void updateSpaceQuotaAfterFileReplacement(
    RegionSizeStore sizeStore, RegionInfo regionInfo, Collection<HStoreFile> oldFiles,
    Collection<HStoreFile> newFiles) {
  // Net size change = sum(new HFile lengths) - sum(old HFile lengths); non-HFiles
  // (e.g. references) are excluded from quota accounting here.
  long delta = 0;
  if (oldFiles != null) {
    for (HStoreFile compactedFile : oldFiles) {
      if (compactedFile.isHFile()) {
        delta -= compactedFile.getReader().length();
      }
    }
  }
  if (newFiles != null) {
    for (HStoreFile newFile : newFiles) {
      if (newFile.isHFile()) {
        delta += newFile.getReader().length();
      }
    }
  }
  sizeStore.incrementRegionSize(regionInfo, delta);
}

/**
 * Log a very elaborate compaction completion message.
 * @param cr Request.
 * @param sfs Resulting files.
 * @param now Completion timestamp.
 * @param compactionStartTime Start time.
 */
private void logCompactionEndMessage(
    CompactionRequestImpl cr, List<HStoreFile> sfs, long now, long compactionStartTime) {
  StringBuilder message = new StringBuilder(
      "Completed" + (cr.isMajor() ? " major" : "") + " compaction of "
          + cr.getFiles().size() + (cr.isAllFiles() ? " (all)" : "") + " file(s) in "
          + this + " of " + this.getRegionInfo().getShortNameToLog() + " into ");
  if (sfs.isEmpty()) {
    message.append("none, ");
  } else {
    for (HStoreFile sf: sfs) {
      message.append(sf.getPath().getName());
      message.append("(size=");
      message.append(TraditionalBinaryPrefix.long2String(sf.getReader().length(), "", 1));
      message.append("), ");
    }
  }
  message.append("total size for store is ")
      .append(StringUtils.TraditionalBinaryPrefix.long2String(storeSize.get(), "", 1))
      .append(". This selection was in queue for ")
      .append(StringUtils.formatTimeDiff(compactionStartTime, cr.getSelectionTime()))
      .append(", and took ").append(StringUtils.formatTimeDiff(now, compactionStartTime))
      .append(" to execute.");
  LOG.info(message.toString());
  if (LOG.isTraceEnabled()) {
    // Machine-parseable one-liner for trace-level analysis tooling.
    int fileCount = storeEngine.getStoreFileManager().getStorefileCount();
    long resultSize = getTotalSize(sfs);
    String traceMessage = "COMPACTION start,end,size out,files in,files out,store size,"
        + "store files [" + compactionStartTime + "," + now + "," + resultSize + ","
        + cr.getFiles().size() + "," + sfs.size() + "," + storeSize + "," + fileCount + "]";
    LOG.trace(traceMessage);
  }
}

/**
 * Call to complete a compaction. Its for the case where we find in the WAL a compaction
 * that was not finished. We could find one recovering a WAL after a regionserver crash.
 * See HBASE-2231.
 * <p>
 * NOTE(review): the {@code removeFiles} parameter is not referenced in this body —
 * confirm against callers whether it is still needed.
 */
public void replayCompactionMarker(CompactionDescriptor compaction, boolean pickCompactionFiles,
    boolean removeFiles) throws IOException {
  LOG.debug("Completing compaction from the WAL marker");
  List<String> compactionInputs = compaction.getCompactionInputList();
  List<String> compactionOutputs = Lists.newArrayList(compaction.getCompactionOutputList());

  // The Compaction Marker is written after the compaction is completed,
  // and the files moved into the region/family folder.
  //
  // If we crash after the entry is written, we may not have removed the
  // input files, but the output file is present.
  // (The unremoved input files will be removed by this function)
  //
  // If we scan the directory and the file is not present, it can mean that:
  //   - The file was manually removed by the user
  //   - The file was removed as consequence of subsequent compaction
  // so, we can't do anything with the "compaction output list" because those
  // files have already been loaded when opening the region (by virtue of
  // being in the store's folder) or they may be missing due to a compaction.

  String familyName = this.getColumnFamilyName();
  Set<String> inputFiles = new HashSet<>();
  for (String compactionInput : compactionInputs) {
    Path inputPath = fs.getStoreFilePath(familyName, compactionInput);
    inputFiles.add(inputPath.getName());
  }

  //some of the input files might already be deleted
  List<HStoreFile> inputStoreFiles = new ArrayList<>(compactionInputs.size());
  for (HStoreFile sf : this.getStorefiles()) {
    if (inputFiles.contains(sf.getPath().getName())) {
      inputStoreFiles.add(sf);
    }
  }

  // check whether we need to pick up the new files
  List<HStoreFile> outputStoreFiles = new ArrayList<>(compactionOutputs.size());

  if (pickCompactionFiles) {
    // Outputs already present as live store files need no action; open only the rest.
    for (HStoreFile sf : this.getStorefiles()) {
      compactionOutputs.remove(sf.getPath().getName());
    }
    for (String compactionOutput : compactionOutputs) {
      StoreFileInfo storeFileInfo = fs.getStoreFileInfo(getColumnFamilyName(), compactionOutput);
      HStoreFile storeFile = createStoreFileAndReader(storeFileInfo);
      outputStoreFiles.add(storeFile);
    }
  }

  if (!inputStoreFiles.isEmpty() || !outputStoreFiles.isEmpty()) {
    LOG.info("Replaying compaction marker, replacing input files: " +
        inputStoreFiles + " with output files : " + outputStoreFiles);
    this.replaceStoreFiles(inputStoreFiles, outputStoreFiles);
    this.completeCompaction(inputStoreFiles);
  }
}

/**
 * This method tries to compact N recent files for testing.
 * Note that because compacting "recent" files only makes sense for some policies,
 * e.g. the default one, it assumes default policy is used. It doesn't use policy,
 * but instead makes a compaction candidate list by itself.
 * @param N Number of files.
*/
@VisibleForTesting
public void compactRecentForTestingAssumingDefaultPolicy(int N) throws IOException {
  List<HStoreFile> filesToCompact;
  boolean isMajor;

  this.lock.readLock().lock();
  try {
    synchronized (filesCompacting) {
      filesToCompact = Lists.newArrayList(storeEngine.getStoreFileManager().getStorefiles());
      if (!filesCompacting.isEmpty()) {
        // exclude all files older than the newest file we're currently
        // compacting. this allows us to preserve contiguity (HBASE-2856)
        HStoreFile last = filesCompacting.get(filesCompacting.size() - 1);
        int idx = filesToCompact.indexOf(last);
        Preconditions.checkArgument(idx != -1);
        filesToCompact.subList(0, idx + 1).clear();
      }
      int count = filesToCompact.size();
      if (N > count) {
        throw new RuntimeException("Not enough files");
      }

      // Keep only the N most recent candidates; it's a major compaction iff they
      // cover every store file.
      filesToCompact = filesToCompact.subList(count - N, count);
      isMajor = (filesToCompact.size() == storeEngine.getStoreFileManager().getStorefileCount());
      filesCompacting.addAll(filesToCompact);
      Collections.sort(filesCompacting, storeEngine.getStoreFileManager()
          .getStoreFileComparator());
    }
  } finally {
    this.lock.readLock().unlock();
  }

  try {
    // Ready to go. Have list of files to compact.
    List<Path> newFiles = ((DefaultCompactor)this.storeEngine.getCompactor())
        .compactForTesting(filesToCompact, isMajor);
    for (Path newFile: newFiles) {
      // Move the compaction into place.
      HStoreFile sf = moveFileIntoPlace(newFile);
      if (this.getCoprocessorHost() != null) {
        this.getCoprocessorHost().postCompact(this, sf, null, null, null);
      }
      replaceStoreFiles(filesToCompact, Collections.singletonList(sf));
      completeCompaction(filesToCompact);
    }
  } finally {
    synchronized (filesCompacting) {
      filesCompacting.removeAll(filesToCompact);
    }
  }
}

@Override
public boolean hasReferences() {
  // Grab the read lock here, because we need to ensure that: only when the atomic
  // replaceStoreFiles(..) finished, we can get all the complete store file list.
  this.lock.readLock().lock();
  try {
    // Merge the current store files with compacted files here due to HBASE-20940.
    Collection<HStoreFile> allStoreFiles = new ArrayList<>(getStorefiles());
    allStoreFiles.addAll(getCompactedFiles());
    return StoreUtils.hasReferences(allStoreFiles);
  } finally {
    this.lock.readLock().unlock();
  }
}

/**
 * getter for CompactionProgress object
 * @return CompactionProgress object; can be null
 */
public CompactionProgress getCompactionProgress() {
  return this.storeEngine.getCompactor().getProgress();
}

@Override
public boolean shouldPerformMajorCompaction() throws IOException {
  for (HStoreFile sf : this.storeEngine.getStoreFileManager().getStorefiles()) {
    // TODO: what are these reader checks all over the place?
    if (sf.getReader() == null) {
      LOG.debug("StoreFile {} has null Reader", sf);
      return false;
    }
  }
  return storeEngine.getCompactionPolicy().shouldPerformMajorCompaction(
      this.storeEngine.getStoreFileManager().getStorefiles());
}

/** Requests a compaction with default priority and no tracker/user. */
public Optional<CompactionContext> requestCompaction() throws IOException {
  return requestCompaction(NO_PRIORITY, CompactionLifeCycleTracker.DUMMY, null);
}

/**
 * Selects files for compaction and builds a compaction context for them.
 * Selection may be overridden by coprocessors; selected files are reserved in
 * filesCompacting until the compaction finishes or is cancelled.
 * @param priority caller-supplied priority, or {@code Store.NO_PRIORITY} to use the store's
 * @param tracker life-cycle tracker passed through to hooks and the request
 * @param user user for coprocessor hooks; may be null
 * @return the prepared compaction context, or empty if nothing was selected
 */
public Optional<CompactionContext> requestCompaction(int priority,
    CompactionLifeCycleTracker tracker, User user) throws IOException {
  // don't even select for compaction if writes are disabled
  if (!this.areWritesEnabled()) {
    return Optional.empty();
  }
  // Before we do compaction, try to get rid of unneeded files to simplify things.
  removeUnneededFiles();

  final CompactionContext compaction = storeEngine.createCompaction();
  CompactionRequestImpl request = null;
  this.lock.readLock().lock();
  try {
    synchronized (filesCompacting) {
      // First, see if coprocessor would want to override selection.
      if (this.getCoprocessorHost() != null) {
        final List<HStoreFile> candidatesForCoproc = compaction.preSelect(this.filesCompacting);
        boolean override = getCoprocessorHost().preCompactSelection(this,
            candidatesForCoproc, tracker, user);
        if (override) {
          // Coprocessor is overriding normal file selection.
          compaction.forceSelect(new CompactionRequestImpl(candidatesForCoproc));
        }
      }

      // Normal case - coprocessor is not overriding file selection.
      if (!compaction.hasSelection()) {
        boolean isUserCompaction = priority == Store.PRIORITY_USER;
        // compareAndSet guarantees only one in-flight compaction claims the off-peak slot.
        boolean mayUseOffPeak = offPeakHours.isOffPeakHour() &&
            offPeakCompactionTracker.compareAndSet(false, true);
        try {
          compaction.select(this.filesCompacting, isUserCompaction,
              mayUseOffPeak, forceMajor && filesCompacting.isEmpty());
        } catch (IOException e) {
          if (mayUseOffPeak) {
            offPeakCompactionTracker.set(false);
          }
          throw e;
        }
        assert compaction.hasSelection();
        if (mayUseOffPeak && !compaction.getRequest().isOffPeak()) {
          // Compaction policy doesn't want to take advantage of off-peak.
          offPeakCompactionTracker.set(false);
        }
      }
      if (this.getCoprocessorHost() != null) {
        this.getCoprocessorHost().postCompactSelection(
            this, ImmutableList.copyOf(compaction.getRequest().getFiles()), tracker,
            compaction.getRequest(), user);
      }
      // Finally, we have the resulting files list. Check if we have any files at all.
      request = compaction.getRequest();
      Collection<HStoreFile> selectedFiles = request.getFiles();
      if (selectedFiles.isEmpty()) {
        return Optional.empty();
      }

      addToCompactingFiles(selectedFiles);

      // If we're enqueuing a major, clear the force flag.
      this.forceMajor = this.forceMajor && !request.isMajor();

      // Set common request properties.
      // Set priority, either override value supplied by caller or from store.
      request.setPriority((priority != Store.NO_PRIORITY) ? priority : getCompactPriority());
      request.setDescription(getRegionInfo().getRegionNameAsString(), getColumnFamilyName());
      request.setTracker(tracker);
    }
  } finally {
    this.lock.readLock().unlock();
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug(getRegionInfo().getEncodedName() + " - " + getColumnFamilyName()
        + ": Initiating " + (request.isMajor() ? "major" : "minor") + " compaction"
        + (request.isAllFiles() ? " (all files)" : ""));
  }
  this.region.reportCompactionRequestStart(request.isMajor());
  return Optional.of(compaction);
}

/** Adds the files to compacting files. filesCompacting must be locked. */
private void addToCompactingFiles(Collection<HStoreFile> filesToAdd) {
  if (CollectionUtils.isEmpty(filesToAdd)) {
    return;
  }
  // Check that we do not try to compact the same StoreFile twice.
  if (!Collections.disjoint(filesCompacting, filesToAdd)) {
    Preconditions.checkArgument(false, "%s overlaps with %s", filesToAdd, filesCompacting);
  }
  filesCompacting.addAll(filesToAdd);
  Collections.sort(filesCompacting, storeEngine.getStoreFileManager()
      .getStoreFileComparator());
}

/**
 * Removes store files whose TTL has fully expired, recording the removal like a
 * compaction (WAL marker, file replacement, size update). Skipped when disabled by
 * config or when the family keeps minimum versions.
 */
private void removeUnneededFiles() throws IOException {
  if (!conf.getBoolean("hbase.store.delete.expired.storefile", true)) {
    return;
  }
  if (getColumnFamilyDescriptor().getMinVersions() > 0) {
    LOG.debug("Skipping expired store file removal due to min version being {}",
        getColumnFamilyDescriptor().getMinVersions());
    return;
  }
  this.lock.readLock().lock();
  Collection<HStoreFile> delSfs = null;
  try {
    synchronized (filesCompacting) {
      long cfTtl = getStoreFileTtl();
      if (cfTtl != Long.MAX_VALUE) {
        delSfs = storeEngine.getStoreFileManager().getUnneededFiles(
            EnvironmentEdgeManager.currentTime() - cfTtl, filesCompacting);
        addToCompactingFiles(delSfs);
      }
    }
  } finally {
    this.lock.readLock().unlock();
  }

  if (CollectionUtils.isEmpty(delSfs)) {
    return;
  }

  Collection<HStoreFile> newFiles = Collections.emptyList(); // No new files.
writeCompactionWalRecord(delSfs, newFiles);
  // (tail of removeUnneededFiles(): record the expired-file removal like a compaction)
  replaceStoreFiles(delSfs, newFiles);
  completeCompaction(delSfs);
  LOG.info("Completed removal of " + delSfs.size() + " unnecessary (expired) file(s) in "
      + this + " of " + this.getRegionInfo().getRegionNameAsString()
      + "; total size for store is "
      + TraditionalBinaryPrefix.long2String(storeSize.get(), "", 1));
}

/** Releases a previously requested but not yet executed compaction. */
public void cancelRequestedCompaction(CompactionContext compaction) {
  finishCompactionRequest(compaction.getRequest());
}

/**
 * Bookkeeping after a compaction request ends (success, failure, or cancel): reports to
 * the region, frees the off-peak slot if held, and releases the reserved files.
 */
protected void finishCompactionRequest(CompactionRequestImpl cr) {
  this.region.reportCompactionRequestEnd(cr.isMajor(), cr.getFiles().size(), cr.getSize());
  if (cr.isOffPeak()) {
    offPeakCompactionTracker.set(false);
    cr.setOffPeak(false);
  }
  synchronized (filesCompacting) {
    filesCompacting.removeAll(cr.getFiles());
  }
}

/**
 * Validates a store file by opening and closing it. In HFileV2 this should not be an expensive
 * operation.
 * @param path the path to the store file
 */
private void validateStoreFile(Path path) throws IOException {
  HStoreFile storeFile = null;
  try {
    storeFile = createStoreFileAndReader(path);
  } catch (IOException e) {
    LOG.error("Failed to open store file : {}, keeping it in tmp location", path, e);
    throw e;
  } finally {
    if (storeFile != null) {
      // Close the reader opened purely for validation.
      storeFile.closeStoreFile(false);
    }
  }
}

/**
 * Update counts. Recomputes the cached store size and uncompressed-bytes totals from
 * the current set of store files.
 * @param compactedFiles list of files that were compacted
 */
@VisibleForTesting
protected void completeCompaction(Collection<HStoreFile> compactedFiles)
// Rename this method! TODO.
    throws IOException {
  this.storeSize.set(0L);
  this.totalUncompressedBytes.set(0L);
  for (HStoreFile hsf : this.storeEngine.getStoreFileManager().getStorefiles()) {
    StoreFileReader r = hsf.getReader();
    if (r == null) {
      LOG.warn("StoreFile {} has a null Reader", hsf);
      continue;
    }
    this.storeSize.addAndGet(r.length());
    this.totalUncompressedBytes.addAndGet(r.getTotalUncompressedBytes());
  }
}

/**
 * Caps a requested version count at this family's configured maximum.
 * @param wantedVersions How many versions were asked for.
 * @return wantedVersions or this family's {@link HConstants#VERSIONS}.
 */
int versionsToReturn(final int wantedVersions) {
  if (wantedVersions <= 0) {
    throw new IllegalArgumentException("Number of versions must be > 0");
  }
  // Make sure we do not return more than maximum versions for this store.
  int maxVersions = this.family.getMaxVersions();
  return wantedVersions > maxVersions ? maxVersions: wantedVersions;
}

@Override
public boolean canSplit() {
  this.lock.readLock().lock();
  try {
    // Not split-able if we find a reference store file present in the store.
    boolean result = !hasReferences();
    if (!result) {
      LOG.trace("Not splittable; has references: {}", this);
    }
    return result;
  } finally {
    this.lock.readLock().unlock();
  }
}

/**
 * Determines if Store should be split.
 * @return the split row, or empty if the store has references or lookup failed
 */
public Optional<byte[]> getSplitPoint() {
  this.lock.readLock().lock();
  try {
    // Should already be enforced by the split policy!
    assert !this.getRegionInfo().isMetaRegion();
    // Not split-able if we find a reference store file present in the store.
    if (hasReferences()) {
      LOG.trace("Not splittable; has references: {}", this);
      return Optional.empty();
    }
    return this.storeEngine.getStoreFileManager().getSplitPoint();
  } catch(IOException e) {
    LOG.warn("Failed getting store size for {}", this, e);
  } finally {
    this.lock.readLock().unlock();
  }
  return Optional.empty();
}

@Override
public long getLastCompactSize() {
  return this.lastCompactSize;
}

@Override
public long getSize() {
  return storeSize.get();
}

/** Marks this store so its next selected compaction is forced major. */
public void triggerMajorCompaction() {
  this.forceMajor = true;
}

//////////////////////////////////////////////////////////////////////////////
// File administration
//////////////////////////////////////////////////////////////////////////////

/**
 * Return a scanner for both the memstore and the HStore files. Assumes we are not in a
 * compaction.
 * @param scan Scan to apply when scanning the stores
 * @param targetCols columns to scan
 * @param readPt the read point of the current scan
 * @return a scanner over the current key values
 * @throws IOException on failure
 */
public KeyValueScanner getScanner(Scan scan, final NavigableSet<byte[]> targetCols, long readPt)
    throws IOException {
  lock.readLock().lock();
  try {
    ScanInfo scanInfo;
    if (this.getCoprocessorHost() != null) {
      // Coprocessors may customize the scan settings (e.g. TTL, max versions).
      scanInfo = this.getCoprocessorHost().preStoreScannerOpen(this);
    } else {
      scanInfo = getScanInfo();
    }
    return createScanner(scan, scanInfo, targetCols, readPt);
  } finally {
    lock.readLock().unlock();
  }
}

// HMobStore will override this method to return its own implementation.
protected KeyValueScanner createScanner(Scan scan, ScanInfo scanInfo,
    NavigableSet<byte[]> targetCols, long readPt) throws IOException {
  return scan.isReversed() ? new ReversedStoreScanner(this,
      scanInfo, scan, targetCols, readPt) : new StoreScanner(this,
      scanInfo, scan, targetCols, readPt);
}

/**
 * Recreates the scanners on the current list of active store file scanners
 * @param currentFileScanners the current set of active store file scanners
 * @param cacheBlocks cache the blocks or not
 * @param usePread use pread or not
 * @param isCompaction is the scanner for compaction
 * @param matcher the scan query matcher
 * @param startRow the scan's start row
 * @param includeStartRow should the scan include the start row
 * @param stopRow the scan's stop row
 * @param includeStopRow should the scan include the stop row
 * @param readPt the read point of the current scan
 * @param includeMemstoreScanner whether the current scanner should include memstorescanner
 * @return list of scanners recreated on the current Scanners
 */
public List<KeyValueScanner> recreateScanners(List<KeyValueScanner> currentFileScanners,
    boolean cacheBlocks, boolean usePread, boolean isCompaction, ScanQueryMatcher matcher,
    byte[] startRow, boolean includeStartRow, byte[] stopRow, boolean includeStopRow,
    long readPt, boolean includeMemstoreScanner) throws IOException {
  this.lock.readLock().lock();
  try {
    // Index both live and compacted files by active file name so exhausted scanners
    // can be matched back to their files.
    Map<String, HStoreFile> name2File =
        new HashMap<>(getStorefilesCount() + getCompactedFilesCount());
    for (HStoreFile file : getStorefiles()) {
      name2File.put(file.getFileInfo().getActiveFileName(), file);
    }
    Collection<HStoreFile> compactedFiles = getCompactedFiles();
    for (HStoreFile file : IterableUtils.emptyIfNull(compactedFiles)) {
      name2File.put(file.getFileInfo().getActiveFileName(), file);
    }
    List<HStoreFile> filesToReopen = new ArrayList<>();
    for (KeyValueScanner kvs : currentFileScanners) {
      assert kvs.isFileScanner();
      if (kvs.peek() == null) {
        // Scanner already exhausted; no need to reopen its file.
        continue;
      }
      filesToReopen.add(name2File.get(kvs.getFilePath().getName()));
    }
    if (filesToReopen.isEmpty()) {
      return null;
    }
    return getScanners(filesToReopen, cacheBlocks, false, false, matcher, startRow,
        includeStartRow, stopRow, includeStopRow, readPt, false);
  } finally {
    this.lock.readLock().unlock();
  }
}

@Override
public String toString() {
  return this.getColumnFamilyName();
}

@Override
public int getStorefilesCount() {
  return this.storeEngine.getStoreFileManager().getStorefileCount();
}

@Override
public int getCompactedFilesCount() {
  return this.storeEngine.getStoreFileManager().getCompactedFilesCount();
}

// Ages (now - creation time) of all HFiles that have a live reader.
private LongStream getStoreFileAgeStream() {
  return this.storeEngine.getStoreFileManager().getStorefiles().stream().filter(sf -> {
    if (sf.getReader() == null) {
      LOG.warn("StoreFile {} has a null Reader", sf);
      return false;
    } else {
      return true;
    }
  }).filter(HStoreFile::isHFile).mapToLong(sf -> sf.getFileInfo().getCreatedTimestamp())
      .map(t -> EnvironmentEdgeManager.currentTime() - t);
}

@Override
public OptionalLong getMaxStoreFileAge() {
  return getStoreFileAgeStream().max();
}

@Override
public OptionalLong getMinStoreFileAge() {
  return getStoreFileAgeStream().min();
}

@Override
public OptionalDouble getAvgStoreFileAge() {
  return getStoreFileAgeStream().average();
}

@Override
public long getNumReferenceFiles() {
  return this.storeEngine.getStoreFileManager().getStorefiles().stream()
      .filter(HStoreFile::isReference).count();
}

@Override
public long getNumHFiles() {
  return this.storeEngine.getStoreFileManager().getStorefiles().stream()
      .filter(HStoreFile::isHFile).count();
}

@Override
public long getStoreSizeUncompressed() {
  return this.totalUncompressedBytes.get();
}

@Override
public long getStorefilesSize() {
  // Include all StoreFiles
  return getStorefilesSize(this.storeEngine.getStoreFileManager().getStorefiles(), sf -> true);
}

@Override
public long getHFilesSize() {
  // Include only StoreFiles which are HFiles
  return getStorefilesSize(this.storeEngine.getStoreFileManager().getStorefiles(),
      HStoreFile::isHFile);
}

private long getTotalUncompressedBytes(List<HStoreFile> files) {
  return files.stream()
      .mapToLong(file -> getStorefileFieldSize(file, StoreFileReader::getTotalUncompressedBytes))
      .sum();
}

private long getStorefilesSize(Collection<HStoreFile> files, Predicate<HStoreFile> predicate) {
  return files.stream().filter(predicate)
      .mapToLong(file -> getStorefileFieldSize(file, StoreFileReader::length)).sum();
}

// Extracts a size field from a file's reader; 0 when the file or its reader is absent.
private long getStorefileFieldSize(HStoreFile file, ToLongFunction<StoreFileReader> f) {
  if (file == null) {
    return 0L;
  }
  StoreFileReader reader = file.getReader();
  if (reader == null) {
    return 0L;
  }
  return f.applyAsLong(reader);
}

private long getStorefilesFieldSize(ToLongFunction<StoreFileReader> f) {
  return this.storeEngine.getStoreFileManager().getStorefiles().stream()
      .mapToLong(file -> getStorefileFieldSize(file, f)).sum();
}

@Override
public long getStorefilesRootLevelIndexSize() {
  return getStorefilesFieldSize(StoreFileReader::indexSize);
}

@Override
public long getTotalStaticIndexSize() {
  return getStorefilesFieldSize(StoreFileReader::getUncompressedDataIndexSize);
}

@Override
public long getTotalStaticBloomSize() {
  return getStorefilesFieldSize(StoreFileReader::getTotalBloomSize);
}

@Override
public MemStoreSize getMemStoreSize() {
  return this.memstore.size();
}
@Override public int getCompactPriority() { int priority = this.storeEngine.getStoreFileManager().getStoreCompactionPriority(); if (priority == PRIORITY_USER) { LOG.warn("Compaction priority is USER despite there being no user compaction"); } return priority; } public boolean throttleCompaction(long compactionSize) { return storeEngine.getCompactionPolicy().throttleCompaction(compactionSize); } public HRegion getHRegion() { return this.region; } public RegionCoprocessorHost getCoprocessorHost() { return this.region.getCoprocessorHost(); } @Override public RegionInfo getRegionInfo() { return this.fs.getRegionInfo(); } @Override public boolean areWritesEnabled() { return this.region.areWritesEnabled(); } @Override public long getSmallestReadPoint() { return this.region.getSmallestReadPoint(); } /** * Adds or replaces the specified KeyValues. * <p> * For each KeyValue specified, if a cell with the same row, family, and qualifier exists in * MemStore, it will be replaced. Otherwise, it will just be inserted to MemStore. * <p> * This operation is atomic on each KeyValue (row/family/qualifier) but not necessarily atomic * across all of them. 
* @param readpoint readpoint below which we can safely remove duplicate KVs */ public void upsert(Iterable<Cell> cells, long readpoint, MemStoreSizing memstoreSizing) throws IOException { this.lock.readLock().lock(); try { this.memstore.upsert(cells, readpoint, memstoreSizing); } finally { this.lock.readLock().unlock(); } } public StoreFlushContext createFlushContext(long cacheFlushId, FlushLifeCycleTracker tracker) { return new StoreFlusherImpl(cacheFlushId, tracker); } private final class StoreFlusherImpl implements StoreFlushContext { private final FlushLifeCycleTracker tracker; private final long cacheFlushSeqNum; private MemStoreSnapshot snapshot; private List<Path> tempFiles; private List<Path> committedFiles; private long cacheFlushCount; private long cacheFlushSize; private long outputFileSize; private StoreFlusherImpl(long cacheFlushSeqNum, FlushLifeCycleTracker tracker) { this.cacheFlushSeqNum = cacheFlushSeqNum; this.tracker = tracker; } /** * This is not thread safe. The caller should have a lock on the region or the store. * If necessary, the lock can be added with the patch provided in HBASE-10087 */ @Override public MemStoreSize prepare() { // passing the current sequence number of the wal - to allow bookkeeping in the memstore this.snapshot = memstore.snapshot(); this.cacheFlushCount = snapshot.getCellsCount(); this.cacheFlushSize = snapshot.getDataSize(); committedFiles = new ArrayList<>(1); return snapshot.getMemStoreSize(); } @Override public void flushCache(MonitoredTask status) throws IOException { RegionServerServices rsService = region.getRegionServerServices(); ThroughputController throughputController = rsService == null ? 
null : rsService.getFlushThroughputController(); tempFiles = HStore.this.flushCache(cacheFlushSeqNum, snapshot, status, throughputController, tracker); } @Override public boolean commit(MonitoredTask status) throws IOException { if (CollectionUtils.isEmpty(this.tempFiles)) { return false; } List<HStoreFile> storeFiles = new ArrayList<>(this.tempFiles.size()); for (Path storeFilePath : tempFiles) { try { HStoreFile sf = HStore.this.commitFile(storeFilePath, cacheFlushSeqNum, status); outputFileSize += sf.getReader().length(); storeFiles.add(sf); } catch (IOException ex) { LOG.error("Failed to commit store file {}", storeFilePath, ex); // Try to delete the files we have committed before. for (HStoreFile sf : storeFiles) { Path pathToDelete = sf.getPath(); try { sf.deleteStoreFile(); } catch (IOException deleteEx) { LOG.error(HBaseMarkers.FATAL, "Failed to delete store file we committed, " + "halting {}", pathToDelete, ex); Runtime.getRuntime().halt(1); } } throw new IOException("Failed to commit the flush", ex); } } for (HStoreFile sf : storeFiles) { if (HStore.this.getCoprocessorHost() != null) { HStore.this.getCoprocessorHost().postFlush(HStore.this, sf, tracker); } committedFiles.add(sf.getPath()); } HStore.this.flushedCellsCount.addAndGet(cacheFlushCount); HStore.this.flushedCellsSize.addAndGet(cacheFlushSize); HStore.this.flushedOutputFileSize.addAndGet(outputFileSize); // Add new file to store files. Clear snapshot too while we have the Store write lock. return HStore.this.updateStorefiles(storeFiles, snapshot.getId()); } @Override public long getOutputFileSize() { return outputFileSize; } @Override public List<Path> getCommittedFiles() { return committedFiles; } /** * Similar to commit, but called in secondary region replicas for replaying the * flush cache from primary region. Adds the new files to the store, and drops the * snapshot depending on dropMemstoreSnapshot argument. 
* @param fileNames names of the flushed files * @param dropMemstoreSnapshot whether to drop the prepared memstore snapshot */ @Override public void replayFlush(List<String> fileNames, boolean dropMemstoreSnapshot) throws IOException { List<HStoreFile> storeFiles = new ArrayList<>(fileNames.size()); for (String file : fileNames) { // open the file as a store file (hfile link, etc) StoreFileInfo storeFileInfo = fs.getStoreFileInfo(getColumnFamilyName(), file); HStoreFile storeFile = createStoreFileAndReader(storeFileInfo); storeFiles.add(storeFile); HStore.this.storeSize.addAndGet(storeFile.getReader().length()); HStore.this.totalUncompressedBytes .addAndGet(storeFile.getReader().getTotalUncompressedBytes()); if (LOG.isInfoEnabled()) { LOG.info("Region: " + HStore.this.getRegionInfo().getEncodedName() + " added " + storeFile + ", entries=" + storeFile.getReader().getEntries() + ", sequenceid=" + storeFile.getReader().getSequenceID() + ", filesize=" + TraditionalBinaryPrefix.long2String(storeFile.getReader().length(), "", 1)); } } long snapshotId = -1; // -1 means do not drop if (dropMemstoreSnapshot && snapshot != null) { snapshotId = snapshot.getId(); snapshot.close(); } HStore.this.updateStorefiles(storeFiles, snapshotId); } /** * Abort the snapshot preparation. Drops the snapshot if any. */ @Override public void abort() throws IOException { if (snapshot != null) { //We need to close the snapshot when aborting, otherwise, the segment scanner //won't be closed. If we are using MSLAB, the chunk referenced by those scanners //can't be released, thus memory leak snapshot.close(); HStore.this.updateStorefiles(Collections.emptyList(), snapshot.getId()); } } } @Override public boolean needsCompaction() { List<HStoreFile> filesCompactingClone = null; synchronized (filesCompacting) { filesCompactingClone = Lists.newArrayList(filesCompacting); } return this.storeEngine.needsCompaction(filesCompactingClone); } /** * Used for tests. 
* @return cache configuration for this Store. */ @VisibleForTesting public CacheConfig getCacheConfig() { return this.cacheConf; } public static final long FIXED_OVERHEAD = ClassSize.align(ClassSize.OBJECT + (27 * ClassSize.REFERENCE) + (2 * Bytes.SIZEOF_LONG) + (6 * Bytes.SIZEOF_INT) + (2 * Bytes.SIZEOF_BOOLEAN)); public static final long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD + ClassSize.OBJECT + ClassSize.REENTRANT_LOCK + ClassSize.CONCURRENT_SKIPLISTMAP + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + ClassSize.OBJECT + ScanInfo.FIXED_OVERHEAD); @Override public long heapSize() { MemStoreSize memstoreSize = this.memstore.size(); return DEEP_OVERHEAD + memstoreSize.getHeapSize(); } @Override public CellComparator getComparator() { return comparator; } public ScanInfo getScanInfo() { return scanInfo; } /** * Set scan info, used by test * @param scanInfo new scan info to use for test */ void setScanInfo(ScanInfo scanInfo) { this.scanInfo = scanInfo; } @Override public boolean hasTooManyStoreFiles() { return getStorefilesCount() > this.blockingFileCount; } @Override public long getFlushedCellsCount() { return flushedCellsCount.get(); } @Override public long getFlushedCellsSize() { return flushedCellsSize.get(); } @Override public long getFlushedOutputFileSize() { return flushedOutputFileSize.get(); } @Override public long getCompactedCellsCount() { return compactedCellsCount.get(); } @Override public long getCompactedCellsSize() { return compactedCellsSize.get(); } @Override public long getMajorCompactedCellsCount() { return majorCompactedCellsCount.get(); } @Override public long getMajorCompactedCellsSize() { return majorCompactedCellsSize.get(); } /** * Returns the StoreEngine that is backing this concrete implementation of Store. * @return Returns the {@link StoreEngine} object used internally inside this HStore object. 
*/ @VisibleForTesting public StoreEngine<?, ?, ?, ?> getStoreEngine() { return this.storeEngine; } protected OffPeakHours getOffPeakHours() { return this.offPeakHours; } /** * {@inheritDoc} */ @Override public void onConfigurationChange(Configuration conf) { this.conf = new CompoundConfiguration() .add(conf) .addBytesMap(family.getValues()); this.storeEngine.compactionPolicy.setConf(conf); this.offPeakHours = OffPeakHours.getInstance(conf); } /** * {@inheritDoc} */ @Override public void registerChildren(ConfigurationManager manager) { // No children to register } /** * {@inheritDoc} */ @Override public void deregisterChildren(ConfigurationManager manager) { // No children to deregister } @Override public double getCompactionPressure() { return storeEngine.getStoreFileManager().getCompactionPressure(); } @Override public boolean isPrimaryReplicaStore() { return getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID; } /** * Sets the store up for a region level snapshot operation. * @see #postSnapshotOperation() */ public void preSnapshotOperation() { archiveLock.lock(); } /** * Perform tasks needed after the completion of snapshot operation. 
* @see #preSnapshotOperation() */ public void postSnapshotOperation() { archiveLock.unlock(); } /** * Closes and archives the compacted files under this store */ public synchronized void closeAndArchiveCompactedFiles() throws IOException { // ensure other threads do not attempt to archive the same files on close() archiveLock.lock(); try { lock.readLock().lock(); Collection<HStoreFile> copyCompactedfiles = null; try { Collection<HStoreFile> compactedfiles = this.getStoreEngine().getStoreFileManager().getCompactedfiles(); if (CollectionUtils.isNotEmpty(compactedfiles)) { // Do a copy under read lock copyCompactedfiles = new ArrayList<>(compactedfiles); } else { LOG.trace("No compacted files to archive"); } } finally { lock.readLock().unlock(); } if (CollectionUtils.isNotEmpty(copyCompactedfiles)) { removeCompactedfiles(copyCompactedfiles); } } finally { archiveLock.unlock(); } } /** * Archives and removes the compacted files * @param compactedfiles The compacted files in this store that are not active in reads */ private void removeCompactedfiles(Collection<HStoreFile> compactedfiles) throws IOException { final List<HStoreFile> filesToRemove = new ArrayList<>(compactedfiles.size()); final List<Long> storeFileSizes = new ArrayList<>(compactedfiles.size()); for (final HStoreFile file : compactedfiles) { synchronized (file) { try { StoreFileReader r = file.getReader(); if (r == null) { LOG.debug("The file {} was closed but still not archived", file); // HACK: Temporarily re-open the reader so we can get the size of the file. Ideally, // we should know the size of an HStoreFile without having to ask the HStoreFileReader // for that. 
long length = getStoreFileSize(file); filesToRemove.add(file); storeFileSizes.add(length); continue; } if (file.isCompactedAway() && !file.isReferencedInReads()) { // Even if deleting fails we need not bother as any new scanners won't be // able to use the compacted file as the status is already compactedAway LOG.trace("Closing and archiving the file {}", file); // Copy the file size before closing the reader final long length = r.length(); r.close(true); // Just close and return filesToRemove.add(file); // Only add the length if we successfully added the file to `filesToRemove` storeFileSizes.add(length); } else { LOG.info("Can't archive compacted file " + file.getPath() + " because of either isCompactedAway=" + file.isCompactedAway() + " or file has reference, isReferencedInReads=" + file.isReferencedInReads() + ", refCount=" + r.getRefCount() + ", skipping for now."); } } catch (Exception e) { LOG.error("Exception while trying to close the compacted store file {}", file.getPath(), e); } } } if (this.isPrimaryReplicaStore()) { // Only the primary region is allowed to move the file to archive. // The secondary region does not move the files to archive. Any active reads from // the secondary region will still work because the file as such has active readers on it. if (!filesToRemove.isEmpty()) { LOG.debug("Moving the files {} to archive", filesToRemove); // Only if this is successful it has to be removed try { this.fs.removeStoreFiles(this.getColumnFamilyDescriptor().getNameAsString(), filesToRemove); } catch (FailedArchiveException fae) { // Even if archiving some files failed, we still need to clear out any of the // files which were successfully archived. Otherwise we will receive a // FileNotFoundException when we attempt to re-archive them in the next go around. 
Collection<Path> failedFiles = fae.getFailedFiles(); Iterator<HStoreFile> iter = filesToRemove.iterator(); Iterator<Long> sizeIter = storeFileSizes.iterator(); while (iter.hasNext()) { sizeIter.next(); if (failedFiles.contains(iter.next().getPath())) { iter.remove(); sizeIter.remove(); } } if (!filesToRemove.isEmpty()) { clearCompactedfiles(filesToRemove); } throw fae; } } } if (!filesToRemove.isEmpty()) { // Clear the compactedfiles from the store file manager clearCompactedfiles(filesToRemove); // Try to send report of this archival to the Master for updating quota usage faster reportArchivedFilesForQuota(filesToRemove, storeFileSizes); } } /** * Computes the length of a store file without succumbing to any errors along the way. If an * error is encountered, the implementation returns {@code 0} instead of the actual size. * * @param file The file to compute the size of. * @return The size in bytes of the provided {@code file}. */ long getStoreFileSize(HStoreFile file) { long length = 0; try { file.initReader(); length = file.getReader().length(); } catch (IOException e) { LOG.trace("Failed to open reader when trying to compute store file size, ignoring", e); } finally { try { file.closeStoreFile( file.getCacheConf() != null ? file.getCacheConf().shouldEvictOnClose() : true); } catch (IOException e) { LOG.trace("Failed to close reader after computing store file size, ignoring", e); } } return length; } public Long preFlushSeqIDEstimation() { return memstore.preFlushSeqIDEstimation(); } @Override public boolean isSloppyMemStore() { return this.memstore.isSloppy(); } private void clearCompactedfiles(List<HStoreFile> filesToRemove) throws IOException { LOG.trace("Clearing the compacted file {} from this store", filesToRemove); try { lock.writeLock().lock(); this.getStoreEngine().getStoreFileManager().removeCompactedFiles(filesToRemove); } finally { lock.writeLock().unlock(); } } void reportArchivedFilesForQuota(List<? 
extends StoreFile> archivedFiles, List<Long> fileSizes) { // Sanity check from the caller if (archivedFiles.size() != fileSizes.size()) { throw new RuntimeException("Coding error: should never see lists of varying size"); } RegionServerServices rss = this.region.getRegionServerServices(); if (rss == null) { return; } List<Entry<String,Long>> filesWithSizes = new ArrayList<>(archivedFiles.size()); Iterator<Long> fileSizeIter = fileSizes.iterator(); for (StoreFile storeFile : archivedFiles) { final long fileSize = fileSizeIter.next(); if (storeFile.isHFile() && fileSize != 0) { filesWithSizes.add(Maps.immutableEntry(storeFile.getPath().getName(), fileSize)); } } if (LOG.isTraceEnabled()) { LOG.trace("Files archived: " + archivedFiles + ", reporting the following to the Master: " + filesWithSizes); } boolean success = rss.reportFileArchivalForQuotas(getTableName(), filesWithSizes); if (!success) { LOG.warn("Failed to report archival of files: " + filesWithSizes); } } @Override public int getCurrentParallelPutCount() { return currentParallelPutCount.get(); } public int getStoreRefCount() { return this.storeEngine.getStoreFileManager().getStorefiles().stream() .filter(sf -> sf.getReader() != null).filter(HStoreFile::isHFile) .mapToInt(HStoreFile::getRefCount).sum(); } /** * @return get maximum ref count of storeFile among all compacted HStore Files * for the HStore */ public int getMaxCompactedStoreFileRefCount() { OptionalInt maxCompactedStoreFileRefCount = this.storeEngine.getStoreFileManager() .getCompactedfiles() .stream() .filter(sf -> sf.getReader() != null) .filter(HStoreFile::isHFile) .mapToInt(HStoreFile::getRefCount) .max(); return maxCompactedStoreFileRefCount.isPresent() ? maxCompactedStoreFileRefCount.getAsInt() : 0; } }
apache-2.0
Kevin-Lee/kommonlee-core
src/main/java/org/elixirian/kommonlee/type/functional/Function6.java
2557
/**
 * This project is licensed under the Apache License, Version 2.0
 * if the following condition is met:
 * (otherwise it cannot be used by anyone but the author, Kevin, only)
 *
 * The original KommonLee project is owned by Lee, Seong Hyun (Kevin).
 *
 * -What does it mean to you?
 * Nothing, unless you want to take the ownership of
 * "the original project" (not yours or forked & modified one).
 * You are free to use it for both non-commercial and commercial projects
 * and free to modify it as the Apache License allows.
 *
 * -So why is this condition necessary?
 * It is only to protect the original project (See the case of Java).
 *
 *
 * Copyright 2009 Lee, Seong Hyun (Kevin)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.elixirian.kommonlee.type.functional;

/**
 * A function that accepts six arguments and produces a result.
 *
 * <pre>
 *     ___  _____                                              _____
 *    /   \/    / ______ __________________  ______ __ ______ /    /   ______  ______
 *   /        / _/ __  // /  /   / /  /   / / /  / // //     /    /   /  ___ \/  ___ \
 *  /        \ /  /_/ _/  _  _  /  _  _  / / /__/ _/ /_    _/    /___/  _____/  _____/
 * /____/\____\/_____//__//_//_/__//_//_/ /_____//___/ /__//________/\_____/ \_____/
 * </pre>
 *
 * <pre>
 *     ___  _____  __________  ___________ _____  ____
 *    /   \/    / /      \   \/   /_    _//     \/   /
 *   /        /  /    ___/\      / /   / /          /
 *  /        \  /    ___/  \    /_/   /_/          /
 * /____/\____\/_______/    \__//______/___/\_____/
 * </pre>
 *
 * @author Lee, SeongHyun (Kevin)
 * @version 0.0.1 (2010-11-13)
 * @param <X1>
 *          input1
 * @param <X2>
 *          input2
 * @param <X3>
 *          input3
 * @param <X4>
 *          input4
 * @param <X5>
 *          input5
 * @param <X6>
 *          input6
 * @param <R>
 *          result
 */
public interface Function6<X1, X2, X3, X4, X5, X6, R>
{
  /**
   * Applies this function to the given six inputs.
   *
   * @param input1
   *          the first input
   * @param input2
   *          the second input
   * @param input3
   *          the third input
   * @param input4
   *          the fourth input
   * @param input5
   *          the fifth input
   * @param input6
   *          the sixth input
   * @return the result of applying this function to the given inputs
   */
  R apply(X1 input1, X2 input2, X3 input3, X4 input4, X5 input5, X6 input6);
}
apache-2.0
cerner/beadledom
swagger2/src/main/java/com/cerner/beadledom/swagger2/Swagger2Module.java
2954
package com.cerner.beadledom.swagger2; import com.google.inject.AbstractModule; import com.google.inject.multibindings.Multibinder; import io.swagger.converter.ModelConverter; import io.swagger.converter.ModelConverters; import io.swagger.jaxrs.config.JaxrsScanner; import io.swagger.jaxrs.config.SwaggerContextService; import io.swagger.jaxrs.listing.SwaggerSerializers; import io.swagger.models.Info; import java.util.Set; import javax.annotation.PostConstruct; import javax.inject.Inject; /** * Configures a service to serve Swagger 2 documentation at '/api-docs'. * * <p>To use this, you must provide a Swagger Info object such as the following: * * <p><pre>{@code @Provides * Info provideSwaggerConfig(ServiceMetadata serviceMetadata) { * Info info = new Info(); * info.title("Name of My Service") * .description("A description of my service. My service lets you do some things. It's owned " * + " by My Awesome Team") * .setVersion(serviceMetadata.getBuildInfo().getVersion()); * return info; * } * }</pre> * * <p>Then annotate your resources, operations, and models with the Swagger annotations. * * <p>Provides the following JAX-RS resources and providers: * <ul> * <li>{@link SwaggerApiResource}</li> * <li>{@link SwaggerUiResource}</li> * <li>{@link SwaggerSerializers}</li> * </ul> * * <p>Requires: * <ul> * <li>{@link io.swagger.models.Info}</li> * </ul> * * <p>You may also supply set bindings for {@link io.swagger.converter.ModelConverter}. * These will be added to the list of model converters (before the default converter, but otherwise * in unspecified order). 
*/ public class Swagger2Module extends AbstractModule { @Override protected void configure() { requireBinding(Info.class); bind(SwaggerApiResource.class); bind(SwaggerUiResource.class); bind(SwaggerSerializers.class); bind(JaxrsScanner.class).to(SwaggerGuiceJaxrsScanner.class); bind(SwaggerLifecycleHook.class).asEagerSingleton(); // Create empty multibinder in case no ModelConverter bindings exist Multibinder.newSetBinder(binder(), ModelConverter.class); } static class SwaggerLifecycleHook { private final JaxrsScanner jaxrsScanner; private final Set<ModelConverter> modelConverters; @Inject SwaggerLifecycleHook( JaxrsScanner jaxrsScanner, Set<ModelConverter> modelConverters) { this.jaxrsScanner = jaxrsScanner; this.modelConverters = modelConverters; } @PostConstruct public void startup() { for (ModelConverter modelConverter : modelConverters) { ModelConverters.getInstance().addConverter(modelConverter); } // Swagger uses a lot of static state / servlet context to store things. This is how we make // sure it is initialized with our scanner. new SwaggerContextService().withScanner(jaxrsScanner).initScanner(); } } }
apache-2.0
JanewzhWang/dasein-cloud-core
src/main/java/org/dasein/cloud/compute/SpotVirtualMachineRequest.java
4782
/**
 * Copyright (C) 2009-2015 Dell, Inc.
 * See annotations for authorship information
 *
 * ====================================================================
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * ====================================================================
 */

package org.dasein.cloud.compute;

import javax.annotation.Nonnegative;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;

/**
 * The Spot VM request as it is held with the cloud provider. As long as this request exists
 * in an active state within the cloud then the provider will spin up VMs as the price history allows.
 */
public class SpotVirtualMachineRequest {
    private String                        providerSpotVmRequestId;
    private float                         spotPrice;
    private SpotVirtualMachineRequestType type;
    private String                        providerMachineImageId;
    private String                        productId;
    private long                          validUntilTimestamp;
    private long                          validFromTimestamp;
    private long                          createdTimestamp;
    private long                          fulfillmentTimestamp;
    private String                        fulfillmentDataCenterId;
    private String                        launchGroup;

    /**
     * Builds a fully populated Spot VM request description.
     *
     * @param providerSpotVmRequestId the id of the request
     * @param spotPrice               maximum spot vm price when the request should be fulfilled
     * @param type                    type of the request
     * @param providerMachineImageId  machine image which needs to be used for fulfillment
     * @param productId               product which needs to be used for fulfillment
     * @param createdTimestamp        when the request has been created
     * @param validFromTimestamp      when the request is valid from
     * @param validUntilTimestamp     when the request expires
     * @param fulfillmentTimestamp    when the request has been fulfilled
     * @param fulfillmentDataCenterId which data center the request has been fulfilled in
     * @param launchGroup             launch group used to launch instances together
     * @return a constructed request instance
     */
    public static @Nonnull SpotVirtualMachineRequest getInstance(
            @Nonnull String providerSpotVmRequestId, @Nonnegative float spotPrice,
            @Nonnull SpotVirtualMachineRequestType type, @Nonnull String providerMachineImageId,
            @Nonnull String productId, @Nonnegative long createdTimestamp,
            @Nonnegative long validFromTimestamp, @Nonnegative long validUntilTimestamp,
            long fulfillmentTimestamp, @Nullable String fulfillmentDataCenterId,
            @Nullable String launchGroup ) {
        SpotVirtualMachineRequest request = new SpotVirtualMachineRequest();
        // Identity and pricing
        request.providerSpotVmRequestId = providerSpotVmRequestId;
        request.spotPrice = spotPrice;
        request.type = type;
        // Fulfillment inputs
        request.providerMachineImageId = providerMachineImageId;
        request.productId = productId;
        // Lifecycle timestamps
        request.createdTimestamp = createdTimestamp;
        request.validFromTimestamp = validFromTimestamp;
        request.validUntilTimestamp = validUntilTimestamp;
        request.fulfillmentTimestamp = fulfillmentTimestamp;
        // Fulfillment outcome
        request.fulfillmentDataCenterId = fulfillmentDataCenterId;
        request.launchGroup = launchGroup;
        return request;
    }

    public @Nonnull String getProviderSpotVmRequestId() {
        return providerSpotVmRequestId;
    }

    public @Nonnegative float getSpotPrice() {
        return spotPrice;
    }

    public @Nonnull SpotVirtualMachineRequestType getType() {
        return type;
    }

    public long getFulfillmentTimestamp() {
        return fulfillmentTimestamp;
    }

    public @Nonnull String getProviderMachineImageId() {
        return providerMachineImageId;
    }

    public @Nonnull String getProductId() {
        return productId;
    }

    public @Nonnegative long getCreatedTimestamp() {
        return createdTimestamp;
    }

    public @Nullable String getFulfillmentDataCenterId() {
        return fulfillmentDataCenterId;
    }

    public @Nullable String getLaunchGroup() {
        return launchGroup;
    }

    public @Nonnegative long getValidUntilTimestamp() {
        return validUntilTimestamp;
    }

    public @Nonnegative long getValidFromTimestamp() {
        return validFromTimestamp;
    }
}
apache-2.0
Radomiej/JavityEngine
javity-engine/src/main/java/org/javity/engine/resources/BitmapFontResource.java
475
package org.javity.engine.resources;

import org.javity.engine.Resource;

import galaxy.rapid.asset.RapidAsset;

/**
 * Resource handle for a bitmap font asset. Constructing an instance with a path
 * eagerly triggers loading of the font through {@link RapidAsset}.
 */
public class BitmapFontResource implements Resource {

	private String path;

	// NOTE(review): leaves path null until set elsewhere — presumably kept for
	// a serialization framework; confirm before removing.
	public BitmapFontResource() {
	}

	public BitmapFontResource(String resourcePath) {
		this.path = resourcePath;
		// Loading starts as a constructor side effect.
		RapidAsset.INSTANCE.loadBitmapFont(resourcePath);
	}

	@Override
	public String getResourcePath() {
		return path;
	}
}
apache-2.0
liwujun/xmpp
src/com/way/activity/RegisterActivity.java
4114
package com.way.activity;

import org.jivesoftware.smack.AccountManager;
import org.jivesoftware.smack.Connection;
import org.jivesoftware.smack.ConnectionConfiguration;
import org.jivesoftware.smack.XMPPConnection;
import org.jivesoftware.smack.XMPPException;

import android.app.Activity;
import android.os.Bundle;
import android.view.View;
import android.view.View.OnClickListener;
import android.widget.Button;
import android.widget.EditText;

import com.way.util.PreferenceConstants;
import com.way.util.T;
import com.way.xx.R;

/**
 * 2014-05-21
 * @author li
 * Implementation of the account registration screen: validates the entered
 * account/password pair and creates the account on the XMPP server via Smack.
 */
public class RegisterActivity extends Activity {
	private Button mRegisterBtn;        // "confirm" button
	private EditText mRegisterPasswd1;  // password field
	private EditText mRegisterPasswd2;  // password confirmation field
	private EditText mRegisterAccount;  // account name field
	private static AccountManager mUserAccount;
	private static Connection connection;
	private static ConnectionConfiguration config;

	@Override
	protected void onCreate(Bundle savedInstanceState){
		super.onCreate(savedInstanceState);
		setContentView(R.layout.registeraccount);
		// NOTE(review): init() opens the XMPP connection on the UI thread in
		// onCreate; on modern Android this raises NetworkOnMainThreadException —
		// should be moved off the main thread.
		init();
		initView();
	}

	// Wires up the form widgets and the click handler that validates input and
	// submits the registration request.
	protected void initView(){
		mRegisterBtn=(Button)findViewById(R.id.register_queding_btn);
		mRegisterAccount=(EditText)findViewById(R.id.register_zhanghao_edt);
		mRegisterPasswd1=(EditText)findViewById(R.id.register_mima_edt);
		mRegisterPasswd2=(EditText)findViewById(R.id.register_mima_queding_edt);
		mRegisterBtn.setFocusable(true);
		mRegisterBtn.setClickable(true);
		mRegisterBtn.setOnClickListener(new OnClickListener(){
			public void onClick(View v){
				String passwd1=mRegisterPasswd1.getText().toString();
				String passwd2=mRegisterPasswd2.getText().toString();
				String account=mRegisterAccount.getText().toString();
				// Validation order: mismatch -> empty password -> too short -> empty account.
				if(!passwd1.equals(passwd2)){
					T.showShort(RegisterActivity.this, "两次输入不同密码");
				}else if(passwd1.equals("")){
					T.showShort(RegisterActivity.this, "请输入密码");
				}else if(passwd1.length()<6){
					T.showShort(RegisterActivity.this, "密码长度小于6位");
				}else if(mRegisterAccount.length()<=0){
					T.showShort(RegisterActivity.this, "请输入帐号");
				}else{
					try {
						// NOTE(review): network call on the UI thread; also, no user
						// feedback or finish() is performed on success — confirm intent.
						mUserAccount=connection.getAccountManager();
						mUserAccount.createAccount(account, passwd1);
					} catch (XMPPException e) {
						// TODO Auto-generated catch block
						T.showShort(RegisterActivity.this, "建立用户错误,请与li5jun@126.com联系");
						e.printStackTrace();
					}
				}
			}
		});
	}

	// Configures and opens the XMPP connection used for account creation.
	public void init() {
		try {
			// connection = new XMPPConnection(PreferenceConstants.Server);
			// connection.connect();
			/** 5222 is the default client port of an Openfire server; the client port can be checked in the admin console at http://192.168.8.32:9090/ */
			config = new ConnectionConfiguration(PreferenceConstants.Server,
					5222);
			/** whether to enable stream compression */
			config.setCompressionEnabled(true);
			/** whether to enable SASL authentication */
			config.setSASLAuthenticationEnabled(true);
			/** whether to enable the Smack debugger */
			config.setDebuggerEnabled(false);
			config.setReconnectionAllowed(true);
			config.setRosterLoadedAtLogin(true);
			/** create the connection object */
			connection = new XMPPConnection(config);
			/** establish the connection */
			connection.connect();
		} catch (XMPPException e) {
			e.printStackTrace();
		}
		// NOTE(review): if connect() failed above, getConnectionID() may be null;
		// fail() tolerates null but the connection is unusable at this point.
		fail(connection);
		fail(connection.getConnectionID());
	}

	// Despite its name, this is a debug print helper: substitutes args into
	// "{i}" placeholders (or appends them) and prints to stdout. No-op when o is
	// null or no args are given.
	private final void fail(Object o, Object... args) {
		if (o != null && args != null && args.length > 0) {
			String s = o.toString();
			for (int i = 0; i < args.length; i++) {
				String item = args[i] == null ? "" : args[i].toString();
				if (s.contains("{" + i + "}")) {
					s = s.replace("{" + i + "}", item);
				} else {
					s += " " + item;
				}
			}
			System.out.println(s);
		}
	}
}
apache-2.0
lshift/bletchley
src/main/java/net/lshift/spki/convert/Convert.java
3370
package net.lshift.spki.convert;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

/**
 * Annotations to guide the SExp converter.
 *
 * <p>Each nested annotation here is applied to a class (or to another
 * annotation) so the converter registry knows how to map that class to and
 * from an s-expression representation. All annotations are retained at
 * runtime because the registry inspects them reflectively.
 */
public class Convert {
    /**
     * An annotation for annotations - the registry uses this
     * to learn how to interpret an annotation in order to
     * construct a converter for a class
     */
    @Retention(RetentionPolicy.RUNTIME)
    @Target({ElementType.ANNOTATION_TYPE})
    public @interface ConverterFactoryClass {
        // Factory used to build the converter for classes carrying the
        // annotated annotation.
        Class<? extends ConverterFactory<?>> value();
    }

    /**
     * Each field has a specific position in the sexp
     */
    @Retention(RetentionPolicy.RUNTIME)
    @Target({ElementType.TYPE})
    @ConverterFactoryClass(PositionBeanConverterFactory.class)
    public @interface ByPosition {
        // Name of the sexp; fields are serialized in the order listed.
        String name();
        String[] fields();
    }

    /**
     * Each field gets a named sub-sexp in the sexp for this object
     */
    @Retention(RetentionPolicy.RUNTIME)
    @Target({ElementType.TYPE})
    @ConverterFactoryClass(NameBeanConverterFactory.class)
    public @interface ByName {
        String value();
    }

    /**
     * There's only one field, which is a list type; write the name first
     * then convert each element of the list one by one
     */
    @Retention(RetentionPolicy.RUNTIME)
    @Target({ElementType.TYPE})
    @ConverterFactoryClass(SequenceConverterFactory.class)
    public @interface SequenceConverted {
        String value();
    }

    /**
     * Fields are list types, then they're followed by a sequence.
     */
    @Retention(RetentionPolicy.RUNTIME)
    @Target({ElementType.TYPE})
    @ConverterFactoryClass(PositionSequenceConverterFactory.class)
    public @interface PositionSequence {
        String name();
        // Positional fields, followed by the field named by seq() which holds
        // the trailing sequence.
        String[] fields();
        String seq();
    }

    /**
     * This is one of several sub-classes, discriminated by the name of the sexp.
     */
    @Retention(RetentionPolicy.RUNTIME)
    @Target({ElementType.TYPE})
    @ConverterFactoryClass(DiscriminatingConverterFactory.class)
    public @interface Discriminated {
        // The candidate concrete subclasses to discriminate between.
        Class<?> [] value();
    }

    /**
     * The converter should be an instance of this class
     */
    @Retention(RetentionPolicy.RUNTIME)
    @Target({ElementType.TYPE})
    @ConverterFactoryClass(ConvertClassFactory.class)
    public @interface ConvertClass {
        Class<? extends StepConverter<?,?>> value();
    }

    /**
     * This specifies how to interpret annotations specifying
     * actions that should take place after the converter is registered.
     */
    @Retention(RetentionPolicy.RUNTIME)
    @Target({ElementType.ANNOTATION_TYPE})
    public @interface HandlerClass {
        Class<? extends AnnotationHandler<?>> value();
    }

    /**
     * Another converter must be registered for this converter to work
     * - usually a foreign class
     */
    @Retention(RetentionPolicy.RUNTIME)
    @Target({ElementType.TYPE})
    @HandlerClass(RequiresConverterHandler.class)
    public @interface RequiresConverter {
        Class<? extends Converter<?>> value();
    }

    /**
     * Mark a field as optional
     */
    @Retention(RetentionPolicy.RUNTIME)
    @Target({ElementType.FIELD})
    public @interface Nullable {
        // no fields
    }
}
apache-2.0
scalding/shib-cas-authn2
idp-cas-invoker/src/main/java/net/unicon/idp/authn/provider/CasLoginHandler.java
3374
package net.unicon.idp.authn.provider;

import java.io.IOException;
import java.net.URLEncoder;

import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import edu.internet2.middleware.shibboleth.idp.authn.provider.AbstractLoginHandler;
import edu.internet2.middleware.shibboleth.idp.authn.provider.ExternalAuthnSystemLoginHandler;

/**
 * CasLoginHandler replaces the {@link CasInvokerServlet} AND {@link CasAuthenticatorResource} (facade) from the earlier implementations.
 * Allows simplification of the SHIB-CAS authenticator by removing the need to configure and deploy a separate war.
 * @author chasegawa@unicon.net
 */
public class CasLoginHandler extends AbstractLoginHandler {
    private String callbackUrl;
    private String casLoginUrl;
    private Logger logger = LoggerFactory.getLogger(CasLoginHandler.class);

    /**
     * All attributes/parameters required.
     *
     * @param casLoginUrl base URL of the CAS /login endpoint
     * @param callbackUrl URL CAS should redirect back to after authentication
     * @throws IllegalArgumentException if either parameter is missing/blank
     */
    public CasLoginHandler(String casLoginUrl, String callbackUrl) {
        if (isEmpty(casLoginUrl)) {
            logger.error("Unable to create CasLoginHandler - missing casLoginUrl parameter. Please check $IDP_HOME/conf/handler.xml");
            throw new IllegalArgumentException(
                    "CasLoginHandler missing casLoginUrl attribute in handler configuration.");
        }
        this.casLoginUrl = casLoginUrl;
        if (isEmpty(callbackUrl)) {
            logger.error("Unable to create CasLoginHandler - missing callbackUrl parameter. Please check $IDP_HOME/conf/handler.xml");
            throw new IllegalArgumentException(
                    "CasLoginHandler missing callbackUrl attribute in handler configuration.");
        }
        this.callbackUrl = callbackUrl;
    }

    /**
     * Essentially StringUtils.isEmpty, but put this here to avoid another jar/dependency
     * @param string candidate value
     * @return true when null or blank after trimming
     */
    private boolean isEmpty(String string) {
        return null == string || "".equals(string.trim());
    }

    /**
     * Translate the SHIB request so that cas renew and/or gateway are set properly before handing off to CAS.
     * @see edu.internet2.middleware.shibboleth.idp.authn.LoginHandler#login(javax.servlet.http.HttpServletRequest, javax.servlet.http.HttpServletResponse)
     */
    @Override
    public void login(HttpServletRequest request, HttpServletResponse response) {
        Boolean force = (Boolean) request.getAttribute(ExternalAuthnSystemLoginHandler.FORCE_AUTHN_PARAM);
        if (null == force) {
            force = Boolean.FALSE;
        }
        setSupportsForceAuthentication(force);

        // Fix: the original mapping was inverted (force -> renew=false). A
        // forced re-authentication maps to CAS "renew=true". When not forcing,
        // omit the parameter entirely: per the CAS protocol, the mere presence
        // of "renew" is treated as set by the server.
        String authnType = force ? "&renew=true" : "";

        Boolean passive = (Boolean) request.getAttribute(ExternalAuthnSystemLoginHandler.PASSIVE_AUTHN_PARAM);
        if (null != passive) {
            setSupportsPassive(passive);
            if (passive) {
                authnType += "&gateway=true";
            }
        }
        try {
            // Fix: URL-encode the service callback so that any query
            // characters it contains survive the redirect. The encode call's
            // UnsupportedEncodingException is an IOException subtype, so the
            // existing catch covers it (and UTF-8 is always available).
            String service = URLEncoder.encode(callbackUrl, "UTF-8");
            response.sendRedirect(response.encodeRedirectURL(casLoginUrl + "?service=" + service + authnType));
        } catch (IOException e) {
            logger.error("Unable to redirect to CAS from LoginHandler", e);
        }
    }
}
apache-2.0
ebi-uniprot/QuickGOBE
geneproduct-rest/src/test/java/uk/ac/ebi/quickgo/geneproduct/service/converter/GeneProductDocConverterImplTest.java
4387
package uk.ac.ebi.quickgo.geneproduct.service.converter;

import uk.ac.ebi.quickgo.geneproduct.common.GeneProductDocument;
import uk.ac.ebi.quickgo.geneproduct.common.GeneProductType;
import uk.ac.ebi.quickgo.geneproduct.model.GeneProduct;

import java.util.Arrays;
import java.util.List;
import org.junit.Before;
import org.junit.Test;

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.nullValue;
import static org.hamcrest.core.IsEqual.equalTo;
import static uk.ac.ebi.quickgo.geneproduct.service.converter.GeneProductDocConverterImpl.DEFAULT_TAXON_ID;

/**
 * Unit tests the {@link GeneProductDocConverterImpl} class.
 */
public class GeneProductDocConverterImplTest {
    private static final String ID = "A0A000";
    private static final int TAX_ID = 789;
    private static final String DATABASE = "UniProt";
    private static final String SYMBOL = "G12345";
    private static final String TYPE = "protein";
    private static final String NAME = "moeA5";
    private static final String PARENT_ID = "QWERTY";
    private static final String REF_PROTEOME = "P1234";
    private static final String DATABASE_SUBSET = "SUB1";
    private static final String PROTEOME = "complete";
    private static final List<String> SYNONYMS = Arrays.asList("Q1234", "R1234", "S1234");

    private GeneProductDocConverter geneProductDocConverter;
    private GeneProductDocument geneProductDocument;

    /** Builds a fully-populated document fixture before each test. */
    @Before
    public void setup() {
        geneProductDocConverter = new GeneProductDocConverterImpl();

        geneProductDocument = new GeneProductDocument();
        geneProductDocument.id = ID;
        geneProductDocument.database = DATABASE;
        geneProductDocument.databaseSubset = DATABASE_SUBSET;
        geneProductDocument.synonyms = SYNONYMS;
        geneProductDocument.name = NAME;
        geneProductDocument.parentId = PARENT_ID;
        geneProductDocument.symbol = SYMBOL;
        geneProductDocument.taxonId = TAX_ID;
        geneProductDocument.type = TYPE;
        geneProductDocument.proteome = PROTEOME;
    }

    /** Every populated document field must carry over to the model. */
    @Test
    public void convertOne() {
        GeneProduct convertedGeneProduct = geneProductDocConverter.convert(geneProductDocument);

        assertThat(convertedGeneProduct.id, is(equalTo(ID)));
        assertThat(convertedGeneProduct.database, is(equalTo(DATABASE)));
        // Consistency fix: assert against the shared constants rather than
        // repeating the literal values used in setup().
        assertThat(convertedGeneProduct.databaseSubset, is(DATABASE_SUBSET));
        assertThat(convertedGeneProduct.synonyms, containsInAnyOrder(SYNONYMS.toArray()));
        assertThat(convertedGeneProduct.name, is(NAME));
        assertThat(convertedGeneProduct.parentId, is(PARENT_ID));
        assertThat(convertedGeneProduct.symbol, is(SYMBOL));
        assertThat(convertedGeneProduct.taxonId, is(TAX_ID));
        assertThat(convertedGeneProduct.type, is(GeneProductType.PROTEIN));
        assertThat(convertedGeneProduct.proteome, is(PROTEOME));
    }

    // NOTE(review): despite the name, this verifies that the sentinel
    // DEFAULT_TAXON_ID passes through unchanged (not that the model id is
    // null); name kept to avoid breaking test-selection references.
    @Test
    public void noTaxIdInDocResultsInNullModelTaxId() {
        geneProductDocument.taxonId = DEFAULT_TAXON_ID;

        GeneProduct convertedGeneProduct = geneProductDocConverter.convert(geneProductDocument);

        assertThat(convertedGeneProduct.taxonId, is(DEFAULT_TAXON_ID));
    }

    @Test
    public void nullDocDbSubsetConvertsToNullModelDbSubset() {
        geneProductDocument.databaseSubset = null;

        GeneProduct convertedGeneProduct = geneProductDocConverter.convert(geneProductDocument);

        assertThat(convertedGeneProduct.databaseSubset, is(nullValue()));
    }

    @Test
    public void nullDocSynonymsConvertsToNullModelSynonyms() {
        geneProductDocument.synonyms = null;

        GeneProduct convertedGeneProduct = geneProductDocConverter.convert(geneProductDocument);

        assertThat(convertedGeneProduct.synonyms, is(nullValue()));
    }

    /** An unrecognised type string must be rejected, not silently mapped. */
    @Test(expected = IllegalArgumentException.class)
    public void invalidGeneProductTypeCausesError() {
        geneProductDocument.type = "this is not a valid gene product type, I promise.";
        geneProductDocConverter.convert(geneProductDocument);
    }

    @Test(expected = IllegalArgumentException.class)
    public void nullGeneProductTypeCausesError() {
        geneProductDocument.type = null;
        geneProductDocConverter.convert(geneProductDocument);
    }
}
apache-2.0
werkt/bazel
src/main/java/com/google/devtools/build/lib/rules/android/AndroidLibraryResourceClassJarProvider.java
3262
// Copyright 2016 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.devtools.build.lib.rules.android;

import com.google.devtools.build.lib.actions.Artifact;
import com.google.devtools.build.lib.analysis.TransitiveInfoCollection;
import com.google.devtools.build.lib.collect.nestedset.Depset;
import com.google.devtools.build.lib.collect.nestedset.NestedSet;
import com.google.devtools.build.lib.collect.nestedset.NestedSetBuilder;
import com.google.devtools.build.lib.packages.BuiltinProvider;
import com.google.devtools.build.lib.packages.NativeInfo;
import com.google.devtools.build.lib.starlarkbuildapi.android.AndroidLibraryResourceClassJarProviderApi;
import com.google.devtools.build.lib.syntax.EvalException;

/**
 * A provider which contains the resource class jars from android_library rules. See {@link
 * AndroidRuleClasses#ANDROID_RESOURCES_CLASS_JAR}.
 *
 * <p>Instances are immutable: the only state is the nested set of class jars
 * supplied at construction. Creation goes through {@link #create} (from native
 * code) or {@link Provider#create} (from Starlark).
 */
public final class AndroidLibraryResourceClassJarProvider extends NativeInfo
    implements AndroidLibraryResourceClassJarProviderApi<Artifact> {

  public static final Provider PROVIDER = new Provider();

  // Transitive closure of resource class jars; never null.
  private final NestedSet<Artifact> resourceClassJars;

  private AndroidLibraryResourceClassJarProvider(NestedSet<Artifact> resourceClassJars) {
    super(PROVIDER);
    this.resourceClassJars = resourceClassJars;
  }

  /** Static factory used by native rule implementations. */
  public static AndroidLibraryResourceClassJarProvider create(
      NestedSet<Artifact> resourceClassJars) {
    return new AndroidLibraryResourceClassJarProvider(resourceClassJars);
  }

  /**
   * Looks this provider up on a configured target. Returns null when the
   * target does not advertise it (the cast of a null lookup is a no-op).
   */
  public static AndroidLibraryResourceClassJarProvider getProvider(
      TransitiveInfoCollection target) {
    return (AndroidLibraryResourceClassJarProvider)
        target.get(AndroidLibraryResourceClassJarProvider.PROVIDER.getKey());
  }

  // Starlark-facing accessor: wraps the nested set in a Depset.
  @Override
  public Depset /*<Artifact>*/ getResourceClassJarsForStarlark() {
    return Depset.of(Artifact.TYPE, resourceClassJars);
  }

  // Native-facing accessor: hands out the raw nested set.
  public NestedSet<Artifact> getResourceClassJars() {
    return resourceClassJars;
  }

  /** Provider class for {@link AndroidLibraryResourceClassJarProvider} objects. */
  public static class Provider extends BuiltinProvider<AndroidLibraryResourceClassJarProvider>
      implements AndroidLibraryResourceClassJarProviderApi.Provider<Artifact> {
    private Provider() {
      super(NAME, AndroidLibraryResourceClassJarProvider.class);
    }

    public String getName() {
      return NAME;
    }

    /** Starlark constructor: validates the depset's element type and copies it. */
    @Override
    public AndroidLibraryResourceClassJarProvider create(Depset jars) throws EvalException {
      return new AndroidLibraryResourceClassJarProvider(
          NestedSetBuilder.<Artifact>stableOrder()
              .addTransitive(Depset.cast(jars, Artifact.class, "jars"))
              .build());
    }
  }
}
apache-2.0
appbakers/automon_example
jamonapi/jamon/src/main/java/com/jamonapi/RangeBase.java
2571
package com.jamonapi; /** Base class for ranges which are a compromise between aggregation and tracking details. */ final class RangeBase extends RangeImp { private static final long serialVersionUID = 278L; private double[] rangeValues; /** The first range will catcth anything less than that value. */ RangeBase(RangeHolder rangeHolder) { this.rangeHolder=rangeHolder; this.isLessThan=rangeHolder.isLessThan(); this.rangeValues=rangeHolder.getEndPoints(); int len=rangeValues.length; // add one to cover values less than first range frequencyDist=new FrequencyDistBase[len+1]; for (int i=0;i<len;i++) { RangeHolder.RangeHolderItem item=rangeHolder.get(i); frequencyDist[i]=new FrequencyDistBase(item.getDisplayHeader(), item.getEndPoint(), getFreqDistName(i)); } frequencyDist[len]=new FrequencyDistBase(getLastHeader(),Double.MAX_VALUE,getFreqDistName(len)); } /** return which Distribution the value belongs to. */ public FrequencyDist getFrequencyDist(double value) { int last=frequencyDist.length-1; // If comparison is for < else <=. Defaults to <= if (isLessThan) { for (int i=0;i<last;i++) { if (value<rangeValues[i]) return frequencyDist[i]; } } else { // <= i.e. not less than for (int i=0;i<last;i++) { if (value<=rangeValues[i]) return frequencyDist[i]; } } //if nothing has matched until this point then match on the last range. 
return frequencyDist[last]; } public void add(double value) { getFrequencyDist(value).add(value); } public void reset() { for (int i=0;i<frequencyDist.length;i++) frequencyDist[i].reset(); } @Override public RangeImp copy(ActivityStats activityStats) { RangeBase rb=new RangeBase(rangeHolder); rb.setActivityStats(activityStats); return rb; } private void setActivityStats(ActivityStats stats) { for (int i=0;i<frequencyDist.length;i++) frequencyDist[i].setActivityStats(stats); } /** Ranges are implemented as JAMonListeners */ public void processEvent(Monitor mon) { double value=mon.getLastValue(); getFrequencyDist(value).add(value); } public String getName() { return "Range"; } public void setName(String name) { // noop } }
apache-2.0
cduicu/play-jaas
app/auth/WebSession.java
2827
package auth;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import play.cache.Cache;
import play.mvc.Http;

import java.io.Serializable;
import java.util.HashMap;

/**
 * Simulates the Servlet's HTTPSession object.
 *
 * <p>A random UUID is stored in the Play cookie session under "uuid" and maps
 * to a WebSession instance kept in Play's {@link Cache}. Sessions expire when
 * not touched for {@link #TIME_OUT} milliseconds.
 *
 * <p>NOTE(review): not thread-safe — sessionData is a plain HashMap and
 * lastAccessTime is read/written without synchronization; confirm whether
 * concurrent requests can share one session before relying on this.
 */
public class WebSession {
    private static Logger logger = LoggerFactory.getLogger(WebSession.class);

    // Millis timestamp of the last access; compared against TIME_OUT to expire.
    private long lastAccessTime;
    // The UUID that is both the cache key and the cookie-session value.
    private String id;

    // Idle timeout in milliseconds. Public and mutable so callers can tune it.
    public static long TIME_OUT = 20 * 60000; // 20 min.

    // Attribute map; values are forced to Serializable (see put()).
    private HashMap<String,Serializable> sessionData = new HashMap<String,Serializable>();

    private WebSession(String id) {
        this.id = id;
        this.lastAccessTime = System.currentTimeMillis();
    }

    /**
     * Creates a fresh session, stores its id in the cookie session and the
     * instance in the cache, and returns it.
     */
    public static WebSession newSession(Http.Session session) {
        String id = java.util.UUID.randomUUID().toString();
        logger.info("New session created id=" + id);
        session.put("uuid", id);
        WebSession s = new WebSession(id);
        Cache.set(id, s);
        return s;
    }

    /** Looks up the current session without auto-creating one. */
    public static WebSession getSession(Http.Session session) {
        return getSession(session, false);
    }

    /**
     * Looks up the session referenced by the cookie's "uuid" value.
     *
     * @param autoCreate when true and no uuid exists yet, a new session is
     *        created. NOTE(review): when a uuid exists but the session has
     *        expired (or fell out of the cache), this still returns null even
     *        with autoCreate=true — confirm whether that is intended.
     * @return the live session (touched), or null
     */
    public static WebSession getSession(Http.Session session, boolean autoCreate) {
        WebSession s = null;
        if (session == null) {
            logger.info("Session is null!");
            return null;
        }
        String id = session.get("uuid");
        if (id == null) {
            if (!autoCreate) {
                return null;
            } else {
                s = WebSession.newSession(session);
            }
        } else {
            s = (WebSession) Cache.get(id);
            if (s == null) {
                logger.error("Cannot get session with id=" + id + " from cache!");
            }
        }
        if (s != null) {
            // Expire sessions idle longer than TIME_OUT.
            if (s.lastAccessTime < System.currentTimeMillis() - TIME_OUT) {
                // session expired
                logger.info("Session expired! id=" + id);
                removeSession(id);
                s = null;
            }
        }
        if (s!= null) {
            s.touch();
        }
        return s;
    }

    /** Convenience: fetch one attribute from the current session, or null. */
    public static Object get(Http.Session session, String key) {
        WebSession s = getSession(session);
        if (s != null) {
            return s.get(key);
        }
        return null;
    }

    /**
     * Drops the session from the cache. NOTE(review): clears by setting the
     * cache entry to null rather than an explicit remove, and the "uuid"
     * cookie value is left behind — confirm both against Play's Cache API.
     */
    public static void removeSession(String id) {
        if (id != null) {
            logger.info("Removing session id=" + id);
            Cache.set(id, null);
        }
    }

    /** Returns an attribute; also refreshes the idle timer. */
    public Object get(String key) {
        touch();
        return sessionData.get(key);
    }

    // Refresh the idle timer.
    private void touch() {
        lastAccessTime = System.currentTimeMillis();
    }

    public String getId() {
        return id;
    }

    /**
     * Stores an attribute and refreshes the idle timer. The cast throws
     * ClassCastException if the value is not Serializable.
     */
    public void put(String key, Object value) {
        touch();
        sessionData.put(key, (Serializable) value);
    }
}
apache-2.0
robdouglas/incubator-streams
streams-contrib/streams-provider-youtube/src/main/java/com/youtube/serializer/YoutubeActivityUtil.java
7570
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package com.youtube.serializer;

import com.google.api.client.util.Maps;
import com.google.api.services.youtube.model.Channel;
import com.google.api.services.youtube.model.Thumbnail;
import com.google.api.services.youtube.model.ThumbnailDetails;
import com.google.api.services.youtube.model.Video;
import com.google.common.base.Joiner;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import org.apache.streams.exceptions.ActivitySerializerException;
import org.apache.streams.pojo.extensions.ExtensionUtil;
import org.apache.streams.pojo.json.Activity;
import org.apache.streams.pojo.json.ActivityObject;
import org.apache.streams.pojo.json.Actor;
import org.apache.streams.pojo.json.Image;
import org.apache.streams.pojo.json.Provider;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.HashMap;
import java.util.Map;

/** Maps YouTube API model objects onto Apache Streams {@link Activity} objects. */
public class YoutubeActivityUtil {
    private static final Logger LOGGER = LoggerFactory.getLogger(YoutubeActivityUtil.class);

    /**
     * Given a {@link com.google.api.services.youtube.model.Video} object and an
     * {@link org.apache.streams.pojo.json.Activity} object, fill out the appropriate details
     *
     * @param video source video
     * @param activity target activity, mutated in place
     * @param channelId unused; retained for interface compatibility (the
     *        channel id is read from the video snippet instead)
     * @throws org.apache.streams.exceptions.ActivitySerializerException
     */
    public static void updateActivity(Video video, Activity activity, String channelId) throws ActivitySerializerException {
        activity.setActor(buildActor(video, video.getSnippet().getChannelId()));
        activity.setVerb("post");

        activity.setId(formatId(activity.getVerb(),
                Optional.fromNullable(
                        video.getId())
                        .orNull()));

        activity.setPublished(new DateTime(video.getSnippet().getPublishedAt().getValue()));
        activity.setTitle(video.getSnippet().getTitle());
        activity.setContent(video.getSnippet().getDescription());
        activity.setUrl("https://www.youtube.com/watch?v=" + video.getId());
        activity.setProvider(getProvider());

        activity.setObject(buildActivityObject(video));

        addYoutubeExtensions(activity, video);
    }

    /**
     * Given a {@link com.google.api.services.youtube.model.Channel} object and an
     * {@link org.apache.streams.pojo.json.Activity} object, fill out the appropriate details
     *
     * @param channel source channel
     * @param activity target activity, mutated in place
     * @param channelId unused; retained for interface compatibility
     * @throws org.apache.streams.exceptions.ActivitySerializerException
     */
    public static void updateActivity(Channel channel, Activity activity, String channelId)
            throws ActivitySerializerException {
        try {
            activity.setProvider(getProvider());
            activity.setVerb("post");
            activity.setActor(createActorForChannel(channel));
            Map<String, Object> extensions = Maps.newHashMap();
            extensions.put("youtube", channel);
            activity.setAdditionalProperty("extensions", extensions);
        } catch (Throwable t) {
            throw new ActivitySerializerException(t);
        }
    }

    /** Builds an Actor from a channel's snippet and subscriber/video statistics. */
    public static Actor createActorForChannel(Channel channel) {
        Actor actor = new Actor();
        actor.setId("id:youtube:"+channel.getId());
        actor.setSummary(channel.getSnippet().getDescription());
        actor.setDisplayName(channel.getSnippet().getTitle());
        Image image = new Image();
        image.setUrl(channel.getSnippet().getThumbnails().getHigh().getUrl());
        actor.setImage(image);
        actor.setUrl("https://youtube.com/user/" + channel.getId());

        Map<String, Object> actorExtensions = Maps.newHashMap();
        actorExtensions.put("followers", channel.getStatistics().getSubscriberCount());
        actorExtensions.put("posts", channel.getStatistics().getVideoCount());
        actor.setAdditionalProperty("extensions", actorExtensions);
        return actor;
    }

    /**
     * Given a video object, create the appropriate activity object with a valid image
     * (thumbnail) and video URL
     * @param video source video
     * @return Activity Object with Video URL and a thumbnail image
     */
    private static ActivityObject buildActivityObject(Video video) {
        ActivityObject activityObject = new ActivityObject();

        ThumbnailDetails thumbnailDetails = video.getSnippet().getThumbnails();
        Thumbnail thumbnail = thumbnailDetails.getDefault();

        // A video may have no default thumbnail; skip the image in that case.
        if(thumbnail != null) {
            Image image = new Image();
            image.setUrl(thumbnail.getUrl());
            image.setHeight(thumbnail.getHeight().doubleValue());
            image.setWidth(thumbnail.getWidth().doubleValue());

            activityObject.setImage(image);
        }

        activityObject.setUrl("https://www.youtube.com/watch?v=" + video.getId());
        activityObject.setObjectType("video");

        return activityObject;
    }

    /**
     * Add the Youtube extensions to the Activity object that we're building
     * @param activity target activity
     * @param video source video
     */
    private static void addYoutubeExtensions(Activity activity, Video video) {
        Map<String, Object> extensions = ExtensionUtil.ensureExtensions(activity);

        extensions.put("youtube", video);

        if(video.getStatistics() != null) {
            Map<String, Object> likes = new HashMap<>();
            // Fix: the "likes" extension previously stored getCommentCount();
            // the like count is the correct statistic here.
            likes.put("count", video.getStatistics().getLikeCount());
            extensions.put("likes", likes);
        }
    }

    /**
     * Build an {@link org.apache.streams.pojo.json.Actor} object given the video object
     * @param video source video
     * @param id channel id used to build the actor id
     * @return Actor object
     */
    private static Actor buildActor(Video video, String id) {
        Actor actor = new Actor();

        actor.setId("id:youtube:" + id);
        actor.setDisplayName(video.getSnippet().getChannelTitle());
        actor.setAdditionalProperty("handle", video.getSnippet().getChannelTitle());
        actor.setSummary(video.getSnippet().getDescription());

        return actor;
    }

    /**
     * Gets the common youtube {@link org.apache.streams.pojo.json.Provider} object
     * @return a provider object representing YouTube
     */
    public static Provider getProvider() {
        Provider provider = new Provider();
        provider.setId("id:providers:youtube");
        provider.setDisplayName("YouTube");
        return provider;
    }

    /**
     * Formats the ID to conform with the Apache Streams activity ID convention
     * @param idparts the parts of the ID to join
     * @return a valid Activity ID in format "id:youtube:part1:part2:...partN"
     */
    public static String formatId(String... idparts) {
        return Joiner.on(":").join(Lists.asList("id:youtube", idparts));
    }
}
apache-2.0
JSDanielPark/algorhythm_backend
algorhythm/src/main/java/kr/devdogs/algorhythm/template/controller/TemplateController.java
1580
package kr.devdogs.algorhythm.template.controller; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import javax.servlet.http.HttpSession; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.HttpHeaders; import org.springframework.http.HttpStatus; import org.springframework.http.ResponseEntity; import org.springframework.web.bind.annotation.PathVariable; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RequestMethod; import org.springframework.web.bind.annotation.RequestParam; import org.springframework.web.bind.annotation.RestController; import kr.devdogs.algorhythm.exam.mapper.ExamMapper; import kr.devdogs.algorhythm.exam.service.ExamService; import kr.devdogs.algorhythm.member.dto.Member; import kr.devdogs.algorhythm.member.service.MemberService; import kr.devdogs.algorhythm.template.service.TemplateService; import kr.devdogs.algorhythm.utils.FileUtils; /** * * @author Daniel */ @RestController public class TemplateController { @Autowired TemplateService templateService; @RequestMapping(value="/api/template/{lang}", method=RequestMethod.GET) public ResponseEntity<Map<String, Object>> compile( @PathVariable(value="lang", required=true) String language) { Map<String, Object> result = new HashMap<String, Object>(); result.put("template",templateService.getTemplate(language)); return new ResponseEntity<>(result, HttpStatus.OK); } }
apache-2.0
maksimov/dasein-cloud-test
src/main/java/org/dasein/cloud/test/cloud/StatelessDCTests.java
24592
/** * Copyright (C) 2009-2015 Dell, Inc. * See annotations for authorship information * * ==================================================================== * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ==================================================================== */ package org.dasein.cloud.test.cloud; import org.dasein.cloud.CloudException; import org.dasein.cloud.InternalException; import org.dasein.cloud.dc.DataCenter; import org.dasein.cloud.dc.DataCenterServices; import org.dasein.cloud.dc.Folder; import org.dasein.cloud.dc.Region; import org.dasein.cloud.dc.ResourcePool; import org.dasein.cloud.dc.StoragePool; import org.dasein.cloud.test.DaseinTestManager; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TestName; import static org.junit.Assert.*; import static org.junit.Assume.assumeTrue; import java.util.List; import java.util.Locale; import java.util.UUID; /** * Test cases to validate an implementation of Dasein Cloud data center services. 
* <p>Created by George Reese: 2/18/13 5:51 PM</p> * @author George Reese * @version 2013.04 initial version * @since 2013.04 */ public class StatelessDCTests { static private DaseinTestManager tm; @BeforeClass static public void configure() { tm = new DaseinTestManager(StatelessDCTests.class); } @AfterClass static public void cleanUp() { if( tm != null ) { tm.close(); } } @Rule public final TestName name = new TestName(); private String testDataCenterId; private String testResourcePoolId; private String testStoragePoolId; private String testFolderId; public StatelessDCTests() { } @Before public void before() { tm.begin(name.getMethodName()); assumeTrue(!tm.isTestSkipped()); try { testDataCenterId = DaseinTestManager.getDefaultDataCenterId(true); DataCenterServices services = tm.getProvider().getDataCenterServices(); if (name.getMethodName().contains("Pool") && testDataCenterId != null) { if (services.getCapabilities().supportsResourcePools()) { for ( ResourcePool rp : services.listResourcePools(testDataCenterId)) { if (testResourcePoolId == null) { testResourcePoolId = rp.getProvideResourcePoolId(); break; } } } if (services.getCapabilities().supportsStoragePools()) { for ( StoragePool storagePool : services.listStoragePools()) { if (testStoragePoolId == null) { testStoragePoolId = storagePool.getStoragePoolId(); break; } } } } if (name.getMethodName().contains("Folder")) { if (services.getCapabilities().supportsFolders()) { for (Folder folder : services.listVMFolders()) { if (testFolderId == null) { testFolderId = folder.getId(); break; } } } } } catch( Throwable ignore ) { // ignore } } @After public void after() { tm.end(); } @Test public void configuration() throws CloudException, InternalException { assumeTrue(!tm.isTestSkipped()); DataCenterServices services = tm.getProvider().getDataCenterServices(); tm.out("DC Services", services); assertNotNull("Data center services must be implemented for all clouds", services); } @Test public void checkMetaData() throws 
CloudException, InternalException { assumeTrue(!tm.isTestSkipped()); DataCenterServices services = tm.getProvider().getDataCenterServices(); tm.out("Term for Region", services.getCapabilities().getProviderTermForRegion(Locale.getDefault())); tm.out("Term for DataCenter", services.getCapabilities().getProviderTermForDataCenter(Locale.getDefault())); assertNotNull("The provider term for region may not be null", services.getCapabilities().getProviderTermForRegion(Locale.getDefault())); assertNotNull("The provider term for data center may not be null", services.getCapabilities().getProviderTermForDataCenter(Locale.getDefault())); } @Test public void getBogusRegion() throws CloudException, InternalException { assumeTrue(!tm.isTestSkipped()); DataCenterServices services = tm.getProvider().getDataCenterServices(); Region region = services.getRegion(UUID.randomUUID().toString()); tm.out("Bogus Region", region); assertNull("Dummy region must be null, but one was found", region); } @Test public void getRegion() throws CloudException, InternalException { assumeTrue(!tm.isTestSkipped()); DataCenterServices services = tm.getProvider().getDataCenterServices(); Region region = services.getRegion(tm.getContext().getRegionId()); tm.out("Region", region); assertNotNull("Failed to find the region associated with the current operational context", region); } @Test public void regionContent() throws CloudException, InternalException { assumeTrue(!tm.isTestSkipped()); DataCenterServices services = tm.getProvider().getDataCenterServices(); Region region = services.getRegion(tm.getContext().getRegionId()); assertNotNull("Failed to find the region associated with the current operational context", region); tm.out("Region ID", region.getProviderRegionId()); tm.out("Active", region.isActive()); tm.out("Available", region.isAvailable()); tm.out("Name", region.getName()); tm.out("Jurisdiction", region.getJurisdiction()); assertNotNull("Region ID may not be null", region.getProviderRegionId()); 
assertNotNull("Region name may not be null", region.getName()); assertNotNull("Region jurisdiction may not be null", region.getJurisdiction()); } @Test public void listRegions() throws CloudException, InternalException { assumeTrue(!tm.isTestSkipped()); DataCenterServices services = tm.getProvider().getDataCenterServices(); Iterable<Region> regions = services.listRegions(); boolean found = false; int count = 0; assertNotNull("Null set of regions returned from listRegions()", regions); for( Region region : regions ) { count++; tm.out("Region", region); if( region.getProviderRegionId().equals(tm.getContext().getRegionId()) ) { found = true; } } tm.out("Total Region Count", count); assertTrue("There must be at least one region", count > 0); assertTrue("Did not find the context region ID among returned regions", found); } @Test public void getBogusDataCenter() throws CloudException, InternalException { assumeTrue(!tm.isTestSkipped()); DataCenterServices services = tm.getProvider().getDataCenterServices(); DataCenter dc = services.getDataCenter(UUID.randomUUID().toString()); tm.out("Bogus Data Center", dc); assertNull("Dummy data center must be null, but one was found", dc); } @Test public void getDataCenter() throws CloudException, InternalException { assumeTrue(!tm.isTestSkipped()); DataCenterServices services = tm.getProvider().getDataCenterServices(); DataCenter dc = services.getDataCenter(testDataCenterId); tm.out("Data Center", dc); assertNotNull("Failed to find the test data center", dc); } @Test public void dataCenterContent() throws CloudException, InternalException { assumeTrue(!tm.isTestSkipped()); DataCenterServices services = tm.getProvider().getDataCenterServices(); DataCenter dc = services.getDataCenter(testDataCenterId); assertNotNull("Failed to find the test data center", dc); tm.out("Data Center ID", dc.getProviderDataCenterId()); tm.out("Active", dc.isActive()); tm.out("Available", dc.isAvailable()); tm.out("Name", dc.getName()); tm.out("Region ID", 
dc.getRegionId()); assertNotNull("Data center ID must not be null", dc.getProviderDataCenterId()); assertNotNull("Data center name must not be null", dc.getName()); assertEquals("Data center should be in the current region", tm.getContext().getRegionId(), dc.getRegionId()); } @Test public void listDataCenters() throws CloudException, InternalException { assumeTrue(!tm.isTestSkipped()); DataCenterServices services = tm.getProvider().getDataCenterServices(); Iterable<DataCenter> dataCenters = services.listDataCenters(tm.getContext().getRegionId()); boolean found = false; int count = 0; assertNotNull("Null set of data centers returned from listDataCenters()", dataCenters); for( DataCenter dc : dataCenters ) { count++; tm.out("Data Center", dc); if( dc.getProviderDataCenterId().equals(testDataCenterId) ) { found = true; } } tm.out("Total Data Center Count", count); assertTrue("There must be at least one data center in this region", count > 0); assertTrue("Did not find the test data center ID among returned data centers", found); // for clouds boasting multiple regions let's test that listDataCenters can switch regions and deliver // different datacenters String anotherRegion = null; for( Region region : services.listRegions() ) { if( !region.getProviderRegionId().equalsIgnoreCase(tm.getContext().getRegionId()) ) { anotherRegion = region.getProviderRegionId(); } } if( anotherRegion != null ) { found = false; Iterable<DataCenter> dataCenters1 = services.listDataCenters(anotherRegion); for( DataCenter dataCenter : dataCenters ) { for( DataCenter dataCenter1 : dataCenters1 ) { if( dataCenter.getProviderDataCenterId().equalsIgnoreCase(dataCenter1.getProviderDataCenterId())) { found = true; break; } } } assertFalse("The returned datacenters for regions " + tm.getContext().getRegionId() + " and " + anotherRegion + " contained at least one datacenter with the same dataCenterId", found); } else { tm.out("Provider seems to only have one region so didn't perform a multiregion 
test of datacenters"); } } @Test public void regionIntegrity() throws CloudException, InternalException { assumeTrue(!tm.isTestSkipped()); DataCenterServices services = tm.getProvider().getDataCenterServices(); for( Region region : services.listRegions() ) { if( region.isActive() ) { int count = 0; for( DataCenter dc : services.listDataCenters(region.getProviderRegionId()) ) { if( dc.isActive() ) { count++; } } tm.out("Data Centers in " + region, count); assertTrue("An active region must have at least one active data center; " + region.getProviderRegionId() + " has none", count > 0); } } } //Resource pool tests @Test public void getBogusResourcePool() throws CloudException, InternalException { assumeTrue(!tm.isTestSkipped()); DataCenterServices services = tm.getProvider().getDataCenterServices(); if (services.getCapabilities().supportsResourcePools()) { ResourcePool rp = services.getResourcePool(UUID.randomUUID().toString()); tm.out("Bogus Resource pool", rp); assertNull("Dummy resource pool must be null, but one was found", rp); } else { tm.ok("Resource pools not supported in "+tm.getProvider().getCloudName()); } } @Test public void getResourcePool() throws CloudException, InternalException { assumeTrue(!tm.isTestSkipped()); DataCenterServices services = tm.getProvider().getDataCenterServices(); if (testResourcePoolId != null) { ResourcePool rp = services.getResourcePool(testResourcePoolId); tm.out("Resource Pool", rp+" ["+rp.getProvideResourcePoolId()+"]"); assertNotNull("Failed to find the test resource pool", rp); } else { if (services.getCapabilities().supportsResourcePools()) { fail("No test resource pool exists and thus no test for getResourcePool could be run"); } else { tm.ok("Resource pools not supported in "+tm.getProvider().getCloudName()); } } } @Test public void resourcePoolContent() throws CloudException, InternalException { assumeTrue(!tm.isTestSkipped()); DataCenterServices services = tm.getProvider().getDataCenterServices(); if (testResourcePoolId 
!= null) { ResourcePool rp = services.getResourcePool(testResourcePoolId); assertNotNull("Failed to find the test resource pool", rp); tm.out("Resource Pool ID", rp.getProvideResourcePoolId()); tm.out("Name", rp.getName()); tm.out("Data center ID", rp.getDataCenterId()); tm.out("Available", rp.isAvailable()); assertNotNull("Resource Pool ID must not be null", rp.getProvideResourcePoolId()); assertNotNull("Resource Pool name must not be null", rp.getName()); assertNotNull("Data center id must not be null", rp.getDataCenterId()); } else { if (services.getCapabilities().supportsResourcePools()) { fail("No test resource pool exists and thus no test for resourcePoolContent could be run"); } else { tm.ok("Resource pools not supported in "+tm.getProvider().getCloudName()); } } } @Test public void listResourcePools() throws CloudException, InternalException { assumeTrue(!tm.isTestSkipped()); DataCenterServices services = tm.getProvider().getDataCenterServices(); if (services.getCapabilities().supportsResourcePools() ) { Iterable<ResourcePool> resourcePools = services.listResourcePools(testDataCenterId); boolean found = false; int count = 0; assertNotNull("Null set of resource pools returned from listResourcePools()", resourcePools); for( ResourcePool resourcePool : resourcePools ) { count++; tm.out("Resource Pool", resourcePool+" ["+resourcePool.getProvideResourcePoolId()+"]"); if( resourcePool.getProvideResourcePoolId().equals(testResourcePoolId) ) { found = true; } } tm.out("Total Resource Pool Count", count); assertTrue("There must be at least one Resource Pool in this datacenter", count > 0); assertTrue("Did not find the test Resource Pool ID among returned Resource Pools", found); } else { tm.ok("Resource pools not supported in "+tm.getProvider().getCloudName()); } } //End resource pool tests //Storage pool tests @Test public void getBogusStoragePool() throws CloudException, InternalException { assumeTrue(!tm.isTestSkipped()); DataCenterServices services = 
tm.getProvider().getDataCenterServices(); if (services.getCapabilities().supportsStoragePools()) { StoragePool storagePool = services.getStoragePool(UUID.randomUUID().toString()); tm.out("Bogus Storage pool", storagePool); assertNull("Dummy storage pool must be null, but one was found", storagePool); } else { tm.ok("Storage pools not supported in "+tm.getProvider().getCloudName()); } } @Test public void getStoragePool() throws CloudException, InternalException { assumeTrue(!tm.isTestSkipped()); DataCenterServices services = tm.getProvider().getDataCenterServices(); if (testStoragePoolId != null) { StoragePool storagePool = services.getStoragePool(testStoragePoolId); tm.out("Storage Pool", storagePool+" ["+storagePool.getStoragePoolId()+"]"); assertNotNull("Failed to find the test storage pool", storagePool); } else { if (services.getCapabilities().supportsStoragePools()) { fail("No test storage pool exists and thus no test for getStoragePool could be run"); } else { tm.ok("Storage pools not supported in "+tm.getProvider().getCloudName()); } } } @Test public void storagePoolContent() throws CloudException, InternalException { assumeTrue(!tm.isTestSkipped()); DataCenterServices services = tm.getProvider().getDataCenterServices(); if (testStoragePoolId != null) { StoragePool storagePool = services.getStoragePool(testStoragePoolId); assertNotNull("Failed to find the test storage pool", storagePool); tm.out("Storage Pool ID", storagePool.getStoragePoolId()); tm.out("Name", storagePool.getStoragePoolName()); tm.out("Data center ID", storagePool.getDataCenterId()); tm.out("Region ID", storagePool.getRegionId()); tm.out("Affinity group", storagePool.getAffinityGroupId()); tm.out("Capacity", storagePool.getCapacity()); tm.out("Provisioned", storagePool.getProvisioned()); tm.out("Free space", storagePool.getFreeSpace()); assertNotNull("Storage Pool ID must not be null", storagePool.getStoragePoolId()); assertNotNull("Storage Pool name must not be null", 
storagePool.getStoragePoolName()); } else { if (services.getCapabilities().supportsStoragePools()) { fail("No test storage pool exists and thus no test for storagePoolContent could be run"); } else { tm.ok("Storage pools not supported in "+tm.getProvider().getCloudName()); } } } @Test public void listStoragePools() throws CloudException, InternalException { assumeTrue(!tm.isTestSkipped()); DataCenterServices services = tm.getProvider().getDataCenterServices(); if (services.getCapabilities().supportsStoragePools() ) { Iterable<StoragePool> storagePools = services.listStoragePools(); boolean found = false; int count = 0; assertNotNull("Null set of storage pools returned from listStoragePools()", storagePools); for( StoragePool storagePool : storagePools ) { count++; tm.out("Storage Pool", storagePool+" ["+storagePool.getStoragePoolId()+"]"); if( storagePool.getStoragePoolId().equals(testStoragePoolId) ) { found = true; } } tm.out("Total Storage Pool Count", count); assertTrue("There must be at least one Storage Pool in this datacenter", count > 0); assertTrue("Did not find the test Storage Pool ID among returned Storage Pools", found); } else { tm.ok("Storage pools not supported in "+tm.getProvider().getCloudName()); } } //End storage pool tests //VM folder tests @Test public void getBogusVMFolder() throws CloudException, InternalException { assumeTrue(!tm.isTestSkipped()); DataCenterServices services = tm.getProvider().getDataCenterServices(); if (services.getCapabilities().supportsFolders()) { Folder folder = services.getVMFolder(UUID.randomUUID().toString()); tm.out("Bogus VM folder", folder); assertNull("Dummy VM folder must be null, but one was found", folder); } else { tm.ok("Folders not supported in "+tm.getProvider().getCloudName()); } } @Test public void getVMFolder() throws CloudException, InternalException { assumeTrue(!tm.isTestSkipped()); DataCenterServices services = tm.getProvider().getDataCenterServices(); if (testFolderId != null) { Folder folder = 
services.getVMFolder(testFolderId); tm.out("VMFolder", folder+" ["+folder.getId()+"]"); assertNotNull("Failed to find the test folder", folder); } else { if (services.getCapabilities().supportsFolders()) { fail("No test folder exists and thus no test for getVMFolder could be run"); } else { tm.ok("Folders not supported in "+tm.getProvider().getCloudName()); } } } @Test public void vmFolderContent() throws CloudException, InternalException { assumeTrue(!tm.isTestSkipped()); DataCenterServices services = tm.getProvider().getDataCenterServices(); if (testFolderId != null) { Folder folder = services.getVMFolder(testFolderId); assertNotNull("Failed to find the test folder", folder); tm.out("VM folder ID", folder.getId()); tm.out("Name", folder.getName()); tm.out("Type", folder.getType()); if (folder.getParent() != null) { tm.out("Parent", folder.getParent().getName()); } List<Folder> children = folder.getChildren(); for (Folder child : children) { tm.out("Child folder", child.getName()); } assertNotNull("VM folder ID must not be null", folder.getId()); assertNotNull("VM folder name must not be null", folder.getName()); assertNotNull("Type must not be null", folder.getType()); } else { if (services.getCapabilities().supportsFolders()) { fail("No test folder exists and thus no test for vmFolderContent could be run"); } else { tm.ok("Folders not supported in "+tm.getProvider().getCloudName()); } } } @Test public void listVMFolders() throws CloudException, InternalException { assumeTrue(!tm.isTestSkipped()); DataCenterServices services = tm.getProvider().getDataCenterServices(); if (services.getCapabilities().supportsFolders() ) { Iterable<Folder> folders = services.listVMFolders(); boolean found = false; int count = 0; assertNotNull("Null set of folders returned from listVMFolders()", folders); for( Folder folder : folders ) { count++; tm.out("VM folder", folder+" ["+folder.getId()+"]"); if( folder.getId().equals(testFolderId) ) { found = true; } } tm.out("Total VM folder 
Count", count); assertTrue("There must be at least one VM folder", count > 0); assertTrue("Did not find the test folder ID among returned folders", found); } else { tm.ok("Folders not supported in "+tm.getProvider().getCloudName()); } } //End VM folder tests }
apache-2.0
chitzkoy/twister-spring
src/main/java/ru/qatools/school/twister/repository/UserDaoImpl.java
3664
package ru.qatools.school.twister.repository; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.jdbc.core.JdbcTemplate; import org.springframework.jdbc.core.PreparedStatementCreator; import org.springframework.jdbc.core.RowMapper; import org.springframework.jdbc.support.GeneratedKeyHolder; import org.springframework.jdbc.support.KeyHolder; import org.springframework.stereotype.Repository; import ru.qatools.school.twister.model.User; import javax.sql.DataSource; import java.sql.*; import java.util.List; /** * Created by dima on 21.12.14. */ @Repository public class UserDaoImpl implements UserDao { @Override public List<User> findAll() { return this.jdbcTemplate.query( "select * from USERS", new UserMapper() ); } @Override public User findById( int id ) { return this.jdbcTemplate.queryForObject( "select * from USERS where id = ?", new Object[]{id}, new UserMapper() ); } @Override public User findByName(String name) { return this.jdbcTemplate.queryForObject( "select * from USERS where name = ?", new Object[]{name}, new UserMapper() ); } @Override public User persist(User user) { if ( user.getId() > 0 ) { update( user ); return user; } else { return add( user ); } } private void update(User user) { jdbcTemplate.update( "update users set name = ?, pass_hash = ?, avatar = ? 
where id = ?", user.getLogin(), user.getPassword(), user.getAvatar(), user.getId() ); } private User add( final User user ) { final String INSERT_SQL = "insert into users (name, pass_hash, avatar) values (?, ?, ?)"; KeyHolder keyHolder = new GeneratedKeyHolder(); jdbcTemplate.update( new PreparedStatementCreator() { public PreparedStatement createPreparedStatement(Connection connection) throws SQLException { PreparedStatement ps = connection.prepareStatement(INSERT_SQL, new String[] {"id", "pass_hash", "registered"}); ps.setString( 1, user.getLogin() ); ps.setString( 2, user.getPassword() ); ps.setBinaryStream(3, user.getAvatar()); return ps; } }, keyHolder ); Integer newId = (Integer) keyHolder.getKeys().get("id"); String passHash = (String) keyHolder.getKeys().get("pass_hash"); Timestamp registered = (Timestamp) keyHolder.getKeys().get("registered"); User registeredUser = new User(); registeredUser.setId( newId ); registeredUser.setLogin( user.getLogin() ); registeredUser.setPassword( passHash ); registeredUser.setAvatar( user.getAvatar() ); registeredUser.setRegistered( registered ); return registeredUser; } JdbcTemplate jdbcTemplate; @Autowired public void setDataSource(DataSource dataSource) { this.jdbcTemplate = new JdbcTemplate(dataSource); } private static final class UserMapper implements RowMapper<User> { public User mapRow(ResultSet rs, int rowNum) throws SQLException { User user = new User(); user.setId( rs.getInt("id") ); user.setLogin(rs.getString("name")); user.setPassword(rs.getString("pass_hash")); user.setRegistered(rs.getTimestamp("registered")); user.setAvatar( rs.getBlob("avatar").getBinaryStream() ); return user; } } }
apache-2.0
appcelerator/titanium_studio_sdk
desktop/plugins/com.appcelerator.titanium.desktop/src/com/appcelerator/titanium/desktop/ui/wizard/Release.java
5380
/** * Copyright 2011-2012 Appcelerator, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.appcelerator.titanium.desktop.ui.wizard; import java.util.ArrayList; import java.util.List; import org.eclipse.core.resources.IProject; import org.eclipse.core.resources.ProjectScope; import org.json.simple.JSONArray; import org.json.simple.JSONObject; import org.osgi.service.prefs.BackingStoreException; import org.osgi.service.prefs.Preferences; import com.appcelerator.titanium.desktop.DesktopPlugin; import com.aptana.core.logging.IdeLog; class Release { /** * Preference keys used to store releases inside the project preferences. */ private static final String APP_PAGE_PREF_KEY = "app_page"; //$NON-NLS-1$ private static final String PUBDATE_PREF_KEY = "pubdate"; //$NON-NLS-1$ private static final String VERSION_PREF_KEY = "app_version"; //$NON-NLS-1$ private static final String PLATFORM_PREF_KEY = "platform"; //$NON-NLS-1$ private static final String LABEL_PREF_KEY = "label"; //$NON-NLS-1$ private static final String URL_PREF_KEY = "url"; //$NON-NLS-1$ /** * Nodename for the parent of all the release packages stored in prefs for the project. 
*/ private static final String PACKAGES_PREF_NODE = "packages"; //$NON-NLS-1$ private String url; private String label; private String version; private String platform; private String pubDate; private String appPage; public Release(String url, String label, String platform, String version, String pubDate, String appPage) { this.url = url; this.label = label; this.version = version; this.platform = platform; this.pubDate = pubDate; this.appPage = appPage; } public String getURL() { return url; } public String getLabel() { return label; } public String getVersion() { return version; } public String getPlatform() { return platform; } public String getPubDate() { return pubDate; } public String getAppPage() { return appPage; } /** * Given the JSON response from the Cloud service, store the result in the preferences (the releases, the pubDate, * the public link). * * @param project * @param json */ public static void updateForProject(IProject project, JSONObject json) { final JSONArray releases = (JSONArray) json.get("releases"); //$NON-NLS-1$ final String appPage = (String) json.get(APP_PAGE_PREF_KEY); final String pubDate = (String) json.get(PUBDATE_PREF_KEY); // delete current rows deletePackagesForProject(project); // insert new rows for (Object r : releases) { String label = (String) ((JSONObject) r).get(LABEL_PREF_KEY); String url = (String) ((JSONObject) r).get(URL_PREF_KEY); String platform = (String) ((JSONObject) r).get(PLATFORM_PREF_KEY); String version = (String) ((JSONObject) r).get(VERSION_PREF_KEY); addPackageToDatabase(project, url, label, platform, version, pubDate, appPage); } } private static void addPackageToDatabase(IProject project, String url, String label, String platform, String version, String pubDate, String appPage) { ProjectScope scope = new ProjectScope(project); try { Preferences p = scope.getNode(DesktopPlugin.PLUGIN_ID).node(PACKAGES_PREF_NODE); p = p.node(label); p.put(URL_PREF_KEY, url); p.put(LABEL_PREF_KEY, label); 
p.put(PLATFORM_PREF_KEY, platform); p.put(VERSION_PREF_KEY, version); p.put(PUBDATE_PREF_KEY, pubDate); p.put(APP_PAGE_PREF_KEY, appPage); p.flush(); } catch (BackingStoreException e) { IdeLog.logError(DesktopPlugin.getDefault(), e); } } private static void deletePackagesForProject(IProject project) { ProjectScope scope = new ProjectScope(project); try { scope.getNode(DesktopPlugin.PLUGIN_ID).node(PACKAGES_PREF_NODE).removeNode(); } catch (BackingStoreException e) { IdeLog.logError(DesktopPlugin.getDefault(), e); } } /** * Load the saved/stored releases for the project (from prefs). * * @param project * @return */ public static List<Release> load(IProject project) { List<Release> releases = new ArrayList<Release>(); ProjectScope scope = new ProjectScope(project); try { Preferences p = scope.getNode(DesktopPlugin.PLUGIN_ID); if (p == null) { return releases; } p = p.node(PACKAGES_PREF_NODE); if (p == null) { return releases; } for (String childName : p.childrenNames()) { Preferences releasePref = p.node(childName); releases.add(new Release(releasePref.get(URL_PREF_KEY, null), releasePref.get(LABEL_PREF_KEY, null), releasePref.get(PLATFORM_PREF_KEY, null), releasePref.get(VERSION_PREF_KEY, null), releasePref .get(PUBDATE_PREF_KEY, null), releasePref.get(APP_PAGE_PREF_KEY, null))); } } catch (BackingStoreException e) { IdeLog.logError(DesktopPlugin.getDefault(), e); } return releases; } }
apache-2.0
weld/core
tests-arquillian/src/test/java/org/jboss/weld/tests/transientReference/metadata/Foo.java
1236
/* * JBoss, Home of Professional Open Source * Copyright 2013, Red Hat, Inc., and individual contributors * by the @authors tag. See the copyright.txt in the distribution for a * full listing of individual contributors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jboss.weld.tests.transientReference.metadata; import jakarta.enterprise.context.Dependent; import jakarta.enterprise.inject.TransientReference; import jakarta.enterprise.inject.spi.Bean; import jakarta.inject.Inject; @Dependent public class Foo { private final Bean<Bar> bean; @Inject public Foo(@TransientReference Bar bar) { this.bean = bar.getBean(); } public Bean<Bar> getBean() { return bean; } }
apache-2.0
malictus/klang
src/malictus/klang/ui/klangeditor/DeleteBytesDialog.java
4051
/**
 * This file is distributed under a BSD-style license. See the included LICENSE.txt file
 * for more information.
 * Copyright (c) 2009, James Halliday
 * All rights reserved.
 */
package malictus.klang.ui.klangeditor;

import javax.swing.*;
import javax.swing.border.EmptyBorder;
import java.awt.*;
import malictus.klang.KlangConstants;
import malictus.klang.KlangUtil;
import malictus.klang.ui.*;

/**
 * A DeleteBytesDialog is a helper dialog box for deleting raw bytes from a file.
 * The dialog is application-modal and blocks in the constructor until the user
 * confirms or cancels; afterwards the selected byte range is available via
 * {@link #getStartPosition()} and {@link #getEndPosition()}.
 *
 * @author Jim Halliday
 */
public class DeleteBytesDialog extends JDialog {

    JPanel pnlFrom;
    JPanel pnlTo;
    JPanel pnlOKCancel;
    JPanel pnlContent;
    JButton btnOK;
    JButton btnCancel;
    KlangLabel lblFrom;
    KlangTextField txtfFrom;
    KlangLabel lblTo;
    KlangTextField txtfTo;
    // Set to true when the user dismisses the dialog via the Cancel button.
    private boolean canceled = false;

    /**
     * Build and immediately show the modal dialog, centered over the parent
     * window. The from/to fields are pre-filled with the editor's current byte
     * position (formatted as hex or decimal depending on the editor's mode);
     * the default range covers a single byte.
     *
     * @param parent the owning editor window; must not be null — its current
     *               position and hex mode are read unconditionally
     */
    public DeleteBytesDialog(KlangEditor parent) {
        super(parent);
        this.setModalityType(ModalityType.APPLICATION_MODAL);
        this.setResizable(false);
        this.setTitle(KlangConstants.KLANGEDITOR_DELETEBYTESDIALOG_TITLE);
        // Closing via the window decoration is ignored; only OK/Cancel hide the dialog.
        this.setDefaultCloseOperation(WindowConstants.DO_NOTHING_ON_CLOSE);
        pnlContent = new JPanel();
        pnlContent.setBorder(new EmptyBorder(11, 11, 11, 11));
        pnlContent.setLayout(new BoxLayout(pnlContent, BoxLayout.Y_AXIS));
        this.getContentPane().add(pnlContent);
        pnlFrom = new JPanel();
        FlowLayout fl = new FlowLayout();
        fl.setAlignment(FlowLayout.LEFT);
        pnlFrom.setLayout(fl);
        pnlContent.add(pnlFrom);
        pnlTo = new JPanel();
        FlowLayout f2 = new FlowLayout();
        f2.setAlignment(FlowLayout.LEFT);
        pnlTo.setLayout(f2);
        pnlContent.add(pnlTo);
        pnlOKCancel = new JPanel();
        FlowLayout f3 = new FlowLayout();
        f3.setAlignment(FlowLayout.RIGHT);
        pnlOKCancel.setLayout(f3);
        btnOK = new JButton("OK");
        btnOK.addActionListener(new java.awt.event.ActionListener() {
            public void actionPerformed(java.awt.event.ActionEvent e) {
                canceled = false;
                setVisible(false);
            }
        });
        btnCancel = new JButton("Cancel");
        btnCancel.addActionListener(new java.awt.event.ActionListener() {
            public void actionPerformed(java.awt.event.ActionEvent e) {
                canceled = true;
                setVisible(false);
            }
        });
        pnlOKCancel.add(btnOK);
        pnlOKCancel.add(btnCancel);
        pnlContent.add(pnlOKCancel);
        lblFrom = new KlangLabel(KlangConstants.KLANGEDITOR_BYTESDIALOG_FROM);
        lblTo = new KlangLabel(KlangConstants.KLANGEDITOR_BYTESDIALOG_TO);
        txtfFrom = new KlangTextField("", 125);
        txtfTo = new KlangTextField("", 125);
        if (parent.isHexMode()) {
            txtfFrom.setText(KlangUtil.convertToHex(parent.currentByte));
            txtfTo.setText(KlangUtil.convertToHex(parent.currentByte + 1));
        } else {
            txtfFrom.setText("" + parent.currentByte);
            txtfTo.setText("" + (parent.currentByte + 1));
        }
        pnlFrom.add(lblFrom);
        pnlFrom.add(txtfFrom);
        pnlTo.add(lblTo);
        pnlTo.add(txtfTo);
        this.pack();
        // Center over the parent window. The original guarded this with
        // "if (parent != null)", but parent has already been dereferenced
        // unconditionally above, so the guard was dead code and was removed.
        this.setLocation(parent.getX() + (parent.getWidth() / 2) - (this.getWidth() / 2),
                parent.getY() + (parent.getHeight() / 2) - (this.getHeight() / 2));
        this.setVisible(true);
    }

    /**
     * Tell if the user canceled the dialog or not
     * @return true if user canceled the dialog, and false otherwise
     */
    public boolean wasCanceled() {
        return canceled;
    }

    /**
     * Get the start position that the user typed in.
     * @return the start byte position
     * @throws NumberFormatException if the string cannot be decoded as a long
     */
    public long getStartPosition() throws NumberFormatException {
        // Long.decode accepts decimal, 0x-prefixed hex, and 0-prefixed octal input.
        return Long.decode(this.txtfFrom.getText());
    }

    /**
     * Get the end position that the user typed in.
     * @return the end byte position
     * @throws NumberFormatException if the string cannot be decoded as a long
     */
    public long getEndPosition() throws NumberFormatException {
        return Long.decode(this.txtfTo.getText());
    }
}
apache-2.0
Taller/sqlworkbench-plus
src/workbench/db/report/IndexReporter.java
7145
/*
 * IndexReporter.java
 *
 * This file is part of SQL Workbench/J, http://www.sql-workbench.net
 *
 * Copyright 2002-2015, Thomas Kellerer
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * To contact the author please send an email to: support@sql-workbench.net
 *
 */
package workbench.db.report;

import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

import workbench.log.LogMgr;

import workbench.db.IndexColumn;
import workbench.db.IndexDefinition;
import workbench.db.IndexReader;
import workbench.db.TableIdentifier;
import workbench.db.WbConnection;
import workbench.db.oracle.OracleIndexPartition;

import workbench.util.CollectionUtil;
import workbench.util.SqlUtil;
import workbench.util.StringUtil;

import static workbench.db.report.ReportTable.*;

/**
 * Class to retrieve all index definitions for a table and
 * generate an XML string from that.
 *
 * @author Thomas Kellerer
 */
public class IndexReporter
{
  public static final String TAG_INDEX = "index-def";
  public static final String TAG_INDEX_NAME = "name";
  public static final String TAG_INDEX_UNIQUE = "unique";
  public static final String TAG_INDEX_PK = "primary-key";
  public static final String TAG_INDEX_TYPE = "type";
  public static final String TAG_INDEX_EXPR = "index-expression";
  public static final String TAG_INDEX_COLUMN_LIST = "column-list";
  public static final String TAG_INDEX_COLUMN_NAME = "column";
  public static final String TAG_INDEX_OPTION = "index-option";

  private Collection<IndexDefinition> indexList;
  private TagWriter tagWriter = new TagWriter();
  private String mainTagToUse;
  // NOTE(review): IndexDefinition instances are used as HashMap keys while
  // retrieveSourceOptions()/retrieveOracleOptions() may still mutate them —
  // confirm hashCode()/equals() do not depend on the mutated state.
  private Map<IndexDefinition, List<ObjectOption>> indexOptions = new HashMap<>();

  /**
   * Retrieve all index definitions for the given table.
   *
   * @param tbl               the table whose indexes should be reported
   * @param conn              the connection to use
   * @param includePartitions if true, Oracle partition definitions are retrieved as well
   */
  public IndexReporter(TableIdentifier tbl, WbConnection conn, boolean includePartitions)
  {
    indexList = conn.getMetadata().getIndexReader().getTableIndexList(tbl);
    removeEmptyIndexes();
    if (includePartitions)
    {
      retrieveOracleOptions(conn);
    }
    retrieveSourceOptions(tbl, conn);
  }

  /**
   * Create a reporter for a single, already retrieved index definition.
   *
   * @param index the index to report
   */
  public IndexReporter(IndexDefinition index)
  {
    indexList = new LinkedList<>();
    indexList.add(index);
  }

  /**
   * Define the XML tag to use instead of the default {@link #TAG_INDEX}.
   *
   * @param tag the tag name, or null to use the default
   */
  public void setMainTagToUse(String tag)
  {
    mainTagToUse = tag;
  }

  /**
   * Append the XML representation of all indexes to the given buffer.
   *
   * @param result the buffer to append to
   * @param indent the indentation to use for the main tag
   */
  public void appendXml(StringBuilder result, StringBuilder indent)
  {
    int numIndex = this.indexList.size();
    if (numIndex == 0) return;
    StringBuilder defIndent = new StringBuilder(indent);
    defIndent.append(" ");
    for (IndexDefinition index : indexList)
    {
      tagWriter.appendOpenTag(result, indent, mainTagToUse == null ? TAG_INDEX : mainTagToUse);
      result.append('\n');
      tagWriter.appendTag(result, defIndent, TAG_INDEX_NAME, index.getName());
      tagWriter.appendTag(result, defIndent, TAG_INDEX_EXPR, index.getExpression());
      tagWriter.appendTag(result, defIndent, TAG_INDEX_UNIQUE, index.isUnique());
      if (index.isUniqueConstraint())
      {
        tagWriter.appendTag(result, defIndent, ForeignKeyDefinition.TAG_CONSTRAINT_NAME, index.getUniqueConstraintName());
      }
      tagWriter.appendTag(result, defIndent, TAG_INDEX_PK, index.isPrimaryKeyIndex());
      tagWriter.appendTag(result, defIndent, TAG_INDEX_TYPE, index.getIndexType());
      List<IndexColumn> columns = index.getColumns();
      if (!columns.isEmpty())
      {
        StringBuilder colIndent = new StringBuilder(defIndent);
        colIndent.append(" ");
        tagWriter.appendOpenTag(result, defIndent, TAG_INDEX_COLUMN_LIST);
        result.append('\n');
        for (IndexColumn col : columns)
        {
          List<TagAttribute> attrs = new ArrayList<>(2);
          attrs.add(new TagAttribute("name", SqlUtil.removeObjectQuotes(col.getColumn())));
          if (col.getDirection() != null)
          {
            attrs.add(new TagAttribute("direction", col.getDirection()));
          }
          // The open tag is written without closing it so that the
          // self-closing "/>" can be appended directly.
          tagWriter.appendOpenTag(result, colIndent, TAG_INDEX_COLUMN_NAME, attrs, false);
          result.append("/>\n");
        }
        tagWriter.appendCloseTag(result, defIndent, TAG_INDEX_COLUMN_LIST);
      }
      if (StringUtil.isNonBlank(index.getTablespace()))
      {
        tagWriter.appendTag(result, defIndent, TAG_TABLESPACE, index.getTablespace(), false);
      }
      writeDbmsOptions(result, defIndent, index);
      tagWriter.appendCloseTag(result, indent, mainTagToUse == null ? TAG_INDEX : mainTagToUse);
    }
  }

  /**
   * Write the collected DBMS specific options (if any) for the given index.
   */
  private void writeDbmsOptions(StringBuilder output, StringBuilder indent, IndexDefinition index)
  {
    List<ObjectOption> options = indexOptions.get(index);
    if (CollectionUtil.isEmpty(options)) return;
    StringBuilder myindent = new StringBuilder(indent);
    myindent.append(" ");
    output.append(indent);
    output.append("<index-options>\n");
    for (ObjectOption option : options)
    {
      StringBuilder result = option.getXml(myindent);
      output.append(result);
    }
    output.append(indent);
    output.append("</index-options>\n");
  }

  /**
   * Retrieve the DBMS specific source options for each index and store
   * them as {@link ObjectOption}s keyed by the index definition.
   */
  private void retrieveSourceOptions(TableIdentifier table, WbConnection conn)
  {
    IndexReader reader = conn.getMetadata().getIndexReader();
    for (IndexDefinition index : indexList)
    {
      reader.getIndexOptions(table, index);
      Map<String, String> config = index.getSourceOptions().getConfigSettings();
      for (Map.Entry<String, String> entry : config.entrySet())
      {
        ObjectOption option = new ObjectOption(entry.getKey(), entry.getValue());
        // Values that need a CDATA section cannot be written as flat XML.
        option.setWriteFlaxXML(!TagWriter.needsCData(entry.getValue()));
        addOption(index, option);
      }
    }
  }

  /**
   * Retrieve Oracle partition definitions for each index.
   * Does nothing for non-Oracle connections.
   */
  private void retrieveOracleOptions(WbConnection conn)
  {
    if (!conn.getMetadata().isOracle()) return;

    try
    {
      for (IndexDefinition index : indexList)
      {
        OracleIndexPartition reader = new OracleIndexPartition(conn);
        reader.retrieve(index, conn);
        if (reader.isPartitioned())
        {
          ObjectOption option = new ObjectOption("partition", reader.getSourceForIndexDefinition());
          addOption(index, option);
        }
      }
    }
    catch (SQLException sql)
    {
      LogMgr.logWarning("IndexReporter.retrieveOracleOptions()", "Could not retrieve index options", sql);
    }
  }

  /**
   * Register an option for the given index.
   */
  private void addOption(IndexDefinition index, ObjectOption option)
  {
    // computeIfAbsent replaces the original get / null-check / put sequence.
    indexOptions.computeIfAbsent(index, k -> new ArrayList<>()).add(option);
  }

  public Collection<IndexDefinition> getIndexList()
  {
    return this.indexList;
  }

  /**
   * Remove indexes without any columns; they cannot be reported.
   */
  private void removeEmptyIndexes()
  {
    if (indexList == null) return;
    Iterator<IndexDefinition> itr = indexList.iterator();
    while (itr.hasNext())
    {
      IndexDefinition idx = itr.next();
      if (idx.isEmpty())
      {
        itr.remove();
      }
    }
  }
}
apache-2.0
minborg/beacon
src/main/java/com/speedment/beacon/resource/DefaultResource.java
692
package com.speedment.beacon.resource; import com.speedment.beacon.resource.content.OnePng; import com.speedment.beacon.resource.mime.MimeType; import java.io.InputStream; /** * * @author pemi */ public class DefaultResource implements Resource { private final MimeType mimeType; private final String resourceName; public DefaultResource(MimeType mimeType, String resourceName) { this.mimeType = mimeType; this.resourceName = resourceName; } @Override public MimeType getMimeType() { return mimeType; } @Override public InputStream newInputStream() { return OnePng.class.getResourceAsStream(resourceName); } }
apache-2.0
JackAnansi/NHLPlayoffsSim
app/src/main/java/com/ambrogio/dan/playoffs/EditTeam.java
5394
package com.ambrogio.dan.playoffs;

import android.content.Intent;
import android.database.sqlite.SQLiteDatabase;
import android.support.v7.app.ActionBarActivity;
import android.os.Bundle;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.widget.AdapterView;
import android.widget.ArrayAdapter;
import android.widget.Button;
import android.widget.EditText;
import android.widget.Spinner;
import android.widget.Toast;

import java.util.List;

/**
 * Activity that lets the user pick a team from a spinner and edit its
 * win/loss/overtime-loss record. Points are recomputed by the database
 * layer and displayed read-only after saving.
 */
public class EditTeam extends ActionBarActivity {

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_editteam);

        // Populate the team spinner from the string-array resource.
        Spinner spinner = (Spinner) findViewById(R.id.spinner);
        ArrayAdapter<CharSequence> adapter = ArrayAdapter.createFromResource(
                this, R.array.teamnames, android.R.layout.simple_spinner_item);
        adapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item);
        spinner.setAdapter(adapter);

        spinner.setOnItemSelectedListener(new AdapterView.OnItemSelectedListener() {
            @Override
            public void onItemSelected(AdapterView<?> parent, View view, int position, long id) {
                Spinner spinner = (Spinner) findViewById(R.id.spinner);
                String spinnerString = spinner.getSelectedItem().toString();
                EditText wins = (EditText) findViewById(R.id.inputWins);
                EditText losses = (EditText) findViewById(R.id.inputLosses);
                EditText otl = (EditText) findViewById(R.id.inputOtl);
                EditText pts = (EditText) findViewById(R.id.inputPts);
                // The first spinner entry is a placeholder, not a team.
                if (!spinnerString.equals("Select a team...")) {
                    // NOTE(review): findTeam is called statically here but via an
                    // instance in onClickSave() — confirm the intended API shape.
                    Team team = MyDBHandler.findTeam(spinnerString);
                    wins.setText(String.valueOf(team.getWins()));
                    losses.setText(String.valueOf(team.getLosses()));
                    otl.setText(String.valueOf(team.getOvertimeLosses()));
                    pts.setText(String.valueOf(team.getPoints()));
                } else {
                    // Placeholder selected: clear the form.
                    wins.setText("");
                    losses.setText("");
                    otl.setText("");
                    pts.setText("");
                }
            }

            @Override
            public void onNothingSelected(AdapterView<?> parent) {
            }
        });
    }

    /**
     * Goes back to the main screen without saving.
     * @param v the clicked view (unused)
     */
    public void onClickCancel(View v) {
        finish();
    }

    /**
     * Validates the input fields and persists the updated record for the
     * currently selected team. Shows a toast on success or on invalid input.
     *
     * @param v the clicked view (unused)
     */
    public void onClickSave(View v) {
        Spinner spinner = (Spinner) findViewById(R.id.spinner);
        String spinnerString = spinner.getSelectedItem().toString();
        if (!spinnerString.equals("Select a team...")) {
            EditText wins = (EditText) findViewById(R.id.inputWins);
            EditText losses = (EditText) findViewById(R.id.inputLosses);
            EditText otl = (EditText) findViewById(R.id.inputOtl);
            EditText pts = (EditText) findViewById(R.id.inputPts);
            try {
                // Parse each field exactly once; the original parsed every
                // value twice (once for validation, once for the update call).
                int winCount = Integer.parseInt(wins.getText().toString());
                int lossCount = Integer.parseInt(losses.getText().toString());
                int otlCount = Integer.parseInt(otl.getText().toString());

                MyDBHandler db = MyDBHandler.getInstance(getApplicationContext());
                db.updateRecord(spinnerString, winCount, lossCount, otlCount);

                // Points are derived by the database layer; re-read for display.
                Team temp = db.findTeam(spinnerString);
                pts.setText(String.valueOf(temp.getPoints()));
                Toast.makeText(getApplicationContext(), "Team Updated!", Toast.LENGTH_SHORT).show();
            } catch (NumberFormatException e) {
                // Input was not integers — leave the fields as typed.
                Toast.makeText(getApplicationContext(), "Whole numbers only, please", Toast.LENGTH_SHORT).show();
            }
        }
    }

    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        // Inflate the menu; this adds items to the action bar if it is present.
        getMenuInflater().inflate(R.menu.menu_editteam, menu);
        return true;
    }

    @Override
    public boolean onOptionsItemSelected(MenuItem item) {
        // Handle action bar item clicks here. The action bar will
        // automatically handle clicks on the Home/Up button, so long
        // as you specify a parent activity in AndroidManifest.xml.
        int id = item.getItemId();

        //noinspection SimplifiableIfStatement
        if (id == R.id.action_settings) {
            return true;
        }

        return super.onOptionsItemSelected(item);
    }
}
apache-2.0
danmcginnis/SBird
src/com/cis4350/framework/implementation/AndroidFileIO.java
1283
package com.cis4350.framework.implementation; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import android.content.Context; import android.content.SharedPreferences; import android.content.res.AssetManager; import android.os.Environment; import android.preference.PreferenceManager; import com.cis4350.framework.FileIO; public class AndroidFileIO implements FileIO { Context context; AssetManager assets; String externalStoragePath; public AndroidFileIO(Context context) { this.context = context; this.assets = context.getAssets(); this.externalStoragePath = Environment.getExternalStorageDirectory() .getAbsolutePath() + File.separator; } @Override public InputStream readAsset(String file) throws IOException { return assets.open(file); } @Override public InputStream readFile(String file) throws IOException { return new FileInputStream(externalStoragePath + file); } @Override public OutputStream writeFile(String file) throws IOException { return new FileOutputStream(externalStoragePath + file); } public SharedPreferences getSharedPref() { return PreferenceManager.getDefaultSharedPreferences(context); } }
apache-2.0
baratine/auction
src/test/java/examples/auction/AuctionReplayTest.java
2616
package examples.auction;

import java.util.logging.Logger;

import javax.inject.Inject;

import io.baratine.service.Service;
import io.baratine.service.Services;
import io.baratine.vault.IdAsset;

import com.caucho.junit.ConfigurationBaratine;
import com.caucho.junit.RunnerBaratine;
import com.caucho.junit.ServiceTest;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;

/**
 * Verifies that auction state survives a journal replay: the container is
 * stopped and restarted mid-test, and the bid placed before the restart
 * must still be visible afterwards.
 */
@RunWith(RunnerBaratine.class)
@ServiceTest(UserVault.class)
@ServiceTest(AuctionVault.class)
@ServiceTest(AuditServiceImpl.class)
@ServiceTest(AuctionSettlementVault.class)
@ConfigurationBaratine(workDir = "/tmp/baratine",
                       testTime = ConfigurationBaratine.TEST_TIME,
                       journalDelay = 12000)
public class AuctionReplayTest
{
  private static final Logger log
    = Logger.getLogger(AuctionReplayTest.class.getName());

  // Synchronous proxy to the user vault, injected by the Baratine runner.
  @Inject
  @Service("public:///user")
  UserVaultSync _users;

  // Synchronous proxy to the auction vault.
  @Inject
  @Service("public:///auction")
  AuctionVaultSync _auctions;

  // Handle to the test container, used to force the stop/restart cycle.
  @Inject
  RunnerBaratine _testContext;

  @Inject
  Services _services;

  /**
   * Tests normal bid.
   */
  @Test
  public void testAuctionBid()
    throws InterruptedException
  {
    UserSync userSpock = createUser("Spock", "test");
    UserSync userKirk = createUser("Kirk", "test");

    AuctionSync auction = createAuction(userSpock, "book", 15);
    Assert.assertNotNull(auction);

    boolean result = auction.open();
    Assert.assertTrue(result);

    // Remember the id so the auction can be looked up again after restart.
    String auctionId = auction.get().getEncodedId();

    // successful bid
    result = auction.bid(new AuctionBid(userKirk.get().getEncodedId(), 20));
    Assert.assertTrue(result);

    AuctionData data = auction.get();

    Assert.assertEquals(data.getLastBid().getBid(), 20);

    Assert.assertEquals(data.getLastBid().getUserId(),
                        userKirk.get().getEncodedId());

    //State.sleep(10);

    // Restart the container; the journal delay (12s) is longer than the test
    // run, so the bid must be recovered by replaying the journal.
    _testContext.stopImmediate();
    _testContext.start();

    // Re-resolve the service proxy — the old one belongs to the stopped container.
    auction = _services.service(AuctionSync.class, auctionId);

    data = auction.get();

    Assert.assertEquals(data.getLastBid().getBid(), 20);
  }

  // Creates a user via the vault and returns a synchronous proxy to it.
  UserSync createUser(String name, String password)
  {
    IdAsset id = _users.create(
      new AuctionSession.UserInitData(name, password, false));

    return _services.service(UserSync.class, id.toString());
  }

  // Creates an auction owned by the given user with an initial bid amount.
  AuctionSync createAuction(UserSync user, String title, int bid)
  {
    IdAsset id = _auctions.create(
      new AuctionDataInit(user.get().getEncodedId(), title, bid));

    return _services.service(AuctionSync.class, id.toString());
  }
}
apache-2.0
deeplearning4j/DataVec
datavec-api/src/main/java/org/datavec/api/transform/transform/doubletransform/StandardizeNormalizer.java
2068
/*- * * Copyright 2016 Skymind, Inc. * * * * Licensed under the Apache License, Version 2.0 (the "License"); * * you may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * * * http://www.apache.org/licenses/LICENSE-2.0 * * * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * * limitations under the License. */ package org.datavec.api.transform.transform.doubletransform; import lombok.Data; import org.datavec.api.writable.DoubleWritable; import org.datavec.api.writable.Writable; import org.nd4j.shade.jackson.annotation.JsonProperty; /** * Normalize using (x-mean)/stdev. * Also known as a standard score, standardization etc. * * @author Alex Black */ @Data public class StandardizeNormalizer extends BaseDoubleTransform { protected final double mean; protected final double stdev; public StandardizeNormalizer(@JsonProperty("columnName") String columnName, @JsonProperty("mean") double mean, @JsonProperty("stdev") double stdev) { super(columnName); this.mean = mean; this.stdev = stdev; } @Override public Writable map(Writable writable) { double val = writable.toDouble(); return new DoubleWritable((val - mean) / stdev); } @Override public String toString() { return "StandardizeNormalizer(mean=" + mean + ",stdev=" + stdev + ")"; } /** * Transform an object * in to another object * * @param input the record to transform * @return the transformed writable */ @Override public Object map(Object input) { Number n = (Number) input; double val = n.doubleValue(); return new DoubleWritable((val - mean) / stdev); } }
apache-2.0
OliverJiangnice/LearnSSH
broadcastreceivertwo/src/test/java/com/example/oliverjiang/broadcastreceivertwo/ExampleUnitTest.java
351
package com.example.oliverjiang.broadcastreceivertwo; import org.junit.Test; import static org.junit.Assert.*; /** * To work on unit tests, switch the Test Artifact in the Build Variants view. */ public class ExampleUnitTest { @Test public void addition_isCorrect() throws Exception { assertEquals(4, 2 + 2); } }
apache-2.0
DataSketches/sketches-core
src/main/java/org/apache/datasketches/tuple/arrayofdoubles/ArrayOfDoublesIntersection.java
5461
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.datasketches.tuple.arrayofdoubles;

import static java.lang.Math.min;

import org.apache.datasketches.SketchesStateException;
import org.apache.datasketches.memory.WritableMemory;
import org.apache.datasketches.tuple.Util;

/**
 * Computes the intersection of two or more tuple sketches of type ArrayOfDoubles.
 * A new instance represents the Universal Set.
 * Every update() computes an intersection with the internal set
 * and can only reduce the internal set.
 */
public abstract class ArrayOfDoublesIntersection {

  final int numValues_;
  final long seed_;
  final short seedHash_;
  // Internal set; null means empty (or no intersect() call yet).
  ArrayOfDoublesQuickSelectSketch sketch_;
  boolean isEmpty_;
  // Running minimum theta across all inputs.
  long theta_;
  // True until the first intersect() call, which seeds the internal set.
  boolean isFirstCall_;

  ArrayOfDoublesIntersection(final int numValues, final long seed) {
    numValues_ = numValues;
    seed_ = seed;
    seedHash_ = Util.computeSeedHash(seed);
    isEmpty_ = false;
    theta_ = Long.MAX_VALUE;
    isFirstCall_ = true;
  }

  /**
   * Performs a stateful intersection of the internal set with the given tupleSketch.
   * @param tupleSketch Input sketch to intersect with the internal set.
   * @param combiner Method of combining two arrays of double values
   * @deprecated 2.0.0 Please use {@link #intersect(ArrayOfDoublesSketch, ArrayOfDoublesCombiner)}.
   */
  @Deprecated
  public void update(final ArrayOfDoublesSketch tupleSketch, final ArrayOfDoublesCombiner combiner) {
    intersect(tupleSketch, combiner);
  }

  /**
   * Performs a stateful intersection of the internal set with the given tupleSketch.
   * @param tupleSketch Input sketch to intersect with the internal set.
   * @param combiner Method of combining two arrays of double values
   */
  public void intersect(final ArrayOfDoublesSketch tupleSketch, final ArrayOfDoublesCombiner combiner) {
    final boolean isFirstCall = isFirstCall_;
    isFirstCall_ = false;
    // A null input makes the internal set empty.
    if (tupleSketch == null) {
      isEmpty_ = true;
      sketch_ = null;
      return;
    }
    // Inputs built with a different seed must be rejected.
    Util.checkSeedHashes(seedHash_, tupleSketch.getSeedHash());
    // theta is the minimum over all inputs seen so far.
    theta_ = min(theta_, tupleSketch.getThetaLong());
    isEmpty_ |= tupleSketch.isEmpty();
    // Once empty (or intersecting with an entry-less sketch), the result stays empty.
    if (isEmpty_ || tupleSketch.getRetainedEntries() == 0) {
      sketch_ = null;
      return;
    }
    if (isFirstCall) {
      // First call: adopt the input sketch's entries as the initial internal set.
      sketch_ = createSketch(tupleSketch.getRetainedEntries(), numValues_, seed_);
      final ArrayOfDoublesSketchIterator it = tupleSketch.iterator();
      while (it.next()) {
        sketch_.insert(it.getKey(), it.getValues());
      }
    } else { //not the first call
      // The intersection can have at most min(|internal|, |input|) entries.
      final int matchSize = min(sketch_.getRetainedEntries(), tupleSketch.getRetainedEntries());
      final long[] matchKeys = new long[matchSize];
      final double[][] matchValues = new double[matchSize][];
      int matchCount = 0;
      // Keep only keys present in both sets; combine their value arrays.
      final ArrayOfDoublesSketchIterator it = tupleSketch.iterator();
      while (it.next()) {
        final double[] values = sketch_.find(it.getKey());
        if (values != null) {
          matchKeys[matchCount] = it.getKey();
          matchValues[matchCount] = combiner.combine(values, it.getValues());
          matchCount++;
        }
      }
      // Rebuild the internal sketch from the matched entries only.
      sketch_ = null;
      if (matchCount > 0) {
        sketch_ = createSketch(matchCount, numValues_, seed_);
        for (int i = 0; i < matchCount; i++) {
          sketch_.insert(matchKeys[i], matchValues[i]);
        }
      }
      if (sketch_ != null) {
        sketch_.setThetaLong(theta_);
        sketch_.setNotEmpty();
      }
    }
  }

  /**
   * Gets the internal set as an off-heap compact sketch using the given memory.
   * @param dstMem Memory for the compact sketch (can be null).
   * @return Result of the intersections so far as a compact sketch.
   */
  public ArrayOfDoublesCompactSketch getResult(final WritableMemory dstMem) {
    // Calling getResult() before any intersect() would return the Universal
    // Set, which cannot be represented — reject it.
    if (isFirstCall_) {
      throw new SketchesStateException(
          "getResult() with no intervening intersections is not a legal result.");
    }
    if (sketch_ == null) {
      // Empty result: a compact sketch with no entries and full theta.
      return new HeapArrayOfDoublesCompactSketch(
          null, null, Long.MAX_VALUE, true, numValues_, seedHash_);
    }
    return sketch_.compact(dstMem);
  }

  /**
   * Gets the internal set as an on-heap compact sketch.
   * @return Result of the intersections so far as a compact sketch.
   */
  public ArrayOfDoublesCompactSketch getResult() {
    return getResult(null);
  }

  /**
   * Resets the internal set to the initial state, which represents the Universal Set
   */
  public void reset() {
    isEmpty_ = false;
    theta_ = Long.MAX_VALUE;
    sketch_ = null;
    isFirstCall_ = true;
  }

  // Factory hook implemented by heap/direct subclasses.
  abstract ArrayOfDoublesQuickSelectSketch createSketch(int size, int numValues, long seed);
}
apache-2.0