repo_name stringlengths 5 108 | path stringlengths 6 333 | size stringlengths 1 6 | content stringlengths 4 977k | license stringclasses 15 values |
|---|---|---|---|---|
afs/lizard | lizard-base/src/main/java/lizard/api/TLZ/TLZ_Patch.java | 13636 | /**
* Autogenerated by Thrift Compiler (0.10.0)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
* @generated
*/
package lizard.api.TLZ;
@SuppressWarnings("all")
@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.10.0)", date = "2018-06-17")
public class TLZ_Patch implements org.apache.thrift.TBase<TLZ_Patch, TLZ_Patch._Fields>, java.io.Serializable, Cloneable, Comparable<TLZ_Patch> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TLZ_Patch");
private static final org.apache.thrift.protocol.TField ENTITIES_FIELD_DESC = new org.apache.thrift.protocol.TField("entities", org.apache.thrift.protocol.TType.LIST, (short)1);
private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TLZ_PatchStandardSchemeFactory();
private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TLZ_PatchTupleSchemeFactory();
public java.util.List<TLZ_PatchEntry> entities; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
ENTITIES((short)1, "entities");
private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
static {
for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
/**
* Find the _Fields constant that matches fieldId, or null if its not found.
*/
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
case 1: // ENTITIES
return ENTITIES;
default:
return null;
}
}
/**
* Find the _Fields constant that matches fieldId, throwing an exception
* if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
/**
* Find the _Fields constant that matches name, or null if its not found.
*/
public static _Fields findByName(java.lang.String name) {
return byName.get(name);
}
private final short _thriftId;
private final java.lang.String _fieldName;
_Fields(short thriftId, java.lang.String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
public short getThriftFieldId() {
return _thriftId;
}
public java.lang.String getFieldName() {
return _fieldName;
}
}
// isset id assignments
public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.ENTITIES, new org.apache.thrift.meta_data.FieldMetaData("entities", org.apache.thrift.TFieldRequirementType.REQUIRED,
new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TLZ_PatchEntry.class))));
metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TLZ_Patch.class, metaDataMap);
}
public TLZ_Patch() {
}
public TLZ_Patch(
java.util.List<TLZ_PatchEntry> entities)
{
this();
this.entities = entities;
}
/**
* Performs a deep copy on <i>other</i>.
*/
public TLZ_Patch(TLZ_Patch other) {
if (other.isSetEntities()) {
java.util.List<TLZ_PatchEntry> __this__entities = new java.util.ArrayList<TLZ_PatchEntry>(other.entities.size());
for (TLZ_PatchEntry other_element : other.entities) {
__this__entities.add(new TLZ_PatchEntry(other_element));
}
this.entities = __this__entities;
}
}
public TLZ_Patch deepCopy() {
return new TLZ_Patch(this);
}
@Override
public void clear() {
this.entities = null;
}
public int getEntitiesSize() {
return (this.entities == null) ? 0 : this.entities.size();
}
public java.util.Iterator<TLZ_PatchEntry> getEntitiesIterator() {
return (this.entities == null) ? null : this.entities.iterator();
}
public void addToEntities(TLZ_PatchEntry elem) {
if (this.entities == null) {
this.entities = new java.util.ArrayList<TLZ_PatchEntry>();
}
this.entities.add(elem);
}
public java.util.List<TLZ_PatchEntry> getEntities() {
return this.entities;
}
public TLZ_Patch setEntities(java.util.List<TLZ_PatchEntry> entities) {
this.entities = entities;
return this;
}
public void unsetEntities() {
this.entities = null;
}
/** Returns true if field entities is set (has been assigned a value) and false otherwise */
public boolean isSetEntities() {
return this.entities != null;
}
public void setEntitiesIsSet(boolean value) {
if (!value) {
this.entities = null;
}
}
public void setFieldValue(_Fields field, java.lang.Object value) {
switch (field) {
case ENTITIES:
if (value == null) {
unsetEntities();
} else {
setEntities((java.util.List<TLZ_PatchEntry>)value);
}
break;
}
}
public java.lang.Object getFieldValue(_Fields field) {
switch (field) {
case ENTITIES:
return getEntities();
}
throw new java.lang.IllegalStateException();
}
/** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
public boolean isSet(_Fields field) {
if (field == null) {
throw new java.lang.IllegalArgumentException();
}
switch (field) {
case ENTITIES:
return isSetEntities();
}
throw new java.lang.IllegalStateException();
}
@Override
public boolean equals(java.lang.Object that) {
if (that == null)
return false;
if (that instanceof TLZ_Patch)
return this.equals((TLZ_Patch)that);
return false;
}
public boolean equals(TLZ_Patch that) {
  // Trivial cases first: null never matches, same reference always does.
  if (that == null) {
    return false;
  }
  if (this == that) {
    return true;
  }
  // The single field 'entities' decides equality: both unset => equal,
  // exactly one set => not equal, both set => compare contents.
  boolean thisHas = this.isSetEntities();
  boolean thatHas = that.isSetEntities();
  if (thisHas != thatHas) {
    return false;
  }
  return !thisHas || this.entities.equals(that.entities);
}
@Override
public int hashCode() {
int hashCode = 1;
hashCode = hashCode * 8191 + ((isSetEntities()) ? 131071 : 524287);
if (isSetEntities())
hashCode = hashCode * 8191 + entities.hashCode();
return hashCode;
}
@Override
public int compareTo(TLZ_Patch other) {
  // Defensive class check mirrors other generated TBase implementations:
  // differing runtime classes order by class name.
  if (!getClass().equals(other.getClass())) {
    return getClass().getName().compareTo(other.getClass().getName());
  }
  int lastComparison = 0;
  // Boolean.compare avoids the boxing detour of
  // Boolean.valueOf(..).compareTo(..) while ordering false < true,
  // i.e. "unset" sorts before "set".
  lastComparison = java.lang.Boolean.compare(isSetEntities(), other.isSetEntities());
  if (lastComparison != 0) {
    return lastComparison;
  }
  // Both set (or both unset): fall through to comparing the list contents.
  if (isSetEntities()) {
    lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.entities, other.entities);
    if (lastComparison != 0) {
      return lastComparison;
    }
  }
  return 0;
}
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}
public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
scheme(iprot).read(iprot, this);
}
public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
scheme(oprot).write(oprot, this);
}
@Override
public java.lang.String toString() {
java.lang.StringBuilder sb = new java.lang.StringBuilder("TLZ_Patch(");
boolean first = true;
sb.append("entities:");
if (this.entities == null) {
sb.append("null");
} else {
sb.append(this.entities);
}
first = false;
sb.append(")");
return sb.toString();
}
public void validate() throws org.apache.thrift.TException {
// check for required fields
if (entities == null) {
throw new org.apache.thrift.protocol.TProtocolException("Required field 'entities' was not present! Struct: " + toString());
}
// check for sub-struct validity
}
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
try {
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
private static class TLZ_PatchStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
public TLZ_PatchStandardScheme getScheme() {
return new TLZ_PatchStandardScheme();
}
}
private static class TLZ_PatchStandardScheme extends org.apache.thrift.scheme.StandardScheme<TLZ_Patch> {
public void read(org.apache.thrift.protocol.TProtocol iprot, TLZ_Patch struct) throws org.apache.thrift.TException {
org.apache.thrift.protocol.TField schemeField;
iprot.readStructBegin();
while (true)
{
schemeField = iprot.readFieldBegin();
if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
case 1: // ENTITIES
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
org.apache.thrift.protocol.TList _list8 = iprot.readListBegin();
struct.entities = new java.util.ArrayList<TLZ_PatchEntry>(_list8.size);
TLZ_PatchEntry _elem9;
for (int _i10 = 0; _i10 < _list8.size; ++_i10)
{
_elem9 = new TLZ_PatchEntry();
_elem9.read(iprot);
struct.entities.add(_elem9);
}
iprot.readListEnd();
}
struct.setEntitiesIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
iprot.readFieldEnd();
}
iprot.readStructEnd();
// check for required fields of primitive type, which can't be checked in the validate method
struct.validate();
}
public void write(org.apache.thrift.protocol.TProtocol oprot, TLZ_Patch struct) throws org.apache.thrift.TException {
struct.validate();
oprot.writeStructBegin(STRUCT_DESC);
if (struct.entities != null) {
oprot.writeFieldBegin(ENTITIES_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.entities.size()));
for (TLZ_PatchEntry _iter11 : struct.entities)
{
_iter11.write(oprot);
}
oprot.writeListEnd();
}
oprot.writeFieldEnd();
}
oprot.writeFieldStop();
oprot.writeStructEnd();
}
}
private static class TLZ_PatchTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
public TLZ_PatchTupleScheme getScheme() {
return new TLZ_PatchTupleScheme();
}
}
private static class TLZ_PatchTupleScheme extends org.apache.thrift.scheme.TupleScheme<TLZ_Patch> {
@Override
public void write(org.apache.thrift.protocol.TProtocol prot, TLZ_Patch struct) throws org.apache.thrift.TException {
org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
{
oprot.writeI32(struct.entities.size());
for (TLZ_PatchEntry _iter12 : struct.entities)
{
_iter12.write(oprot);
}
}
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, TLZ_Patch struct) throws org.apache.thrift.TException {
org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
{
org.apache.thrift.protocol.TList _list13 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
struct.entities = new java.util.ArrayList<TLZ_PatchEntry>(_list13.size);
TLZ_PatchEntry _elem14;
for (int _i15 = 0; _i15 < _list13.size; ++_i15)
{
_elem14 = new TLZ_PatchEntry();
_elem14.read(iprot);
struct.entities.add(_elem14);
}
}
struct.setEntitiesIsSet(true);
}
}
private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
}
}
| apache-2.0 |
moosbusch/xbLIDO | src/net/opengis/gml/MultiSurfaceType.java | 10068 | /*
* Copyright 2013 Gunnar Kappei.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.opengis.gml;
/**
* An XML MultiSurfaceType(@http://www.opengis.net/gml).
*
* This is a complex type.
*/
public interface MultiSurfaceType extends net.opengis.gml.AbstractGeometricAggregateType
{
public static final org.apache.xmlbeans.SchemaType type = (org.apache.xmlbeans.SchemaType)
org.apache.xmlbeans.XmlBeans.typeSystemForClassLoader(MultiSurfaceType.class.getClassLoader(), "schemaorg_apache_xmlbeans.system.s6E28D279B6C224D74769DB8B98AF1665").resolveHandle("multisurfacetypeb44dtype");
/**
* Gets a List of "surfaceMember" elements
*/
java.util.List<net.opengis.gml.SurfacePropertyType> getSurfaceMemberList();
/**
* Gets array of all "surfaceMember" elements
* @deprecated
*/
@Deprecated
net.opengis.gml.SurfacePropertyType[] getSurfaceMemberArray();
/**
* Gets ith "surfaceMember" element
*/
net.opengis.gml.SurfacePropertyType getSurfaceMemberArray(int i);
/**
* Returns number of "surfaceMember" element
*/
int sizeOfSurfaceMemberArray();
/**
* Sets array of all "surfaceMember" element
*/
void setSurfaceMemberArray(net.opengis.gml.SurfacePropertyType[] surfaceMemberArray);
/**
* Sets ith "surfaceMember" element
*/
void setSurfaceMemberArray(int i, net.opengis.gml.SurfacePropertyType surfaceMember);
/**
* Inserts and returns a new empty value (as xml) as the ith "surfaceMember" element
*/
net.opengis.gml.SurfacePropertyType insertNewSurfaceMember(int i);
/**
* Appends and returns a new empty value (as xml) as the last "surfaceMember" element
*/
net.opengis.gml.SurfacePropertyType addNewSurfaceMember();
/**
* Removes the ith "surfaceMember" element
*/
void removeSurfaceMember(int i);
/**
* Gets the "surfaceMembers" element
*/
net.opengis.gml.SurfaceArrayPropertyType getSurfaceMembers();
/**
* True if has "surfaceMembers" element
*/
boolean isSetSurfaceMembers();
/**
* Sets the "surfaceMembers" element
*/
void setSurfaceMembers(net.opengis.gml.SurfaceArrayPropertyType surfaceMembers);
/**
* Appends and returns a new empty "surfaceMembers" element
*/
net.opengis.gml.SurfaceArrayPropertyType addNewSurfaceMembers();
/**
* Unsets the "surfaceMembers" element
*/
void unsetSurfaceMembers();
/**
* A factory class with static methods for creating instances
* of this type.
*/
public static final class Factory
{
public static net.opengis.gml.MultiSurfaceType newInstance() {
return (net.opengis.gml.MultiSurfaceType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().newInstance( type, null ); }
public static net.opengis.gml.MultiSurfaceType newInstance(org.apache.xmlbeans.XmlOptions options) {
return (net.opengis.gml.MultiSurfaceType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().newInstance( type, options ); }
/** @param xmlAsString the string value to parse */
public static net.opengis.gml.MultiSurfaceType parse(java.lang.String xmlAsString) throws org.apache.xmlbeans.XmlException {
return (net.opengis.gml.MultiSurfaceType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( xmlAsString, type, null ); }
public static net.opengis.gml.MultiSurfaceType parse(java.lang.String xmlAsString, org.apache.xmlbeans.XmlOptions options) throws org.apache.xmlbeans.XmlException {
return (net.opengis.gml.MultiSurfaceType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( xmlAsString, type, options ); }
/** @param file the file from which to load an xml document */
public static net.opengis.gml.MultiSurfaceType parse(java.io.File file) throws org.apache.xmlbeans.XmlException, java.io.IOException {
return (net.opengis.gml.MultiSurfaceType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( file, type, null ); }
public static net.opengis.gml.MultiSurfaceType parse(java.io.File file, org.apache.xmlbeans.XmlOptions options) throws org.apache.xmlbeans.XmlException, java.io.IOException {
return (net.opengis.gml.MultiSurfaceType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( file, type, options ); }
public static net.opengis.gml.MultiSurfaceType parse(java.net.URL u) throws org.apache.xmlbeans.XmlException, java.io.IOException {
return (net.opengis.gml.MultiSurfaceType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( u, type, null ); }
public static net.opengis.gml.MultiSurfaceType parse(java.net.URL u, org.apache.xmlbeans.XmlOptions options) throws org.apache.xmlbeans.XmlException, java.io.IOException {
return (net.opengis.gml.MultiSurfaceType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( u, type, options ); }
public static net.opengis.gml.MultiSurfaceType parse(java.io.InputStream is) throws org.apache.xmlbeans.XmlException, java.io.IOException {
return (net.opengis.gml.MultiSurfaceType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( is, type, null ); }
public static net.opengis.gml.MultiSurfaceType parse(java.io.InputStream is, org.apache.xmlbeans.XmlOptions options) throws org.apache.xmlbeans.XmlException, java.io.IOException {
return (net.opengis.gml.MultiSurfaceType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( is, type, options ); }
public static net.opengis.gml.MultiSurfaceType parse(java.io.Reader r) throws org.apache.xmlbeans.XmlException, java.io.IOException {
return (net.opengis.gml.MultiSurfaceType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( r, type, null ); }
public static net.opengis.gml.MultiSurfaceType parse(java.io.Reader r, org.apache.xmlbeans.XmlOptions options) throws org.apache.xmlbeans.XmlException, java.io.IOException {
return (net.opengis.gml.MultiSurfaceType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( r, type, options ); }
public static net.opengis.gml.MultiSurfaceType parse(javax.xml.stream.XMLStreamReader sr) throws org.apache.xmlbeans.XmlException {
return (net.opengis.gml.MultiSurfaceType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( sr, type, null ); }
public static net.opengis.gml.MultiSurfaceType parse(javax.xml.stream.XMLStreamReader sr, org.apache.xmlbeans.XmlOptions options) throws org.apache.xmlbeans.XmlException {
return (net.opengis.gml.MultiSurfaceType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( sr, type, options ); }
public static net.opengis.gml.MultiSurfaceType parse(org.w3c.dom.Node node) throws org.apache.xmlbeans.XmlException {
return (net.opengis.gml.MultiSurfaceType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( node, type, null ); }
public static net.opengis.gml.MultiSurfaceType parse(org.w3c.dom.Node node, org.apache.xmlbeans.XmlOptions options) throws org.apache.xmlbeans.XmlException {
return (net.opengis.gml.MultiSurfaceType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( node, type, options ); }
/** @deprecated {@link org.apache.xmlbeans.xml.stream.XMLInputStream} */
@Deprecated
public static net.opengis.gml.MultiSurfaceType parse(org.apache.xmlbeans.xml.stream.XMLInputStream xis) throws org.apache.xmlbeans.XmlException, org.apache.xmlbeans.xml.stream.XMLStreamException {
return (net.opengis.gml.MultiSurfaceType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( xis, type, null ); }
/** @deprecated {@link org.apache.xmlbeans.xml.stream.XMLInputStream} */
@Deprecated
public static net.opengis.gml.MultiSurfaceType parse(org.apache.xmlbeans.xml.stream.XMLInputStream xis, org.apache.xmlbeans.XmlOptions options) throws org.apache.xmlbeans.XmlException, org.apache.xmlbeans.xml.stream.XMLStreamException {
return (net.opengis.gml.MultiSurfaceType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( xis, type, options ); }
/** @deprecated {@link org.apache.xmlbeans.xml.stream.XMLInputStream} */
@Deprecated
public static org.apache.xmlbeans.xml.stream.XMLInputStream newValidatingXMLInputStream(org.apache.xmlbeans.xml.stream.XMLInputStream xis) throws org.apache.xmlbeans.XmlException, org.apache.xmlbeans.xml.stream.XMLStreamException {
return org.apache.xmlbeans.XmlBeans.getContextTypeLoader().newValidatingXMLInputStream( xis, type, null ); }
/** @deprecated {@link org.apache.xmlbeans.xml.stream.XMLInputStream} */
@Deprecated
public static org.apache.xmlbeans.xml.stream.XMLInputStream newValidatingXMLInputStream(org.apache.xmlbeans.xml.stream.XMLInputStream xis, org.apache.xmlbeans.XmlOptions options) throws org.apache.xmlbeans.XmlException, org.apache.xmlbeans.xml.stream.XMLStreamException {
return org.apache.xmlbeans.XmlBeans.getContextTypeLoader().newValidatingXMLInputStream( xis, type, options ); }
private Factory() { } // No instance of this class allowed
}
}
| apache-2.0 |
aws/aws-sdk-java | aws-java-sdk-redshift/src/main/java/com/amazonaws/services/redshift/model/transform/AuthenticationProfileNotFoundExceptionUnmarshaller.java | 1678 | /*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.redshift.model.transform;
import org.w3c.dom.Node;
import javax.annotation.Generated;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.transform.StandardErrorUnmarshaller;
import com.amazonaws.services.redshift.model.AuthenticationProfileNotFoundException;
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class AuthenticationProfileNotFoundExceptionUnmarshaller extends StandardErrorUnmarshaller {

    public AuthenticationProfileNotFoundExceptionUnmarshaller() {
        super(AuthenticationProfileNotFoundException.class);
    }

    /**
     * Unmarshalls the given error node into an
     * {@code AuthenticationProfileNotFoundException}, or returns {@code null}
     * when the node's error code is not the one this unmarshaller handles.
     */
    @Override
    public AmazonServiceException unmarshall(Node node) throws Exception {
        String errorCode = parseErrorCode(node);
        // Constant-first equals also covers a null error code.
        if (!"AuthenticationProfileNotFoundFault".equals(errorCode)) {
            return null;
        }
        return (AuthenticationProfileNotFoundException) super.unmarshall(node);
    }
}
| apache-2.0 |
dimir2/vivanov | part2/ch1/src/test/java/ru/job4j/pro/collections/list/NodeListTest.java | 2090 | package ru.job4j.pro.collections.list;
import org.junit.Test;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
/**
* NodeListTest class.
*
* @author Vladimir Ivanov
* @version 0.1
* @since 30.08.2017
*/
public class NodeListTest {

    /**
     * A linear list (1 -> 2 -> 3 -> 4 -> 5) must be reported as cycle-free.
     */
    @Test
    public void whenListIsNotCycledThenGetFalse() {
        Node<Integer> head = new Node<>(1);
        Node<Integer> tail = head;
        for (int value = 2; value <= 5; value++) {
            tail.next = new Node<>(value);
            tail = tail.next;
        }
        NodeList<Integer> list = new NodeList<>(head);
        assertThat(list.hasCycle(), is(false));
    }

    /**
     * A list whose last node points back at the head (1 -> 2 -> 3 -> 4 -> 1)
     * must be reported as cyclic.
     */
    @Test
    public void whenListIsCycledThenGetTrue() {
        Node<Integer> head = new Node<>(1);
        Node<Integer> tail = head;
        for (int value = 2; value <= 4; value++) {
            tail.next = new Node<>(value);
            tail = tail.next;
        }
        tail.next = head;
        NodeList<Integer> list = new NodeList<>(head);
        assertThat(list.hasCycle(), is(true));
    }

    /**
     * A large list (~10M nodes) with a back-edge deep inside it
     * (node 9990000 -> node 900000) must still be detected as cyclic.
     */
    @Test
    public void whenBigListIsCycledThenGetTrue() {
        Node<Integer> node = new Node<>(0);
        NodeList<Integer> list = new NodeList<>(node);
        Node<Integer> cycleTarget = null;
        Node<Integer> cycleSource = null;
        for (int value = 1; value < 10000000; value++) {
            node.next = new Node<>(value);
            node = node.next;
            if (value == 900000) {
                cycleTarget = node;
            } else if (value == 9990000) {
                cycleSource = node;
            }
        }
        // Both markers are guaranteed non-null: 900000 and 9990000 are inside
        // the loop's [1, 10000000) range.
        cycleSource.next = cycleTarget;
        assertThat(list.hasCycle(), is(true));
    }
} | apache-2.0 |
zhubinqiang/myTMS | src/test/java/com/intel/media/mts/dao/impl/SoftwareDaoImplTest.java | 598 | package com.intel.media.mts.dao.impl;
import org.junit.Assert;
import org.junit.Test;
import com.intel.media.mts.dao.DaoTestSupport;
import com.intel.media.mts.model.Software;
public class SoftwareDaoImplTest extends DaoTestSupport {

    /**
     * Saving a Software entity and reloading it by its generated id must
     * yield the same name and description.
     */
    @Test
    public void test() {
        Software saved = new Software();
        String name = "Ubuntu";
        String desc = "OS type 12.04";
        saved.setName(name);
        saved.setDescription(desc);
        // doSave is expected to populate the generated id on the entity;
        // reload through that id rather than trusting the in-memory object.
        softwareDao.doSave(saved);
        Software reloaded = softwareDao.findById(saved.getId());
        Assert.assertEquals(name, reloaded.getName());
        Assert.assertEquals(desc, reloaded.getDescription());
    }
}
| apache-2.0 |
cuiqunhao/jeesite | src/main/java/com/thinkgem/jeesite/modules/purifier/dao/WareDao.java | 411 | package com.thinkgem.jeesite.modules.purifier.dao;
import com.thinkgem.jeesite.common.persistence.CrudDao;
import com.thinkgem.jeesite.common.persistence.annotation.MyBatisDao;
import com.thinkgem.jeesite.modules.purifier.entity.Ware;
/**
 * Warehouse management DAO ({@code Ware} entities), backed by MyBatis.
 * Generic CRUD operations are inherited from {@code CrudDao}.
 *
 * @author addison
 * @since 2017-03-02
 */
@MyBatisDao
public interface WareDao extends CrudDao<Ware> {
	/**
	 * Deletes warehouse record(s) matching the given example object.
	 *
	 * @param ware criteria object identifying the row(s) to delete
	 * @return presumably the number of rows affected — TODO confirm against the mapper XML
	 */
	int deleteByWare(Ware ware);
}
| apache-2.0 |
MKLab-ITI/image-forensics | java_service/src/main/java/gr/iti/mklab/reveal/forensics/util/ThumbnailExtractor/image/jpeg/TIFFHeader.java | 3339 | /*
* @(#)TIFFHeader.java
*
* $Date: 2014-03-13 04:15:48 -0400 (Thu, 13 Mar 2014) $
*
* Copyright (c) 2011 by Jeremy Wood.
* All rights reserved.
*
* The copyright of this software is owned by Jeremy Wood.
* You may not use, copy or modify this software, except in
* accordance with the license agreement you entered into with
* Jeremy Wood. For details see accompanying license terms.
*
* This software is probably, but not necessarily, discussed here:
* https://javagraphics.java.net/
*
* That site should also contain the most recent official version
* of this software. (See the SVN repository for more details.)
*
Modified BSD License
Copyright (c) 2015, Jeremy Wood.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* The name of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
package gr.iti.mklab.reveal.forensics.util.ThumbnailExtractor.image.jpeg;
import java.io.IOException;
import java.io.InputStream;
class TIFFHeader {
	// True when the byte-order mark was "MM" (big-endian), false for "II".
	boolean bigEndian;
	// Byte offset (from the start of the TIFF header) of the first IFD.
	int ifdOffset;
	/**
	 * Parses the 8-byte TIFF header from the stream: a 2-byte byte-order
	 * mark ("II" or "MM"), the 2-byte magic number 42, and a 4-byte offset
	 * to the first image file directory (IFD).
	 *
	 * @throws IOException if the stream ends early, the byte-order mark is
	 *         unrecognized, or the 0x002A magic number is missing
	 */
	TIFFHeader(InputStream in) throws IOException {
		byte[] array = new byte[4];
		// NOTE(review): the last readFully argument appears to request
		// byte-order reversal for little-endian input — confirm against
		// JPEGMarkerInputStream. The byte-order mark itself is read as-is.
		if(JPEGMarkerInputStream.readFully(in, array, 2, false)!=2) {
			throw new IOException("Incomplete TIFF Header");
		}
		// 73 = ASCII 'I': "II" marks Intel (little-endian) byte order.
		if(array[0]==73 && array[1]==73) { //little endian
			bigEndian = false;
		// 77 = ASCII 'M': "MM" marks Motorola (big-endian) byte order.
		} else if(array[0]==77 && array[1]==77) { //big endian
			bigEndian = true;
		} else {
			throw new IOException("Unrecognized endian encoding.");
		}
		if(JPEGMarkerInputStream.readFully(in, array, 2, !bigEndian)!=2) {
			throw new IOException("Incomplete TIFF Header");
		}
		// After normalization the magic number must read as big-endian 42.
		if(!(array[0]==0 && array[1]==42)) { //required byte in TIFF header
			throw new IOException("Missing required identifier 0x002A.");
		}
		if(JPEGMarkerInputStream.readFully(in, array, 4, !bigEndian)!=4) {
			throw new IOException("Incomplete TIFF Header");
		}
		// Assemble the 4 (already order-normalized) bytes into an int,
		// masking each byte to avoid sign extension.
		ifdOffset = ((array[0] & 0xff) << 24) + ((array[1] & 0xff) << 16) +
			((array[2] & 0xff) << 8) + ((array[3] & 0xff) << 0) ;
	}
	/** The length of this TIFF header. Always 8 bytes: 2 (byte order) + 2 (magic) + 4 (IFD offset). */
	int getLength() {
		return 8;
	}
}
| apache-2.0 |
damico/amanaje | source-code/Amanaje/src/com/amanaje/activities/package-info.java | 70 | /**
*
*/
/**
* @author root
*
*/
package com.amanaje.activities; | apache-2.0 |
brmson/hub | src/main/java/eu/ailao/hub/dialog/Dialog.java | 1699 | package eu.ailao.hub.dialog;
import eu.ailao.hub.corefresol.answers.ClueMemorizer;
import eu.ailao.hub.corefresol.concepts.ConceptMemorizer;
import eu.ailao.hub.questions.Question;
import java.util.ArrayList;
/**
 * Created by Petr Marek on 24.02.2016.
 * Represents a single dialog: its id, the questions asked within it, and the
 * memorizers used for coreference resolution across those questions.
 */
public class Dialog {
	/** Unique identifier of this dialog. */
	private final int id;
	/** Questions asked so far in this dialog, in insertion order. */
	private final ArrayList<Question> questionsOfDialogue;
	/** Remembers concepts seen in earlier questions of this dialog. */
	private final ConceptMemorizer conceptMemorizer;
	/** Remembers clues from earlier answers of this dialog. */
	private final ClueMemorizer clueMemorizer;

	/**
	 * Creates an empty dialog with the given id.
	 *
	 * @param id identifier of the dialog
	 */
	public Dialog(int id) {
		this.id = id;
		this.questionsOfDialogue = new ArrayList<>();
		this.conceptMemorizer = new ConceptMemorizer();
		this.clueMemorizer = new ClueMemorizer();
	}

	/**
	 * Adds a question to this dialog.
	 *
	 * @param question the question to record
	 */
	public void addQuestion(Question question) {
		questionsOfDialogue.add(question);
	}

	/** Returns the live list of questions in this dialog. */
	public ArrayList<Question> getQuestions() {
		return questionsOfDialogue;
	}

	/**
	 * Gets the Yoda question ids of all questions in this dialog.
	 *
	 * @return ids in the same order the questions were added
	 */
	public ArrayList<Integer> getQuestionsIDs() {
		ArrayList<Integer> questionIDs = new ArrayList<>();
		for (Question question : questionsOfDialogue) {
			questionIDs.add(question.getYodaQuestionID());
		}
		return questionIDs;
	}

	public int getId() {
		return id;
	}

	public ConceptMemorizer getConceptMemorizer() {
		return conceptMemorizer;
	}

	public ClueMemorizer getClueMemorizer() {
		return clueMemorizer;
	}

	/**
	 * Checks whether this dialog contains a question with the given Yoda id.
	 *
	 * @param id Yoda question id to look for
	 * @return true if some question in this dialog carries that id
	 */
	public boolean hasQuestionWithId(int id) {
		for (Question question : questionsOfDialogue) {
			if (question.getYodaQuestionID() == id) {
				return true;
			}
		}
		return false;
	}
}
| apache-2.0 |
yahoo/fili | fili-core/src/main/java/com/yahoo/bard/webservice/data/metric/LogicalMetricColumn.java | 1271 | // Copyright 2016 Yahoo Inc.
// Licensed under the terms of the Apache license. Please see LICENSE.md file distributed with this work for terms.
package com.yahoo.bard.webservice.data.metric;
/**
* LogicalMetricColumn.
*/
/**
 * A metric column of a logical table schema, backed by a {@link LogicalMetric}.
 */
public class LogicalMetricColumn extends MetricColumn {

    private final LogicalMetric metric;

    /**
     * Builds a column whose name may differ from the metric's own name.
     *
     * @param name The column name
     * @param metric The logical metric
     *
     * @deprecated because LogicalMetricColumn is really only a thing for LogicalTable, so there's no reason for there
     * to be an alias on the LogicalMetric inside the LogicalTableSchema.
     */
    @Deprecated
    public LogicalMetricColumn(String name, LogicalMetric metric) {
        super(name);
        this.metric = metric;
    }

    /**
     * Builds a column named after the metric itself.
     *
     * @param metric The logical metric
     */
    public LogicalMetricColumn(LogicalMetric metric) {
        super(metric.getName());
        this.metric = metric;
    }

    /**
     * Getter for a logical metric.
     *
     * @return logical metric
     */
    public LogicalMetric getLogicalMetric() {
        return this.metric;
    }

    @Override
    public String toString() {
        // Same rendering as before: {logicalMetric:'<name>'}
        return new StringBuilder("{logicalMetric:'").append(getName()).append("'}").toString();
    }
}
| apache-2.0 |
imie-source/Ekologia | EkologiaGUI/src/coop/ekologia/presentation/controller/user/LoginDeconnectionServlet.java | 1605 | package coop.ekologia.presentation.controller.user;
import java.io.IOException;
import javax.inject.Inject;
import javax.servlet.ServletException;
import javax.servlet.annotation.WebServlet;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import coop.ekologia.presentation.EkologiaServlet;
import coop.ekologia.presentation.session.LoginSession;
/**
* Servlet implementation class LoginConnectionServlet
*/
@WebServlet(LoginDeconnectionServlet.routing)
public class LoginDeconnectionServlet extends EkologiaServlet {
private static final long serialVersionUID = 1L;
public static final String routing = "/login/deconnection";
public static final String routing(HttpServletRequest request) {
return getUrl(request, routing);
}
@Inject
LoginSession loginSession;
/**
* @see HttpServlet#HttpServlet()
*/
public LoginDeconnectionServlet() {
super();
// TODO Auto-generated constructor stub
}
/**
* @see HttpServlet#doGet(HttpServletRequest request, HttpServletResponse response)
*/
protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
loginSession.setUser(null);
response.sendRedirect(request.getHeader("referer"));
}
/**
* @see HttpServlet#doPost(HttpServletRequest request, HttpServletResponse response)
*/
protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
// TODO Auto-generated method stub
}
}
| apache-2.0 |
Activiti/Activiti | activiti-core/activiti-engine/src/main/java/org/activiti/engine/impl/cmd/CreateAttachmentCmd.java | 5884 | /*
* Copyright 2010-2020 Alfresco Software, Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.activiti.engine.impl.cmd;
import java.io.InputStream;
import org.activiti.engine.ActivitiException;
import org.activiti.engine.ActivitiObjectNotFoundException;
import org.activiti.engine.delegate.event.ActivitiEventType;
import org.activiti.engine.delegate.event.impl.ActivitiEventBuilder;
import org.activiti.engine.impl.identity.Authentication;
import org.activiti.engine.impl.interceptor.Command;
import org.activiti.engine.impl.interceptor.CommandContext;
import org.activiti.engine.impl.persistence.entity.AttachmentEntity;
import org.activiti.engine.impl.persistence.entity.ByteArrayEntity;
import org.activiti.engine.impl.persistence.entity.ExecutionEntity;
import org.activiti.engine.impl.persistence.entity.TaskEntity;
import org.activiti.engine.impl.util.IoUtil;
import org.activiti.engine.runtime.ProcessInstance;
import org.activiti.engine.task.Attachment;
import org.activiti.engine.task.Task;
/**
 * Command that creates an {@link Attachment} attached to a task and/or a
 * process instance. The attachment may carry inline binary content or point to
 * an external URL; creation is recorded in history and, when enabled,
 * published through the event dispatcher.
 */
// Not Serializable
public class CreateAttachmentCmd implements Command<Attachment> {
  // Attachment metadata supplied by the caller.
  protected String attachmentType;
  protected String taskId;
  protected String processInstanceId;
  protected String attachmentName;
  protected String attachmentDescription;
  // Binary payload; may be null when only an external URL is given.
  protected InputStream content;
  // External location of the attachment; may be null when content is given.
  protected String url;
  public CreateAttachmentCmd(String attachmentType, String taskId, String processInstanceId, String attachmentName, String attachmentDescription, InputStream content, String url) {
    this.attachmentType = attachmentType;
    this.taskId = taskId;
    this.processInstanceId = processInstanceId;
    this.attachmentName = attachmentName;
    this.attachmentDescription = attachmentDescription;
    this.content = content;
    this.url = url;
  }
  public Attachment execute(CommandContext commandContext) {
    // Validate the referenced task / process instance before creating anything.
    if (taskId != null) {
      verifyTaskParameters(commandContext);
    }
    if (processInstanceId != null) {
      verifyExecutionParameters(commandContext);
    }
    return executeInternal(commandContext);
  }
  protected Attachment executeInternal(CommandContext commandContext) {
    AttachmentEntity attachment = commandContext.getAttachmentEntityManager().create();
    attachment.setName(attachmentName);
    attachment.setProcessInstanceId(processInstanceId);
    attachment.setTaskId(taskId);
    attachment.setDescription(attachmentDescription);
    attachment.setType(attachmentType);
    attachment.setUrl(url);
    attachment.setUserId(Authentication.getAuthenticatedUserId());
    attachment.setTime(commandContext.getProcessEngineConfiguration().getClock().getCurrentTime());
    // Insert without firing entity events here; events are dispatched explicitly below.
    commandContext.getAttachmentEntityManager().insert(attachment, false);
    if (content != null) {
      // Persist the binary payload and link it to the attachment.
      byte[] bytes = IoUtil.readInputStream(content, attachmentName);
      ByteArrayEntity byteArray = commandContext.getByteArrayEntityManager().create();
      byteArray.setBytes(bytes);
      commandContext.getByteArrayEntityManager().insert(byteArray);
      attachment.setContentId(byteArray.getId());
      attachment.setContent(byteArray);
    }
    commandContext.getHistoryManager().createAttachmentComment(taskId, processInstanceId, attachmentName, true);
    if (commandContext.getProcessEngineConfiguration().getEventDispatcher().isEnabled()) {
      // Forced to fetch the process-instance to associate the right
      // process definition
      String processDefinitionId = null;
      if (attachment.getProcessInstanceId() != null) {
        ExecutionEntity process = commandContext.getExecutionEntityManager().findById(processInstanceId);
        if (process != null) {
          processDefinitionId = process.getProcessDefinitionId();
        }
      }
      commandContext.getProcessEngineConfiguration().getEventDispatcher()
          .dispatchEvent(ActivitiEventBuilder.createEntityEvent(ActivitiEventType.ENTITY_CREATED, attachment, processInstanceId, processInstanceId, processDefinitionId));
      commandContext.getProcessEngineConfiguration().getEventDispatcher()
          .dispatchEvent(ActivitiEventBuilder.createEntityEvent(ActivitiEventType.ENTITY_INITIALIZED, attachment, processInstanceId, processInstanceId, processDefinitionId));
    }
    return attachment;
  }
  // Ensures the referenced task exists and is not suspended.
  protected TaskEntity verifyTaskParameters(CommandContext commandContext) {
    TaskEntity task = commandContext.getTaskEntityManager().findById(taskId);
    if (task == null) {
      throw new ActivitiObjectNotFoundException("Cannot find task with id " + taskId, Task.class);
    }
    if (task.isSuspended()) {
      throw new ActivitiException("It is not allowed to add an attachment to a suspended task");
    }
    return task;
  }
  // Ensures the referenced process instance exists and is not suspended.
  protected ExecutionEntity verifyExecutionParameters(CommandContext commandContext) {
    ExecutionEntity execution = commandContext.getExecutionEntityManager().findById(processInstanceId);
    if (execution == null) {
      throw new ActivitiObjectNotFoundException("Process instance " + processInstanceId + " doesn't exist", ProcessInstance.class);
    }
    if (execution.isSuspended()) {
      throw new ActivitiException("It is not allowed to add an attachment to a suspended process instance");
    }
    return execution;
  }
}
| apache-2.0 |
domaframework/doma | doma-core/src/main/java/org/seasar/doma/internal/jdbc/dialect/StandardCountCalculatingTransformer.java | 656 | package org.seasar.doma.internal.jdbc.dialect;
import org.seasar.doma.internal.jdbc.sql.SimpleSqlNodeVisitor;
import org.seasar.doma.internal.jdbc.sql.node.AnonymousNode;
import org.seasar.doma.jdbc.SqlNode;
public class StandardCountCalculatingTransformer extends SimpleSqlNodeVisitor<SqlNode, Void> {

  protected boolean processed;

  /**
   * Rebuilds the given SQL node tree under a fresh anonymous root, visiting
   * every child so that subclasses may rewrite nodes for COUNT calculation.
   *
   * @param sqlNode the root node to transform
   * @return an anonymous node holding the (possibly rewritten) children
   */
  public SqlNode transform(SqlNode sqlNode) {
    AnonymousNode transformed = new AnonymousNode();
    sqlNode.getChildren().forEach(child -> transformed.appendNode(child.accept(this, null)));
    return transformed;
  }

  /** Nodes without a dedicated visit method pass through unchanged. */
  @Override
  protected SqlNode defaultAction(SqlNode node, Void p) {
    return node;
  }
}
| apache-2.0 |
irudyak/ignite | modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/LocalWalModeChangeDuringRebalancingSelfTest.java | 21207 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.internal.processors.cache.persistence;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.file.OpenOption;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.IgniteException;
import org.apache.ignite.IgniteSystemProperties;
import org.apache.ignite.cache.CacheMode;
import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.configuration.DataRegionConfiguration;
import org.apache.ignite.configuration.DataStorageConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.internal.IgniteEx;
import org.apache.ignite.internal.managers.communication.GridIoMessage;
import org.apache.ignite.internal.processors.cache.CacheGroupContext;
import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionSupplyMessage;
import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointHistory;
import org.apache.ignite.internal.processors.cache.persistence.file.FileIO;
import org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory;
import org.apache.ignite.internal.util.lang.GridAbsPredicate;
import org.apache.ignite.internal.util.typedef.G;
import org.apache.ignite.internal.util.typedef.internal.CU;
import org.apache.ignite.internal.util.typedef.internal.U;
import org.apache.ignite.lang.IgniteInClosure;
import org.apache.ignite.plugin.extensions.communication.Message;
import org.apache.ignite.spi.IgniteSpiException;
import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi;
import org.apache.ignite.testframework.GridTestUtils;
import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
import org.junit.Assert;
/**
 * Tests local WAL state management during rebalancing: WAL is expected to be
 * disabled on a node while it rebalances partitions and re-enabled (followed
 * by a checkpoint) once rebalancing completes.
 */
public class LocalWalModeChangeDuringRebalancingSelfTest extends GridCommonAbstractTest {
    /** Value for the IGNITE_DISABLE_WAL_DURING_REBALANCING system property applied to started nodes. */
    private static boolean disableWalDuringRebalancing = true;
    /** Latch allowing tests to delay delivery of rebalance supply messages. */
    private static final AtomicReference<CountDownLatch> supplyMessageLatch = new AtomicReference<>();
    /** Latch allowing tests to delay checkpoint file writes. */
    private static final AtomicReference<CountDownLatch> fileIOLatch = new AtomicReference<>();
    /** Replicated cache name. */
    private static final String REPL_CACHE = "cache";
    /** {@inheritDoc} */
    @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
        IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);
        cfg.setDataStorageConfiguration(
            new DataStorageConfiguration()
                .setDefaultDataRegionConfiguration(
                    new DataRegionConfiguration()
                        .setPersistenceEnabled(true)
                        .setMaxSize(200 * 1024 * 1024)
                        .setInitialSize(200 * 1024 * 1024)
                )
                // Test verifies checkpoint count, so it is essencial that no checkpoint is triggered by timeout
                .setCheckpointFrequency(999_999_999_999L)
                .setFileIOFactory(new TestFileIOFactory(new DataStorageConfiguration().getFileIOFactory()))
        );
        cfg.setCacheConfiguration(
            new CacheConfiguration(DEFAULT_CACHE_NAME)
                // Test checks internal state before and after rebalance, so it is configured to be triggered manually
                .setRebalanceDelay(-1),
            new CacheConfiguration(REPL_CACHE)
                .setRebalanceDelay(-1)
                .setCacheMode(CacheMode.REPLICATED)
        );
        // Communication SPI that can block supply messages for the default cache via supplyMessageLatch.
        cfg.setCommunicationSpi(new TcpCommunicationSpi() {
            @Override public void sendMessage(ClusterNode node, Message msg) throws IgniteSpiException {
                if (msg instanceof GridIoMessage && ((GridIoMessage)msg).message() instanceof GridDhtPartitionSupplyMessage) {
                    int grpId = ((GridDhtPartitionSupplyMessage)((GridIoMessage)msg).message()).groupId();
                    if (grpId == CU.cacheId(DEFAULT_CACHE_NAME)) {
                        CountDownLatch latch0 = supplyMessageLatch.get();
                        if (latch0 != null)
                            try {
                                latch0.await();
                            }
                            catch (InterruptedException ex) {
                                throw new IgniteException(ex);
                            }
                    }
                }
                super.sendMessage(node, msg);
            }
            @Override public void sendMessage(ClusterNode node, Message msg,
                IgniteInClosure<IgniteException> ackC) throws IgniteSpiException {
                if (msg instanceof GridIoMessage && ((GridIoMessage)msg).message() instanceof GridDhtPartitionSupplyMessage) {
                    int grpId = ((GridDhtPartitionSupplyMessage)((GridIoMessage)msg).message()).groupId();
                    if (grpId == CU.cacheId(DEFAULT_CACHE_NAME)) {
                        CountDownLatch latch0 = supplyMessageLatch.get();
                        if (latch0 != null)
                            try {
                                latch0.await();
                            }
                            catch (InterruptedException ex) {
                                throw new IgniteException(ex);
                            }
                    }
                }
                super.sendMessage(node, msg, ackC);
            }
        });
        cfg.setConsistentId(igniteInstanceName);
        System.setProperty(IgniteSystemProperties.IGNITE_DISABLE_WAL_DURING_REBALANCING,
            Boolean.toString(disableWalDuringRebalancing));
        return cfg;
    }
    /** {@inheritDoc} */
    @Override protected void beforeTestsStarted() throws Exception {
        super.beforeTestsStarted();
        cleanPersistenceDir();
    }
    /** {@inheritDoc} */
    @Override protected void afterTest() throws Exception {
        super.afterTest();
        // Release any latches a test left armed so stopping grids cannot hang.
        CountDownLatch msgLatch = supplyMessageLatch.get();
        if (msgLatch != null) {
            while (msgLatch.getCount() > 0)
                msgLatch.countDown();
            supplyMessageLatch.set(null);
        }
        CountDownLatch fileLatch = fileIOLatch.get();
        if (fileLatch != null) {
            while (fileLatch.getCount() > 0)
                fileLatch.countDown();
            fileIOLatch.set(null);
        }
        stopAllGrids();
        cleanPersistenceDir();
        disableWalDuringRebalancing = true;
    }
    /**
     * @return Count of entries to be processed within test.
     */
    protected int getKeysCount() {
        return 10_000;
    }
    /**
     * Checks that WAL is disabled on a joining node while it rebalances (the default behavior).
     *
     * @throws Exception If failed.
     */
    public void testWalDisabledDuringRebalancing() throws Exception {
        doTestSimple();
    }
    /**
     * Checks that WAL stays enabled during rebalancing when the property is set to false.
     *
     * @throws Exception If failed.
     */
    public void testWalNotDisabledIfParameterSetToFalse() throws Exception {
        disableWalDuringRebalancing = false;
        doTestSimple();
    }
    /**
     * Common scenario: start grids, add a node, rebalance manually and verify
     * WAL state plus the number of checkpoints in each phase.
     *
     * @throws Exception If failed.
     */
    private void doTestSimple() throws Exception {
        Ignite ignite = startGrids(3);
        ignite.cluster().active(true);
        IgniteCache<Integer, Integer> cache = ignite.cache(DEFAULT_CACHE_NAME);
        int keysCnt = getKeysCount();
        for (int k = 0; k < keysCnt; k++)
            cache.put(k, k);
        IgniteEx newIgnite = startGrid(3);
        final CheckpointHistory cpHist =
            ((GridCacheDatabaseSharedManager)newIgnite.context().cache().context().database()).checkpointHistory();
        GridTestUtils.waitForCondition(new GridAbsPredicate() {
            @Override public boolean apply() {
                return !cpHist.checkpoints().isEmpty();
            }
        }, 10_000);
        U.sleep(10); // To ensure timestamp granularity.
        long newIgniteStartedTimestamp = System.currentTimeMillis();
        ignite.cluster().setBaselineTopology(4);
        CacheGroupContext grpCtx = newIgnite.cachex(DEFAULT_CACHE_NAME).context().group();
        assertEquals(!disableWalDuringRebalancing, grpCtx.walEnabled());
        U.sleep(10); // To ensure timestamp granularity.
        long rebalanceStartedTimestamp = System.currentTimeMillis();
        for (Ignite g : G.allGrids())
            g.cache(DEFAULT_CACHE_NAME).rebalance();
        awaitPartitionMapExchange();
        assertTrue(grpCtx.walEnabled());
        U.sleep(10); // To ensure timestamp granularity.
        long rebalanceFinishedTimestamp = System.currentTimeMillis();
        for (Integer k = 0; k < keysCnt; k++)
            assertEquals("k=" + k, k, cache.get(k));
        // Classify every checkpoint into one of the three phases by timestamp.
        int checkpointsBeforeNodeStarted = 0;
        int checkpointsBeforeRebalance = 0;
        int checkpointsAfterRebalance = 0;
        for (Long timestamp : cpHist.checkpoints()) {
            if (timestamp < newIgniteStartedTimestamp)
                checkpointsBeforeNodeStarted++;
            else if (timestamp >= newIgniteStartedTimestamp && timestamp < rebalanceStartedTimestamp)
                checkpointsBeforeRebalance++;
            else if (timestamp >= rebalanceStartedTimestamp && timestamp <= rebalanceFinishedTimestamp)
                checkpointsAfterRebalance++;
        }
        assertEquals(1, checkpointsBeforeNodeStarted); // checkpoint on start
        assertEquals(0, checkpointsBeforeRebalance);
        assertEquals(disableWalDuringRebalancing ? 1 : 0, checkpointsAfterRebalance); // checkpoint if WAL was re-activated
    }
    /**
     * Checks interaction of the local (rebalance-driven) and global (API-driven) WAL state.
     *
     * @throws Exception If failed.
     */
    public void testLocalAndGlobalWalStateInterdependence() throws Exception {
        Ignite ignite = startGrids(3);
        ignite.cluster().active(true);
        IgniteCache<Integer, Integer> cache = ignite.cache(DEFAULT_CACHE_NAME);
        for (int k = 0; k < getKeysCount(); k++)
            cache.put(k, k);
        IgniteEx newIgnite = startGrid(3);
        ignite.cluster().setBaselineTopology(ignite.cluster().nodes());
        CacheGroupContext grpCtx = newIgnite.cachex(DEFAULT_CACHE_NAME).context().group();
        assertFalse(grpCtx.walEnabled());
        ignite.cluster().disableWal(DEFAULT_CACHE_NAME);
        for (Ignite g : G.allGrids())
            g.cache(DEFAULT_CACHE_NAME).rebalance();
        awaitPartitionMapExchange();
        assertFalse(grpCtx.walEnabled()); // WAL is globally disabled
        ignite.cluster().enableWal(DEFAULT_CACHE_NAME);
        assertTrue(grpCtx.walEnabled());
    }
    /**
     * Test that local WAL mode changing works well with exchanges merge.
     *
     * @throws Exception If failed.
     */
    public void testWithExchangesMerge() throws Exception {
        final int nodeCnt = 5;
        final int keyCnt = getKeysCount();
        Ignite ignite = startGrids(nodeCnt);
        ignite.cluster().active(true);
        IgniteCache<Integer, Integer> cache = ignite.cache(REPL_CACHE);
        for (int k = 0; k < keyCnt; k++)
            cache.put(k, k);
        stopGrid(2);
        stopGrid(3);
        stopGrid(4);
        // Rewrite data to trigger further rebalance.
        for (int k = 0; k < keyCnt; k++)
            cache.put(k, k * 2);
        // Start several grids in parallel to trigger exchanges merge.
        startGridsMultiThreaded(2, 3);
        for (int nodeIdx = 2; nodeIdx < nodeCnt; nodeIdx++) {
            CacheGroupContext grpCtx = grid(nodeIdx).cachex(REPL_CACHE).context().group();
            assertFalse(grpCtx.walEnabled());
        }
        // Invoke rebalance manually.
        for (Ignite g : G.allGrids())
            g.cache(REPL_CACHE).rebalance();
        awaitPartitionMapExchange();
        for (int nodeIdx = 2; nodeIdx < nodeCnt; nodeIdx++) {
            CacheGroupContext grpCtx = grid(nodeIdx).cachex(REPL_CACHE).context().group();
            assertTrue(grpCtx.walEnabled());
        }
        // Check no data loss.
        for (int nodeIdx = 2; nodeIdx < nodeCnt; nodeIdx++) {
            IgniteCache<Integer, Integer> cache0 = grid(nodeIdx).cache(REPL_CACHE);
            for (int k = 0; k < keyCnt; k++)
                Assert.assertEquals("nodeIdx=" + nodeIdx + ", key=" + k, (Integer) (2 * k), cache0.get(k));
        }
    }
    /**
     * Runs the parallel-exchange scenario while rebalance supply messages are delayed.
     *
     * @throws Exception If failed.
     */
    public void testParallelExchangeDuringRebalance() throws Exception {
        doTestParallelExchange(supplyMessageLatch);
    }
    /**
     * Runs the parallel-exchange scenario while checkpoint file writes are delayed.
     *
     * @throws Exception If failed.
     */
    public void testParallelExchangeDuringCheckpoint() throws Exception {
        doTestParallelExchange(fileIOLatch);
    }
    /**
     * Verifies WAL stays disabled across an extra exchange that happens while
     * the given latch blocks either rebalancing or checkpointing.
     *
     * @throws Exception If failed.
     */
    private void doTestParallelExchange(AtomicReference<CountDownLatch> latchRef) throws Exception {
        Ignite ignite = startGrids(3);
        ignite.cluster().active(true);
        IgniteCache<Integer, Integer> cache = ignite.cache(DEFAULT_CACHE_NAME);
        for (int k = 0; k < getKeysCount(); k++)
            cache.put(k, k);
        IgniteEx newIgnite = startGrid(3);
        CacheGroupContext grpCtx = newIgnite.cachex(DEFAULT_CACHE_NAME).context().group();
        CountDownLatch latch = new CountDownLatch(1);
        latchRef.set(latch);
        ignite.cluster().setBaselineTopology(ignite.cluster().nodes());
        for (Ignite g : G.allGrids())
            g.cache(DEFAULT_CACHE_NAME).rebalance();
        assertFalse(grpCtx.walEnabled());
        // TODO : test with client node as well
        startGrid(4); // Trigger exchange
        assertFalse(grpCtx.walEnabled());
        latch.countDown();
        assertFalse(grpCtx.walEnabled());
        for (Ignite g : G.allGrids())
            g.cache(DEFAULT_CACHE_NAME).rebalance();
        awaitPartitionMapExchange();
        assertTrue(grpCtx.walEnabled());
    }
    /**
     * Checks that data rebalanced with WAL disabled is cleared after a restart
     * (it cannot be recovered without WAL records).
     *
     * @throws Exception If failed.
     */
    public void testDataClearedAfterRestartWithDisabledWal() throws Exception {
        Ignite ignite = startGrid(0);
        ignite.cluster().active(true);
        IgniteCache<Integer, Integer> cache = ignite.cache(DEFAULT_CACHE_NAME);
        int keysCnt = getKeysCount();
        for (int k = 0; k < keysCnt; k++)
            cache.put(k, k);
        IgniteEx newIgnite = startGrid(1);
        ignite.cluster().setBaselineTopology(2);
        CacheGroupContext grpCtx = newIgnite.cachex(DEFAULT_CACHE_NAME).context().group();
        assertFalse(grpCtx.localWalEnabled());
        stopGrid(1);
        stopGrid(0);
        newIgnite = startGrid(1);
        newIgnite.cluster().active(true);
        newIgnite.cluster().setBaselineTopology(newIgnite.cluster().nodes());
        cache = newIgnite.cache(DEFAULT_CACHE_NAME);
        for (int k = 0; k < keysCnt; k++)
            assertFalse("k=" + k +", v=" + cache.get(k), cache.containsKey(k));
    }
    /**
     * Checks that shrinking the baseline topology does not disable WAL on the remaining nodes.
     *
     * @throws Exception If failed.
     */
    public void testWalNotDisabledAfterShrinkingBaselineTopology() throws Exception {
        Ignite ignite = startGrids(4);
        ignite.cluster().active(true);
        IgniteCache<Integer, Integer> cache = ignite.cache(DEFAULT_CACHE_NAME);
        int keysCnt = getKeysCount();
        for (int k = 0; k < keysCnt; k++)
            cache.put(k, k);
        for (Ignite g : G.allGrids()) {
            CacheGroupContext grpCtx = ((IgniteEx)g).cachex(DEFAULT_CACHE_NAME).context().group();
            assertTrue(grpCtx.walEnabled());
        }
        stopGrid(2);
        ignite.cluster().setBaselineTopology(5);
        for (Ignite g : G.allGrids()) {
            CacheGroupContext grpCtx = ((IgniteEx)g).cachex(DEFAULT_CACHE_NAME).context().group();
            assertTrue(grpCtx.walEnabled());
            g.cache(DEFAULT_CACHE_NAME).rebalance();
        }
        awaitPartitionMapExchange();
        for (Ignite g : G.allGrids()) {
            CacheGroupContext grpCtx = ((IgniteEx)g).cachex(DEFAULT_CACHE_NAME).context().group();
            assertTrue(grpCtx.walEnabled());
        }
    }
    /**
     * File IO factory that wraps created files with {@link TestFileIO} so writes can be delayed.
     */
    private static class TestFileIOFactory implements FileIOFactory {
        /** Factory that creates the real file IO instances. */
        private final FileIOFactory delegate;
        /**
         * @param delegate Delegate.
         */
        TestFileIOFactory(FileIOFactory delegate) {
            this.delegate = delegate;
        }
        /** {@inheritDoc} */
        @Override public FileIO create(File file) throws IOException {
            return new TestFileIO(delegate.create(file));
        }
        /** {@inheritDoc} */
        @Override public FileIO create(File file, OpenOption... modes) throws IOException {
            return new TestFileIO(delegate.create(file, modes));
        }
    }
    /**
     * File IO wrapper that blocks checkpoint-thread writes on {@code fileIOLatch} when armed.
     */
    private static class TestFileIO implements FileIO {
        /** Real file IO all calls are forwarded to. */
        private final FileIO delegate;
        /**
         * @param delegate Delegate.
         */
        TestFileIO(FileIO delegate) {
            this.delegate = delegate;
        }
        /** {@inheritDoc} */
        @Override public long position() throws IOException {
            return delegate.position();
        }
        /** {@inheritDoc} */
        @Override public void position(long newPosition) throws IOException {
            delegate.position(newPosition);
        }
        /** {@inheritDoc} */
        @Override public int read(ByteBuffer destBuf) throws IOException {
            return delegate.read(destBuf);
        }
        /** {@inheritDoc} */
        @Override public int read(ByteBuffer destBuf, long position) throws IOException {
            return delegate.read(destBuf, position);
        }
        /** {@inheritDoc} */
        @Override public int read(byte[] buf, int off, int len) throws IOException {
            return delegate.read(buf, off, len);
        }
        /** {@inheritDoc} */
        @Override public int write(ByteBuffer srcBuf) throws IOException {
            CountDownLatch latch = fileIOLatch.get();
            if (latch != null && Thread.currentThread().getName().contains("checkpoint"))
                try {
                    latch.await();
                }
                catch (InterruptedException ex) {
                    throw new IgniteException(ex);
                }
            return delegate.write(srcBuf);
        }
        /** {@inheritDoc} */
        @Override public int write(ByteBuffer srcBuf, long position) throws IOException {
            CountDownLatch latch = fileIOLatch.get();
            if (latch != null && Thread.currentThread().getName().contains("checkpoint"))
                try {
                    latch.await();
                }
                catch (InterruptedException ex) {
                    throw new IgniteException(ex);
                }
            return delegate.write(srcBuf, position);
        }
        /** {@inheritDoc} */
        @Override public int write(byte[] buf, int off, int len) throws IOException {
            CountDownLatch latch = fileIOLatch.get();
            if (latch != null && Thread.currentThread().getName().contains("checkpoint"))
                try {
                    latch.await();
                }
                catch (InterruptedException ex) {
                    throw new IgniteException(ex);
                }
            return delegate.write(buf, off, len);
        }
        /** {@inheritDoc} */
        @Override public MappedByteBuffer map(int maxWalSegmentSize) throws IOException {
            return delegate.map(maxWalSegmentSize);
        }
        /** {@inheritDoc} */
        @Override public void force() throws IOException {
            delegate.force();
        }
        /** {@inheritDoc} */
        @Override public void force(boolean withMetadata) throws IOException {
            delegate.force(withMetadata);
        }
        /** {@inheritDoc} */
        @Override public long size() throws IOException {
            return delegate.size();
        }
        /** {@inheritDoc} */
        @Override public void clear() throws IOException {
            delegate.clear();
        }
        /** {@inheritDoc} */
        @Override public void close() throws IOException {
            delegate.close();
        }
    }
}
| apache-2.0 |
MarcinSzyszka/MobileSecondHand | AndroidStudio_Android/MobileSeconndHand/app/src/main/java/marcin_szyszka/mobileseconndhand/models/RegisterUserModel.java | 260 | package marcin_szyszka.mobileseconndhand.models;
/**
 * Registration form payload: an email address plus a password and its
 * confirmation, bound from the client request.
 */
public class RegisterUserModel {
    public String Email;
    public String Password;
    public String ConfirmPassword;

    /** Creates an empty model; fields are filled in by the request binder. */
    public RegisterUserModel() {
        // Nothing to initialize.
    }
}
| apache-2.0 |
Roma7-7-7/annoconf | src/main/java/org/annoconf/PropertyValueSource.java | 173 | package org.annoconf;
/**
 * A source of configuration property values.
 */
public interface PropertyValueSource {
    // Returns true if this source defines a value for the given key.
    boolean hasValue(String key);
    // Returns the value bound to the given key.
    // NOTE(review): behavior for an absent key (null vs exception) is not
    // specified here -- confirm against implementations before relying on it.
    String getValue(String key);
}
| apache-2.0 |
ldlqdsdcn/wms | core/src/main/java/com/delmar/core/service/CorePageService.java | 729 |
/******************************************************************************
* 版权所有 刘大磊 2013-07-01 *
* 作者:刘大磊 *
* 电话:13336390671 *
* email:ldlqdsd@126.com *
*****************************************************************************/
package com.delmar.core.service;
import com.delmar.core.model.CorePage;
import com.delmar.core.service.CoreService;
/**
 * Service for managing {@link CorePage} entities.
 *
 * @author 刘大磊 2016-08-26 17:08:24
 */
public interface CorePageService extends CoreService<CorePage> {
	/**
	 * Deletes the pages with the given ids.
	 *
	 * @param ids primary keys of the pages to delete
	 */
	void deleteCorePageList(Integer[] ids);
} | apache-2.0 |
ArnoHeid/PubApp | geocoder/src/main/java/de/hsmainz/pubapp/geocoder/controller/HttpAPIRequest.java | 5824 | package de.hsmainz.pubapp.geocoder.controller;
import com.google.gson.Gson;
import de.hsmainz.pubapp.geocoder.model.ClientInputJson;
import de.hsmainz.pubapp.geocoder.model.ErrorJson;
import de.hsmainz.pubapp.geocoder.model.geojson.GeoJsonCollection;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Locale;
import java.util.ResourceBundle;
/**
 * Base class for HTTP requests against the different geocoder APIs.
 * <p>
 * Subclasses supply the API specific URI building and HTTP handling while this
 * class implements the shared input validation, error reporting and JSON
 * rendering.
 *
 * @author Arno
 * @since 15.12.2016
 */
public abstract class HttpAPIRequest {
    //****************************************
    // CONSTANTS
    //****************************************
    // Localized message texts (bundle base name "lable" kept for compatibility with existing resources).
    static final ResourceBundle lables = ResourceBundle.getBundle("lable", Locale.getDefault());
    // Logger bound to this base class (previously mis-bound to HttpGraphhopperRequest).
    static final Logger logger = LogManager.getLogger(HttpAPIRequest.class);
    //****************************************
    // VARIABLES
    //****************************************
    Gson gson = new Gson();
    //****************************************
    // INIT/CONSTRUCTOR
    //****************************************
    //****************************************
    // GETTER/SETTER
    //****************************************
    //****************************************
    // PUBLIC METHODS
    //****************************************
    /**
     * Executes request to geocoder API and creates GeoJSON. Custom ClientJson is used for the input
     *
     * @param inputJson the request parameters combined in a custom ClientJson
     * @return GeoJSON (or an error JSON) converted to a String
     */
    public String requestGeocoder(ClientInputJson inputJson) {
        if (!validateInput(inputJson)) {
            return gson.toJson(new ErrorJson(lables.getString("message_Input_Empty")));
        }
        return requestGeocoder(inputJson.getQueryString(), inputJson.getLocale());
    }
    /**
     * Executes request to geocoder API and creates GeoJSON
     *
     * @param queryString the string containing the address
     * @param locale the string defining the used language
     * @return GeoJSON (or an error JSON) converted to a String
     */
    public String requestGeocoder(String queryString, String locale) {
        if (!validateInput(queryString)) {
            return gson.toJson(new ErrorJson(lables.getString("message_Input_Empty")));
        }
        try {
            URI uri = buildUri(queryString, locale);
            return request(uri);
        } catch (URISyntaxException e) {
            logger.catching(e);
            return gson.toJson(new ErrorJson(lables.getString("error_incorrect_URI")));
        }
    }
    //****************************************
    // PRIVATE METHODS
    //****************************************
    /**
     * Creates the URI for API request
     *
     * @param queryString the string containing the address
     * @param locale the string defining the used language
     * @return URI for the geocoder request of the concrete API
     * @throws URISyntaxException if the assembled URI is malformed
     */
    abstract URI buildUri(String queryString, String locale) throws URISyntaxException;
    /**
     * Executes the request to the API
     *
     * @param uri the geocoder URL
     * @return the requested GeoJSON collection
     * @throws IOException if the HTTP request fails
     */
    abstract GeoJsonCollection doHttpGet(URI uri) throws IOException;
    /**
     * Performs the HTTP request, translating failures and empty results into error JSON.
     *
     * @param uri the geocoder URL to fetch
     * @return the GeoJSON or an ErrorJSON as a String
     */
    String request(URI uri) {
        try {
            GeoJsonCollection geoJsonCollection = doHttpGet(uri);
            if (validateOutput(geoJsonCollection)) {
                return gson.toJson(geoJsonCollection);
            }
            return gson.toJson(new ErrorJson(lables.getString("message_no_location")));
        } catch (IOException e) {
            logger.catching(e);
            // Resource key spelling ("Faild") kept: it must match the bundle.
            return gson.toJson(new ErrorJson(lables.getString("error_API_request_Faild")));
        }
    }
    /**
     * Validates the input to reduce unnecessary requests to the API.
     *
     * @param inputJson the input JSON to be validated
     * @return true if both query string and locale are present and non-empty
     */
    boolean validateInput(ClientInputJson inputJson) {
        return validateInput(inputJson.getQueryString()) && validateInput(inputJson.getLocale());
    }
    /**
     * Validates the input to reduce unnecessary requests to the API.
     *
     * @param inputString the input string to be validated
     * @return true if the input string is neither null nor empty
     */
    boolean validateInput(String inputString) {
        return inputString != null && !inputString.isEmpty();
    }
    /**
     * Validates the output from the API.
     *
     * @param geoJsonCollection the API output JSON to be validated
     * @return true if the output contains at least one feature
     */
    private boolean validateOutput(GeoJsonCollection geoJsonCollection) {
        return !geoJsonCollection.getFeatures().isEmpty();
    }
    //****************************************
    // INNER CLASSES
    //****************************************
}
| apache-2.0 |
storecast/hibernate-delta | src/main/java/com/txtr/hibernatedelta/model/HibernateTable.java | 2751 | package com.txtr.hibernatedelta.model;
import static javax.xml.bind.annotation.XmlAccessType.FIELD;
import java.util.ArrayList;
import java.util.List;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlElementWrapper;
import javax.xml.bind.annotation.XmlType;
import org.apache.commons.lang3.StringUtils;
@XmlAccessorType(FIELD)
@XmlType(propOrder = {"name", "columns", "explicitIndexes"})
public class HibernateTable implements IHibernateDatabaseObject {

    @XmlAttribute
    private String name;

    @XmlElementWrapper(name = "columns")
    @XmlElement(name = "column")
    private List<HibernateColumn> columns = new ArrayList<HibernateColumn>();

    @XmlElementWrapper(name = "indexes")
    @XmlElement(name = "index")
    private List<ExplicitHibernateIndex> explicitIndexes = new ArrayList<ExplicitHibernateIndex>();

    @XmlAttribute
    private String sequenceName;

    @XmlAttribute
    private boolean virtualRootTable;

    public HibernateTable(String name, String sequenceName, boolean virtualRootTable) {
        this.name = name;
        this.sequenceName = sequenceName;
        this.virtualRootTable = virtualRootTable;
    }

    /** No-argument constructor required by JAXB. */
    @SuppressWarnings("UnusedDeclaration")
    public HibernateTable() {
    }

    @Override
    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public List<HibernateColumn> getColumns() {
        return columns;
    }

    public List<ExplicitHibernateIndex> getExplicitIndexes() {
        return explicitIndexes;
    }

    public void addColumn(HibernateColumn column) {
        columns.add(column);
    }

    /**
     * Looks up a column by name, ignoring case.
     *
     * @throws IllegalArgumentException if no column with that name exists
     */
    public HibernateColumn getColumn(String name) {
        for (int i = 0; i < columns.size(); i++) {
            HibernateColumn candidate = columns.get(i);
            if (candidate.getName().equalsIgnoreCase(name)) {
                return candidate;
            }
        }
        throw new IllegalArgumentException("column not found: " + name);
    }

    public void addExplicitIndex(ExplicitHibernateIndex hibernateIndex) {
        explicitIndexes.add(hibernateIndex);
    }

    /** First 28 characters of the table name, used as a prefix when naming indexes. */
    public String getIndexPrefix() {
        return StringUtils.left(name, 28);
    }

    /** Collects the columns flagged as primary-key columns, in declaration order. */
    public List<HibernateColumn> getPrimaryKeyColumns() {
        List<HibernateColumn> primaryKey = new ArrayList<HibernateColumn>();
        for (HibernateColumn candidate : columns) {
            if (candidate.isPrimaryKey()) {
                primaryKey.add(candidate);
            }
        }
        return primaryKey;
    }

    public String getSequenceName() {
        return sequenceName;
    }

    public boolean isVirtualRootTable() {
        return virtualRootTable;
    }
}
| apache-2.0 |
balajiboggaram/algorithms | src/me/learn/personal/month5/WordBreakable.java | 724 | /**
*
*/
package me.learn.personal.month5;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
/**
* Title :
*
* Date : Dec 23, 2020
*
* @author bramanarayan
*
*/
public class WordBreakable {

    public static void main(String[] args) {
        // no command-line behavior; see wordBreak
    }

    /**
     * Classic word-break dynamic program: returns true when {@code s} can be
     * segmented into a sequence of words drawn from {@code wordDict}
     * (words may be reused).
     *
     * @param s        the string to segment
     * @param wordDict the dictionary of allowed words
     * @return true if s is breakable into dictionary words
     */
    public boolean wordBreak(String s, List<String> wordDict) {
        final Set<String> dictionary = new HashSet<>(wordDict);
        final int n = s.length();
        // breakable[i] == true when the prefix s[0, i) can be segmented.
        boolean[] breakable = new boolean[n + 1];
        breakable[0] = true; // empty prefix is trivially segmentable
        for (int end = 1; end <= n; end++) {
            for (int start = end - 1; start >= 0; start--) {
                if (breakable[start] && dictionary.contains(s.substring(start, end))) {
                    breakable[end] = true;
                    break;
                }
            }
        }
        return breakable[n];
    }
}
| apache-2.0 |
rma350/kidneyExchange | kidneyMatching/src/web/KidneyServerSolver.java | 8249 | package web;
import graphUtil.CycleChainDecomposition;
import graphUtil.EdgeChain;
import ilog.concert.IloException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import kepLib.KepInstance;
import kepLib.KepProblemData;
import kepModeler.ChainsForcedRemainOpenOptions;
import kepModeler.KepModeler;
import kepModeler.ModelerInputs;
import kepModeler.ObjectiveMode;
import replicator.DonorEdge;
import threading.FixedThreadPool;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import data.Donor;
import data.ExchangeUnit;
import database.KidneyDataBase;
import exchangeGraph.CycleChainPackingSubtourElimination;
import exchangeGraph.SolverOption;
/**
 * Serves kidney-exchange datasets and solutions in a flattened Map form
 * suitable for JSON serialization by the web layer.
 *
 * Fixes over the previous revision: the dataset cache no longer does a
 * containsKey+get double lookup, and the CPLEX-backed solver is now cleaned
 * up in a finally block so its native resources are released even when
 * solving fails.
 */
public class KidneyServerSolver {

    /** Source of kidney-exchange datasets, keyed by dataset name. */
    private final KidneyDataBase database;
    /** Cache of loaded inputs so each dataset is read from the database only once. */
    private final Map<String, ModelerInputs<ExchangeUnit, DonorEdge>> dataCache = new HashMap<String, ModelerInputs<ExchangeUnit, DonorEdge>>();
    private final Optional<FixedThreadPool> threadPool;
    // Solver time budget in milliseconds; package-visible so callers can tune it.
    Optional<Double> maxSolveTimeMs = Optional.of(100.0);

    public KidneyServerSolver(KidneyDataBase database,
            Optional<FixedThreadPool> threadPool) {
        this.database = database;
        this.threadPool = threadPool;
    }

    public ImmutableList<String> availableDatasets() {
        return database.availableDatasets();
    }

    /** Returns the dataset's graph (nodes + links) flattened for JSON output. */
    public Map<Object, Object> getInputs(String databaseName) {
        return flattenModelerInputs(getModelerInputs(databaseName));
    }

    /**
     * Solves the maximum-cardinality kidney-exchange problem for the named
     * dataset and returns the chosen edges flattened for JSON output.
     *
     * @throws IloException if the underlying CPLEX solver fails
     */
    public Map<Object, Object> getSolution(String databaseName)
            throws IloException {
        ModelerInputs<ExchangeUnit, DonorEdge> inputs = getModelerInputs(databaseName);
        KepModeler modeler = new KepModeler(3, Integer.MAX_VALUE,
                ChainsForcedRemainOpenOptions.none,
                new ObjectiveMode.MaximumCardinalityMode());
        KepInstance<ExchangeUnit, DonorEdge> instance = modeler.makeKepInstance(
                inputs, null);
        CycleChainPackingSubtourElimination<ExchangeUnit, DonorEdge> solver = new CycleChainPackingSubtourElimination<ExchangeUnit, DonorEdge>(
                instance, true, maxSolveTimeMs, threadPool,
                SolverOption.makeCheckedOptions(SolverOption.cutsetMode,
                        SolverOption.lazyConstraintCallback, SolverOption.userCutCallback));
        CycleChainDecomposition<ExchangeUnit, DonorEdge> solution;
        try {
            solver.solve();
            solution = solver.getSolution();
        } finally {
            // Release the solver's resources even if solve()/getSolution() throws.
            solver.cleanUp();
        }
        return flattenSolution(inputs.getKepProblemData(), solution);
    }

    /** Loads (and caches) the modeler inputs for the named dataset. */
    private ModelerInputs<ExchangeUnit, DonorEdge> getModelerInputs(
            String databaseName) {
        // Single-lookup cache access (was containsKey followed by get).
        ModelerInputs<ExchangeUnit, DonorEdge> inputs = dataCache.get(databaseName);
        if (inputs == null) {
            inputs = database.loadInputs(databaseName);
            dataCache.put(databaseName, inputs);
        }
        return inputs;
    }

    /** Flattens the whole input graph into {"nodes": [...], "links": [...]}. */
    public static Map<Object, Object> flattenModelerInputs(
            ModelerInputs<ExchangeUnit, DonorEdge> inputs) {
        Map<Object, Object> ans = new HashMap<Object, Object>();
        List<Map<Object, Object>> flatUnits = Lists.newArrayList();
        List<Map<Object, Object>> flatEdges = Lists.newArrayList();
        for (ExchangeUnit unit : inputs.getKepProblemData().getGraph()
                .getVertices()) {
            flatUnits.add(flattenExchangeUnit(inputs, unit));
        }
        for (DonorEdge edge : inputs.getKepProblemData().getGraph().getEdges()) {
            flatEdges.add(flattenDonorEdge(inputs.getKepProblemData(), edge));
        }
        ans.put("nodes", flatUnits);
        ans.put("links", flatEdges);
        return ans;
    }

    /** Flattens a solution into {"links": [...]} — only the chosen chain edges. */
    public static Map<Object, Object> flattenSolution(
            KepProblemData<ExchangeUnit, DonorEdge> problemData,
            CycleChainDecomposition<ExchangeUnit, DonorEdge> solution) {
        Map<Object, Object> ans = new HashMap<Object, Object>();
        List<Map<Object, Object>> flatEdges = Lists.newArrayList();
        for (EdgeChain<DonorEdge> edgeChain : solution.getEdgeChains()) {
            for (DonorEdge edge : edgeChain) {
                flatEdges.add(flattenDonorEdge(problemData, edge));
            }
        }
        ans.put("links", flatEdges);
        return ans;
    }

    /** Flattens an edge to {sourceId, targetId, id} where id = sourceId + targetId. */
    private static Map<Object, Object> flattenDonorEdge(
            KepProblemData<ExchangeUnit, DonorEdge> kepProblemData, DonorEdge edge) {
        Map<Object, Object> ans = new HashMap<Object, Object>();
        ExchangeUnit source = kepProblemData.getGraph().getSource(edge);
        ExchangeUnit dest = kepProblemData.getGraph().getDest(edge);
        String sourceId = makeNodeId(kepProblemData, source);
        String destId = makeNodeId(kepProblemData, dest);
        ans.put("sourceId", sourceId);
        ans.put("targetId", destId);
        ans.put("id", sourceId + destId);
        return ans;
    }

    /** Flattens a node to {id, type, reachable, sensitized}. */
    private static Map<Object, Object> flattenExchangeUnit(
            ModelerInputs<ExchangeUnit, DonorEdge> inputs, ExchangeUnit unit) {
        Map<Object, Object> ans = new HashMap<Object, Object>();
        ans.put("id", makeNodeId(inputs.getKepProblemData(), unit));
        ans.put("type", makeType(inputs.getKepProblemData(), unit));
        ans.put("reachable", true);
        ans.put("sensitized", computeSensitization(inputs, unit));
        return ans;
    }

    /**
     * Root nodes (altruistic donors) are identified by their first donor's id;
     * all other nodes by their receiver's id.
     */
    private static String makeNodeId(
            KepProblemData<ExchangeUnit, DonorEdge> kepProblemData, ExchangeUnit unit) {
        if (kepProblemData.getRootNodes().contains(unit)) {
            return unit.getDonor().get(0).getId();
        } else {
            return unit.getReceiver().getId();
        }
    }

    /** Classifies a unit as "root", "paired" or "terminal". */
    private static String makeType(
            KepProblemData<ExchangeUnit, DonorEdge> kepProblemData, ExchangeUnit unit) {
        if (kepProblemData.getRootNodes().contains(unit)) {
            return "root";
        } else if (kepProblemData.getPairedNodes().contains(unit)) {
            return "paired";
        } else if (kepProblemData.getTerminalNodes().contains(unit)) {
            return "terminal";
        } else {
            throw new RuntimeException();
        }
    }

    /**
     * Buckets a unit's match power into a small integer "sensitization" level
     * for display. Missing power data degrades to 0 rather than failing.
     */
    private static int computeSensitization(
            ModelerInputs<ExchangeUnit, DonorEdge> inputs, ExchangeUnit unit) {
        Map<ExchangeUnit, Double> donorPower = inputs.getAuxiliaryInputStatistics()
                .getDonorPowerPostPreference();
        Map<ExchangeUnit, Double> receiverPower = inputs
                .getAuxiliaryInputStatistics().getReceiverPowerPostPreference();
        if (inputs.getKepProblemData().getRootNodes().contains(unit)) {
            if (donorPower.containsKey(unit.getDonor().get(0))) {
                return singlePersonSensitization(donorPower.get(unit.getDonor().get(0)));
            } else {
                // missing donor power data; treat as not sensitized
                return 0;
            }
        } else if (inputs.getKepProblemData().getPairedNodes().contains(unit)) {
            // Paired node: sum power over all of the unit's donors.
            double unitDonorPower = 0;
            for (Donor donor : unit.getDonor()) {
                if (donorPower.containsKey(donor)) {
                    unitDonorPower += donorPower.get(donor);
                } else {
                    // missing donor power data; treat as not sensitized
                    return 0;
                }
            }
            if (receiverPower.containsKey(unit.getReceiver())) {
                return twoPersonSensitization(unitDonorPower,
                        receiverPower.get(unit.getReceiver()));
            } else {
                // missing receiver power data; treat as not sensitized
                return 0;
            }
        } else if (inputs.getKepProblemData().getTerminalNodes().contains(unit)) {
            if (receiverPower.containsKey(unit.getReceiver())) {
                return singlePersonSensitization(receiverPower.get(unit.getReceiver()));
            } else {
                // missing receiver power data; treat as not sensitized
                return 0;
            }
        } else {
            throw new RuntimeException();
        }
    }

    /** Lower match power => higher sensitization bucket (0..3). */
    private static int singlePersonSensitization(double matchPower) {
        if (matchPower < .01) {
            return 3;
        } else if (matchPower < .08) {
            return 2;
        } else if (matchPower < .2) {
            return 1;
        } else {
            return 0;
        }
    }

    /** Buckets the scaled product of donor and receiver match power (0..4). */
    private static int twoPersonSensitization(double donorMatchPower,
            double receiverMatchPower) {
        double pmp = 10000 * donorMatchPower * receiverMatchPower;
        if (pmp < .1) {
            return 4;
        } else if (pmp < 5) {
            return 3;
        } else if (pmp < 20) {
            return 2;
        } else if (pmp < 60) {
            return 1;
        } else {
            return 0;
        }
    }
}
| apache-2.0 |
jaivox/tools | v2/com/jaivox/ui/appmaker/Rule2Fsm.java | 4520 | package com.jaivox.ui.appmaker;
import java.io.*;
import java.util.*;
import bitpix.list.*;
/**
 * Converts a rule tree (loaded from a bitpix tree file) into FSM dialog rules.
 * Each tree node carries a tag of the form "(input) (output)"; every node
 * becomes one dialog-rule block appended to {@link #store}.
 *
 * Fix over the previous revision: removed a no-op self-assignment
 * ("startState = startState;") from the constructor.
 */
public class Rule2Fsm {

    static String dir = "./";
    basicTree tree;
    // Used as a set of state names already taken (value is always "yes").
    TreeMap<String, String> states;
    // Used as a set of rule tags already emitted (value is always "yes").
    TreeMap<String, String> tags;
    static String name = "data/road1.tree";
    static String yes = "yes";
    String startState = "def";
    static String casedefault = "(default) (def)";
    static basicNode casedefaultnode;
    // Generated dialog-rule text, one entry per emitted rule block.
    Vector<String> store;

    /**
     * Loads the rule tree from {@code dir + name} and converts every branch
     * into dialog rules held in {@link #store}.
     */
    public Rule2Fsm() {
        String filename = dir + name;
        tree = new basicTree(filename);
        // tree.WriteTree ();
        states = new TreeMap<String, String>();
        tags = new TreeMap<String, String>();
        Vector<bitpix.list.basicNode> list = tree.Root.ListChild;
        casedefaultnode = new basicNode(casedefault);
        store = new Vector<String>();
        store.add("\n#include errors.dlg\n");
        for (int i = 0; i < list.size(); i++) {
            basicNode child = list.elementAt(i);
            gt(child, startState);
        }
        // Output file name kept for the (currently disabled) writefile call below.
        int pos = filename.lastIndexOf(".");
        String outfile = filename.substring(0, pos + 1) + "dlg";
        // writefile (outfile, store);
    }

    void Debug(String s) {
        System.out.println("[Rule2Fsm]" + s);
    }

    /**
     * Recursively walks the tree. {@code sofar} is the space-separated path of
     * state names from the start state down to this node's parent.
     */
    void gt(basicNode node, String sofar) {
        Vector<bitpix.list.basicNode> list = node.ListChild;
        if (list == null || list.size() == 0) {
            // Leaf: emit a rule that transitions back to the default state.
            emit(node, sofar, "def");
        } else {
            String nextstate = createNextState(node);
            String morefar = sofar + " " + nextstate;
            emit(node, sofar, nextstate);
            // Every interior state also gets a catch-all default branch.
            list.add(casedefaultnode);
            for (int i = 0; i < list.size(); i++) {
                basicNode child = list.elementAt(i);
                gt(child, morefar);
            }
        }
    }

    /**
     * Emits one dialog-rule block for {@code node}: current state, dotted
     * input, dotted output, next state. Nodes with empty or malformed tags
     * are skipped.
     */
    void emit(basicNode node, String sofar, String next) {
        int pos = sofar.lastIndexOf(" ");
        pos++;
        String last = sofar.substring(pos); // most recent state on the path
        String tag = sofar.replaceAll(" ", "_");
        tag = tag + "_" + next;
        tag = getuniquetag(tag);
        StringBuffer sb = new StringBuffer();
        sb.append("{\n[" + tag + "]\n");
        String t = (String) node.Tag;
        if (t.trim().length() == 0) return;
        StringTokenizer st = new StringTokenizer(t, "()");
        if (st.countTokens() < 2) {
            Debug("Don't have two tokens from " + t);
            return;
        }
        String input = filter(st.nextToken()).trim();
        String output = filter(st.nextToken()).trim();
        while (output.length() == 0)
            output = filter(st.nextToken()).trim();
        // with Gui2Gram, convert input and output to use dotted head tag form
        String indot = input.replaceAll(" ", ".");
        String outdot = output.replaceAll(" ", ".");
        sb.append("\t" + last + " ;\n");
        sb.append("\t" + indot + " ;\n");
        sb.append("\t" + outdot + " ;\n");
        sb.append("\t" + next + " ;\n");
        sb.append("}\n");
        String all = new String(sb);
        store.add(all);
    }

    static String filter(String line) {
        return Gui2Gram.filter(line);
    }

    /**
     * Derives a short, unique state name from the node's output clause: the
     * first letters of up to three output words, padded with 'x' to length 3,
     * with a numeric suffix (1-9) appended on collision.
     */
    String createNextState(basicNode node) {
        String tag = (String) (node.Tag);
        StringTokenizer st = new StringTokenizer(tag, "()");
        if (st.countTokens() < 2) {
            Debug("don't have two tokens in " + tag);
            return "def";
        }
        st.nextToken(); // skip the input clause; only the output drives the state name
        String output = st.nextToken().trim();
        while (output.length() == 0)
            output = st.nextToken().trim();
        StringTokenizer tt = new StringTokenizer(output);
        int n = tt.countTokens();
        StringBuffer sb = new StringBuffer();
        for (int i = 0; i < Math.min(n, 3); i++) {
            String token = tt.nextToken();
            sb.append(token.charAt(0));
        }
        if (n < 3) {
            // pad to three characters
            for (int j = n; j < 3; j++) {
                sb.append('x');
            }
        }
        String s = new String(sb);
        String test = states.get(s);
        if (test != null) {
            // name collision: try numeric suffixes 1..9
            for (int i = 1; i < 10; i++) {
                String next = s + i;
                if (states.get(next) == null) {
                    s = next;
                    break;
                }
            }
        }
        states.put(s, yes);
        return s;
    }

    /**
     * Returns {@code in} if unused, otherwise the first free variant
     * {@code in_1 .. in_98}; logs and returns "error" when exhausted.
     */
    String getuniquetag(String in) {
        if (tags.get(in) == null) {
            tags.put(in, yes);
            return in;
        } else {
            for (int i = 1; i < 99; i++) {
                String next = in + "_" + i;
                if (tags.get(next) != null) {
                    continue;
                }
                tags.put(next, yes);
                return next;
            }
            Debug("More than 99 tags starting with " + in);
            return "error";
        }
    }

    /** Writes all generated rule blocks to {@code out}. */
    void writeRules(PrintWriter out) {
        try {
            for (int i = 0; i < store.size(); i++) {
                out.println(store.elementAt(i));
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
| apache-2.0 |
ghedlund/opgraph | app/src/main/java/ca/phon/opgraph/app/commands/package-info.java | 855 | /*
* Copyright (C) 2012 Jason Gedge <http://www.gedge.ca>
*
* This file is part of the OpGraph project.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* Provides commands for the Application API.
*/
package ca.phon.opgraph.app.commands;
| apache-2.0 |
JCERTIFLab/jcertif-android-2013 | src/main/java/com/jcertif/android/fragments/SessionListFragment.java | 12340 | /*
* Copyright 2013 JCertifLab.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.jcertif.android.fragments;
import android.app.Activity;
import android.content.Intent;
import android.content.res.Configuration;
import android.net.Uri;
import android.os.Bundle;
import android.provider.CalendarContract;
import android.provider.CalendarContract.Events;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.AdapterView;
import android.widget.AdapterView.OnItemClickListener;
import android.widget.ListView;
import android.widget.Toast;
import com.actionbarsherlock.view.ActionMode;
import com.actionbarsherlock.view.Menu;
import com.actionbarsherlock.view.MenuInflater;
import com.actionbarsherlock.view.MenuItem;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.jcertif.android.JcertifApplication;
import com.jcertif.android.MainActivity;
import com.jcertif.android.R;
import com.jcertif.android.adapters.SessionAdapter;
import com.jcertif.android.adapters.SpeedScrollListener;
import com.jcertif.android.dao.SessionProvider;
import com.jcertif.android.dao.SpeakerProvider;
import com.jcertif.android.model.Session;
import com.jcertif.android.model.Speaker;
import com.jcertif.android.service.RESTService;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.GregorianCalendar;
import java.util.List;
import uk.co.senab.actionbarpulltorefresh.extras.actionbarsherlock.PullToRefreshAttacher;
/**
*
* @author Patrick Bashizi
*
*/
public class SessionListFragment extends RESTResponderFragment implements PullToRefreshAttacher.OnRefreshListener{
public static final String SESSIONS_LIST_URI = JcertifApplication.BASE_URL
+ "/session/list";
public static final String CATEGORY_LIST_URI = JcertifApplication.BASE_URL
+ "/ref/category/list";
private static String TAG = SessionListFragment.class.getName();
private List<Session> mSessions = new ArrayList<Session>();;
private ListView mLvSessions;
private SessionAdapter mAdapter;
private SessionProvider mProvider;
private SpeedScrollListener mListener;
private ActionMode mActionMode;
private Session mSelectedSession;
private PullToRefreshAttacher mPullToRefreshAttacher ;
public SessionListFragment() {
// Empty constructor required for fragment subclasses
}
public interface OnSessionUpdatedListener {
void onSessionUpdated(Session session);
}
@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container,
Bundle savedInstanceState) {
// setRetainInstance(true);
View rootView = inflater.inflate(R.layout.fragment_session, container,
false);
mLvSessions = (ListView) rootView.findViewById(R.id.lv_session);
String session = getResources().getStringArray(R.array.menu_array)[0];
setHasOptionsMenu(true);
getActivity().setTitle(session);
mLvSessions = (ListView) rootView.findViewById(R.id.lv_session);
mPullToRefreshAttacher=((MainActivity)getSherlockActivity()).getmPullToRefreshAttacher();
mPullToRefreshAttacher.addRefreshableView(mLvSessions, this);
mLvSessions.setOnItemClickListener(new OnItemClickListener() {
@Override
public void onItemClick(AdapterView<?> parent, View view, int pos,
long position) {
mAdapter.setSelectedIndex(pos);
mSelectedSession = ((Session) parent
.getItemAtPosition((int) position));
updateSession(mSelectedSession);
}
});
mLvSessions
.setOnItemLongClickListener(new AdapterView.OnItemLongClickListener() {
@Override
public boolean onItemLongClick(AdapterView<?> arg0,
View arg1, int pos, long arg3) {
if (mActionMode != null) {
return false;
}
mActionMode = getSherlockActivity().startActionMode(
mActionModeCallback);
mSelectedSession = ((Session) arg0
.getItemAtPosition((int) pos));
mAdapter.setSelectedIndex(pos);
return true;
}
});
return rootView;
}
private ActionMode.Callback mActionModeCallback = new ActionMode.Callback() {
@Override
public boolean onCreateActionMode(ActionMode mode, Menu menu) {
MenuInflater inflater = mode.getMenuInflater();
inflater.inflate(R.menu.context_menu_session, menu);
return true;
}
@Override
public boolean onPrepareActionMode(ActionMode mode, Menu menu) {
return false;
}
@Override
public boolean onActionItemClicked(ActionMode mode, MenuItem item) {
switch (item.getItemId()) {
case R.id.menu_share:
shareSessionItem();
mode.finish(); // Action picked, so close the CAB
break;
case R.id.menu_add_to_schedule:
addSessionItemToSchedule();
mode.finish(); // Action picked, so close the CAB
break;
default:
return false;
}
return true;
}
public void onDestroyActionMode(ActionMode mode) {
mActionMode = null;
}
};
private void addSessionItemToSchedule() {
if (android.os.Build.VERSION.SDK_INT >= 14){
Intent intent = new Intent(Intent.ACTION_INSERT);
intent.setType("vnd.android.cursor.item/event");
intent.putExtra(Events.TITLE, mSelectedSession.getTitle());
intent.putExtra(Events.EVENT_LOCATION,"Room"+ mSelectedSession.getSalle());
intent.putExtra(Events.DESCRIPTION, mSelectedSession.getDescription());
Date evStartDate= mSelectedSession.getStart();
Date evEndDate= mSelectedSession.getStart();
// Setting dates
GregorianCalendar startcalDate = new GregorianCalendar();
startcalDate.setTime(evStartDate);
// Setting dates
GregorianCalendar endCalDate = new GregorianCalendar();
endCalDate.setTime(evEndDate);
intent.putExtra(CalendarContract.EXTRA_EVENT_BEGIN_TIME,startcalDate.getTimeInMillis());
intent.putExtra(CalendarContract.EXTRA_EVENT_END_TIME,endCalDate.getTimeInMillis());
// Make it a full day event
intent.putExtra(CalendarContract.EXTRA_EVENT_ALL_DAY, true);
// Make it a recurring Event
// intent.putExtra(Events.RRULE, "WKST=SU");
// Making it private and shown as busy
intent.putExtra(Events.ACCESS_LEVEL, Events.ACCESS_PRIVATE);
intent.putExtra(Events.AVAILABILITY, Events.AVAILABILITY_BUSY);
//intent.putExtra(Events.DISPLAY_COLOR, Events.EVENT_COLOR);
startActivity(intent);
}else{
Toast.makeText(this.getSherlockActivity(),
"Not supported for your device :(", Toast.LENGTH_SHORT).show();
}
}
private void shareSessionItem() {
Speaker sp = new SpeakerProvider(this.getSherlockActivity())
.getByEmail(mSelectedSession.getSpeakers()[0]);
Intent intent = new Intent(android.content.Intent.ACTION_SEND);
intent.setType("text/plain");
intent.addFlags(Intent.FLAG_ACTIVITY_CLEAR_WHEN_TASK_RESET);
intent.putExtra(Intent.EXTRA_SUBJECT, "Share Session");
intent.putExtra(
Intent.EXTRA_TEXT,
"Checking out this #Jcertif2013 session : "
+ mSelectedSession.getTitle() + " by "
+ sp.getFirstname() + " " + sp.getLastname());
startActivity(intent);
}
protected void updateSession(Session s) {
if(onTablet()){
((OnSessionUpdatedListener) getParentFragment()).onSessionUpdated(s);
}else{
Intent intent = new Intent(this.getActivity().getApplicationContext(),
SessionDetailFragmentActivity.class);
String sessionJson= new Gson().toJson(s);
intent.putExtra("session",sessionJson);
startActivity(intent);
getSherlockActivity().overridePendingTransition ( 0 , R.anim.slide_up_left);
}
}
public SessionProvider getProvider() {
if (mProvider == null)
mProvider = new SessionProvider(this.getSherlockActivity());
return mProvider;
}
@Override
public void onActivityCreated(Bundle savedInstanceState) {
super.onActivityCreated(savedInstanceState);
// This gets called each time our Activity has finished creating itself.
// First check the local cache, if it's empty data will be fetched from
// web
mSessions = loadSessionsFromCache();
setSessions();
}
@Override
public void onCreateOptionsMenu(Menu menu, MenuInflater inflater) {
super.onCreateOptionsMenu(menu, inflater);
}
/**
* We cache our stored session here so that we can return right away on
* multiple calls to setSession() during the Activity lifecycle events (such
* as when the user rotates their device).
*/
private void setSessions() {
MainActivity activity = (MainActivity) getActivity();
setLoading(true);
if (mSessions.isEmpty() && activity != null) {
// This is where we make our REST call to the service. We also pass
// in our ResultReceiver
// defined in the RESTResponderFragment super class.
// We will explicitly call our Service since we probably want to
// keep it as a private component in our app.
Intent intent = new Intent(activity, RESTService.class);
intent.setData(Uri.parse(SESSIONS_LIST_URI));
// Here we are going to place our REST call parameters.
Bundle params = new Bundle();
params.putString(RESTService.KEY_JSON_PLAYLOAD, null);
intent.putExtra(RESTService.EXTRA_PARAMS, params);
intent.putExtra(RESTService.EXTRA_RESULT_RECEIVER,getResultReceiver());
// Here we send our Intent to our RESTService.
activity.startService(intent);
} else if (activity != null) {
// Here we check to see if our activity is null or not.
// We only want to update our views if our activity exists.
// Load our list adapter with our session.
updateList();
setLoading(false);
}
}
void updateList() {
mListener = new SpeedScrollListener();
mLvSessions.setOnScrollListener(mListener);
mAdapter = new SessionAdapter(this.getActivity(), mListener, mSessions);
mLvSessions.setAdapter(mAdapter);
if(refreshing){
refreshing=false;
mPullToRefreshAttacher.setRefreshComplete();
}
}
private boolean onTablet() {
return ((getResources().getConfiguration().screenLayout & Configuration.SCREENLAYOUT_SIZE_MASK) >= Configuration.SCREENLAYOUT_SIZE_LARGE);
}
public void updateList(String cat) {
if (cat.equals("All") || cat.equals("Tous")) {
mSessions = loadSessionsFromCache();
} else {
mSessions = getProvider().getSessionsByCategory(cat);
}
updateList();
}
@Override
public void onRESTResult(int code, Bundle resultData) {
// Here is where we handle our REST response.
// Check to see if we got an HTTP 200 code and have some data.
String result = null;
if (resultData != null) {
result = resultData.getString(RESTService.REST_RESULT);
} else {
return;
}
if (code == 200 && result != null) {
mSessions = parseSessionJson(result);
Log.d(TAG, result);
setSessions();
saveToCache(mSessions);
} else {
Activity activity = getActivity();
if (activity != null) {
Toast.makeText(
activity,
"Failed to load Session data. Check your internet settings.",
Toast.LENGTH_SHORT).show();
}
}
setLoading(false);
}
private List<Session> parseSessionJson(String result) {
Gson gson = new GsonBuilder().setDateFormat("dd/MM/yyyy hh:mm")
.create();
Session[] sessions = gson.fromJson(result, Session[].class);
return Arrays.asList(sessions);
}
protected void saveToCache(final List<Session> sessions) {
new Thread(new Runnable() {
@Override
public void run() {
for (Session session : sessions)
mProvider.store(session);
}
}).start();
}
private List<Session> loadSessionsFromCache() {
List<Session> list = getProvider().getAll(Session.class);
return list;
}
@Override
public void onPause() {
super.onDestroy();
}
@Override
public void onDestroy() {
super.onDestroy();
}
@Override
public void onRefreshStarted(View view) {
mProvider.deleteAll(Session.class);
//mLvSessions.setAdapter(null);
mSessions = loadSessionsFromCache();
setSessions();
refreshing=true;
}
}
| apache-2.0 |
soabase/soabase | soabase-core/src/main/java/io/soabase/core/features/attributes/DynamicAttributes.java | 2045 | /**
* Copyright 2014 Jordan Zimmerman
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.soabase.core.features.attributes;
import io.soabase.core.listening.Listenable;
import java.util.Collection;
/**
* Gives access to dynamic attributes. The various get methods return
* the current value for the given key after applying overrides and scopes, etc.
* Always call the methods to get the current value as it may change during runtime.
*/
public interface DynamicAttributes
{
    // Typed getters. Each returns the current value for the key after
    // overrides and scopes are applied; the two-argument variants fall back
    // to the supplied default when the key is absent.
    String getAttribute(String key);
    String getAttribute(String key, String defaultValue);
    boolean getAttributeBoolean(String key);
    boolean getAttributeBoolean(String key, boolean defaultValue);
    int getAttributeInt(String key);
    int getAttributeInt(String key, int defaultValue);
    long getAttributeLong(String key);
    long getAttributeLong(String key, long defaultValue);
    double getAttributeDouble(String key);
    double getAttributeDouble(String key, double defaultValue);

    // Temporary overrides: shadow a key with a fixed value until removed.
    void temporaryOverride(String key, boolean value);
    void temporaryOverride(String key, int value);
    void temporaryOverride(String key, long value);
    void temporaryOverride(String key, double value);
    void temporaryOverride(String key, String value);
    boolean removeOverride(String key);

    Collection<String> getKeys();

    // Listener registration for attribute-change notifications.
    Listenable<DynamicAttributeListener> getListenable();
}
| apache-2.0 |
hotpads/datarouter | datarouter-instrumentation/src/main/java/io/datarouter/instrumentation/trace/Traceparent.java | 4663 | /*
* Copyright © 2009 HotPads (admin@hotpads.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.datarouter.instrumentation.trace;
import java.time.Instant;
import java.util.Objects;
import java.util.Optional;
import java.util.Random;
import java.util.regex.Pattern;
/**
 * Value object for a W3C-style traceparent: version "00", a 32-hex-digit
 * trace id (whose first 16 hex digits encode the creation timestamp), a
 * 16-hex-digit parent id, and a 2-hex-digit flags byte.
 *
 * Fix over the previous revision: id generation now draws from a single
 * shared {@link Random} instead of allocating a new one per call.
 */
public class Traceparent{
	private static final Pattern TRACEPARENT_PATTERN = Pattern.compile(
			"^[0-9a-f]{2}-[0-9a-f]{32}-[0-9a-f]{16}-[0-9a-f]{2}$");
	private static final String TRACEPARENT_DELIMITER = "-";
	private static final Integer MIN_CHARS_TRACEPARENT = 55;
	private static final String CURRENT_VERSION = "00";
	// Shared PRNG: allocating a new Random per generated id is wasteful.
	// java.util.Random is safe for concurrent use.
	private static final Random RANDOM = new Random();

	public static final int TRACE_ID_HEX_SIZE = 32;
	public static final int PARENT_ID_HEX_SIZE = 16;

	public final String version = CURRENT_VERSION;
	public final String traceId;
	public final String parentId;
	// Mutable: enableSample()/enableLog() flip bits in the flags byte.
	private String traceFlags;

	public Traceparent(String traceId, String parentId, String traceFlags){
		this.traceId = traceId;
		this.parentId = parentId;
		this.traceFlags = traceFlags;
	}

	public Traceparent(String traceId){
		this(traceId, createNewParentId());
	}

	public Traceparent(String traceId, String parentId){
		this(traceId, parentId, createDefaultTraceFlag());
	}

	public static Traceparent generateNew(long createdTimestamp){
		return new Traceparent(createNewTraceId(createdTimestamp), createNewParentId(),
				createDefaultTraceFlag());
	}

	public static Traceparent generateNewWithCurrentTimeInNs(){
		return new Traceparent(createNewTraceId(Trace2Dto.getCurrentTimeInNs()), createNewParentId(),
				createDefaultTraceFlag());
	}

	/** Returns a copy with the same trace id and flags but a fresh parent id. */
	public Traceparent updateParentId(){
		return new Traceparent(traceId, createNewParentId(), traceFlags);
	}

	/*
	 * TraceId is a 32 hex digit String: the creation timestamp in lowercase
	 * base16, followed by a random long in lowercase base16.
	 */
	private static String createNewTraceId(long createdTimestamp){
		return String.format("%016x", createdTimestamp) + String.format("%016x", RANDOM.nextLong());
	}

	/** ParentId is a random long rendered as a 16 hex digit lowercase String. */
	public static String createNewParentId(){
		return String.format("%016x", RANDOM.nextLong());
	}

	/** Decodes the creation timestamp from the first 16 hex digits of the trace id. */
	public long getTimestampInMs(){
		return Long.parseLong(traceId.substring(0, 16), 16);
	}

	public Instant getInstant(){
		return Instant.ofEpochMilli(getTimestampInMs());
	}

	/*----------- trace flags ------------*/

	private static String createDefaultTraceFlag(){
		return TraceContextFlagMask.DEFAULT.toHexCode();
	}

	public void enableSample(){
		this.traceFlags = TraceContextFlagMask.enableTrace(traceFlags);
	}

	public void enableLog(){
		this.traceFlags = TraceContextFlagMask.enableLog(traceFlags);
	}

	public boolean shouldSample(){
		return TraceContextFlagMask.isTraceEnabled(traceFlags);
	}

	public boolean shouldLog(){
		return TraceContextFlagMask.isLogEnabled(traceFlags);
	}

	/** Renders the canonical "version-traceId-parentId-flags" header value. */
	@Override
	public String toString(){
		return String.join(TRACEPARENT_DELIMITER, version, traceId, parentId, traceFlags);
	}

	@Override
	public boolean equals(Object obj){
		if(!(obj instanceof Traceparent)){
			return false;
		}
		Traceparent other = (Traceparent)obj;
		return Objects.equals(version, other.version)
				&& Objects.equals(traceId, other.traceId)
				&& Objects.equals(parentId, other.parentId)
				&& Objects.equals(traceFlags, other.traceFlags);
	}

	@Override
	public int hashCode(){
		return Objects.hash(version, traceId, parentId, traceFlags);
	}

	/**
	 * Parses a traceparent header value; empty when the input is null, too
	 * short, malformed, or carries an unsupported version.
	 */
	public static Optional<Traceparent> parse(String traceparentStr){
		if(traceparentStr == null || traceparentStr.isEmpty()){
			return Optional.empty();
		}else if(traceparentStr.length() < MIN_CHARS_TRACEPARENT){
			return Optional.empty();
		}else if(!TRACEPARENT_PATTERN.matcher(traceparentStr).matches()){
			return Optional.empty();
		}
		String[] tokens = traceparentStr.split(Traceparent.TRACEPARENT_DELIMITER);
		if(!Traceparent.CURRENT_VERSION.equals(tokens[0])){
			return Optional.empty();
		}
		return Optional.of(new Traceparent(tokens[1], tokens[2], tokens[3]));
	}
}
| apache-2.0 |
GolvenH/PocketCampus | app/src/main/java/com/bzu/yhd/pocketcampus/bottomnav/user/view/AnimateImageView.java | 3350 | package com.bzu.yhd.pocketcampus.bottomnav.user.view;
import android.content.Context;
import android.util.AttributeSet;
import com.facebook.rebound.SimpleSpringListener;
import com.facebook.rebound.Spring;
import com.facebook.rebound.SpringSystem;
import de.hdodenhof.circleimageview.CircleImageView;
/**
* Created by xmuSistone.
*/
public class AnimateImageView extends CircleImageView {

    /** Springs that drive this view's on-screen x and y positions. */
    private final Spring springX, springY;

    /**
     * Follower callbacks. When a leading view's spring updates, the trailing
     * view registers these so its own springs chase the leader's current value.
     */
    private final SimpleSpringListener followerListenerX, followerListenerY;

    public AnimateImageView(Context context) {
        this(context, null);
    }

    public AnimateImageView(Context context, AttributeSet attrs) {
        this(context, attrs, 0);
    }

    public AnimateImageView(Context context, AttributeSet attrs, int defStyleAttr) {
        super(context, attrs, defStyleAttr);

        SpringSystem springSystem = SpringSystem.create();
        springX = springSystem.createSpring();
        springY = springSystem.createSpring();

        // Each spring repositions the view as its value evolves.
        springX.addListener(new SimpleSpringListener() {
            @Override
            public void onSpringUpdate(Spring spring) {
                moveToScreenX((int) spring.getCurrentValue());
            }
        });
        springY.addListener(new SimpleSpringListener() {
            @Override
            public void onSpringUpdate(Spring spring) {
                moveToScreenY((int) spring.getCurrentValue());
            }
        });

        followerListenerX = follower(springX);
        followerListenerY = follower(springY);
    }

    /** Builds a listener that forwards another spring's current value into {@code target}. */
    private static SimpleSpringListener follower(final Spring target) {
        return new SimpleSpringListener() {
            @Override
            public void onSpringUpdate(Spring spring) {
                target.setEndValue((int) spring.getCurrentValue());
            }
        };
    }

    /** Shifts the view horizontally so its left edge lands at the given screen x. */
    private void moveToScreenX(int screenX) {
        offsetLeftAndRight(screenX - getLeft());
    }

    /** Shifts the view vertically so its top edge lands at the given screen y. */
    private void moveToScreenY(int screenY) {
        offsetTopAndBottom(screenY - getTop());
    }

    /** Animates the view toward the given screen position. */
    public void animTo(int xPos, int yPos) {
        springX.setEndValue(xPos);
        springY.setEndValue(yPos);
    }

    /**
     * Forcibly stops the animation (used by the topmost image view).
     */
    public void stopAnimation() {
        springX.setAtRest();
        springY.setAtRest();
    }

    /**
     * Called only on the topmost view: when the touch is released,
     * snap the springs to the current position and animate back to the origin.
     */
    public void onRelease(int xPos, int yPos) {
        setCurrentSpringPos(getLeft(), getTop());
        animTo(xPos, yPos);
    }

    /**
     * Sets the springs' current position without animating.
     */
    public void setCurrentSpringPos(int xPos, int yPos) {
        springX.setCurrentValue(xPos);
        springY.setCurrentValue(yPos);
    }

    public Spring getSpringX() {
        return springX;
    }

    public Spring getSpringY() {
        return springY;
    }

    public SimpleSpringListener getFollowerListenerX() {
        return followerListenerX;
    }

    public SimpleSpringListener getFollowerListenerY() {
        return followerListenerY;
    }
}
| apache-2.0 |
trasa/aws-sdk-java | aws-java-sdk-ssm/src/main/java/com/amazonaws/services/simplesystemsmanagement/model/transform/DeleteAssociationRequestMarshaller.java | 3452 | /*
* Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.services.simplesystemsmanagement.model.transform;
import static com.amazonaws.util.StringUtils.UTF8;
import static com.amazonaws.util.StringUtils.COMMA_SEPARATOR;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.OutputStreamWriter;
import java.io.StringWriter;
import java.io.Writer;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.List;
import java.util.regex.Pattern;
import com.amazonaws.AmazonClientException;
import com.amazonaws.Request;
import com.amazonaws.DefaultRequest;
import com.amazonaws.http.HttpMethodName;
import com.amazonaws.services.simplesystemsmanagement.model.*;
import com.amazonaws.transform.Marshaller;
import com.amazonaws.util.BinaryUtils;
import com.amazonaws.util.StringUtils;
import com.amazonaws.util.StringInputStream;
import com.amazonaws.util.json.*;
/**
* DeleteAssociationRequest Marshaller
*/
public class DeleteAssociationRequestMarshaller implements
        Marshaller<Request<DeleteAssociationRequest>, DeleteAssociationRequest> {

    /**
     * Marshalls a {@link DeleteAssociationRequest} into an HTTP POST carrying an
     * {@code AmazonSSM.DeleteAssociation} JSON payload.
     *
     * @param deleteAssociationRequest the request to marshall; must not be null
     * @return the populated HTTP request
     * @throws AmazonClientException if the argument is null or JSON serialization fails
     */
    public Request<DeleteAssociationRequest> marshall(
            DeleteAssociationRequest deleteAssociationRequest) {

        if (deleteAssociationRequest == null) {
            throw new AmazonClientException(
                    "Invalid argument passed to marshall(...)");
        }

        Request<DeleteAssociationRequest> request = new DefaultRequest<DeleteAssociationRequest>(
                deleteAssociationRequest, "AWSSimpleSystemsManagement");
        request.addHeader("X-Amz-Target", "AmazonSSM.DeleteAssociation");
        request.setHttpMethod(HttpMethodName.POST);
        request.setResourcePath("");

        try {
            StringWriter stringWriter = new StringWriter();
            JSONWriter jsonWriter = new JSONWriter(stringWriter);
            jsonWriter.object();

            // Only non-null members are serialized into the payload.
            if (deleteAssociationRequest.getName() != null) {
                jsonWriter.key("Name")
                        .value(deleteAssociationRequest.getName());
            }
            if (deleteAssociationRequest.getInstanceId() != null) {
                jsonWriter.key("InstanceId").value(
                        deleteAssociationRequest.getInstanceId());
            }

            jsonWriter.endObject();

            String snippet = stringWriter.toString();
            // Content-Length must be the UTF-8 byte count, not the char count.
            byte[] content = snippet.getBytes(UTF8);
            request.setContent(new StringInputStream(snippet));
            request.addHeader("Content-Length",
                    Integer.toString(content.length));
            request.addHeader("Content-Type", "application/x-amz-json-1.1");
        } catch (Exception e) {
            // Narrowed from Throwable: JVM Errors (OutOfMemoryError, etc.) should
            // propagate rather than be wrapped in a client exception.
            throw new AmazonClientException(
                    "Unable to marshall request to JSON: " + e.getMessage(), e);
        }

        return request;
    }
}
| apache-2.0 |
lingyi2017/wxmp | src/main/java/com/qmx/wxmp/common/web/MediaTypes.java | 1306 | package com.qmx.wxmp.common.web;
/**
 * MediaType constants that carry an explicit UTF-8 charset.
 *
 * <p>Neither JAX-RS nor Spring ships UTF-8 variants of these media types, and
 * Google's {@code MediaType} values require a {@code toString()} call (they are
 * not compile-time constants), so they cannot be used in RESTful method
 * annotations. This class provides plain {@code String} constants instead.</p>
 *
 * @author free lance
 */
public final class MediaTypes {

    public static final String APPLICATION_XML = "application/xml";
    public static final String APPLICATION_XML_UTF_8 = "application/xml; charset=UTF-8";

    public static final String JSON = "application/json";
    public static final String JSON_UTF_8 = "application/json; charset=UTF-8";

    public static final String JAVASCRIPT = "application/javascript";
    public static final String JAVASCRIPT_UTF_8 = "application/javascript; charset=UTF-8";

    public static final String APPLICATION_XHTML_XML = "application/xhtml+xml";
    public static final String APPLICATION_XHTML_XML_UTF_8 = "application/xhtml+xml; charset=UTF-8";

    public static final String TEXT_PLAIN = "text/plain";
    public static final String TEXT_PLAIN_UTF_8 = "text/plain; charset=UTF-8";

    public static final String TEXT_XML = "text/xml";
    public static final String TEXT_XML_UTF_8 = "text/xml; charset=UTF-8";

    public static final String TEXT_HTML = "text/html";
    public static final String TEXT_HTML_UTF_8 = "text/html; charset=UTF-8";

    /** Constant holder; not instantiable. */
    private MediaTypes() {
    }
}
| apache-2.0 |
PennState/directory-fortress-core-1 | src/test/java/org/apache/directory/fortress/core/samples/CreateUserOrgHierarchySample.java | 15806 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.directory.fortress.core.samples;
import org.apache.directory.fortress.core.DelAdminMgr;
import org.apache.directory.fortress.core.DelAdminMgrFactory;
import org.apache.directory.fortress.core.SecurityException;
import org.apache.directory.fortress.core.model.OrgUnit;
import org.apache.directory.fortress.core.impl.TestUtils;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * CreateUserOrgHierarchySample JUnit Test. This test program will show how to build a simple User OrgUnit hierarchy which are
 * used to enable administrators to group Users by organizational structure. This system supports multiple
 * inheritance between OrgUnits and there are no limits on how deep a hierarchy can be. The OrgUnits require name and type. Optionally can
 * include a description. The User OrgUnit must be associated with Users and are used to provide Administrative RBAC control
 * over who may perform User Role assigns and deassigns in directory.
 * @author <a href="mailto:dev@directory.apache.org">Apache Directory Project</a>
 */
public class CreateUserOrgHierarchySample extends TestCase
{
    private static final String CLS_NM = CreateUserOrgHierarchySample.class.getName();
    private static final Logger LOG = LoggerFactory.getLogger( CLS_NM );

    // This constant will be added to index for creation of multiple nodes in directory.
    public static final String TEST_HIER_USERORG_PREFIX = "sampleHierUserOrg";
    public static final String TEST_HIER_BASE_USERORG = "sampleHierUserOrg1";
    public static final int TEST_NUMBER = 6;
    public static final String TEST_HIER_DESC_USERORG_PREFIX = "sampleHierUserOrgD";
    public static final String TEST_HIER_ASC_USERORG_PREFIX = "sampleHierUserOrgA";

    /**
     * Simple constructor kicks off JUnit test suite.
     * @param name the test method to run
     */
    public CreateUserOrgHierarchySample(String name)
    {
        super(name);
    }

    /**
     * Run the User OrgUnit test cases. Teardown cases are queued first (skipped on
     * the very first run, when there is nothing in the directory to remove yet),
     * followed by the creation cases.
     *
     * @return Test
     */
    public static Test suite()
    {
        TestSuite suite = new TestSuite();
        if(!AllSamplesJUnitTest.isFirstRun())
        {
            suite.addTest(new CreateUserOrgHierarchySample("testDeleteHierUserOrgs"));
            suite.addTest(new CreateUserOrgHierarchySample("testDeleteDescendantUserOrgs"));
            suite.addTest(new CreateUserOrgHierarchySample("testDeleteAscendantUserOrgs"));
        }
        suite.addTest(new CreateUserOrgHierarchySample("testCreateHierUserOrgs"));
        suite.addTest(new CreateUserOrgHierarchySample("testCreateDescendantUserOrgs"));
        suite.addTest(new CreateUserOrgHierarchySample("testCreateAscendantUserOrgs"));
        return suite;
    }

    /**
     * Remove the simple hierarchical OrgUnits from the directory. Before removal call the API to remove the relationship
     * between the parent and child OrgUnits. Once the relationship is removed the parent OrgUnit can be removed.
     * User OrgUnit removal is not allowed (SecurityException will be thrown) if ou is assigned to Users in ldap.
     * <p>
     * <img src="./doc-files/HierUserOrgSimple.png" alt="">
     */
    public static void testDeleteHierUserOrgs()
    {
        String szLocation = ".testDeleteHierUserOrgs";
        if(AllSamplesJUnitTest.isFirstRun())
        {
            return;
        }
        try
        {
            // Instantiate the DelAdminMgr implementation which is used to provision ARBAC policies.
            DelAdminMgr delAdminMgr = DelAdminMgrFactory.createInstance(TestUtils.getContext());
            for (int i = 1; i < TEST_NUMBER; i++)
            {
                // The key that must be set to locate any OrgUnit is simply the name and type.
                OrgUnit parentOrgUnit = new OrgUnit(TEST_HIER_USERORG_PREFIX + i, OrgUnit.Type.USER);
                OrgUnit childOrgUnit = new OrgUnit(TEST_HIER_USERORG_PREFIX + (i + 1), OrgUnit.Type.USER);

                // Remove the relationship from the parent and child OrgUnit:
                delAdminMgr.deleteInheritance(parentOrgUnit, childOrgUnit);

                // Remove the parent OrgUnit from directory:
                delAdminMgr.delete(parentOrgUnit);
            }
            // Remove the child OrgUnit from directory:
            delAdminMgr.delete(new OrgUnit(TEST_HIER_USERORG_PREFIX + TEST_NUMBER, OrgUnit.Type.USER));
            LOG.info(szLocation + " success");
        }
        catch (SecurityException ex)
        {
            LOG.error(szLocation + " caught SecurityException rc=" + ex.getErrorId() + ", msg=" + ex.getMessage(), ex);
            fail(ex.getMessage());
        }
    }

    /**
     * Add a simple OrgUnit hierarchy to ldap. The OrgUnits will named to include a name,'sampleHierUserOrg', appended with the
     * sequence of 1 - 6. 'sampleHierUserOrg1' is the root or highest level OrgUnit in the structure while sampleHierUserOrg6 is the lowest
     * most child. Fortress OrgUnits may have multiple parents which is demonstrated in testCreateAscendantUserOrgs sample.
     * <p>
     * <img src="./doc-files/HierUserOrgSimple.png" alt="">
     */
    public static void testCreateHierUserOrgs()
    {
        String szLocation = ".testCreateHierUserOrgs";
        try
        {
            // Instantiate the DelAdminMgr implementation which is used to provision ARBAC policies.
            DelAdminMgr delAdminMgr = DelAdminMgrFactory.createInstance(TestUtils.getContext());

            // Instantiate the root OrgUnit entity. OrgUnit requires name and type before addition.
            OrgUnit baseOrgUnit = new OrgUnit(TEST_HIER_BASE_USERORG, OrgUnit.Type.USER);

            // Add the root OrgUnit entity to the directory.
            delAdminMgr.add(baseOrgUnit);

            // Create User OrgUnits, 'sampleHierUserOrg2' - 'sampleHierUserOrg6'.
            for (int i = 2; i < TEST_NUMBER + 1; i++)
            {
                // Instantiate the OrgUnit entity.
                OrgUnit childOrgUnit = new OrgUnit(TEST_HIER_USERORG_PREFIX + i, OrgUnit.Type.USER);

                // Add the OrgUnit entity to the directory.
                delAdminMgr.add(childOrgUnit);

                // Instantiate the parent OrgUnit. The key is the name and type.
                OrgUnit parentOrgUnit = new OrgUnit(TEST_HIER_USERORG_PREFIX + (i - 1), OrgUnit.Type.USER);

                // Add a relationship between the parent and child OrgUnits:
                delAdminMgr.addInheritance(parentOrgUnit, childOrgUnit);
            }
            LOG.info(szLocation + " success");
        }
        catch (SecurityException ex)
        {
            LOG.error(szLocation + " caught SecurityException rc=" + ex.getErrorId() + ", msg=" + ex.getMessage(), ex);
            fail(ex.getMessage());
        }
    }

    /**
     * Demonstrate teardown of a parent to child relationship of one-to-many. Each child must first remove the inheritance
     * relationship with parent before being removed from ldap. The parent OrgUnit will be removed from ldap last.
     * User OrgUnit removal is not allowed (SecurityException will be thrown) if ou is assigned to Users in ldap.
     * <p>
     * <img src="./doc-files/HierUserOrgDescendants.png" alt="">
     */
    public static void testDeleteDescendantUserOrgs()
    {
        String szLocation = ".testDeleteDescendantUserOrgs";
        if(AllSamplesJUnitTest.isFirstRun())
        {
            return;
        }
        try
        {
            // Instantiate the DelAdminMgr implementation which is used to provision ARBAC policies.
            DelAdminMgr delAdminMgr = DelAdminMgrFactory.createInstance(TestUtils.getContext());

            // This parent has many children. They must be deleted before parent itself can.
            OrgUnit parentOrgUnit = new OrgUnit(TEST_HIER_DESC_USERORG_PREFIX + 1, OrgUnit.Type.USER);

            // There are N User OrgUnits to process:
            for (int i = 2; i < TEST_NUMBER + 1; i++)
            {
                // Instantiate the child OrgUnit entity. The key is the name and type.
                OrgUnit childOrgUnit = new OrgUnit(TEST_HIER_DESC_USERORG_PREFIX + i, OrgUnit.Type.USER);

                // Remove the relationship from the parent and child OrgUnit:
                delAdminMgr.deleteInheritance(parentOrgUnit, childOrgUnit);

                // Remove the child OrgUnit from directory:
                delAdminMgr.delete(childOrgUnit);
            }
            // Remove the parent OrgUnit from directory:
            delAdminMgr.delete(parentOrgUnit);
            LOG.info(szLocation + " success");
        }
        catch (SecurityException ex)
        {
            LOG.error(szLocation + " caught SecurityException rc=" + ex.getErrorId() + ", msg=" + ex.getMessage(), ex);
            fail(ex.getMessage());
        }
    }

    /**
     * Demonstrate a parent to child OrgUnit structure of one-to-many. The parent OrgUnit must be created before
     * the call to addDescendant which will Add a new OrgUnit node and set a OrgUnit relationship with parent node.
     * <p>
     * <img src="./doc-files/HierUserOrgDescendants.png" alt="">
     */
    public static void testCreateDescendantUserOrgs()
    {
        String szLocation = ".testCreateDescendantUserOrgs";
        try
        {
            // Instantiate the DelAdminMgr implementation which is used to provision ARBAC policies.
            DelAdminMgr delAdminMgr = DelAdminMgrFactory.createInstance(TestUtils.getContext());

            // Instantiate the parent User OrgUnit entity. This needs a name and type before it can be added to ldap.
            OrgUnit parentOrgUnit = new OrgUnit(TEST_HIER_DESC_USERORG_PREFIX + 1, OrgUnit.Type.USER);

            // This parent will have many children:
            delAdminMgr.add(parentOrgUnit);

            // Create User OrgUnits, 'sampleHierUserOrgD2' - 'sampleHierUserOrgD6'.
            for (int i = 1; i < TEST_NUMBER; i++)
            {
                // Now add relationship to the directory between parent and child User OrgUnits.
                OrgUnit childOrgUnit = new OrgUnit(TEST_HIER_DESC_USERORG_PREFIX + (i + 1), OrgUnit.Type.USER);

                // Now add child OrgUnit entity to directory and add relationship with existing parent OrgUnit.
                delAdminMgr.addDescendant(parentOrgUnit, childOrgUnit);
            }
            LOG.info(szLocation + " success");
        }
        catch (SecurityException ex)
        {
            LOG.error(szLocation + " caught SecurityException rc=" + ex.getErrorId() + ", msg=" + ex.getMessage(), ex);
            fail(ex.getMessage());
        }
    }

    /**
     * This example demonstrates tear down of a child to parent represented as one-to-many. The parents must all
     * be removed from the child before the child can be removed.
     * User OrgUnit removal is not allowed (SecurityException will be thrown) if ou is assigned to Users in ldap.
     * <p>
     * <img src="./doc-files/HierUserOrgAscendants.png" alt="">
     */
    public static void testDeleteAscendantUserOrgs()
    {
        String szLocation = ".testDeleteAscendantUserOrgs";
        if(AllSamplesJUnitTest.isFirstRun())
        {
            return;
        }
        try
        {
            // Instantiate the DelAdminMgr implementation which is used to provision ARBAC policies.
            DelAdminMgr delAdminMgr = DelAdminMgrFactory.createInstance(TestUtils.getContext());

            // This child OrgUnit has many parents:
            OrgUnit childOrgUnit = new OrgUnit(TEST_HIER_ASC_USERORG_PREFIX + 1, OrgUnit.Type.USER);

            for (int i = 2; i < TEST_NUMBER + 1; i++)
            {
                // Instantiate the parent. This needs a name and type before it can be used in operation.
                OrgUnit parentOrgUnit = new OrgUnit(TEST_HIER_ASC_USERORG_PREFIX + i, OrgUnit.Type.USER);

                // Remove the relationship between parent and child OrgUnits:
                delAdminMgr.deleteInheritance(parentOrgUnit, childOrgUnit);

                // Remove the parent OrgUnit from directory:
                delAdminMgr.delete(parentOrgUnit);
            }
            // Remove the child OrgUnit from directory:
            delAdminMgr.delete(childOrgUnit);
            LOG.info(szLocation + " success");
        }
        catch (SecurityException ex)
        {
            LOG.error(szLocation + " caught SecurityException rc=" + ex.getErrorId() + ", msg=" + ex.getMessage(), ex);
            fail(ex.getMessage());
        }
    }

    /**
     * Demonstrate a child to parent OrgUnit structure of one-to-many. To use this API, the child OrgUnit must be created before
     * the call to addAscendant which will Add a new OrgUnit node and set a OrgUnit relationship with child node.
     * <p>
     * <img src="./doc-files/HierUserOrgAscendants.png" alt="">
     */
    public static void testCreateAscendantUserOrgs()
    {
        String szLocation = ".testCreateAscendantUserOrgs";
        try
        {
            // Instantiate the DelAdminMgr implementation which is used to provision ARBAC policies.
            DelAdminMgr delAdminMgr = DelAdminMgrFactory.createInstance(TestUtils.getContext());

            // Instantiate the child OrgUnit. This needs a name and type.
            OrgUnit childOrgUnit = new OrgUnit(TEST_HIER_ASC_USERORG_PREFIX + 1, OrgUnit.Type.USER);

            // This child will have many parents:
            delAdminMgr.add(childOrgUnit);

            // Create OrgUnits, 'sampleHierUserOrgA2' - 'sampleHierUserOrgA6'.
            for (int i = 1; i < TEST_NUMBER; i++)
            {
                // Instantiate the parent OrgUnit. This needs a name and type before it can be added to ldap.
                OrgUnit parentOrgUnit = new OrgUnit(TEST_HIER_ASC_USERORG_PREFIX + (i + 1), OrgUnit.Type.USER);

                // Now add parent OrgUnit entity to directory and add relationship with existing child OrgUnit.
                delAdminMgr.addAscendant(childOrgUnit, parentOrgUnit);
            }
            // Log success for consistency with the other sample methods in this class.
            LOG.info(szLocation + " success");
        }
        catch (SecurityException ex)
        {
            LOG.error(szLocation + " caught SecurityException rc=" + ex.getErrorId() + ", msg=" + ex.getMessage(), ex);
            fail(ex.getMessage());
        }
    }
}
| apache-2.0 |
LimitPointSystems/SheafSystem | tools/viewer/user/HedgeHogFieldActorPropertiesPanel.java | 4423 |
//
// Copyright (c) 2014 Limit Point Systems, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package tools.viewer.user;
import tools.viewer.common.*;
import tools.viewer.render.*;
import tools.common.gui.*;
import java.awt.*;
import java.awt.event.*;
import java.util.*;
import javax.swing.*;
import javax.swing.event.*;
import javax.swing.border.*;
import java.text.*;
import vtk.*;
/**
 * Implementation of <code>G3DFieldActorPropertiesPanel</code> for editing the
 * values of a <code>HedgeHogFieldActorDescriptor</code>.
 */
public class HedgeHogFieldActorPropertiesPanel
  extends G3DFieldActorPropertiesPanel
{
  // CONSTANTS FACET

  /** Vector modes the user may choose from. */
  protected static final String[] VECTOR_MODES =
    { ViewerConstants.VECTOR_MAGNITUDE,
      ViewerConstants.VECTOR_NORMAL };

  // GUI FACET

  /** The "Hedge Hog" tab. */
  protected JPanel hedgeHogPanel;

  /** Editor for the glyph scale factor. */
  protected JSpinner scaleFactorSpinner;

  /** Selector for the vector mode. */
  protected JComboBox vectorModeComboBox;

  // CONSTRUCTORS

  /**
   * Constructor
   */
  public HedgeHogFieldActorPropertiesPanel(G3DViewer xviewer,
                                           FieldActorDescriptor[] xdescriptors)
  {
    super(xviewer, xdescriptors);

    hedgeHogPanel = createHedgeHogPanel();
    tabbedPane.addTab("Hedge Hog", hedgeHogPanel);

    initValues();
  }

  // CREATE FACET

  /**
   * Create hedge hog panel
   */
  protected JPanel createHedgeHogPanel()
  {
    JPanel result = new JPanel();
    result.setLayout(new BoxLayout(result, BoxLayout.PAGE_AXIS));
    result.setBorder(
      BorderFactory.createCompoundBorder(
        BorderFactory.createEmptyBorder(6, 12, 6, 12),
        BorderFactory.createTitledBorder("Hedge Hog:")));

    result.add(Box.createVerticalGlue());

    // Row 1: scale factor spinner.
    JPanel scaleRow = new JPanel();
    JLabel scaleLabel = new JLabel("Scale Factor: ", JLabel.RIGHT);
    scaleLabel.setAlignmentX(Component.CENTER_ALIGNMENT);
    scaleFactorSpinner =
      new JSpinner(new SpinnerNumberModel(1.0, 0.0, 10000000.0, 0.01));
    scaleRow.add(scaleLabel);
    scaleRow.add(scaleFactorSpinner);
    result.add(scaleRow);

    result.add(Box.createVerticalGlue());

    // Row 2: vector mode selector.
    JPanel modeRow = new JPanel();
    JLabel modeLabel = new JLabel("Vector Mode:", JLabel.RIGHT);
    modeLabel.setAlignmentX(Component.CENTER_ALIGNMENT);
    vectorModeComboBox = new JComboBox(VECTOR_MODES);
    modeRow.add(modeLabel);
    modeRow.add(vectorModeComboBox);
    result.add(modeRow);

    result.add(Box.createVerticalGlue());

    return result;
  }

  // INITIALIZE FACET

  /**
   * Populates the panel's widgets from the first selected descriptor.
   */
  public void initValues()
  {
    super.initValues();

    // Use the first actor in the list to initialize the user interface.
    HedgeHogFieldActorDescriptor actor =
      (HedgeHogFieldActorDescriptor) descriptors[0];

    initHedgeHogPanel(actor);
  }

  /**
   * Copies the descriptor's values into the hedge hog widgets.
   */
  protected void initHedgeHogPanel(HedgeHogFieldActorDescriptor actor)
  {
    scaleFactorSpinner.setValue(actor.scaleFactor);
    vectorModeComboBox.setSelectedItem(actor.vectorMode);
  }

  // APPLY FACET

  /**
   * Writes the edited values back into every selected descriptor.
   */
  public void doApply()
  {
    // Enter the wait state; it is restored by UpdatePropertiesPanelEvent.
    setWaitState(true);

    synchronized (viewer.getScript())
    {
      synchronized (viewer.getScene())
      {
        // Apply the changes to the descriptors.
        for(FieldActorDescriptor descriptor : descriptors)
        {
          applyHedgeHog((HedgeHogFieldActorDescriptor) descriptor);
        }
      }
    }

    super.doApply(false);
  }

  /**
   * Transfers the widget values into a single descriptor.
   */
  public void applyHedgeHog(HedgeHogFieldActorDescriptor actor)
  {
    SpinnerNumberModel model = (SpinnerNumberModel) scaleFactorSpinner.getModel();
    actor.scaleFactor = model.getNumber().doubleValue();
    actor.vectorMode = (String) vectorModeComboBox.getSelectedItem();
  }
}
| apache-2.0 |
applicationsky/MyCoolWeather | app/src/main/java/cn/oeaom/CoolWeather/WeatherActivity.java | 12783 | package cn.oeaom.CoolWeather;
import android.content.Intent;
import android.content.SharedPreferences;
import android.graphics.Typeface;
import android.media.Image;
import android.os.Bundle;
import android.preference.PreferenceManager;
import android.support.design.widget.FloatingActionButton;
import android.support.design.widget.Snackbar;
import android.support.v4.view.GravityCompat;
import android.support.v4.widget.DrawerLayout;
import android.support.v7.app.AppCompatActivity;
import android.support.v7.widget.Toolbar;
import android.util.Log;
import android.view.View;
import android.widget.Button;
import android.widget.ImageView;
import android.widget.TextView;
import android.widget.Toast;
import java.io.IOException;
import cn.oeaom.CoolWeather.GSON.Weather;
import cn.oeaom.CoolWeather.Util.Utility;
import okhttp3.Call;
import okhttp3.Callback;
import cn.oeaom.CoolWeather.Util.HttpUtil;
import okhttp3.Response;
public class WeatherActivity extends AppCompatActivity {
private static final String TAG = "WeatherActivity";
// NOTE(review): API key hard-coded in source — consider moving it into
// BuildConfig / a non-committed secrets file.
private static final String API_KEY = "bc0418b57b2d4918819d3974ac1285d9";
// Authentication key for the weather service.
// Views that make up the weather information panel:
public DrawerLayout drawerLayout; // drawer shown by swiping from the left or tapping the home button
//public TextView tvTitle; // title *deprecated
private TextView weatherTime; // timestamp of the weather report
private TextView weatherDegree; // temperature value
private TextView measure2; // temperature unit
private TextView weatherPlace; // location of the weather report
private TextView weatherType; // weather condition text
private String mWeatherId; // city identifier
private ImageView weatherStat; // weather condition icon
/**
 * Sets up the weather screen: loads the custom clock font, binds the views,
 * reads the target city from the launching Intent, and either renders cached
 * weather data or requests fresh data from the server.
 */
@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_weather);
    //Toolbar toolbar = (Toolbar) findViewById(R.id.toolbar);
    //setSupportActionBar(toolbar);
    // FloatingActionButton fab = (FloatingActionButton) findViewById(R.id.fab);
    // fab.setOnClickListener(new View.OnClickListener() {
    // @Override
    // public void onClick(View view) {
    // Snackbar.make(view, "Replace with your own action", Snackbar.LENGTH_LONG)
    // .setAction("Action", null).show();
    // }
    // });
    Typeface fontFace = Typeface.createFromAsset(getAssets(), "fonts/AndroidClock.ttf");
    // The font file must be a TrueType font (ttf).
    // If an external font shows no visual change (Droid Sans is used instead),
    // it usually means Android does not support that font — not that the
    // program itself has an error.
    weatherTime = (TextView)findViewById(R.id.weather_info_time);
    weatherTime.setTypeface(fontFace);
    //
    weatherDegree = (TextView)findViewById(R.id.degree_value);
    weatherDegree.setTypeface(fontFace);
    TextView measure = (TextView)findViewById(R.id.degree_measure);
    // measure.setTypeface(fontFace);
    measure2 = (TextView)findViewById(R.id.degree_measure2);
    //measure2.setTypeface(fontFace);
    weatherPlace = (TextView)findViewById(R.id.weather_info_place);
    //weatherPlace.setTypeface(fontFace);
    weatherType = (TextView)findViewById(R.id.weather_info_text);
    //weatherType.setTypeface(fontFace);
    weatherStat = (ImageView)findViewById(R.id.weatherIcon);
    //
    // TextView weatherInfo = (TextView)findViewById(R.id.weather_info_text);
    //
    // weatherInfo.setTypeface(fontFace);
    //
    //text.setTextSize(50);
    Intent intent=getIntent();
    // Read the value stored under the given key in this Intent's extras.
    String weatherId=intent.getStringExtra("weather_id");
    String CountryName = intent.getStringExtra("CountryName");
    // tvTitle = (TextView)findViewById(R.id.title_text_weather);
    // //tvTitle.setText(weatherId);
    // tvTitle.setText(CountryName);
    // // tvTitle.setTextSize(60);
    // tvTitle.setTypeface(fontFace);
    drawerLayout = (DrawerLayout)findViewById(R.id.drawer_layout);
    Button btnBack = (Button)findViewById(R.id.btn_home);
    btnBack.setOnClickListener(new View.OnClickListener() {
        @Override
        public void onClick(View view) {
            //Intent intent = new Intent(WeatherActivity.this,MainActivity.class);
            //startActivity(intent);
            // WeatherActivity.this.finish();
            // Open the navigation drawer instead of finishing the activity.
            drawerLayout.openDrawer(GravityCompat.START);
            Log.v(TAG,"Clicked nav btn");
        }
    });
    SharedPreferences prefs = PreferenceManager.getDefaultSharedPreferences(this);
    String weatherString = prefs.getString("weather", null);
    if (weatherString != null) {
        // Cache hit: parse the cached weather data directly.
        Weather weather = Utility.handleWeatherResponse(weatherString);
        mWeatherId = weather.basic.weatherId;
        showWeatherInfo(weather);
    } else {
        // No cache: query the server for the weather.
        mWeatherId = getIntent().getStringExtra("weather_id");
        // weatherLayout.setVisibility(View.INVISIBLE);
        requestWeather(mWeatherId);
    }
    // swipeRefresh.setOnRefreshListener(new SwipeRefreshLayout.OnRefreshListener() {
    // @Override
    // public void onRefresh() {
    // requestWeather(mWeatherId);
    // }
    // });
}
// public void requestWeather(final String weatherId){
// tvTitle.setText(weatherId);
// }
/**
 * Fetches weather data for the given city id from the remote API.
 *
 * On success the raw JSON is cached in default SharedPreferences under the
 * "weather" key and the UI is refreshed via showWeatherInfo(); on any failure
 * a toast is shown. All UI work is posted back to the main thread, since the
 * OkHttp callback runs on a background thread.
 */
public void requestWeather(final String weatherId) {
    String weatherUrl = "http://guolin.tech/api/weather?cityid=" + weatherId + "&key="+API_KEY;
    HttpUtil.sendOkHttpRequest(weatherUrl, new Callback() {
        @Override
        public void onResponse(Call call, Response response) throws IOException {
            final String responseText = response.body().string();
            // Debug: dump the raw response between separator lines.
            Log.v(TAG,"=======================================================================");
            Log.v(TAG,responseText);
            Log.v(TAG,"=======================================================================");
            final Weather weather = Utility.handleWeatherResponse(responseText);
            runOnUiThread(new Runnable() {
                @Override
                public void run() {
                    // The API reports success via a "status" field equal to "ok".
                    if (weather != null && "ok".equals(weather.status)) {
                        SharedPreferences.Editor editor = PreferenceManager.getDefaultSharedPreferences(WeatherActivity.this).edit();
                        editor.putString("weather", responseText);
                        editor.apply();
                        //mWeatherId = weather.basic.weatherId;
                        showWeatherInfo(weather);
                    } else {
                        Toast.makeText(WeatherActivity.this, "获取天气信息失败", Toast.LENGTH_SHORT).show();
                    }
                    //swipeRefresh.setRefreshing(false);
                }
            });
        }
        @Override
        public void onFailure(Call call, IOException e) {
            // Network-level failure: log the stack trace and notify the user.
            e.printStackTrace();
            runOnUiThread(new Runnable() {
                @Override
                public void run() {
                    Toast.makeText(WeatherActivity.this, "获取天气信息失败", Toast.LENGTH_SHORT).show();
                    //swipeRefresh.setRefreshing(false);
                }
            });
        }
    });
    //loadBingPic();
}
/**
 * Maps a Chinese weather-condition name (as returned by the weather API)
 * to the matching icon drawable resource id.
 *
 * @param weatherName the condition text, e.g. "晴" or "雷阵雨"
 * @return the drawable resource id, or -1 when the name is not recognised
 */
private int findWeatherIconByName(String weatherName) {
    switch (weatherName) {
        case "晴":             return R.drawable.a044;
        case "多云":           return R.drawable.a045;
        case "少云":           return R.drawable.a046;
        case "晴间多云":       return R.drawable.a047;
        case "阴":             return R.drawable.a048;
        case "有风":           return R.drawable.a049;
        case "平静":           return R.drawable.a050;
        case "微风":           return R.drawable.a000;
        case "和风":           return R.drawable.a001;
        case "清风":           return R.drawable.a002;
        // 强风 and 劲风 share one icon.
        case "强风":
        case "劲风":           return R.drawable.a003;
        case "大风":           return R.drawable.a004;
        case "烈风":           return R.drawable.a005;
        case "风暴":           return R.drawable.a006;
        case "狂爆风":         return R.drawable.a007;
        case "龙卷风":         return R.drawable.a008;
        case "热带风暴":       return R.drawable.a009;
        case "阵雨":           return R.drawable.a012;
        case "强阵雨":         return R.drawable.a013;
        case "雷阵雨":         return R.drawable.a014;
        case "强雷阵雨":       return R.drawable.a015;
        case "雷阵雨伴有冰雹": return R.drawable.a016;
        case "小雨":           return R.drawable.a017;
        case "中雨":           return R.drawable.a018;
        case "大雨":           return R.drawable.a019;
        case "极端降雨":       return R.drawable.a020;
        // 毛毛雨 and 细雨 share one icon.
        case "毛毛雨":
        case "细雨":           return R.drawable.a021;
        case "暴雨":           return R.drawable.a022;
        case "大暴雨":         return R.drawable.a023;
        case "特大暴雨":       return R.drawable.a024;
        case "冻雨":           return R.drawable.a025;
        case "小雪":           return R.drawable.a026;
        case "中雪":           return R.drawable.a027;
        case "大雪":           return R.drawable.a028;
        case "暴雪":           return R.drawable.a029;
        case "雨夹雪":         return R.drawable.a030;
        case "雨雪天气":       return R.drawable.a031;
        case "阵雨夹雪":       return R.drawable.a032;
        case "阵雪":           return R.drawable.a033;
        case "薄雾":           return R.drawable.a034;
        case "雾":             return R.drawable.a035;
        case "霾":             return R.drawable.a036;
        case "扬沙":           return R.drawable.a037;
        case "浮尘":           return R.drawable.a038;
        case "沙尘暴":         return R.drawable.a039;
        case "强沙尘暴":       return R.drawable.a040;
        case "热":             return R.drawable.a041;
        case "冷":             return R.drawable.a042;
        case "未知":           return R.drawable.a043;
        default:
            // Unrecognised condition name.
            return -1;
    }
}
/**
 * Renders parsed weather data into the activity's views: city name,
 * last-update time, temperature, condition text and condition icon.
 *
 * @param weather the parsed weather payload to display
 */
private void showWeatherInfo(Weather weather) {
    // The update time arrives as "yyyy-MM-dd HH:mm"; show only the time part.
    String updateTime = weather.basic.update.updateTime.split(" ")[1];
    String conditionText = weather.now.more.info;

    weatherPlace.setText(weather.basic.cityName);
    weatherTime.setText(updateTime);
    weatherDegree.setText(weather.now.temperature);
    weatherType.setText(conditionText);
    weatherStat.setImageResource(findWeatherIconByName(conditionText));
}
}
| apache-2.0 |
JDriven/structurizr-java | structurizr-core/src/com/structurizr/view/View.java | 5965 | package com.structurizr.view;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.structurizr.model.*;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.Set;
import java.util.stream.Collectors;
@JsonIgnoreProperties(ignoreUnknown=true)
public abstract class View implements Comparable<View> {

    /** The software system this view describes; resolved at runtime, not serialized directly. */
    private SoftwareSystem softwareSystem;

    /** Remembers the software system id across (de)serialization when softwareSystem is unset. */
    private String softwareSystemId;

    private String description = "";
    private PaperSize paperSize = PaperSize.A4_Portrait;

    /** Element views in insertion order (LinkedHashSet keeps diagram ordering stable). */
    private Set<ElementView> elementViews = new LinkedHashSet<>();

    View() {
    }

    public View(SoftwareSystem softwareSystem) {
        this.softwareSystem = softwareSystem;
    }

    @JsonIgnore
    public Model getModel() {
        return softwareSystem.getModel();
    }

    @JsonIgnore
    public SoftwareSystem getSoftwareSystem() {
        return softwareSystem;
    }

    public void setSoftwareSystem(SoftwareSystem softwareSystem) {
        this.softwareSystem = softwareSystem;
    }

    /**
     * Gets the id of the software system this view is associated with,
     * falling back to the id captured during deserialization.
     */
    public String getSoftwareSystemId() {
        if (this.softwareSystem != null) {
            return this.softwareSystem.getId();
        } else {
            return this.softwareSystemId;
        }
    }

    void setSoftwareSystemId(String softwareSystemId) {
        this.softwareSystemId = softwareSystemId;
    }

    public abstract ViewType getType();

    public String getDescription() {
        return description;
    }

    public void setDescription(String description) {
        this.description = description;
    }

    public PaperSize getPaperSize() {
        return paperSize;
    }

    public void setPaperSize(PaperSize paperSize) {
        this.paperSize = paperSize;
    }

    /**
     * Adds all software systems in the model to this view.
     */
    public void addAllSoftwareSystems() {
        getModel().getSoftwareSystems().forEach(this::addElement);
    }

    /**
     * Adds the given software system to this view.
     *
     * @param softwareSystem the SoftwareSystem to add
     */
    public void addSoftwareSystem(SoftwareSystem softwareSystem) {
        addElement(softwareSystem);
    }

    /**
     * Adds all people in the model to this view.
     */
    public void addAllPeople() {
        getModel().getPeople().forEach(this::addElement);
    }

    /**
     * Adds the given person to this view.
     *
     * @param person the Person to add
     */
    public void addPerson(Person person) {
        addElement(person);
    }

    /**
     * Adds the element to this view, provided it belongs to the same model.
     * NOTE(review): throws NPE if softwareSystem has not been set (e.g. a
     * freshly deserialized view) — confirm callers always resolve it first.
     */
    protected void addElement(Element element) {
        if (softwareSystem.getModel().contains(element)) {
            elementViews.add(new ElementView(element));
        }
    }

    protected void removeElement(Element element) {
        // ElementView equality is based on the wrapped element's identity.
        ElementView elementView = new ElementView(element);
        elementViews.remove(elementView);
    }

    /**
     * Gets the set of elements in this view.
     *
     * @return a Set of ElementView objects
     */
    public Set<ElementView> getElements() {
        return elementViews;
    }

    /**
     * Derives the relationships to show: every model relationship whose
     * source AND destination are both elements of this view.
     */
    public Set<RelationshipView> getRelationships() {
        Set<Relationship> relationships = new HashSet<>();
        Set<Element> elements = getElements().stream()
                .map(ElementView::getElement)
                .collect(Collectors.toSet());
        elements.forEach(b -> relationships.addAll(b.getRelationships()));
        return relationships.stream()
                .filter(r -> elements.contains(r.getSource()) && elements.contains(r.getDestination()))
                .map(RelationshipView::new)
                .collect(Collectors.toSet());
    }

    public void setRelationships(Set<RelationshipView> relationships) {
        // do nothing ... these are determined automatically (see getRelationships)
    }

    /**
     * Removes all elements that have no relationships
     * to other elements in this view.
     */
    public void removeElementsWithNoRelationships() {
        Set<RelationshipView> relationships = getRelationships();
        Set<String> elementIds = new HashSet<>();
        relationships.forEach(rv -> elementIds.add(rv.getRelationship().getSourceId()));
        relationships.forEach(rv -> elementIds.add(rv.getRelationship().getDestinationId()));
        elementViews.removeIf(ev -> !elementIds.contains(ev.getId()));
    }

    /**
     * Removes all elements that cannot be reached by following outgoing
     * relationships from the given element.
     */
    public void removeElementsThatCantBeReachedFrom(Element element) {
        Set<String> elementIdsToShow = new HashSet<>();
        findElementsToShow(element, elementIdsToShow, 1);
        elementViews.removeIf(ev -> !elementIdsToShow.contains(ev.getId()));
    }

    private void findElementsToShow(Element element, Set<String> elementIds, int depth) {
        // Treat elementIds as the visited set: skipping already-seen elements
        // both terminates on cyclic models and avoids exponential re-traversal
        // of shared elements. The previous implementation used an arbitrary
        // depth limit of 100 instead, which silently truncated legitimately
        // deep dependency chains.
        if (elementIds.contains(element.getId())) {
            return;
        }
        if (elementViews.contains(new ElementView(element))) {
            elementIds.add(element.getId());
            // depth retained for signature compatibility; no longer a cut-off.
            element.getRelationships().forEach(r -> findElementsToShow(r.getDestination(), elementIds, depth + 1));
        }
    }

    public abstract String getName();

    @Override
    public int compareTo(View view) {
        return getTitle().compareTo(view.getTitle());
    }

    private String getTitle() {
        return getName() + " - " + getDescription();
    }

    ElementView findElementView(Element element) {
        for (ElementView elementView : getElements()) {
            if (elementView.getElement().equals(element)) {
                return elementView;
            }
        }
        return null;
    }

    /**
     * Copies paper size and per-element layout information from another view,
     * matching elements between the two views.
     */
    public void copyLayoutInformationFrom(View source) {
        this.setPaperSize(source.getPaperSize());
        for (ElementView sourceElementView : source.getElements()) {
            ElementView destinationElementView = findElementView(sourceElementView.getElement());
            if (destinationElementView != null) {
                destinationElementView.copyLayoutInformationFrom(sourceElementView);
            }
        }
    }
} | apache-2.0 |
ruslansennov/vertx-consul-client | src/main/java/io/vertx/ext/consul/Coordinate.java | 4253 | /*
* Copyright (c) 2016 The original author or authors
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* and Apache License v2.0 which accompanies this distribution.
*
* The Eclipse Public License is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* The Apache License v2.0 is available at
* http://www.opensource.org/licenses/apache2.0.php
*
* You may elect to redistribute this code under either of these licenses.
*/
package io.vertx.ext.consul;
import io.vertx.codegen.annotations.DataObject;
import io.vertx.core.json.JsonObject;
import java.util.List;
/**
* Holds network coordinates of node
*
* @author <a href="mailto:ruslan.sennov@gmail.com">Ruslan Sennov</a>
* @see <a href="https://www.consul.io/docs/internals/coordinates.html">Network coordinates</a>
*/
@DataObject(generateConverter = true)
public class Coordinate {

    private String node;
    private float adj;
    private float err;
    private float height;
    private List<Float> vec;

    /**
     * Default constructor.
     */
    public Coordinate() {}

    /**
     * Copy constructor.
     *
     * @param other the coordinate to copy
     */
    public Coordinate(Coordinate other) {
        node = other.node;
        adj = other.adj;
        err = other.err;
        height = other.height;
        vec = other.vec;
    }

    /**
     * Constructor from JSON.
     *
     * @param coordinate the JSON representation
     */
    public Coordinate(JsonObject coordinate) {
        CoordinateConverter.fromJson(coordinate, this);
    }

    /**
     * Convert to JSON.
     *
     * @return the JSON representation of this coordinate
     */
    public JsonObject toJson() {
        JsonObject json = new JsonObject();
        CoordinateConverter.toJson(this, json);
        return json;
    }

    /**
     * Get the node name.
     *
     * @return name of node
     */
    public String getNode() {
        return node;
    }

    /**
     * Set the node name.
     *
     * @param node name of node
     * @return reference to this, for fluency
     */
    public Coordinate setNode(String node) {
        this.node = node;
        return this;
    }

    /**
     * Get the adjustment.
     *
     * @return adjustment
     */
    public float getAdj() {
        return adj;
    }

    /**
     * Set the adjustment.
     *
     * @param adj adjustment
     * @return reference to this, for fluency
     */
    public Coordinate setAdj(float adj) {
        this.adj = adj;
        return this;
    }

    /**
     * Get the error.
     *
     * @return error
     */
    public float getErr() {
        return err;
    }

    /**
     * Set the error.
     *
     * @param err error
     * @return reference to this, for fluency
     */
    public Coordinate setErr(float err) {
        this.err = err;
        return this;
    }

    /**
     * Get the height.
     *
     * @return height
     */
    public float getHeight() {
        return height;
    }

    /**
     * Set the height.
     *
     * @param height height
     * @return reference to this, for fluency
     */
    public Coordinate setHeight(float height) {
        this.height = height;
        return this;
    }

    /**
     * Get the coordinate vector.
     *
     * @return vector
     */
    public List<Float> getVec() {
        return vec;
    }

    /**
     * Set the coordinate vector.
     *
     * @param vec vector
     * @return reference to this, for fluency
     */
    public Coordinate setVec(List<Float> vec) {
        this.vec = vec;
        return this;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        Coordinate other = (Coordinate) o;
        // Float.compare treats NaN as equal to NaN and distinguishes -0.0f
        // from +0.0f, matching the previous field-by-field comparison.
        return Float.compare(adj, other.adj) == 0
            && Float.compare(err, other.err) == 0
            && Float.compare(height, other.height) == 0
            && (node == null ? other.node == null : node.equals(other.node))
            && (vec == null ? other.vec == null : vec.equals(other.vec));
    }

    @Override
    public int hashCode() {
        // Same 31-based formula (and therefore identical hash values) as the
        // original generated implementation; zero floats contribute 0.
        int h = node == null ? 0 : node.hashCode();
        h = 31 * h + (adj == 0.0f ? 0 : Float.floatToIntBits(adj));
        h = 31 * h + (err == 0.0f ? 0 : Float.floatToIntBits(err));
        h = 31 * h + (height == 0.0f ? 0 : Float.floatToIntBits(height));
        h = 31 * h + (vec == null ? 0 : vec.hashCode());
        return h;
    }
}
| apache-2.0 |
Legostaev/xmlsec-gost | src/test/java/org/apache/xml/security/test/dom/transforms/implementations/TransformBase64DecodeTest.java | 5574 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.xml.security.test.dom.transforms.implementations;
import java.io.ByteArrayInputStream;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.xpath.XPath;
import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathFactory;
import org.apache.xml.security.signature.XMLSignatureInput;
import org.apache.xml.security.test.dom.DSNamespaceContext;
import org.apache.xml.security.transforms.Transforms;
import org.apache.xml.security.transforms.implementations.TransformBase64Decode;
import org.apache.xml.security.utils.XMLUtils;
import org.w3c.dom.Document;
import org.w3c.dom.Node;
/**
 * Unit test for {@link org.apache.xml.security.transforms.implementations.TransformBase64Decode}
 *
 * @author Christian Geuer-Pollmann
 */
public class TransformBase64DecodeTest extends org.junit.Assert {

    static org.slf4j.Logger log =
        org.slf4j.LoggerFactory.getLogger(TransformBase64DecodeTest.class);

    static {
        org.apache.xml.security.Init.init();
    }

    /**
     * Decodes a base64-encoded string via the Base64 decode transform.
     */
    @org.junit.Test
    public void test1() throws Exception {
        // base64 encoded
        String s1 =
            "VGhlIFVSSSBvZiB0aGUgdHJhbnNmb3JtIGlzIGh0dHA6Ly93d3cudzMub3JnLzIwMDAvMDkveG1s\n"
            + "ZHNpZyNiYXNlNjQ=";

        Document doc = TransformBase64DecodeTest.createDocument();
        Transforms t = new Transforms(doc);
        doc.appendChild(t.getElement());
        t.addTransform(TransformBase64Decode.implementedTransformURI);

        // Use an explicit charset: the no-arg String.getBytes() / new String(byte[])
        // overloads depend on the platform default encoding and can break the
        // test on non-UTF-8 platforms.
        XMLSignatureInput in =
            new XMLSignatureInput(new ByteArrayInputStream(s1.getBytes("UTF-8")));
        XMLSignatureInput out = t.performTransforms(in);
        String result = new String(out.getBytes(), "UTF-8");

        assertTrue(
            result.equals("The URI of the transform is http://www.w3.org/2000/09/xmldsig#base64")
        );
    }

    /**
     * Decodes input that was base64-encoded twice by applying the transform twice.
     */
    @org.junit.Test
    public void test2() throws Exception {
        // base64 encoded twice
        String s2 =
            "VkdobElGVlNTU0J2WmlCMGFHVWdkSEpoYm5ObWIzSnRJR2x6SUdoMGRIQTZMeTkzZDNjdWR6TXVi\n"
            + "M0puTHpJd01EQXZNRGt2ZUcxcwpaSE5wWnlOaVlYTmxOalE9";

        Document doc = TransformBase64DecodeTest.createDocument();
        Transforms t = new Transforms(doc);
        doc.appendChild(t.getElement());
        t.addTransform(TransformBase64Decode.implementedTransformURI);

        XMLSignatureInput in =
            new XMLSignatureInput(new ByteArrayInputStream(s2.getBytes("UTF-8")));
        XMLSignatureInput out = t.performTransforms(t.performTransforms(in));
        String result = new String(out.getBytes(), "UTF-8");

        assertTrue(
            result.equals("The URI of the transform is http://www.w3.org/2000/09/xmldsig#base64")
        );
    }

    /**
     * Decodes base64 content split across an XML element, checking that the
     * transform collects text nodes from the selected subtree.
     */
    @org.junit.Test
    public void test3() throws Exception {
        //J-
        String input = ""
            + "<Object xmlns:signature='http://www.w3.org/2000/09/xmldsig#'>\n"
            + "<signature:Base64>\n"
            + "VGhlIFVSSSBvZiB0aGU gdHJhbn<RealText>Nmb 3JtIGlzIG<test/>h0dHA6</RealText>Ly93d3cudzMub3JnLzIwMDAvMDkveG1s\n"
            + "ZHNpZyNiYXNlNjQ=\n"
            + "</signature:Base64>\n"
            + "</Object>\n"
            ;
        //J+

        DocumentBuilder db = XMLUtils.createDocumentBuilder(false);
        db.setErrorHandler(new org.apache.xml.security.utils.IgnoreAllErrorHandler());
        Document doc = db.parse(new ByteArrayInputStream(input.getBytes("UTF-8")));
        //XMLUtils.circumventBug2650(doc);

        XPathFactory xpf = XPathFactory.newInstance();
        XPath xpath = xpf.newXPath();
        xpath.setNamespaceContext(new DSNamespaceContext());

        String expression = "//ds:Base64";
        Node base64Node =
            (Node) xpath.evaluate(expression, doc, XPathConstants.NODE);
        XMLSignatureInput xmlinput = new XMLSignatureInput(base64Node);

        Document doc2 = TransformBase64DecodeTest.createDocument();
        Transforms t = new Transforms(doc2);
        doc2.appendChild(t.getElement());
        t.addTransform(Transforms.TRANSFORM_BASE64_DECODE);

        XMLSignatureInput out = t.performTransforms(xmlinput);
        String result = new String(out.getBytes(), "UTF-8");

        assertTrue(
            "\"" + result + "\"",
            result.equals("The URI of the transform is http://www.w3.org/2000/09/xmldsig#base64")
        );
    }

    /** Creates an empty DOM document used to host the Transforms element. */
    private static Document createDocument() throws ParserConfigurationException {
        DocumentBuilder db = XMLUtils.createDocumentBuilder(false);
        Document doc = db.newDocument();
        if (doc == null) {
            throw new RuntimeException("Could not create a Document");
        } else {
            log.debug("I could create the Document");
        }
        return doc;
    }
}
| apache-2.0 |
ShawnDongAi/AEASSISTANT | AEAssistant/src/com/zzn/aeassistant/zxing/decoding/FinishListener.java | 1311 | /*
* Copyright (C) 2010 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.zzn.aeassistant.zxing.decoding;
import android.app.Activity;
import android.content.DialogInterface;
/**
 * Listener that finishes the hosting activity when a dialog button is
 * clicked, when the dialog is cancelled, or when invoked directly as a
 * {@link Runnable}.
 *
 * @author Sean Owen
 */
public final class FinishListener
    implements DialogInterface.OnClickListener, DialogInterface.OnCancelListener, Runnable {

  private final Activity activity;

  public FinishListener(Activity activityToFinish) {
    this.activity = activityToFinish;
  }

  @Override
  public void onClick(DialogInterface dialogInterface, int which) {
    run();
  }

  @Override
  public void onCancel(DialogInterface dialogInterface) {
    run();
  }

  @Override
  public void run() {
    // All paths funnel here: close the activity.
    activity.finish();
  }
}
| apache-2.0 |
moparisthebest/beehive | beehive-netui-core/src/main/java/org/apache/beehive/netui/util/ParamHelper.java | 5988 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* $Header:$
*/
package org.apache.beehive.netui.util;
import java.util.Map;
import java.util.List;
import java.lang.reflect.Array;
import org.apache.beehive.netui.util.logging.Logger;
/**
 * This class is used by NetUI tags that use parameters.
 */
public class ParamHelper
{
    private static final Logger logger = Logger.getInstance(ParamHelper.class);

    /**
     * Add a new parameter or update an existing parameter's list of values.
     * <p/>
     * <em>Implementation Note:</em> in the case that a Map was provided for
     * the <code>value</code> parameter, this returns without doing anything;
     * in any other case, params is updated (even if <code>value</code> is
     * null).
     * </p>
     * <p/>
     * If value is some object (not an array or list), the string
     * representation of that object is added as a value for name. If the
     * value is a list (or array) of objects, then the string representation
     * of each element is added as a value for name. When there are multiple
     * values for a name, then an array of Strings is used in the Map.
     * </p>
     *
     * @param params an existing Map of names and values to update
     * @param name the name of the parameter to add or update
     * @param value an item or list of items to put into the map
     * @throws IllegalArgumentException in the case that either the params
     * or name given was null
     */
    public static void addParam(Map params, String name, Object value)
    {
        if (params == null)
            throw new IllegalArgumentException("Parameter map cannot be null");
        if (name == null)
            throw new IllegalArgumentException("Parameter name cannot be null");

        // Maps are not a legal parameter value; warn and leave params untouched.
        if (value instanceof Map) {
            logger.warn(Bundle.getString("Tags_BadParameterType", name));
            return;
        }

        if (value == null)
            value = "";

        // Number of individual values contributed by the new value argument.
        int newCount = countOf(value);

        // Nothing to add: keep any existing values as they are. (The previous
        // implementation fell through to the single-value branch here and
        // threw ArrayIndexOutOfBoundsException when an empty array/list was
        // combined with one existing value.)
        if (newCount == 0)
            return;

        // An existing entry for this name is either a String or a String[].
        Object o = params.get(name);
        int oldCount = 0;
        if (o != null) {
            assert (o instanceof String ||
                    o instanceof String[]);
            oldCount = o.getClass().isArray() ? Array.getLength(o) : 1;
        }

        int length = oldCount + newCount;

        // Exactly one value overall: store it as a plain String.
        // (Only reachable when there was no existing value, since newCount >= 1.)
        if (length == 1) {
            Object single = firstOf(value);
            params.put(name, single == null ? "" : single.toString());
            return;
        }

        // Multiple values: merge existing and new values into a String[].
        String[] values = new String[length];
        int offset = 0;

        // Copy any existing values to the front of the new array.
        if (o != null) {
            if (o.getClass().isArray()) {
                String[] obs = (String[]) o;
                System.arraycopy(obs, 0, values, 0, obs.length);
                offset = obs.length;
            }
            else {
                values[0] = o.toString();
                offset = 1;
            }
        }

        // Append the new values, mapping null elements to "".
        if (value.getClass().isArray()) {
            int size = Array.getLength(value);
            for (int i = 0; i < size; i++) {
                Object val = Array.get(value, i);
                values[i + offset] = (val != null) ? val.toString() : "";
            }
        }
        else if (value instanceof List) {
            List list = (List) value;
            int size = list.size();
            for (int i = 0; i < size; i++) {
                Object val = list.get(i);
                values[i + offset] = (val != null) ? val.toString() : "";
            }
        }
        else {
            values[offset] = value.toString();
        }

        // store the new values array
        params.put(name, values);
    }

    /**
     * Number of values represented by {@code value}: the array length, the
     * list size, or 1 for any other (scalar) object.
     */
    private static int countOf(Object value)
    {
        if (value.getClass().isArray())
            return Array.getLength(value);
        if (value instanceof List)
            return ((List) value).size();
        return 1;
    }

    /**
     * First value represented by {@code value}; must only be called when
     * {@link #countOf} returned at least 1.
     */
    private static Object firstOf(Object value)
    {
        if (value.getClass().isArray())
            return Array.get(value, 0);
        if (value instanceof List)
            return ((List) value).get(0);
        return value;
    }
}
| apache-2.0 |
raphaelning/resteasy-client-android | jaxrs/resteasy-jaxrs/src/main/java/org/jboss/resteasy/spi/ResteasyUriInfo.java | 10699 | package org.jboss.resteasy.spi;
import org.jboss.resteasy.specimpl.MultivaluedMapImpl;
import org.jboss.resteasy.specimpl.PathSegmentImpl;
import org.jboss.resteasy.specimpl.ResteasyUriBuilder;
import org.jboss.resteasy.util.Encode;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.core.PathSegment;
import javax.ws.rs.core.UriBuilder;
import javax.ws.rs.core.UriInfo;
import java.io.UnsupportedEncodingException;
import java.net.URI;
import java.net.URLDecoder;
import java.util.ArrayList;
import java.util.List;
/**
* UriInfo implementation with some added extra methods to help process requests
*
* @author <a href="mailto:bill@burkecentral.com">Bill Burke</a>
* @version $Revision: 1 $
*/
public class ResteasyUriInfo implements UriInfo
{
private String path;
private String encodedPath;
private String matchingPath;
private MultivaluedMap<String, String> queryParameters;
private MultivaluedMap<String, String> encodedQueryParameters;
private MultivaluedMap<String, String> pathParameters;
private MultivaluedMap<String, String> encodedPathParameters;
private MultivaluedMap<String, PathSegment[]> pathParameterPathSegments;
private MultivaluedMap<String, PathSegment[]> encodedPathParameterPathSegments;
private List<PathSegment> pathSegments;
private List<PathSegment> encodedPathSegments;
private URI absolutePath;
private URI requestURI;
private URI baseURI;
private List<String> matchedUris;
private List<String> encodedMatchedUris;
private List<String> encodedMatchedPaths = new ArrayList<String>();
private List<Object> ancestors;
public ResteasyUriInfo(URI base, URI relative)
{
String b = base.toString();
if (!b.endsWith("/")) b += "/";
String r = relative.getRawPath();
if (r.startsWith("/"))
{
encodedPath = r;
path = relative.getPath();
}
else
{
encodedPath = "/" + r;
path = "/" + relative.getPath();
}
requestURI = UriBuilder.fromUri(base).path(relative.getRawPath()).replaceQuery(relative.getRawQuery()).build();
baseURI = base;
encodedPathSegments = PathSegmentImpl.parseSegments(encodedPath, false);
this.pathSegments = new ArrayList<PathSegment>(encodedPathSegments.size());
for (PathSegment segment : encodedPathSegments)
{
pathSegments.add(new PathSegmentImpl(((PathSegmentImpl) segment).getOriginal(), true));
}
extractParameters(requestURI.getRawQuery());
extractMatchingPath(encodedPathSegments);
absolutePath = UriBuilder.fromUri(requestURI).replaceQuery(null).build();
}
public ResteasyUriInfo(URI requestURI)
{
String r = requestURI.getRawPath();
if (r.startsWith("/"))
{
encodedPath = r;
path = requestURI.getPath();
}
else
{
encodedPath = "/" + r;
path = "/" + requestURI.getPath();
}
this.requestURI = requestURI;
baseURI = UriBuilder.fromUri(requestURI).replacePath("").build();
encodedPathSegments = PathSegmentImpl.parseSegments(encodedPath, false);
this.pathSegments = new ArrayList<PathSegment>(encodedPathSegments.size());
for (PathSegment segment : encodedPathSegments)
{
pathSegments.add(new PathSegmentImpl(((PathSegmentImpl) segment).getOriginal(), true));
}
extractParameters(requestURI.getRawQuery());
extractMatchingPath(encodedPathSegments);
absolutePath = UriBuilder.fromUri(requestURI).replaceQuery(null).build();
}
/**
* matching path without matrix parameters
*
* @param encodedPathSegments
*/
protected void extractMatchingPath(List<PathSegment> encodedPathSegments)
{
StringBuilder preprocessedPath = new StringBuilder();
for (PathSegment pathSegment : encodedPathSegments)
{
preprocessedPath.append("/").append(pathSegment.getPath());
}
matchingPath = preprocessedPath.toString();
}
/**
* Encoded path without matrix parameters
*
* @return
*/
public String getMatchingPath()
{
return matchingPath;
}
/**
* Create a UriInfo from the baseURI
*
* @param relative
* @return
*/
public ResteasyUriInfo setRequestUri(URI relative)
{
String rel = relative.toString();
if (rel.startsWith(baseURI.toString()))
{
relative = URI.create(rel.substring(baseURI.toString().length()));
}
return new ResteasyUriInfo(baseURI, relative);
}
public String getPath()
{
return path;
}
public String getPath(boolean decode)
{
if (decode) return getPath();
return encodedPath;
}
public List<PathSegment> getPathSegments()
{
return pathSegments;
}
public List<PathSegment> getPathSegments(boolean decode)
{
if (decode) return getPathSegments();
return encodedPathSegments;
}
public URI getRequestUri()
{
return requestURI;
}
public UriBuilder getRequestUriBuilder()
{
return UriBuilder.fromUri(requestURI);
}
public URI getAbsolutePath()
{
return absolutePath;
}
public UriBuilder getAbsolutePathBuilder()
{
return UriBuilder.fromUri(absolutePath);
}
public URI getBaseUri()
{
return baseURI;
}
public UriBuilder getBaseUriBuilder()
{
return UriBuilder.fromUri(baseURI);
}
public MultivaluedMap<String, String> getPathParameters()
{
if (pathParameters == null)
{
pathParameters = new MultivaluedMapImpl<String, String>();
}
return pathParameters;
}
public void addEncodedPathParameter(String name, String value)
{
getEncodedPathParameters().add(name, value);
String value1 = Encode.decodePath(value);
getPathParameters().add(name, value1);
}
private MultivaluedMap<String, String> getEncodedPathParameters()
{
if (encodedPathParameters == null)
{
encodedPathParameters = new MultivaluedMapImpl<String, String>();
}
return encodedPathParameters;
}
public MultivaluedMap<String, PathSegment[]> getEncodedPathParameterPathSegments()
{
if (encodedPathParameterPathSegments == null)
{
encodedPathParameterPathSegments = new MultivaluedMapImpl<String, PathSegment[]>();
}
return encodedPathParameterPathSegments;
}
public MultivaluedMap<String, PathSegment[]> getPathParameterPathSegments()
{
if (pathParameterPathSegments == null)
{
pathParameterPathSegments = new MultivaluedMapImpl<String, PathSegment[]>();
}
return pathParameterPathSegments;
}
public MultivaluedMap<String, String> getPathParameters(boolean decode)
{
if (decode) return getPathParameters();
return getEncodedPathParameters();
}
public MultivaluedMap<String, String> getQueryParameters()
{
if (queryParameters == null)
{
queryParameters = new MultivaluedMapImpl<String, String>();
}
return queryParameters;
}
protected MultivaluedMap<String, String> getEncodedQueryParameters()
{
if (encodedQueryParameters == null)
{
this.encodedQueryParameters = new MultivaluedMapImpl<String, String>();
}
return encodedQueryParameters;
}
public MultivaluedMap<String, String> getQueryParameters(boolean decode)
{
if (decode) return getQueryParameters();
else return getEncodedQueryParameters();
}
protected void extractParameters(String queryString)
{
if (queryString == null || queryString.equals("")) return;
String[] params = queryString.split("&");
for (String param : params)
{
if (param.indexOf('=') >= 0)
{
String[] nv = param.split("=", 2);
try
{
String name = URLDecoder.decode(nv[0], "UTF-8");
String val = nv.length > 1 ? nv[1] : "";
getEncodedQueryParameters().add(name, val);
getQueryParameters().add(name, URLDecoder.decode(val, "UTF-8"));
}
catch (UnsupportedEncodingException e)
{
throw new RuntimeException(e);
}
}
else
{
try
{
String name = URLDecoder.decode(param, "UTF-8");
getEncodedQueryParameters().add(name, "");
getQueryParameters().add(name, "");
}
catch (UnsupportedEncodingException e)
{
throw new RuntimeException(e);
}
}
}
}
public List<String> getMatchedURIs(boolean decode)
{
if (decode)
{
if (matchedUris == null) matchedUris = new ArrayList<String>();
return matchedUris;
}
else
{
if (encodedMatchedUris == null) encodedMatchedUris = new ArrayList<String>();
return encodedMatchedUris;
}
}
public List<String> getMatchedURIs()
{
return getMatchedURIs(true);
}
public List<Object> getMatchedResources()
{
if (ancestors == null) ancestors = new ArrayList<Object>();
return ancestors;
}
public void pushCurrentResource(Object resource)
{
if (ancestors == null) ancestors = new ArrayList<Object>();
ancestors.add(0, resource);
}
public void pushMatchedPath(String encoded)
{
encodedMatchedPaths.add(0, encoded);
}
public List<String> getEncodedMatchedPaths()
{
return encodedMatchedPaths;
}
public void popMatchedPath()
{
encodedMatchedPaths.remove(0);
}
public void pushMatchedURI(String encoded)
{
   // Normalize: strip at most one trailing and one leading slash.
   if (encoded.endsWith("/")) encoded = encoded.substring(0, encoded.length() - 1);
   if (encoded.startsWith("/")) encoded = encoded.substring(1);
   // Make sure both URI lists exist before recording the match.
   if (encodedMatchedUris == null) encodedMatchedUris = new ArrayList<String>();
   if (matchedUris == null) matchedUris = new ArrayList<String>();
   String decoded = Encode.decode(encoded);
   // Newest match goes to the front of both lists.
   encodedMatchedUris.add(0, encoded);
   matchedUris.add(0, decoded);
}
@Override
public URI resolve(URI uri)
{
   // Resolve the given URI against this request's base URI.
   return getBaseUri().resolve(uri);
}
@Override
public URI relativize(URI uri)
{
   URI from = getRequestUri();
   URI to = uri;
   // A URI with neither scheme nor host is treated as relative to the base URI:
   // rebuild it as an absolute URI before relativizing against the request URI.
   if (uri.getScheme() == null && uri.getHost() == null)
   {
      to = getBaseUriBuilder().replaceQuery(null).path(uri.getPath()).replaceQuery(uri.getQuery()).fragment(uri.getFragment()).build();
   }
   return ResteasyUriBuilder.relativize(from, to);
}
}
| apache-2.0 |
wangning82/CabinetMS | src/main/java/com/cabinetms/client/TacticMediaCommand.java | 1292 | package com.cabinetms.client;
import java.util.List;
import com.google.common.collect.Lists;
/**
 * Command message sent to a client terminal describing a media "tactic"
 * (scheduled policy): the instruction itself, the target terminal's
 * addressing information, the active date window and the per-item details.
 */
public class TacticMediaCommand {
    private String command; // instruction/command to execute
    private String clientIp; // IP address of the client terminal
    private String destination; // message-queue destination address of the terminal
    private Integer startDate;// tactic (policy) start date
    private Integer endDate;// tactic (policy) end date
    // Detail entries of the tactic; initialized to an empty linked list so it is never null.
    private List<TacticDetailMediaCommand> detailList = Lists.newLinkedList();
    public List<TacticDetailMediaCommand> getDetailList() {
        return detailList;
    }
    public void setDetailList(List<TacticDetailMediaCommand> detailList) {
        this.detailList = detailList;
    }
    public String getCommand() {
        return command;
    }
    public void setCommand(String command) {
        this.command = command;
    }
    public String getClientIp() {
        return clientIp;
    }
    public void setClientIp(String clientIp) {
        this.clientIp = clientIp;
    }
    public String getDestination() {
        return destination;
    }
    public void setDestination(String destination) {
        this.destination = destination;
    }
    public Integer getStartDate() {
        return startDate;
    }
    public void setStartDate(Integer startDate) {
        this.startDate = startDate;
    }
    public Integer getEndDate() {
        return endDate;
    }
    public void setEndDate(Integer endDate) {
        this.endDate = endDate;
    }
}
| apache-2.0 |
fuyongde/jason | showcase/src/main/java/com/jason/showcase/lambdas/Lambda.java | 342 | package com.jason.showcase.lambdas;
/**
* Created by Qinjianf on 2016/7/19.
*/
public class Lambda {
    /** Invokes the supplied action with a fixed greeting message. */
    public void execute(Action action) {
        action.run("Hello Lambda!");
    }
    /** Demonstrates {@link #execute} with a lambda that prints its argument. */
    public void test() {
        execute(message -> System.out.println(message));
    }
    /** Entry point: runs the lambda demonstration. */
    public static void main(String[] args) {
        Lambda demo = new Lambda();
        demo.test();
    }
}
| apache-2.0 |
cbaenziger/oozie | sharelib/spark/src/main/java/org/apache/oozie/action/hadoop/SparkArgsExtractor.java | 22247 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.oozie.action.hadoop;
import com.google.common.annotations.VisibleForTesting;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.commons.lang.StringUtils;
import org.apache.directory.api.util.Strings;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.Reader;
import java.io.StringReader;
import java.io.Writer;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.regex.Pattern;
import static org.apache.oozie.action.hadoop.SparkActionExecutor.SPARK_DEFAULT_OPTS;
/**
 * Builds the argument list Oozie hands to {@code spark-submit} from a Spark
 * action's configuration: master/deploy mode, user-supplied spark-opts,
 * classpaths, security-token flags, Log4J settings, localized files/archives
 * and a merged spark-defaults properties file.
 */
@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN", justification = "Properties file should be specified by user")
class SparkArgsExtractor {
    // NOTE(review): the '.' is an unescaped regex metacharacter, so this also
    // matches e.g. "spark-defaultsXconf" — presumably intended as a literal.
    private static final Pattern SPARK_DEFAULTS_FILE_PATTERN = Pattern.compile("spark-defaults.conf");
    private static final String FILES_OPTION = "--files";
    private static final String ARCHIVES_OPTION = "--archives";
    private static final String LOG4J_CONFIGURATION_JAVA_OPTION = "-Dlog4j.configuration=";
    // Token/credential switches: "tokens" names are used by Spark 1.x, the
    // "credentials" names by Spark 2.x.
    private static final String SECURITY_TOKENS_HADOOPFS = "spark.yarn.security.tokens.hadoopfs.enabled";
    private static final String SECURITY_TOKENS_HIVE = "spark.yarn.security.tokens.hive.enabled";
    private static final String SECURITY_TOKENS_HBASE = "spark.yarn.security.tokens.hbase.enabled";
    private static final String SECURITY_CREDENTIALS_HADOOPFS = "spark.yarn.security.credentials.hadoopfs.enabled";
    private static final String SECURITY_CREDENTIALS_HIVE = "spark.yarn.security.credentials.hive.enabled";
    private static final String SECURITY_CREDENTIALS_HBASE = "spark.yarn.security.credentials.hbase.enabled";
    private static final String PWD = "$PWD" + File.separator + "*";
    private static final String MASTER_OPTION = "--master";
    private static final String MODE_OPTION = "--deploy-mode";
    private static final String JOB_NAME_OPTION = "--name";
    private static final String CLASS_NAME_OPTION = "--class";
    private static final String VERBOSE_OPTION = "--verbose";
    private static final String DRIVER_CLASSPATH_OPTION = "--driver-class-path";
    private static final String EXECUTOR_CLASSPATH = "spark.executor.extraClassPath=";
    private static final String DRIVER_CLASSPATH = "spark.driver.extraClassPath=";
    private static final String EXECUTOR_EXTRA_JAVA_OPTIONS = "spark.executor.extraJavaOptions=";
    private static final String DRIVER_EXTRA_JAVA_OPTIONS = "spark.driver.extraJavaOptions=";
    private static final Pattern SPARK_VERSION_1 = Pattern.compile("^1.*");
    private static final String SPARK_YARN_JAR = "spark.yarn.jar";
    private static final String SPARK_YARN_JARS = "spark.yarn.jars";
    private static final String OPT_SEPARATOR = "=";
    private static final String OPT_VALUE_SEPARATOR = ",";
    private static final String CONF_OPTION = "--conf";
    private static final String MASTER_OPTION_YARN_CLUSTER = "yarn-cluster";
    private static final String MASTER_OPTION_YARN_CLIENT = "yarn-client";
    private static final String MASTER_OPTION_YARN = "yarn";
    private static final String DEPLOY_MODE_CLUSTER = "cluster";
    private static final String DEPLOY_MODE_CLIENT = "client";
    private static final String SPARK_YARN_TAGS = "spark.yarn.tags";
    private static final String OPT_PROPERTIES_FILE = "--properties-file";
    public static final String SPARK_DEFAULTS_GENERATED_PROPERTIES = "spark-defaults-oozie-generated.properties";
    // Set by extract() when the application jar turns out to be a .py script.
    private boolean pySpark = false;
    private final Configuration actionConf;
    SparkArgsExtractor(final Configuration actionConf) {
        this.actionConf = actionConf;
    }
    /** @return whether extract() detected a PySpark (.py) application. */
    boolean isPySpark() {
        return pySpark;
    }
    /**
     * Translates the action configuration plus raw spark-opts into the final
     * spark-submit argument list. Also has the side effects of setting
     * {@link #pySpark} and writing the merged properties file to disk.
     *
     * @param mainArgs application arguments appended after the application jar
     * @return the ordered spark-submit arguments
     */
    List<String> extract(final String[] mainArgs) throws OozieActionConfiguratorException, IOException, URISyntaxException {
        final List<String> sparkArgs = new ArrayList<>();
        sparkArgs.add(MASTER_OPTION);
        final String master = actionConf.get(SparkActionExecutor.SPARK_MASTER);
        sparkArgs.add(master);
        // In local mode, everything runs here in the Launcher Job.
        // In yarn-client mode, the driver runs here in the Launcher Job and the
        // executor in Yarn.
        // In yarn-cluster mode, the driver and executor run in Yarn.
        final String sparkDeployMode = actionConf.get(SparkActionExecutor.SPARK_MODE);
        if (sparkDeployMode != null) {
            sparkArgs.add(MODE_OPTION);
            sparkArgs.add(sparkDeployMode);
        }
        // Accept both the legacy combined master strings ("yarn-cluster",
        // "yarn-client") and the newer master + deploy-mode pairs.
        final boolean yarnClusterMode = master.equals(MASTER_OPTION_YARN_CLUSTER)
                || (master.equals(MASTER_OPTION_YARN) && sparkDeployMode != null && sparkDeployMode.equals(DEPLOY_MODE_CLUSTER));
        final boolean yarnClientMode = master.equals(MASTER_OPTION_YARN_CLIENT)
                || (master.equals(MASTER_OPTION_YARN) && sparkDeployMode != null && sparkDeployMode.equals(DEPLOY_MODE_CLIENT));
        sparkArgs.add(JOB_NAME_OPTION);
        sparkArgs.add(actionConf.get(SparkActionExecutor.SPARK_JOB_NAME));
        final String className = actionConf.get(SparkActionExecutor.SPARK_CLASS);
        if (className != null) {
            sparkArgs.add(CLASS_NAME_OPTION);
            sparkArgs.add(className);
        }
        appendOoziePropertiesToSparkConf(sparkArgs);
        String jarPath = actionConf.get(SparkActionExecutor.SPARK_JAR);
        if (jarPath != null && jarPath.endsWith(".py")) {
            pySpark = true;
        }
        // Track which settings the user supplied explicitly so defaults are
        // only appended when missing.
        boolean addedSecurityTokensHadoopFS = false;
        boolean addedSecurityTokensHive = false;
        boolean addedSecurityTokensHBase = false;
        boolean addedSecurityCredentialsHadoopFS = false;
        boolean addedSecurityCredentialsHive = false;
        boolean addedSecurityCredentialsHBase = false;
        boolean addedLog4jDriverSettings = false;
        boolean addedLog4jExecutorSettings = false;
        final StringBuilder driverClassPath = new StringBuilder();
        final StringBuilder executorClassPath = new StringBuilder();
        final StringBuilder userFiles = new StringBuilder();
        final StringBuilder userArchives = new StringBuilder();
        final String sparkOpts = actionConf.get(SparkActionExecutor.SPARK_OPTS);
        String propertiesFile = null;
        if (StringUtils.isNotEmpty(sparkOpts)) {
            final List<String> sparkOptions = SparkOptionsSplitter.splitSparkOpts(sparkOpts);
            // Walk the user options; some are consumed/aggregated here instead
            // of being forwarded verbatim (addToSparkArgs == false).
            for (int i = 0; i < sparkOptions.size(); i++) {
                String opt = sparkOptions.get(i);
                boolean addToSparkArgs = true;
                if (yarnClusterMode || yarnClientMode) {
                    // Classpath fragments are collected and emitted once, later.
                    if (opt.startsWith(EXECUTOR_CLASSPATH)) {
                        appendWithPathSeparator(opt.substring(EXECUTOR_CLASSPATH.length()), executorClassPath);
                        addToSparkArgs = false;
                    }
                    if (opt.startsWith(DRIVER_CLASSPATH)) {
                        appendWithPathSeparator(opt.substring(DRIVER_CLASSPATH.length()), driverClassPath);
                        addToSparkArgs = false;
                    }
                    if (opt.equals(DRIVER_CLASSPATH_OPTION)) {
                        // we need the next element after this option
                        appendWithPathSeparator(sparkOptions.get(i + 1), driverClassPath);
                        // increase i to skip the next element.
                        i++;
                        addToSparkArgs = false;
                    }
                }
                if (opt.startsWith(SECURITY_TOKENS_HADOOPFS)) {
                    addedSecurityTokensHadoopFS = true;
                }
                if (opt.startsWith(SECURITY_TOKENS_HIVE)) {
                    addedSecurityTokensHive = true;
                }
                if (opt.startsWith(SECURITY_TOKENS_HBASE)) {
                    addedSecurityTokensHBase = true;
                }
                if (opt.startsWith(SECURITY_CREDENTIALS_HADOOPFS)) {
                    addedSecurityCredentialsHadoopFS = true;
                }
                if (opt.startsWith(SECURITY_CREDENTIALS_HIVE)) {
                    addedSecurityCredentialsHive = true;
                }
                if (opt.startsWith(SECURITY_CREDENTIALS_HBASE)) {
                    addedSecurityCredentialsHBase = true;
                }
                if (opt.startsWith(OPT_PROPERTIES_FILE)){
                    // Remember the user's properties file; merged later in
                    // mergeAndAddPropertiesFile() rather than passed through.
                    i++;
                    propertiesFile = sparkOptions.get(i);
                    addToSparkArgs = false;
                }
                if (opt.startsWith(EXECUTOR_EXTRA_JAVA_OPTIONS) || opt.startsWith(DRIVER_EXTRA_JAVA_OPTIONS)) {
                    // Append Oozie's Log4J config unless the user already set one.
                    if (!opt.contains(LOG4J_CONFIGURATION_JAVA_OPTION)) {
                        opt += " " + LOG4J_CONFIGURATION_JAVA_OPTION + SparkMain.SPARK_LOG4J_PROPS;
                    } else {
                        System.out.println("Warning: Spark Log4J settings are overwritten." +
                                " Child job IDs may not be available");
                    }
                    if (opt.startsWith(EXECUTOR_EXTRA_JAVA_OPTIONS)) {
                        addedLog4jExecutorSettings = true;
                    } else {
                        addedLog4jDriverSettings = true;
                    }
                }
                if (opt.startsWith(FILES_OPTION)) {
                    // Collect user --files (either "--files=a,b" or "--files a,b").
                    final String userFile;
                    if (opt.contains(OPT_SEPARATOR)) {
                        userFile = opt.substring(opt.indexOf(OPT_SEPARATOR) + OPT_SEPARATOR.length());
                    }
                    else {
                        userFile = sparkOptions.get(i + 1);
                        i++;
                    }
                    if (userFiles.length() > 0) {
                        userFiles.append(OPT_VALUE_SEPARATOR);
                    }
                    userFiles.append(userFile);
                    addToSparkArgs = false;
                }
                if (opt.startsWith(ARCHIVES_OPTION)) {
                    // Collect user --archives analogously to --files.
                    final String userArchive;
                    if (opt.contains(OPT_SEPARATOR)) {
                        userArchive = opt.substring(opt.indexOf(OPT_SEPARATOR) + OPT_SEPARATOR.length());
                    }
                    else {
                        userArchive = sparkOptions.get(i + 1);
                        i++;
                    }
                    if (userArchives.length() > 0) {
                        userArchives.append(OPT_VALUE_SEPARATOR);
                    }
                    userArchives.append(userArchive);
                    addToSparkArgs = false;
                }
                if (addToSparkArgs) {
                    sparkArgs.add(opt);
                }
                else if (sparkArgs.get(sparkArgs.size() - 1).equals(CONF_OPTION)) {
                    // The consumed option was the value of a dangling --conf;
                    // drop that --conf as well.
                    sparkArgs.remove(sparkArgs.size() - 1);
                }
            }
        }
        if ((yarnClusterMode || yarnClientMode)) {
            // Include the current working directory (of executor container)
            // in executor classpath, because it will contain localized
            // files
            appendWithPathSeparator(PWD, executorClassPath);
            appendWithPathSeparator(PWD, driverClassPath);
            sparkArgs.add(CONF_OPTION);
            sparkArgs.add(EXECUTOR_CLASSPATH + executorClassPath.toString());
            sparkArgs.add(CONF_OPTION);
            sparkArgs.add(DRIVER_CLASSPATH + driverClassPath.toString());
        }
        if (actionConf.get(LauncherMain.MAPREDUCE_JOB_TAGS) != null) {
            sparkArgs.add(CONF_OPTION);
            sparkArgs.add(SPARK_YARN_TAGS + OPT_SEPARATOR + actionConf.get(LauncherMain.MAPREDUCE_JOB_TAGS));
        }
        // Default every token/credential switch the user did not set to false.
        if (!addedSecurityTokensHadoopFS) {
            sparkArgs.add(CONF_OPTION);
            sparkArgs.add(SECURITY_TOKENS_HADOOPFS + OPT_SEPARATOR + Boolean.toString(false));
        }
        if (!addedSecurityTokensHive) {
            sparkArgs.add(CONF_OPTION);
            sparkArgs.add(SECURITY_TOKENS_HIVE + OPT_SEPARATOR + Boolean.toString(false));
        }
        if (!addedSecurityTokensHBase) {
            sparkArgs.add(CONF_OPTION);
            sparkArgs.add(SECURITY_TOKENS_HBASE + OPT_SEPARATOR + Boolean.toString(false));
        }
        if (!addedSecurityCredentialsHadoopFS) {
            sparkArgs.add(CONF_OPTION);
            sparkArgs.add(SECURITY_CREDENTIALS_HADOOPFS + OPT_SEPARATOR + Boolean.toString(false));
        }
        if (!addedSecurityCredentialsHive) {
            sparkArgs.add(CONF_OPTION);
            sparkArgs.add(SECURITY_CREDENTIALS_HIVE + OPT_SEPARATOR + Boolean.toString(false));
        }
        if (!addedSecurityCredentialsHBase) {
            sparkArgs.add(CONF_OPTION);
            sparkArgs.add(SECURITY_CREDENTIALS_HBASE + OPT_SEPARATOR + Boolean.toString(false));
        }
        // Default Log4J configuration for driver and executor when not set above.
        if (!addedLog4jExecutorSettings) {
            sparkArgs.add(CONF_OPTION);
            sparkArgs.add(EXECUTOR_EXTRA_JAVA_OPTIONS + LOG4J_CONFIGURATION_JAVA_OPTION + SparkMain.SPARK_LOG4J_PROPS);
        }
        if (!addedLog4jDriverSettings) {
            sparkArgs.add(CONF_OPTION);
            sparkArgs.add(DRIVER_EXTRA_JAVA_OPTIONS + LOG4J_CONFIGURATION_JAVA_OPTION + SparkMain.SPARK_LOG4J_PROPS);
        }
        mergeAndAddPropertiesFile(sparkArgs, propertiesFile);
        if ((yarnClusterMode || yarnClientMode)) {
            // Localize files/archives from the distributed cache plus the
            // user-specified ones; de-duplicate and fix fs.default URIs.
            final Map<String, URI> fixedFileUrisMap =
                    SparkMain.fixFsDefaultUrisAndFilterDuplicates(DistributedCache.getCacheFiles(actionConf));
            fixedFileUrisMap.put(SparkMain.SPARK_LOG4J_PROPS, new Path(SparkMain.SPARK_LOG4J_PROPS).toUri());
            fixedFileUrisMap.put(SparkMain.HIVE_SITE_CONF, new Path(SparkMain.HIVE_SITE_CONF).toUri());
            addUserDefined(userFiles.toString(), fixedFileUrisMap);
            final Collection<URI> fixedFileUris = fixedFileUrisMap.values();
            final JarFilter jarFilter = new JarFilter(fixedFileUris, jarPath);
            jarFilter.filter();
            jarPath = jarFilter.getApplicationJar();
            final String cachedFiles = StringUtils.join(fixedFileUris, OPT_VALUE_SEPARATOR);
            if (cachedFiles != null && !cachedFiles.isEmpty()) {
                sparkArgs.add(FILES_OPTION);
                sparkArgs.add(cachedFiles);
            }
            final Map<String, URI> fixedArchiveUrisMap = SparkMain.fixFsDefaultUrisAndFilterDuplicates(DistributedCache.
                    getCacheArchives(actionConf));
            addUserDefined(userArchives.toString(), fixedArchiveUrisMap);
            final String cachedArchives = StringUtils.join(fixedArchiveUrisMap.values(), OPT_VALUE_SEPARATOR);
            if (cachedArchives != null && !cachedArchives.isEmpty()) {
                sparkArgs.add(ARCHIVES_OPTION);
                sparkArgs.add(cachedArchives);
            }
            setSparkYarnJarsConf(sparkArgs, jarFilter.getSparkYarnJar(), jarFilter.getSparkVersion());
        }
        if (!sparkArgs.contains(VERBOSE_OPTION)) {
            sparkArgs.add(VERBOSE_OPTION);
        }
        // Application jar/script comes last, followed by its own arguments.
        sparkArgs.add(jarPath);
        sparkArgs.addAll(Arrays.asList(mainArgs));
        return sparkArgs;
    }
    /**
     * Merges Spark defaults from the Oozie server, a localized
     * spark-defaults.conf and the user's --properties-file (in that order,
     * later sources winning), persists the result and appends the
     * --properties-file option when anything was written.
     */
    private void mergeAndAddPropertiesFile(final List<String> sparkArgs, final String userDefinedPropertiesFile)
            throws IOException {
        final Properties properties = new Properties();
        loadServerDefaultProperties(properties);
        loadLocalizedDefaultPropertiesFile(properties);
        loadUserDefinedPropertiesFile(userDefinedPropertiesFile, properties);
        final boolean persisted = persistMergedProperties(properties);
        if (persisted) {
            sparkArgs.add(OPT_PROPERTIES_FILE);
            sparkArgs.add(SPARK_DEFAULTS_GENERATED_PROPERTIES);
        }
    }
    /**
     * Writes the merged properties to {@link #SPARK_DEFAULTS_GENERATED_PROPERTIES}.
     *
     * @return true when a non-empty file was written, false when there was nothing to persist
     * @throws IOException if the file cannot be written
     */
    private boolean persistMergedProperties(final Properties properties) throws IOException {
        if (!properties.isEmpty()) {
            try (final Writer writer = new OutputStreamWriter(
                    new FileOutputStream(new File(SPARK_DEFAULTS_GENERATED_PROPERTIES)),
                    StandardCharsets.UTF_8.name())) {
                properties.store(writer, "Properties file generated by Oozie");
                System.out.println(String.format("Persisted merged Spark configs in file %s. Merged properties are: %s",
                        SPARK_DEFAULTS_GENERATED_PROPERTIES, Arrays.toString(properties.stringPropertyNames().toArray())));
                return true;
            } catch (IOException e) {
                System.err.println(String.format("Could not persist derived Spark config file. Reason: %s", e.getMessage()));
                throw e;
            }
        }
        return false;
    }
    /** Loads the user's --properties-file into the merge target, if one was given. */
    private void loadUserDefinedPropertiesFile(final String userDefinedPropertiesFile, final Properties properties) {
        if (userDefinedPropertiesFile != null) {
            System.out.println(String.format("Reading Spark config from %s %s...", OPT_PROPERTIES_FILE, userDefinedPropertiesFile));
            loadProperties(new File(userDefinedPropertiesFile), properties);
        }
    }
    /** Loads a localized spark-defaults.conf from the working directory, if present. */
    private void loadLocalizedDefaultPropertiesFile(final Properties properties) {
        final File localizedDefaultConfFile = SparkMain.getMatchingFile(SPARK_DEFAULTS_FILE_PATTERN);
        if (localizedDefaultConfFile != null) {
            System.out.println(String.format("Reading Spark config from file %s...", localizedDefaultConfFile.getName()));
            loadProperties(localizedDefaultConfFile, properties);
        }
    }
    /** Loads Spark defaults propagated from the Oozie server via the action config. */
    private void loadServerDefaultProperties(final Properties properties) {
        final String sparkDefaultsFromServer = actionConf.get(SPARK_DEFAULT_OPTS, "");
        if (!sparkDefaultsFromServer.isEmpty()) {
            System.out.println("Reading Spark config propagated from Oozie server...");
            try (final StringReader reader = new StringReader(sparkDefaultsFromServer)) {
                properties.load(reader);
            } catch (IOException e) {
                System.err.println(String.format("Could not read propagated Spark config! Reason: %s", e.getMessage()));
            }
        }
    }
    /**
     * Reads a properties file into {@code target}, logging each key that
     * overwrites an existing value. Read errors are logged, not thrown.
     */
    private void loadProperties(final File file, final Properties target) {
        try (final Reader reader = new InputStreamReader(new FileInputStream(file), StandardCharsets.UTF_8.name())) {
            final Properties properties = new Properties();
            properties.load(reader);
            for(String key :properties.stringPropertyNames()) {
                Object prevProperty = target.setProperty(key, properties.getProperty(key));
                if(prevProperty != null){
                    System.out.println(String.format("Value of %s was overwritten from %s", key, file.getName()));
                }
            }
        } catch (IOException e) {
            System.err.println(String.format("Could not read Spark configs from file %s. Reason: %s", file.getName(),
                    e.getMessage()));
        }
    }
    /** Appends {@code what} to {@code to}, inserting a path separator when needed. */
    private void appendWithPathSeparator(final String what, final StringBuilder to) {
        if (to.length() > 0) {
            to.append(File.pathSeparator);
        }
        to.append(what);
    }
    /** Adds each comma-separated user entry to the URI map, keyed by file name. */
    private void addUserDefined(final String userList, final Map<String, URI> urisMap) {
        if (userList != null) {
            for (final String file : userList.split(OPT_VALUE_SEPARATOR)) {
                if (!Strings.isEmpty(file)) {
                    final Path p = new Path(file);
                    urisMap.put(p.getName(), p.toUri());
                }
            }
        }
    }
    /*
     * Get properties that needs to be passed to Spark as Spark configuration from actionConf.
     */
    @VisibleForTesting
    void appendOoziePropertiesToSparkConf(final List<String> sparkArgs) {
        // Forward every oozie.* property except oozie.launcher.* and oozie.spark.*
        // as a spark.oozie.* configuration entry.
        for (final Map.Entry<String, String> oozieConfig : actionConf
                .getValByRegex("^oozie\\.(?!launcher|spark).+").entrySet()) {
            sparkArgs.add(CONF_OPTION);
            sparkArgs.add(String.format("spark.%s=%s", oozieConfig.getKey(), oozieConfig.getValue()));
        }
    }
    /**
     * Sets spark.yarn.jars for Spark 2.X. Sets spark.yarn.jar for Spark 1.X.
     *
     * @param sparkArgs argument list to append to
     * @param sparkYarnJar jar (or jar list) value to set
     * @param sparkVersion detected Spark version string, e.g. "1.6.0"
     */
    private void setSparkYarnJarsConf(final List<String> sparkArgs, final String sparkYarnJar, final String sparkVersion) {
        if (SPARK_VERSION_1.matcher(sparkVersion).find()) {
            // In Spark 1.X.X, set spark.yarn.jar to avoid
            // multiple distribution
            sparkArgs.add(CONF_OPTION);
            sparkArgs.add(SPARK_YARN_JAR + OPT_SEPARATOR + sparkYarnJar);
        } else {
            // In Spark 2.X.X, set spark.yarn.jars
            sparkArgs.add(CONF_OPTION);
            sparkArgs.add(SPARK_YARN_JARS + OPT_SEPARATOR + sparkYarnJar);
        }
    }
}
| apache-2.0 |
ServicioReparaciones/ServicioReparaciones | ServicioReparaciones-ejb/src/main/java/com/common/dao/BaseDAO.java | 2856 |
package com.common.dao;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.logging.Logger;
import javax.persistence.EntityManager;
import javax.persistence.PersistenceContext;
import javax.persistence.PersistenceException;
/**
* La Clase BaseDAO implementa las operaciones básicas de acceso a datos DAO
* utilizando usado por las clases DAO del módulo de ejecución de transacciones.
*
* @author Gestorinc S.A.
* @version $Rev $
*/
public class BaseDAO {
    /**
     * Constant representing the '%' character (SQL LIKE wildcard).
     */
    public static final String SYMBOLO_LIKE = "%";
    /**
     * Constant representing the "'" string (SQL apostrophe).
     */
    public static final String SYMBOLO_APOSTROFE = "'";
    /**
     * Audit logger for this DAO.
     */
    protected static final Logger LOGGER = Logger.getLogger(BaseDAO.class.getName());
    /**
     * Object handling JPA persistence operations; injected by the container.
     */
    @PersistenceContext(name = "punit")
    private EntityManager em;
    /**
     * Default constructor.
     */
    public BaseDAO() {
    }
    /**
     * Returns a reference to the object handling the JPA persistence
     * operations.
     *
     * @return reference to the persistence manager. If the object has not
     *         been initialized, throws
     * @see java.lang.IllegalStateException
     */
    protected EntityManager getEntityManager() {
        if (em == null) {
            throw new IllegalStateException(
                    "EntityManager no ha sido asignado a DAO antes del uso.");
        } else {
            return em;
        }
    }
    /**
     * Executes a SQL statement on a connection obtained from the persistence
     * unit <b>punit</b>.<br/>
     * Do not use this method to execute SELECT statements.
     *
     * @param sentencia SQL statement to be executed.
     */
    public void ejecutarNativo(String sentencia) {
        PreparedStatement ps = null;
        try {
            // Use the accessor so an unassigned EntityManager fails fast with
            // a descriptive IllegalStateException instead of a bare NPE.
            java.sql.Connection connection = getEntityManager().unwrap(java.sql.Connection.class);
            ps = connection.prepareStatement(sentencia);
            ps.execute();
        } catch (PersistenceException e) {
            LOGGER.info("Error al ejecutar sentencia"+ e.getMessage());
        } catch (SQLException e) {
            LOGGER.info("Error al ejecutar sentencia"+ e.getMessage());
        } finally {
            // Always release the statement, even when execution failed;
            // the original code leaked it on any exception.
            if (ps != null) {
                try {
                    ps.close();
                } catch (SQLException e) {
                    LOGGER.info("Error al ejecutar sentencia"+ e.getMessage());
                }
            }
        }
    }
    /**
     * Wraps a character string in apostrophes.
     *
     * @param cadena the string
     * @return the string surrounded by apostrophes
     */
    protected String comillar(String cadena) {
        return SYMBOLO_APOSTROFE + cadena + SYMBOLO_APOSTROFE;
    }
}
| apache-2.0 |
dremio/dremio-oss | sabot/kernel/src/main/java/com/dremio/exec/planner/sql/parser/SqlTruncateTable.java | 3280 | /*
* Copyright (C) 2017-2019 Dremio Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.dremio.exec.planner.sql.parser;
import java.util.List;
import org.apache.calcite.sql.SqlCall;
import org.apache.calcite.sql.SqlIdentifier;
import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.SqlLiteral;
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.sql.SqlOperator;
import org.apache.calcite.sql.SqlSpecialOperator;
import org.apache.calcite.sql.SqlWriter;
import org.apache.calcite.sql.parser.SqlParserPos;
import com.dremio.service.namespace.NamespaceKey;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
/**
 * SQL parse-tree node for {@code TRUNCATE [TABLE] [IF EXISTS] <name>}
 * statements. Immutable once constructed.
 */
public class SqlTruncateTable extends SqlCall {
  public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("TRUNCATE_TABLE", SqlKind.OTHER_DDL) {
    @Override
    public SqlCall createCall(SqlLiteral functionQualifier, SqlParserPos pos, SqlNode... operands) {
      Preconditions.checkArgument(operands.length == 3, "SqlTruncateTable.createCall() " +
          "has to get 3 operands!");
      return new SqlTruncateTable(pos, (SqlIdentifier) operands[0], (SqlLiteral) operands[1], (SqlLiteral) operands[2]);
    }
  };
  // Fields are final: they are assigned exactly once, in the canonical constructor.
  /** Qualified name of the table to truncate. */
  private final SqlIdentifier tableName;
  /** Whether {@code IF EXISTS} was specified. */
  private final boolean tableExistenceCheck;
  /** Whether the optional {@code TABLE} keyword was present in the source SQL. */
  private final boolean tableKeywordPresent;
  public SqlTruncateTable(SqlParserPos pos, SqlIdentifier tableName, SqlLiteral tableExistenceCheck,
                          SqlLiteral tableKeywordPresent) {
    this(pos, tableName, tableExistenceCheck.booleanValue(), tableKeywordPresent.booleanValue());
  }
  public SqlTruncateTable(SqlParserPos pos, SqlIdentifier tableName, boolean tableExistenceCheck,
                          boolean tableKeywordPresent) {
    super(pos);
    this.tableName = tableName;
    this.tableExistenceCheck = tableExistenceCheck;
    this.tableKeywordPresent = tableKeywordPresent;
  }
  @Override
  public SqlOperator getOperator() {
    return OPERATOR;
  }
  @Override
  public List<SqlNode> getOperandList() {
    // Order must match createCall(): name, existence-check flag, keyword flag.
    return ImmutableList.of(
        tableName,
        SqlLiteral.createBoolean(tableExistenceCheck, SqlParserPos.ZERO),
        SqlLiteral.createBoolean(tableKeywordPresent, SqlParserPos.ZERO)
    );
  }
  @Override
  public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
    writer.keyword("TRUNCATE");
    if (tableKeywordPresent) {
      writer.keyword("TABLE");
    }
    if (tableExistenceCheck) {
      writer.keyword("IF");
      writer.keyword("EXISTS");
    }
    tableName.unparse(writer, leftPrec, rightPrec);
  }
  /** @return the table name as a namespace key. */
  public NamespaceKey getPath() {
    return new NamespaceKey(tableName.names);
  }
  /** @return whether {@code IF EXISTS} was specified. */
  public boolean checkTableExistence() {
    return tableExistenceCheck;
  }
}
| apache-2.0 |
bit-zyl/Alluxio-Nvdimm | core/server/src/test/java/alluxio/StorageTierAssocTest.java | 3480 | /*
* The Alluxio Open Foundation licenses this work under the Apache License, version 2.0
* (the "License"). You may not use this work except in compliance with the License, which is
* available at www.apache.org/licenses/LICENSE-2.0
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied, as more fully set forth in the License.
*
* See the NOTICE file distributed with this work for information regarding copyright ownership.
*/
package alluxio;
import org.junit.Assert;
import org.junit.Test;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
* Unit tests for {@link StorageTierAssoc}.
*/
public class StorageTierAssocTest {
  // Verifies that an association built from configuration maps every
  // configured alias to its ordinal and back, in configuration order.
  private void checkStorageTierAssoc(StorageTierAssoc assoc, PropertyKey levelsProperty,
      PropertyKeyFormat aliasFormat) {
    int size = Configuration.getInt(levelsProperty);
    Assert.assertEquals(size, assoc.size());
    List<String> expectedOrderedAliases = new ArrayList<>();
    for (int i = 0; i < size; i++) {
      String alias = Configuration.get(aliasFormat.format(i));
      // alias -> ordinal and ordinal -> alias must be inverse mappings.
      Assert.assertEquals(i, assoc.getOrdinal(alias));
      Assert.assertEquals(alias, assoc.getAlias(i));
      expectedOrderedAliases.add(alias);
    }
    Assert.assertEquals(expectedOrderedAliases, assoc.getOrderedStorageAliases());
  }
  /**
   * Tests the constructors of the {@link MasterStorageTierAssoc} and {@link WorkerStorageTierAssoc}
   * classes with a {@link Configuration}.
   */
  @Test
  public void masterWorkerConfConstructor() {
    // Configure 3 master levels and 2 worker levels, overriding the bottom alias.
    Configuration.set(PropertyKey.MASTER_TIERED_STORE_GLOBAL_LEVELS, "3");
    Configuration.set(
        PropertyKeyFormat.MASTER_TIERED_STORE_GLOBAL_LEVEL_ALIAS_FORMAT.format(2), "BOTTOM");
    Configuration.set(PropertyKey.WORKER_TIERED_STORE_LEVELS, "2");
    Configuration.set(
        PropertyKeyFormat.WORKER_TIERED_STORE_LEVEL_ALIAS_FORMAT.format(1), "BOTTOM");
    checkStorageTierAssoc(new MasterStorageTierAssoc(),
        PropertyKey.MASTER_TIERED_STORE_GLOBAL_LEVELS,
        PropertyKeyFormat.MASTER_TIERED_STORE_GLOBAL_LEVEL_ALIAS_FORMAT);
    checkStorageTierAssoc(new WorkerStorageTierAssoc(), PropertyKey.WORKER_TIERED_STORE_LEVELS,
        PropertyKeyFormat.WORKER_TIERED_STORE_LEVEL_ALIAS_FORMAT);
    // Restore global configuration so other tests are unaffected.
    ConfigurationTestUtils.resetConfiguration();
  }
  /**
   * Tests the constructors of the {@link MasterStorageTierAssoc} and {@link WorkerStorageTierAssoc}
   * classes with different storage alias.
   */
  @Test
  public void storageAliasListConstructor() {
    List<String> orderedAliases = Arrays.asList("MEM", "HDD", "SOMETHINGELSE", "SSD");
    MasterStorageTierAssoc masterAssoc = new MasterStorageTierAssoc(orderedAliases);
    WorkerStorageTierAssoc workerAssoc = new WorkerStorageTierAssoc(orderedAliases);
    Assert.assertEquals(orderedAliases.size(), masterAssoc.size());
    Assert.assertEquals(orderedAliases.size(), workerAssoc.size());
    // Both implementations must preserve the given alias order and expose
    // consistent alias <-> ordinal mappings.
    for (int i = 0; i < orderedAliases.size(); i++) {
      String alias = orderedAliases.get(i);
      Assert.assertEquals(alias, masterAssoc.getAlias(i));
      Assert.assertEquals(i, masterAssoc.getOrdinal(alias));
      Assert.assertEquals(alias, workerAssoc.getAlias(i));
      Assert.assertEquals(i, workerAssoc.getOrdinal(alias));
    }
    Assert.assertEquals(orderedAliases, masterAssoc.getOrderedStorageAliases());
    Assert.assertEquals(orderedAliases, workerAssoc.getOrderedStorageAliases());
  }
}
| apache-2.0 |
parshimers/incubator-asterixdb | asterix-maven-plugins/lexer-generator-maven-plugin/src/main/java/edu/uci/ics/asterix/lexergenerator/rules/RuleAnythingUntil.java | 2007 | /*
* Copyright 2009-2013 by The Regents of the University of California
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* you may obtain a copy of the License from
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package edu.uci.ics.asterix.lexergenerator.rules;
/**
 * Lexer-generator rule that consumes any run of characters — honouring
 * backslash escapes — up to (and including) a given terminating character.
 */
public class RuleAnythingUntil implements Rule {
    /** Character that terminates the run; never reassigned after construction. */
    private final char expected;

    public RuleAnythingUntil(char expected) {
        this.expected = expected;
    }

    /** @return a new rule instance with the same terminating character. */
    @Override
    public RuleAnythingUntil clone() {
        return new RuleAnythingUntil(expected);
    }

    @Override
    public String toString() {
        return " .* " + String.valueOf(expected);
    }

    @Override
    public int hashCode() {
        // Formula kept from the original implementation for stability.
        return 10 * (int) expected;
    }

    @Override
    public boolean equals(Object o) {
        // instanceof is false for null, so no separate null check is needed.
        return o instanceof RuleAnythingUntil
                && ((RuleAnythingUntil) o).expected == this.expected;
    }

    /** @return generated Java statement advancing past the matched character. */
    @Override
    public String javaAction() {
        return "currentChar = readNextChar();";
    }

    /**
     * Emits generated Java that scans until the unescaped terminator is found,
     * then runs {@code action}.
     *
     * @param action code to run once the terminator has been reached
     * @return the generated matching code
     */
    @Override
    public String javaMatch(String action) {
        return "boolean escaped = false;\n" + "while (currentChar != '" + expected + "' || escaped) {\n"
                + "if(!escaped && currentChar == '\\\\\\\\') {\n" + "escaped = true;\n" + "containsEscapes = true;\n"
                + "} else {\n" + "escaped = false;\n" + "}\n" + "currentChar = readNextChar();\n" + "}\n"
                + "if (currentChar == '" + expected + "') {" + action + "}\n";
    }
}
| apache-2.0 |
WengJunFeng/hackerrank_java | src/main/java/com/netwebx/hackerrank/rpc/client/RpcImporter.java | 2053 | package com.netwebx.hackerrank.rpc.client;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.net.InetSocketAddress;
import java.net.Socket;
/**
* Created by apple on 2017/2/26.
*/
public class RpcImporter<S> {
    /**
     * Creates a dynamic proxy for the first interface implemented by
     * {@code serviceClass}. Every method invocation on the proxy opens a new
     * socket to {@code addr}, writes the service name, method name, parameter
     * types and arguments, then reads back and returns the deserialized result.
     *
     * @param serviceClass class whose first implemented interface is proxied
     * @param addr         remote endpoint providing the service
     * @return proxy instance implementing the service interface
     */
    @SuppressWarnings("unchecked")
    public S importer(final Class<?> serviceClass, final InetSocketAddress addr) {
        return (S) Proxy.newProxyInstance(
                serviceClass.getClassLoader(),
                new Class<?>[]{serviceClass.getInterfaces()[0]},
                new InvocationHandler() {
                    @Override
                    public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
                        // try-with-resources closes input, output, then socket (in
                        // reverse order of creation) and never leaks a resource when
                        // an earlier close() throws — unlike the previous manual
                        // finally block, which closed the socket first and skipped
                        // the stream closes if socket.close() failed.
                        try (Socket socket = new Socket()) {
                            socket.connect(addr);
                            try (ObjectOutputStream output = new ObjectOutputStream(socket.getOutputStream())) {
                                // Wire format: service name, method name, parameter types, arguments.
                                output.writeUTF(serviceClass.getName());
                                output.writeUTF(method.getName());
                                output.writeObject(method.getParameterTypes());
                                output.writeObject(args);
                                try (ObjectInputStream input = new ObjectInputStream(socket.getInputStream())) {
                                    return input.readObject();
                                }
                            }
                        }
                    }
                }
        );
    }
}
| apache-2.0 |
aol/cyclops | cyclops/src/main/java/com/oath/cyclops/internal/stream/spliterators/push/GroupedByTimeOperator.java | 4567 | package com.oath.cyclops.internal.stream.spliterators.push;
import com.oath.cyclops.types.persistent.PersistentCollection;
import java.util.Collection;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Supplier;
/**
* Created by johnmcclean on 12/01/2017.
*/
/**
 * Operator that buffers upstream elements into time windows: elements are
 * accumulated into a collection supplied by {@code factory}, and each time
 * the window duration elapses the collection is converted with
 * {@code finalizer} and emitted downstream.
 */
public class GroupedByTimeOperator<T,C extends PersistentCollection<? super T>,R> extends BaseOperator<T,R> {

    // Supplies a fresh, empty collection for each new time window.
    private final Supplier<? extends C> factory;
    // Converts a completed window's collection into the value emitted downstream.
    private final Function<? super C, ? extends R> finalizer;
    // Window length, expressed in units of t.
    private final long time;
    // Time unit for the window length.
    private final TimeUnit t;

    public GroupedByTimeOperator(Operator<T> source, Supplier<? extends C> factory,
                                 Function<? super C, ? extends R> finalizer,long time,
                                 TimeUnit t){
        super(source);
        this.factory = factory;
        this.finalizer = finalizer;
        this.time = time;
        this.t = t;
    }

    @Override
    public StreamSubscription subscribe(Consumer<? super R> onNext, Consumer<? super Throwable> onError, Runnable onComplete) {
        // Window length in nanoseconds, to compare against System.nanoTime().
        long toRun = t.toNanos(time);
        // Single-element arrays give the lambdas below mutable state:
        // next[0] is the collection for the current window, start[0] its start time.
        PersistentCollection[] next = {factory.get()};
        long[] start ={System.nanoTime()};
        StreamSubscription[] upstream = {null};
        StreamSubscription sub = new StreamSubscription(){
            @Override
            public void request(long n) {
                if(n<=0) {
                    // Reactive Streams rule 3.9: non-positive demand is an error.
                    onError.accept(new IllegalArgumentException("3.9 While the Subscription is not cancelled, Subscription.request(long n) MUST throw a java.lang.IllegalArgumentException if the argument is <= 0."));
                    return;
                }
                if(!isOpen)
                    return;
                super.request(n);
                // Forward demand upstream; one upstream element is needed per request.
                upstream[0].request(n);
            }

            @Override
            public void cancel() {
                upstream[0].cancel();
                super.cancel();
            }
        };
        upstream[0] = source.subscribe(e-> {
                    try {
                        // Accumulate the element into the current window.
                        next[0] = next[0].plus(e);
                        if(System.nanoTime()-start[0] > toRun){
                            // Window elapsed: emit it, consume one unit of demand,
                            // and start a new window.
                            onNext.accept(finalizer.apply((C)next[0]));
                            sub.requested.decrementAndGet();
                            next[0] = factory.get();
                            start[0] = System.nanoTime();
                        }
                        else{
                            // Window still open: pull another element to keep filling it.
                            request( upstream,1l);
                        }
                    } catch (Throwable t) {
                        onError.accept(t);
                    }
                }
                ,t->{onError.accept(t);
                    // An error consumes the in-flight request; keep pulling if still active.
                    sub.requested.decrementAndGet();
                    if(sub.isActive())
                        request( upstream,1);
                },()->{
                    // Upstream complete: flush any partially filled window.
                    if(next[0].size()>0) {
                        try {
                            onNext.accept(finalizer.apply((C) next[0]));
                        } catch(Throwable t){
                            onError.accept(t);
                        }
                        sub.requested.decrementAndGet();
                    }
                    sub.cancel();
                    onComplete.run();
                });
        return sub;
    }

    /**
     * Unbounded-demand variant: no subscription bookkeeping, windows are
     * emitted as they elapse and the last partial window is flushed on complete.
     */
    @Override
    public void subscribeAll(Consumer<? super R> onNext, Consumer<? super Throwable> onError, Runnable onCompleteDs) {
        long toRun = t.toNanos(time);
        PersistentCollection[] next = {factory.get()};
        long[] start ={System.nanoTime()};
        source.subscribeAll(e-> {
                    try {
                        next[0] = next[0].plus(e);
                        if(System.nanoTime()-start[0] > toRun){
                            onNext.accept(finalizer.apply((C)next[0]));
                            next[0] = factory.get();
                            start[0] = System.nanoTime();
                        }
                    } catch (Throwable t) {
                        onError.accept(t);
                    }
                }
                ,onError,()->{
                    // Flush the final partial window, if any.
                    if(next[0].size()>0) {
                        try {
                            onNext.accept(finalizer.apply((C) next[0]));
                        } catch(Throwable t){
                            onError.accept(t);
                        }
                    }
                    onCompleteDs.run();
                });
    }
}
| apache-2.0 |
zhangxx0/Java_Topic_prictice | src/net/tcp/socket/Server.java | 1059 | package net.tcp.socket;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.ServerSocket;
import java.net.Socket;
/**
* 必须先启动服务器 后连接 1、创建服务器 指定端口 ServerSocket(int port) 2、接收客户端连接 3、发送数据+接收数据
*
*/
public class Server {
    /**
     * Starts a TCP greeting server on port 8888.
     * <p>
     * The server must be started before any client connects. It accepts
     * connections in an endless loop and sends each client a single
     * UTF-encoded welcome message.
     *
     * @param args unused
     * @throws IOException if the server socket cannot be opened or a client
     *                     connection fails
     */
    public static void main(String[] args) throws IOException {
        // 1. Create the server on the fixed port: ServerSocket(int port).
        try (ServerSocket server = new ServerSocket(8888)) {
            // 2. Accept client connections; accept() blocks until one arrives.
            while (true) {
                // try-with-resources closes the per-client socket and stream —
                // the original leaked both on every accepted connection.
                try (Socket socket = server.accept();
                     DataOutputStream dos = new DataOutputStream(socket.getOutputStream())) {
                    System.out.println("一个客户端建立连接");
                    // 3. Send the welcome message as modified-UTF data.
                    String msg = "欢迎使用";
                    dos.writeUTF(msg);
                    dos.flush();
                }
            }
        }
    }
}
| apache-2.0 |
HuangLS/neo4j | advanced/management/src/main/java/org/neo4j/management/impl/KernelProxy.java | 8016 | /*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.management.impl;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Hashtable;
import java.util.List;
import java.util.NoSuchElementException;
import javax.management.MBeanServerConnection;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectInstance;
import javax.management.ObjectName;
import org.neo4j.jmx.ManagementInterface;
/**
* Does not have any public methods - since the public interface of
* {@link org.neo4j.management.Neo4jManager} should be defined completely in
* that class.
*
* Does not have any (direct or transitive) dependencies on any part of the jmx
* component - since this class is used in
* {@link org.neo4j.management.impl.jconsole.Neo4jPlugin the JConsole plugin},
* and the jmx component is not on the class path in JConsole.
*
* @author Tobias Ivarsson <tobias.ivarsson@neotechnology.com>
*/
public abstract class KernelProxy
{
    // MBean class name that identifies a Neo4j Kernel bean.
    static final String KERNEL_BEAN_TYPE = "org.neo4j.jmx.Kernel";
    protected static final String KERNEL_BEAN_NAME = "Kernel";
    // Attribute on the kernel bean that yields the query pattern for all its MBeans.
    static final String MBEAN_QUERY = "MBeanQuery";
    protected final MBeanServerConnection server;
    protected final ObjectName kernel;

    /**
     * Validates that {@code kernel} really names a Neo4j Kernel bean on
     * {@code server} before storing both.
     *
     * @throws IllegalArgumentException if the ObjectName does not resolve to a
     *         bean of type {@link #KERNEL_BEAN_TYPE}
     */
    protected KernelProxy( MBeanServerConnection server, ObjectName kernel )
    {
        String className = null;
        try
        {
            className = server.getMBeanInfo( kernel ).getClassName();
        }
        catch ( Exception e )
        {
            // fall through: className stays null and the check below fails
        }
        if ( !KERNEL_BEAN_TYPE.equals( className ) )
        {
            throw new IllegalArgumentException(
                    "The specified ObjectName does not represent a Neo4j Kernel bean in the specified MBean server." );
        }
        this.server = server;
        this.kernel = kernel;
    }

    /**
     * Loads proxies for every MBean belonging to this kernel whose class can
     * be resolved locally. Beans whose class cannot be loaded or proxied are
     * silently skipped; on connection failure an empty list is returned.
     */
    protected List<Object> allBeans()
    {
        List<Object> beans = new ArrayList<Object>();
        Iterable<ObjectInstance> mbeans;
        try
        {
            mbeans = server.queryMBeans( mbeanQuery(), null );
        }
        catch ( IOException handled )
        {
            return beans;
        }
        for ( ObjectInstance instance : mbeans )
        {
            String className = instance.getClassName();
            Class<?> beanType = null;
            try
            {
                if ( className != null ) beanType = Class.forName( className );
            }
            catch ( Exception ignored )
            {
                // fall through: bean type not available on this classpath
            }
            catch ( LinkageError ignored )
            {
                // fall through
            }
            if ( beanType != null )
            {
                try
                {
                    beans.add( BeanProxy.load( server, beanType, instance.getObjectName() ) );
                }
                catch ( Exception ignored )
                {
                    // fall through: skip beans that cannot be proxied
                }
            }
        }
        return beans;
    }

    /**
     * Returns {@code name} if at least one MBean matches it.
     *
     * @throws NoSuchElementException if nothing matches (or the query fails)
     */
    private ObjectName assertExists( ObjectName name )
    {
        try
        {
            if ( !server.queryNames( name, null ).isEmpty() )
            {
                return name;
            }
        }
        catch ( IOException handled )
        {
            // fall through to the NoSuchElementException below
        }
        throw new NoSuchElementException( "No MBeans matching " + name );
    }

    /** Loads the single management bean implementing {@code beanInterface}. */
    protected <T> T getBean( Class<T> beanInterface )
    {
        return BeanProxy.load( server, beanInterface, createObjectName( beanInterface ) );
    }

    /** Loads all management beans implementing {@code beanInterface}. */
    protected <T> Collection<T> getBeans( Class<T> beanInterface )
    {
        return BeanProxy.loadAll( server, beanInterface, createObjectNameQuery( beanInterface ) );
    }

    private ObjectName createObjectNameQuery( Class<?> beanInterface )
    {
        return createObjectNameQuery( mbeanQuery(), beanInterface );
    }

    private ObjectName createObjectName( Class<?> beanInterface )
    {
        return assertExists( createObjectName( mbeanQuery(), beanInterface ) );
    }

    protected ObjectName createObjectName( String beanName )
    {
        return assertExists( createObjectName( mbeanQuery(), beanName, false ) );
    }

    /**
     * Fetches the ObjectName query pattern for this kernel's beans from the
     * kernel bean's {@link #MBEAN_QUERY} attribute.
     *
     * @throws IllegalStateException if the attribute cannot be read
     */
    protected ObjectName mbeanQuery()
    {
        try
        {
            return (ObjectName) server.getAttribute( kernel, MBEAN_QUERY );
        }
        catch ( Exception cause )
        {
            throw new IllegalStateException( "Could not get MBean query.", cause );
        }
    }

    protected static ObjectName createObjectName( String kernelIdentifier, Class<?> beanInterface )
    {
        return createObjectName( kernelIdentifier, beanName( beanInterface ) );
    }

    /** Builds an exact ObjectName in the org.neo4j domain for the given kernel instance. */
    protected static ObjectName createObjectName( String kernelIdentifier, String beanName, String... extraNaming )
    {
        Hashtable<String, String> properties = new Hashtable<String, String>();
        properties.put( "instance", "kernel#" + kernelIdentifier );
        return createObjectName( "org.neo4j", properties, beanName, false, extraNaming );
    }

    /** Same as above but produces a wildcard query pattern instead of an exact name. */
    static ObjectName createObjectNameQuery( String kernelIdentifier, String beanName, String... extraNaming )
    {
        Hashtable<String, String> properties = new Hashtable<String, String>();
        properties.put( "instance", "kernel#" + kernelIdentifier );
        return createObjectName( "org.neo4j", properties, beanName, true, extraNaming );
    }

    static ObjectName createObjectName( ObjectName query, Class<?> beanInterface )
    {
        return createObjectName( query, beanName( beanInterface ), false );
    }

    static ObjectName createObjectNameQuery( ObjectName query, Class<?> beanInterface )
    {
        return createObjectName( query, beanName( beanInterface ), true );
    }

    /** Derives a name (or query) from an existing query's domain and properties. */
    private static ObjectName createObjectName( ObjectName query, String beanName, boolean isQuery )
    {
        Hashtable<String, String> properties = new Hashtable<String, String>(query.getKeyPropertyList());
        return createObjectName( query.getDomain(), properties, beanName, isQuery );
    }

    /**
     * Resolves the management bean name for an interface from its
     * {@link ManagementInterface} annotation.
     *
     * @throws IllegalArgumentException if the interface is not annotated
     */
    static String beanName( Class<?> beanInterface )
    {
        if ( beanInterface.isInterface() )
        {
            ManagementInterface management = beanInterface.getAnnotation( ManagementInterface.class );
            if ( management != null )
            {
                return management.name();
            }
        }
        throw new IllegalArgumentException( beanInterface + " is not a Neo4j Management Been interface" );
    }

    /**
     * Assembles the final ObjectName from domain, key properties, bean name
     * and optional extra naming parts; appending ",*" turns it into a query.
     * Returns null if the resulting name would be malformed.
     */
    private static ObjectName createObjectName( String domain, Hashtable<String, String> properties, String beanName,
            boolean query, String... extraNaming )
    {
        properties.put( "name", beanName );
        for ( int i = 0; i < extraNaming.length; i++ )
        {
            properties.put( "name" + i, extraNaming[i] );
        }
        ObjectName result;
        try
        {
            result = new ObjectName( domain, properties );
            if ( query ) result = ObjectName.getInstance( result.toString() + ",*" );
        }
        catch ( MalformedObjectNameException e )
        {
            return null;
        }
        return result;
    }
}
| apache-2.0 |
mhus/mhus-inka | de.mhus.hair/hair3/de.mhus.cha.app/src/de/mhus/cha/cao/action/CopyToOperation.java | 3411 | package de.mhus.cha.cao.action;
import java.io.File;
import de.mhus.lib.cao.CaoElement;
import de.mhus.lib.cao.CaoException;
import de.mhus.lib.cao.CaoList;
import de.mhus.lib.cao.CaoMonitor;
import de.mhus.lib.cao.CaoOperation;
import de.mhus.cap.core.Access;
import de.mhus.cha.cao.ChaConnection;
import de.mhus.cha.cao.ChaElement;
import de.mhus.lib.MFile;
import de.mhus.lib.form.MForm;
import de.mhus.lib.form.annotations.FormElement;
import de.mhus.lib.form.annotations.FormSortId;
@FormElement("name='cha_copy_to_folder' title='Copy'")
public class CopyToOperation extends CaoOperation implements MForm {

    // Elements selected by the user to be copied.
    private CaoList<Access> sources;
    // Destination folder element.
    private ChaElement target;
    private ChaConnection connection;

    public CopyToOperation(ChaElement ChaElement) {
        target = ChaElement;
    }

    @Override
    public void dispose() throws CaoException {
    }

    /**
     * Copies every source element (recursively) into the target folder.
     * First counts the affected directories so the monitor can show progress,
     * then performs the actual copy.
     */
    @Override
    public void execute() throws CaoException {
        connection = (ChaConnection) target.getConnection();
        // collect all affected entries
        monitor.beginTask("count", CaoMonitor.UNKNOWN);
        int cnt = 0;
        for (CaoElement<Access> element : sources.getElements()) {
            cnt = count(((ChaElement) element).getFile(), cnt);
        }
        monitor.beginTask("copy", cnt);
        cnt = 0;
        for (CaoElement<Access> element : sources.getElements()) {
            cnt = copy(target.getFile(), ((ChaElement) element).getFile(), cnt);
        }
    }

    /**
     * Recursively copies directory {@code file} into {@code target} under a
     * freshly generated UID, registering the new folder with the connection
     * and firing the corresponding events.
     *
     * @param target parent directory receiving the copy
     * @param file   source directory to copy
     * @param cnt    running progress counter
     * @return updated progress counter
     */
    private int copy(File target, File file, int cnt) {
        // validate action
        if (monitor.isCanceled()) return cnt;
        if (!file.isDirectory()) return cnt; // for secure
        // new path
        File newTarget = null;
        cnt++;
        monitor.worked(cnt);
        newTarget = new File(target, connection.createUID());
        monitor.log().debug("Create Dir: " + newTarget.getAbsolutePath());
        monitor.subTask(file.getAbsolutePath());
        // validate path
        if (newTarget.exists()) {
            monitor.log().warn("Folder already exists: " + newTarget.getAbsolutePath());
            return cnt;
        }
        // create
        if (!newTarget.mkdir()) {
            newTarget = null;
            monitor.log().warn("Can't create folder: " + target.getAbsolutePath() + "/" + file.getName());
            return cnt;
        }
        // set id
        connection.addIdPath(newTarget.getName(), newTarget.getAbsolutePath());
        // events
        connection.fireElementCreated(newTarget.getName());
        connection.fireElementLink(target.getName(), newTarget.getName());
        // copy the plain files of this directory into the newly created folder.
        // BUG FIX: the original copied `file` (the directory itself) into the
        // old `target` instead of copying each child `sub` into `newTarget`,
        // so no file ever ended up inside the new folder.
        for (File sub : file.listFiles()) {
            if (sub.isFile()) {
                monitor.log().debug("Copy File: " + sub.getAbsolutePath());
                File targetFile = new File(newTarget, sub.getName());
                if (targetFile.exists()) {
                    monitor.log().warn("Can't overwrite file: " + sub.getAbsolutePath());
                } else if (!MFile.copyFile(sub, targetFile)) {
                    monitor.log().warn("Can't copy file: " + sub.getAbsolutePath());
                }
            }
        }
        // copy sub folders
        for (File sub : file.listFiles(connection.getDefaultFileFilter())) {
            cnt = copy(newTarget, sub, cnt);
        }
        return cnt;
    }

    /**
     * Counts the directories below {@code file} (including itself) that the
     * copy will visit, honoring the connection's file filter.
     */
    private int count(File file, int cnt) {
        if (monitor.isCanceled()) return cnt;
        if (file.isDirectory()) cnt++;
        if (!file.isDirectory()) return cnt; // for secure
        for (File sub : file.listFiles(connection.getDefaultFileFilter())) {
            cnt = count(sub, cnt);
        }
        return cnt;
    }

    @Override
    public void initialize() throws CaoException {
    }

    public void setSources(CaoList<Access> list) {
        sources = list;
    }
}
| apache-2.0 |
ohadshacham/phoenix | phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java | 10075 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.phoenix.util.csv;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.sql.Types;
import java.util.Base64;
import java.util.List;
import java.util.Properties;
import javax.annotation.Nullable;
import org.apache.commons.csv.CSVRecord;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.expression.function.EncodeFormat;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.query.QueryServicesOptions;
import org.apache.phoenix.schema.IllegalDataException;
import org.apache.phoenix.schema.types.PBinary;
import org.apache.phoenix.schema.types.PBoolean;
import org.apache.phoenix.schema.types.PDataType;
import org.apache.phoenix.schema.types.PDataType.PDataCodec;
import org.apache.phoenix.schema.types.PTimestamp;
import org.apache.phoenix.schema.types.PVarbinary;
import org.apache.phoenix.util.ColumnInfo;
import org.apache.phoenix.util.DateUtil;
import org.apache.phoenix.util.ReadOnlyProps;
import org.apache.phoenix.util.UpsertExecutor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Function;
/** {@link UpsertExecutor} over {@link CSVRecord}s. */
/** {@link UpsertExecutor} over {@link CSVRecord}s. */
public class CsvUpsertExecutor extends UpsertExecutor<CSVRecord, String> {

    private static final Logger LOG = LoggerFactory.getLogger(CsvUpsertExecutor.class);

    // Separator used to split array-typed CSV fields into elements.
    protected final String arrayElementSeparator;

    /** Testing constructor. Do not use in prod. */
    @VisibleForTesting
    protected CsvUpsertExecutor(Connection conn, List<ColumnInfo> columnInfoList,
            PreparedStatement stmt, UpsertListener<CSVRecord> upsertListener,
            String arrayElementSeparator) {
        super(conn, columnInfoList, stmt, upsertListener);
        this.arrayElementSeparator = arrayElementSeparator;
        finishInit();
    }

    public CsvUpsertExecutor(Connection conn, String tableName,
            List<ColumnInfo> columnInfoList, UpsertListener<CSVRecord> upsertListener,
            String arrayElementSeparator) {
        super(conn, tableName, columnInfoList, upsertListener);
        this.arrayElementSeparator = arrayElementSeparator;
        finishInit();
    }

    /**
     * Converts one CSV record field-by-field and executes the upsert.
     * Conversion or execution failures are reported to the listener rather
     * than propagated, so a bad record does not abort the whole load.
     */
    @Override
    protected void execute(CSVRecord csvRecord) {
        try {
            if (csvRecord.size() < conversionFunctions.size()) {
                String message = String.format("CSV record does not have enough values (has %d, but needs %d)",
                        csvRecord.size(), conversionFunctions.size());
                throw new IllegalArgumentException(message);
            }
            for (int fieldIndex = 0; fieldIndex < conversionFunctions.size(); fieldIndex++) {
                Object sqlValue = conversionFunctions.get(fieldIndex).apply(csvRecord.get(fieldIndex));
                if (sqlValue != null) {
                    preparedStatement.setObject(fieldIndex + 1, sqlValue);
                } else {
                    // Empty/absent CSV value maps to SQL NULL of the column's type.
                    preparedStatement.setNull(fieldIndex + 1, dataTypes.get(fieldIndex).getSqlType());
                }
            }
            preparedStatement.execute();
            upsertListener.upsertDone(++upsertCount);
        } catch (Exception e) {
            if (LOG.isDebugEnabled()) {
                // Even though this is an error we only log it with debug logging because we're notifying the
                // listener, and it can do its own logging if needed
                LOG.debug("Error on CSVRecord " + csvRecord, e);
            }
            upsertListener.errorOnRecord(csvRecord, e);
        }
    }

    /**
     * Chooses a String-to-SQL-value converter for the given column type:
     * array types split on the configured separator, everything else goes
     * through the simple scalar conversion.
     */
    @Override
    protected Function<String, Object> createConversionFunction(PDataType dataType) {
        if (dataType.isArrayType()) {
            return new ArrayDatatypeConversionFunction(
                    new StringToArrayConverter(
                            conn,
                            arrayElementSeparator,
                            PDataType.fromTypeId(dataType.getSqlType() - PDataType.ARRAY_TYPE_BASE)));
        } else {
            return new SimpleDatatypeConversionFunction(dataType, this.conn);
        }
    }

    /**
     * Performs typed conversion from String values to a given column value type.
     */
    static class SimpleDatatypeConversionFunction implements Function<String, Object> {

        private final PDataType dataType;
        // Codec used to encode parsed date/time values; null for non-temporal types.
        private final PDataCodec codec;
        // Parser configured from the connection's date/time/timestamp format
        // properties; null for non-temporal types.
        private final DateUtil.DateTimeParser dateTimeParser;
        // Encoding ("BASE64" or "ASCII") used for binary columns.
        private final String binaryEncoding;

        SimpleDatatypeConversionFunction(PDataType dataType, Connection conn) {
            ReadOnlyProps props;
            try {
                props = conn.unwrap(PhoenixConnection.class).getQueryServices().getProps();
            } catch (SQLException e) {
                throw new RuntimeException(e);
            }
            this.dataType = dataType;
            PDataCodec codec = dataType.getCodec();
            if(dataType.isCoercibleTo(PTimestamp.INSTANCE)) {
                codec = DateUtil.getCodecFor(dataType);
                // TODO: move to DateUtil
                // Pick the format property matching the column's SQL date/time flavor.
                String dateFormat;
                int dateSqlType = dataType.getResultSetSqlType();
                if (dateSqlType == Types.DATE) {
                    dateFormat = props.get(QueryServices.DATE_FORMAT_ATTRIB,
                            DateUtil.DEFAULT_DATE_FORMAT);
                } else if (dateSqlType == Types.TIME) {
                    dateFormat = props.get(QueryServices.TIME_FORMAT_ATTRIB,
                            DateUtil.DEFAULT_TIME_FORMAT);
                } else {
                    dateFormat = props.get(QueryServices.TIMESTAMP_FORMAT_ATTRIB,
                            DateUtil.DEFAULT_TIMESTAMP_FORMAT);
                }
                String timeZoneId = props.get(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB,
                        QueryServicesOptions.DEFAULT_DATE_FORMAT_TIMEZONE);
                this.dateTimeParser = DateUtil.getDateTimeParser(dateFormat, dataType, timeZoneId);
            } else {
                this.dateTimeParser = null;
            }
            this.codec = codec;
            this.binaryEncoding = props.get(QueryServices.UPLOAD_BINARY_DATA_TYPE_ENCODING,
                    QueryServicesOptions.DEFAULT_UPLOAD_BINARY_DATA_TYPE_ENCODING);
        }

        /**
         * Converts a single CSV field to the column's object representation.
         * Returns null for null/empty input (mapped to SQL NULL by the caller).
         * @throws RuntimeException / IllegalDataException on unparseable input
         */
        @Nullable
        @Override
        public Object apply(@Nullable String input) {
            if (input == null || input.isEmpty()) {
                return null;
            }
            if (dataType == PTimestamp.INSTANCE) {
                return DateUtil.parseTimestamp(input);
            }
            if (dateTimeParser != null) {
                // Parse to epoch time, then encode through the codec so the
                // value matches the column's storage representation.
                long epochTime = dateTimeParser.parseDateTime(input);
                byte[] byteValue = new byte[dataType.getByteSize()];
                codec.encodeLong(epochTime, byteValue, 0);
                return dataType.toObject(byteValue);
            } else if (dataType == PBoolean.INSTANCE) {
                // NOTE(review): toLowerCase() uses the default locale — TODO confirm
                // this is acceptable for locales with special casing (e.g. Turkish).
                switch (input.toLowerCase()) {
                    case "true":
                    case "t":
                    case "1":
                        return Boolean.TRUE;
                    case "false":
                    case "f":
                    case "0":
                        return Boolean.FALSE;
                    default:
                        throw new RuntimeException("Invalid boolean value: '" + input
                                + "', must be one of ['true','t','1','false','f','0']");
                }
            }else if (dataType == PVarbinary.INSTANCE || dataType == PBinary.INSTANCE){
                EncodeFormat format = EncodeFormat.valueOf(binaryEncoding.toUpperCase());
                Object object = null;
                switch (format) {
                    case BASE64:
                        object = Base64.getDecoder().decode(input);
                        if (object == null) { throw new IllegalDataException(
                                "Input: [" + input + "] is not base64 encoded"); }
                        break;
                    case ASCII:
                        object = Bytes.toBytes(input);
                        break;
                    default:
                        throw new IllegalDataException("Unsupported encoding \"" + binaryEncoding + "\"");
                }
                return object;
            }
            // All remaining types use the type's own String parsing.
            return dataType.toObject(input);
        }
    }

    /**
     * Converts string representations of arrays into Phoenix arrays of the correct type.
     */
    private static class ArrayDatatypeConversionFunction implements Function<String, Object> {

        private final StringToArrayConverter arrayConverter;

        private ArrayDatatypeConversionFunction(StringToArrayConverter arrayConverter) {
            this.arrayConverter = arrayConverter;
        }

        @Nullable
        @Override
        public Object apply(@Nullable String input) {
            try {
                return arrayConverter.toArray(input);
            } catch (SQLException e) {
                throw new RuntimeException(e);
            }
        }
    }
}
| apache-2.0 |
mrietveld/drools | drools-reteoo/src/main/java/org/drools/reteoo/common/ReteWorkingMemory.java | 9985 | /*
* Copyright 2015 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.drools.reteoo.common;
import org.drools.core.SessionConfiguration;
import org.drools.core.WorkingMemoryEntryPoint;
import org.drools.core.base.DroolsQuery;
import org.drools.core.common.BaseNode;
import org.drools.core.common.InternalAgenda;
import org.drools.core.common.InternalFactHandle;
import org.drools.core.common.InternalWorkingMemory;
import org.drools.core.common.WorkingMemoryAction;
import org.drools.core.event.AgendaEventSupport;
import org.drools.core.event.RuleEventListenerSupport;
import org.drools.core.event.RuleRuntimeEventSupport;
import org.drools.core.impl.InternalKnowledgeBase;
import org.drools.core.impl.StatefulKnowledgeSessionImpl;
import org.drools.core.phreak.PropagationEntry;
import org.drools.core.reteoo.LIANodePropagation;
import org.drools.core.spi.FactHandleFactory;
import org.drools.core.spi.PropagationContext;
import org.kie.api.runtime.Environment;
import org.kie.api.runtime.rule.AgendaFilter;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicBoolean;
public class ReteWorkingMemory extends StatefulKnowledgeSessionImpl {

    // Propagations for left input adapter nodes, replayed in sequential mode
    // before rules fire.
    private List<LIANodePropagation> liaPropagations;

    // Queue of deferred working-memory actions, drained by executeQueuedActionsForRete().
    private Queue<WorkingMemoryAction> actionQueue;

    // Guards against re-entrant draining of the action queue.
    private AtomicBoolean evaluatingActionQueue = new AtomicBoolean(false);

    /** Flag to determine if a rule is currently being fired. */
    private volatile AtomicBoolean firing = new AtomicBoolean(false);

    public ReteWorkingMemory() {
    }

    public ReteWorkingMemory(long id, InternalKnowledgeBase kBase) {
        super(id, kBase);
    }

    public ReteWorkingMemory(long id, InternalKnowledgeBase kBase, boolean initInitFactHandle, SessionConfiguration config, Environment environment) {
        super(id, kBase, initInitFactHandle, config, environment);
    }

    public ReteWorkingMemory(long id, InternalKnowledgeBase kBase, FactHandleFactory handleFactory, long propagationContext, SessionConfiguration config, InternalAgenda agenda, Environment environment) {
        super(id, kBase, handleFactory, propagationContext, config, agenda, environment);
    }

    public ReteWorkingMemory(long id, InternalKnowledgeBase kBase, FactHandleFactory handleFactory, InternalFactHandle initialFactHandle, long propagationContext, SessionConfiguration config, Environment environment, RuleRuntimeEventSupport workingMemoryEventSupport, AgendaEventSupport agendaEventSupport, RuleEventListenerSupport ruleEventListenerSupport, InternalAgenda agenda) {
        super(id, kBase, handleFactory, false, propagationContext, config, environment, workingMemoryEventSupport, agendaEventSupport, ruleEventListenerSupport, agenda);
    }

    @Override
    protected void init() {
        this.actionQueue = new ConcurrentLinkedQueue<WorkingMemoryAction>();
        this.propagationList = new RetePropagationList(this);
    }

    @Override
    public void reset() {
        super.reset();
        actionQueue.clear();
    }

    @Override
    public void reset(int handleId,
                      long handleCounter,
                      long propagationCounter) {
        super.reset(handleId, handleCounter, propagationCounter );
        if (liaPropagations != null) liaPropagations.clear();
        actionQueue.clear();
    }

    /** Wraps the named entry point so Rete-specific behavior is applied; null if unknown. */
    @Override
    public WorkingMemoryEntryPoint getWorkingMemoryEntryPoint(String name) {
        WorkingMemoryEntryPoint ep = this.entryPoints.get(name);
        return ep != null ? new ReteWorkingMemoryEntryPoint( this, ep ) : null;
    }

    public void addLIANodePropagation(LIANodePropagation liaNodePropagation) {
        if (liaPropagations == null) liaPropagations = new ArrayList<LIANodePropagation>();
        liaPropagations.add( liaNodePropagation );
    }

    private final Object syncLock = new Object();

    /**
     * Lazily asserts the InitialFact using double-checked locking so
     * concurrent callers initialize it exactly once.
     */
    public void initInitialFact() {
        if ( initialFactHandle == null ) {
            synchronized ( syncLock ) {
                if ( initialFactHandle == null ) {
                    // double check, inside of sync point incase some other thread beat us to it.
                    initInitialFact(kBase, null);
                }
            }
        }
    }

    @Override
    public void fireUntilHalt(final AgendaFilter agendaFilter) {
        initInitialFact();
        super.fireUntilHalt( agendaFilter );
    }

    /**
     * Fires matching rules up to fireLimit. The firing flag prevents
     * re-entrant firing; returns 0 if a firing cycle is already in progress.
     */
    @Override
    public int fireAllRules(final AgendaFilter agendaFilter,
                            int fireLimit) {
        checkAlive();
        if ( this.firing.compareAndSet( false,
                                        true ) ) {
            initInitialFact();
            try {
                startOperation();
                return internalFireAllRules(agendaFilter, fireLimit);
            } finally {
                endOperation();
                this.firing.set( false );
            }
        }
        return 0;
    }

    /**
     * Performs one firing pass under the knowledge-base read lock, then
     * recurses if flushing pending kbase modifications produced more work.
     */
    private int internalFireAllRules(AgendaFilter agendaFilter, int fireLimit) {
        int fireCount = 0;
        try {
            kBase.readLock();

            // If we're already firing a rule, then it'll pick up the firing for any other assertObject(..) that get
            // nested inside, avoiding concurrent-modification exceptions, depending on code paths of the actions.
            if ( liaPropagations != null && isSequential() ) {
                for ( LIANodePropagation liaPropagation : liaPropagations ) {
                    ( liaPropagation ).doPropagation( this );
                }
            }

            // do we need to call this in advance?
            executeQueuedActionsForRete();

            fireCount = this.agenda.fireAllRules( agendaFilter,
                                                  fireLimit );
        } finally {
            kBase.readUnlock();
            // Flushing may enqueue new activations; fire again until stable.
            if (kBase.flushModifications()) {
                fireCount += internalFireAllRules(agendaFilter, fireLimit);
            }
        }
        return fireCount;
    }

    /**
     * Retracts a live query's fact handle and destroys it, under the session
     * lock and kbase read lock.
     */
    @Override
    public void closeLiveQuery(final InternalFactHandle factHandle) {

        try {
            startOperation();
            this.kBase.readLock();
            this.lock.lock();

            final PropagationContext pCtx = pctxFactory.createPropagationContext(getNextPropagationIdCounter(), PropagationContext.INSERTION,
                                                                                 null, null, factHandle, getEntryPoint());

            getEntryPointNode().retractQuery( factHandle,
                                              pCtx,
                                              this );

            pCtx.evaluateActionQueue(this);

            getFactHandleFactory().destroyFactHandle( factHandle );

        } finally {
            this.lock.unlock();
            this.kBase.readUnlock();
            endOperation();
        }
    }

    /**
     * Asserts the query fact and returns the terminal nodes for the named
     * query. No retraction needed here since no left memory is used.
     */
    @Override
    protected BaseNode[] evalQuery(String queryName, DroolsQuery queryObject, InternalFactHandle handle, PropagationContext pCtx) {
        initInitialFact();

        BaseNode[] tnodes = kBase.getReteooBuilder().getTerminalNodesForQuery( queryName );
        // no need to call retract, as no leftmemory used.
        getEntryPointNode().assertQuery( handle,
                                         pCtx,
                                         this );

        pCtx.evaluateActionQueue( this );
        return tnodes;
    }

    public Collection<WorkingMemoryAction> getActionQueue() {
        return actionQueue;
    }

    /** Enqueues an action and wakes any thread waiting for work. */
    @Override
    public void queueWorkingMemoryAction(final WorkingMemoryAction action) {
        try {
            startOperation();
            actionQueue.add(action);
            notifyWaitOnRest();
        } finally {
            endOperation();
        }
    }

    // WorkingMemoryActions go on the local queue; everything else is
    // delegated to the default propagation mechanism.
    public void addPropagation(PropagationEntry propagationEntry) {
        if (propagationEntry instanceof WorkingMemoryAction) {
            actionQueue.add((WorkingMemoryAction) propagationEntry);
        } else {
            super.addPropagation(propagationEntry);
        }
    }

    /**
     * Drains the action queue. The evaluatingActionQueue flag ensures only
     * one thread (and no re-entrant call) drains at a time.
     */
    @Override
    public void executeQueuedActionsForRete() {
        try {
            startOperation();
            if ( evaluatingActionQueue.compareAndSet( false,
                                                      true ) ) {
                try {
                    if ( actionQueue!= null && !actionQueue.isEmpty() ) {
                        WorkingMemoryAction action;

                        while ( (action = actionQueue.poll()) != null ) {
                            try {
                                action.execute( (InternalWorkingMemory) this );
                            } catch ( Exception e ) {
                                throw new RuntimeException( "Unexpected exception executing action " + action.toString(),
                                                            e );
                            }
                        }
                    }
                } finally {
                    evaluatingActionQueue.compareAndSet( true,
                                                         false );
                }
            }
        } finally {
            endOperation();
        }
    }

    @Override
    public Iterator<? extends PropagationEntry> getActionsIterator() {
        return actionQueue.iterator();
    }

}
| apache-2.0 |
hashiwa000/Elasticsearch-Auth-Plugin | src/jp/hashiwa/elasticsearch/authplugin/AuthRestHandler.java | 2877 | package jp.hashiwa.elasticsearch.authplugin;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.rest.*;
import java.util.*;
import java.util.regex.Pattern;
import java.util.stream.Stream;
public class AuthRestHandler implements RestHandler {

    private final Logger logger = Loggers.getLogger(AuthRestHandler.class);

    // Handler that processes the request once it has passed authentication.
    private final RestHandler originalHandler;

    // Empty 401 response sent when a request fails the auth check.
    private final RestResponse unauthorizedResponse = new RestResponse() {
        @Override
        public String contentType() {
            return "application/json";
        }

        @Override
        public BytesReference content() {
            return new BytesArray("");
        }

        @Override
        public RestStatus status() {
            return RestStatus.UNAUTHORIZED;
        }
    };

    // Path patterns that require authentication, keyed by HTTP method.
    // The null key applies to all methods.
    // BUG FIX: the original stored Stream<Pattern> values, but a Stream is
    // single-use — the second request for the same method would throw
    // IllegalStateException ("stream has already been operated upon or
    // closed") from anyMatch. Lists can be traversed any number of times.
    private final Map<RestRequest.Method, List<Pattern>> authPatterns = new HashMap<RestRequest.Method, List<Pattern>>() {
        {
            this.put(RestRequest.Method.POST, Collections.singletonList(
                    Pattern.compile("^/testindex(/.*)?$")
            ));
            this.put(RestRequest.Method.PUT, Collections.singletonList(
                    Pattern.compile("^/testindex(/.*)?$")
            ));
            // all methods
            this.put(null, Collections.singletonList(
                    Pattern.compile("^/adminindex(/.*)?$")
            ));
        }
    };

    AuthRestHandler(RestHandler restHandler) {
        this.originalHandler = restHandler;
    }

    /**
     * Delegates to the wrapped handler when the request passes the auth
     * check, otherwise answers with the 401 response.
     */
    @Override
    public void handleRequest(RestRequest restRequest, RestChannel restChannel, NodeClient nodeClient) throws Exception {
        this.logger.debug(restRequest.path());
        this.logger.debug(restRequest.rawPath());

        if (isOk(restRequest)) {
            this.originalHandler.handleRequest(restRequest, restChannel, nodeClient);
        } else {
            restChannel.sendResponse(unauthorizedResponse);
        }
    }

    /** Returns true if any pattern registered for {@code method} matches {@code path}. */
    private boolean needAuth(RestRequest.Method method, String path) {
        List<Pattern> patterns = authPatterns.get(method);
        if (patterns == null) {
            return false;
        }
        for (Pattern p : patterns) {
            if (p.matcher(path).matches()) {
                return true;
            }
        }
        return false;
    }

    /**
     * A request is OK when its path needs no authentication, or when it
     * carries a "user: admin" header.
     */
    private boolean isOk(RestRequest restRequest) {
        RestRequest.Method method = restRequest.method();
        String path = restRequest.path(); // use rawpath() ?

        boolean needAuth = needAuth(method, path)
                || needAuth(null, path);

        if (!needAuth) {
            return true;
        }

        for (java.util.Map.Entry<String, String> entry : restRequest.headers()) {
            String key = entry.getKey();
            String value = entry.getValue();
            if (key.equals("user") && value.equals("admin")) {
                return true;
            }
        }
        return false;

        // ES 5.4
        // return restRequest.getHeaders().get("user").equals("admin");
    }
}
| apache-2.0 |
s4/core | src/main/java/io/s4/persist/ConMapPersister.java | 5403 | /*
* Copyright (c) 2010 Yahoo! Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the
* License. See accompanying LICENSE file.
*/
package io.s4.persist;
import io.s4.util.clock.Clock;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.log4j.Logger;
public class ConMapPersister implements Persister {
private AtomicInteger persistCount = new AtomicInteger(0);
private boolean selfClean = false;
private int cleanWaitTime = 40; // 20 seconds by default
private String loggerName = "s4";
ConcurrentHashMap<String, CacheEntry> cache;
Clock s4Clock;
private int startCapacity = 5000;
public void setStartCapacity(int startCapacity) {
this.startCapacity = startCapacity;
}
public int getStartCapacity() {
return startCapacity;
}
public void setSelfClean(boolean selfClean) {
this.selfClean = selfClean;
}
public void setCleanWaitTime(int cleanWaitTime) {
this.cleanWaitTime = cleanWaitTime;
}
public void setLoggerName(String loggerName) {
this.loggerName = loggerName;
}
public ConMapPersister(Clock s4Clock) {
this.s4Clock = s4Clock;
}
public void setS4Clock(Clock s4Clock) {
this.s4Clock = s4Clock;
}
public ConMapPersister() {
}
public void init() {
cache = new ConcurrentHashMap<String, CacheEntry>(this.getStartCapacity());
if (selfClean) {
Runnable r = new Runnable() {
public void run() {
while (!Thread.interrupted()) {
int cleanCount = ConMapPersister.this.cleanOutGarbage();
Logger.getLogger(loggerName).info("Cleaned out "
+ cleanCount + " entries; Persister has "
+ cache.size() + " entries");
try {
Thread.sleep(cleanWaitTime * 1000);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
}
}
}
};
Thread t = new Thread(r);
t.start();
t.setPriority(Thread.MIN_PRIORITY);
}
}
public int getQueueSize() {
return 0;
}
public int getPersistCount() {
return persistCount.get();
}
public int getCacheEntryCount() {
return cache.size();
}
public void setAsynch(String key, Object value, int period) {
// there really is no asynch for the local cache
set(key, value, period);
}
public void set(String key, Object value, int period) {
persistCount.getAndIncrement();
CacheEntry ce = new CacheEntry();
ce.value = value;
ce.period = period;
ce.addTime = s4Clock.getCurrentTime();
cache.put(key, ce);
}
public Object get(String key) {
CacheEntry ce = cache.get(key);
if (ce == null) {
return null;
}
if (ce.isExpired()) {
return null;
}
return ce.value;
}
public Map<String, Object> getBulk(String[] keys) {
HashMap map = new HashMap<String, Object>();
for (String key : keys) {
Object value = get(key);
if (value != null) {
map.put(key, value);
}
}
return map;
}
public Object getObject(String key) {
return get(key);
}
public Map<String, Object> getBulkObjects(String[] keys) {
return getBulk(keys);
}
public void remove(String key) {
cache.remove(key);
}
public int cleanOutGarbage() {
int count = 0;
for (Enumeration en = cache.keys(); en.hasMoreElements();) {
String key = (String) en.nextElement();
CacheEntry ce = cache.get(key);
if (ce != null && ce.isExpired()) {
count++;
cache.remove(key);
}
}
return count;
}
public Set<String> keySet() {
return cache.keySet();
}
public class CacheEntry {
Object value;
long addTime;
int period;
public boolean isExpired() {
if (period > 0) {
if ((addTime + (1000 * (long) period)) <= s4Clock.getCurrentTime()) {
return true;
}
}
return false;
}
}
}
| apache-2.0 |
googleapis/java-orchestration-airflow | proto-google-cloud-orchestration-airflow-v1/src/main/java/com/google/cloud/orchestration/airflow/service/v1/DatabaseConfig.java | 23250 | /*
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: google/cloud/orchestration/airflow/service/v1/environments.proto
package com.google.cloud.orchestration.airflow.service.v1;
/**
*
*
* <pre>
* The configuration of Cloud SQL instance that is used by the Apache Airflow
* software.
* </pre>
*
* Protobuf type {@code google.cloud.orchestration.airflow.service.v1.DatabaseConfig}
*/
public final class DatabaseConfig extends com.google.protobuf.GeneratedMessageV3
implements
// @@protoc_insertion_point(message_implements:google.cloud.orchestration.airflow.service.v1.DatabaseConfig)
DatabaseConfigOrBuilder {
private static final long serialVersionUID = 0L;
// Use DatabaseConfig.newBuilder() to construct.
private DatabaseConfig(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private DatabaseConfig() {
machineType_ = "";
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
return new DatabaseConfig();
}
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
return this.unknownFields;
}
private DatabaseConfig(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10:
{
java.lang.String s = input.readStringRequireUtf8();
machineType_ = s;
break;
}
default:
{
if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
return com.google.cloud.orchestration.airflow.service.v1.EnvironmentsOuterClass
.internal_static_google_cloud_orchestration_airflow_service_v1_DatabaseConfig_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return com.google.cloud.orchestration.airflow.service.v1.EnvironmentsOuterClass
.internal_static_google_cloud_orchestration_airflow_service_v1_DatabaseConfig_fieldAccessorTable
.ensureFieldAccessorsInitialized(
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig.class,
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig.Builder.class);
}
public static final int MACHINE_TYPE_FIELD_NUMBER = 1;
private volatile java.lang.Object machineType_;
/**
*
*
* <pre>
* Optional. Cloud SQL machine type used by Airflow database.
* It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8
* or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
* </pre>
*
* <code>string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
*
* @return The machineType.
*/
@java.lang.Override
public java.lang.String getMachineType() {
java.lang.Object ref = machineType_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
machineType_ = s;
return s;
}
}
/**
*
*
* <pre>
* Optional. Cloud SQL machine type used by Airflow database.
* It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8
* or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
* </pre>
*
* <code>string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
*
* @return The bytes for machineType.
*/
@java.lang.Override
public com.google.protobuf.ByteString getMachineTypeBytes() {
java.lang.Object ref = machineType_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
machineType_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(machineType_)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 1, machineType_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(machineType_)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, machineType_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig)) {
return super.equals(obj);
}
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig other =
(com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig) obj;
if (!getMachineType().equals(other.getMachineType())) return false;
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (37 * hash) + MACHINE_TYPE_FIELD_NUMBER;
hash = (53 * hash) + getMachineType().hashCode();
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
byte[] data) throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
java.io.InputStream input) throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
PARSER, input, extensionRegistry);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseDelimitedFrom(
java.io.InputStream input) throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseDelimitedFrom(
java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
PARSER, input, extensionRegistry);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
com.google.protobuf.CodedInputStream input) throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() {
return newBuilder();
}
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
*
* <pre>
* The configuration of Cloud SQL instance that is used by the Apache Airflow
* software.
* </pre>
*
* Protobuf type {@code google.cloud.orchestration.airflow.service.v1.DatabaseConfig}
*/
public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
implements
// @@protoc_insertion_point(builder_implements:google.cloud.orchestration.airflow.service.v1.DatabaseConfig)
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfigOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
return com.google.cloud.orchestration.airflow.service.v1.EnvironmentsOuterClass
.internal_static_google_cloud_orchestration_airflow_service_v1_DatabaseConfig_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return com.google.cloud.orchestration.airflow.service.v1.EnvironmentsOuterClass
.internal_static_google_cloud_orchestration_airflow_service_v1_DatabaseConfig_fieldAccessorTable
.ensureFieldAccessorsInitialized(
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig.class,
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig.Builder.class);
}
// Construct using com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {}
}
@java.lang.Override
public Builder clear() {
super.clear();
machineType_ = "";
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
return com.google.cloud.orchestration.airflow.service.v1.EnvironmentsOuterClass
.internal_static_google_cloud_orchestration_airflow_service_v1_DatabaseConfig_descriptor;
}
@java.lang.Override
public com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig
getDefaultInstanceForType() {
return com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig.getDefaultInstance();
}
@java.lang.Override
public com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig build() {
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig buildPartial() {
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig result =
new com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig(this);
result.machineType_ = machineType_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig) {
return mergeFrom((com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig) other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig other) {
if (other
== com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig.getDefaultInstance())
return this;
if (!other.getMachineType().isEmpty()) {
machineType_ = other.machineType_;
onChanged();
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage =
(com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig)
e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private java.lang.Object machineType_ = "";
/**
*
*
* <pre>
* Optional. Cloud SQL machine type used by Airflow database.
* It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8
* or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
* </pre>
*
* <code>string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
*
* @return The machineType.
*/
public java.lang.String getMachineType() {
java.lang.Object ref = machineType_;
if (!(ref instanceof java.lang.String)) {
com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
machineType_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
*
*
* <pre>
* Optional. Cloud SQL machine type used by Airflow database.
* It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8
* or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
* </pre>
*
* <code>string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
*
* @return The bytes for machineType.
*/
public com.google.protobuf.ByteString getMachineTypeBytes() {
java.lang.Object ref = machineType_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
machineType_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
*
*
* <pre>
* Optional. Cloud SQL machine type used by Airflow database.
* It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8
* or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
* </pre>
*
* <code>string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
*
* @param value The machineType to set.
* @return This builder for chaining.
*/
public Builder setMachineType(java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
machineType_ = value;
onChanged();
return this;
}
/**
*
*
* <pre>
* Optional. Cloud SQL machine type used by Airflow database.
* It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8
* or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
* </pre>
*
* <code>string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
*
* @return This builder for chaining.
*/
public Builder clearMachineType() {
machineType_ = getDefaultInstance().getMachineType();
onChanged();
return this;
}
/**
*
*
* <pre>
* Optional. Cloud SQL machine type used by Airflow database.
* It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8
* or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
* </pre>
*
* <code>string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
*
* @param value The bytes for machineType to set.
* @return This builder for chaining.
*/
public Builder setMachineTypeBytes(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
checkByteStringIsUtf8(value);
machineType_ = value;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:google.cloud.orchestration.airflow.service.v1.DatabaseConfig)
}
// @@protoc_insertion_point(class_scope:google.cloud.orchestration.airflow.service.v1.DatabaseConfig)
private static final com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig
DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig();
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig
getDefaultInstance() {
return DEFAULT_INSTANCE;
}
private static final com.google.protobuf.Parser<DatabaseConfig> PARSER =
new com.google.protobuf.AbstractParser<DatabaseConfig>() {
@java.lang.Override
public DatabaseConfig parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new DatabaseConfig(input, extensionRegistry);
}
};
public static com.google.protobuf.Parser<DatabaseConfig> parser() {
return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser<DatabaseConfig> getParserForType() {
return PARSER;
}
@java.lang.Override
public com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig
getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
| apache-2.0 |
dsukhoroslov/bagri | bagri-server/bagri-server-hazelcast/src/main/java/com/bagri/server/hazelcast/task/schema/SchemaUpdater.java | 2552 | package com.bagri.server.hazelcast.task.schema;
import static com.bagri.core.Constants.pn_schema_password;
import static com.bagri.server.hazelcast.serialize.TaskSerializationFactory.cli_UpdateSchemaTask;
import static com.bagri.support.security.Encryptor.encrypt;
import java.io.IOException;
import java.util.Properties;
import java.util.Map.Entry;
import com.bagri.core.system.Schema;
import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.nio.serialization.IdentifiedDataSerializable;
/**
 * Hazelcast entry processor that updates the properties of a stored
 * {@link Schema}, either replacing the whole property set or merging the
 * supplied keys into it. The schema password property is encrypted before
 * being stored. Serialized via {@link IdentifiedDataSerializable} so it can
 * be shipped to the member owning the entry.
 */
public class SchemaUpdater extends SchemaProcessor implements IdentifiedDataSerializable {

	// when true the incoming properties replace the schema's property set
	// wholesale; when false they are merged key by key
	private boolean override;
	// new/updated schema properties supplied by the caller
	private Properties properties;

	public SchemaUpdater() {
		//
	}

	public SchemaUpdater(int version, String admin, boolean override, Properties properties) {
		super(version, admin);
		this.override = override;
		this.properties = properties;
	}

	/**
	 * Applies the property update to the mapped schema. The update is only
	 * performed when the caller-supplied version matches the stored schema's
	 * version (optimistic-lock style check); otherwise, or when the entry has
	 * no value, null is returned and nothing changes.
	 *
	 * @return the updated Schema, or null when the update was not applied
	 */
	@Override
	public Object process(Entry<String, Schema> entry) {
		logger.debug("process.enter; entry: {}", entry);
		if (entry.getValue() != null) {
			Schema schema = entry.getValue();
			if (schema.getVersion() == getVersion()) {
				//if (schema.isActive()) {
				//	if (denitSchemaInCluster(schema) > 0) {
						// don't go further
				//		return null;
				//	}
				//}
				if (override) {
					// replace all properties; encrypt the password first
					String pwd = properties.getProperty(pn_schema_password);
					if (pwd != null) {
						properties.setProperty(pn_schema_password, encrypt(pwd));
					}
					schema.setProperties(properties);
				} else {
					// merge: copy each supplied key, encrypting the password
					for (String name: properties.stringPropertyNames()) {
						String value = properties.getProperty(name);
						if (pn_schema_password.equals(name)) {
							value = encrypt(value);
						}
						schema.setProperty(name, value);
					}
				}
				//if (schema.isActive()) {
				//	if (initSchemaInCluster(schema) == 0) {
				//		schema.setActive(false);
				//	}
				//}
				schema.updateVersion(getAdmin());
				// write the mutated schema back so the map sees the change
				entry.setValue(schema);
				auditEntity(AuditType.update, schema);
				return schema;
			}
		}
		return null;
	}

	@Override
	public int getId() {
		return cli_UpdateSchemaTask;
	}

	@Override
	public void readData(ObjectDataInput in) throws IOException {
		super.readData(in);
		override = in.readBoolean();
		properties = in.readObject();
	}

	@Override
	public void writeData(ObjectDataOutput out) throws IOException {
		super.writeData(out);
		out.writeBoolean(override);
		out.writeObject(properties);
	}
}
| apache-2.0 |
caveman-frak/java-core | core-swing/src/test/java/uk/co/bluegecko/core/swing/table/rendering/RenderingHintTest.java | 4586 | package uk.co.bluegecko.core.swing.table.rendering;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.awt.Color;
import java.awt.Font;
import org.junit.Before;
import org.junit.Test;
/**
 * Unit tests for the rendering-hint classes: weight comparison via
 * {@code exceeds}, value derivation in {@code FontHint}, and color
 * derivation in {@code ForegroundHint}/{@code BackgroundHint}.
 */
public class RenderingHintTest
{

	// shared fixtures, re-created per test in setUp()
	private Font font;
	private Color color;

	@Before
	public final void setUp()
	{
		font = Font.decode( "Monospaced-12" );
		color = new Color( 0x808080 );
	}

	/** Weights must form a strict ordering: min < low < default < selected < high < focused < max. */
	@Test
	public final void testWeightExceeds()
	{
		final FontHint min = new FontHint( HintWeight.MIN_WEIGHT );
		final FontHint low = new FontHint( HintWeight.LOW_WEIGHT );
		final FontHint def = new FontHint( HintWeight.DEFAULT_WEIGHT );
		final FontHint selected = new FontHint( HintWeight.SELECTED_WEIGHT );
		final FontHint high = new FontHint( HintWeight.HIGH_WEIGHT );
		final FontHint focused = new FontHint( HintWeight.FOCUSED_WEIGHT );
		final FontHint max = new FontHint( HintWeight.MAX_WEIGHT );
		// exceeds() must be strict: equal weights do not exceed each other
		assertFalse( "min-min", min.exceeds( min ) );
		assertFalse( "min-low", min.exceeds( low ) );
		assertTrue( "low-min", low.exceeds( min ) );
		assertTrue( "default-low", def.exceeds( low ) );
		assertTrue( "selected-default", selected.exceeds( def ) );
		assertTrue( "high-selected", high.exceeds( selected ) );
		assertTrue( "focused-high", focused.exceeds( high ) );
		assertTrue( "max-focused", max.exceeds( focused ) );
	}

	/** A hint without a value falls back to the original font and has no own value. */
	@Test
	public final void testGetValueNone()
	{
		assertEquals( font, new FontHint( HintWeight.MAX_WEIGHT ).getValue( font ) );
		assertNull( new FontHint( HintWeight.MAX_WEIGHT ).getValue() );
	}

	/** A hint constructed with an explicit value returns it regardless of the original. */
	@Test
	public final void testGetValueNonDerived()
	{
		final Font value = Font.decode( "Monospaced-BOLD-14" );
		assertEquals( value, new FontHint( HintWeight.MAX_WEIGHT, value ).getValue( font ) );
		assertEquals( value, new FontHint( HintWeight.MAX_WEIGHT, value ).getValue() );
	}

	/** A subclass overriding derive() transforms the original; getValue() alone stays null. */
	@Test
	public final void testGetValueDerived()
	{
		final Font value = Font.decode( "Monospaced-14" );
		final FontHint fontHint = new FontHint( HintWeight.MAX_WEIGHT )
		{
			private static final long serialVersionUID = 1L;

			@Override
			protected Font derive( final Font original )
			{
				return original.deriveFont( 14.0f );
			}
		};
		assertEquals( value, fontHint.getValue( font ) );
		assertNull( fontHint.getValue() );
	}

	@Test
	public final void testFontHintSize()
	{
		final Font value = Font.decode( "Monospaced-14" );
		assertEquals( value, FontHint.size( HintWeight.MAX_WEIGHT, 14 )
				.getValue( font ) );
	}

	/** larger(2) on a 12pt font yields 14pt. */
	@Test
	public final void testFontHintLarger()
	{
		final Font value = Font.decode( "Monospaced-14" );
		assertEquals( value, FontHint.larger( HintWeight.MAX_WEIGHT, 2 )
				.getValue( font ) );
	}

	/** smaller(2) on a 12pt font yields 10pt. */
	@Test
	public final void testFontHintSmaller()
	{
		final Font value = Font.decode( "Monospaced-10" );
		assertEquals( value, FontHint.smaller( HintWeight.MAX_WEIGHT, 2 )
				.getValue( font ) );
	}

	/** scaled(0.5) on a 12pt font yields 6pt. */
	@Test
	public final void testFontHintScaled()
	{
		final Font value = Font.decode( "Monospaced-6" );
		assertEquals( value, FontHint.scaled( HintWeight.MAX_WEIGHT, 0.5f )
				.getValue( font ) );
	}

	@Test
	public final void testFontHintStyle()
	{
		final Font value = Font.decode( "Monospaced-BOLD-12" );
		assertEquals( value, FontHint.style( HintWeight.MAX_WEIGHT, Font.BOLD )
				.getValue( font ) );
	}

	@Test
	public final void testFontHintStyleAndSize()
	{
		final Font value = Font.decode( "Monospaced-BOLD-14" );
		assertEquals( value, FontHint.style( HintWeight.MAX_WEIGHT, Font.BOLD, 14 )
				.getValue( font ) );
	}

	// color expectations below match java.awt.Color darker()/brighter()
	// applied to 0x808080 -- 0x595959 and 0xB6B6B6 respectively

	@Test
	public final void testForegroundHintDarker()
	{
		final Color value = new Color( 0x595959 );
		assertEquals( value, ForegroundHint.darker( HintWeight.MAX_WEIGHT )
				.getValue( color ) );
	}

	@Test
	public final void testForegroundHintBrighter()
	{
		final Color value = new Color( 0xB6B6B6 );
		assertEquals( value, ForegroundHint.brighter( HintWeight.MAX_WEIGHT )
				.getValue( color ) );
	}

	@Test
	public final void testBackgroundHintDarker()
	{
		final Color value = new Color( 0x595959 );
		assertEquals( value, BackgroundHint.darker( HintWeight.MAX_WEIGHT )
				.getValue( color ) );
	}

	@Test
	public final void testBackgroundHintBrighter()
	{
		final Color value = new Color( 0xB6B6B6 );
		assertEquals( value, BackgroundHint.brighter( HintWeight.MAX_WEIGHT )
				.getValue( color ) );
	}
}
| apache-2.0 |
devcon5io/common | cli/src/test/java/io/devcon5/cli/CLIExample.java | 1919 | /*
* Copyright 2015-2016 DevCon5 GmbH, info@devcon5.ch
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.devcon5.cli;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.core.Is.is;
import static org.hamcrest.core.IsNot.not;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import org.junit.Test;
/**
*/
public class CLIExample {

    // NOTE: field names and annotations here are read reflectively by
    // CLI.parse(...).into(this); renaming a field or dropping an annotation
    // changes runtime behaviour, not just style.

    // Injected from the "-x" command line option.
    @CliOption(value = "x",
            hasArg = true)
    private String example;

    // Grouped options: the fields of Structured are injected from "-u"/"-p".
    @CliOptionGroup
    private Structured credentials;

    // Derived in init() after injection; not itself a CLI option.
    private String postProcessed;

    // Runs after field injection (example() asserts on its result below).
    @PostInject
    private void init(){
        postProcessed = "an " + example;
    }

    @Test
    public void example() {
        //arrange
        String[] exampleArgs = {"-u", "hans", "-p", "wurst", "-x", "example"};
        //act
        CLI.parse(exampleArgs).into(this);
        run();
        //assert
        assertEquals("an example", postProcessed);
    }

    // Verifies that every option, including the grouped credentials, was injected.
    public void run() {
        assertThat(example, is(not(nullValue())));
        assertThat(credentials.user, is(not(nullValue())));
        assertThat(credentials.password, is(not(nullValue())));
    }

    // Example of structured options resolved via @CliOptionGroup.
    static class Structured {

        @CliOption(value = "u",
                hasArg = true)
        private String user;

        @CliOption(value = "p",
                hasArg = true)
        private String password;
    }
}
| apache-2.0 |
dbflute-session/lastaflute-test-catalog | src/main/java/org/docksidestage/app/web/product/ProductSearchForm.java | 1252 | /*
* Copyright 2015-2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*/
package org.docksidestage.app.web.product;
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.docksidestage.dbflute.allcommon.CDef;
import org.hibernate.validator.constraints.Length;
/**
* @author jflute
*/
public class ProductSearchForm {

    // Product name search condition.
    @Length(max = 10) // #simple_for_example just for validation example
    public String productName;

    // Product status filter (CDef classification; no validation constraint).
    public CDef.ProductStatus productStatus;

    // Purchasing member's name search condition.
    @Length(max = 5) // #simple_for_example just for validation example
    public String purchaseMemberName;

    /** Reflective dump of all fields, mainly for logging/debugging. */
    @Override
    public String toString() {
        return ToStringBuilder.reflectionToString(this);
    }
}
| apache-2.0 |
liqilun/flora | src/main/java/com/flora/support/VelocityTemplate.java | 1644 | package com.flora.support;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.StringWriter;
import java.io.Writer;
import java.nio.charset.StandardCharsets;
import java.util.Map;

import org.apache.velocity.VelocityContext;
import org.apache.velocity.app.VelocityEngine;
import org.apache.velocity.context.Context;

import com.flora.Config;
public class VelocityTemplate {

    private VelocityEngine velocityEngine;
    private Config config;

    public VelocityTemplate(){
    }

    /**
     * Merges the named template with the given model and returns the rendered
     * output as a string.
     *
     * @param template template path, resolved by the configured engine
     * @param model    model values; the shared page tools from
     *                 {@code Config.getPageTools()} are added (the map is
     *                 mutated, as before)
     * @return the rendered output, possibly partial if rendering failed
     *         (errors are logged, never thrown)
     */
    public String parseTemplate(String template, Map model){
        Writer writer = new StringWriter();
        parseTemplate(template, model, writer);
        return writer.toString();
    }

    /**
     * Merges the named template with the given model into {@code writer}.
     * Rendering errors are logged and swallowed, preserving the original
     * best-effort, no-throw contract of this class.
     */
    public void parseTemplate(String template, Map model, Writer writer){
        model.putAll(Config.getPageTools());
        Context context = new VelocityContext(model);
        try {
            velocityEngine.mergeTemplate(template, "UTF-8", context, writer);
        } catch (Exception e) {
            // Best effort by design; previously the exception was silently
            // discarded, which made failures impossible to diagnose.
            e.printStackTrace();
        }
    }

    /**
     * Merges the named template with the given model into {@code os}.
     *
     * <p>Fixes over the previous version: the writer is now flushed (the
     * buffered {@link OutputStreamWriter} could otherwise drop all output),
     * and the bytes are encoded as UTF-8 explicitly instead of the platform
     * default charset, matching the "UTF-8" template encoding passed to the
     * engine. The stream itself is intentionally not closed; it belongs to
     * the caller.
     */
    public void parseTemplate(String template, Map model, OutputStream os){
        Writer writer = new OutputStreamWriter(os, StandardCharsets.UTF_8);
        parseTemplate(template, model, writer);
        try {
            writer.flush();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    public void setVelocityEngine(VelocityEngine velocityEngine) {
        this.velocityEngine = velocityEngine;
    }

    public Config getConfig() {
        return config;
    }

    public void setConfig(Config config) {
        this.config = config;
    }
}
| apache-2.0 |
lpxz/grail-lucene358684 | src/java/org/apache/lucene/search/WildcardTermEnum.java | 5708 | package org.apache.lucene.search;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
/**
* Subclass of FilteredTermEnum for enumerating all terms that match the
* specified wildcard filter term.
* <p>
* Term enumerations are always ordered by Term.compareTo(). Each term in
* the enumeration is greater than all that precede it.
*
* @version $Id: WildcardTermEnum.java 329859 2005-10-31 17:05:36Z bmesser $
*/
public class WildcardTermEnum extends FilteredTermEnum {
  /** The wildcard term being matched against the index terms. */
  Term searchTerm;
  /** Field of {@link #searchTerm}; only terms of this field can match. */
  String field = "";
  /** Wildcard part of the pattern (everything after the literal prefix). */
  String text = "";
  /** Literal prefix of the pattern, i.e. the text before the first wildcard. */
  String pre = "";
  /** Length of {@link #pre}, cached for the per-term comparison. */
  int preLen = 0;
  /** Set once a term outside the field/prefix range is seen; terms are ordered. */
  boolean endEnum = false;

  /**
   * Creates a new <code>WildcardTermEnum</code>. Passing in a
   * {@link org.apache.lucene.index.Term Term} that does not contain a
   * <code>WILDCARD_CHAR</code> will cause an exception to be thrown.
   * <p>
   * After calling the constructor the enumeration is already pointing to the first
   * valid term if such a term exists.
   */
  public WildcardTermEnum(IndexReader reader, Term term) throws IOException {
    super();
    searchTerm = term;
    field = searchTerm.field();
    text = searchTerm.text();

    // Split the pattern at the first wildcard ('*' or '?'), whichever
    // occurs first; everything before it is a literal prefix.
    int sidx = text.indexOf(WILDCARD_STRING);
    int cidx = text.indexOf(WILDCARD_CHAR);
    int idx = sidx;
    if (idx == -1) {
      idx = cidx;
    }
    else if (cidx >= 0) {
      idx = Math.min(idx, cidx);
    }
    // NOTE(review): if the term contains no wildcard at all, idx stays -1 and
    // substring() below throws StringIndexOutOfBoundsException -- this is the
    // "exception to be thrown" mentioned in the constructor javadoc.

    pre = searchTerm.text().substring(0,idx);
    preLen = pre.length();
    text = text.substring(preLen);
    // Seek the underlying term enum to the first term carrying the prefix.
    setEnum(reader.terms(new Term(searchTerm.field(), pre)));
  }

  /**
   * Decides whether a candidate term matches the pattern. Because terms are
   * enumerated in order, the first term of another field or without the
   * literal prefix permanently ends the enumeration.
   */
  protected final boolean termCompare(Term term) {
    // Reference comparison is intentional: field name strings are assumed
    // to be interned in this codebase -- TODO confirm before changing to equals().
    if (field == term.field()) {
      String searchText = term.text();
      if (searchText.startsWith(pre)) {
        return wildcardEquals(text, 0, searchText, preLen);
      }
    }
    endEnum = true;
    return false;
  }

  /** All wildcard matches score equally. */
  public final float difference() {
    return 1.0f;
  }

  /** True once the ordered enumeration has passed the last possible match. */
  public final boolean endEnum() {
    return endEnum;
  }

  /********************************************
   * String equality with support for wildcards
   ********************************************/

  public static final char WILDCARD_STRING = '*';
  public static final char WILDCARD_CHAR = '?';

  /**
   * Determines if a word matches a wildcard pattern.
   * <small>Work released by Granta Design Ltd after originally being done on
   * company time.</small>
   *
   * @param pattern    wildcard pattern; '*' matches any run of characters,
   *                   '?' exactly one character
   * @param patternIdx index in {@code pattern} to start matching from
   * @param string     candidate text
   * @param stringIdx  index in {@code string} to start matching from
   * @return true if the remainder of {@code string} matches the remainder of
   *         {@code pattern}
   */
  public static final boolean wildcardEquals(String pattern, int patternIdx,
    String string, int stringIdx)
  {
    int p = patternIdx;
    for (int s = stringIdx; ; ++p, ++s)
    {
      // End of string yet?
      boolean sEnd = (s >= string.length());
      // End of pattern yet?
      boolean pEnd = (p >= pattern.length());
      // If we're looking at the end of the string...
      if (sEnd)
      {
        // Assume the only thing left on the pattern is/are wildcards
        boolean justWildcardsLeft = true;
        // Current wildcard position
        int wildcardSearchPos = p;
        // While we haven't found the end of the pattern,
        // and haven't encountered any non-wildcard characters
        while (wildcardSearchPos < pattern.length() && justWildcardsLeft)
        {
          // Check the character at the current position
          char wildchar = pattern.charAt(wildcardSearchPos);
          // If it's not a wildcard character, then there is more
          // pattern information after this/these wildcards.
          if (wildchar != WILDCARD_CHAR && wildchar != WILDCARD_STRING)
          {
            justWildcardsLeft = false;
          }
          else
          {
            // to prevent "cat" matches "ca??"
            if (wildchar == WILDCARD_CHAR) {
              return false;
            }
            // Look at the next character
            wildcardSearchPos++;
          }
        }
        // This was a prefix wildcard search, and we've matched, so
        // return true.
        if (justWildcardsLeft)
        {
          return true;
        }
      }
      // If we've gone past the end of the string, or the pattern,
      // return false.
      if (sEnd || pEnd)
      {
        break;
      }
      // Match a single character, so continue.
      if (pattern.charAt(p) == WILDCARD_CHAR)
      {
        continue;
      }
      // '*': try every possible split point, longest first (recursive).
      if (pattern.charAt(p) == WILDCARD_STRING)
      {
        // Look at the character beyond the '*'.
        ++p;
        // Examine the string, starting at the last character.
        for (int i = string.length(); i >= s; --i)
        {
          if (wildcardEquals(pattern, p, string, i))
          {
            return true;
          }
        }
        break;
      }
      if (pattern.charAt(p) != string.charAt(s))
      {
        break;
      }
    }
    return false;
  }

  /** Releases the wrapped enum and drops references to the pattern state. */
  public void close() throws IOException
  {
    super.close();
    searchTerm = null;
    field = null;
    text = null;
  }
}
| apache-2.0 |
ayltai/Foscam-CGI-Client | app/src/main/java/com/github/ayltai/foscam/client/RxBus.java | 2141 | package com.github.ayltai.foscam.client;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import android.support.annotation.NonNull;
import android.support.annotation.VisibleForTesting;
import android.support.v4.util.Pair;
import rx.Subscriber;
import rx.Subscription;
import rx.subjects.PublishSubject;
import rx.subjects.SerializedSubject;
import rx.subjects.Subject;
public /* final */ class RxBus {

    // One bus per thread: getInstance() returns a thread-confined instance.
    // NOTE(review): subscribers registered on one thread will not receive
    // events sent from another thread -- confirm this is the intended design.
    private static final ThreadLocal<RxBus> INSTANCE = new ThreadLocal<>();

    // Live subscriptions keyed by the (event type, subscriber) pair.
    private final Map<Pair<Class, Subscriber>, Subscription> subscriptions = new HashMap<>();

    // All events flow through this subject; the SerializedSubject wrapper
    // serializes emissions onto the underlying PublishSubject.
    private final Subject<Object, ?> bus = new SerializedSubject<>(PublishSubject.create());

    /** Returns the calling thread's bus, creating it on first use. */
    public static RxBus getInstance() {
        final RxBus instance = RxBus.INSTANCE.get();
        if (instance == null) {
            RxBus.INSTANCE.set(new RxBus());
            return RxBus.INSTANCE.get();
        }
        return instance;
    }

    @VisibleForTesting
    RxBus() {
    }

    /**
     * Subscribes {@code subscriber} to events whose runtime class equals
     * {@code eventType} exactly (subclasses do not match the filter).
     *
     * @throws IllegalArgumentException if this (type, subscriber) pair is
     *         already registered
     */
    public <T> void register(@NonNull final Class<T> eventType, @NonNull final Subscriber<T> subscriber) {
        final Pair<Class, Subscriber> key = Pair.create(eventType, subscriber);
        if (this.subscriptions.containsKey(key)) throw new IllegalArgumentException("The given subscriber is already registered");
        this.subscriptions.put(key, this.bus.filter(event -> event != null && event.getClass().equals(eventType)).subscribe(value -> subscriber.onNext((T)value)));
    }

    /** Unsubscribes the pair if registered; silently a no-op otherwise. */
    public <T> void unregister(@NonNull final Class<T> eventType, @NonNull final Subscriber<T> subscriber) {
        final Pair<Class, Subscriber> key = Pair.create(eventType, subscriber);
        if (this.subscriptions.containsKey(key)) this.subscriptions.remove(key).unsubscribe();
    }

    /** Unregisters everything; iterates a snapshot because unregister() mutates the map. */
    public void unregisterAll() {
        for (final Pair<Class, Subscriber> pair : new HashSet<>(this.subscriptions.keySet())) {
            this.unregister(pair.first, pair.second);
        }
    }

    /** Emits the event to current subscribers; dropped if nobody is registered. */
    public <T> void send(@NonNull final T event) {
        if (!this.subscriptions.isEmpty()) this.bus.onNext(event);
    }
}
| apache-2.0 |
googleapis/google-api-java-client-services | clients/google-api-services-retail/v2/1.31.0/com/google/api/services/retail/v2/CloudRetailScopes.java | 1411 | /*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.retail.v2;
/**
* Available OAuth 2.0 scopes for use with the Retail API.
*
* @since 1.4
*/
public class CloudRetailScopes {

  /** See, edit, configure, and delete your Google Cloud data and see the email address for your Google Account.. */
  public static final String CLOUD_PLATFORM = "https://www.googleapis.com/auth/cloud-platform";

  /**
   * Returns an unmodifiable set that contains all scopes declared by this class.
   *
   * @since 1.16
   */
  public static java.util.Set<String> all() {
    java.util.Set<String> scopes = new java.util.HashSet<String>();
    java.util.Collections.addAll(scopes, CLOUD_PLATFORM);
    return java.util.Collections.unmodifiableSet(scopes);
  }

  // Not instantiable: constants-only holder.
  private CloudRetailScopes() {
  }
}
| apache-2.0 |
andriell/craftyfox | src/test/java/com/github/andriell/collection/HashThreeTest.java | 2540 | package com.github.andriell.collection;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
/**
* Created by Andrey on 13.02.2016
*/
public class HashThreeTest {

    // Allows running the test without a JUnit runner.
    public static void main(String[] args) {
        HashThreeTest test = new HashThreeTest();
        test.test1();
    }

    @Test
    public void test1() {
        // Keys chosen to differ only in their high-order bits (0x5/0x6/0x7...)
        // or only in their low-order bits (5/6/7), exercising both extremes of
        // the hash-keyed structure.
        ObjectTest test1 = new ObjectTest(0x50000000);
        ObjectTest test2 = new ObjectTest(0x60000000);
        ObjectTest test3 = new ObjectTest(0x70000000);
        ObjectTest test4 = new ObjectTest(0x00000005);
        ObjectTest test5 = new ObjectTest(0x00000006);
        ObjectTest test6 = new ObjectTest(0x00000007);
        HashThree<ObjectTest> three = new HashThree<ObjectTest>();
        // Empty structure: size 0, removing is a no-op.
        assertEquals(0, three.getSize());
        assertEquals(false, three.remove(test1));
        // Each distinct key is added once and increases the size.
        assertEquals(true, three.add(test1));
        assertEquals(1, three.getSize());
        assertEquals(true, three.add(test2));
        assertEquals(2, three.getSize());
        assertEquals(true, three.add(test3));
        assertEquals(3, three.getSize());
        assertEquals(true, three.add(test4));
        assertEquals(4, three.getSize());
        assertEquals(true, three.add(test5));
        assertEquals(5, three.getSize());
        assertEquals(true, three.add(test6));
        assertEquals(6, three.getSize());
        // Re-adding existing keys must fail...
        assertEquals(false, three.add(test1));
        assertEquals(false, three.add(test2));
        assertEquals(false, three.add(test3));
        assertEquals(false, three.add(test4));
        // ...while replace() succeeds for keys that are present.
        assertEquals(true, three.replace(test1));
        assertEquals(true, three.replace(test2));
        assertEquals(true, three.replace(test3));
        assertEquals(true, three.replace(test4));
        System.out.println(three);
        // An existing key can be found and removed.
        assertEquals(true, three.exist(test2));
        assertEquals(true, three.remove(test2));
        // NOTE(review): the assertions below are disabled; confirm whether they
        // document known HashThree bugs or are simply stale and can be deleted.
        //assertEquals(false, three.remove(test2));
        //assertEquals(true, three.exist(test1));
        //assertEquals(false, three.exist(test2));
        //assertEquals(true, three.exist(test3));
        //assertEquals(true, three.exist(test4));
        System.out.println(three);
    }

    // Test key whose hash code is fixed at construction. equals() is NOT
    // overridden, so instances are distinguished by identity; the structure
    // under test appears to key on hashCode() alone -- TODO confirm.
    private class ObjectTest {
        private int hashCode;

        public ObjectTest(int hashCode) {
            this.hashCode = hashCode;
        }

        @Override
        public int hashCode() {
            return hashCode;
        }

        @Override
        public String toString() {
            return Integer.toString(hashCode);
        }
    }
}
| apache-2.0 |
rharter/auto-value-moshi | example/src/main/java/com/ryanharter/auto/value/moshi/example/GenericsExample.java | 870 | package com.ryanharter.auto.value.moshi.example;
import com.google.auto.value.AutoValue;
import com.squareup.moshi.JsonAdapter;
import com.squareup.moshi.Moshi;
import java.lang.reflect.Type;
@AutoValue public abstract class GenericsExample<A, B, C> {

  // Property accessors; implemented by the generated AutoValue_GenericsExample.
  public abstract A a();
  public abstract B b();
  public abstract C c();

  /** Builder contract; implemented by the generated AutoValue builder. */
  @AutoValue.Builder
  public interface Builder<A, B, C> {
    Builder<A, B, C> a(A a);
    Builder<A, B, C> b(B b);
    Builder<A, B, C> c(C c);
    GenericsExample<A, B, C> build();
  }

  /** Creates a builder backed by the generated implementation. */
  public static <A, B, C> Builder<A, B, C> builder() {
    return new AutoValue_GenericsExample.Builder<A, B, C>();
  }

  /**
   * Adapter factory for auto-value-moshi; {@code types} carries the concrete
   * bindings for the type parameters A, B and C.
   */
  public static <A, B, C> JsonAdapter<GenericsExample<A, B, C>> jsonAdapter(Moshi moshi, Type[] types) {
    return new AutoValue_GenericsExample.MoshiJsonAdapter(moshi, types);
  }
}
| apache-2.0 |
liufeiit/WebMagic | WebMagic/src/test/java/web/magic/jvm/MBeanTyper.java | 4284 | package web.magic.jvm;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.lang.reflect.UndeclaredThrowableException;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import javax.management.MBeanException;
import javax.management.MBeanServer;
import javax.management.ObjectName;
/**
 * Creates strongly-typed dynamic proxies for MBeans registered in an
 * {@link MBeanServer}.
 */
class MBeanTyper {
    static final boolean DEBUG = Boolean.getBoolean("jboss.jmx.debug");

    /**
     * Creates a typed object from an mbean.
     *
     * @param server        the MBean server hosting the MBean
     * @param mbean         the object name of the MBean to proxy
     * @param mainInterface the interface (plus all of its super-interfaces)
     *                      the proxy should implement
     * @return a proxy implementing {@code mainInterface} whose calls are
     *         forwarded to the MBean server
     */
    public static final Object typeMBean(MBeanServer server, ObjectName mbean, Class<?> mainInterface) throws Exception {
        // BUG FIX: a LinkedHashSet de-duplicates super-interfaces reachable
        // through more than one path (diamond inheritance). The previous
        // List-based version could pass the same interface twice to
        // Proxy.newProxyInstance, which rejects repeated interfaces with an
        // IllegalArgumentException.
        Set<Class<?>> interfaces = new LinkedHashSet<Class<?>>();
        if (mainInterface.isInterface()) {
            interfaces.add(mainInterface);
        }
        addInterfaces(mainInterface.getInterfaces(), interfaces);
        Class<?>[] cl = interfaces.toArray(new Class<?>[interfaces.size()]);
        if (DEBUG) {
            System.err.println("typeMean->server=" + server + ",mbean=" + mbean + ",mainInterface=" + mainInterface);
            for (int c = 0; c < cl.length; c++) {
                System.err.println(" :" + cl[c]);
            }
        }
        return Proxy.newProxyInstance(Thread.currentThread().getContextClassLoader(), cl,
                new MBeanTyperInvoker(server, mbean));
    }

    /** Recursively collects {@code cl} and all their super-interfaces into {@code set}. */
    private static final void addInterfaces(Class<?> cl[], Set<Class<?>> set) {
        if (cl == null)
            return;
        for (int c = 0; c < cl.length; c++) {
            set.add(cl[c]);
            addInterfaces(cl[c].getInterfaces(), set);
        }
    }
}

/**
 * MBeanTyperInvoker handles method invocations against the MBeanTyper target
 * object and forwards them to the MBeanServer and ObjectName for invocation.
 *
 * @author <a href="mailto:jhaynie@vocalocity.net">Jeff Haynie</a>
 */
final class MBeanTyperInvoker implements java.lang.reflect.InvocationHandler {
    private final MBeanServer server;
    private final ObjectName mbean;
    /** Cache of JMX signatures (parameter type names) per interface method. */
    private final Map<Method, String[]> signatureCache = Collections.synchronizedMap(new HashMap<Method, String[]>());

    MBeanTyperInvoker(MBeanServer server, ObjectName mbean) {
        this.server = server;
        this.mbean = mbean;
    }

    /** True if the method looks like a JavaBean getter, mapped to a JMX attribute read. */
    private boolean isJMXAttribute(Method m) {
        String name = m.getName();
        return (name.startsWith("get"));
    }

    public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
        if (MBeanTyper.DEBUG) {
            System.err.println(" ++ method=" + method.getName() + ",args=" + args);
        }
        try {
            if (method.getDeclaringClass() == Object.class) {
                // Object methods are answered locally rather than being
                // forwarded to the MBean server.
                String name = method.getName();
                if (name.equals("hashCode")) {
                    return Integer.valueOf(this.hashCode()); // was: new Integer(...)
                } else if (name.equals("toString")) {
                    return this.toString();
                } else if (name.equals("equals")) {
                    // FIXME: this needs to be reviewed - we should be
                    // smarter about this ...
                    return Boolean.valueOf(equals(args[0])); // was: new Boolean(...)
                }
            } else if (isJMXAttribute(method) && (args == null || args.length <= 0)) {
                // Zero-arg getter -> JMX attribute read ("getFoo" -> "Foo").
                String name = method.getName().substring(3);
                return server.getAttribute(mbean, name);
            }
            String sig[] = signatureCache.get(method);
            if (sig == null) {
                // get the method signature from the method argument directly
                // vs. the arguments passed, since there may be primitives that
                // are wrapped as objects in the arguments
                Class<?> _args[] = method.getParameterTypes();
                if (_args != null && _args.length > 0) {
                    sig = new String[_args.length];
                    for (int c = 0; c < sig.length; c++) {
                        if (_args[c] != null) {
                            sig[c] = _args[c].getName();
                        }
                    }
                } else {
                    sig = new String[0];
                }
                signatureCache.put(method, sig);
            }
            return server.invoke(mbean, method.getName(), args, sig);
        } catch (Throwable t) {
            if (MBeanTyper.DEBUG) {
                t.printStackTrace();
            }
            // Unwrap the usual reflective/JMX wrappers so callers see the
            // original cause.
            if (t instanceof UndeclaredThrowableException) {
                UndeclaredThrowableException ut = (UndeclaredThrowableException) t;
                throw ut.getUndeclaredThrowable();
            } else if (t instanceof InvocationTargetException) {
                InvocationTargetException it = (InvocationTargetException) t;
                throw it.getTargetException();
            } else if (t instanceof MBeanException) {
                MBeanException me = (MBeanException) t;
                throw me.getTargetException();
            } else {
                throw t;
            }
        }
    }
}
N0rp/Snabb | src/main/java/eu/dowsing/kolla/widget/brick/facade/BrickView.java | 6215 | package eu.dowsing.kolla.widget.brick.facade;
import javafx.scene.layout.Pane;
import javafx.scene.paint.Color;
import javafx.scene.shape.Circle;
import javafx.scene.shape.CircleBuilder;
import javafx.scene.shape.Rectangle;
import javafx.scene.shape.RectangleBuilder;
import com.leapmotion.leap.Hand;
import eu.dowsing.kolla.widget.brick.model.BrickModel;
import eu.dowsing.kolla.widget.brick.model.BrickModel.Position;
/**
* Represents a complete hand including its fingers.
*
* @author richardg
*
*/
public class BrickView {
    // port(left hand:red) and starboard(right hand:green)
    public enum Importance {
        PRIMARY, SECONDARY
    }

    // Wide bar shown while the hand is horizontal.
    private Rectangle horizontal;
    // Narrow bar shown while the hand is vertical.
    private Rectangle vertical;
    // One small rectangle per possible finger (5 of them).
    private Rectangle[] fingerRects;
    // Circle marking where a hand can appear.
    private Circle hint;
    /** Hints at where the gesture started. **/
    private Circle startHint;

    /**
     * Builds all indicator shapes and attaches them to the given pane.
     *
     * @param p              pane the shapes are added to
     * @param rectHeight     height of the horizontal hand rectangle
     * @param rectWidth      width of the horizontal hand rectangle
     * @param rectX          x offset of the indicator group
     * @param rectY          y offset of the indicator group
     * @param miniRectHeight height of one finger rectangle
     * @param miniRectWidth  width of one finger rectangle
     */
    public BrickView(Pane p, int rectHeight, int rectWidth, int rectX, int rectY, int miniRectHeight, int miniRectWidth) {
        drawIndicator(p, rectHeight, rectWidth, rectX, rectY, miniRectHeight, miniRectWidth);
    }

    // Creates hint circles, hand rectangles and finger rectangles; everything
    // is added to the pane immediately and later toggled via setVisible(...).
    private void drawIndicator(Pane p, int hHeight, int hWidth, int rectX, int rectY, int mHeight, int mWidth) {
        final int fingerCount = 5;
        fingerRects = new Rectangle[fingerCount];
        final int rectMargin = 10;
        final int hRealWidth = hWidth - (2 * rectMargin);
        // create the measure for the mini finger rectangles
        int miniRectMargin = rectMargin / 2;
        int mRealWidth = mWidth - miniRectMargin;
        int mRectX = rectX + (miniRectMargin / 2);
        int mRectY = rectY;
        // create measures for the vertical rectangle
        final int vWidth = hHeight;
        final int vHeight = hWidth / 2;
        // create the circle indicating where the hand can be
        this.hint = CircleBuilder.create().radius(hHeight / 2).centerX(rectX + (hWidth / 2) - (hHeight / 2))
                .centerY(rectY + (hHeight / 2)).fill(Color.web("grey", 0.1)).stroke(Color.BLACK).build();
        p.getChildren().add(hint);
        // create the circle indicating where the gesture started
        this.startHint = CircleBuilder.create().radius(hHeight / 2).centerX(rectX + (hWidth / 2) - (hHeight / 2))
                .centerY(rectY + (hHeight / 2)).fill(Color.web("grey", 0.1)).stroke(Color.BLACK).build();
        p.getChildren().add(startHint);
        // create the rectangle indicating position of the hand
        horizontal = RectangleBuilder.create().height(hHeight).width(hRealWidth).arcHeight(0).arcWidth(0)
                .stroke(Color.RED).fill(Color.web("blue", 0.1)).translateX(rectX).translateY(rectY).build();
        p.getChildren().add(horizontal);
        // create rectangle indicating if the hand is vertical
        vertical = RectangleBuilder.create().height(vHeight).width(vWidth).arcHeight(0).arcWidth(0).stroke(Color.RED)
                .fill(Color.web("blue", 0.1)).translateX(rectX + (vWidth / 2)).translateY(rectY - (vHeight / 2))
                .build();
        p.getChildren().add(vertical);
        // now create the rectangles indicating fingers found
        for (int i = 0; i < fingerRects.length; i++) {
            Rectangle mini = RectangleBuilder.create().height(mHeight).width(mRealWidth).arcHeight(0).arcWidth(0)
                    .stroke(Color.GREEN).fill(Color.web("blue", 0.1)).translateX(mRectX + (i * mWidth))
                    .translateY(mRectY).build();
            fingerRects[i] = mini;
            p.getChildren().add(mini);
        }
    }

    /**
     * Maps the hand's pitch (in degrees) onto a translucent colour:
     * roughly level (-10..10) is blue, pointing up (80..100) green,
     * pointing down (-100..-80) yellow, anything else red.
     */
    public Color getPitchColor(Hand h) {
        double direction = Math.toDegrees(h.direction().pitch());
        if (direction < 10 && direction > -10) {
            return Color.web("blue", 0.1);
        } else if (direction < 100 && direction > 80) {
            return Color.web("green", 0.1);
        } else if (direction < -80 && direction > -100) {
            return Color.web("yellow", 0.1);
        } else {
            return Color.web("red", 0.1);
        }
    }

    /** Colour code per hand: primary green, secondary red (yellow is unreachable with the current enum). */
    public Color getHandColor(Importance importance) {
        // port(left hand/secondary:red) and starboard(right hand/primary:green)
        if (importance == Importance.PRIMARY) {
            return Color.web("green", 1);
        } else if (importance == Importance.SECONDARY) {
            return Color.web("red", 1);
        } else {
            return Color.web("yellow", 1);
        }
    }

    /** Shows the gesture-start marker, tinted with the hand's colour. */
    public void setShowGestureStart(Importance importance) {
        Color fill = getHandColor(importance);
        this.startHint.setVisible(true);
        this.startHint.setFill(fill);
    }

    /**
     * Show the hand
     *
     * @param importance
     * @param pos
     * @param fingerCount
     * @param handledGesture
     */
    public void showHand(Importance importance, Position pos, int fingerCount, boolean handledGesture) {
        // first all rectangles visible
        setVisible(true);
        // hide vertical or horizontal position
        Color fill = getHandColor(importance);
        if (pos == Position.HORIZONTAL) {
            vertical.setVisible(false);
        } else if (pos == Position.VERTICAL) {
            horizontal.setVisible(false);
        }
        // notify the user that the gesture was handled
        if (handledGesture) {
            fill = Color.web("yellow", 1);
        }
        // color the rectangles
        horizontal.setFill(fill);
        vertical.setFill(fill);
        // then we hide invisible fingers
        for (int i = fingerCount; i < fingerRects.length; i++) {
            fingerRects[i].setVisible(false);
        }
    }

    /**
     * Show or hide the complete hand with all indicators
     *
     * @param visible
     */
    public void setVisible(boolean visible) {
        hint.setVisible(visible);
        startHint.setVisible(visible);
        horizontal.setVisible(visible);
        vertical.setVisible(visible);
        for (Rectangle rect : this.fingerRects) {
            rect.setVisible(visible);
        }
    }

    /**
     * Show or hide only the hand hint.
     *
     * @param visible
     */
    public void setHintVisible(boolean visible) {
        this.hint.setVisible(visible);
    }
}
| apache-2.0 |
taniamahanama/product-msf4j | core/src/main/java/org/wso2/msf4j/ChunkResponder.java | 1625 | /*
* Copyright (c) 2016, WSO2 Inc. (http://wso2.com) All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.wso2.msf4j;
import io.netty.buffer.ByteBuf;
import java.io.Closeable;
import java.io.IOException;
import java.nio.ByteBuffer;
/**
* A responder for sending chunk-encoded response.
*/
public interface ChunkResponder extends Closeable {

    /**
     * Adds a chunk of data to the response. The content will be sent to the client asynchronously.
     *
     * @param chunk content to send
     * @throws IOException if the connection is already closed
     */
    void sendChunk(ByteBuffer chunk) throws IOException;

    /**
     * Adds a chunk of data to the response. The content will be sent to the client asynchronously.
     *
     * @param chunk content to send
     * @throws IOException if this {@link ChunkResponder} already closed or the connection is closed
     */
    void sendChunk(ByteBuf chunk) throws IOException;

    /**
     * Closes this responder which signals the end of the chunk response.
     * No further chunks can be sent afterwards.
     *
     * @throws IOException if an error occurs while closing
     */
    @Override
    void close() throws IOException;
}
| apache-2.0 |
mengmoya/onos | apps/l3vpn/nel3vpn/nemgr/src/main/java/org/onosproject/yang/gen/v1/ne/bgpcomm/rev20141225/nebgpcomm/bgpcomm/bgpvrfs/bgpvrf/bgpvrfafs/bgpvrfaf/package-info.java | 796 | /*
* Copyright 2016-present Open Networking Laboratory
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Implementation of YANG node bgpVrfAf's children nodes.
*/
package org.onosproject.yang.gen.v1.ne.bgpcomm.rev20141225.nebgpcomm.bgpcomm.bgpvrfs.bgpvrf.bgpvrfafs.bgpvrfaf; | apache-2.0 |
Jackygq1982/hbase_src | hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java | 167470 | /**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentSkipListSet;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Chore;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.RegionTransition;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.ipc.RpcClient;
import org.apache.hadoop.hbase.ipc.RpcClient.FailedServerException;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper;
import org.apache.hadoop.hbase.master.balancer.FavoredNodeLoadBalancer;
import org.apache.hadoop.hbase.master.handler.ClosedRegionHandler;
import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
import org.apache.hadoop.hbase.master.handler.OpenedRegionHandler;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
import org.apache.hadoop.hbase.regionserver.RegionAlreadyInTransitionException;
import org.apache.hadoop.hbase.regionserver.RegionMergeTransaction;
import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
import org.apache.hadoop.hbase.regionserver.SplitTransaction;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
import org.apache.hadoop.hbase.util.ConfigUtil;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.KeyLocker;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.PairOfSameType;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.Triple;
import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKTable;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.zookeeper.AsyncCallback;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NoNodeException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.data.Stat;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.LinkedHashMultimap;
/**
* Manages and performs region assignment.
* <p>
* Monitors ZooKeeper for events related to regions in transition.
* <p>
* Handles existing regions in transition during master failover.
*/
@InterfaceAudience.Private
public class AssignmentManager extends ZooKeeperListener {
// Logger for this class.
private static final Log LOG = LogFactory.getLog(AssignmentManager.class);
// Sentinel server name used to mark region transitions issued by HBCK;
// handleRegion() routes transitions from this "server" to handleHBCK().
public static final ServerName HBCK_CODE_SERVERNAME = ServerName.valueOf(HConstants.HBCK_CODE_NAME,
-1, -1L);
// Configuration key: timeout monitor's timeout, in milliseconds.
public static final String ASSIGNMENT_TIMEOUT = "hbase.master.assignment.timeoutmonitor.timeout";
// Default assignment timeout: 600000 ms (10 minutes).
public static final int DEFAULT_ASSIGNMENT_TIMEOUT_DEFAULT = 600000;
// Configuration key: whether timeout management (TimeoutMonitor/TimerUpdater) is enabled.
public static final String ASSIGNMENT_TIMEOUT_MANAGEMENT = "hbase.assignment.timeout.management";
// Timeout management is off by default.
public static final boolean DEFAULT_ASSIGNMENT_TIMEOUT_MANAGEMENT = false;
// Configuration key: wait time when a region is reported already in transition.
public static final String ALREADY_IN_TRANSITION_WAITTIME
= "hbase.assignment.already.intransition.waittime";
public static final int DEFAULT_ALREADY_IN_TRANSITION_WAITTIME = 60000; // 1 minute
// The master server this manager runs within.
protected final Server server;
// Tracker of online/dead region servers.
private ServerManager serverManager;
// True only when the configured load balancer class is FavoredNodeLoadBalancer
// (set in the constructor from HBASE_MASTER_LOADBALANCER_CLASS).
private boolean shouldAssignRegionsWithFavoredNodes;
// Tracker of the catalog (hbase:meta) location.
private CatalogTracker catalogTracker;
// Monitors regions in transition for timeouts; null when timeout management is disabled.
protected final TimeoutMonitor timeoutMonitor;
// Updates timers for servers in serversInUpdatingTimer; null when timeout
// management is disabled.
private final TimerUpdater timerUpdater;
// Load balancer used to produce assignment plans.
private LoadBalancer balancer;
private final MetricsAssignmentManager metricsAssignmentManager;
private final TableLockManager tableLockManager;
// Counter of opened regions.
private AtomicInteger numRegionsOpened = new AtomicInteger(0);
// Per-region (encoded name) locks serializing operations on the same region.
final private KeyLocker<String> locker = new KeyLocker<String>();
/**
* Map of regions to reopen after the schema of a table is changed. Key -
* encoded region name, value - HRegionInfo
*/
private final Map <String, HRegionInfo> regionsToReopen;
/*
* Maximum times we recurse an assignment/unassignment.
* See below in {@link #assign()} and {@link #unassign()}.
*/
private final int maximumAttempts;
/**
* Map of two merging regions from the region to be created.
*/
private final Map<String, PairOfSameType<HRegionInfo>> mergingRegions
= new HashMap<String, PairOfSameType<HRegionInfo>>();
/**
* The sleep time for which the assignment will wait before retrying in case of hbase:meta assignment
* failure due to lack of availability of region plan
*/
private final long sleepTimeBeforeRetryingMetaAssignment;
/** Plans for region movement. Key is the encoded version of a region name*/
// TODO: When do plans get cleaned out? Ever? In server open and in server
// shutdown processing -- St.Ack
// All access to this Map must be synchronized.
final NavigableMap<String, RegionPlan> regionPlans =
new TreeMap<String, RegionPlan>();
// Tracks table state (enabled/disabled/...) in ZooKeeper; shared, see getZKTable().
private final ZKTable zkTable;
/**
* Contains the server which need to update timer, these servers will be
* handled by {@link TimerUpdater}
*/
private final ConcurrentSkipListSet<ServerName> serversInUpdatingTimer;
// Executor used to submit assignment event handlers.
private final ExecutorService executorService;
// For unit tests, keep track of calls to ClosedRegionHandler
private Map<HRegionInfo, AtomicBoolean> closedRegionHandlerCalled = null;
// For unit tests, keep track of calls to OpenedRegionHandler
private Map<HRegionInfo, AtomicBoolean> openedRegionHandlerCalled = null;
//Thread pool executor service for timeout monitor
private java.util.concurrent.ExecutorService threadPoolExecutorService;
// A bunch of ZK events workers. Each is a single thread executor service
private final java.util.concurrent.ExecutorService zkEventWorkers;
// Event types for which an offline region server is tolerated when handling
// a region transition (see handleRegion()).
private List<EventType> ignoreStatesRSOffline = Arrays.asList(
EventType.RS_ZK_REGION_FAILED_OPEN, EventType.RS_ZK_REGION_CLOSED);
// In-memory states of all regions known to this master.
private final RegionStates regionStates;
// The threshold to use bulk assigning. Using bulk assignment
// only if assigning at least this many regions to at least this
// many servers. If assigning fewer regions to fewer servers,
// bulk assigning may be not as efficient.
private final int bulkAssignThresholdRegions;
private final int bulkAssignThresholdServers;
// Should bulk assignment wait till all regions are assigned,
// or it is timed out? This is useful to measure bulk assignment
// performance, but not needed in most use cases.
private final boolean bulkAssignWaitTillAllAssigned;
/**
* Indicator that AssignmentManager has recovered the region states so
* that ServerShutdownHandler can be fully enabled and re-assign regions
* of dead servers. So that when re-assignment happens, AssignmentManager
* has proper region states.
*
* Protected to ease testing.
*/
protected final AtomicBoolean failoverCleanupDone = new AtomicBoolean(false);
/** Is the TimeOutManagement activated **/
private final boolean tomActivated;
/**
* A map to track the count a region fails to open in a row.
* So that we don't try to open a region forever if the failure is
* unrecoverable. We don't put this information in region states
* because we don't expect this to happen frequently; we don't
* want to copy this information over during each state transition either.
*/
private final ConcurrentHashMap<String, AtomicInteger>
failedOpenTracker = new ConcurrentHashMap<String, AtomicInteger>();
// A flag to indicate if we are using ZK for region assignment
private final boolean useZKForAssignment;
// In case not using ZK for region assignment, region states
// are persisted in meta with a state store
private final RegionStateStore regionStateStore;
/**
* For testing only! Set to true to skip handling of split.
*/
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="MS_SHOULD_BE_FINAL")
public static boolean TEST_SKIP_SPLIT_HANDLING = false;
/** Listeners that are called on assignment events. */
private List<AssignmentListener> listeners = new CopyOnWriteArrayList<AssignmentListener>();
/**
 * Constructs a new assignment manager.
 *
 * @param server the master server this manager runs within
 * @param serverManager tracker of online/dead region servers
 * @param catalogTracker tracker of the catalog (hbase:meta) location
 * @param balancer load balancer used to produce assignment plans
 * @param service executor service used to run assignment event handlers
 * @param metricsMaster master metrics (not referenced in this constructor;
 *   assignment metrics are tracked via MetricsAssignmentManager)
 * @param tableLockManager manager for table locks
 * @throws KeeperException on ZooKeeper errors during initialization
 * @throws IOException on errors initializing the region state store
 */
public AssignmentManager(Server server, ServerManager serverManager,
    CatalogTracker catalogTracker, final LoadBalancer balancer,
    final ExecutorService service, MetricsMaster metricsMaster,
    final TableLockManager tableLockManager) throws KeeperException, IOException {
  super(server.getZooKeeper());
  this.server = server;
  this.serverManager = serverManager;
  this.catalogTracker = catalogTracker;
  this.executorService = service;
  this.regionStateStore = new RegionStateStore(server);
  this.regionsToReopen = Collections.synchronizedMap
      (new HashMap<String, HRegionInfo> ());
  Configuration conf = server.getConfiguration();
  // Only read favored nodes if using the favored nodes load balancer.
  this.shouldAssignRegionsWithFavoredNodes = conf.getClass(
      HConstants.HBASE_MASTER_LOADBALANCER_CLASS, Object.class).equals(
      FavoredNodeLoadBalancer.class);
  this.tomActivated = conf.getBoolean(
      ASSIGNMENT_TIMEOUT_MANAGEMENT, DEFAULT_ASSIGNMENT_TIMEOUT_MANAGEMENT);
  if (tomActivated) {
    // Timeout management enabled: set up timer bookkeeping, the timeout
    // monitor and the timer updater daemon thread.
    this.serversInUpdatingTimer = new ConcurrentSkipListSet<ServerName>();
    this.timeoutMonitor = new TimeoutMonitor(
        conf.getInt("hbase.master.assignment.timeoutmonitor.period", 30000),
        server, serverManager,
        conf.getInt(ASSIGNMENT_TIMEOUT, DEFAULT_ASSIGNMENT_TIMEOUT_DEFAULT));
    this.timerUpdater = new TimerUpdater(conf.getInt(
        "hbase.master.assignment.timerupdater.period", 10000), server);
    Threads.setDaemonThreadRunning(timerUpdater.getThread(),
        server.getServerName() + ".timerUpdater");
  } else {
    this.serversInUpdatingTimer = null;
    this.timeoutMonitor = null;
    this.timerUpdater = null;
  }
  this.zkTable = new ZKTable(this.watcher);
  // This is the max attempts, not retries, so it should be at least 1.
  // Use the local conf, which is the same object as server.getConfiguration().
  this.maximumAttempts = Math.max(1,
      conf.getInt("hbase.assignment.maximum.attempts", 10));
  this.sleepTimeBeforeRetryingMetaAssignment = conf.getLong(
      "hbase.meta.assignment.retry.sleeptime", 1000L);
  this.balancer = balancer;
  int maxThreads = conf.getInt("hbase.assignment.threads.max", 30);
  this.threadPoolExecutorService = Threads.getBoundedCachedThreadPool(
      maxThreads, 60L, TimeUnit.SECONDS, Threads.newDaemonThreadFactory("AM."));
  // RegionStates depends on the state store created above.
  this.regionStates = new RegionStates(server, serverManager, regionStateStore);
  this.bulkAssignWaitTillAllAssigned =
      conf.getBoolean("hbase.bulk.assignment.waittillallassigned", false);
  this.bulkAssignThresholdRegions = conf.getInt("hbase.bulk.assignment.threshold.regions", 7);
  this.bulkAssignThresholdServers = conf.getInt("hbase.bulk.assignment.threshold.servers", 3);
  int workers = conf.getInt("hbase.assignment.zkevent.workers", 20);
  ThreadFactory threadFactory = Threads.newDaemonThreadFactory("AM.ZK.Worker");
  zkEventWorkers = Threads.getBoundedCachedThreadPool(workers, 60L,
      TimeUnit.SECONDS, threadFactory);
  this.tableLockManager = tableLockManager;
  this.metricsAssignmentManager = new MetricsAssignmentManager();
  useZKForAssignment = ConfigUtil.useZKForAssignment(conf);
}
/**
 * Starts the timeout monitor daemon thread. A no-op unless timeout
 * management ({@link #ASSIGNMENT_TIMEOUT_MANAGEMENT}) was enabled when
 * this manager was constructed.
 */
void startTimeOutMonitor() {
  if (!tomActivated) {
    return; // Timeout management disabled; no monitor thread to start.
  }
  Threads.setDaemonThreadRunning(timeoutMonitor.getThread(), server.getServerName()
      + ".timeoutMonitor");
}
/**
 * Registers {@code listener} so it is notified of assignment events.
 * @param listener The AssignmentListener to register
 */
public void registerListener(final AssignmentListener listener) {
  listeners.add(listener);
}
/**
 * Removes {@code listener} from the notification list.
 * @param listener The AssignmentListener to unregister
 * @return true if the listener was registered and has been removed
 */
public boolean unregisterListener(final AssignmentListener listener) {
  return listeners.remove(listener);
}
/**
* @return Instance of ZKTable.
*/
public ZKTable getZKTable() {
// These are 'expensive' to make involving trip to zk ensemble so allow
// sharing. A single instance is created in the constructor and reused.
return this.zkTable;
}
/**
* Returns the in-memory region states tracked by this manager.
*
* This SHOULD not be public. It is public now
* because of some unit tests.
*
* TODO: make it package private and keep RegionStates in the master package
*/
public RegionStates getRegionStates() {
return regionStates;
}
/**
* Used in some tests to mock up region state in meta
* @return the store used to persist region states in meta
*/
@VisibleForTesting
RegionStateStore getRegionStateStore() {
return regionStateStore;
}
/**
 * Builds the plan used to reopen {@code hri}. The plan is constructed from
 * the server currently hosting the region, with the middle constructor
 * argument left null exactly as before.
 * @param hri the region to reopen
 * @return a new RegionPlan for the region
 */
public RegionPlan getRegionReopenPlan(HRegionInfo hri) {
  ServerName currentServer = regionStates.getRegionServerOfRegion(hri);
  return new RegionPlan(hri, null, currentServer);
}
/**
* Add a regionPlan for the specified region.
* Replaces any existing plan for the same region.
* @param encodedName encoded region name, used as the map key
* @param plan the region plan to record
*/
public void addPlan(String encodedName, RegionPlan plan) {
// All access to regionPlans must be synchronized (see field comment).
synchronized (regionPlans) {
regionPlans.put(encodedName, plan);
}
}
/**
* Add a map of region plans.
* Existing plans for the same regions are replaced.
* @param plans map of encoded region name to plan
*/
public void addPlans(Map<String, RegionPlan> plans) {
// All access to regionPlans must be synchronized (see field comment).
synchronized (regionPlans) {
regionPlans.putAll(plans);
}
}
/**
 * Set the list of regions that will be reopened
 * because of an update in table schema
 *
 * @param regions list of regions that should be tracked for reopen
 */
public void setRegionsToReopen(List <HRegionInfo> regions) {
  // Index each region by its encoded name in the reopen-tracking map.
  for (HRegionInfo region : regions) {
    regionsToReopen.put(region.getEncodedName(), region);
  }
}
/**
 * Used by the client to identify if all regions have the schema updates
 *
 * @param tableName table whose alter status is being queried
 * @return Pair of (regions still pending reopen or in transition, total regions)
 * @throws IOException if reading the table's regions from hbase:meta fails
 */
public Pair<Integer, Integer> getReopenStatus(TableName tableName)
    throws IOException {
  List<HRegionInfo> hris =
      MetaReader.getTableRegions(this.server.getCatalogTracker(), tableName, true);
  // Primitive counter: the boxed Integer previously used here autoboxed on
  // every increment in the loop.
  int pending = 0;
  for (HRegionInfo hri : hris) {
    String name = hri.getEncodedName();
    // no lock concurrent access ok: sequential consistency respected.
    if (regionsToReopen.containsKey(name)
        || regionStates.isRegionInTransition(name)) {
      pending++;
    }
  }
  return new Pair<Integer, Integer>(pending, hris.size());
}
/**
* Used by ServerShutdownHandler to make sure AssignmentManager has completed
* the failover cleanup before re-assigning regions of dead servers. So that
* when re-assignment happens, AssignmentManager has proper region states.
* @return true once failover cleanup has completed (see failoverCleanupDone())
*/
public boolean isFailoverCleanupDone() {
return failoverCleanupDone.get();
}
/**
* To avoid racing with AM, external entities may need to lock a region,
* for example, when SSH checks what regions to skip re-assigning.
* @param encodedName encoded region name to lock
* @return the acquired lock; the caller is responsible for unlocking it
*/
public Lock acquireRegionLock(final String encodedName) {
return locker.acquireLock(encodedName);
}
/**
* Now, failover cleanup is completed. Notify server manager to
* process queued up dead servers processing, if any.
*/
void failoverCleanupDone() {
// Flip the flag first so isFailoverCleanupDone() observers see completion
// before queued dead-server processing begins.
failoverCleanupDone.set(true);
serverManager.processQueuedDeadServers();
}
/**
 * Called on startup.
 * Figures whether this is a fresh cluster start or we are joining an extant
 * running cluster, then rebuilds region state and recovers tables stuck in
 * disabling/enabling states.
 * @throws IOException
 * @throws KeeperException
 * @throws InterruptedException
 */
void joinCluster() throws IOException,
    KeeperException, InterruptedException {
  final long joinStartMs = System.currentTimeMillis();
  // Concurrency note: In the below the accesses on regionsInTransition are
  // outside of a synchronization block where usually all accesses to RIT are
  // synchronized. The presumption is that in this case it is safe since this
  // method is being played by a single thread on startup.
  // TODO: Regions that have a null location and are not in regionsInTransitions
  // need to be handled.

  // Scan hbase:meta to build the list of existing regions, servers, and
  // assignments. Servers that have not checked in are presumed dead and are
  // returned together with the regions they were hosting.
  Map<ServerName, List<HRegionInfo>> presumedDeadServers = rebuildUserRegions();
  // Either assigns all user regions (clean server startup) or reconstructs
  // master state and cleans up leftovers from the previous master process.
  boolean failover = processDeadServersAndRegionsInTransition(presumedDeadServers);
  if (!useZKForAssignment) {
    // Not use ZK for assignment any more, remove the ZNode
    ZKUtil.deleteNodeRecursively(watcher, watcher.assignmentZNode);
  }
  recoverTableInDisablingState();
  recoverTableInEnablingState();
  LOG.info("Joined the cluster in " + (System.currentTimeMillis()
      - joinStartMs) + "ms, failover=" + failover);
}
/**
* Process all regions that are in transition in zookeeper and also
* processes the list of dead servers by scanning the META.
* Used by master joining an cluster. If we figure this is a clean cluster
* startup, will assign all user regions.
* @param deadServers
* Map of dead servers and their regions. Can be null.
* @return true if this startup is a failover (regions or RITs found out on
* the cluster), false for a clean cluster startup
* @throws KeeperException
* @throws IOException
* @throws InterruptedException
*/
boolean processDeadServersAndRegionsInTransition(
final Map<ServerName, List<HRegionInfo>> deadServers)
throws KeeperException, IOException, InterruptedException {
List<String> nodes = ZKUtil.listChildrenNoWatch(watcher,
watcher.assignmentZNode);
if (nodes == null && useZKForAssignment) {
String errorMessage = "Failed to get the children from ZK";
server.abort(errorMessage, new IOException(errorMessage));
return true; // Doesn't matter in this case
}
// Failover detection, check 1: any known dead server implies failover.
boolean failover = !serverManager.getDeadServers().isEmpty();
if (failover) {
// This may not be a failover actually, especially if meta is on this master.
if (LOG.isDebugEnabled()) {
LOG.debug("Found dead servers out on cluster " + serverManager.getDeadServers());
}
} else {
// Failover detection, check 2:
// If any one region except meta is assigned, it's a failover.
Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet();
for (Map.Entry<HRegionInfo, ServerName> en : regionStates.getRegionAssignments().entrySet()) {
HRegionInfo hri = en.getKey();
if (!hri.isMetaTable() && onlineServers.contains(en.getValue())) {
LOG.debug("Found " + hri + " out on cluster");
failover = true;
break;
}
}
}
if (!failover && nodes != null) {
// Failover detection, check 3:
// If any one region except meta is in transition, it's a failover.
for (String encodedName : nodes) {
RegionState regionState = regionStates.getRegionState(encodedName);
if (regionState != null && !regionState.getRegion().isMetaRegion()) {
LOG.debug("Found " + regionState + " in RITs");
failover = true;
break;
}
}
}
if (!failover && !useZKForAssignment) {
// Failover detection, check 4 (non-ZK assignment only):
// If any region except meta is in transition on a live server, it's a failover.
Map<String, RegionState> regionsInTransition = regionStates.getRegionsInTransition();
if (!regionsInTransition.isEmpty()) {
Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet();
for (RegionState regionState : regionsInTransition.values()) {
if (!regionState.getRegion().isMetaRegion()
&& onlineServers.contains(regionState.getServerName())) {
LOG.debug("Found " + regionState + " in RITs");
failover = true;
break;
}
}
}
}
if (!failover) {
// Failover detection, check 5:
// If we get here, we have a full cluster restart. It is a failover only
// if there are some HLogs are not split yet. For meta HLogs, they should have
// been split already, if any. We can walk through those queued dead servers,
// if they don't have any HLogs, this restart should be considered as a clean one
Set<ServerName> queuedDeadServers = serverManager.getRequeuedDeadServers().keySet();
if (!queuedDeadServers.isEmpty()) {
Configuration conf = server.getConfiguration();
Path rootdir = FSUtils.getRootDir(conf);
FileSystem fs = rootdir.getFileSystem(conf);
for (ServerName serverName : queuedDeadServers) {
// A leftover log dir (or a *-splitting dir) means unsplit logs remain.
Path logDir = new Path(rootdir, HLogUtil.getHLogDirectoryName(serverName.toString()));
Path splitDir = logDir.suffix(HLog.SPLITTING_EXT);
if (fs.exists(logDir) || fs.exists(splitDir)) {
LOG.debug("Found queued dead server " + serverName);
failover = true;
break;
}
}
if (!failover) {
// We figured that it's not a failover, so no need to
// work on these re-queued dead servers any more.
LOG.info("AM figured that it's not a failover and cleaned up " + queuedDeadServers.size()
+ " queued dead servers");
serverManager.removeRequeuedDeadServers();
}
}
}
Set<TableName> disabledOrDisablingOrEnabling = null;
if (!failover) {
disabledOrDisablingOrEnabling = ZKTable.getDisabledOrDisablingTables(watcher);
disabledOrDisablingOrEnabling.addAll(ZKTable.getEnablingTables(watcher));
// Clean re/start, mark all user regions closed before reassignment
// TODO -Hbase-11319
regionStates.closeAllUserRegions(disabledOrDisablingOrEnabling);
}
// Now region states are restored
regionStateStore.start();
// If we found user regions out on cluster, its a failover.
if (failover) {
LOG.info("Found regions out on cluster or in RIT; presuming failover");
// Process list of dead servers and regions in RIT.
// See HBASE-4580 for more information.
processDeadServersAndRecoverLostRegions(deadServers);
}
if (!failover && useZKForAssignment) {
// Cleanup any existing ZK nodes and start watching
ZKAssign.deleteAllNodes(watcher);
ZKUtil.listChildrenAndWatchForNewChildren(this.watcher, this.watcher.assignmentZNode);
}
// Now we can safely claim failover cleanup completed and enable
// ServerShutdownHandler for further processing. The nodes (below)
// in transition, if any, are for regions not related to those
// dead servers at all, and can be done in parallel to SSH.
failoverCleanupDone();
if (!failover) {
// Fresh cluster startup.
LOG.info("Clean cluster startup. Assigning user regions");
assignAllUserRegions(disabledOrDisablingOrEnabling);
}
return failover;
}
/**
 * If region is up in zk in transition, then do fixup and block and wait until
 * the region is assigned and out of transition. Used on startup for
 * catalog regions.
 * @param hri Region to look for.
 * @return True if we processed a region in transition else false if region
 * was not up in zk in transition.
 * @throws InterruptedException
 * @throws KeeperException
 * @throws IOException
 */
boolean processRegionInTransitionAndBlockUntilAssigned(final HRegionInfo hri)
    throws InterruptedException, KeeperException, IOException {
  final String encodedRegionName = hri.getEncodedName();
  if (!processRegionInTransition(encodedRegionName, hri)) {
    // The region is not in transition; nothing to wait on.
    return false;
  }
  LOG.debug("Waiting on " + HRegionInfo.prettyPrint(encodedRegionName));
  while (!server.isStopped()
      && regionStates.isRegionInTransition(encodedRegionName)) {
    RegionState state = regionStates.getRegionTransitionState(encodedRegionName);
    if (state == null || !serverManager.isServerOnline(state.getServerName())) {
      // The region is not in transition, or not in transition on an online
      // server. Doesn't help to block here any more. Caller need to
      // verify the region is actually assigned.
      break;
    }
    // Brief wait for a state-change notification before re-checking.
    regionStates.waitForUpdate(100);
  }
  return true;
}
/**
* Process failover of new master for region <code>encodedRegionName</code>
* up in zookeeper.
* @param encodedRegionName Region to process failover for.
* @param regionInfo If null we'll go get it from meta table.
* @return True if we processed <code>regionInfo</code> as a RIT.
* @throws KeeperException
* @throws IOException
*/
boolean processRegionInTransition(final String encodedRegionName,
final HRegionInfo regionInfo) throws KeeperException, IOException {
// We need a lock here to ensure that we will not put the same region twice
// It has no reason to be a lock shared with the other operations.
// We can do the lock on the region only, instead of a global lock: what we want to ensure
// is that we don't have two threads working on the same region.
Lock lock = locker.acquireLock(encodedRegionName);
try {
// Read the region's unassigned znode and leave a watch on it.
Stat stat = new Stat();
byte [] data = ZKAssign.getDataAndWatch(watcher, encodedRegionName, stat);
// No znode data: the region is not in transition in ZK.
if (data == null) return false;
RegionTransition rt;
try {
rt = RegionTransition.parseFrom(data);
} catch (DeserializationException e) {
// Unparseable znode data: log and treat as not-in-transition.
LOG.warn("Failed parse znode data", e);
return false;
}
HRegionInfo hri = regionInfo;
if (hri == null) {
// The region info is not passed in. We will try to find the region
// from region states map/meta based on the encoded region name. But we
// may not be able to find it. This is valid for online merge that
// the region may have not been created if the merge is not completed.
// Therefore, it is not in meta at master recovery time.
hri = regionStates.getRegionInfo(rt.getRegionName());
EventType et = rt.getEventType();
if (hri == null && et != EventType.RS_ZK_REGION_MERGING
&& et != EventType.RS_ZK_REQUEST_REGION_MERGE) {
LOG.warn("Couldn't find the region in recovering " + rt);
return false;
}
}
// Hand off to the per-event-type processing, passing the znode version
// observed above so stale updates can be detected.
return processRegionsInTransition(
rt, hri, stat.getVersion());
} finally {
lock.unlock();
}
}
/**
* This call is invoked only (1) master assign meta;
* (2) during failover mode startup, zk assignment node processing.
* The locker is set in the caller. It returns true if the region
* is in transition for sure, false otherwise.
*
* It should be private but it is used by some test too.
* @param rt the region transition read from the znode
* @param regionInfo region being transitioned; may be null for merge events
* (see processRegionInTransition)
* @param expectedVersion znode version observed when the transition was read
*/
boolean processRegionsInTransition(
final RegionTransition rt, final HRegionInfo regionInfo,
final int expectedVersion) throws KeeperException {
EventType et = rt.getEventType();
// Get ServerName. Could not be null.
final ServerName sn = rt.getServerName();
final byte[] regionName = rt.getRegionName();
final String encodedName = HRegionInfo.encodeRegionName(regionName);
final String prettyPrintedRegionName = HRegionInfo.prettyPrint(encodedName);
LOG.info("Processing " + prettyPrintedRegionName + " in state: " + et);
if (regionStates.isRegionInTransition(encodedName)
&& (regionInfo.isMetaRegion() || !useZKForAssignment)) {
LOG.info("Processed region " + prettyPrintedRegionName + " in state: "
+ et + ", does nothing since the region is already in transition "
+ regionStates.getRegionTransitionState(encodedName));
// Just return
return true;
}
if (!serverManager.isServerOnline(sn)) {
// It was transitioning on a dead server, so it's closed now.
// Force to OFFLINE and put it in transition, but not assign it
// since log splitting for the dead server is not done yet.
LOG.debug("RIT " + encodedName + " in state=" + rt.getEventType() +
" was on deadserver; forcing offline");
if (regionStates.isRegionOnline(regionInfo)) {
// Meta could still show the region is assigned to the previous
// server. If that server is online, when we reload the meta, the
// region is put back to online, we need to offline it.
regionStates.regionOffline(regionInfo);
sendRegionClosedNotification(regionInfo);
}
// Put it back in transition so that SSH can re-assign it
regionStates.updateRegionState(regionInfo, State.OFFLINE, sn);
if (regionInfo.isMetaRegion()) {
// If it's meta region, reset the meta location.
// So that master knows the right meta region server.
MetaRegionTracker.setMetaLocation(watcher, sn);
} else {
// No matter the previous server is online or offline,
// we need to reset the last region server of the region.
regionStates.setLastRegionServerOfRegion(sn, encodedName);
// Make sure we know the server is dead.
if (!serverManager.isServerDead(sn)) {
serverManager.expireServer(sn);
}
}
return false;
}
// Dispatch on the transition event type read from ZK.
switch (et) {
case M_ZK_REGION_CLOSING:
// Insert into RIT & resend the query to the region server: may be the previous master
// died before sending the query the first time.
final RegionState rsClosing = regionStates.updateRegionState(rt, State.CLOSING);
this.executorService.submit(
new EventHandler(server, EventType.M_MASTER_RECOVERY) {
@Override
public void process() throws IOException {
// Re-acquire the region lock inside the async handler.
ReentrantLock lock = locker.acquireLock(regionInfo.getEncodedName());
try {
unassign(regionInfo, rsClosing, expectedVersion, null, useZKForAssignment, null);
if (regionStates.isRegionOffline(regionInfo)) {
assign(regionInfo, true);
}
} finally {
lock.unlock();
}
}
});
break;
case RS_ZK_REGION_CLOSED:
case RS_ZK_REGION_FAILED_OPEN:
// Region is closed, insert into RIT and handle it
regionStates.updateRegionState(regionInfo, State.CLOSED, sn);
invokeAssign(regionInfo);
break;
case M_ZK_REGION_OFFLINE:
// Insert in RIT and resend to the regionserver
regionStates.updateRegionState(rt, State.PENDING_OPEN);
final RegionState rsOffline = regionStates.getRegionState(regionInfo);
this.executorService.submit(
new EventHandler(server, EventType.M_MASTER_RECOVERY) {
@Override
public void process() throws IOException {
// Re-acquire the region lock inside the async handler.
ReentrantLock lock = locker.acquireLock(regionInfo.getEncodedName());
try {
RegionPlan plan = new RegionPlan(regionInfo, null, sn);
addPlan(encodedName, plan);
assign(rsOffline, false, false);
} finally {
lock.unlock();
}
}
});
break;
case RS_ZK_REGION_OPENING:
regionStates.updateRegionState(rt, State.OPENING);
break;
case RS_ZK_REGION_OPENED:
// Region is opened, insert into RIT and handle it
// This could be done asynchronously, we would need then to acquire the lock in the
// handler.
regionStates.updateRegionState(rt, State.OPEN);
new OpenedRegionHandler(server, this, regionInfo, sn, expectedVersion).process();
break;
case RS_ZK_REQUEST_REGION_SPLIT:
case RS_ZK_REGION_SPLITTING:
case RS_ZK_REGION_SPLIT:
// Splitting region should be online. We could have skipped it during
// user region rebuilding since we may consider the split is completed.
// Put it in SPLITTING state to avoid complications.
regionStates.regionOnline(regionInfo, sn);
regionStates.updateRegionState(rt, State.SPLITTING);
if (!handleRegionSplitting(
rt, encodedName, prettyPrintedRegionName, sn)) {
deleteSplittingNode(encodedName, sn);
}
break;
case RS_ZK_REQUEST_REGION_MERGE:
case RS_ZK_REGION_MERGING:
case RS_ZK_REGION_MERGED:
if (!handleRegionMerging(
rt, encodedName, prettyPrintedRegionName, sn)) {
deleteMergingNode(encodedName, sn);
}
break;
default:
throw new IllegalStateException("Received region in state:" + et + " is not valid.");
}
LOG.info("Processed region " + prettyPrintedRegionName + " in state "
+ et + ", on " + (serverManager.isServerOnline(sn) ? "" : "dead ")
+ "server: " + sn);
return true;
}
/**
 * When a region is closed, it should be removed from the regionsToReopen
 * @param hri HRegionInfo of the region which was closed
 */
public void removeClosedRegion(HRegionInfo hri) {
  HRegionInfo removed = regionsToReopen.remove(hri.getEncodedName());
  if (removed != null) {
    LOG.debug("Removed region from reopening regions because it was closed");
  }
}
/**
* Handles various states an unassigned node can be in.
* <p>
* Method is called when a state change is suspected for an unassigned node.
* <p>
* This deals with skipped transitions (we got a CLOSED but didn't see CLOSING
* yet).
* @param rt
* @param expectedVersion
*/
void handleRegion(final RegionTransition rt, int expectedVersion) {
if (rt == null) {
LOG.warn("Unexpected NULL input for RegionTransition rt");
return;
}
final ServerName sn = rt.getServerName();
// Check if this is a special HBCK transition
if (sn.equals(HBCK_CODE_SERVERNAME)) {
handleHBCK(rt);
return;
}
final long createTime = rt.getCreateTime();
final byte[] regionName = rt.getRegionName();
String encodedName = HRegionInfo.encodeRegionName(regionName);
String prettyPrintedRegionName = HRegionInfo.prettyPrint(encodedName);
// Verify this is a known server
if (!serverManager.isServerOnline(sn)
&& !ignoreStatesRSOffline.contains(rt.getEventType())) {
LOG.warn("Attempted to handle region transition for server but " +
"it is not online: " + prettyPrintedRegionName + ", " + rt);
return;
}
RegionState regionState =
regionStates.getRegionState(encodedName);
long startTime = System.currentTimeMillis();
if (LOG.isDebugEnabled()) {
boolean lateEvent = createTime < (startTime - 15000);
LOG.debug("Handling " + rt.getEventType() +
", server=" + sn + ", region=" +
(prettyPrintedRegionName == null ? "null" : prettyPrintedRegionName) +
(lateEvent ? ", which is more than 15 seconds late" : "") +
", current_state=" + regionState);
}
// We don't do anything for this event,
// so separate it out, no need to lock/unlock anything
if (rt.getEventType() == EventType.M_ZK_REGION_OFFLINE) {
return;
}
// We need a lock on the region as we could update it
Lock lock = locker.acquireLock(encodedName);
try {
RegionState latestState =
regionStates.getRegionState(encodedName);
if ((regionState == null && latestState != null)
|| (regionState != null && latestState == null)
|| (regionState != null && latestState != null
&& latestState.getState() != regionState.getState())) {
LOG.warn("Region state changed from " + regionState + " to "
+ latestState + ", while acquiring lock");
}
long waitedTime = System.currentTimeMillis() - startTime;
if (waitedTime > 5000) {
LOG.warn("Took " + waitedTime + "ms to acquire the lock");
}
regionState = latestState;
switch (rt.getEventType()) {
case RS_ZK_REQUEST_REGION_SPLIT:
case RS_ZK_REGION_SPLITTING:
case RS_ZK_REGION_SPLIT:
if (!handleRegionSplitting(
rt, encodedName, prettyPrintedRegionName, sn)) {
deleteSplittingNode(encodedName, sn);
}
break;
case RS_ZK_REQUEST_REGION_MERGE:
case RS_ZK_REGION_MERGING:
case RS_ZK_REGION_MERGED:
// Merged region is a new region, we can't find it in the region states now.
// However, the two merging regions are not new. They should be in state for merging.
if (!handleRegionMerging(
rt, encodedName, prettyPrintedRegionName, sn)) {
deleteMergingNode(encodedName, sn);
}
break;
case M_ZK_REGION_CLOSING:
// Should see CLOSING after we have asked it to CLOSE or additional
// times after already being in state of CLOSING
if (regionState == null
|| !regionState.isPendingCloseOrClosingOnServer(sn)) {
LOG.warn("Received CLOSING for " + prettyPrintedRegionName
+ " from " + sn + " but the region isn't PENDING_CLOSE/CLOSING here: "
+ regionStates.getRegionState(encodedName));
return;
}
// Transition to CLOSING (or update stamp if already CLOSING)
regionStates.updateRegionState(rt, State.CLOSING);
break;
case RS_ZK_REGION_CLOSED:
// Should see CLOSED after CLOSING but possible after PENDING_CLOSE
if (regionState == null
|| !regionState.isPendingCloseOrClosingOnServer(sn)) {
LOG.warn("Received CLOSED for " + prettyPrintedRegionName
+ " from " + sn + " but the region isn't PENDING_CLOSE/CLOSING here: "
+ regionStates.getRegionState(encodedName));
return;
}
// Handle CLOSED by assigning elsewhere or stopping if a disable
// If we got here all is good. Need to update RegionState -- else
// what follows will fail because not in expected state.
new ClosedRegionHandler(server, this, regionState.getRegion()).process();
updateClosedRegionHandlerTracker(regionState.getRegion());
break;
case RS_ZK_REGION_FAILED_OPEN:
if (regionState == null
|| !regionState.isPendingOpenOrOpeningOnServer(sn)) {
LOG.warn("Received FAILED_OPEN for " + prettyPrintedRegionName
+ " from " + sn + " but the region isn't PENDING_OPEN/OPENING here: "
+ regionStates.getRegionState(encodedName));
return;
}
AtomicInteger failedOpenCount = failedOpenTracker.get(encodedName);
if (failedOpenCount == null) {
failedOpenCount = new AtomicInteger();
// No need to use putIfAbsent, or extra synchronization since
// this whole handleRegion block is locked on the encoded region
// name, and failedOpenTracker is updated only in this block
failedOpenTracker.put(encodedName, failedOpenCount);
}
if (failedOpenCount.incrementAndGet() >= maximumAttempts) {
regionStates.updateRegionState(rt, State.FAILED_OPEN);
// remove the tracking info to save memory, also reset
// the count for next open initiative
failedOpenTracker.remove(encodedName);
} else {
// Handle this the same as if it were opened and then closed.
regionState = regionStates.updateRegionState(rt, State.CLOSED);
if (regionState != null) {
// When there are more than one region server a new RS is selected as the
// destination and the same is updated in the regionplan. (HBASE-5546)
try {
getRegionPlan(regionState.getRegion(), sn, true);
new ClosedRegionHandler(server, this, regionState.getRegion()).process();
} catch (HBaseIOException e) {
LOG.warn("Failed to get region plan", e);
}
}
}
break;
case RS_ZK_REGION_OPENING:
// Should see OPENING after we have asked it to OPEN or additional
// times after already being in state of OPENING
if (regionState == null
|| !regionState.isPendingOpenOrOpeningOnServer(sn)) {
LOG.warn("Received OPENING for " + prettyPrintedRegionName
+ " from " + sn + " but the region isn't PENDING_OPEN/OPENING here: "
+ regionStates.getRegionState(encodedName));
return;
}
// Transition to OPENING (or update stamp if already OPENING)
regionStates.updateRegionState(rt, State.OPENING);
break;
case RS_ZK_REGION_OPENED:
// Should see OPENED after OPENING but possible after PENDING_OPEN.
if (regionState == null
|| !regionState.isPendingOpenOrOpeningOnServer(sn)) {
LOG.warn("Received OPENED for " + prettyPrintedRegionName
+ " from " + sn + " but the region isn't PENDING_OPEN/OPENING here: "
+ regionStates.getRegionState(encodedName));
if (regionState != null) {
// Close it without updating the internal region states,
// so as not to create double assignments in unlucky scenarios
// mentioned in OpenRegionHandler#process
unassign(regionState.getRegion(), null, -1, null, false, sn);
}
return;
}
// Handle OPENED by removing from transition and deleted zk node
regionState = regionStates.updateRegionState(rt, State.OPEN);
if (regionState != null) {
failedOpenTracker.remove(encodedName); // reset the count, if any
new OpenedRegionHandler(
server, this, regionState.getRegion(), sn, expectedVersion).process();
updateOpenedRegionHandlerTracker(regionState.getRegion());
}
break;
default:
throw new IllegalStateException("Received event is not valid.");
}
} finally {
lock.unlock();
}
}
//For unit tests only
/**
 * Reports whether the closed-region handler has run for the given region
 * since the last time this method was asked.
 *
 * @param hri the region to check
 * @return true exactly once per handler invocation; false if the handler
 *         has not run (or the flag was already consumed)
 */
boolean wasClosedHandlerCalled(HRegionInfo hri) {
  AtomicBoolean handlerFlag = closedRegionHandlerCalled.get(hri);
  if (handlerFlag == null) {
    return false;
  }
  // compareAndSet so tests never see stale values: the flag is consumed on
  // the first successful read and stays false until the handler sets it again.
  return handlerFlag.compareAndSet(true, false);
}
//For unit tests only
/**
 * Reports whether the opened-region handler has run for the given region
 * since the last time this method was asked.
 *
 * @param hri the region to check
 * @return true exactly once per handler invocation; false if the handler
 *         has not run (or the flag was already consumed)
 */
boolean wasOpenedHandlerCalled(HRegionInfo hri) {
  AtomicBoolean handlerFlag = openedRegionHandlerCalled.get(hri);
  if (handlerFlag == null) {
    return false;
  }
  // compareAndSet so tests never see stale values: the flag is consumed on
  // the first successful read and stays false until the handler sets it again.
  return handlerFlag.compareAndSet(true, false);
}
//For unit tests only
// Installs the (normally null) handler-call trackers so that
// wasClosedHandlerCalled/wasOpenedHandlerCalled can observe handler runs.
// Production code never calls this, so the update*Tracker methods below
// are no-ops outside of tests.
void initializeHandlerTrackers() {
  closedRegionHandlerCalled = new HashMap<HRegionInfo, AtomicBoolean>();
  openedRegionHandlerCalled = new HashMap<HRegionInfo, AtomicBoolean>();
}
/**
 * Records that the closed-region handler ran for {@code hri}.
 * Only has an effect when a unit test has installed the tracker map via
 * {@code initializeHandlerTrackers()}; otherwise this is a no-op.
 */
void updateClosedRegionHandlerTracker(HRegionInfo hri) {
  if (closedRegionHandlerCalled == null) {
    return; // tracker map is only non-null in unit tests
  }
  closedRegionHandlerCalled.put(hri, new AtomicBoolean(true));
}
/**
 * Records that the opened-region handler ran for {@code hri}.
 * Only has an effect when a unit test has installed the tracker map via
 * {@code initializeHandlerTrackers()}; otherwise this is a no-op.
 */
void updateOpenedRegionHandlerTracker(HRegionInfo hri) {
  if (openedRegionHandlerCalled == null) {
    return; // tracker map is only non-null in unit tests
  }
  openedRegionHandlerCalled.put(hri, new AtomicBoolean(true));
}
// TODO: processFavoredNodes might throw an exception, for e.g., if the
// meta could not be contacted/updated. We need to see how seriously to treat
// this problem as. Should we fail the current assignment. We should be able
// to recover from this problem eventually (if the meta couldn't be updated
// things should work normally and eventually get fixed up).
/**
 * Collects the favored-node list for each given region from the balancer
 * and writes the whole mapping into the meta table.
 *
 * @param regions regions whose favored-node info should be persisted
 * @throws IOException if updating meta fails
 */
void processFavoredNodes(List<HRegionInfo> regions) throws IOException {
  if (!shouldAssignRegionsWithFavoredNodes) {
    return; // favored-node assignment is disabled; nothing to persist
  }
  // Ask the balancer for each region's favored nodes, then push the whole
  // mapping to meta in a single call.
  FavoredNodeLoadBalancer favoredBalancer = (FavoredNodeLoadBalancer) this.balancer;
  Map<HRegionInfo, List<ServerName>> favoredNodesByRegion =
    new HashMap<HRegionInfo, List<ServerName>>();
  for (HRegionInfo region : regions) {
    favoredNodesByRegion.put(region, favoredBalancer.getFavoredNodes(region));
  }
  FavoredNodeAssignmentHelper.updateMetaWithFavoredNodesInfo(favoredNodesByRegion, catalogTracker);
}
/**
 * Handle a ZK unassigned node transition triggered by HBCK repair tool.
 * <p>
 * This is handled in a separate code path because it breaks the normal rules:
 * HBCK places the znode in M_ZK_REGION_OFFLINE directly, so we assign without
 * performing the usual state transitions. Any other event type is logged and
 * ignored.
 * @param rt the region transition read from the unassigned znode
 */
private void handleHBCK(RegionTransition rt) {
  String encodedName = HRegionInfo.encodeRegionName(rt.getRegionName());
  LOG.info("Handling HBCK triggered transition=" + rt.getEventType() +
    ", server=" + rt.getServerName() + ", region=" +
    HRegionInfo.prettyPrint(encodedName));
  RegionState regionState = regionStates.getRegionTransitionState(encodedName);
  switch (rt.getEventType()) {
    case M_ZK_REGION_OFFLINE:
      HRegionInfo regionInfo;
      if (regionState != null) {
        regionInfo = regionState.getRegion();
      } else {
        // Region is not in transition in memory; fall back to reading its
        // HRegionInfo from the meta table.
        try {
          byte [] name = rt.getRegionName();
          Pair<HRegionInfo, ServerName> p = MetaReader.getRegion(catalogTracker, name);
          regionInfo = p.getFirst();
        } catch (IOException e) {
          // Best-effort repair path: if meta is unreachable, give up on this
          // event rather than aborting the master.
          LOG.info("Exception reading hbase:meta doing HBCK repair operation", e);
          return;
        }
      }
      LOG.info("HBCK repair is triggering assignment of region=" +
        regionInfo.getRegionNameAsString());
      // trigger assign, node is already in OFFLINE so don't need to update ZK
      assign(regionInfo, false);
      break;
    default:
      LOG.warn("Received unexpected region state from HBCK: " + rt.toString());
      break;
  }
}
// ZooKeeper events
/**
 * New unassigned node has been created.
 *
 * <p>This happens when an RS begins the OPENING or CLOSING of a region by
 * creating an unassigned node.
 *
 * <p>When this happens we must:
 * <ol>
 * <li>Watch the node for further events</li>
 * <li>Read and handle the state in the node</li>
 * </ol>
 *
 * <p>Delegates to {@code handleAssignmentEvent}, which re-watches the node
 * and dispatches the transition on a per-region worker.
 */
@Override
public void nodeCreated(String path) {
  handleAssignmentEvent(path);
}
/**
 * Existing unassigned node has had data changed.
 *
 * <p>This happens when an RS transitions from OFFLINE to OPENING, or between
 * OPENING/OPENED and CLOSING/CLOSED.
 *
 * <p>When this happens we must:
 * <ol>
 * <li>Watch the node for further events</li>
 * <li>Read and handle the state in the node</li>
 * </ol>
 *
 * <p>Delegates to {@code handleAssignmentEvent}, which re-watches the node
 * and dispatches the transition on a per-region worker.
 */
@Override
public void nodeDataChanged(String path) {
  handleAssignmentEvent(path);
}
// We don't want to have two events on the same region managed simultaneously.
// For this reason, we need to wait if an event on the same region is currently in progress.
// So we track the region names of the events in progress, and we keep a waiting list.
// Guarded by synchronized (regionsInProgress) in zkEventWorkersSubmit.
private final Set<String> regionsInProgress = new HashSet<String>();
// In a LinkedHashMultimap, the put order is kept when we retrieve the collection back. We need
// this as we want the events to be managed in the same order as we received them.
// Guarded by synchronized (zkEventWorkerWaitingList).
private final LinkedHashMultimap <String, RegionRunnable>
  zkEventWorkerWaitingList = LinkedHashMultimap.create();
/**
 * A specific runnable that works only on a region. The region name is used
 * as the serialization key: only one RegionRunnable per region runs at a time.
 */
private interface RegionRunnable extends Runnable{
  /**
   * @return - the name of the region it works on.
   */
  String getRegionName();
}
/**
 * Submit a task, ensuring that there is only one task at a time that working on a given region.
 * Order is respected.
 *
 * <p>If no task for the same region is running, the task is submitted to the
 * worker pool immediately and the region is marked in progress. Otherwise the
 * task is parked in {@code zkEventWorkerWaitingList}; when the running task
 * finishes, the oldest waiting task for that region is resubmitted.
 *
 * @param regRunnable the region-scoped task to run
 */
protected void zkEventWorkersSubmit(final RegionRunnable regRunnable) {
  synchronized (regionsInProgress) {
    // If we're there is already a task with this region, we add it to the
    // waiting list and return.
    if (regionsInProgress.contains(regRunnable.getRegionName())) {
      // Lock ordering: regionsInProgress is always taken before
      // zkEventWorkerWaitingList, here and in the completion path below.
      synchronized (zkEventWorkerWaitingList){
        zkEventWorkerWaitingList.put(regRunnable.getRegionName(), regRunnable);
      }
      return;
    }
    // No event in progress on this region => we can submit a new task immediately.
    regionsInProgress.add(regRunnable.getRegionName());
    zkEventWorkers.submit(new Runnable() {
      @Override
      public void run() {
        try {
          regRunnable.run();
        } finally {
          // now that we have finished, let's see if there is an event for the same region in the
          // waiting list. If it's the case, we can now submit it to the pool.
          synchronized (regionsInProgress) {
            regionsInProgress.remove(regRunnable.getRegionName());
            synchronized (zkEventWorkerWaitingList) {
              java.util.Set<RegionRunnable> waiting = zkEventWorkerWaitingList.get(
                regRunnable.getRegionName());
              if (!waiting.isEmpty()) {
                // We want the first object only. The only way to get it is through an iterator.
                RegionRunnable toSubmit = waiting.iterator().next();
                zkEventWorkerWaitingList.remove(toSubmit.getRegionName(), toSubmit);
                // Recursive call is safe: the region was just removed from
                // regionsInProgress, so this takes the immediate-submit path.
                zkEventWorkersSubmit(toSubmit);
              }
            }
          }
        }
      }
    });
  }
}
/**
 * Existing unassigned node has been deleted.
 *
 * <p>Runs on a per-region worker (via {@code zkEventWorkersSubmit}) under the
 * region lock. If the region just finished opening or splitting on an online
 * server, it is marked online in memory; if it is a freshly merged region,
 * the two parent regions are onlined instead. Regions of disabling/disabled
 * tables are unassigned again after being onlined.
 */
@Override
public void nodeDeleted(final String path) {
  if (path.startsWith(watcher.assignmentZNode)) {
    final String regionName = ZKAssign.getRegionName(watcher, path);
    zkEventWorkersSubmit(new RegionRunnable() {
      @Override
      public String getRegionName() {
        return regionName;
      }
      @Override
      public void run() {
        Lock lock = locker.acquireLock(regionName);
        try {
          RegionState rs = regionStates.getRegionTransitionState(regionName);
          if (rs == null) {
            rs = regionStates.getRegionState(regionName);
            if (rs == null || !rs.isMergingNew()) {
              // MergingNew is an offline state
              return;
            }
          }
          HRegionInfo regionInfo = rs.getRegion();
          String regionNameStr = regionInfo.getRegionNameAsString();
          LOG.debug("Znode " + regionNameStr + " deleted, state: " + rs);
          boolean disabled = getZKTable().isDisablingOrDisabledTable(regionInfo.getTable());
          ServerName serverName = rs.getServerName();
          if (serverManager.isServerOnline(serverName)) {
            if (rs.isOnServer(serverName)
                && (rs.isOpened() || rs.isSplitting())) {
              regionOnline(regionInfo, serverName);
              if (disabled) {
                // if server is offline, no hurt to unassign again
                LOG.info("Opened " + regionNameStr
                  + "but this table is disabled, triggering close of region");
                unassign(regionInfo);
              }
            } else if (rs.isMergingNew()) {
              // Znode of a merged region was deleted: online the two merging
              // parents (the merged child is not in region states yet).
              synchronized (regionStates) {
                String p = regionInfo.getEncodedName();
                PairOfSameType<HRegionInfo> regions = mergingRegions.get(p);
                if (regions != null) {
                  onlineMergingRegion(disabled, regions.getFirst(), serverName);
                  onlineMergingRegion(disabled, regions.getSecond(), serverName);
                }
              }
            }
          }
        } finally {
          lock.unlock();
        }
      }
      // Onlines one of the two merging parent regions if it is still in
      // MERGING state on the given server; unassigns it again if its table
      // is disabling/disabled.
      private void onlineMergingRegion(boolean disabled,
          final HRegionInfo hri, final ServerName serverName) {
        RegionState regionState = regionStates.getRegionState(hri);
        if (regionState != null && regionState.isMerging()
            && regionState.isOnServer(serverName)) {
          regionOnline(regionState.getRegion(), serverName);
          if (disabled) {
            unassign(hri);
          }
        }
      }
    });
  }
}
/**
 * New unassigned node has been created.
 *
 * <p>This happens when an RS begins the OPENING, SPLITTING or CLOSING of a
 * region by creating a znode.
 *
 * <p>When this happens we must:
 * <ol>
 * <li>Watch the node for further children changed events</li>
 * <li>Watch all new children for changed events</li>
 * </ol>
 */
@Override
public void nodeChildrenChanged(String path) {
  if (path.equals(watcher.assignmentZNode)) {
    // Not region-scoped, so submitted straight to the pool rather than
    // through zkEventWorkersSubmit.
    zkEventWorkers.submit(new Runnable() {
      @Override
      public void run() {
        try {
          // Just make sure we see the changes for the new znodes
          List<String> children =
            ZKUtil.listChildrenAndWatchForNewChildren(
              watcher, watcher.assignmentZNode);
          if (children != null) {
            Stat stat = new Stat();
            for (String child : children) {
              // if region is in transition, we already have a watch
              // on it, so no need to watch it again. So, as I know for now,
              // this is needed to watch splitting nodes only.
              if (!regionStates.isRegionInTransition(child)) {
                ZKAssign.getDataAndWatch(watcher, child, stat);
              }
            }
          }
        } catch (KeeperException e) {
          // Losing a watch means we could miss assignment events; abort
          // rather than run with inconsistent state.
          server.abort("Unexpected ZK exception reading unassigned children", e);
        }
      }
    });
  }
}
/**
 * Marks the region as online. Removes it from regions in transition and
 * updates the in-memory assignment information.
 * <p>
 * Used when a region has been successfully opened on a region server.
 * Convenience overload that passes {@link HConstants#NO_SEQNUM} as the
 * open sequence number.
 * @param regionInfo the region that opened
 * @param sn the server it opened on
 */
void regionOnline(HRegionInfo regionInfo, ServerName sn) {
  regionOnline(regionInfo, sn, HConstants.NO_SEQNUM);
}
/**
 * Marks the region as online on the given server with the given open
 * sequence number: updates region states, drops any region plan, updates
 * the balancer and notifies listeners.
 * @param regionInfo the region that opened
 * @param sn the server it opened on
 * @param openSeqNum the sequence id the region opened at
 */
void regionOnline(HRegionInfo regionInfo, ServerName sn, long openSeqNum) {
  numRegionsOpened.incrementAndGet();
  regionStates.regionOnline(regionInfo, sn, openSeqNum);
  // Remove plan if one.
  clearRegionPlan(regionInfo);
  // Add the server to serversInUpdatingTimer
  addToServersInUpdatingTimer(sn);
  balancer.regionOnline(regionInfo, sn);
  // Tell our listeners that a region was opened
  sendRegionOpenedNotification(regionInfo, sn);
}
/**
 * Pass the assignment event to a worker for processing.
 * Each worker is a single thread executor service. The reason
 * for just one thread is to make sure all events for a given
 * region are processed in order.
 *
 * <p>Reads the unassigned znode (re-establishing the watch), parses the
 * region transition and hands it to {@code handleRegion} with the znode's
 * version for CAS-style updates.
 *
 * @param path the znode path of the assignment event
 */
private void handleAssignmentEvent(final String path) {
  if (path.startsWith(watcher.assignmentZNode)) {
    final String regionName = ZKAssign.getRegionName(watcher, path);
    zkEventWorkersSubmit(new RegionRunnable() {
      @Override
      public String getRegionName() {
        return regionName;
      }
      @Override
      public void run() {
        try {
          Stat stat = new Stat();
          byte [] data = ZKAssign.getDataAndWatch(watcher, path, stat);
          // Node already deleted: nothing to process (nodeDeleted handles it).
          if (data == null) return;
          RegionTransition rt = RegionTransition.parseFrom(data);
          handleRegion(rt, stat.getVersion());
        } catch (KeeperException e) {
          server.abort("Unexpected ZK exception reading unassigned node data", e);
        } catch (DeserializationException e) {
          server.abort("Unexpected exception deserializing node data", e);
        }
      }
    });
  }
}
/**
 * Add the server to the set serversInUpdatingTimer, then {@link TimerUpdater}
 * will update timers for this server in background.
 * No-op unless timeout monitoring ({@code tomActivated}) is enabled.
 * @param sn the server whose in-transition regions should have timers touched
 */
private void addToServersInUpdatingTimer(final ServerName sn) {
  if (tomActivated){
    this.serversInUpdatingTimer.add(sn);
  }
}
/**
 * Touch timers for all regions in transition that have the passed
 * <code>sn</code> in common.
 * Call this method whenever a server checks in. Doing so helps the case where
 * a new regionserver has joined the cluster and its been given 1k regions to
 * open. If this method is tickled every time the region reports in a
 * successful open then the 1k-th region won't be timed out just because its
 * sitting behind the open of 999 other regions. This method is NOT used
 * as part of bulk assign -- there we have a different mechanism for extending
 * the regions in transition timer (we turn it off temporarily -- because
 * there is no regionplan involved when bulk assigning.
 * @param sn the destination server whose pending regions get fresh timestamps
 */
private void updateTimers(final ServerName sn) {
  // Only meaningful when timeout monitoring is on.
  Preconditions.checkState(tomActivated);
  if (sn == null) return;
  // This loop could be expensive.
  // First make a copy of current regionPlan rather than hold sync while
  // looping because holding sync can cause deadlock. Its ok in this loop
  // if the Map we're going against is a little stale
  List<Map.Entry<String, RegionPlan>> rps;
  synchronized(this.regionPlans) {
    rps = new ArrayList<Map.Entry<String, RegionPlan>>(regionPlans.entrySet());
  }
  for (Map.Entry<String, RegionPlan> e : rps) {
    if (e.getValue() != null && e.getKey() != null && sn.equals(e.getValue().getDestination())) {
      RegionState regionState = regionStates.getRegionTransitionState(e.getKey());
      if (regionState != null) {
        // Reset the RIT timer so this region isn't timed out while queued
        // behind other opens on the same server.
        regionState.updateTimestampToNow();
      }
    }
  }
}
/**
 * Marks the region as offline. Removes it from regions in transition and
 * removes in-memory assignment information.
 * <p>
 * Used when a region has been closed and should remain closed.
 * Convenience overload with no destination state.
 * @param regionInfo the region to take offline
 */
public void regionOffline(final HRegionInfo regionInfo) {
  regionOffline(regionInfo, null);
}
/**
 * Offlines a region of a disabling/disabled table: the region must not be
 * reassigned, so (when ZK assignment is in use) its CLOSED/OFFLINE znode is
 * simply deleted before the region is marked offline in memory.
 * @param regionInfo the region being disabled
 */
public void offlineDisabledRegion(HRegionInfo regionInfo) {
  if (useZKForAssignment) {
    // Disabling so should not be reassigned, just delete the CLOSED node
    LOG.debug("Table being disabled so deleting ZK node and removing from " +
      "regions in transition, skipping assignment of region " +
      regionInfo.getRegionNameAsString());
    String encodedName = regionInfo.getEncodedName();
    deleteNodeInStates(encodedName, "closed", null,
      EventType.RS_ZK_REGION_CLOSED, EventType.M_ZK_REGION_OFFLINE);
  }
  regionOffline(regionInfo);
}
// Assignment methods
/**
 * Assigns the specified region.
 * <p>
 * If a RegionPlan is available with a valid destination then it will be used
 * to determine what server region is assigned to. If no RegionPlan is
 * available, region will be assigned to a random available server.
 * <p>
 * Updates the RegionState and sends the OPEN RPC.
 * <p>
 * This will only succeed if the region is in transition and in a CLOSED or
 * OFFLINE state or not in transition (in-memory not zk), and of course, the
 * chosen server is up and running (It may have just crashed!). If the
 * in-memory checks pass, the zk node is forced to OFFLINE before assigning.
 * <p>
 * Convenience overload: does not force a new region plan.
 *
 * @param region server to be assigned
 * @param setOfflineInZK whether ZK node should be created/transitioned to an
 *                       OFFLINE state before assigning the region
 */
public void assign(HRegionInfo region, boolean setOfflineInZK) {
  assign(region, setOfflineInZK, false);
}
/**
 * Assigns the specified region, optionally discarding any existing region
 * plan and picking a new destination.
 * <p>
 * Use care with forceNewPlan. It could cause double assignment.
 *
 * @param region the region to assign
 * @param setOfflineInZK whether to force the znode OFFLINE before assigning
 * @param forceNewPlan whether to discard any cached plan and choose a new
 *                     destination server
 */
public void assign(HRegionInfo region,
    boolean setOfflineInZK, boolean forceNewPlan) {
  // Regions of disabling/disabled tables that are in RIT must not be assigned.
  if (isDisabledorDisablingRegionInRIT(region)) {
    return;
  }
  if (this.serverManager.isClusterShutdown()) {
    LOG.info("Cluster shutdown is set; skipping assign of " +
      region.getRegionNameAsString());
    return;
  }
  String encodedName = region.getEncodedName();
  // All state changes for a region happen under its per-region lock.
  Lock lock = locker.acquireLock(encodedName);
  try {
    RegionState state = forceRegionStateToOffline(region, forceNewPlan);
    if (state != null) {
      if (regionStates.wasRegionOnDeadServer(encodedName)) {
        // Let server-shutdown handling reassign it once the dead server
        // has been processed; assigning now could double-assign.
        LOG.info("Skip assigning " + region.getRegionNameAsString()
          + ", it's host " + regionStates.getLastRegionServerOfRegion(encodedName)
          + " is dead but not processed yet");
        return;
      }
      assign(state, setOfflineInZK && useZKForAssignment, forceNewPlan);
    }
  } finally {
    lock.unlock();
  }
}
/**
 * Bulk assign regions to <code>destination</code>.
 *
 * <p>High-level flow: (1) lock all regions and force them OFFLINE, setting
 * their znodes OFFLINE asynchronously; (2) wait for all znode callbacks;
 * (3) send one bulk OPEN RPC to the destination, retrying on transient
 * failures; (4) any region that could not be prepared or opened is
 * reassigned individually afterwards.
 *
 * @param destination server to assign the regions to
 * @param regions Regions to assign.
 * @return true if successful (failed regions are handed off to individual
 *         assignment); false if the destination or this master went away
 */
boolean assign(final ServerName destination, final List<HRegionInfo> regions) {
  long startTime = EnvironmentEdgeManager.currentTimeMillis();
  try {
    int regionCount = regions.size();
    if (regionCount == 0) {
      return true;
    }
    LOG.debug("Assigning " + regionCount + " region(s) to " + destination.toString());
    Set<String> encodedNames = new HashSet<String>(regionCount);
    for (HRegionInfo region : regions) {
      encodedNames.add(region.getEncodedName());
    }
    List<HRegionInfo> failedToOpenRegions = new ArrayList<HRegionInfo>();
    // Take all the region locks up front; released in the finally below
    // (or early for regions excluded from the bulk assign).
    Map<String, Lock> locks = locker.acquireLocks(encodedNames);
    try {
      AtomicInteger counter = new AtomicInteger(0);
      Map<String, Integer> offlineNodesVersions = new ConcurrentHashMap<String, Integer>();
      // Callback counts completed async OFFLINE-znode operations and records
      // each znode's version for the OPEN RPC.
      OfflineCallback cb = new OfflineCallback(
        watcher, destination, counter, offlineNodesVersions);
      Map<String, RegionPlan> plans = new HashMap<String, RegionPlan>(regions.size());
      List<RegionState> states = new ArrayList<RegionState>(regions.size());
      for (HRegionInfo region : regions) {
        String encodedName = region.getEncodedName();
        if (!isDisabledorDisablingRegionInRIT(region)) {
          RegionState state = forceRegionStateToOffline(region, false);
          boolean onDeadServer = false;
          if (state != null) {
            if (regionStates.wasRegionOnDeadServer(encodedName)) {
              LOG.info("Skip assigning " + region.getRegionNameAsString()
                + ", it's host " + regionStates.getLastRegionServerOfRegion(encodedName)
                + " is dead but not processed yet");
              onDeadServer = true;
            } else if (!useZKForAssignment
                || asyncSetOfflineInZooKeeper(state, cb, destination)) {
              RegionPlan plan = new RegionPlan(region, state.getServerName(), destination);
              plans.put(encodedName, plan);
              states.add(state);
              continue;
            }
          }
          // Reassign if the region wasn't on a dead server
          if (!onDeadServer) {
            LOG.info("failed to force region state to offline or "
              + "failed to set it offline in ZK, will reassign later: " + region);
            failedToOpenRegions.add(region); // assign individually later
          }
        }
        // Release the lock, this region is excluded from bulk assign because
        // we can't update its state, or set its znode to offline.
        Lock lock = locks.remove(encodedName);
        lock.unlock();
      }
      if (useZKForAssignment) {
        // Wait until all unassigned nodes have been put up and watchers set.
        int total = states.size();
        for (int oldCounter = 0; !server.isStopped();) {
          int count = counter.get();
          if (oldCounter != count) {
            LOG.info(destination.toString() + " unassigned znodes=" + count + " of total="
              + total);
            oldCounter = count;
          }
          if (count >= total) break;
          Threads.sleep(5);
        }
      }
      if (server.isStopped()) {
        return false;
      }
      // Add region plans, so we can updateTimers when one region is opened so
      // that unnecessary timeout on RIT is reduced.
      this.addPlans(plans);
      List<Triple<HRegionInfo, Integer, List<ServerName>>> regionOpenInfos =
        new ArrayList<Triple<HRegionInfo, Integer, List<ServerName>>>(states.size());
      for (RegionState state: states) {
        HRegionInfo region = state.getRegion();
        String encodedRegionName = region.getEncodedName();
        Integer nodeVersion = offlineNodesVersions.get(encodedRegionName);
        if (useZKForAssignment && (nodeVersion == null || nodeVersion == -1)) {
          // The async OFFLINE-znode op didn't record a version: exclude this
          // region from the bulk OPEN and assign it individually later.
          LOG.warn("failed to offline in zookeeper: " + region);
          failedToOpenRegions.add(region); // assign individually later
          Lock lock = locks.remove(encodedRegionName);
          lock.unlock();
        } else {
          regionStates.updateRegionState(
            region, State.PENDING_OPEN, destination);
          List<ServerName> favoredNodes = ServerName.EMPTY_SERVER_LIST;
          if (this.shouldAssignRegionsWithFavoredNodes) {
            favoredNodes = ((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region);
          }
          regionOpenInfos.add(new Triple<HRegionInfo, Integer, List<ServerName>>(
            region, nodeVersion, favoredNodes));
        }
      }
      // Move on to open regions.
      try {
        // Send OPEN RPC. If it fails on a IOE or RemoteException,
        // regions will be assigned individually.
        long maxWaitTime = System.currentTimeMillis() +
          this.server.getConfiguration().
            getLong("hbase.regionserver.rpc.startup.waittime", 60000);
        for (int i = 1; i <= maximumAttempts && !server.isStopped(); i++) {
          try {
            // regionOpenInfos is empty if all regions are in failedToOpenRegions list
            if (regionOpenInfos.isEmpty()) {
              break;
            }
            List<RegionOpeningState> regionOpeningStateList = serverManager
              .sendRegionOpen(destination, regionOpenInfos);
            if (regionOpeningStateList == null) {
              // Failed getting RPC connection to this server
              return false;
            }
            // Per-region result of the bulk OPEN: already opened is handled,
            // failed opening is queued for individual reassignment.
            for (int k = 0, n = regionOpeningStateList.size(); k < n; k++) {
              RegionOpeningState openingState = regionOpeningStateList.get(k);
              if (openingState != RegionOpeningState.OPENED) {
                HRegionInfo region = regionOpenInfos.get(k).getFirst();
                if (openingState == RegionOpeningState.ALREADY_OPENED) {
                  processAlreadyOpenedRegion(region, destination);
                } else if (openingState == RegionOpeningState.FAILED_OPENING) {
                  // Failed opening this region, reassign it later
                  failedToOpenRegions.add(region);
                } else {
                  LOG.warn("THIS SHOULD NOT HAPPEN: unknown opening state "
                    + openingState + " in assigning region " + region);
                }
              }
            }
            break;
          } catch (IOException e) {
            if (e instanceof RemoteException) {
              e = ((RemoteException)e).unwrapRemoteException();
            }
            if (e instanceof RegionServerStoppedException) {
              LOG.warn("The region server was shut down, ", e);
              // No need to retry, the region server is a goner.
              return false;
            } else if (e instanceof ServerNotRunningYetException) {
              long now = System.currentTimeMillis();
              if (now < maxWaitTime) {
                LOG.debug("Server is not yet up; waiting up to " +
                  (maxWaitTime - now) + "ms", e);
                Thread.sleep(100);
                i--; // reset the try count
                continue;
              }
            } else if (e instanceof java.net.SocketTimeoutException
                && this.serverManager.isServerOnline(destination)) {
              // In case socket is timed out and the region server is still online,
              // the openRegion RPC could have been accepted by the server and
              // just the response didn't go through. So we will retry to
              // open the region on the same server.
              if (LOG.isDebugEnabled()) {
                LOG.debug("Bulk assigner openRegion() to " + destination
                  + " has timed out, but the regions might"
                  + " already be opened on it.", e);
              }
              // wait and reset the re-try count, server might be just busy.
              Thread.sleep(100);
              i--;
              continue;
            }
            throw e;
          }
        }
      } catch (IOException e) {
        // Can be a socket timeout, EOF, NoRouteToHost, etc
        LOG.info("Unable to communicate with " + destination
          + " in order to assign regions, ", e);
        return false;
      } catch (InterruptedException e) {
        throw new RuntimeException(e);
      }
    } finally {
      for (Lock lock : locks.values()) {
        lock.unlock();
      }
    }
    // Whatever we could not open in bulk, assign one by one (unless it came
    // online some other way in the meantime).
    if (!failedToOpenRegions.isEmpty()) {
      for (HRegionInfo region : failedToOpenRegions) {
        if (!regionStates.isRegionOnline(region)) {
          invokeAssign(region);
        }
      }
    }
    LOG.debug("Bulk assigning done for " + destination);
    return true;
  } finally {
    metricsAssignmentManager.updateBulkAssignTime(EnvironmentEdgeManager.currentTimeMillis() - startTime);
  }
}
/**
 * Send CLOSE RPC if the server is online, otherwise, offline the region.
 *
 * The RPC will be sent only to the region sever found in the region state
 * if it is passed in, otherwise, to the src server specified. If region
 * state is not specified, we don't update region state at all, instead
 * we just send the RPC call. This is useful for some cleanup without
 * messing around the region states (see handleRegion, on region opened
 * on an unexpected server scenario, for an example)
 *
 * @param region region to close
 * @param state current region state, or null to skip all state updates
 * @param versionOfClosingNode znode version to pass to the CLOSE RPC, -1 if none
 * @param dest intended new destination, may be null
 * @param transitionInZK whether the RS should transition the znode on close
 * @param src server to send the RPC to when no state is given
 */
private void unassign(final HRegionInfo region,
    final RegionState state, final int versionOfClosingNode,
    final ServerName dest, final boolean transitionInZK,
    final ServerName src) {
  ServerName server = src;
  if (state != null) {
    // Prefer the server recorded in the region state over the caller's hint.
    server = state.getServerName();
  }
  long maxWaitTime = -1;
  for (int i = 1; i <= this.maximumAttempts; i++) {
    if (this.server.isStopped() || this.server.isAborted()) {
      LOG.debug("Server stopped/aborted; skipping unassign of " + region);
      return;
    }
    // ClosedRegionhandler can remove the server from this.regions
    if (!serverManager.isServerOnline(server)) {
      LOG.debug("Offline " + region.getRegionNameAsString()
        + ", no need to unassign since it's on a dead server: " + server);
      if (transitionInZK) {
        // delete the node. if no node exists need not bother.
        deleteClosingOrClosedNode(region, server);
      }
      if (state != null) {
        regionOffline(region);
      }
      return;
    }
    try {
      // Send CLOSE RPC
      if (serverManager.sendRegionClose(server, region,
          versionOfClosingNode, dest, transitionInZK)) {
        LOG.debug("Sent CLOSE to " + server + " for region " +
          region.getRegionNameAsString());
        if (useZKForAssignment && !transitionInZK && state != null) {
          // Retry to make sure the region is
          // closed so as to avoid double assignment.
          unassign(region, state, versionOfClosingNode,
            dest, transitionInZK, src);
        }
        return;
      }
      // This never happens. Currently regionserver close always return true.
      // Todo; this can now happen (0.96) if there is an exception in a coprocessor
      LOG.warn("Server " + server + " region CLOSE RPC returned false for " +
        region.getRegionNameAsString());
    } catch (Throwable t) {
      if (t instanceof RemoteException) {
        t = ((RemoteException)t).unwrapRemoteException();
      }
      boolean logRetries = true;
      if (t instanceof NotServingRegionException
          || t instanceof RegionServerStoppedException
          || t instanceof ServerNotRunningYetException) {
        // Region is already gone from that server: treat as successfully closed.
        LOG.debug("Offline " + region.getRegionNameAsString()
          + ", it's not any more on " + server, t);
        if (transitionInZK) {
          deleteClosingOrClosedNode(region, server);
        }
        if (state != null) {
          regionOffline(region);
        }
        return;
      } else if ((t instanceof FailedServerException) || (state != null &&
          t instanceof RegionAlreadyInTransitionException)) {
        // Transient conditions: back off (failed server) or keep waiting while
        // the RS finishes its in-flight transition (without burning attempts).
        long sleepTime = 0;
        Configuration conf = this.server.getConfiguration();
        if(t instanceof FailedServerException) {
          sleepTime = 1 + conf.getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY,
            RpcClient.FAILED_SERVER_EXPIRY_DEFAULT);
        } else {
          // RS is already processing this region, only need to update the timestamp
          LOG.debug("update " + state + " the timestamp.");
          state.updateTimestampToNow();
          if (maxWaitTime < 0) {
            maxWaitTime =
              EnvironmentEdgeManager.currentTimeMillis()
                + conf.getLong(ALREADY_IN_TRANSITION_WAITTIME,
                  DEFAULT_ALREADY_IN_TRANSITION_WAITTIME);
          }
          long now = EnvironmentEdgeManager.currentTimeMillis();
          if (now < maxWaitTime) {
            LOG.debug("Region is already in transition; "
              + "waiting up to " + (maxWaitTime - now) + "ms", t);
            sleepTime = 100;
            i--; // reset the try count
            logRetries = false;
          }
        }
        try {
          if (sleepTime > 0) {
            Thread.sleep(sleepTime);
          }
        } catch (InterruptedException ie) {
          LOG.warn("Failed to unassign "
            + region.getRegionNameAsString() + " since interrupted", ie);
          // Preserve the interrupt status for callers up the stack.
          Thread.currentThread().interrupt();
          if (!tomActivated && state != null) {
            regionStates.updateRegionState(region, State.FAILED_CLOSE);
          }
          return;
        }
      }
      if (logRetries) {
        LOG.info("Server " + server + " returned " + t + " for "
          + region.getRegionNameAsString() + ", try=" + i
          + " of " + this.maximumAttempts, t);
        // Presume retry or server will expire.
      }
    }
  }
  // Run out of attempts
  if (!tomActivated && state != null) {
    regionStates.updateRegionState(region, State.FAILED_CLOSE);
  }
}
/**
 * Set region to OFFLINE unless it is opening and forceNewPlan is false.
 *
 * <p>Note: the switch below uses intentional fall-through between case
 * groups (open/closing states fall into FAILED_* handling when forceNewPlan
 * is set, FAILED_* falls into the OFFLINE dead-server check, and OFFLINE
 * falls into CLOSED).
 *
 * @param region the region to force offline
 * @param forceNewPlan whether an in-flight open/close should be undone
 * @return the region state suitable for assignment, or null if the region
 *         should not be assigned now
 */
private RegionState forceRegionStateToOffline(
    final HRegionInfo region, final boolean forceNewPlan) {
  RegionState state = regionStates.getRegionState(region);
  if (state == null) {
    LOG.warn("Assigning a region not in region states: " + region);
    state = regionStates.createRegionState(region);
  }
  ServerName sn = state.getServerName();
  if (forceNewPlan && LOG.isDebugEnabled()) {
    LOG.debug("Force region state offline " + state);
  }
  switch (state.getState()) {
    case OPEN:
    case OPENING:
    case PENDING_OPEN:
    case CLOSING:
    case PENDING_CLOSE:
      if (!forceNewPlan) {
        LOG.debug("Skip assigning " +
          region + ", it is already " + state);
        return null;
      }
      // fall through (forceNewPlan): unassign first, like the FAILED_* cases
    case FAILED_CLOSE:
    case FAILED_OPEN:
      unassign(region, state, -1, null, false, null);
      state = regionStates.getRegionState(region);
      if (state.isFailedClose()) {
        // If we can't close the region, we can't re-assign
        // it so as to avoid possible double assignment/data loss.
        LOG.info("Skip assigning " +
          region + ", we couldn't close it: " + state);
        return null;
      }
      // fall through to the dead-server check below
    case OFFLINE:
      // This region could have been open on this server
      // for a while. If the server is dead and not processed
      // yet, we can move on only if the meta shows the
      // region is not on this server actually, or on a server
      // not dead, or dead and processed already.
      // In case not using ZK, we don't need this check because
      // we have the latest info in memory, and the caller
      // will do another round checking any way.
      if (useZKForAssignment
          && regionStates.isServerDeadAndNotProcessed(sn)
          && wasRegionOnDeadServerByMeta(region, sn)) {
        if (!regionStates.isRegionInTransition(region)) {
          LOG.info("Updating the state to " + State.OFFLINE + " to allow to be reassigned by SSH");
          regionStates.updateRegionState(region, State.OFFLINE);
        }
        LOG.info("Skip assigning " + region.getRegionNameAsString()
          + ", it is on a dead but not processed yet server: " + sn);
        return null;
      }
      // fall through
    case CLOSED:
      break;
    default:
      LOG.error("Trying to assign region " + region
        + ", which is " + state);
      return null;
  }
  return state;
}
/**
 * Consults hbase:meta to decide whether the given region was last hosted on
 * a server that is dead and whose shutdown has not been processed yet (in
 * which case server-shutdown handling, not the caller, should reassign it).
 *
 * NOTE(review): the {@code server} in the while condition below is the
 * master's Server field (loop until the master stops); the local
 * {@code ServerName server} declared inside the loop is a different,
 * inner-scoped variable — do not "simplify" the shadowing away casually.
 *
 * @param region region to look up in hbase:meta
 * @param sn fallback server used when meta cannot be consulted
 * @return true if the hosting server is dead and not yet processed
 */
private boolean wasRegionOnDeadServerByMeta(
    final HRegionInfo region, final ServerName sn) {
  try {
    if (region.isMetaRegion()) {
      // Meta's own location is tracked directly; no meta scan needed.
      ServerName server = catalogTracker.getMetaLocation();
      return regionStates.isServerDeadAndNotProcessed(server);
    }
    while (!server.isStopped()) {
      try {
        catalogTracker.waitForMeta();
        Result r = MetaReader.getRegionResult(catalogTracker, region.getRegionName());
        // No row in meta: the region is unknown, so not on a dead server.
        if (r == null || r.isEmpty()) return false;
        ServerName server = HRegionInfo.getServerName(r);
        return regionStates.isServerDeadAndNotProcessed(server);
      } catch (IOException ioe) {
        // Meta may be moving; retry until the master stops.
        LOG.info("Received exception accessing hbase:meta during force assign "
          + region.getRegionNameAsString() + ", retrying", ioe);
      }
    }
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    LOG.info("Interrupted accessing hbase:meta", e);
  }
  // Call is interrupted or server is stopped.
  return regionStates.isServerDeadAndNotProcessed(sn);
}
/**
 * Caller must hold lock on the passed <code>state</code> object.
 *
 * Drives a single region through assignment: picks (or reuses) a
 * RegionPlan, optionally forces the znode OFFLINE, sends the open RPC, and
 * retries up to {@code maximumAttempts} times with per-failure-type backoff
 * and replanning.
 *
 * @param state region state; its region is the one being assigned
 * @param setOfflineInZK whether to force the region's znode to OFFLINE
 *   before sending the open RPC (ZK-based assignment only)
 * @param forceNewPlan if true, ignore any cached RegionPlan
 */
private void assign(RegionState state,
    final boolean setOfflineInZK, final boolean forceNewPlan) {
  long startTime = EnvironmentEdgeManager.currentTimeMillis();
  try {
    Configuration conf = server.getConfiguration();
    RegionState currentState = state;
    int versionOfOfflineNode = -1;
    RegionPlan plan = null;
    long maxWaitTime = -1;
    HRegionInfo region = state.getRegion();
    RegionOpeningState regionOpenState;
    Throwable previousException = null;
    for (int i = 1; i <= maximumAttempts; i++) {
      if (server.isStopped() || server.isAborted()) {
        LOG.info("Skip assigning " + region.getRegionNameAsString()
          + ", the server is stopped/aborted");
        return;
      }
      if (plan == null) { // Get a server for the region at first
        try {
          plan = getRegionPlan(region, forceNewPlan);
        } catch (HBaseIOException e) {
          LOG.warn("Failed to get region plan", e);
        }
      }
      if (plan == null) {
        LOG.warn("Unable to determine a plan to assign " + region);
        if (tomActivated){
          this.timeoutMonitor.setAllRegionServersOffline(true);
        } else {
          if (region.isMetaRegion()) {
            try {
              Thread.sleep(this.sleepTimeBeforeRetryingMetaAssignment);
              // hbase:meta must be assigned eventually: reset the attempt
              // counter so meta is retried indefinitely.
              if (i == maximumAttempts) i = 1;
              continue;
            } catch (InterruptedException e) {
              LOG.error("Got exception while waiting for hbase:meta assignment");
              Thread.currentThread().interrupt();
            }
          }
          regionStates.updateRegionState(region, State.FAILED_OPEN);
        }
        return;
      }
      if (setOfflineInZK && versionOfOfflineNode == -1) {
        // get the version of the znode after setting it to OFFLINE.
        // versionOfOfflineNode will be -1 if the znode was not set to OFFLINE
        versionOfOfflineNode = setOfflineInZooKeeper(currentState, plan.getDestination());
        if (versionOfOfflineNode != -1) {
          if (isDisabledorDisablingRegionInRIT(region)) {
            return;
          }
          // In case of assignment from EnableTableHandler table state is ENABLING. Any how
          // EnableTableHandler will set ENABLED after assigning all the table regions. If we
          // try to set to ENABLED directly then client API may think table is enabled.
          // When we have a case such as all the regions are added directly into hbase:meta and we call
          // assignRegion then we need to make the table ENABLED. Hence in such case the table
          // will not be in ENABLING or ENABLED state.
          TableName tableName = region.getTable();
          if (!zkTable.isEnablingTable(tableName) && !zkTable.isEnabledTable(tableName)) {
            LOG.debug("Setting table " + tableName + " to ENABLED state.");
            setEnabledTable(tableName);
          }
        }
      }
      if (setOfflineInZK && versionOfOfflineNode == -1) {
        LOG.info("Unable to set offline in ZooKeeper to assign " + region);
        // Setting offline in ZK must have been failed due to ZK racing or some
        // exception which may make the server to abort. If it is ZK racing,
        // we should retry since we already reset the region state,
        // existing (re)assignment will fail anyway.
        if (!server.isAborted()) {
          continue;
        }
      }
      LOG.info("Assigning " + region.getRegionNameAsString() +
        " to " + plan.getDestination().toString());
      // Transition RegionState to PENDING_OPEN
      currentState = regionStates.updateRegionState(region,
        State.PENDING_OPEN, plan.getDestination());

      boolean needNewPlan;
      final String assignMsg = "Failed assignment of " + region.getRegionNameAsString() +
        " to " + plan.getDestination();
      try {
        List<ServerName> favoredNodes = ServerName.EMPTY_SERVER_LIST;
        if (this.shouldAssignRegionsWithFavoredNodes) {
          favoredNodes = ((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region);
        }
        regionOpenState = serverManager.sendRegionOpen(
          plan.getDestination(), region, versionOfOfflineNode, favoredNodes);

        if (regionOpenState == RegionOpeningState.FAILED_OPENING) {
          // Failed opening this region, looping again on a new server.
          needNewPlan = true;
          LOG.warn(assignMsg + ", regionserver says 'FAILED_OPENING', " +
            " trying to assign elsewhere instead; " +
            "try=" + i + " of " + this.maximumAttempts);
        } else {
          // we're done
          if (regionOpenState == RegionOpeningState.ALREADY_OPENED) {
            processAlreadyOpenedRegion(region, plan.getDestination());
          }
          return;
        }

      } catch (Throwable t) {
        if (t instanceof RemoteException) {
          t = ((RemoteException) t).unwrapRemoteException();
        }
        previousException = t;

        // Should we wait a little before retrying? If the server is starting it's yes.
        // If the region is already in transition, it's yes as well: we want to be sure that
        // the region will get opened but we don't want a double assignment.
        boolean hold = (t instanceof RegionAlreadyInTransitionException ||
          t instanceof ServerNotRunningYetException);

        // In case socket is timed out and the region server is still online,
        // the openRegion RPC could have been accepted by the server and
        // just the response didn't go through. So we will retry to
        // open the region on the same server to avoid possible
        // double assignment.
        boolean retry = !hold && (t instanceof java.net.SocketTimeoutException
          && this.serverManager.isServerOnline(plan.getDestination()));

        if (hold) {
          LOG.warn(assignMsg + ", waiting a little before trying on the same region server " +
            "try=" + i + " of " + this.maximumAttempts, t);

          if (maxWaitTime < 0) {
            if (t instanceof RegionAlreadyInTransitionException) {
              maxWaitTime = EnvironmentEdgeManager.currentTimeMillis()
                + this.server.getConfiguration().getLong(ALREADY_IN_TRANSITION_WAITTIME,
                  DEFAULT_ALREADY_IN_TRANSITION_WAITTIME);
            } else {
              maxWaitTime = EnvironmentEdgeManager.currentTimeMillis()
                + this.server.getConfiguration().getLong(
                  "hbase.regionserver.rpc.startup.waittime", 60000);
            }
          }
          try {
            needNewPlan = false;
            long now = EnvironmentEdgeManager.currentTimeMillis();
            if (now < maxWaitTime) {
              LOG.debug("Server is not yet up or region is already in transition; "
                + "waiting up to " + (maxWaitTime - now) + "ms", t);
              Thread.sleep(100);
              i--; // reset the try count
            } else if (!(t instanceof RegionAlreadyInTransitionException)) {
              LOG.debug("Server is not up for a while; try a new one", t);
              needNewPlan = true;
            }
          } catch (InterruptedException ie) {
            LOG.warn("Failed to assign "
              + region.getRegionNameAsString() + " since interrupted", ie);
            Thread.currentThread().interrupt();
            if (!tomActivated) {
              regionStates.updateRegionState(region, State.FAILED_OPEN);
            }
            return;
          }
        } else if (retry) {
          needNewPlan = false;
          i--; // we want to retry as many times as needed as long as the RS is not dead.
          LOG.warn(assignMsg + ", trying to assign to the same region server due ", t);
        } else {
          needNewPlan = true;
          LOG.warn(assignMsg + ", trying to assign elsewhere instead;" +
            " try=" + i + " of " + this.maximumAttempts, t);
        }
      }

      if (i == this.maximumAttempts) {
        // Don't reset the region state or get a new plan any more.
        // This is the last try.
        continue;
      }

      // If region opened on destination of present plan, reassigning to new
      // RS may cause double assignments. In case of RegionAlreadyInTransitionException
      // reassigning to same RS.
      if (needNewPlan) {
        // Force a new plan and reassign. Will return null if no servers.
        // The new plan could be the same as the existing plan since we don't
        // exclude the server of the original plan, which should not be
        // excluded since it could be the only server up now.
        RegionPlan newPlan = null;
        try {
          newPlan = getRegionPlan(region, true);
        } catch (HBaseIOException e) {
          LOG.warn("Failed to get region plan", e);
        }
        if (newPlan == null) {
          if (tomActivated) {
            this.timeoutMonitor.setAllRegionServersOffline(true);
          } else {
            regionStates.updateRegionState(region, State.FAILED_OPEN);
          }
          LOG.warn("Unable to find a viable location to assign region " +
            region.getRegionNameAsString());
          return;
        }

        if (plan != newPlan && !plan.getDestination().equals(newPlan.getDestination())) {
          // Clean out plan we failed execute and one that doesn't look like it'll
          // succeed anyways; we need a new plan!
          // Transition back to OFFLINE
          currentState = regionStates.updateRegionState(region, State.OFFLINE);
          versionOfOfflineNode = -1;
          plan = newPlan;
        } else if(plan.getDestination().equals(newPlan.getDestination()) &&
            previousException instanceof FailedServerException) {
          try {
            // Same destination and the server failed recently: wait out the
            // failed-server expiry window before retrying it.
            LOG.info("Trying to re-assign " + region.getRegionNameAsString() +
              " to the same failed server.");
            Thread.sleep(1 + conf.getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY,
              RpcClient.FAILED_SERVER_EXPIRY_DEFAULT));
          } catch (InterruptedException ie) {
            LOG.warn("Failed to assign "
              + region.getRegionNameAsString() + " since interrupted", ie);
            Thread.currentThread().interrupt();
            if (!tomActivated) {
              regionStates.updateRegionState(region, State.FAILED_OPEN);
            }
            return;
          }
        }
      }
    }
    // Run out of attempts
    if (!tomActivated) {
      regionStates.updateRegionState(region, State.FAILED_OPEN);
    }
  } finally {
    metricsAssignmentManager.updateAssignmentTime(EnvironmentEdgeManager.currentTimeMillis() - startTime);
  }
}
/**
 * Handles a regionserver's ALREADY_OPENED response: drops the unassigned
 * (OFFLINE) znode and records the region as online in memory. This happens
 * e.g. when enabling a table whose regions were already enabled.
 */
private void processAlreadyOpenedRegion(HRegionInfo region, ServerName sn) {
  LOG.debug("ALREADY_OPENED " + region.getRegionNameAsString()
    + " to " + sn);
  deleteNodeInStates(region.getEncodedName(), "offline", sn,
    EventType.M_ZK_REGION_OFFLINE);
  regionStates.regionOnline(region, sn);
}
/**
 * If the region's table is disabled or disabling, offline the region and
 * tell the caller to abort the assignment.
 *
 * @return true when the assignment was skipped because of table state
 */
private boolean isDisabledorDisablingRegionInRIT(final HRegionInfo region) {
  final TableName tableName = region.getTable();
  final boolean isDisabled = this.zkTable.isDisabledTable(tableName);
  if (!isDisabled && !this.zkTable.isDisablingTable(tableName)) {
    // Table is enabled (or enabling): nothing blocks the assignment.
    return false;
  }
  LOG.info("Table " + tableName + (isDisabled ? " disabled;" : " disabling;") +
    " skipping assign of " + region.getRegionNameAsString());
  offlineDisabledRegion(region);
  return true;
}
/**
 * Set region as OFFLINED up in zookeeper, creating (or forcing) the
 * unassigned znode ahead of an assignment attempt.
 *
 * @param state current state; must already be CLOSED or OFFLINE, otherwise
 *   the master aborts
 * @param destination the server the region is planned for
 * @return the version of the offline node if setting of the OFFLINE node was
 *   successful, -1 otherwise.
 */
private int setOfflineInZooKeeper(final RegionState state, final ServerName destination) {
  if (!state.isClosed() && !state.isOffline()) {
    String msg = "Unexpected state : " + state + " .. Cannot transit it to OFFLINE.";
    this.server.abort(msg, new IllegalStateException(msg));
    return -1;
  }
  regionStates.updateRegionState(state.getRegion(), State.OFFLINE);
  try {
    // get the version after setting the znode to OFFLINE
    final int offlineNodeVersion = ZKAssign.createOrForceNodeOffline(watcher,
      state.getRegion(), destination);
    if (offlineNodeVersion == -1) {
      LOG.warn("Attempted to create/force node into OFFLINE state before "
        + "completing assignment but failed to do so for " + state);
      return -1;
    }
    return offlineNodeVersion;
  } catch (KeeperException e) {
    server.abort("Unexpected ZK exception creating/setting node OFFLINE", e);
    return -1;
  }
}
/**
 * @param region the region to assign
 * @param forceNewPlan If true, then if an existing plan exists, a new plan
 *   will be generated.
 * @return Plan for passed <code>region</code> (If none currently, it creates one or
 *   if no servers to assign, it returns null).
 */
private RegionPlan getRegionPlan(final HRegionInfo region,
    final boolean forceNewPlan) throws HBaseIOException {
  // Delegate with no server excluded.
  return getRegionPlan(region, null, forceNewPlan);
}
/**
 * @param region the region to assign
 * @param serverToExclude Server to exclude (we know its bad). Pass null if
 *   all servers are thought to be assignable.
 * @param forceNewPlan If true, then if an existing plan exists, a new plan
 *   will be generated.
 * @return Plan for passed <code>region</code> (If none currently, it creates one or
 *   if no servers to assign, it returns null).
 */
private RegionPlan getRegionPlan(final HRegionInfo region,
    final ServerName serverToExclude, final boolean forceNewPlan) throws HBaseIOException {
  final String encodedName = region.getEncodedName();
  final List<ServerName> candidateServers =
    serverManager.createDestinationServersList(serverToExclude);

  if (candidateServers.isEmpty()){
    LOG.warn("Can't move " + encodedName +
      ", there is no destination server available.");
    return null;
  }

  RegionPlan freshPlan = null;
  boolean generatedFresh = false;
  RegionPlan cachedPlan;
  synchronized (this.regionPlans) {
    cachedPlan = this.regionPlans.get(encodedName);

    if (cachedPlan != null && cachedPlan.getDestination() != null) {
      LOG.debug("Found an existing plan for " + region.getRegionNameAsString()
        + " destination server is " + cachedPlan.getDestination() +
        " accepted as a dest server = " + candidateServers.contains(cachedPlan.getDestination()));
    }

    // Reuse the cached plan only when it exists, has a destination, that
    // destination is still a valid candidate, and nobody forced a replan.
    final boolean cachedPlanUsable = !forceNewPlan
      && cachedPlan != null
      && cachedPlan.getDestination() != null
      && candidateServers.contains(cachedPlan.getDestination());

    if (!cachedPlanUsable) {
      generatedFresh = true;
      freshPlan = new RegionPlan(region, null,
        balancer.randomAssignment(region, candidateServers));
      if (!region.isMetaTable() && shouldAssignRegionsWithFavoredNodes) {
        List<HRegionInfo> regions = new ArrayList<HRegionInfo>(1);
        regions.add(region);
        try {
          processFavoredNodes(regions);
        } catch (IOException ie) {
          LOG.warn("Ignoring exception in processFavoredNodes " + ie);
        }
      }
      this.regionPlans.put(encodedName, freshPlan);
    }
  }

  if (!generatedFresh) {
    LOG.debug("Using pre-existing plan for " +
      region.getRegionNameAsString() + "; plan=" + cachedPlan);
    return cachedPlan;
  }

  if (freshPlan.getDestination() == null) {
    LOG.warn("Can't find a destination for " + encodedName);
    return null;
  }
  LOG.debug("No previous transition plan found (or ignoring " +
    "an existing plan) for " + region.getRegionNameAsString() +
    "; generated random plan=" + freshPlan + "; " +
    serverManager.countOfRegionServers() +
    " (online=" + serverManager.getOnlineServers().size() +
    ", available=" + candidateServers.size() + ") available servers" +
    ", forceNewPlan=" + forceNewPlan);
  return freshPlan;
}
/**
 * Unassigns the specified region.
 * <p>
 * Updates the RegionState and sends the CLOSE RPC unless region is being
 * split by regionserver; then the unassign fails (silently) because we
 * presume the region being unassigned no longer exists (its been split out
 * of existence). TODO: What to do if split fails and is rolled back and
 * parent is revivified?
 * <p>
 * If a RegionPlan is already set, it will remain.
 *
 * @param region region to be unassigned (without force, no preferred dest)
 */
public void unassign(HRegionInfo region) {
  unassign(region, false);
}
/**
 * Unassigns the specified region.
 * <p>
 * Updates the RegionState and sends the CLOSE RPC unless region is being
 * split by regionserver; then the unassign fails (silently) because we
 * presume the region being unassigned no longer exists (its been split out
 * of existence). TODO: What to do if split fails and is rolled back and
 * parent is revivified?
 * <p>
 * If a RegionPlan is already set, it will remain.
 *
 * @param region region to be unassigned
 * @param force if region should be closed even if already closing
 * @param dest preferred server for the subsequent reassignment, or null
 */
public void unassign(HRegionInfo region, boolean force, ServerName dest) {
  // TODO: Method needs refactoring. Ugly buried returns throughout. Beware!
  LOG.debug("Starting unassign of " + region.getRegionNameAsString()
    + " (offlining), current state: " + regionStates.getRegionState(region));

  String encodedName = region.getEncodedName();
  // Grab the state of this region and synchronize on it
  int versionOfClosingNode = -1;
  // We need a lock here as we're going to do a put later and we don't want multiple states
  // creation
  ReentrantLock lock = locker.acquireLock(encodedName);
  RegionState state = regionStates.getRegionTransitionState(encodedName);
  // reassign stays true unless we learn below that reassignment would be
  // wrong (split/merge in progress, ZK failure, master aborting).
  boolean reassign = true;
  try {
    if (state == null) {
      // Region is not in transition.
      // We can unassign it only if it's not SPLIT/MERGED.
      state = regionStates.getRegionState(encodedName);
      if (state != null && state.isUnassignable()) {
        LOG.info("Attempting to unassign " + state + ", ignored");
        // Offline region will be reassigned below
        return;
      }
      // Create the znode in CLOSING state
      try {
        if (state == null || state.getServerName() == null) {
          // We don't know where the region is, offline it.
          // No need to send CLOSE RPC
          LOG.warn("Attempting to unassign a region not in RegionStates"
            + region.getRegionNameAsString() + ", offlined");
          regionOffline(region);
          return;
        }
        if (useZKForAssignment) {
          versionOfClosingNode = ZKAssign.createNodeClosing(
            watcher, region, state.getServerName());
          if (versionOfClosingNode == -1) {
            LOG.info("Attempting to unassign " +
              region.getRegionNameAsString() + " but ZK closing node "
              + "can't be created.");
            reassign = false; // not unassigned at all
            return;
          }
        }
      } catch (KeeperException e) {
        if (e instanceof NodeExistsException) {
          // Handle race between master initiated close and regionserver
          // orchestrated splitting. See if existing node is in a
          // SPLITTING or SPLIT state. If so, the regionserver started
          // an op on node before we could get our CLOSING in. Deal.
          NodeExistsException nee = (NodeExistsException)e;
          String path = nee.getPath();
          try {
            if (isSplitOrSplittingOrMergedOrMerging(path)) {
              LOG.debug(path + " is SPLIT or SPLITTING or MERGED or MERGING; " +
                "skipping unassign because region no longer exists -- its split or merge");
              reassign = false; // no need to reassign for split/merged region
              return;
            }
          } catch (KeeperException.NoNodeException ke) {
            LOG.warn("Failed getData on SPLITTING/SPLIT at " + path +
              "; presuming split and that the region to unassign, " +
              encodedName + ", no longer exists -- confirm", ke);
            return;
          } catch (KeeperException ke) {
            LOG.error("Unexpected zk state", ke);
          } catch (DeserializationException de) {
            LOG.error("Failed parse", de);
          }
        }
        // If we get here, don't understand whats going on -- abort.
        server.abort("Unexpected ZK exception creating node CLOSING", e);
        reassign = false; // heading out already
        return;
      }
      state = regionStates.updateRegionState(region, State.PENDING_CLOSE);
    } else if (state.isFailedOpen()) {
      // The region is not open yet
      regionOffline(region);
      return;
    } else if (force && state.isPendingCloseOrClosing()) {
      LOG.debug("Attempting to unassign " + region.getRegionNameAsString() +
        " which is already " + state.getState() +
        " but forcing to send a CLOSE RPC again ");
      if (state.isFailedClose()) {
        state = regionStates.updateRegionState(region, State.PENDING_CLOSE);
      }
      state.updateTimestampToNow();
    } else {
      LOG.debug("Attempting to unassign " +
        region.getRegionNameAsString() + " but it is " +
        "already in transition (" + state.getState() + ", force=" + force + ")");
      return;
    }

    unassign(region, state, versionOfClosingNode, dest, useZKForAssignment, null);
  } finally {
    lock.unlock();

    // Region is expected to be reassigned afterwards
    if (reassign && regionStates.isRegionOffline(region)) {
      assign(region, true);
    }
  }
}
/**
 * Unassigns the specified region with no preferred destination server.
 *
 * @param region region to be unassigned
 * @param force if region should be closed even if already closing
 */
public void unassign(HRegionInfo region, boolean force){
  unassign(region, force, null);
}
/**
 * Deletes the region's znode when it is in CLOSING or CLOSED state on the
 * given server.
 *
 * @param region regioninfo of znode to be deleted.
 * @param sn server the close was expected on
 */
public void deleteClosingOrClosedNode(HRegionInfo region, ServerName sn) {
  deleteNodeInStates(region.getEncodedName(), "closing", sn,
    EventType.M_ZK_REGION_CLOSING, EventType.RS_ZK_REGION_CLOSED);
}
/**
 * @param path znode path to examine
 * @return True if znode is in SPLIT or SPLITTING or MERGED or MERGING state.
 * @throws KeeperException Can happen if the znode went away in meantime.
 * @throws DeserializationException
 */
private boolean isSplitOrSplittingOrMergedOrMerging(final String path)
    throws KeeperException, DeserializationException {
  // This may fail if the SPLIT or SPLITTING or MERGED or MERGING znode gets
  // cleaned up before we can get data from it.
  byte [] data = ZKAssign.getData(watcher, path);
  if (data == null) {
    LOG.info("Node " + path + " is gone");
    return false;
  }
  RegionTransition rt = RegionTransition.parseFrom(data);
  switch (rt.getEventType()) {
  case RS_ZK_REQUEST_REGION_SPLIT:
  case RS_ZK_REGION_SPLIT:
  case RS_ZK_REGION_SPLITTING:
  case RS_ZK_REQUEST_REGION_MERGE:
  case RS_ZK_REGION_MERGED:
  case RS_ZK_REGION_MERGING:
    // A split or merge is in progress (or done) for this znode.
    return true;
  default:
    LOG.info("Node " + path + " is in " + rt.getEventType());
    return false;
  }
}
/**
 * Used by unit tests. Return the number of regions opened so far in the life
 * of the master. Increases by one every time the master opens a region
 * @return the counter value of the number of regions opened so far
 */
public int getNumRegionsOpened() {
  return numRegionsOpened.get();
}
/**
 * Waits until the specified region has completed assignment.
 * <p>
 * If the region is already assigned, returns immediately. Otherwise, method
 * blocks until the region is assigned.
 *
 * @param regionInfo region to wait on assignment for
 * @return true once the region is online; false if assignment failed
 *   (FAILED_OPEN) or the master is stopping
 * @throws InterruptedException
 */
public boolean waitForAssignment(HRegionInfo regionInfo)
    throws InterruptedException {
  for (;;) {
    if (regionStates.isRegionOnline(regionInfo)) {
      return true;
    }
    if (regionStates.isRegionInState(regionInfo, State.FAILED_OPEN)
        || this.server.isStopped()) {
      return false;
    }
    // We should receive a notification, but a bounded wait lowers the
    // impact of any race between the check and the update.
    regionStates.waitForUpdate(100);
  }
}
/**
 * Assigns the hbase:meta region.
 * <p>
 * Assumes that hbase:meta is currently closed and is not being actively served by
 * any RegionServer.
 * <p>
 * Forcibly unsets the current meta region location in ZooKeeper and assigns
 * hbase:meta to a random RegionServer.
 * @throws KeeperException
 */
public void assignMeta() throws KeeperException {
  // Drop any stale location first so the new assignment is authoritative.
  MetaRegionTracker.deleteMetaLocation(this.watcher);
  assign(HRegionInfo.FIRST_META_REGIONINFO, true);
}
/**
 * Assigns specified regions retaining assignments, if any.
 * <p>
 * This is a synchronous call and will return once every region has been
 * assigned. If anything fails, an exception is thrown
 * @throws InterruptedException
 * @throws IOException
 */
public void assign(Map<HRegionInfo, ServerName> regions)
    throws IOException, InterruptedException {
  if (regions == null || regions.isEmpty()) {
    return;
  }
  List<ServerName> destinations = serverManager.createDestinationServersList();
  if (destinations == null || destinations.isEmpty()) {
    throw new IOException("Found no destination server to assign region(s)");
  }
  // Ask the balancer to keep regions on their previous hosts when possible.
  Map<ServerName, List<HRegionInfo>> planByServer =
    balancer.retainAssignment(regions, destinations);
  assign(regions.size(), destinations.size(),
    "retainAssignment=true", planByServer);
}
/**
 * Assigns specified regions round robin, if any.
 * <p>
 * This is a synchronous call and will return once every region has been
 * assigned. If anything fails, an exception is thrown
 * @throws InterruptedException
 * @throws IOException
 */
public void assign(List<HRegionInfo> regions)
    throws IOException, InterruptedException {
  if (regions == null || regions.isEmpty()) {
    return;
  }
  List<ServerName> destinations = serverManager.createDestinationServersList();
  if (destinations == null || destinations.isEmpty()) {
    throw new IOException("Found no destination server to assign region(s)");
  }
  // Spread the regions across the servers round-robin.
  Map<ServerName, List<HRegionInfo>> planByServer
    = balancer.roundRobinAssignment(regions, destinations);
  processFavoredNodes(regions);
  assign(regions.size(), destinations.size(),
    "round-robin=true", planByServer);
}
/**
 * Dispatches a prepared assignment plan either one region at a time (small
 * workloads, e.g. mini clusters in tests) or via the thread-pooled bulk
 * assigner.
 *
 * @param regions number of regions in the plan
 * @param totalServers number of servers considered when planning
 * @param message short description of the planning strategy, for logging
 * @param bulkPlan target server to the list of regions it should open
 */
private void assign(int regions, int totalServers,
    String message, Map<ServerName, List<HRegionInfo>> bulkPlan)
    throws InterruptedException, IOException {
  final int planServers = bulkPlan.size();
  final boolean useBulk = planServers != 1
    && (regions >= bulkAssignThresholdRegions || planServers >= bulkAssignThresholdServers);

  if (useBulk) {
    LOG.info("Bulk assigning " + regions + " region(s) across "
      + totalServers + " server(s), " + message);

    // Use fixed count thread pool assigning.
    BulkAssigner ba = new GeneralBulkAssigner(
      this.server, bulkPlan, this, bulkAssignWaitTillAllAssigned);
    ba.bulkAssign();
    LOG.info("Bulk assigning done");
    return;
  }

  // Not use bulk assignment. This could be more efficient in small
  // cluster, especially mini cluster for testing, so that tests won't time out
  if (LOG.isTraceEnabled()) {
    LOG.trace("Not using bulk assignment since we are assigning only " + regions +
      " region(s) to " + planServers + " server(s)");
  }
  for (Map.Entry<ServerName, List<HRegionInfo>> plan : bulkPlan.entrySet()) {
    if (assign(plan.getKey(), plan.getValue())) {
      continue;
    }
    // The batch call failed: retry each still-offline region individually.
    for (HRegionInfo region : plan.getValue()) {
      if (!regionStates.isRegionOnline(region)) {
        invokeAssign(region);
      }
    }
  }
}
/**
 * Assigns all user regions, if any exist. Used during cluster startup.
 * <p>
 * This is a synchronous call and will return once every region has been
 * assigned. If anything fails, an exception is thrown and the cluster
 * should be shutdown.
 * @throws InterruptedException
 * @throws IOException
 * @throws KeeperException
 */
private void assignAllUserRegions(Set<TableName> disabledOrDisablingOrEnabling)
    throws IOException, InterruptedException, KeeperException {
  // Snapshot region-to-server assignments out of hbase:meta, skipping
  // regions of tables in DISABLING state: during clean cluster startup no
  // RS is alive and the regions map has no info on them. See HBASE-6281.
  SnapshotOfRegionAssignmentFromMeta snapshot =
    new SnapshotOfRegionAssignmentFromMeta(catalogTracker, disabledOrDisablingOrEnabling, true);
  snapshot.initialize();
  Map<HRegionInfo, ServerName> allRegions = snapshot.getRegionToRegionServerMap();
  if (allRegions == null || allRegions.isEmpty()) {
    return;
  }

  // Determine what type of assignment to do on startup
  boolean retainAssignment = server.getConfiguration().
    getBoolean("hbase.master.startup.retainassign", true);

  if (retainAssignment) {
    assign(allRegions);
  } else {
    assign(new ArrayList<HRegionInfo>(allRegions.keySet()));
  }

  // Make sure every table we just assigned is flagged ENABLED.
  for (HRegionInfo hri : allRegions.keySet()) {
    TableName tableName = hri.getTable();
    if (!zkTable.isEnabledTable(tableName)) {
      setEnabledTable(tableName);
    }
  }
}
/**
 * Wait until no regions in transition.
 *
 * Blocks until the in-memory RIT set drains, the timeout passes, or the
 * master stops. A true result only guarantees there was an instant with no
 * regions in transition from the master's in-memory point of view; new
 * transitions may start immediately afterwards.
 *
 * @param timeout How long to wait.
 * @return True if nothing in regions in transition.
 * @throws InterruptedException
 */
boolean waitUntilNoRegionsInTransition(final long timeout)
    throws InterruptedException {
  final long deadline = System.currentTimeMillis() + timeout;
  while (!this.server.isStopped() && regionStates.isRegionsInTransition()
      && System.currentTimeMillis() < deadline) {
    regionStates.waitForUpdate(100);
  }
  return !regionStates.isRegionsInTransition();
}
/**
 * Rebuild the list of user regions and assignment information.
 * <p>
 * Returns a map of servers that are not found to be online and the regions
 * they were hosting.
 * @return map of servers not online to their assigned regions, as stored
 *         in META
 * @throws IOException
 * @throws KeeperException if reading table state from zookeeper fails
 */
Map<ServerName, List<HRegionInfo>> rebuildUserRegions() throws IOException, KeeperException {
  Set<TableName> enablingTables = ZKTable.getEnablingTables(watcher);
  // Despite its name this set starts from DISABLED tables and then grows.
  Set<TableName> disabledOrEnablingTables = ZKTable.getDisabledTables(watcher);
  disabledOrEnablingTables.addAll(enablingTables);
  Set<TableName> disabledOrDisablingOrEnabling = ZKTable.getDisablingTables(watcher);
  disabledOrDisablingOrEnabling.addAll(disabledOrEnablingTables);

  // Region assignment from META
  List<Result> results = MetaReader.fullScan(this.catalogTracker);
  // Get any new but slow to checkin region server that joined the cluster
  Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet();
  // Map of offline servers and their regions to be returned
  Map<ServerName, List<HRegionInfo>> offlineServers =
    new TreeMap<ServerName, List<HRegionInfo>>();
  // Iterate regions in META
  for (Result result : results) {
    HRegionInfo regionInfo = HRegionInfo.getHRegionInfo(result);
    if (regionInfo == null) continue; // row is not a region row
    State state = RegionStateStore.getRegionState(result);
    ServerName regionLocation = RegionStateStore.getRegionServer(result);
    regionStates.createRegionState(regionInfo, state, regionLocation);
    if (!regionStates.isRegionInState(regionInfo, State.OPEN)) {
      // Region is not open (either offline or in transition), skip
      continue;
    }
    TableName tableName = regionInfo.getTable();
    if (!onlineServers.contains(regionLocation)) {
      // Region is located on a server that isn't online
      List<HRegionInfo> offlineRegions = offlineServers.get(regionLocation);
      if (offlineRegions == null) {
        offlineRegions = new ArrayList<HRegionInfo>(1);
        offlineServers.put(regionLocation, offlineRegions);
      }
      if (useZKForAssignment) {
        regionStates.regionOffline(regionInfo);
      }
      offlineRegions.add(regionInfo);
    } else if (!disabledOrEnablingTables.contains(tableName)) {
      // Region is being served and on an active server
      // add only if region not in disabled or enabling table
      regionStates.updateRegionState(regionInfo, State.OPEN, regionLocation);
      regionStates.regionOnline(regionInfo, regionLocation);
      balancer.regionOnline(regionInfo, regionLocation);
    } else if (useZKForAssignment) {
      regionStates.regionOffline(regionInfo);
    }
    // need to enable the table if not disabled or disabling or enabling
    // this will be used in rolling restarts
    if (!disabledOrDisablingOrEnabling.contains(tableName)
        && !getZKTable().isEnabledTable(tableName)) {
      setEnabledTable(tableName);
    }
  }
  return offlineServers;
}
/**
 * Recover the tables that were not fully moved to DISABLED state. These
 * tables are in DISABLING state when the master restarted/switched.
 *
 * @throws KeeperException
 * @throws TableNotFoundException
 * @throws IOException
 */
private void recoverTableInDisablingState()
    throws KeeperException, TableNotFoundException, IOException {
  // Finish each interrupted disable by re-running its handler.
  for (TableName tableName : ZKTable.getDisablingTables(watcher)) {
    LOG.info("The table " + tableName
      + " is in DISABLING state. Hence recovering by moving the table"
      + " to DISABLED state.");
    new DisableTableHandler(this.server, tableName, catalogTracker,
      this, tableLockManager, true).prepare().process();
  }
}
/**
 * Recover the tables that are not fully moved to ENABLED state. These tables
 * are in ENABLING state when the master restarted/switched
 *
 * @throws KeeperException
 * @throws org.apache.hadoop.hbase.TableNotFoundException
 * @throws IOException
 */
private void recoverTableInEnablingState()
    throws KeeperException, TableNotFoundException, IOException {
  // Finish each interrupted enable by re-running its handler.
  for (TableName tableName : ZKTable.getEnablingTables(watcher)) {
    LOG.info("The table " + tableName
      + " is in ENABLING state. Hence recovering by moving the table"
      + " to ENABLED state.");
    // enableTable in sync way during master startup,
    // no need to invoke coprocessor
    EnableTableHandler eth = new EnableTableHandler(this.server, tableName,
      catalogTracker, this, tableLockManager, true);
    try {
      eth.prepare();
    } catch (TableNotFoundException e) {
      // The table vanished from meta since ZK was read; nothing to enable.
      LOG.warn("Table " + tableName + " not found in hbase:meta to recover.");
      continue;
    }
    eth.process();
  }
}
/**
 * Processes the list of dead servers from the hbase:meta scan plus any regions
 * in transition (RIT).
 * <p>
 * Used during failover to recover regions that belonged to RegionServers which
 * failed while there was no active master, or regions that were mid-transition.
 *
 * @param deadServers map of dead server to the regions it was hosting, from
 *          the hbase:meta scan. Can be null.
 * @throws IOException
 * @throws KeeperException
 */
private void processDeadServersAndRecoverLostRegions(
  Map<ServerName, List<HRegionInfo>> deadServers)
    throws IOException, KeeperException {
  if (deadServers != null) {
    for (Map.Entry<ServerName, List<HRegionInfo>> server: deadServers.entrySet()) {
      ServerName serverName = server.getKey();
      // We need to keep such info even if the server is known dead
      regionStates.setLastRegionServerOfRegions(serverName, server.getValue());
      if (!serverManager.isServerDead(serverName)) {
        serverManager.expireServer(serverName); // Let SSH do region re-assign
      }
    }
  }
  // With ZK assignment, watch the assignment znodes for RIT regions; without
  // it, just take a one-shot snapshot of the children.
  List<String> nodes = useZKForAssignment ?
    ZKUtil.listChildrenAndWatchForNewChildren(watcher, watcher.assignmentZNode)
    : ZKUtil.listChildrenNoWatch(watcher, watcher.assignmentZNode);
  if (nodes != null && !nodes.isEmpty()) {
    for (String encodedRegionName : nodes) {
      processRegionInTransition(encodedRegionName, null);
    }
  } else if (!useZKForAssignment) {
    // We need to send RPC call again for PENDING_OPEN/PENDING_CLOSE regions
    // in case the RPC call is not sent out yet before the master was shut down
    // since we update the state before we send the RPC call. We can't update
    // the state after the RPC call. Otherwise, we don't know what's happened
    // to the region if the master dies right after the RPC call is out.
    Map<String, RegionState> rits = regionStates.getRegionsInTransition();
    for (RegionState regionState: rits.values()) {
      if (!serverManager.isServerOnline(regionState.getServerName())) {
        continue; // SSH will handle it
      }
      State state = regionState.getState();
      LOG.info("Processing " + regionState);
      switch (state) {
      case CLOSED:
        // Closed regions just need a (re)assignment.
        invokeAssign(regionState.getRegion());
        break;
      case PENDING_OPEN:
        retrySendRegionOpen(regionState);
        break;
      case PENDING_CLOSE:
        retrySendRegionClose(regionState);
        break;
      default:
        // No process for other states
      }
    }
  }
}
/**
 * At master failover, for a pending_open region, make sure the
 * sendRegionOpen RPC call is (re)sent to the target regionserver.
 * Runs asynchronously on the executor service; retries transient
 * connectivity failures and re-assigns on hard failures.
 */
private void retrySendRegionOpen(final RegionState regionState) {
  this.executorService.submit(
    new EventHandler(server, EventType.M_MASTER_RECOVERY) {
      @Override
      public void process() throws IOException {
        HRegionInfo hri = regionState.getRegion();
        ServerName serverName = regionState.getServerName();
        // Serialize with any other operation on this region.
        ReentrantLock lock = locker.acquireLock(hri.getEncodedName());
        try {
          // Keep retrying only while the target server is alive and the
          // master itself is not shutting down.
          while (serverManager.isServerOnline(serverName)
              && !server.isStopped() && !server.isAborted()) {
            try {
              List<ServerName> favoredNodes = ServerName.EMPTY_SERVER_LIST;
              if (shouldAssignRegionsWithFavoredNodes) {
                favoredNodes = ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(hri);
              }
              RegionOpeningState regionOpenState = serverManager.sendRegionOpen(
                serverName, hri, -1, favoredNodes);
              if (regionOpenState == RegionOpeningState.FAILED_OPENING) {
                // Failed opening this region, this means the target server didn't get
                // the original region open RPC, so re-assign it with a new plan
                LOG.debug("Got failed_opening in retry sendRegionOpen for "
                  + regionState + ", re-assign it");
                invokeAssign(hri, true);
              }
              return; // Done.
            } catch (Throwable t) {
              if (t instanceof RemoteException) {
                t = ((RemoteException) t).unwrapRemoteException();
              }
              // In case SocketTimeoutException/FailedServerException, we will retry
              if (t instanceof java.net.SocketTimeoutException
                  || t instanceof FailedServerException) {
                Threads.sleep(100);
                continue;
              }
              // For other exceptions, re-assign it
              LOG.debug("Got exception in retry sendRegionOpen for "
                + regionState + ", re-assign it", t);
              invokeAssign(hri);
              return; // Done.
            }
          }
        } finally {
          lock.unlock();
        }
      }
    });
}
/**
 * At master failover, for a pending_close region, make sure the
 * sendRegionClose RPC call is (re)sent to the target regionserver.
 * Runs asynchronously on the executor service; retries transient
 * connectivity failures and re-closes on hard failures.
 */
private void retrySendRegionClose(final RegionState regionState) {
  this.executorService.submit(
    new EventHandler(server, EventType.M_MASTER_RECOVERY) {
      @Override
      public void process() throws IOException {
        HRegionInfo hri = regionState.getRegion();
        ServerName serverName = regionState.getServerName();
        // Serialize with any other operation on this region.
        ReentrantLock lock = locker.acquireLock(hri.getEncodedName());
        try {
          // Keep retrying only while the target server is alive and the
          // master itself is not shutting down.
          while (serverManager.isServerOnline(serverName)
              && !server.isStopped() && !server.isAborted()) {
            try {
              if (!serverManager.sendRegionClose(serverName, hri, -1, null, false)) {
                // This means the region is still on the target server
                LOG.debug("Got false in retry sendRegionClose for "
                  + regionState + ", re-close it");
                invokeUnAssign(hri);
              }
              return; // Done.
            } catch (Throwable t) {
              if (t instanceof RemoteException) {
                t = ((RemoteException) t).unwrapRemoteException();
              }
              // In case SocketTimeoutException/FailedServerException, we will retry
              if (t instanceof java.net.SocketTimeoutException
                  || t instanceof FailedServerException) {
                Threads.sleep(100);
                continue;
              }
              if (!(t instanceof NotServingRegionException
                  || t instanceof RegionAlreadyInTransitionException)) {
                // NotServingRegionException/RegionAlreadyInTransitionException
                // means the target server got the original region close request.
                // For other exceptions, re-close it
                LOG.debug("Got exception in retry sendRegionClose for "
                  + regionState + ", re-close it", t);
                invokeUnAssign(hri);
              }
              return; // Done.
            }
          }
        } finally {
          lock.unlock();
        }
      }
    });
}
/**
 * Set Regions-in-transition metrics.
 * This iterates the RegionInTransition map (CLSM) without synchronization.
 * The iterator is not fail-fast, which may lead to stale reads; but that is
 * better than copying the map for metrics computation, as this method is
 * invoked on a frequent interval.
 */
public void updateRegionsInTransitionMetrics() {
  final long now = System.currentTimeMillis();
  // A region in transition longer than this (ms) counts as "stuck".
  final int ritThreshold = this.server.getConfiguration()
      .getInt(HConstants.METRICS_RIT_STUCK_WARNING_THRESHOLD, 60000);
  int ritCount = 0;
  int ritOverThreshold = 0;
  long oldestAge = 0;
  for (RegionState state : regionStates.getRegionsInTransition().values()) {
    ritCount++;
    final long age = now - state.getStamp();
    if (age > ritThreshold) { // more than the threshold
      ritOverThreshold++;
    }
    oldestAge = Math.max(oldestAge, age);
  }
  if (this.metricsAssignmentManager != null) {
    this.metricsAssignmentManager.updateRITOldestAge(oldestAge);
    this.metricsAssignmentManager.updateRITCount(ritCount);
    this.metricsAssignmentManager.updateRITCountOverThreshold(ritOverThreshold);
  }
}
/**
 * Drop any cached assignment plan for the given region.
 * @param region Region whose plan we are to clear.
 */
void clearRegionPlan(final HRegionInfo region) {
  // regionPlans is keyed by encoded region name and accessed from multiple
  // threads, hence the synchronized block (see processServerShutdown/balance).
  synchronized (this.regionPlans) {
    this.regionPlans.remove(region.getEncodedName());
  }
}
/**
 * Wait (without timeout) for a region to clear regions-in-transition.
 * Convenience overload delegating to the timed variant with -1 (no timeout).
 * @param hri Region to wait on.
 * @throws IOException
 */
public void waitOnRegionToClearRegionsInTransition(final HRegionInfo hri)
    throws IOException, InterruptedException {
  waitOnRegionToClearRegionsInTransition(hri, -1L);
}
/**
 * Wait on region to clear regions-in-transition or time out.
 * @param hri Region to wait on.
 * @param timeOut Milliseconds to wait; a value &lt;= 0 means wait forever.
 * @return True when the region clears regions-in-transition before the
 *     timeout (or was never in transition); false on timeout or server stop.
 * @throws InterruptedException
 */
public boolean waitOnRegionToClearRegionsInTransition(final HRegionInfo hri, long timeOut)
    throws InterruptedException {
  if (!regionStates.isRegionInTransition(hri)) return true;
  // Non-positive timeout is treated as "no deadline".
  long end = (timeOut <= 0) ? Long.MAX_VALUE : EnvironmentEdgeManager.currentTimeMillis()
      + timeOut;
  // There is already a timeout monitor on regions in transition so I
  // should not have to have one here too?
  LOG.info("Waiting for " + hri.getEncodedName() +
    " to leave regions-in-transition, timeOut=" + timeOut + " ms.");
  while (!this.server.isStopped() && regionStates.isRegionInTransition(hri)) {
    // Poll in 100ms steps; waitForUpdate returns early on state change.
    regionStates.waitForUpdate(100);
    if (EnvironmentEdgeManager.currentTimeMillis() > end) {
      LOG.info("Timed out on waiting for " + hri.getEncodedName() + " to be assigned.");
      return false;
    }
  }
  if (this.server.isStopped()) {
    LOG.info("Giving up wait on regions in transition because stoppable.isStopped is set");
    return false;
  }
  return true;
}
/**
 * Update timers for all regions in transition going against the server in the
 * serversInUpdatingTimer.
 * Chore that drains serversInUpdatingTimer, updating per-server RIT timers.
 * Only runs when the timeout monitor is activated (tomActivated).
 */
public class TimerUpdater extends Chore {
  public TimerUpdater(final int period, final Stoppable stopper) {
    super("AssignmentTimerUpdater", period, stopper);
  }
  @Override
  protected void chore() {
    Preconditions.checkState(tomActivated);
    ServerName serverToUpdateTimer = null;
    while (!serversInUpdatingTimer.isEmpty() && !stopper.isStopped()) {
      // Walk the sorted set in order: start at first(), then advance with
      // higher() so servers added concurrently behind us are not revisited.
      if (serverToUpdateTimer == null) {
        serverToUpdateTimer = serversInUpdatingTimer.first();
      } else {
        serverToUpdateTimer = serversInUpdatingTimer
            .higher(serverToUpdateTimer);
      }
      if (serverToUpdateTimer == null) {
        break; // reached the end of the set
      }
      updateTimers(serverToUpdateTimer);
      serversInUpdatingTimer.remove(serverToUpdateTimer);
    }
  }
}
/**
 * Monitor to check for time outs on region transition operations.
 * Periodically scans all regions in transition and, for any whose state has
 * not changed within {@code timeout} ms, takes a recovery action via
 * {@link #actOnTimeOut(RegionState)}.
 */
public class TimeoutMonitor extends Chore {
  // Whether the previous chore run saw no available region servers.
  private boolean allRegionServersOffline = false;
  private ServerManager serverManager;
  // Milliseconds a region may sit in one transition state before we act.
  private final int timeout;
  /**
   * Creates a periodic monitor to check for time outs on region transition
   * operations. This will deal with retries if for some reason something
   * doesn't happen within the specified timeout.
   * @param period
   * @param stopper When {@link Stoppable#isStopped()} is true, this thread will
   * cleanup and exit cleanly.
   * @param timeout
   */
  public TimeoutMonitor(final int period, final Stoppable stopper,
      ServerManager serverManager,
      final int timeout) {
    super("AssignmentTimeoutMonitor", period, stopper);
    this.timeout = timeout;
    this.serverManager = serverManager;
  }
  private synchronized void setAllRegionServersOffline(
      boolean allRegionServersOffline) {
    this.allRegionServersOffline = allRegionServersOffline;
  }
  @Override
  protected void chore() {
    Preconditions.checkState(tomActivated);
    boolean noRSAvailable = this.serverManager.createDestinationServersList().isEmpty();
    // Iterate all regions in transition checking for time outs
    long now = System.currentTimeMillis();
    // no lock concurrent access ok: we will be working on a copy, and it's java-valid to do
    // a copy while another thread is adding/removing items
    for (String regionName : regionStates.getRegionsInTransition().keySet()) {
      RegionState regionState = regionStates.getRegionTransitionState(regionName);
      // Region may have left transition between the keySet copy and this read.
      if (regionState == null) continue;
      if (regionState.getStamp() + timeout <= now) {
        // decide on action upon timeout
        actOnTimeOut(regionState);
      } else if (this.allRegionServersOffline && !noRSAvailable) {
        RegionPlan existingPlan = regionPlans.get(regionName);
        if (existingPlan == null
            || !this.serverManager.isServerOnline(existingPlan
                .getDestination())) {
          // if some RSs just came back online, we can start the assignment
          // right away
          actOnTimeOut(regionState);
        }
      }
    }
    setAllRegionServersOffline(noRSAvailable);
  }
  /**
   * Take the recovery action appropriate for the timed-out region's state:
   * mostly re-assign or re-unassign; terminal/split/merge states are left
   * alone for other handlers.
   */
  private void actOnTimeOut(RegionState regionState) {
    HRegionInfo regionInfo = regionState.getRegion();
    LOG.info("Regions in transition timed out: " + regionState);
    // Expired! Do a retry.
    switch (regionState.getState()) {
    case CLOSED:
      LOG.info("Region " + regionInfo.getEncodedName()
          + " has been CLOSED for too long, waiting on queued "
          + "ClosedRegionHandler to run or server shutdown");
      // Update our timestamp.
      regionState.updateTimestampToNow();
      break;
    case OFFLINE:
      LOG.info("Region has been OFFLINE for too long, " + "reassigning "
          + regionInfo.getRegionNameAsString() + " to a random server");
      invokeAssign(regionInfo);
      break;
    case PENDING_OPEN:
      LOG.info("Region has been PENDING_OPEN for too "
          + "long, reassigning region=" + regionInfo.getRegionNameAsString());
      invokeAssign(regionInfo);
      break;
    case OPENING:
      processOpeningState(regionInfo);
      break;
    case OPEN:
      LOG.error("Region has been OPEN for too long, " +
        "we don't know where region was opened so can't do anything");
      regionState.updateTimestampToNow();
      break;
    case PENDING_CLOSE:
      LOG.info("Region has been PENDING_CLOSE for too "
          + "long, running forced unassign again on region="
          + regionInfo.getRegionNameAsString());
      invokeUnassign(regionInfo);
      break;
    case CLOSING:
      LOG.info("Region has been CLOSING for too " +
        "long, this should eventually complete or the server will " +
        "expire, send RPC again");
      invokeUnassign(regionInfo);
      break;
    case SPLIT:
    case SPLITTING:
    case FAILED_OPEN:
    case FAILED_CLOSE:
    case MERGING:
      // No action; these are handled elsewhere or are terminal.
      break;
    default:
      throw new IllegalStateException("Received event is not valid.");
    }
  }
}
/**
 * Handle a region that has been stuck in OPENING state for too long: inspect
 * the region's ZK assignment node and, unless the region server already moved
 * it to OPENED, queue a fresh assignment.
 * @param regionInfo the region stuck in OPENING
 */
private void processOpeningState(HRegionInfo regionInfo) {
  LOG.info("Region has been OPENING for too long, reassigning region="
      + regionInfo.getRegionNameAsString());
  // Should have a ZK node in OPENING state
  try {
    String node = ZKAssign.getNodeName(watcher, regionInfo.getEncodedName());
    Stat stat = new Stat();
    byte [] data = ZKAssign.getDataNoWatch(watcher, node, stat);
    if (data == null) {
      // Node already cleaned up; nothing for us to do.
      LOG.warn("Data is null, node " + node + " no longer exists");
      return;
    }
    RegionTransition rt = RegionTransition.parseFrom(data);
    EventType et = rt.getEventType();
    if (et == EventType.RS_ZK_REGION_OPENED) {
      // The open actually succeeded; let the watched-event handlers finish.
      LOG.debug("Region has transitioned to OPENED, allowing "
          + "watched event handlers to process");
      return;
    } else if (et != EventType.RS_ZK_REGION_OPENING && et != EventType.RS_ZK_REGION_FAILED_OPEN ) {
      LOG.warn("While timing out a region, found ZK node in unexpected state: " + et);
      return;
    }
    // Still OPENING or FAILED_OPEN: re-assign.
    invokeAssign(regionInfo);
  } catch (KeeperException ke) {
    // Fixed copy-paste bug: these messages previously said "CLOSING region"
    // although this method handles regions stuck in OPENING.
    LOG.error("Unexpected ZK exception timing out OPENING region", ke);
  } catch (DeserializationException e) {
    LOG.error("Unexpected exception parsing OPENING region", e);
  }
}
/** Queue an asynchronous assign of the region, forcing a new plan. */
void invokeAssign(HRegionInfo regionInfo) {
  invokeAssign(regionInfo, true);
}
/**
 * Queue an asynchronous assign of the region on the thread pool.
 * @param newPlan whether to compute a new assignment plan
 */
void invokeAssign(HRegionInfo regionInfo, boolean newPlan) {
  threadPoolExecutorService.submit(new AssignCallable(this, regionInfo, newPlan));
}
/** Queue an asynchronous unassign of the region on the thread pool. */
void invokeUnAssign(HRegionInfo regionInfo) {
  threadPoolExecutorService.submit(new UnAssignCallable(this, regionInfo));
}
/**
 * Queue an asynchronous unassign of the region.
 * Duplicate of {@link #invokeUnAssign(HRegionInfo)}; delegate so there is a
 * single submission path instead of two identical implementations.
 */
private void invokeUnassign(HRegionInfo regionInfo) {
  invokeUnAssign(regionInfo);
}
/** @return whether the given server is currently hosting the hbase:meta region. */
public boolean isCarryingMeta(ServerName serverName) {
  return isCarryingRegion(serverName, HRegionInfo.FIRST_META_REGIONINFO);
}
/**
 * Check if the shutdown server carries the specific region.
 * We have a bunch of places that store region location.
 * Those values aren't consistent. There is a delay of notification.
 * The location from the zookeeper unassigned node has the most recent data,
 * but the node could be deleted after the region is opened by AM.
 * The AM's info could be old when OpenedRegionHandler
 * processing hasn't finished yet when server shutdown occurs.
 * ZK data therefore takes precedence over the AM's view when present.
 * @return whether the serverName currently hosts the region
 */
private boolean isCarryingRegion(ServerName serverName, HRegionInfo hri) {
  RegionTransition rt = null;
  try {
    byte [] data = ZKAssign.getData(watcher, hri.getEncodedName());
    // This call can legitimately come by null
    rt = data == null? null: RegionTransition.parseFrom(data);
  } catch (KeeperException e) {
    server.abort("Exception reading unassigned node for region=" + hri.getEncodedName(), e);
  } catch (DeserializationException e) {
    server.abort("Exception parsing unassigned node for region=" + hri.getEncodedName(), e);
  }
  ServerName addressFromZK = rt != null? rt.getServerName(): null;
  if (addressFromZK != null) {
    // if we get something from ZK, we will use the data
    boolean matchZK = addressFromZK.equals(serverName);
    LOG.debug("Checking region=" + hri.getRegionNameAsString() + ", zk server=" + addressFromZK +
      " current=" + serverName + ", matches=" + matchZK);
    return matchZK;
  }
  // No ZK node: fall back to the assignment manager's (possibly stale) view.
  ServerName addressFromAM = regionStates.getRegionServerOfRegion(hri);
  boolean matchAM = (addressFromAM != null &&
    addressFromAM.equals(serverName));
  LOG.debug("based on AM, current region=" + hri.getRegionNameAsString() +
    " is on server=" + (addressFromAM != null ? addressFromAM : "null") +
    " server being checked: " + serverName);
  return matchAM;
}
/**
 * Process shutdown server removing any assignments.
 * Clears region plans destined for the dead server, marks its regions
 * offline, and returns the subset of regions that server-shutdown handling
 * (SSH) should re-assign.
 * @param sn Server that went down.
 * @return list of regions in transition on this server
 */
public List<HRegionInfo> processServerShutdown(final ServerName sn) {
  // Clean out any existing assignment plans for this server
  synchronized (this.regionPlans) {
    for (Iterator <Map.Entry<String, RegionPlan>> i =
        this.regionPlans.entrySet().iterator(); i.hasNext();) {
      Map.Entry<String, RegionPlan> e = i.next();
      ServerName otherSn = e.getValue().getDestination();
      // The name will be null if the region is planned for a random assign.
      if (otherSn != null && otherSn.equals(sn)) {
        // Use iterator's remove else we'll get CME
        i.remove();
      }
    }
  }
  List<HRegionInfo> regions = regionStates.serverOffline(watcher, sn);
  for (Iterator<HRegionInfo> it = regions.iterator(); it.hasNext(); ) {
    HRegionInfo hri = it.next();
    String encodedName = hri.getEncodedName();
    // We need a lock on the region as we could update it
    Lock lock = locker.acquireLock(encodedName);
    try {
      RegionState regionState =
        regionStates.getRegionTransitionState(encodedName);
      // Only keep regions that are still tied to the dead server and in a
      // state SSH can act on (failed_close/offline/pending_open/opening).
      if (regionState == null
          || (regionState.getServerName() != null && !regionState.isOnServer(sn))
          || !(regionState.isFailedClose() || regionState.isOffline()
            || regionState.isPendingOpenOrOpening())) {
        LOG.info("Skip " + regionState + " since it is not opening/failed_close"
          + " on the dead server any more: " + sn);
        it.remove();
      } else {
        try {
          // Delete the ZNode if exists
          ZKAssign.deleteNodeFailSilent(watcher, hri);
        } catch (KeeperException ke) {
          server.abort("Unexpected ZK exception deleting node " + hri, ke);
        }
        if (zkTable.isDisablingOrDisabledTable(hri.getTable())) {
          // Table is going away; just take the region offline, don't re-assign.
          regionStates.regionOffline(hri);
          it.remove();
          continue;
        }
        // Mark the region offline and assign it again by SSH
        regionStates.updateRegionState(hri, State.OFFLINE);
      }
    } finally {
      lock.unlock();
    }
  }
  return regions;
}
/**
 * Move a region per the given balancer plan: records the plan and unassigns
 * the region so it re-opens on the plan's destination.
 * No-op if the table is disabled/disabling or the region is not online.
 * @param plan Plan to execute.
 */
public void balance(final RegionPlan plan) {
  HRegionInfo hri = plan.getRegionInfo();
  TableName tableName = hri.getTable();
  if (zkTable.isDisablingOrDisabledTable(tableName)) {
    LOG.info("Ignored moving region of disabling/disabled table "
      + tableName);
    return;
  }
  // Move the region only if it's assigned
  String encodedName = hri.getEncodedName();
  // Lock the region so its state can't change while we decide and unassign.
  ReentrantLock lock = locker.acquireLock(encodedName);
  try {
    if (!regionStates.isRegionOnline(hri)) {
      RegionState state = regionStates.getRegionState(encodedName);
      LOG.info("Ignored moving region not assigned: " + hri + ", "
        + (state == null ? "not in region states" : state));
      return;
    }
    synchronized (this.regionPlans) {
      this.regionPlans.put(plan.getRegionName(), plan);
    }
    unassign(hri, false, plan.getDestination());
  } finally {
    lock.unlock();
  }
}
/**
 * Stop the assignment manager: shuts down the executors and, if the timeout
 * monitor is active, interrupts its chore threads.
 */
public void stop() {
  shutdown(); // Stop executor service, etc
  if (tomActivated){
    this.timeoutMonitor.interrupt();
    this.timerUpdater.interrupt();
  }
}
/**
 * Shutdown the threadpool executor service.
 * This is an immediate shutdown: pending ZK event work is discarded and
 * running tasks are interrupted via shutdownNow().
 */
public void shutdown() {
  // It's an immediate shutdown, so we're clearing the remaining tasks.
  synchronized (zkEventWorkerWaitingList){
    zkEventWorkerWaitingList.clear();
  }
  threadPoolExecutorService.shutdownNow();
  zkEventWorkers.shutdownNow();
  regionStateStore.stop();
}
/**
 * Mark the given table as ENABLED in zookeeper. On a ZK failure the master
 * is aborted, which is acceptable since this runs in the startup flow.
 */
protected void setEnabledTable(TableName tableName) {
  try {
    this.zkTable.setEnabledTable(tableName);
  } catch (KeeperException e) {
    // here we can abort as it is the start up flow
    final String msg = "Unable to ensure that the table " + tableName
        + " will be enabled because of a ZooKeeper issue";
    LOG.error(msg);
    this.server.abort(msg, e);
  }
}
/**
 * Set region as OFFLINED up in zookeeper asynchronously.
 * Updates the in-memory state to OFFLINE first, then issues the async ZK
 * node creation; the supplied callback receives the result.
 * @param state current state of the region; must be CLOSED or OFFLINE
 * @return True if we succeeded, false otherwise (State was incorrect or failed
 * updating zk).
 */
private boolean asyncSetOfflineInZooKeeper(final RegionState state,
    final AsyncCallback.StringCallback cb, final ServerName destination) {
  if (!state.isClosed() && !state.isOffline()) {
    // Anything else is a programming error in the caller; abort the master.
    this.server.abort("Unexpected state trying to OFFLINE; " + state,
      new IllegalStateException());
    return false;
  }
  regionStates.updateRegionState(state.getRegion(), State.OFFLINE);
  try {
    ZKAssign.asyncCreateNodeOffline(watcher, state.getRegion(),
      destination, cb, state);
  } catch (KeeperException e) {
    if (e instanceof NodeExistsException) {
      // Someone already created the offline node; treat as failure, not fatal.
      LOG.warn("Node for " + state.getRegion() + " already exists");
    } else {
      server.abort("Unexpected ZK exception creating/setting node OFFLINE", e);
    }
    return false;
  }
  return true;
}
/**
 * Try to delete the region's ZK assignment node, accepting any of the given
 * event types (the node's type must match for deletion to succeed).
 * @param desc human-readable node description used in log messages
 * @return true if a node of one of the given types was deleted
 */
private boolean deleteNodeInStates(String encodedName,
    String desc, ServerName sn, EventType... types) {
  try {
    for (EventType et: types) {
      if (ZKAssign.deleteNode(watcher, encodedName, et, sn)) {
        return true;
      }
    }
    LOG.info("Failed to delete the " + desc + " node for "
      + encodedName + ". The node type may not match");
  } catch (NoNodeException e) {
    // Already gone: fine, nothing to delete.
    if (LOG.isDebugEnabled()) {
      LOG.debug("The " + desc + " node for " + encodedName + " already deleted");
    }
  } catch (KeeperException ke) {
    server.abort("Unexpected ZK exception deleting " + desc
      + " node for the region " + encodedName, ke);
  }
  return false;
}
/** Delete the region's ZK node if it is in any merge-related state. */
private void deleteMergingNode(String encodedName, ServerName sn) {
  deleteNodeInStates(encodedName, "merging", sn, EventType.RS_ZK_REGION_MERGING,
    EventType.RS_ZK_REQUEST_REGION_MERGE, EventType.RS_ZK_REGION_MERGED);
}
/** Delete the region's ZK node if it is in any split-related state. */
private void deleteSplittingNode(String encodedName, ServerName sn) {
  deleteNodeInStates(encodedName, "splitting", sn, EventType.RS_ZK_REGION_SPLITTING,
    EventType.RS_ZK_REQUEST_REGION_SPLIT, EventType.RS_ZK_REGION_SPLIT);
}
/**
 * Handle a failed region open reported by server {@code sn}: count the
 * failure, and either give up (FAILED_OPEN) after {@code maximumAttempts}
 * or treat the region as CLOSED and re-assign it with a new plan.
 */
private void onRegionFailedOpen(
    final HRegionInfo hri, final ServerName sn) {
  String encodedName = hri.getEncodedName();
  AtomicInteger failedOpenCount = failedOpenTracker.get(encodedName);
  if (failedOpenCount == null) {
    failedOpenCount = new AtomicInteger();
    // No need to use putIfAbsent, or extra synchronization since
    // this whole handleRegion block is locked on the encoded region
    // name, and failedOpenTracker is updated only in this block
    failedOpenTracker.put(encodedName, failedOpenCount);
  }
  if (failedOpenCount.incrementAndGet() >= maximumAttempts) {
    regionStates.updateRegionState(hri, State.FAILED_OPEN);
    // remove the tracking info to save memory, also reset
    // the count for next open initiative
    failedOpenTracker.remove(encodedName);
  } else {
    // Handle this the same as if it were opened and then closed.
    RegionState regionState = regionStates.updateRegionState(hri, State.CLOSED);
    if (regionState != null) {
      // When there are more than one region server a new RS is selected as the
      // destination and the same is updated in the region plan. (HBASE-5546)
      // Use the shared helper rather than duplicating the disabling/disabled
      // ZK lookup inline (same lookup, one implementation).
      if (isTableDisabledOrDisabling(hri.getTable())) {
        offlineDisabledRegion(hri);
        return;
      }
      // ZK Node is in CLOSED state, assign it.
      regionStates.updateRegionState(hri, RegionState.State.CLOSED);
      // This below has to do w/ online enable/disable of a table
      removeClosedRegion(hri);
      try {
        getRegionPlan(hri, sn, true);
      } catch (HBaseIOException e) {
        LOG.warn("Failed to get region plan", e);
      }
      invokeAssign(hri, false);
    }
  }
}
/**
 * Handle a successful region open on server {@code sn}: record the region
 * online, clean up its ZK node (ZK-assignment mode only), reset the
 * failed-open counter, and re-close it if its table was disabled meanwhile.
 */
private void onRegionOpen(
    final HRegionInfo hri, final ServerName sn, long openSeqNum) {
  regionOnline(hri, sn, openSeqNum);
  if (useZKForAssignment) {
    try {
      // Delete the ZNode if exists
      ZKAssign.deleteNodeFailSilent(watcher, hri);
    } catch (KeeperException ke) {
      server.abort("Unexpected ZK exception deleting node " + hri, ke);
    }
  }
  // reset the count, if any
  failedOpenTracker.remove(hri.getEncodedName());
  if (isTableDisabledOrDisabling(hri.getTable())) {
    // User disabled the table while the open was in flight; undo it.
    invokeUnAssign(hri);
  }
}
/**
 * Handle a completed region close: if the table is disabled/disabling,
 * take the region fully offline; otherwise mark it CLOSED and re-assign.
 */
private void onRegionClosed(final HRegionInfo hri) {
  if (isTableDisabledOrDisabling(hri.getTable())) {
    offlineDisabledRegion(hri);
    return;
  }
  regionStates.updateRegionState(hri, RegionState.State.CLOSED);
  // This below has to do w/ online enable/disable of a table
  removeClosedRegion(hri);
  invokeAssign(hri, false);
}
/**
 * Handle a split transition reported by server {@code sn} for parent region
 * {@code p} into daughters {@code a} and {@code b}.
 * @return null on success, else a message describing why the transition
 *     was rejected
 */
private String onRegionSplit(ServerName sn, TransitionCode code,
    HRegionInfo p, HRegionInfo a, HRegionInfo b) {
  RegionState rs_p = regionStates.getRegionState(p);
  RegionState rs_a = regionStates.getRegionState(a);
  RegionState rs_b = regionStates.getRegionState(b);
  // Parent must be open/splitting on sn; daughters, if known, must be
  // open or splitting-new on the same server.
  if (!(rs_p.isOpenOrSplittingOnServer(sn)
      && (rs_a == null || rs_a.isOpenOrSplittingNewOnServer(sn))
      && (rs_b == null || rs_b.isOpenOrSplittingNewOnServer(sn)))) {
    return "Not in state good for split";
  }
  regionStates.updateRegionState(a, State.SPLITTING_NEW, sn);
  regionStates.updateRegionState(b, State.SPLITTING_NEW, sn);
  regionStates.updateRegionState(p, State.SPLITTING);
  if (code == TransitionCode.SPLIT) {
    if (TEST_SKIP_SPLIT_HANDLING) {
      return "Skipping split message, TEST_SKIP_SPLIT_HANDLING is set";
    }
    // Split complete: parent goes away, daughters come online.
    regionOffline(p, State.SPLIT);
    regionOnline(a, sn, 1);
    regionOnline(b, sn, 1);
    // User could disable the table before master knows the new region.
    if (isTableDisabledOrDisabling(p.getTable())) {
      invokeUnAssign(a);
      invokeUnAssign(b);
    }
  } else if (code == TransitionCode.SPLIT_PONR) {
    // Point of no return: persist the split in meta.
    try {
      regionStateStore.splitRegion(p, a, b, sn);
    } catch (IOException ioe) {
      LOG.info("Failed to record split region " + p.getShortNameToLog());
      return "Failed to record the splitting in meta";
    }
  } else if (code == TransitionCode.SPLIT_REVERTED) {
    // Roll back: parent comes back online, daughters are dropped.
    regionOnline(p, sn);
    regionOffline(a);
    regionOffline(b);
    if (isTableDisabledOrDisabling(p.getTable())) {
      invokeUnAssign(p);
    }
  }
  return null;
}
/**
 * Check ZK whether the given table is in DISABLED or DISABLING state.
 * @return true if the table is disabling or disabled; false otherwise, or
 *     if the ZK state could not be read (the master is aborted in that
 *     case, but abort() may return before the process exits).
 */
private boolean isTableDisabledOrDisabling(TableName t) {
  Set<TableName> disablingOrDisabled;
  try {
    disablingOrDisabled = ZKTable.getDisablingTables(watcher);
    disablingOrDisabled.addAll(ZKTable.getDisabledTables(watcher));
  } catch (KeeperException e) {
    server.abort("Cannot retrieve info about disabling or disabled tables ", e);
    // abort() may not stop this thread immediately; previously the code fell
    // through and NPE'd on the null set. Return a safe default instead.
    return false;
  }
  // Redundant "? true : false" ternary removed.
  return disablingOrDisabled.contains(t);
}
/**
 * Handle a merge transition reported by server {@code sn}, merging regions
 * {@code a} and {@code b} into new region {@code p}.
 * @return null on success, else a message describing why the transition
 *     was rejected
 */
private String onRegionMerge(ServerName sn, TransitionCode code,
    HRegionInfo p, HRegionInfo a, HRegionInfo b) {
  RegionState rs_p = regionStates.getRegionState(p);
  RegionState rs_a = regionStates.getRegionState(a);
  RegionState rs_b = regionStates.getRegionState(b);
  // Both merging regions must be open/merging on sn; the merged region,
  // if known, must be open or merging-new on the same server.
  if (!(rs_a.isOpenOrMergingOnServer(sn) && rs_b.isOpenOrMergingOnServer(sn)
      && (rs_p == null || rs_p.isOpenOrMergingNewOnServer(sn)))) {
    return "Not in state good for merge";
  }
  regionStates.updateRegionState(a, State.MERGING);
  regionStates.updateRegionState(b, State.MERGING);
  regionStates.updateRegionState(p, State.MERGING_NEW, sn);
  String encodedName = p.getEncodedName();
  if (code == TransitionCode.READY_TO_MERGE) {
    // Remember the pair so a later revert/complete can be correlated.
    mergingRegions.put(encodedName,
      new PairOfSameType<HRegionInfo>(a, b));
  } else if (code == TransitionCode.MERGED) {
    // Merge complete: sources go away, merged region comes online.
    mergingRegions.remove(encodedName);
    regionOffline(a, State.MERGED);
    regionOffline(b, State.MERGED);
    regionOnline(p, sn, 1);
    // User could disable the table before master knows the new region.
    if (isTableDisabledOrDisabling(p.getTable())) {
      invokeUnAssign(p);
    }
  } else if (code == TransitionCode.MERGE_PONR) {
    // Point of no return: persist the merge in meta.
    try {
      regionStateStore.mergeRegions(p, a, b, sn);
    } catch (IOException ioe) {
      LOG.info("Failed to record merged region " + p.getShortNameToLog());
      return "Failed to record the merging in meta";
    }
  } else {
    // Merge reverted: sources come back online, merged region is dropped.
    mergingRegions.remove(encodedName);
    regionOnline(a, sn);
    regionOnline(b, sn);
    regionOffline(p);
    if (isTableDisabledOrDisabling(p.getTable())) {
      invokeUnAssign(a);
      invokeUnAssign(b);
    }
  }
  return null;
}
/**
 * A helper to handle a region-merging transition event from ZK.
 * It transitions merging regions to MERGING state, and on a MERGED event
 * finalizes the merge (offlines the sources, onlines the merged region,
 * deletes the ZK node).
 * @param rt the parsed region transition; its payload carries the merged
 *     region followed by the two source regions
 * @return true if the event was handled, false if it was dropped
 */
private boolean handleRegionMerging(final RegionTransition rt, final String encodedName,
    final String prettyPrintedRegionName, final ServerName sn) {
  if (!serverManager.isServerOnline(sn)) {
    LOG.warn("Dropped merging! ServerName=" + sn + " unknown.");
    return false;
  }
  byte [] payloadOfMerging = rt.getPayload();
  List<HRegionInfo> mergingRegions;
  try {
    mergingRegions = HRegionInfo.parseDelimitedFrom(
      payloadOfMerging, 0, payloadOfMerging.length);
  } catch (IOException e) {
    LOG.error("Dropped merging! Failed reading " + rt.getEventType()
      + " payload for " + prettyPrintedRegionName);
    return false;
  }
  // Payload layout: merged region p, then sources a and b.
  assert mergingRegions.size() == 3;
  HRegionInfo p = mergingRegions.get(0);
  HRegionInfo hri_a = mergingRegions.get(1);
  HRegionInfo hri_b = mergingRegions.get(2);
  RegionState rs_p = regionStates.getRegionState(p);
  RegionState rs_a = regionStates.getRegionState(hri_a);
  RegionState rs_b = regionStates.getRegionState(hri_b);
  if (!((rs_a == null || rs_a.isOpenOrMergingOnServer(sn))
      && (rs_b == null || rs_b.isOpenOrMergingOnServer(sn))
      && (rs_p == null || rs_p.isOpenOrMergingNewOnServer(sn)))) {
    LOG.warn("Dropped merging! Not in state good for MERGING; rs_p="
      + rs_p + ", rs_a=" + rs_a + ", rs_b=" + rs_b);
    return false;
  }
  EventType et = rt.getEventType();
  if (et == EventType.RS_ZK_REQUEST_REGION_MERGE) {
    // Master drives the znode from REQUEST_REGION_MERGE to REGION_MERGING.
    try {
      if (RegionMergeTransaction.transitionMergingNode(watcher, p,
          hri_a, hri_b, sn, -1, EventType.RS_ZK_REQUEST_REGION_MERGE,
          EventType.RS_ZK_REGION_MERGING) == -1) {
        // Transition failed; re-read the node to see whether the RS raced
        // us and already moved it to MERGING/MERGED (which is acceptable).
        byte[] data = ZKAssign.getData(watcher, encodedName);
        EventType currentType = null;
        if (data != null) {
          RegionTransition newRt = RegionTransition.parseFrom(data);
          currentType = newRt.getEventType();
        }
        if (currentType == null || (currentType != EventType.RS_ZK_REGION_MERGED
            && currentType != EventType.RS_ZK_REGION_MERGING)) {
          LOG.warn("Failed to transition pending_merge node "
            + encodedName + " to merging, it's now " + currentType);
          return false;
        }
      }
    } catch (Exception e) {
      LOG.warn("Failed to transition pending_merge node "
        + encodedName + " to merging", e);
      return false;
    }
  }
  synchronized (regionStates) {
    regionStates.updateRegionState(hri_a, State.MERGING);
    regionStates.updateRegionState(hri_b, State.MERGING);
    regionStates.updateRegionState(p, State.MERGING_NEW, sn);
    if (et != EventType.RS_ZK_REGION_MERGED) {
      // Merge still in flight; track the source pair.
      this.mergingRegions.put(encodedName,
        new PairOfSameType<HRegionInfo>(hri_a, hri_b));
    } else {
      // Merge done: sources offline, merged region online.
      this.mergingRegions.remove(encodedName);
      regionOffline(hri_a, State.MERGED);
      regionOffline(hri_b, State.MERGED);
      regionOnline(p, sn);
    }
  }
  if (et == EventType.RS_ZK_REGION_MERGED) {
    LOG.debug("Handling MERGED event for " + encodedName + "; deleting node");
    // Remove region from ZK
    try {
      boolean successful = false;
      while (!successful) {
        // It's possible that the RS tickles in between the reading of the
        // znode and the deleting, so it's safe to retry.
        successful = ZKAssign.deleteNode(watcher, encodedName,
          EventType.RS_ZK_REGION_MERGED, sn);
      }
    } catch (KeeperException e) {
      if (e instanceof NoNodeException) {
        String znodePath = ZKUtil.joinZNode(watcher.splitLogZNode, encodedName);
        LOG.debug("The znode " + znodePath + " does not exist.  May be deleted already.");
      } else {
        server.abort("Error deleting MERGED node " + encodedName, e);
      }
    }
    LOG.info("Handled MERGED event; merged=" + p.getRegionNameAsString()
      + ", region_a=" + hri_a.getRegionNameAsString() + ", region_b="
      + hri_b.getRegionNameAsString() + ", on " + sn);
    // User could disable the table before master knows the new region.
    if (zkTable.isDisablingOrDisabledTable(p.getTable())) {
      unassign(p);
    }
  }
  return true;
}
/**
 * A helper to handle region splitting transition event.
 * <p>
 * Validates that the reporting server is known and that the parent plus the
 * two daughter regions are in states compatible with splitting, records the
 * SPLITTING / SPLITTING_NEW states, and on RS_ZK_REGION_SPLIT completes the
 * split: parent goes offline, daughters come online, and the transition
 * znode is deleted.
 *
 * @param rt the region transition event read from ZK
 * @param encodedName encoded name of the parent (splitting) region
 * @param prettyPrintedRegionName parent region name, for log messages
 * @param sn the region server carrying out the split
 * @return true if the event was handled; false if it was dropped
 */
private boolean handleRegionSplitting(final RegionTransition rt, final String encodedName,
    final String prettyPrintedRegionName, final ServerName sn) {
  if (!serverManager.isServerOnline(sn)) {
    LOG.warn("Dropped splitting! ServerName=" + sn + " unknown.");
    return false;
  }
  // The event payload carries the two daughter regions to be created.
  byte [] payloadOfSplitting = rt.getPayload();
  List<HRegionInfo> splittingRegions;
  try {
    splittingRegions = HRegionInfo.parseDelimitedFrom(
      payloadOfSplitting, 0, payloadOfSplitting.length);
  } catch (IOException e) {
    LOG.error("Dropped splitting! Failed reading " + rt.getEventType()
      + " payload for " + prettyPrintedRegionName);
    return false;
  }
  assert splittingRegions.size() == 2;
  HRegionInfo hri_a = splittingRegions.get(0);
  HRegionInfo hri_b = splittingRegions.get(1);
  RegionState rs_p = regionStates.getRegionState(encodedName);
  RegionState rs_a = regionStates.getRegionState(hri_a);
  RegionState rs_b = regionStates.getRegionState(hri_b);
  // Parent must be open (or already splitting) on this server, and each
  // daughter must be either unknown yet or splitting-new on the same server.
  if (!((rs_p == null || rs_p.isOpenOrSplittingOnServer(sn))
      && (rs_a == null || rs_a.isOpenOrSplittingNewOnServer(sn))
      && (rs_b == null || rs_b.isOpenOrSplittingNewOnServer(sn)))) {
    LOG.warn("Dropped splitting! Not in state good for SPLITTING; rs_p="
      + rs_p + ", rs_a=" + rs_a + ", rs_b=" + rs_b);
    return false;
  }
  if (rs_p == null) {
    // Splitting region should be online
    rs_p = regionStates.updateRegionState(rt, State.OPEN);
    if (rs_p == null) {
      LOG.warn("Received splitting for region " + prettyPrintedRegionName
        + " from server " + sn + " but it doesn't exist anymore,"
        + " probably already processed its split");
      return false;
    }
    regionStates.regionOnline(rs_p.getRegion(), sn);
  }
  HRegionInfo p = rs_p.getRegion();
  EventType et = rt.getEventType();
  if (et == EventType.RS_ZK_REQUEST_REGION_SPLIT) {
    // Master approves the split request by moving the znode from
    // RS_ZK_REQUEST_REGION_SPLIT to RS_ZK_REGION_SPLITTING.
    try {
      if (SplitTransaction.transitionSplittingNode(watcher, p,
          hri_a, hri_b, sn, -1, EventType.RS_ZK_REQUEST_REGION_SPLIT,
          EventType.RS_ZK_REGION_SPLITTING) == -1) {
        // Transition failed. If the node was already moved forward to
        // SPLITTING/SPLIT (e.g. by a concurrent actor), keep going;
        // otherwise drop the event.
        byte[] data = ZKAssign.getData(watcher, encodedName);
        EventType currentType = null;
        if (data != null) {
          RegionTransition newRt = RegionTransition.parseFrom(data);
          currentType = newRt.getEventType();
        }
        if (currentType == null || (currentType != EventType.RS_ZK_REGION_SPLIT
            && currentType != EventType.RS_ZK_REGION_SPLITTING)) {
          LOG.warn("Failed to transition pending_split node "
            + encodedName + " to splitting, it's now " + currentType);
          return false;
        }
      }
    } catch (Exception e) {
      LOG.warn("Failed to transition pending_split node "
        + encodedName + " to splitting", e);
      return false;
    }
  }
  // Record the in-memory state changes atomically with respect to other
  // readers of regionStates.
  synchronized (regionStates) {
    regionStates.updateRegionState(hri_a, State.SPLITTING_NEW, sn);
    regionStates.updateRegionState(hri_b, State.SPLITTING_NEW, sn);
    regionStates.updateRegionState(rt, State.SPLITTING);
    // The below is for testing ONLY! We can't do fault injection easily, so
    // resort to this kinda uglyness -- St.Ack 02/25/2011.
    if (TEST_SKIP_SPLIT_HANDLING) {
      LOG.warn("Skipping split message, TEST_SKIP_SPLIT_HANDLING is set");
      return true; // return true so that the splitting node stays
    }
    if (et == EventType.RS_ZK_REGION_SPLIT) {
      regionOffline(p, State.SPLIT);
      regionOnline(hri_a, sn);
      regionOnline(hri_b, sn);
    }
  }
  if (et == EventType.RS_ZK_REGION_SPLIT) {
    LOG.debug("Handling SPLIT event for " + encodedName + "; deleting node");
    // Remove region from ZK
    try {
      boolean successful = false;
      while (!successful) {
        // It's possible that the RS tickles in between the reading of the
        // znode and the deleting, so it's safe to retry.
        successful = ZKAssign.deleteNode(watcher, encodedName,
          EventType.RS_ZK_REGION_SPLIT, sn);
      }
    } catch (KeeperException e) {
      if (e instanceof NoNodeException) {
        // Already gone; nothing left to clean up.
        String znodePath = ZKUtil.joinZNode(watcher.splitLogZNode, encodedName);
        LOG.debug("The znode " + znodePath + " does not exist. May be deleted already.");
      } else {
        server.abort("Error deleting SPLIT node " + encodedName, e);
      }
    }
    LOG.info("Handled SPLIT event; parent=" + p.getRegionNameAsString()
      + ", daughter a=" + hri_a.getRegionNameAsString() + ", daughter b="
      + hri_b.getRegionNameAsString() + ", on " + sn);
    // User could disable the table before master knows the new region.
    if (zkTable.isDisablingOrDisabledTable(p.getTable())) {
      unassign(hri_a);
      unassign(hri_b);
    }
  }
  return true;
}
/**
 * A region is offline. The new state should be the specified one,
 * if not null. If the specified state is null, the new state is Offline.
 * The specified state can be Split/Merged/Offline/null only.
 */
private void regionOffline(final HRegionInfo regionInfo, final State state) {
  regionStates.regionOffline(regionInfo, state);
  removeClosedRegion(regionInfo);
  // remove the region plan as well just in case.
  clearRegionPlan(regionInfo);
  // Let the balancer forget about the region too.
  balancer.regionOffline(regionInfo);
  // Tell our listeners that a region was closed
  sendRegionClosedNotification(regionInfo);
}
/**
 * Notifies every registered {@code AssignmentListener} that the given
 * region was opened on the given server. No-op when nobody is listening.
 */
private void sendRegionOpenedNotification(final HRegionInfo regionInfo,
    final ServerName serverName) {
  if (this.listeners.isEmpty()) {
    return;
  }
  for (AssignmentListener listener : this.listeners) {
    listener.regionOpened(regionInfo, serverName);
  }
}
/**
 * Notifies every registered {@code AssignmentListener} that the given
 * region was closed. No-op when nobody is listening.
 */
private void sendRegionClosedNotification(final HRegionInfo regionInfo) {
  if (this.listeners.isEmpty()) {
    return;
  }
  for (AssignmentListener listener : this.listeners) {
    listener.regionClosed(regionInfo);
  }
}
/**
 * Try to update some region states. If the state machine prevents
 * such update, an error message is returned to explain the reason.
 *
 * It's expected that in each transition there should have just one
 * region for opening/closing, 3 regions for splitting/merging.
 * These regions should be on the server that requested the change.
 *
 * Region state machine. Only these transitions
 * are expected to be triggered by a region server.
 *
 * On the state transition:
 *  (1) Open/Close should be initiated by master
 *      (a) Master sets the region to pending_open/pending_close
 *        in memory and hbase:meta after sending the request
 *        to the region server
 *      (b) Region server reports back to the master
 *        after open/close is done (either success/failure)
 *      (c) If region server has problem to report the status
 *        to master, it must be because the master is down or some
 *        temporary network issue. Otherwise, the region server should
 *        abort since it must be a bug. If the master is not accessible,
 *        the region server should keep trying until the server is
 *        stopped or till the status is reported to the (new) master
 *      (d) If region server dies in the middle of opening/closing
 *        a region, SSH picks it up and finishes it
 *      (e) If master dies in the middle, the new master recovers
 *        the state during initialization from hbase:meta. Region server
 *        can report any transition that has not been reported to
 *        the previous active master yet
 *  (2) Split/merge is initiated by region servers
 *      (a) To split a region, a region server sends a request
 *        to master to try to set a region to splitting, together with
 *        two daughters (to be created) to splitting new. If approved
 *        by the master, the splitting can then move ahead
 *      (b) To merge two regions, a region server sends a request to
 *        master to try to set the new merged region (to be created) to
 *        merging_new, together with two regions (to be merged) to merging.
 *        If it is ok with the master, the merge can then move ahead
 *      (c) Once the splitting/merging is done, the region server
 *        reports the status back to the master either success/failure.
 *      (d) Other scenarios should be handled similarly as for
 *        region open/close
 *
 * @param serverName the region server that reported the transition
 * @param transition the transition, carrying the code and region info(s)
 * @return null on success, otherwise an error message explaining why the
 *         transition was rejected
 */
protected String onRegionTransition(final ServerName serverName,
    final RegionStateTransition transition) {
  TransitionCode code = transition.getTransitionCode();
  HRegionInfo hri = HRegionInfo.convert(transition.getRegionInfo(0));
  RegionState current = regionStates.getRegionState(hri);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Got transition " + code + " for "
      + (current != null ? current.toString() : hri.getShortNameToLog())
      + " from " + serverName);
  }
  String errorMsg = null;
  switch (code) {
  case OPENED:
    // Duplicate report of an already-open region: nothing to do.
    if (current != null && current.isOpened() && current.isOnServer(serverName)) {
      LOG.info("Region " + hri.getShortNameToLog() + " is already " + current.getState() + " on "
        + serverName);
      break;
    }
    // Intentional fall through: OPENED and FAILED_OPEN share the
    // pending-open validation below; the branch on the code value
    // inside distinguishes them.
  case FAILED_OPEN:
    if (current == null
        || !current.isPendingOpenOrOpeningOnServer(serverName)) {
      errorMsg = hri.getShortNameToLog()
        + " is not pending open on " + serverName;
    } else if (code == TransitionCode.FAILED_OPEN) {
      onRegionFailedOpen(hri, serverName);
    } else {
      long openSeqNum = HConstants.NO_SEQNUM;
      if (transition.hasOpenSeqNum()) {
        openSeqNum = transition.getOpenSeqNum();
      }
      if (openSeqNum < 0) {
        errorMsg = "Newly opened region has invalid open seq num " + openSeqNum;
      } else {
        onRegionOpen(hri, serverName, openSeqNum);
      }
    }
    break;
  case CLOSED:
    if (current == null
        || !current.isPendingCloseOrClosingOnServer(serverName)) {
      errorMsg = hri.getShortNameToLog()
        + " is not pending close on " + serverName;
    } else {
      onRegionClosed(hri);
    }
    break;
  case READY_TO_SPLIT:
  case SPLIT_PONR:
  case SPLIT:
  case SPLIT_REVERTED:
    // Split transitions carry parent + two daughters.
    errorMsg = onRegionSplit(serverName, code, hri,
      HRegionInfo.convert(transition.getRegionInfo(1)),
      HRegionInfo.convert(transition.getRegionInfo(2)));
    break;
  case READY_TO_MERGE:
  case MERGE_PONR:
  case MERGED:
  case MERGE_REVERTED:
    // Merge transitions carry merged region + the two regions merged.
    errorMsg = onRegionMerge(serverName, code, hri,
      HRegionInfo.convert(transition.getRegionInfo(1)),
      HRegionInfo.convert(transition.getRegionInfo(2)));
    break;
  default:
    errorMsg = "Unexpected transition code " + code;
  }
  if (errorMsg != null) {
    // Fixed typo in the log message ("transtion" -> "transition").
    LOG.error("Failed to transition region from " + current + " to "
      + code + " by " + serverName + ": " + errorMsg);
  }
  return errorMsg;
}
/**
 * Accessor for the load balancer this assignment manager was built with.
 *
 * @return Instance of load balancer
 */
public LoadBalancer getBalancer() {
  return balancer;
}
}
| apache-2.0 |
phambryan/dropwizard | dropwizard-jetty/src/test/java/io/dropwizard/jetty/HttpConnectorFactoryTest.java | 12103 | package io.dropwizard.jetty;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.jetty9.InstrumentedConnectionFactory;
import com.fasterxml.jackson.databind.ObjectMapper;
import io.dropwizard.configuration.ResourceConfigurationSourceProvider;
import io.dropwizard.configuration.YamlConfigurationFactory;
import io.dropwizard.jackson.DiscoverableSubtypeResolver;
import io.dropwizard.jackson.Jackson;
import io.dropwizard.logging.ConsoleAppenderFactory;
import io.dropwizard.logging.FileAppenderFactory;
import io.dropwizard.logging.SyslogAppenderFactory;
import io.dropwizard.util.DataSize;
import io.dropwizard.util.Duration;
import io.dropwizard.validation.BaseValidator;
import org.assertj.core.api.InstanceOfAssertFactories;
import org.eclipse.jetty.http.CookieCompliance;
import org.eclipse.jetty.http.HttpCompliance;
import org.eclipse.jetty.server.ForwardedRequestCustomizer;
import org.eclipse.jetty.server.HttpConfiguration;
import org.eclipse.jetty.server.HttpConnectionFactory;
import org.eclipse.jetty.server.ProxyConnectionFactory;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.util.thread.QueuedThreadPool;
import org.eclipse.jetty.util.thread.ScheduledExecutorScheduler;
import org.eclipse.jetty.util.thread.ThreadPool;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import javax.validation.Validator;
import java.util.Optional;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
/**
 * Tests for {@code HttpConnectorFactory}: discoverability, YAML parsing of
 * minimal and full configurations, and the Jetty connector built from the
 * factory. The previously duplicated connector/server teardown logic is
 * centralized in {@link #shutdown(ServerConnector, Server)}.
 */
class HttpConnectorFactoryTest {
    private final ObjectMapper objectMapper = Jackson.newObjectMapper();
    private final Validator validator = BaseValidator.newValidator();

    @BeforeEach
    void setUp() {
        // Register the appender/connector subtypes so YAML polymorphic
        // deserialization can resolve them.
        objectMapper.getSubtypeResolver().registerSubtypes(ConsoleAppenderFactory.class,
                FileAppenderFactory.class, SyslogAppenderFactory.class, HttpConnectorFactory.class);
    }

    @Test
    void isDiscoverable() {
        assertThat(new DiscoverableSubtypeResolver().getDiscoveredSubtypes())
                .contains(HttpConnectorFactory.class);
    }

    /** A minimal YAML should yield the documented defaults. */
    @Test
    void testParseMinimalConfiguration() throws Exception {
        HttpConnectorFactory http =
                new YamlConfigurationFactory<>(HttpConnectorFactory.class, validator, objectMapper, "dw")
                        .build(new ResourceConfigurationSourceProvider(), "yaml/http-connector-minimal.yml");

        assertThat(http.getPort()).isEqualTo(8080);
        assertThat(http.getBindHost()).isNull();
        assertThat(http.isInheritChannel()).isFalse();
        assertThat(http.getHeaderCacheSize()).isEqualTo(DataSize.bytes(512));
        assertThat(http.getOutputBufferSize()).isEqualTo(DataSize.kibibytes(32));
        assertThat(http.getMaxRequestHeaderSize()).isEqualTo(DataSize.kibibytes(8));
        assertThat(http.getMaxResponseHeaderSize()).isEqualTo(DataSize.kibibytes(8));
        assertThat(http.getInputBufferSize()).isEqualTo(DataSize.kibibytes(8));
        assertThat(http.getIdleTimeout()).isEqualTo(Duration.seconds(30));
        assertThat(http.getMinBufferPoolSize()).isEqualTo(DataSize.bytes(64));
        assertThat(http.getBufferPoolIncrement()).isEqualTo(DataSize.bytes(1024));
        assertThat(http.getMaxBufferPoolSize()).isEqualTo(DataSize.kibibytes(64));
        assertThat(http.getMinRequestDataPerSecond()).isEqualTo(DataSize.bytes(0));
        assertThat(http.getMinResponseDataPerSecond()).isEqualTo(DataSize.bytes(0));
        assertThat(http.getAcceptorThreads()).isEmpty();
        assertThat(http.getSelectorThreads()).isEmpty();
        assertThat(http.getAcceptQueueSize()).isNull();
        assertThat(http.isReuseAddress()).isTrue();
        assertThat(http.isUseServerHeader()).isFalse();
        assertThat(http.isUseDateHeader()).isTrue();
        assertThat(http.isUseForwardedHeaders()).isFalse();
        assertThat(http.getHttpCompliance()).isEqualTo(HttpCompliance.RFC7230);
        assertThat(http.getRequestCookieCompliance()).isEqualTo(CookieCompliance.RFC6265);
        assertThat(http.getResponseCookieCompliance()).isEqualTo(CookieCompliance.RFC6265);
    }

    /** A fully specified YAML should override every default. */
    @Test
    void testParseFullConfiguration() throws Exception {
        HttpConnectorFactory http =
                new YamlConfigurationFactory<>(HttpConnectorFactory.class, validator, objectMapper, "dw")
                        .build(new ResourceConfigurationSourceProvider(), "yaml/http-connector.yml");

        assertThat(http.getPort()).isEqualTo(9090);
        assertThat(http.getBindHost()).isEqualTo("127.0.0.1");
        assertThat(http.isInheritChannel()).isTrue();
        assertThat(http.getHeaderCacheSize()).isEqualTo(DataSize.bytes(256));
        assertThat(http.getOutputBufferSize()).isEqualTo(DataSize.kibibytes(128));
        assertThat(http.getMaxRequestHeaderSize()).isEqualTo(DataSize.kibibytes(4));
        assertThat(http.getMaxResponseHeaderSize()).isEqualTo(DataSize.kibibytes(4));
        assertThat(http.getInputBufferSize()).isEqualTo(DataSize.kibibytes(4));
        assertThat(http.getIdleTimeout()).isEqualTo(Duration.seconds(10));
        assertThat(http.getMinBufferPoolSize()).isEqualTo(DataSize.bytes(128));
        assertThat(http.getBufferPoolIncrement()).isEqualTo(DataSize.bytes(500));
        assertThat(http.getMaxBufferPoolSize()).isEqualTo(DataSize.kibibytes(32));
        assertThat(http.getMinRequestDataPerSecond()).isEqualTo(DataSize.bytes(42));
        assertThat(http.getMinResponseDataPerSecond()).isEqualTo(DataSize.bytes(200));
        assertThat(http.getAcceptorThreads()).contains(1);
        assertThat(http.getSelectorThreads()).contains(4);
        assertThat(http.getAcceptQueueSize()).isEqualTo(1024);
        assertThat(http.isReuseAddress()).isFalse();
        assertThat(http.isUseServerHeader()).isTrue();
        assertThat(http.isUseDateHeader()).isFalse();
        assertThat(http.isUseForwardedHeaders()).isTrue();

        HttpConfiguration httpConfiguration = http.buildHttpConfiguration();
        assertThat(httpConfiguration.getCustomizers()).hasAtLeastOneElementOfType(ForwardedRequestCustomizer.class);

        assertThat(http.getHttpCompliance()).isEqualTo(HttpCompliance.RFC2616);
        assertThat(http.getRequestCookieCompliance()).isEqualTo(CookieCompliance.RFC2965);
        assertThat(http.getResponseCookieCompliance()).isEqualTo(CookieCompliance.RFC6265);
    }

    /** Builds a real connector and inspects the Jetty objects it produced. */
    @Test
    void testBuildConnector() throws Exception {
        HttpConnectorFactory http = spy(new HttpConnectorFactory());
        http.setBindHost("127.0.0.1");
        http.setAcceptorThreads(Optional.of(1));
        http.setSelectorThreads(Optional.of(2));
        http.setAcceptQueueSize(1024);
        http.setMinResponseDataPerSecond(DataSize.bytes(200));
        http.setMinRequestDataPerSecond(DataSize.bytes(42));
        http.setRequestCookieCompliance(CookieCompliance.RFC6265);
        http.setResponseCookieCompliance(CookieCompliance.RFC6265);

        MetricRegistry metrics = new MetricRegistry();
        ThreadPool threadPool = new QueuedThreadPool();

        Server server = null;
        ServerConnector connector = null;
        try {
            server = new Server();
            connector = (ServerConnector) http.build(server, metrics, "test-http-connector", threadPool);
            assertThat(connector.getPort()).isEqualTo(8080);
            assertThat(connector.getHost()).isEqualTo("127.0.0.1");
            assertThat(connector.getAcceptQueueSize()).isEqualTo(1024);
            assertThat(connector.getReuseAddress()).isTrue();
            assertThat(connector.getIdleTimeout()).isEqualTo(30000);
            assertThat(connector.getName()).isEqualTo("test-http-connector");
            assertThat(connector.getServer()).isSameAs(server);
            assertThat(connector.getScheduler()).isInstanceOf(ScheduledExecutorScheduler.class);
            assertThat(connector.getExecutor()).isSameAs(threadPool);

            verify(http).buildBufferPool(64, 1024, 64 * 1024);

            assertThat(connector.getAcceptors()).isEqualTo(1);
            assertThat(connector.getSelectorManager().getSelectorCount()).isEqualTo(2);

            InstrumentedConnectionFactory connectionFactory =
                    (InstrumentedConnectionFactory) connector.getConnectionFactory("http/1.1");
            assertThat(connectionFactory).isInstanceOf(InstrumentedConnectionFactory.class);
            assertThat(connectionFactory)
                    .extracting("connectionFactory")
                    .asInstanceOf(InstanceOfAssertFactories.type(HttpConnectionFactory.class))
                    .satisfies(factory -> {
                        assertThat(factory.getInputBufferSize()).isEqualTo(8192);
                        assertThat(factory.getHttpCompliance()).isEqualByComparingTo(HttpCompliance.RFC7230);
                    })
                    .extracting(HttpConnectionFactory::getHttpConfiguration)
                    .satisfies(config -> {
                        assertThat(config.getHeaderCacheSize()).isEqualTo(512);
                        assertThat(config.getOutputBufferSize()).isEqualTo(32768);
                        assertThat(config.getRequestHeaderSize()).isEqualTo(8192);
                        assertThat(config.getResponseHeaderSize()).isEqualTo(8192);
                        assertThat(config.getSendDateHeader()).isTrue();
                        assertThat(config.getSendServerVersion()).isFalse();
                        assertThat(config.getCustomizers()).noneMatch(customizer -> customizer.getClass().equals(ForwardedRequestCustomizer.class));
                        assertThat(config.getMinRequestDataRate()).isEqualTo(42);
                        assertThat(config.getMinResponseDataRate()).isEqualTo(200);
                        assertThat(config.getRequestCookieCompliance()).isEqualTo(CookieCompliance.RFC6265);
                        assertThat(config.getResponseCookieCompliance()).isEqualTo(CookieCompliance.RFC6265);
                    });
        } finally {
            shutdown(connector, server);
        }
    }

    @Test
    void testBuildConnectorWithProxyProtocol() throws Exception {
        HttpConnectorFactory http = new HttpConnectorFactory();
        http.setBindHost("127.0.0.1");
        http.setUseProxyProtocol(true);

        MetricRegistry metrics = new MetricRegistry();
        ThreadPool threadPool = new QueuedThreadPool();

        Server server = null;
        ServerConnector connector = null;
        try {
            server = new Server();
            connector = (ServerConnector) http.build(server, metrics, "test-http-connector-with-proxy-protocol", threadPool);
            // PROXY protocol support must be the first connection factory in line.
            assertThat(connector.getConnectionFactories().toArray()[0]).isInstanceOf(ProxyConnectionFactory.class);
        } finally {
            shutdown(connector, server);
        }
    }

    @Test
    void testDefaultAcceptQueueSize() throws Exception {
        HttpConnectorFactory http = new HttpConnectorFactory();
        http.setBindHost("127.0.0.1");
        http.setAcceptorThreads(Optional.of(1));
        http.setSelectorThreads(Optional.of(2));

        MetricRegistry metrics = new MetricRegistry();
        ThreadPool threadPool = new QueuedThreadPool();

        Server server = null;
        ServerConnector connector = null;
        try {
            server = new Server();
            connector = (ServerConnector) http.build(server, metrics, "test-http-connector", threadPool);
            // With no explicit acceptQueueSize the OS TCP backlog is used.
            assertThat(connector.getAcceptQueueSize()).isEqualTo(NetUtil.getTcpBacklog());
        } finally {
            shutdown(connector, server);
        }
    }

    /**
     * Stops the connector first, then the server. Null-safe so it can run
     * from a finally block even when construction failed part-way through.
     */
    private static void shutdown(ServerConnector connector, Server server) throws Exception {
        if (connector != null) {
            connector.stop();
        }
        if (server != null) {
            server.stop();
        }
    }
}
| apache-2.0 |
david-romero/Pachanga | src/main/java/com/p/service/UsersService.java | 5489 | package com.p.service;
import java.util.Collection;
import java.util.Optional;
import java.util.Random;
import java.util.UUID;
import javax.annotation.Resource;
import org.apache.log4j.Logger;
import org.hibernate.SessionFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.context.SecurityContext;
import org.springframework.security.core.context.SecurityContextHolder;
import org.springframework.security.crypto.password.PasswordEncoder;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Isolation;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.util.Assert;
import com.p.model.Notificacion;
import com.p.model.Role;
import com.p.model.User;
import com.p.model.modelAux.RegisterUser;
import com.p.model.repositories.UserRepository;
@Service("usersService")
@Transactional(isolation = Isolation.READ_UNCOMMITTED)
public class UsersService {

    protected static Logger logger = Logger.getLogger("service");

    @Resource(name = "sessionFactory")
    private SessionFactory sessionFactory;

    @Autowired
    private UserRepository repository;

    @Autowired
    private NotificacionService notificacionService;

    @Autowired
    private EmailManager emailManager;

    @Autowired
    private PasswordEncoder passwordEncoder;

    /**
     * Deletes an existing user.
     *
     * @param id the id of the existing user; must be positive
     */
    @Transactional
    public void delete(Integer id) {
        Assert.notNull(id);
        Assert.isTrue(id > 0);
        repository.delete(id);
    }

    /**
     * Saves a new user or updates an existing one, depending on whether the
     * ID is already set. A newly created user gets a random avatar, a welcome
     * notification and a welcome email.
     *
     * @param us the user to persist
     * @return the persisted user
     */
    @Transactional()
    public User save(User us) {
        gestionarAvatar(us);
        gestionarAltaUsuario(us);
        User usr = repository.save(us);
        return usr;
    }

    /** Runs the sign-up side effects (notification + email) for new users only. */
    protected void gestionarAltaUsuario(User us) {
        // An unset/zero id means the user has not been persisted yet.
        if (us.getId() == null || us.getId().equals(0)) {
            gestionarNotificacionAltaUsuario(us);
            gestionarEmailAltaUsuario(us);
        }
    }

    /** Sends the welcome email for a newly registered user. */
    protected void gestionarEmailAltaUsuario(User us) {
        emailManager.notify(us);
    }

    /**
     * Creates the welcome notification for a newly registered user, sent
     * from the first administrator found.
     *
     * @param us the newly registered user
     */
    protected void gestionarNotificacionAltaUsuario(User us) {
        // New user: send a notification from an administrator account.
        Notificacion notificacion = notificacionService.create();
        Optional<User> admin = repository.findAdministradores().stream()
                .findFirst();
        Assert.isTrue(admin.isPresent());
        User administrador = admin.get();
        notificacion.setEmisor(administrador);
        notificacion.setReceptor(us);
        notificacion.setTitulo("Gracias por registrarte en Pachanga!");
        notificacion
                .setContenido("¿Porque no completas tu perfil? Quedará mucho más mono :)");
        notificacionService.save(notificacion);
    }

    /**
     * Assigns a random avatar CSS class when the user has none yet.
     *
     * @param us the user to decorate
     */
    protected void gestionarAvatar(User us) {
        if (us.getAvatar() == null) {
            Random rd = new Random();
            us.setAvatar(User.avatarCss[rd.nextInt(User.avatarCss.length)]);
        }
    }

    /** Looks a user up by email (the login name). */
    @Transactional
    public User getByEmail(String login) {
        Assert.notNull(login);
        Assert.isTrue(login.length() > 0);
        return repository.findByEmail(login);
    }

    @Transactional
    public User findOne(Integer id) {
        Assert.notNull(id);
        Assert.isTrue(id > -1);
        return repository.findOne(id);
    }

    @Transactional
    public Collection<User> findAll() {
        return repository.findAll();
    }

    /** Returns all users except the one with the given email. */
    @Transactional
    public Collection<User> findAllDifferent(String email) {
        return repository.findAllDifferent(email);
    }

    @Transactional(readOnly = true)
    /**
     * Returns the currently authenticated user.
     *
     * @author David Romero Alcaide
     * @return the user bound to the current security context; never null
     */
    public User getPrincipal() {
        User result;
        SecurityContext context;
        Authentication authentication;
        Object principal;

        // If the asserts in this method fail, then you're
        // likely to have your Tomcat's working directory
        // corrupt. Please, clear your browser's cache, stop
        // Tomcat, update your Maven's project configuration,
        // clean your project, clean Tomcat's working directory,
        // republish your project, and start it over.
        context = SecurityContextHolder.getContext();
        Assert.notNull(context);
        authentication = context.getAuthentication();
        Assert.notNull(authentication);
        principal = authentication.getPrincipal();
        Assert.isTrue(principal instanceof org.springframework.security.core.userdetails.User);
        result = getByEmail(((org.springframework.security.core.userdetails.User) principal)
                .getUsername());
        Assert.notNull(result);
        Assert.isTrue(result.getId() != 0);

        return result;
    }

    /** Maps a registration form object onto a fresh domain {@link User}. */
    public User map(RegisterUser user) {
        User usr = create();
        usr.setEmail(user.getEmail());
        usr.setPassword(user.getPassword());
        return usr;
    }

    /** Creates a blank user with the default (non-admin) role. */
    public User create() {
        User user = new User();
        user.setFirstName(" ");
        user.setLastName(" ");
        user.setRole(Role.ROLE_USER);
        return user;
    }

    /**
     * Regenerates the user's password: stores only the encoded hash and
     * emails the plain-text value to the user.
     *
     * Fix: previously the *encoded* hash was both stored and emailed, so the
     * user received a string that could not be used to log in.
     */
    @Transactional
    public void regenerarPassword(User user) {
        String newPass = UUID.randomUUID().toString();
        user.setPassword(passwordEncoder.encode(newPass));
        save(user);
        emailManager.notifyNewPassword(user, newPass);
    }

    @Transactional(isolation = Isolation.READ_UNCOMMITTED)
    public byte[] findImage(Integer id) {
        Assert.notNull(id);
        Assert.isTrue(id > 0);
        return repository.findImage(id);
    }

    /** Full-text search over users. */
    @Transactional(readOnly = true)
    public Collection<? extends User> find(String texto) {
        return repository.findFullText(texto);
    }
}
| apache-2.0 |
halentest/solr | solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java | 11050 | package org.apache.solr.cloud;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.http.params.CoreConnectionPNames;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.CloudSolrServer;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocRouter;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkCoreNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.CollectionParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.Hash;
import org.apache.solr.handler.admin.CollectionsHandler;
import org.apache.solr.update.DirectUpdateHandler2;
import org.apache.zookeeper.KeeperException;
import org.junit.After;
import org.junit.Before;
import java.io.IOException;
import java.net.MalformedURLException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class ShardSplitTest extends BasicDistributedZkTest {
public static final String SHARD1_0 = SHARD1 + "_0";
public static final String SHARD1_1 = SHARD1 + "_1";
@Override
@Before
public void setUp() throws Exception {
  super.setUp();
  // Tell the mini cluster how many shards to create, and persist solr.xml
  // changes so core/shard state survives Jetty restarts within the test.
  System.setProperty("numShards", Integer.toString(sliceCount));
  System.setProperty("solr.xml.persist", "true");
}
/**
 * Shuts down clients and clears test system properties.
 *
 * Fix: {@code super.tearDown()} was previously invoked twice (once at the
 * top of the method and again near the end); the redundant second call has
 * been removed.
 */
@Override
@After
public void tearDown() throws Exception {
  super.tearDown();
  if (VERBOSE || printLayoutOnTearDown) {
    super.printLayout();
  }
  if (controlClient != null) {
    controlClient.shutdown();
  }
  if (cloudClient != null) {
    cloudClient.shutdown();
  }
  if (controlClientCloud != null) {
    controlClientCloud.shutdown();
  }
  System.clearProperty("zkHost");
  System.clearProperty("numShards");
  System.clearProperty("solr.xml.persist");

  // insurance
  DirectUpdateHandler2.commitOnClose = true;
}
@Override
public void doTest() throws Exception {
  waitForThingsToLevelOut(15);

  ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
  DocRouter router = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getRouter();
  Slice shard1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1);
  // Compute the two hash ranges the split will produce so the test can
  // predict which sub-shard each document must land in.
  DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
  final List<DocRouter.Range> ranges = router.partitionRange(2, shard1Range);
  final int[] docCounts = new int[ranges.size()];
  int numReplicas = shard1.getReplicas().size();

  del("*:*");
  // Seed 100 documents before the split begins.
  for (int id = 0; id < 100; id++) {
    indexAndUpdateCount(ranges, docCounts, id);
  }
  commit();

  // Keep indexing concurrently while the shard is being split.
  // NOTE(review): atLeast(401) is re-evaluated on every loop iteration, so
  // the bound is re-randomized each pass — presumably intended simply to
  // "index for a while"; confirm before relying on an exact doc count.
  Thread indexThread = new Thread() {
    @Override
    public void run() {
      for (int id = 101; id < atLeast(401); id++) {
        try {
          indexAndUpdateCount(ranges, docCounts, id);
          Thread.sleep(atLeast(25));
        } catch (Exception e) {
          log.error("Exception while adding doc", e);
        }
      }
    }
  };
  indexThread.start();

  splitShard(SHARD1);

  log.info("Layout after split: \n");
  printLayout();

  // Wait for background indexing to finish before verifying counts.
  indexThread.join();

  commit();

  checkDocCountsAndShardStates(docCounts, numReplicas);

  // todo can't call waitForThingsToLevelOut because it looks for jettys of all shards
  // and the new sub-shards don't have any.
  waitForRecoveriesToFinish(true);
  //waitForThingsToLevelOut(15);
}
/**
 * Verifies that each sub-shard holds exactly the documents indexed into its
 * hash range, and that both sub-shard slices become ACTIVE with the expected
 * replica count.
 *
 * Fixes: the second assertNotNull message said "shard1_0" (copy-paste) and
 * the two HttpSolrServer instances were never shut down (HttpClient leak).
 */
protected void checkDocCountsAndShardStates(int[] docCounts, int numReplicas) throws SolrServerException, KeeperException, InterruptedException {
  // Query each sub-shard leader directly (distrib=false) for its local count.
  SolrQuery query = new SolrQuery("*:*").setRows(1000).setFields("id", "_version_");
  query.set("distrib", false);

  ZkCoreNodeProps shard1_0 = getLeaderUrlFromZk(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_0);
  HttpSolrServer shard1_0Server = new HttpSolrServer(shard1_0.getCoreUrl());
  QueryResponse response;
  try {
    response = shard1_0Server.query(query);
  } finally {
    shard1_0Server.shutdown(); // release the underlying HttpClient
  }
  long shard10Count = response.getResults().getNumFound();

  ZkCoreNodeProps shard1_1 = getLeaderUrlFromZk(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_1);
  HttpSolrServer shard1_1Server = new HttpSolrServer(shard1_1.getCoreUrl());
  QueryResponse response2;
  try {
    response2 = shard1_1Server.query(query);
  } finally {
    shard1_1Server.shutdown();
  }
  long shard11Count = response2.getResults().getNumFound();

  logDebugHelp(docCounts, response, shard10Count, response2, shard11Count);

  assertEquals("Wrong doc count on shard1_0", docCounts[0], shard10Count);
  assertEquals("Wrong doc count on shard1_1", docCounts[1], shard11Count);

  // Poll up to ~5s for both sub-shard slices to be marked active.
  ClusterState clusterState = null;
  Slice slice1_0 = null, slice1_1 = null;
  int i = 0;
  for (i = 0; i < 10; i++) {
    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
    zkStateReader.updateClusterState(true);
    clusterState = zkStateReader.getClusterState();
    slice1_0 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, "shard1_0");
    slice1_1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, "shard1_1");
    if (Slice.ACTIVE.equals(slice1_0.getState()) && Slice.ACTIVE.equals(slice1_1.getState()))
      break;
    Thread.sleep(500);
  }
  log.info("ShardSplitTest waited for {} ms for shard state to be set to active", i * 500);

  assertNotNull("Cluster state does not contain shard1_0", slice1_0);
  assertNotNull("Cluster state does not contain shard1_1", slice1_1);
  assertEquals("shard1_0 is not active", Slice.ACTIVE, slice1_0.getState());
  assertEquals("shard1_1 is not active", Slice.ACTIVE, slice1_1.getState());
  assertEquals("Wrong number of replicas created for shard1_0", numReplicas, slice1_0.getReplicas().size());
  assertEquals("Wrong number of replicas created for shard1_1", numReplicas, slice1_1.getReplicas().size());
}
/**
 * Issues a SPLITSHARD collection-admin request for the given shard of "collection1"
 * against the base URL of the jetty hosting the first SHARD1 replica.
 *
 * @param shardId the shard to split (e.g. "shard1")
 * @throws SolrServerException if the admin request fails
 * @throws IOException on communication errors
 */
protected void splitShard(String shardId) throws SolrServerException, IOException {
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("action", CollectionParams.CollectionAction.SPLITSHARD.toString());
    params.set("collection", "collection1");
    params.set("shard", shardId);
    SolrRequest request = new QueryRequest(params);
    request.setPath("/admin/collections");

    // Derive the node base URL by stripping the trailing core name.
    String baseUrl = ((HttpSolrServer) shardToJetty.get(SHARD1).get(0).client.solrClient)
        .getBaseURL();
    baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());

    HttpSolrServer baseServer = new HttpSolrServer(baseUrl);
    try {
        baseServer.setConnectionTimeout(15000);
        // Shard splitting can take a long time; allow 5x the ZK timeout for the response.
        baseServer.setSoTimeout((int) (CollectionsHandler.DEFAULT_ZK_TIMEOUT * 5));
        baseServer.request(request);
    } finally {
        // Previously the client was never shut down, leaking its HTTP connection pool.
        baseServer.shutdown();
    }
}
/**
 * Indexes a document with the given id and increments the expected-count slot of
 * every hash range that contains the murmur3 hash of the id.
 *
 * @param ranges hash ranges of the sub-shards
 * @param docCounts expected per-range document counters, parallel to {@code ranges}
 * @param id the numeric document id to index
 */
protected void indexAndUpdateCount(List<DocRouter.Range> ranges, int[] docCounts, int id) throws Exception {
    indexr("id", id);

    // todo - hook in custom hashing
    byte[] idBytes = String.valueOf(id).getBytes("UTF-8");
    int murmurHash = Hash.murmurhash3_x86_32(idBytes, 0, idBytes.length, 0);

    int rangeIndex = 0;
    for (DocRouter.Range candidate : ranges) {
        if (candidate.includes(murmurHash)) {
            docCounts[rangeIndex]++;
        }
        rangeIndex++;
    }
}
/**
 * Logs the expected per-range document counts next to the actual counts observed on
 * the two sub-shards, and reports documents that appear twice on one sub-shard
 * ("EXTRA") or on both sub-shards ("DUPLICATE"), keyed by id and _version_.
 *
 * @param docCounts expected document count per hash range (index i maps to shard1_i)
 * @param response query response from the shard1_0 leader
 * @param shard10Count number of documents found on shard1_0
 * @param response2 query response from the shard1_1 leader
 * @param shard11Count number of documents found on shard1_1
 */
protected void logDebugHelp(int[] docCounts, QueryResponse response, long shard10Count, QueryResponse response2, long shard11Count) {
    for (int i = 0; i < docCounts.length; i++) {
        log.info("Expected docCount for shard1_{} = {}", i, docCounts[i]);
    }
    log.info("Actual docCount for shard1_0 = {}", shard10Count);
    log.info("Actual docCount for shard1_1 = {}", shard11Count);

    // id -> _version_ for every document seen on shard1_0; used to detect
    // documents that also show up on shard1_1 after the split.
    Map<String, String> idVsVersion = new HashMap<String, String>();
    Map<String, SolrDocument> shard10Docs = new HashMap<String, SolrDocument>();
    Map<String, SolrDocument> shard11Docs = new HashMap<String, SolrDocument>();

    for (SolrDocument document : response.getResults()) {
        String id = document.getFieldValue("id").toString();
        idVsVersion.put(id, document.getFieldValue("_version_").toString());
        SolrDocument old = shard10Docs.put(id, document);
        if (old != null) {
            // The same id was returned twice by shard1_0.
            log.error("EXTRA: ID: {} on shard1_0. Old version: {} new version: {}",
                document.getFieldValue("id"), old.getFieldValue("_version_"),
                document.getFieldValue("_version_"));
        }
    }

    for (SolrDocument document : response2.getResults()) {
        String value = document.getFieldValue("id").toString();
        String version = idVsVersion.get(value);
        if (version != null) {
            // The same id is present on both sub-shards.
            log.error("DUPLICATE: ID: {} , shard1_0Version: {} shard1_1Version:{}",
                value, version, document.getFieldValue("_version_"));
        }
        SolrDocument old = shard11Docs.put(document.getFieldValue("id").toString(), document);
        if (old != null) {
            log.error("EXTRA: ID: {} on shard1_1. Old version: {} new version: {}",
                document.getFieldValue("id"), old.getFieldValue("_version_"),
                document.getFieldValue("_version_"));
        }
    }
}
@Override
protected SolrServer createNewSolrServer(String collection, String baseUrl) {
    // Widen the socket read timeout to 5 minutes: shard-split operations in this
    // test can legitimately take much longer than the default timeout.
    HttpSolrServer server = (HttpSolrServer) super.createNewSolrServer(collection, baseUrl);
    server.setSoTimeout(5 * 60 * 1000);
    return server;
}
@Override
protected SolrServer createNewSolrServer(int port) {
    // Same 5 minute socket timeout as the (collection, baseUrl) overload, so
    // slow split requests do not time out regardless of how the client is built.
    HttpSolrServer server = (HttpSolrServer) super.createNewSolrServer(port);
    server.setSoTimeout(5 * 60 * 1000);
    return server;
}
@Override
protected CloudSolrServer createCloudClient(String defaultCollection) throws MalformedURLException {
    // Propagate the extended 5 minute socket timeout to the load-balanced HTTP
    // client used internally by the cloud-aware client as well.
    CloudSolrServer client = super.createCloudClient(defaultCollection);
    client.getLbServer().getHttpClient().getParams().setParameter(CoreConnectionPNames.SO_TIMEOUT, 5 * 60 * 1000);
    return client;
}
}
| apache-2.0 |
SoftwareKing/zstack | header/src/main/java/org/zstack/header/identity/APICreatePolicyMsg.java | 1059 | package org.zstack.header.identity;
import org.zstack.header.message.APICreateMessage;
import org.zstack.header.message.APIMessage;
import org.zstack.header.message.APIParam;
/**
 * API message requesting creation of a new policy.
 *
 * The policy is created under the account bound to the caller's session (see
 * {@link #getAccountUuid()}). Callers must hold the CREATE_POLICY role, as
 * declared by the {@code @NeedRoles} annotation below.
 */
@NeedRoles(roles = {IdentityRoles.CREATE_POLICY_ROLE})
public class APICreatePolicyMsg extends APICreateMessage implements AccountMessage {
    // Policy name; mandatory API parameter.
    @APIParam
    private String name;
    // Optional free-form description of the policy.
    private String description;
    // Serialized policy content; mandatory API parameter. The exact format is
    // defined by the policy engine consuming this message — not visible here.
    @APIParam
    private String policyData;

    /** Returns the uuid of the account owning the new policy: the session's account. */
    @Override
    public String getAccountUuid() {
        return this.getSession().getAccountUuid();
    }

    public String getPolicyData() {
        return policyData;
    }

    public void setPolicyData(String policyData) {
        this.policyData = policyData;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public String getDescription() {
        return description;
    }

    public void setDescription(String description) {
        this.description = description;
    }
}
| apache-2.0 |
akira-baruah/bazel | src/main/java/com/google/devtools/build/lib/profiler/memory/AllocationTracker.java | 14364 | // Copyright 2017 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.devtools.build.lib.profiler.memory;
import com.google.common.base.Objects;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.MapMaker;
import com.google.devtools.build.lib.concurrent.ThreadSafety.ConditionallyThreadCompatible;
import com.google.devtools.build.lib.concurrent.ThreadSafety.ThreadSafe;
import com.google.devtools.build.lib.packages.AspectClass;
import com.google.devtools.build.lib.packages.RuleClass;
import com.google.devtools.build.lib.packages.RuleFunction;
import com.google.devtools.build.lib.syntax.Debug;
import com.google.devtools.build.lib.syntax.Location;
import com.google.devtools.build.lib.syntax.StarlarkCallable;
import com.google.devtools.build.lib.syntax.StarlarkThread;
import com.google.monitoring.runtime.instrumentation.Sampler;
import com.google.perftools.profiles.ProfileProto.Function;
import com.google.perftools.profiles.ProfileProto.Line;
import com.google.perftools.profiles.ProfileProto.Profile;
import com.google.perftools.profiles.ProfileProto.Sample;
import com.google.perftools.profiles.ProfileProto.ValueType;
import java.io.FileOutputStream;
import java.io.IOException;
import java.time.Instant;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import java.util.zip.GZIPOutputStream;
import javax.annotation.Nullable;
/** Tracks allocations for memory reporting. */
@ConditionallyThreadCompatible
@SuppressWarnings("ThreadLocalUsage") // the AllocationTracker is effectively a global
public final class AllocationTracker implements Sampler, Debug.ThreadHook {

  // A mapping from Java thread to StarlarkThread.
  // Used to effect a hidden StarlarkThread parameter to sampleAllocation.
  // TODO(adonovan): opt: merge the three different ThreadLocals in use here.
  private final ThreadLocal<StarlarkThread> starlarkThread = new ThreadLocal<>();

  @Override
  public void onPushFirst(StarlarkThread thread) {
    starlarkThread.set(thread);
  }

  @Override
  public void onPopLast(StarlarkThread thread) {
    starlarkThread.remove();
  }

  /** One sampled allocation: where it occurred and how many bytes it stands for. */
  private static class AllocationSample {
    @Nullable final RuleClass ruleClass; // Current rule being analysed, if any
    @Nullable final AspectClass aspectClass; // Current aspect being analysed, if any
    final ImmutableList<Frame> callstack; // Starlark callstack, if any
    final long bytes;

    AllocationSample(
        @Nullable RuleClass ruleClass,
        @Nullable AspectClass aspectClass,
        ImmutableList<Frame> callstack,
        long bytes) {
      this.ruleClass = ruleClass;
      this.aspectClass = aspectClass;
      this.callstack = callstack;
      this.bytes = bytes;
    }
  }

  /** GC-friendly internal copy of a Starlark stack frame (no live StarlarkCallable refs). */
  private static class Frame {
    final String name;
    final Location loc;
    @Nullable final RuleFunction ruleFunction;

    Frame(String name, Location loc, @Nullable RuleFunction ruleFunction) {
      this.name = name;
      this.loc = loc;
      this.ruleFunction = ruleFunction;
    }
  }

  // Weak keys: a sample disappears once the sampled object is garbage collected.
  private final Map<Object, AllocationSample> allocations = new MapMaker().weakKeys().makeMap();
  private final int samplePeriod;
  private final int sampleVariance;
  private boolean enabled = true;

  /**
   * Cheap wrapper class for a long. Avoids having to do two thread-local lookups per allocation.
   */
  private static final class LongValue {
    long value;
  }

  private final ThreadLocal<LongValue> currentSampleBytes = ThreadLocal.withInitial(LongValue::new);
  private final ThreadLocal<Long> nextSampleBytes = ThreadLocal.withInitial(this::getNextSample);
  private final Random random = new Random();

  AllocationTracker(int samplePeriod, int variance) {
    this.samplePeriod = samplePeriod;
    this.sampleVariance = variance;
  }

  // Called by instrumentation.recordAllocation, which is in turn called
  // by an instrumented version of the application assembled on the fly
  // by instrumentation.AllocationInstrumenter.
  // The instrumenter inserts a call to recordAllocation after every
  // memory allocation instruction in the original class.
  //
  // This function runs within 'new', so is not supposed to allocate memory;
  // see Sampler interface. In fact it allocates in nearly a dozen places.
  // TODO(adonovan): suppress reentrant calls by setting a thread-local flag.
  @Override
  @ThreadSafe
  public void sampleAllocation(int count, String desc, Object newObj, long size) {
    if (!enabled) {
      return;
    }

    @Nullable StarlarkThread thread = starlarkThread.get();

    // Calling Debug.getCallStack is a dubious operation here.
    // First it allocates memory, which breaks the Sampler contract.
    // Second, the allocation could in principle occur while the thread's
    // representation invariants are temporarily broken (that is, during
    // the call to ArrayList.add when pushing a new stack frame).
    // For now at least, the allocation done by ArrayList.add occurs before
    // the representation of the ArrayList is changed, so it is safe,
    // but this is a fragile assumption.
    ImmutableList<Debug.Frame> callstack =
        thread != null ? Debug.getCallStack(thread) : ImmutableList.of();

    RuleClass ruleClass = CurrentRuleTracker.getRule();
    AspectClass aspectClass = CurrentRuleTracker.getAspect();

    // Should we bother sampling?
    if (callstack.isEmpty() && ruleClass == null && aspectClass == null) {
      return;
    }

    // Convert the thread's stack right away to our internal form.
    // It is not safe to inspect Debug.Frame references once the thread resumes,
    // and keeping StarlarkCallable values live defeats garbage collection.
    ImmutableList.Builder<Frame> frames = ImmutableList.builderWithExpectedSize(callstack.size());
    for (Debug.Frame fr : callstack) {
      // The frame's PC location is currently not updated at every step,
      // only at function calls, so the leaf frame's line number may be
      // slightly off; see the tests.
      // TODO(b/149023294): remove comment when we move to a compiled representation.
      StarlarkCallable fn = fr.getFunction();
      frames.add(
          new Frame(
              fn.getName(),
              fr.getLocation(),
              fn instanceof RuleFunction ? (RuleFunction) fn : null));
    }

    // If we start getting stack overflows here, it's because the memory sampling
    // implementation has changed to call back into the sampling method immediately on
    // every allocation. Since thread locals can allocate, this can in this case lead
    // to infinite recursion. This method will then need to be rewritten to not
    // allocate, or at least not allocate to obtain its sample counters.
    LongValue bytesValue = currentSampleBytes.get();
    long bytes = bytesValue.value + size;
    if (bytes < nextSampleBytes.get()) {
      bytesValue.value = bytes;
      return;
    }
    bytesValue.value = 0;
    nextSampleBytes.set(getNextSample());
    allocations.put(newObj, new AllocationSample(ruleClass, aspectClass, frames.build(), bytes));
  }

  /** Returns the byte threshold for the next sample: samplePeriod +/- uniform variance. */
  private long getNextSample() {
    return (long) samplePeriod
        + (sampleVariance > 0 ? (random.nextInt(sampleVariance * 2) - sampleVariance) : 0);
  }

  /** A pair of rule/aspect name and the bytes it consumes. */
  public static final class RuleBytes {
    private final String name;
    private long bytes;

    public RuleBytes(String name) {
      this.name = name;
    }

    /** The number of bytes total occupied by this rule or aspect class. */
    public long getBytes() {
      return bytes;
    }

    public RuleBytes addBytes(long bytes) {
      this.bytes += bytes;
      return this;
    }

    @Override
    public String toString() {
      return String.format("RuleBytes(%s, %d)", name, bytes);
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (o == null || getClass() != o.getClass()) {
        return false;
      }
      RuleBytes ruleBytes = (RuleBytes) o;
      return bytes == ruleBytes.bytes && Objects.equal(name, ruleBytes.name);
    }

    @Override
    public int hashCode() {
      return Objects.hashCode(name, bytes);
    }
  }

  // If the topmost stack entry is a call to a rule function, returns it.
  @Nullable
  private static RuleFunction getRule(AllocationSample sample) {
    Frame top = Iterables.getLast(sample.callstack, null);
    return top != null ? top.ruleFunction : null;
  }

  /**
   * Returns the total memory consumption for rules and aspects, keyed by {@link RuleClass#getKey}
   * or {@link AspectClass#getKey}.
   */
  public void getRuleMemoryConsumption(
      Map<String, RuleBytes> rules, Map<String, RuleBytes> aspects) {
    // Make sure we don't track our own allocations. The try/finally guarantees
    // tracking is re-enabled even if aggregation throws; previously an exception
    // here would leave the tracker disabled for the rest of the process.
    enabled = false;
    try {
      System.gc();

      // Get loading phase memory for rules.
      for (AllocationSample sample : allocations.values()) {
        RuleFunction rule = getRule(sample);
        if (rule != null) {
          RuleClass ruleClass = rule.getRuleClass();
          String key = ruleClass.getKey();
          RuleBytes ruleBytes = rules.computeIfAbsent(key, k -> new RuleBytes(ruleClass.getName()));
          rules.put(key, ruleBytes.addBytes(sample.bytes));
        }
      }
      // Get analysis phase memory for rules and aspects
      for (AllocationSample sample : allocations.values()) {
        if (sample.ruleClass != null) {
          String key = sample.ruleClass.getKey();
          RuleBytes ruleBytes =
              rules.computeIfAbsent(key, k -> new RuleBytes(sample.ruleClass.getName()));
          rules.put(key, ruleBytes.addBytes(sample.bytes));
        }
        if (sample.aspectClass != null) {
          String key = sample.aspectClass.getKey();
          RuleBytes ruleBytes =
              aspects.computeIfAbsent(key, k -> new RuleBytes(sample.aspectClass.getName()));
          aspects.put(key, ruleBytes.addBytes(sample.bytes));
        }
      }
    } finally {
      enabled = true;
    }
  }

  /** Dumps all Starlark analysis time allocations to a pprof-compatible file. */
  public void dumpSkylarkAllocations(String path) throws IOException {
    // Make sure we don't track our own allocations. The try/finally guarantees
    // tracking is re-enabled even when writing the profile fails with an
    // IOException; previously the tracker would stay disabled in that case.
    enabled = false;
    try {
      System.gc();
      Profile profile = buildMemoryProfile();
      try (GZIPOutputStream outputStream = new GZIPOutputStream(new FileOutputStream(path))) {
        profile.writeTo(outputStream);
        outputStream.finish();
      }
    } finally {
      enabled = true;
    }
  }

  /** Builds a pprof {@link Profile} ("memory"/"bytes" sample type) from the recorded samples. */
  Profile buildMemoryProfile() {
    Profile.Builder profile = Profile.newBuilder();
    StringTable stringTable = new StringTable(profile);
    FunctionTable functionTable = new FunctionTable(profile, stringTable);
    LocationTable locationTable = new LocationTable(profile, functionTable);
    profile.addSampleType(
        ValueType.newBuilder()
            .setType(stringTable.get("memory"))
            .setUnit(stringTable.get("bytes"))
            .build());
    for (AllocationSample sample : allocations.values()) {
      // Skip empty callstacks
      if (sample.callstack.isEmpty()) {
        continue;
      }
      Sample.Builder b = Sample.newBuilder().addValue(sample.bytes);
      // pprof expects leaf-first location order, hence the reversed stack.
      for (Frame fr : sample.callstack.reverse()) {
        b.addLocationId(locationTable.get(fr.loc.file(), fr.name, fr.loc.line()));
      }
      profile.addSample(b.build());
    }
    profile.setTimeNanos(Instant.now().getEpochSecond() * 1000000000);
    return profile.build();
  }

  /** Interns strings into the profile's string table; index 0 is the empty string. */
  private static class StringTable {
    final Profile.Builder profile;
    final Map<String, Long> table = new HashMap<>();
    long index = 0;

    StringTable(Profile.Builder profile) {
      this.profile = profile;
      get(""); // 0 is reserved for the empty string
    }

    long get(String str) {
      return table.computeIfAbsent(
          str,
          key -> {
            profile.addStringTable(key);
            return index++;
          });
    }
  }

  /** Interns (file, function) pairs into the profile's function table. */
  private static class FunctionTable {
    final Profile.Builder profile;
    final StringTable stringTable;
    final Map<String, Long> table = new HashMap<>();
    long index = 1; // 0 is reserved

    FunctionTable(Profile.Builder profile, StringTable stringTable) {
      this.profile = profile;
      this.stringTable = stringTable;
    }

    long get(String file, String function) {
      return table.computeIfAbsent(
          file + "#" + function,
          key -> {
            Function fn =
                Function.newBuilder()
                    .setId(index)
                    .setFilename(stringTable.get(file))
                    .setName(stringTable.get(function))
                    .build();
            profile.addFunction(fn);
            return index++;
          });
    }
  }

  /** Interns (file, function, line) triples into the profile's location table. */
  private static class LocationTable {
    final Profile.Builder profile;
    final FunctionTable functionTable;
    final Map<String, Long> table = new HashMap<>();
    long index = 1; // 0 is reserved

    LocationTable(Profile.Builder profile, FunctionTable functionTable) {
      this.profile = profile;
      this.functionTable = functionTable;
    }

    long get(String file, String function, long line) {
      return table.computeIfAbsent(
          file + "#" + function + "#" + line,
          key -> {
            com.google.perftools.profiles.ProfileProto.Location location =
                com.google.perftools.profiles.ProfileProto.Location.newBuilder()
                    .setId(index)
                    .addLine(
                        Line.newBuilder()
                            .setFunctionId(functionTable.get(file, function))
                            .setLine(line)
                            .build())
                    .build();
            profile.addLocation(location);
            return index++;
          });
    }
  }
}
| apache-2.0 |
m-m-m/service | base/src/main/java/net/sf/mmm/service/base/client/RemoteInvocationCallData.java | 2216 | /* Copyright (c) The m-m-m Team, Licensed under the Apache License, Version 2.0
* http://www.apache.org/licenses/LICENSE-2.0 */
package net.sf.mmm.service.base.client;
import net.sf.mmm.service.api.RemoteInvocationCall;
import net.sf.mmm.util.lang.api.function.Consumer;
/**
* This is a simple container for the data corresponding to a {@link RemoteInvocationCall}.
*
* @param <RESULT> is the generic type of the method return-type.
* @param <CALL> is the generic type of the {@link #getCall() call} data.
* @author Joerg Hohwiller (hohwille at users.sourceforge.net)
* @since 1.0.0
*/
public class RemoteInvocationCallData<RESULT, CALL extends RemoteInvocationCall> {

  /** Receives the service result if the invocation completes normally. */
  private final Consumer<? extends RESULT> successCallback;

  /** Receives the error if the invocation fails. */
  private final Consumer<Throwable> failureCallback;

  /** The call payload; set exactly once via {@link #setCall(RemoteInvocationCall)}. */
  private CALL call;

  /**
   * The constructor.
   *
   * @param successCallback is the callback that {@link Consumer#accept(Object) receives} the result on
   *        success.
   * @param failureCallback is the callback that {@link Consumer#accept(Object) receives} the failure on
   *        error.
   */
  public RemoteInvocationCallData(Consumer<? extends RESULT> successCallback, Consumer<Throwable> failureCallback) {

    super();
    this.successCallback = successCallback;
    this.failureCallback = failureCallback;
  }

  /**
   * @return the callback invoked with the result on success.
   */
  public Consumer<? extends RESULT> getSuccessCallback() {

    return this.successCallback;
  }

  /**
   * @return the callback invoked with the {@link Throwable} on failure.
   */
  public Consumer<Throwable> getFailureCallback() {

    return this.failureCallback;
  }

  /**
   * @return the actual call data (either {@link net.sf.mmm.service.api.command.RemoteInvocationCommand}
   *         itself or {@link net.sf.mmm.service.base.rpc.GenericRemoteInvocationRpcCall}).
   */
  public CALL getCall() {

    return this.call;
  }

  /**
   * Sets the call payload. May be invoked only once and only with a non-null value
   * (enforced via assertions).
   *
   * @param call is the new value of {@link #getCall()}.
   */
  public void setCall(CALL call) {

    assert (this.call == null);
    assert (call != null);
    this.call = call;
  }
}
| apache-2.0 |
dzh/jframe | jframe/jframe-core/src/main/java/jframe/core/plugin/PluginListener.java | 244 | /**
*
*/
package jframe.core.plugin;
import java.util.EventListener;
/**
* @author dzh
* @date Sep 12, 2013 9:42:33 PM
* @since 1.0
*/
public interface PluginListener extends EventListener {

    /**
     * Invoked when the state of a plugin changes.
     *
     * @param event the {@link PluginEvent} describing the change
     */
    void pluginChanged(PluginEvent event);
}
| apache-2.0 |
GillesMoris/OSS | src/org/parosproxy/paros/view/AbstractFrame.java | 10722 | /*
* Created on May 17, 2004
*
* Paros and its related class files.
*
* Paros is an HTTP/HTTPS proxy for assessing web application security.
* Copyright (C) 2003-2004 Chinotec Technologies Company
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the Clarified Artistic License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* Clarified Artistic License for more details.
*
* You should have received a copy of the Clarified Artistic License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
// ZAP: 2013/01/16 Minor fix to prevent NPE
// ZAP: 2014/10/17 Issue 1308: Updated for latest icons
// ZAP: 2015/02/10 Issue 1528: Support user defined font size
// ZAP: 2015/09/07 Move icon loading to a utility class
package org.parosproxy.paros.view;
import java.awt.Dimension;
import java.awt.Frame;
import java.awt.Image;
import java.awt.Point;
import java.awt.Toolkit;
import java.awt.event.ComponentAdapter;
import java.awt.event.ComponentEvent;
import java.awt.event.WindowEvent;
import java.awt.event.WindowStateListener;
import java.util.ArrayList;
import java.util.List;
import java.util.prefs.BackingStoreException;
import java.util.prefs.Preferences;
import javax.swing.JFrame;
import org.apache.log4j.Logger;
import org.parosproxy.paros.Constant;
import org.zaproxy.zap.utils.DisplayUtils;
/**
* Generic Frame, which handles some basic properties.
* <ul>
* <li>Sets the icon(s) for the frame, which are the ZAP icons</li>
* <li>Centers the frame on screen</li>
* <li>Sets the frame to _not_ visible</li>
* <li>Sets a common font for the frame</li>
* <li>Sets a default title (ZAP application name)</li>
* <li>Preserves window state, location and size correctly (will survive multiple session)</li>
* </ul>
* Hint for implementers: If you use this class,
* don't use {@link #setSize(Dimension)}, but {@link #setPreferredSize(Dimension)}
* instead. Also, don't use {@link #setLocation(Point)}. This abstract class
* will automatically take care of size and position.
*/
public abstract class AbstractFrame extends JFrame {

    private static final long serialVersionUID = 6751593232255236597L;

    private static final String PREF_WINDOW_STATE = "window.state";
    private static final String PREF_WINDOW_SIZE = "window.size";
    private static final String PREF_WINDOW_POSITION = "window.position";

    private static final int WINDOW_DEFAULT_WIDTH = 800;
    private static final int WINDOW_DEFAULT_HEIGHT = 600;

    /**
     * Hint: Preferences are only saved by package.
     * We have to use a prefix for separation.
     */
    private final Preferences preferences;
    private final String prefnzPrefix = this.getClass().getSimpleName()+".";
    private final Logger logger = Logger.getLogger(AbstractFrame.class);

    /**
     * This is the default constructor
     */
    public AbstractFrame() {
        super();
        this.preferences = Preferences.userNodeForPackage(getClass());
        initialize();
    }

    /**
     * This method initializes this frame: sets icons and default title,
     * restores persisted size/position/state (falling back to defaults and
     * centering), and registers listeners that persist geometry changes.
     */
    private void initialize() {
        // ZAP: Rebrand
        this.setIconImages(DisplayUtils.getZapIconImages());

        this.setVisible(false);
        this.setTitle(Constant.PROGRAM_NAME);

        final Dimension dim = restoreWindowSize();
        if (dim == null) {
            this.setSize(WINDOW_DEFAULT_WIDTH, WINDOW_DEFAULT_HEIGHT);
        }
        final Point point = restoreWindowLocation();
        if (point == null) {
            centerFrame();
        }
        restoreWindowState();
        this.addWindowStateListener(new FrameWindowStateListener());
        this.addComponentListener(new FrameResizedListener());
    }

    /**
     * Centre this frame.
     *
     */
    public void centerFrame() {
        final Dimension screenSize = Toolkit.getDefaultToolkit().getScreenSize();
        final Dimension frameSize = this.getSize();
        if (frameSize.height > screenSize.height) {
            frameSize.height = screenSize.height;
        }
        if (frameSize.width > screenSize.width) {
            frameSize.width = screenSize.width;
        }
        this.setLocation((screenSize.width - frameSize.width) / 2, (screenSize.height - frameSize.height) / 2);
    }

    /**
     * @param windowstate integer value, see {@link JFrame#getExtendedState()}
     */
    private void saveWindowState(int windowstate) {
        if ((windowstate & Frame.ICONIFIED) == Frame.ICONIFIED) {
            preferences.put(prefnzPrefix+PREF_WINDOW_STATE, SimpleWindowState.ICONFIED.toString());
            if (logger.isDebugEnabled()) logger.debug("Saving preference "+PREF_WINDOW_STATE+"=" + SimpleWindowState.ICONFIED);
        }
        if ((windowstate & Frame.MAXIMIZED_BOTH) == Frame.MAXIMIZED_BOTH) {
            preferences.put(prefnzPrefix+PREF_WINDOW_STATE, SimpleWindowState.MAXIMIZED.toString());
            if (logger.isDebugEnabled()) logger.debug("Saving preference "+PREF_WINDOW_STATE+"=" + SimpleWindowState.MAXIMIZED);
        }
        if (windowstate == Frame.NORMAL) { // hint: Frame.NORMAL = 0, thats why no masking
            preferences.put(prefnzPrefix+PREF_WINDOW_STATE, SimpleWindowState.NORMAL.toString());
            if (logger.isDebugEnabled()) logger.debug("Saving preference "+PREF_WINDOW_STATE+"=" + SimpleWindowState.NORMAL);
        }
    }

    /**
     * Loads and sets the last window state of the frame.
     * Additionally, the last state will be returned.
     *
     * @return last window state OR null
     */
    private SimpleWindowState restoreWindowState() {
        SimpleWindowState laststate = null;
        final String statestr = preferences.get(prefnzPrefix+PREF_WINDOW_STATE, null);
        if (logger.isDebugEnabled()) logger.debug("Restoring preference "+PREF_WINDOW_STATE+"=" + statestr);
        if (statestr != null) {
            SimpleWindowState state = null;
            try {
                state = SimpleWindowState.valueOf(statestr);
            } catch (final IllegalArgumentException e) { state = null; }
            if (state != null) {
                switch (state) {
                case ICONFIED: this.setExtendedState(Frame.ICONIFIED); break;
                case NORMAL: this.setExtendedState(Frame.NORMAL); break;
                case MAXIMIZED: this.setExtendedState(Frame.MAXIMIZED_BOTH); break;
                default:
                    logger.error("Invalid window state (nothing will changed): " + statestr);
                }
            }
            laststate = state;
        }
        return laststate;
    }

    /**
     * Saves the size of this frame, but only, if window state is 'normal'.
     * If window state is iconfied or maximized, the size is not saved!
     *
     * @param size
     */
    private void saveWindowSize(Dimension size) {
        if (size != null) {
            if (getExtendedState() == Frame.NORMAL) {
                if (logger.isDebugEnabled()) logger.debug("Saving preference " + PREF_WINDOW_SIZE + "=" + size.width + "," + size.height);
                this.preferences.put(prefnzPrefix+PREF_WINDOW_SIZE, size.width + "," + size.height);
            } else {
                if (logger.isDebugEnabled()) logger.debug("Preference " + PREF_WINDOW_SIZE + " not saved, cause window state is not 'normal'.");
            }
        }
    }

    /**
     * Loads and set the saved size preferences for this frame.
     *
     * @return the size of the frame OR null, if there wasn't any preference.
     */
    private Dimension restoreWindowSize() {
        Dimension result = null;
        final String sizestr = preferences.get(prefnzPrefix+PREF_WINDOW_SIZE, null);
        if (sizestr != null) {
            int width = 0;
            int height = 0;
            final String[] sizes = sizestr.split("[,]");
            try {
                width = Integer.parseInt(sizes[0].trim());
                height = Integer.parseInt(sizes[1].trim());
            } catch (final Exception e) {
                // ignoring, cause is prevented by default values;
            }
            if (width > 0 && height > 0) {
                result = new Dimension(width, height);
                if (logger.isDebugEnabled()) logger.debug("Restoring preference " + PREF_WINDOW_SIZE + "=" + result.width + "," + result.height);
                this.setSize(result);
            }
        }
        return result;
    }

    /**
     * Saves the location of this frame, but only, if window state is 'normal'.
     * If window state is iconfied or maximized, the location is not saved!
     *
     * @param point
     */
    private void saveWindowLocation(Point point) {
        if (point != null) {
            if (getExtendedState() == Frame.NORMAL) {
                if (logger.isDebugEnabled()) logger.debug("Saving preference " + PREF_WINDOW_POSITION + "=" + point.x + "," + point.y);
                this.preferences.put(prefnzPrefix+PREF_WINDOW_POSITION, point.x + "," + point.y);
            } else {
                if (logger.isDebugEnabled()) logger.debug("Preference " + PREF_WINDOW_POSITION + " not saved, cause window state is not 'normal'.");
            }
        }
    }

    /**
     * Loads and set the saved position preferences for this frame.
     *
     * @return the location of the frame OR null, if there wasn't any preference.
     */
    private Point restoreWindowLocation() {
        Point result = null;
        final String sizestr = preferences.get(prefnzPrefix+PREF_WINDOW_POSITION, null);
        if (sizestr != null) {
            // Use -1 sentinels so that a legitimately stored position of 0 (window
            // on the top/left screen edge) is restored. The previous check
            // (x > 0 && y > 0 with 0 defaults) silently discarded such positions.
            int x = -1;
            int y = -1;
            final String[] sizes = sizestr.split("[,]");
            try {
                x = Integer.parseInt(sizes[0].trim());
                y = Integer.parseInt(sizes[1].trim());
            } catch (final Exception e) {
                // ignoring, cause is prevented by the sentinel values;
            }
            if (x >= 0 && y >= 0) {
                result = new Point(x, y);
                if (logger.isDebugEnabled()) logger.debug("Restoring preference " + PREF_WINDOW_POSITION + "=" + result.x + "," + result.y);
                this.setLocation(result);
            }
        }
        return result;
    }

    /**
     * @deprecated (2.4.2) Use {@link DisplayUtils#getZapIconImages()} instead. It will be removed in a future release.
     */
    @Deprecated
    @SuppressWarnings("javadoc")
    protected List<Image> loadIconImages() {
        return new ArrayList<>(DisplayUtils.getZapIconImages());
    }

    @Override
    public void dispose() {
        super.dispose();
        try {
            this.preferences.flush();
        } catch (final BackingStoreException e) {
            logger.error("Error while saving the preferences", e);
        }
    }

    /*
     * ========================================================================
     */

    private final class FrameWindowStateListener implements WindowStateListener {
        @Override
        public void windowStateChanged(WindowEvent e) {
            saveWindowState(e.getNewState());
        }
    }

    private final class FrameResizedListener extends ComponentAdapter {

        @Override
        public void componentResized(ComponentEvent e) {
            if (e.getComponent() != null) {
                saveWindowSize(e.getComponent().getSize());
            }
        }

        @Override
        public void componentMoved(ComponentEvent e) {
            if (e.getComponent() != null) {
                saveWindowLocation(e.getComponent().getLocation());
            }
        }
    }

    /**
     * Simplified version for easier handling of the states ...
     */
    private enum SimpleWindowState {
        ICONFIED,
        NORMAL,
        MAXIMIZED;
    }
}  //  @jve:visual-info  decl-index=0 visual-constraint="31,17"
| apache-2.0 |
cgtz/ambry | ambry-mysql/src/integration-test/java/com/github/ambry/accountstats/AccountStatsMySqlStoreIntegrationTest.java | 46523 | /**
* Copyright 2020 LinkedIn Corp. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
package com.github.ambry.accountstats;
import com.codahale.metrics.MetricRegistry;
import com.github.ambry.config.AccountStatsMySqlConfig;
import com.github.ambry.config.ClusterMapConfig;
import com.github.ambry.config.VerifiableProperties;
import com.github.ambry.server.HostAccountStorageStatsWrapper;
import com.github.ambry.server.HostPartitionClassStorageStatsWrapper;
import com.github.ambry.server.StatsHeader;
import com.github.ambry.server.StatsReportType;
import com.github.ambry.server.StatsSnapshot;
import com.github.ambry.server.StatsWrapper;
import com.github.ambry.server.StorageStatsUtil;
import com.github.ambry.server.StorageStatsUtilTest;
import com.github.ambry.server.storagestats.AggregatedAccountStorageStats;
import com.github.ambry.server.storagestats.AggregatedPartitionClassStorageStats;
import com.github.ambry.server.storagestats.ContainerStorageStats;
import com.github.ambry.server.storagestats.HostAccountStorageStats;
import com.github.ambry.server.storagestats.HostPartitionClassStorageStats;
import com.github.ambry.utils.Pair;
import com.github.ambry.utils.TestUtils;
import com.github.ambry.utils.Utils;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Random;
import java.util.Set;
import java.util.stream.Collectors;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import static org.junit.Assert.*;
/**
* Integration tests for {@link AccountStatsMySqlStore}.
*/
@RunWith(Parameterized.class)
public class AccountStatsMySqlStoreIntegrationTest {
private static final String clusterName1 = "Ambry-test";
private static final String clusterName2 = "Ambry-random";
// hostname1 and hostname2 are the same, but with different port numbers
private static final String hostname1 = "ambry1.test.github.com";
private static final String hostname2 = "ambry1.test.github.com";
private static final String hostname3 = "ambry3.test.github.com";
private static final int port1 = 12345;
private static final int port2 = 12346;
private static final int port3 = 12347;
private final int batchSize;
private final AccountStatsMySqlStore mySqlStore;
@Parameterized.Parameters
public static List<Object[]> data() {
return Arrays.asList(new Object[][]{{0}, {17}});
}
  /**
   * Constructs the test and creates the {@link AccountStatsMySqlStore} shared by most cases.
   * @param batchSize the insert batch size supplied by {@link #data()} (0 or 17). Assigned
   *        before the store is created since {@code createAccountStatsMySqlStore} presumably
   *        reads this field when building the store's config — TODO confirm.
   * @throws Exception if the MySQL store cannot be created
   */
  public AccountStatsMySqlStoreIntegrationTest(int batchSize) throws Exception {
    this.batchSize = batchSize;
    mySqlStore = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
  }
  /**
   * Cleans all stats tables before each test so every case starts from an empty database.
   * @throws Exception
   */
  @Before
  public void before() throws Exception {
    mySqlStore.cleanupTables();
  }
  /** Shuts down the shared store after each test to release its database resources. */
  @After
  public void after() {
    mySqlStore.shutdown();
  }
  /**
   * Tests to store multiple stats for multiple hosts and recover stats from database.
   * Two stores share {@code clusterName1} (same hostname, different ports) and a third uses
   * {@code clusterName2}, verifying rows are isolated per cluster/host/port. Covers both the
   * legacy {@link StatsWrapper} path and the {@link HostAccountStorageStatsWrapper} path.
   * @throws Exception
   */
  @Test
  public void testMultiStoreStats() throws Exception {
    AccountStatsMySqlStore mySqlStore1 = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
    AccountStatsMySqlStore mySqlStore2 = createAccountStatsMySqlStore(clusterName1, hostname2, port2);
    AccountStatsMySqlStore mySqlStore3 = createAccountStatsMySqlStore(clusterName2, hostname3, port3);
    // Generating StatsWrappers, store StatsWrappers and retrieve StatsWrappers
    StatsWrapper stats1 = generateStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
    StatsWrapper stats2 = generateStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
    StatsWrapper stats3 = generateStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
    mySqlStore1.storeAccountStats(stats1);
    mySqlStore2.storeAccountStats(stats2);
    mySqlStore3.storeAccountStats(stats3);
    // 3 hosts x 10 x 10 rows expected — presumably 10 partitions x 10 accounts per host
    // given the generateStatsWrapper(10, 10, ...) arguments; confirm against the helper.
    assertTableSize(mySqlStore1, 3 * 10 * 10);
    StatsWrapper obtainedStats1 = mySqlStore1.queryAccountStatsByHost(hostname1, port1);
    StatsWrapper obtainedStats2 = mySqlStore2.queryAccountStatsByHost(hostname2, port2);
    StatsWrapper obtainedStats3 = mySqlStore3.queryAccountStatsByHost(hostname3, port3);
    assertTwoStatsSnapshots(obtainedStats1.getSnapshot(), stats1.getSnapshot());
    assertTwoStatsSnapshots(obtainedStats2.getSnapshot(), stats2.getSnapshot());
    assertTwoStatsSnapshots(obtainedStats3.getSnapshot(), stats3.getSnapshot());
    // Generating HostAccountStorageStatsWrappers, store and retrieve them
    HostAccountStorageStatsWrapper hostStats1 =
        generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
    HostAccountStorageStatsWrapper hostStats2 =
        generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
    HostAccountStorageStatsWrapper hostStats3 =
        generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
    mySqlStore1.storeHostAccountStorageStats(hostStats1);
    mySqlStore2.storeHostAccountStorageStats(hostStats2);
    mySqlStore3.storeHostAccountStorageStats(hostStats3);
    HostAccountStorageStatsWrapper obtainedHostStats1 =
        mySqlStore1.queryHostAccountStorageStatsByHost(hostname1, port1);
    HostAccountStorageStatsWrapper obtainedHostStats2 =
        mySqlStore2.queryHostAccountStorageStatsByHost(hostname2, port2);
    HostAccountStorageStatsWrapper obtainedHostStats3 =
        mySqlStore3.queryHostAccountStorageStatsByHost(hostname3, port3);
    assertEquals(hostStats1.getStats().getStorageStats(), obtainedHostStats1.getStats().getStorageStats());
    assertEquals(hostStats2.getStats().getStorageStats(), obtainedHostStats2.getStats().getStorageStats());
    assertEquals(hostStats3.getStats().getStorageStats(), obtainedHostStats3.getStats().getStorageStats());
    // Retrieve StatsWrappers: the legacy snapshot view should match the storage stats
    // after conversion through StorageStatsUtil.
    obtainedStats1 = mySqlStore1.queryAccountStatsByHost(hostname1, port1);
    StatsSnapshot converted =
        StorageStatsUtil.convertHostAccountStorageStatsToStatsSnapshot(hostStats1.getStats(), false);
    assertTwoStatsSnapshots(converted, obtainedStats1.getSnapshot());
    mySqlStore1.shutdown();
    mySqlStore2.shutdown();
    mySqlStore3.shutdown();
  }
@Test
public void testEmptyStatsWhenReadingPreviousStatsFromMysqlDb() throws Exception {
//write a new stats into database.
AccountStatsMySqlStore mySqlStore = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
HostAccountStorageStatsWrapper stats =
generateHostAccountStorageStatsWrapper(1, 1, 1, StatsReportType.ACCOUNT_REPORT);
mySqlStore.storeHostAccountStorageStats(stats);
HostAccountStorageStatsWrapper obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertTrue(obtainedStats.getStats().getStorageStats().containsKey((long) 0));
//initialized the mySqlStore and write a new stats with the same partition.
mySqlStore = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
assertTrue(
mySqlStore.getPreviousHostAccountStorageStatsWrapper().getStats().getStorageStats().containsKey((long) 0));
HostAccountStorageStatsWrapper stats2 =
generateHostAccountStorageStatsWrapper(0, 0, 0, StatsReportType.ACCOUNT_REPORT);
Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> newStorageStats =
new HashMap<>(stats2.getStats().getStorageStats());
newStorageStats.put((long) 0,
new HashMap<>()); // Remove partition 0's storage stats data, this would remove entire partition from database
mySqlStore.storeHostAccountStorageStats(
new HostAccountStorageStatsWrapper(stats2.getHeader(), new HostAccountStorageStats(newStorageStats)));
// empty stats should remove all the data in the database
obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertFalse(obtainedStats.getStats().getStorageStats().containsKey((long) 0));
}
  /**
   * Tests that an empty map for a partition removes/keeps that partition absent when the
   * previous stats come from the store's local backup copy (the same store instance is
   * reused across writes). Also verifies that storing a completely empty
   * {@link HostAccountStorageStats} wipes all of this host's rows, and that a full
   * snapshot written afterwards is stored intact.
   * @throws Exception
   */
  @Test
  public void testEmptyStatsWhenReadingPreviousStatsFromLocalBackUpFile() throws Exception {
    AccountStatsMySqlStore mySqlStore = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
    HostAccountStorageStatsWrapper stats =
        generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
    Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> newStorageStats =
        new HashMap<>(stats.getStats().getStorageStats());
    // Partition 10 is mapped to an empty map, so it must not appear in query results.
    newStorageStats.put((long) 10, new HashMap<>());
    mySqlStore.storeHostAccountStorageStats(
        new HostAccountStorageStatsWrapper(stats.getHeader(), new HostAccountStorageStats(newStorageStats)));
    HostAccountStorageStatsWrapper obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
    assertFalse(obtainedStats.getStats().getStorageStats().containsKey((long) 10));
    // Write a new stats with partition 10 still empty
    HostAccountStorageStatsWrapper stats2 =
        generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
    // NOTE(review): copies from "stats", not "stats2" — presumably intentional since both
    // hold random data and only partition 10's emptiness matters here; confirm.
    newStorageStats = new HashMap<>(stats.getStats().getStorageStats());
    newStorageStats.put((long) 10, new HashMap<>());
    mySqlStore.storeHostAccountStorageStats(
        new HostAccountStorageStatsWrapper(stats2.getHeader(), new HostAccountStorageStats(newStorageStats)));
    HostAccountStorageStatsWrapper obtainedStats2 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
    assertFalse(obtainedStats2.getStats().getStorageStats().containsKey((long) 10));
    // Write a new stats with partition 10 not empty
    HostAccountStorageStatsWrapper stats3 =
        generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
    // NOTE(review): again copies from "stats" rather than "stats3" — see note above.
    newStorageStats = new HashMap<>(stats.getStats().getStorageStats());
    newStorageStats.put((long) 10, stats.getStats().getStorageStats().get((long) 1));
    mySqlStore.storeHostAccountStorageStats(
        new HostAccountStorageStatsWrapper(stats3.getHeader(), new HostAccountStorageStats(newStorageStats)));
    HostAccountStorageStatsWrapper obtainedStats3 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
    assertTrue(obtainedStats3.getStats().getStorageStats().containsKey((long) 10));
    // Write an empty HostAccountStorageStats
    mySqlStore.storeHostAccountStorageStats(
        new HostAccountStorageStatsWrapper(stats3.getHeader(), new HostAccountStorageStats()));
    // Empty storage stats should remove all the data in the database
    HostAccountStorageStatsWrapper obtainedStats4 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
    assertTrue(obtainedStats4.getStats().getStorageStats().isEmpty());
    // Write an empty HostAccountStorageStats again (idempotent on an already-empty table)
    mySqlStore.storeHostAccountStorageStats(
        new HostAccountStorageStatsWrapper(stats3.getHeader(), new HostAccountStorageStats()));
    HostAccountStorageStatsWrapper obtainedStats5 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
    assertTrue(obtainedStats5.getStats().getStorageStats().isEmpty());
    HostAccountStorageStatsWrapper stats6 =
        generateHostAccountStorageStatsWrapper(20, 20, 20, StatsReportType.ACCOUNT_REPORT);
    mySqlStore.storeHostAccountStorageStats(stats6);
    HostAccountStorageStatsWrapper obtainedStats6 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
    assertEquals(obtainedStats6.getStats().getStorageStats(), stats6.getStats().getStorageStats());
    mySqlStore.shutdown();
  }
  /**
   * Test to delete partition, account and container data from database.
   * Stores a full snapshot, then progressively removes a partition, an account, and some
   * containers, verifying after each write that the database matches the shrunken snapshot;
   * finally writes the full snapshot back to confirm re-insertion works.
   * @throws Exception
   */
  @Test
  public void testStatsDeletePartitionAccountContainer() throws Exception {
    AccountStatsMySqlStore mySqlStore = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
    HostAccountStorageStatsWrapper stats =
        generateHostAccountStorageStatsWrapper(10, 10, 10, StatsReportType.ACCOUNT_REPORT);
    mySqlStore.storeHostAccountStorageStats(stats);
    // Now remove one partition from stats
    HostAccountStorageStats storageStatsCopy = new HostAccountStorageStats(stats.getStats());
    Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> newStorageStatsMap =
        new HashMap<>(storageStatsCopy.getStorageStats());
    newStorageStatsMap.remove((long) 1);
    HostAccountStorageStatsWrapper stats2 = new HostAccountStorageStatsWrapper(new StatsHeader(stats.getHeader()),
        new HostAccountStorageStats(newStorageStatsMap));
    mySqlStore.storeHostAccountStorageStats(stats2);
    HostAccountStorageStatsWrapper obtainedStats2 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
    assertEquals(obtainedStats2.getStats().getStorageStats(), stats2.getStats().getStorageStats());
    // Now remove one account (account 1 under partition 3) from stats
    storageStatsCopy = new HostAccountStorageStats(stats2.getStats());
    newStorageStatsMap = new HashMap<>(storageStatsCopy.getStorageStats());
    newStorageStatsMap.get((long) 3).remove((short) 1);
    HostAccountStorageStatsWrapper stats3 = new HostAccountStorageStatsWrapper(new StatsHeader(stats2.getHeader()),
        new HostAccountStorageStats(newStorageStatsMap));
    mySqlStore.storeHostAccountStorageStats(stats3);
    HostAccountStorageStatsWrapper obtainedStats3 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
    assertEquals(obtainedStats3.getStats().getStorageStats(), stats3.getStats().getStorageStats());
    // Now remove some containers (containers 0-2 of account 3 under partition 3)
    storageStatsCopy = new HostAccountStorageStats(stats3.getStats());
    newStorageStatsMap = new HashMap<>(storageStatsCopy.getStorageStats());
    for (short containerId : new short[]{0, 1, 2}) {
      newStorageStatsMap.get((long) 3).get((short) 3).remove(containerId);
    }
    HostAccountStorageStatsWrapper stats4 = new HostAccountStorageStatsWrapper(new StatsHeader(stats3.getHeader()),
        new HostAccountStorageStats(newStorageStatsMap));
    mySqlStore.storeHostAccountStorageStats(stats4);
    HostAccountStorageStatsWrapper obtainedStats4 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
    assertEquals(obtainedStats4.getStats().getStorageStats(), stats4.getStats().getStorageStats());
    // Now write the stats back
    stats = generateHostAccountStorageStatsWrapper(10, 10, 10, StatsReportType.ACCOUNT_REPORT);
    mySqlStore.storeHostAccountStorageStats(stats);
    HostAccountStorageStatsWrapper obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
    assertEquals(stats.getStats().getStorageStats(), obtainedStats.getStats().getStorageStats());
    mySqlStore.shutdown();
  }
  /**
   * Tests to store multiple stats for one host and recover stats from database.
   * Each successive write changes exactly one field of one container's stats (logical
   * storage usage, then physical storage usage, then number of blobs) and verifies the
   * query reflects the latest write.
   * NOTE(review): method name has a typo ("Multilple"); left unchanged to avoid churn in
   * existing test reports/filters.
   * @throws Exception
   */
  @Test
  public void testStoreMultilpleWrites() throws Exception {
    AccountStatsMySqlStore mySqlStore = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
    HostAccountStorageStatsWrapper stats1 =
        generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
    mySqlStore.storeHostAccountStorageStats(stats1);
    // Bump logical storage usage of partition 0 / account 0 / container 0 by one.
    HostAccountStorageStats hostAccountStorageStatsCopy = new HostAccountStorageStats(stats1.getStats());
    Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> newStorageStats =
        new HashMap<>(hostAccountStorageStatsCopy.getStorageStats());
    ContainerStorageStats origin = newStorageStats.get((long) 0).get((short) 0).get((short) 0);
    newStorageStats.get((long) 0)
        .get((short) 0)
        .put((short) 0,
            new ContainerStorageStats.Builder(origin).logicalStorageUsage(origin.getLogicalStorageUsage() + 1).build());
    HostAccountStorageStatsWrapper stats2 = new HostAccountStorageStatsWrapper(new StatsHeader(stats1.getHeader()),
        new HostAccountStorageStats(newStorageStats));
    mySqlStore.storeHostAccountStorageStats(stats2);
    HostAccountStorageStatsWrapper obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
    assertEquals(stats2.getStats().getStorageStats(), obtainedStats.getStats().getStorageStats());
    // Bump physical storage usage of the same container (starting again from stats1).
    hostAccountStorageStatsCopy = new HostAccountStorageStats(stats1.getStats());
    newStorageStats = new HashMap<>(hostAccountStorageStatsCopy.getStorageStats());
    origin = newStorageStats.get((long) 0).get((short) 0).get((short) 0);
    newStorageStats.get((long) 0)
        .get((short) 0)
        .put((short) 0,
            new ContainerStorageStats.Builder(origin).physicalStorageUsage(origin.getPhysicalStorageUsage() + 1)
                .build());
    HostAccountStorageStatsWrapper stats3 = new HostAccountStorageStatsWrapper(new StatsHeader(stats1.getHeader()),
        new HostAccountStorageStats(newStorageStats));
    mySqlStore.storeHostAccountStorageStats(stats3);
    obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
    assertEquals(stats3.getStats().getStorageStats(), obtainedStats.getStats().getStorageStats());
    // Bump the number of blobs of the same container (starting again from stats1).
    hostAccountStorageStatsCopy = new HostAccountStorageStats(stats1.getStats());
    newStorageStats = new HashMap<>(hostAccountStorageStatsCopy.getStorageStats());
    origin = newStorageStats.get((long) 0).get((short) 0).get((short) 0);
    newStorageStats.get((long) 0)
        .get((short) 0)
        .put((short) 0, new ContainerStorageStats.Builder(origin).numberOfBlobs(origin.getNumberOfBlobs() + 1).build());
    HostAccountStorageStatsWrapper stats4 = new HostAccountStorageStatsWrapper(new StatsHeader(stats1.getHeader()),
        new HostAccountStorageStats(newStorageStats));
    mySqlStore.storeHostAccountStorageStats(stats4);
    obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
    assertEquals(stats4.getStats().getStorageStats(), obtainedStats.getStats().getStorageStats());
    mySqlStore.shutdown();
  }
  /**
   * Test the methods for storing, deleting and fetching aggregated account stats.
   * Verifies round-trip of both the container-usage-map and {@link StatsSnapshot} views,
   * updating an existing value, and per-container deletion.
   * @throws Exception
   */
  @Test
  public void testAggregatedAccountStats() throws Exception {
    Map<String, Map<String, Long>> containerStorageUsages = TestUtils.makeStorageMap(10, 10, 100000, 1000);
    StatsSnapshot snapshot = TestUtils.makeAccountStatsSnapshotFromContainerStorageMap(containerStorageUsages);
    mySqlStore.storeAggregatedAccountStats(snapshot);
    Map<String, Map<String, Long>> obtainedContainerStorageUsages = mySqlStore.queryAggregatedAccountStats(false);
    assertEquals(containerStorageUsages, obtainedContainerStorageUsages);
    StatsSnapshot obtainedSnapshot = mySqlStore.queryAggregatedAccountStatsByClusterName(clusterName1);
    assertEquals(snapshot, obtainedSnapshot);
    // Fetching aggregated account stats for clustername2 should result in empty stats
    assertEquals(mySqlStore.queryAggregatedAccountStatsByClusterName(clusterName2).getSubMap().size(), 0);
    // Change one value (account 1 / container 1) and store it to mysql database again
    StatsSnapshot newSnapshot = new StatsSnapshot(snapshot);
    newSnapshot.getSubMap()
        .get(Utils.statsAccountKey((short) 1))
        .getSubMap()
        .get(Utils.statsContainerKey((short) 1))
        .setValue(1);
    // Recompute the aggregate values on parent nodes after mutating the leaf.
    newSnapshot.updateValue();
    containerStorageUsages.get("1").put("1", 1L);
    mySqlStore.storeAggregatedAccountStats(newSnapshot);
    obtainedContainerStorageUsages = mySqlStore.queryAggregatedAccountStats(false);
    assertEquals(containerStorageUsages, obtainedContainerStorageUsages);
    // Delete account and container from the expected snapshot ...
    newSnapshot = new StatsSnapshot(newSnapshot);
    newSnapshot.getSubMap().remove(Utils.statsAccountKey((short) 1));
    newSnapshot.getSubMap()
        .get(Utils.statsAccountKey((short) 2))
        .getSubMap()
        .remove(Utils.statsContainerKey((short) 1));
    newSnapshot.updateValue();
    // ... now remove all containers for account 1 and container 1 of account 2 in the DB
    for (String containerId : containerStorageUsages.get(String.valueOf(1)).keySet()) {
      mySqlStore.deleteAggregatedAccountStatsForContainer((short) 1, Short.valueOf(containerId));
    }
    mySqlStore.deleteAggregatedAccountStatsForContainer((short) 2, (short) 1);
    obtainedSnapshot = mySqlStore.queryAggregatedAccountStatsByClusterName(clusterName1);
    assertEquals(newSnapshot, obtainedSnapshot);
    mySqlStore.shutdown();
  }
  /**
   * Test the methods for storing, deleting and fetching aggregated account storage stats.
   * Verifies round-trip through all three read views (usage map, {@link StatsSnapshot},
   * {@link AggregatedAccountStorageStats}), updating an existing value, and per-container
   * deletion.
   * @throws Exception
   */
  @Test
  public void testAggregatedAccountStorageStats() throws Exception {
    AggregatedAccountStorageStats aggregatedAccountStorageStats = new AggregatedAccountStorageStats(
        StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 10, 10, 10000L, 2, 10));
    mySqlStore.storeAggregatedAccountStorageStats(aggregatedAccountStorageStats);
    // Compare container usage map
    Map<String, Map<String, Long>> obtainedContainerStorageUsages = mySqlStore.queryAggregatedAccountStats(false);
    assertEquals(StorageStatsUtil.convertAggregatedAccountStorageStatsToMap(aggregatedAccountStorageStats, false),
        obtainedContainerStorageUsages);
    // Compare StatsSnapshot
    StatsSnapshot obtainedSnapshot = mySqlStore.queryAggregatedAccountStatsByClusterName(clusterName1);
    assertEquals(
        StorageStatsUtil.convertAggregatedAccountStorageStatsToStatsSnapshot(aggregatedAccountStorageStats, false),
        obtainedSnapshot);
    // Compare AggregatedAccountStorageStats
    AggregatedAccountStorageStats obtainedStats = mySqlStore.queryAggregatedAccountStorageStats();
    assertEquals(aggregatedAccountStorageStats.getStorageStats(), obtainedStats.getStorageStats());
    obtainedStats = mySqlStore.queryAggregatedAccountStorageStatsByClusterName(clusterName1);
    assertEquals(aggregatedAccountStorageStats.getStorageStats(), obtainedStats.getStorageStats());
    // Fetching aggregated account stats for clusterName2 should result in empty stats
    assertEquals(mySqlStore.queryAggregatedAccountStatsByClusterName(clusterName2).getSubMap().size(), 0);
    assertEquals(mySqlStore.queryAggregatedAccountStorageStatsByClusterName(clusterName2).getStorageStats().size(), 0);
    // Change one value (account 1 / container 1) and store it to mysql database again
    Map<Short, Map<Short, ContainerStorageStats>> newStorageStatsMap =
        new HashMap<>(aggregatedAccountStorageStats.getStorageStats());
    ContainerStorageStats origin = newStorageStatsMap.get((short) 1).get((short) 1);
    newStorageStatsMap.get((short) 1)
        .put((short) 1,
            new ContainerStorageStats.Builder(origin).logicalStorageUsage(origin.getLogicalStorageUsage() + 1).build());
    aggregatedAccountStorageStats = new AggregatedAccountStorageStats(newStorageStatsMap);
    mySqlStore.storeAggregatedAccountStorageStats(aggregatedAccountStorageStats);
    obtainedStats = mySqlStore.queryAggregatedAccountStorageStats();
    assertEquals(newStorageStatsMap, obtainedStats.getStorageStats());
    // Delete account and container from the expected map ...
    newStorageStatsMap = new HashMap<>(aggregatedAccountStorageStats.getStorageStats());
    newStorageStatsMap.remove((short) 1);
    newStorageStatsMap.get((short) 2).remove((short) 1);
    // ... now remove all containers for account 1 and container 1 of account 2 in the DB
    for (short containerId : aggregatedAccountStorageStats.getStorageStats().get((short) 1).keySet()) {
      mySqlStore.deleteAggregatedAccountStatsForContainer((short) 1, containerId);
    }
    mySqlStore.deleteAggregatedAccountStatsForContainer((short) 2, (short) 1);
    obtainedStats = mySqlStore.queryAggregatedAccountStorageStatsByClusterName(clusterName1);
    assertEquals(newStorageStatsMap, obtainedStats.getStorageStats());
    mySqlStore.shutdown();
  }
  /**
   * Test methods to store, delete and fetch monthly aggregated stats.
   * Takes a monthly snapshot of the current aggregated stats, verifies the snapshot and
   * the recorded month, repeats with a new month, then deletes the snapshot.
   * @throws Exception
   */
  @Test
  public void testMonthlyAggregatedStats() throws Exception {
    String monthValue = "2020-01";
    AggregatedAccountStorageStats currentAggregatedStats = mySqlStore.queryAggregatedAccountStorageStats();
    if (currentAggregatedStats.getStorageStats().size() == 0) {
      // Seed aggregated stats if the table is empty so the snapshot has data to copy.
      AggregatedAccountStorageStats aggregatedAccountStorageStats = new AggregatedAccountStorageStats(
          StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 10, 10, 10000L, 2, 10));
      mySqlStore.storeAggregatedAccountStorageStats(aggregatedAccountStorageStats);
      currentAggregatedStats = mySqlStore.queryAggregatedAccountStorageStats();
    }
    // fetch the month before any snapshot is taken: it should return an empty string
    Assert.assertEquals("", mySqlStore.queryRecordedMonth());
    mySqlStore.takeSnapshotOfAggregatedAccountStatsAndUpdateMonth(monthValue);
    Map<String, Map<String, Long>> monthlyContainerStorageUsages = mySqlStore.queryMonthlyAggregatedAccountStats(false);
    assertEquals(StorageStatsUtil.convertAggregatedAccountStorageStatsToMap(currentAggregatedStats, false),
        monthlyContainerStorageUsages);
    String obtainedMonthValue = mySqlStore.queryRecordedMonth();
    assertTrue(obtainedMonthValue.equals(monthValue));
    // Change the value and store it back to mysql database under the next month
    monthValue = "2020-02";
    currentAggregatedStats = new AggregatedAccountStorageStats(
        StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 10, 10, 10000L, 2, 10));
    mySqlStore.storeAggregatedAccountStorageStats(currentAggregatedStats);
    mySqlStore.takeSnapshotOfAggregatedAccountStatsAndUpdateMonth(monthValue);
    monthlyContainerStorageUsages = mySqlStore.queryMonthlyAggregatedAccountStats(false);
    assertEquals(StorageStatsUtil.convertAggregatedAccountStorageStatsToMap(currentAggregatedStats, false),
        monthlyContainerStorageUsages);
    obtainedMonthValue = mySqlStore.queryRecordedMonth();
    assertTrue(obtainedMonthValue.equals(monthValue));
    // Delete the snapshots; the monthly view should then be empty
    mySqlStore.deleteSnapshotOfAggregatedAccountStats();
    assertTrue(mySqlStore.queryMonthlyAggregatedAccountStats(false).isEmpty());
  }
  /**
   * Test methods to store and fetch partition class, partition name/partition id and
   * partition class stats. Seeds account stats via {@link #testMultiStoreStats()}, maps
   * each partition to one of two class names, stores the derived partition-class stats,
   * and verifies the name-to-id mapping and the per-host stats round-trip.
   * @throws Exception
   */
  @Test
  public void testHostPartitionClassStats() throws Exception {
    // First write some stats to account reports
    testMultiStoreStats();
    StatsWrapper accountStats1 = mySqlStore.queryAccountStatsByHost(hostname1, port1);
    StatsWrapper accountStats2 = mySqlStore.queryAccountStatsByHost(hostname2, port2);
    AccountStatsMySqlStore mySqlStore3 = createAccountStatsMySqlStore(clusterName2, hostname3, port3);
    StatsWrapper accountStats3 = mySqlStore3.queryAccountStatsByHost(hostname3, port3);
    // From this account stats, create partition class stats: collect all partition keys
    Set<String> allPartitionKeys = new HashSet<String>() {
      {
        addAll(accountStats1.getSnapshot().getSubMap().keySet());
        addAll(accountStats2.getSnapshot().getSubMap().keySet());
        addAll(accountStats3.getSnapshot().getSubMap().keySet());
      }
    };
    List<String> partitionClassNames = Arrays.asList("default", "new");
    Map<String, String> partitionKeyToClassName = new HashMap<>();
    int ind = 0;
    // Round-robin partitions across the two class names.
    for (String partitionKey : allPartitionKeys) {
      partitionKeyToClassName.put(partitionKey, partitionClassNames.get(ind % partitionClassNames.size()));
      ind++;
    }
    StatsWrapper partitionClassStats1 =
        convertAccountStatsToPartitionClassStats(accountStats1, partitionKeyToClassName);
    StatsWrapper partitionClassStats2 =
        convertAccountStatsToPartitionClassStats(accountStats2, partitionKeyToClassName);
    StatsWrapper partitionClassStats3 =
        convertAccountStatsToPartitionClassStats(accountStats3, partitionKeyToClassName);
    mySqlStore.storePartitionClassStats(partitionClassStats1);
    mySqlStore.storePartitionClassStats(partitionClassStats2);
    mySqlStore3.storePartitionClassStats(partitionClassStats3);
    Map<String, Set<Integer>> partitionNameAndIds = mySqlStore.queryPartitionNameAndIds();
    assertEquals(new HashSet<>(partitionClassNames), partitionNameAndIds.keySet());
    // Rebuild the partition-key -> class-name mapping from the DB and compare with the input.
    Map<String, String> dbPartitionKeyToClassName = partitionNameAndIds.entrySet()
        .stream()
        .flatMap(
            ent -> ent.getValue().stream().map(pid -> new Pair<String, String>(ent.getKey(), "Partition[" + pid + "]")))
        .collect(Collectors.toMap(Pair::getSecond, Pair::getFirst));
    assertEquals(partitionKeyToClassName, dbPartitionKeyToClassName);
    StatsWrapper obtainedStats1 = mySqlStore.queryPartitionClassStatsByHost(hostname1, port1, partitionNameAndIds);
    assertEquals(partitionClassStats1.getSnapshot(), obtainedStats1.getSnapshot());
    StatsWrapper obtainedStats2 = mySqlStore.queryPartitionClassStatsByHost(hostname2, port2, partitionNameAndIds);
    assertEquals(partitionClassStats2.getSnapshot(), obtainedStats2.getSnapshot());
    StatsWrapper obtainedStats3 = mySqlStore3.queryPartitionClassStatsByHost(hostname3, port3, partitionNameAndIds);
    assertEquals(partitionClassStats3.getSnapshot(), obtainedStats3.getSnapshot());
    mySqlStore3.shutdown();
  }
  /**
   * Test methods to store and fetch partition class, partition name/partition id and
   * partition class storage stats. Mirrors {@link #testHostPartitionClassStats()} but for
   * the {@link HostPartitionClassStorageStatsWrapper} path, and also checks the legacy
   * {@link StatsSnapshot} view against the converted storage stats.
   * @throws Exception
   */
  @Test
  public void testHostPartitionClassStorageStats() throws Exception {
    // First write some stats to account reports
    testMultiStoreStats();
    HostAccountStorageStatsWrapper accountStats1 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
    HostAccountStorageStatsWrapper accountStats2 = mySqlStore.queryHostAccountStorageStatsByHost(hostname2, port2);
    AccountStatsMySqlStore mySqlStore3 = createAccountStatsMySqlStore(clusterName2, hostname3, port3);
    HostAccountStorageStatsWrapper accountStats3 = mySqlStore3.queryHostAccountStorageStatsByHost(hostname3, port3);
    // From this account stats, create partition class stats: collect all partition ids
    Set<Long> allPartitionKeys = new HashSet<Long>() {
      {
        addAll(accountStats1.getStats().getStorageStats().keySet());
        addAll(accountStats2.getStats().getStorageStats().keySet());
        addAll(accountStats3.getStats().getStorageStats().keySet());
      }
    };
    List<String> partitionClassNames = Arrays.asList("default", "new");
    Map<Long, String> partitionIdToClassName = new HashMap<>();
    int ind = 0;
    // Round-robin partitions across the two class names.
    for (long partitionId : allPartitionKeys) {
      partitionIdToClassName.put(partitionId, partitionClassNames.get(ind % partitionClassNames.size()));
      ind++;
    }
    HostPartitionClassStorageStatsWrapper partitionClassStats1 =
        convertHostAccountStorageStatsToHostPartitionClassStorageStats(accountStats1, partitionIdToClassName);
    HostPartitionClassStorageStatsWrapper partitionClassStats2 =
        convertHostAccountStorageStatsToHostPartitionClassStorageStats(accountStats2, partitionIdToClassName);
    HostPartitionClassStorageStatsWrapper partitionClassStats3 =
        convertHostAccountStorageStatsToHostPartitionClassStorageStats(accountStats3, partitionIdToClassName);
    mySqlStore.storeHostPartitionClassStorageStats(partitionClassStats1);
    mySqlStore.storeHostPartitionClassStorageStats(partitionClassStats2);
    mySqlStore3.storeHostPartitionClassStorageStats(partitionClassStats3);
    Map<String, Set<Integer>> partitionNameAndIds = mySqlStore.queryPartitionNameAndIds();
    assertEquals(new HashSet<>(partitionClassNames), partitionNameAndIds.keySet());
    // Rebuild the partition-id -> class-name mapping from the DB and compare with the input.
    Map<Long, String> dbPartitionKeyToClassName = partitionNameAndIds.entrySet()
        .stream()
        .flatMap(ent -> ent.getValue().stream().map(pid -> new Pair<>(ent.getKey(), (long) pid)))
        .collect(Collectors.toMap(Pair::getSecond, Pair::getFirst));
    assertEquals(partitionIdToClassName, dbPartitionKeyToClassName);
    // Fetch HostPartitionClassStorageStats
    HostPartitionClassStorageStatsWrapper obtainedStats1 =
        mySqlStore.queryHostPartitionClassStorageStatsByHost(hostname1, port1, partitionNameAndIds);
    assertEquals(partitionClassStats1.getStats().getStorageStats(), obtainedStats1.getStats().getStorageStats());
    HostPartitionClassStorageStatsWrapper obtainedStats2 =
        mySqlStore.queryHostPartitionClassStorageStatsByHost(hostname2, port2, partitionNameAndIds);
    assertEquals(partitionClassStats2.getStats().getStorageStats(), obtainedStats2.getStats().getStorageStats());
    HostPartitionClassStorageStatsWrapper obtainedStats3 =
        mySqlStore3.queryHostPartitionClassStorageStatsByHost(hostname3, port3, partitionNameAndIds);
    assertEquals(partitionClassStats3.getStats().getStorageStats(), obtainedStats3.getStats().getStorageStats());
    // Fetch StatsSnapshot: legacy view must equal the converted storage stats
    StatsWrapper obtainedStats = mySqlStore.queryPartitionClassStatsByHost(hostname1, port1, partitionNameAndIds);
    assertEquals(
        StorageStatsUtil.convertHostPartitionClassStorageStatsToStatsSnapshot(obtainedStats1.getStats(), false),
        obtainedStats.getSnapshot());
    mySqlStore3.shutdown();
  }
/**
* Test methods to store, delete and fetch aggregated partition class stats.
* @throws Exception
*/
@Test
public void testAggregatedPartitionClassStats() throws Exception {
testHostPartitionClassStats();
Map<String, Set<Integer>> partitionNameAndIds = mySqlStore.queryPartitionNameAndIds();
AccountStatsMySqlStore mySqlStore3 = createAccountStatsMySqlStore(clusterName2, hostname3, port3);
// Now we should have partition class names and partition ids in database
// Construct an aggregated partition class report
StatsSnapshot aggregated =
TestUtils.makeAggregatedPartitionClassStats(partitionNameAndIds.keySet().toArray(new String[0]), 10, 10);
mySqlStore.storeAggregatedPartitionClassStats(aggregated);
partitionNameAndIds = mySqlStore3.queryPartitionNameAndIds();
StatsSnapshot aggregated3 =
TestUtils.makeAggregatedPartitionClassStats(partitionNameAndIds.keySet().toArray(new String[0]), 10, 10);
mySqlStore3.storeAggregatedPartitionClassStats(aggregated3);
StatsSnapshot obtained = mySqlStore.queryAggregatedPartitionClassStats();
assertEquals(aggregated, obtained);
assertEquals(mySqlStore.queryAggregatedPartitionClassStatsByClusterName("random-cluster").getSubMap().size(), 0);
StatsSnapshot obtained3 = mySqlStore3.queryAggregatedPartitionClassStats();
assertEquals(aggregated3, obtained3);
// Change one value and store it to mysql database again
StatsSnapshot newSnapshot = new StatsSnapshot(aggregated);
newSnapshot.getSubMap()
.get("default")
.getSubMap()
.get(Utils.partitionClassStatsAccountContainerKey((short) 1, (short) 1))
.setValue(1);
newSnapshot.updateValue();
mySqlStore.storeAggregatedPartitionClassStats(aggregated);
obtained = mySqlStore.queryAggregatedPartitionClassStats();
assertEquals(aggregated, obtained);
// Delete some account and container
newSnapshot = new StatsSnapshot(newSnapshot);
short accountId = (short) 1;
short containerId = (short) 1;
String accountContainerKey = Utils.partitionClassStatsAccountContainerKey(accountId, containerId);
for (String partitionClassName : partitionNameAndIds.keySet()) {
mySqlStore.deleteAggregatedPartitionClassStatsForAccountContainer(partitionClassName, accountId, containerId);
newSnapshot.getSubMap().get(partitionClassName).getSubMap().remove(accountContainerKey);
}
newSnapshot.updateValue();
obtained = mySqlStore.queryAggregatedPartitionClassStats();
assertEquals(newSnapshot, obtained);
mySqlStore3.shutdown();
}
@Test
public void testAggregatedPartitionClassStorageStats() throws Exception {
  // Seed the database with host-level partition class storage stats so that partition class
  // names and partition ids exist to aggregate over.
  testHostPartitionClassStorageStats();
  Map<String, Set<Integer>> partitionNameAndIds = mySqlStore.queryPartitionNameAndIds();
  // Second store against a different cluster/host to verify isolation between clusters.
  AccountStatsMySqlStore mySqlStore3 = createAccountStatsMySqlStore(clusterName2, hostname3, port3);
  // Now we should have partition class names and partition ids in database
  // Construct an aggregated partition class report
  AggregatedPartitionClassStorageStats aggregatedStats = new AggregatedPartitionClassStorageStats(
      StorageStatsUtilTest.generateRandomAggregatedPartitionClassStorageStats(
          partitionNameAndIds.keySet().toArray(new String[0]), (short) 0, 10, 10, 10000L, 2, 10));
  mySqlStore.storeAggregatedPartitionClassStorageStats(aggregatedStats);
  // Store a second, independent random report through the second store's cluster.
  partitionNameAndIds = mySqlStore3.queryPartitionNameAndIds();
  AggregatedPartitionClassStorageStats aggregatedStats3 = new AggregatedPartitionClassStorageStats(
      StorageStatsUtilTest.generateRandomAggregatedPartitionClassStorageStats(
          partitionNameAndIds.keySet().toArray(new String[0]), (short) 0, 10, 10, 10000L, 2, 10));
  mySqlStore3.storeAggregatedPartitionClassStorageStats(aggregatedStats3);
  // Each store must read back exactly what it wrote for its own cluster.
  AggregatedPartitionClassStorageStats obtained = mySqlStore.queryAggregatedPartitionClassStorageStats();
  assertEquals(aggregatedStats.getStorageStats(), obtained.getStorageStats());
  // An unknown cluster name should yield empty stats.
  assertEquals(
      mySqlStore.queryAggregatedPartitionClassStorageStatsByClusterName("random-cluster").getStorageStats().size(),
      0);
  AggregatedPartitionClassStorageStats obtained3 = mySqlStore3.queryAggregatedPartitionClassStorageStats();
  assertEquals(aggregatedStats3.getStorageStats(), obtained3.getStorageStats());
  // Fetch StatsSnapshot
  // The StatsSnapshot view must agree with the storage-stats view of the same data.
  StatsSnapshot obtainedSnapshot = mySqlStore.queryAggregatedPartitionClassStats();
  assertEquals(StorageStatsUtil.convertAggregatedPartitionClassStorageStatsToStatsSnapshot(obtained, false),
      obtainedSnapshot);
  // Change one value and store it to mysql database again
  Map<String, Map<Short, Map<Short, ContainerStorageStats>>> newStorageStatsMap =
      new HashMap<>(aggregatedStats.getStorageStats());
  ContainerStorageStats origin = newStorageStatsMap.get("default").get((short) 1).get((short) 1);
  // Bump one container's logical storage usage by 1 and overwrite the stored report.
  newStorageStatsMap.get("default")
      .get((short) 1)
      .put((short) 1,
          new ContainerStorageStats.Builder(origin).logicalStorageUsage(origin.getLogicalStorageUsage() + 1).build());
  mySqlStore.storeAggregatedPartitionClassStorageStats(new AggregatedPartitionClassStorageStats(newStorageStatsMap));
  obtained = mySqlStore.queryAggregatedPartitionClassStorageStats();
  assertEquals(newStorageStatsMap, obtained.getStorageStats());
  // Delete some account and container
  short accountId = (short) 1;
  short containerId = (short) 1;
  for (String partitionClassName : partitionNameAndIds.keySet()) {
    // Keep the store and the local expectation in sync while deleting.
    mySqlStore.deleteAggregatedPartitionClassStatsForAccountContainer(partitionClassName, accountId, containerId);
    newStorageStatsMap.get(partitionClassName).get(accountId).remove(containerId);
  }
  obtained = mySqlStore.queryAggregatedPartitionClassStorageStats();
  assertEquals(newStorageStatsMap, obtained.getStorageStats());
  mySqlStore3.shutdown();
}
/**
 * Builds an {@link AccountStatsMySqlStore} from the bundled mysql properties resource,
 * overriding the cluster name, hostname and port for the given test identity.
 *
 * @param clusterName cluster name to report stats under
 * @param hostname host name to report stats under
 * @param port port to report stats under
 * @return a freshly created store from {@link AccountStatsMySqlStoreFactory}
 * @throws Exception if the backup file or store cannot be created
 */
private AccountStatsMySqlStore createAccountStatsMySqlStore(String clusterName, String hostname, int port)
    throws Exception {
  Path backupPath = createTemporaryFile();
  Properties props = Utils.loadPropsFromResource("accountstats_mysql.properties");
  // Identity of the reporting node.
  props.setProperty(ClusterMapConfig.CLUSTERMAP_CLUSTER_NAME, clusterName);
  props.setProperty(ClusterMapConfig.CLUSTERMAP_HOST_NAME, hostname);
  props.setProperty(ClusterMapConfig.CLUSTERMAP_PORT, String.valueOf(port));
  props.setProperty(ClusterMapConfig.CLUSTERMAP_DATACENTER_NAME, "dc1");
  // Store tuning knobs for the test.
  props.setProperty(AccountStatsMySqlConfig.DOMAIN_NAMES_TO_REMOVE, ".github.com");
  props.setProperty(AccountStatsMySqlConfig.UPDATE_BATCH_SIZE, String.valueOf(batchSize));
  props.setProperty(AccountStatsMySqlConfig.POOL_SIZE, String.valueOf(5));
  props.setProperty(AccountStatsMySqlConfig.LOCAL_BACKUP_FILE_PATH, backupPath.toString());
  VerifiableProperties verifiable = new VerifiableProperties(props);
  AccountStatsMySqlStoreFactory factory =
      new AccountStatsMySqlStoreFactory(verifiable, new ClusterMapConfig(verifiable), new MetricRegistry());
  return (AccountStatsMySqlStore) factory.getAccountStatsStore();
}
/**
 * Returns a path named "localbackup" inside a fresh temporary directory.
 * Only the directory is created on disk; the file itself is not.
 *
 * @throws IOException if the temporary directory cannot be created
 */
private static Path createTemporaryFile() throws IOException {
  return Files.createTempDirectory("AccountStatsMySqlStoreTest").resolve("localbackup");
}
/**
 * Generates a node-level {@link StatsWrapper} built from {@code numPartitions} randomly
 * generated store snapshots.
 *
 * @param numPartitions number of store snapshots to generate
 * @param numAccounts accounts per store snapshot
 * @param numContainers containers per account
 * @param reportType type of report to generate
 */
private static StatsWrapper generateStatsWrapper(int numPartitions, int numAccounts, int numContainers,
    StatsReportType reportType) {
  // One random source shared across all generated store snapshots.
  Random rnd = new Random();
  List<StatsSnapshot> perStoreStats = new ArrayList<>(numPartitions);
  int remaining = numPartitions;
  while (remaining-- > 0) {
    perStoreStats.add(TestUtils.generateStoreStats(numAccounts, numContainers, rnd, reportType));
  }
  return TestUtils.generateNodeStats(perStoreStats, 1000, reportType);
}
/**
 * Generates a {@link HostAccountStorageStatsWrapper} with random per-container usage for every
 * partition/account pair.
 *
 * @param numPartitions number of partitions to generate stats for
 * @param numAccounts accounts per partition
 * @param numContainersPerAccount containers per account
 * @param reportType unused here; kept for signature symmetry with other generators
 */
private static HostAccountStorageStatsWrapper generateHostAccountStorageStatsWrapper(int numPartitions,
    int numAccounts, int numContainersPerAccount, StatsReportType reportType) {
  HostAccountStorageStats stats = new HostAccountStorageStats(
      StorageStatsUtilTest.generateRandomHostAccountStorageStats(numPartitions, numAccounts,
          numContainersPerAccount, 100000L, 2, 10));
  StatsHeader header = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, 1000, numPartitions,
      numPartitions, Collections.emptyList());
  return new HostAccountStorageStatsWrapper(header, stats);
}
/**
 * Asserts that the account reports table contains exactly the expected number of rows.
 * Uses a single COUNT(*) query instead of streaming every row back to the client just to
 * count them.
 *
 * @param mySqlStore the store whose backing data source is queried
 * @param expectedNumRows the expected number of rows in the account reports table
 * @throws SQLException if the query fails
 */
private void assertTableSize(AccountStatsMySqlStore mySqlStore, int expectedNumRows) throws SQLException {
  int numRows = 0;
  try (Connection connection = mySqlStore.getDataSource().getConnection();
      Statement statement = connection.createStatement();
      ResultSet resultSet = statement.executeQuery(
          "SELECT COUNT(*) FROM " + AccountReportsDao.ACCOUNT_REPORTS_TABLE)) {
    if (resultSet.next()) {
      numRows = resultSet.getInt(1);
    }
  }
  assertEquals(expectedNumRows, numRows);
}
/**
 * Recursively asserts that two {@link StatsSnapshot} trees carry identical values and
 * structurally identical sub-maps.
 */
private void assertTwoStatsSnapshots(StatsSnapshot snapshot1, StatsSnapshot snapshot2) {
  // Values must agree at this level before descending.
  assertEquals("Snapshot values are not equal", snapshot1.getValue(), snapshot2.getValue());
  if (snapshot1.getSubMap() == null) {
    assertNull(snapshot2.getSubMap());
    return;
  }
  assertEquals("Snapshot submap size mismatch", snapshot1.getSubMap().size(), snapshot2.getSubMap().size());
  for (Map.Entry<String, StatsSnapshot> entry : snapshot1.getSubMap().entrySet()) {
    // Same keys must exist on both sides; recurse into the matching children.
    assertTrue(snapshot2.getSubMap().containsKey(entry.getKey()));
    assertTwoStatsSnapshots(entry.getValue(), snapshot2.getSubMap().get(entry.getKey()));
  }
}
/**
 * Converts an account-level {@link StatsWrapper} (partition -> account -> container) into a
 * partition-class-level one (partition class -> partition -> account/container key -> value),
 * using {@code partitionKeyToClassName} to map each partition key to its class name.
 *
 * @param accountStats the account-level stats to convert
 * @param partitionKeyToClassName partition key -> partition class name
 * @return a new StatsWrapper keyed by partition class; the source wrapper is not modified
 */
private StatsWrapper convertAccountStatsToPartitionClassStats(StatsWrapper accountStats,
    Map<String, String> partitionKeyToClassName) {
  Map<String, StatsSnapshot> partitionClassSubMap = new HashMap<>();
  StatsSnapshot originHostStats = accountStats.getSnapshot();
  for (String partitionKey : originHostStats.getSubMap().keySet()) {
    StatsSnapshot originPartitionStats = originHostStats.getSubMap().get(partitionKey);
    String currentClassName = partitionKeyToClassName.get(partitionKey);
    // One snapshot per partition class, created lazily the first time the class is seen.
    StatsSnapshot partitionClassStats =
        partitionClassSubMap.computeIfAbsent(currentClassName, k -> new StatsSnapshot(0L, new HashMap<>()));
    Map<String, StatsSnapshot> accountContainerSubMap = new HashMap<>();
    for (String accountKey : originPartitionStats.getSubMap().keySet()) {
      for (Map.Entry<String, StatsSnapshot> containerEntry : originPartitionStats.getSubMap()
          .get(accountKey)
          .getSubMap()
          .entrySet()) {
        String containerKey = containerEntry.getKey();
        // Copy so mutations of the converted tree don't affect the source snapshot.
        StatsSnapshot containerStats = new StatsSnapshot(containerEntry.getValue());
        // Flatten account + container into one combined key (format defined by Utils).
        String accountContainerKey =
            Utils.partitionClassStatsAccountContainerKey(Utils.accountIdFromStatsAccountKey(accountKey),
                Utils.containerIdFromStatsContainerKey(containerKey));
        accountContainerSubMap.put(accountContainerKey, containerStats);
      }
    }
    // Partition value = sum of its account/container values; also add to the class total.
    long accountContainerValue = accountContainerSubMap.values().stream().mapToLong(StatsSnapshot::getValue).sum();
    StatsSnapshot partitionStats = new StatsSnapshot(accountContainerValue, accountContainerSubMap);
    partitionClassStats.getSubMap().put(partitionKey, partitionStats);
    partitionClassStats.setValue(partitionClassStats.getValue() + accountContainerValue);
  }
  return new StatsWrapper(new StatsHeader(accountStats.getHeader()),
      new StatsSnapshot(originHostStats.getValue(), partitionClassSubMap));
}
/**
 * Converts host account storage stats (partition -> account -> container) into host partition
 * class storage stats, mapping every partition id to its class name via
 * {@code partitionIdToClassName} and re-adding each container's stats under that class.
 */
private HostPartitionClassStorageStatsWrapper convertHostAccountStorageStatsToHostPartitionClassStorageStats(
    HostAccountStorageStatsWrapper accountStatsWrapper, Map<Long, String> partitionIdToClassName) {
  HostPartitionClassStorageStats converted = new HostPartitionClassStorageStats();
  Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> storageStats =
      accountStatsWrapper.getStats().getStorageStats();
  for (Map.Entry<Long, Map<Short, Map<Short, ContainerStorageStats>>> partitionEntry : storageStats.entrySet()) {
    long partitionId = partitionEntry.getKey();
    String partitionClassName = partitionIdToClassName.get(partitionId);
    for (Map.Entry<Short, Map<Short, ContainerStorageStats>> accountEntry : partitionEntry.getValue().entrySet()) {
      short accountId = accountEntry.getKey();
      // Re-register every container's stats under the partition's class name.
      for (ContainerStorageStats containerStats : accountEntry.getValue().values()) {
        converted.addContainerStorageStats(partitionClassName, partitionId, accountId, containerStats);
      }
    }
  }
  return new HostPartitionClassStorageStatsWrapper(new StatsHeader(accountStatsWrapper.getHeader()),
      converted);
}
}
| apache-2.0 |
dbe-it/webtester-core | webtester-support-assertj/src/test/java/info/novatec/testit/webtester/support/assertj/RadioButtonAssertTest.java | 1213 | package info.novatec.testit.webtester.support.assertj;
import static info.novatec.testit.webtester.support.assertj.WebTesterAssertions.assertThat;
import static org.mockito.Mockito.doReturn;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
import info.novatec.testit.webtester.pageobjects.RadioButton;
@RunWith(MockitoJUnitRunner.class)
public class RadioButtonAssertTest {

    /** Mock radio button stubbed in {@link #setUp()} to report itself as selected. */
    @Mock
    RadioButton checkedButton;
    /** Mock radio button left at Mockito's default stubbing, i.e. not selected. */
    @Mock
    RadioButton uncheckedButton;

    @Before
    public void setUp() {
        doReturn(true).when(checkedButton).isSelected();
    }

    /* selected */

    @Test
    public void selectedTrueTest() {
        assertThat(checkedButton).isSelected(true);
    }

    @Test(expected = AssertionError.class)
    public void selectedFalseTest() {
        assertThat(uncheckedButton).isSelected(true);
    }

    @Test
    public void notSelectedTrueTest() {
        assertThat(uncheckedButton).isNotSelected(true);
    }

    @Test(expected = AssertionError.class)
    public void notSelectedFalseTest() {
        assertThat(checkedButton).isNotSelected(true);
    }
}
| apache-2.0 |
jt120/algorithm | new-man/src/test/java/com/jt/test/sort/Select.java | 1378 | package com.jt.test.sort;
import java.util.Arrays;
import java.util.Random;
/**
 * Selection sort demo: repeatedly selects the smallest remaining element and swaps it into
 * place. O(n^2) comparisons, at most n-1 swaps, sorts in place, not stable.
 *
 * since 2016/10/19.
 */
public class Select {

    /**
     * Sorts the array into ascending order in place using selection sort.
     *
     * @param data array to sort; elements must be mutually comparable and non-null
     */
    public static void sort(Comparable[] data) {
        for (int i = 0; i < data.length; i++) {
            // Find the index of the smallest element in data[i..].
            int min = i;
            for (int j = i + 1; j < data.length; j++) {
                if (greater(data, min, j)) {
                    min = j;
                }
            }
            exch(data, i, min);
        }
    }

    /**
     * Returns true when data[a] is strictly greater than data[b].
     * (Renamed from the misleading "less", which actually performed this comparison.)
     */
    private static boolean greater(Comparable[] data, int a, int b) {
        return data[a].compareTo(data[b]) > 0;
    }

    /** Swaps the elements at positions i and j. */
    private static void exch(Comparable[] data, int i, int j) {
        Comparable tmp = data[i];
        data[i] = data[j];
        data[j] = tmp;
    }

    /**
     * Returns true when the array is in ascending order (empty and single-element arrays
     * are trivially sorted).
     */
    public static boolean isSort(Comparable[] data) {
        for (int i = 0; i < data.length - 1; i++) {
            if (greater(data, i, i + 1)) {
                return false;
            }
        }
        return true;
    }

    /** Sorts ten random ints, reports a failure to stderr, and prints the result. */
    public static void main(String[] args) {
        Random random = new Random();
        Integer[] data = new Integer[10];
        for (int i = 0; i < data.length; i++) {
            data[i] = random.nextInt(100);
        }
        sort(data);
        if (!isSort(data)) {
            System.err.println("not sort");
        }
        System.out.println(Arrays.toString(data));
    }
}
| apache-2.0 |
jexp/idea2 | plugins/groovy/test/org/jetbrains/plugins/groovy/lang/actions/updown/GroovyMoveStatementTest.java | 4349 | /*
* Copyright 2000-2008 JetBrains s.r.o.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jetbrains.plugins.groovy.lang.actions.updown;
import com.intellij.ide.DataManager;
import com.intellij.openapi.application.Result;
import com.intellij.openapi.command.WriteCommandAction;
import com.intellij.openapi.editor.Editor;
import com.intellij.openapi.editor.actionSystem.EditorActionHandler;
import com.intellij.openapi.editor.actionSystem.EditorActionManager;
import com.intellij.openapi.editor.ex.DocumentEx;
import com.intellij.testFramework.fixtures.LightCodeInsightFixtureTestCase;
import org.jetbrains.plugins.groovy.GroovyFileType;
import org.jetbrains.plugins.groovy.util.TestUtils;
import org.jetbrains.plugins.groovy.lang.editor.actions.GroovyEditorActionsManager;
import java.util.List;
/**
 * Runs the move-statement-up/down editor actions over before/after fixtures and checks the
 * resulting editor text.
 *
 * @author ilyas
 */
public class GroovyMoveStatementTest extends LightCodeInsightFixtureTestCase {

  @Override
  protected String getBasePath() {
    return TestUtils.getTestDataPath() + "groovy/actions/moveStatement/";
  }

  public void testClazz1() throws Throwable { assertMovedDown(); }
  public void testClazz2() throws Throwable { assertMovedUp(); }
  public void testClos2() throws Throwable { assertMovedUp(); }
  public void testMeth1() throws Throwable { assertMovedDown(); }
  public void testMeth2() throws Throwable { assertMovedDown(); }
  public void testMeth3() throws Throwable { assertMovedUp(); }
  public void testMeth4() throws Throwable { assertMovedUp(); }
  public void testIfst() throws Throwable { assertMovedDown(); }
  public void testIfst2() throws Throwable { assertMovedUp(); }
  public void testSimple1() throws Throwable { assertMovedDown(); }
  public void testSimple2() throws Throwable { assertMovedUp(); }
  public void testTryst1() throws Throwable { assertMovedDown(); }
  public void testTryst2() throws Throwable { assertMovedDown(); }
  public void testStatementOutsideClosure() throws Throwable { assertMovedDown(); }
  public void testVariableOutsideClosure() throws Throwable { assertMovedUp(); }
  public void testVariableOutsideClosureDown() throws Throwable { assertMovedDown(); }
  public void testStatementInsideClosure() throws Throwable { assertMovedUp(); }
  public void testMoveGroovydocWithMethod() throws Throwable { assertMovedDown(); }
  public void testMoveMethodWithGroovydoc() throws Throwable { assertMovedDown(); }
  public void testMoveSecondFieldUp() throws Throwable { assertMovedUp(); }
  public void testMoveFirstFieldDown() throws Throwable { assertMovedDown(); }
  public void testVariableOverMethodInScript() throws Throwable { assertMovedDown(); }
  public void testVariableOverClassInScript() throws Throwable { assertMovedDown(); }
  public void testUpFromLastOffset() throws Throwable { assertMovedUp(); }
  public void testClosureWithPrequel() throws Throwable { assertMovedUp(); }
  public void testMultiLineVariable() throws Throwable { assertMovedDown(); }
  public void testClosureVariableByRBrace() throws Throwable { assertMovedUp(); }

  /** Runs the current fixture with the move-statement-down editor action. */
  private void assertMovedDown() throws Exception {
    doTest(GroovyEditorActionsManager.MOVE_STATEMENT_DOWN_ACTION);
  }

  /** Runs the current fixture with the move-statement-up editor action. */
  private void assertMovedUp() throws Exception {
    doTest(GroovyEditorActionsManager.MOVE_STATEMENT_UP_ACTION);
  }

  /**
   * Loads "&lt;testName&gt;.test" (a before/after pair), applies the given editor action
   * inside a write command, strips trailing spaces, and compares with the expected text.
   */
  public void doTest(final String actionId) throws Exception {
    final List<String> input = TestUtils.readInput(getTestDataPath() + getTestName(true) + ".test");
    myFixture.configureByText(GroovyFileType.GROOVY_FILE_TYPE, input.get(0));
    final EditorActionHandler actionHandler = EditorActionManager.getInstance().getActionHandler(actionId);
    new WriteCommandAction(getProject()) {
      protected void run(Result result) throws Throwable {
        final Editor editor = myFixture.getEditor();
        actionHandler.execute(editor, DataManager.getInstance().getDataContext(editor.getContentComponent()));
        ((DocumentEx) editor.getDocument()).stripTrailingSpaces(false);
      }
    }.execute();
    myFixture.checkResult(input.get(1));
  }
}
| apache-2.0 |
gabedwrds/cas | support/cas-server-support-reports/src/main/java/org/apereo/cas/web/report/ConfigurationStateController.java | 1890 | package org.apereo.cas.web.report;
import org.apereo.cas.web.report.util.ControllerUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.actuate.endpoint.mvc.AbstractNamedMvcEndpoint;
import org.springframework.cloud.bus.BusProperties;
import org.springframework.cloud.config.server.config.ConfigServerProperties;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.servlet.ModelAndView;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.util.HashMap;
import java.util.Map;
/**
 * Controller that exposes the CAS internal state and beans
 * as JSON. The report is available at {@code /status/config}.
 *
 * @author Misagh Moayyed
 * @since 4.1
 */
public class ConfigurationStateController extends AbstractNamedMvcEndpoint {

    private static final String VIEW_CONFIG = "monitoring/viewConfig";

    /** Optional: only present when a cloud bus is configured. */
    @Autowired(required = false)
    private BusProperties busProperties;

    @Autowired
    private ConfigServerProperties configServerProperties;

    public ConfigurationStateController() {
        super("configstate", "/config", true, true);
    }

    /**
     * Renders the configuration state view, populating the model with the
     * config-server / cloud-bus endpoint settings for the current context path.
     *
     * @param request the request
     * @param response the response
     * @return the model and view
     * @throws Exception the exception
     */
    @GetMapping
    protected ModelAndView handleRequestInternal(final HttpServletRequest request,
                                                 final HttpServletResponse response) throws Exception {
        final String contextPath = request.getContextPath();
        final Map<String, Object> viewModel = new HashMap<>();
        ControllerUtils.configureModelMapForConfigServerCloudBusEndpoints(busProperties, configServerProperties,
                contextPath, viewModel);
        return new ModelAndView(VIEW_CONFIG, viewModel);
    }
}
| apache-2.0 |
lukecampbell/webtest | src/main/java/com/canoo/ant/table/APropertyTable.java | 8495 | package com.canoo.ant.table;
import com.canoo.ant.filter.AllEqualsFilter;
import com.canoo.ant.filter.AllFilter;
import com.canoo.ant.filter.ITableFilter;
import org.apache.log4j.Logger;
import java.io.File;
import java.io.IOException;
import java.util.*;
/**
 * Base class for property tables read from some physical container (file format decided by
 * subclasses via {@link #read(String)}). Rows are Properties objects; columns listed in the
 * JOIN (meta) table are recursively expanded through other tables, with the recursion depth
 * bounded per thread by MAX_DEPTH.
 */
public abstract class APropertyTable implements IPropertyTable {

    private static final Logger LOG = Logger.getLogger(APropertyTable.class);
    private static final int MAX_DEPTH = 10; // max recursion depth
    // Per-thread recursion depth counter for nested table expansions (raw ThreadLocal, pre-generics style).
    private static final ThreadLocal DEPTH = new ThreadLocal();

    // Physical container (file) the table data is read from.
    private File fContainer;
    // Name of this table within the container.
    private String fTable;
    // Optional prefix prepended (as "<prefix>.") to every property key of result rows.
    private String fPrefix;
    // Row filter; defaults to AllFilter, which passes everything through.
    private ITableFilter fFilter;
    // Lazily read cache of the raw data rows.
    private List fRawTable;
    // Lazily read cache of the JOIN (meta) table rows.
    private List fMetaTable;

    protected static final String EMPTY = "";
    protected static final String KEY_JOIN = "JOIN";

    protected APropertyTable() {
        fFilter = new AllFilter();
        // Initialize the per-thread depth counter the first time this thread creates a table.
        if( DEPTH.get() == null ) {
            setDepth(0);
        }
    }

    private static void setDepth(int depth){
        DEPTH.set(new Integer(depth));
    }

    private static int getDepth(){
        return((Integer)DEPTH.get()).intValue();
    }

    /**
     * Builds one expander table per column of this table that has an entry in the JOIN
     * (meta) table. Columns without meta information are not expanded at all.
     * @return columnName -> expander (Type IPropertyTable)
     */
    public Map getColumnInfo() {
        List meta = getMetaTable();
        Map result = new HashMap(meta.size()); // smaller is likely
        // find all properties for this table
        List tableSpecificColumnInfo = new AllEqualsFilter(TableFactory.KEY_TABLE).filter(meta, getTable());
        for (Iterator eachColumnInfo = tableSpecificColumnInfo.iterator(); eachColumnInfo.hasNext();) {
            Properties colInfo = (Properties) eachColumnInfo.next();
            try {
                // tableClass defaults to the current class
                IPropertyTable table = TableFactory.createTable(colInfo, getClass().getName());
                ITableFilter filter = TableFactory.createFilter(colInfo);
                final File container;
                if (colInfo.getProperty(TableFactory.KEY_CONTAINER, "").length() > 0) {
                    // Expander has its own container, resolved relative to this table's container directory.
                    container = new File(getContainer().getParentFile(), colInfo.getProperty(TableFactory.KEY_CONTAINER));
                    colInfo.remove(TableFactory.KEY_CONTAINER); // to be sure that it doesn't get used with wrong path
                }
                else {
                    container = getContainer();
                }
                String key = colInfo.getProperty(TableFactory.KEY_NAME); // no default possible
                TableFactory.initOrDefault(table, filter, colInfo, container, key);
                result.put(key, table);
            } catch (Exception e) {
                LOG.error("cannot work with Property: " + colInfo.toString(), e);
                throw new RuntimeException("Cannot work with Property: " + colInfo.toString(), e);
            }
        }
        return result;
    }

    /**
     * Returns the filtered rows of this table, recursively expanded column by column via the
     * expanders from {@link #getColumnInfo()}, and finally re-keyed with {@code prefix} if set.
     * Expansion stops (with an error log) once the per-thread depth exceeds MAX_DEPTH, which
     * guards against cyclic JOIN definitions.
     */
    public List getPropertiesList(final String filterValue, final String prefix) {
        // start with copy of initial table
        // if current filter concerns extension keys, filter before extending
        // filtering in advance also lowers memory consumption in the average
        List result = getFilter().filter(getRawTable(), filterValue);
        if (getDepth() > MAX_DEPTH){
            LOG.error("processing grounded due to excessive recursion calls: "+getDepth());
            return result;
        }
        setDepth(getDepth()+1);
        final Map colInfo = getColumnInfo();
        // only go over entries in the colInfo.
        // (property names without colInfo info are not expanded)
        for (Iterator eachExpandable = colInfo.keySet().iterator(); eachExpandable.hasNext();) {
            String expansionName = (String) eachExpandable.next();
            expandName(result, expansionName, colInfo);
        }
        setDepth(getDepth()-1);
        // filter a second time to allow filters to work on expansions
        result = getFilter().filter(result, filterValue);
        // prefix is processed after filtering
        if (prefix!=null && prefix.length()>0){
            result = mapPrefix(result, prefix);
        }
        return result;
    }

    // like a ruby map!
    // Returns a copy of the row list where every property key is rewritten to "<prefix>.<key>".
    private List mapPrefix(List result, final String prefix) {
        List collect = new ArrayList(result.size());
        for (Iterator eachProps = result.iterator(); eachProps.hasNext();) {
            Properties props = (Properties) eachProps.next();
            Properties mapped = new Properties();
            for (Iterator eachKey = props.keySet().iterator(); eachKey.hasNext();) {
                String key = (String) eachKey.next();
                String value = props.getProperty(key);
                mapped.setProperty(prefix+"."+key, value);
            }
            collect.add(mapped);
        }
        return collect;
    }

    /**
     * Replaces each row in {@code result} by its expansions for the given column. Rows with
     * no matching expansion are kept unchanged (OUTER-join-like behaviour).
     */
    protected void expandName(List result, String expansionName, Map colInfo) {
        List expansions = new LinkedList(); // cannot add while iterating. store and add later
        for (Iterator eachProperties = result.iterator(); eachProperties.hasNext();) {
            Properties props = (Properties) eachProperties.next();
            List newExpansions = expandProps(props, expansionName, colInfo);
            // default behaviour: like OUTER join, we do not shrink if nothing found
            if (newExpansions.size() > 0) {
                eachProperties.remove();
                expansions.addAll(newExpansions);
            }
        }
        result.addAll(expansions);
    }

    /**
     * Fetches, from the expander table of {@code expansionName}, all rows matching this row's
     * value for that column, and merges each of them with {@code props}. Note that
     * {@code putAll(props)} means the original row's values win over the expansion's on key
     * collisions.
     * @return one merged Properties per matching expansion row; empty list when none match
     */
    protected List expandProps(Properties props, String expansionName, Map colInfo) {
        String value = props.getProperty(expansionName);
        List propExpansions = new LinkedList();
        IPropertyTable expansionTable = (IPropertyTable) colInfo.get(expansionName);
        // recursive call
        List expandWith = expansionTable.getPropertiesList(value, expansionTable.getPrefix());
        for (Iterator eachExpansion = expandWith.iterator(); eachExpansion.hasNext();) {
            Properties expandProps = (Properties) eachExpansion.next();
            // merge expansion with current line
            expandProps.putAll(props);
            // store for later adding
            propExpansions.add(expandProps);
        }
        return propExpansions;
    }

    //-------------- field accessors ------------------

    public File getContainer() {
        return fContainer;
    }

    public void setContainer(File container) {
        fContainer = container;
    }

    public String getTable() {
        return fTable;
    }

    public void setTable(String table) {
        fTable = table;
    }

    public ITableFilter getFilter() {
        return fFilter;
    }

    public void setFilter(ITableFilter filter) {
        fFilter = filter;
    }

    public String getPrefix() {
        return fPrefix;
    }

    public void setPrefix(String prefix) {
        fPrefix = prefix;
    }

    //-------------- how to read specifics ------------------

    /** lazy getter, cached */
    public List getRawTable() {
        fRawTable = getCachedTable(getTable(), fRawTable);
        return fRawTable;
    }

    /** lazy getter, cached */
    public List getMetaTable() {
        // Containers without a JOIN table expose no meta information at all.
        if (hasJoinTable()) {
            fMetaTable = getCachedTable(KEY_JOIN, fMetaTable);
        }
        else {
            fMetaTable = Collections.EMPTY_LIST;
        }
        return fMetaTable;
    }

    /**
     * Indicates if the table container has a JOIN table.
     * @return default is <code>true</code>
     */
    protected boolean hasJoinTable() {
        return true;
    }

    /**
     * Returns {@code tableCache} if already populated, otherwise reads the named table from
     * the container via {@link #read(String)}. IOExceptions are wrapped into RuntimeException
     * with container/table context in the message.
     */
    protected List getCachedTable(final String table, List tableCache) {
        if (tableCache != null) {
            return tableCache;
        }
        try {
            tableCache = read(table);
        }
        catch (final IOException e) {
            LOG.error("Cannot read " + getContainer() + " " + table, e);
            String message = "Cannot read container >" + getContainer() + "<";
            if (table != null)
                message += " (table " + table + ")";
            message += ": " + e.getMessage();
            throw new RuntimeException(message, e);
        }
        if (tableCache.isEmpty()) {
            LOG.debug("no entry in " + getContainer() + "/" + table);
        }
        LOG.debug(tableCache.size()+" entries in "+getContainer()+ " " + table);
        return tableCache;
    }

    /** Reads all rows of the given table from the container; format-specific subclass hook. */
    protected abstract List read(String table) throws IOException;
}
| apache-2.0 |
looker-open-source/java-spanner | google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbstractResultSet.java | 50481 | /*
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.cloud.spanner;
import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerException;
import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerExceptionForCancellation;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;
import com.google.api.client.util.BackOff;
import com.google.api.client.util.ExponentialBackOff;
import com.google.api.gax.retrying.RetrySettings;
import com.google.cloud.ByteArray;
import com.google.cloud.Date;
import com.google.cloud.Timestamp;
import com.google.cloud.spanner.Type.StructField;
import com.google.cloud.spanner.spi.v1.SpannerRpc;
import com.google.cloud.spanner.v1.stub.SpannerStubSettings;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.AbstractIterator;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.Uninterruptibles;
import com.google.protobuf.ByteString;
import com.google.protobuf.ListValue;
import com.google.protobuf.Value.KindCase;
import com.google.spanner.v1.PartialResultSet;
import com.google.spanner.v1.ResultSetMetadata;
import com.google.spanner.v1.ResultSetStats;
import com.google.spanner.v1.Transaction;
import com.google.spanner.v1.TypeCode;
import io.grpc.Context;
import io.opencensus.common.Scope;
import io.opencensus.trace.AttributeValue;
import io.opencensus.trace.Span;
import io.opencensus.trace.Tracer;
import io.opencensus.trace.Tracing;
import java.io.IOException;
import java.io.Serializable;
import java.math.BigDecimal;
import java.util.AbstractList;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executor;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.annotation.Nullable;
/** Implementation of {@link ResultSet}. */
abstract class AbstractResultSet<R> extends AbstractStructReader implements ResultSet {
private static final Tracer tracer = Tracing.getTracer();
/** Receives lifecycle callbacks while a streaming read/query result is consumed. */
interface Listener {
  /**
   * Called when transaction metadata is seen. This method may be invoked at most once. If the
   * method is invoked, it will precede {@link #onError(SpannerException)} or {@link #onDone()}.
   */
  void onTransactionMetadata(Transaction transaction, boolean shouldIncludeId)
      throws SpannerException;

  /** Called when the read finishes with an error. Returns the error that should be thrown. */
  SpannerException onError(SpannerException e, boolean withBeginTransaction);

  /** Called when the read finishes normally. */
  void onDone(boolean withBeginTransaction);
}
@VisibleForTesting
static class GrpcResultSet extends AbstractResultSet<List<Object>> {
  // Adapts the stream of PartialResultSet messages into a stream of decoded values.
  private final GrpcValueIterator iterator;
  private final Listener listener;
  // Row most recently produced by next(); null until the first next() call.
  private GrpcStruct currRow;
  // Previously recorded error, rethrown on every subsequent next() call.
  // NOTE(review): no assignment is visible in this class — presumably set elsewhere; verify.
  private SpannerException error;
  // Query statistics; only populated once the stream has been fully consumed.
  private ResultSetStats statistics;
  private boolean closed;

  GrpcResultSet(CloseableIterator<PartialResultSet> iterator, Listener listener) {
    this.iterator = new GrpcValueIterator(iterator);
    this.listener = listener;
  }

  /** Returns the current row; requires a successful next() and a still-open result set. */
  @Override
  protected GrpcStruct currRow() {
    checkState(!closed, "ResultSet is closed");
    checkState(currRow != null, "next() call required");
    return currRow;
  }

  /**
   * Advances to the next row. On the first call, consumes the stream's metadata message and
   * notifies the listener of any returned transaction. Any failure is routed through the
   * listener (which may translate it) and closes the result set.
   */
  @Override
  public boolean next() throws SpannerException {
    if (error != null) {
      throw newSpannerException(error);
    }
    try {
      if (currRow == null) {
        // First call: metadata precedes all row data on the stream.
        ResultSetMetadata metadata = iterator.getMetadata();
        if (metadata.hasTransaction()) {
          listener.onTransactionMetadata(
              metadata.getTransaction(), iterator.isWithBeginTransaction());
        } else if (iterator.isWithBeginTransaction()) {
          // The query should have returned a transaction.
          throw SpannerExceptionFactory.newSpannerException(
              ErrorCode.FAILED_PRECONDITION, AbstractReadContext.NO_TRANSACTION_RETURNED_MSG);
        }
        currRow = new GrpcStruct(iterator.type(), new ArrayList<>());
      }
      boolean hasNext = currRow.consumeRow(iterator);
      if (!hasNext) {
        // Stats arrive with the last message, so they are only available at end-of-stream.
        statistics = iterator.getStats();
      }
      return hasNext;
    } catch (Throwable t) {
      throw yieldError(
          SpannerExceptionFactory.asSpannerException(t),
          iterator.isWithBeginTransaction() && currRow == null);
    }
  }

  /** Null until the stream has been fully consumed (see next()). */
  @Override
  @Nullable
  public ResultSetStats getStats() {
    return statistics;
  }

  @Override
  public void close() {
    listener.onDone(iterator.isWithBeginTransaction());
    iterator.close("ResultSet closed");
    closed = true;
  }

  @Override
  public Type getType() {
    checkState(currRow != null, "next() call required");
    return currRow.getType();
  }

  // Reports the error to the listener, closes the result set, and throws the (possibly
  // translated) exception. Declared with a return type so callers can write "throw yieldError(...)".
  private SpannerException yieldError(SpannerException e, boolean beginTransaction) {
    SpannerException toThrow = listener.onError(e, beginTransaction);
    close();
    throw toThrow;
  }
}
/**
 * Adapts a stream of {@code PartialResultSet} messages into a stream of {@code Value} messages.
 *
 * <p>A single logical value may be split ("chunked") across the tail of one
 * {@code PartialResultSet} and the head of the following one(s); this iterator reassembles
 * such chunks. Only STRING and LIST values can be chunked.
 */
private static class GrpcValueIterator extends AbstractIterator<com.google.protobuf.Value> {
  /** What the caller needs next from the stream. */
  private enum StreamValue {
    METADATA,
    RESULT,
  }

  private final CloseableIterator<PartialResultSet> stream;
  private ResultSetMetadata metadata; // set from the first message on the stream
  private Type type; // row type decoded from metadata
  private PartialResultSet current; // message currently being consumed
  private int pos; // next value index within 'current'
  private ResultSetStats statistics; // set when a message carrying stats is seen

  GrpcValueIterator(CloseableIterator<PartialResultSet> stream) {
    this.stream = stream;
  }

  @Override
  protected com.google.protobuf.Value computeNext() {
    if (!ensureReady(StreamValue.RESULT)) {
      endOfData();
      return null;
    }
    com.google.protobuf.Value value = current.getValues(pos++);
    KindCase kind = value.getKindCase();
    if (!isMergeable(kind)) {
      if (pos == current.getValuesCount() && current.getChunkedValue()) {
        // Only strings and lists can legally be chunked.
        throw newSpannerException(ErrorCode.INTERNAL, "Unexpected chunked PartialResultSet.");
      } else {
        return value;
      }
    }
    if (!current.getChunkedValue() || pos != current.getValuesCount()) {
      // Mergeable kind, but this is not the chunked tail of the message: yield as-is.
      return value;
    }
    return kind == KindCase.STRING_VALUE ? mergeStringChunks(value) : mergeListChunks(value);
  }

  /**
   * Reassembles a chunked STRING value starting at {@code first}. Uses a StringBuilder so
   * the merge is linear in the total length; the previous {@code merged + chunk} String
   * concatenation was quadratic in the number of chunks.
   */
  private com.google.protobuf.Value mergeStringChunks(com.google.protobuf.Value first) {
    StringBuilder merged = new StringBuilder(first.getStringValue());
    while (current.getChunkedValue() && pos == current.getValuesCount()) {
      merged.append(nextChunk(KindCase.STRING_VALUE).getStringValue());
    }
    return com.google.protobuf.Value.newBuilder().setStringValue(merged.toString()).build();
  }

  /** Reassembles a chunked LIST value starting at {@code first}. */
  private com.google.protobuf.Value mergeListChunks(com.google.protobuf.Value first) {
    List<com.google.protobuf.Value> merged = new ArrayList<>(first.getListValue().getValuesList());
    while (current.getChunkedValue() && pos == current.getValuesCount()) {
      concatLists(merged, nextChunk(KindCase.LIST_VALUE).getListValue().getValuesList());
    }
    return com.google.protobuf.Value.newBuilder()
        .setListValue(ListValue.newBuilder().addAllValues(merged))
        .build();
  }

  /**
   * Advances to the next value in the stream, which must be the continuation of a chunked
   * value of the given kind.
   *
   * @throws SpannerException if the stream ends or the continuation has the wrong kind
   */
  private com.google.protobuf.Value nextChunk(KindCase kind) {
    if (!ensureReady(StreamValue.RESULT)) {
      throw newSpannerException(
          ErrorCode.INTERNAL, "Stream closed in the middle of chunked value");
    }
    com.google.protobuf.Value newValue = current.getValues(pos++);
    if (newValue.getKindCase() != kind) {
      throw newSpannerException(
          ErrorCode.INTERNAL,
          "Unexpected type in middle of chunked value. Expected: "
              + kind
              + " but got: "
              + newValue.getKindCase());
    }
    return newValue;
  }

  /** Returns the stream metadata, blocking on the first message if necessary. */
  ResultSetMetadata getMetadata() throws SpannerException {
    if (metadata == null) {
      if (!ensureReady(StreamValue.METADATA)) {
        throw newSpannerException(ErrorCode.INTERNAL, "Stream closed without sending metadata");
      }
    }
    return metadata;
  }

  /**
   * Get the query statistics. Query statistics are delivered with the last PartialResultSet in
   * the stream. Any attempt to call this method before the caller has finished consuming the
   * results will return null.
   */
  @Nullable
  ResultSetStats getStats() {
    return statistics;
  }

  /** Returns the decoded row type; only valid after metadata has been received. */
  Type type() {
    checkState(type != null, "metadata has not been received");
    return type;
  }

  /**
   * Pulls messages from the stream until a value (or, for {@code METADATA}, the metadata) is
   * available. Returns false when the stream ends first. Also captures statistics from any
   * message that carries them.
   */
  private boolean ensureReady(StreamValue requiredValue) throws SpannerException {
    while (current == null || pos >= current.getValuesCount()) {
      if (!stream.hasNext()) {
        return false;
      }
      current = stream.next();
      pos = 0;
      if (type == null) {
        // This is the first message on the stream.
        if (!current.hasMetadata() || !current.getMetadata().hasRowType()) {
          throw newSpannerException(ErrorCode.INTERNAL, "Missing type metadata in first message");
        }
        metadata = current.getMetadata();
        com.google.spanner.v1.Type typeProto =
            com.google.spanner.v1.Type.newBuilder()
                .setCode(TypeCode.STRUCT)
                .setStructType(metadata.getRowType())
                .build();
        try {
          type = Type.fromProto(typeProto);
        } catch (IllegalArgumentException e) {
          throw newSpannerException(
              ErrorCode.INTERNAL, "Invalid type metadata: " + e.getMessage(), e);
        }
      }
      if (current.hasStats()) {
        statistics = current.getStats();
      }
      if (requiredValue == StreamValue.METADATA) {
        return true;
      }
    }
    return true;
  }

  void close(@Nullable String message) {
    stream.close(message);
  }

  boolean isWithBeginTransaction() {
    return stream.isWithBeginTransaction();
  }

  /** @param a is a mutable list and b will be concatenated into a. */
  private void concatLists(List<com.google.protobuf.Value> a, List<com.google.protobuf.Value> b) {
    if (a.size() == 0 || b.size() == 0) {
      a.addAll(b);
      return;
    } else {
      // If the seam elements are both mergeable and of the same kind, merge them into a
      // single element; otherwise the lists are simply appended.
      com.google.protobuf.Value last = a.get(a.size() - 1);
      com.google.protobuf.Value first = b.get(0);
      KindCase lastKind = last.getKindCase();
      KindCase firstKind = first.getKindCase();
      if (isMergeable(lastKind) && lastKind == firstKind) {
        com.google.protobuf.Value merged;
        if (lastKind == KindCase.STRING_VALUE) {
          String lastStr = last.getStringValue();
          String firstStr = first.getStringValue();
          merged =
              com.google.protobuf.Value.newBuilder().setStringValue(lastStr + firstStr).build();
        } else { // List: recurse, since nested lists may themselves be split.
          List<com.google.protobuf.Value> mergedList = new ArrayList<>();
          mergedList.addAll(last.getListValue().getValuesList());
          concatLists(mergedList, first.getListValue().getValuesList());
          merged =
              com.google.protobuf.Value.newBuilder()
                  .setListValue(ListValue.newBuilder().addAllValues(mergedList))
                  .build();
        }
        a.set(a.size() - 1, merged);
        a.addAll(b.subList(1, b.size()));
      } else {
        a.addAll(b);
      }
    }
  }

  /** Only STRING and LIST values can be split across PartialResultSets. */
  private boolean isMergeable(KindCase kind) {
    return kind == KindCase.STRING_VALUE || kind == KindCase.LIST_VALUE;
  }
}
/**
 * {@link Struct} implementation backed by the raw decoded values of one row of a gRPC
 * result stream. Mutable: {@link #consumeRow} re-fills {@code rowData} in place for each
 * row; use {@link #immutableCopy} to snapshot a row.
 */
static class GrpcStruct extends Struct implements Serializable {
  private final Type type;
  private final List<Object> rowData;

  /**
   * Builds an immutable version of this struct using {@link Struct#newBuilder()} which is used as
   * a serialization proxy.
   */
  private Object writeReplace() {
    Builder builder = Struct.newBuilder();
    List<Type.StructField> structFields = getType().getStructFields();
    for (int i = 0; i < structFields.size(); i++) {
      Type.StructField field = structFields.get(i);
      String fieldName = field.getName();
      Object value = rowData.get(i);
      Type fieldType = field.getType();
      switch (fieldType.getCode()) {
        case BOOL:
          builder.set(fieldName).to((Boolean) value);
          break;
        case INT64:
          builder.set(fieldName).to((Long) value);
          break;
        case FLOAT64:
          builder.set(fieldName).to((Double) value);
          break;
        case NUMERIC:
          builder.set(fieldName).to((BigDecimal) value);
          break;
        case STRING:
          builder.set(fieldName).to((String) value);
          break;
        case JSON:
          builder.set(fieldName).to(Value.json((String) value));
          break;
        case BYTES:
          builder.set(fieldName).to((ByteArray) value);
          break;
        case TIMESTAMP:
          builder.set(fieldName).to((Timestamp) value);
          break;
        case DATE:
          builder.set(fieldName).to((Date) value);
          break;
        case ARRAY:
          switch (fieldType.getArrayElementType().getCode()) {
            case BOOL:
              builder.set(fieldName).toBoolArray((Iterable<Boolean>) value);
              break;
            case INT64:
              builder.set(fieldName).toInt64Array((Iterable<Long>) value);
              break;
            case FLOAT64:
              builder.set(fieldName).toFloat64Array((Iterable<Double>) value);
              break;
            case NUMERIC:
              builder.set(fieldName).toNumericArray((Iterable<BigDecimal>) value);
              break;
            case STRING:
              builder.set(fieldName).toStringArray((Iterable<String>) value);
              break;
            case JSON:
              builder.set(fieldName).toJsonArray((Iterable<String>) value);
              break;
            case BYTES:
              builder.set(fieldName).toBytesArray((Iterable<ByteArray>) value);
              break;
            case TIMESTAMP:
              builder.set(fieldName).toTimestampArray((Iterable<Timestamp>) value);
              break;
            case DATE:
              builder.set(fieldName).toDateArray((Iterable<Date>) value);
              break;
            case STRUCT:
              builder
                  .set(fieldName)
                  .toStructArray(fieldType.getArrayElementType(), (Iterable<Struct>) value);
              break;
            default:
              throw new AssertionError(
                  "Unhandled array type code: " + fieldType.getArrayElementType());
          }
          break;
        case STRUCT:
          if (value == null) {
            builder.set(fieldName).to(fieldType, null);
          } else {
            builder.set(fieldName).to((Struct) value);
          }
          break;
        default:
          throw new AssertionError("Unhandled type code: " + fieldType.getCode());
      }
    }
    return builder.build();
  }

  GrpcStruct(Type type, List<Object> rowData) {
    this.type = type;
    this.rowData = rowData;
  }

  @Override
  public String toString() {
    return this.rowData.toString();
  }

  /**
   * Decodes the next row from {@code iterator} into {@code rowData}, replacing the previous
   * row. Returns false (with the row cleared) when the stream is exhausted.
   *
   * @throws SpannerException if the stream ends mid-row
   */
  boolean consumeRow(Iterator<com.google.protobuf.Value> iterator) {
    rowData.clear();
    if (!iterator.hasNext()) {
      return false;
    }
    for (Type.StructField fieldType : getType().getStructFields()) {
      if (!iterator.hasNext()) {
        throw newSpannerException(
            ErrorCode.INTERNAL,
            "Invalid value stream: end of stream reached before row is complete");
      }
      com.google.protobuf.Value value = iterator.next();
      rowData.add(decodeValue(fieldType.getType(), value));
    }
    return true;
  }

  /** Decodes a single proto value into the Java representation for the given column type. */
  private static Object decodeValue(Type fieldType, com.google.protobuf.Value proto) {
    if (proto.getKindCase() == KindCase.NULL_VALUE) {
      return null;
    }
    switch (fieldType.getCode()) {
      case BOOL:
        checkType(fieldType, proto, KindCase.BOOL_VALUE);
        return proto.getBoolValue();
      case INT64:
        // INT64 is transmitted as a decimal string to avoid JSON double precision loss.
        checkType(fieldType, proto, KindCase.STRING_VALUE);
        return Long.parseLong(proto.getStringValue());
      case FLOAT64:
        // valueProtoToFloat64 validates the kind itself (NUMBER_VALUE or the special
        // string encodings of NaN/Infinity).
        return valueProtoToFloat64(proto);
      case NUMERIC:
        // Guard added for consistency with the other string-encoded types: a wrong kind
        // now yields a clean INTERNAL error instead of a NumberFormatException on "".
        checkType(fieldType, proto, KindCase.STRING_VALUE);
        return new BigDecimal(proto.getStringValue());
      case STRING:
      case JSON:
        checkType(fieldType, proto, KindCase.STRING_VALUE);
        return proto.getStringValue();
      case BYTES:
        checkType(fieldType, proto, KindCase.STRING_VALUE);
        return ByteArray.fromBase64(proto.getStringValue());
      case TIMESTAMP:
        checkType(fieldType, proto, KindCase.STRING_VALUE);
        return Timestamp.parseTimestamp(proto.getStringValue());
      case DATE:
        checkType(fieldType, proto, KindCase.STRING_VALUE);
        return Date.parseDate(proto.getStringValue());
      case ARRAY:
        checkType(fieldType, proto, KindCase.LIST_VALUE);
        ListValue listValue = proto.getListValue();
        return decodeArrayValue(fieldType.getArrayElementType(), listValue);
      case STRUCT:
        checkType(fieldType, proto, KindCase.LIST_VALUE);
        ListValue structValue = proto.getListValue();
        return decodeStructValue(fieldType, structValue);
      default:
        throw new AssertionError("Unhandled type code: " + fieldType.getCode());
    }
  }

  /** Decodes a STRUCT value (transmitted as a list of its field values). */
  private static Struct decodeStructValue(Type structType, ListValue structValue) {
    List<Type.StructField> fieldTypes = structType.getStructFields();
    checkArgument(
        structValue.getValuesCount() == fieldTypes.size(),
        "Size mismatch between type descriptor and actual values.");
    List<Object> fields = new ArrayList<>(fieldTypes.size());
    List<com.google.protobuf.Value> fieldValues = structValue.getValuesList();
    for (int i = 0; i < fieldTypes.size(); ++i) {
      fields.add(decodeValue(fieldTypes.get(i).getType(), fieldValues.get(i)));
    }
    return new GrpcStruct(structType, fields);
  }

  static Object decodeArrayValue(Type elementType, ListValue listValue) {
    switch (elementType.getCode()) {
      case BOOL:
        // Use a view: element conversion is virtually free.
        return Lists.transform(
            listValue.getValuesList(),
            input -> input.getKindCase() == KindCase.NULL_VALUE ? null : input.getBoolValue());
      case INT64:
        // For int64/float64 types, use custom containers. These avoid wrapper object
        // creation for non-null arrays.
        return new Int64Array(listValue);
      case FLOAT64:
        return new Float64Array(listValue);
      case NUMERIC:
        {
          // Materialize list: element conversion is expensive and should happen only once.
          ArrayList<Object> list = new ArrayList<>(listValue.getValuesCount());
          for (com.google.protobuf.Value value : listValue.getValuesList()) {
            list.add(
                value.getKindCase() == KindCase.NULL_VALUE
                    ? null
                    : new BigDecimal(value.getStringValue()));
          }
          return list;
        }
      case STRING:
      case JSON:
        return Lists.transform(
            listValue.getValuesList(),
            input -> input.getKindCase() == KindCase.NULL_VALUE ? null : input.getStringValue());
      case BYTES:
        {
          // Materialize list: element conversion is expensive and should happen only once.
          ArrayList<Object> list = new ArrayList<>(listValue.getValuesCount());
          for (com.google.protobuf.Value value : listValue.getValuesList()) {
            list.add(
                value.getKindCase() == KindCase.NULL_VALUE
                    ? null
                    : ByteArray.fromBase64(value.getStringValue()));
          }
          return list;
        }
      case TIMESTAMP:
        {
          // Materialize list: element conversion is expensive and should happen only once.
          ArrayList<Object> list = new ArrayList<>(listValue.getValuesCount());
          for (com.google.protobuf.Value value : listValue.getValuesList()) {
            list.add(
                value.getKindCase() == KindCase.NULL_VALUE
                    ? null
                    : Timestamp.parseTimestamp(value.getStringValue()));
          }
          return list;
        }
      case DATE:
        {
          // Materialize list: element conversion is expensive and should happen only once.
          ArrayList<Object> list = new ArrayList<>(listValue.getValuesCount());
          for (com.google.protobuf.Value value : listValue.getValuesList()) {
            list.add(
                value.getKindCase() == KindCase.NULL_VALUE
                    ? null
                    : Date.parseDate(value.getStringValue()));
          }
          return list;
        }
      case STRUCT:
        {
          ArrayList<Struct> list = new ArrayList<>(listValue.getValuesCount());
          for (com.google.protobuf.Value value : listValue.getValuesList()) {
            if (value.getKindCase() == KindCase.NULL_VALUE) {
              list.add(null);
            } else {
              ListValue structValue = value.getListValue();
              list.add(decodeStructValue(elementType, structValue));
            }
          }
          return list;
        }
      default:
        throw new AssertionError("Unhandled type code: " + elementType.getCode());
    }
  }

  /** Throws an INTERNAL error if the proto value's kind does not match what the type needs. */
  private static void checkType(
      Type fieldType, com.google.protobuf.Value proto, KindCase expected) {
    if (proto.getKindCase() != expected) {
      throw newSpannerException(
          ErrorCode.INTERNAL,
          "Invalid value for column type "
              + fieldType
              + " expected "
              + expected
              + " but was "
              + proto.getKindCase());
    }
  }

  /** Snapshots the current row; the copy is unaffected by subsequent consumeRow calls. */
  Struct immutableCopy() {
    return new GrpcStruct(type, new ArrayList<>(rowData));
  }

  @Override
  public Type getType() {
    return type;
  }

  @Override
  public boolean isNull(int columnIndex) {
    return rowData.get(columnIndex) == null;
  }

  @Override
  protected boolean getBooleanInternal(int columnIndex) {
    return (Boolean) rowData.get(columnIndex);
  }

  @Override
  protected long getLongInternal(int columnIndex) {
    return (Long) rowData.get(columnIndex);
  }

  @Override
  protected double getDoubleInternal(int columnIndex) {
    return (Double) rowData.get(columnIndex);
  }

  @Override
  protected BigDecimal getBigDecimalInternal(int columnIndex) {
    return (BigDecimal) rowData.get(columnIndex);
  }

  @Override
  protected String getStringInternal(int columnIndex) {
    return (String) rowData.get(columnIndex);
  }

  @Override
  protected String getJsonInternal(int columnIndex) {
    return (String) rowData.get(columnIndex);
  }

  @Override
  protected ByteArray getBytesInternal(int columnIndex) {
    return (ByteArray) rowData.get(columnIndex);
  }

  @Override
  protected Timestamp getTimestampInternal(int columnIndex) {
    return (Timestamp) rowData.get(columnIndex);
  }

  @Override
  protected Date getDateInternal(int columnIndex) {
    return (Date) rowData.get(columnIndex);
  }

  /** Wraps the raw column value in a typed {@link Value}, preserving SQL NULL. */
  @Override
  protected Value getValueInternal(int columnIndex) {
    final List<Type.StructField> structFields = getType().getStructFields();
    final StructField structField = structFields.get(columnIndex);
    final Type columnType = structField.getType();
    final boolean isNull = rowData.get(columnIndex) == null;
    switch (columnType.getCode()) {
      case BOOL:
        return Value.bool(isNull ? null : getBooleanInternal(columnIndex));
      case INT64:
        return Value.int64(isNull ? null : getLongInternal(columnIndex));
      case NUMERIC:
        return Value.numeric(isNull ? null : getBigDecimalInternal(columnIndex));
      case FLOAT64:
        return Value.float64(isNull ? null : getDoubleInternal(columnIndex));
      case STRING:
        return Value.string(isNull ? null : getStringInternal(columnIndex));
      case JSON:
        // Previously missing: JSON columns fell through to the default branch and threw.
        return Value.json(isNull ? null : getJsonInternal(columnIndex));
      case BYTES:
        return Value.bytes(isNull ? null : getBytesInternal(columnIndex));
      case TIMESTAMP:
        return Value.timestamp(isNull ? null : getTimestampInternal(columnIndex));
      case DATE:
        return Value.date(isNull ? null : getDateInternal(columnIndex));
      case STRUCT:
        return Value.struct(isNull ? null : getStructInternal(columnIndex));
      case ARRAY:
        switch (columnType.getArrayElementType().getCode()) {
          case BOOL:
            return Value.boolArray(isNull ? null : getBooleanListInternal(columnIndex));
          case INT64:
            return Value.int64Array(isNull ? null : getLongListInternal(columnIndex));
          case NUMERIC:
            return Value.numericArray(isNull ? null : getBigDecimalListInternal(columnIndex));
          case FLOAT64:
            return Value.float64Array(isNull ? null : getDoubleListInternal(columnIndex));
          case STRING:
            return Value.stringArray(isNull ? null : getStringListInternal(columnIndex));
          case JSON:
            // Previously missing: ARRAY<JSON> columns fell through to the default branch.
            return Value.jsonArray(isNull ? null : getJsonListInternal(columnIndex));
          case BYTES:
            return Value.bytesArray(isNull ? null : getBytesListInternal(columnIndex));
          case TIMESTAMP:
            return Value.timestampArray(isNull ? null : getTimestampListInternal(columnIndex));
          case DATE:
            return Value.dateArray(isNull ? null : getDateListInternal(columnIndex));
          case STRUCT:
            return Value.structArray(
                columnType.getArrayElementType(),
                isNull ? null : getStructListInternal(columnIndex));
          default:
            // Report the column's element type, not this.type (the row STRUCT type, on
            // which getArrayElementType() would itself throw).
            throw new IllegalArgumentException(
                "Invalid array value type " + columnType.getArrayElementType());
        }
      default:
        // Report the column type, not this.type (the whole row type).
        throw new IllegalArgumentException("Invalid value type " + columnType);
    }
  }

  @Override
  protected Struct getStructInternal(int columnIndex) {
    return (Struct) rowData.get(columnIndex);
  }

  @Override
  protected boolean[] getBooleanArrayInternal(int columnIndex) {
    @SuppressWarnings("unchecked") // We know ARRAY<BOOL> produces a List<Boolean>.
    List<Boolean> values = (List<Boolean>) rowData.get(columnIndex);
    boolean[] r = new boolean[values.size()];
    for (int i = 0; i < values.size(); ++i) {
      if (values.get(i) == null) {
        // Primitive arrays cannot represent NULL elements.
        throw throwNotNull(columnIndex);
      }
      r[i] = values.get(i);
    }
    return r;
  }

  @Override
  @SuppressWarnings("unchecked") // We know ARRAY<BOOL> produces a List<Boolean>.
  protected List<Boolean> getBooleanListInternal(int columnIndex) {
    return Collections.unmodifiableList((List<Boolean>) rowData.get(columnIndex));
  }

  @Override
  protected long[] getLongArrayInternal(int columnIndex) {
    return getLongListInternal(columnIndex).toPrimitiveArray(columnIndex);
  }

  @Override
  protected Int64Array getLongListInternal(int columnIndex) {
    return (Int64Array) rowData.get(columnIndex);
  }

  @Override
  protected double[] getDoubleArrayInternal(int columnIndex) {
    return getDoubleListInternal(columnIndex).toPrimitiveArray(columnIndex);
  }

  @Override
  protected Float64Array getDoubleListInternal(int columnIndex) {
    return (Float64Array) rowData.get(columnIndex);
  }

  @Override
  @SuppressWarnings("unchecked") // We know ARRAY<NUMERIC> produces a List<BigDecimal>.
  protected List<BigDecimal> getBigDecimalListInternal(int columnIndex) {
    return (List<BigDecimal>) rowData.get(columnIndex);
  }

  @Override
  @SuppressWarnings("unchecked") // We know ARRAY<STRING> produces a List<String>.
  protected List<String> getStringListInternal(int columnIndex) {
    return Collections.unmodifiableList((List<String>) rowData.get(columnIndex));
  }

  @Override
  @SuppressWarnings("unchecked") // We know ARRAY<String> produces a List<String>.
  protected List<String> getJsonListInternal(int columnIndex) {
    return Collections.unmodifiableList((List<String>) rowData.get(columnIndex));
  }

  @Override
  @SuppressWarnings("unchecked") // We know ARRAY<BYTES> produces a List<ByteArray>.
  protected List<ByteArray> getBytesListInternal(int columnIndex) {
    return Collections.unmodifiableList((List<ByteArray>) rowData.get(columnIndex));
  }

  @Override
  @SuppressWarnings("unchecked") // We know ARRAY<TIMESTAMP> produces a List<Timestamp>.
  protected List<Timestamp> getTimestampListInternal(int columnIndex) {
    return Collections.unmodifiableList((List<Timestamp>) rowData.get(columnIndex));
  }

  @Override
  @SuppressWarnings("unchecked") // We know ARRAY<DATE> produces a List<Date>.
  protected List<Date> getDateListInternal(int columnIndex) {
    return Collections.unmodifiableList((List<Date>) rowData.get(columnIndex));
  }

  @Override
  @SuppressWarnings("unchecked") // We know ARRAY<STRUCT<...>> produces a List<STRUCT>.
  protected List<Struct> getStructListInternal(int columnIndex) {
    return Collections.unmodifiableList((List<Struct>) rowData.get(columnIndex));
  }
}
@VisibleForTesting
interface CloseableIterator<T> extends Iterator<T> {
  /**
   * Closes the iterator, freeing any underlying resources.
   *
   * @param message a message to include in the final RPC status
   */
  void close(@Nullable String message);

  /**
   * Returns whether the underlying call requested that a transaction be begun inline.
   * NOTE(review): presumably drives the caller's BeginTransaction-retry decision on
   * error -- confirm against the listener contract.
   */
  boolean isWithBeginTransaction();
}
/** Adapts a streaming read/query call into an iterator over partial result sets. */
@VisibleForTesting
static class GrpcStreamIterator extends AbstractIterator<PartialResultSet>
    implements CloseableIterator<PartialResultSet> {
  private static final Logger logger = Logger.getLogger(GrpcStreamIterator.class.getName());
  // Sentinel queued after the last real message (or after an error) to wake the consumer.
  private static final PartialResultSet END_OF_STREAM = PartialResultSet.newBuilder().build();

  private final ConsumerImpl consumer = new ConsumerImpl();
  // Hand-off between the gRPC event thread (producer) and the reading thread (consumer).
  private final BlockingQueue<PartialResultSet> stream;
  private final Statement statement; // may be null; used only to enrich error messages

  private SpannerRpc.StreamingCall call;
  private volatile boolean withBeginTransaction;
  // Set by onError() before END_OF_STREAM is queued, so the consumer sees it afterwards.
  private SpannerException error;

  @VisibleForTesting
  GrpcStreamIterator(int prefetchChunks) {
    this(null, prefetchChunks);
  }

  @VisibleForTesting
  GrpcStreamIterator(Statement statement, int prefetchChunks) {
    this.statement = statement;
    // One extra to allow for END_OF_STREAM message.
    this.stream = new LinkedBlockingQueue<>(prefetchChunks + 1);
  }

  protected final SpannerRpc.ResultStreamConsumer consumer() {
    return consumer;
  }

  public void setCall(SpannerRpc.StreamingCall call, boolean withBeginTransaction) {
    this.call = call;
    this.withBeginTransaction = withBeginTransaction;
  }

  @Override
  public void close(@Nullable String message) {
    // call is nulled out once END_OF_STREAM has been consumed; nothing to cancel then.
    if (call != null) {
      call.cancel(message);
    }
  }

  @Override
  public boolean isWithBeginTransaction() {
    return withBeginTransaction;
  }

  @Override
  protected final PartialResultSet computeNext() {
    PartialResultSet next;
    try {
      // TODO: Ideally honor io.grpc.Context while blocking here. In practice,
      // cancellation/deadline results in an error being delivered to "stream", which
      // should mean that we do not block significantly longer afterwards, but it would
      // be more robust to use poll() with a timeout.
      next = stream.take();
    } catch (InterruptedException e) {
      // Treat interrupt as a request to cancel the read.
      throw SpannerExceptionFactory.propagateInterrupt(e);
    }
    if (next != END_OF_STREAM) {
      // Replenish flow control: ask gRPC for one more message per message consumed.
      call.request(1);
      return next;
    }
    // All done - close() no longer needs to cancel the call.
    call = null;
    if (error != null) {
      throw SpannerExceptionFactory.newSpannerException(error);
    }
    endOfData();
    return null;
  }

  private void addToStream(PartialResultSet results) {
    // We assume that nothing from the user will interrupt gRPC event threads.
    Uninterruptibles.putUninterruptibly(stream, results);
  }

  /** Receives stream events on gRPC threads and forwards them into the blocking queue. */
  private class ConsumerImpl implements SpannerRpc.ResultStreamConsumer {
    @Override
    public void onPartialResultSet(PartialResultSet results) {
      addToStream(results);
    }

    @Override
    public void onCompleted() {
      addToStream(END_OF_STREAM);
    }

    @Override
    public void onError(SpannerException e) {
      if (statement != null) {
        if (logger.isLoggable(Level.FINEST)) {
          // Include parameter values if logging level is set to FINEST or higher.
          e =
              SpannerExceptionFactory.newSpannerExceptionPreformatted(
                  e.getErrorCode(),
                  String.format("%s - Statement: '%s'", e.getMessage(), statement.toString()),
                  e);
          logger.log(Level.FINEST, "Error executing statement", e);
        } else {
          // Below FINEST, attach only the SQL text (no parameter values).
          e =
              SpannerExceptionFactory.newSpannerExceptionPreformatted(
                  e.getErrorCode(),
                  String.format("%s - Statement: '%s'", e.getMessage(), statement.getSql()),
                  e);
        }
      }
      // Record the error first, then unblock the consumer with the sentinel.
      error = e;
      addToStream(END_OF_STREAM);
    }
  }
}
/**
 * Wraps an iterator over partial result sets, supporting resuming RPCs on error. This class keeps
 * track of the most recent resume token seen, and will buffer partial result set chunks that do
 * not have a resume token until one is seen or buffer space is exceeded, which reduces the chance
 * of yielding data to the caller that cannot be resumed.
 */
@VisibleForTesting
abstract static class ResumableStreamIterator extends AbstractIterator<PartialResultSet>
    implements CloseableIterator<PartialResultSet> {
  // Reuse the retry settings configured for ExecuteStreamingSql as the backoff source.
  private static final RetrySettings STREAMING_RETRY_SETTINGS =
      SpannerStubSettings.newBuilder().executeStreamingSqlSettings().getRetrySettings();
  private static final Logger logger = Logger.getLogger(ResumableStreamIterator.class.getName());

  private final BackOff backOff = newBackOff();
  // Chunks seen since the last resume token; replayed to the caller once it is safe.
  private final LinkedList<PartialResultSet> buffer = new LinkedList<>();
  private final int maxBufferSize;
  private final Span span;
  private CloseableIterator<PartialResultSet> stream; // null => (re)start on next compute
  private ByteString resumeToken; // most recent token seen; restart position
  private boolean finished; // underlying stream reported end-of-data

  /**
   * Indicates whether it is currently safe to retry RPCs. This will be {@code false} if we have
   * reached the maximum buffer size without seeing a restart token; in this case, we will drain
   * the buffer and remain in this state until we see a new restart token.
   */
  private boolean safeToRetry = true;

  protected ResumableStreamIterator(int maxBufferSize, String streamName, Span parent) {
    checkArgument(maxBufferSize >= 0);
    this.maxBufferSize = maxBufferSize;
    this.span = tracer.spanBuilderWithExplicitParent(streamName, parent).startSpan();
  }

  // Exponential backoff mirroring the streaming retry settings, clamped so it never stops.
  private static ExponentialBackOff newBackOff() {
    return new ExponentialBackOff.Builder()
        .setMultiplier(STREAMING_RETRY_SETTINGS.getRetryDelayMultiplier())
        .setInitialIntervalMillis(
            Math.max(10, (int) STREAMING_RETRY_SETTINGS.getInitialRetryDelay().toMillis()))
        .setMaxIntervalMillis(
            Math.max(1000, (int) STREAMING_RETRY_SETTINGS.getMaxRetryDelay().toMillis()))
        .setMaxElapsedTimeMillis(Integer.MAX_VALUE) // Prevent Backoff.STOP from getting returned.
        .build();
  }

  private static void backoffSleep(Context context, BackOff backoff) throws SpannerException {
    backoffSleep(context, nextBackOffMillis(backoff));
  }

  private static long nextBackOffMillis(BackOff backoff) throws SpannerException {
    try {
      return backoff.nextBackOffMillis();
    } catch (IOException e) {
      throw newSpannerException(ErrorCode.INTERNAL, e.getMessage(), e);
    }
  }

  // Sleeps for backoffMillis, waking early if the gRPC context is cancelled, in which
  // case a cancellation exception is thrown instead of resuming.
  private static void backoffSleep(Context context, long backoffMillis) throws SpannerException {
    tracer
        .getCurrentSpan()
        .addAnnotation(
            "Backing off",
            ImmutableMap.of("Delay", AttributeValue.longAttributeValue(backoffMillis)));
    final CountDownLatch latch = new CountDownLatch(1);
    final Context.CancellationListener listener =
        ignored -> {
          // Wakeup on cancellation / DEADLINE_EXCEEDED.
          latch.countDown();
        };
    context.addListener(listener, DirectExecutor.INSTANCE);
    try {
      if (backoffMillis == BackOff.STOP) {
        // Highly unlikely but we handle it just in case.
        backoffMillis = STREAMING_RETRY_SETTINGS.getMaxRetryDelay().toMillis();
      }
      if (latch.await(backoffMillis, TimeUnit.MILLISECONDS)) {
        // Woken by context cancellation.
        throw newSpannerExceptionForCancellation(context, null);
      }
    } catch (InterruptedException interruptExcept) {
      throw newSpannerExceptionForCancellation(context, interruptExcept);
    } finally {
      context.removeListener(listener);
    }
  }

  /** Same-thread executor used for the cancellation listener above. */
  private enum DirectExecutor implements Executor {
    INSTANCE;

    @Override
    public void execute(Runnable command) {
      command.run();
    }
  }

  /** Opens (or re-opens) the underlying stream, resuming at the given token if non-null. */
  abstract CloseableIterator<PartialResultSet> startStream(@Nullable ByteString resumeToken);

  @Override
  public void close(@Nullable String message) {
    if (stream != null) {
      stream.close(message);
      span.end(TraceUtil.END_SPAN_OPTIONS);
      stream = null;
    }
  }

  @Override
  public boolean isWithBeginTransaction() {
    return stream != null && stream.isWithBeginTransaction();
  }

  @Override
  protected PartialResultSet computeNext() {
    Context context = Context.current();
    while (true) {
      // Eagerly start stream before consuming any buffered items.
      if (stream == null) {
        span.addAnnotation(
            "Starting/Resuming stream",
            ImmutableMap.of(
                "ResumeToken",
                AttributeValue.stringAttributeValue(
                    resumeToken == null ? "null" : resumeToken.toStringUtf8())));
        try (Scope s = tracer.withSpan(span)) {
          // When start a new stream set the Span as current to make the gRPC Span a child of
          // this Span.
          stream = checkNotNull(startStream(resumeToken));
        }
      }
      // Buffer contains items up to a resume token or has reached capacity: flush.
      if (!buffer.isEmpty()
          && (finished || !safeToRetry || !buffer.getLast().getResumeToken().isEmpty())) {
        return buffer.pop();
      }
      try {
        if (stream.hasNext()) {
          PartialResultSet next = stream.next();
          boolean hasResumeToken = !next.getResumeToken().isEmpty();
          if (hasResumeToken) {
            resumeToken = next.getResumeToken();
            safeToRetry = true;
          }
          // If the buffer is empty and this chunk has a resume token or we cannot resume safely
          // anyway, we can yield it immediately rather than placing it in the buffer to be
          // returned on the next iteration.
          if ((hasResumeToken || !safeToRetry) && buffer.isEmpty()) {
            return next;
          }
          buffer.add(next);
          if (buffer.size() > maxBufferSize && buffer.getLast().getResumeToken().isEmpty()) {
            // We need to flush without a restart token. Errors encountered until we see
            // such a token will fail the read.
            safeToRetry = false;
          }
        } else {
          finished = true;
          if (buffer.isEmpty()) {
            endOfData();
            return null;
          }
        }
      } catch (SpannerException e) {
        if (safeToRetry && e.isRetryable()) {
          span.addAnnotation(
              "Stream broken. Safe to retry", TraceUtil.getExceptionAnnotations(e));
          logger.log(Level.FINE, "Retryable exception, will sleep and retry", e);
          // Truncate any items in the buffer before the last retry token.
          while (!buffer.isEmpty() && buffer.getLast().getResumeToken().isEmpty()) {
            buffer.removeLast();
          }
          assert buffer.isEmpty() || buffer.getLast().getResumeToken().equals(resumeToken);
          stream = null;
          try (Scope s = tracer.withSpan(span)) {
            // Honor a server-suggested retry delay if present; otherwise use exponential
            // backoff.
            long delay = e.getRetryDelayInMillis();
            if (delay != -1) {
              backoffSleep(context, delay);
            } else {
              backoffSleep(context, backOff);
            }
          }
          continue;
        }
        span.addAnnotation("Stream broken. Not safe to retry");
        TraceUtil.setWithFailure(span, e);
        throw e;
      } catch (RuntimeException e) {
        span.addAnnotation("Stream broken. Not safe to retry");
        TraceUtil.setWithFailure(span, e);
        throw e;
      }
    }
  }
}
/**
 * Decodes a FLOAT64 column value. Spanner transmits the non-finite doubles as the strings
 * "Infinity", "-Infinity" and "NaN"; every other value must arrive as a proto NUMBER_VALUE.
 */
static double valueProtoToFloat64(com.google.protobuf.Value proto) {
  if (proto.getKindCase() == KindCase.STRING_VALUE) {
    String encoded = proto.getStringValue();
    if (encoded.equals("-Infinity")) {
      return Double.NEGATIVE_INFINITY;
    }
    if (encoded.equals("Infinity")) {
      return Double.POSITIVE_INFINITY;
    }
    if (encoded.equals("NaN")) {
      return Double.NaN;
    }
    // Any other string is invalid and is reported by the check below.
  }
  if (proto.getKindCase() != KindCase.NUMBER_VALUE) {
    throw newSpannerException(
        ErrorCode.INTERNAL,
        "Invalid value for column type "
            + Type.float64()
            + " expected NUMBER_VALUE or STRING_VALUE with value one of"
            + " \"Infinity\", \"-Infinity\", or \"NaN\" but was "
            + proto.getKindCase()
            + (proto.getKindCase() == KindCase.STRING_VALUE
                ? " with value \"" + proto.getStringValue() + "\""
                : ""));
  }
  return proto.getNumberValue();
}
// Always throws; the NullPointerException return type only exists so call sites can
// write "throw throwNotNull(...)" and satisfy definite-return analysis.
static NullPointerException throwNotNull(int columnIndex) {
  String message =
      "Cannot call array getter for column " + columnIndex + " with null elements";
  throw new NullPointerException(message);
}
/**
 * Memory-optimized base class for {@code ARRAY<INT64>} and {@code ARRAY<FLOAT64>} types. Both of
 * these involve conversions from the type yielded by JSON parsing, which are {@code String} and
 * {@code BigDecimal} respectively. Rather than construct new wrapper objects for each array
 * element, we use primitive arrays and a {@code BitSet} to track nulls.
 *
 * @param <T> boxed element type exposed through the List interface
 * @param <A> primitive array type used for storage (e.g. long[], double[])
 */
abstract static class PrimitiveArray<T, A> extends AbstractList<T> {
  private final A data; // primitive storage; slots for null elements are left at default
  private final BitSet nulls; // bit i set => element i is SQL NULL
  private final int size;

  PrimitiveArray(ListValue protoList) {
    this.size = protoList.getValuesCount();
    A data = newArray(size);
    BitSet nulls = new BitSet(size);
    for (int i = 0; i < protoList.getValuesCount(); ++i) {
      if (protoList.getValues(i).getKindCase() == KindCase.NULL_VALUE) {
        nulls.set(i);
      } else {
        setProto(data, i, protoList.getValues(i));
      }
    }
    this.data = data;
    this.nulls = nulls;
  }

  PrimitiveArray(A data, BitSet nulls, int size) {
    this.data = data;
    this.nulls = nulls;
    this.size = size;
  }

  /** Allocates the backing primitive array of the given length. */
  abstract A newArray(int size);

  /** Decodes protoValue and stores it at array[i]. */
  abstract void setProto(A array, int i, com.google.protobuf.Value protoValue);

  /** Boxes array[i]. */
  abstract T get(A array, int i);

  @Override
  public T get(int index) {
    if (index < 0 || index >= size) {
      throw new ArrayIndexOutOfBoundsException("index=" + index + " size=" + size);
    }
    return nulls.get(index) ? null : get(data, index);
  }

  @Override
  public int size() {
    return size;
  }

  /**
   * Returns a defensive copy of the backing primitive array.
   *
   * @throws NullPointerException (via throwNotNull) if any element is NULL, since a
   *     primitive array cannot represent it; columnIndex is only used for the message
   */
  A toPrimitiveArray(int columnIndex) {
    // BitSet.length() is 1 + index of the highest set bit, so > 0 means "some null exists".
    if (nulls.length() > 0) {
      throw throwNotNull(columnIndex);
    }
    A r = newArray(size);
    System.arraycopy(data, 0, r, 0, size);
    return r;
  }
}
/** {@code ARRAY<INT64>} backed by a primitive {@code long[]} plus a null-tracking BitSet. */
static class Int64Array extends PrimitiveArray<Long, long[]> {
  Int64Array(ListValue protoList) {
    super(protoList);
  }

  Int64Array(long[] data, BitSet nulls) {
    super(data, nulls, data.length);
  }

  @Override
  long[] newArray(int size) {
    return new long[size];
  }

  @Override
  void setProto(long[] array, int i, com.google.protobuf.Value protoValue) {
    // INT64 values arrive as decimal strings to avoid JSON double precision loss.
    array[i] = Long.parseLong(protoValue.getStringValue());
  }

  @Override
  Long get(long[] array, int i) {
    return Long.valueOf(array[i]);
  }
}
/** {@code ARRAY<FLOAT64>} backed by a primitive {@code double[]} plus a null-tracking BitSet. */
static class Float64Array extends PrimitiveArray<Double, double[]> {
  Float64Array(ListValue protoList) {
    super(protoList);
  }

  Float64Array(double[] data, BitSet nulls) {
    super(data, nulls, data.length);
  }

  @Override
  double[] newArray(int size) {
    return new double[size];
  }

  @Override
  void setProto(double[] array, int i, com.google.protobuf.Value protoValue) {
    // Delegates kind checking and NaN/Infinity string decoding to valueProtoToFloat64.
    array[i] = valueProtoToFloat64(protoValue);
  }

  @Override
  Double get(double[] array, int i) {
    return Double.valueOf(array[i]);
  }
}
// All typed accessors below delegate to the row the cursor is currently
// positioned on; subclasses supply that row via currRow().

/** Returns the GrpcStruct for the row the cursor is currently positioned on. */
protected abstract GrpcStruct currRow();

@Override
public Struct getCurrentRowAsStruct() {
  // immutableCopy() detaches the returned Struct from the live cursor row.
  return currRow().immutableCopy();
}

@Override
protected boolean getBooleanInternal(int columnIndex) {
  return currRow().getBooleanInternal(columnIndex);
}

@Override
protected long getLongInternal(int columnIndex) {
  return currRow().getLongInternal(columnIndex);
}

@Override
protected double getDoubleInternal(int columnIndex) {
  return currRow().getDoubleInternal(columnIndex);
}

@Override
protected BigDecimal getBigDecimalInternal(int columnIndex) {
  return currRow().getBigDecimalInternal(columnIndex);
}

@Override
protected String getStringInternal(int columnIndex) {
  return currRow().getStringInternal(columnIndex);
}

@Override
protected String getJsonInternal(int columnIndex) {
  return currRow().getJsonInternal(columnIndex);
}

@Override
protected ByteArray getBytesInternal(int columnIndex) {
  return currRow().getBytesInternal(columnIndex);
}

@Override
protected Timestamp getTimestampInternal(int columnIndex) {
  return currRow().getTimestampInternal(columnIndex);
}

@Override
protected Date getDateInternal(int columnIndex) {
  return currRow().getDateInternal(columnIndex);
}

@Override
protected Value getValueInternal(int columnIndex) {
  return currRow().getValueInternal(columnIndex);
}

// Array/list variants: the *ArrayInternal accessors return primitive arrays,
// the *ListInternal accessors return boxed lists.

@Override
protected boolean[] getBooleanArrayInternal(int columnIndex) {
  return currRow().getBooleanArrayInternal(columnIndex);
}

@Override
protected List<Boolean> getBooleanListInternal(int columnIndex) {
  return currRow().getBooleanListInternal(columnIndex);
}

@Override
protected long[] getLongArrayInternal(int columnIndex) {
  return currRow().getLongArrayInternal(columnIndex);
}

@Override
protected List<Long> getLongListInternal(int columnIndex) {
  return currRow().getLongListInternal(columnIndex);
}

@Override
protected double[] getDoubleArrayInternal(int columnIndex) {
  return currRow().getDoubleArrayInternal(columnIndex);
}

@Override
protected List<Double> getDoubleListInternal(int columnIndex) {
  return currRow().getDoubleListInternal(columnIndex);
}

@Override
protected List<BigDecimal> getBigDecimalListInternal(int columnIndex) {
  return currRow().getBigDecimalListInternal(columnIndex);
}

@Override
protected List<String> getStringListInternal(int columnIndex) {
  return currRow().getStringListInternal(columnIndex);
}

@Override
protected List<String> getJsonListInternal(int columnIndex) {
  return currRow().getJsonListInternal(columnIndex);
}

@Override
protected List<ByteArray> getBytesListInternal(int columnIndex) {
  return currRow().getBytesListInternal(columnIndex);
}

@Override
protected List<Timestamp> getTimestampListInternal(int columnIndex) {
  return currRow().getTimestampListInternal(columnIndex);
}

@Override
protected List<Date> getDateListInternal(int columnIndex) {
  return currRow().getDateListInternal(columnIndex);
}

@Override
protected List<Struct> getStructListInternal(int columnIndex) {
  return currRow().getStructListInternal(columnIndex);
}

@Override
public boolean isNull(int columnIndex) {
  return currRow().isNull(columnIndex);
}
}
| apache-2.0 |
JonathanWalsh/Granule-Closure-Compiler | test/com/google/javascript/jscomp/SpecializeModuleTest.java | 16009 | /*
* Copyright 2010 The Closure Compiler Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.javascript.jscomp;
import com.google.common.collect.ImmutableSet;
import com.google.javascript.jscomp.NodeTraversal.AbstractPostOrderCallback;
import com.google.javascript.jscomp.SpecializeModule.SpecializationState;
import com.google.javascript.rhino.Node;
/**
* Tests for {@link SpecializeModule}.
*
* @author dcc@google.com (Devin Coughlin)
*/
public class SpecializeModuleTest extends CompilerTestCase {

  private static final String SHARED_EXTERNS = "var alert = function() {}";

  public SpecializeModuleTest() {
    super(SHARED_EXTERNS);
  }

  // Factories for the three passes that SpecializeModule applies to the
  // specialized (initial) module: function inlining, unused prototype
  // property removal, and prototype method devirtualization.
  private PassFactory inlineFunctions =
      new PassFactory("inlineFunctions", true) {
        @Override
        protected CompilerPass createInternal(AbstractCompiler compiler) {
          return new InlineFunctions(compiler,
              compiler.getUniqueNameIdSupplier(), true, false, true);
        }
      };

  private PassFactory removeUnusedPrototypeProperties =
      new PassFactory("removeUnusedPrototypeProperties", true) {
        @Override
        protected CompilerPass createInternal(AbstractCompiler compiler) {
          return new RemoveUnusedPrototypeProperties(compiler, false, false);
        }
      };

  private PassFactory devirtualizePrototypeMethods =
      new PassFactory("devirtualizePrototypeMethods", true) {
        @Override
        protected CompilerPass createInternal(AbstractCompiler compiler) {
          return new DevirtualizePrototypeMethods(compiler);
        }
      };

  @Override
  protected CompilerPass getProcessor(final Compiler compiler) {
    final SpecializeModule specializeModule = new SpecializeModule(compiler,
        devirtualizePrototypeMethods, inlineFunctions,
        removeUnusedPrototypeProperties);

    return new CompilerPass() {
      public void process(Node externs, Node root) {
        specializeModule.process(externs, root);

        /* Make sure variables are declared before used */
        new VarCheck(compiler).process(externs, root);
      }
    };
  }

  @Override
  public void setUp() throws Exception {
    super.setUp();
    enableNormalize();
  }

  public void testSpecializeInline() {
    JSModule[] modules = createModuleStar(
        // m1
        /* Recursion in A() prevents inline of A*/
        "var A = function() {alert(B());A()};" +
        "var B = function() {return 6};" +
        "A();",
        // m2
        "A();" +
        "B();" +
        "B = function() {return 7};" +
        "A();" +
        "B();"
    );

    test(modules, new String[] {
        // m1
        "var A = function() {alert(6);A()};" + /* Specialized A */
        "A();" +
        "var B;",
        // m2
        "A = function() {alert(B());A()};" + /* Unspecialized A */
        "B = function() {return 6};" + /* Removed from m1, so add to m2 */
        "A();" +
        "B();" +
        "B = function() {return 7};" +
        "A();" +
        "B();"
    });
  }

  public void testSpecializeCascadedInline() {
    JSModule[] modules = createModuleStar(
        // m1
        /* Recursion in A() prevents inline of A*/
        "var A = function() {alert(B());A()};" +
        "var B = function() {return C()};" +
        "var C = function() {return 6};" +
        "A();",
        // m2
        "B = function() {return 7};" +
        "A();");

    test(modules, new String[] {
        // m1
        "var A = function() {alert(6);A()};" + /* Specialized A */
        "A();" +
        "var B, C;",
        // m2
        "A = function() {alert(B());A()};" + /* Unspecialized A */
        "B = function() {return C()};" + /* Removed from m1, so add to m2 */
        "C = function() {return 6};" + /* Removed from m1, so add to m2 */
        "B = function() {return 7};" +
        "A();"
    });
  }

  public void testSpecializeInlineWithMultipleDependents() {
    JSModule[] modules = createModuleStar(
        // m1
        /* Recursion in A() prevents inline of A*/
        "var A = function() {alert(B());A()};" +
        "var B = function() {return 6};" +
        "A();",
        // m2
        "B = function() {return 7};" +
        "A();",
        // m3
        "A();"
    );

    test(modules, new String[] {
        // m1
        "var A = function() {alert(6);A()};" + /* Specialized A */
        "A();" +
        "var B;",
        // m2
        "A = function() {alert(B());A()};" + /* Unspecialized A */
        "B = function() {return 6};" + /* Removed from m1, so add to m2 */
        "B = function() {return 7};" +
        "A();",
        "A = function() {alert(B());A()};" + /* Unspecialized A */
        "B = function() {return 6};" + /* Removed from m1, so add to m2 */
        "A();",
    });
  }

  public void testSpecializeInlineWithNamespaces() {
    JSModule[] modules = createModuleStar(
        // m1
        "var ns = {};" +
        /* Recursion in A() prevents inline of A*/
        "ns.A = function() {alert(B());ns.A()};" +
        "var B = function() {return 6};" +
        "ns.A();",
        // m2
        "B = function() {return 7};" +
        "ns.A();");

    test(modules, new String[] {
        // m1
        "var ns = {};" +
        "ns.A = function() {alert(6);ns.A()};" + /* Specialized A */
        "ns.A();" +
        "var B;",
        // m2
        "ns.A = function() {alert(B());ns.A()};" + /* Unspecialized A */
        "B = function() {return 6};" + /* Removed from m1, so add to m2 */
        "B = function() {return 7};" +
        "ns.A();"
    });
  }

  public void testSpecializeInlineWithRegularFunctions() {
    JSModule[] modules = createModuleStar(
        // m1
        /* Recursion in A() prevents inline of A*/
        "function A() {alert(B());A()}" +
        "function B() {return 6}" +
        "A();",
        // m2
        "B = function() {return 7};" +
        "A();");

    test(modules, new String[] {
        // m1
        "function A() {alert(6);A()}" + /* Specialized A */
        "A();" +
        "var B;",
        // m2
        "A = function() {alert(B());A()};" + /* Unspecialized A */
        "B = function() {return 6};" + /* Removed from m1, so add to m2 */
        /* Start of original m2 */
        "B = function() {return 7};" +
        "A();"
    });
  }

  public void testDontSpecializeLocalNonAnonymousFunctions() {
    /* normalize result, but not expected */
    enableNormalize(false);

    JSModule[] modules = createModuleStar(
        // m1
        "(function(){var noSpecialize = " +
        "function() {alert(6)};noSpecialize()})()",
        // m2
        "");

    test(modules, new String[] {
        // m1
        "(function(){var noSpecialize = " +
        "function() {alert(6)};noSpecialize()})()",
        // m2
        ""
    });
  }

  public void testAddDummyVarsForRemovedFunctions() {
    JSModule[] modules = createModuleStar(
        // m1
        /* Recursion in A() prevents inline of A*/
        "var A = function() {alert(B() + C());A()};" +
        "var B = function() {return 6};" +
        "var C = function() {return 8};" +
        "A();",
        // m2
        "" +
        "A();");

    test(modules, new String[] {
        // m1
        "var A = function() {alert(6 + 8);A()};" + /* Specialized A */
        "A();" +
        "var B, C;",
        // m2
        "A = function() {alert(B() + C());A()};" + /* Unspecialized A */
        "B = function() {return 6};" + /* Removed from m1, so add to m2 */
        "C = function() {return 8};" + /* Removed from m1, so add to m2 */
        "A();"
    });
  }

  public void testSpecializeRemoveUnusedProperties() {
    JSModule[] modules = createModuleStar(
        // m1
        /* Recursion in A() prevents inline of A*/
        "var Foo = function(){};" + /* constructor */
        "Foo.prototype.a = function() {this.a()};" +
        "Foo.prototype.b = function() {return 6};" +
        "Foo.prototype.c = function() {return 7};" +
        "var aliasA = Foo.prototype.a;" + // Prevents devirtualization of a
        "var x = new Foo();" +
        "x.a();",
        // m2
        "");

    test(modules, new String[] {
        // m1
        "var Foo = function(){};" + /* constructor */
        "Foo.prototype.a = function() {this.a()};" +
        "var aliasA = Foo.prototype.a;" +
        "var x = new Foo();" +
        "x.a();",
        // m2
        "Foo.prototype.b = function() {return 6};" +
        "Foo.prototype.c = function() {return 7};"
    });
  }

  public void testDontSpecializeAliasedFunctions_inline() {
    JSModule[] modules = createModuleStar(
        // m1
        /* Recursion in A() prevents inline of A*/
        "function A() {alert(B());A()}" +
        "function B() {return 6}" +
        "var aliasA = A;" +
        "A();",
        // m2
        "B = function() {return 7};" +
        "B();");

    test(modules, new String[] {
        // m1
        /* Recursion in A() prevents inline of A*/
        "function A() {alert(B());A()}" +
        "function B() {return 6}" +
        "var aliasA = A;" +
        "A();",
        // m2
        "B = function() {return 7};" +
        "B();"
    });
  }

  public void testDontSpecializeAliasedFunctions_remove_unused_properties() {
    JSModule[] modules = createModuleStar(
        // m1
        "var Foo = function(){};" + /* constructor */
        "Foo.prototype.a = function() {this.a()};" +
        "Foo.prototype.b = function() {return 6};" +
        "var aliasB = Foo.prototype.b;" +
        "Foo.prototype.c = function() {return 7};" +
        "Foo.prototype.d = function() {return 7};" +
        "var aliasA = Foo.prototype.a;" + // Prevents devirtualization of a
        "var x = new Foo();" +
        "x.a();" +
        "var aliasC = (new Foo).c",
        // m2
        "");

    test(modules, new String[] {
        // m1
        "var Foo = function(){};" + /* constructor */
        "Foo.prototype.a = function() {this.a()};" +
        "Foo.prototype.b = function() {return 6};" +
        "var aliasB = Foo.prototype.b;" +
        "Foo.prototype.c = function() {return 7};" +
        "var aliasA = Foo.prototype.a;" + // Prevents devirtualization of a
        "var x = new Foo();" +
        "x.a();" +
        "var aliasC = (new Foo).c",
        // m2
        "Foo.prototype.d = function() {return 7};"
    });
  }

  public void testSpecializeDevirtualizePrototypeMethods() {
    JSModule[] modules = createModuleStar(
        // m1
        "/** @constructor */" +
        "var Foo = function(){};" + /* constructor */
        "Foo.prototype.a = function() {this.a();return 7};" +
        "Foo.prototype.b = function() {this.a()};" +
        "var x = new Foo();" +
        "x.a();",
        // m2
        "");

    test(modules, new String[] {
        // m1
        "var Foo = function(){};" + /* constructor */
        "var JSCompiler_StaticMethods_a =" +
        "function(JSCompiler_StaticMethods_a$self) {" +
        "JSCompiler_StaticMethods_a(JSCompiler_StaticMethods_a$self);" +
        "return 7" +
        "};" +
        "var x = new Foo();" +
        "JSCompiler_StaticMethods_a(x);",
        // m2
        "Foo.prototype.a = function() {this.a();return 7};" +
        "Foo.prototype.b = function() {this.a()};"
    });
  }

  public void testSpecializeDevirtualizePrototypeMethodsWithInline() {
    JSModule[] modules = createModuleStar(
        // m1
        "/** @constructor */" +
        "var Foo = function(){};" + /* constructor */
        "Foo.prototype.a = function() {return 7};" +
        "var x = new Foo();" +
        "var z = x.a();",
        // m2
        "");

    test(modules, new String[] {
        // m1
        "var Foo = function(){};" + /* constructor */
        "var x = new Foo();" +
        "var z = 7;",
        // m2
        "Foo.prototype.a = function() {return 7};"
    });
  }

  /**
   * Tests for {@link SpecializeModule.SpecializationState}.
   */
  public static class SpecializeModuleSpecializationStateTest
      extends CompilerTestCase {

    Compiler lastCompiler;

    SpecializationState lastState;

    @Override
    public CompilerPass getProcessor(final Compiler compiler) {
      lastCompiler = compiler;

      return new CompilerPass() {
        public void process(Node externs, Node root) {
          SimpleDefinitionFinder defFinder =
              new SimpleDefinitionFinder(compiler);

          defFinder.process(externs, root);

          SimpleFunctionAliasAnalysis functionAliasAnalysis =
              new SimpleFunctionAliasAnalysis();

          functionAliasAnalysis.analyze(defFinder);

          lastState = new SpecializationState(functionAliasAnalysis);
        }
      };
    }

    public void testRemovedFunctions() {
      testSame("function F(){}\nvar G = function(a){};");
      assertEquals(ImmutableSet.of(), lastState.getRemovedFunctions());

      Node functionF = findFunction("F");
      lastState.reportRemovedFunction(functionF, functionF.getParent());
      assertEquals(ImmutableSet.of(functionF), lastState.getRemovedFunctions());

      // Fixed copy-paste bug: this previously looked up "F" again, so the
      // same node was reported twice and the two-element assertion below
      // only ever saw a one-element set.
      Node functionG = findFunction("G");
      lastState.reportRemovedFunction(functionG, functionG.getParent());
      assertEquals(ImmutableSet.of(functionF, functionG),
          lastState.getRemovedFunctions());

      assertEquals(ImmutableSet.of(), lastState.getSpecializedFunctions());
    }

    public void testSpecializedFunctions() {
      testSame("function F(){}\nvar G = function(a){};");
      assertEquals(ImmutableSet.of(), lastState.getSpecializedFunctions());

      Node functionF = findFunction("F");
      lastState.reportSpecializedFunction(functionF);
      assertEquals(ImmutableSet.of(functionF),
          lastState.getSpecializedFunctions());

      // Fixed copy-paste bug: previously looked up "F" instead of "G".
      Node functionG = findFunction("G");
      lastState.reportSpecializedFunction(functionG);
      assertEquals(ImmutableSet.of(functionF, functionG),
          lastState.getSpecializedFunctions());

      assertEquals(ImmutableSet.of(), lastState.getRemovedFunctions());
    }

    public void testCanFixupFunction() {
      testSame("function F(){}\n" +
          "var G = function(a){};\n" +
          "var ns = {};" +
          "ns.H = function(){};" +
          "var ns2 = {I : function anon1(){}};" +
          "(function anon2(){})();");

      assertTrue(lastState.canFixupFunction(findFunction("F")));
      assertTrue(lastState.canFixupFunction(findFunction("G")));
      assertTrue(lastState.canFixupFunction(findFunction("ns.H")));
      assertFalse(lastState.canFixupFunction(findFunction("anon1")));
      assertFalse(lastState.canFixupFunction(findFunction("anon2")));

      // Can't guarantee safe fixup for aliased functions
      testSame("function A(){}\n" +
          "var aliasA = A;\n");

      assertFalse(lastState.canFixupFunction(findFunction("A")));
    }

    /** Finds the function node with the given name in the last compile. */
    private Node findFunction(String name) {
      FunctionFinder f = new FunctionFinder(name);
      new NodeTraversal(lastCompiler, f).traverse(lastCompiler.jsRoot);
      assertNotNull("Couldn't find " + name, f.found);
      return f.found;
    }

    /**
     * Quick Traversal to find a given function in the AST.
     */
    private class FunctionFinder extends AbstractPostOrderCallback {
      Node found = null;
      final String target;

      FunctionFinder(String target) {
        this.target = target;
      }

      public void visit(NodeTraversal t, Node n, Node parent) {
        if (NodeUtil.isFunction(n)
            && target.equals(NodeUtil.getFunctionName(n))) {
          found = n;
        }
      }
    }
  }
}
| apache-2.0 |
evant/timesync | lib/src/main/java/me/tatarka/timesync/lib/TimeSyncProxy.java | 2035 | package me.tatarka.timesync.lib;
import android.content.Context;
import java.util.Arrays;
/**
* A class for interacting with a {@link TimeSync}. You can get and set it's configuration, and
* force it to sync immediately. Ta get an instance of the class for a given {@link TimeSync}, use
* {@link TimeSync#get(android.content.Context, Class)}.
*/
public final class TimeSyncProxy {
    private final Context context;
    private final String name;
    private final TimeSync listener;

    TimeSyncProxy(Context context, String name) {
        this.context = context;
        this.name = name;
        this.listener = TimeSyncParser.parseListeners(context).get(name);
    }

    /**
     * Triggers a sync right now, e.g. in direct response to a user action.
     * Use sparingly — frequent forced syncs defeat the scheduling this
     * library provides.
     */
    public void sync() {
        TimeSyncService.sync(context, name);
    }

    /**
     * Schedules a sync in the near future at a per-device randomized time.
     * Useful when reacting to a server push (e.g. GCM) so that all devices
     * do not hit the server simultaneously.
     */
    public void syncInexact() {
        TimeSyncService.syncInexact(context, name);
    }

    /**
     * Returns the current configuration of the underlying {@link TimeSync}.
     *
     * @see TimeSync.Config
     */
    public TimeSync.Config config() {
        return listener.config();
    }

    /**
     * Applies the given configuration edits and notifies the sync service so
     * the new settings take effect.
     *
     * @param edits the edits to apply
     * @see TimeSync#edit(TimeSync.Edit...)
     */
    public void edit(Iterable<TimeSync.Edit> edits) {
        listener.edit(edits);
        TimeSyncService.update(context, name);
    }

    /**
     * Varargs convenience overload of {@link #edit(Iterable)}.
     *
     * @param edits the edits to apply
     * @see TimeSync#edit(TimeSync.Edit...)
     */
    public void edit(TimeSync.Edit... edits) {
        edit(Arrays.asList(edits));
    }
}
| apache-2.0 |
subchen/jetbrick-template-1x | src/main/java/jetbrick/template/resource/SourceCodeResource.java | 1866 | /**
* jetbrick-template
* http://subchen.github.io/jetbrick-template/
*
* Copyright 2010-2014 Guoqiang Chen. All rights reserved.
* Email: subchen@gmail.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jetbrick.template.resource;
import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.atomic.AtomicLong;
import jetbrick.template.utils.UnsafeByteArrayInputStream;
/**
* 以源码形式存在的资源.
*
* @since 1.1.3
* @author Guoqiang Chen
*/
public class SourceCodeResource extends Resource {
    private static final String ENCODING = "utf-8";

    // Monotonic counter giving each in-memory resource a unique synthetic
    // name. Made private static FINAL (was a mutable static field) so the
    // shared counter reference can never be reassigned.
    private static final AtomicLong INDEX = new AtomicLong();

    // The template source text this resource wraps; immutable.
    private final String source;

    /**
     * Creates an in-memory resource for the given template source. A unique
     * synthetic name is generated per instance.
     *
     * @param source the template source text
     */
    public SourceCodeResource(String source) {
        super("/unknown/file." + INDEX.incrementAndGet(), ENCODING);
        this.source = source;
    }

    @Override
    public String getAbsolutePath() {
        // There is no backing file for in-memory source.
        return "(unknown)";
    }

    @Override
    public long lastModified() {
        // In-memory source never changes, so report a constant timestamp.
        return 0;
    }

    @Override
    public InputStream getInputStream() throws IOException {
        return new UnsafeByteArrayInputStream(source.getBytes(ENCODING));
    }

    @Override
    public char[] getSource() {
        return source.toCharArray();
    }

    @Override
    public char[] getSource(String encoding) {
        // The source is already held as characters, so the requested
        // encoding is deliberately irrelevant here.
        return source.toCharArray();
    }
}
| apache-2.0 |
jim-minter/ose3-demos | git/monster/src/main/java/org/jboss/examples/ticketmonster/model/Venue.java | 4603 | package org.jboss.examples.ticketmonster.model;
import static javax.persistence.CascadeType.ALL;
import static javax.persistence.FetchType.EAGER;
import static javax.persistence.GenerationType.IDENTITY;
import java.io.Serializable;
import java.util.HashSet;
import java.util.Objects;
import java.util.Set;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.persistence.ManyToOne;
import javax.persistence.OneToMany;
import org.hibernate.validator.constraints.NotEmpty;
/**
* <p>
* Represents a single venue
* </p>
*
* @author Shane Bryzak
* @author Pete Muir
*/
/*
* We suppress the warning about not specifying a serialVersionUID, as we are still developing this app, and want the JVM to
* generate the serialVersionUID for us. When we put this app into production, we'll generate and embed the serialVersionUID
*/
@SuppressWarnings("serial")
@Entity
public class Venue implements Serializable {

    /* Declaration of fields */

    /**
     * The synthetic id of the object.
     */
    @Id
    @GeneratedValue(strategy = IDENTITY)
    private Long id;

    /**
     * <p>
     * The name of the event.
     * </p>
     *
     * <p>
     * The name of the event forms its natural identity and cannot be shared between events.
     * </p>
     *
     * <p>
     * The name must not be null and must be one or more characters, the Bean Validation constraint <code>@NotEmpty</code>
     * enforces this.
     * </p>
     */
    @Column(unique = true)
    @NotEmpty
    private String name;

    /**
     * The address of the venue
     */
    private Address address = new Address();

    /**
     * A description of the venue
     */
    private String description;

    /**
     * <p>
     * A set of sections in the venue
     * </p>
     *
     * <p>
     * The <code>@OneToMany<code> JPA mapping establishes this relationship. TODO Explain EAGER fetch.
     * This relationship is bi-directional (a section knows which venue it is part of), and the <code>mappedBy</code>
     * attribute establishes this. We cascade all persistence operations to the set of performances, so, for example if a venue
     * is removed, then all of it's sections will also be removed.
     * </p>
     */
    @OneToMany(cascade = ALL, fetch = EAGER, mappedBy = "venue")
    private Set<Section> sections = new HashSet<Section>();

    /**
     * The capacity of the venue
     */
    private int capacity;

    /**
     * An optional media item to entice punters to the venue. The <code>@ManyToOne</code> establishes the relationship.
     */
    @ManyToOne
    private MediaItem mediaItem;

    /* Boilerplate getters and setters */

    public Long getId() {
        return id;
    }

    public void setId(Long id) {
        this.id = id;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public Address getAddress() {
        return address;
    }

    public void setAddress(Address address) {
        this.address = address;
    }

    public MediaItem getMediaItem() {
        return mediaItem;
    }

    public void setMediaItem(MediaItem description) {
        this.mediaItem = description;
    }

    public String getDescription() {
        return description;
    }

    public void setDescription(String description) {
        this.description = description;
    }

    public Set<Section> getSections() {
        return sections;
    }

    public void setSections(Set<Section> sections) {
        this.sections = sections;
    }

    public int getCapacity() {
        return capacity;
    }

    public void setCapacity(int capacity) {
        this.capacity = capacity;
    }

    /* toString(), equals() and hashCode() for Venue, using the natural identity of the object */

    @Override
    public boolean equals(Object o) {
        if (this == o)
            return true;
        if (o == null || getClass() != o.getClass())
            return false;

        Venue venue = (Venue) o;

        // Objects.equals replaces the hand-rolled null-safe comparison chains.
        return Objects.equals(address, venue.address)
                && Objects.equals(name, venue.name);
    }

    @Override
    public int hashCode() {
        // Consistent with equals(): hashes the same two identity fields.
        return Objects.hash(name, address);
    }

    @Override
    public String toString() {
        return name;
    }
}
| apache-2.0 |
aiyanbo/guava | guava-tests/test/com/google/common/hash/MessageDigestHashFunctionTest.java | 4293 | /*
* Copyright (C) 2011 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.hash;
import com.google.common.base.Charsets;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import junit.framework.TestCase;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
/**
* Tests for the MessageDigestHashFunction.
*
* @author Kurt Alfred Kluever
*/
public class MessageDigestHashFunctionTest extends TestCase {
  // Inputs chosen to cover the empty string, a single char, and a short word.
  private static final ImmutableSet<String> INPUTS = ImmutableSet.of("", "Z", "foobar");

  // From "How Provider Implementations Are Requested and Supplied" from
  // http://docs.oracle.com/javase/6/docs/technotes/guides/security/crypto/CryptoSpec.html
  //   - Some providers may choose to also include alias names.
  //   - For example, the "SHA-1" algorithm might be referred to as "SHA1".
  //   - The algorithm name is not case-sensitive.
  // Maps each provider-accepted algorithm name (including aliases and mixed
  // case) to the Guava HashFunction expected to produce identical output.
  private static final ImmutableMap<String, HashFunction> ALGORITHMS =
      new ImmutableMap.Builder<String, HashFunction>()
          .put("MD5", Hashing.md5())
          .put("SHA", Hashing.sha1()) // Not the official name, but still works
          .put("SHA1", Hashing.sha1()) // Not the official name, but still works
          .put("sHa-1", Hashing.sha1()) // Not the official name, but still works
          .put("SHA-1", Hashing.sha1())
          .put("SHA-256", Hashing.sha256())
          .put("SHA-384", Hashing.sha384())
          .put("SHA-512", Hashing.sha512())
          .build();

  // Every (input, algorithm-name) pair must agree with the JDK MessageDigest.
  public void testHashing() {
    for (String stringToTest : INPUTS) {
      for (String algorithmToTest : ALGORITHMS.keySet()) {
        assertMessageDigestHashing(HashTestUtils.ascii(stringToTest), algorithmToTest);
      }
    }
  }

  // A Hasher must reject further input once hash() has been called.
  public void testPutAfterHash() {
    Hasher sha1 = Hashing.sha1().newHasher();

    assertEquals("2fd4e1c67a2d28fced849ee1bb76e7391b93eb12",
        sha1.putString("The quick brown fox jumps over the lazy dog", Charsets.UTF_8)
            .hash()
            .toString());

    try {
      sha1.putInt(42);
      fail();
    } catch (IllegalStateException expected) {
    }
  }

  // A Hasher must also reject a second hash() call.
  public void testHashTwice() {
    Hasher sha1 = Hashing.sha1().newHasher();

    assertEquals("2fd4e1c67a2d28fced849ee1bb76e7391b93eb12",
        sha1.putString("The quick brown fox jumps over the lazy dog", Charsets.UTF_8)
            .hash()
            .toString());

    try {
      HashCode unused = sha1.hash();
      fail();
    } catch (IllegalStateException expected) {
    }
  }

  public void testToString() {
    assertEquals("Hashing.md5()", Hashing.md5().toString());
    assertEquals("Hashing.sha1()", Hashing.sha1().toString());
    assertEquals("Hashing.sha256()", Hashing.sha256().toString());
    assertEquals("Hashing.sha512()", Hashing.sha512().toString());
  }

  // Checks three things against the JDK's MessageDigest for a given algorithm:
  //   1. the full digest matches Guava's output for the same input;
  //   2. truncating to any byte count from 4 up to the full digest length
  //      yields a prefix of the full digest;
  //   3. requesting more bytes than the digest provides is rejected.
  private static void assertMessageDigestHashing(byte[] input, String algorithmName) {
    try {
      MessageDigest digest = MessageDigest.getInstance(algorithmName);
      assertEquals(
          HashCode.fromBytes(digest.digest(input)),
          ALGORITHMS.get(algorithmName).hashBytes(input));
      for (int bytes = 4; bytes <= digest.getDigestLength(); bytes++) {
        assertEquals(
            HashCode.fromBytes(Arrays.copyOf(digest.digest(input), bytes)),
            new MessageDigestHashFunction(algorithmName, bytes, algorithmName).hashBytes(input));
      }
      try {
        int maxSize = digest.getDigestLength();
        new MessageDigestHashFunction(algorithmName, maxSize + 1, algorithmName);
        fail();
      } catch (IllegalArgumentException expected) {
      }
    } catch (NoSuchAlgorithmException nsae) {
      throw new AssertionError(nsae);
    }
  }
}
| apache-2.0 |
palecur/elasticsearch | core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java | 16875 | /*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.support.master;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.action.support.ThreadedActionListener;
import org.elasticsearch.action.support.replication.ClusterStateCreationUtils;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.NotMasterException;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.DummyTransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.MasterNotDiscoveredException;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.transport.CapturingTransport;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.TransportService;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import static org.elasticsearch.test.ClusterServiceUtils.createClusterService;
import static org.elasticsearch.test.ClusterServiceUtils.setState;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
/**
 * Tests for the master-node action infrastructure ({@code TransportMasterNodeAction}):
 * local execution with and without cluster blocks, retrying until a master is
 * discovered, delegating requests to a remote master, and re-routing after the
 * local master steps down.
 */
public class TransportMasterNodeActionTests extends ESTestCase {
    // Shared by all tests in this class; created in beforeClass, terminated in afterClass.
    private static ThreadPool threadPool;
    private ClusterService clusterService;
    private TransportService transportService;
    // Records outbound requests instead of sending them, so tests can inspect and answer them.
    private CapturingTransport transport;
    private DiscoveryNode localNode;
    private DiscoveryNode remoteNode;
    private DiscoveryNode[] allNodes;

    @BeforeClass
    public static void beforeClass() {
        threadPool = new TestThreadPool("TransportMasterNodeActionTests");
    }

    @Override
    @Before
    public void setUp() throws Exception {
        super.setUp();
        // Wire a capturing transport into a real TransportService so no network is involved.
        transport = new CapturingTransport();
        clusterService = createClusterService(threadPool);
        transportService = new TransportService(clusterService.getSettings(), transport, threadPool);
        transportService.start();
        transportService.acceptIncomingRequests();
        // Both nodes are master-eligible; each test decides which one (if any) is the elected master.
        localNode = new DiscoveryNode("local_node", DummyTransportAddress.INSTANCE, Collections.emptyMap(),
            Collections.singleton(DiscoveryNode.Role.MASTER), Version.CURRENT);
        remoteNode = new DiscoveryNode("remote_node", DummyTransportAddress.INSTANCE, Collections.emptyMap(),
            Collections.singleton(DiscoveryNode.Role.MASTER), Version.CURRENT);
        allNodes = new DiscoveryNode[]{localNode, remoteNode};
    }

    @After
    public void tearDown() throws Exception {
        super.tearDown();
        clusterService.close();
        transportService.close();
    }

    @AfterClass
    public static void afterClass() {
        ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS);
        threadPool = null; // release the reference so it can be collected between test classes
    }

    /** Asserts that the future completed exceptionally and that the cause is an instance of {@code klass}. */
    void assertListenerThrows(String msg, ActionFuture<?> listener, Class<?> klass) throws InterruptedException {
        try {
            listener.get();
            fail(msg);
        } catch (ExecutionException ex) {
            assertThat(ex.getCause(), instanceOf(klass));
        }
    }

    /** Minimal master-node request; never produces validation errors by design. */
    public static class Request extends MasterNodeRequest<Request> {
        @Override
        public ActionRequestValidationException validate() {
            return null;
        }
    }

    /** Minimal response carrying no payload. */
    class Response extends ActionResponse {}

    /** Test action that runs on the calling thread; individual tests override only the hooks they need. */
    class Action extends TransportMasterNodeAction<Request, Response> {
        Action(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) {
            super(settings, actionName, transportService, clusterService, threadPool,
                new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(Settings.EMPTY), Request::new);
        }

        @Override
        protected void doExecute(Task task, final Request request, ActionListener<Response> listener) {
            // remove unneeded threading by wrapping listener with SAME to prevent super.doExecute from wrapping it with LISTENER
            super.doExecute(task, request, new ThreadedActionListener<>(logger, threadPool, ThreadPool.Names.SAME, listener));
        }

        @Override
        protected String executor() {
            // very lightweight operation in memory, no need to fork to a thread
            return ThreadPool.Names.SAME;
        }

        @Override
        protected Response newResponse() {
            return new Response();
        }

        @Override
        protected void masterOperation(Request request, ClusterState state, ActionListener<Response> listener) throws Exception {
            listener.onResponse(new Response()); // default implementation, overridden in specific tests
        }

        @Override
        protected ClusterBlockException checkBlock(Request request, ClusterState state) {
            return null; // default implementation, overridden in specific tests
        }
    }

    // Local node is master: the operation must run locally and propagate success or failure unchanged.
    public void testLocalOperationWithoutBlocks() throws ExecutionException, InterruptedException {
        final boolean masterOperationFailure = randomBoolean();
        Request request = new Request();
        PlainActionFuture<Response> listener = new PlainActionFuture<>();
        final Throwable exception = new Throwable();
        final Response response = new Response();
        setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes));
        new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) {
            // NOTE(review): this overrides the task-aware masterOperation variant of the base class — confirm
            // that variant is the one invoked on this code path.
            @Override
            protected void masterOperation(Task task, Request request, ClusterState state, ActionListener<Response> listener) throws Exception {
                if (masterOperationFailure) {
                    listener.onFailure(exception);
                } else {
                    listener.onResponse(response);
                }
            }
        }.execute(request, listener);
        assertTrue(listener.isDone());
        if (masterOperationFailure) {
            try {
                listener.get();
                fail("Expected exception but returned proper result");
            } catch (ExecutionException ex) {
                assertThat(ex.getCause(), equalTo(exception));
            }
        } else {
            assertThat(listener.get(), equalTo(response));
        }
    }

    // A retryable block must cause waiting (and succeed once the block clears); a non-retryable
    // block must fail immediately with ClusterBlockException.
    public void testLocalOperationWithBlocks() throws ExecutionException, InterruptedException {
        final boolean retryableBlock = randomBoolean();
        final boolean unblockBeforeTimeout = randomBoolean();
        Request request = new Request().masterNodeTimeout(TimeValue.timeValueSeconds(unblockBeforeTimeout ? 60 : 0));
        PlainActionFuture<Response> listener = new PlainActionFuture<>();
        ClusterBlock block = new ClusterBlock(1, "", retryableBlock, true,
            randomFrom(RestStatus.values()), ClusterBlockLevel.ALL);
        ClusterState stateWithBlock = ClusterState.builder(ClusterStateCreationUtils.state(localNode, localNode, allNodes))
            .blocks(ClusterBlocks.builder().addGlobalBlock(block)).build();
        setState(clusterService, stateWithBlock);
        new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) {
            @Override
            protected ClusterBlockException checkBlock(Request request, ClusterState state) {
                Set<ClusterBlock> blocks = state.blocks().global();
                return blocks.isEmpty() ? null : new ClusterBlockException(blocks);
            }
        }.execute(request, listener);
        if (retryableBlock && unblockBeforeTimeout) {
            // Still waiting on the block; publishing an unblocked state must release the listener.
            assertFalse(listener.isDone());
            setState(clusterService, ClusterState.builder(ClusterStateCreationUtils.state(localNode, localNode, allNodes))
                .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).build());
            assertTrue(listener.isDone());
            listener.get();
            return;
        }
        assertTrue(listener.isDone());
        if (retryableBlock) {
            // Retryable block + zero timeout: surfaced as MasterNotDiscoveredException wrapping the block.
            try {
                listener.get();
                fail("Expected exception but returned proper result");
            } catch (ExecutionException ex) {
                assertThat(ex.getCause(), instanceOf(MasterNotDiscoveredException.class));
                assertThat(ex.getCause().getCause(), instanceOf(ClusterBlockException.class));
            }
        } else {
            assertListenerThrows("ClusterBlockException should be thrown", listener, ClusterBlockException.class);
        }
    }

    // localExecute() == true must run the operation locally regardless of who the master is.
    public void testForceLocalOperation() throws ExecutionException, InterruptedException {
        Request request = new Request();
        PlainActionFuture<Response> listener = new PlainActionFuture<>();
        setState(clusterService, ClusterStateCreationUtils.state(localNode, randomFrom(null, localNode, remoteNode), allNodes));
        new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) {
            @Override
            protected boolean localExecute(Request request) {
                return true;
            }
        }.execute(request, listener);
        assertTrue(listener.isDone());
        listener.get();
    }

    // No master and zero timeout: must fail fast with MasterNotDiscoveredException.
    public void testMasterNotAvailable() throws ExecutionException, InterruptedException {
        Request request = new Request().masterNodeTimeout(TimeValue.timeValueSeconds(0));
        setState(clusterService, ClusterStateCreationUtils.state(localNode, null, allNodes));
        PlainActionFuture<Response> listener = new PlainActionFuture<>();
        new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool).execute(request, listener);
        assertTrue(listener.isDone());
        assertListenerThrows("MasterNotDiscoveredException should be thrown", listener, MasterNotDiscoveredException.class);
    }

    // No master with default timeout: request waits and completes once a master is elected.
    public void testMasterBecomesAvailable() throws ExecutionException, InterruptedException {
        Request request = new Request();
        setState(clusterService, ClusterStateCreationUtils.state(localNode, null, allNodes));
        PlainActionFuture<Response> listener = new PlainActionFuture<>();
        new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool).execute(request, listener);
        assertFalse(listener.isDone());
        setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes));
        assertTrue(listener.isDone());
        listener.get();
    }

    // Remote node is master: request must be forwarded to it and its response returned to the caller.
    public void testDelegateToMaster() throws ExecutionException, InterruptedException {
        Request request = new Request();
        setState(clusterService, ClusterStateCreationUtils.state(localNode, remoteNode, allNodes));
        PlainActionFuture<Response> listener = new PlainActionFuture<>();
        new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool).execute(request, listener);
        assertThat(transport.capturedRequests().length, equalTo(1));
        CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0];
        assertTrue(capturedRequest.node.isMasterNode());
        assertThat(capturedRequest.request, equalTo(request));
        assertThat(capturedRequest.action, equalTo("testAction"));
        Response response = new Response();
        transport.handleResponse(capturedRequest.requestId, response);
        assertTrue(listener.isDone());
        assertThat(listener.get(), equalTo(response));
    }

    // Connection failures to the master are retried on the next master change;
    // other remote errors are surfaced directly to the caller.
    public void testDelegateToFailingMaster() throws ExecutionException, InterruptedException {
        boolean failsWithConnectTransportException = randomBoolean();
        Request request = new Request().masterNodeTimeout(TimeValue.timeValueSeconds(failsWithConnectTransportException ? 60 : 0));
        setState(clusterService, ClusterStateCreationUtils.state(localNode, remoteNode, allNodes));
        PlainActionFuture<Response> listener = new PlainActionFuture<>();
        new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool).execute(request, listener);
        assertThat(transport.capturedRequests().length, equalTo(1));
        CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0];
        assertTrue(capturedRequest.node.isMasterNode());
        assertThat(capturedRequest.request, equalTo(request));
        assertThat(capturedRequest.action, equalTo("testAction"));
        if (failsWithConnectTransportException) {
            transport.handleRemoteError(capturedRequest.requestId, new ConnectTransportException(remoteNode, "Fake error"));
            assertFalse(listener.isDone());
            // Local node becomes master: the retried request now succeeds locally.
            setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes));
            assertTrue(listener.isDone());
            listener.get();
        } else {
            Throwable t = new Throwable();
            transport.handleRemoteError(capturedRequest.requestId, t);
            assertTrue(listener.isDone());
            try {
                listener.get();
                fail("Expected exception but returned proper result");
            } catch (ExecutionException ex) {
                assertThat(ex.getCause().getCause(), equalTo(t));
            }
        }
    }

    // Local master steps down mid-operation: the request must be re-routed to the new master.
    public void testMasterFailoverAfterStepDown() throws ExecutionException, InterruptedException {
        Request request = new Request().masterNodeTimeout(TimeValue.timeValueHours(1));
        PlainActionFuture<Response> listener = new PlainActionFuture<>();
        final Response response = new Response();
        setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes));
        new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) {
            @Override
            protected void masterOperation(Request request, ClusterState state, ActionListener<Response> listener) throws Exception {
                // The other node has become master, simulate failures of this node while publishing cluster state through ZenDiscovery
                setState(clusterService, ClusterStateCreationUtils.state(localNode, remoteNode, allNodes));
                Throwable failure = randomBoolean()
                    ? new Discovery.FailedToCommitClusterStateException("Fake error")
                    : new NotMasterException("Fake error");
                listener.onFailure(failure);
            }
        }.execute(request, listener);
        // The step-down failure must trigger a retry, which is forwarded to the new (remote) master.
        assertThat(transport.capturedRequests().length, equalTo(1));
        CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0];
        assertTrue(capturedRequest.node.isMasterNode());
        assertThat(capturedRequest.request, equalTo(request));
        assertThat(capturedRequest.action, equalTo("testAction"));
        transport.handleResponse(capturedRequest.requestId, response);
        assertTrue(listener.isDone());
        assertThat(listener.get(), equalTo(response));
    }
}
| apache-2.0 |
hurricup/intellij-community | platform/platform-api/src/com/intellij/ui/components/JBScrollPane.java | 33437 | /*
* Copyright 2000-2016 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.ui.components;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.util.Key;
import com.intellij.openapi.util.SystemInfo;
import com.intellij.openapi.util.registry.Registry;
import com.intellij.openapi.wm.IdeGlassPane;
import com.intellij.ui.IdeBorderFactory;
import com.intellij.util.ArrayUtil;
import com.intellij.util.ReflectionUtil;
import com.intellij.util.ui.ButtonlessScrollBarUI;
import com.intellij.util.ui.JBInsets;
import com.intellij.util.ui.RegionPainter;
import com.intellij.util.ui.UIUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import javax.swing.*;
import javax.swing.border.Border;
import javax.swing.border.LineBorder;
import javax.swing.plaf.ScrollBarUI;
import javax.swing.plaf.ScrollPaneUI;
import javax.swing.plaf.UIResource;
import javax.swing.plaf.basic.BasicScrollBarUI;
import javax.swing.plaf.basic.BasicScrollPaneUI;
import java.awt.*;
import java.awt.event.InputEvent;
import java.awt.event.MouseEvent;
import java.awt.event.MouseWheelEvent;
import java.awt.event.MouseWheelListener;
import java.lang.reflect.Field;
public class JBScrollPane extends JScrollPane {
/**
* This key is used to specify which colors should use the scroll bars on the pane.
* If a client property is set to {@code true} the bar's brightness
* will be modified according to the view's background.
*
* @see UIUtil#putClientProperty
* @see UIUtil#isUnderDarcula
*/
public static final Key<Boolean> BRIGHTNESS_FROM_VIEW = Key.create("JB_SCROLL_PANE_BRIGHTNESS_FROM_VIEW");
@Deprecated
public static final RegionPainter<Float> THUMB_PAINTER = ScrollPainter.EditorThumb.DEFAULT;
@Deprecated
public static final RegionPainter<Float> THUMB_DARK_PAINTER = ScrollPainter.EditorThumb.DARCULA;
@Deprecated
public static final RegionPainter<Float> MAC_THUMB_PAINTER = ScrollPainter.EditorThumb.Mac.DEFAULT;
@Deprecated
public static final RegionPainter<Float> MAC_THUMB_DARK_PAINTER = ScrollPainter.EditorThumb.Mac.DARCULA;
private static final Logger LOG = Logger.getInstance(JBScrollPane.class);
private int myViewportBorderWidth = -1;
private boolean myHasOverlayScrollbars;
private volatile boolean myBackgroundRequested; // avoid cyclic references
/**
 * Creates an empty scroll pane with a viewport border of the given thickness.
 * Note that corners are intentionally NOT installed by this constructor
 * (it calls {@code init(false)}), unlike the other constructors.
 *
 * @param viewportWidth thickness of the viewport border, in pixels
 */
public JBScrollPane(int viewportWidth) {
    init(false);
    myViewportBorderWidth = viewportWidth;
    updateViewportBorder();
}
/** Creates an empty scroll pane with the default border and corner components. */
public JBScrollPane() {
    init();
}
/**
 * Creates a scroll pane displaying the given view, with the default border and corners.
 *
 * @param view the component shown in the scroll pane's viewport
 */
public JBScrollPane(Component view) {
    super(view);
    init();
}
/**
 * Creates an empty scroll pane with the given scroll bar policies
 * (see {@link ScrollPaneConstants}), default border and corners.
 *
 * @param vsbPolicy vertical scroll bar display policy
 * @param hsbPolicy horizontal scroll bar display policy
 */
public JBScrollPane(int vsbPolicy, int hsbPolicy) {
    super(vsbPolicy, hsbPolicy);
    init();
}
/**
 * Creates a scroll pane displaying the given view with the given scroll bar policies,
 * default border and corners.
 *
 * @param view      the component shown in the scroll pane's viewport
 * @param vsbPolicy vertical scroll bar display policy
 * @param hsbPolicy horizontal scroll bar display policy
 */
public JBScrollPane(Component view, int vsbPolicy, int hsbPolicy) {
    super(view, vsbPolicy, hsbPolicy);
    init();
}
/**
 * Returns the viewport child's background when this pane's own background is unset
 * (or a UIResource) and the "ide.scroll.background.auto" registry option is on.
 * The {@code myBackgroundRequested} flag prevents re-entrant lookups between the
 * pane and its viewport.
 */
@Override
public Color getBackground() {
    Color color = super.getBackground();
    // Auto-background only applies on the EDT with the registry option enabled.
    if (myBackgroundRequested || !EventQueue.isDispatchThread() || !Registry.is("ide.scroll.background.auto")) {
        return color;
    }
    // An explicitly set, non-UIResource background always wins.
    if (isBackgroundSet() && !(color instanceof UIResource)) {
        return color;
    }
    Component child = getViewport();
    if (child == null) {
        return color;
    }
    myBackgroundRequested = true; // guard against cyclic background queries
    try {
        return child.getBackground();
    }
    finally {
        myBackgroundRequested = false;
    }
}
/**
 * Returns the background color of the view component inside the given scroll pane,
 * or {@code null} if the pane, its viewport, or its view is absent.
 */
static Color getViewBackground(JScrollPane pane) {
    JViewport viewport = pane == null ? null : pane.getViewport();
    Component view = viewport == null ? null : viewport.getView();
    return view == null ? null : view.getBackground();
}
/**
 * Finds the scroll pane that contains the given component, accepting either
 * the view itself, its viewport, or the viewport's direct parent chain.
 *
 * @param c a view component, a viewport, or {@code null}
 * @return the enclosing {@link JScrollPane}, or {@code null} if there is none
 */
public static JScrollPane findScrollPane(Component c) {
    if (c == null) return null;
    Component current = c;
    // If we were handed the view itself, step up to its viewport first.
    if (!(current instanceof JViewport)) {
        Container parent = current.getParent();
        if (parent instanceof JViewport) {
            current = parent;
        }
    }
    Component candidate = current.getParent();
    return candidate instanceof JScrollPane ? (JScrollPane)candidate : null;
}
/** Installs the layout manager and the default corner components; used by most constructors. */
private void init() {
    init(true);
}
/**
 * Installs the scroll pane layout: the custom {@link Layout} when the
 * "ide.scroll.new.layout" registry key is enabled, the standard
 * {@link ScrollPaneLayout} otherwise.
 *
 * @param setupCorners whether to also install the default border and corner components
 */
private void init(boolean setupCorners) {
    setLayout(Registry.is("ide.scroll.new.layout") ? new Layout() : new ScrollPaneLayout());
    if (setupCorners) {
        setupCorners();
    }
}
/** Installs the default IDE border and a painted {@link Corner} in each of the four corners. */
protected void setupCorners() {
    setBorder(IdeBorderFactory.createBorder());
    for (String position : new String[]{UPPER_RIGHT_CORNER, UPPER_LEFT_CORNER, LOWER_RIGHT_CORNER, LOWER_LEFT_CORNER}) {
        setCorner(position, new Corner(position));
    }
}
/**
 * Installs the UI and replaces the mouse-wheel listener that
 * {@link BasicScrollPaneUI} registered with a filtering wrapper: wheel events are
 * forwarded only when they are genuine scroll events, wheel scrolling is enabled,
 * and the target scroll bar (horizontal when Shift is down) is visible.
 * The private {@code mouseScrollListener} field is reached via reflection because
 * the base UI exposes no hook for it; on any reflection failure the original
 * listener is left untouched and the problem is only logged.
 */
@Override
public void setUI(ScrollPaneUI ui) {
    super.setUI(ui);
    updateViewportBorder();
    if (ui instanceof BasicScrollPaneUI) {
        try {
            // Grab the listener BasicScrollPaneUI installed for wheel events.
            Field field = BasicScrollPaneUI.class.getDeclaredField("mouseScrollListener");
            field.setAccessible(true);
            Object value = field.get(ui);
            if (value instanceof MouseWheelListener) {
                MouseWheelListener oldListener = (MouseWheelListener)value;
                MouseWheelListener newListener = event -> {
                    if (isScrollEvent(event)) {
                        Object source = event.getSource();
                        if (source instanceof JScrollPane) {
                            JScrollPane pane = (JScrollPane)source;
                            if (pane.isWheelScrollingEnabled()) {
                                // Shift+wheel scrolls horizontally; delegate only if that bar is visible.
                                JScrollBar bar = event.isShiftDown() ? pane.getHorizontalScrollBar() : pane.getVerticalScrollBar();
                                if (bar != null && bar.isVisible()) oldListener.mouseWheelMoved(event);
                            }
                        }
                    }
                };
                field.set(ui, newListener);
                // replace listener if field updated successfully
                removeMouseWheelListener(oldListener);
                addMouseWheelListener(newListener);
            }
        }
        catch (Exception exception) {
            // Reflection failed (e.g. field renamed in a newer JDK): keep default behavior.
            LOG.warn(exception);
        }
    }
}
/**
 * Optimized drawing must be off while any translucent (overlay) scroll bar is visible,
 * so the viewport repaints through it correctly.
 */
@Override
public boolean isOptimizedDrawingEnabled() {
    if (!(getLayout() instanceof Layout)) {
        // Legacy layout path: rely on the flag computed during layout().
        return !myHasOverlayScrollbars;
    }
    return isOptimizedDrawingEnabledFor(getVerticalScrollBar())
           && isOptimizedDrawingEnabledFor(getHorizontalScrollBar());
}
/**
 * Returns {@code false} for visible translucent scroll bars, or {@code true} otherwise.
 * It is needed to repaint translucent scroll bars on viewport repainting.
 */
private static boolean isOptimizedDrawingEnabledFor(JScrollBar bar) {
    if (bar == null) return true;
    return bar.isOpaque() || !bar.isVisible();
}
/**
 * Re-creates the viewport border with the configured thickness.
 * Only replaces the border when the current one is a {@link ViewportBorder};
 * a custom border installed by client code is deliberately left alone.
 * Falls back to thickness 1 when no explicit width was given (-1).
 */
private void updateViewportBorder() {
    if (getViewportBorder() instanceof ViewportBorder) {
        setViewportBorder(new ViewportBorder(myViewportBorderWidth >= 0 ? myViewportBorderWidth : 1));
    }
}
/** Creates a 2-pixel viewport border that paints itself in the view's background color. */
public static ViewportBorder createIndentBorder() {
    return new ViewportBorder(2);
}
/**
 * Creates the vertical scroll bar used by this pane; the returned bar supports
 * event preprocessing by the IDE glass pane.
 * Annotated {@code @NotNull} for consistency with {@link #createHorizontalScrollBar()},
 * which already declares a non-null return.
 */
@NotNull
@Override
public JScrollBar createVerticalScrollBar() {
    return new MyScrollBar(Adjustable.VERTICAL);
}
/**
 * Creates the horizontal scroll bar used by this pane; the returned bar supports
 * event preprocessing by the IDE glass pane.
 */
@NotNull
@Override
public JScrollBar createHorizontalScrollBar() {
    return new MyScrollBar(Adjustable.HORIZONTAL);
}
/** Uses the IDE-specific {@link JBViewport} instead of the plain Swing viewport. */
@Override
protected JViewport createViewport() {
    return new JBViewport();
}
/**
 * Performs the standard scroll pane layout and then, for the legacy
 * {@link ScrollPaneLayout} only, expands the viewport underneath overlay
 * (translucent Mac-style) scroll bars. The custom {@link Layout} handles
 * overlays itself, so this post-processing is skipped for it.
 */
@SuppressWarnings("deprecation")
@Override
public void layout() {
    LayoutManager layout = getLayout();
    ScrollPaneLayout scrollLayout = layout instanceof ScrollPaneLayout ? (ScrollPaneLayout)layout : null;
    // Now we let JScrollPane layout everything as necessary
    super.layout();
    if (layout instanceof Layout) return;
    if (scrollLayout != null) {
        // Now it's time to jump in and expand the viewport so it fits the whole area
        // (taking into consideration corners, headers and other stuff).
        myHasOverlayScrollbars = relayoutScrollbars(
            this, scrollLayout,
            myHasOverlayScrollbars // If last time we did relayouting, we should restore it back.
        );
    }
    else {
        myHasOverlayScrollbars = false;
    }
}
/**
 * Expands the viewport so it also occupies the area under overlay (translucent)
 * scroll bars, and adjusts the scroll bars and headers to the new geometry.
 * Must be called after the regular layout has positioned all children.
 *
 * @param container     the scroll pane whose children are being adjusted
 * @param layout        the legacy ScrollPaneLayout holding the child references
 * @param forceRelayout re-run the adjustment even without overlays, to undo a
 *                      previous expansion when overlays have just disappeared
 * @return {@code true} if any overlay scroll bar is currently present
 */
private boolean relayoutScrollbars(@NotNull JComponent container, @NotNull ScrollPaneLayout layout, boolean forceRelayout) {
    JViewport viewport = layout.getViewport();
    if (viewport == null) return false;
    JScrollBar vsb = layout.getVerticalScrollBar();
    JScrollBar hsb = layout.getHorizontalScrollBar();
    JViewport colHead = layout.getColumnHeader();
    JViewport rowHead = layout.getRowHeader();
    Rectangle viewportBounds = viewport.getBounds();
    boolean extendViewportUnderVScrollbar = vsb != null && shouldExtendViewportUnderScrollbar(vsb);
    boolean extendViewportUnderHScrollbar = hsb != null && shouldExtendViewportUnderScrollbar(hsb);
    boolean hasOverlayScrollbars = extendViewportUnderVScrollbar || extendViewportUnderHScrollbar;
    if (!hasOverlayScrollbars && !forceRelayout) return false;
    // Paint scroll bars above the viewport: viewport to the bottom, bars to the top.
    container.setComponentZOrder(viewport, container.getComponentCount() - 1);
    if (vsb != null) container.setComponentZOrder(vsb, 0);
    if (hsb != null) container.setComponentZOrder(hsb, 0);
    // Stretch the viewport horizontally to cover the vertical bar's column.
    if (extendViewportUnderVScrollbar) {
        int x2 = Math.max(vsb.getX() + vsb.getWidth(), viewportBounds.x + viewportBounds.width);
        viewportBounds.x = Math.min(viewportBounds.x, vsb.getX());
        viewportBounds.width = x2 - viewportBounds.x;
    }
    // Stretch the viewport vertically to cover the horizontal bar's row.
    if (extendViewportUnderHScrollbar) {
        int y2 = Math.max(hsb.getY() + hsb.getHeight(), viewportBounds.y + viewportBounds.height);
        viewportBounds.y = Math.min(viewportBounds.y, hsb.getY());
        viewportBounds.height = y2 - viewportBounds.y;
    }
    if (extendViewportUnderVScrollbar) {
        // Widen the horizontal bar and column header to the new viewport width;
        // the right-hand corners are now covered by the viewport, so hide them.
        if (hsb != null) {
            Rectangle scrollbarBounds = hsb.getBounds();
            scrollbarBounds.width = viewportBounds.x + viewportBounds.width - scrollbarBounds.x;
            hsb.setBounds(scrollbarBounds);
        }
        if (colHead != null) {
            Rectangle headerBounds = colHead.getBounds();
            headerBounds.width = viewportBounds.width;
            colHead.setBounds(headerBounds);
        }
        hideFromView(layout.getCorner(UPPER_RIGHT_CORNER));
        hideFromView(layout.getCorner(LOWER_RIGHT_CORNER));
    }
    if (extendViewportUnderHScrollbar) {
        // Same treatment vertically: grow the vertical bar and row header,
        // hide the bottom corners that the viewport now covers.
        if (vsb != null) {
            Rectangle scrollbarBounds = vsb.getBounds();
            scrollbarBounds.height = viewportBounds.y + viewportBounds.height - scrollbarBounds.y;
            vsb.setBounds(scrollbarBounds);
        }
        if (rowHead != null) {
            Rectangle headerBounds = rowHead.getBounds();
            headerBounds.height = viewportBounds.height;
            rowHead.setBounds(headerBounds);
        }
        hideFromView(layout.getCorner(LOWER_LEFT_CORNER));
        hideFromView(layout.getCorner(LOWER_RIGHT_CORNER));
    }
    viewport.setBounds(viewportBounds);
    return hasOverlayScrollbars;
}
/** Returns {@code true} when the given bar is a visible overlay bar the viewport should slide under. */
private boolean shouldExtendViewportUnderScrollbar(@Nullable JScrollBar scrollbar) {
    return scrollbar != null && scrollbar.isVisible() && isOverlaidScrollbar(scrollbar);
}
/**
 * Returns {@code true} when the given bar is a Mac-style overlay scroll bar:
 * overlay support must be available and the bar's UI must be a
 * {@link ButtonlessScrollBarUI} that does not always show its track.
 */
protected boolean isOverlaidScrollbar(@Nullable JScrollBar scrollbar) {
    if (!ButtonlessScrollBarUI.isMacOverlayScrollbarSupported()) return false;
    ScrollBarUI vsbUI = scrollbar == null ? null : scrollbar.getUI();
    return vsbUI instanceof ButtonlessScrollBarUI && !((ButtonlessScrollBarUI)vsbUI).alwaysShowTrack();
}
/** Moves the component off-screen instead of removing it, effectively hiding it from view. */
private static void hideFromView(Component component) {
    if (component != null) {
        component.setBounds(-10, -10, 1, 1);
    }
}
/**
 * Scroll bar that keeps the IDE-specific UI across look-and-feel updates and
 * lets the IDE glass pane decide whether to preprocess its mouse events.
 */
private class MyScrollBar extends ScrollBar implements IdeGlassPane.TopComponent {
    public MyScrollBar(int orientation) {
        super(orientation);
    }

    @Override
    public void updateUI() {
        ScrollBarUI ui = getUI();
        // Keep the already-installed IDE UI; only (re)install when something else is set.
        if (ui instanceof DefaultScrollBarUI) return;
        setUI(JBScrollBar.createUI(this));
    }

    @Override
    public boolean canBePreprocessed(MouseEvent e) {
        return JBScrollPane.canBePreprocessed(e, this);
    }
}
/**
 * Decides whether the glass pane may preprocess the given mouse event for the bar.
 * Events over the scroll bar's thumb must go straight to the bar (so dragging works),
 * hence this returns {@code false} only when a MOVED/PRESSED event hits the thumb.
 * For {@link BasicScrollBarUI} the (protected) thumb bounds are read via reflection;
 * if that fails, preprocessing is conservatively allowed.
 */
public static boolean canBePreprocessed(MouseEvent e, JScrollBar bar) {
    if (e.getID() == MouseEvent.MOUSE_MOVED || e.getID() == MouseEvent.MOUSE_PRESSED) {
        ScrollBarUI ui = bar.getUI();
        if (ui instanceof BasicScrollBarUI) {
            BasicScrollBarUI bui = (BasicScrollBarUI)ui;
            try {
                Rectangle rect = (Rectangle)ReflectionUtil.getDeclaredMethod(BasicScrollBarUI.class, "getThumbBounds", ArrayUtil.EMPTY_CLASS_ARRAY).invoke(bui);
                Point point = SwingUtilities.convertPoint(e.getComponent(), e.getX(), e.getY(), bar);
                return !rect.contains(point);
            }
            catch (Exception e1) {
                // Reflection failure: fall back to allowing preprocessing.
                return true;
            }
        }
        else if (ui instanceof DefaultScrollBarUI) {
            DefaultScrollBarUI dui = (DefaultScrollBarUI)ui;
            Point point = e.getLocationOnScreen();
            SwingUtilities.convertPointFromScreen(point, bar);
            return !dui.isThumbContains(point.x, point.y);
        }
    }
    return true;
}
/**
 * Corner filler painted in the scroll-pane colors; draws a one-pixel separator
 * line on each edge that faces the viewport (skipped on Mac or when the
 * "ide.scroll.track.border.paint" registry key is off).
 * Fix: position comparison now uses {@code equals} instead of reference {@code ==},
 * which relied on callers always passing the interned {@link ScrollPaneConstants}
 * instances and silently drew nothing for an equal but distinct string.
 */
private static class Corner extends JPanel {
    private final String myPos;

    public Corner(String pos) {
        myPos = pos;
        ScrollColorProducer.setBackground(this);
        ScrollColorProducer.setForeground(this);
    }

    @Override
    protected void paintComponent(Graphics g) {
        g.setColor(getBackground());
        g.fillRect(0, 0, getWidth(), getHeight());
        if (SystemInfo.isMac || !Registry.is("ide.scroll.track.border.paint")) return;
        g.setColor(getForeground());
        int x2 = getWidth() - 1;
        int y2 = getHeight() - 1;
        // Use value equality: callers are expected to pass ScrollPaneConstants keys,
        // but nothing guarantees the exact interned instances.
        if (UPPER_LEFT_CORNER.equals(myPos) || UPPER_RIGHT_CORNER.equals(myPos)) {
            g.drawLine(0, y2, x2, y2); // bottom edge, toward the viewport below
        }
        if (LOWER_LEFT_CORNER.equals(myPos) || LOWER_RIGHT_CORNER.equals(myPos)) {
            g.drawLine(0, 0, x2, 0); // top edge, toward the viewport above
        }
        if (UPPER_LEFT_CORNER.equals(myPos) || LOWER_LEFT_CORNER.equals(myPos)) {
            g.drawLine(x2, 0, x2, y2); // right edge, toward the viewport on the right
        }
        if (UPPER_RIGHT_CORNER.equals(myPos) || LOWER_RIGHT_CORNER.equals(myPos)) {
            g.drawLine(0, 0, 0, y2); // left edge, toward the viewport on the left
        }
    }
}
/**
 * Line border whose color is re-synchronized with the scroll pane's view
 * background on every paint, so the border blends with the content.
 */
private static class ViewportBorder extends LineBorder {
    public ViewportBorder(int thickness) {
        // Color is resolved lazily in paintBorder, hence null here.
        super(null, thickness);
    }

    @Override
    public void paintBorder(Component c, Graphics g, int x, int y, int width, int height) {
        updateColor(c);
        super.paintBorder(c, g, x, y, width, height);
    }

    /** Refreshes {@code lineColor} from the view's current background. */
    private void updateColor(Component c) {
        if (!(c instanceof JScrollPane)) return;
        lineColor = getViewBackground((JScrollPane)c);
    }
}
/**
 * These client properties modify a scroll pane layout.
 * Use the class object as a property key.
 *
 * @see #putClientProperty(Object, Object)
 */
public enum Flip {
    NONE,       // default placement of scroll bars
    VERTICAL,   // horizontal scroll bar moves to the opposite (top) edge
    HORIZONTAL, // vertical scroll bar moves to the opposite edge
    BOTH        // both scroll bars move to their opposite edges
}
/**
 * These client properties show a component position on a scroll pane.
 * It is set by internal layout manager of the scroll pane.
 */
public enum Alignment {
    TOP, LEFT, RIGHT, BOTTOM;

    /**
     * Resolves the alignment of the given component: an explicitly set client
     * property wins; otherwise the component's role within its parent scroll
     * pane is used; finally a stand-alone scroll bar is classified by its
     * orientation. Returns {@code null} when nothing applies.
     */
    public static Alignment get(JComponent component) {
        if (component == null) return null;
        Object property = component.getClientProperty(Alignment.class);
        if (property instanceof Alignment) return (Alignment)property;
        Container parent = component.getParent();
        if (parent instanceof JScrollPane) {
            JScrollPane pane = (JScrollPane)parent;
            if (component == pane.getColumnHeader()) return TOP;
            if (component == pane.getHorizontalScrollBar()) return BOTTOM;
            boolean ltr = pane.getComponentOrientation().isLeftToRight();
            if (component == pane.getVerticalScrollBar()) return ltr ? RIGHT : LEFT;
            if (component == pane.getRowHeader()) return ltr ? LEFT : RIGHT;
            // Not one of the pane's known children: fall through to the scroll bar check.
        }
        // Assume alignment for a scroll bar which is not contained in a scroll pane.
        if (component instanceof JScrollBar) {
            JScrollBar bar = (JScrollBar)component;
            int orientation = bar.getOrientation();
            if (orientation == Adjustable.HORIZONTAL) return BOTTOM;
            if (orientation == Adjustable.VERTICAL) {
                return bar.getComponentOrientation().isLeftToRight() ? RIGHT : LEFT;
            }
        }
        return null;
    }
}
/**
* ScrollPaneLayout implementation that supports
* ScrollBar flipping and non-opaque ScrollBars.
*/
private static class Layout extends ScrollPaneLayout {
private static final Insets EMPTY_INSETS = new Insets(0, 0, 0, 0);
@Override
public void layoutContainer(Container parent) {
JScrollPane pane = (JScrollPane)parent;
// Calculate inner bounds of the scroll pane
Rectangle bounds = new Rectangle(pane.getWidth(), pane.getHeight());
JBInsets.removeFrom(bounds, pane.getInsets());
// Determine positions of scroll bars on the scroll pane
Object property = pane.getClientProperty(Flip.class);
Flip flip = property instanceof Flip ? (Flip)property : Flip.NONE;
boolean hsbOnTop = flip == Flip.BOTH || flip == Flip.VERTICAL;
boolean vsbOnLeft = pane.getComponentOrientation().isLeftToRight()
? flip == Flip.BOTH || flip == Flip.HORIZONTAL
: flip == Flip.NONE || flip == Flip.VERTICAL;
// If there's a visible row header remove the space it needs.
// The row header is treated as if it were fixed width, arbitrary height.
Rectangle rowHeadBounds = new Rectangle(bounds.x, 0, 0, 0);
if (rowHead != null && rowHead.isVisible()) {
rowHeadBounds.width = min(bounds.width, rowHead.getPreferredSize().width);
bounds.width -= rowHeadBounds.width;
if (vsbOnLeft) {
rowHeadBounds.x += bounds.width;
}
else {
bounds.x += rowHeadBounds.width;
}
}
// If there's a visible column header remove the space it needs.
// The column header is treated as if it were fixed height, arbitrary width.
Rectangle colHeadBounds = new Rectangle(0, bounds.y, 0, 0);
if (colHead != null && colHead.isVisible()) {
colHeadBounds.height = min(bounds.height, colHead.getPreferredSize().height);
bounds.height -= colHeadBounds.height;
if (hsbOnTop) {
colHeadBounds.y += bounds.height;
}
else {
bounds.y += colHeadBounds.height;
}
}
// If there's a JScrollPane.viewportBorder, remove the space it occupies
Border border = pane.getViewportBorder();
Insets insets = border == null ? null : border.getBorderInsets(parent);
JBInsets.removeFrom(bounds, insets);
if (insets == null) insets = EMPTY_INSETS;
// At this point:
// colHeadBounds is correct except for its width and x
// rowHeadBounds is correct except for its height and y
// bounds - the space available for the viewport and scroll bars
// Once we're through computing the dimensions of these three parts
// we can go back and set the bounds for the corners and the dimensions of
// colHeadBounds.x, colHeadBounds.width, rowHeadBounds.y, rowHeadBounds.height.
boolean isEmpty = bounds.width < 0 || bounds.height < 0;
Component view = viewport == null ? null : viewport.getView();
Dimension viewPreferredSize = view == null ? new Dimension() : view.getPreferredSize();
if (view instanceof JComponent) JBViewport.fixPreferredSize(viewPreferredSize, (JComponent)view, vsb, hsb);
Dimension viewportExtentSize = viewport == null ? new Dimension() : viewport.toViewCoordinates(bounds.getSize());
// If the view is tracking the viewports width we don't bother with a horizontal scrollbar.
// If the view is tracking the viewports height we don't bother with a vertical scrollbar.
Scrollable scrollable = null;
boolean viewTracksViewportWidth = false;
boolean viewTracksViewportHeight = false;
// Don't bother checking the Scrollable methods if there is no room for the viewport,
// we aren't going to show any scroll bars in this case anyway.
if (!isEmpty && view instanceof Scrollable) {
scrollable = (Scrollable)view;
viewTracksViewportWidth = scrollable.getScrollableTracksViewportWidth();
viewTracksViewportHeight = scrollable.getScrollableTracksViewportHeight();
}
// If there's a vertical scroll bar and we need one, allocate space for it.
// A vertical scroll bar is considered to be fixed width, arbitrary height.
boolean vsbOpaque = false;
boolean vsbNeeded = false;
int vsbPolicy = pane.getVerticalScrollBarPolicy();
if (!isEmpty && vsbPolicy != VERTICAL_SCROLLBAR_NEVER) {
vsbNeeded = vsbPolicy == VERTICAL_SCROLLBAR_ALWAYS
|| !viewTracksViewportHeight && viewPreferredSize.height > viewportExtentSize.height;
}
Rectangle vsbBounds = new Rectangle(0, bounds.y - insets.top, 0, 0);
if (vsb != null) {
if (!SystemInfo.isMac && view instanceof JTable) vsb.setOpaque(true);
vsbOpaque = vsb.isOpaque();
if (vsbNeeded) {
adjustForVSB(bounds, insets, vsbBounds, vsbOpaque, vsbOnLeft);
if (vsbOpaque && viewport != null) {
viewportExtentSize = viewport.toViewCoordinates(bounds.getSize());
}
}
}
// If there's a horizontal scroll bar and we need one, allocate space for it.
// A horizontal scroll bar is considered to be fixed height, arbitrary width.
boolean hsbOpaque = false;
boolean hsbNeeded = false;
int hsbPolicy = pane.getHorizontalScrollBarPolicy();
if (!isEmpty && hsbPolicy != HORIZONTAL_SCROLLBAR_NEVER) {
hsbNeeded = hsbPolicy == HORIZONTAL_SCROLLBAR_ALWAYS
|| !viewTracksViewportWidth && viewPreferredSize.width > viewportExtentSize.width;
}
Rectangle hsbBounds = new Rectangle(bounds.x - insets.left, 0, 0, 0);
if (hsb != null) {
if (!SystemInfo.isMac && view instanceof JTable) hsb.setOpaque(true);
hsbOpaque = hsb.isOpaque();
if (hsbNeeded) {
adjustForHSB(bounds, insets, hsbBounds, hsbOpaque, hsbOnTop);
if (hsbOpaque && viewport != null) {
// If we added the horizontal scrollbar and reduced the vertical space
// we may have to add the vertical scrollbar, if that hasn't been done so already.
if (vsb != null && !vsbNeeded && vsbPolicy != VERTICAL_SCROLLBAR_NEVER) {
viewportExtentSize = viewport.toViewCoordinates(bounds.getSize());
vsbNeeded = viewPreferredSize.height > viewportExtentSize.height;
if (vsbNeeded) adjustForVSB(bounds, insets, vsbBounds, vsbOpaque, vsbOnLeft);
}
}
}
}
// Set the size of the viewport first, and then recheck the Scrollable methods.
// Some components base their return values for the Scrollable methods on the size of the viewport,
// so that if we don't ask after resetting the bounds we may have gotten the wrong answer.
if (viewport != null) {
viewport.setBounds(bounds);
if (scrollable != null && hsbOpaque && vsbOpaque) {
viewTracksViewportWidth = scrollable.getScrollableTracksViewportWidth();
viewTracksViewportHeight = scrollable.getScrollableTracksViewportHeight();
viewportExtentSize = viewport.toViewCoordinates(bounds.getSize());
boolean vsbNeededOld = vsbNeeded;
if (vsb != null && vsbPolicy == VERTICAL_SCROLLBAR_AS_NEEDED) {
boolean vsbNeededNew = !viewTracksViewportHeight && viewPreferredSize.height > viewportExtentSize.height;
if (vsbNeeded != vsbNeededNew) {
vsbNeeded = vsbNeededNew;
if (vsbNeeded) {
adjustForVSB(bounds, insets, vsbBounds, vsbOpaque, vsbOnLeft);
}
else if (vsbOpaque) {
bounds.width += vsbBounds.width;
}
if (vsbOpaque) viewportExtentSize = viewport.toViewCoordinates(bounds.getSize());
}
}
boolean hsbNeededOld = hsbNeeded;
if (hsb != null && hsbPolicy == HORIZONTAL_SCROLLBAR_AS_NEEDED) {
boolean hsbNeededNew = !viewTracksViewportWidth && viewPreferredSize.width > viewportExtentSize.width;
if (hsbNeeded != hsbNeededNew) {
hsbNeeded = hsbNeededNew;
if (hsbNeeded) {
adjustForHSB(bounds, insets, hsbBounds, hsbOpaque, hsbOnTop);
}
else if (hsbOpaque) {
bounds.height += hsbBounds.height;
}
if (hsbOpaque && vsb != null && !vsbNeeded && vsbPolicy != VERTICAL_SCROLLBAR_NEVER) {
viewportExtentSize = viewport.toViewCoordinates(bounds.getSize());
vsbNeeded = viewPreferredSize.height > viewportExtentSize.height;
if (vsbNeeded) adjustForVSB(bounds, insets, vsbBounds, vsbOpaque, vsbOnLeft);
}
}
}
if (hsbNeededOld != hsbNeeded || vsbNeededOld != vsbNeeded) {
viewport.setBounds(bounds);
// You could argue that we should recheck the Scrollable methods again until they stop changing,
// but they might never stop changing, so we stop here and don't do any additional checks.
}
}
}
// Set the bounds of the row header.
rowHeadBounds.y = bounds.y - insets.top;
rowHeadBounds.height = bounds.height + insets.top + insets.bottom;
if (rowHead != null) {
rowHead.setBounds(rowHeadBounds);
rowHead.putClientProperty(Alignment.class, vsbOnLeft ? Alignment.RIGHT : Alignment.LEFT);
}
// Set the bounds of the column header.
colHeadBounds.x = bounds.x - insets.left;
colHeadBounds.width = bounds.width + insets.left + insets.right;
if (colHead != null) {
colHead.setBounds(colHeadBounds);
colHead.putClientProperty(Alignment.class, hsbOnTop ? Alignment.BOTTOM : Alignment.TOP);
}
// Calculate overlaps for translucent scroll bars
int overlapWidth = 0;
int overlapHeight = 0;
if (vsbNeeded && !vsbOpaque && hsbNeeded && !hsbOpaque) {
overlapWidth = vsbBounds.width; // shrink horizontally
//overlapHeight = hsbBounds.height; // shrink vertically
}
// Set the bounds of the vertical scroll bar.
vsbBounds.y = bounds.y - insets.top;
vsbBounds.height = bounds.height + insets.top + insets.bottom;
if (vsb != null) {
vsb.setVisible(vsbNeeded);
if (vsbNeeded) {
if (vsbOpaque && colHead != null && UIManager.getBoolean("ScrollPane.fillUpperCorner")) {
if ((vsbOnLeft ? upperLeft : upperRight) == null) {
// This is used primarily for GTK L&F, which needs to extend
// the vertical scrollbar to fill the upper corner near the column header.
// Note that we skip this step (and use the default behavior)
// if the user has set a custom corner component.
if (!hsbOnTop) vsbBounds.y -= colHeadBounds.height;
vsbBounds.height += colHeadBounds.height;
}
}
int overlapY = !hsbOnTop ? 0 : overlapHeight;
vsb.setBounds(vsbBounds.x, vsbBounds.y + overlapY, vsbBounds.width, vsbBounds.height - overlapHeight);
vsb.putClientProperty(Alignment.class, vsbOnLeft ? Alignment.LEFT : Alignment.RIGHT);
}
// Modify the bounds of the translucent scroll bar.
if (!vsbOpaque) {
if (!vsbOnLeft) vsbBounds.x += vsbBounds.width;
vsbBounds.width = 0;
}
}
// Set the bounds of the horizontal scroll bar.
hsbBounds.x = bounds.x - insets.left;
hsbBounds.width = bounds.width + insets.left + insets.right;
if (hsb != null) {
hsb.setVisible(hsbNeeded);
if (hsbNeeded) {
if (hsbOpaque && rowHead != null && UIManager.getBoolean("ScrollPane.fillLowerCorner")) {
if ((vsbOnLeft ? lowerRight : lowerLeft) == null) {
// This is used primarily for GTK L&F, which needs to extend
// the horizontal scrollbar to fill the lower corner near the row header.
// Note that we skip this step (and use the default behavior)
// if the user has set a custom corner component.
if (!vsbOnLeft) hsbBounds.x -= rowHeadBounds.width;
hsbBounds.width += rowHeadBounds.width;
}
}
int overlapX = !vsbOnLeft ? 0 : overlapWidth;
hsb.setBounds(hsbBounds.x + overlapX, hsbBounds.y, hsbBounds.width - overlapWidth, hsbBounds.height);
hsb.putClientProperty(Alignment.class, hsbOnTop ? Alignment.TOP : Alignment.BOTTOM);
}
// Modify the bounds of the translucent scroll bar.
if (!hsbOpaque) {
if (!hsbOnTop) hsbBounds.y += hsbBounds.height;
hsbBounds.height = 0;
}
}
// Set the bounds of the corners.
if (lowerLeft != null) {
lowerLeft.setBounds(vsbOnLeft ? vsbBounds.x : rowHeadBounds.x,
hsbOnTop ? colHeadBounds.y : hsbBounds.y,
vsbOnLeft ? vsbBounds.width : rowHeadBounds.width,
hsbOnTop ? colHeadBounds.height : hsbBounds.height);
}
if (lowerRight != null) {
lowerRight.setBounds(vsbOnLeft ? rowHeadBounds.x : vsbBounds.x,
hsbOnTop ? colHeadBounds.y : hsbBounds.y,
vsbOnLeft ? rowHeadBounds.width : vsbBounds.width,
hsbOnTop ? colHeadBounds.height : hsbBounds.height);
}
if (upperLeft != null) {
upperLeft.setBounds(vsbOnLeft ? vsbBounds.x : rowHeadBounds.x,
hsbOnTop ? hsbBounds.y : colHeadBounds.y,
vsbOnLeft ? vsbBounds.width : rowHeadBounds.width,
hsbOnTop ? hsbBounds.height : colHeadBounds.height);
}
if (upperRight != null) {
upperRight.setBounds(vsbOnLeft ? rowHeadBounds.x : vsbBounds.x,
hsbOnTop ? hsbBounds.y : colHeadBounds.y,
vsbOnLeft ? rowHeadBounds.width : vsbBounds.width,
hsbOnTop ? hsbBounds.height : colHeadBounds.height);
}
if (!vsbOpaque && vsbNeeded || !hsbOpaque && hsbNeeded) {
fixComponentZOrder(vsb, 0);
fixComponentZOrder(viewport, -1);
}
}
/**
 * Moves {@code component} to the given z-order position within its parent
 * container. A negative {@code index} counts from the end of the parent's
 * child list (e.g. {@code -1} means the last position).
 *
 * @param component the component to reposition; ignored when {@code null}
 * @param index     target z-order index; negative values are relative to the end
 */
private static void fixComponentZOrder(Component component, int index) {
    if (component == null) return;
    Container parent = component.getParent();
    // Guard against a component that has not been added to a container yet;
    // the previous code dereferenced parent unconditionally and would NPE here.
    if (parent == null) return;
    synchronized (parent.getTreeLock()) {
        if (index < 0) index += parent.getComponentCount();
        parent.setComponentZOrder(component, index);
    }
}
/**
 * Reserves horizontal space for the vertical scroll bar and positions it on
 * the requested side of the viewport bounds. When the bar is opaque the
 * viewport bounds shrink by the bar's width; a disabled bar takes no space.
 */
private void adjustForVSB(Rectangle bounds, Insets insets, Rectangle vsbBounds, boolean vsbOpaque, boolean vsbOnLeft) {
    // Clamp the bar to the available width; a disabled bar is zero wide.
    int barWidth = vsb.isEnabled() ? min(bounds.width, vsb.getPreferredSize().width) : 0;
    vsbBounds.width = barWidth;
    if (vsbOnLeft) {
        vsbBounds.x = bounds.x - insets.left;
        if (vsbOpaque) {
            bounds.x += barWidth;
        }
    } else {
        // Right-hand side: anchor the bar flush with the outer edge.
        vsbBounds.x = bounds.x + bounds.width + insets.right - barWidth;
    }
    if (vsbOpaque) {
        bounds.width -= barWidth;
    }
}
/**
 * Reserves vertical space for the horizontal scroll bar and positions it on
 * the requested edge of the viewport bounds. When the bar is opaque the
 * viewport bounds shrink by the bar's height; a disabled bar takes no space.
 */
private void adjustForHSB(Rectangle bounds, Insets insets, Rectangle hsbBounds, boolean hsbOpaque, boolean hsbOnTop) {
    // Clamp the bar to the available height; a disabled bar is zero tall.
    int barHeight = hsb.isEnabled() ? min(bounds.height, hsb.getPreferredSize().height) : 0;
    hsbBounds.height = barHeight;
    if (hsbOnTop) {
        hsbBounds.y = bounds.y - insets.top;
        if (hsbOpaque) {
            bounds.y += barHeight;
        }
    } else {
        // Bottom edge: anchor the bar flush with the outer edge.
        hsbBounds.y = bounds.y + bounds.height + insets.bottom - barHeight;
    }
    if (hsbOpaque) {
        bounds.height -= barHeight;
    }
}
/** Returns the smaller of the two values, clamped so the result is never negative. */
private static int min(int one, int two) {
    int smaller = (one < two) ? one : two;
    return (smaller < 0) ? 0 : smaller;
}
}
/**
 * Indicates whether the specified event is not consumed and does not have unexpected modifiers.
 * An event qualifies only when it carries an actual wheel rotation and none of the
 * modifiers listed in {@code SCROLL_MODIFIERS} are pressed.
 *
 * @param event a mouse wheel event to check for validity
 * @return {@code true} if the specified event is valid, {@code false} otherwise
 */
public static boolean isScrollEvent(@NotNull MouseWheelEvent event) {
if (event.isConsumed()) return false; // a consumed event was already handled elsewhere
if (event.getWheelRotation() == 0) return false; // must have a real rotation (forward or backward)
// Valid only when none of the disallowed modifiers are present on the event.
return 0 == (SCROLL_MODIFIERS & event.getModifiers());
}
// Bitmask of event modifiers that are still allowed during scrolling.
// SHIFT is excluded from the mask because it switches to horizontal scrolling;
// BUTTON1 is excluded because it is used for selection while scrolling.
private static final int SCROLL_MODIFIERS = // event modifiers allowed during scrolling
        ~InputEvent.SHIFT_MASK & ~InputEvent.SHIFT_DOWN_MASK & // for horizontal scrolling
        ~InputEvent.BUTTON1_MASK & ~InputEvent.BUTTON1_DOWN_MASK; // for selection
}
| apache-2.0 |
gzxishan/OftenPorter | Porter-Core/src/main/java/cn/xishan/oftenporter/porter/core/init/DealSharpProperties.java | 8870 | package cn.xishan.oftenporter.porter.core.init;
import cn.xishan.oftenporter.porter.core.advanced.IConfigData;
import com.alibaba.fastjson.JSON;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* @author Created by https://github.com/CLovinr on 2018-12-21.
*/
/**
 * Resolves {@code #{propertyName}} placeholders that appear inside property
 * values, repeatedly substituting until no further replacement is possible.
 * Works either on plain {@link java.util.Map}s or on an {@link IConfigData}.
 */
public class DealSharpProperties
{
    private static final Logger LOGGER = LoggerFactory.getLogger(DealSharpProperties.class);

    /**
     * Describes one {@code #{key}} occurrence inside a property value:
     * the referenced key plus the character range it occupies in the
     * original string, so the placeholder can be spliced out later.
     */
    private static class PropOne
    {
        private String propKey,
                originValue;
        // Start (inclusive) and end (exclusive) offsets of the "#{key}" match in originValue.
        private int startIndex, endIndex;

        public PropOne(String propKey, String originValue, int startIndex, int endIndex)
        {
            this.propKey = propKey;
            this.originValue = originValue;
            this.startIndex = startIndex;
            this.endIndex = endIndex;
        }

        public String getPropKey()
        {
            return propKey;
        }

        /** Returns originValue with the matched placeholder replaced by {@code propValue}. */
        public String replace(String propValue)
        {
            String str = originValue.substring(0, startIndex) + propValue + originValue.substring(endIndex);
            return str;
        }
    }

    /**
     * Replaces every {@code #{propertyName}} placeholder in the given string.
     *
     * @param string     the text containing placeholders
     * @param properties map providing replacement values; CharSequence values are
     *                   inserted verbatim, {@code null} becomes the empty string,
     *                   anything else is serialized to JSON
     * @param forEmpty   if non-null, used to replace every remaining placeholder
     *                   whose property is not present in {@code properties}
     * @return the string with placeholders substituted
     */
    public static String replaceSharpProperties(String string, Map<String, ?> properties, String forEmpty)
    {
        for (Map.Entry<String, ?> entry : properties.entrySet())
        {
            if (string.contains("#{" + entry.getKey() + "}"))
            {
                String rs;
//                if (entry.getValue() instanceof Map || entry.getValue() instanceof Collection)
//                {
//                    rs = JSON.toJSONString(entry.getValue());
//                } else
//                {
//                    rs = String.valueOf(entry.getValue());
//                }
                if (entry.getValue() instanceof CharSequence)
                {
                    rs = String.valueOf(entry.getValue());
                } else if (entry.getValue() == null)
                {
                    rs = "";
                } else
                {
                    // Non-string values (maps, lists, numbers...) are inserted as JSON.
                    rs = JSON.toJSONString(entry.getValue());
                }
                string = string.replace("#{" + entry.getKey() + "}", rs);
            }
        }
        if (forEmpty != null)
        {
            // Strip (or substitute) any placeholders that were never set.
            string = string.replaceAll("#\\{[^{}]+\\}", forEmpty);
        }
        return string;
    }

    /**
     * Resolves {@code #{properName}} variables in place.
     *
     * @param srcMap        map whose values contain placeholders to resolve
     * @param propertiesMap map supplying the replacement property values
     */
    public static void dealSharpProperties(Map srcMap, Map propertiesMap)
    {
        dealSharpProperties(srcMap, propertiesMap, false);
    }

    /**
     * Resolves {@code #{properName}} variables in place.
     *
     * @param srcMap        map whose values contain placeholders to resolve
     * @param propertiesMap map supplying the replacement property values
     * @param keepNotFound  whether to keep placeholders whose key is missing
     *                      (when {@code false} they are replaced with "")
     */
    public static void dealSharpProperties(Map srcMap, Map propertiesMap, boolean keepNotFound)
    {
        Set<String> containsVar = null;
        boolean isFirst = true;
        boolean hasSet = true;
        // Iterate to a fixed point: a value may resolve to text that itself
        // contains another placeholder, so keep passing over the keys that
        // still contained variables until a pass performs no replacement.
        while (hasSet)
        {
            hasSet = false;
            Collection<String> nameCollection;
            if (isFirst)
            {
                nameCollection = srcMap.keySet();
            } else
            {
                // Later passes only revisit keys that still held a placeholder.
                nameCollection = containsVar;
            }
            containsVar = new HashSet<>();
            for (String properName : nameCollection)
            {
                Object value = srcMap.get(properName);
                if (!(value instanceof CharSequence))
                {
                    continue;
                }
                String valueString = String.valueOf(value);
                PropOne propOne = getPropertiesKey(String.valueOf(valueString));
                if (propOne != null && propOne.getPropKey().equals(properName))
                {
                    // Direct self-reference would never terminate; fail fast.
                    throw new RuntimeException(
                            "can not set property of \"" + properName + "\" with value \"" + valueString + "\",prop name eq value attr name");
                } else if (propOne != null)
                {
                    containsVar.add(properName);
                    if (LOGGER.isDebugEnabled())
                    {
                        LOGGER.debug("replace sharp property:key={},replace-attr={},origin-value={}", properName,
                                propOne.getPropKey(), valueString);
                    }
                    String replaceStr = null;
                    if (propertiesMap.containsKey(propOne.getPropKey()))
                    {
                        replaceStr = String.valueOf(propertiesMap.get(propOne.getPropKey()));
                    } else
                    {
                        if (keepNotFound)
                        {
                            // Leave the placeholder untouched and stop revisiting this key.
                            containsVar.remove(properName);
                        } else
                        {
                            replaceStr = "";
                            LOGGER.warn("proper value with key '{}' is empty", propOne.getPropKey());
                        }
                    }
                    if (replaceStr != null)
                    {
                        String newValue = propOne.replace(replaceStr);
                        srcMap.put(properName, newValue);
                        if (LOGGER.isDebugEnabled())
                        {
                            LOGGER.debug("replace sharp property:key={},new-value={}", properName, newValue);
                        }
                    }
                    hasSet = true;
                }
            }
            isFirst = false;
        }
    }

    /**
     * Same fixed-point resolution as
     * {@link #dealSharpProperties(Map, Map, boolean)}, but reading and writing
     * through an {@link IConfigData} instead of maps. Missing keys are always
     * replaced with the empty string here.
     */
    static void dealProperties(IConfigData configData)
    {
        Set<String> containsVar = null;
        boolean isFirst = true;
        boolean hasSet = true;
        // Resolve placeholders until a pass makes no replacement.
        while (hasSet)
        {
            hasSet = false;
            Collection<String> nameCollection;
            if (isFirst)
            {
                nameCollection = configData.propertyNames();
            } else
            {
                nameCollection = containsVar;
            }
            containsVar = new HashSet<>();
            for (String properName : nameCollection)
            {
                Object value = configData.get(properName);
                if (!(value instanceof CharSequence))
                {
                    continue;
                }
                String valueString = String.valueOf(value);
                PropOne propOne = getPropertiesKey(String.valueOf(valueString));
                if (propOne != null && propOne.getPropKey().equals(properName))
                {
                    // Direct self-reference would never terminate; fail fast.
                    throw new RuntimeException(
                            "can not set property of " + properName + " with value \"" + valueString + "\"");
                } else if (propOne != null)
                {
                    containsVar.add(properName);
                    if (LOGGER.isDebugEnabled())
                    {
                        LOGGER.debug("replace sharp property:key={},replace-attr={},origin-value={}", properName,
                                propOne.getPropKey(), valueString);
                    }
                    String replaceStr;
                    if (configData.contains(propOne.getPropKey()))
                    {
                        replaceStr = configData.getString(propOne.getPropKey());
                    } else
                    {
                        replaceStr = "";
                        LOGGER.warn("proper value with key '{}' is empty", propOne.getPropKey());
                    }
                    String newValue = propOne.replace(replaceStr);
                    configData.set(properName, newValue);
                    if (LOGGER.isDebugEnabled())
                    {
                        LOGGER.debug("replace sharp property:key={},new-value={}", properName, newValue);
                    }
                    hasSet = true;
                }
            }
            isFirst = false;
        }
    }

    // Matches one "#{key}" placeholder; group 1 captures the key (no nested braces).
    private static final Pattern PROPERTIES_PATTERN = Pattern.compile("#\\{([^{}]+)}");

    /**
     * Finds the first {@code #{key}} placeholder in {@code value}.
     *
     * @return a {@link PropOne} describing the first match, or {@code null} if none
     */
    private static PropOne getPropertiesKey(String value)
    {
        Matcher matcher = PROPERTIES_PATTERN.matcher(value);
        if (matcher.find())
        {
            PropOne propOne = new PropOne(matcher.group(1).trim(), value, matcher.start(), matcher.end());
            return propOne;
        } else
        {
            return null;
        }
    }
}
| apache-2.0 |
DBCDK/kafka-dispatch | src/main/java/dk/dbc/kafka/dispatch/sources/InputStreamSource.java | 818 | package dk.dbc.kafka.dispatch.sources;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.Optional;
/**
 * Source for reading InputStreams line-by-line.
 *
 * @author Adam Tulinius
 */
public class InputStreamSource extends Source<String> {
    // Wrapped once at construction; final so the reference cannot be rebound.
    // UTF-8 is fixed explicitly so behavior does not depend on the platform charset.
    private final BufferedReader reader;

    /**
     * @param inputStream stream to read; consumed line by line as {@link #next()} is called
     */
    public InputStreamSource(InputStream inputStream) {
        this.reader = new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8));
    }

    /**
     * Reads the next line from the stream.
     *
     * @return the next line, or {@link Optional#empty()} once end-of-stream is reached
     * @throws IOException if reading from the underlying stream fails
     */
    @Override
    public Optional<String> next() throws IOException {
        // readLine() returns null at end-of-stream; map that to an empty Optional.
        return Optional.ofNullable(reader.readLine());
    }
}
| apache-2.0 |
robjcaskey/Unofficial-Coffee-Mud-Upstream | com/planet_ink/coffee_mud/Commands/Go.java | 16256 | package com.planet_ink.coffee_mud.Commands;
import com.planet_ink.coffee_mud.core.interfaces.*;
import com.planet_ink.coffee_mud.core.*;
import com.planet_ink.coffee_mud.Libraries.interfaces.*;
import com.planet_ink.coffee_mud.Abilities.interfaces.*;
import com.planet_ink.coffee_mud.Areas.interfaces.*;
import com.planet_ink.coffee_mud.Behaviors.interfaces.*;
import com.planet_ink.coffee_mud.CharClasses.interfaces.*;
import com.planet_ink.coffee_mud.Commands.interfaces.*;
import com.planet_ink.coffee_mud.Common.interfaces.*;
import com.planet_ink.coffee_mud.Exits.interfaces.*;
import com.planet_ink.coffee_mud.Items.interfaces.*;
import com.planet_ink.coffee_mud.Locales.interfaces.*;
import com.planet_ink.coffee_mud.MOBS.interfaces.*;
import com.planet_ink.coffee_mud.Races.interfaces.*;
import java.util.*;
/*
Copyright 2000-2010 Bo Zimmerman
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/**
 * The GO/WALK command: moves a MOB (player or monster) from its current room
 * through an exit into an adjacent room, dragging along mounts, riders,
 * followers and ridden items, and honoring scroll-bar-style "as needed"
 * checks on exits, energy and movement points.
 */
@SuppressWarnings("unchecked")
public class Go extends StdCommand
{
    public Go(){}

    // Command words that trigger this command.
    private String[] access={"GO","WALK"};
    public String[] getAccessWords(){return access;}

    // Number of energy expenditures charged per move for non-monster MOBs.
    public int energyExpenseFactor(){return 1;}

    /**
     * After the mount/vehicle has moved, brings each rider in {@code riders}
     * along from {@code sourceRoom} to {@code destRoom}. A rider MOB that
     * fails its own move falls off; riders no longer in either room are
     * silently unseated.
     */
    public void ridersBehind(Vector riders,
                             Room sourceRoom,
                             Room destRoom,
                             int directionCode,
                             boolean flee)
    {
        if(riders!=null)
            for(int r=0;r<riders.size();r++)
            {
                Rider rider=(Rider)riders.elementAt(r);
                if(rider instanceof MOB)
                {
                    MOB rMOB=(MOB)rider;
                    if((rMOB.location()==sourceRoom)
                    ||(rMOB.location()==destRoom))
                    {
                        boolean fallOff=false;
                        if(rMOB.location()==sourceRoom)
                        {
                            if(rMOB.riding()!=null)
                                rMOB.tell("You ride "+rMOB.riding().name()+" "+Directions.getDirectionName(directionCode)+".");
                            // Recursive move for the rider; noriders=true avoids re-processing.
                            if(!move(rMOB,directionCode,flee,false,true,false))
                                fallOff=true;
                        }
                        if(fallOff)
                        {
                            if(rMOB.riding()!=null)
                                rMOB.tell("You fall off "+rMOB.riding().name()+"!");
                            rMOB.setRiding(null);
                        }
                    }
                    else
                        rMOB.setRiding(null);
                }
                else
                if(rider instanceof Item)
                {
                    Item rItem=(Item)rider;
                    if((rItem.owner()==sourceRoom)
                    ||(rItem.owner()==destRoom))
                        destRoom.bringItemHere(rItem,-1,false);
                    else
                        rItem.setRiding(null);
                }
            }
    }

    /**
     * Recursively collects into {@code riders} everyone mounted on
     * {@code riding} (and on any rideable riders thereof), excluding
     * {@code theRider} itself. Only mobile rideables are traversed.
     *
     * @return the same {@code riders} vector, for chaining
     */
    public static Vector addRiders(Rider theRider,
                                   Rideable riding,
                                   Vector riders)
    {
        if((riding!=null)&&(riding.mobileRideBasis()))
            for(int r=0;r<riding.numRiders();r++)
            {
                Rider rider=riding.fetchRider(r);
                if((rider!=null)
                &&(rider!=theRider)
                &&(!riders.contains(rider)))
                {
                    riders.addElement(rider);
                    if(rider instanceof Rideable)
                        addRiders(theRider,(Rideable)rider,riders);
                }
            }
        return riders;
    }

    /**
     * Moves the chain of rideables that {@code theRider} sits on (mount, its
     * mount, etc.) ahead of the rider, and gathers the remaining co-riders to
     * be moved afterwards by {@link #ridersBehind}. If any mount refuses the
     * move, everything already moved is brought back.
     *
     * @return the co-riders still to move, or {@code null} if the move failed
     */
    public Vector ridersAhead(Rider theRider,
                              Room sourceRoom,
                              Room destRoom,
                              int directionCode,
                              boolean flee)
    {
        Vector riders=new Vector();
        Rideable riding=theRider.riding();
        Vector rideables=new Vector();
        // Walk up the chain of mounts, collecting both the rideables and their riders.
        while((riding!=null)&&(riding.mobileRideBasis()))
        {
            rideables.addElement(riding);
            addRiders(theRider,riding,riders);
            if((riding instanceof Rider)&&((Rider)riding).riding()!=theRider.riding())
                riding=((Rider)riding).riding();
            else
                riding=null;
        }
        if(theRider instanceof Rideable)
            addRiders(theRider,(Rideable)theRider,riders);
        // Riders that are themselves carrying others are treated as rideables instead.
        for(int r=riders.size()-1;r>=0;r--)
        {
            Rider R=(Rider)riders.elementAt(r);
            if((R instanceof Rideable)&&(((Rideable)R).numRiders()>0))
            {
                if(!rideables.contains(R))
                    rideables.addElement(R);
                riders.removeElement(R);
            }
        }
        for(int r=0;r<rideables.size();r++)
        {
            riding=(Rideable)rideables.elementAt(r);
            if((riding instanceof Item)
            &&((sourceRoom).isContent((Item)riding)))
                destRoom.bringItemHere((Item)riding,-1,false);
            else
            if((riding instanceof MOB)
            &&((sourceRoom).isInhabitant((MOB)riding)))
            {
                ((MOB)riding).tell("You are ridden "+Directions.getDirectionName(directionCode)+".");
                if(!move(((MOB)riding),directionCode,false,false,true,false))
                {
                    // The mount balked: roll back everything moved so far.
                    if(theRider instanceof MOB)
                        ((MOB)theRider).tell(((MOB)riding).name()+" won't seem to let you go that way.");
                    r=r-1;
                    for(;r>=0;r--)
                    {
                        riding=(Rideable)rideables.elementAt(r);
                        if((riding instanceof Item)
                        &&((destRoom).isContent((Item)riding)))
                            sourceRoom.bringItemHere((Item)riding,-1,false);
                        else
                        if((riding instanceof MOB)
                        &&(((MOB)riding).isMonster())
                        &&((destRoom).isInhabitant((MOB)riding)))
                            sourceRoom.bringMobHere((MOB)riding,false);
                    }
                    return null;
                }
            }
        }
        return riders;
    }

    /** Convenience overload of the full move with {@code always=false}. */
    public boolean move(MOB mob,
                        int directionCode,
                        boolean flee,
                        boolean nolook,
                        boolean noriders)
    {
        return move(mob,directionCode,flee,nolook,noriders,false);
    }

    /**
     * Core movement routine: validates the exit and destination, builds and
     * vets the leave/enter messages, charges movement cost, moves mounts and
     * riders, relocates the MOB, then handles auto-look, weather notices,
     * followers and any trailer messages.
     *
     * @param mob           the mover
     * @param directionCode one of the Directions codes (or GATE)
     * @param flee          whether this is a flee (different messaging, no followers)
     * @param nolook        suppress the automatic look on arrival
     * @param noriders      skip rider processing (used for recursive rider moves)
     * @param always        force the move past normal okMessage checks
     * @return true if the MOB ended up in the destination room
     */
    public boolean move(MOB mob,
                        int directionCode,
                        boolean flee,
                        boolean nolook,
                        boolean noriders,
                        boolean always)
    {
        if(directionCode<0) return false;
        if(mob==null) return false;
        Room thisRoom=mob.location();
        if(thisRoom==null) return false;
        Room destRoom=thisRoom.getRoomInDir(directionCode);
        Exit exit=thisRoom.getExitInDir(directionCode);
        if(destRoom==null)
        {
            mob.tell("You can't go that way.");
            return false;
        }
        Exit opExit=thisRoom.getReverseExit(directionCode);
        String directionName=(directionCode==Directions.GATE)&&(exit!=null)?"through "+exit.name():Directions.getDirectionName(directionCode);
        String otherDirectionName=(Directions.getOpDirectionCode(directionCode)==Directions.GATE)&&(exit!=null)?exit.name():Directions.getFromDirectionName(Directions.getOpDirectionCode(directionCode));
        int generalMask=always?CMMsg.MASK_ALWAYS:0;
        int leaveCode=generalMask|CMMsg.MSG_LEAVE;
        if(flee)
            leaveCode=generalMask|CMMsg.MSG_FLEE;
        CMMsg enterMsg=null;
        CMMsg leaveMsg=null;
        // Message text differs depending on whether the MOB is on a mobile mount.
        if((mob.riding()!=null)&&(mob.riding().mobileRideBasis()))
        {
            enterMsg=CMClass.getMsg(mob,destRoom,exit,generalMask|CMMsg.MSG_ENTER,null,CMMsg.MSG_ENTER,null,CMMsg.MSG_ENTER,"<S-NAME> ride(s) "+mob.riding().name()+" in from "+otherDirectionName+".");
            leaveMsg=CMClass.getMsg(mob,thisRoom,opExit,leaveCode,((flee)?"You flee "+directionName+".":null),leaveCode,null,leaveCode,((flee)?"<S-NAME> flee(s) with "+mob.riding().name()+" "+directionName+".":"<S-NAME> ride(s) "+mob.riding().name()+" "+directionName+"."));
        }
        else
        {
            enterMsg=CMClass.getMsg(mob,destRoom,exit,generalMask|CMMsg.MSG_ENTER,null,CMMsg.MSG_ENTER,null,CMMsg.MSG_ENTER,"<S-NAME> "+CMLib.flags().dispositionString(mob,CMFlagLibrary.flag_arrives)+" from "+otherDirectionName+".");
            leaveMsg=CMClass.getMsg(mob,thisRoom,opExit,leaveCode,((flee)?"You flee "+directionName+".":null),leaveCode,null,leaveCode,((flee)?"<S-NAME> flee(s) "+directionName+".":"<S-NAME> "+CMLib.flags().dispositionString(mob,CMFlagLibrary.flag_leaves)+" "+directionName+"."));
        }
        // Staff with GOTO privileges bypass all of the okMessage vetoes below.
        boolean gotoAllowed=CMSecurity.isAllowed(mob,destRoom,"GOTO");
        if((exit==null)&&(!gotoAllowed))
        {
            mob.tell("You can't go that way.");
            return false;
        }
        else
        if(exit==null)
            thisRoom.showHappens(CMMsg.MSG_OK_VISUAL,"The area to the "+directionName+" shimmers and becomes transparent.");
        else
        if((!exit.okMessage(mob,enterMsg))&&(!gotoAllowed))
            return false;
        else
        if(!leaveMsg.target().okMessage(mob,leaveMsg)&&(!gotoAllowed))
            return false;
        else
        if((opExit!=null)&&(!opExit.okMessage(mob,leaveMsg))&&(!gotoAllowed))
            return false;
        else
        if(!enterMsg.target().okMessage(mob,enterMsg)&&(!gotoAllowed))
            return false;
        else
        if(!mob.okMessage(mob,enterMsg)&&(!gotoAllowed))
            return false;
        if(mob.riding()!=null)
        {
            if((!mob.riding().okMessage(mob,enterMsg))&&(!gotoAllowed))
                return false;
        }
        else
        {
            // Walking on foot: charge energy/movement and adjust hygiene.
            if(!mob.isMonster())
                for(int i=0;i<energyExpenseFactor();i++)
                    mob.curState().expendEnergy(mob,mob.maxState(),true);
            if((!flee)&&(!mob.curState().adjMovement(-1,mob.maxState()))&&(!gotoAllowed))
            {
                mob.tell("You are too tired.");
                return false;
            }
            if((mob.soulMate()==null)&&(mob.playerStats()!=null)&&(mob.riding()==null)&&(mob.location()!=null))
                mob.playerStats().adjHygiene(mob.location().pointsPerMove(mob));
        }
        Vector riders=null;
        if(!noriders)
        {
            // Move the mount chain first; abort if any mount refuses.
            riders=ridersAhead(mob,(Room)leaveMsg.target(),(Room)enterMsg.target(),directionCode,flee);
            if(riders==null) return false;
        }
        // Trailer messages are detached and re-sent after the move completes.
        Vector enterTrailersSoFar=null;
        Vector leaveTrailersSoFar=null;
        if((leaveMsg.trailerMsgs()!=null)&&(leaveMsg.trailerMsgs().size()>0))
        {
            leaveTrailersSoFar=new Vector();
            leaveTrailersSoFar.addAll(leaveMsg.trailerMsgs());
            leaveMsg.trailerMsgs().clear();
        }
        if((enterMsg.trailerMsgs()!=null)&&(enterMsg.trailerMsgs().size()>0))
        {
            enterTrailersSoFar=new Vector();
            enterTrailersSoFar.addAll(enterMsg.trailerMsgs());
            enterMsg.trailerMsgs().clear();
        }
        if(exit!=null) exit.executeMsg(mob,enterMsg);
        if(mob.location()!=null) mob.location().delInhabitant(mob);
        ((Room)leaveMsg.target()).send(mob,leaveMsg);
        if(enterMsg.target()==null)
        {
            // Destination vanished mid-move: put the MOB back where it was.
            ((Room)leaveMsg.target()).bringMobHere(mob,false);
            mob.tell("You can't go that way.");
            return false;
        }
        mob.setLocation((Room)enterMsg.target());
        ((Room)enterMsg.target()).addInhabitant(mob);
        ((Room)enterMsg.target()).send(mob,enterMsg);
        if(opExit!=null) opExit.executeMsg(mob,leaveMsg);
        if(!nolook)
        {
            CMLib.commands().postLook(mob,true);
            // Auto-weather notice when stepping from indoors to outdoors in bad weather.
            if((!mob.isMonster())
            &&(CMath.bset(mob.getBitmap(),MOB.ATT_AUTOWEATHER))
            &&(((Room)enterMsg.target())!=null)
            &&((thisRoom.domainType()&Room.INDOORS)>0)
            &&((((Room)enterMsg.target()).domainType()&Room.INDOORS)==0)
            &&(((Room)enterMsg.target()).getArea().getClimateObj().weatherType(((Room)enterMsg.target()))!=Climate.WEATHER_CLEAR)
            &&(((Room)enterMsg.target()).isInhabitant(mob)))
                mob.tell("\n\r"+((Room)enterMsg.target()).getArea().getClimateObj().weatherDescription(((Room)enterMsg.target())));
        }
        if(!noriders)
            ridersBehind(riders,(Room)leaveMsg.target(),(Room)enterMsg.target(),directionCode,flee);
        // Followers come along unless this was a flee or they are on auto-guard.
        if(!flee)
            for(int f=0;f<mob.numFollowers();f++)
            {
                MOB follower=mob.fetchFollower(f);
                if(follower!=null)
                {
                    if((follower.amFollowing()==mob)
                    &&((follower.location()==thisRoom)||(follower.location()==destRoom)))
                    {
                        if((follower.location()==thisRoom)&&(CMLib.flags().aliveAwakeMobile(follower,true)))
                        {
                            if(CMath.bset(follower.getBitmap(),MOB.ATT_AUTOGUARD))
                                thisRoom.show(follower,null,null,CMMsg.MSG_OK_ACTION,"<S-NAME> remain(s) on guard here.");
                            else
                            {
                                follower.tell("You follow "+mob.name()+" "+Directions.getDirectionName(directionCode)+".");
                                if(!move(follower,directionCode,false,false,false,false))
                                {
                                    //follower.setFollowing(null);
                                }
                            }
                        }
                    }
                    //else
                    //    follower.setFollowing(null);
                }
            }
        if((leaveTrailersSoFar!=null)&&(leaveMsg.target() instanceof Room))
            for(int t=0;t<leaveTrailersSoFar.size();t++)
                ((Room)leaveMsg.target()).send(mob,(CMMsg)leaveTrailersSoFar.elementAt(t));
        if((enterTrailersSoFar!=null)&&(enterMsg.target() instanceof Room))
            for(int t=0;t<enterTrailersSoFar.size();t++)
                ((Room)enterMsg.target()).send(mob,(CMMsg)enterTrailersSoFar.elementAt(t));
        return true;
    }

    // Lazily-created "STAND IFNECESSARY" command and its argument vector, reused per call.
    protected Command stander=null;
    protected Vector ifneccvec=null;

    /** Makes the MOB stand up (via the Stand command) before moving, if needed. */
    public void standIfNecessary(MOB mob, int metaFlags)
        throws java.io.IOException
    {
        if((ifneccvec==null)||(ifneccvec.size()!=2))
        {
            ifneccvec=new Vector();
            ifneccvec.addElement("STAND");
            ifneccvec.addElement("IFNECESSARY");
        }
        if(stander==null) stander=CMClass.getCommand("Stand");
        if((stander!=null)&&(ifneccvec!=null))
            stander.execute(mob,ifneccvec,metaFlags);
    }

    /**
     * Command entry point. Accepts either pre-parsed arguments
     * (Integer direction + three Booleans), a direction word, "OUT",
     * an enterable object name, or a multi-step path like "GO 2n 3e".
     */
    public boolean execute(MOB mob, Vector commands, int metaFlags)
        throws java.io.IOException
    {
        standIfNecessary(mob,metaFlags);
        // Internal callers may pass pre-parsed arguments instead of words.
        if((commands.size()>3)
        &&(commands.firstElement() instanceof Integer))
        {
            return move(mob,
                    ((Integer)commands.elementAt(0)).intValue(),
                    ((Boolean)commands.elementAt(1)).booleanValue(),
                    ((Boolean)commands.elementAt(2)).booleanValue(),
                    ((Boolean)commands.elementAt(3)).booleanValue(),false);
        }
        String whereStr=CMParms.combine(commands,1);
        Room R=mob.location();
        int direction=-1;
        if(whereStr.equalsIgnoreCase("OUT"))
        {
            // "OUT" works only indoors, and only with a single outdoor exit.
            if(!CMath.bset(R.domainType(),Room.INDOORS))
            {
                mob.tell("You aren't indoors.");
                return false;
            }
            for(int d=Directions.NUM_DIRECTIONS()-1;d>=0;d--)
                if((R.getExitInDir(d)!=null)
                &&(R.getRoomInDir(d)!=null)
                &&(!CMath.bset(R.getRoomInDir(d).domainType(),Room.INDOORS)))
                {
                    if(direction>=0)
                    {
                        mob.tell("Which way out? Try North, South, East, etc..");
                        return false;
                    }
                    direction=d;
                }
            if(direction<0)
            {
                mob.tell("There is no direct way out of this place. Try a direction.");
                return false;
            }
        }
        if(direction<0)
            direction=Directions.getGoodDirectionCode(whereStr);
        if(direction<0)
        {
            // Not a direction word: maybe a rideable to enter, or a named exit.
            Environmental E=null;
            if(R!=null)
                E=R.fetchFromRoomFavorItems(null,whereStr,Item.WORNREQ_UNWORNONLY);
            if(E instanceof Rideable)
            {
                Command C=CMClass.getCommand("Enter");
                return C.execute(mob,commands,metaFlags);
            }
            if((E instanceof Exit)&&(R!=null))
            {
                for(int d=Directions.NUM_DIRECTIONS()-1;d>=0;d--)
                    if(R.getExitInDir(d)==E)
                    { direction=d; break;}
            }
        }
        String doing=(String)commands.elementAt(0);
        if(direction>=0)
            move(mob,direction,false,false,false,false);
        else
        {
            // Multi-step path parsing: sequences like "2n", "3 east", "w".
            boolean doneAnything=false;
            if(commands.size()>2)
                for(int v=1;v<commands.size();v++)
                {
                    int num=1;
                    String s=(String)commands.elementAt(v);
                    if(CMath.s_int(s)>0)
                    {
                        num=CMath.s_int(s);
                        v++;
                        if(v<commands.size())
                            s=(String)commands.elementAt(v);
                    }
                    else
                    if(("NSEWUDnsewud".indexOf(s.charAt(s.length()-1))>=0)
                    &&(CMath.s_int(s.substring(0,s.length()-1))>0))
                    {
                        num=CMath.s_int(s.substring(0,s.length()-1));
                        s=s.substring(s.length()-1);
                    }
                    direction=Directions.getGoodDirectionCode(s);
                    if(direction>=0)
                    {
                        doneAnything=true;
                        for(int i=0;i<num;i++)
                        {
                            if(mob.isMonster())
                            {
                                if(!move(mob,direction,false,false,false,false))
                                    return false;
                            }
                            else
                            {
                                // Players get each step queued as its own command.
                                Vector V=new Vector();
                                V.addElement(doing);
                                V.addElement(Directions.getDirectionName(direction));
                                mob.enqueCommand(V,metaFlags,0);
                            }
                        }
                    }
                    else
                        break;
                }
            if(!doneAnything)
                mob.tell(CMStrings.capitalizeAndLower(doing)+" which direction?\n\rTry north, south, east, west, up, or down.");
        }
        return false;
    }

    /** Action-point cost of this command; quartered when auto-run is enabled. */
    public double actionsCost(MOB mob, Vector cmds){
        double cost=CMath.div(CMProps.getIntVar(CMProps.SYSTEMI_DEFCMDTIME),100.0);
        if((mob!=null)&&(CMath.bset(mob.getBitmap(),MOB.ATT_AUTORUN)))
            cost /= 4.0;
        return cost;
    }

    /** MOBs may be ordered to execute this command. */
    public boolean canBeOrdered(){return true;}
}
| apache-2.0 |
consulo/consulo | modules/base/platform-impl/src/main/java/com/intellij/ide/plugins/LabelPopup.java | 2225 | /*
* Copyright 2013-2020 consulo.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.ide.plugins;
import com.intellij.icons.AllIcons;
import com.intellij.ide.DataManager;
import com.intellij.openapi.actionSystem.ActionGroup;
import com.intellij.openapi.ui.popup.JBPopupFactory;
import com.intellij.ui.ClickListener;
import com.intellij.util.ui.JBUI;
import com.intellij.util.ui.UIUtil;
import consulo.awt.TargetAWT;
import consulo.localize.LocalizeValue;
import javax.annotation.Nonnull;
import javax.swing.*;
import java.awt.event.MouseEvent;
import java.util.function.Function;
/**
 * A label that behaves like a drop-down button: it shows a fixed prefix, a
 * combo-arrow icon, and on click opens an action-group popup underneath
 * itself. Used in the plugins UI for tag/category selectors.
 *
 * @author VISTALL
 * @since 03/12/2020
 */
public class LabelPopup extends JLabel {
    // Static prefix shown before the currently selected value.
    private final LocalizeValue myPrefix;

    /**
     * @param prefix       text shown before the selected value
     * @param groupBuilder builds the action group to display each time the popup opens
     */
    public LabelPopup(LocalizeValue prefix, Function<LabelPopup, ? extends ActionGroup> groupBuilder) {
        myPrefix = prefix;
        setForeground(UIUtil.getLabelDisabledForeground());
        setBorder(JBUI.Borders.empty(1, 1, 1, 5));
        setIcon(TargetAWT.to(AllIcons.General.ComboArrow));
        // Icon (the arrow) is rendered after the text, like a combo box.
        setHorizontalTextPosition(SwingConstants.LEADING);
        new ClickListener() {
            @Override
            public boolean onClick(@Nonnull MouseEvent event, int clickCount) {
                LabelPopup component = LabelPopup.this;
                // Rebuild the group on every click so its contents stay current.
                JBPopupFactory.getInstance()
                        .createActionGroupPopup(myPrefix.get(), groupBuilder.apply(component), DataManager.getInstance().getDataContext(component), JBPopupFactory.ActionSelectionAid.SPEEDSEARCH, true)
                        .showUnderneathOf(component);
                return true;
            }
        }.installOn(this);
    }

    /** Updates the label to show "prefix &lt;tagValue&gt;". */
    public void setPrefixedText(LocalizeValue tagValue) {
        setText(LocalizeValue.join(myPrefix, LocalizeValue.space(), tagValue).get());
    }
}
| apache-2.0 |
support-project/knowledge | src/main/java/org/support/project/knowledge/vo/notification/webhook/WebhookLongIdJson.java | 119 | package org.support.project.knowledge.vo.notification.webhook;
/**
 * Minimal payload object carrying a single numeric id for webhook
 * notifications. Public field kept for direct JSON (de)serialization —
 * NOTE(review): presumably mapped by the project's JSON library; confirm
 * before adding accessors.
 */
public class WebhookLongIdJson {
    // Identifier of the entity referenced by the notification.
    public long id;
}
| apache-2.0 |
uni2u/iNaaS | Torpedo/src/etri/sdn/controller/module/vxlanflowmapper/Tester.java | 1831 | package etri.sdn.controller.module.vxlanflowmapper;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.codehaus.jackson.map.ObjectMapper;
/**
 * Ad-hoc manual test driver: builds sample VXLAN header-mapping request and
 * response objects and prints their pretty-printed JSON form to stdout.
 */
public class Tester {
    public static void main(String[] args) {
        testV2PRequest();
        testV2PResponse();
    }

    /** Builds a sample V2P response (one header pair) and prints it as JSON. */
    public static void testV2PResponse() {
        HeaderInfoPair pair1 = new HeaderInfoPair(
                new OuterPacketHeader.Builder()
                        .srcMac("00:00:00:00:00:11")
                        .dstMac("00:00:00:00:00:22")
                        .srcIp("10.0.0.11")
                        .dstIp("10.0.0.22")
                        .udpPort("1001")
                        .build(),
                new OrginalPacketHeader.Builder()
                        .srcMac("00:00:00:00:00:11")
                        .dstMac("00:00:00:00:00:22")
                        .srcIp("10.0.0.11")
                        .dstIp("10.0.0.22")
                        .vnid("1001")
                        .build());
        List<HeaderInfoPair> pairs = Arrays.asList(pair1);
        V2PResponse response = new V2PResponse(pairs);
        ObjectMapper mapper = new ObjectMapper();
        try {
            String output = mapper.defaultPrettyPrintingWriter().writeValueAsString(response);
            System.out.println(output);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /** Builds a sample P2V request (one outer header) and prints it as JSON. */
    public static void testV2PRequest() {
        OuterPacketHeader orgHeader = new OuterPacketHeader("00:00:00:00:00:01", "00:00:00:00:00:02", "10.0.0.1", "10.0.0.2", "1234");
        List<OuterPacketHeader> headers = Arrays.asList(orgHeader);
        P2VRequest request = new P2VRequest(headers);
        ObjectMapper mapper = new ObjectMapper();
        try {
            String output = mapper.defaultPrettyPrintingWriter().writeValueAsString(request);
            System.out.println(output);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
| apache-2.0 |
tupilabs/nebular | src/test/java/fuzzy/internal/functions/TestMax.java | 1371 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*/
package fuzzy.internal.functions;
import static org.junit.Assert.assertEquals;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import org.junit.Test;
/**
* Tests for Max function.
*
* @since 0.2
* @see Max
*/
/**
 * Tests for Max function.
 *
 * @since 0.2
 * @see Max
 */
public class TestMax {

    /** Plain maximum over a mixed-sign collection. */
    @Test
    public void testMax() {
        Collection<Double> values = Arrays.asList(-1.0, 1.0, 2.0, 3.5);
        assertEquals(Double.valueOf(3.5), Max.of(values, false));
    }

    /** An empty collection yields 0.0 rather than failing. */
    @Test
    public void testMaxEmpty() {
        assertEquals(Double.valueOf(0.0), Max.of(Collections.<Double>emptyList(), false));
    }

    /** With the abs flag set, the element largest in magnitude is returned as-is. */
    @Test
    public void testMaxAbs() {
        Collection<Double> values = Arrays.asList(-10.0, -1.0, 1.0, 2.0, 3.5);
        assertEquals(Double.valueOf(-10.0), Max.of(values, true));
    }
}
| apache-2.0 |
hibernate/hibernate-semantic-query | src/test/java/org/hibernate/orm/type/descriptor/sql/internal/JdbcLiteralFormatterTemporal.java | 2399 | /*
* Hibernate, Relational Persistence for Idiomatic Java
*
* License: GNU Lesser General Public License (LGPL), version 2.1 or later
* See the lgpl.txt file in the root directory or http://www.gnu.org/licenses/lgpl-2.1.html
*/
package org.hibernate.orm.type.descriptor.sql.internal;
import java.time.temporal.TemporalAccessor;
import javax.persistence.TemporalType;
import org.hibernate.dialect.Dialect;
import org.hibernate.orm.type.descriptor.internal.DateTimeUtils;
import org.hibernate.orm.type.descriptor.java.spi.TemporalJavaTypeDescriptor;
import org.hibernate.orm.type.descriptor.spi.WrapperOptions;
/**
* @author Steve Ebersole
*/
/**
 * JDBC literal formatter for temporal values, rendering them according to the
 * requested JDBC {@link TemporalType} precision.
 *
 * @author Steve Ebersole
 */
public class JdbcLiteralFormatterTemporal extends BasicJdbcLiteralFormatter {
	private final TemporalType precision;

	public JdbcLiteralFormatterTemporal(TemporalJavaTypeDescriptor javaTypeDescriptor, TemporalType precision) {
		super( javaTypeDescriptor );
		this.precision = precision;
		// todo : add some validation of combos between javaTypeDescriptor#getPrecision and precision - log warnings
	}

	@Override
	protected TemporalJavaTypeDescriptor getJavaTypeDescriptor() {
		return (TemporalJavaTypeDescriptor) super.getJavaTypeDescriptor();
	}

	@Override
	public String toJdbcLiteral(Object value, Dialect dialect, WrapperOptions wrapperOptions) {
		// Fast paths: these value types can be handed to DateTimeUtils directly,
		// skipping the unwrap conversion for performance. The three checked types
		// are mutually exclusive, so independent guards are equivalent to a chain.
		if ( value instanceof java.util.Date ) {
			return DateTimeUtils.formatJdbcLiteralUsingPrecision( (java.util.Date) value, precision );
		}
		if ( value instanceof java.util.Calendar ) {
			return DateTimeUtils.formatJdbcLiteralUsingPrecision( (java.util.Calendar) value, precision );
		}
		if ( value instanceof TemporalAccessor ) {
			return DateTimeUtils.formatJdbcLiteralUsingPrecision( (TemporalAccessor) value, precision );
		}

		// Otherwise unwrap to the JDBC date/time type implied by the Java type
		// descriptor's own precision, then format with the requested precision.
		switch ( getJavaTypeDescriptor().getPrecision() ) {
			case DATE:
				return DateTimeUtils.formatJdbcLiteralUsingPrecision(
						unwrap( value, java.sql.Date.class, wrapperOptions ), precision );
			case TIME:
				return DateTimeUtils.formatJdbcLiteralUsingPrecision(
						unwrap( value, java.sql.Time.class, wrapperOptions ), precision );
			default:
				return DateTimeUtils.formatJdbcLiteralUsingPrecision(
						unwrap( value, java.util.Date.class, wrapperOptions ), precision );
		}
	}
}
| apache-2.0 |