repo_name
stringlengths
5
108
path
stringlengths
6
333
size
stringlengths
1
6
content
stringlengths
4
977k
license
stringclasses
15 values
Drifftr/devstudio-tooling-bps
plugins/org.eclipse.bpel.apache.ode.deploy.model/src/org/eclipse/bpel/apache/ode/deploy/model/dd/impl/TScopeEventsImpl.java
4065
/*******************************************************************************
 * Copyright (c) 2008 IBM Corporation, University of Stuttgart (IAAS) and others.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *     IBM Corporation, University of Stuttgart (IAAS) - initial API and implementation
 *******************************************************************************/
package org.eclipse.bpel.apache.ode.deploy.model.dd.impl;

import org.eclipse.bpel.apache.ode.deploy.model.dd.TScopeEvents;
import org.eclipse.bpel.apache.ode.deploy.model.dd.ddPackage;
import org.eclipse.emf.common.notify.Notification;
import org.eclipse.emf.ecore.EClass;
import org.eclipse.emf.ecore.impl.ENotificationImpl;

/**
 * <!-- begin-user-doc -->
 * An implementation of the model object '<em><b>TScope Events</b></em>'.
 * <p>
 * EMF-generated implementation: do not hand-edit code outside user-doc
 * regions, or the next regeneration will overwrite the changes.
 * The following features are implemented:
 * <ul>
 *   <li>{@link org.eclipse.bpel.apache.ode.deploy.model.dd.impl.TScopeEventsImpl#getName <em>Name</em>}</li>
 * </ul>
 * </p>
 * <!-- end-user-doc -->
 *
 * @generated
 */
public class TScopeEventsImpl extends TEnableEventListImpl implements TScopeEvents {
	/**
	 * The default value of the '{@link #getName() <em>Name</em>}' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @see #getName()
	 * @generated
	 * @ordered
	 */
	protected static final String NAME_EDEFAULT = null;

	/**
	 * The cached value of the '{@link #getName() <em>Name</em>}' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @see #getName()
	 * @generated
	 * @ordered
	 */
	protected String name = NAME_EDEFAULT;

	/**
	 * <!-- begin-user-doc -->
	 * Protected: instances are created via the generated ddFactory.
	 * <!-- end-user-doc -->
	 * @generated
	 */
	protected TScopeEventsImpl() {
		super();
	}

	/**
	 * <!-- begin-user-doc -->
	 * Returns the static EClass describing this model object.
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	protected EClass eStaticClass() {
		return ddPackage.Literals.TSCOPE_EVENTS;
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public String getName() {
		return name;
	}

	/**
	 * <!-- begin-user-doc -->
	 * Sets the name and, if any adapters are attached, fires a SET notification
	 * carrying the old and new values.
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public void setName(String newName) {
		String oldName = name;
		name = newName;
		if (eNotificationRequired())
			eNotify(new ENotificationImpl(this, Notification.SET, ddPackage.TSCOPE_EVENTS__NAME, oldName, name));
	}

	/**
	 * <!-- begin-user-doc -->
	 * Reflective getter dispatch for the EMF framework; unknown feature IDs are
	 * delegated to the superclass.
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public Object eGet(int featureID, boolean resolve, boolean coreType) {
		switch (featureID) {
			case ddPackage.TSCOPE_EVENTS__NAME:
				return getName();
		}
		return super.eGet(featureID, resolve, coreType);
	}

	/**
	 * <!-- begin-user-doc -->
	 * Reflective setter dispatch for the EMF framework.
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public void eSet(int featureID, Object newValue) {
		switch (featureID) {
			case ddPackage.TSCOPE_EVENTS__NAME:
				setName((String)newValue);
				return;
		}
		super.eSet(featureID, newValue);
	}

	/**
	 * <!-- begin-user-doc -->
	 * Restores the feature to its default value (null for Name).
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public void eUnset(int featureID) {
		switch (featureID) {
			case ddPackage.TSCOPE_EVENTS__NAME:
				setName(NAME_EDEFAULT);
				return;
		}
		super.eUnset(featureID);
	}

	/**
	 * <!-- begin-user-doc -->
	 * Reports whether the feature differs from its default value.
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public boolean eIsSet(int featureID) {
		switch (featureID) {
			case ddPackage.TSCOPE_EVENTS__NAME:
				return NAME_EDEFAULT == null ? name != null : !NAME_EDEFAULT.equals(name);
		}
		return super.eIsSet(featureID);
	}

	/**
	 * <!-- begin-user-doc -->
	 * Debug-friendly string; proxies fall back to the superclass rendering.
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public String toString() {
		if (eIsProxy()) return super.toString();

		StringBuffer result = new StringBuffer(super.toString());
		result.append(" (name: ");
		result.append(name);
		result.append(')');
		return result.toString();
	}

} //TScopeEventsImpl
apache-2.0
quyixia/springside4
examples/boot-api/src/main/java/org/springside/examples/bootapi/domain/Account.java
716
package org.springside.examples.bootapi.domain;

import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;

import org.apache.commons.lang3.builder.ToStringBuilder;

// Marks this class as a JPA entity (mapped to a database table).
@Entity
public class Account {

	// JPA primary key; the value is generated by the database (identity column strategy).
	@Id
	@GeneratedValue(strategy = GenerationType.IDENTITY)
	public Long id;

	// Account e-mail address.
	public String email;

	// Account display name.
	public String name;

	// Hashed password (per the field name; the hashing scheme is not visible here).
	public String hashPassword;

	// No-arg constructor required by JPA.
	public Account() {
	}

	// Convenience constructor for when only the identifier is known.
	public Account(Long id) {
		this.id = id;
	}

	@Override
	public String toString() {
		// Reflection-based dump of all fields; intended for debugging/logging.
		return ToStringBuilder.reflectionToString(this);
	}
}
apache-2.0
GlenRSmith/elasticsearch
client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/DeactivateWatchRequestTests.java
1132
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */
package org.elasticsearch.client.watcher;

import org.elasticsearch.test.ESTestCase;

import static org.hamcrest.Matchers.is;

/**
 * Unit tests for the argument validation performed by the
 * {@link DeactivateWatchRequest} constructor.
 */
public class DeactivateWatchRequestTests extends ESTestCase {

    public void testNullId() {
        // A null watch id must be rejected immediately.
        NullPointerException e =
                expectThrows(NullPointerException.class, () -> new DeactivateWatchRequest(null));
        assertNotNull(e);
        assertThat(e.getMessage(), is("watch id is missing"));
    }

    public void testInvalidId() {
        // Whitespace is not allowed anywhere within a watch id.
        IllegalArgumentException e =
                expectThrows(IllegalArgumentException.class, () -> new DeactivateWatchRequest("Watch id has spaces"));
        assertNotNull(e);
        assertThat(e.getMessage(), is("watch id contains whitespace"));
    }
}
apache-2.0
fnkhan/New
src/main/java/net/onrc/openvirtex/exceptions/InvalidHostException.java
1168
/******************************************************************************* * Copyright 2014 Open Networking Laboratory * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package net.onrc.openvirtex.exceptions; public class InvalidHostException extends IllegalArgumentException { private static final long serialVersionUID = 6957434977838246116L; public InvalidHostException() { super(); } public InvalidHostException(final String msg) { super(msg); } public InvalidHostException(final Throwable msg) { super(msg); } }
apache-2.0
tripodsan/jackrabbit
jackrabbit-jcr2spi/src/main/java/org/apache/jackrabbit/jcr2spi/ManagerProvider.java
3479
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.jackrabbit.jcr2spi; import org.apache.jackrabbit.jcr2spi.security.authorization.AccessControlProvider; import org.apache.jackrabbit.spi.commons.namespace.NamespaceResolver; import org.apache.jackrabbit.jcr2spi.hierarchy.HierarchyManager; import org.apache.jackrabbit.jcr2spi.security.AccessManager; import org.apache.jackrabbit.jcr2spi.lock.LockStateManager; import org.apache.jackrabbit.jcr2spi.version.VersionManager; import org.apache.jackrabbit.jcr2spi.nodetype.ItemDefinitionProvider; import org.apache.jackrabbit.jcr2spi.nodetype.EffectiveNodeTypeProvider; import org.apache.jackrabbit.jcr2spi.nodetype.NodeTypeDefinitionProvider; import org.apache.jackrabbit.spi.QValueFactory; import org.apache.jackrabbit.spi.commons.conversion.NameResolver; import javax.jcr.Session; import javax.jcr.ValueFactory; import javax.jcr.RepositoryException; /** * <code>ManagerProvider</code>... 
*/ public interface ManagerProvider { public org.apache.jackrabbit.spi.commons.conversion.NamePathResolver getNamePathResolver(); public NameResolver getNameResolver(); public org.apache.jackrabbit.spi.commons.conversion.PathResolver getPathResolver(); public NamespaceResolver getNamespaceResolver(); public HierarchyManager getHierarchyManager(); public AccessManager getAccessManager(); /** * Returns the <code>LockStateManager</code> associated with this * <code>ManagerProvider</code>. * * @return the <code>LockStateManager</code> associated with this * <code>ManagerProvider</code> */ public LockStateManager getLockStateManager(); /** * Returns the <code>VersionManager</code> associated with this * <code>ManagerProvider</code>. * * @return the <code>VersionManager</code> associated with this * <code>ManagerProvider</code> */ public VersionManager getVersionStateManager(); public ItemDefinitionProvider getItemDefinitionProvider(); public NodeTypeDefinitionProvider getNodeTypeDefinitionProvider(); public EffectiveNodeTypeProvider getEffectiveNodeTypeProvider(); /** * Same as {@link Session#getValueFactory()} but omits the check, if this repository * is really level 2 compliant. Therefore, this method may be used for * internal functionality only, that require creation and conversion of * JCR values. * * @return * @throws RepositoryException */ public ValueFactory getJcrValueFactory() throws RepositoryException; public QValueFactory getQValueFactory() throws RepositoryException; public AccessControlProvider getAccessControlProvider() throws RepositoryException; }
apache-2.0
alexksikes/elasticsearch
src/main/java/org/elasticsearch/index/query/HasChildFilterBuilder.java
3210
/* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch licenses this file to you under * the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.elasticsearch.index.query; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; /** * */ public class HasChildFilterBuilder extends BaseFilterBuilder { private final FilterBuilder filterBuilder; private final QueryBuilder queryBuilder; private String childType; private String filterName; private Integer shortCircuitCutoff; public HasChildFilterBuilder(String type, QueryBuilder queryBuilder) { this.childType = type; this.queryBuilder = queryBuilder; this.filterBuilder = null; } public HasChildFilterBuilder(String type, FilterBuilder filterBuilder) { this.childType = type; this.queryBuilder = null; this.filterBuilder = filterBuilder; } /** * Sets the filter name for the filter that can be used when searching for matched_filters per hit. */ public HasChildFilterBuilder filterName(String filterName) { this.filterName = filterName; return this; } /** * This is a noop since has_child can't be cached. */ public HasChildFilterBuilder cache(boolean cache) { return this; } /** * This is a noop since has_child can't be cached. 
*/ public HasChildFilterBuilder cacheKey(String cacheKey) { return this; } /** * Configures at what cut off point only to evaluate parent documents that contain the matching parent id terms * instead of evaluating all parent docs. */ public HasChildFilterBuilder setShortCircuitCutoff(int shortCircuitCutoff) { this.shortCircuitCutoff = shortCircuitCutoff; return this; } @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(HasChildFilterParser.NAME); if (queryBuilder != null) { builder.field("query"); queryBuilder.toXContent(builder, params); } else if (filterBuilder != null) { builder.field("filter"); filterBuilder.toXContent(builder, params); } builder.field("child_type", childType); if (filterName != null) { builder.field("_name", filterName); } if (shortCircuitCutoff != null) { builder.field("short_circuit_cutoff", shortCircuitCutoff); } builder.endObject(); } }
apache-2.0
yummy222/tess-two
tess-two/src/com/googlecode/tesseract/android/TessPdfRenderer.java
2320
/*
 * Copyright 2015 Robert Theis
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package com.googlecode.tesseract.android;

/**
 * Java representation of a native Tesseract PDF renderer.
 */
public class TessPdfRenderer {

    /**
     * Pointer to the native renderer object; used by the native implementation
     * of the class.
     */
    private final long mNativePdfRenderer;

    /** True once {@link #recycle()} has released the native object. */
    private boolean mRecycled;

    static {
        System.loadLibrary("pngt");
        System.loadLibrary("lept");
        System.loadLibrary("tess");
    }

    /**
     * Constructs an instance of a Tesseract PDF renderer.
     *
     * When the instance of TessPdfRenderer is no longer needed, its
     * {@link #recycle} method must be invoked to dispose of it.
     *
     * @param baseApi API instance to use for performing OCR
     * @param outputPath Full path to write the resulting PDF to, not
     *                   including the ".pdf" extension
     */
    public TessPdfRenderer(TessBaseAPI baseApi, String outputPath) {
        this.mNativePdfRenderer = nativeCreate(baseApi, outputPath);
        mRecycled = false;
    }

    /**
     * @return A pointer to the native TessPdfRenderer object.
     * @throws IllegalStateException if this instance has been recycled
     */
    public long getNativePdfRenderer() {
        if (mRecycled) {
            throw new IllegalStateException("TessPdfRenderer has already been recycled");
        }
        return mNativePdfRenderer;
    }

    /**
     * Releases resources and frees any memory associated with this
     * TessPdfRenderer object. Must be called on object destruction.
     *
     * Idempotent: a second call is a no-op. (Previously a repeated call would
     * pass the already-freed native pointer to nativeRecycle again — a native
     * double-free.)
     */
    public void recycle() {
        if (!mRecycled) {
            nativeRecycle(mNativePdfRenderer);
            mRecycled = true;
        }
    }

    private static native long nativeCreate(TessBaseAPI tessBaseAPI, String outputPath);

    private static native void nativeRecycle(long nativePointer);
}
apache-2.0
cbarrin/EAGERFloodlight
src/main/java/net/floodlightcontroller/linkdiscovery/internal/LinkDiscoveryManager.java
73310
/** * Copyright 2011, Big Switch Networks, Inc. * Originally created by David Erickson, Stanford University * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. You may obtain * a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. **/ package net.floodlightcontroller.linkdiscovery.internal; import java.net.NetworkInterface; import java.net.SocketException; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Date; import java.util.Enumeration; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.ReentrantReadWriteLock; import javax.annotation.Nonnull; import net.floodlightcontroller.core.FloodlightContext; import net.floodlightcontroller.core.HAListenerTypeMarker; import net.floodlightcontroller.core.HARole; import net.floodlightcontroller.core.IFloodlightProviderService; import net.floodlightcontroller.core.IShutdownService; import net.floodlightcontroller.core.PortChangeType; import net.floodlightcontroller.core.IHAListener; import net.floodlightcontroller.core.IInfoProvider; import net.floodlightcontroller.core.IOFMessageListener; import 
net.floodlightcontroller.core.IOFSwitch; import net.floodlightcontroller.core.IOFSwitchListener; import net.floodlightcontroller.core.internal.IOFSwitchService; import net.floodlightcontroller.core.module.FloodlightModuleContext; import net.floodlightcontroller.core.module.FloodlightModuleException; import net.floodlightcontroller.core.module.IFloodlightModule; import net.floodlightcontroller.core.module.IFloodlightService; import net.floodlightcontroller.core.types.NodePortTuple; import net.floodlightcontroller.core.util.SingletonTask; import net.floodlightcontroller.debugcounter.IDebugCounter; import net.floodlightcontroller.debugcounter.IDebugCounterService; import net.floodlightcontroller.linkdiscovery.ILinkDiscovery; import net.floodlightcontroller.linkdiscovery.ILinkDiscovery.LDUpdate; import net.floodlightcontroller.linkdiscovery.ILinkDiscovery.LinkType; import net.floodlightcontroller.linkdiscovery.ILinkDiscovery.SwitchType; import net.floodlightcontroller.linkdiscovery.ILinkDiscovery.UpdateOperation; import net.floodlightcontroller.linkdiscovery.ILinkDiscoveryListener; import net.floodlightcontroller.linkdiscovery.ILinkDiscoveryService; import net.floodlightcontroller.linkdiscovery.Link; import net.floodlightcontroller.linkdiscovery.web.LinkDiscoveryWebRoutable; import net.floodlightcontroller.packet.BSN; import net.floodlightcontroller.packet.Ethernet; import net.floodlightcontroller.packet.LLDP; import net.floodlightcontroller.packet.LLDPTLV; import net.floodlightcontroller.restserver.IRestApiService; import net.floodlightcontroller.storage.IResultSet; import net.floodlightcontroller.storage.IStorageSourceListener; import net.floodlightcontroller.storage.IStorageSourceService; import net.floodlightcontroller.storage.OperatorPredicate; import net.floodlightcontroller.storage.StorageException; import net.floodlightcontroller.threadpool.IThreadPoolService; import net.floodlightcontroller.util.OFMessageUtils; import 
org.projectfloodlight.openflow.protocol.OFControllerRole; import org.projectfloodlight.openflow.protocol.OFMessage; import org.projectfloodlight.openflow.protocol.OFPacketIn; import org.projectfloodlight.openflow.protocol.OFPacketOut; import org.projectfloodlight.openflow.protocol.OFPortDesc; import org.projectfloodlight.openflow.protocol.OFPortState; import org.projectfloodlight.openflow.protocol.OFVersion; import org.projectfloodlight.openflow.types.DatapathId; import org.projectfloodlight.openflow.types.EthType; import org.projectfloodlight.openflow.types.MacAddress; import org.projectfloodlight.openflow.types.OFBufferId; import org.projectfloodlight.openflow.types.OFPort; import org.projectfloodlight.openflow.types.U64; import org.projectfloodlight.openflow.protocol.OFType; import org.projectfloodlight.openflow.protocol.action.OFAction; import org.projectfloodlight.openflow.protocol.match.MatchField; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * This class sends out LLDP messages containing the sending switch's datapath * id as well as the outgoing port number. Received LLrescDP messages that match * a known switch cause a new LinkTuple to be created according to the invariant * rules listed below. This new LinkTuple is also passed to routing if it exists * to trigger updates. This class also handles removing links that are * associated to switch ports that go down, and switches that are disconnected. 
* Invariants: -portLinks and switchLinks will not contain empty Sets outside of * critical sections -portLinks contains LinkTuples where one of the src or dst * SwitchPortTuple matches the map key -switchLinks contains LinkTuples where * one of the src or dst SwitchPortTuple's id matches the switch id -Each * LinkTuple will be indexed into switchLinks for both src.id and dst.id, and * portLinks for each src and dst -The updates queue is only added to from * within a held write lock * * @edited Ryan Izard, rizard@g.clemson.edu, ryan.izard@bigswitch.com */ public class LinkDiscoveryManager implements IOFMessageListener, IOFSwitchListener, IStorageSourceListener, ILinkDiscoveryService, IFloodlightModule, IInfoProvider { protected static final Logger log = LoggerFactory.getLogger(LinkDiscoveryManager.class); public static final String MODULE_NAME = "linkdiscovery"; // Names of table/fields for links in the storage API private static final String TOPOLOGY_TABLE_NAME = "controller_topologyconfig"; private static final String TOPOLOGY_ID = "id"; private static final String TOPOLOGY_AUTOPORTFAST = "autoportfast"; private static final String LINK_TABLE_NAME = "controller_link"; private static final String LINK_ID = "id"; private static final String LINK_SRC_SWITCH = "src_switch_id"; private static final String LINK_SRC_PORT = "src_port"; private static final String LINK_DST_SWITCH = "dst_switch_id"; private static final String LINK_DST_PORT = "dst_port"; private static final String LINK_VALID_TIME = "valid_time"; private static final String LINK_TYPE = "link_type"; private static final String SWITCH_CONFIG_TABLE_NAME = "controller_switchconfig"; protected IFloodlightProviderService floodlightProviderService; protected IOFSwitchService switchService; protected IStorageSourceService storageSourceService; protected IThreadPoolService threadPoolService; protected IRestApiService restApiService; protected IDebugCounterService debugCounterService; protected IShutdownService 
shutdownService; // Role protected HARole role; // LLDP and BDDP fields private static final byte[] LLDP_STANDARD_DST_MAC_STRING = MacAddress.of("01:80:c2:00:00:0e").getBytes(); private static final long LINK_LOCAL_MASK = 0xfffffffffff0L; private static final long LINK_LOCAL_VALUE = 0x0180c2000000L; protected static int EVENT_HISTORY_SIZE = 1024; // in seconds // BigSwitch OUI is 5C:16:C7, so 5D:16:C7 is the multicast version // private static final String LLDP_BSN_DST_MAC_STRING = // "5d:16:c7:00:00:01"; private static final String LLDP_BSN_DST_MAC_STRING = "ff:ff:ff:ff:ff:ff"; // Direction TLVs are used to indicate if the LLDPs were sent // periodically or in response to a recieved LLDP private static final byte TLV_DIRECTION_TYPE = 0x73; private static final short TLV_DIRECTION_LENGTH = 1; // 1 byte private static final byte TLV_DIRECTION_VALUE_FORWARD[] = { 0x01 }; private static final byte TLV_DIRECTION_VALUE_REVERSE[] = { 0x02 }; private static final LLDPTLV forwardTLV = new LLDPTLV().setType(TLV_DIRECTION_TYPE) .setLength(TLV_DIRECTION_LENGTH) .setValue(TLV_DIRECTION_VALUE_FORWARD); private static final LLDPTLV reverseTLV = new LLDPTLV().setType(TLV_DIRECTION_TYPE) .setLength(TLV_DIRECTION_LENGTH) .setValue(TLV_DIRECTION_VALUE_REVERSE); // Link discovery task details. protected SingletonTask discoveryTask; protected final int DISCOVERY_TASK_INTERVAL = 1; protected final int LINK_TIMEOUT = 35; // timeout as part of LLDP process. protected final int LLDP_TO_ALL_INTERVAL = 15; // 15 seconds. protected long lldpClock = 0; // This value is intentionally kept higher than LLDP_TO_ALL_INTERVAL. // If we want to identify link failures faster, we could decrease this // value to a small number, say 1 or 2 sec. 
protected final int LLDP_TO_KNOWN_INTERVAL = 20; // LLDP frequency for known // links protected LLDPTLV controllerTLV; protected ReentrantReadWriteLock lock; int lldpTimeCount = 0; /* * Latency tracking */ protected static int LATENCY_HISTORY_SIZE = 10; protected static double LATENCY_UPDATE_THRESHOLD = 0.50; /** * Flag to indicate if automatic port fast is enabled or not. Default is set * to false -- Initialized in the init method as well. */ protected boolean AUTOPORTFAST_DEFAULT = false; protected boolean autoPortFastFeature = AUTOPORTFAST_DEFAULT; /** * Map from link to the most recent time it was verified functioning */ protected Map<Link, LinkInfo> links; /** * Map from switch id to a set of all links with it as an endpoint */ protected Map<DatapathId, Set<Link>> switchLinks; /** * Map from a id:port to the set of links containing it as an endpoint */ protected Map<NodePortTuple, Set<Link>> portLinks; protected volatile boolean shuttingDown = false; /* * topology aware components are called in the order they were added to the * the array */ protected ArrayList<ILinkDiscoveryListener> linkDiscoveryAware; protected BlockingQueue<LDUpdate> updates; protected Thread updatesThread; /** * List of ports through which LLDP/BDDPs are not sent. */ protected Set<NodePortTuple> suppressLinkDiscovery; /** * A list of ports that are quarantined for discovering links through them. * Data traffic from these ports are not allowed until the ports are * released from quarantine. */ protected LinkedBlockingQueue<NodePortTuple> quarantineQueue; protected LinkedBlockingQueue<NodePortTuple> maintenanceQueue; protected LinkedBlockingQueue<NodePortTuple> toRemoveFromQuarantineQueue; protected LinkedBlockingQueue<NodePortTuple> toRemoveFromMaintenanceQueue; /** * Quarantine task */ protected SingletonTask bddpTask; protected final int BDDP_TASK_INTERVAL = 100; // 100 ms. 
protected final int BDDP_TASK_SIZE = 10; // # of ports per iteration private class MACRange { MacAddress baseMAC; int ignoreBits; } protected Set<MACRange> ignoreMACSet; private IHAListener haListener; /** * Debug Counters */ private IDebugCounter ctrQuarantineDrops; private IDebugCounter ctrIgnoreSrcMacDrops; private IDebugCounter ctrIncoming; private IDebugCounter ctrLinkLocalDrops; private IDebugCounter ctrLldpEol; private final String PACKAGE = LinkDiscoveryManager.class.getPackage().getName(); //********************* // ILinkDiscoveryService //********************* @Override public OFPacketOut generateLLDPMessage(IOFSwitch iofSwitch, OFPort port, boolean isStandard, boolean isReverse) { OFPortDesc ofpPort = iofSwitch.getPort(port); if (log.isTraceEnabled()) { log.trace("Sending LLDP packet out of swich: {}, port: {}, reverse: {}", new Object[] {iofSwitch.getId().toString(), port.toString(), Boolean.toString(isReverse)}); } // using "nearest customer bridge" MAC address for broadest possible // propagation // through provider and TPMR bridges (see IEEE 802.1AB-2009 and // 802.1Q-2011), // in particular the Linux bridge which behaves mostly like a provider // bridge byte[] chassisId = new byte[] { 4, 0, 0, 0, 0, 0, 0 }; // filled in // later byte[] portId = new byte[] { 2, 0, 0 }; // filled in later byte[] ttlValue = new byte[] { 0, 0x78 }; // OpenFlow OUI - 00-26-E1-00 byte[] dpidTLVValue = new byte[] { 0x0, 0x26, (byte) 0xe1, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; LLDPTLV dpidTLV = new LLDPTLV().setType((byte) 127) .setLength((short) dpidTLVValue.length) .setValue(dpidTLVValue); byte[] dpidArray = new byte[8]; ByteBuffer dpidBB = ByteBuffer.wrap(dpidArray); ByteBuffer portBB = ByteBuffer.wrap(portId, 1, 2); DatapathId dpid = iofSwitch.getId(); dpidBB.putLong(dpid.getLong()); // set the chassis id's value to last 6 bytes of dpid System.arraycopy(dpidArray, 2, chassisId, 1, 6); // set the optional tlv to the full dpid System.arraycopy(dpidArray, 0, dpidTLVValue, 4, 8); // 
TODO: Consider remove this block of code. // It's evil to overwrite port object. The the old code always // overwrote mac address, we now only overwrite zero macs and // log a warning, mostly for paranoia. byte[] srcMac = ofpPort.getHwAddr().getBytes(); byte[] zeroMac = { 0, 0, 0, 0, 0, 0 }; if (Arrays.equals(srcMac, zeroMac)) { log.warn("Port {}/{} has zero hardware address" + "overwrite with lower 6 bytes of dpid", dpid.toString(), ofpPort.getPortNo().getPortNumber()); System.arraycopy(dpidArray, 2, srcMac, 0, 6); } // set the portId to the outgoing port portBB.putShort(port.getShortPortNumber()); LLDP lldp = new LLDP(); lldp.setChassisId(new LLDPTLV().setType((byte) 1) .setLength((short) chassisId.length) .setValue(chassisId)); lldp.setPortId(new LLDPTLV().setType((byte) 2) .setLength((short) portId.length) .setValue(portId)); lldp.setTtl(new LLDPTLV().setType((byte) 3) .setLength((short) ttlValue.length) .setValue(ttlValue)); lldp.getOptionalTLVList().add(dpidTLV); // Add the controller identifier to the TLV value. lldp.getOptionalTLVList().add(controllerTLV); if (isReverse) { lldp.getOptionalTLVList().add(reverseTLV); } else { lldp.getOptionalTLVList().add(forwardTLV); } /* * Introduce a new TLV for med-granularity link latency detection. * If same controller, can assume system clock is the same, but * cannot guarantee processing time or account for network congestion. * * Need to include our OpenFlow OUI - 00-26-E1-01 (note 01; 00 is DPID); * save last 8 bytes for long (time in ms). * * Note Long.SIZE is in bits (64). 
*/ long time = System.currentTimeMillis(); long swLatency = iofSwitch.getLatency().getValue(); if (log.isTraceEnabled()) { log.trace("SETTING LLDP LATENCY TLV: Current Time {}; {} control plane latency {}; sum {}", new Object[] { time, iofSwitch.getId(), swLatency, time + swLatency }); } byte[] timestampTLVValue = ByteBuffer.allocate(Long.SIZE / 8 + 4) .put((byte) 0x00) .put((byte) 0x26) .put((byte) 0xe1) .put((byte) 0x01) /* 0x01 is what we'll use to differentiate DPID (0x00) from time (0x01) */ .putLong(time + swLatency /* account for our switch's one-way latency */) .array(); LLDPTLV timestampTLV = new LLDPTLV() .setType((byte) 127) .setLength((short) timestampTLVValue.length) .setValue(timestampTLVValue); /* Now add TLV to our LLDP packet */ lldp.getOptionalTLVList().add(timestampTLV); Ethernet ethernet; if (isStandard) { ethernet = new Ethernet().setSourceMACAddress(ofpPort.getHwAddr()) .setDestinationMACAddress(LLDP_STANDARD_DST_MAC_STRING) .setEtherType(EthType.LLDP); ethernet.setPayload(lldp); } else { BSN bsn = new BSN(BSN.BSN_TYPE_BDDP); bsn.setPayload(lldp); ethernet = new Ethernet().setSourceMACAddress(ofpPort.getHwAddr()) .setDestinationMACAddress(LLDP_BSN_DST_MAC_STRING) .setEtherType(EthType.of(Ethernet.TYPE_BSN & 0xffff)); /* treat as unsigned */ ethernet.setPayload(bsn); } // serialize and wrap in a packet out byte[] data = ethernet.serialize(); OFPacketOut.Builder pob = iofSwitch.getOFFactory().buildPacketOut() .setBufferId(OFBufferId.NO_BUFFER) .setActions(getDiscoveryActions(iofSwitch, port)) .setData(data); OFMessageUtils.setInPort(pob, OFPort.CONTROLLER); log.debug("{}", pob.build()); return pob.build(); } /** * Get the LLDP sending period in seconds. * * @return LLDP sending period in seconds. 
*/ public int getLldpFrequency() { return LLDP_TO_KNOWN_INTERVAL; } /** * Get the LLDP timeout value in seconds * * @return LLDP timeout value in seconds */ public int getLldpTimeout() { return LINK_TIMEOUT; } @Override public Map<NodePortTuple, Set<Link>> getPortLinks() { return portLinks; } @Override public Set<NodePortTuple> getSuppressLLDPsInfo() { return suppressLinkDiscovery; } /** * Add a switch port to the suppressed LLDP list. Remove any known links on * the switch port. */ @Override public void AddToSuppressLLDPs(DatapathId sw, OFPort port) { NodePortTuple npt = new NodePortTuple(sw, port); this.suppressLinkDiscovery.add(npt); deleteLinksOnPort(npt, "LLDP suppressed."); } /** * Remove a switch port from the suppressed LLDP list. Discover links on * that switchport. */ @Override public void RemoveFromSuppressLLDPs(DatapathId sw, OFPort port) { NodePortTuple npt = new NodePortTuple(sw, port); this.suppressLinkDiscovery.remove(npt); discover(npt); } public boolean isShuttingDown() { return shuttingDown; } @Override public boolean isTunnelPort(DatapathId sw, OFPort port) { return false; } @Override public ILinkDiscovery.LinkType getLinkType(Link lt, LinkInfo info) { if (info.getUnicastValidTime() != null) { return ILinkDiscovery.LinkType.DIRECT_LINK; } else if (info.getMulticastValidTime() != null) { return ILinkDiscovery.LinkType.MULTIHOP_LINK; } return ILinkDiscovery.LinkType.INVALID_LINK; } @Override public Set<OFPort> getQuarantinedPorts(DatapathId sw) { Set<OFPort> qPorts = new HashSet<OFPort>(); Iterator<NodePortTuple> iter = quarantineQueue.iterator(); while (iter.hasNext()) { NodePortTuple npt = iter.next(); if (npt.getNodeId().equals(sw)) { qPorts.add(npt.getPortId()); } } return qPorts; } @Override public Map<DatapathId, Set<Link>> getSwitchLinks() { return this.switchLinks; } @Override public void addMACToIgnoreList(MacAddress mac, int ignoreBits) { MACRange range = new MACRange(); range.baseMAC = mac; range.ignoreBits = ignoreBits; 
ignoreMACSet.add(range); } @Override public boolean isAutoPortFastFeature() { return autoPortFastFeature; } @Override public void setAutoPortFastFeature(boolean autoPortFastFeature) { this.autoPortFastFeature = autoPortFastFeature; } @Override public void addListener(ILinkDiscoveryListener listener) { linkDiscoveryAware.add(listener); } @Override public Map<Link, LinkInfo> getLinks() { lock.readLock().lock(); Map<Link, LinkInfo> result; try { result = new HashMap<Link, LinkInfo>(links); } finally { lock.readLock().unlock(); } return result; } @Override public LinkInfo getLinkInfo(Link link) { lock.readLock().lock(); LinkInfo linkInfo = links.get(link); LinkInfo retLinkInfo = null; if (linkInfo != null) { retLinkInfo = new LinkInfo(linkInfo); } lock.readLock().unlock(); return retLinkInfo; } @Override public String getName() { return MODULE_NAME; } //********************* // OFMessage Listener //********************* @Override public Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx) { switch (msg.getType()) { case PACKET_IN: ctrIncoming.increment(); return this.handlePacketIn(sw.getId(), (OFPacketIn) msg, cntx); default: break; } return Command.CONTINUE; } @Override public boolean isCallbackOrderingPrereq(OFType type, String name) { return false; } @Override public boolean isCallbackOrderingPostreq(OFType type, String name) { return false; } //*********************************** // Internal Methods - Packet-in Processing Related //*********************************** protected Command handlePacketIn(DatapathId sw, OFPacketIn pi, FloodlightContext cntx) { Ethernet eth = IFloodlightProviderService.bcStore.get(cntx, IFloodlightProviderService.CONTEXT_PI_PAYLOAD); OFPort inPort = (pi.getVersion().compareTo(OFVersion.OF_12) < 0 ? 
pi.getInPort() : pi.getMatch().get(MatchField.IN_PORT)); if (eth.getPayload() instanceof BSN) { BSN bsn = (BSN) eth.getPayload(); if (bsn == null) return Command.STOP; if (bsn.getPayload() == null) return Command.STOP; // It could be a packet other than BSN LLDP, therefore // continue with the regular processing. if (bsn.getPayload() instanceof LLDP == false) return Command.CONTINUE; return handleLldp((LLDP) bsn.getPayload(), sw, inPort, false, cntx); } else if (eth.getPayload() instanceof LLDP) { return handleLldp((LLDP) eth.getPayload(), sw, inPort, true, cntx); } else if (eth.getEtherType().getValue() < 1536 && eth.getEtherType().getValue() >= 17) { long destMac = eth.getDestinationMACAddress().getLong(); if ((destMac & LINK_LOCAL_MASK) == LINK_LOCAL_VALUE) { ctrLinkLocalDrops.increment(); if (log.isTraceEnabled()) { log.trace("Ignoring packet addressed to 802.1D/Q " + "reserved address."); } return Command.STOP; } } else if (eth.getEtherType().getValue() < 17) { log.error("Received invalid ethertype of {}.", eth.getEtherType()); return Command.STOP; } if (ignorePacketInFromSource(eth.getSourceMACAddress())) { ctrIgnoreSrcMacDrops.increment(); return Command.STOP; } // If packet-in is from a quarantine port, stop processing. 
NodePortTuple npt = new NodePortTuple(sw, inPort); if (quarantineQueue.contains(npt)) { ctrQuarantineDrops.increment(); return Command.STOP; } return Command.CONTINUE; } private boolean ignorePacketInFromSource(MacAddress srcMAC) { Iterator<MACRange> it = ignoreMACSet.iterator(); while (it.hasNext()) { MACRange range = it.next(); long mask = ~0; if (range.ignoreBits >= 0 && range.ignoreBits <= 48) { mask = mask << range.ignoreBits; if ((range.baseMAC.getLong() & mask) == (srcMAC.getLong() & mask)) { return true; } } } return false; } private Command handleLldp(LLDP lldp, DatapathId sw, OFPort inPort, boolean isStandard, FloodlightContext cntx) { // If LLDP is suppressed on this port, ignore received packet as well IOFSwitch iofSwitch = switchService.getSwitch(sw); log.debug("Received LLDP packet on sw {}, port {}", sw, inPort); if (!isIncomingDiscoveryAllowed(sw, inPort, isStandard)) return Command.STOP; // If this is a malformed LLDP exit if (lldp.getPortId() == null || lldp.getPortId().getLength() != 3) { return Command.STOP; } long myId = ByteBuffer.wrap(controllerTLV.getValue()).getLong(); long otherId = 0; boolean myLLDP = false; Boolean isReverse = null; ByteBuffer portBB = ByteBuffer.wrap(lldp.getPortId().getValue()); portBB.position(1); OFPort remotePort = OFPort.of(portBB.getShort()); IOFSwitch remoteSwitch = null; long timestamp = 0; // Verify this LLDP packet matches what we're looking for for (LLDPTLV lldptlv : lldp.getOptionalTLVList()) { if (lldptlv.getType() == 127 && lldptlv.getLength() == 12 && lldptlv.getValue()[0] == 0x0 && lldptlv.getValue()[1] == 0x26 && lldptlv.getValue()[2] == (byte) 0xe1 && lldptlv.getValue()[3] == 0x0) { ByteBuffer dpidBB = ByteBuffer.wrap(lldptlv.getValue()); remoteSwitch = switchService.getSwitch(DatapathId.of(dpidBB.getLong(4))); } else if (lldptlv.getType() == 127 && lldptlv.getLength() == 12 && lldptlv.getValue()[0] == 0x0 && lldptlv.getValue()[1] == 0x26 && lldptlv.getValue()[2] == (byte) 0xe1 && lldptlv.getValue()[3] 
== 0x01) { /* 0x01 for timestamp */ ByteBuffer tsBB = ByteBuffer.wrap(lldptlv.getValue()); /* skip OpenFlow OUI (4 bytes above) */ long swLatency = iofSwitch.getLatency().getValue(); timestamp = tsBB.getLong(4); /* include the RX switch latency to "subtract" it */ if (log.isTraceEnabled()) { log.trace("RECEIVED LLDP LATENCY TLV: Got timestamp of {}; Switch {} latency of {}", new Object[] { timestamp, iofSwitch.getId(), iofSwitch.getLatency().getValue() }); } timestamp = timestamp + swLatency; } else if (lldptlv.getType() == 12 && lldptlv.getLength() == 8) { otherId = ByteBuffer.wrap(lldptlv.getValue()).getLong(); if (myId == otherId) myLLDP = true; } else if (lldptlv.getType() == TLV_DIRECTION_TYPE && lldptlv.getLength() == TLV_DIRECTION_LENGTH) { if (lldptlv.getValue()[0] == TLV_DIRECTION_VALUE_FORWARD[0]) isReverse = false; else if (lldptlv.getValue()[0] == TLV_DIRECTION_VALUE_REVERSE[0]) isReverse = true; } } if (myLLDP == false) { // This is not the LLDP sent by this controller. // If the LLDP message has multicast bit set, then we need to // broadcast the packet as a regular packet (after checking IDs) if (isStandard) { if (log.isTraceEnabled()) { log.trace("Got a standard LLDP=[{}] that was not sent by" + " this controller. 
Not fowarding it.", lldp.toString()); } return Command.STOP; } else if (myId < otherId) { if (log.isTraceEnabled()) { log.trace("Getting BDDP packets from a different controller" + "and letting it go through normal processing chain."); } return Command.CONTINUE; } return Command.STOP; } if (remoteSwitch == null) { // Ignore LLDPs not generated by Floodlight, or from a switch that // has recently // disconnected, or from a switch connected to another Floodlight // instance if (log.isTraceEnabled()) { log.trace("Received LLDP from remote switch not connected to the controller"); } return Command.STOP; } if (!remoteSwitch.portEnabled(remotePort)) { if (log.isTraceEnabled()) { log.trace("Ignoring link with disabled source port: switch {} port {} {}", new Object[] { remoteSwitch.getId().toString(), remotePort, remoteSwitch.getPort(remotePort)}); } return Command.STOP; } if (suppressLinkDiscovery.contains(new NodePortTuple( remoteSwitch.getId(), remotePort))) { if (log.isTraceEnabled()) { log.trace("Ignoring link with suppressed src port: switch {} port {} {}", new Object[] { remoteSwitch.getId().toString(), remotePort, remoteSwitch.getPort(remotePort)}); } return Command.STOP; } if (!iofSwitch.portEnabled(inPort)) { if (log.isTraceEnabled()) { log.trace("Ignoring link with disabled dest port: switch {} port {} {}", new Object[] { sw.toString(), inPort.getPortNumber(), iofSwitch.getPort(inPort).getPortNo().getPortNumber()}); } return Command.STOP; } // Store the time of update to this link, and push it out to // routingEngine long time = System.currentTimeMillis(); U64 latency = (timestamp != 0 && (time - timestamp) > 0) ? 
U64.of(time - timestamp) : U64.ZERO; if (log.isTraceEnabled()) { log.trace("COMPUTING FINAL DATAPLANE LATENCY: Current time {}; Dataplane+{} latency {}; Overall latency from {} to {} is {}", new Object[] { time, iofSwitch.getId(), timestamp, remoteSwitch.getId(), iofSwitch.getId(), String.valueOf(latency.getValue()) }); } Link lt = new Link(remoteSwitch.getId(), remotePort, iofSwitch.getId(), inPort, latency); if (!isLinkAllowed(lt.getSrc(), lt.getSrcPort(), lt.getDst(), lt.getDstPort())) return Command.STOP; // Continue only if link is allowed. Date lastLldpTime = null; Date lastBddpTime = null; Date firstSeenTime = new Date(System.currentTimeMillis()); if (isStandard) { lastLldpTime = new Date(firstSeenTime.getTime()); } else { lastBddpTime = new Date(firstSeenTime.getTime()); } LinkInfo newLinkInfo = new LinkInfo(firstSeenTime, lastLldpTime, lastBddpTime); addOrUpdateLink(lt, newLinkInfo); // Check if reverse link exists. // If it doesn't exist and if the forward link was seen // first seen within a small interval, send probe on the // reverse link. newLinkInfo = links.get(lt); if (newLinkInfo != null && isStandard && isReverse == false) { Link reverseLink = new Link(lt.getDst(), lt.getDstPort(), lt.getSrc(), lt.getSrcPort(), U64.ZERO); /* latency not used; not important what the value is, since it's intentionally not in equals() */ LinkInfo reverseInfo = links.get(reverseLink); if (reverseInfo == null) { // the reverse link does not exist. if (newLinkInfo.getFirstSeenTime().getTime() > System.currentTimeMillis() - LINK_TIMEOUT) { log.debug("Sending reverse LLDP for link {}", lt); this.sendDiscoveryMessage(lt.getDst(), lt.getDstPort(), isStandard, true); } } } // If the received packet is a BDDP packet, then create a reverse BDDP // link as well. if (!isStandard) { Link reverseLink = new Link(lt.getDst(), lt.getDstPort(), lt.getSrc(), lt.getSrcPort(), latency); // srcPortState and dstPort state are reversed. 
LinkInfo reverseInfo = new LinkInfo(firstSeenTime, lastLldpTime, lastBddpTime); addOrUpdateLink(reverseLink, reverseInfo); } // Queue removal of the node ports from the quarantine and maintenance queues. NodePortTuple nptSrc = new NodePortTuple(lt.getSrc(), lt.getSrcPort()); NodePortTuple nptDst = new NodePortTuple(lt.getDst(), lt.getDstPort()); flagToRemoveFromQuarantineQueue(nptSrc); flagToRemoveFromMaintenanceQueue(nptSrc); flagToRemoveFromQuarantineQueue(nptDst); flagToRemoveFromMaintenanceQueue(nptDst); // Consume this message ctrLldpEol.increment(); return Command.STOP; } //*********************************** // Internal Methods - Port Status/ New Port Processing Related //*********************************** /** * Process a new port. If link discovery is disabled on the port, then do * nothing. If autoportfast feature is enabled and the port is a fast port, * then do nothing. Otherwise, send LLDP message. Add the port to * quarantine. * * @param sw * @param p */ private void processNewPort(DatapathId sw, OFPort p) { if (isLinkDiscoverySuppressed(sw, p)) { // Do nothing as link discovery is suppressed. return; } IOFSwitch iofSwitch = switchService.getSwitch(sw); if (iofSwitch == null) { return; } NodePortTuple npt = new NodePortTuple(sw, p); discover(sw, p); addToQuarantineQueue(npt); } //*********************************** // Internal Methods - Discovery Related //*********************************** private void doUpdatesThread() throws InterruptedException { do { LDUpdate update = updates.take(); List<LDUpdate> updateList = new ArrayList<LDUpdate>(); updateList.add(update); // Add all the pending updates to the list. 
while (updates.peek() != null) { updateList.add(updates.remove()); } if (linkDiscoveryAware != null && !updateList.isEmpty()) { if (log.isDebugEnabled()) { log.debug("Dispatching link discovery update {} {} {} {} {} {}ms for {}", new Object[] { update.getOperation(), update.getSrc(), update.getSrcPort(), update.getDst(), update.getDstPort(), update.getLatency().getValue(), linkDiscoveryAware }); } try { for (ILinkDiscoveryListener lda : linkDiscoveryAware) { // order // maintained lda.linkDiscoveryUpdate(updateList); } } catch (Exception e) { log.error("Error in link discovery updates loop", e); } } } while (updates.peek() != null); } protected boolean isLinkDiscoverySuppressed(DatapathId sw, OFPort portNumber) { return this.suppressLinkDiscovery.contains(new NodePortTuple(sw, portNumber)); } protected void discoverLinks() { // timeout known links. timeoutLinks(); // increment LLDP clock lldpClock = (lldpClock + 1) % LLDP_TO_ALL_INTERVAL; if (lldpClock == 0) { if (log.isTraceEnabled()) log.trace("Sending LLDP out on all ports."); discoverOnAllPorts(); } } /** * Quarantine Ports. */ protected class QuarantineWorker implements Runnable { @Override public void run() { try { processBDDPLists(); } catch (Exception e) { log.error("Error in quarantine worker thread", e); } finally { bddpTask.reschedule(BDDP_TASK_INTERVAL, TimeUnit.MILLISECONDS); } } } /** * Add a switch port to the quarantine queue. Schedule the quarantine task * if the quarantine queue was empty before adding this switch port. * * @param npt */ protected void addToQuarantineQueue(NodePortTuple npt) { if (quarantineQueue.contains(npt) == false) { quarantineQueue.add(npt); } } /** * Remove a switch port from the quarantine queue. * protected void removeFromQuarantineQueue(NodePortTuple npt) { // Remove all occurrences of the node port tuple from the list. 
while (quarantineQueue.remove(npt)); }*/ protected void flagToRemoveFromQuarantineQueue(NodePortTuple npt) { if (toRemoveFromQuarantineQueue.contains(npt) == false) { toRemoveFromQuarantineQueue.add(npt); } } /** * Add a switch port to maintenance queue. * * @param npt */ protected void addToMaintenanceQueue(NodePortTuple npt) { if (maintenanceQueue.contains(npt) == false) { maintenanceQueue.add(npt); } } /** * Remove a switch port from maintenance queue. * * @param npt * protected void removeFromMaintenanceQueue(NodePortTuple npt) { // Remove all occurrences of the node port tuple from the queue. while (maintenanceQueue.remove(npt)); } */ protected void flagToRemoveFromMaintenanceQueue(NodePortTuple npt) { if (toRemoveFromMaintenanceQueue.contains(npt) == false) { toRemoveFromMaintenanceQueue.add(npt); } } /** * This method processes the quarantine list in bursts. The task is at most * once per BDDP_TASK_INTERVAL. One each call, BDDP_TASK_SIZE number of * switch ports are processed. Once the BDDP packets are sent out through * the switch ports, the ports are removed from the quarantine list. */ protected void processBDDPLists() { int count = 0; Set<NodePortTuple> nptList = new HashSet<NodePortTuple>(); while (count < BDDP_TASK_SIZE && quarantineQueue.peek() != null) { NodePortTuple npt; npt = quarantineQueue.remove(); /* * Do not send a discovery message if we already have received one * from another switch on this same port. In other words, if * handleLldp() determines there is a new link between two ports of * two switches, then there is no need to re-discover the link again. * * By flagging the item in handleLldp() and waiting to remove it * from the queue when processBDDPLists() runs, we can guarantee a * PORT_STATUS update is generated and dispatched below by * generateSwitchPortStatusUpdate(). 
*/ if (!toRemoveFromQuarantineQueue.remove(npt)) { sendDiscoveryMessage(npt.getNodeId(), npt.getPortId(), false, false); } /* * Still add the item to the list though, so that the PORT_STATUS update * is generated below at the end of this function. */ nptList.add(npt); count++; } count = 0; while (count < BDDP_TASK_SIZE && maintenanceQueue.peek() != null) { NodePortTuple npt; npt = maintenanceQueue.remove(); /* * Same as above, except we don't care about the PORT_STATUS message; * we only want to avoid sending the discovery message again. */ if (!toRemoveFromMaintenanceQueue.remove(npt)) { sendDiscoveryMessage(npt.getNodeId(), npt.getPortId(), false, false); } count++; } for (NodePortTuple npt : nptList) { generateSwitchPortStatusUpdate(npt.getNodeId(), npt.getPortId()); } } private void generateSwitchPortStatusUpdate(DatapathId sw, OFPort port) { UpdateOperation operation; IOFSwitch iofSwitch = switchService.getSwitch(sw); if (iofSwitch == null) return; OFPortDesc ofp = iofSwitch.getPort(port); if (ofp == null) return; Set<OFPortState> srcPortState = ofp.getState(); boolean portUp = !srcPortState.contains(OFPortState.STP_BLOCK); if (portUp) { operation = UpdateOperation.PORT_UP; } else { operation = UpdateOperation.PORT_DOWN; } updates.add(new LDUpdate(sw, port, operation)); } protected void discover(NodePortTuple npt) { discover(npt.getNodeId(), npt.getPortId()); } protected void discover(DatapathId sw, OFPort port) { sendDiscoveryMessage(sw, port, true, false); } /** * Check if incoming discovery messages are enabled or not. 
* @param sw * @param port * @param isStandard * @return */ protected boolean isIncomingDiscoveryAllowed(DatapathId sw, OFPort port, boolean isStandard) { if (isLinkDiscoverySuppressed(sw, port)) { /* Do not process LLDPs from this port as suppressLLDP is set */ return false; } IOFSwitch iofSwitch = switchService.getSwitch(sw); if (iofSwitch == null) { return false; } if (port == OFPort.LOCAL) return false; OFPortDesc ofpPort = iofSwitch.getPort(port); if (ofpPort == null) { if (log.isTraceEnabled()) { log.trace("Null physical port. sw={}, port={}", sw.toString(), port.getPortNumber()); } return false; } return true; } /** * Check if outgoing discovery messages are enabled or not. * @param sw * @param port * @param isStandard * @param isReverse * @return */ protected boolean isOutgoingDiscoveryAllowed(DatapathId sw, OFPort port, boolean isStandard, boolean isReverse) { if (isLinkDiscoverySuppressed(sw, port)) { /* Dont send LLDPs out of this port as suppressLLDP is set */ return false; } IOFSwitch iofSwitch = switchService.getSwitch(sw); if (iofSwitch == null) { return false; } else if (iofSwitch.getControllerRole() == OFControllerRole.ROLE_SLAVE) { return false; } if (port == OFPort.LOCAL) return false; OFPortDesc ofpPort = iofSwitch.getPort(port); if (ofpPort == null) { if (log.isTraceEnabled()) { log.trace("Null physical port. sw={}, port={}", sw.toString(), port.getPortNumber()); } return false; } else { return true; } } /** * Get the actions for packet-out corresponding to a specific port. * This is a placeholder for adding actions if any port-specific * actions are desired. The default action is simply to output to * the given port. * @param port * @return */ protected List<OFAction> getDiscoveryActions(IOFSwitch sw, OFPort port) { // set actions List<OFAction> actions = new ArrayList<OFAction>(); actions.add(sw.getOFFactory().actions().buildOutput().setPort(port).build()); return actions; } /** * Send link discovery message out of a given switch port. 
The discovery * message may be a standard LLDP or a modified LLDP, where the dst mac * address is set to :ff. TODO: The modified LLDP will updated in the future * and may use a different eth-type. * * @param sw * @param port * @param isStandard * indicates standard or modified LLDP * @param isReverse * indicates whether the LLDP was sent as a response */ protected boolean sendDiscoveryMessage(DatapathId sw, OFPort port, boolean isStandard, boolean isReverse) { // Takes care of all checks including null pointer checks. if (!isOutgoingDiscoveryAllowed(sw, port, isStandard, isReverse)) { return false; } IOFSwitch iofSwitch = switchService.getSwitch(sw); if (iofSwitch == null) { // fix dereference violations in case race conditions return false; } return iofSwitch.write(generateLLDPMessage(iofSwitch, port, isStandard, isReverse)); } /** * Send LLDPs to all switch-ports */ protected void discoverOnAllPorts() { log.info("Sending LLDP packets out of all the enabled ports"); // Send standard LLDPs for (DatapathId sw : switchService.getAllSwitchDpids()) { IOFSwitch iofSwitch = switchService.getSwitch(sw); if (iofSwitch == null) continue; if (!iofSwitch.isActive()) continue; /* can't do anything if the switch is SLAVE */ Collection<OFPort> c = iofSwitch.getEnabledPortNumbers(); if (c != null) { for (OFPort ofp : c) { if (isLinkDiscoverySuppressed(sw, ofp)) { continue; } log.trace("Enabled port: {}", ofp); sendDiscoveryMessage(sw, ofp, true, false); // If the switch port is not already in the maintenance // queue, add it. 
                    // If the switch port is not already in the maintenance
                    // queue, add it.
                    NodePortTuple npt = new NodePortTuple(sw, ofp);
                    addToMaintenanceQueue(npt);
                }
            }
        }
    }

    /**
     * Map a pair of port states to a link update operation: the link counts
     * as up only when neither endpoint is STP-blocked.
     */
    protected UpdateOperation getUpdateOperation(OFPortState srcPortState,
                                                 OFPortState dstPortState) {
        boolean added = ((srcPortState != OFPortState.STP_BLOCK)
                && (dstPortState != OFPortState.STP_BLOCK));

        if (added) {
            return UpdateOperation.LINK_UPDATED;
        } else {
            return UpdateOperation.LINK_REMOVED;
        }
    }

    /**
     * Map a single port state to a port update operation (up unless
     * STP-blocked).
     */
    protected UpdateOperation getUpdateOperation(OFPortState srcPortState) {
        boolean portUp = (srcPortState != OFPortState.STP_BLOCK);

        if (portUp) {
            return UpdateOperation.PORT_UP;
        } else {
            return UpdateOperation.PORT_DOWN;
        }
    }

    //************************************
    // Internal Methods - Link Operations Related
    //************************************

    /**
     * This method is used to specifically ignore/consider specific links.
     */
    protected boolean isLinkAllowed(DatapathId src, OFPort srcPort,
                                    DatapathId dst, OFPort dstPort) {
        return true;
    }

    // Index a brand-new link into switchLinks (by both endpoints' DPIDs) and
    // portLinks (by both switch:port tuples), then seed its latency history.
    // NOTE(review): mutates the shared maps with no locking of its own —
    // callers appear to hold the write lock (see addOrUpdateLink); confirm
    // before adding new call sites.
    private boolean addLink(Link lt, LinkInfo newInfo) {
        NodePortTuple srcNpt, dstNpt;

        srcNpt = new NodePortTuple(lt.getSrc(), lt.getSrcPort());
        dstNpt = new NodePortTuple(lt.getDst(), lt.getDstPort());

        // index it by switch source
        if (!switchLinks.containsKey(lt.getSrc()))
            switchLinks.put(lt.getSrc(),
                            new HashSet<Link>());
        switchLinks.get(lt.getSrc()).add(lt);

        // index it by switch dest
        if (!switchLinks.containsKey(lt.getDst()))
            switchLinks.put(lt.getDst(),
                            new HashSet<Link>());
        switchLinks.get(lt.getDst()).add(lt);

        // index both ends by switch:port
        if (!portLinks.containsKey(srcNpt))
            portLinks.put(srcNpt,
                          new HashSet<Link>());
        portLinks.get(srcNpt).add(lt);

        if (!portLinks.containsKey(dstNpt))
            portLinks.put(dstNpt,
                          new HashSet<Link>());
        portLinks.get(dstNpt).add(lt);

        newInfo.addObservedLatency(lt.getLatency());

        return true;
    }

    /**
     * Determine if a link should be updated and set the time stamps if it should.
     * Also, determine the correct latency value for the link. An existing link
     * will have a list of latencies associated with its LinkInfo.
If enough time has * elapsed to determine a good latency baseline average and the new average is * greater or less than the existing latency value by a set threshold, then the * latency should be updated. This allows for latencies to be smoothed and reduces * the number of link updates due to small fluctuations (or outliers) in instantaneous * link latency values. * * @param lt with observed latency. Will be replaced with latency to use. * @param existingInfo with past observed latencies and time stamps * @param newInfo with updated time stamps * @return true if update occurred; false if no update should be dispatched */ protected boolean updateLink(@Nonnull Link lk, @Nonnull LinkInfo existingInfo, @Nonnull LinkInfo newInfo) { boolean linkChanged = false; boolean ignoreBDDP_haveLLDPalready = false; /* * Check if we are transitioning from one link type to another. * A transition is: * -- going from no LLDP time to an LLDP time (is OpenFlow link) * -- going from an LLDP time to a BDDP time (is non-OpenFlow link) * * Note: Going from LLDP to BDDP means our LLDP link must have timed * out already (null in existing LinkInfo). Otherwise, we'll flap * between mulitcast and unicast links. */ if (existingInfo.getMulticastValidTime() == null && newInfo.getMulticastValidTime() != null) { if (existingInfo.getUnicastValidTime() == null) { /* unicast must be null to go to multicast */ log.debug("Link is BDDP. Changed."); linkChanged = true; /* detected BDDP */ } else { ignoreBDDP_haveLLDPalready = true; } } else if (existingInfo.getUnicastValidTime() == null && newInfo.getUnicastValidTime() != null) { log.debug("Link is LLDP. Changed."); linkChanged = true; /* detected LLDP */ } /* * If we're undergoing an LLDP update (non-null time), grab the new LLDP time. * If we're undergoing a BDDP update (non-null time), grab the new BDDP time. * * Only do this if the new LinkInfo is non-null for each respective field. 
* We want to overwrite an existing LLDP/BDDP time stamp with null if it's * still valid. */ if (newInfo.getUnicastValidTime() != null) { existingInfo.setUnicastValidTime(newInfo.getUnicastValidTime()); } else if (newInfo.getMulticastValidTime() != null) { existingInfo.setMulticastValidTime(newInfo.getMulticastValidTime()); } /* * Update Link latency if we've accumulated enough latency data points * and if the average exceeds +/- the current stored latency by the * defined update threshold. */ U64 currentLatency = existingInfo.getCurrentLatency(); U64 latencyToUse = existingInfo.addObservedLatency(lk.getLatency()); if (currentLatency == null) { /* no-op; already 'changed' as this is a new link */ } else if (!latencyToUse.equals(currentLatency) && !ignoreBDDP_haveLLDPalready) { log.debug("Updating link {} latency to {}ms", lk.toKeyString(), latencyToUse.getValue()); lk.setLatency(latencyToUse); linkChanged = true; } else { log.trace("No need to update link latency {}", lk.toString()); } return linkChanged; } protected boolean addOrUpdateLink(Link lt, LinkInfo newInfo) { boolean linkChanged = false; lock.writeLock().lock(); try { /* * Put the new info only if new. We want a single LinkInfo * to exist per Link. This will allow us to track latencies * without having to conduct a deep, potentially expensive * copy each time a link is updated. */ LinkInfo existingInfo = null; if (links.get(lt) == null) { links.put(lt, newInfo); /* Only put if doesn't exist or null value */ } else { existingInfo = links.get(lt); } /* Update existing LinkInfo with most recent time stamp */ if (existingInfo != null && existingInfo.getFirstSeenTime().before(newInfo.getFirstSeenTime())) { existingInfo.setFirstSeenTime(newInfo.getFirstSeenTime()); } if (log.isTraceEnabled()) { log.trace("addOrUpdateLink: {} {}", lt, (newInfo.getMulticastValidTime() != null) ? 
"multicast" : "unicast"); } UpdateOperation updateOperation = null; linkChanged = false; if (existingInfo == null) { addLink(lt, newInfo); updateOperation = UpdateOperation.LINK_UPDATED; linkChanged = true; // Log direct links only. Multi-hop links may be numerous // Add all to event history LinkType linkType = getLinkType(lt, newInfo); if (linkType == ILinkDiscovery.LinkType.DIRECT_LINK) { log.debug("Inter-switch link detected: {}", lt); } } else { linkChanged = updateLink(lt, existingInfo, newInfo); if (linkChanged) { updateOperation = UpdateOperation.LINK_UPDATED; LinkType linkType = getLinkType(lt, newInfo); if (linkType == ILinkDiscovery.LinkType.DIRECT_LINK) { log.debug("Inter-switch link updated: {}", lt); } } } if (linkChanged) { // find out if the link was added or removed here. updates.add(new LDUpdate(lt.getSrc(), lt.getSrcPort(), lt.getDst(), lt.getDstPort(), lt.getLatency(), getLinkType(lt, newInfo), updateOperation)); /* Update link structure (FIXME shouldn't have to do this, since it should be the same object) */ Iterator<Entry<Link, LinkInfo>> it = links.entrySet().iterator(); while (it.hasNext()) { Entry<Link, LinkInfo> entry = it.next(); if (entry.getKey().equals(lt)) { entry.getKey().setLatency(lt.getLatency()); break; } } } // Write changes to storage. This will always write the updated // valid time, plus the port states if they've changed (i.e. if // they weren't set to null in the previous block of code. writeLinkToStorage(lt, newInfo); } finally { lock.writeLock().unlock(); } return linkChanged; } /** * Delete a link * * @param link * - link to be deleted. * @param reason * - reason why the link is deleted. */ protected void deleteLink(Link link, String reason) { if (link == null) return; List<Link> linkList = new ArrayList<Link>(); linkList.add(link); deleteLinks(linkList, reason); } /** * Removes links from memory and storage. * * @param links * The List of @LinkTuple to delete. 
 */
protected void deleteLinks(List<Link> links, String reason) {
    // Convenience overload: no caller-supplied updates to append.
    deleteLinks(links, reason, null);
}

/**
 * Removes links from memory and storage.
 *
 * For each link: unindexes it from the switch-indexed and port-indexed
 * maps, drops its LinkInfo, queues a LINK_REMOVED update and deletes the
 * corresponding storage row — all while holding the write lock.
 *
 * @param links
 *            The List of @LinkTuple to delete.
 * @param reason
 *            human-readable reason for the deletion (used for logging)
 * @param updateList
 *            optional extra updates enqueued after the LINK_REMOVED
 *            updates; may be null
 */
protected void deleteLinks(List<Link> links, String reason, List<LDUpdate> updateList) {
    NodePortTuple srcNpt, dstNpt;
    List<LDUpdate> linkUpdateList = new ArrayList<LDUpdate>();
    lock.writeLock().lock();
    try {
        for (Link lt : links) {
            srcNpt = new NodePortTuple(lt.getSrc(), lt.getSrcPort());
            dstNpt = new NodePortTuple(lt.getDst(), lt.getDstPort());

            // Unindex the link from both endpoint switches; prune empty
            // sets so switchLinks only holds switches that still have links.
            if (switchLinks.containsKey(lt.getSrc())) {
                switchLinks.get(lt.getSrc()).remove(lt);
                if (switchLinks.get(lt.getSrc()).isEmpty())
                    this.switchLinks.remove(lt.getSrc());
            }
            if (this.switchLinks.containsKey(lt.getDst())) {
                switchLinks.get(lt.getDst()).remove(lt);
                if (this.switchLinks.get(lt.getDst()).isEmpty())
                    this.switchLinks.remove(lt.getDst());
            }

            // Same pruning for the per-port index.
            if (this.portLinks.get(srcNpt) != null) {
                this.portLinks.get(srcNpt).remove(lt);
                if (this.portLinks.get(srcNpt).isEmpty())
                    this.portLinks.remove(srcNpt);
            }
            if (this.portLinks.get(dstNpt) != null) {
                this.portLinks.get(dstNpt).remove(lt);
                if (this.portLinks.get(dstNpt).isEmpty())
                    this.portLinks.remove(dstNpt);
            }

            // NOTE(review): info is null when lt was not tracked in this.links;
            // assumes getLinkType(lt, null) tolerates a null LinkInfo — confirm.
            LinkInfo info = this.links.remove(lt);
            LinkType linkType = getLinkType(lt, info);
            linkUpdateList.add(new LDUpdate(lt.getSrc(), lt.getSrcPort(),
                                            lt.getDst(), lt.getDstPort(),
                                            lt.getLatency(), linkType,
                                            UpdateOperation.LINK_REMOVED));

            // remove link from storage.
            removeLinkFromStorage(lt);

            // TODO Whenever a link is removed, it has to be checked if
            // the switchports must be added to quarantine.

            if (linkType == ILinkDiscovery.LinkType.DIRECT_LINK) {
                log.info("Inter-switch link removed: {}", lt);
            } else if (log.isTraceEnabled()) {
                log.trace("Deleted link {}", lt);
            }
        }
    } finally {
        // Caller-provided updates are appended after the LINK_REMOVED
        // entries so consumers observe the removals first.
        if (updateList != null) linkUpdateList.addAll(updateList);
        updates.addAll(linkUpdateList);
        lock.writeLock().unlock();
    }
}

/**
 * Delete links incident on a given switch port.
* * @param npt * @param reason */ protected void deleteLinksOnPort(NodePortTuple npt, String reason) { List<Link> eraseList = new ArrayList<Link>(); if (this.portLinks.containsKey(npt)) { if (log.isTraceEnabled()) { log.trace("handlePortStatus: Switch {} port #{} " + "removing links {}", new Object[] { npt.getNodeId().toString(), npt.getPortId(), this.portLinks.get(npt) }); } eraseList.addAll(this.portLinks.get(npt)); deleteLinks(eraseList, reason); } } /** * Iterates through the list of links and deletes if the last discovery * message reception time exceeds timeout values. */ protected void timeoutLinks() { List<Link> eraseList = new ArrayList<Link>(); Long curTime = System.currentTimeMillis(); boolean unicastTimedOut = false; /* Reentrant required here because deleteLink also write locks. */ lock.writeLock().lock(); try { Iterator<Entry<Link, LinkInfo>> it = this.links.entrySet().iterator(); while (it.hasNext()) { Entry<Link, LinkInfo> entry = it.next(); Link lt = entry.getKey(); LinkInfo info = entry.getValue(); /* Timeout the unicast and multicast LLDP valid times independently. */ if ((info.getUnicastValidTime() != null) && (info.getUnicastValidTime().getTime() + (this.LINK_TIMEOUT * 1000) < curTime)) { unicastTimedOut = true; info.setUnicastValidTime(null); } if ((info.getMulticastValidTime() != null) && (info.getMulticastValidTime().getTime() + (this.LINK_TIMEOUT * 1000) < curTime)) { info.setMulticastValidTime(null); } /* * Add to the erase list only if the unicast time is null * and the multicast time is null as well. Otherwise, if * only the unicast time is null and we just set it to * null (meaning it just timed out), then we transition * from unicast to multicast. */ if (info.getUnicastValidTime() == null && info.getMulticastValidTime() == null) { eraseList.add(entry.getKey()); } else if (unicastTimedOut) { /* Just moved from unicast to multicast. 
*/ updates.add(new LDUpdate(lt.getSrc(), lt.getSrcPort(), lt.getDst(), lt.getDstPort(), lt.getLatency(), getLinkType(lt, info), UpdateOperation.LINK_UPDATED)); } } if (!eraseList.isEmpty()) { deleteLinks(eraseList, "LLDP timeout"); } } finally { lock.writeLock().unlock(); } } //****************** // Internal Helper Methods //****************** protected void setControllerTLV() { // Setting the controllerTLVValue based on current nano time, // controller's IP address, and the network interface object hash // the corresponding IP address. final int prime = 7867; byte[] controllerTLVValue = new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 }; // 8 // byte // value. ByteBuffer bb = ByteBuffer.allocate(10); long result = System.nanoTime(); try{ // Use some data specific to the machine this controller is // running on. In this case: the list of network interfaces Enumeration<NetworkInterface> ifaces = NetworkInterface.getNetworkInterfaces(); if (ifaces != null) { result = result * prime + ifaces.hashCode(); } } catch (SocketException e) { log.warn("Could not get list of interfaces of local machine to " + "encode in TLV: {}", e.toString()); } // set the first 4 bits to 0. result = result & (0x0fffffffffffffffL); bb.putLong(result); bb.rewind(); bb.get(controllerTLVValue, 0, 8); this.controllerTLV = new LLDPTLV().setType((byte) 0x0c) .setLength((short) controllerTLVValue.length) .setValue(controllerTLVValue); } //****************** // IOFSwitchListener //****************** private void handlePortDown(DatapathId switchId, OFPort portNumber) { NodePortTuple npt = new NodePortTuple(switchId, portNumber); deleteLinksOnPort(npt, "Port Status Changed"); LDUpdate update = new LDUpdate(switchId, portNumber, UpdateOperation.PORT_DOWN); updates.add(update); } /** * We don't react the port changed notifications here. we listen for * OFPortStatus messages directly. 
Might consider using this notifier * instead */ @Override public void switchPortChanged(DatapathId switchId, OFPortDesc port, PortChangeType type) { switch (type) { case UP: processNewPort(switchId, port.getPortNo()); break; case DELETE: case DOWN: handlePortDown(switchId, port.getPortNo()); break; case OTHER_UPDATE: case ADD: // This is something other than port add or delete. // Topology does not worry about this. // If for some reason the port features change, which // we may have to react. break; } } @Override public void switchAdded(DatapathId switchId) { // no-op // We don't do anything at switch added, but we do only when the // switch is activated. } @Override public void switchRemoved(DatapathId sw) { List<Link> eraseList = new ArrayList<Link>(); lock.writeLock().lock(); try { if (switchLinks.containsKey(sw)) { if (log.isTraceEnabled()) { log.trace("Handle switchRemoved. Switch {}; removing links {}", sw.toString(), switchLinks.get(sw)); } List<LDUpdate> updateList = new ArrayList<LDUpdate>(); updateList.add(new LDUpdate(sw, SwitchType.BASIC_SWITCH, UpdateOperation.SWITCH_REMOVED)); // add all tuples with an endpoint on this switch to erase list eraseList.addAll(switchLinks.get(sw)); // Sending the updateList, will ensure the updates in this // list will be added at the end of all the link updates. // Thus, it is not necessary to explicitly add these updates // to the queue. deleteLinks(eraseList, "Switch Removed", updateList); } else { // Switch does not have any links. 
updates.add(new LDUpdate(sw, SwitchType.BASIC_SWITCH,
                         UpdateOperation.SWITCH_REMOVED));
            }
        } finally {
            lock.writeLock().unlock();
        }
    }

    @Override
    public void switchActivated(DatapathId switchId) {
        // Kick off link discovery on every enabled port of the newly
        // activated switch, then announce the switch itself.
        IOFSwitch sw = switchService.getSwitch(switchId);
        if (sw == null) // fix dereference violation in case of race conditions
            return;
        if (sw.getEnabledPortNumbers() != null) {
            for (OFPort p : sw.getEnabledPortNumbers()) {
                processNewPort(sw.getId(), p);
            }
        }
        LDUpdate update = new LDUpdate(sw.getId(), SwitchType.BASIC_SWITCH,
                                       UpdateOperation.SWITCH_UPDATED);
        updates.add(update);
    }

    @Override
    public void switchChanged(DatapathId switchId) {
        // no-op
    }

    //*********************
    //   Storage Listener
    //*********************
    /**
     * Sets the IStorageSource to use for Topology
     *
     * @param storageSourceService
     *            the storage source to use
     */
    public void setStorageSource(IStorageSourceService storageSourceService) {
        this.storageSourceService = storageSourceService;
    }

    /**
     * Gets the storage source for this ITopology
     *
     * @return The IStorageSource ITopology is writing to
     */
    public IStorageSourceService getStorageSource() {
        return storageSourceService;
    }

    @Override
    public void rowsModified(String tableName, Set<Object> rowKeys) {
        // Only the topology table is interesting here; re-read its config.
        if (tableName.equals(TOPOLOGY_TABLE_NAME)) {
            readTopologyConfigFromStorage();
            return;
        }
    }

    @Override
    public void rowsDeleted(String tableName, Set<Object> rowKeys) {
        // Ignore delete events, the switch delete will do the
        // right thing on its own.
readTopologyConfigFromStorage(); } //****************************** // Internal methods - Config Related //****************************** protected void readTopologyConfigFromStorage() { IResultSet topologyResult = storageSourceService.executeQuery(TOPOLOGY_TABLE_NAME, null, null, null); if (topologyResult.next()) { boolean apf = topologyResult.getBoolean(TOPOLOGY_AUTOPORTFAST); autoPortFastFeature = apf; } else { this.autoPortFastFeature = AUTOPORTFAST_DEFAULT; } if (autoPortFastFeature) log.debug("Setting autoportfast feature to ON"); else log.debug("Setting autoportfast feature to OFF"); } /** * Deletes all links from storage */ void clearAllLinks() { storageSourceService.deleteRowsAsync(LINK_TABLE_NAME, null); } /** * Writes a LinkTuple and corresponding LinkInfo to storage * * @param lt * The LinkTuple to write * @param linkInfo * The LinkInfo to write */ protected void writeLinkToStorage(Link lt, LinkInfo linkInfo) { LinkType type = getLinkType(lt, linkInfo); // Write only direct links. Do not write links to external // L2 network. // if (type != LinkType.DIRECT_LINK && type != LinkType.TUNNEL) { // return; // } Map<String, Object> rowValues = new HashMap<String, Object>(); String id = getLinkId(lt); rowValues.put(LINK_ID, id); rowValues.put(LINK_VALID_TIME, linkInfo.getUnicastValidTime()); String srcDpid = lt.getSrc().toString(); rowValues.put(LINK_SRC_SWITCH, srcDpid); rowValues.put(LINK_SRC_PORT, lt.getSrcPort()); if (type == LinkType.DIRECT_LINK) rowValues.put(LINK_TYPE, "internal"); else if (type == LinkType.MULTIHOP_LINK) rowValues.put(LINK_TYPE, "external"); else if (type == LinkType.TUNNEL) rowValues.put(LINK_TYPE, "tunnel"); else rowValues.put(LINK_TYPE, "invalid"); String dstDpid = lt.getDst().toString(); rowValues.put(LINK_DST_SWITCH, dstDpid); rowValues.put(LINK_DST_PORT, lt.getDstPort()); storageSourceService.updateRowAsync(LINK_TABLE_NAME, rowValues); } /** * Removes a link from storage using an asynchronous call. 
* * @param lt * The LinkTuple to delete. */ protected void removeLinkFromStorage(Link lt) { String id = getLinkId(lt); storageSourceService.deleteRowAsync(LINK_TABLE_NAME, id); } public Long readLinkValidTime(Link lt) { // FIXME: We're not currently using this right now, but if we start // to use this again, we probably shouldn't use it in its current // form, because it's doing synchronous storage calls. Depending // on the context this may still be OK, but if it's being called // on the packet in processing thread it should be reworked to // use asynchronous storage calls. Long validTime = null; IResultSet resultSet = null; try { String[] columns = { LINK_VALID_TIME }; String id = getLinkId(lt); resultSet = storageSourceService.executeQuery(LINK_TABLE_NAME, columns, new OperatorPredicate( LINK_ID, OperatorPredicate.Operator.EQ, id), null); if (resultSet.next()) validTime = resultSet.getLong(LINK_VALID_TIME); } finally { if (resultSet != null) resultSet.close(); } return validTime; } /** * Gets the storage key for a LinkTuple * * @param lt * The LinkTuple to get * @return The storage key as a String */ private String getLinkId(Link lt) { return lt.getSrc().toString() + "-" + lt.getSrcPort() + "-" + lt.getDst().toString() + "-" + lt.getDstPort(); } //*************** // IFloodlightModule //*************** @Override public Collection<Class<? extends IFloodlightService>> getModuleServices() { Collection<Class<? extends IFloodlightService>> l = new ArrayList<Class<? extends IFloodlightService>>(); l.add(ILinkDiscoveryService.class); // l.add(ITopologyService.class); return l; } @Override public Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls() { Map<Class<? extends IFloodlightService>, IFloodlightService> m = new HashMap<Class<? extends IFloodlightService>, IFloodlightService>(); // We are the class that implements the service m.put(ILinkDiscoveryService.class, this); return m; } @Override public Collection<Class<? 
extends IFloodlightService>> getModuleDependencies() { Collection<Class<? extends IFloodlightService>> l = new ArrayList<Class<? extends IFloodlightService>>(); l.add(IFloodlightProviderService.class); l.add(IStorageSourceService.class); l.add(IThreadPoolService.class); l.add(IRestApiService.class); l.add(IShutdownService.class); return l; } @Override public void init(FloodlightModuleContext context) throws FloodlightModuleException { floodlightProviderService = context.getServiceImpl(IFloodlightProviderService.class); switchService = context.getServiceImpl(IOFSwitchService.class); storageSourceService = context.getServiceImpl(IStorageSourceService.class); threadPoolService = context.getServiceImpl(IThreadPoolService.class); restApiService = context.getServiceImpl(IRestApiService.class); debugCounterService = context.getServiceImpl(IDebugCounterService.class); shutdownService = context.getServiceImpl(IShutdownService.class); // read our config options Map<String, String> configOptions = context.getConfigParams(this); try { String histSize = configOptions.get("event-history-size"); if (histSize != null) { EVENT_HISTORY_SIZE = Short.parseShort(histSize); } } catch (NumberFormatException e) { log.warn("Error event history size. Using default of {} seconds", EVENT_HISTORY_SIZE); } log.debug("Event history size set to {}", EVENT_HISTORY_SIZE); try { String latencyHistorySize = configOptions.get("latency-history-size"); if (latencyHistorySize != null) { LATENCY_HISTORY_SIZE = Integer.parseInt(latencyHistorySize); } } catch (NumberFormatException e) { log.warn("Error in latency history size. 
Using default of {} LLDP intervals", LATENCY_HISTORY_SIZE); } log.info("Link latency history set to {} LLDP data points", LATENCY_HISTORY_SIZE, LATENCY_HISTORY_SIZE); try { String latencyUpdateThreshold = configOptions.get("latency-update-threshold"); if (latencyUpdateThreshold != null) { LATENCY_UPDATE_THRESHOLD = Double.parseDouble(latencyUpdateThreshold); } } catch (NumberFormatException e) { log.warn("Error in latency update threshold. Can be from 0 to 1.", LATENCY_UPDATE_THRESHOLD); } log.info("Latency update threshold set to +/-{} ({}%) of rolling historical average", LATENCY_UPDATE_THRESHOLD, LATENCY_UPDATE_THRESHOLD * 100); // Set the autoportfast feature to false. this.autoPortFastFeature = AUTOPORTFAST_DEFAULT; // We create this here because there is no ordering guarantee this.linkDiscoveryAware = new ArrayList<ILinkDiscoveryListener>(); this.lock = new ReentrantReadWriteLock(); this.updates = new LinkedBlockingQueue<LDUpdate>(); this.links = new HashMap<Link, LinkInfo>(); this.portLinks = new HashMap<NodePortTuple, Set<Link>>(); this.suppressLinkDiscovery = Collections.synchronizedSet(new HashSet<NodePortTuple>()); this.switchLinks = new HashMap<DatapathId, Set<Link>>(); this.quarantineQueue = new LinkedBlockingQueue<NodePortTuple>(); this.maintenanceQueue = new LinkedBlockingQueue<NodePortTuple>(); this.toRemoveFromQuarantineQueue = new LinkedBlockingQueue<NodePortTuple>(); this.toRemoveFromMaintenanceQueue = new LinkedBlockingQueue<NodePortTuple>(); this.ignoreMACSet = Collections.newSetFromMap( new ConcurrentHashMap<MACRange,Boolean>()); this.haListener = new HAListenerDelegate(); this.floodlightProviderService.addHAListener(this.haListener); registerLinkDiscoveryDebugCounters(); } @Override public void startUp(FloodlightModuleContext context) throws FloodlightModuleException { // Initialize role to floodlight provider role. 
this.role = floodlightProviderService.getRole(); // Create our storage tables if (storageSourceService == null) { log.error("No storage source found."); return; } storageSourceService.createTable(TOPOLOGY_TABLE_NAME, null); storageSourceService.setTablePrimaryKeyName(TOPOLOGY_TABLE_NAME, TOPOLOGY_ID); readTopologyConfigFromStorage(); storageSourceService.createTable(LINK_TABLE_NAME, null); storageSourceService.setTablePrimaryKeyName(LINK_TABLE_NAME, LINK_ID); storageSourceService.deleteMatchingRows(LINK_TABLE_NAME, null); // Register for storage updates for the switch table try { storageSourceService.addListener(SWITCH_CONFIG_TABLE_NAME, this); storageSourceService.addListener(TOPOLOGY_TABLE_NAME, this); } catch (StorageException ex) { log.error("Error in installing listener for " + "switch table {}", SWITCH_CONFIG_TABLE_NAME); } ScheduledExecutorService ses = threadPoolService.getScheduledExecutor(); // To be started by the first switch connection discoveryTask = new SingletonTask(ses, new Runnable() { @Override public void run() { try { if (role == null || role == HARole.ACTIVE) { /* don't send if we just transitioned to STANDBY */ discoverLinks(); } } catch (StorageException e) { shutdownService.terminate("Storage exception in LLDP send timer. Terminating process " + e, 0); } catch (Exception e) { log.error("Exception in LLDP send timer.", e); } finally { if (!shuttingDown) { // null role implies HA mode is not enabled. if (role == null || role == HARole.ACTIVE) { log.trace("Rescheduling discovery task as role = {}", role); discoveryTask.reschedule(DISCOVERY_TASK_INTERVAL, TimeUnit.SECONDS); } else { log.trace("Stopped LLDP rescheduling due to role = {}.", role); } } } } }); // null role implies HA mode is not enabled. if (role == null || role == HARole.ACTIVE) { log.trace("Setup: Rescheduling discovery task. 
role = {}", role); discoveryTask.reschedule(DISCOVERY_TASK_INTERVAL, TimeUnit.SECONDS); } else { log.trace("Setup: Not scheduling LLDP as role = {}.", role); } // Setup the BDDP task. It is invoked whenever switch port tuples // are added to the quarantine list. bddpTask = new SingletonTask(ses, new QuarantineWorker()); bddpTask.reschedule(BDDP_TASK_INTERVAL, TimeUnit.MILLISECONDS); updatesThread = new Thread(new Runnable() { @Override public void run() { while (true) { try { doUpdatesThread(); } catch (InterruptedException e) { return; } } } }, "Topology Updates"); updatesThread.start(); // Register for the OpenFlow messages we want to receive floodlightProviderService.addOFMessageListener(OFType.PACKET_IN, this); floodlightProviderService.addOFMessageListener(OFType.PORT_STATUS, this); // Register for switch updates switchService.addOFSwitchListener(this); floodlightProviderService.addHAListener(this.haListener); floodlightProviderService.addInfoProvider("summary", this); if (restApiService != null) restApiService.addRestletRoutable(new LinkDiscoveryWebRoutable()); setControllerTLV(); } // **************************************************** // Link Discovery DebugCounters and DebugEvents // **************************************************** private void registerLinkDiscoveryDebugCounters() throws FloodlightModuleException { if (debugCounterService == null) { log.error("Debug Counter Service not found."); } debugCounterService.registerModule(PACKAGE); ctrIncoming = debugCounterService.registerCounter(PACKAGE, "incoming", "All incoming packets seen by this module"); ctrLldpEol = debugCounterService.registerCounter(PACKAGE, "lldp-eol", "End of Life for LLDP packets"); ctrLinkLocalDrops = debugCounterService.registerCounter(PACKAGE, "linklocal-drops", "All link local packets dropped by this module"); ctrIgnoreSrcMacDrops = debugCounterService.registerCounter(PACKAGE, "ignore-srcmac-drops", "All packets whose srcmac is configured to be dropped by this module"); 
ctrQuarantineDrops = debugCounterService.registerCounter(PACKAGE, "quarantine-drops", "All packets arriving on quarantined ports dropped by this module", IDebugCounterService.MetaData.WARN); } //********************* // IInfoProvider //********************* @Override public Map<String, Object> getInfo(String type) { if (!"summary".equals(type)) return null; Map<String, Object> info = new HashMap<String, Object>(); int numDirectLinks = 0; for (Set<Link> links : switchLinks.values()) { for (Link link : links) { LinkInfo linkInfo = this.getLinkInfo(link); if (linkInfo != null && linkInfo.getLinkType() == LinkType.DIRECT_LINK) { numDirectLinks++; } } } info.put("# inter-switch links", numDirectLinks / 2); info.put("# quarantine ports", quarantineQueue.size()); return info; } //*************** // IHAListener //*************** private class HAListenerDelegate implements IHAListener { @Override public void transitionToActive() { log.warn("Sending LLDPs due to HA change from STANDBY->ACTIVE"); LinkDiscoveryManager.this.role = HARole.ACTIVE; clearAllLinks(); readTopologyConfigFromStorage(); log.debug("Role Change to Master: Rescheduling discovery tasks"); discoveryTask.reschedule(1, TimeUnit.MICROSECONDS); } @Override public void controllerNodeIPsChanged(Map<String, String> curControllerNodeIPs, Map<String, String> addedControllerNodeIPs, Map<String, String> removedControllerNodeIPs) { // ignore } @Override public String getName() { return MODULE_NAME; } @Override public boolean isCallbackOrderingPrereq(HAListenerTypeMarker type, String name) { return false; } @Override public boolean isCallbackOrderingPostreq(HAListenerTypeMarker type, String name) { return "tunnelmanager".equals(name); } @Override public void transitionToStandby() { log.warn("Disabling LLDPs due to HA change from ACTIVE->STANDBY"); LinkDiscoveryManager.this.role = HARole.STANDBY; } } @Override public void switchDeactivated(DatapathId switchId) { } }
apache-2.0
apache/metamodel
core/src/main/java/org/apache/metamodel/util/UrlResource.java
3300
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.metamodel.util;

import java.io.InputStream;
import java.io.OutputStream;
import java.io.Serializable;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;

/**
 * A read-only {@link AbstractResource} backed by a {@link URL} or {@link URI}.
 *
 * Writing is not supported; reading opens a fresh stream from the URL on
 * every call. Size and last-modified time are not available (-1).
 */
public class UrlResource extends AbstractResource implements Serializable {

    private static final long serialVersionUID = 1L;

    // Canonical location of the resource. Name is kept for serialization
    // compatibility with earlier versions of this class.
    private final URI _uri;

    /**
     * Creates a resource from a {@link URL}.
     *
     * @throws IllegalStateException if the URL cannot be converted to a URI
     */
    public UrlResource(URL url) {
        try {
            _uri = url.toURI();
        } catch (URISyntaxException e) {
            throw new IllegalStateException(e);
        }
    }

    /**
     * Creates a resource from a {@link URI}.
     */
    public UrlResource(URI uri) {
        _uri = uri;
    }

    /**
     * Creates a resource from a URL/URI string.
     *
     * @throws IllegalStateException if the string is not a valid URI
     */
    public UrlResource(String urlString) {
        try {
            _uri = new URI(urlString);
        } catch (URISyntaxException e) {
            throw new IllegalStateException(e);
        }
    }

    @Override
    public String toString() {
        return "UrlResource[" + _uri + "]";
    }

    /**
     * Gets the URI associated with this resource.
     *
     * @return the backing {@link URI}
     */
    public URI getUri() {
        return _uri;
    }

    @Override
    public String getName() {
        // Use the last path segment (after '/' or '\') as the name,
        // falling back to the whole URI string when there is no segment.
        final String full = _uri.toString();
        final int separatorIndex = Math.max(full.lastIndexOf('/'), full.lastIndexOf('\\'));
        if (separatorIndex == -1) {
            return full;
        }
        final String candidate = full.substring(separatorIndex + 1);
        return candidate.isEmpty() ? full : candidate;
    }

    @Override
    public String getQualifiedPath() {
        return _uri.toString();
    }

    @Override
    public boolean isReadOnly() {
        // URL-backed resources can only be read, never written.
        return true;
    }

    @Override
    public OutputStream write() throws ResourceException {
        throw new UnsupportedOperationException();
    }

    @Override
    public OutputStream append() throws ResourceException {
        throw new UnsupportedOperationException();
    }

    @Override
    public boolean isExists() {
        // Existence is not probed over the network; optimistically true.
        return true;
    }

    @Override
    public long getSize() {
        return -1;
    }

    @Override
    public long getLastModified() {
        return -1;
    }

    @Override
    public InputStream read() throws ResourceException {
        // Open a new stream per call; any failure (bad URL, I/O error)
        // is reported uniformly as a ResourceException.
        try {
            return _uri.toURL().openStream();
        } catch (Exception e) {
            throw new ResourceException(this, "Failed to open InputStream", e);
        }
    }
}
apache-2.0
twitter-forks/bazel
src/tools/android/java/com/google/devtools/build/android/desugar/langmodel/FieldKey.java
3836
/* * Copyright 2019 The Bazel Authors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.devtools.build.android.desugar.langmodel; import static com.google.common.base.Preconditions.checkState; import com.google.auto.value.AutoValue; import org.objectweb.asm.Type; /** The key to index a class or interface field. */ @AutoValue public abstract class FieldKey extends ClassMemberKey<FieldKey> { /** The factory method for {@link FieldKey}. */ public static FieldKey create(ClassName owner, String name, String descriptor) { checkState( !descriptor.startsWith("("), "Expected a type descriptor for field instead of a method descriptor. Actual: (%s#%s:%s)", owner, name, descriptor); return new AutoValue_FieldKey(owner, name, descriptor); } @Override public FieldKey acceptTypeMapper(TypeMapper typeMapper) { return FieldKey.create(typeMapper.map(owner()), name(), typeMapper.mapDesc(descriptor())); } /** * Accepts {@link FieldInstrVisitor} to perform distinct operations based on different invocation * codes. */ public final <R, P> R accept( MemberUseKind fieldUseKind, FieldInstrVisitor<R, ? 
super FieldKey, P> visitor, P param) { switch (fieldUseKind) { case GETSTATIC: return visitor.visitGetStatic(this, param); case PUTSTATIC: return visitor.visitPutStatic(this, param); case GETFIELD: return visitor.visitGetField(this, param); case PUTFIELD: return visitor.visitPutField(this, param); default: throw new AssertionError( String.format( "Unexpected opcode(%s): Expect one of {GETSTATIC, PUTSTATIC, GETFIELD, PUTFIELD}" + " for field instructions.", fieldUseKind)); } } /** * Returns the bridge method for reading a static field, identified by (getstatic) instruction. */ public final MethodKey bridgeOfStaticRead() { return MethodKey.create( owner(), nameWithSuffix("bridge_getter"), Type.getMethodDescriptor(getFieldType())); } /** * Returns the bridge method for reading an instance field, identified by (getfield) instruction. */ public final MethodKey bridgeOfInstanceRead() { return MethodKey.create( owner(), nameWithSuffix("bridge_getter"), Type.getMethodDescriptor(getFieldType(), Type.getObjectType(ownerName()))); } /** * Returns the bridge method for writing a static field, identified by (putstatic) instruction. */ public final MethodKey bridgeOfStaticWrite() { return MethodKey.create( owner(), nameWithSuffix("bridge_setter"), Type.getMethodDescriptor(getFieldType(), getFieldType())); } /** * Returns the bridge method for writing an instance field, identified by (putfield) instruction. */ public final MethodKey bridgeOfInstanceWrite() { return MethodKey.create( owner(), nameWithSuffix("bridge_setter"), Type.getMethodDescriptor(getFieldType(), Type.getObjectType(ownerName()), getFieldType())); } public final Type getFieldType() { return Type.getType(descriptor()); } public final ClassName getFieldTypeName() { return ClassName.create(getFieldType()); } }
apache-2.0
rnathanday/dryad-repo
dspace-xmlui/dspace-xmlui-api/src/main/java/org/dspace/app/xmlui/aspect/administrative/Navigation.java
16589
/** * The contents of this file are subject to the license and copyright * detailed in the LICENSE and NOTICE files at the root of the source * tree and available online at * * http://www.dspace.org/license/ */ package org.dspace.app.xmlui.aspect.administrative; import java.io.IOException; import java.io.Serializable; import java.sql.SQLException; import java.util.Map; import org.apache.avalon.framework.parameters.Parameters; import org.apache.cocoon.ProcessingException; import org.apache.cocoon.caching.CacheableProcessingComponent; import org.apache.cocoon.environment.ObjectModelHelper; import org.apache.cocoon.environment.Request; import org.apache.cocoon.environment.SourceResolver; import org.apache.cocoon.util.HashUtil; import org.apache.excalibur.source.SourceValidity; import org.apache.excalibur.source.impl.validity.NOPValidity; import org.dspace.app.itemexport.ItemExport; import org.dspace.app.xmlui.cocoon.AbstractDSpaceTransformer; import org.dspace.app.xmlui.utils.DSpaceValidity; import org.dspace.app.xmlui.utils.HandleUtil; import org.dspace.app.xmlui.utils.UIException; import org.dspace.app.xmlui.wing.Message; import org.dspace.app.xmlui.wing.WingException; import org.dspace.app.xmlui.wing.element.List; import org.dspace.app.xmlui.wing.element.Options; import org.dspace.authorize.AuthorizeException; import org.dspace.authorize.AuthorizeManager; import org.dspace.content.Collection; import org.dspace.content.Community; import org.dspace.content.DSpaceObject; import org.dspace.content.Item; import org.dspace.core.Constants; import org.dspace.eperson.Group; import org.xml.sax.SAXException; /** * * Create the navigation options for everything in the administrative aspects. This includes * Epeople, group, item, access control, and registry management. 
* * @author Scott Phillips * @author Afonso Araujo Neto (internationalization) * @author Alexey Maslov * @author Jay Paz */ public class Navigation extends AbstractDSpaceTransformer implements CacheableProcessingComponent { private static final Message T_context_head = message("xmlui.administrative.Navigation.context_head"); private static final Message T_context_edit_item = message("xmlui.administrative.Navigation.context_edit_item"); private static final Message T_context_edit_collection = message("xmlui.administrative.Navigation.context_edit_collection"); private static final Message T_context_item_mapper = message("xmlui.administrative.Navigation.context_item_mapper"); private static final Message T_context_edit_community = message("xmlui.administrative.Navigation.context_edit_community"); private static final Message T_context_create_collection = message("xmlui.administrative.Navigation.context_create_collection"); private static final Message T_context_create_subcommunity = message("xmlui.administrative.Navigation.context_create_subcommunity"); private static final Message T_context_create_community = message("xmlui.administrative.Navigation.context_create_community"); private static final Message T_context_export_metadata = message("xmlui.administrative.Navigation.context_export_metadata"); private static final Message T_administrative_import_metadata = message("xmlui.administrative.Navigation.administrative_import_metadata"); private static final Message T_administrative_head = message("xmlui.administrative.Navigation.administrative_head"); private static final Message T_administrative_access_control = message("xmlui.administrative.Navigation.administrative_access_control"); private static final Message T_administrative_people = message("xmlui.administrative.Navigation.administrative_people"); private static final Message T_administrative_groups = message("xmlui.administrative.Navigation.administrative_groups"); private static final Message 
T_administrative_authorizations = message("xmlui.administrative.Navigation.administrative_authorizations"); private static final Message T_administrative_registries = message("xmlui.administrative.Navigation.administrative_registries"); private static final Message T_administrative_metadata = message("xmlui.administrative.Navigation.administrative_metadata"); private static final Message T_administrative_format = message("xmlui.administrative.Navigation.administrative_format"); private static final Message T_administrative_items = message("xmlui.administrative.Navigation.administrative_items"); private static final Message T_administrative_withdrawn = message("xmlui.administrative.Navigation.administrative_withdrawn"); private static final Message T_administrative_control_panel = message("xmlui.administrative.Navigation.administrative_control_panel"); private static final Message T_statistics = message("xmlui.administrative.Navigation.statistics"); private static final Message T_context_export_item = message("xmlui.administrative.Navigation.context_export_item"); private static final Message T_context_export_collection = message("xmlui.administrative.Navigation.context_export_collection"); private static final Message T_context_export_community = message("xmlui.administrative.Navigation.context_export_community"); private static final Message T_account_export = message("xmlui.administrative.Navigation.account_export"); private static final Message T_my_account = message("xmlui.EPerson.Navigation.my_account"); /** Cached validity object */ private SourceValidity validity; /** exports available for download */ java.util.List<String> availableExports = null; /** * Generate the unique cache key. * * @return The generated key hashes the src */ public Serializable getKey() { Request request = ObjectModelHelper.getRequest(objectModel); // Special case, don't cache anything if the user is logging // in. 
The problem occures because of timming, this cache key // is generated before we know whether the operation has // succeded or failed. So we don't know whether to cache this // under the user's specific cache or under the anonymous user. if (request.getParameter("login_email") != null || request.getParameter("login_password") != null || request.getParameter("login_realm") != null ) { return "0"; } if (context.getCurrentUser() == null) { return HashUtil.hash("anonymous"); } if (availableExports != null && availableExports.size()>0) { StringBuilder key = new StringBuilder(context.getCurrentUser().getEmail()); for(String fileName : availableExports){ key.append(":").append(fileName); } return HashUtil.hash(key.toString()); } return HashUtil.hash(context.getCurrentUser().getEmail()); } /** * Generate the validity object. * * @return The generated validity object or <code>null</code> if the * component is currently not cacheable. */ public SourceValidity getValidity() { if (this.validity == null) { // Only use the DSpaceValidity object is someone is logged in. if (context.getCurrentUser() != null) { try { DSpaceValidity validity = new DSpaceValidity(); validity.add(eperson); Group[] groups = Group.allMemberGroups(context, eperson); for (Group group : groups) { validity.add(group); } this.validity = validity.complete(); } catch (SQLException sqle) { // Just ignore it and return invalid. 
} } else { this.validity = NOPValidity.SHARED_INSTANCE; } } return this.validity; } public void setup(SourceResolver resolver, Map objectModel, String src, Parameters parameters) throws ProcessingException, SAXException, IOException { super.setup(resolver, objectModel, src, parameters); availableExports = null; if (context.getCurrentUser() != null) { try { availableExports = ItemExport.getExportsAvailable(context.getCurrentUser()); } catch (Exception e) { throw new ProcessingException("Error getting available exports", e); } } } public void addOptions(Options options) throws SAXException, WingException, UIException, SQLException, IOException, AuthorizeException { /* Create skeleton menu structure to ensure consistent order between aspects, * even if they are never used */ options.addList("browse"); List account = options.addList("account"); List context = options.addList("context"); List admin = options.addList("administrative"); account.setHead(T_my_account); // My Account options if(availableExports!=null && availableExports.size()>0){ account.addItem().addXref(contextPath+"/admin/export", T_account_export); } //Check if a system administrator boolean isSystemAdmin = AuthorizeManager.isAdmin(this.context); // Context Administrative options DSpaceObject dso = HandleUtil.obtainHandle(objectModel); if (dso instanceof Item) { Item item = (Item) dso; if (item.canEdit()) { context.setHead(T_context_head); context.addItem().addXref(contextPath+"/admin/item?itemID="+item.getID(), T_context_edit_item); if (AuthorizeManager.isAdmin(this.context, dso)) { context.addItem().addXref(contextPath+"/admin/export?itemID="+item.getID(), T_context_export_item ); context.addItem().addXref(contextPath+ "/csv/handle/"+dso.getHandle(),T_context_export_metadata ); } } } else if (dso instanceof Collection) { Collection collection = (Collection) dso; // can they admin this collection? 
if (collection.canEditBoolean(true)) { context.setHead(T_context_head); context.addItemXref(contextPath+"/admin/collection?collectionID=" + collection.getID(), T_context_edit_collection); context.addItemXref(contextPath+"/admin/mapper?collectionID="+collection.getID(), T_context_item_mapper); if (AuthorizeManager.isAdmin(this.context, dso)) { context.addItem().addXref(contextPath+"/admin/export?collectionID="+collection.getID(), T_context_export_collection ); context.addItem().addXref(contextPath+ "/csv/handle/"+dso.getHandle(),T_context_export_metadata ); } } } else if (dso instanceof Community) { Community community = (Community) dso; // can they admin this collection? if (community.canEditBoolean()) { context.setHead(T_context_head); context.addItemXref(contextPath+"/admin/community?communityID=" + community.getID(), T_context_edit_community); if (AuthorizeManager.isAdmin(this.context, dso)) { context.addItem().addXref(contextPath + "/admin/export?communityID=" + community.getID(), T_context_export_community); } context.addItem().addXref(contextPath+ "/csv/handle/"+dso.getHandle(),T_context_export_metadata ); } // can they add to this community? if (AuthorizeManager.authorizeActionBoolean(this.context, community,Constants.ADD)) { context.setHead(T_context_head); context.addItemXref(contextPath+"/admin/collection?createNew&communityID=" + community.getID(), T_context_create_collection); context.addItemXref(contextPath+"/admin/community?createNew&communityID=" + community.getID(), T_context_create_subcommunity); } } if (isSystemAdmin && ("community-list".equals(this.sitemapURI) || "".equals(this.sitemapURI))) { // Only System administrators can create top-level communities context.setHead(T_context_head); context.addItemXref(contextPath+"/admin/community?createNew", T_context_create_community); } // System Administrator options! 
if (isSystemAdmin) { admin.setHead(T_administrative_head); List epeople = admin.addList("epeople"); List registries = admin.addList("registries"); epeople.setHead(T_administrative_access_control); epeople.addItemXref(contextPath+"/admin/epeople", T_administrative_people); epeople.addItemXref(contextPath+"/admin/groups", T_administrative_groups); epeople.addItemXref(contextPath+"/admin/authorize", T_administrative_authorizations); registries.setHead(T_administrative_registries); registries.addItemXref(contextPath+"/admin/metadata-registry",T_administrative_metadata); registries.addItemXref(contextPath+"/admin/format-registry",T_administrative_format); admin.addItemXref(contextPath+"/admin/item", T_administrative_items); admin.addItemXref(contextPath+"/admin/withdrawn", T_administrative_withdrawn); admin.addItemXref(contextPath+"/admin/panel", T_administrative_control_panel); admin.addItemXref(contextPath+"/statistics", T_statistics); admin.addItemXref(contextPath+ "/admin/metadataimport", T_administrative_import_metadata); } } public int addContextualOptions(List context) throws SQLException, WingException { // How many options were added. int options = 0; DSpaceObject dso = HandleUtil.obtainHandle(objectModel); if (dso instanceof Item) { Item item = (Item) dso; if (item.canEdit()) { context.addItem().addXref(contextPath+"/admin/item?itemID="+item.getID(), T_context_edit_item); options++; } } else if (dso instanceof Collection) { Collection collection = (Collection) dso; // can they admin this collection? if (AuthorizeManager.authorizeActionBoolean(this.context, collection, Constants.ADMIN)) { context.addItemXref(contextPath+"/admin/collection?collectionID=" + collection.getID(), T_context_edit_collection); context.addItemXref(contextPath+"/admin/mapper?collectionID="+collection.getID(), T_context_item_mapper); options++; } } else if (dso instanceof Community) { Community community = (Community) dso; // can they admin this collection? 
if (community.canEditBoolean()) { context.addItemXref(contextPath+"/admin/community?communityID=" + community.getID(), T_context_edit_community); options++; } // can they add to this community? if (AuthorizeManager.authorizeActionBoolean(this.context, community,Constants.ADD)) { context.addItemXref(contextPath+"/admin/collection?createNew&communityID=" + community.getID(), T_context_create_collection); context.addItemXref(contextPath+"/admin/community?createNew&communityID=" + community.getID(), T_context_create_subcommunity); options++; } } if (("community-list".equals(this.sitemapURI) || "".equals(this.sitemapURI)) && AuthorizeManager.isAdmin(this.context)) { context.addItemXref(contextPath+"/admin/community?createNew", T_context_create_community); options++; } return options; } /** * recycle */ public void recycle() { this.validity = null; super.recycle(); } }
bsd-3-clause
ric2b/Vivaldi-browser
chromium/android_webview/support_library/boundary_interfaces/src/org/chromium/support_lib_boundary/JsReplyProxyBoundaryInterface.java
424
// Copyright 2019 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package org.chromium.support_lib_boundary; /** * Boundary interface for org.chromium.android_webview.WebMessageListener. */ public interface JsReplyProxyBoundaryInterface extends IsomorphicObjectBoundaryInterface { void postMessage(String message); }
bsd-3-clause
andre77/XChange
xchange-bleutrade/src/main/java/org/knowm/xchange/bleutrade/dto/DepositRecord.java
1019
package org.knowm.xchange.bleutrade.dto;

import com.fasterxml.jackson.annotation.JsonProperty;
import java.math.BigDecimal;

/**
 * Immutable value object for a single Bleutrade deposit entry, populated by
 * Jackson from the exchange's JSON response (PascalCase field names).
 */
public class DepositRecord {

  public final String id;
  public final String timestamp;
  public final BigDecimal amount;
  public final String label;
  public final String coin;

  public DepositRecord(
      @JsonProperty("Id") String depositId,
      @JsonProperty("TimeStamp") String depositTimestamp,
      @JsonProperty("Amount") BigDecimal depositAmount,
      @JsonProperty("Label") String depositLabel,
      @JsonProperty("Coin") String depositCoin) {
    this.id = depositId;
    this.timestamp = depositTimestamp;
    this.amount = depositAmount;
    this.label = depositLabel;
    this.coin = depositCoin;
  }

  @Override
  public String toString() {
    // Builds exactly the same representation as the original concatenation.
    final StringBuilder text = new StringBuilder("DepositRecord{");
    text.append("id='").append(id).append('\'');
    text.append(", timestamp='").append(timestamp).append('\'');
    text.append(", amount=").append(amount);
    text.append(", label='").append(label).append('\'');
    text.append(", coin='").append(coin).append('\'');
    text.append('}');
    return text.toString();
  }
}
mit
briandealwis/gwt-eclipse-plugin
plugins/com.gwtplugins.gwt.eclipse.core.test/src/com/google/gwt/eclipse/core/markers/ProblemSeverityTest.java
1455
/******************************************************************************* * Copyright 2011 Google Inc. All Rights Reserved. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * which accompanies this distribution, and is available at * http://www.eclipse.org/legal/epl-v10.html * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *******************************************************************************/ package com.google.gwt.eclipse.core.markers; import com.google.gdt.eclipse.core.markers.GdtProblemSeverity; import junit.framework.TestCase; /** * Tests the {@link GdtProblemSeverity} class. */ public class ProblemSeverityTest extends TestCase { private static final int IGNORE = 0; private static final int WARNING = 1; private static final int ERROR = 2; public void testGetSeverity() { assertEquals(GdtProblemSeverity.IGNORE, GdtProblemSeverity.getSeverity(IGNORE)); assertEquals(GdtProblemSeverity.WARNING, GdtProblemSeverity.getSeverity(WARNING)); assertEquals(GdtProblemSeverity.ERROR, GdtProblemSeverity.getSeverity(ERROR)); } }
epl-1.0
md-5/jdk10
test/jdk/sun/security/pkcs11/SecmodTest.java
3199
/* * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. 
*/ // common infrastructure for Secmod tests import java.io.*; import java.security.Provider; public class SecmodTest extends PKCS11Test { static String LIBPATH; static String DBDIR; static char[] password = "test12".toCharArray(); static String keyAlias = "mykey"; static boolean useSqlite = false; static void useSqlite(boolean b) { useSqlite = b; } static boolean initSecmod() throws Exception { useNSS(); LIBPATH = getNSSLibDir(); if (LIBPATH == null) { return false; } // load all the libraries except libnss3 into memory if (loadNSPR(LIBPATH) == false) { return false; } safeReload(LIBPATH + System.mapLibraryName("softokn3")); safeReload(LIBPATH + System.mapLibraryName("nssckbi")); DBDIR = System.getProperty("test.classes", ".") + SEP + "tmpdb"; if (useSqlite) { System.setProperty("pkcs11test.nss.db", "sql:" + DBDIR); } else { System.setProperty("pkcs11test.nss.db", DBDIR); } File dbdirFile = new File(DBDIR); if (dbdirFile.exists() == false) { dbdirFile.mkdir(); } if (useSqlite) { copyFile("key4.db", BASE, DBDIR); copyFile("cert9.db", BASE, DBDIR); copyFile("pkcs11.txt", BASE, DBDIR); } else { copyFile("secmod.db", BASE, DBDIR); copyFile("key3.db", BASE, DBDIR); copyFile("cert8.db", BASE, DBDIR); } return true; } private static void copyFile(String name, String srcDir, String dstDir) throws IOException { InputStream in = new FileInputStream(new File(srcDir, name)); OutputStream out = new FileOutputStream(new File(dstDir, name)); byte[] buf = new byte[2048]; while (true) { int n = in.read(buf); if (n < 0) { break; } out.write(buf, 0, n); } in.close(); out.close(); } public void main(Provider p) throws Exception { // dummy } }
gpl-2.0
FauxFaux/jdk9-nashorn
test/src/jdk/nashorn/internal/runtime/doubleconv/test/BignumDtoaTest.java
16210
/* * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ // This file is available under and governed by the GNU General Public // License version 2 only, as published by the Free Software Foundation. // However, the following notice accompanied the original version of this // file: // // Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. 
// * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package jdk.nashorn.internal.runtime.doubleconv.test; import java.io.BufferedReader; import java.io.InputStreamReader; import jdk.nashorn.internal.runtime.doubleconv.DoubleConversion; import jdk.nashorn.internal.runtime.doubleconv.DtoaBuffer; import jdk.nashorn.internal.runtime.doubleconv.DtoaMode; import org.testng.annotations.Test; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertTrue; /** * FastDtoa tests */ @SuppressWarnings("javadoc") public class BignumDtoaTest { final static private int BUFFER_SIZE = 100; // Removes trailing '0' digits. // Can return the empty string if all digits are 0. 
private static String trimRepresentation(final String representation) { final int len = representation.length(); int i; for (i = len - 1; i >= 0; --i) { if (representation.charAt(i) != '0') break; } return representation.substring(0, i + 1); } @Test public void testBignumVarious() { final DtoaBuffer buffer = new DtoaBuffer(BUFFER_SIZE); DoubleConversion.bignumDtoa(1, DtoaMode.SHORTEST, 0, buffer); assertEquals("1", buffer.getRawDigits()); assertEquals(1, buffer.getDecimalPoint()); buffer.reset(); DoubleConversion.bignumDtoa(1.0, DtoaMode.FIXED, 3, buffer); assertTrue(3 >= buffer.getLength() - buffer.getDecimalPoint()); assertEquals("1", trimRepresentation(buffer.getRawDigits())); assertEquals(1, buffer.getDecimalPoint()); buffer.reset(); DoubleConversion.bignumDtoa(1.0, DtoaMode.PRECISION, 3, buffer); assertTrue(3 >= buffer.getLength()); assertEquals("1", trimRepresentation(buffer.getRawDigits())); assertEquals(1, buffer.getDecimalPoint()); buffer.reset(); DoubleConversion.bignumDtoa(1.5, DtoaMode.SHORTEST, 0, buffer); assertEquals("15", buffer.getRawDigits()); assertEquals(1, buffer.getDecimalPoint()); buffer.reset(); DoubleConversion.bignumDtoa(1.5, DtoaMode.FIXED, 10, buffer); assertTrue(10 >= buffer.getLength() - buffer.getDecimalPoint()); assertEquals("15", trimRepresentation(buffer.getRawDigits())); assertEquals(1, buffer.getDecimalPoint()); buffer.reset(); DoubleConversion.bignumDtoa(1.5, DtoaMode.PRECISION, 10, buffer); assertTrue(10 >= buffer.getLength()); assertEquals("15", trimRepresentation(buffer.getRawDigits())); assertEquals(1, buffer.getDecimalPoint()); buffer.reset(); final double min_double = 5e-324; DoubleConversion.bignumDtoa(min_double, DtoaMode.SHORTEST, 0, buffer); assertEquals("5", buffer.getRawDigits()); assertEquals(-323, buffer.getDecimalPoint()); buffer.reset(); DoubleConversion.bignumDtoa(min_double, DtoaMode.FIXED, 5, buffer); assertTrue(5 >= buffer.getLength() - buffer.getDecimalPoint()); assertEquals("", 
trimRepresentation(buffer.getRawDigits())); buffer.reset(); DoubleConversion.bignumDtoa(min_double, DtoaMode.PRECISION, 5, buffer); assertTrue(5 >= buffer.getLength()); assertEquals("49407", trimRepresentation(buffer.getRawDigits())); assertEquals(-323, buffer.getDecimalPoint()); buffer.reset(); final double max_double = 1.7976931348623157e308; DoubleConversion.bignumDtoa(max_double, DtoaMode.SHORTEST, 0, buffer); assertEquals("17976931348623157", buffer.getRawDigits()); assertEquals(309, buffer.getDecimalPoint()); buffer.reset(); DoubleConversion.bignumDtoa(max_double, DtoaMode.PRECISION, 7, buffer); assertTrue(7 >= buffer.getLength()); assertEquals("1797693", trimRepresentation(buffer.getRawDigits())); assertEquals(309, buffer.getDecimalPoint()); buffer.reset(); DoubleConversion.bignumDtoa(4294967272.0, DtoaMode.SHORTEST, 0, buffer); assertEquals("4294967272", buffer.getRawDigits()); assertEquals(10, buffer.getDecimalPoint()); buffer.reset(); DoubleConversion.bignumDtoa(4294967272.0, DtoaMode.FIXED, 5, buffer); assertEquals("429496727200000", buffer.getRawDigits()); assertEquals(10, buffer.getDecimalPoint()); buffer.reset(); DoubleConversion.bignumDtoa(4294967272.0, DtoaMode.PRECISION, 14, buffer); assertTrue(14 >= buffer.getLength()); assertEquals("4294967272", trimRepresentation(buffer.getRawDigits())); assertEquals(10, buffer.getDecimalPoint()); buffer.reset(); DoubleConversion.bignumDtoa(4.1855804968213567e298, DtoaMode.SHORTEST, 0, buffer); assertEquals("4185580496821357", buffer.getRawDigits()); assertEquals(299, buffer.getDecimalPoint()); buffer.reset(); DoubleConversion.bignumDtoa(4.1855804968213567e298, DtoaMode.PRECISION, 20, buffer); assertTrue(20 >= buffer.getLength()); assertEquals("41855804968213567225", trimRepresentation(buffer.getRawDigits())); assertEquals(299, buffer.getDecimalPoint()); buffer.reset(); DoubleConversion.bignumDtoa(5.5626846462680035e-309, DtoaMode.SHORTEST, 0, buffer); assertEquals("5562684646268003", buffer.getRawDigits()); 
assertEquals(-308, buffer.getDecimalPoint()); buffer.reset(); DoubleConversion.bignumDtoa(5.5626846462680035e-309, DtoaMode.PRECISION, 1, buffer); assertTrue(1 >= buffer.getLength()); assertEquals("6", trimRepresentation(buffer.getRawDigits())); assertEquals(-308, buffer.getDecimalPoint()); buffer.reset(); DoubleConversion.bignumDtoa(2147483648.0, DtoaMode.SHORTEST, 0, buffer); assertEquals("2147483648", buffer.getRawDigits()); assertEquals(10, buffer.getDecimalPoint()); buffer.reset(); DoubleConversion.bignumDtoa(2147483648.0, DtoaMode.FIXED, 2, buffer); assertTrue(2 >= buffer.getLength() - buffer.getDecimalPoint()); assertEquals("2147483648", trimRepresentation(buffer.getRawDigits())); assertEquals(10, buffer.getDecimalPoint()); buffer.reset(); DoubleConversion.bignumDtoa(2147483648.0, DtoaMode.PRECISION, 5, buffer); assertTrue(5 >= buffer.getLength()); assertEquals("21475", trimRepresentation(buffer.getRawDigits())); assertEquals(10, buffer.getDecimalPoint()); buffer.reset(); DoubleConversion.bignumDtoa(3.5844466002796428e+298, DtoaMode.SHORTEST, 0, buffer); assertEquals("35844466002796428", buffer.getRawDigits()); assertEquals(299, buffer.getDecimalPoint()); buffer.reset(); DoubleConversion.bignumDtoa(3.5844466002796428e+298, DtoaMode.PRECISION, 10, buffer); assertTrue(10 >= buffer.getLength()); assertEquals("35844466", trimRepresentation(buffer.getRawDigits())); assertEquals(299, buffer.getDecimalPoint()); buffer.reset(); final long smallest_normal64 = 0x0010000000000000L; double v = Double.longBitsToDouble(smallest_normal64); DoubleConversion.bignumDtoa(v, DtoaMode.SHORTEST, 0, buffer); assertEquals("22250738585072014", buffer.getRawDigits()); assertEquals(-307, buffer.getDecimalPoint()); buffer.reset(); DoubleConversion.bignumDtoa(v, DtoaMode.PRECISION, 20, buffer); assertTrue(20 >= buffer.getLength()); assertEquals("22250738585072013831", trimRepresentation(buffer.getRawDigits())); assertEquals(-307, buffer.getDecimalPoint()); buffer.reset(); final long 
largest_denormal64 = 0x000FFFFFFFFFFFFFL; v = Double.longBitsToDouble(largest_denormal64); DoubleConversion.bignumDtoa(v, DtoaMode.SHORTEST, 0, buffer); assertEquals("2225073858507201", buffer.getRawDigits()); assertEquals(-307, buffer.getDecimalPoint()); buffer.reset(); DoubleConversion.bignumDtoa(v, DtoaMode.PRECISION, 20, buffer); assertTrue(20 >= buffer.getLength()); assertEquals("2225073858507200889", trimRepresentation(buffer.getRawDigits())); assertEquals(-307, buffer.getDecimalPoint()); buffer.reset(); DoubleConversion.bignumDtoa(4128420500802942e-24, DtoaMode.SHORTEST, 0, buffer); assertEquals("4128420500802942", buffer.getRawDigits()); assertEquals(-8, buffer.getDecimalPoint()); buffer.reset(); DoubleConversion.bignumDtoa(3.9292015898194142585311918e-10, DtoaMode.SHORTEST, 0, buffer); assertEquals("39292015898194143", buffer.getRawDigits()); buffer.reset(); v = 4194304.0; DoubleConversion.bignumDtoa(v, DtoaMode.FIXED, 5, buffer); assertTrue(5 >= buffer.getLength() - buffer.getDecimalPoint()); assertEquals("4194304", trimRepresentation(buffer.getRawDigits())); buffer.reset(); v = 3.3161339052167390562200598e-237; DoubleConversion.bignumDtoa(v, DtoaMode.PRECISION, 19, buffer); assertTrue(19 >= buffer.getLength()); assertEquals("3316133905216739056", trimRepresentation(buffer.getRawDigits())); assertEquals(-236, buffer.getDecimalPoint()); buffer.reset(); v = 7.9885183916008099497815232e+191; DoubleConversion.bignumDtoa(v, DtoaMode.PRECISION, 4, buffer); assertTrue(4 >= buffer.getLength()); assertEquals("7989", trimRepresentation(buffer.getRawDigits())); assertEquals(192, buffer.getDecimalPoint()); buffer.reset(); v = 1.0000000000000012800000000e+17; DoubleConversion.bignumDtoa(v, DtoaMode.FIXED, 1, buffer); assertTrue(1 >= buffer.getLength() - buffer.getDecimalPoint()); assertEquals("100000000000000128", trimRepresentation(buffer.getRawDigits())); assertEquals(18, buffer.getDecimalPoint()); buffer.reset(); } @Test public void testBignumShortest() { new 
BufferedReader(new InputStreamReader(getClass().getResourceAsStream("resources/gay-shortest.txt"))) .lines() .forEach(line -> { if (line.isEmpty() || line.startsWith("//")) { return; // comment or empty line } final String[] tokens = line.split(",\\s+"); assertEquals(tokens.length, 3, "*" + line + "*"); final double v = Double.parseDouble(tokens[0]); final String str = tokens[1].replace('"', ' ').trim();; final int point = Integer.parseInt(tokens[2]); final DtoaBuffer buffer = new DtoaBuffer(BUFFER_SIZE); DoubleConversion.bignumDtoa(v, DtoaMode.SHORTEST, 0, buffer); assertEquals(str, buffer.getRawDigits()); assertEquals(point, buffer.getDecimalPoint()); }); } @Test public void testBignumFixed() { new BufferedReader(new InputStreamReader(getClass().getResourceAsStream("resources/gay-fixed.txt"))) .lines() .forEach(line -> { if (line.isEmpty() || line.startsWith("//")) { return; // comment or empty line } final String[] tokens = line.split(",\\s+"); assertEquals(tokens.length, 4); final double v = Double.parseDouble(tokens[0]); final int digits = Integer.parseInt(tokens[1]); final String str = tokens[2].replace('"', ' ').trim(); final int point = Integer.parseInt(tokens[3]); final DtoaBuffer buffer = new DtoaBuffer(BUFFER_SIZE); DoubleConversion.bignumDtoa(v, DtoaMode.FIXED, digits, buffer); assertEquals(str, trimRepresentation(buffer.getRawDigits())); assertEquals(point, buffer.getDecimalPoint()); }); } @Test public void testBignumPrecision() { new BufferedReader(new InputStreamReader(getClass().getResourceAsStream("resources/gay-precision.txt"))) .lines() .forEach(line -> { if (line.isEmpty() || line.startsWith("//")) { return; // comment or empty line } final String[] tokens = line.split(",\\s+"); assertEquals(tokens.length, 4); final double v = Double.parseDouble(tokens[0]); final int digits = Integer.parseInt(tokens[1]); final String str = tokens[2].replace('"', ' ').trim(); final int point = Integer.parseInt(tokens[3]); final DtoaBuffer buffer = new 
DtoaBuffer(BUFFER_SIZE); DoubleConversion.bignumDtoa(v, DtoaMode.PRECISION, digits, buffer); assertEquals(str, trimRepresentation(buffer.getRawDigits())); assertEquals(point, buffer.getDecimalPoint()); }); } }
gpl-2.0
md-5/jdk10
test/hotspot/jtreg/vmTestbase/nsk/jdi/StackFrame/getValues/getvalues003t.java
6158
/* * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ package nsk.jdi.StackFrame.getValues; import nsk.share.*; import nsk.share.jpda.*; import nsk.share.jdi.*; // THIS TEST IS LINE NUMBER SENSITIVE /** * This is a debuggee class. 
 */
// NOTE(review): this file is declared LINE NUMBER SENSITIVE above — the
// debugger side (getvalues003) references source line numbers (see
// DEBUGGEE_STOPATLINE). Any edit that adds/removes lines around the code
// below must be mirrored in the companion debugger class.
public class getvalues003t {
    private Log log;
    private IOPipe pipe;
    private OtherThr auxThr;

    public static void main(String args[]) {
        System.exit(run(args) + Consts.JCK_STATUS_BASE);
    }

    public static int run(String args[]) {
        return new getvalues003t().runIt(args);
    }

    // Main debuggee logic: start the auxiliary thread, declare the locals the
    // debugger inspects, then handshake with the debugger over the IOPipe.
    private int runIt(String args[]) {
        ArgumentHandler argHandler = new ArgumentHandler(args);

        log = argHandler.createDebugeeLog();
        pipe = argHandler.createDebugeeIOPipe();

        Thread.currentThread().setName(getvalues003.DEBUGGEE_THRDNAMES[0]);
        startThread();

        // dummy local vars used by debugger for testing
        byte getvalues003tFindMe = 127;
        short shortVar = -32768;
        int intVar = 2147483647;
        long longVar = 9223372036854775807L;
        float floatVar = 5.1F;
        double doubleVar = 6.2D;
        char charVar = 'a';
        boolean booleanVar = true;
        String strVar = "string var";

        // Now the debuggee is ready
        pipe.println(getvalues003.COMMAND_READY);
        String cmd = pipe.readln();
        if (cmd.equals(getvalues003.COMMAND_QUIT)) {
            killThread(argHandler.getWaitTime()*60000);
            log.complain("Debuggee: exiting due to the command "
                + cmd);
            return Consts.TEST_PASSED;
        }

        // the debugger sets a breakpoint at this line (DEBUGGEE_STOPATLINE)
        int stopMeHere = 0; // getvalues003.DEBUGGEE_STOPATLINE

        cmd = pipe.readln();
        killThread(argHandler.getWaitTime()*60000);
        if (!cmd.equals(getvalues003.COMMAND_QUIT)) {
            log.complain("TEST BUG: unknown debugger command: "
                + cmd);
            return Consts.TEST_FAILED;
        }
        return Consts.TEST_PASSED;
    }

    // Starts the auxiliary thread and blocks until it signals readiness.
    private void startThread() {
        Object readyObj = new Object();

        auxThr = new OtherThr(readyObj,
            getvalues003.DEBUGGEE_THRDNAMES[1]);
        auxThr.setDaemon(true);

        log.display("Debuggee: starting thread \""
            + auxThr.getName() + "\" ...");
        synchronized(readyObj) {
            auxThr.start();
            try {
                readyObj.wait(); // wait for the thread's readiness
            } catch (InterruptedException e) {
                log.complain("TEST FAILURE: Debuggee: waiting for the thread "
                    + auxThr + " start: caught " + e);
                pipe.println("failed");
                System.exit(Consts.JCK_STATUS_BASE +
                    Consts.TEST_FAILED);
            }
        }
        log.display("Debuggee: the thread \""
            + auxThr.getName() + "\" started");
    }

    // Signals the auxiliary thread to stop and joins it (bounded by waitTime).
    private void killThread(int waitTime) {
        auxThr.doExit = true;
        try {
            auxThr.join(waitTime);
            log.display("Debuggee: thread \""
                + auxThr.getName() + "\" done");
        } catch (InterruptedException e) {
            log.complain("TEST FAILURE: Debuggee: joining the thread \""
                + auxThr.getName() + "\": caught " + e);
        }
    }

   /**
    * This is an auxiliary thread class used to check
    * an IllegalArgumentException in debugger.
    */
    class OtherThr extends Thread {
        // flag polled by run(); set by killThread() to request shutdown
        volatile boolean doExit = false;
        private Object readyObj;

        OtherThr(Object readyObj, String name) {
            super(name);
            this.readyObj = readyObj;
        }

        public void run() {
            // dummy local vars used by debugger for testing
            byte getvalues003tFindMe = 127;
            short shortVar = -32768;
            int intVar = 2147483647;
            long longVar = 9223372036854775807L;
            float floatVar = 5.1F;
            double doubleVar = 6.2D;
            char charVar = 'a';
            boolean booleanVar = true;
            String strVar = "string var";
            Thread thr = Thread.currentThread();

            synchronized(readyObj) {
                readyObj.notify(); // notify the main thread
            }

            log.display("Debuggee thread \""
                + thr.getName() + "\": going to loop");
            while(!doExit) {
                int i = 0;
                i++; i--; // reliable analogue of Thread.yield()

                synchronized(this) {
                    try {
                        this.wait(30);
                    } catch (InterruptedException e) {
                        e.printStackTrace(log.getOutStream());
                        log.complain("TEST FAILURE: Debuggee thread \""
                            + thr.getName()
                            + "\" interrupted while sleeping:\n\t" + e);
                        break;
                    }
                }
            }
            log.display("Debuggee thread \""
                + thr.getName() + "\" exiting ...");
        }
    }
/////////////////////////////////////////////////////////////////////////////

}
gpl-2.0
md-5/jdk10
test/hotspot/jtreg/vmTestbase/nsk/jvmti/unit/FollowReferences/followref001.java
4451
/* * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. 
*/
package nsk.jvmti.unit.FollowReferences;

import java.io.PrintStream;

import nsk.share.*;
import nsk.share.jvmti.*;

public class followref001 extends DebugeeClass {

    /** Load native library if required.*/
    static {
        loadLibrary("followref001");
    }

    /** Run test from command line */
    public static void main(String argv[]) {
        argv = nsk.share.jvmti.JVMTITest.commonInit(argv);

        // JCK-compatible exit
        System.exit(run(argv, System.out) + Consts.JCK_STATUS_BASE);
    }

    /** Run test from JCK-compatible environment */
    public static int run(String argv[], PrintStream out) {
        return new followref001().runIt(argv, out);
    }

    /* =================================================================== */

    /* scaffold objects */
    ArgumentHandler argHandler = null;
    Log log = null;
    int status = Consts.TEST_PASSED;

    /* constants */
    public static final int DEFAULT_CHAIN_LENGTH = 3;

    /** Tested object */
    // Static so the JVMTI agent can reach the root object via the class.
    public static followref001RootTestedClass rootObject = null;

    /**
     * Run debugee code: builds the two object chains, lets the agent check
     * heap references (checkStatus syncs with the native agent), then drops
     * the reference to the second chain and checks again.
     */
    public int runIt(String argv[], PrintStream out) {
        argHandler = new ArgumentHandler(argv);
        log = new Log(out, argHandler);

        int chainLength = argHandler.findOptionIntValue("objects", DEFAULT_CHAIN_LENGTH);

        log.display("Creating chain of tested objects: " + chainLength + " length");
        rootObject = new followref001RootTestedClass(chainLength);
        log.display("Sync: two object chains created");
        status = checkStatus(status);

        log.display("Cleaning the unreachableChain field");
        /* This is to ensure that it is not GC-ed */
        // The returned chain head is kept in a local so the objects stay live
        // while being unreachable *from the root object*.
        followref001TestedClass savedChain = rootObject.cleanUnreachable();
        log.display("Sync: 2-nd object chain is unreachable from the root object");
        status = checkStatus(status);

        return status;
    }
}

/* =================================================================== */

/** Class for root tested object */
class followref001RootTestedClass {
    int length;

    followref001TestedClass reachableChain = null;
    followref001TestedClass unreachableChain = null;

    /**
     * Builds two chains of {@code length} objects each and links the tail of
     * every chain back to its own head, forming two reference cycles.
     */
    public followref001RootTestedClass(int length) {
        this.length = length;
        reachableChain = new followref001TestedClass(length);
        unreachableChain = new followref001TestedClass(length);
        reachableChain.setChainTail(length, reachableChain);
        unreachableChain.setChainTail(length, unreachableChain);
    }

    /**
     * Clears the {@code unreachableChain} field and returns the old chain
     * head so the caller can decide how long the objects remain live.
     */
    public followref001TestedClass cleanUnreachable() {
        followref001TestedClass chain = unreachableChain;
        unreachableChain = null;
        return chain;
    }
}

/** Class for tested chain object */
class followref001TestedClass {
    followref001TestedClass next = null;

    // Primitive fields of each type, present so the agent can verify
    // primitive-field callbacks during heap iteration.
    boolean zz = true;
    byte bb = 127;
    char cc = 'C';
    short ss = 1995;
    int level;
    long jj = 99999999;
    float ff = 3.14f;
    double dd = 3.14d;

    /** Recursively creates a chain of {@code length} linked objects. */
    public followref001TestedClass(int length) {
        this.level = length;
        if (length > 1) {
            next = new followref001TestedClass(length - 1);
        }
    }

    /** Walks to the end of the chain and links the last node to {@code last}. */
    public void setChainTail(int length, followref001TestedClass last) {
        if (length > 1) {
            next.setChainTail(length - 1, last);
        } else {
            next = last;
        }
    }
}
gpl-2.0
0x726d77/storm
flux/flux-core/src/main/java/org/apache/storm/flux/model/IncludeDef.java
1590
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.storm.flux.model;

/**
 * Represents an include. Includes can be either a file or a classpath resource.
 *<p/>
 * If an include is marked as `override=true` then existing properties will be replaced.
 *
 */
public class IncludeDef {
    /** Whether {@link #getFile()} names a classpath resource rather than a file path. */
    private boolean resource = false;
    /** Whether properties from this include replace existing properties on conflict. */
    // FIX: was package-private, inconsistent with the sibling fields; narrowed
    // to private — all access already goes through isOverride()/setOverride().
    private boolean override = false;
    /** File path or classpath resource name to include (see {@link #isResource()}). */
    private String file;

    /** @return {@code true} if the include is a classpath resource, {@code false} for a file. */
    public boolean isResource() {
        return resource;
    }

    public void setResource(boolean resource) {
        this.resource = resource;
    }

    /** @return the file path or resource name of this include. */
    public String getFile() {
        return file;
    }

    public void setFile(String file) {
        this.file = file;
    }

    /** @return {@code true} if this include overrides existing properties. */
    public boolean isOverride() {
        return override;
    }

    public void setOverride(boolean override) {
        this.override = override;
    }
}
apache-2.0
an3m0na/hadoop
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
67290
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Trash; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo; import org.apache.hadoop.ha.HAServiceStatus; import org.apache.hadoop.ha.HealthCheckFailedException; import org.apache.hadoop.ha.ServiceFailedException; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption; import 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.namenode.ha.ActiveState; import org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby; import org.apache.hadoop.hdfs.server.namenode.ha.HAContext; import org.apache.hadoop.hdfs.server.namenode.ha.HAState; import org.apache.hadoop.hdfs.server.namenode.ha.StandbyState; import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgressMetrics; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.JournalProtocol; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.ipc.RefreshCallQueueProtocol; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.ipc.StandbyException; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.RefreshUserMappingsProtocol; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol; import org.apache.hadoop.tools.GetUserMappingsProtocol; import org.apache.hadoop.tracing.SpanReceiverHost; import org.apache.hadoop.tracing.TraceAdminProtocol; import org.apache.hadoop.util.ExitUtil.ExitException; import org.apache.hadoop.util.JvmPauseMonitor; import org.apache.hadoop.util.ServicePlugin; import org.apache.hadoop.util.StringUtils; import org.apache.log4j.LogManager; import 
org.slf4j.Logger; import org.slf4j.LoggerFactory; import javax.management.ObjectName; import java.io.IOException; import java.io.PrintStream; import java.net.InetSocketAddress; import java.net.URI; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_BIND_HOST_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT; import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_BIND_HOST_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PLUGINS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_BIND_HOST_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_STARTUP_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS; import static org.apache.hadoop.util.ExitUtil.terminate; import static org.apache.hadoop.util.ToolRunner.confirmPrompt; /********************************************************** * NameNode serves as both directory namespace manager and * "inode table" for the Hadoop DFS. 
There is a single NameNode * running in any DFS deployment. (Well, except when there * is a second backup/failover NameNode, or when using federated NameNodes.) * * The NameNode controls two critical tables: * 1) filename->blocksequence (namespace) * 2) block->machinelist ("inodes") * * The first table is stored on disk and is very precious. * The second table is rebuilt every time the NameNode comes up. * * 'NameNode' refers to both this class as well as the 'NameNode server'. * The 'FSNamesystem' class actually performs most of the filesystem * management. The majority of the 'NameNode' class itself is concerned * with exposing the IPC interface and the HTTP server to the outside world, * plus some configuration management. * * NameNode implements the * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol} interface, which * allows clients to ask for DFS services. * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol} is not designed for * direct use by authors of DFS client code. End-users should instead use the * {@link org.apache.hadoop.fs.FileSystem} class. * * NameNode also implements the * {@link org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol} interface, * used by DataNodes that actually store DFS data blocks. These * methods are invoked repeatedly and automatically by all the * DataNodes in a DFS deployment. * * NameNode also implements the * {@link org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol} interface, * used by secondary namenodes or rebalancing processes to get partial * NameNode state, for example partial blocksMap etc. **********************************************************/ @InterfaceAudience.Private public class NameNode implements NameNodeStatusMXBean { static{ HdfsConfiguration.init(); } /** * Categories of operations supported by the namenode. 
*/
  // NOTE(review): the 'static' modifier is redundant here — nested enums are
  // implicitly static — but it is kept unchanged.
  public static enum OperationCategory {
    /** Operations that are state agnostic */
    UNCHECKED,
    /** Read operation that does not change the namespace state */
    READ,
    /** Write operation that changes the namespace state */
    WRITE,
    /** Operations related to checkpointing */
    CHECKPOINT,
    /** Operations related to {@link JournalProtocol} */
    JOURNAL
  }

  /**
   * HDFS configuration can have three types of parameters:
   * <ol>
   * <li>Parameters that are common for all the name services in the cluster.</li>
   * <li>Parameters that are specific to a name service. These keys are suffixed
   * with nameserviceId in the configuration. For example,
   * "dfs.namenode.rpc-address.nameservice1".</li>
   * <li>Parameters that are specific to a single name node. These keys are suffixed
   * with nameserviceId and namenodeId in the configuration. for example,
   * "dfs.namenode.rpc-address.nameservice1.namenode1"</li>
   * </ol>
   *
   * In the latter cases, operators may specify the configuration without
   * any suffix, with a nameservice suffix, or with a nameservice and namenode
   * suffix. The more specific suffix will take precedence.
   *
   * These keys are specific to a given namenode, and thus may be configured
   * globally, for a nameservice, or for a specific namenode within a nameservice.
*/
  // Per-namenode configuration keys that are rewritten with the more specific
  // (nameservice/namenode-suffixed) value when present; see the javadoc above.
  public static final String[] NAMENODE_SPECIFIC_KEYS = {
    DFS_NAMENODE_RPC_ADDRESS_KEY,
    DFS_NAMENODE_RPC_BIND_HOST_KEY,
    DFS_NAMENODE_NAME_DIR_KEY,
    DFS_NAMENODE_EDITS_DIR_KEY,
    DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
    DFS_NAMENODE_CHECKPOINT_DIR_KEY,
    DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
    DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
    DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY,
    DFS_NAMENODE_HTTP_ADDRESS_KEY,
    DFS_NAMENODE_HTTPS_ADDRESS_KEY,
    DFS_NAMENODE_HTTP_BIND_HOST_KEY,
    DFS_NAMENODE_HTTPS_BIND_HOST_KEY,
    DFS_NAMENODE_KEYTAB_FILE_KEY,
    DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
    DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
    DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
    DFS_NAMENODE_BACKUP_ADDRESS_KEY,
    DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
    DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY,
    DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY,
    DFS_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
    DFS_HA_FENCE_METHODS_KEY,
    DFS_HA_ZKFC_PORT_KEY
    // FIX: removed a duplicate trailing DFS_HA_FENCE_METHODS_KEY entry — the
    // key appeared twice in this array, causing it to be redundantly
    // re-resolved on every per-namenode configuration rewrite.
  };

  /**
   * @see #NAMENODE_SPECIFIC_KEYS
   * These keys are specific to a nameservice, but may not be overridden
   * for a specific namenode.
*/ public static final String[] NAMESERVICE_SPECIFIC_KEYS = { DFS_HA_AUTO_FAILOVER_ENABLED_KEY }; private static final String USAGE = "Usage: java NameNode [" + StartupOption.BACKUP.getName() + "] | \n\t[" + StartupOption.CHECKPOINT.getName() + "] | \n\t[" + StartupOption.FORMAT.getName() + " [" + StartupOption.CLUSTERID.getName() + " cid ] [" + StartupOption.FORCE.getName() + "] [" + StartupOption.NONINTERACTIVE.getName() + "] ] | \n\t[" + StartupOption.UPGRADE.getName() + " [" + StartupOption.CLUSTERID.getName() + " cid]" + " [" + StartupOption.RENAMERESERVED.getName() + "<k-v pairs>] ] | \n\t[" + StartupOption.UPGRADEONLY.getName() + " [" + StartupOption.CLUSTERID.getName() + " cid]" + " [" + StartupOption.RENAMERESERVED.getName() + "<k-v pairs>] ] | \n\t[" + StartupOption.ROLLBACK.getName() + "] | \n\t[" + StartupOption.ROLLINGUPGRADE.getName() + " " + RollingUpgradeStartupOption.getAllOptionString() + " ] | \n\t[" + StartupOption.FINALIZE.getName() + "] | \n\t[" + StartupOption.IMPORT.getName() + "] | \n\t[" + StartupOption.INITIALIZESHAREDEDITS.getName() + "] | \n\t[" + StartupOption.BOOTSTRAPSTANDBY.getName() + "] | \n\t[" + StartupOption.RECOVER.getName() + " [ " + StartupOption.FORCE.getName() + "] ] | \n\t[" + StartupOption.METADATAVERSION.getName() + " ] " + " ]"; public long getProtocolVersion(String protocol, long clientVersion) throws IOException { if (protocol.equals(ClientProtocol.class.getName())) { return ClientProtocol.versionID; } else if (protocol.equals(DatanodeProtocol.class.getName())){ return DatanodeProtocol.versionID; } else if (protocol.equals(NamenodeProtocol.class.getName())){ return NamenodeProtocol.versionID; } else if (protocol.equals(RefreshAuthorizationPolicyProtocol.class.getName())){ return RefreshAuthorizationPolicyProtocol.versionID; } else if (protocol.equals(RefreshUserMappingsProtocol.class.getName())){ return RefreshUserMappingsProtocol.versionID; } else if (protocol.equals(RefreshCallQueueProtocol.class.getName())) { 
return RefreshCallQueueProtocol.versionID; } else if (protocol.equals(GetUserMappingsProtocol.class.getName())){ return GetUserMappingsProtocol.versionID; } else if (protocol.equals(TraceAdminProtocol.class.getName())){ return TraceAdminProtocol.versionID; } else { throw new IOException("Unknown protocol to name node: " + protocol); } } public static final int DEFAULT_PORT = 8020; public static final Logger LOG = LoggerFactory.getLogger(NameNode.class.getName()); public static final Logger stateChangeLog = LoggerFactory.getLogger("org.apache.hadoop.hdfs.StateChange"); public static final Logger blockStateChangeLog = LoggerFactory.getLogger("BlockStateChange"); public static final HAState ACTIVE_STATE = new ActiveState(); public static final HAState STANDBY_STATE = new StandbyState(); protected FSNamesystem namesystem; protected final Configuration conf; protected final NamenodeRole role; private volatile HAState state; private final boolean haEnabled; private final HAContext haContext; protected final boolean allowStaleStandbyReads; private AtomicBoolean started = new AtomicBoolean(false); /** httpServer */ protected NameNodeHttpServer httpServer; private Thread emptier; /** only used for testing purposes */ protected boolean stopRequested = false; /** Registration information of this name-node */ protected NamenodeRegistration nodeRegistration; /** Activated plug-ins. */ private List<ServicePlugin> plugins; private NameNodeRpcServer rpcServer; private JvmPauseMonitor pauseMonitor; private ObjectName nameNodeStatusBeanName; SpanReceiverHost spanReceiverHost; /** * The namenode address that clients will use to access this namenode * or the name service. For HA configurations using logical URI, it * will be the logical address. */ private String clientNamenodeAddress; /** Format a new filesystem. Destroys any filesystem that may already * exist at this location. 
**/ public static void format(Configuration conf) throws IOException { format(conf, true, true); } static NameNodeMetrics metrics; private static final StartupProgress startupProgress = new StartupProgress(); /** Return the {@link FSNamesystem} object. * @return {@link FSNamesystem} object. */ public FSNamesystem getNamesystem() { return namesystem; } public NamenodeProtocols getRpcServer() { return rpcServer; } static void initMetrics(Configuration conf, NamenodeRole role) { metrics = NameNodeMetrics.create(conf, role); } public static NameNodeMetrics getNameNodeMetrics() { return metrics; } /** * Returns object used for reporting namenode startup progress. * * @return StartupProgress for reporting namenode startup progress */ public static StartupProgress getStartupProgress() { return startupProgress; } /** * Return the service name of the issued delegation token. * * @return The name service id in HA-mode, or the rpc address in non-HA mode */ public String getTokenServiceName() { return getClientNamenodeAddress(); } /** * Set the namenode address that will be used by clients to access this * namenode or name service. This needs to be called before the config * is overriden. */ public void setClientNamenodeAddress(Configuration conf) { String nnAddr = conf.get(FS_DEFAULT_NAME_KEY); if (nnAddr == null) { // default fs is not set. clientNamenodeAddress = null; return; } LOG.info("{} is {}", FS_DEFAULT_NAME_KEY, nnAddr); URI nnUri = URI.create(nnAddr); String nnHost = nnUri.getHost(); if (nnHost == null) { clientNamenodeAddress = null; return; } if (DFSUtil.getNameServiceIds(conf).contains(nnHost)) { // host name is logical clientNamenodeAddress = nnHost; } else if (nnUri.getPort() > 0) { // physical address with a valid port clientNamenodeAddress = nnUri.getAuthority(); } else { // the port is missing or 0. Figure out real bind address later. 
clientNamenodeAddress = null; return; } LOG.info("Clients are to use {} to access" + " this namenode/service.", clientNamenodeAddress ); } /** * Get the namenode address to be used by clients. * @return nn address */ public String getClientNamenodeAddress() { return clientNamenodeAddress; } public static InetSocketAddress getAddress(String address) { return NetUtils.createSocketAddr(address, DEFAULT_PORT); } /** * Set the configuration property for the service rpc address * to address */ public static void setServiceAddress(Configuration conf, String address) { LOG.info("Setting ADDRESS {}", address); conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, address); } /** * Fetches the address for services to use when connecting to namenode * based on the value of fallback returns null if the special * address is not specified or returns the default namenode address * to be used by both clients and services. * Services here are datanodes, backup node, any non client connection */ public static InetSocketAddress getServiceAddress(Configuration conf, boolean fallback) { String addr = conf.getTrimmed(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY); if (addr == null || addr.isEmpty()) { return fallback ? 
getAddress(conf) : null; } return getAddress(addr); } public static InetSocketAddress getAddress(Configuration conf) { URI filesystemURI = FileSystem.getDefaultUri(conf); return getAddress(filesystemURI); } /** * @return address of file system */ public static InetSocketAddress getAddress(URI filesystemURI) { String authority = filesystemURI.getAuthority(); if (authority == null) { throw new IllegalArgumentException(String.format( "Invalid URI for NameNode address (check %s): %s has no authority.", FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString())); } if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase( filesystemURI.getScheme())) { throw new IllegalArgumentException(String.format( "Invalid URI for NameNode address (check %s): %s is not of scheme '%s'.", FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString(), HdfsConstants.HDFS_URI_SCHEME)); } return getAddress(authority); } public static URI getUri(InetSocketAddress namenode) { int port = namenode.getPort(); String portString = port == DEFAULT_PORT ? "" : (":"+port); return URI.create(HdfsConstants.HDFS_URI_SCHEME + "://" + namenode.getHostName()+portString); } // // Common NameNode methods implementation for the active name-node role. // public NamenodeRole getRole() { return role; } boolean isRole(NamenodeRole that) { return role.equals(that); } /** * Given a configuration get the address of the service rpc server * If the service rpc is not configured returns null */ protected InetSocketAddress getServiceRpcServerAddress(Configuration conf) { return NameNode.getServiceAddress(conf, false); } protected InetSocketAddress getRpcServerAddress(Configuration conf) { return getAddress(conf); } /** Given a configuration get the bind host of the service rpc server * If the bind host is not configured returns null. 
*/ protected String getServiceRpcServerBindHost(Configuration conf) { String addr = conf.getTrimmed(DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY); if (addr == null || addr.isEmpty()) { return null; } return addr; } /** Given a configuration get the bind host of the client rpc server * If the bind host is not configured returns null. */ protected String getRpcServerBindHost(Configuration conf) { String addr = conf.getTrimmed(DFS_NAMENODE_RPC_BIND_HOST_KEY); if (addr == null || addr.isEmpty()) { return null; } return addr; } /** * Modifies the configuration passed to contain the service rpc address setting */ protected void setRpcServiceServerAddress(Configuration conf, InetSocketAddress serviceRPCAddress) { setServiceAddress(conf, NetUtils.getHostPortString(serviceRPCAddress)); } protected void setRpcServerAddress(Configuration conf, InetSocketAddress rpcAddress) { FileSystem.setDefaultUri(conf, getUri(rpcAddress)); } protected InetSocketAddress getHttpServerAddress(Configuration conf) { return getHttpAddress(conf); } /** * HTTP server address for binding the endpoint. This method is * for use by the NameNode and its derivatives. It may return * a different address than the one that should be used by clients to * connect to the NameNode. See * {@link DFSConfigKeys#DFS_NAMENODE_HTTP_BIND_HOST_KEY} * * @param conf * @return */ protected InetSocketAddress getHttpServerBindAddress(Configuration conf) { InetSocketAddress bindAddress = getHttpServerAddress(conf); // If DFS_NAMENODE_HTTP_BIND_HOST_KEY exists then it overrides the // host name portion of DFS_NAMENODE_HTTP_ADDRESS_KEY. final String bindHost = conf.getTrimmed(DFS_NAMENODE_HTTP_BIND_HOST_KEY); if (bindHost != null && !bindHost.isEmpty()) { bindAddress = new InetSocketAddress(bindHost, bindAddress.getPort()); } return bindAddress; } /** @return the NameNode HTTP address. 
*/ public static InetSocketAddress getHttpAddress(Configuration conf) { return NetUtils.createSocketAddr( conf.getTrimmed(DFS_NAMENODE_HTTP_ADDRESS_KEY, DFS_NAMENODE_HTTP_ADDRESS_DEFAULT)); } protected void loadNamesystem(Configuration conf) throws IOException { this.namesystem = FSNamesystem.loadFromDisk(conf); } NamenodeRegistration getRegistration() { return nodeRegistration; } NamenodeRegistration setRegistration() { nodeRegistration = new NamenodeRegistration( NetUtils.getHostPortString(rpcServer.getRpcAddress()), NetUtils.getHostPortString(getHttpAddress()), getFSImage().getStorage(), getRole()); return nodeRegistration; } /* optimize ugi lookup for RPC operations to avoid a trip through * UGI.getCurrentUser which is synch'ed */ public static UserGroupInformation getRemoteUser() throws IOException { UserGroupInformation ugi = Server.getRemoteUser(); return (ugi != null) ? ugi : UserGroupInformation.getCurrentUser(); } /** * Login as the configured user for the NameNode. */ void loginAsNameNodeUser(Configuration conf) throws IOException { InetSocketAddress socAddr = getRpcServerAddress(conf); SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY, DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName()); } /** * Initialize name-node. 
* * @param conf the configuration */ protected void initialize(Configuration conf) throws IOException { if (conf.get(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS) == null) { String intervals = conf.get(DFS_METRICS_PERCENTILES_INTERVALS_KEY); if (intervals != null) { conf.set(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS, intervals); } } UserGroupInformation.setConfiguration(conf); loginAsNameNodeUser(conf); NameNode.initMetrics(conf, this.getRole()); StartupProgressMetrics.register(startupProgress); if (NamenodeRole.NAMENODE == role) { startHttpServer(conf); } this.spanReceiverHost = SpanReceiverHost.get(conf, DFSConfigKeys.DFS_SERVER_HTRACE_PREFIX); loadNamesystem(conf); rpcServer = createRpcServer(conf); if (clientNamenodeAddress == null) { // This is expected for MiniDFSCluster. Set it now using // the RPC server's bind address. clientNamenodeAddress = NetUtils.getHostPortString(rpcServer.getRpcAddress()); LOG.info("Clients are to use " + clientNamenodeAddress + " to access" + " this namenode/service."); } if (NamenodeRole.NAMENODE == role) { httpServer.setNameNodeAddress(getNameNodeAddress()); httpServer.setFSImage(getFSImage()); } pauseMonitor = new JvmPauseMonitor(conf); pauseMonitor.start(); metrics.getJvmMetrics().setPauseMonitor(pauseMonitor); startCommonServices(conf); } /** * Create the RPC server implementation. Used as an extension point for the * BackupNode. 
*/ protected NameNodeRpcServer createRpcServer(Configuration conf) throws IOException { return new NameNodeRpcServer(conf, this); } /** Start the services common to active and standby states */ private void startCommonServices(Configuration conf) throws IOException { namesystem.startCommonServices(conf, haContext); registerNNSMXBean(); if (NamenodeRole.NAMENODE != role) { startHttpServer(conf); httpServer.setNameNodeAddress(getNameNodeAddress()); httpServer.setFSImage(getFSImage()); } rpcServer.start(); plugins = conf.getInstances(DFS_NAMENODE_PLUGINS_KEY, ServicePlugin.class); for (ServicePlugin p: plugins) { try { p.start(this); } catch (Throwable t) { LOG.warn("ServicePlugin " + p + " could not be started", t); } } LOG.info(getRole() + " RPC up at: " + rpcServer.getRpcAddress()); if (rpcServer.getServiceRpcAddress() != null) { LOG.info(getRole() + " service RPC up at: " + rpcServer.getServiceRpcAddress()); } } private void stopCommonServices() { if(rpcServer != null) rpcServer.stop(); if(namesystem != null) namesystem.close(); if (pauseMonitor != null) pauseMonitor.stop(); if (plugins != null) { for (ServicePlugin p : plugins) { try { p.stop(); } catch (Throwable t) { LOG.warn("ServicePlugin " + p + " could not be stopped", t); } } } stopHttpServer(); } private void startTrashEmptier(final Configuration conf) throws IOException { long trashInterval = conf.getLong(FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT); if (trashInterval == 0) { return; } else if (trashInterval < 0) { throw new IOException("Cannot start trash emptier with negative interval." + " Set " + FS_TRASH_INTERVAL_KEY + " to a positive value."); } // This may be called from the transitionToActive code path, in which // case the current user is the administrator, not the NN. The trash // emptier needs to run as the NN. See HDFS-3972. 
FileSystem fs = SecurityUtil.doAsLoginUser( new PrivilegedExceptionAction<FileSystem>() { @Override public FileSystem run() throws IOException { return FileSystem.get(conf); } }); this.emptier = new Thread(new Trash(fs, conf).getEmptier(), "Trash Emptier"); this.emptier.setDaemon(true); this.emptier.start(); } private void stopTrashEmptier() { if (this.emptier != null) { emptier.interrupt(); emptier = null; } } private void startHttpServer(final Configuration conf) throws IOException { httpServer = new NameNodeHttpServer(conf, this, getHttpServerBindAddress(conf)); httpServer.start(); httpServer.setStartupProgress(startupProgress); } private void stopHttpServer() { try { if (httpServer != null) httpServer.stop(); } catch (Exception e) { LOG.error("Exception while stopping httpserver", e); } } /** * Start NameNode. * <p> * The name-node can be started with one of the following startup options: * <ul> * <li>{@link StartupOption#REGULAR REGULAR} - normal name node startup</li> * <li>{@link StartupOption#FORMAT FORMAT} - format name node</li> * <li>{@link StartupOption#BACKUP BACKUP} - start backup node</li> * <li>{@link StartupOption#CHECKPOINT CHECKPOINT} - start checkpoint node</li> * <li>{@link StartupOption#UPGRADE UPGRADE} - start the cluster * <li>{@link StartupOption#UPGRADEONLY UPGRADEONLY} - upgrade the cluster * upgrade and create a snapshot of the current file system state</li> * <li>{@link StartupOption#RECOVER RECOVERY} - recover name node * metadata</li> * <li>{@link StartupOption#ROLLBACK ROLLBACK} - roll the * cluster back to the previous state</li> * <li>{@link StartupOption#FINALIZE FINALIZE} - finalize * previous upgrade</li> * <li>{@link StartupOption#IMPORT IMPORT} - import checkpoint</li> * </ul> * The option is passed via configuration field: * <tt>dfs.namenode.startup</tt> * * The conf will be modified to reflect the actual ports on which * the NameNode is up and running if the user passes the port as * <code>zero</code> in the conf. 
* * @param conf confirguration * @throws IOException */ public NameNode(Configuration conf) throws IOException { this(conf, NamenodeRole.NAMENODE); } protected NameNode(Configuration conf, NamenodeRole role) throws IOException { this.conf = conf; this.role = role; setClientNamenodeAddress(conf); String nsId = getNameServiceId(conf); String namenodeId = HAUtil.getNameNodeId(conf, nsId); this.haEnabled = HAUtil.isHAEnabled(conf, nsId); state = createHAState(getStartupOption(conf)); this.allowStaleStandbyReads = HAUtil.shouldAllowStandbyReads(conf); this.haContext = createHAContext(); try { initializeGenericKeys(conf, nsId, namenodeId); initialize(conf); try { haContext.writeLock(); state.prepareToEnterState(haContext); state.enterState(haContext); } finally { haContext.writeUnlock(); } } catch (IOException e) { this.stop(); throw e; } catch (HadoopIllegalArgumentException e) { this.stop(); throw e; } this.started.set(true); } protected HAState createHAState(StartupOption startOpt) { if (!haEnabled || startOpt == StartupOption.UPGRADE || startOpt == StartupOption.UPGRADEONLY) { return ACTIVE_STATE; } else { return STANDBY_STATE; } } protected HAContext createHAContext() { return new NameNodeHAContext(); } /** * Wait for service to finish. * (Normally, it runs forever.) */ public void join() { try { rpcServer.join(); } catch (InterruptedException ie) { LOG.info("Caught interrupted exception ", ie); } } /** * Stop all NameNode threads and wait for all to finish. 
*/ public void stop() { synchronized(this) { if (stopRequested) return; stopRequested = true; } try { if (state != null) { state.exitState(haContext); } } catch (ServiceFailedException e) { LOG.warn("Encountered exception while exiting state ", e); } finally { stopCommonServices(); if (metrics != null) { metrics.shutdown(); } if (namesystem != null) { namesystem.shutdown(); } if (nameNodeStatusBeanName != null) { MBeans.unregister(nameNodeStatusBeanName); nameNodeStatusBeanName = null; } if (this.spanReceiverHost != null) { this.spanReceiverHost.closeReceivers(); } } } synchronized boolean isStopRequested() { return stopRequested; } /** * Is the cluster currently in safe mode? */ public boolean isInSafeMode() { return namesystem.isInSafeMode(); } /** get FSImage */ @VisibleForTesting public FSImage getFSImage() { return namesystem.getFSImage(); } /** * @return NameNode RPC address */ public InetSocketAddress getNameNodeAddress() { return rpcServer.getRpcAddress(); } /** * @return NameNode RPC address in "host:port" string form */ public String getNameNodeAddressHostPortString() { return NetUtils.getHostPortString(rpcServer.getRpcAddress()); } /** * @return NameNode service RPC address if configured, the * NameNode RPC address otherwise */ public InetSocketAddress getServiceRpcAddress() { final InetSocketAddress serviceAddr = rpcServer.getServiceRpcAddress(); return serviceAddr == null ? 
rpcServer.getRpcAddress() : serviceAddr; } /** * @return NameNode HTTP address, used by the Web UI, image transfer, * and HTTP-based file system clients like Hftp and WebHDFS */ public InetSocketAddress getHttpAddress() { return httpServer.getHttpAddress(); } /** * @return NameNode HTTPS address, used by the Web UI, image transfer, * and HTTP-based file system clients like Hftp and WebHDFS */ public InetSocketAddress getHttpsAddress() { return httpServer.getHttpsAddress(); } /** * Verify that configured directories exist, then * Interactively confirm that formatting is desired * for each existing directory and format them. * * @param conf configuration to use * @param force if true, format regardless of whether dirs exist * @return true if formatting was aborted, false otherwise * @throws IOException */ private static boolean format(Configuration conf, boolean force, boolean isInteractive) throws IOException { String nsId = DFSUtil.getNamenodeNameServiceId(conf); String namenodeId = HAUtil.getNameNodeId(conf, nsId); initializeGenericKeys(conf, nsId, namenodeId); checkAllowFormat(conf); if (UserGroupInformation.isSecurityEnabled()) { InetSocketAddress socAddr = getAddress(conf); SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY, DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName()); } Collection<URI> nameDirsToFormat = FSNamesystem.getNamespaceDirs(conf); List<URI> sharedDirs = FSNamesystem.getSharedEditsDirs(conf); List<URI> dirsToPrompt = new ArrayList<URI>(); dirsToPrompt.addAll(nameDirsToFormat); dirsToPrompt.addAll(sharedDirs); List<URI> editDirsToFormat = FSNamesystem.getNamespaceEditsDirs(conf); // if clusterID is not provided - see if you can find the current one String clusterId = StartupOption.FORMAT.getClusterId(); if(clusterId == null || clusterId.equals("")) { //Generate a new cluster id clusterId = NNStorage.newClusterID(); } System.out.println("Formatting using clusterid: " + clusterId); FSImage fsImage = new FSImage(conf, nameDirsToFormat, 
editDirsToFormat); try { FSNamesystem fsn = new FSNamesystem(conf, fsImage); fsImage.getEditLog().initJournalsForWrite(); if (!fsImage.confirmFormat(force, isInteractive)) { return true; // aborted } fsImage.format(fsn, clusterId); } catch (IOException ioe) { LOG.warn("Encountered exception during format: ", ioe); fsImage.close(); throw ioe; } return false; } public static void checkAllowFormat(Configuration conf) throws IOException { if (!conf.getBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_DEFAULT)) { throw new IOException("The option " + DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY + " is set to false for this filesystem, so it " + "cannot be formatted. You will need to set " + DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY +" parameter " + "to true in order to format this filesystem"); } } @VisibleForTesting public static boolean initializeSharedEdits(Configuration conf) throws IOException { return initializeSharedEdits(conf, true); } @VisibleForTesting public static boolean initializeSharedEdits(Configuration conf, boolean force) throws IOException { return initializeSharedEdits(conf, force, false); } /** * Clone the supplied configuration but remove the shared edits dirs. * * @param conf Supplies the original configuration. * @return Cloned configuration without the shared edit dirs. * @throws IOException on failure to generate the configuration. */ private static Configuration getConfigurationWithoutSharedEdits( Configuration conf) throws IOException { List<URI> editsDirs = FSNamesystem.getNamespaceEditsDirs(conf, false); String editsDirsString = Joiner.on(",").join(editsDirs); Configuration confWithoutShared = new Configuration(conf); confWithoutShared.unset(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY); confWithoutShared.setStrings(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, editsDirsString); return confWithoutShared; } /** * Format a new shared edits dir and copy in enough edit log segments so that * the standby NN can start up. 
* * @param conf configuration * @param force format regardless of whether or not the shared edits dir exists * @param interactive prompt the user when a dir exists * @return true if the command aborts, false otherwise */ private static boolean initializeSharedEdits(Configuration conf, boolean force, boolean interactive) throws IOException { String nsId = DFSUtil.getNamenodeNameServiceId(conf); String namenodeId = HAUtil.getNameNodeId(conf, nsId); initializeGenericKeys(conf, nsId, namenodeId); if (conf.get(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY) == null) { LOG.error("No shared edits directory configured for namespace " + nsId + " namenode " + namenodeId); return false; } if (UserGroupInformation.isSecurityEnabled()) { InetSocketAddress socAddr = getAddress(conf); SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY, DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName()); } NNStorage existingStorage = null; FSImage sharedEditsImage = null; try { FSNamesystem fsns = FSNamesystem.loadFromDisk(getConfigurationWithoutSharedEdits(conf)); existingStorage = fsns.getFSImage().getStorage(); NamespaceInfo nsInfo = existingStorage.getNamespaceInfo(); List<URI> sharedEditsDirs = FSNamesystem.getSharedEditsDirs(conf); sharedEditsImage = new FSImage(conf, Lists.<URI>newArrayList(), sharedEditsDirs); sharedEditsImage.getEditLog().initJournalsForWrite(); if (!sharedEditsImage.confirmFormat(force, interactive)) { return true; // abort } NNStorage newSharedStorage = sharedEditsImage.getStorage(); // Call Storage.format instead of FSImage.format here, since we don't // actually want to save a checkpoint - just prime the dirs with // the existing namespace info newSharedStorage.format(nsInfo); sharedEditsImage.getEditLog().formatNonFileJournals(nsInfo); // Need to make sure the edit log segments are in good shape to initialize // the shared edits dir. 
fsns.getFSImage().getEditLog().close(); fsns.getFSImage().getEditLog().initJournalsForWrite(); fsns.getFSImage().getEditLog().recoverUnclosedStreams(); copyEditLogSegmentsToSharedDir(fsns, sharedEditsDirs, newSharedStorage, conf); } catch (IOException ioe) { LOG.error("Could not initialize shared edits dir", ioe); return true; // aborted } finally { if (sharedEditsImage != null) { try { sharedEditsImage.close(); } catch (IOException ioe) { LOG.warn("Could not close sharedEditsImage", ioe); } } // Have to unlock storage explicitly for the case when we're running in a // unit test, which runs in the same JVM as NNs. if (existingStorage != null) { try { existingStorage.unlockAll(); } catch (IOException ioe) { LOG.warn("Could not unlock storage directories", ioe); return true; // aborted } } } return false; // did not abort } private static void copyEditLogSegmentsToSharedDir(FSNamesystem fsns, Collection<URI> sharedEditsDirs, NNStorage newSharedStorage, Configuration conf) throws IOException { Preconditions.checkArgument(!sharedEditsDirs.isEmpty(), "No shared edits specified"); // Copy edit log segments into the new shared edits dir. 
List<URI> sharedEditsUris = new ArrayList<URI>(sharedEditsDirs); FSEditLog newSharedEditLog = new FSEditLog(conf, newSharedStorage, sharedEditsUris); newSharedEditLog.initJournalsForWrite(); newSharedEditLog.recoverUnclosedStreams(); FSEditLog sourceEditLog = fsns.getFSImage().editLog; long fromTxId = fsns.getFSImage().getMostRecentCheckpointTxId(); Collection<EditLogInputStream> streams = null; try { streams = sourceEditLog.selectInputStreams(fromTxId + 1, 0); // Set the nextTxid to the CheckpointTxId+1 newSharedEditLog.setNextTxId(fromTxId + 1); // Copy all edits after last CheckpointTxId to shared edits dir for (EditLogInputStream stream : streams) { LOG.debug("Beginning to copy stream " + stream + " to shared edits"); FSEditLogOp op; boolean segmentOpen = false; while ((op = stream.readOp()) != null) { if (LOG.isTraceEnabled()) { LOG.trace("copying op: " + op); } if (!segmentOpen) { newSharedEditLog.startLogSegment(op.txid, false); segmentOpen = true; } newSharedEditLog.logEdit(op); if (op.opCode == FSEditLogOpCodes.OP_END_LOG_SEGMENT) { newSharedEditLog.logSync(); newSharedEditLog.endCurrentLogSegment(false); LOG.debug("ending log segment because of END_LOG_SEGMENT op in " + stream); segmentOpen = false; } } if (segmentOpen) { LOG.debug("ending log segment because of end of stream in " + stream); newSharedEditLog.logSync(); newSharedEditLog.endCurrentLogSegment(false); segmentOpen = false; } } } finally { if (streams != null) { FSEditLog.closeAllStreams(streams); } } } @VisibleForTesting public static boolean doRollback(Configuration conf, boolean isConfirmationNeeded) throws IOException { String nsId = DFSUtil.getNamenodeNameServiceId(conf); String namenodeId = HAUtil.getNameNodeId(conf, nsId); initializeGenericKeys(conf, nsId, namenodeId); FSNamesystem nsys = new FSNamesystem(conf, new FSImage(conf)); System.err.print( "\"rollBack\" will remove the current state of the file system,\n" + "returning you to the state prior to initiating your recent.\n" + 
"upgrade. This action is permanent and cannot be undone. If you\n" + "are performing a rollback in an HA environment, you should be\n" + "certain that no NameNode process is running on any host."); if (isConfirmationNeeded) { if (!confirmPrompt("Roll back file system state?")) { System.err.println("Rollback aborted."); return true; } } nsys.getFSImage().doRollback(nsys); return false; } private static void printUsage(PrintStream out) { out.println(USAGE + "\n"); } @VisibleForTesting static StartupOption parseArguments(String args[]) { int argsLen = (args == null) ? 0 : args.length; StartupOption startOpt = StartupOption.REGULAR; for(int i=0; i < argsLen; i++) { String cmd = args[i]; if (StartupOption.FORMAT.getName().equalsIgnoreCase(cmd)) { startOpt = StartupOption.FORMAT; for (i = i + 1; i < argsLen; i++) { if (args[i].equalsIgnoreCase(StartupOption.CLUSTERID.getName())) { i++; if (i >= argsLen) { // if no cluster id specified, return null LOG.error("Must specify a valid cluster ID after the " + StartupOption.CLUSTERID.getName() + " flag"); return null; } String clusterId = args[i]; // Make sure an id is specified and not another flag if (clusterId.isEmpty() || clusterId.equalsIgnoreCase(StartupOption.FORCE.getName()) || clusterId.equalsIgnoreCase( StartupOption.NONINTERACTIVE.getName())) { LOG.error("Must specify a valid cluster ID after the " + StartupOption.CLUSTERID.getName() + " flag"); return null; } startOpt.setClusterId(clusterId); } if (args[i].equalsIgnoreCase(StartupOption.FORCE.getName())) { startOpt.setForceFormat(true); } if (args[i].equalsIgnoreCase(StartupOption.NONINTERACTIVE.getName())) { startOpt.setInteractiveFormat(false); } } } else if (StartupOption.GENCLUSTERID.getName().equalsIgnoreCase(cmd)) { startOpt = StartupOption.GENCLUSTERID; } else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) { startOpt = StartupOption.REGULAR; } else if (StartupOption.BACKUP.getName().equalsIgnoreCase(cmd)) { startOpt = StartupOption.BACKUP; } else 
if (StartupOption.CHECKPOINT.getName().equalsIgnoreCase(cmd)) { startOpt = StartupOption.CHECKPOINT; } else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd) || StartupOption.UPGRADEONLY.getName().equalsIgnoreCase(cmd)) { startOpt = StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd) ? StartupOption.UPGRADE : StartupOption.UPGRADEONLY; /* Can be followed by CLUSTERID with a required parameter or * RENAMERESERVED with an optional parameter */ while (i + 1 < argsLen) { String flag = args[i + 1]; if (flag.equalsIgnoreCase(StartupOption.CLUSTERID.getName())) { if (i + 2 < argsLen) { i += 2; startOpt.setClusterId(args[i]); } else { LOG.error("Must specify a valid cluster ID after the " + StartupOption.CLUSTERID.getName() + " flag"); return null; } } else if (flag.equalsIgnoreCase(StartupOption.RENAMERESERVED .getName())) { if (i + 2 < argsLen) { FSImageFormat.setRenameReservedPairs(args[i + 2]); i += 2; } else { FSImageFormat.useDefaultRenameReservedPairs(); i += 1; } } else { LOG.error("Unknown upgrade flag " + flag); return null; } } } else if (StartupOption.ROLLINGUPGRADE.getName().equalsIgnoreCase(cmd)) { startOpt = StartupOption.ROLLINGUPGRADE; ++i; if (i >= argsLen) { LOG.error("Must specify a rolling upgrade startup option " + RollingUpgradeStartupOption.getAllOptionString()); return null; } startOpt.setRollingUpgradeStartupOption(args[i]); } else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) { startOpt = StartupOption.ROLLBACK; } else if (StartupOption.FINALIZE.getName().equalsIgnoreCase(cmd)) { startOpt = StartupOption.FINALIZE; } else if (StartupOption.IMPORT.getName().equalsIgnoreCase(cmd)) { startOpt = StartupOption.IMPORT; } else if (StartupOption.BOOTSTRAPSTANDBY.getName().equalsIgnoreCase(cmd)) { startOpt = StartupOption.BOOTSTRAPSTANDBY; return startOpt; } else if (StartupOption.INITIALIZESHAREDEDITS.getName().equalsIgnoreCase(cmd)) { startOpt = StartupOption.INITIALIZESHAREDEDITS; for (i = i + 1 ; i < argsLen; i++) { if 
(StartupOption.NONINTERACTIVE.getName().equals(args[i])) { startOpt.setInteractiveFormat(false); } else if (StartupOption.FORCE.getName().equals(args[i])) { startOpt.setForceFormat(true); } else { LOG.error("Invalid argument: " + args[i]); return null; } } return startOpt; } else if (StartupOption.RECOVER.getName().equalsIgnoreCase(cmd)) { if (startOpt != StartupOption.REGULAR) { throw new RuntimeException("Can't combine -recover with " + "other startup options."); } startOpt = StartupOption.RECOVER; while (++i < argsLen) { if (args[i].equalsIgnoreCase( StartupOption.FORCE.getName())) { startOpt.setForce(MetaRecoveryContext.FORCE_FIRST_CHOICE); } else { throw new RuntimeException("Error parsing recovery options: " + "can't understand option \"" + args[i] + "\""); } } } else if (StartupOption.METADATAVERSION.getName().equalsIgnoreCase(cmd)) { startOpt = StartupOption.METADATAVERSION; } else { return null; } } return startOpt; } private static void setStartupOption(Configuration conf, StartupOption opt) { conf.set(DFS_NAMENODE_STARTUP_KEY, opt.name()); } static StartupOption getStartupOption(Configuration conf) { return StartupOption.valueOf(conf.get(DFS_NAMENODE_STARTUP_KEY, StartupOption.REGULAR.toString())); } private static void doRecovery(StartupOption startOpt, Configuration conf) throws IOException { String nsId = DFSUtil.getNamenodeNameServiceId(conf); String namenodeId = HAUtil.getNameNodeId(conf, nsId); initializeGenericKeys(conf, nsId, namenodeId); if (startOpt.getForce() < MetaRecoveryContext.FORCE_ALL) { if (!confirmPrompt("You have selected Metadata Recovery mode. " + "This mode is intended to recover lost metadata on a corrupt " + "filesystem. Metadata recovery mode often permanently deletes " + "data from your HDFS filesystem. Please back up your edit log " + "and fsimage before trying this!\n\n" + "Are you ready to proceed? 
(Y/N)\n")) { System.err.println("Recovery aborted at user request.\n"); return; } } MetaRecoveryContext.LOG.info("starting recovery..."); UserGroupInformation.setConfiguration(conf); NameNode.initMetrics(conf, startOpt.toNodeRole()); FSNamesystem fsn = null; try { fsn = FSNamesystem.loadFromDisk(conf); fsn.getFSImage().saveNamespace(fsn); MetaRecoveryContext.LOG.info("RECOVERY COMPLETE"); } catch (IOException e) { MetaRecoveryContext.LOG.info("RECOVERY FAILED: caught exception", e); throw e; } catch (RuntimeException e) { MetaRecoveryContext.LOG.info("RECOVERY FAILED: caught exception", e); throw e; } finally { if (fsn != null) fsn.close(); } } /** * Verify that configured directories exist, then print the metadata versions * of the software and the image. * * @param conf configuration to use * @throws IOException */ private static boolean printMetadataVersion(Configuration conf) throws IOException { final String nsId = DFSUtil.getNamenodeNameServiceId(conf); final String namenodeId = HAUtil.getNameNodeId(conf, nsId); NameNode.initializeGenericKeys(conf, nsId, namenodeId); final FSImage fsImage = new FSImage(conf); final FSNamesystem fs = new FSNamesystem(conf, fsImage, false); return fsImage.recoverTransitionRead( StartupOption.METADATAVERSION, fs, null); } public static NameNode createNameNode(String argv[], Configuration conf) throws IOException { LOG.info("createNameNode " + Arrays.asList(argv)); if (conf == null) conf = new HdfsConfiguration(); StartupOption startOpt = parseArguments(argv); if (startOpt == null) { printUsage(System.err); return null; } setStartupOption(conf, startOpt); switch (startOpt) { case FORMAT: { boolean aborted = format(conf, startOpt.getForceFormat(), startOpt.getInteractiveFormat()); terminate(aborted ? 
1 : 0); return null; // avoid javac warning } case GENCLUSTERID: { System.err.println("Generating new cluster id:"); System.out.println(NNStorage.newClusterID()); terminate(0); return null; } case FINALIZE: { System.err.println("Use of the argument '" + StartupOption.FINALIZE + "' is no longer supported. To finalize an upgrade, start the NN " + " and then run `hdfs dfsadmin -finalizeUpgrade'"); terminate(1); return null; // avoid javac warning } case ROLLBACK: { boolean aborted = doRollback(conf, true); terminate(aborted ? 1 : 0); return null; // avoid warning } case BOOTSTRAPSTANDBY: { String toolArgs[] = Arrays.copyOfRange(argv, 1, argv.length); int rc = BootstrapStandby.run(toolArgs, conf); terminate(rc); return null; // avoid warning } case INITIALIZESHAREDEDITS: { boolean aborted = initializeSharedEdits(conf, startOpt.getForceFormat(), startOpt.getInteractiveFormat()); terminate(aborted ? 1 : 0); return null; // avoid warning } case BACKUP: case CHECKPOINT: { NamenodeRole role = startOpt.toNodeRole(); DefaultMetricsSystem.initialize(role.toString().replace(" ", "")); return new BackupNode(conf, role); } case RECOVER: { NameNode.doRecovery(startOpt, conf); return null; } case METADATAVERSION: { printMetadataVersion(conf); terminate(0); return null; // avoid javac warning } case UPGRADEONLY: { DefaultMetricsSystem.initialize("NameNode"); new NameNode(conf); terminate(0); return null; } default: { DefaultMetricsSystem.initialize("NameNode"); return new NameNode(conf); } } } /** * In federation configuration is set for a set of * namenode and secondary namenode/backup/checkpointer, which are * grouped under a logical nameservice ID. The configuration keys specific * to them have suffix set to configured nameserviceId. * * This method copies the value from specific key of format key.nameserviceId * to key, to set up the generic configuration. 
Once this is done, only * generic version of the configuration is read in rest of the code, for * backward compatibility and simpler code changes. * * @param conf * Configuration object to lookup specific key and to set the value * to the key passed. Note the conf object is modified * @param nameserviceId name service Id (to distinguish federated NNs) * @param namenodeId the namenode ID (to distinguish HA NNs) * @see DFSUtil#setGenericConf(Configuration, String, String, String...) */ public static void initializeGenericKeys(Configuration conf, String nameserviceId, String namenodeId) { if ((nameserviceId != null && !nameserviceId.isEmpty()) || (namenodeId != null && !namenodeId.isEmpty())) { if (nameserviceId != null) { conf.set(DFS_NAMESERVICE_ID, nameserviceId); } if (namenodeId != null) { conf.set(DFS_HA_NAMENODE_ID_KEY, namenodeId); } DFSUtil.setGenericConf(conf, nameserviceId, namenodeId, NAMENODE_SPECIFIC_KEYS); DFSUtil.setGenericConf(conf, nameserviceId, null, NAMESERVICE_SPECIFIC_KEYS); } // If the RPC address is set use it to (re-)configure the default FS if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) { URI defaultUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://" + conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY)); conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString()); LOG.debug("Setting " + FS_DEFAULT_NAME_KEY + " to " + defaultUri.toString()); } } /** * Get the name service Id for the node * @return name service Id or null if federation is not configured */ protected String getNameServiceId(Configuration conf) { return DFSUtil.getNamenodeNameServiceId(conf); } /** */ public static void main(String argv[]) throws Exception { if (DFSUtil.parseHelpArgument(argv, NameNode.USAGE, System.out, true)) { System.exit(0); } try { StringUtils.startupShutdownMessage(NameNode.class, argv, LOG); NameNode namenode = createNameNode(argv, null); if (namenode != null) { namenode.join(); } } catch (Throwable e) { LOG.error("Failed to start namenode.", e); terminate(1, e); } 
} synchronized void monitorHealth() throws HealthCheckFailedException, AccessControlException { namesystem.checkSuperuserPrivilege(); if (!haEnabled) { return; // no-op, if HA is not enabled } getNamesystem().checkAvailableResources(); if (!getNamesystem().nameNodeHasResourcesAvailable()) { throw new HealthCheckFailedException( "The NameNode has no resources available"); } } synchronized void transitionToActive() throws ServiceFailedException, AccessControlException { namesystem.checkSuperuserPrivilege(); if (!haEnabled) { throw new ServiceFailedException("HA for namenode is not enabled"); } state.setState(haContext, ACTIVE_STATE); } synchronized void transitionToStandby() throws ServiceFailedException, AccessControlException { namesystem.checkSuperuserPrivilege(); if (!haEnabled) { throw new ServiceFailedException("HA for namenode is not enabled"); } state.setState(haContext, STANDBY_STATE); } synchronized HAServiceStatus getServiceStatus() throws ServiceFailedException, AccessControlException { namesystem.checkSuperuserPrivilege(); if (!haEnabled) { throw new ServiceFailedException("HA for namenode is not enabled"); } if (state == null) { return new HAServiceStatus(HAServiceState.INITIALIZING); } HAServiceState retState = state.getServiceState(); HAServiceStatus ret = new HAServiceStatus(retState); if (retState == HAServiceState.STANDBY) { String safemodeTip = namesystem.getSafeModeTip(); if (!safemodeTip.isEmpty()) { ret.setNotReadyToBecomeActive( "The NameNode is in safemode. 
" + safemodeTip); } else { ret.setReadyToBecomeActive(); } } else if (retState == HAServiceState.ACTIVE) { ret.setReadyToBecomeActive(); } else { ret.setNotReadyToBecomeActive("State is " + state); } return ret; } synchronized HAServiceState getServiceState() { if (state == null) { return HAServiceState.INITIALIZING; } return state.getServiceState(); } /** * Register NameNodeStatusMXBean */ private void registerNNSMXBean() { nameNodeStatusBeanName = MBeans.register("NameNode", "NameNodeStatus", this); } @Override // NameNodeStatusMXBean public String getNNRole() { String roleStr = ""; NamenodeRole role = getRole(); if (null != role) { roleStr = role.toString(); } return roleStr; } @Override // NameNodeStatusMXBean public String getState() { String servStateStr = ""; HAServiceState servState = getServiceState(); if (null != servState) { servStateStr = servState.toString(); } return servStateStr; } @Override // NameNodeStatusMXBean public String getHostAndPort() { return getNameNodeAddressHostPortString(); } @Override // NameNodeStatusMXBean public boolean isSecurityEnabled() { return UserGroupInformation.isSecurityEnabled(); } @Override // NameNodeStatusMXBean public long getLastHATransitionTime() { return state.getLastHATransitionTime(); } /** * Shutdown the NN immediately in an ungraceful way. Used when it would be * unsafe for the NN to continue operating, e.g. during a failed HA state * transition. * * @param t exception which warrants the shutdown. Printed to the NN log * before exit. * @throws ExitException thrown only for testing. */ protected synchronized void doImmediateShutdown(Throwable t) throws ExitException { String message = "Error encountered requiring NN shutdown. " + "Shutting down immediately."; try { LOG.error(message, t); } catch (Throwable ignored) { // This is unlikely to happen, but there's nothing we can do if it does. 
} terminate(1, t); } /** * Class used to expose {@link NameNode} as context to {@link HAState} */ protected class NameNodeHAContext implements HAContext { @Override public void setState(HAState s) { state = s; } @Override public HAState getState() { return state; } @Override public void startActiveServices() throws IOException { try { namesystem.startActiveServices(); startTrashEmptier(conf); } catch (Throwable t) { doImmediateShutdown(t); } } @Override public void stopActiveServices() throws IOException { try { if (namesystem != null) { namesystem.stopActiveServices(); } stopTrashEmptier(); } catch (Throwable t) { doImmediateShutdown(t); } } @Override public void startStandbyServices() throws IOException { try { namesystem.startStandbyServices(conf); } catch (Throwable t) { doImmediateShutdown(t); } } @Override public void prepareToStopStandbyServices() throws ServiceFailedException { try { namesystem.prepareToStopStandbyServices(); } catch (Throwable t) { doImmediateShutdown(t); } } @Override public void stopStandbyServices() throws IOException { try { if (namesystem != null) { namesystem.stopStandbyServices(); } } catch (Throwable t) { doImmediateShutdown(t); } } @Override public void writeLock() { namesystem.writeLock(); namesystem.lockRetryCache(); } @Override public void writeUnlock() { namesystem.unlockRetryCache(); namesystem.writeUnlock(); } /** Check if an operation of given category is allowed */ @Override public void checkOperation(final OperationCategory op) throws StandbyException { state.checkOperation(haContext, op); } @Override public boolean allowStaleReads() { return allowStaleStandbyReads; } } public boolean isStandbyState() { return (state.equals(STANDBY_STATE)); } public boolean isActiveState() { return (state.equals(ACTIVE_STATE)); } /** * Returns whether the NameNode is completely started */ boolean isStarted() { return this.started.get(); } /** * Check that a request to change this node's HA state is valid. 
* In particular, verifies that, if auto failover is enabled, non-forced * requests from the HAAdmin CLI are rejected, and vice versa. * * @param req the request to check * @throws AccessControlException if the request is disallowed */ void checkHaStateChange(StateChangeRequestInfo req) throws AccessControlException { boolean autoHaEnabled = conf.getBoolean(DFS_HA_AUTO_FAILOVER_ENABLED_KEY, DFS_HA_AUTO_FAILOVER_ENABLED_DEFAULT); switch (req.getSource()) { case REQUEST_BY_USER: if (autoHaEnabled) { throw new AccessControlException( "Manual HA control for this NameNode is disallowed, because " + "automatic HA is enabled."); } break; case REQUEST_BY_USER_FORCED: if (autoHaEnabled) { LOG.warn("Allowing manual HA control from " + Server.getRemoteAddress() + " even though automatic HA is enabled, because the user " + "specified the force flag"); } break; case REQUEST_BY_ZKFC: if (!autoHaEnabled) { throw new AccessControlException( "Request from ZK failover controller at " + Server.getRemoteAddress() + " denied since automatic HA " + "is not enabled"); } break; } } }
apache-2.0
agolPL/keycloak
testsuite/integration-arquillian/tests/base/src/test/java/org/keycloak/testsuite/client/ClientRegistrationPoliciesTest.java
33498
/* * Copyright 2016 Red Hat, Inc. and/or its affiliates * and other contributors as indicated by the @author tags. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.keycloak.testsuite.client; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.stream.Collectors; import org.junit.After; import org.junit.Before; import org.junit.Test; import org.keycloak.admin.client.resource.RealmResource; import org.keycloak.client.registration.Auth; import org.keycloak.client.registration.ClientRegistrationException; import org.keycloak.client.registration.HttpErrorException; import org.keycloak.jose.jws.JWSInput; import org.keycloak.protocol.oidc.OIDCLoginProtocol; import org.keycloak.protocol.oidc.mappers.FullNameMapper; import org.keycloak.protocol.oidc.mappers.HardcodedRole; import org.keycloak.protocol.oidc.mappers.UserAttributeMapper; import org.keycloak.protocol.oidc.mappers.UserPropertyMapper; import org.keycloak.protocol.saml.mappers.UserAttributeStatementMapper; import org.keycloak.protocol.saml.mappers.UserPropertyAttributeStatementMapper; import org.keycloak.representations.idm.ClientInitialAccessCreatePresentation; import org.keycloak.representations.idm.ClientInitialAccessPresentation; import org.keycloak.representations.idm.ClientRepresentation; import org.keycloak.representations.idm.ClientTemplateRepresentation; import org.keycloak.representations.idm.ComponentRepresentation; import 
org.keycloak.representations.idm.ComponentTypeRepresentation; import org.keycloak.representations.idm.ConfigPropertyRepresentation; import org.keycloak.representations.idm.ProtocolMapperRepresentation; import org.keycloak.representations.idm.RealmRepresentation; import org.keycloak.representations.oidc.OIDCClientRepresentation; import org.keycloak.services.clientregistration.RegistrationAccessToken; import org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy; import org.keycloak.services.clientregistration.policy.ClientRegistrationPolicyManager; import org.keycloak.services.clientregistration.policy.RegistrationAuth; import org.keycloak.services.clientregistration.policy.impl.ClientDisabledClientRegistrationPolicyFactory; import org.keycloak.services.clientregistration.policy.impl.ClientTemplatesClientRegistrationPolicyFactory; import org.keycloak.services.clientregistration.policy.impl.MaxClientsClientRegistrationPolicyFactory; import org.keycloak.services.clientregistration.policy.impl.ProtocolMappersClientRegistrationPolicyFactory; import org.keycloak.services.clientregistration.policy.impl.TrustedHostClientRegistrationPolicyFactory; import org.keycloak.testsuite.Assert; import org.keycloak.testsuite.admin.ApiUtil; import org.keycloak.util.JsonSerialization; import javax.ws.rs.core.Response; import static org.junit.Assert.assertTrue; /** * @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a> */ public class ClientRegistrationPoliciesTest extends AbstractClientRegistrationTest { private static final String PRIVATE_KEY = 
"MIICXAIBAAKBgQCrVrCuTtArbgaZzL1hvh0xtL5mc7o0NqPVnYXkLvgcwiC3BjLGw1tGEGoJaXDuSaRllobm53JBhjx33UNv+5z/UMG4kytBWxheNVKnL6GgqlNabMaFfPLPCF8kAgKnsi79NMo+n6KnSY8YeUmec/p2vjO2NjsSAVcWEQMVhJ31LwIDAQABAoGAfmO8gVhyBxdqlxmIuglbz8bcjQbhXJLR2EoS8ngTXmN1bo2L90M0mUKSdc7qF10LgETBzqL8jYlQIbt+e6TH8fcEpKCjUlyq0Mf/vVbfZSNaVycY13nTzo27iPyWQHK5NLuJzn1xvxxrUeXI6A2WFpGEBLbHjwpx5WQG9A+2scECQQDvdn9NE75HPTVPxBqsEd2z10TKkl9CZxu10Qby3iQQmWLEJ9LNmy3acvKrE3gMiYNWb6xHPKiIqOR1as7L24aTAkEAtyvQOlCvr5kAjVqrEKXalj0Tzewjweuxc0pskvArTI2Oo070h65GpoIKLc9jf+UA69cRtquwP93aZKtW06U8dQJAF2Y44ks/mK5+eyDqik3koCI08qaC8HYq2wVl7G2QkJ6sbAaILtcvD92ToOvyGyeE0flvmDZxMYlvaZnaQ0lcSQJBAKZU6umJi3/xeEbkJqMfeLclD27XGEFoPeNrmdx0q10Azp4NfJAY+Z8KRyQCR2BEG+oNitBOZ+YXF9KCpH3cdmECQHEigJhYg+ykOvr1aiZUMFT72HU0jnmQe2FVekuG+LJUt2Tm7GtMjTFoGpf0JwrVuZN39fOYAlo+nTixgeW7X8Y="; private static final String PUBLIC_KEY = "MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCrVrCuTtArbgaZzL1hvh0xtL5mc7o0NqPVnYXkLvgcwiC3BjLGw1tGEGoJaXDuSaRllobm53JBhjx33UNv+5z/UMG4kytBWxheNVKnL6GgqlNabMaFfPLPCF8kAgKnsi79NMo+n6KnSY8YeUmec/p2vjO2NjsSAVcWEQMVhJ31LwIDAQAB"; @Override public void addTestRealms(List<RealmRepresentation> testRealms) { super.addTestRealms(testRealms); testRealms.get(0).setId(REALM_NAME); testRealms.get(0).setPrivateKey(PRIVATE_KEY); testRealms.get(0).setPublicKey(PUBLIC_KEY); } @After public void after() throws Exception { super.after(); // Default setup of trustedHostPolicy ComponentRepresentation trustedHostPolicy = findPolicyByProviderAndAuth(TrustedHostClientRegistrationPolicyFactory.PROVIDER_ID, getPolicyAnon()); trustedHostPolicy.getConfig().putSingle(TrustedHostClientRegistrationPolicyFactory.HOST_SENDING_REGISTRATION_REQUEST_MUST_MATCH, "true"); trustedHostPolicy.getConfig().putSingle(TrustedHostClientRegistrationPolicyFactory.CLIENT_URIS_MUST_MATCH, "true"); trustedHostPolicy.getConfig().put(TrustedHostClientRegistrationPolicyFactory.TRUSTED_HOSTS, Collections.emptyList()); 
realmResource().components().component(trustedHostPolicy.getId()).update(trustedHostPolicy); } private RealmResource realmResource() { return adminClient.realm(REALM_NAME); } private ClientRepresentation createRep(String clientId) { ClientRepresentation client = new ClientRepresentation(); client.setClientId(clientId); client.setSecret("test-secret"); return client; } private OIDCClientRepresentation createRepOidc() { return createRepOidc("http://localhost:8080/foo", "http://localhost:8080/foo"); } private OIDCClientRepresentation createRepOidc(String clientBaseUri, String clientRedirectUri) { OIDCClientRepresentation client = new OIDCClientRepresentation(); client.setClientName("RegistrationAccessTokenTest"); client.setClientUri(clientBaseUri); client.setRedirectUris(Collections.singletonList(clientRedirectUri)); return client; } public OIDCClientRepresentation create() throws ClientRegistrationException { OIDCClientRepresentation client = createRepOidc(); OIDCClientRepresentation response = reg.oidc().create(client); reg.auth(Auth.token(response)); return response; } private void assertOidcFail(ClientRegOp operation, OIDCClientRepresentation client, int expectedStatusCode) { assertOidcFail(operation, client, expectedStatusCode, null); } private void assertOidcFail(ClientRegOp operation, OIDCClientRepresentation client, int expectedStatusCode, String expectedErrorContains) { try { switch (operation) { case CREATE: reg.oidc().create(client); break; case UPDATE: reg.oidc().update(client); break; case DELETE: reg.oidc().delete(client); break; } Assert.fail("Not expected to successfuly run operation " + operation.toString() + " on client"); } catch (ClientRegistrationException expected) { HttpErrorException httpEx = (HttpErrorException) expected.getCause(); Assert.assertEquals(expectedStatusCode, httpEx.getStatusLine().getStatusCode()); if (expectedErrorContains != null) { assertTrue("Error response doesn't contain expected text. 
The error response text is: " + httpEx.getErrorResponse(), httpEx.getErrorResponse().contains(expectedErrorContains)); } } } private void assertFail(ClientRegOp operation, ClientRepresentation client, int expectedStatusCode, String expectedErrorContains) { try { switch (operation) { case CREATE: reg.create(client); break; case UPDATE: reg.update(client); break; case DELETE: reg.delete(client); break; } Assert.fail("Not expected to successfuly run operation " + operation.toString() + " on client"); } catch (ClientRegistrationException expected) { HttpErrorException httpEx = (HttpErrorException) expected.getCause(); Assert.assertEquals(expectedStatusCode, httpEx.getStatusLine().getStatusCode()); if (expectedErrorContains != null) { assertTrue("Error response doesn't contain expected text. The error response text is: " + httpEx.getErrorResponse(), httpEx.getErrorResponse().contains(expectedErrorContains)); } } } @Test public void testAnonCreateWithTrustedHost() throws Exception { // Failed to create client (untrusted host) OIDCClientRepresentation client = createRepOidc("http://root", "http://redirect"); assertOidcFail(ClientRegOp.CREATE, client, 403, "Host not trusted"); // Should still fail (bad redirect_uri) setTrustedHost("localhost"); assertOidcFail(ClientRegOp.CREATE, client, 403, "URL doesn't match"); // Should still fail (bad base_uri) client.setRedirectUris(Collections.singletonList("http://localhost:8080/foo")); assertOidcFail(ClientRegOp.CREATE, client, 403, "URL doesn't match"); // Success create client client.setClientUri("http://localhost:8080/foo"); OIDCClientRepresentation oidcClientRep = reg.oidc().create(client); // Test registration access token assertRegAccessToken(oidcClientRep.getRegistrationAccessToken(), RegistrationAuth.ANONYMOUS); } @Test public void testAnonUpdateWithTrustedHost() throws Exception { setTrustedHost("localhost"); OIDCClientRepresentation client = create(); // Fail update client 
client.setRedirectUris(Collections.singletonList("http://bad:8080/foo")); assertOidcFail(ClientRegOp.UPDATE, client, 403, "URL doesn't match"); // Should be fine now client.setRedirectUris(Collections.singletonList("http://localhost:8080/foo")); reg.oidc().update(client); } @Test public void testRedirectUriWithDomain() throws Exception { // Change the policy to avoid checking hosts ComponentRepresentation trustedHostPolicyRep = findPolicyByProviderAndAuth(TrustedHostClientRegistrationPolicyFactory.PROVIDER_ID, getPolicyAnon()); trustedHostPolicyRep.getConfig().putSingle(TrustedHostClientRegistrationPolicyFactory.HOST_SENDING_REGISTRATION_REQUEST_MUST_MATCH, "false"); // Configure some trusted host and domain trustedHostPolicyRep.getConfig().put(TrustedHostClientRegistrationPolicyFactory.TRUSTED_HOSTS, Arrays.asList("www.host.com", "*.example.com")); realmResource().components().component(trustedHostPolicyRep.getId()).update(trustedHostPolicyRep); // Verify client can be created with the redirectUri from trusted host and domain OIDCClientRepresentation oidcClientRep = createRepOidc("http://www.host.com", "http://www.example.com"); reg.oidc().create(oidcClientRep); // Remove domain from the config trustedHostPolicyRep.getConfig().put(TrustedHostClientRegistrationPolicyFactory.TRUSTED_HOSTS, Arrays.asList("www.host.com", "www1.example.com")); realmResource().components().component(trustedHostPolicyRep.getId()).update(trustedHostPolicyRep); // Check new client can't be created anymore oidcClientRep = createRepOidc("http://www.host.com", "http://www.example.com"); assertOidcFail(ClientRegOp.CREATE, oidcClientRep, 403, "URL doesn't match"); } @Test public void testAnonConsentRequired() throws Exception { setTrustedHost("localhost"); OIDCClientRepresentation client = create(); // Assert new client has consent required String clientId = client.getClientId(); ClientRepresentation clientRep = ApiUtil.findClientByClientId(realmResource(), clientId).toRepresentation(); 
Assert.assertTrue(clientRep.isConsentRequired()); // Try update with disabled consent required. Should fail clientRep.setConsentRequired(false); assertFail(ClientRegOp.UPDATE, clientRep, 403, "Not permitted to update consentRequired to false"); // Try update with enabled consent required. Should pass clientRep.setConsentRequired(true); reg.update(clientRep); } @Test public void testAnonFullScopeAllowed() throws Exception { setTrustedHost("localhost"); OIDCClientRepresentation client = create(); // Assert new client has fullScopeAllowed disabled String clientId = client.getClientId(); ClientRepresentation clientRep = ApiUtil.findClientByClientId(realmResource(), clientId).toRepresentation(); Assert.assertFalse(clientRep.isFullScopeAllowed()); // Try update with disabled consent required. Should fail clientRep.setFullScopeAllowed(true); assertFail(ClientRegOp.UPDATE, clientRep, 403, "Not permitted to enable fullScopeAllowed"); // Try update with enabled consent required. Should pass clientRep.setFullScopeAllowed(false); reg.update(clientRep); } @Test public void testClientDisabledPolicy() throws Exception { setTrustedHost("localhost"); // Assert new client is enabled OIDCClientRepresentation client = create(); String clientId = client.getClientId(); ClientRepresentation clientRep = ApiUtil.findClientByClientId(realmResource(), clientId).toRepresentation(); Assert.assertTrue(clientRep.isEnabled()); // Add client-disabled policy ComponentRepresentation rep = new ComponentRepresentation(); rep.setName("Clients disabled"); rep.setParentId(REALM_NAME); rep.setProviderId(ClientDisabledClientRegistrationPolicyFactory.PROVIDER_ID); rep.setProviderType(ClientRegistrationPolicy.class.getName()); rep.setSubType(getPolicyAnon()); Response response = realmResource().components().add(rep); String policyId = ApiUtil.getCreatedId(response); response.close(); // Assert new client is disabled client = create(); clientId = client.getClientId(); clientRep = 
ApiUtil.findClientByClientId(realmResource(), clientId).toRepresentation(); Assert.assertFalse(clientRep.isEnabled()); // Try enable client. Should fail clientRep.setEnabled(true); assertFail(ClientRegOp.UPDATE, clientRep, 403, "Not permitted to enable client"); // Try update disabled client. Should pass clientRep.setEnabled(false); reg.update(clientRep); // Revert realmResource().components().component(policyId).remove(); } @Test public void testMaxClientsPolicy() throws Exception { setTrustedHost("localhost"); int clientsCount = realmResource().clients().findAll().size(); int newClientsLimit = clientsCount + 1; // Allow to create one more client to current limit ComponentRepresentation maxClientsPolicyRep = findPolicyByProviderAndAuth(MaxClientsClientRegistrationPolicyFactory.PROVIDER_ID, getPolicyAnon()); maxClientsPolicyRep.getConfig().putSingle(MaxClientsClientRegistrationPolicyFactory.MAX_CLIENTS, String.valueOf(newClientsLimit)); realmResource().components().component(maxClientsPolicyRep.getId()).update(maxClientsPolicyRep); // I can register one new client OIDCClientRepresentation client = create(); // I can't register more clients assertOidcFail(ClientRegOp.CREATE, createRepOidc(), 403, "It's allowed to have max " + newClientsLimit + " clients per realm"); // Revert maxClientsPolicyRep.getConfig().putSingle(MaxClientsClientRegistrationPolicyFactory.MAX_CLIENTS, String.valueOf(10000)); realmResource().components().component(maxClientsPolicyRep.getId()).update(maxClientsPolicyRep); } @Test public void testProviders() throws Exception { List<ComponentTypeRepresentation> reps = realmResource().clientRegistrationPolicy().getProviders(); Map<String, ComponentTypeRepresentation> providersMap = reps.stream().collect(Collectors.toMap((ComponentTypeRepresentation rep) -> { return rep.getId(); }, (ComponentTypeRepresentation rep) -> { return rep; })); // test that ProtocolMappersClientRegistrationPolicy provider contains available protocol mappers 
ComponentTypeRepresentation protMappersRep = providersMap.get(ProtocolMappersClientRegistrationPolicyFactory.PROVIDER_ID); List<String> availableMappers = getProviderConfigProperty(protMappersRep, ProtocolMappersClientRegistrationPolicyFactory.ALLOWED_PROTOCOL_MAPPER_TYPES); List<String> someExpectedMappers = Arrays.asList(UserAttributeStatementMapper.PROVIDER_ID, UserAttributeMapper.PROVIDER_ID, UserPropertyAttributeStatementMapper.PROVIDER_ID, UserPropertyMapper.PROVIDER_ID, HardcodedRole.PROVIDER_ID); availableMappers.containsAll(someExpectedMappers); // test that clientTemplate provider doesn't contain any client templates yet ComponentTypeRepresentation clientTemplateRep = providersMap.get(ClientTemplatesClientRegistrationPolicyFactory.PROVIDER_ID); List<String> clientTemplates = getProviderConfigProperty(clientTemplateRep, ClientTemplatesClientRegistrationPolicyFactory.ALLOWED_CLIENT_TEMPLATES); Assert.assertTrue(clientTemplates.isEmpty()); // Add some clientTemplates ClientTemplateRepresentation clientTemplate = new ClientTemplateRepresentation(); clientTemplate.setName("foo"); Response response = realmResource().clientTemplates().create(clientTemplate); String fooTemplateId = ApiUtil.getCreatedId(response); response.close(); clientTemplate = new ClientTemplateRepresentation(); clientTemplate.setName("bar"); response = realmResource().clientTemplates().create(clientTemplate); String barTemplateId = ApiUtil.getCreatedId(response); response.close(); // send request again and test that clientTemplate provider contains added client templates reps = realmResource().clientRegistrationPolicy().getProviders(); clientTemplateRep = reps.stream().filter((ComponentTypeRepresentation rep1) -> { return rep1.getId().equals(ClientTemplatesClientRegistrationPolicyFactory.PROVIDER_ID); }).findFirst().get(); clientTemplates = getProviderConfigProperty(clientTemplateRep, ClientTemplatesClientRegistrationPolicyFactory.ALLOWED_CLIENT_TEMPLATES); 
Assert.assertNames(clientTemplates, "foo", "bar"); // Revert client templates realmResource().clientTemplates().get(fooTemplateId).remove(); realmResource().clientTemplates().get(barTemplateId).remove(); } private List<String> getProviderConfigProperty(ComponentTypeRepresentation provider, String expectedConfigPropName) { Assert.assertNotNull(provider); List<ConfigPropertyRepresentation> list = provider.getProperties(); list = list.stream().filter((ConfigPropertyRepresentation rep) -> { return rep.getName().equals(expectedConfigPropName); }).collect(Collectors.toList()); Assert.assertEquals(list.size(), 1); ConfigPropertyRepresentation allowedProtocolMappers = list.get(0); Assert.assertEquals(allowedProtocolMappers.getName(), expectedConfigPropName); return allowedProtocolMappers.getOptions(); } @Test public void testClientTemplatesPolicy() throws Exception { setTrustedHost("localhost"); // Add some clientTemplate through Admin REST ClientTemplateRepresentation clientTemplate = new ClientTemplateRepresentation(); clientTemplate.setName("foo"); Response response = realmResource().clientTemplates().create(clientTemplate); String clientTemplateId = ApiUtil.getCreatedId(response); response.close(); // I can't register new client with this template ClientRepresentation clientRep = createRep("test-app"); clientRep.setClientTemplate("foo"); assertFail(ClientRegOp.CREATE, clientRep, 403, "Not permitted to use specified clientTemplate"); // Register client without template - should success clientRep.setClientTemplate(null); ClientRepresentation registeredClient = reg.create(clientRep); reg.auth(Auth.token(registeredClient)); // Try to update client with template - should fail registeredClient.setClientTemplate("foo"); assertFail(ClientRegOp.UPDATE, registeredClient, 403, "Not permitted to use specified clientTemplate"); // Update client with the clientTemplate via Admin REST ClientRepresentation client = ApiUtil.findClientByClientId(realmResource(), 
"test-app").toRepresentation(); client.setClientTemplate("foo"); realmResource().clients().get(client.getId()).update(client); // Now the update via clientRegistration is permitted too as template was already set reg.update(registeredClient); // Revert client template realmResource().clients().get(client.getId()).remove(); realmResource().clientTemplates().get(clientTemplateId).remove(); } @Test public void testClientTemplatesPolicyWithPermittedTemplate() throws Exception { setTrustedHost("localhost"); // Add some clientTemplate through Admin REST ClientTemplateRepresentation clientTemplate = new ClientTemplateRepresentation(); clientTemplate.setName("foo"); Response response = realmResource().clientTemplates().create(clientTemplate); String clientTemplateId = ApiUtil.getCreatedId(response); response.close(); // I can't register new client with this template ClientRepresentation clientRep = createRep("test-app"); clientRep.setClientTemplate("foo"); assertFail(ClientRegOp.CREATE, clientRep, 403, "Not permitted to use specified clientTemplate"); // Update the policy to allow the "foo" template ComponentRepresentation clientTemplatesPolicyRep = findPolicyByProviderAndAuth(ClientTemplatesClientRegistrationPolicyFactory.PROVIDER_ID, getPolicyAnon()); clientTemplatesPolicyRep.getConfig().putSingle(ClientTemplatesClientRegistrationPolicyFactory.ALLOWED_CLIENT_TEMPLATES, "foo"); realmResource().components().component(clientTemplatesPolicyRep.getId()).update(clientTemplatesPolicyRep); // Check that I can register client now ClientRepresentation registeredClient = reg.create(clientRep); Assert.assertNotNull(registeredClient.getRegistrationAccessToken()); // Revert client template ApiUtil.findClientResourceByClientId(realmResource(), "test-app").remove(); realmResource().clientTemplates().get(clientTemplateId).remove(); } // PROTOCOL MAPPERS @Test public void testProtocolMappersCreate() throws Exception { setTrustedHost("localhost"); // Try to add client with some "hardcoded 
role" mapper. Should fail ClientRepresentation clientRep = createRep("test-app"); clientRep.setProtocolMappers(Collections.singletonList(createHardcodedMapperRep())); assertFail(ClientRegOp.CREATE, clientRep, 403, "ProtocolMapper type not allowed"); // Try the same authenticated. Should still fail. ClientInitialAccessPresentation token = adminClient.realm(REALM_NAME).clientInitialAccess().create(new ClientInitialAccessCreatePresentation(0, 10)); reg.auth(Auth.token(token)); assertFail(ClientRegOp.CREATE, clientRep, 403, "ProtocolMapper type not allowed"); // Update the "authenticated" policy and allow hardcoded role mapper ComponentRepresentation protocolMapperPolicyRep = findPolicyByProviderAndAuth(ProtocolMappersClientRegistrationPolicyFactory.PROVIDER_ID, getPolicyAuth()); protocolMapperPolicyRep.getConfig().add(ProtocolMappersClientRegistrationPolicyFactory.ALLOWED_PROTOCOL_MAPPER_TYPES, HardcodedRole.PROVIDER_ID); realmResource().components().component(protocolMapperPolicyRep.getId()).update(protocolMapperPolicyRep); // Check authenticated registration is permitted ClientRepresentation registeredClient = reg.create(clientRep); Assert.assertNotNull(registeredClient.getRegistrationAccessToken()); // Check "anonymous" registration still fails clientRep = createRep("test-app-2"); clientRep.setProtocolMappers(Collections.singletonList(createHardcodedMapperRep())); reg.auth(null); assertFail(ClientRegOp.CREATE, clientRep, 403, "ProtocolMapper type not allowed"); // Revert policy change ApiUtil.findClientResourceByClientId(realmResource(), "test-app").remove(); protocolMapperPolicyRep.getConfig().remove(ProtocolMappersClientRegistrationPolicyFactory.ALLOWED_PROTOCOL_MAPPER_TYPES, HardcodedRole.PROVIDER_ID); realmResource().components().component(protocolMapperPolicyRep.getId()).update(protocolMapperPolicyRep); } private ProtocolMapperRepresentation createHardcodedMapperRep() { ProtocolMapperRepresentation protocolMapper = new ProtocolMapperRepresentation(); 
protocolMapper.setName("Hardcoded foo role"); protocolMapper.setProtocolMapper(HardcodedRole.PROVIDER_ID); protocolMapper.setProtocol(OIDCLoginProtocol.LOGIN_PROTOCOL); protocolMapper.setConsentRequired(false); protocolMapper.setConsentText(null); protocolMapper.getConfig().put(HardcodedRole.ROLE_CONFIG, "foo-role"); return protocolMapper; } @Test public void testProtocolMappersUpdate() throws Exception { setTrustedHost("localhost"); // Check I can add client with allowed protocolMappers ProtocolMapperRepresentation protocolMapper = new ProtocolMapperRepresentation(); protocolMapper.setName("Full name"); protocolMapper.setProtocolMapper(FullNameMapper.PROVIDER_ID); protocolMapper.setProtocol(OIDCLoginProtocol.LOGIN_PROTOCOL); protocolMapper.setConsentRequired(true); protocolMapper.setConsentText("Full name"); ClientRepresentation clientRep = createRep("test-app"); clientRep.setProtocolMappers(Collections.singletonList(protocolMapper)); ClientRepresentation registeredClient = reg.create(clientRep); reg.auth(Auth.token(registeredClient)); // Add some disallowed protocolMapper registeredClient.getProtocolMappers().add(createHardcodedMapperRep()); // Check I can't update client because of protocolMapper assertFail(ClientRegOp.UPDATE, registeredClient, 403, "ProtocolMapper type not allowed"); // Remove "bad" protocolMapper registeredClient.getProtocolMappers().removeIf((ProtocolMapperRepresentation mapper) -> { return mapper.getProtocolMapper().equals(HardcodedRole.PROVIDER_ID); }); // Check I can update client now reg.update(registeredClient); // Revert client ApiUtil.findClientResourceByClientId(realmResource(), "test-app").remove(); } @Test public void testProtocolMappersConsentRequired() throws Exception { setTrustedHost("localhost"); // Register client and assert it has builtin protocol mappers ClientRepresentation clientRep = createRep("test-app"); ClientRepresentation registeredClient = reg.create(clientRep); long usernamePropMappersCount = 
registeredClient.getProtocolMappers().stream().filter((ProtocolMapperRepresentation protocolMapper) -> { return protocolMapper.getProtocolMapper().equals(UserPropertyMapper.PROVIDER_ID); }).count(); Assert.assertTrue(usernamePropMappersCount > 0); // Remove USernamePropertyMapper from the policy configuration ComponentRepresentation protocolMapperPolicyRep = findPolicyByProviderAndAuth(ProtocolMappersClientRegistrationPolicyFactory.PROVIDER_ID, getPolicyAnon()); protocolMapperPolicyRep.getConfig().getList(ProtocolMappersClientRegistrationPolicyFactory.ALLOWED_PROTOCOL_MAPPER_TYPES).remove(UserPropertyMapper.PROVIDER_ID); realmResource().components().component(protocolMapperPolicyRep.getId()).update(protocolMapperPolicyRep); // Register another client. Assert it doesn't have builtin mappers anymore clientRep = createRep("test-app-2"); registeredClient = reg.create(clientRep); usernamePropMappersCount = registeredClient.getProtocolMappers().stream().filter((ProtocolMapperRepresentation protocolMapper) -> { return protocolMapper.getProtocolMapper().equals(UserPropertyMapper.PROVIDER_ID); }).count(); Assert.assertEquals(0, usernamePropMappersCount); // Revert ApiUtil.findClientResourceByClientId(realmResource(), "test-app").remove(); protocolMapperPolicyRep.getConfig().getList(ProtocolMappersClientRegistrationPolicyFactory.ALLOWED_PROTOCOL_MAPPER_TYPES).add(UserPropertyMapper.PROVIDER_ID); realmResource().components().component(protocolMapperPolicyRep.getId()).update(protocolMapperPolicyRep); } @Test public void testProtocolMappersRemoveBuiltins() throws Exception { setTrustedHost("localhost"); // Change policy to allow hardcoded mapper ComponentRepresentation protocolMapperPolicyRep = findPolicyByProviderAndAuth(ProtocolMappersClientRegistrationPolicyFactory.PROVIDER_ID, getPolicyAnon()); protocolMapperPolicyRep.getConfig().add(ProtocolMappersClientRegistrationPolicyFactory.ALLOWED_PROTOCOL_MAPPER_TYPES, HardcodedRole.PROVIDER_ID); 
realmResource().components().component(protocolMapperPolicyRep.getId()).update(protocolMapperPolicyRep); // Create client with hardcoded mapper ClientRepresentation clientRep = createRep("test-app"); clientRep.setProtocolMappers(Collections.singletonList(createHardcodedMapperRep())); ClientRepresentation registeredClient = reg.create(clientRep); Assert.assertEquals(1, registeredClient.getProtocolMappers().size()); ProtocolMapperRepresentation hardcodedMapper = registeredClient.getProtocolMappers().get(0); Assert.assertTrue(hardcodedMapper.isConsentRequired()); Assert.assertEquals("Hardcoded foo role", hardcodedMapper.getConsentText()); // Revert ApiUtil.findClientResourceByClientId(realmResource(), "test-app").remove(); protocolMapperPolicyRep.getConfig().remove(ProtocolMappersClientRegistrationPolicyFactory.ALLOWED_PROTOCOL_MAPPER_TYPES, HardcodedRole.PROVIDER_ID); realmResource().components().component(protocolMapperPolicyRep.getId()).update(protocolMapperPolicyRep); } // HELPER METHODS private String getPolicyAnon() { return ClientRegistrationPolicyManager.getComponentTypeKey(RegistrationAuth.ANONYMOUS); } private String getPolicyAuth() { return ClientRegistrationPolicyManager.getComponentTypeKey(RegistrationAuth.AUTHENTICATED); } private ComponentRepresentation findPolicyByProviderAndAuth(String providerId, String authType) { // Change the policy to avoid checking hosts List<ComponentRepresentation> reps = realmResource().components().query(REALM_NAME, ClientRegistrationPolicy.class.getName()); for (ComponentRepresentation rep : reps) { if (rep.getSubType().equals(authType) && rep.getProviderId().equals(providerId)) { return rep; } } return null; } private void setTrustedHost(String hostname) { ComponentRepresentation trustedHostRep = findPolicyByProviderAndAuth(TrustedHostClientRegistrationPolicyFactory.PROVIDER_ID, getPolicyAnon()); trustedHostRep.getConfig().putSingle(TrustedHostClientRegistrationPolicyFactory.TRUSTED_HOSTS, hostname); 
realmResource().components().component(trustedHostRep.getId()).update(trustedHostRep); } private void assertRegAccessToken(String registrationAccessToken, RegistrationAuth expectedRegAuth) throws Exception { byte[] content = new JWSInput(registrationAccessToken).getContent(); RegistrationAccessToken regAccessToken = JsonSerialization.readValue(content, RegistrationAccessToken.class); Assert.assertEquals(regAccessToken.getRegistrationAuth(), expectedRegAuth.toString().toLowerCase()); } private enum ClientRegOp { CREATE, READ, UPDATE, DELETE } }
apache-2.0
wudingli/openfire
src/java/org/jivesoftware/openfire/http/HttpConnectionClosedException.java
1033
/** * $RCSfile$ * $Revision: $ * $Date: $ * * Copyright (C) 2005-2008 Jive Software. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jivesoftware.openfire.http; /** * This exception is thrown when an action attempted on the connection to the client but the * connection has been closed. * * @author Alexander Wenckus */ public class HttpConnectionClosedException extends Exception { public HttpConnectionClosedException(String message) { super(message); } }
apache-2.0
trekawek/jackrabbit-oak
oak-run/src/main/java/org/apache/jackrabbit/oak/plugins/tika/BinaryResource.java
2168
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.jackrabbit.oak.plugins.tika; import com.google.common.io.ByteSource; import static com.google.common.base.Preconditions.checkNotNull; import org.jetbrains.annotations.Nullable; class BinaryResource { private final ByteSource byteSource; private final String mimeType; private final String encoding; private final String path; private final String blobId; public BinaryResource(ByteSource byteSource, @Nullable String mimeType, @Nullable String encoding, String path, String blobId) { this.byteSource = checkNotNull(byteSource, "ByteSource must be provided"); this.mimeType = mimeType; this.encoding = encoding; this.path = checkNotNull(path, "Path must be provided"); this.blobId = checkNotNull(blobId, "BlobId must be specified"); } public ByteSource getByteSource() { return byteSource; } @Nullable public String getMimeType() { return mimeType; } @Nullable public String getEncoding() { return encoding; } public String getPath() { return path; } public String getBlobId() { return blobId; } @Override public String toString() { return path; } }
apache-2.0
ascherbakoff/ignite
modules/core/src/test/java/org/apache/ignite/internal/processors/cache/binary/GridCacheBinaryObjectUserClassloaderSelfTest.java
7678
// NOTE(review): verifies that a user-supplied class loader (set via
// IgniteConfiguration.setClassLoader, optionally wrapped in WrappingClassLoader)
// is honored when binary type configurations carry a custom BinarySerializer:
// cache reads of types listed in the BinaryTypeConfiguration set flip the
// static 'deserialized' flag (serializer invoked), while unlisted types leave
// it false. Static volatile flags coordinate state between the test methods
// and getConfiguration(), which is invoked by the grid-startup machinery.
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.internal.processors.cache.binary; import java.io.Serializable; import java.util.HashSet; import java.util.Set; import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCache; import org.apache.ignite.binary.BinaryObjectException; import org.apache.ignite.binary.BinaryReader; import org.apache.ignite.binary.BinarySerializer; import org.apache.ignite.binary.BinaryTypeConfiguration; import org.apache.ignite.binary.BinaryWriter; import org.apache.ignite.configuration.BinaryConfiguration; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.internal.binary.BinaryMarshaller; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; import org.junit.Test; import static org.apache.ignite.cache.CacheMode.REPLICATED; import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC; /** * */ public class GridCacheBinaryObjectUserClassloaderSelfTest extends GridCommonAbstractTest { /** */ private static volatile boolean customBinaryConf = false; /** */ private static volatile boolean deserialized = 
false; /** */ private static volatile boolean useWrappingLoader = false; /** {@inheritDoc} */ @Override protected void afterTest() throws Exception { super.afterTest(); stopAllGrids(); } /** {@inheritDoc} */ @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); cfg.setCacheConfiguration(cacheConfiguration(igniteInstanceName)); cfg.setMarshaller(new BinaryMarshaller()); cfg.setClassLoader(useWrappingLoader ? new WrappingClassLoader(getExternalClassLoader()) : getExternalClassLoader()); if (customBinaryConf) { BinarySerializer bs = new BinarySerializer() { /** {@inheritDoc} */ @Override public void writeBinary(Object obj, BinaryWriter writer) throws BinaryObjectException { //No-op. } /** {@inheritDoc} */ @Override public void readBinary(Object obj, BinaryReader reader) throws BinaryObjectException { deserialized = true; } }; BinaryTypeConfiguration btcfg1 = new BinaryTypeConfiguration(); btcfg1.setTypeName("org.apache.ignite.tests.p2p.CacheDeploymentTestValue"); btcfg1.setSerializer(bs); BinaryTypeConfiguration btcfg2 = new BinaryTypeConfiguration(); btcfg2.setTypeName("org.apache.ignite.internal.processors.cache.binary." + "GridCacheBinaryObjectUserClassloaderSelfTest$TestValue1"); btcfg2.setSerializer(bs); BinaryConfiguration bcfg = new BinaryConfiguration(); Set<BinaryTypeConfiguration> set = new HashSet<>(); set.add(btcfg1); set.add(btcfg2); bcfg.setTypeConfigurations(set); cfg.setBinaryConfiguration(bcfg); } return cfg; } /** * Gets cache configuration for grid with specified name. * * @param igniteInstanceName Ignite instance name. * @return Cache configuration. */ CacheConfiguration cacheConfiguration(String igniteInstanceName) { CacheConfiguration cacheCfg = defaultCacheConfiguration(); cacheCfg.setCacheMode(REPLICATED); cacheCfg.setWriteSynchronizationMode(FULL_SYNC); return cacheCfg; } /** * @throws Exception If test failed. 
*/ @Test public void testConfigurationRegistration() throws Exception { useWrappingLoader = false; doTestConfigurationRegistration(); } /** * @throws Exception If test failed. */ @Test public void testConfigurationRegistrationWithWrappingLoader() throws Exception { useWrappingLoader = true; doTestConfigurationRegistration(); } /** * @throws Exception If test failed. */ private void doTestConfigurationRegistration() throws Exception { try { customBinaryConf = true; Ignite i1 = startGrid(1); Ignite i2 = startGrid(2); IgniteCache<Integer, Object> cache1 = i1.cache(DEFAULT_CACHE_NAME); IgniteCache<Integer, Object> cache2 = i2.cache(DEFAULT_CACHE_NAME); ClassLoader ldr = useWrappingLoader ? ((WrappingClassLoader)i1.configuration().getClassLoader()).getParent() : i1.configuration().getClassLoader(); Object v1 = ldr.loadClass("org.apache.ignite.tests.p2p.CacheDeploymentTestValue").newInstance(); Object v2 = ldr.loadClass("org.apache.ignite.tests.p2p.CacheDeploymentTestValue2").newInstance(); cache1.put(1, v1); cache1.put(2, v2); cache1.put(3, new TestValue1(123)); cache1.put(4, new TestValue2(123)); deserialized = false; cache2.get(1); assertTrue(deserialized); deserialized = false; cache2.get(2); assertFalse(deserialized); deserialized = false; cache2.get(3); assertTrue(deserialized); deserialized = false; cache2.get(4); assertFalse(deserialized); } finally { customBinaryConf = false; } } /** * */ private static class TestValue1 implements Serializable { /** */ private int val; /** * @param val Value. */ public TestValue1(int val) { this.val = val; } /** * @return Value. */ public int value() { return val; } /** {@inheritDoc} */ @Override public String toString() { return S.toString(TestValue1.class, this); } } /** * */ private static class TestValue2 implements Serializable { /** */ private int val; /** * @param val Value. */ public TestValue2(int val) { this.val = val; } /** * @return Value. 
*/ public int value() { return val; } /** {@inheritDoc} */ @Override public String toString() { return S.toString(TestValue2.class, this); } } /** * */ private static class WrappingClassLoader extends ClassLoader { public WrappingClassLoader(ClassLoader parent) { super(parent); } } }
apache-2.0
wakashige/bazel
src/main/java/com/google/devtools/build/lib/rules/genquery/GenQueryRule.java
4819
/* NOTE(review): rule definition for the 'genquery' BUILD rule. The large
   "#BLAZE_RULE" / "#END_BLAZE_RULE" comment blocks below are machine-consumed
   by the Blaze documentation generator; their markers, ${SYNOPSIS} /
   ${ATTRIBUTE_SIGNATURE} placeholders, and wording must not be reflowed or
   edited casually. */
// Copyright 2015 Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package com.google.devtools.build.lib.rules.genquery; import static com.google.devtools.build.lib.packages.Attribute.attr; import static com.google.devtools.build.lib.packages.Type.BOOLEAN; import static com.google.devtools.build.lib.packages.Type.LABEL_LIST; import static com.google.devtools.build.lib.packages.Type.STRING; import static com.google.devtools.build.lib.packages.Type.STRING_LIST; import com.google.devtools.build.lib.analysis.BaseRuleClasses; import com.google.devtools.build.lib.analysis.RuleDefinition; import com.google.devtools.build.lib.analysis.RuleDefinitionEnvironment; import com.google.devtools.build.lib.packages.RuleClass; import com.google.devtools.build.lib.packages.RuleClass.Builder; /** * Rule definition for the genquery rule. */ public final class GenQueryRule implements RuleDefinition { @Override public RuleClass build(Builder builder, RuleDefinitionEnvironment env) { return builder /* <!-- #BLAZE_RULE(genquery).ATTRIBUTE(scope) --> The scope of the query. The query is not allowed to touch targets outside the transitive closure of these targets. ${SYNOPSIS} <!-- #END_BLAZE_RULE.ATTRIBUTE --> */ .add(attr("scope", LABEL_LIST).mandatory().legacyAllowAnyFileType()) /* <!-- #BLAZE_RULE(genquery).ATTRIBUTE(strict) --> If true, targets whose queries escape the transitive closure of their scopes will fail to build. 
If false, Blaze will print a warning and skip whatever query path led it outside of the scope, while completing the rest of the query. ${SYNOPSIS} <!-- #END_BLAZE_RULE.ATTRIBUTE --> */ .add(attr("strict", BOOLEAN).value(true)) /* <!-- #BLAZE_RULE(genquery).ATTRIBUTE(expression) --> The query to be executed. ${SYNOPSIS} <!-- #END_BLAZE_RULE.ATTRIBUTE --> */ .add(attr("expression", STRING).mandatory()) /* <!-- #BLAZE_RULE(genquery).ATTRIBUTE(opts) --> The options that are passed to the query engine. These correspond to the command line options that can be passed to <code>blaze query</code>. The only query options that are not allowed here are <code>--keep_going</code> and <code>--order_output</code>. ${SYNOPSIS} <!-- #END_BLAZE_RULE.ATTRIBUTE --> */ .add(attr("opts", STRING_LIST)) .build(); } @Override public Metadata getMetadata() { return RuleDefinition.Metadata.builder() .name("genquery") .ancestors(BaseRuleClasses.RuleBase.class) .factoryClass(GenQuery.class) .build(); } } /*<!-- #BLAZE_RULE (NAME = genquery, TYPE = LIBRARY, FAMILY = General)[GENERIC_RULE] --> ${ATTRIBUTE_SIGNATURE} <p> <code>genquery()</code> runs a query specified in the <a href="blaze-query-v2.html">Blaze query language</a> and dumps the result into a file. </p> <p> In order to keep the build consistent, the query is allowed only to visit the transitive closure of the targets specified in the <code>scope</code> attribute. Queries violating this rule will fail during execution if <code>strict</code> is unspecified or true (if <code>strict</code> is false, the out of scope targets will simply be skipped with a warning). The easiest way to make sure this does not happen is to mention the same labels in the scope as in the query expression. The only difference between the queries allowed here and on the command line is that queries containing wildcard target specifications (e.g. <code>//pkg:*</code> or <code>//pkg:all</code>) are not allowed here. 
</p> <p> The genquery's output is ordered using <code>--order_output=full</code> in order to enforce deterministic output. <p> The name of the output file is the name of the rule. </p> ${ATTRIBUTE_DEFINITION} <h4 id="genquery_examples">Examples</h4> <p> This example writes the list of the labels in the transitive closure of the specified target to a file. </p> <pre class="code"> genquery( name = "kiwi-deps", expression = "deps(//kiwi:kiwi_lib)", scope = ["//kiwi:kiwi_lib"], ) </pre> <!-- #END_BLAZE_RULE -->*/
apache-2.0
aminmkhan/pentaho-kettle
engine/src/test/java/org/pentaho/di/job/entries/getpop/MailConnectionTest.java
4398
// NOTE(review): unit tests for MailConnection folder handling (PDI-7426).
// The inner Mconn stubs an IMAP Store with a Mockito folder chain
// inbox -> a -> b -> c; c.create(mode) records the requested mode and sets
// the cCreated flag, which the tests assert on.
// NOTE(review): when(c.exists()).thenReturn(cCreated) captures cCreated's
// value at stubbing time (false), so c always reports non-existent and
// creation is observed only via the cCreated flag — confirm this is intended.
/*! ****************************************************************************** * * Pentaho Data Integration * * Copyright (C) 2002-2017 by Hitachi Vantara : http://www.pentaho.com * ******************************************************************************* * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ******************************************************************************/ package org.pentaho.di.job.entries.getpop; import static org.mockito.Mockito.when; import javax.mail.Folder; import javax.mail.MessagingException; import javax.mail.Store; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; import org.pentaho.di.core.exception.KettleException; import org.pentaho.di.core.logging.LogChannel; import org.pentaho.di.core.logging.LogChannelInterface; public class MailConnectionTest { private Mconn conn; @Before public void beforeExec() throws KettleException, MessagingException { Object subj = new Object(); LogChannelInterface log = new LogChannel( subj ); conn = new Mconn( log ); } /** * PDI-7426 Test {@link MailConnection#openFolder(String, boolean, boolean)} method. 
tests that folders are opened * recursively * * @throws KettleException * @throws MessagingException */ @Test public void openFolderTest() throws KettleException, MessagingException { conn.openFolder( "a/b", false, false ); Folder folder = conn.getFolder(); Assert.assertEquals( "Folder B is opened", "B", folder.getFullName() ); } /** * PDI-7426 Test {@link MailConnection#setDestinationFolder(String, boolean)} method. * * @throws KettleException * @throws MessagingException */ @Test public void setDestinationFolderTest() throws KettleException, MessagingException { conn.setDestinationFolder( "a/b/c", true ); Assert.assertTrue( "Folder C created", conn.cCreated ); Assert.assertEquals( "Folder created with holds messages mode", Folder.HOLDS_MESSAGES, conn.mode.intValue() ); } /** * PDI-7426 Test {@link MailConnection#folderExists(String)} method. */ @Test public void folderExistsTest() { boolean actual = conn.folderExists( "a/b" ); Assert.assertTrue( "Folder B exists", actual ); } private class Mconn extends MailConnection { Store store; Folder a; Folder b; Folder c; Folder inbox; Integer mode = -1; boolean cCreated = false; public Mconn( LogChannelInterface log ) throws KettleException, MessagingException { super( log, MailConnectionMeta.PROTOCOL_IMAP, "junit", 0, "junit", "junit", false, false, "junit" ); store = Mockito.mock( Store.class ); inbox = Mockito.mock( Folder.class ); a = Mockito.mock( Folder.class ); b = Mockito.mock( Folder.class ); c = Mockito.mock( Folder.class ); when( a.getFullName() ).thenReturn( "A" ); when( b.getFullName() ).thenReturn( "B" ); when( c.getFullName() ).thenReturn( "C" ); when( a.exists() ).thenReturn( true ); when( b.exists() ).thenReturn( true ); when( c.exists() ).thenReturn( cCreated ); when( c.create( Mockito.anyInt() ) ).thenAnswer( new Answer<Boolean>() { @Override public Boolean answer( InvocationOnMock invocation ) throws Throwable { Object arg0 = invocation.getArguments()[0]; mode = Integer.class.cast( arg0 ); cCreated = 
true; return true; } } ); when( inbox.getFolder( "a" ) ).thenReturn( a ); when( a.getFolder( "b" ) ).thenReturn( b ); when( b.getFolder( "c" ) ).thenReturn( c ); when( store.getDefaultFolder() ).thenReturn( inbox ); } @Override public Store getStore() { return this.store; } } }
apache-2.0
jorgepgjr/spring-boot
spring-boot-samples/spring-boot-sample-web-method-security/src/test/java/sample/ui/method/SampleMethodSecurityApplicationTests.java
5664
// NOTE(review): integration tests for the method-security sample. Boots the
// application on a random port (@IntegrationTest("server.port:0")) and drives
// it over HTTP with TestRestTemplate. The CSRF token is scraped from the
// login page HTML with a regex and the session cookie is carried manually
// between requests (see getCsrf and testDenied).
/* * Copyright 2012-2014 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package sample.ui.method; import java.util.Arrays; import java.util.regex.Matcher; import java.util.regex.Pattern; import org.junit.Test; import org.junit.runner.RunWith; import org.springframework.beans.factory.annotation.Value; import org.springframework.boot.test.IntegrationTest; import org.springframework.boot.test.SpringApplicationConfiguration; import org.springframework.boot.test.TestRestTemplate; import org.springframework.http.HttpEntity; import org.springframework.http.HttpHeaders; import org.springframework.http.HttpMethod; import org.springframework.http.HttpStatus; import org.springframework.http.MediaType; import org.springframework.http.ResponseEntity; import org.springframework.test.annotation.DirtiesContext; import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; import org.springframework.test.context.web.WebAppConfiguration; import org.springframework.util.LinkedMultiValueMap; import org.springframework.util.MultiValueMap; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; /** * Basic integration tests for demo application. 
* * @author Dave Syer */ @RunWith(SpringJUnit4ClassRunner.class) @SpringApplicationConfiguration(classes = SampleMethodSecurityApplication.class) @WebAppConfiguration @IntegrationTest("server.port:0") @DirtiesContext public class SampleMethodSecurityApplicationTests { @Value("${local.server.port}") private int port; @Test public void testHome() throws Exception { HttpHeaders headers = new HttpHeaders(); headers.setAccept(Arrays.asList(MediaType.TEXT_HTML)); ResponseEntity<String> entity = new TestRestTemplate().exchange( "http://localhost:" + this.port, HttpMethod.GET, new HttpEntity<Void>( headers), String.class); assertEquals(HttpStatus.OK, entity.getStatusCode()); assertTrue("Wrong body (title doesn't match):\n" + entity.getBody(), entity .getBody().contains("<title>Login")); } @Test public void testLogin() throws Exception { HttpHeaders headers = new HttpHeaders(); headers.setAccept(Arrays.asList(MediaType.TEXT_HTML)); MultiValueMap<String, String> form = new LinkedMultiValueMap<String, String>(); form.set("username", "admin"); form.set("password", "admin"); getCsrf(form, headers); ResponseEntity<String> entity = new TestRestTemplate().exchange( "http://localhost:" + this.port + "/login", HttpMethod.POST, new HttpEntity<MultiValueMap<String, String>>(form, headers), String.class); assertEquals(HttpStatus.FOUND, entity.getStatusCode()); assertEquals("http://localhost:" + this.port + "/", entity.getHeaders() .getLocation().toString()); } @Test public void testDenied() throws Exception { HttpHeaders headers = new HttpHeaders(); headers.setAccept(Arrays.asList(MediaType.TEXT_HTML)); MultiValueMap<String, String> form = new LinkedMultiValueMap<String, String>(); form.set("username", "user"); form.set("password", "user"); getCsrf(form, headers); ResponseEntity<String> entity = new TestRestTemplate().exchange( "http://localhost:" + this.port + "/login", HttpMethod.POST, new HttpEntity<MultiValueMap<String, String>>(form, headers), String.class); 
assertEquals(HttpStatus.FOUND, entity.getStatusCode()); String cookie = entity.getHeaders().getFirst("Set-Cookie"); headers.set("Cookie", cookie); ResponseEntity<String> page = new TestRestTemplate().exchange(entity.getHeaders() .getLocation(), HttpMethod.GET, new HttpEntity<Void>(headers), String.class); assertEquals(HttpStatus.FORBIDDEN, page.getStatusCode()); assertTrue("Wrong body (message doesn't match):\n" + entity.getBody(), page .getBody().contains("Access denied")); } @Test public void testManagementProtected() throws Exception { ResponseEntity<String> entity = new TestRestTemplate().getForEntity( "http://localhost:" + this.port + "/beans", String.class); assertEquals(HttpStatus.UNAUTHORIZED, entity.getStatusCode()); } @Test public void testManagementAuthorizedAccess() throws Exception { ResponseEntity<String> entity = new TestRestTemplate("admin", "admin") .getForEntity("http://localhost:" + this.port + "/beans", String.class); assertEquals(HttpStatus.OK, entity.getStatusCode()); } @Test public void testManagementUnauthorizedAccess() throws Exception { ResponseEntity<String> entity = new TestRestTemplate("user", "user") .getForEntity("http://localhost:" + this.port + "/beans", String.class); assertEquals(HttpStatus.FORBIDDEN, entity.getStatusCode()); } private void getCsrf(MultiValueMap<String, String> form, HttpHeaders headers) { ResponseEntity<String> page = new TestRestTemplate().getForEntity( "http://localhost:" + this.port + "/login", String.class); String cookie = page.getHeaders().getFirst("Set-Cookie"); headers.set("Cookie", cookie); String body = page.getBody(); Matcher matcher = Pattern.compile("(?s).*name=\"_csrf\".*?value=\"([^\"]+).*") .matcher(body); matcher.find(); form.set("_csrf", matcher.group(1)); } }
apache-2.0
steveloughran/hadoop
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffListByArrayList.java
2189
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode.snapshot;

import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;

import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;

/**
 * Resizable-array implementation of the DiffList interface.
 * @param <T> Type of the object in the list
 */
public class DiffListByArrayList<T extends Comparable<Integer>>
    implements DiffList<T> {

  // Backing storage; every operation below is a straight delegation.
  private final List<T> list;

  DiffListByArrayList(List<T> list) {
    this.list = list;
  }

  public DiffListByArrayList(int initialCapacity) {
    this(new ArrayList<>(initialCapacity));
  }

  @Override
  public T get(int i) {
    return list.get(i);
  }

  @Override
  public boolean isEmpty() {
    return list.isEmpty();
  }

  @Override
  public int size() {
    return list.size();
  }

  @Override
  public T remove(int i) {
    return list.remove(i);
  }

  @Override
  public boolean addLast(T t) {
    return list.add(t);
  }

  @Override
  public void addFirst(T t) {
    list.add(0, t);
  }

  @Override
  public int binarySearch(int i) {
    // Elements compare themselves against the int key
    // (T extends Comparable<Integer>), so the key is passed directly.
    return Collections.binarySearch(list, i);
  }

  @Override
  public Iterator<T> iterator() {
    return list.iterator();
  }

  @Override
  public List<T> getMinListForRange(int startIndex, int endIndex,
      INodeDirectory dir) {
    // Returns a view over the backing list; the directory argument is not
    // consulted by this implementation.
    return list.subList(startIndex, endIndex);
  }
}
apache-2.0
nvoron23/presto
presto-main/src/main/java/com/facebook/presto/UnpartitionedPagePartitionFunction.java
1577
/* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.facebook.presto; import com.facebook.presto.spi.Page; import com.fasterxml.jackson.annotation.JsonCreator; import java.util.List; import java.util.Objects; public final class UnpartitionedPagePartitionFunction implements PagePartitionFunction { @JsonCreator public UnpartitionedPagePartitionFunction() { } @Override public List<Page> partition(List<Page> pages) { return pages; } @Override public int hashCode() { return Objects.hash(getClass()); } @Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null || getClass() != obj.getClass()) { return false; } final UnpartitionedPagePartitionFunction other = (UnpartitionedPagePartitionFunction) obj; return Objects.equals(this.getClass(), other.getClass()); } @Override public String toString() { return "unpartitioned"; } }
apache-2.0
shurun19851206/mybaties
src/test/java/org/apache/ibatis/submitted/blocking_cache/PersonMapper.java
980
/*
 * Copyright 2009-2014 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.ibatis.submitted.blocking_cache;

import java.util.List;

import org.apache.ibatis.annotations.CacheNamespace;
import org.apache.ibatis.annotations.Select;

/**
 * Mapper backed by a blocking second-level cache ({@code blocking = true}):
 * concurrent lookups for the same missing cache entry are serialized.
 */
@CacheNamespace(blocking=true)
public interface PersonMapper {

  /** Loads every person row; results go through the blocking cache. */
  @Select("select id, firstname, lastname from person")
  public List<Person> findAll();
}
apache-2.0
mbiarnes/drools
kie-pmml/src/main/java/org/kie/pmml/pmml_4_2/model/datatypes/PMML4Boolean.java
1582
/*
 * Copyright 2017 Red Hat, Inc. and/or its affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.kie.pmml.pmml_4_2.model.datatypes;

import org.kie.api.pmml.PMML4Data;

/**
 * PMML data holder specialized for {@link Boolean} values; all state lives in
 * the {@link PMML4Data} superclass, this class only fixes the type parameter.
 */
public class PMML4Boolean extends PMML4Data<Boolean> {

    public PMML4Boolean(String correlationId, String name, String context, String displayName,
                        Boolean value) {
        super(correlationId, name, context, displayName, value);
    }

    public PMML4Boolean(String correlationId, String name, String context, String displayName,
                        Boolean value, Double weight, Boolean valid, Boolean missing) {
        super(correlationId, name, context, displayName, value, weight, valid, missing);
    }

    // Package-private in the original; visibility kept as-is.
    PMML4Boolean(String correlationId, String name, String context, String displayName,
                 Boolean value, Double weight) {
        super(correlationId, name, context, displayName, value, weight);
    }

    @Override
    public Boolean getValue() {
        return super.getValue();
    }

    /** Registers this class as the handler for Boolean-typed PMML data. */
    @Override
    public void registerWithDataFactory() {
        PMML4DataFactory.registerDataType(Boolean.class.getName(), PMML4Boolean.class);
    }
}
apache-2.0
jittagornp/cpe4235
connect-database/src/main/java/com/blogspot/na5cent/connectdb/X5EmployeeSearchServiceReadContentFile.java
827
/*
 * To change this license header, choose License Headers in Project Properties.
 * To change this template file, choose Tools | Templates
 * and open the template in the editor.
 */
package com.blogspot.na5cent.connectdb;

import com.blogspot.na5cent.connectdb.controller.EmployeeSearchCtrl;
import com.blogspot.na5cent.connectdb.service.EmployeeSearchByCountryService2;
import com.blogspot.na5cent.connectdb.service.EmployeeSearchService;

/**
 * Example entry point: wires an EmployeeSearchCtrl to the country-based search
 * implementation and runs a single search for "Canada", page 1.
 *
 * @author anonymous
 */
public class X5EmployeeSearchServiceReadContentFile {

    public static void main(String[] args) throws Exception {
        // The concrete service is chosen here; the controller sees only the interface.
        EmployeeSearchService service = new EmployeeSearchByCountryService2();
        EmployeeSearchCtrl controller = new EmployeeSearchCtrl(service);
        controller.setKeyword("Canada");
        controller.onSearch(1);
    }
}
apache-2.0
Natrezim/perun
perun-base/src/main/java/cz/metacentrum/perun/core/api/exceptions/VoExistsException.java
805
package cz.metacentrum.perun.core.api.exceptions;

import cz.metacentrum.perun.core.api.exceptions.rt.VoExistsRuntimeException;

/**
 * Checked counterpart of {@link VoExistsRuntimeException}, thrown when a
 * VO (virtual organization) already exists.
 *
 * @see cz.metacentrum.perun.core.api.exceptions.rt.VoExistsRuntimeException
 * @author Martin Kuba
 */
public class VoExistsException extends EntityExistsException {

    // Declared private per the Java Object Serialization conventions; it was
    // previously package-visible, and no in-file code reads it.
    private static final long serialVersionUID = 0L;

    /**
     * Converts the runtime version to this checked version, preserving both
     * the message and the cause chain.
     *
     * @param rt runtime version of this exception
     */
    public VoExistsException(VoExistsRuntimeException rt) {
        super(rt.getMessage(), rt);
    }

    /**
     * @param message description of the conflict
     */
    public VoExistsException(String message) {
        super(message);
    }

    /**
     * @param message description of the conflict
     * @param cause   underlying cause, preserved for the stack trace
     */
    public VoExistsException(String message, Throwable cause) {
        super(message, cause);
    }

    /**
     * @param cause underlying cause, preserved for the stack trace
     */
    public VoExistsException(Throwable cause) {
        super(cause);
    }
}
bsd-2-clause
whbruce/upm
examples/java/TB7300_Example.java
4099
/* * Author: Jon Trulson <jtrulson@ics.com> * Copyright (c) 2016 Intel Corporation. * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ import upm_tb7300.TB7300; public class TB7300_Example { private static String defaultDev = "/dev/ttyUSB0"; public static void main(String[] args) throws InterruptedException { // ! [Interesting] // You will need to edit this example to conform to your site // and your devices, specifically the Device Object Instance // ID passed to the constructor, and the arguments to // initMaster() that are appropriate for your BACnet network. if (args.length > 0) defaultDev = args[0]; System.out.println("Using device " + defaultDev); System.out.println("Initializing..."); // Instantiate an TB7300 object for an TB7300 device that has // 73001 as it's unique Device Object Instance ID. NOTE: You // will certainly want to change this to the correct value for // your device(s). 
TB7300 sensor = new TB7300(73001); // Initialize our BACnet master, if it has not already been // initialized, with the device and baudrate, choosing 1000001 // as our unique Device Object Instance ID, 2 as our MAC // address and using default values for maxMaster and // maxInfoFrames sensor.initMaster(defaultDev, 38400, 1000001, 2); // Uncomment to enable debugging output // sensor.setDebug(true); System.out.println(); System.out.println("Device Name: " + sensor.getDeviceName()); System.out.println("Device Description: " + sensor.getDeviceDescription()); System.out.println("Device Location: " + sensor.getDeviceLocation()); System.out.println(); System.out.println("Fan Mode: " + sensor.getMultiStateValueText(TB7300.MULTISTATE_VALUES_T.MV_Fan_Mode)); System.out.println("Fan Status: " + sensor.getMultiStateValueText(TB7300.MULTISTATE_VALUES_T.MV_Fan_Status)); System.out.println("System Mode: " + sensor.getMultiStateValueText(TB7300.MULTISTATE_VALUES_T.MV_System_Mode)); System.out.println("Service Alarm:" + sensor.getBinaryInputText(TB7300.BINARY_INPUTS_T.BI_Service_Alarm)); System.out.println(); // update and print the room temperature every 5 seconds while (true) { // update our values sensor.update(); // we show both C and F for temperature System.out.println("Temperature: " + sensor.getTemperature() + " C / " + sensor.getTemperature(true) + " F"); System.out.println(); Thread.sleep(5000); } // ! [Interesting] } }
mit
FauxFaux/jdk9-langtools
test/com/sun/javadoc/testDocRootLink/TestDocRootLink.java
4455
/*
 * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

/*
 * @test
 * @bug 6553182 8025416 8029504
 * @summary This test verifies the -Xdocrootparent option.
 * @author Bhavesh Patel
 * @library ../lib
 * @modules jdk.javadoc
 * @build JavadocTester
 * @run main TestDocRootLink
 */
public class TestDocRootLink extends JavadocTester {

    public static void main(String... args) throws Exception {
        TestDocRootLink tester = new TestDocRootLink();
        tester.runTests();
    }

    // Run javadoc WITHOUT -Xdocrootparent: generated links must stay
    // relative, and no absolute download.oracle.com links may appear.
    @Test
    void test1() {
        javadoc("-d", "out-1",
                "-sourcepath", testSrc,
                "pkg1", "pkg2");
        checkExit(Exit.OK);

        // Expected relative links in the generated class page.
        checkOutput("pkg1/C1.html", true,
            "Refer <a href=\"../../technotes/guides/index.html\">Here</a>",
            "This <a href=\"../pkg2/C2.html\">Here</a> should not be replaced\n" +
            " with an absolute link.",
            "Testing <a href=\"../technotes/guides/index.html\">Link 1</a> and\n" +
            " <a href=\"../pkg2/C2.html\">Link 2</a>.");

        // Expected relative links in the generated package summary.
        checkOutput("pkg1/package-summary.html", true,
            "<a href=\"../../technotes/guides/index.html\">\n" +
            " Test document 1</a>",
            "<a href=\"../pkg2/C2.html\">\n" +
            " Another Test document 1</a>",
            "<a href=\"../technotes/guides/index.html\">\n" +
            " Another Test document 2.</a>");

        // TODO: should this check *any* reference to http://download.oracle.com/
        checkOutput("pkg1/C1.html", false,
            "<a href=\"http://download.oracle.com/javase/7/docs/technotes/guides/index.html\">",
            "<a href=\"http://download.oracle.com/javase/7/docs/pkg2/C2.html\">");

        checkOutput("pkg1/package-summary.html", false,
            "<a href=\"http://download.oracle.com/javase/7/docs/technotes/guides/index.html\">",
            "<a href=\"http://download.oracle.com/javase/7/docs/pkg2/C2.html\">");
    }

    // Run javadoc WITH -Xdocrootparent: {@docRoot}-parented links must be
    // rewritten to the given absolute URL, while ordinary relative links
    // (e.g. to pkg1/C1.html) remain untouched.
    @Test
    void test2() {
        javadoc("-d", "out-2",
                "-Xdocrootparent", "http://download.oracle.com/javase/7/docs",
                "-sourcepath", testSrc,
                "pkg1", "pkg2");
        checkExit(Exit.OK);

        // Absolute link expected where the doc used {@docRoot}/..;
        // plain relative links stay relative.
        checkOutput("pkg2/C2.html", true,
            "Refer <a href=\"http://download.oracle.com/javase/7/docs/technotes/guides/index.html\">Here</a>",
            "This <a href=\"../pkg1/C1.html\">Here</a> should not be replaced\n" +
            " with an absolute link.",
            "Testing <a href=\"../technotes/guides/index.html\">Link 1</a> and\n" +
            " <a href=\"../pkg1/C1.html\">Link 2</a>.");

        checkOutput("pkg2/package-summary.html", true,
            "<a href=\"http://download.oracle.com/javase/7/docs/technotes/guides/index.html\">\n" +
            " Test document 1</a>",
            "<a href=\"../pkg1/C1.html\">\n" +
            " Another Test document 1</a>",
            "<a href=\"../technotes/guides/index.html\">\n" +
            " Another Test document 2.</a>");

        // The rewritten link must no longer appear in its relative form,
        // and the cross-package link must not have been absolutized.
        checkOutput("pkg2/C2.html", false,
            "<a href=\"../../technotes/guides/index.html\">",
            "<a href=\"http://download.oracle.com/javase/7/docs/pkg1/C1.html\">");

        checkOutput("pkg2/package-summary.html", false,
            "<a href=\"../../technotes/guides/index.html\">",
            "<a href=\"http://download.oracle.com/javase/7/docs/pkg1/C1.html\">");
    }
}
gpl-2.0
damienlevin/jeromq
src/main/java/zmq/ZError.java
3694
/* Copyright (c) 2007-2014 Contributors as noted in the AUTHORS file This file is part of 0MQ. 0MQ is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. 0MQ is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ package zmq; import java.net.SocketException; import java.nio.channels.ClosedChannelException; public class ZError { private ZError() { } public static class CtxTerminatedException extends RuntimeException { private static final long serialVersionUID = -4404921838608052956L; public CtxTerminatedException() { super(); } } public static class InstantiationException extends RuntimeException { private static final long serialVersionUID = -4404921838608052955L; public InstantiationException(Throwable cause) { super(cause); } } public static class IOException extends RuntimeException { private static final long serialVersionUID = 9202470691157986262L; public IOException(java.io.IOException e) { super(e); } } public static final int EINTR = 4; public static final int EACCESS = 13; public static final int EFAULT = 14; public static final int EINVAL = 22; public static final int EAGAIN = 35; public static final int EINPROGRESS = 36; public static final int EPROTONOSUPPORT = 43; public static final int ENOTSUP = 45; public static final int EADDRINUSE = 48; public static final int EADDRNOTAVAIL = 49; public static final int ENETDOWN = 50; public static final int ENOBUFS = 55; public static final int EISCONN = 56; public static final int ENOTCONN = 57; public static final int 
ECONNREFUSED = 61; public static final int EHOSTUNREACH = 65; private static final int ZMQ_HAUSNUMERO = 156384712; public static final int ENOTSOCK = ZMQ_HAUSNUMERO + 5; public static final int EFSM = ZMQ_HAUSNUMERO + 51; public static final int ENOCOMPATPROTO = ZMQ_HAUSNUMERO + 52; public static final int ETERM = ZMQ_HAUSNUMERO + 53; public static final int EMTHREAD = ZMQ_HAUSNUMERO + 54; public static final int EIOEXC = ZMQ_HAUSNUMERO + 105; public static final int ESOCKET = ZMQ_HAUSNUMERO + 106; public static final int EMFILE = ZMQ_HAUSNUMERO + 107; static int exccode(java.io.IOException e) { if (e instanceof SocketException) { return ESOCKET; } else if (e instanceof ClosedChannelException) { return ENOTCONN; } else { return EIOEXC; } } public static String toString(int code) { switch (code) { case EADDRINUSE: return "Address already in use"; case EFSM: return "Operation cannot be accomplished in current state"; case ENOCOMPATPROTO: return "The protocol is not compatible with the socket type"; case ETERM: return "Context was terminated"; case EMTHREAD: return "No thread available"; } return ""; } }
gpl-3.0
opensagres/xdocreport.samples
samples/fr.opensagres.xdocreport.samples.odtandfreemarker/src/fr/opensagres/xdocreport/samples/odtandfreemarker/model/DeveloperWithImage.java
8786
/** * GNU LESSER GENERAL PUBLIC LICENSE * Version 3, 29 June 2007 * * Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> * Everyone is permitted to copy and distribute verbatim copies * of this license document, but changing it is not allowed. * * * This version of the GNU Lesser General Public License incorporates * the terms and conditions of version 3 of the GNU General Public * License, supplemented by the additional permissions listed below. * * 0. Additional Definitions. * * As used herein, "this License" refers to version 3 of the GNU Lesser * General Public License, and the "GNU GPL" refers to version 3 of the GNU * General Public License. * * "The Library" refers to a covered work governed by this License, * other than an Application or a Combined Work as defined below. * * An "Application" is any work that makes use of an interface provided * by the Library, but which is not otherwise based on the Library. * Defining a subclass of a class defined by the Library is deemed a mode * of using an interface provided by the Library. * * A "Combined Work" is a work produced by combining or linking an * Application with the Library. The particular version of the Library * with which the Combined Work was made is also called the "Linked * Version". * * The "Minimal Corresponding Source" for a Combined Work means the * Corresponding Source for the Combined Work, excluding any source code * for portions of the Combined Work that, considered in isolation, are * based on the Application, and not on the Linked Version. * * The "Corresponding Application Code" for a Combined Work means the * object code and/or source code for the Application, including any data * and utility programs needed for reproducing the Combined Work from the * Application, but excluding the System Libraries of the Combined Work. * * 1. Exception to Section 3 of the GNU GPL. 
* * You may convey a covered work under sections 3 and 4 of this License * without being bound by section 3 of the GNU GPL. * * 2. Conveying Modified Versions. * * If you modify a copy of the Library, and, in your modifications, a * facility refers to a function or data to be supplied by an Application * that uses the facility (other than as an argument passed when the * facility is invoked), then you may convey a copy of the modified * version: * * a) under this License, provided that you make a good faith effort to * ensure that, in the event an Application does not supply the * function or data, the facility still operates, and performs * whatever part of its purpose remains meaningful, or * * b) under the GNU GPL, with none of the additional permissions of * this License applicable to that copy. * * 3. Object Code Incorporating Material from Library Header Files. * * The object code form of an Application may incorporate material from * a header file that is part of the Library. You may convey such object * code under terms of your choice, provided that, if the incorporated * material is not limited to numerical parameters, data structure * layouts and accessors, or small macros, inline functions and templates * (ten or fewer lines in length), you do both of the following: * * a) Give prominent notice with each copy of the object code that the * Library is used in it and that the Library and its use are * covered by this License. * * b) Accompany the object code with a copy of the GNU GPL and this license * document. * * 4. Combined Works. 
* * You may convey a Combined Work under terms of your choice that, * taken together, effectively do not restrict modification of the * portions of the Library contained in the Combined Work and reverse * engineering for debugging such modifications, if you also do each of * the following: * * a) Give prominent notice with each copy of the Combined Work that * the Library is used in it and that the Library and its use are * covered by this License. * * b) Accompany the Combined Work with a copy of the GNU GPL and this license * document. * * c) For a Combined Work that displays copyright notices during * execution, include the copyright notice for the Library among * these notices, as well as a reference directing the user to the * copies of the GNU GPL and this license document. * * d) Do one of the following: * * 0) Convey the Minimal Corresponding Source under the terms of this * License, and the Corresponding Application Code in a form * suitable for, and under terms that permit, the user to * recombine or relink the Application with a modified version of * the Linked Version to produce a modified Combined Work, in the * manner specified by section 6 of the GNU GPL for conveying * Corresponding Source. * * 1) Use a suitable shared library mechanism for linking with the * Library. A suitable mechanism is one that (a) uses at run time * a copy of the Library already present on the user's computer * system, and (b) will operate properly with a modified version * of the Library that is interface-compatible with the Linked * Version. * * e) Provide Installation Information, but only if you would otherwise * be required to provide such information under section 6 of the * GNU GPL, and only to the extent that such information is * necessary to install and execute a modified version of the * Combined Work produced by recombining or relinking the * Application with a modified version of the Linked Version. 
(If * you use option 4d0, the Installation Information must accompany * the Minimal Corresponding Source and Corresponding Application * Code. If you use option 4d1, you must provide the Installation * Information in the manner specified by section 6 of the GNU GPL * for conveying Corresponding Source.) * * 5. Combined Libraries. * * You may place library facilities that are a work based on the * Library side by side in a single library together with other library * facilities that are not Applications and are not covered by this * License, and convey such a combined library under terms of your * choice, if you do both of the following: * * a) Accompany the combined library with a copy of the same work based * on the Library, uncombined with any other library facilities, * conveyed under the terms of this License. * * b) Give prominent notice with the combined library that part of it * is a work based on the Library, and explaining where to find the * accompanying uncombined form of the same work. * * 6. Revised Versions of the GNU Lesser General Public License. * * The Free Software Foundation may publish revised and/or new versions * of the GNU Lesser General Public License from time to time. Such new * versions will be similar in spirit to the present version, but may * differ in detail to address new problems or concerns. * * Each version is given a distinguishing version number. If the * Library as you received it specifies that a certain numbered version * of the GNU Lesser General Public License "or any later version" * applies to it, you have the option of following the terms and * conditions either of that published version or of any later version * published by the Free Software Foundation. If the Library as you * received it does not specify a version number of the GNU Lesser * General Public License, you may choose any version of the GNU Lesser * General Public License ever published by the Free Software Foundation. 
* * If the Library as you received it specifies that a proxy can decide * whether future versions of the GNU Lesser General Public License shall * apply, that proxy's public statement of acceptance of any version is * permanent authorization for you to choose that version for the * Library. */ package fr.opensagres.xdocreport.samples.odtandfreemarker.model; import fr.opensagres.xdocreport.document.images.IImageProvider; public class DeveloperWithImage extends Developer { private final IImageProvider photo; public DeveloperWithImage( String name, String lastName, String mail, IImageProvider photo ) { super( name, lastName, mail ); this.photo = photo; } public IImageProvider getPhoto() { return photo; } }
lgpl-3.0
samiunn/incubator-tinkerpop
gremlin-core/src/test/java/org/apache/tinkerpop/gremlin/structure/io/gryo/GryoPoolTest.java
6731
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.tinkerpop.gremlin.structure.io.gryo;

import org.apache.commons.configuration.BaseConfiguration;
import org.apache.commons.configuration.Configuration;
import org.apache.tinkerpop.gremlin.structure.io.IoX;
import org.apache.tinkerpop.gremlin.structure.io.IoXIoRegistry;
import org.apache.tinkerpop.gremlin.structure.io.IoY;
import org.apache.tinkerpop.gremlin.structure.io.IoYIoRegistry;
import org.apache.tinkerpop.gremlin.util.function.FunctionUtils;
import org.junit.Test;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.util.Collections;

import static org.junit.Assert.assertEquals;

/**
 * Tests for {@code GryoPool}: reader/writer pooling behavior and the various
 * ways custom {@code IoRegistry} implementations can be configured at
 * construction time.
 *
 * @author Stephen Mallette (http://stephen.genoprime.com)
 */
public class GryoPoolTest {

    /** Round-trips an Integer through the pool's doWithWriter/doWithReader helpers. */
    @Test
    public void shouldDoWithReaderWriterMethods() throws Exception {
        final Configuration conf = new BaseConfiguration();
        final GryoPool pool = GryoPool.build().ioRegistries(conf.getList(GryoPool.CONFIG_IO_REGISTRY, Collections.emptyList())).create();
        try (final ByteArrayOutputStream os = new ByteArrayOutputStream()) {
            pool.doWithWriter(writer -> writer.writeObject(os, 1));
            os.flush();
            try (final ByteArrayInputStream is = new ByteArrayInputStream(os.toByteArray())) {
                // wrapFunction adapts the checked-exception-throwing reader lambda
                assertEquals(1, pool.<Integer>doWithReader(FunctionUtils.wrapFunction(reader -> reader.readObject(is, Integer.class))).intValue());
            }
        }
        assertReaderWriter(pool.takeWriter(), pool.takeReader(), 1, Integer.class);
    }

    /** A pool built with an empty registry list still serializes stdlib types. */
    @Test
    public void shouldConfigPoolOnConstructionWithDefaults() throws Exception {
        final Configuration conf = new BaseConfiguration();
        final GryoPool pool = GryoPool.build().ioRegistries(conf.getList(GryoPool.CONFIG_IO_REGISTRY, Collections.emptyList())).create();
        assertReaderWriter(pool.takeWriter(), pool.takeReader(), 1, Integer.class);
    }

    /**
     * With poolSize(1), take/offer must always hand back the single original
     * reader/writer instance.
     */
    @Test
    public void shouldConfigPoolOnConstructionWithPoolSizeOneAndNoIoRegistry() throws Exception {
        final Configuration conf = new BaseConfiguration();
        final GryoPool pool = GryoPool.build().poolSize(1).ioRegistries(conf.getList(GryoPool.CONFIG_IO_REGISTRY, Collections.emptyList())).create();
        final GryoReader reader = pool.takeReader();
        final GryoWriter writer = pool.takeWriter();
        pool.offerReader(reader);
        pool.offerWriter(writer);
        for (int ix = 0; ix < 100; ix++) {
            final GryoReader r = pool.takeReader();
            final GryoWriter w = pool.takeWriter();
            assertReaderWriter(w, r, 1, Integer.class);
            // should always return the same original instance
            assertEquals(reader, r);
            assertEquals(writer, w);
            pool.offerReader(r);
            pool.offerWriter(w);
        }
    }

    /** A registry named in the config (constructor-based) enables IoX round-trips. */
    @Test
    public void shouldConfigPoolOnConstructionWithCustomIoRegistryConstructor() throws Exception {
        final Configuration conf = new BaseConfiguration();
        conf.setProperty(GryoPool.CONFIG_IO_REGISTRY, IoXIoRegistry.ConstructorBased.class.getName());
        final GryoPool pool = GryoPool.build().ioRegistries(conf.getList(GryoPool.CONFIG_IO_REGISTRY, Collections.emptyList())).create();
        assertReaderWriter(pool.takeWriter(), pool.takeReader(), new IoX("test"), IoX.class);
    }

    /** Same as above, but for an instance-based registry. */
    @Test
    public void shouldConfigPoolOnConstructionWithCustomIoRegistryInstance() throws Exception {
        final Configuration conf = new BaseConfiguration();
        conf.setProperty(GryoPool.CONFIG_IO_REGISTRY, IoXIoRegistry.InstanceBased.class.getName());
        final GryoPool pool = GryoPool.build().ioRegistries(conf.getList(GryoPool.CONFIG_IO_REGISTRY, Collections.emptyList())).create();
        assertReaderWriter(pool.takeWriter(), pool.takeReader(), new IoX("test"), IoX.class);
    }

    /** Multiple registries may be supplied as a comma-separated class list. */
    @Test
    public void shouldConfigPoolOnConstructionWithMultipleCustomIoRegistries() throws Exception {
        final Configuration conf = new BaseConfiguration();
        conf.setProperty(GryoPool.CONFIG_IO_REGISTRY,
                IoXIoRegistry.InstanceBased.class.getName() + "," + IoYIoRegistry.InstanceBased.class.getName());
        final GryoPool pool = GryoPool.build().ioRegistries(conf.getList(GryoPool.CONFIG_IO_REGISTRY, Collections.emptyList())).create();
        assertReaderWriter(pool.takeWriter(), pool.takeReader(), new IoX("test"), IoX.class);
        assertReaderWriter(pool.takeWriter(), pool.takeReader(), new IoY(100, 200), IoY.class);
    }

    /** Without a registry, serializing the custom IoX type must fail. */
    @Test(expected = IllegalArgumentException.class)
    public void shouldConfigPoolOnConstructionWithoutCustomIoRegistryAndFail() throws Exception {
        final Configuration conf = new BaseConfiguration();
        final GryoPool pool = GryoPool.build().ioRegistries(conf.getList(GryoPool.CONFIG_IO_REGISTRY, Collections.emptyList())).create();
        assertReaderWriter(pool.takeWriter(), pool.takeReader(), new IoX("test"), IoX.class);
    }

    /** A non-existent registry class name must fail pool construction. */
    @Test(expected = IllegalStateException.class)
    public void shouldConfigPoolOnConstructionWithoutBadIoRegistryAndFail() throws Exception {
        final Configuration conf = new BaseConfiguration();
        conf.setProperty(GryoPool.CONFIG_IO_REGISTRY, "some.class.that.does.not.exist");
        GryoPool.build().ioRegistries(conf.getList(GryoPool.CONFIG_IO_REGISTRY, Collections.emptyList())).create();
    }

    /**
     * Serializes {@code o} with {@code writer} and asserts that {@code reader}
     * reads back an equal object of the given class.
     */
    private static <T> void assertReaderWriter(final GryoWriter writer, final GryoReader reader, final T o,
                                               final Class<T> clazz) throws Exception {
        try (final ByteArrayOutputStream os = new ByteArrayOutputStream()) {
            writer.writeObject(os, o);
            os.flush();
            try (final ByteArrayInputStream is = new ByteArrayInputStream(os.toByteArray())) {
                assertEquals(o, reader.readObject(is, clazz));
            }
        }
    }
}
apache-2.0
Guavus/hbase
hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorClassLoader.java
4744
/*
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseCommonTestingUtility;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.io.IOUtils;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Test TestCoprocessorClassLoader. More tests are in TestClassLoading
 */
@Category({MiscTests.class, SmallTests.class})
public class TestCoprocessorClassLoader {
  private static final HBaseCommonTestingUtility TEST_UTIL = new HBaseCommonTestingUtility();
  private static final Configuration conf = TEST_UTIL.getConfiguration();
  static {
    TEST_UTIL.getDataTestDir(); // prepare data test dir and hbase local dir
  }

  /**
   * Plants a stale ".test.jar" copy under the local tmp directory, then
   * creates a CoprocessorClassLoader and verifies the stale jar is cleaned
   * up as a side effect of classloader creation.
   */
  @Test
  public void testCleanupOldJars() throws Exception {
    String className = "TestCleanupOldJars";
    String folder = TEST_UTIL.getDataTestDir().toString();
    File jarFile = ClassLoaderTestHelper.buildJar(
      folder, className, null, ClassLoaderTestHelper.localDirPath(conf));
    File tmpJarFile = new File(jarFile.getParent(), "/tmp/" + className + ".test.jar");
    if (tmpJarFile.exists()) tmpJarFile.delete();
    assertFalse("tmp jar file should not exist", tmpJarFile.exists());
    // Copy the built jar into the tmp location to simulate a leftover file;
    // copyBytes closes both streams (last arg true).
    IOUtils.copyBytes(new FileInputStream(jarFile),
      new FileOutputStream(tmpJarFile), conf, true);
    assertTrue("tmp jar file should be created", tmpJarFile.exists());
    Path path = new Path(jarFile.getAbsolutePath());
    ClassLoader parent = TestCoprocessorClassLoader.class.getClassLoader();
    CoprocessorClassLoader.parentDirLockSet.clear(); // So that clean up can be triggered
    ClassLoader classLoader = CoprocessorClassLoader.getClassLoader(path, parent, "111", conf);
    assertNotNull("Classloader should be created", classLoader);
    assertFalse("tmp jar file should be removed", tmpJarFile.exists());
  }

  /** Checks lib-jar extraction naming for an absolute-style "/lib/" prefix. */
  @Test
  public void testLibJarName() throws Exception {
    checkingLibJarName("TestLibJarName.jar", "/lib/");
  }

  /** Checks lib-jar extraction naming for a relative "lib/" prefix. */
  @Test
  public void testRelativeLibJarName() throws Exception {
    checkingLibJarName("TestRelativeLibJarName.jar", "lib/");
  }

  /**
   * Test to make sure the lib jar file extracted from a coprocessor jar have
   * the right name. Otherwise, some existing jar could be override if there are
   * naming conflicts.
   */
  private void checkingLibJarName(String jarName, String libPrefix) throws Exception {
    File tmpFolder = new File(ClassLoaderTestHelper.localDirPath(conf), "tmp");
    if (tmpFolder.exists()) { // Clean up the tmp folder
      for (File f: tmpFolder.listFiles()) {
        f.delete();
      }
    }
    String className = "CheckingLibJarName";
    String folder = TEST_UTIL.getDataTestDir().toString();
    // Build an inner jar, then wrap it in an outer jar under libPrefix.
    File innerJarFile = ClassLoaderTestHelper.buildJar(
      folder, className, null, ClassLoaderTestHelper.localDirPath(conf));
    File targetJarFile = new File(innerJarFile.getParent(), jarName);
    ClassLoaderTestHelper.addJarFilesToJar(targetJarFile, libPrefix, innerJarFile);
    Path path = new Path(targetJarFile.getAbsolutePath());
    ClassLoader parent = TestCoprocessorClassLoader.class.getClassLoader();
    ClassLoader classLoader = CoprocessorClassLoader.getClassLoader(path, parent, "112", conf);
    assertNotNull("Classloader should be created", classLoader);
    // The extracted lib jar must embed both the outer jar name and the
    // inner class name to avoid collisions between coprocessor jars.
    String fileToLookFor = "." + className + ".jar";
    for (String f: tmpFolder.list()) {
      if (f.endsWith(fileToLookFor) && f.contains(jarName)) {
        // Cool, found it;
        return;
      }
    }
    fail("Could not find the expected lib jar file");
  }
}
apache-2.0
jhrcek/kie-wb-common
kie-wb-common-screens/kie-wb-common-library/kie-wb-common-library-client/src/main/java/org/kie/workbench/common/screens/library/client/widgets/project/NewAssetHandlerWidget.java
1992
/* * Copyright 2017 Red Hat, Inc. and/or its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.kie.workbench.common.screens.library.client.widgets.project; import javax.inject.Inject; import com.google.gwt.user.client.ui.IsWidget; import org.jboss.errai.common.client.dom.Button; import org.jboss.errai.common.client.dom.Div; import org.jboss.errai.common.client.dom.HTMLElement; import org.jboss.errai.common.client.dom.Node; import org.jboss.errai.ui.client.local.api.IsElement; import org.jboss.errai.ui.shared.TemplateUtil; import org.jboss.errai.ui.shared.api.annotations.DataField; import org.jboss.errai.ui.shared.api.annotations.Templated; import org.uberfire.mvp.Command; @Templated public class NewAssetHandlerWidget implements IsElement { @Inject @DataField Button button; @Inject @DataField Div text; @Inject @DataField Div icon; public void init(final String title, final IsWidget iconWidget, final Command onClick) { text.setTextContent(title); if (iconWidget != null) { HTMLElement assetIconHtml = TemplateUtil.<HTMLElement>nativeCast(iconWidget.asWidget().getElement()); final Node clonedAssetIconHtml = assetIconHtml.cloneNode(true); this.icon.appendChild(clonedAssetIconHtml); } if (onClick != null) { button.setOnclick(e -> onClick.execute()); } } }
apache-2.0
jwren/intellij-community
platform/diff-impl/src/com/intellij/openapi/diff/impl/dir/DirDiffElementImpl.java
7722
// Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.openapi.diff.impl.dir;

import com.intellij.ide.diff.*;
import com.intellij.util.text.DateFormatUtil;
import org.jetbrains.annotations.Nls;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;

import javax.swing.*;

import static com.intellij.ide.diff.DirDiffOperation.*;

/**
 * One row of the directory-diff view: pairs a source {@link DiffElement} with a
 * target one, records how they differ ({@link DiffType}) and which sync
 * operation ({@link DirDiffOperation}) is pending for the pair. Instances are
 * created only through the static {@code create*} factories; each factory fixes
 * the {@link DiffType} for its kind of row.
 *
 * <p>Sizes are cached as {@code -1} when the side is absent or is a container
 * (directory), so {@code -1} doubles as a "no printable size" sentinel.
 *
 * @author Konstantin Bulenkov
 */
public final class DirDiffElementImpl implements DirDiffElement {
  private final DTree myParent;            // parent node in the diff tree (parent of myNode)
  private DiffType myType;                 // mutable: flips to EQUAL after a copy is applied
  private DiffElement mySource;
  private long mySourceLength;             // -1 when source is null or a container
  private DiffElement myTarget;
  private long myTargetLength;             // -1 when target is null or a container
  private final String myName;
  private DirDiffOperation myOperation;    // user-chosen operation; null means "use default"
  private DirDiffOperation myDefaultOperation;
  private final DTree myNode;              // the tree node this row represents

  private DirDiffElementImpl(DTree parent,
                             @Nullable DiffElement source,
                             @Nullable DiffElement target,
                             DiffType type,
                             @Nls String name,
                             @Nullable DirDiffOperation defaultOperation) {
    myParent = parent.getParent();
    myNode = parent;
    myType = type;
    mySource = source;
    // Containers (directories) have no meaningful byte size; use the -1 sentinel.
    mySourceLength = source == null || source.isContainer() ? -1 : source.getSize();
    myTarget = target;
    myTargetLength = target == null || target.isContainer() ? -1 : target.getSize();
    myName = name;
    // Default operation: an explicit override wins; otherwise it is derived from
    // the diff type (errors do nothing, one-sided rows copy toward the empty
    // side, equal rows stay equal, changed rows merge). SEPARATOR rows fall
    // through with a null default — getOperation() then returns null for them.
    if(defaultOperation != null){
      myDefaultOperation = defaultOperation;
    }
    else if (type == DiffType.ERROR) {
      myDefaultOperation = NONE;
    }
    else if (isSource()) {
      myDefaultOperation = COPY_TO;
    }
    else if (isTarget()) {
      myDefaultOperation = COPY_FROM;
    }
    else if (type == DiffType.EQUAL) {
      myDefaultOperation = EQUAL;
    }
    else if (type == DiffType.CHANGED) {
      assert source != null;
      myDefaultOperation = MERGE;
    }
  }

  /** Last-modified timestamp of the source side, formatted; empty string when absent. */
  public String getSourceModificationDate() {
    return mySource == null ? "" : getLastModification(mySource);
  }

  /** Last-modified timestamp of the target side, formatted; empty string when absent. */
  public String getTargetModificationDate() {
    return myTarget == null ? "" : getLastModification(myTarget);
  }

  /** Re-reads the cached target size (e.g. after the target file changed on disk). */
  public void updateTargetData() {
    if (myTarget != null && !myTarget.isContainer()) {
      myTargetLength = myTarget.getSize();
    }
    else {
      myTargetLength = -1;
    }
  }

  // Formats the element's timestamp; negative timestamps mean "unknown" and
  // render as an empty string.
  private static String getLastModification(DiffElement file) {
    final long timeStamp = file.getTimeStamp();
    return timeStamp < 0 ? "" : DateFormatUtil.formatDateTime(timeStamp);
  }

  /**
   * Creates a CHANGED row (both sides exist but differ). An optional
   * {@code customSourceChooser} may pre-select which side should win, which
   * becomes the default operation instead of MERGE.
   */
  public static DirDiffElementImpl createChange(DTree parent,
                                                @NotNull DiffElement source,
                                                @NotNull DiffElement target,
                                                @Nullable DirDiffSettings.CustomSourceChooser customSourceChooser) {
    DirDiffOperation defaultOperation = null;
    if (customSourceChooser != null) {
      DiffElement chosenSource = customSourceChooser.chooseSource(source, target);
      if (chosenSource == source) { // chosenSource might be null
        defaultOperation = COPY_TO;
      }
      else if (chosenSource == target) {
        defaultOperation = COPY_FROM;
      }
    }
    return new DirDiffElementImpl(parent, source, target, DiffType.CHANGED, source.getPresentableName(), defaultOperation);
  }

  /**
   * Creates an ERROR row. Either side may be null, but not both — the name is
   * taken from whichever side exists (both null would NPE here).
   */
  public static DirDiffElementImpl createError(DTree parent, @Nullable DiffElement source, @Nullable DiffElement target) {
    return new DirDiffElementImpl(parent, source, target, DiffType.ERROR,
                                  source == null ? target.getPresentableName() : source.getPresentableName(), null);
  }

  /** Creates a SOURCE-only row (element exists only on the source side). Note: name is left null. */
  public static DirDiffElementImpl createSourceOnly(DTree parent, @NotNull DiffElement source) {
    return new DirDiffElementImpl(parent, source, null, DiffType.SOURCE, null, null);
  }

  /** Creates a TARGET-only row (element exists only on the target side). Note: name is left null. */
  public static DirDiffElementImpl createTargetOnly(DTree parent, @NotNull DiffElement target) {
    return new DirDiffElementImpl(parent, null, target, DiffType.TARGET, null, null);
  }

  /** Creates a SEPARATOR row used as a directory header between groups of rows. */
  public static DirDiffElementImpl createDirElement(DTree parent, DiffElement src, DiffElement trg, @Nls String name) {
    return new DirDiffElementImpl(parent, src, trg, DiffType.SEPARATOR, name, null);
  }

  /** Creates an EQUAL row (both sides exist and match). */
  public static DirDiffElementImpl createEqual(DTree parent, @NotNull DiffElement source, @NotNull DiffElement target) {
    return new DirDiffElementImpl(parent, source, target, DiffType.EQUAL, source.getPresentableName(), null);
  }

  @Override
  public DiffType getType() {
    return myType;
  }

  @Override
  public DiffElement getSource() {
    return mySource;
  }

  @Override
  public DiffElement getTarget() {
    return myTarget;
  }

  @Override
  public String getName() {
    return myName;
  }

  /** Source-side file name, or null when there is no source element. */
  @Nullable
  public String getSourceName() {
    return mySource == null ? null : mySource.getName();
  }

  /** Source-side presentable name, or null when there is no source element. */
  @Nullable
  public String getSourcePresentableName() {
    return mySource == null ? null : mySource.getPresentableName();
  }

  /** Source size as a decimal string, or null for absent/container sources (the -1 sentinel). */
  @Nullable
  public String getSourceSize() {
    return mySourceLength < 0 ? null : String.valueOf(mySourceLength);
  }

  /** Operation applied when the user has not explicitly chosen one; fixed in the constructor. */
  public DirDiffOperation getDefaultOperation() {
    return myDefaultOperation;
    //if (myType == DType.SOURCE) return COPY_TO;
    //if (myType == DType.TARGET) return COPY_FROM;
    //if (myType == DType.CHANGED) return MERGE;
    //if (myType == DType.EQUAL) return EQUAL;
    //return NONE;
  }

  /** Target-side file name, or null when there is no target element. */
  @Nullable
  public String getTargetName() {
    return myTarget == null ? null : myTarget.getName();
  }

  /** Target-side presentable name, or null when there is no target element. */
  @Nullable
  public String getTargetPresentableName() {
    return myTarget == null ? null : myTarget.getPresentableName();
  }

  /** Target size as a decimal string, or null for absent/container targets (the -1 sentinel). */
  @Nullable
  public String getTargetSize() {
    return myTargetLength < 0 ? null : String.valueOf(myTargetLength);
  }

  public boolean isSeparator() {
    return myType == DiffType.SEPARATOR;
  }

  public boolean isSource() {
    return myType == DiffType.SOURCE;
  }

  public boolean isTarget() {
    return myType == DiffType.TARGET;
  }

  /** Effective operation: the user's explicit choice if any, otherwise the default. */
  @Override
  public DirDiffOperation getOperation() {
    return myOperation == null ? myDefaultOperation : myOperation;
  }

  // NOTE(review): despite the name this replaces the TARGET element and copies
  // the source's cached length onto the target side, then marks the row EQUAL —
  // presumably invoked after the source has been synced over the target;
  // confirm against callers.
  public void updateSourceFromTarget(DiffElement target) {
    myTarget = target;
    myTargetLength = mySourceLength;
    myDefaultOperation = EQUAL;
    myOperation = EQUAL;
    myType = DiffType.EQUAL;
  }

  // NOTE(review): mirror of the method above — replaces the SOURCE element and
  // copies the target's cached length onto the source side, then marks the row
  // EQUAL; presumably invoked after the target has been synced over the source.
  public void updateTargetFromSource(DiffElement source) {
    mySource = source;
    mySourceLength = myTargetLength;
    myDefaultOperation = EQUAL;
    myOperation = EQUAL;
    myType = DiffType.EQUAL;
  }

  /**
   * Cycles the pending operation to the next sensible choice for this row's type:
   * SOURCE rows cycle COPY_TO → DELETE → NONE, TARGET rows cycle
   * COPY_FROM → DELETE → NONE, CHANGED rows cycle MERGE → COPY_TO → COPY_FROM.
   * Other row types are left untouched.
   */
  public void setNextOperation() {
    final DirDiffOperation op = getOperation();
    if (myType == DiffType.SOURCE) {
      myOperation = op == COPY_TO ? DELETE : op == DELETE ? NONE : COPY_TO;
    }
    else if (myType == DiffType.TARGET) {
      myOperation = op == COPY_FROM ? DELETE : op == DELETE ? NONE : COPY_FROM;
    }
    else if (myType == DiffType.CHANGED) {
      myOperation = op == MERGE ? COPY_TO : op == COPY_TO ? COPY_FROM : MERGE;
    }
  }

  /**
   * Sets the pending operation, silently ignoring combinations that make no
   * sense for this row's type (e.g. copying toward the side that already has
   * the element, or deleting a CHANGED pair).
   */
  public void setOperation(@NotNull DirDiffOperation operation) {
    if (myType == DiffType.EQUAL || myType == DiffType.SEPARATOR) return;
    if (myType == DiffType.TARGET && operation == COPY_TO) return;
    if (myType == DiffType.SOURCE && operation == COPY_FROM) return;
    if (myType == DiffType.CHANGED && operation == DELETE) return;
    myOperation = operation;
  }

  /** Icon of the source side, or null when absent. */
  public Icon getSourceIcon() {
    return getIcon(mySource);
  }

  /** Icon of the target side, or null when absent. */
  public Icon getTargetIcon() {
    return getIcon(myTarget);
  }

  private static Icon getIcon(DiffElement element) {
    return element != null ? element.getIcon() : null;
  }

  /** Parent of this row's tree node. */
  public DTree getParentNode() {
    return myParent;
  }

  /** The tree node this row was built from. */
  public DTree getNode() {
    return myNode;
  }
}
apache-2.0
hash-X/hadoop
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
49285
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import java.io.IOException; import java.lang.reflect.Method; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.NodeId; import 
org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.QueueInfo; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceOption; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.AsyncDispatcher; import org.apache.hadoop.yarn.event.InlineDispatcher; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest; import org.apache.hadoop.yarn.server.resourcemanager.Application; import org.apache.hadoop.yarn.server.resourcemanager.MockAM; import org.apache.hadoop.yarn.server.resourcemanager.MockNM; import org.apache.hadoop.yarn.server.resourcemanager.MockNodes; import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.Task; import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter; import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics; import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppReport; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.TestSchedulerUtils; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeResourceUpdateSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM; import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.util.resource.Resources; import org.apache.log4j.Level; import 
org.apache.log4j.LogManager; import org.apache.log4j.Logger; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; public class TestFifoScheduler { private static final Log LOG = LogFactory.getLog(TestFifoScheduler.class); private final int GB = 1024; private ResourceManager resourceManager = null; private static Configuration conf; private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); @Before public void setUp() throws Exception { conf = new Configuration(); conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class); resourceManager = new MockRM(conf); } @After public void tearDown() throws Exception { resourceManager.stop(); } private org.apache.hadoop.yarn.server.resourcemanager.NodeManager registerNode(String hostName, int containerManagerPort, int nmHttpPort, String rackName, Resource capability) throws IOException, YarnException { return new org.apache.hadoop.yarn.server.resourcemanager.NodeManager( hostName, containerManagerPort, nmHttpPort, rackName, capability, resourceManager); } private ApplicationAttemptId createAppAttemptId(int appId, int attemptId) { ApplicationId appIdImpl = ApplicationId.newInstance(0, appId); ApplicationAttemptId attId = ApplicationAttemptId.newInstance(appIdImpl, attemptId); return attId; } private ResourceRequest createResourceRequest(int memory, String host, int priority, int numContainers) { ResourceRequest request = recordFactory .newRecordInstance(ResourceRequest.class); request.setCapability(Resources.createResource(memory)); request.setResourceName(host); request.setNumContainers(numContainers); Priority prio = recordFactory.newRecordInstance(Priority.class); prio.setPriority(priority); request.setPriority(prio); return request; } @Test(timeout=5000) public void testFifoSchedulerCapacityWhenNoNMs() { FifoScheduler scheduler = new FifoScheduler(); QueueInfo queueInfo = scheduler.getQueueInfo(null, false, 
false); Assert.assertEquals(0.0f, queueInfo.getCurrentCapacity(), 0.0f); } @Test(timeout=5000) public void testAppAttemptMetrics() throws Exception { AsyncDispatcher dispatcher = new InlineDispatcher(); FifoScheduler scheduler = new FifoScheduler(); RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class); RMContext rmContext = new RMContextImpl(dispatcher, null, null, null, null, null, null, null, null, writer, scheduler); ((RMContextImpl) rmContext).setSystemMetricsPublisher( mock(SystemMetricsPublisher.class)); Configuration conf = new Configuration(); scheduler.setRMContext(rmContext); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf, rmContext); QueueMetrics metrics = scheduler.getRootQueueMetrics(); int beforeAppsSubmitted = metrics.getAppsSubmitted(); ApplicationId appId = BuilderUtils.newApplicationId(200, 1); ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId( appId, 1); SchedulerEvent appEvent = new AppAddedSchedulerEvent(appId, "queue", "user"); scheduler.handle(appEvent); SchedulerEvent attemptEvent = new AppAttemptAddedSchedulerEvent(appAttemptId, false); scheduler.handle(attemptEvent); appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 2); SchedulerEvent attemptEvent2 = new AppAttemptAddedSchedulerEvent(appAttemptId, false); scheduler.handle(attemptEvent2); int afterAppsSubmitted = metrics.getAppsSubmitted(); Assert.assertEquals(1, afterAppsSubmitted - beforeAppsSubmitted); scheduler.stop(); } @Test(timeout=2000) public void testNodeLocalAssignment() throws Exception { AsyncDispatcher dispatcher = new InlineDispatcher(); Configuration conf = new Configuration(); RMContainerTokenSecretManager containerTokenSecretManager = new RMContainerTokenSecretManager(conf); containerTokenSecretManager.rollMasterKey(); NMTokenSecretManagerInRM nmTokenSecretManager = new NMTokenSecretManagerInRM(conf); nmTokenSecretManager.rollMasterKey(); RMApplicationHistoryWriter writer = 
mock(RMApplicationHistoryWriter.class); FifoScheduler scheduler = new FifoScheduler(); RMContext rmContext = new RMContextImpl(dispatcher, null, null, null, null, null, containerTokenSecretManager, nmTokenSecretManager, null, writer, scheduler); ((RMContextImpl) rmContext).setSystemMetricsPublisher( mock(SystemMetricsPublisher.class)); scheduler.setRMContext(rmContext); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(new Configuration(), rmContext); RMNode node0 = MockNodes.newNodeInfo(1, Resources.createResource(1024 * 64), 1, "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node0); scheduler.handle(nodeEvent1); int _appId = 1; int _appAttemptId = 1; ApplicationAttemptId appAttemptId = createAppAttemptId(_appId, _appAttemptId); createMockRMApp(appAttemptId, rmContext); AppAddedSchedulerEvent appEvent = new AppAddedSchedulerEvent(appAttemptId.getApplicationId(), "queue1", "user1"); scheduler.handle(appEvent); AppAttemptAddedSchedulerEvent attemptEvent = new AppAttemptAddedSchedulerEvent(appAttemptId, false); scheduler.handle(attemptEvent); int memory = 64; int nConts = 3; int priority = 20; List<ResourceRequest> ask = new ArrayList<ResourceRequest>(); ResourceRequest nodeLocal = createResourceRequest(memory, node0.getHostName(), priority, nConts); ResourceRequest rackLocal = createResourceRequest(memory, node0.getRackName(), priority, nConts); ResourceRequest any = createResourceRequest(memory, ResourceRequest.ANY, priority, nConts); ask.add(nodeLocal); ask.add(rackLocal); ask.add(any); scheduler.allocate(appAttemptId, ask, new ArrayList<ContainerId>(), null, null); NodeUpdateSchedulerEvent node0Update = new NodeUpdateSchedulerEvent(node0); // Before the node update event, there are 3 local requests outstanding Assert.assertEquals(3, nodeLocal.getNumContainers()); scheduler.handle(node0Update); // After the node update event, check that there are no more local requests // outstanding Assert.assertEquals(0, 
nodeLocal.getNumContainers()); //Also check that the containers were scheduled SchedulerAppReport info = scheduler.getSchedulerAppInfo(appAttemptId); Assert.assertEquals(3, info.getLiveContainers().size()); scheduler.stop(); } @Test(timeout=2000) public void testUpdateResourceOnNode() throws Exception { AsyncDispatcher dispatcher = new InlineDispatcher(); Configuration conf = new Configuration(); RMContainerTokenSecretManager containerTokenSecretManager = new RMContainerTokenSecretManager(conf); containerTokenSecretManager.rollMasterKey(); NMTokenSecretManagerInRM nmTokenSecretManager = new NMTokenSecretManagerInRM(conf); nmTokenSecretManager.rollMasterKey(); RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class); FifoScheduler scheduler = new FifoScheduler(){ @SuppressWarnings("unused") public Map<NodeId, FiCaSchedulerNode> getNodes(){ return nodes; } }; RMContext rmContext = new RMContextImpl(dispatcher, null, null, null, null, null, containerTokenSecretManager, nmTokenSecretManager, null, writer, scheduler); ((RMContextImpl) rmContext).setSystemMetricsPublisher( mock(SystemMetricsPublisher.class)); NullRMNodeLabelsManager nlm = new NullRMNodeLabelsManager(); nlm.init(new Configuration()); rmContext.setNodeLabelManager(nlm); scheduler.setRMContext(rmContext); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(new Configuration(), rmContext); RMNode node0 = MockNodes.newNodeInfo(1, Resources.createResource(2048, 4), 1, "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node0); scheduler.handle(nodeEvent1); Method method = scheduler.getClass().getDeclaredMethod("getNodes"); @SuppressWarnings("unchecked") Map<NodeId, FiCaSchedulerNode> schedulerNodes = (Map<NodeId, FiCaSchedulerNode>) method.invoke(scheduler); assertEquals(schedulerNodes.values().size(), 1); Resource newResource = Resources.createResource(1024, 4); NodeResourceUpdateSchedulerEvent node0ResourceUpdate = new 
NodeResourceUpdateSchedulerEvent(node0, ResourceOption.newInstance( newResource, RMNode.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT)); scheduler.handle(node0ResourceUpdate); // SchedulerNode's total resource and available resource are changed. assertEquals(schedulerNodes.get(node0.getNodeID()).getTotalResource() .getMemory(), 1024); assertEquals(schedulerNodes.get(node0.getNodeID()). getAvailableResource().getMemory(), 1024); QueueInfo queueInfo = scheduler.getQueueInfo(null, false, false); Assert.assertEquals(0.0f, queueInfo.getCurrentCapacity(), 0.0f); int _appId = 1; int _appAttemptId = 1; ApplicationAttemptId appAttemptId = createAppAttemptId(_appId, _appAttemptId); createMockRMApp(appAttemptId, rmContext); AppAddedSchedulerEvent appEvent = new AppAddedSchedulerEvent(appAttemptId.getApplicationId(), "queue1", "user1"); scheduler.handle(appEvent); AppAttemptAddedSchedulerEvent attemptEvent = new AppAttemptAddedSchedulerEvent(appAttemptId, false); scheduler.handle(attemptEvent); int memory = 1024; int priority = 1; List<ResourceRequest> ask = new ArrayList<ResourceRequest>(); ResourceRequest nodeLocal = createResourceRequest(memory, node0.getHostName(), priority, 1); ResourceRequest rackLocal = createResourceRequest(memory, node0.getRackName(), priority, 1); ResourceRequest any = createResourceRequest(memory, ResourceRequest.ANY, priority, 1); ask.add(nodeLocal); ask.add(rackLocal); ask.add(any); scheduler.allocate(appAttemptId, ask, new ArrayList<ContainerId>(), null, null); // Before the node update event, there are one local request Assert.assertEquals(1, nodeLocal.getNumContainers()); NodeUpdateSchedulerEvent node0Update = new NodeUpdateSchedulerEvent(node0); // Now schedule. 
scheduler.handle(node0Update); // After the node update event, check no local request Assert.assertEquals(0, nodeLocal.getNumContainers()); // Also check that one container was scheduled SchedulerAppReport info = scheduler.getSchedulerAppInfo(appAttemptId); Assert.assertEquals(1, info.getLiveContainers().size()); // And check the default Queue now is full. queueInfo = scheduler.getQueueInfo(null, false, false); Assert.assertEquals(1.0f, queueInfo.getCurrentCapacity(), 0.0f); } // @Test public void testFifoScheduler() throws Exception { LOG.info("--- START: testFifoScheduler ---"); final int GB = 1024; // Register node1 String host_0 = "host_0"; org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_0 = registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, Resources.createResource(4 * GB, 1)); nm_0.heartbeat(); // Register node2 String host_1 = "host_1"; org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_1 = registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK, Resources.createResource(2 * GB, 1)); nm_1.heartbeat(); // ResourceRequest priorities Priority priority_0 = org.apache.hadoop.yarn.server.resourcemanager.resource.Priority.create(0); Priority priority_1 = org.apache.hadoop.yarn.server.resourcemanager.resource.Priority.create(1); // Submit an application Application application_0 = new Application("user_0", resourceManager); application_0.submit(); application_0.addNodeManager(host_0, 1234, nm_0); application_0.addNodeManager(host_1, 1234, nm_1); Resource capability_0_0 = Resources.createResource(GB); application_0.addResourceRequestSpec(priority_1, capability_0_0); Resource capability_0_1 = Resources.createResource(2 * GB); application_0.addResourceRequestSpec(priority_0, capability_0_1); Task task_0_0 = new Task(application_0, priority_1, new String[] {host_0, host_1}); application_0.addTask(task_0_0); // Submit another application Application application_1 = new Application("user_1", resourceManager); 
application_1.submit(); application_1.addNodeManager(host_0, 1234, nm_0); application_1.addNodeManager(host_1, 1234, nm_1); Resource capability_1_0 = Resources.createResource(3 * GB); application_1.addResourceRequestSpec(priority_1, capability_1_0); Resource capability_1_1 = Resources.createResource(4 * GB); application_1.addResourceRequestSpec(priority_0, capability_1_1); Task task_1_0 = new Task(application_1, priority_1, new String[] {host_0, host_1}); application_1.addTask(task_1_0); // Send resource requests to the scheduler LOG.info("Send resource requests to the scheduler"); application_0.schedule(); application_1.schedule(); // Send a heartbeat to kick the tires on the Scheduler LOG.info("Send a heartbeat to kick the tires on the Scheduler... " + "nm0 -> task_0_0 and task_1_0 allocated, used=4G " + "nm1 -> nothing allocated"); nm_0.heartbeat(); // task_0_0 and task_1_0 allocated, used=4G nm_1.heartbeat(); // nothing allocated // Get allocations from the scheduler application_0.schedule(); // task_0_0 checkApplicationResourceUsage(GB, application_0); application_1.schedule(); // task_1_0 checkApplicationResourceUsage(3 * GB, application_1); nm_0.heartbeat(); nm_1.heartbeat(); checkNodeResourceUsage(4*GB, nm_0); // task_0_0 (1G) and task_1_0 (3G) checkNodeResourceUsage(0*GB, nm_1); // no tasks, 2G available LOG.info("Adding new tasks..."); Task task_1_1 = new Task(application_1, priority_1, new String[] {ResourceRequest.ANY}); application_1.addTask(task_1_1); Task task_1_2 = new Task(application_1, priority_1, new String[] {ResourceRequest.ANY}); application_1.addTask(task_1_2); Task task_1_3 = new Task(application_1, priority_0, new String[] {ResourceRequest.ANY}); application_1.addTask(task_1_3); application_1.schedule(); Task task_0_1 = new Task(application_0, priority_1, new String[] {host_0, host_1}); application_0.addTask(task_0_1); Task task_0_2 = new Task(application_0, priority_1, new String[] {host_0, host_1}); application_0.addTask(task_0_2); Task 
task_0_3 = new Task(application_0, priority_0, new String[] {ResourceRequest.ANY}); application_0.addTask(task_0_3); application_0.schedule(); // Send a heartbeat to kick the tires on the Scheduler LOG.info("Sending hb from " + nm_0.getHostName()); nm_0.heartbeat(); // nothing new, used=4G LOG.info("Sending hb from " + nm_1.getHostName()); nm_1.heartbeat(); // task_0_3, used=2G // Get allocations from the scheduler LOG.info("Trying to allocate..."); application_0.schedule(); checkApplicationResourceUsage(3 * GB, application_0); application_1.schedule(); checkApplicationResourceUsage(3 * GB, application_1); nm_0.heartbeat(); nm_1.heartbeat(); checkNodeResourceUsage(4*GB, nm_0); checkNodeResourceUsage(2*GB, nm_1); // Complete tasks LOG.info("Finishing up task_0_0"); application_0.finishTask(task_0_0); // Now task_0_1 application_0.schedule(); application_1.schedule(); nm_0.heartbeat(); nm_1.heartbeat(); checkApplicationResourceUsage(3 * GB, application_0); checkApplicationResourceUsage(3 * GB, application_1); checkNodeResourceUsage(4*GB, nm_0); checkNodeResourceUsage(2*GB, nm_1); LOG.info("Finishing up task_1_0"); application_1.finishTask(task_1_0); // Now task_0_2 application_0.schedule(); // final overcommit for app0 caused here application_1.schedule(); nm_0.heartbeat(); // final overcommit for app0 occurs here nm_1.heartbeat(); checkApplicationResourceUsage(4 * GB, application_0); checkApplicationResourceUsage(0 * GB, application_1); //checkNodeResourceUsage(1*GB, nm_0); // final over-commit -> rm.node->1G, test.node=2G checkNodeResourceUsage(2*GB, nm_1); LOG.info("Finishing up task_0_3"); application_0.finishTask(task_0_3); // No more application_0.schedule(); application_1.schedule(); nm_0.heartbeat(); nm_1.heartbeat(); checkApplicationResourceUsage(2 * GB, application_0); checkApplicationResourceUsage(0 * GB, application_1); //checkNodeResourceUsage(2*GB, nm_0); // final over-commit, rm.node->1G, test.node->2G checkNodeResourceUsage(0*GB, nm_1); 
LOG.info("Finishing up task_0_1"); application_0.finishTask(task_0_1); application_0.schedule(); application_1.schedule(); nm_0.heartbeat(); nm_1.heartbeat(); checkApplicationResourceUsage(1 * GB, application_0); checkApplicationResourceUsage(0 * GB, application_1); LOG.info("Finishing up task_0_2"); application_0.finishTask(task_0_2); // now task_1_3 can go! application_0.schedule(); application_1.schedule(); nm_0.heartbeat(); nm_1.heartbeat(); checkApplicationResourceUsage(0 * GB, application_0); checkApplicationResourceUsage(4 * GB, application_1); LOG.info("Finishing up task_1_3"); application_1.finishTask(task_1_3); // now task_1_1 application_0.schedule(); application_1.schedule(); nm_0.heartbeat(); nm_1.heartbeat(); checkApplicationResourceUsage(0 * GB, application_0); checkApplicationResourceUsage(3 * GB, application_1); LOG.info("Finishing up task_1_1"); application_1.finishTask(task_1_1); application_0.schedule(); application_1.schedule(); nm_0.heartbeat(); nm_1.heartbeat(); checkApplicationResourceUsage(0 * GB, application_0); checkApplicationResourceUsage(3 * GB, application_1); LOG.info("--- END: testFifoScheduler ---"); } @Test public void testGetAppsInQueue() throws Exception { Application application_0 = new Application("user_0", resourceManager); application_0.submit(); Application application_1 = new Application("user_0", resourceManager); application_1.submit(); ResourceScheduler scheduler = resourceManager.getResourceScheduler(); List<ApplicationAttemptId> appsInDefault = scheduler.getAppsInQueue("default"); assertTrue(appsInDefault.contains(application_0.getApplicationAttemptId())); assertTrue(appsInDefault.contains(application_1.getApplicationAttemptId())); assertEquals(2, appsInDefault.size()); Assert.assertNull(scheduler.getAppsInQueue("someotherqueue")); } @Test public void testAddAndRemoveAppFromFiFoScheduler() throws Exception { Configuration conf = new Configuration(); conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, 
ResourceScheduler.class); MockRM rm = new MockRM(conf); @SuppressWarnings("unchecked") AbstractYarnScheduler<SchedulerApplicationAttempt, SchedulerNode> fs = (AbstractYarnScheduler<SchedulerApplicationAttempt, SchedulerNode>) rm .getResourceScheduler(); TestSchedulerUtils.verifyAppAddedAndRemovedFromScheduler( fs.getSchedulerApplications(), fs, "queue"); } @Test(timeout = 30000) public void testConfValidation() throws Exception { FifoScheduler scheduler = new FifoScheduler(); Configuration conf = new YarnConfiguration(); conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 2048); conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 1024); try { scheduler.serviceInit(conf); fail("Exception is expected because the min memory allocation is" + " larger than the max memory allocation."); } catch (YarnRuntimeException e) { // Exception is expected. assertTrue("The thrown exception is not the expected one.", e .getMessage().startsWith("Invalid resource scheduler memory")); } } @Test(timeout = 60000) public void testAllocateContainerOnNodeWithoutOffSwitchSpecified() throws Exception { Logger rootLogger = LogManager.getRootLogger(); rootLogger.setLevel(Level.DEBUG); MockRM rm = new MockRM(conf); rm.start(); MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB); RMApp app1 = rm.submitApp(2048); // kick the scheduling, 2 GB given to AM1, remaining 4GB on nm1 nm1.nodeHeartbeat(true); RMAppAttempt attempt1 = app1.getCurrentAppAttempt(); MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId()); am1.registerAppAttempt(); // add request for containers List<ResourceRequest> requests = new ArrayList<ResourceRequest>(); requests.add(am1.createResourceReq("127.0.0.1", 1 * GB, 1, 1)); requests.add(am1.createResourceReq("/default-rack", 1 * GB, 1, 1)); am1.allocate(requests, null); // send the request try { // kick the schedule nm1.nodeHeartbeat(true); } catch (NullPointerException e) { Assert.fail("NPE when allocating container on node but " + "forget to 
set off-switch request should be handled"); } rm.stop(); } @Test(timeout = 60000) public void testFifoScheduling() throws Exception { Logger rootLogger = LogManager.getRootLogger(); rootLogger.setLevel(Level.DEBUG); MockRM rm = new MockRM(conf); rm.start(); MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB); MockNM nm2 = rm.registerNode("127.0.0.2:5678", 4 * GB); RMApp app1 = rm.submitApp(2048); // kick the scheduling, 2 GB given to AM1, remaining 4GB on nm1 nm1.nodeHeartbeat(true); RMAppAttempt attempt1 = app1.getCurrentAppAttempt(); MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId()); am1.registerAppAttempt(); SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory()); RMApp app2 = rm.submitApp(2048); // kick the scheduling, 2GB given to AM, remaining 2 GB on nm2 nm2.nodeHeartbeat(true); RMAppAttempt attempt2 = app2.getCurrentAppAttempt(); MockAM am2 = rm.sendAMLaunched(attempt2.getAppAttemptId()); am2.registerAppAttempt(); SchedulerNodeReport report_nm2 = rm.getResourceScheduler().getNodeReport(nm2.getNodeId()); Assert.assertEquals(2 * GB, report_nm2.getUsedResource().getMemory()); // add request for containers am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, GB, 1, 1); AllocateResponse alloc1Response = am1.schedule(); // send the request // add request for containers am2.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 3 * GB, 0, 1); AllocateResponse alloc2Response = am2.schedule(); // send the request // kick the scheduler, 1 GB and 3 GB given to AM1 and AM2, remaining 0 nm1.nodeHeartbeat(true); while (alloc1Response.getAllocatedContainers().size() < 1) { LOG.info("Waiting for containers to be created for app 1..."); Thread.sleep(1000); alloc1Response = am1.schedule(); } while (alloc2Response.getAllocatedContainers().size() < 1) { LOG.info("Waiting for containers to be created for app 2..."); Thread.sleep(1000); alloc2Response = 
am2.schedule(); } // kick the scheduler, nothing given remaining 2 GB. nm2.nodeHeartbeat(true); List<Container> allocated1 = alloc1Response.getAllocatedContainers(); Assert.assertEquals(1, allocated1.size()); Assert.assertEquals(1 * GB, allocated1.get(0).getResource().getMemory()); Assert.assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId()); List<Container> allocated2 = alloc2Response.getAllocatedContainers(); Assert.assertEquals(1, allocated2.size()); Assert.assertEquals(3 * GB, allocated2.get(0).getResource().getMemory()); Assert.assertEquals(nm1.getNodeId(), allocated2.get(0).getNodeId()); report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); report_nm2 = rm.getResourceScheduler().getNodeReport(nm2.getNodeId()); Assert.assertEquals(0, report_nm1.getAvailableResource().getMemory()); Assert.assertEquals(2 * GB, report_nm2.getAvailableResource().getMemory()); Assert.assertEquals(6 * GB, report_nm1.getUsedResource().getMemory()); Assert.assertEquals(2 * GB, report_nm2.getUsedResource().getMemory()); Container c1 = allocated1.get(0); Assert.assertEquals(GB, c1.getResource().getMemory()); ContainerStatus containerStatus = BuilderUtils.newContainerStatus(c1.getId(), ContainerState.COMPLETE, "", 0); nm1.containerStatus(containerStatus); int waitCount = 0; while (attempt1.getJustFinishedContainers().size() < 1 && waitCount++ != 20) { LOG.info("Waiting for containers to be finished for app 1... 
Tried " + waitCount + " times already.."); Thread.sleep(1000); } Assert.assertEquals(1, attempt1.getJustFinishedContainers().size()); Assert.assertEquals(1, am1.schedule().getCompletedContainersStatuses() .size()); report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); Assert.assertEquals(5 * GB, report_nm1.getUsedResource().getMemory()); rm.stop(); } @Test(timeout = 60000) public void testNodeUpdateBeforeAppAttemptInit() throws Exception { FifoScheduler scheduler = new FifoScheduler(); MockRM rm = new MockRM(conf); scheduler.setRMContext(rm.getRMContext()); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf, rm.getRMContext()); RMNode node = MockNodes.newNodeInfo(1, Resources.createResource(1024, 4), 1, "127.0.0.1"); scheduler.handle(new NodeAddedSchedulerEvent(node)); ApplicationId appId = ApplicationId.newInstance(0, 1); scheduler.addApplication(appId, "queue1", "user1", false); NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node); try { scheduler.handle(updateEvent); } catch (NullPointerException e) { Assert.fail(); } ApplicationAttemptId attId = ApplicationAttemptId.newInstance(appId, 1); scheduler.addApplicationAttempt(attId, false, false); rm.stop(); } private void testMinimumAllocation(YarnConfiguration conf, int testAlloc) throws Exception { MockRM rm = new MockRM(conf); rm.start(); // Register node1 MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB); // Submit an application RMApp app1 = rm.submitApp(testAlloc); // kick the scheduling nm1.nodeHeartbeat(true); RMAppAttempt attempt1 = app1.getCurrentAppAttempt(); MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId()); am1.registerAppAttempt(); SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); int checkAlloc = conf.getInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB); Assert.assertEquals(checkAlloc, 
report_nm1.getUsedResource().getMemory()); rm.stop(); } @Test(timeout = 60000) public void testDefaultMinimumAllocation() throws Exception { // Test with something lesser than default testMinimumAllocation(new YarnConfiguration(TestFifoScheduler.conf), YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB / 2); } @Test(timeout = 60000) public void testNonDefaultMinimumAllocation() throws Exception { // Set custom min-alloc to test tweaking it int allocMB = 1536; YarnConfiguration conf = new YarnConfiguration(TestFifoScheduler.conf); conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, allocMB); conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, allocMB * 10); // Test for something lesser than this. testMinimumAllocation(conf, allocMB / 2); } @Test(timeout = 50000) public void testReconnectedNode() throws Exception { CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration(); conf.setQueues("default", new String[] { "default" }); conf.setCapacity("default", 100); FifoScheduler fs = new FifoScheduler(); fs.init(conf); fs.start(); // mock rmContext to avoid NPE. 
RMContext context = mock(RMContext.class); fs.reinitialize(conf, null); fs.setRMContext(context); RMNode n1 = MockNodes.newNodeInfo(0, MockNodes.newResource(4 * GB), 1, "127.0.0.2"); RMNode n2 = MockNodes.newNodeInfo(0, MockNodes.newResource(2 * GB), 2, "127.0.0.3"); fs.handle(new NodeAddedSchedulerEvent(n1)); fs.handle(new NodeAddedSchedulerEvent(n2)); fs.handle(new NodeUpdateSchedulerEvent(n1)); Assert.assertEquals(6 * GB, fs.getRootQueueMetrics().getAvailableMB()); // reconnect n1 with downgraded memory n1 = MockNodes.newNodeInfo(0, MockNodes.newResource(2 * GB), 1, "127.0.0.2"); fs.handle(new NodeRemovedSchedulerEvent(n1)); fs.handle(new NodeAddedSchedulerEvent(n1)); fs.handle(new NodeUpdateSchedulerEvent(n1)); Assert.assertEquals(4 * GB, fs.getRootQueueMetrics().getAvailableMB()); fs.stop(); } @Test(timeout = 50000) public void testBlackListNodes() throws Exception { Configuration conf = new Configuration(); conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class); MockRM rm = new MockRM(conf); rm.start(); FifoScheduler fs = (FifoScheduler) rm.getResourceScheduler(); int rack_num_0 = 0; int rack_num_1 = 1; // Add 4 nodes in 2 racks // host_0_0 in rack0 String host_0_0 = "127.0.0.1"; RMNode n1 = MockNodes.newNodeInfo(rack_num_0, MockNodes.newResource(4 * GB), 1, host_0_0); fs.handle(new NodeAddedSchedulerEvent(n1)); // host_0_1 in rack0 String host_0_1 = "127.0.0.2"; RMNode n2 = MockNodes.newNodeInfo(rack_num_0, MockNodes.newResource(4 * GB), 1, host_0_1); fs.handle(new NodeAddedSchedulerEvent(n2)); // host_1_0 in rack1 String host_1_0 = "127.0.0.3"; RMNode n3 = MockNodes.newNodeInfo(rack_num_1, MockNodes.newResource(4 * GB), 1, host_1_0); fs.handle(new NodeAddedSchedulerEvent(n3)); // host_1_1 in rack1 String host_1_1 = "127.0.0.4"; RMNode n4 = MockNodes.newNodeInfo(rack_num_1, MockNodes.newResource(4 * GB), 1, host_1_1); fs.handle(new NodeAddedSchedulerEvent(n4)); // Add one application ApplicationId appId1 = 
BuilderUtils.newApplicationId(100, 1); ApplicationAttemptId appAttemptId1 = BuilderUtils.newApplicationAttemptId(appId1, 1); createMockRMApp(appAttemptId1, rm.getRMContext()); SchedulerEvent appEvent = new AppAddedSchedulerEvent(appId1, "queue", "user"); fs.handle(appEvent); SchedulerEvent attemptEvent = new AppAttemptAddedSchedulerEvent(appAttemptId1, false); fs.handle(attemptEvent); List<ContainerId> emptyId = new ArrayList<ContainerId>(); List<ResourceRequest> emptyAsk = new ArrayList<ResourceRequest>(); // Allow rack-locality for rack_1, but blacklist host_1_0 // Set up resource requests // Ask for a 1 GB container for app 1 List<ResourceRequest> ask1 = new ArrayList<ResourceRequest>(); ask1.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0), "rack1", BuilderUtils.newResource(GB, 1), 1)); ask1.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0), ResourceRequest.ANY, BuilderUtils.newResource(GB, 1), 1)); fs.allocate(appAttemptId1, ask1, emptyId, Collections.singletonList(host_1_0), null); // Trigger container assignment fs.handle(new NodeUpdateSchedulerEvent(n3)); // Get the allocation for the application and verify no allocation on // blacklist node Allocation allocation1 = fs.allocate(appAttemptId1, emptyAsk, emptyId, null, null); Assert.assertEquals("allocation1", 0, allocation1.getContainers().size()); // verify host_1_1 can get allocated as not in blacklist fs.handle(new NodeUpdateSchedulerEvent(n4)); Allocation allocation2 = fs.allocate(appAttemptId1, emptyAsk, emptyId, null, null); Assert.assertEquals("allocation2", 1, allocation2.getContainers().size()); List<Container> containerList = allocation2.getContainers(); for (Container container : containerList) { Assert.assertEquals("Container is allocated on n4", container.getNodeId(), n4.getNodeID()); } // Ask for a 1 GB container again for app 1 List<ResourceRequest> ask2 = new ArrayList<ResourceRequest>(); // this time, rack0 is also in blacklist, so only host_1_1 is available to 
// be assigned ask2.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0), ResourceRequest.ANY, BuilderUtils.newResource(GB, 1), 1)); fs.allocate(appAttemptId1, ask2, emptyId, Collections.singletonList("rack0"), null); // verify n1 is not qualified to be allocated fs.handle(new NodeUpdateSchedulerEvent(n1)); Allocation allocation3 = fs.allocate(appAttemptId1, emptyAsk, emptyId, null, null); Assert.assertEquals("allocation3", 0, allocation3.getContainers().size()); // verify n2 is not qualified to be allocated fs.handle(new NodeUpdateSchedulerEvent(n2)); Allocation allocation4 = fs.allocate(appAttemptId1, emptyAsk, emptyId, null, null); Assert.assertEquals("allocation4", 0, allocation4.getContainers().size()); // verify n3 is not qualified to be allocated fs.handle(new NodeUpdateSchedulerEvent(n3)); Allocation allocation5 = fs.allocate(appAttemptId1, emptyAsk, emptyId, null, null); Assert.assertEquals("allocation5", 0, allocation5.getContainers().size()); fs.handle(new NodeUpdateSchedulerEvent(n4)); Allocation allocation6 = fs.allocate(appAttemptId1, emptyAsk, emptyId, null, null); Assert.assertEquals("allocation6", 1, allocation6.getContainers().size()); containerList = allocation6.getContainers(); for (Container container : containerList) { Assert.assertEquals("Container is allocated on n4", container.getNodeId(), n4.getNodeID()); } rm.stop(); } @Test(timeout = 50000) public void testHeadroom() throws Exception { Configuration conf = new Configuration(); conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class); MockRM rm = new MockRM(conf); rm.start(); FifoScheduler fs = (FifoScheduler) rm.getResourceScheduler(); // Add a node RMNode n1 = MockNodes.newNodeInfo(0, MockNodes.newResource(4 * GB), 1, "127.0.0.2"); fs.handle(new NodeAddedSchedulerEvent(n1)); // Add two applications ApplicationId appId1 = BuilderUtils.newApplicationId(100, 1); ApplicationAttemptId appAttemptId1 = BuilderUtils.newApplicationAttemptId(appId1, 
1); createMockRMApp(appAttemptId1, rm.getRMContext()); SchedulerEvent appEvent = new AppAddedSchedulerEvent(appId1, "queue", "user"); fs.handle(appEvent); SchedulerEvent attemptEvent = new AppAttemptAddedSchedulerEvent(appAttemptId1, false); fs.handle(attemptEvent); ApplicationId appId2 = BuilderUtils.newApplicationId(200, 2); ApplicationAttemptId appAttemptId2 = BuilderUtils.newApplicationAttemptId(appId2, 1); createMockRMApp(appAttemptId2, rm.getRMContext()); SchedulerEvent appEvent2 = new AppAddedSchedulerEvent(appId2, "queue", "user"); fs.handle(appEvent2); SchedulerEvent attemptEvent2 = new AppAttemptAddedSchedulerEvent(appAttemptId2, false); fs.handle(attemptEvent2); List<ContainerId> emptyId = new ArrayList<ContainerId>(); List<ResourceRequest> emptyAsk = new ArrayList<ResourceRequest>(); // Set up resource requests // Ask for a 1 GB container for app 1 List<ResourceRequest> ask1 = new ArrayList<ResourceRequest>(); ask1.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0), ResourceRequest.ANY, BuilderUtils.newResource(GB, 1), 1)); fs.allocate(appAttemptId1, ask1, emptyId, null, null); // Ask for a 2 GB container for app 2 List<ResourceRequest> ask2 = new ArrayList<ResourceRequest>(); ask2.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0), ResourceRequest.ANY, BuilderUtils.newResource(2 * GB, 1), 1)); fs.allocate(appAttemptId2, ask2, emptyId, null, null); // Trigger container assignment fs.handle(new NodeUpdateSchedulerEvent(n1)); // Get the allocation for the applications and verify headroom Allocation allocation1 = fs.allocate(appAttemptId1, emptyAsk, emptyId, null, null); Assert.assertEquals("Allocation headroom", 1 * GB, allocation1 .getResourceLimit().getMemory()); Allocation allocation2 = fs.allocate(appAttemptId2, emptyAsk, emptyId, null, null); Assert.assertEquals("Allocation headroom", 1 * GB, allocation2 .getResourceLimit().getMemory()); rm.stop(); } @Test(timeout = 60000) public void testResourceOverCommit() throws 
Exception { MockRM rm = new MockRM(conf); rm.start(); MockNM nm1 = rm.registerNode("127.0.0.1:1234", 4 * GB); RMApp app1 = rm.submitApp(2048); // kick the scheduling, 2 GB given to AM1, remaining 2GB on nm1 nm1.nodeHeartbeat(true); RMAppAttempt attempt1 = app1.getCurrentAppAttempt(); MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId()); am1.registerAppAttempt(); SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); // check node report, 2 GB used and 2 GB available Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory()); Assert.assertEquals(2 * GB, report_nm1.getAvailableResource().getMemory()); // add request for containers am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 2 * GB, 1, 1); AllocateResponse alloc1Response = am1.schedule(); // send the request // kick the scheduler, 2 GB given to AM1, resource remaining 0 nm1.nodeHeartbeat(true); while (alloc1Response.getAllocatedContainers().size() < 1) { LOG.info("Waiting for containers to be created for app 1..."); Thread.sleep(1000); alloc1Response = am1.schedule(); } List<Container> allocated1 = alloc1Response.getAllocatedContainers(); Assert.assertEquals(1, allocated1.size()); Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemory()); Assert.assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId()); report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); // check node report, 4 GB used and 0 GB available Assert.assertEquals(0, report_nm1.getAvailableResource().getMemory()); Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory()); // check container is assigned with 2 GB. Container c1 = allocated1.get(0); Assert.assertEquals(2 * GB, c1.getResource().getMemory()); // update node resource to 2 GB, so resource is over-consumed. 
Map<NodeId, ResourceOption> nodeResourceMap = new HashMap<NodeId, ResourceOption>(); nodeResourceMap.put(nm1.getNodeId(), ResourceOption.newInstance(Resource.newInstance(2 * GB, 1), -1)); UpdateNodeResourceRequest request = UpdateNodeResourceRequest.newInstance(nodeResourceMap); rm.getAdminService().updateNodeResource(request); // Now, the used resource is still 4 GB, and available resource is minus // value. report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory()); Assert.assertEquals(-2 * GB, report_nm1.getAvailableResource().getMemory()); // Check container can complete successfully in case of resource // over-commitment. ContainerStatus containerStatus = BuilderUtils.newContainerStatus(c1.getId(), ContainerState.COMPLETE, "", 0); nm1.containerStatus(containerStatus); int waitCount = 0; while (attempt1.getJustFinishedContainers().size() < 1 && waitCount++ != 20) { LOG.info("Waiting for containers to be finished for app 1... Tried " + waitCount + " times already.."); Thread.sleep(100); } Assert.assertEquals(1, attempt1.getJustFinishedContainers().size()); Assert.assertEquals(1, am1.schedule().getCompletedContainersStatuses() .size()); report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory()); // As container return 2 GB back, the available resource becomes 0 again. 
Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory()); rm.stop(); } private void checkApplicationResourceUsage(int expected, Application application) { Assert.assertEquals(expected, application.getUsedResources().getMemory()); } private void checkNodeResourceUsage(int expected, org.apache.hadoop.yarn.server.resourcemanager.NodeManager node) { Assert.assertEquals(expected, node.getUsed().getMemory()); node.checkResourceUsage(); } public static void main(String[] arg) throws Exception { TestFifoScheduler t = new TestFifoScheduler(); t.setUp(); t.testFifoScheduler(); t.tearDown(); } private RMAppImpl createMockRMApp(ApplicationAttemptId attemptId, RMContext context) { RMAppImpl app = mock(RMAppImpl.class); when(app.getApplicationId()).thenReturn(attemptId.getApplicationId()); RMAppAttemptImpl attempt = mock(RMAppAttemptImpl.class); when(attempt.getAppAttemptId()).thenReturn(attemptId); RMAppAttemptMetrics attemptMetric = mock(RMAppAttemptMetrics.class); when(attempt.getRMAppAttemptMetrics()).thenReturn(attemptMetric); when(app.getCurrentAppAttempt()).thenReturn(attempt); ApplicationSubmissionContext submissionContext = mock(ApplicationSubmissionContext.class); when(submissionContext.getUnmanagedAM()).thenReturn(false); when(attempt.getSubmissionContext()).thenReturn(submissionContext); context.getRMApps().putIfAbsent(attemptId.getApplicationId(), app); return app; } }
apache-2.0
donNewtonAlpha/onos
providers/pcep/tunnel/src/test/java/org/onosproject/provider/pcep/tunnel/impl/TunnelServiceAdapter.java
4163
/*
 * Copyright 2015-present Open Networking Laboratory
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.onosproject.provider.pcep.tunnel.impl;

import org.onosproject.core.ApplicationId;
import org.onosproject.incubator.net.tunnel.Tunnel;
import org.onosproject.incubator.net.tunnel.TunnelEndPoint;
import org.onosproject.incubator.net.tunnel.TunnelId;
import org.onosproject.incubator.net.tunnel.TunnelListener;
import org.onosproject.incubator.net.tunnel.TunnelName;
import org.onosproject.incubator.net.tunnel.TunnelService;
import org.onosproject.incubator.net.tunnel.TunnelSubscription;
import org.onosproject.net.Annotations;
import org.onosproject.net.DeviceId;
import org.onosproject.net.ElementId;
import java.util.Collection;
import java.util.Collections;
import org.onosproject.net.Path;

/**
 * No-op adapter implementation of {@link TunnelService} for unit tests:
 * test classes extend this adapter and override only the methods their
 * scenario exercises.
 *
 * <p>Stub behavior as implemented below:
 * <ul>
 *   <li>single-object borrow/setup/query methods return {@code null};</li>
 *   <li>boolean down/return operations return {@code false};</li>
 *   <li>collection-returning query methods return immutable empty
 *       collections;</li>
 *   <li>listener registration/removal does nothing.</li>
 * </ul>
 */
public class TunnelServiceAdapter implements TunnelService {

    // --- borrow operations: all stubbed to null ---
    // NOTE(review): the Collection-returning borrowTunnel overloads return
    // null rather than an empty collection; callers iterating the result
    // must override these — confirm against TunnelService's contract.

    @Override
    public Tunnel borrowTunnel(ApplicationId consumerId, TunnelId tunnelId,
                               Annotations... annotations) {
        return null;
    }

    @Override
    public Collection<Tunnel> borrowTunnel(ApplicationId consumerId,
                                           TunnelName tunnelName,
                                           Annotations... annotations) {
        return null;
    }

    @Override
    public Collection<Tunnel> borrowTunnel(ApplicationId consumerId,
                                           TunnelEndPoint src, TunnelEndPoint dst,
                                           Annotations... annotations) {
        return null;
    }

    @Override
    public Collection<Tunnel> borrowTunnel(ApplicationId consumerId,
                                           TunnelEndPoint src, TunnelEndPoint dst,
                                           Tunnel.Type type,
                                           Annotations... annotations) {
        return null;
    }

    // --- lifecycle operations: setup yields no id; down/return report failure ---

    @Override
    public TunnelId setupTunnel(ApplicationId producerId, ElementId srcElementId,
                                Tunnel tunnel, Path path) {
        return null;
    }

    @Override
    public boolean downTunnel(ApplicationId producerId, TunnelId tunnelId) {
        return false;
    }

    @Override
    public boolean returnTunnel(ApplicationId consumerId, TunnelId tunnelId,
                                Annotations... annotations) {
        return false;
    }

    @Override
    public boolean returnTunnel(ApplicationId consumerId, TunnelName tunnelName,
                                Annotations... annotations) {
        return false;
    }

    @Override
    public boolean returnTunnel(ApplicationId consumerId, TunnelEndPoint src,
                                TunnelEndPoint dst, Tunnel.Type type,
                                Annotations... annotations) {
        return false;
    }

    @Override
    public boolean returnTunnel(ApplicationId consumerId, TunnelEndPoint src,
                                TunnelEndPoint dst, Annotations... annotations) {
        return false;
    }

    // --- query operations: empty results ---

    @Override
    public Tunnel queryTunnel(TunnelId tunnelId) {
        return null;
    }

    @Override
    public Collection<TunnelSubscription> queryTunnelSubscription(ApplicationId consumerId) {
        return Collections.emptySet();
    }

    @Override
    public Collection<Tunnel> queryTunnel(Tunnel.Type type) {
        return Collections.emptySet();
    }

    @Override
    public Collection<Tunnel> queryTunnel(TunnelEndPoint src, TunnelEndPoint dst) {
        return Collections.emptySet();
    }

    @Override
    public Collection<Tunnel> queryAllTunnels() {
        return Collections.emptyList();
    }

    @Override
    public int tunnelCount() {
        return 0;
    }

    // NOTE(review): unlike the query methods above, this returns null
    // instead of an empty Iterable — tests that iterate it must override.
    @Override
    public Iterable<Tunnel> getTunnels(DeviceId deviceId) {
        return null;
    }

    // --- listener management: intentionally no-ops for tests ---

    @Override
    public void addListener(TunnelListener listener) {
    }

    @Override
    public void removeListener(TunnelListener listener) {
    }
}
apache-2.0
robzor92/hops
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java
23955
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.nodelabels; import static org.junit.Assert.assertTrue; import java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.HashSet; import java.util.Map; import java.util.Set; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeLabel; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Sets; public class TestCommonNodeLabelsManager extends NodeLabelTestBase { DummyCommonNodeLabelsManager mgr = null; @Before public void before() { mgr = new DummyCommonNodeLabelsManager(); Configuration conf = new YarnConfiguration(); conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true); mgr.init(conf); mgr.start(); } @After public void after() { mgr.stop(); } @Test(timeout = 5000) public void testAddRemovelabel() throws Exception { // Add some label 
mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("hello")); verifyNodeLabelAdded(Sets.newHashSet("hello"), mgr.lastAddedlabels); mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("world")); mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("hello1", "world1")); verifyNodeLabelAdded(Sets.newHashSet("hello1", "world1"), mgr.lastAddedlabels); Assert.assertTrue(mgr.getClusterNodeLabelNames().containsAll( Sets.newHashSet("hello", "world", "hello1", "world1"))); try { mgr.addToCluserNodeLabels(Arrays.asList(NodeLabel.newInstance("hello1", false))); Assert.fail("IOException not thrown on exclusivity change of labels"); } catch (Exception e) { Assert.assertTrue("IOException is expected when exclusivity is modified", e instanceof IOException); } try { mgr.addToCluserNodeLabels(Arrays.asList(NodeLabel.newInstance("hello1", true))); } catch (Exception e) { Assert.assertFalse( "IOException not expected when no change in exclusivity", e instanceof IOException); } // try to remove null, empty and non-existed label, should fail for (String p : Arrays.asList(null, CommonNodeLabelsManager.NO_LABEL, "xx")) { boolean caught = false; try { mgr.removeFromClusterNodeLabels(Arrays.asList(p)); } catch (IOException e) { caught = true; } Assert.assertTrue("remove label should fail " + "when label is null/empty/non-existed", caught); } // Remove some label mgr.removeFromClusterNodeLabels(Arrays.asList("hello")); assertCollectionEquals(Sets.newHashSet("hello"), mgr.lastRemovedlabels); Assert.assertTrue(mgr.getClusterNodeLabelNames().containsAll( Arrays.asList("world", "hello1", "world1"))); mgr.removeFromClusterNodeLabels(Arrays .asList("hello1", "world1", "world")); Assert.assertTrue(mgr.lastRemovedlabels.containsAll(Sets.newHashSet( "hello1", "world1", "world"))); Assert.assertTrue(mgr.getClusterNodeLabelNames().isEmpty()); } @Test(timeout = 5000) public void testAddlabelWithCase() throws Exception { // Add some label, case will not ignore here 
mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("HeLlO")); verifyNodeLabelAdded(Sets.newHashSet("HeLlO"), mgr.lastAddedlabels); Assert.assertFalse(mgr.getClusterNodeLabelNames().containsAll( Arrays.asList("hello"))); } @Test(timeout = 5000) public void testAddlabelWithExclusivity() throws Exception { // Add some label, case will not ignore here mgr.addToCluserNodeLabels(Arrays.asList(NodeLabel.newInstance("a", false), NodeLabel.newInstance("b", true))); Assert.assertFalse(mgr.isExclusiveNodeLabel("a")); Assert.assertTrue(mgr.isExclusiveNodeLabel("b")); } @Test(timeout = 5000) public void testAddInvalidlabel() throws IOException { boolean caught = false; try { Set<String> set = new HashSet<String>(); set.add(null); mgr.addToCluserNodeLabelsWithDefaultExclusivity(set); } catch (IOException e) { caught = true; } Assert.assertTrue("null label should not add to repo", caught); caught = false; try { mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of(CommonNodeLabelsManager.NO_LABEL)); } catch (IOException e) { caught = true; } Assert.assertTrue("empty label should not add to repo", caught); caught = false; try { mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("-?")); } catch (IOException e) { caught = true; } Assert.assertTrue("invalid label charactor should not add to repo", caught); caught = false; try { mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of(StringUtils.repeat("c", 257))); } catch (IOException e) { caught = true; } Assert.assertTrue("too long label should not add to repo", caught); caught = false; try { mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("-aaabbb")); } catch (IOException e) { caught = true; } Assert.assertTrue("label cannot start with \"-\"", caught); caught = false; try { mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("_aaabbb")); } catch (IOException e) { caught = true; } Assert.assertTrue("label cannot start with \"_\"", caught); caught = false; try { 
mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("a^aabbb")); } catch (IOException e) { caught = true; } Assert.assertTrue("label cannot contains other chars like ^[] ...", caught); caught = false; try { mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("aa[a]bbb")); } catch (IOException e) { caught = true; } Assert.assertTrue("label cannot contains other chars like ^[] ...", caught); } @SuppressWarnings({ "unchecked", "rawtypes" }) @Test(timeout = 5000) public void testAddReplaceRemoveLabelsOnNodes() throws Exception { // set a label on a node, but label doesn't exist boolean caught = false; try { mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("node"), toSet("label"))); } catch (IOException e) { caught = true; } Assert.assertTrue("trying to set a label to a node but " + "label doesn't exist in repository should fail", caught); // set a label on a node, but node is null or empty try { mgr.replaceLabelsOnNode(ImmutableMap.of( toNodeId(CommonNodeLabelsManager.NO_LABEL), toSet("label"))); } catch (IOException e) { caught = true; } Assert.assertTrue("trying to add a empty node but succeeded", caught); // set node->label one by one mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("p1", "p2", "p3")); mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet("p1"))); mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet("p2"))); mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n2"), toSet("p3"))); assertMapEquals(mgr.getNodeLabels(), ImmutableMap.of(toNodeId("n1"), toSet("p2"), toNodeId("n2"), toSet("p3"))); assertMapEquals(mgr.lastNodeToLabels, ImmutableMap.of(toNodeId("n2"), toSet("p3"))); // set bunch of node->label mgr.replaceLabelsOnNode((Map) ImmutableMap.of(toNodeId("n3"), toSet("p3"), toNodeId("n1"), toSet("p1"))); assertMapEquals(mgr.getNodeLabels(), ImmutableMap.of(toNodeId("n1"), toSet("p1"), toNodeId("n2"), toSet("p3"), toNodeId("n3"), toSet("p3"))); assertMapEquals(mgr.lastNodeToLabels, 
ImmutableMap.of(toNodeId("n3"), toSet("p3"), toNodeId("n1"), toSet("p1"))); /* * n1: p1 * n2: p3 * n3: p3 */ // remove label on node mgr.removeLabelsFromNode(ImmutableMap.of(toNodeId("n1"), toSet("p1"))); assertMapEquals(mgr.getNodeLabels(), ImmutableMap.of(toNodeId("n2"), toSet("p3"), toNodeId("n3"), toSet("p3"))); assertMapEquals(mgr.lastNodeToLabels, ImmutableMap.of(toNodeId("n1"), CommonNodeLabelsManager.EMPTY_STRING_SET)); // add label on node mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n1"), toSet("p1"))); assertMapEquals( mgr.getNodeLabels(), ImmutableMap.of(toNodeId("n1"), toSet("p1"), toNodeId("n2"), toSet("p3"), toNodeId("n3"), toSet("p3"))); assertMapEquals(mgr.lastNodeToLabels, ImmutableMap.of(toNodeId("n1"), toSet("p1"))); // remove labels on node mgr.removeLabelsFromNode(ImmutableMap.of(toNodeId("n1"), toSet("p1"), toNodeId("n2"), toSet("p3"), toNodeId("n3"), toSet("p3"))); Assert.assertEquals(0, mgr.getNodeLabels().size()); assertMapEquals(mgr.lastNodeToLabels, ImmutableMap.of(toNodeId("n1"), CommonNodeLabelsManager.EMPTY_STRING_SET, toNodeId("n2"), CommonNodeLabelsManager.EMPTY_STRING_SET, toNodeId("n3"), CommonNodeLabelsManager.EMPTY_STRING_SET)); } @Test(timeout = 5000) public void testRemovelabelWithNodes() throws Exception { mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("p1", "p2", "p3")); mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet("p1"))); mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n2"), toSet("p2"))); mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n3"), toSet("p3"))); mgr.removeFromClusterNodeLabels(ImmutableSet.of("p1")); assertMapEquals(mgr.getNodeLabels(), ImmutableMap.of(toNodeId("n2"), toSet("p2"), toNodeId("n3"), toSet("p3"))); assertCollectionEquals(Arrays.asList("p1"), mgr.lastRemovedlabels); mgr.removeFromClusterNodeLabels(ImmutableSet.of("p2", "p3")); Assert.assertTrue(mgr.getNodeLabels().isEmpty()); Assert.assertTrue(mgr.getClusterNodeLabelNames().isEmpty()); 
assertCollectionEquals(Arrays.asList("p2", "p3"), mgr.lastRemovedlabels); } @Test(timeout = 5000) public void testTrimLabelsWhenAddRemoveNodeLabels() throws IOException { mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet(" p1")); assertCollectionEquals(toSet("p1"), mgr.getClusterNodeLabelNames()); mgr.removeFromClusterNodeLabels(toSet("p1 ")); Assert.assertTrue(mgr.getClusterNodeLabelNames().isEmpty()); } @Test(timeout = 5000) public void testTrimLabelsWhenModifyLabelsOnNodes() throws IOException { mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet(" p1", "p2")); mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n1"), toSet("p1 "))); assertMapEquals( mgr.getNodeLabels(), ImmutableMap.of(toNodeId("n1"), toSet("p1"))); mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet(" p2"))); assertMapEquals( mgr.getNodeLabels(), ImmutableMap.of(toNodeId("n1"), toSet("p2"))); mgr.removeLabelsFromNode(ImmutableMap.of(toNodeId("n1"), toSet(" p2 "))); Assert.assertTrue(mgr.getNodeLabels().isEmpty()); } @Test(timeout = 5000) public void testReplaceLabelsOnHostsShouldUpdateNodesBelongTo() throws IOException { mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("p1", "p2", "p3")); mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n1"), toSet("p1"))); assertMapEquals( mgr.getNodeLabels(), ImmutableMap.of(toNodeId("n1"), toSet("p1"))); // Replace labels on n1:1 to P2 mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1:1"), toSet("p2"), toNodeId("n1:2"), toSet("p2"))); assertMapEquals(mgr.getNodeLabels(), ImmutableMap.of(toNodeId("n1"), toSet("p1"), toNodeId("n1:1"), toSet("p2"), toNodeId("n1:2"), toSet("p2"))); // Replace labels on n1 to P1, both n1:1/n1 will be P1 now mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet("p1"))); assertMapEquals(mgr.getNodeLabels(), ImmutableMap.of(toNodeId("n1"), toSet("p1"), toNodeId("n1:1"), toSet("p1"), toNodeId("n1:2"), toSet("p1"))); // Set labels on n1:1 to P2 again to verify if add/remove works 
mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1:1"), toSet("p2"))); } private void assertNodeLabelsDisabledErrorMessage(IOException e) { Assert.assertEquals(CommonNodeLabelsManager.NODE_LABELS_NOT_ENABLED_ERR, e.getMessage()); } @Test(timeout = 5000) public void testNodeLabelsDisabled() throws IOException { DummyCommonNodeLabelsManager mgr = new DummyCommonNodeLabelsManager(); Configuration conf = new YarnConfiguration(); conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, false); mgr.init(conf); mgr.start(); boolean caught = false; // add labels try { mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x")); } catch (IOException e) { assertNodeLabelsDisabledErrorMessage(e); caught = true; } // check exception caught Assert.assertTrue(caught); caught = false; // remove labels try { mgr.removeFromClusterNodeLabels(ImmutableSet.of("x")); } catch (IOException e) { assertNodeLabelsDisabledErrorMessage(e); caught = true; } // check exception caught Assert.assertTrue(caught); caught = false; // add labels to node try { mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("host", 0), CommonNodeLabelsManager.EMPTY_STRING_SET)); } catch (IOException e) { assertNodeLabelsDisabledErrorMessage(e); caught = true; } // check exception caught Assert.assertTrue(caught); caught = false; // remove labels from node try { mgr.removeLabelsFromNode(ImmutableMap.of(NodeId.newInstance("host", 0), CommonNodeLabelsManager.EMPTY_STRING_SET)); } catch (IOException e) { assertNodeLabelsDisabledErrorMessage(e); caught = true; } // check exception caught Assert.assertTrue(caught); caught = false; // replace labels on node try { mgr.replaceLabelsOnNode(ImmutableMap.of(NodeId.newInstance("host", 0), CommonNodeLabelsManager.EMPTY_STRING_SET)); } catch (IOException e) { assertNodeLabelsDisabledErrorMessage(e); caught = true; } // check exception caught Assert.assertTrue(caught); caught = false; mgr.close(); } @Test(timeout = 5000) public void testLabelsToNodes() throws 
IOException { mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("p1", "p2", "p3")); mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n1"), toSet("p1"))); Map<String, Set<NodeId>> labelsToNodes = mgr.getLabelsToNodes(); assertLabelsToNodesEquals( labelsToNodes, ImmutableMap.of( "p1", toSet(toNodeId("n1")))); assertLabelsToNodesEquals( labelsToNodes, transposeNodeToLabels(mgr.getNodeLabels())); // Replace labels on n1:1 to P2 mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1:1"), toSet("p2"), toNodeId("n1:2"), toSet("p2"))); labelsToNodes = mgr.getLabelsToNodes(); assertLabelsToNodesEquals( labelsToNodes, ImmutableMap.of( "p1", toSet(toNodeId("n1")), "p2", toSet(toNodeId("n1:1"),toNodeId("n1:2")))); assertLabelsToNodesEquals( labelsToNodes, transposeNodeToLabels(mgr.getNodeLabels())); // Replace labels on n1 to P1, both n1:1/n1 will be P1 now mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet("p1"))); labelsToNodes = mgr.getLabelsToNodes(); assertLabelsToNodesEquals( labelsToNodes, ImmutableMap.of( "p1", toSet(toNodeId("n1"),toNodeId("n1:1"),toNodeId("n1:2")))); assertLabelsToNodesEquals( labelsToNodes, transposeNodeToLabels(mgr.getNodeLabels())); // Set labels on n1:1 to P2 again to verify if add/remove works mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1:1"), toSet("p2"))); // Add p3 to n1, should makes n1:1 to be p2/p3, and n1:2 to be p1/p3 mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n2"), toSet("p3"))); labelsToNodes = mgr.getLabelsToNodes(); assertLabelsToNodesEquals( labelsToNodes, ImmutableMap.of( "p1", toSet(toNodeId("n1"),toNodeId("n1:2")), "p2", toSet(toNodeId("n1:1")), "p3", toSet(toNodeId("n2")))); assertLabelsToNodesEquals( labelsToNodes, transposeNodeToLabels(mgr.getNodeLabels())); // Remove P3 from n1, should makes n1:1 to be p2, and n1:2 to be p1 mgr.removeLabelsFromNode(ImmutableMap.of(toNodeId("n2"), toSet("p3"))); labelsToNodes = mgr.getLabelsToNodes(); assertLabelsToNodesEquals( labelsToNodes, ImmutableMap.of( "p1", 
toSet(toNodeId("n1"),toNodeId("n1:2")), "p2", toSet(toNodeId("n1:1")))); assertLabelsToNodesEquals( labelsToNodes, transposeNodeToLabels(mgr.getNodeLabels())); } @Test(timeout = 5000) public void testLabelsToNodesForSelectedLabels() throws IOException { mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("p1", "p2", "p3")); mgr.addLabelsToNode( ImmutableMap.of( toNodeId("n1:1"), toSet("p1"), toNodeId("n1:2"), toSet("p2"))); Set<String> setlabels = new HashSet<String>(Arrays.asList(new String[]{"p1"})); assertLabelsToNodesEquals(mgr.getLabelsToNodes(setlabels), ImmutableMap.of("p1", toSet(toNodeId("n1:1")))); // Replace labels on n1:1 to P3 mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet("p3"))); assertTrue(mgr.getLabelsToNodes(setlabels).isEmpty()); setlabels = new HashSet<String>(Arrays.asList(new String[]{"p2", "p3"})); assertLabelsToNodesEquals( mgr.getLabelsToNodes(setlabels), ImmutableMap.of( "p3", toSet(toNodeId("n1"), toNodeId("n1:1"),toNodeId("n1:2")))); mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n2"), toSet("p2"))); assertLabelsToNodesEquals( mgr.getLabelsToNodes(setlabels), ImmutableMap.of( "p2", toSet(toNodeId("n2")), "p3", toSet(toNodeId("n1"), toNodeId("n1:1"),toNodeId("n1:2")))); mgr.removeLabelsFromNode(ImmutableMap.of(toNodeId("n1"), toSet("p3"))); setlabels = new HashSet<String>(Arrays.asList(new String[]{"p1", "p2", "p3"})); assertLabelsToNodesEquals( mgr.getLabelsToNodes(setlabels), ImmutableMap.of( "p2", toSet(toNodeId("n2")))); mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n3"), toSet("p1"))); assertLabelsToNodesEquals( mgr.getLabelsToNodes(setlabels), ImmutableMap.of( "p1", toSet(toNodeId("n3")), "p2", toSet(toNodeId("n2")))); mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n2:2"), toSet("p3"))); assertLabelsToNodesEquals( mgr.getLabelsToNodes(setlabels), ImmutableMap.of( "p1", toSet(toNodeId("n3")), "p2", toSet(toNodeId("n2")), "p3", toSet(toNodeId("n2:2")))); setlabels = new HashSet<String>(Arrays.asList(new 
String[]{"p1"})); assertLabelsToNodesEquals(mgr.getLabelsToNodes(setlabels), ImmutableMap.of("p1", toSet(toNodeId("n3")))); } @Test(timeout = 5000) public void testNoMoreThanOneLabelExistedInOneHost() throws IOException { boolean failed = false; // As in YARN-2694, we temporarily disable no more than one label existed in // one host mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("p1", "p2", "p3")); try { mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet("p1", "p2"))); } catch (IOException e) { failed = true; } Assert.assertTrue("Should failed when set > 1 labels on a host", failed); try { mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n1"), toSet("p1", "p2"))); } catch (IOException e) { failed = true; } Assert.assertTrue("Should failed when add > 1 labels on a host", failed); mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n1"), toSet("p1"))); // add a same label to a node, #labels in this node is still 1, shouldn't // fail mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n1"), toSet("p1"))); try { mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n1"), toSet("p2"))); } catch (IOException e) { failed = true; } Assert.assertTrue("Should failed when #labels > 1 on a host after add", failed); } private void verifyNodeLabelAdded(Set<String> expectedAddedLabelNames, Collection<NodeLabel> addedNodeLabels) { Assert.assertEquals(expectedAddedLabelNames.size(), addedNodeLabels.size()); for (NodeLabel label : addedNodeLabels) { Assert.assertTrue(expectedAddedLabelNames.contains(label.getName())); } } @Test(timeout = 5000) public void testReplaceLabelsOnNodeInDistributedMode() throws Exception { //create new DummyCommonNodeLabelsManager than the one got from @before mgr.stop(); mgr = new DummyCommonNodeLabelsManager(); Configuration conf = new YarnConfiguration(); conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true); conf.set(YarnConfiguration.NODELABEL_CONFIGURATION_TYPE, YarnConfiguration.DISTRIBUTED_NODELABEL_CONFIGURATION_TYPE); mgr.init(conf); 
mgr.start(); mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("p1", "p2", "p3")); mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet("p1"))); Set<String> labelsByNode = mgr.getLabelsByNode(toNodeId("n1")); Assert.assertNull( "Labels are not expected to be written to the NodeLabelStore", mgr.lastNodeToLabels); Assert.assertNotNull("Updated labels should be available from the Mgr", labelsByNode); Assert.assertTrue(labelsByNode.contains("p1")); } @Test(timeout = 5000) public void testLabelsInfoToNodes() throws IOException { mgr.addToCluserNodeLabels(Arrays.asList(NodeLabel.newInstance("p1", false), NodeLabel.newInstance("p2", true), NodeLabel.newInstance("p3", true))); mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n1"), toSet("p1"))); Map<NodeLabel, Set<NodeId>> labelsToNodes = mgr.getLabelsInfoToNodes(); assertLabelsInfoToNodesEquals(labelsToNodes, ImmutableMap.of( NodeLabel.newInstance("p1", false), toSet(toNodeId("n1")))); } @Test(timeout = 5000) public void testGetNodeLabelsInfo() throws IOException { mgr.addToCluserNodeLabels(Arrays.asList(NodeLabel.newInstance("p1", false), NodeLabel.newInstance("p2", true), NodeLabel.newInstance("p3", false))); mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n1"), toSet("p2"))); mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n2"), toSet("p3"))); assertLabelInfoMapEquals(mgr.getNodeLabelsInfo(), ImmutableMap.of( toNodeId("n1"), toSet(NodeLabel.newInstance("p2", true)), toNodeId("n2"), toSet(NodeLabel.newInstance("p3", false)))); } }
apache-2.0
jwren/intellij-community
xml/impl/src/com/intellij/xml/template/formatter/TemplateFormatUtil.java
12103
// Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file. package com.intellij.xml.template.formatter; import com.intellij.formatting.Block; import com.intellij.formatting.FormattingModel; import com.intellij.formatting.FormattingModelBuilder; import com.intellij.formatting.Indent; import com.intellij.lang.Language; import com.intellij.lang.LanguageFormatting; import com.intellij.openapi.util.Pair; import com.intellij.openapi.util.TextRange; import com.intellij.psi.FileViewProvider; import com.intellij.psi.PsiElement; import com.intellij.psi.PsiErrorElement; import com.intellij.psi.PsiFile; import com.intellij.psi.codeStyle.CodeStyleSettings; import com.intellij.psi.templateLanguages.OuterLanguageElement; import com.intellij.psi.templateLanguages.TemplateLanguageFileViewProvider; import com.intellij.util.text.TextRangeUtil; import com.intellij.xml.psi.XmlPsiBundle; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import java.util.ArrayList; import java.util.Iterator; import java.util.List; public final class TemplateFormatUtil { private final static List<PsiElement> EMPTY_PSI_ELEMENT_LIST = new ArrayList<>(); private final static String[] IGNORABLE_ERROR_MESSAGES = { XmlPsiBundle.message("xml.parsing.closing.tag.matches.nothing"), XmlPsiBundle.message("xml.parsing.closing.tag.name.missing") }; private TemplateFormatUtil() { } @NotNull static List<PsiElement> findAllMarkupLanguageElementsInside(PsiElement outerLangElement) { PsiFile file = outerLangElement.getContainingFile(); if (file != null && file.getViewProvider() instanceof TemplateLanguageFileViewProvider) { TemplateLanguageFileViewProvider viewProvider = (TemplateLanguageFileViewProvider)file.getViewProvider(); return findAllElementsInside(outerLangElement.getTextRange(), viewProvider, false); } return EMPTY_PSI_ELEMENT_LIST; } @NotNull static List<PsiElement> 
findAllTemplateLanguageElementsInside(@NotNull PsiElement outerLangElement, @NotNull TemplateLanguageFileViewProvider viewProvider) { return findAllElementsInside(outerLangElement.getTextRange(), viewProvider, true); } @NotNull static List<PsiElement> findAllElementsInside(@NotNull TextRange range, @NotNull TemplateLanguageFileViewProvider viewProvider, boolean fromTemplate) { return findAllElementsInside(range, viewProvider, fromTemplate ? viewProvider.getBaseLanguage() : viewProvider.getTemplateDataLanguage()); } @NotNull public static List<PsiElement> findAllElementsInside(TextRange range, TemplateLanguageFileViewProvider viewProvider, Language language) { List<PsiElement> matchingElements = new ArrayList<>(); PsiElement currElement = viewProvider.findElementAt(range.getStartOffset(), language); while (currElement instanceof OuterLanguageElement) { currElement = currElement.getNextSibling(); } if (currElement != null) { currElement = findTopmostElementInRange(currElement, range); Pair<Integer, PsiElement> result = addElementSequence(currElement, range, matchingElements); int lastOffset = result.first; assert lastOffset >= 0 : "Failed to process elements in range: " + range; if (lastOffset < range.getEndOffset()) { matchingElements.addAll(findAllElementsInside(new TextRange(lastOffset, range.getEndOffset()), viewProvider, language)); } } return matchingElements; } private static Pair<Integer,PsiElement> addElementSequence(PsiElement startElement, TextRange range, List<? 
super PsiElement> targetList) { PsiElement currElement = startElement; int lastOffset = -1; while (currElement != null && (lastOffset = currElement.getTextRange().getEndOffset()) <= range.getEndOffset()) { if (!(currElement instanceof OuterLanguageElement)) { targetList.add(currElement); } currElement = currElement.getNextSibling(); } if (currElement != null && currElement.getTextRange().intersects(range)) { PsiElement child = currElement.getFirstChild(); if (child != null) { addElementSequence(child, range, targetList); } } return new Pair<>(lastOffset, currElement); } @NotNull public static PsiElement findTopmostElementInRange(@NotNull PsiElement original, TextRange fitToRange) { PsiElement currElement = original; PsiElement prevElement = original; while (currElement != null) { if ((currElement instanceof PsiFile) || !fitToRange.contains(currElement.getTextRange())) { if (!fitToRange.contains(prevElement.getTextRange())) { return original; } return prevElement; } prevElement = currElement; currElement = currElement.getParent(); } return original; } static List<Block> mergeBlocks(List<Block> originalBlocks, List<? 
extends Block> blocksToMerge, TextRange range) throws FragmentedTemplateException { if (blocksToMerge.isEmpty()) return originalBlocks; List<Block> result = new ArrayList<>(); if (originalBlocks.isEmpty()) { for (Block mergeCandidate : blocksToMerge) { if (range.contains(mergeCandidate.getTextRange())) { result.add(mergeCandidate); } } return result; } List<TextRange> originalRanges = new ArrayList<>(); for (Block originalBlock : originalBlocks) { originalRanges.add(originalBlock.getTextRange()); } int lastOffset = range.getStartOffset(); for (Iterator<Block> originalBlockIterator = originalBlocks.iterator(); originalBlockIterator.hasNext();) { Block originalBlock = originalBlockIterator.next(); int startOffset = originalBlock.getTextRange().getStartOffset(); if (lastOffset < startOffset) { lastOffset = fillGap(originalRanges, blocksToMerge, result, lastOffset, startOffset); if (lastOffset < startOffset) { lastOffset = fillGap(originalRanges, originalBlocks, result, lastOffset, startOffset); } } Block mergeableBlock = getBlockContaining(blocksToMerge, originalRanges, originalBlock.getTextRange()); if (mergeableBlock != null) { if (mergeableBlock.getTextRange().getStartOffset() >= lastOffset) { result.add(mergeableBlock); lastOffset = mergeableBlock.getTextRange().getEndOffset(); } } else { if (startOffset >= lastOffset) { result.add(originalBlock); originalBlockIterator.remove(); lastOffset = originalBlock.getTextRange().getEndOffset(); } } } if (lastOffset < range.getEndOffset()) { lastOffset = fillGap(originalRanges, blocksToMerge, result, lastOffset, range.getEndOffset()); if (lastOffset < range.getEndOffset()) { fillGap(originalRanges, originalBlocks, result, lastOffset, range.getEndOffset()); } } return result; } private static int fillGap(List<? extends TextRange> originalRanges, List<? extends Block> blocks, List<? 
super Block> result, int startOffset, int endOffset) throws FragmentedTemplateException { return fillGap(null, originalRanges, blocks, result, startOffset, endOffset, 0); } private static int fillGap(@Nullable Block parent, List<? extends TextRange> originalRanges, List<? extends Block> blocks, List<? super Block> result, int startOffset, int endOffset, int depth) throws FragmentedTemplateException { int lastOffset = startOffset; TextRange currRange = new TextRange(lastOffset, endOffset); for (Block block : blocks) { if (lastOffset == endOffset || block.getTextRange().getStartOffset() > endOffset) return lastOffset; if (currRange.contains(block.getTextRange())) { result.add(block); if (parent != null && block instanceof IndentInheritingBlock) { ((IndentInheritingBlock)block).setIndent(parent.getIndent()); } lastOffset = block.getTextRange().getEndOffset(); currRange = new TextRange(lastOffset, endOffset); } else if (currRange.intersects(block.getTextRange()) && TextRangeUtil.intersectsOneOf(block.getTextRange(), originalRanges)) { List<Block> subBlocks = block.getSubBlocks(); if (block instanceof TemplateLanguageBlock && ((TemplateLanguageBlock)block).containsErrorElements()) { throw new FragmentedTemplateException(); } lastOffset = fillGap(block, originalRanges, subBlocks, result, lastOffset, endOffset, depth + 1); currRange = new TextRange(lastOffset, endOffset); } } return lastOffset; } private static Block getBlockContaining(List<? extends Block> blockList, List<? extends TextRange> originalRanges, TextRange range) { return getBlockContaining(blockList, originalRanges, range, 0); } @Nullable private static Block getBlockContaining(List<? extends Block> blockList, List<? 
extends TextRange> originalRanges, TextRange range, int depth) { for (Block block : blockList) { if (block.getTextRange().contains(range)) { if (TextRangeUtil.intersectsOneOf(block.getTextRange(), originalRanges)) { Block containingBlock = getBlockContaining(block.getSubBlocks(), originalRanges, range, depth + 1); if (containingBlock != null) return containingBlock; } return block; } } return null; } /** * Creates a template language block for the given outer element if possible. Finds all the elements matching the current outerElement in * a template language PSI tree and builds a submodel for them with a composite root block. * * @param outerElement The outer element for which the submodel (template language root block) is to be built. * @param settings Code style settings to be used to build the submodel. * @param indent The indent for the root block. * @return Template language root block (submodel) or null if it can't be built. */ @Nullable public static Block buildTemplateLanguageBlock(@NotNull OuterLanguageElement outerElement, @NotNull CodeStyleSettings settings, @Nullable Indent indent) { try { PsiFile file = outerElement.getContainingFile(); FileViewProvider viewProvider = outerElement.getContainingFile().getViewProvider(); if (viewProvider instanceof TemplateLanguageFileViewProvider) { Language language = outerElement.getLanguage(); FormattingModelBuilder builder = LanguageFormatting.INSTANCE.forContext(language, outerElement); if (builder instanceof AbstractXmlTemplateFormattingModelBuilder) { FormattingModel model = ((AbstractXmlTemplateFormattingModelBuilder)builder) .createTemplateFormattingModel(file, (TemplateLanguageFileViewProvider)viewProvider, outerElement, settings, indent); if (model != null) { return model.getRootBlock(); } } } } catch (FragmentedTemplateException e) { // Ignore and return null } return null; } public static boolean isErrorElement(@NotNull PsiElement element) { if (element instanceof PsiErrorElement) { String description = 
((PsiErrorElement)element).getErrorDescription(); for (String ignorableMessage : IGNORABLE_ERROR_MESSAGES) { if (ignorableMessage.equals(description)) return false; } return true; } return false; } }
apache-2.0
luchuangbin/test1
src/org/jivesoftware/smackx/workgroup/WorkgroupInvitation.java
4667
/** * $Revision$ * $Date$ * * Copyright 2003-2007 Jive Software. * * All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jivesoftware.smackx.workgroup; import java.util.List; import java.util.Map; /** * An immutable class wrapping up the basic information which comprises a group chat invitation. * * @author loki der quaeler */ public class WorkgroupInvitation { protected String uniqueID; protected String sessionID; protected String groupChatName; protected String issuingWorkgroupName; protected String messageBody; protected String invitationSender; protected Map<String, List<String>> metaData; /** * This calls the 5-argument constructor with a null MetaData argument value * * @param jid the jid string with which the issuing AgentSession or Workgroup instance * was created * @param group the jid of the room to which the person is invited * @param workgroup the jid of the workgroup issuing the invitation * @param sessID the session id associated with the pending chat * @param msgBody the body of the message which contained the invitation * @param from the user jid who issued the invitation, if known, null otherwise */ public WorkgroupInvitation (String jid, String group, String workgroup, String sessID, String msgBody, String from) { this(jid, group, workgroup, sessID, msgBody, from, null); } /** * @param jid the jid string with which the issuing AgentSession or Workgroup instance * was created * @param group the jid of the room to which the person is 
invited * @param workgroup the jid of the workgroup issuing the invitation * @param sessID the session id associated with the pending chat * @param msgBody the body of the message which contained the invitation * @param from the user jid who issued the invitation, if known, null otherwise * @param metaData the metadata sent with the invitation */ public WorkgroupInvitation (String jid, String group, String workgroup, String sessID, String msgBody, String from, Map<String, List<String>> metaData) { super(); this.uniqueID = jid; this.sessionID = sessID; this.groupChatName = group; this.issuingWorkgroupName = workgroup; this.messageBody = msgBody; this.invitationSender = from; this.metaData = metaData; } /** * @return the jid string with which the issuing AgentSession or Workgroup instance * was created. */ public String getUniqueID () { return this.uniqueID; } /** * @return the session id associated with the pending chat; working backwards temporally, * this session id should match the session id to the corresponding offer request * which resulted in this invitation. */ public String getSessionID () { return this.sessionID; } /** * @return the jid of the room to which the person is invited. */ public String getGroupChatName () { return this.groupChatName; } /** * @return the name of the workgroup from which the invitation was issued. */ public String getWorkgroupName () { return this.issuingWorkgroupName; } /** * @return the contents of the body-block of the message that housed this invitation. */ public String getMessageBody () { return this.messageBody; } /** * @return the user who issued the invitation, or null if it wasn't known. */ public String getInvitationSender () { return this.invitationSender; } /** * @return the meta data associated with the invitation, or null if this instance was * constructed with none */ public Map<String, List<String>> getMetaData () { return this.metaData; } }
apache-2.0
android-gems/ThemeDemo
baselibrary/src/main/java/com/mingle/widget/animation/SimpleAnimListener.java
380
package com.mingle.widget.animation; /** * Created by zzz40500 on 15/9/3. */ public class SimpleAnimListener { public void onAnimationStart(CRAnimation animation) { } public void onAnimationEnd(CRAnimation animation) { } public void onAnimationCancel(CRAnimation animation) { } public void onAnimationRepeat(CRAnimation animation) { } }
apache-2.0
rnathanday/dryad-repo
dspace-api/src/main/java/org/dspace/content/packager/PackageValidationException.java
1493
/** * The contents of this file are subject to the license and copyright * detailed in the LICENSE and NOTICE files at the root of the source * tree and available online at * * http://www.dspace.org/license/ */ package org.dspace.content.packager; /** * This represents a failure when importing or exporting a package * caused by invalid unacceptable package format or contents; for * example, missing files that were mentioned in the manifest, or * extra files not in manifest, or lack of a manifest. * <p> * When throwing a PackageValidationException, be sure the message * includes enough specific information to let the end user diagnose * the problem, i.e. what files appear to be missing from the manifest * or package, or the details of a checksum error on a file. * * @author Larry Stone * @version $Revision$ */ public class PackageValidationException extends PackageException { /** * Create a new exception with the given message. * @param message - diagnostic message. */ public PackageValidationException(String message) { super(message); } /** * Create a new exception wrapping it around another exception. * @param exception - exception specifying the cause of this failure. */ public PackageValidationException(Exception exception) { super(exception); } public PackageValidationException(String message, Exception exception) { super(message, exception); } }
bsd-3-clause
shaotuanchen/sunflower_exp
tools/source/gcc-4.2.4/libjava/java/lang/Win32Process.java
2160
// Win32Process.java - Subclass of Process for Win32 systems. /* Copyright (C) 2002, 2003 Free Software Foundation This file is part of libgcj. This software is copyrighted work licensed under the terms of the Libgcj License. Please consult the file "LIBGCJ_LICENSE" for details. */ package java.lang; import java.io.File; import java.io.InputStream; import java.io.OutputStream; import java.io.IOException; /** * @author Adam Megacz * @date Feb 24, 2002 */ // This is entirely internal to our implementation. // This file is copied to `ConcreteProcess.java' before compilation. // Hence the class name apparently does not match the file name. final class ConcreteProcess extends Process { public native void destroy (); public int exitValue () { if (! hasExited ()) throw new IllegalThreadStateException ("Process has not exited"); return exitCode; } public InputStream getErrorStream () { return errorStream; } public InputStream getInputStream () { return inputStream; } public OutputStream getOutputStream () { return outputStream; } public native int waitFor () throws InterruptedException; public ConcreteProcess (String[] progarray, String[] envp, File dir) throws IOException { for (int i = 0; i < progarray.length; i++) { String s = progarray[i]; if ( (s.indexOf (' ') >= 0) || (s.indexOf ('\t') >= 0)) progarray[i] = "\"" + s + "\""; } startProcess (progarray, envp, dir); } // The standard streams (stdin, stdout and stderr, respectively) // of the child as seen by the parent process. private OutputStream outputStream; private InputStream inputStream; private InputStream errorStream; // Handle to the child process - cast to HANDLE before use. private int procHandle; // Exit code of the child if it has exited. private int exitCode; private native boolean hasExited (); private native void startProcess (String[] progarray, String[] envp, File dir) throws IOException; private native void cleanup (); }
bsd-3-clause
KunkkaCoco/java-base
src/main/java/headfirst/combining/composite/RedheadDuck.java
204
package headfirst.combining.composite; public class RedheadDuck implements Quackable { public void quack() { System.out.println("Quack"); } public String toString() { return "Redhead Duck"; } }
epl-1.0
johannrichard/openhab2-addons
addons/binding/org.openhab.binding.rfxcom/src/main/java/org/openhab/binding/rfxcom/internal/messages/RFXComInterfaceMessage.java
12368
/** * Copyright (c) 2010-2018 by the respective copyright holders. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * which accompanies this distribution, and is available at * http://www.eclipse.org/legal/epl-v10.html */ package org.openhab.binding.rfxcom.internal.messages; import static org.openhab.binding.rfxcom.internal.messages.ByteEnumUtil.fromByte; import java.io.UnsupportedEncodingException; import org.eclipse.smarthome.core.types.Type; import org.openhab.binding.rfxcom.internal.exceptions.RFXComException; /** * RFXCOM data class for interface message. * * @author Pauli Anttila - Initial contribution * @author Ivan Martinez - Older firmware support (OH1) */ public class RFXComInterfaceMessage extends RFXComBaseMessage { public enum SubType implements ByteEnumWrapper { UNKNOWN_COMMAND(-1), RESPONSE(0), UNKNOWN_RTS_REMOTE(1), NO_EXTENDED_HW_PRESENT(2), LIST_RFY_REMOTES(3), LIST_ASA_REMOTES(4), START_RECEIVER(7); private final int subType; SubType(int subType) { this.subType = subType; } @Override public byte toByte() { return (byte) subType; } } public enum Commands implements ByteEnumWrapper { RESET(0), // Reset the receiver/transceiver. No answer is transmitted! 
GET_STATUS(2), // Get Status, return firmware versions and configuration of the interface SET_MODE(3), // Set mode msg1-msg5, return firmware versions and configuration of the interface ENABLE_ALL(4), // Enable all receiving modes of the receiver/transceiver ENABLE_UNDECODED_PACKETS(5), // Enable reporting of undecoded packets SAVE_RECEIVING_MODES(6), // Save receiving modes of the receiver/transceiver in non-volatile memory START_RECEIVER(7), // Start RFXtrx receiver T1(8), // For internal use by RFXCOM T2(9), // For internal use by RFXCOM UNSUPPORTED_COMMAND(-1); // wrong command received from the application private final int command; Commands(int command) { this.command = command; } @Override public byte toByte() { return (byte) command; } } public enum TransceiverType implements ByteEnumWrapper { _310MHZ(80), _315MHZ(81), _433_92MHZ_RECEIVER_ONLY(82), _433_92MHZ_TRANSCEIVER(83), _868_00MHZ(85), _868_00MHZ_FSK(86), _868_30MHZ(87), _868_30MHZ_FSK(88), _868_35MHZ(89), _868_35MHZ_FSK(90), _868_95MHZ_FSK(91); private final int type; TransceiverType(int type) { this.type = type; } @Override public byte toByte() { return (byte) type; } } public enum FirmwareType implements ByteEnumWrapper { TYPE1_RX_ONLY(0), TYPE1(1), TYPE2(2), EXT(3), EXT2(4); private final int type; FirmwareType(int type) { this.type = type; } @Override public byte toByte() { return (byte) type; } } public SubType subType; public Commands command; public String text = ""; public TransceiverType transceiverType; public int firmwareVersion; public boolean enableUndecodedPackets; // 0x80 - Undecoded packets public boolean enableImagintronixOpusPackets; // 0x40 - Imagintronix/Opus (433.92) public boolean enableByronSXPackets; // 0x20 - Byron SX (433.92) public boolean enableRSLPackets; // 0x10 - RSL (433.92) public boolean enableLighting4Packets; // 0x08 - Lighting4 (433.92) public boolean enableFineOffsetPackets; // 0x04 - FineOffset / Viking (433.92) public boolean enableRubicsonPackets; // 0x02 - 
Rubicson (433.92) public boolean enableAEPackets; // 0x01 - AE (433.92) public boolean enableBlindsT1T2T3T4Packets; // 0x80 - BlindsT1/T2/T3/T4 (433.92) public boolean enableBlindsT0Packets; // 0x40 - BlindsT0 (433.92) public boolean enableProGuardPackets; // 0x20 - ProGuard (868.35 FSK) public boolean enableFS20Packets; // 0x10 - FS20 (868.35) public boolean enableLaCrossePackets; // 0x08 - La Crosse (433.92/868.30) public boolean enableHidekiUPMPackets; // 0x04 - Hideki/UPM (433.92) public boolean enableADPackets; // 0x02 - AD LightwaveRF (433.92) public boolean enableMertikPackets; // 0x01 - Mertik (433.92) public boolean enableVisonicPackets; // 0x80 - Visonic (315/868.95) public boolean enableATIPackets; // 0x40 - ATI (433.92) public boolean enableOregonPackets; // 0x20 - Oregon Scientific (433.92) public boolean enableMeiantechPackets; // 0x10 - Meiantech (433.92) public boolean enableHomeEasyPackets; // 0x08 - HomeEasy EU (433.92) public boolean enableACPackets; // 0x04 - AC (433.92) public boolean enableARCPackets; // 0x02 - ARC (433.92) public boolean enableX10Packets; // 0x01 - X10 (310/433.92) public boolean enableHomeConfortPackets; // 0x02 - HomeConfort (433.92) public boolean enableKEELOQPackets; // 0x01 - KEELOQ (433.92) public byte hardwareVersion1; public byte hardwareVersion2; public int outputPower; // -18dBm to +13dBm. N.B. maximum allowed is +10dBm public FirmwareType firmwareType; public RFXComInterfaceMessage(byte[] data) throws RFXComException { encodeMessage(data); } @Override public String toString() { String str = ""; str += super.toString(); str += ", Sub type = " + subType; str += ", Command = " + command; if (subType == SubType.RESPONSE) { str += ", Transceiver type = " + transceiverType; str += ", Hardware version = " + hardwareVersion1 + "." + hardwareVersion2; str += ", Firmware type = " + (firmwareType != null ? 
firmwareType : "unknown"); str += ", Firmware version = " + firmwareVersion; str += ", Output power = " + outputPower + "dBm"; str += ", Undecoded packets = " + enableUndecodedPackets; str += ", RFU6 packets = " + enableImagintronixOpusPackets; str += ", Byron SX packets packets (433.92) = " + enableByronSXPackets; str += ", RSL packets packets (433.92) = " + enableRSLPackets; str += ", Lighting4 packets (433.92) = " + enableLighting4Packets; str += ", FineOffset / Viking (433.92) packets = " + enableFineOffsetPackets; str += ", Rubicson (433.92) packets = " + enableRubicsonPackets; str += ", AE (433.92) packets = " + enableAEPackets; str += ", BlindsT1/T2/T3 (433.92) packets = " + enableBlindsT1T2T3T4Packets; str += ", BlindsT0 (433.92) packets = " + enableBlindsT0Packets; str += ", ProGuard (868.35 FSK) packets = " + enableProGuardPackets; str += ", FS20/Legrand CAD (868.35/433.92) packets = " + enableFS20Packets; str += ", La Crosse (433.92/868.30) packets = " + enableLaCrossePackets; str += ", Hideki/UPM (433.92) packets = " + enableHidekiUPMPackets; str += ", AD LightwaveRF (433.92) packets = " + enableADPackets; str += ", Mertik (433.92) packets = " + enableMertikPackets; str += ", Visonic (315/868.95) packets = " + enableVisonicPackets; str += ", ATI (433.92) packets = " + enableATIPackets; str += ", Oregon Scientific (433.92) packets = " + enableOregonPackets; str += ", Meiantech (433.92) packets = " + enableMeiantechPackets; str += ", HomeEasy EU (433.92) packets = " + enableHomeEasyPackets; str += ", AC (433.92) packets = " + enableACPackets; str += ", ARC (433.92) packets = " + enableARCPackets; str += ", X10 (310/433.92) packets = " + enableX10Packets; str += ", HomeConfort (433.92) packets = " + enableHomeConfortPackets; str += ", KEELOQ (433.92/868.95) packets = " + enableKEELOQPackets; } else if (subType == SubType.START_RECEIVER) { str += ", Text = " + text; } return str; } @Override public void encodeMessage(byte[] data) throws RFXComException { 
super.encodeMessage(data); subType = fromByte(SubType.class, super.subType); if (subType == SubType.RESPONSE) { command = fromByte(Commands.class, data[4]); transceiverType = fromByte(TransceiverType.class, data[5]); firmwareVersion = data[6] & 0xFF; enableUndecodedPackets = (data[7] & 0x80) != 0; enableImagintronixOpusPackets = (data[7] & 0x40) != 0; enableByronSXPackets = (data[7] & 0x20) != 0; enableRSLPackets = (data[7] & 0x10) != 0; enableLighting4Packets = (data[7] & 0x08) != 0; enableFineOffsetPackets = (data[7] & 0x04) != 0; enableRubicsonPackets = (data[7] & 0x02) != 0; enableAEPackets = (data[7] & 0x01) != 0; enableBlindsT1T2T3T4Packets = (data[8] & 0x80) != 0; enableBlindsT0Packets = (data[8] & 0x40) != 0; enableProGuardPackets = (data[8] & 0x20) != 0; enableFS20Packets = (data[8] & 0x10) != 0; enableLaCrossePackets = (data[8] & 0x08) != 0; enableHidekiUPMPackets = (data[8] & 0x04) != 0; enableADPackets = (data[8] & 0x02) != 0; enableMertikPackets = (data[8] & 0x01) != 0; enableVisonicPackets = (data[9] & 0x80) != 0; enableATIPackets = (data[9] & 0x40) != 0; enableOregonPackets = (data[9] & 0x20) != 0; enableMeiantechPackets = (data[9] & 0x10) != 0; enableHomeEasyPackets = (data[9] & 0x08) != 0; enableACPackets = (data[9] & 0x04) != 0; enableARCPackets = (data[9] & 0x02) != 0; enableX10Packets = (data[9] & 0x01) != 0; /* * Different firmware versions have slightly different message formats. * The firmware version numbering is unique to each hardware version * but the location of the hardware version in the message is one of * those things whose position varies. So we have to just look at the * firmware version and pray. This condition below is taken from the * openhab1-addons binding. 
*/ if ((firmwareVersion >= 95 && firmwareVersion <= 100) || (firmwareVersion >= 195 && firmwareVersion <= 200) || (firmwareVersion >= 251)) { enableHomeConfortPackets = (data[10] & 0x02) != 0; enableKEELOQPackets = (data[10] & 0x01) != 0; hardwareVersion1 = data[11]; hardwareVersion2 = data[12]; outputPower = data[13] - 18; firmwareType = fromByte(FirmwareType.class, data[14]); } else { hardwareVersion1 = data[10]; hardwareVersion2 = data[11]; } text = ""; } else if (subType == SubType.START_RECEIVER) { command = fromByte(Commands.class, data[4]); final int len = 16; final int dataOffset = 5; byte[] byteArray = new byte[len]; for (int i = dataOffset; i < (dataOffset + len); i++) { byteArray[i - dataOffset] += data[i]; } try { text = new String(byteArray, "ASCII"); } catch (UnsupportedEncodingException e) { // ignore } } else { // We don't handle the other subTypes but to avoid null pointer // exceptions we set command to something. It doesn't really // matter what but it may b printed in log messages so... command = Commands.UNSUPPORTED_COMMAND; } } @Override public byte[] decodeMessage() throws RFXComException { throw new UnsupportedOperationException(); } @Override public void convertFromState(String channelId, Type type) { throw new UnsupportedOperationException(); } }
epl-1.0
theoweiss/openhab2
bundles/org.openhab.binding.knx/src/main/java/org/openhab/binding/knx/internal/channel/ListenSpecImpl.java
1434
/** * Copyright (c) 2010-2019 Contributors to the openHAB project * * See the NOTICE file(s) distributed with this work for additional * information. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0 * * SPDX-License-Identifier: EPL-2.0 */ package org.openhab.binding.knx.internal.channel; import static java.util.stream.Collectors.toList; import java.util.Collections; import java.util.List; import org.eclipse.jdt.annotation.Nullable; import org.openhab.binding.knx.internal.client.InboundSpec; import tuwien.auto.calimero.GroupAddress; /** * Listen meta-data. * * @author Simon Kaufmann - initial contribution and API. * */ public class ListenSpecImpl extends AbstractSpec implements InboundSpec { private final List<GroupAddress> listenAddresses; public ListenSpecImpl(@Nullable ChannelConfiguration channelConfiguration, String defaultDPT) { super(channelConfiguration, defaultDPT); if (channelConfiguration != null) { this.listenAddresses = channelConfiguration.getListenGAs().stream().map(this::toGroupAddress) .collect(toList()); } else { this.listenAddresses = Collections.emptyList(); } } public List<GroupAddress> getGroupAddresses() { return listenAddresses; } }
epl-1.0
FauxFaux/jdk9-jaxws
src/jdk.xml.bind/share/classes/com/sun/tools/internal/xjc/reader/xmlschema/ct/ComplexTypeFieldBuilder.java
3843
/* * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ package com.sun.tools.internal.xjc.reader.xmlschema.ct; import java.util.HashMap; import java.util.Map; import com.sun.tools.internal.xjc.reader.xmlschema.BGMBuilder; import com.sun.tools.internal.xjc.reader.xmlschema.BindingComponent; import com.sun.xml.internal.xsom.XSComplexType; /** * single entry point of building a field expression from a complex type. * * One object is created for one {@link BGMBuilder}. * * @author * Kohsuke Kawaguchi (kohsuke.kawaguchi@sun.com) */ public final class ComplexTypeFieldBuilder extends BindingComponent { /** * All installed available complex type builders. * * <p> * Builders are tried in this order, to put specific ones first. 
*/ private final CTBuilder[] complexTypeBuilders = new CTBuilder[]{ new MultiWildcardComplexTypeBuilder(), new MixedExtendedComplexTypeBuilder(), new MixedComplexTypeBuilder(), new FreshComplexTypeBuilder(), new ExtendedComplexTypeBuilder(), new RestrictedComplexTypeBuilder(), new STDerivedComplexTypeBuilder() }; /** Records ComplexTypeBindingMode for XSComplexType. */ private final Map<XSComplexType,ComplexTypeBindingMode> complexTypeBindingModes = new HashMap<XSComplexType,ComplexTypeBindingMode>(); /** * Binds a complex type to a field expression. */ public void build( XSComplexType type ) { for( CTBuilder ctb : complexTypeBuilders ) if( ctb.isApplicable(type) ) { ctb.build(type); return; } assert false; // shall never happen } /** * Records the binding mode of the given complex type. * * <p> * Binding of a derived complex type often depends on that of the * base complex type. For example, when a base type is bound to * the getRest() method, all the derived complex types will be bound * in the same way. * * <p> * For this reason, we have to record how each complex type is being * bound. */ public void recordBindingMode( XSComplexType type, ComplexTypeBindingMode flag ) { // it is an error to override the flag. Object o = complexTypeBindingModes.put(type,flag); assert o==null; } /** * Obtains the binding mode recorded through * {@link #recordBindingMode(XSComplexType, ComplexTypeBindingMode)}. */ protected ComplexTypeBindingMode getBindingMode( XSComplexType type ) { ComplexTypeBindingMode r = complexTypeBindingModes.get(type); assert r!=null; return r; } }
gpl-2.0
patcon/TextSecure
src/org/thoughtcrime/securesms/mms/GifSlide.java
503
package org.thoughtcrime.securesms.mms;

import android.content.Context;
import android.net.Uri;

import java.io.IOException;

import ws.com.google.android.mms.pdu.PduPart;

/**
 * An image slide for GIF attachments. The full-size part's own data URI is
 * handed back as the thumbnail, presumably so the animation is not lost to
 * downscaling.
 */
public class GifSlide extends ImageSlide {

  public GifSlide(Context context, PduPart part) {
    super(context, part);
  }

  public GifSlide(Context context, Uri uri, long dataSize) throws IOException {
    super(context, uri, dataSize);
  }

  @Override
  public Uri getThumbnailUri() {
    // Use the original part directly instead of a generated thumbnail.
    PduPart part = getPart();
    return part.getDataUri();
  }
}
gpl-3.0
Ole8700/openhab
bundles/binding/org.openhab.binding.tinkerforge/src/main/java/org/openhab/binding/tinkerforge/internal/model/MLCD20x4Button.java
4698
/**
 *
 * Tinkerforge Binding Copyright (C) 2013 Theo Weiss <theo.weiss@gmail.com> contributed to: openHAB, the open Home Automation Bus.
 * Copyright (C) 2013, openHAB.org <admin@openhab.org>
 *
 * See the contributors.txt file in the distribution for a
 * full listing of individual contributors.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 3 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses>.
 *
 * Additional permission under GNU GPL version 3 section 7
 *
 * If you modify this Program, or any covered work, by linking or
 * combining it with Eclipse (or a modified version of that library),
 * containing parts covered by the terms of the Eclipse Public License
 * (EPL), the licensors of this Program grant you additional permission
 * to convey the resulting work.
 *
 */
package org.openhab.binding.tinkerforge.internal.model;

/**
 * <!-- begin-user-doc -->
 * A representation of the model object '<em><b>MLCD2 0x4 Button</b></em>'.
 *
 * Models a single button of a Tinkerforge LCD 20x4 bricklet as a sub-device
 * of {@link MBrickletLCD20x4}, acting as a switch actor.
 * <!-- end-user-doc -->
 *
 * <p>
 * The following features are supported:
 * <ul>
 *   <li>{@link org.openhab.binding.tinkerforge.internal.model.MLCD20x4Button#getDeviceType <em>Device Type</em>}</li>
 *   <li>{@link org.openhab.binding.tinkerforge.internal.model.MLCD20x4Button#getButtonNum <em>Button Num</em>}</li>
 *   <li>{@link org.openhab.binding.tinkerforge.internal.model.MLCD20x4Button#getCallbackPeriod <em>Callback Period</em>}</li>
 * </ul>
 * </p>
 *
 * @see org.openhab.binding.tinkerforge.internal.model.ModelPackage#getMLCD20x4Button()
 * @model
 * @generated
 */
public interface MLCD20x4Button extends MOutSwitchActor, MSubDevice<MBrickletLCD20x4> {
    /**
     * Returns the value of the '<em><b>Device Type</b></em>' attribute.
     * The default value is <code>"lcd_button"</code>.
     * <!-- begin-user-doc -->
     * <p>
     * Fixed device-type identifier for this sub-device; the attribute is
     * declared non-changeable with the default <code>"lcd_button"</code>.
     * </p>
     * <!-- end-user-doc -->
     * @return the value of the '<em>Device Type</em>' attribute.
     * @see org.openhab.binding.tinkerforge.internal.model.ModelPackage#getMLCD20x4Button_DeviceType()
     * @model default="lcd_button" unique="false" changeable="false"
     * @generated
     */
    String getDeviceType();

    /**
     * Returns the value of the '<em><b>Button Num</b></em>' attribute.
     * <!-- begin-user-doc -->
     * <p>
     * Presumably the index of the physical button on the LCD 20x4 bricklet
     * this object represents -- confirm against the Tinkerforge LCD 20x4 API.
     * </p>
     * <!-- end-user-doc -->
     * @return the value of the '<em>Button Num</em>' attribute.
     * @see #setButtonNum(short)
     * @see org.openhab.binding.tinkerforge.internal.model.ModelPackage#getMLCD20x4Button_ButtonNum()
     * @model unique="false"
     * @generated
     */
    short getButtonNum();

    /**
     * Sets the value of the '{@link org.openhab.binding.tinkerforge.internal.model.MLCD20x4Button#getButtonNum <em>Button Num</em>}' attribute.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @param value the new value of the '<em>Button Num</em>' attribute.
     * @see #getButtonNum()
     * @generated
     */
    void setButtonNum(short value);

    /**
     * Returns the value of the '<em><b>Callback Period</b></em>' attribute.
     * <!-- begin-user-doc -->
     * <p>
     * Period between button callbacks. The unit is not visible here
     * (presumably milliseconds) -- verify against the binding's callback
     * configuration.
     * </p>
     * <!-- end-user-doc -->
     * @return the value of the '<em>Callback Period</em>' attribute.
     * @see #setCallbackPeriod(int)
     * @see org.openhab.binding.tinkerforge.internal.model.ModelPackage#getMLCD20x4Button_CallbackPeriod()
     * @model unique="false"
     * @generated
     */
    int getCallbackPeriod();

    /**
     * Sets the value of the '{@link org.openhab.binding.tinkerforge.internal.model.MLCD20x4Button#getCallbackPeriod <em>Callback Period</em>}' attribute.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @param value the new value of the '<em>Callback Period</em>' attribute.
     * @see #getCallbackPeriod()
     * @generated
     */
    void setCallbackPeriod(int value);

} // MLCD20x4Button
gpl-3.0
oskopek/jfreechart-fse
src/main/java/org/jfree/chart/plot/CenterTextMode.java
1824
/* ===========================================================
 * JFreeChart : a free chart library for the Java(tm) platform
 * ===========================================================
 *
 * (C) Copyright 2000-2014, by Object Refinery Limited and Contributors.
 *
 * Project Info:  http://www.jfree.org/jfreechart/index.html
 *
 * This library is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
 * License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
 * USA.
 *
 * [Oracle and Java are registered trademarks of Oracle and/or its affiliates.
 * Other names may be trademarks of their respective owners.]
 *
 * -------------------
 * CenterTextMode.java
 * -------------------
 * (C) Copyright 2014, by Object Refinery Limited.
 *
 * Original Author:  David Gilbert (for Object Refinery Limited);
 * Contributor(s):   -;
 *
 * Changes
 * -------
 * 28-Feb-2014 : Version 1 (DG);
 *
 */

package org.jfree.chart.plot;

/**
 * The mode for the center text on a {@link RingPlot}: determines whether a
 * fixed string, a dataset-derived value, or nothing is drawn in the middle
 * of the ring.
 *
 * @since 1.0.18
 */
public enum CenterTextMode {

    /** A fixed text item. */
    FIXED,

    /** A value item (taken from the first item in the dataset). */
    VALUE,

    /** No center text. */
    NONE

}
lgpl-2.1
xiaoleiPENG/my-project
spring-boot-actuator/src/test/java/org/springframework/boot/actuate/autoconfigure/EndpointMBeanExportAutoConfigurationTests.java
8077
/*
 * Copyright 2012-2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.boot.actuate.autoconfigure;

import javax.management.InstanceNotFoundException;
import javax.management.IntrospectionException;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
import javax.management.ReflectionException;

import org.junit.After;
import org.junit.Test;

import org.springframework.beans.factory.NoSuchBeanDefinitionException;
import org.springframework.boot.actuate.endpoint.AbstractEndpoint;
import org.springframework.boot.actuate.endpoint.Endpoint;
import org.springframework.boot.actuate.endpoint.jmx.EndpointMBeanExporter;
import org.springframework.boot.autoconfigure.context.PropertyPlaceholderAutoConfiguration;
import org.springframework.boot.autoconfigure.jmx.JmxAutoConfiguration;
import org.springframework.context.ApplicationContext;
import org.springframework.context.annotation.AnnotationConfigApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.EnableMBeanExport;
import org.springframework.jmx.export.MBeanExporter;
import org.springframework.jmx.export.annotation.ManagedResource;
import org.springframework.jmx.support.ObjectNameManager;
import org.springframework.mock.env.MockEnvironment;
import org.springframework.stereotype.Component;
import org.springframework.util.ObjectUtils;

import static org.assertj.core.api.Assertions.assertThat;

/**
 * Tests for {@link EndpointMBeanExportAutoConfiguration}: each test builds an
 * annotation-config context with a different combination of configurations
 * and asserts whether (and under which names) endpoint MBeans get registered.
 */
public class EndpointMBeanExportAutoConfigurationTests {

	private AnnotationConfigApplicationContext context;

	// Close the context after every test so MBeans are deregistered and do
	// not leak into the next test's MBean server queries.
	@After
	public void close() {
		if (this.context != null) {
			this.context.close();
		}
	}

	// Default case: the exporter is installed and endpoint MBeans appear.
	@Test
	public void testEndpointMBeanExporterIsInstalled() throws Exception {
		this.context = new AnnotationConfigApplicationContext();
		this.context.register(TestConfiguration.class, JmxAutoConfiguration.class,
				EndpointAutoConfiguration.class,
				EndpointMBeanExportAutoConfiguration.class,
				PropertyPlaceholderAutoConfiguration.class);
		this.context.refresh();
		assertThat(this.context.getBean(EndpointMBeanExporter.class)).isNotNull();
		MBeanExporter mbeanExporter = this.context.getBean(EndpointMBeanExporter.class);
		assertThat(mbeanExporter.getServer()
				.queryNames(getObjectName("*", "*,*", this.context), null)).isNotEmpty();
	}

	// An endpoint annotated @ManagedResource is exported by the regular JMX
	// machinery, so the endpoint exporter must skip it (no duplicate names).
	@Test
	public void testEndpointMBeanExporterIsNotInstalledIfManagedResource()
			throws Exception {
		this.context = new AnnotationConfigApplicationContext();
		this.context.register(TestConfiguration.class, JmxAutoConfiguration.class,
				ManagedEndpoint.class, EndpointMBeanExportAutoConfiguration.class,
				PropertyPlaceholderAutoConfiguration.class);
		this.context.refresh();
		assertThat(this.context.getBean(EndpointMBeanExporter.class)).isNotNull();
		MBeanExporter mbeanExporter = this.context.getBean(EndpointMBeanExporter.class);
		assertThat(mbeanExporter.getServer()
				.queryNames(getObjectName("*", "*,*", this.context), null)).isEmpty();
	}

	// Same skip behavior when the endpoint bean is nested inside a
	// @ManagedResource-annotated @Configuration class.
	@Test
	public void testEndpointMBeanExporterIsNotInstalledIfNestedInManagedResource()
			throws Exception {
		this.context = new AnnotationConfigApplicationContext();
		this.context.register(TestConfiguration.class, JmxAutoConfiguration.class,
				NestedInManagedEndpoint.class,
				EndpointMBeanExportAutoConfiguration.class,
				PropertyPlaceholderAutoConfiguration.class);
		this.context.refresh();
		assertThat(this.context.getBean(EndpointMBeanExporter.class)).isNotNull();
		MBeanExporter mbeanExporter = this.context.getBean(EndpointMBeanExporter.class);
		assertThat(mbeanExporter.getServer()
				.queryNames(getObjectName("*", "*,*", this.context), null)).isEmpty();
	}

	// endpoints.jmx.enabled=false disables the exporter bean entirely, so
	// looking it up must fail with NoSuchBeanDefinitionException.
	@Test(expected = NoSuchBeanDefinitionException.class)
	public void testEndpointMBeanExporterIsNotInstalled() {
		MockEnvironment environment = new MockEnvironment();
		environment.setProperty("endpoints.jmx.enabled", "false");
		this.context = new AnnotationConfigApplicationContext();
		this.context.setEnvironment(environment);
		this.context.register(JmxAutoConfiguration.class,
				EndpointAutoConfiguration.class,
				EndpointMBeanExportAutoConfiguration.class);
		this.context.refresh();
		this.context.getBean(EndpointMBeanExporter.class);
	}

	// Custom domain, unique_names and static_names properties must all be
	// reflected in the registered ObjectName.
	@Test
	public void testEndpointMBeanExporterWithProperties() throws IntrospectionException,
			InstanceNotFoundException, MalformedObjectNameException,
			ReflectionException {
		MockEnvironment environment = new MockEnvironment();
		environment.setProperty("endpoints.jmx.domain", "test-domain");
		environment.setProperty("endpoints.jmx.unique_names", "true");
		environment.setProperty("endpoints.jmx.static_names", "key1=value1, key2=value2");
		this.context = new AnnotationConfigApplicationContext();
		this.context.setEnvironment(environment);
		this.context.register(JmxAutoConfiguration.class,
				EndpointAutoConfiguration.class,
				EndpointMBeanExportAutoConfiguration.class);
		this.context.refresh();
		this.context.getBean(EndpointMBeanExporter.class);
		MBeanExporter mbeanExporter = this.context.getBean(EndpointMBeanExporter.class);
		assertThat(mbeanExporter.getServer().getMBeanInfo(ObjectNameManager.getInstance(
				getObjectName("test-domain", "healthEndpoint", this.context).toString()
						+ ",key1=value1,key2=value2"))).isNotNull();
	}

	// Parent and child contexts both exporting endpoints must not clash
	// (closing the parent afterwards must also work cleanly).
	@Test
	public void testEndpointMBeanExporterInParentChild() throws IntrospectionException,
			InstanceNotFoundException, MalformedObjectNameException,
			ReflectionException {
		this.context = new AnnotationConfigApplicationContext();
		this.context.register(JmxAutoConfiguration.class,
				EndpointAutoConfiguration.class,
				EndpointMBeanExportAutoConfiguration.class);
		AnnotationConfigApplicationContext parent = new AnnotationConfigApplicationContext();
		parent.register(JmxAutoConfiguration.class, EndpointAutoConfiguration.class,
				EndpointMBeanExportAutoConfiguration.class);
		this.context.setParent(parent);
		parent.refresh();
		this.context.refresh();
		parent.close();
	}

	// Builds the ObjectName pattern the exporter is expected to use,
	// mirroring the exporter's own naming rules (context qualifier when a
	// parent exists, identity suffix when unique_names is set).
	private ObjectName getObjectName(String domain, String beanKey,
			ApplicationContext applicationContext) throws MalformedObjectNameException {
		String name = "%s:type=Endpoint,name=%s";
		if (applicationContext.getParent() != null) {
			name = name + ",context=%s";
		}
		if (applicationContext.getEnvironment().getProperty("endpoints.jmx.unique_names",
				Boolean.class, false)) {
			name = name + ",identity=" + ObjectUtils
					.getIdentityHexString(applicationContext.getBean(beanKey));
		}
		if (applicationContext.getParent() != null) {
			return ObjectNameManager.getInstance(String.format(name, domain, beanKey,
					ObjectUtils.getIdentityHexString(applicationContext)));
		}
		return ObjectNameManager.getInstance(String.format(name, domain, beanKey));
	}

	// Plain configuration that also enables standard MBean export.
	@Configuration
	@EnableMBeanExport
	public static class TestConfiguration {

	}

	// An endpoint that is itself a @ManagedResource (exported by the regular
	// JMX exporter, so the endpoint exporter should leave it alone).
	@Component
	@ManagedResource
	public static class ManagedEndpoint extends AbstractEndpoint<Boolean> {

		public ManagedEndpoint() {
			super("managed", true);
		}

		@Override
		public Boolean invoke() {
			return true;
		}

	}

	// An endpoint defined as an inner bean of a @ManagedResource
	// configuration class.
	@Configuration
	@ManagedResource
	public static class NestedInManagedEndpoint {

		@Bean
		public Endpoint<Boolean> nested() {
			return new Nested();
		}

		class Nested extends AbstractEndpoint<Boolean> {

			Nested() {
				super("managed", true);
			}

			@Override
			public Boolean invoke() {
				return true;
			}

		}

	}

}
apache-2.0
irudyak/ignite
modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkIgnitePaths.java
8219
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.spi.discovery.zk.internal; import java.util.UUID; /** * */ class ZkIgnitePaths { /** */ static final String PATH_SEPARATOR = "/"; /** */ private static final byte CLIENT_NODE_FLAG_MASK = 0x01; /** */ private static final int UUID_LEN = 36; /** Directory to store joined node data. */ private static final String JOIN_DATA_DIR = "jd"; /** Directory to store new custom events. */ private static final String CUSTOM_EVTS_DIR = "ce"; /** Directory to store parts of multi-parts custom events. */ private static final String CUSTOM_EVTS_PARTS_DIR = "cp"; /** Directory to store acknowledge messages for custom events. */ private static final String CUSTOM_EVTS_ACKS_DIR = "ca"; /** Directory to store EPHEMERAL znodes for alive cluster nodes. */ static final String ALIVE_NODES_DIR = "n"; /** Path to store discovery events {@link ZkDiscoveryEventsData}. 
*/ private static final String DISCO_EVENTS_PATH = "e"; /** */ final String clusterDir; /** */ final String aliveNodesDir; /** */ final String joinDataDir; /** */ final String evtsPath; /** */ final String customEvtsDir; /** */ final String customEvtsPartsDir; /** */ final String customEvtsAcksDir; /** * @param zkRootPath Base Zookeeper directory for all Ignite nodes. */ ZkIgnitePaths(String zkRootPath) { clusterDir = zkRootPath; aliveNodesDir = zkPath(ALIVE_NODES_DIR); joinDataDir = zkPath(JOIN_DATA_DIR); evtsPath = zkPath(DISCO_EVENTS_PATH); customEvtsDir = zkPath(CUSTOM_EVTS_DIR); customEvtsPartsDir = zkPath(CUSTOM_EVTS_PARTS_DIR); customEvtsAcksDir = zkPath(CUSTOM_EVTS_ACKS_DIR); } /** * @param path Relative path. * @return Full path. */ private String zkPath(String path) { return clusterDir + "/" + path; } /** * @param nodeId Node ID. * @param prefixId Unique prefix ID. * @return Path. */ String joiningNodeDataPath(UUID nodeId, UUID prefixId) { return joinDataDir + '/' + prefixId + ":" + nodeId.toString(); } /** * @param path Alive node zk path. * @return Node internal ID. */ static long aliveInternalId(String path) { int idx = path.lastIndexOf('|'); return Integer.parseInt(path.substring(idx + 1)); } /** * @param prefix Node unique path prefix. * @param node Node. * @return Path. */ String aliveNodePathForCreate(String prefix, ZookeeperClusterNode node) { byte flags = 0; if (node.isClient()) flags |= CLIENT_NODE_FLAG_MASK; return aliveNodesDir + "/" + prefix + ":" + node.id() + ":" + encodeFlags(flags) + "|"; } /** * @param path Alive node zk path. * @return {@code True} if node is client. */ static boolean aliveNodeClientFlag(String path) { return (aliveFlags(path) & CLIENT_NODE_FLAG_MASK) != 0; } /** * @param path Alive node zk path. * @return Node ID. */ static UUID aliveNodePrefixId(String path) { return UUID.fromString(path.substring(0, ZkIgnitePaths.UUID_LEN)); } /** * @param path Alive node zk path. * @return Node ID. 
*/ static UUID aliveNodeId(String path) { // <uuid prefix>:<node id>:<flags>|<alive seq> int startIdx = ZkIgnitePaths.UUID_LEN + 1; String idStr = path.substring(startIdx, startIdx + ZkIgnitePaths.UUID_LEN); return UUID.fromString(idStr); } /** * @param path Event zk path. * @return Event sequence number. */ static int customEventSequence(String path) { int idx = path.lastIndexOf('|'); return Integer.parseInt(path.substring(idx + 1)); } /** * @param path Custom event zl path. * @return Event node ID. */ static UUID customEventSendNodeId(String path) { // <uuid prefix>:<node id>:<partCnt>|<seq> int startIdx = ZkIgnitePaths.UUID_LEN + 1; String idStr = path.substring(startIdx, startIdx + ZkIgnitePaths.UUID_LEN); return UUID.fromString(idStr); } /** * @param path Event path. * @return Event unique prefix. */ static String customEventPrefix(String path) { // <uuid prefix>:<node id>:<partCnt>|<seq> return path.substring(0, ZkIgnitePaths.UUID_LEN); } /** * @param path Custom event zl path. * @return Event node ID. */ static int customEventPartsCount(String path) { // <uuid prefix>:<node id>:<partCnt>|<seq> int startIdx = 2 * ZkIgnitePaths.UUID_LEN + 2; String cntStr = path.substring(startIdx, startIdx + 4); int partCnt = Integer.parseInt(cntStr); assert partCnt >= 1 : partCnt; return partCnt; } /** * @param prefix Prefix. * @param nodeId Node ID. * @param partCnt Parts count. * @return Path. */ String createCustomEventPath(String prefix, UUID nodeId, int partCnt) { return customEvtsDir + "/" + prefix + ":" + nodeId + ":" + String.format("%04d", partCnt) + '|'; } /** * @param prefix Prefix. * @param nodeId Node ID. * @return Path. */ String customEventPartsBasePath(String prefix, UUID nodeId) { return customEvtsPartsDir + "/" + prefix + ":" + nodeId + ":"; } /** * @param prefix Prefix. * @param nodeId Node ID. * @param part Part number. * @return Path. 
*/ String customEventPartPath(String prefix, UUID nodeId, int part) { return customEventPartsBasePath(prefix, nodeId) + String.format("%04d", part); } /** * @param evtId Event ID. * @return Event zk path. */ String joinEventDataPathForJoined(long evtId) { return evtsPath + "/fj-" + evtId; } /** * @param topVer Event topology version. * @return Event zk path. */ String joinEventSecuritySubjectPath(long topVer) { return evtsPath + "/s-" + topVer; } /** * @param origEvtId ID of original custom event. * @return Path for custom event ack. */ String ackEventDataPath(long origEvtId) { assert origEvtId != 0; return customEvtsAcksDir + "/" + String.valueOf(origEvtId); } /** * @param id Future ID. * @return Future path. */ String distributedFutureBasePath(UUID id) { return evtsPath + "/f-" + id; } /** * @param id Future ID. * @return Future path. */ String distributedFutureResultPath(UUID id) { return evtsPath + "/fr-" + id; } /** * @param flags Flags. * @return Flags string. */ private static String encodeFlags(byte flags) { int intVal = flags + 128; String str = Integer.toString(intVal, 16); if (str.length() == 1) str = '0' + str; assert str.length() == 2 : str; return str; } /** * @param path Alive node zk path. * @return Flags. */ private static byte aliveFlags(String path) { int startIdx = path.lastIndexOf(':') + 1; String flagsStr = path.substring(startIdx, startIdx + 2); return (byte)(Integer.parseInt(flagsStr, 16) - 128); } }
apache-2.0
jomarko/kie-wb-common
kie-wb-common-widgets/kie-wb-common-ui/src/main/java/org/kie/workbench/common/widgets/client/datamodel/FilteredEnumLists.java
5038
/* * Copyright 2015 Red Hat, Inc. and/or its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.kie.workbench.common.widgets.client.datamodel; import java.util.HashMap; import java.util.Map; import java.util.Set; /** * Filtered (current package and imports) map of { TypeName.field : String[] } - where a list is valid values to display in a drop down for a given Type.field combination. */ public class FilteredEnumLists extends HashMap<String, String[]> { // This is used to calculate what fields an enum list may depend on. private transient Map<String, Object> enumLookupFields; /** * For simple cases - where a list of values are known based on a field. */ public String[] getEnumValues(final String factType, final String field) { return this.get(factType + "#" + field); } Object getTypeFields(String type, String field) { return loadDataEnumLookupFields().get(type + "#" + field); } boolean isDependentEnum(final String factType, final String parentField, final String childField) { final Map<String, Object> enums = loadDataEnumLookupFields(); if (enums.isEmpty()) { return false; } //Check if the childField is a direct descendant of the parentField final String key = factType + "#" + childField; if (!enums.containsKey(key)) { return false; } //Otherwise follow the dependency chain... 
final Object _parent = enums.get(key); if (_parent instanceof String) { final String _parentField = (String) _parent; if (_parentField.equals(parentField)) { return true; } else { return isDependentEnum(factType, parentField, _parentField); } } return false; } /** * This is only used by enums that are like Fact.field[something=X] and so on. */ Map<String, Object> loadDataEnumLookupFields() { if (enumLookupFields == null) { enumLookupFields = new HashMap<String, Object>(); final Set<String> keys = keySet(); for (String key : keys) { if (key.indexOf('[') != -1) { int ix = key.indexOf('['); final String factField = key.substring(0, ix); final String predicate = key.substring(ix + 1, key.indexOf(']')); if (predicate.indexOf('=') > -1) { final String[] bits = predicate.split(","); final StringBuilder typeFieldBuilder = new StringBuilder(); for (int i = 0; i < bits.length; i++) { typeFieldBuilder.append(bits[i].substring(0, bits[i].indexOf('='))); if (i != (bits.length - 1)) { typeFieldBuilder.append(","); } } enumLookupFields.put(factField, typeFieldBuilder.toString()); } else { final String[] fields = predicate.split(","); for (int i = 0; i < fields.length; i++) { fields[i] = fields[i].trim(); } enumLookupFields.put(factField, fields); } } } } return enumLookupFields; } boolean hasEnums(final String qualifiedFactField) { boolean hasEnums = false; final String key = qualifiedFactField.replace(".", "#"); final String dependentType = key + "["; for (String e : keySet()) { //e.g. Fact.field1 if (e.equals(key)) { return true; } //e.g. Fact.field2[field1=val2] if (e.startsWith(dependentType)) { return true; } } return hasEnums; } }
apache-2.0
samaitra/ignite
modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/ClientIntResponse.java
1419
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.internal.processors.platform.client; import org.apache.ignite.internal.binary.BinaryRawWriterEx; /** * Int response. */ public class ClientIntResponse extends ClientResponse { /** */ private final int val; /** * Constructor. * * @param reqId Request id. */ public ClientIntResponse(long reqId, int val) { super(reqId); this.val = val; } /** {@inheritDoc} */ @Override public void encode(ClientConnectionContext ctx, BinaryRawWriterEx writer) { super.encode(ctx, writer); writer.writeInt(val); } }
apache-2.0
jwren/intellij-community
java/java-tests/testSrc/com/intellij/java/codeInspection/NumericOverflowInspectionTest.java
1956
/* * Copyright 2000-2017 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.java.codeInspection; import com.intellij.JavaTestUtil; import com.intellij.codeInspection.NumericOverflowInspection; import com.intellij.lang.annotation.HighlightSeverity; import com.intellij.testFramework.fixtures.LightJavaCodeInsightFixtureTestCase; public class NumericOverflowInspectionTest extends LightJavaCodeInsightFixtureTestCase { @Override protected String getBasePath() { return JavaTestUtil.getRelativeJavaTestDataPath() + "/inspection/numericOverflow"; } public void testSimple() { doTest(); } @Override protected void setUp() throws Exception { super.setUp(); myFixture.enableInspections(new NumericOverflowInspection()); } private void doTest() { myFixture.testHighlighting(getTestName(false) + ".java"); } public void testUpdatesOnTyping() { String text = "class My {\n" + "void d(long lower) {\n" + " long upper = lower + 1000<caret> * 31536000;\n" + " }" + "}"; myFixture.configureByText("My.java", text); assertOneElement(myFixture.doHighlighting(HighlightSeverity.WARNING)); myFixture.type('L'); assertEmpty(myFixture.doHighlighting(HighlightSeverity.WARNING)); myFixture.type('\b'); assertOneElement(myFixture.doHighlighting(HighlightSeverity.WARNING)); } }
apache-2.0
samaitra/ignite
modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheMultinodeUpdateAtomicNearEnabledSelfTest.java
1427
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.internal.processors.cache; import org.apache.ignite.cache.CacheAtomicityMode; import org.apache.ignite.configuration.NearCacheConfiguration; import static org.apache.ignite.cache.CacheAtomicityMode.ATOMIC; /** * */ public class GridCacheMultinodeUpdateAtomicNearEnabledSelfTest extends GridCacheMultinodeUpdateAbstractSelfTest { /** {@inheritDoc} */ @Override protected NearCacheConfiguration nearConfiguration() { return new NearCacheConfiguration(); } /** {@inheritDoc} */ @Override protected CacheAtomicityMode atomicityMode() { return ATOMIC; } }
apache-2.0
raviagarwal7/buck
src/com/facebook/buck/cxx/CxxInferCaptureAndAggregatingRules.java
1349
/* * Copyright 2016-present Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. You may obtain * a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ package com.facebook.buck.cxx; import com.facebook.buck.rules.BuildRule; import com.google.common.collect.ImmutableSet; /** * Each set of capture rules is headed by aggregating rules, e.g. analysis rules * that run over these captures, or collection rules that are needed only for separate post * processing (e.g. transitive capture of all source files) */ class CxxInferCaptureAndAggregatingRules<T extends BuildRule> { final ImmutableSet<CxxInferCapture> captureRules; final ImmutableSet<T> aggregatingRules; CxxInferCaptureAndAggregatingRules( ImmutableSet<CxxInferCapture> captureRules, ImmutableSet<T> aggregatingRules) { this.captureRules = captureRules; this.aggregatingRules = aggregatingRules; } }
apache-2.0
nwnpallewela/developer-studio
jaggery/plugins/org.eclipse.php.core/src/org/eclipse/php/internal/core/codeassist/strategies/CatchTypeStrategy.java
1371
/******************************************************************************* * Copyright (c) 2009 IBM Corporation and others. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * which accompanies this distribution, and is available at * http://www.eclipse.org/legal/epl-v10.html * * Contributors: * IBM Corporation - initial API and implementation * Zend Technologies *******************************************************************************/ package org.eclipse.php.internal.core.codeassist.strategies; import org.eclipse.php.core.codeassist.ICompletionContext; import org.eclipse.php.internal.core.codeassist.ProposalExtraInfo; import org.eclipse.php.internal.core.codeassist.contexts.AbstractCompletionContext; /** * This strategy completes exception types in catch clause * * @author michael */ public class CatchTypeStrategy extends GlobalTypesStrategy { public CatchTypeStrategy(ICompletionContext context, int trueFlag, int falseFlag) { super(context, trueFlag, falseFlag); } public CatchTypeStrategy(ICompletionContext context) { super(context); } public String getSuffix(AbstractCompletionContext abstractContext) { return " "; //$NON-NLS-1$ } protected int getExtraInfo() { return ProposalExtraInfo.TYPE_ONLY; } }
apache-2.0
cleliameneghin/sling
bundles/extensions/fsresource/src/main/java/org/apache/sling/fsprovider/internal/mapper/jcr/FsItem.java
5179
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.sling.fsprovider.internal.mapper.jcr; import javax.jcr.AccessDeniedException; import javax.jcr.InvalidItemStateException; import javax.jcr.Item; import javax.jcr.ItemExistsException; import javax.jcr.ItemNotFoundException; import javax.jcr.ItemVisitor; import javax.jcr.Node; import javax.jcr.ReferentialIntegrityException; import javax.jcr.RepositoryException; import javax.jcr.Session; import javax.jcr.lock.LockException; import javax.jcr.nodetype.ConstraintViolationException; import javax.jcr.nodetype.NoSuchNodeTypeException; import javax.jcr.version.VersionException; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.commons.lang3.builder.ToStringStyle; import org.apache.sling.api.resource.Resource; import org.apache.sling.api.resource.ResourceResolver; import org.apache.sling.api.resource.ValueMap; import org.apache.sling.fsprovider.internal.mapper.ContentFile; /** * Simplified implementation of read-only content access via the JCR API. 
*/ abstract class FsItem implements Item { protected final ContentFile contentFile; protected final ResourceResolver resolver; protected final ValueMap props; public FsItem(ContentFile contentFile, ResourceResolver resolver) { this.contentFile = contentFile; this.resolver = resolver; this.props = contentFile.getValueMap(); } @Override public String getPath() throws RepositoryException { if (contentFile.getSubPath() == null) { return contentFile.getPath(); } else { return contentFile.getPath() + "/" + contentFile.getSubPath(); } } @Override public Item getAncestor(int depth) throws ItemNotFoundException, AccessDeniedException, RepositoryException { String path; if (depth == 0) { path = "/"; } else { String[] pathParts = StringUtils.splitPreserveAllTokens(getPath(), "/"); path = StringUtils.join(pathParts, "/", 0, depth + 1); } Resource resource = resolver.getResource(path); if (resource != null) { Node refNode = resource.adaptTo(Node.class); if (refNode != null) { return refNode; } } throw new ItemNotFoundException(); } @Override public int getDepth() throws RepositoryException { if (StringUtils.equals("/", getPath())) { return 0; } else { return StringUtils.countMatches(getPath(), "/"); } } @Override public Session getSession() throws RepositoryException { return resolver.adaptTo(Session.class); } @Override public boolean isNode() { return (this instanceof Node); } @Override public boolean isNew() { return false; } @Override public boolean isModified() { return false; } @Override public boolean isSame(Item otherItem) throws RepositoryException { return StringUtils.equals(getPath(), otherItem.getPath()); } @Override public void accept(ItemVisitor visitor) throws RepositoryException { // do nothing } @Override public String toString() { try { return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE) .append("path", getPath()) .build(); } catch (RepositoryException ex) { throw new RuntimeException(ex); } } // --- unsupported methods --- @Override public void 
save() throws AccessDeniedException, ItemExistsException, ConstraintViolationException, InvalidItemStateException, ReferentialIntegrityException, VersionException, LockException, NoSuchNodeTypeException, RepositoryException { throw new UnsupportedOperationException(); } @Override public void refresh(boolean keepChanges) throws InvalidItemStateException, RepositoryException { throw new UnsupportedOperationException(); } @Override public void remove() throws VersionException, LockException, ConstraintViolationException, AccessDeniedException, RepositoryException { throw new UnsupportedOperationException(); } }
apache-2.0
sriksun/falcon
prism/src/main/java/org/apache/falcon/util/SecureEmbeddedServer.java
2612
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.falcon.util; import org.mortbay.jetty.Connector; import org.mortbay.jetty.security.SslSocketConnector; import java.util.Properties; /** * This is a jetty server which requires client auth via certificates. 
*/ public class SecureEmbeddedServer extends EmbeddedServer { public SecureEmbeddedServer(int port, String path) { super(port, path); } protected Connector getConnector(int port) { Properties properties = StartupProperties.get(); SslSocketConnector connector = new SslSocketConnector(); connector.setPort(port); connector.setHost("0.0.0.0"); connector.setKeystore(properties.getProperty("keystore.file", System.getProperty("keystore.file", "conf/prism.keystore"))); connector.setKeyPassword(properties.getProperty("keystore.password", System.getProperty("keystore.password", "falcon-prism-passwd"))); connector.setTruststore(properties.getProperty("truststore.file", System.getProperty("truststore.file", "conf/prism.keystore"))); connector.setTrustPassword(properties.getProperty("truststore.password", System.getProperty("truststore.password", "falcon-prism-passwd"))); connector.setPassword(properties.getProperty("password", System.getProperty("password", "falcon-prism-passwd"))); connector.setWantClientAuth(true); // this is to enable large header sizes when Kerberos is enabled with AD final Integer bufferSize = Integer.valueOf(StartupProperties.get().getProperty( "falcon.jetty.request.buffer.size", "16192")); connector.setHeaderBufferSize(bufferSize); connector.setRequestBufferSize(bufferSize); return connector; } }
apache-2.0
midnightradio/gerrit
gerrit-extension-api/src/main/java/com/google/gerrit/extensions/restapi/RestApiModule.java
6088
// Copyright (C) 2012 The Android Open Source Project // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package com.google.gerrit.extensions.restapi; import com.google.gerrit.extensions.annotations.Export; import com.google.gerrit.extensions.annotations.Exports; import com.google.inject.AbstractModule; import com.google.inject.Provider; import com.google.inject.TypeLiteral; import com.google.inject.binder.LinkedBindingBuilder; import com.google.inject.binder.ScopedBindingBuilder; /** Guice DSL for binding {@link RestView} implementations. 
*/ public abstract class RestApiModule extends AbstractModule { protected static final String GET = "GET"; protected static final String PUT = "PUT"; protected static final String DELETE = "DELETE"; protected static final String POST = "POST"; protected <R extends RestResource> ReadViewBinder<R> get(TypeLiteral<RestView<R>> viewType) { return new ReadViewBinder<>(view(viewType, GET, "/")); } protected <R extends RestResource> ModifyViewBinder<R> put(TypeLiteral<RestView<R>> viewType) { return new ModifyViewBinder<>(view(viewType, PUT, "/")); } protected <R extends RestResource> ModifyViewBinder<R> post(TypeLiteral<RestView<R>> viewType) { return new ModifyViewBinder<>(view(viewType, POST, "/")); } protected <R extends RestResource> ModifyViewBinder<R> delete(TypeLiteral<RestView<R>> viewType) { return new ModifyViewBinder<>(view(viewType, DELETE, "/")); } protected <R extends RestResource> ReadViewBinder<R> get(TypeLiteral<RestView<R>> viewType, String name) { return new ReadViewBinder<>(view(viewType, GET, name)); } protected <R extends RestResource> ModifyViewBinder<R> put(TypeLiteral<RestView<R>> viewType, String name) { return new ModifyViewBinder<>(view(viewType, PUT, name)); } protected <R extends RestResource> ModifyViewBinder<R> post(TypeLiteral<RestView<R>> viewType, String name) { return new ModifyViewBinder<>(view(viewType, POST, name)); } protected <R extends RestResource> ModifyViewBinder<R> delete(TypeLiteral<RestView<R>> viewType, String name) { return new ModifyViewBinder<>(view(viewType, DELETE, name)); } protected <P extends RestResource> ChildCollectionBinder<P> child(TypeLiteral<RestView<P>> type, String name) { return new ChildCollectionBinder<>(view(type, GET, name)); } protected <R extends RestResource> LinkedBindingBuilder<RestView<R>> view( TypeLiteral<RestView<R>> viewType, String method, String name) { return bind(viewType).annotatedWith(export(method, name)); } private static Export export(String method, String name) { if (name.length() 
> 1 && name.startsWith("/")) { // Views may be bound as "/" to mean the resource itself, or // as "status" as in "/type/{id}/status". Don't bind "/status" // if the caller asked for that, bind what the server expects. name = name.substring(1); } return Exports.named(method + "." + name); } public static class ReadViewBinder<P extends RestResource> { private final LinkedBindingBuilder<RestView<P>> binder; private ReadViewBinder(LinkedBindingBuilder<RestView<P>> binder) { this.binder = binder; } public <T extends RestReadView<P>> ScopedBindingBuilder to(Class<T> impl) { return binder.to(impl); } public <T extends RestReadView<P>> void toInstance(T impl) { binder.toInstance(impl); } public <T extends RestReadView<P>> ScopedBindingBuilder toProvider(Class<? extends Provider<? extends T>> providerType) { return binder.toProvider(providerType); } public <T extends RestReadView<P>> ScopedBindingBuilder toProvider(Provider<? extends T> provider) { return binder.toProvider(provider); } } public static class ModifyViewBinder<P extends RestResource> { private final LinkedBindingBuilder<RestView<P>> binder; private ModifyViewBinder(LinkedBindingBuilder<RestView<P>> binder) { this.binder = binder; } public <T extends RestModifyView<P, ?>> ScopedBindingBuilder to(Class<T> impl) { return binder.to(impl); } public <T extends RestModifyView<P, ?>> void toInstance(T impl) { binder.toInstance(impl); } public <T extends RestModifyView<P, ?>> ScopedBindingBuilder toProvider(Class<? extends Provider<? extends T>> providerType) { return binder.toProvider(providerType); } public <T extends RestModifyView<P, ?>> ScopedBindingBuilder toProvider(Provider<? 
extends T> provider) { return binder.toProvider(provider); } } public static class ChildCollectionBinder<P extends RestResource> { private final LinkedBindingBuilder<RestView<P>> binder; private ChildCollectionBinder(LinkedBindingBuilder<RestView<P>> binder) { this.binder = binder; } public <C extends RestResource, T extends ChildCollection<P, C>> ScopedBindingBuilder to(Class<T> impl) { return binder.to(impl); } public <C extends RestResource, T extends ChildCollection<P, C>> void toInstance(T impl) { binder.toInstance(impl); } public <C extends RestResource, T extends ChildCollection<P, C>> ScopedBindingBuilder toProvider(Class<? extends Provider<? extends T>> providerType) { return binder.toProvider(providerType); } public <C extends RestResource, T extends ChildCollection<P, C>> ScopedBindingBuilder toProvider(Provider<? extends T> provider) { return binder.toProvider(provider); } } }
apache-2.0
josephknight/libgdx
extensions/gdx-bullet/jni/swig-src/dynamics/com/badlogic/gdx/physics/bullet/dynamics/SWIGTYPE_p_btAlignedObjectArrayT_btSolverConstraint_t.java
919
/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 3.0.11 * * Do not make changes to this file unless you know what you are doing--modify * the SWIG interface file instead. * ----------------------------------------------------------------------------- */ package com.badlogic.gdx.physics.bullet.dynamics; public class SWIGTYPE_p_btAlignedObjectArrayT_btSolverConstraint_t { private transient long swigCPtr; protected SWIGTYPE_p_btAlignedObjectArrayT_btSolverConstraint_t(long cPtr, @SuppressWarnings("unused") boolean futureUse) { swigCPtr = cPtr; } protected SWIGTYPE_p_btAlignedObjectArrayT_btSolverConstraint_t() { swigCPtr = 0; } protected static long getCPtr(SWIGTYPE_p_btAlignedObjectArrayT_btSolverConstraint_t obj) { return (obj == null) ? 0 : obj.swigCPtr; } }
apache-2.0
ueshin/apache-spark
common/sketch/src/main/java/org/apache/spark/util/sketch/BitArray.java
3608
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.util.sketch; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.IOException; import java.util.Arrays; final class BitArray { private final long[] data; private long bitCount; static int numWords(long numBits) { if (numBits <= 0) { throw new IllegalArgumentException("numBits must be positive, but got " + numBits); } long numWords = (long) Math.ceil(numBits / 64.0); if (numWords > Integer.MAX_VALUE) { throw new IllegalArgumentException("Can't allocate enough space for " + numBits + " bits"); } return (int) numWords; } BitArray(long numBits) { this(new long[numWords(numBits)]); } private BitArray(long[] data) { this.data = data; long bitCount = 0; for (long word : data) { bitCount += Long.bitCount(word); } this.bitCount = bitCount; } /** Returns true if the bit changed value. 
*/ boolean set(long index) { if (!get(index)) { data[(int) (index >>> 6)] |= (1L << index); bitCount++; return true; } return false; } boolean get(long index) { return (data[(int) (index >>> 6)] & (1L << index)) != 0; } /** Number of bits */ long bitSize() { return (long) data.length * Long.SIZE; } /** Number of set bits (1s) */ long cardinality() { return bitCount; } /** Combines the two BitArrays using bitwise OR. */ void putAll(BitArray array) { assert data.length == array.data.length : "BitArrays must be of equal length when merging"; long bitCount = 0; for (int i = 0; i < data.length; i++) { data[i] |= array.data[i]; bitCount += Long.bitCount(data[i]); } this.bitCount = bitCount; } /** Combines the two BitArrays using bitwise AND. */ void and(BitArray array) { assert data.length == array.data.length : "BitArrays must be of equal length when merging"; long bitCount = 0; for (int i = 0; i < data.length; i++) { data[i] &= array.data[i]; bitCount += Long.bitCount(data[i]); } this.bitCount = bitCount; } void writeTo(DataOutputStream out) throws IOException { out.writeInt(data.length); for (long datum : data) { out.writeLong(datum); } } static BitArray readFrom(DataInputStream in) throws IOException { int numWords = in.readInt(); long[] data = new long[numWords]; for (int i = 0; i < numWords; i++) { data[i] = in.readLong(); } return new BitArray(data); } @Override public boolean equals(Object other) { if (this == other) return true; if (!(other instanceof BitArray)) return false; BitArray that = (BitArray) other; return Arrays.equals(data, that.data); } @Override public int hashCode() { return Arrays.hashCode(data); } }
apache-2.0
nuwand/carbon-apimgt
components/apimgt/org.wso2.carbon.apimgt.internal.service/src/gen/java/org/wso2/carbon/apimgt/internal/service/ApiPoliciesApiService.java
812
package org.wso2.carbon.apimgt.internal.service; import org.wso2.carbon.apimgt.internal.service.*; import org.wso2.carbon.apimgt.internal.service.dto.*; import org.apache.cxf.jaxrs.ext.MessageContext; import org.apache.cxf.jaxrs.ext.multipart.Attachment; import org.apache.cxf.jaxrs.ext.multipart.Multipart; import org.wso2.carbon.apimgt.api.APIManagementException; import org.wso2.carbon.apimgt.internal.service.dto.ApiPolicyListDTO; import org.wso2.carbon.apimgt.internal.service.dto.ErrorDTO; import java.util.List; import java.io.InputStream; import javax.ws.rs.core.Response; import javax.ws.rs.core.SecurityContext; public interface ApiPoliciesApiService { public Response apiPoliciesGet(String xWSO2Tenant, String policyName, MessageContext messageContext) throws APIManagementException; }
apache-2.0
amckee23/drools
drools-compiler/src/main/java/org/drools/compiler/lang/api/AnnotationDescrBuilder.java
1104
/*
 * Copyright 2011 JBoss Inc
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.drools.compiler.lang.api;

import org.drools.compiler.lang.descr.AnnotationDescr;

/**
 * A descriptor builder for annotations.
 *
 * <p>Builds an {@link AnnotationDescr}, supporting both the single-value form
 * ({@code @Foo(value)}) via {@link #value(Object)} and the key/value form
 * ({@code @Foo(key = value)}) via {@link #keyValue(String, Object)}. Both
 * methods return {@code this} for chaining.
 *
 * @param <P> the type of the parent builder this builder was spawned from
 */
public interface AnnotationDescrBuilder<P extends DescrBuilder<?, ?>>
    extends
    DescrBuilder<P, AnnotationDescr>,
    AnnotatedDescrBuilder<AnnotationDescrBuilder<P>> {

    /**
     * Sets the single (implicit "value") member of the annotation.
     *
     * @param value the annotation value
     * @return this builder, for chaining
     */
    AnnotationDescrBuilder<P> value(Object value);

    /**
     * Sets a named member of the annotation.
     *
     * @param key   the member name
     * @param value the member value
     * @return this builder, for chaining
     */
    AnnotationDescrBuilder<P> keyValue(String key, Object value);
}
apache-2.0
marcinkwiatkowski/buck
src/com/facebook/buck/util/environment/CommandMode.java
888
/*
 * Copyright 2016-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License. You may obtain
 * a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */

/**
 * Identifies the mode a command runs in: a normal release invocation or a test
 * invocation.
 *
 * <p>The original version left {@code loggingEnabled} as a mutable field assigned
 * from a static initializer; it is now a {@code final} field set by the enum
 * constructor, which is both immutable and impossible to leave uninitialized.
 */
public enum CommandMode {
  /** Normal production invocation; logging is enabled. */
  RELEASE(true),
  /** Test invocation; logging is suppressed. */
  TEST(false);

  /** Whether commands running in this mode emit logging output. */
  private final boolean loggingEnabled;

  CommandMode(boolean loggingEnabled) {
    this.loggingEnabled = loggingEnabled;
  }

  /** Returns true if logging should be enabled in this mode. */
  public boolean isLoggingEnabled() {
    return loggingEnabled;
  }
}
apache-2.0
jk1/intellij-community
plugins/groovy/groovy-psi/src/org/jetbrains/plugins/groovy/codeInspection/control/GroovyConditionalWithIdenticalBranchesInspection.java
3235
/* * Copyright 2007-2008 Dave Griffith * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jetbrains.plugins.groovy.codeInspection.control; import com.intellij.codeInspection.ProblemDescriptor; import com.intellij.openapi.project.Project; import com.intellij.psi.PsiElement; import com.intellij.util.IncorrectOperationException; import org.jetbrains.annotations.NotNull; import org.jetbrains.plugins.groovy.codeInspection.BaseInspection; import org.jetbrains.plugins.groovy.codeInspection.BaseInspectionVisitor; import org.jetbrains.plugins.groovy.codeInspection.GroovyFix; import org.jetbrains.plugins.groovy.codeInspection.utils.EquivalenceChecker; import org.jetbrains.plugins.groovy.lang.psi.api.statements.expressions.GrConditionalExpression; import org.jetbrains.plugins.groovy.lang.psi.api.statements.expressions.GrExpression; public class GroovyConditionalWithIdenticalBranchesInspection extends BaseInspection { @Override public boolean isEnabledByDefault() { return true; } @Override @NotNull public String getDisplayName() { return "Conditional expression with identical branches"; } @Override public String buildErrorString(Object... 
args) { return "Conditional expression with identical branches #loc"; } @Override public GroovyFix buildFix(@NotNull PsiElement location) { return new CollapseConditionalFix(); } private static class CollapseConditionalFix extends GroovyFix { @Override @NotNull public String getFamilyName() { return "Collapse conditional expression"; } @Override public void doFix(@NotNull Project project, @NotNull ProblemDescriptor descriptor) throws IncorrectOperationException { final PsiElement element = descriptor.getPsiElement(); if (!(element instanceof GrConditionalExpression)) return; final GrConditionalExpression expression = (GrConditionalExpression)element; final GrExpression thenBranch = expression.getThenBranch(); replaceExpression(expression, thenBranch.getText()); } } @NotNull @Override public BaseInspectionVisitor buildVisitor() { return new Visitor(); } private static class Visitor extends BaseInspectionVisitor { @Override public void visitConditionalExpression(@NotNull GrConditionalExpression expression) { super.visitConditionalExpression(expression); final GrExpression thenBranch = expression.getThenBranch(); final GrExpression elseBranch = expression.getElseBranch(); if (thenBranch == null || elseBranch == null) { return; } if (EquivalenceChecker.expressionsAreEquivalent(thenBranch, elseBranch)) { registerStatementError(expression); } } } }
apache-2.0
dianping/cosmos-hadoop
src/core/org/apache/hadoop/fs/FileSystem.java
56490
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs; import java.io.Closeable; import java.io.FileNotFoundException; import java.io.IOException; import java.net.URI; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.IdentityHashMap; import java.security.PrivilegedExceptionAction; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeSet; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.logging.*; import org.apache.hadoop.conf.*; import org.apache.hadoop.util.*; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.MultipleIOException; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; /**************************************************************** * An abstract base class for a fairly generic filesystem. It * may be implemented as a distributed filesystem, or as a "local" * one that reflects the locally-connected disk. 
The local version * exists for small Hadoop instances and for testing. * * <p> * * All user code that may potentially use the Hadoop Distributed * File System should be written to use a FileSystem object. The * Hadoop DFS is a multi-machine system that appears as a single * disk. It's useful because of its fault tolerance and potentially * very large capacity. * * <p> * The local implementation is {@link LocalFileSystem} and distributed * implementation is DistributedFileSystem. *****************************************************************/ public abstract class FileSystem extends Configured implements Closeable { public static final String FS_DEFAULT_NAME_KEY = "fs.default.name"; public static final Log LOG = LogFactory.getLog(FileSystem.class); /** FileSystem cache */ private static final Cache CACHE = new Cache(); /** The key this instance is stored under in the cache. */ private Cache.Key key; /** Recording statistics per a FileSystem class */ private static final Map<Class<? extends FileSystem>, Statistics> statisticsTable = new IdentityHashMap<Class<? extends FileSystem>, Statistics>(); /** * The statistics for this file system. */ protected Statistics statistics; /** * A cache of files that should be deleted when filsystem is closed * or the JVM is exited. */ private Set<Path> deleteOnExit = new TreeSet<Path>(); /** * This method adds a file system for testing so that we can find it later. * It is only for testing. 
* @param uri the uri to store it under * @param conf the configuration to store it under * @param fs the file system to store * @throws IOException */ public static void addFileSystemForTesting(URI uri, Configuration conf, FileSystem fs) throws IOException { CACHE.map.put(new Cache.Key(uri, conf), fs); } public static FileSystem get(final URI uri, final Configuration conf, final String user) throws IOException, InterruptedException { UserGroupInformation ugi; if (user == null) { ugi = UserGroupInformation.getCurrentUser(); } else { ugi = UserGroupInformation.createRemoteUser(user); } return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() { public FileSystem run() throws IOException { return get(uri, conf); } }); } /** Returns the configured filesystem implementation.*/ public static FileSystem get(Configuration conf) throws IOException { return get(getDefaultUri(conf), conf); } /** Get the default filesystem URI from a configuration. * @param conf the configuration to access * @return the uri of the default filesystem */ public static URI getDefaultUri(Configuration conf) { return URI.create(fixName(conf.get(FS_DEFAULT_NAME_KEY, "file:///"))); } /** Set the default filesystem URI in a configuration. * @param conf the configuration to alter * @param uri the new default filesystem uri */ public static void setDefaultUri(Configuration conf, URI uri) { conf.set(FS_DEFAULT_NAME_KEY, uri.toString()); } /** Set the default filesystem URI in a configuration. * @param conf the configuration to alter * @param uri the new default filesystem uri */ public static void setDefaultUri(Configuration conf, String uri) { setDefaultUri(conf, URI.create(fixName(uri))); } /** Called after a new FileSystem instance is constructed. * @param name a uri whose authority section names the host, port, etc. 
* for this FileSystem * @param conf the configuration */ public void initialize(URI name, Configuration conf) throws IOException { statistics = getStatistics(name.getScheme(), getClass()); } /** Returns a URI whose scheme and authority identify this FileSystem.*/ public abstract URI getUri(); /** * Resolve the uri's hostname and add the default port if not in the uri * @return URI * @see NetUtils#getCanonicalUri(URI, int) */ protected URI getCanonicalUri() { return NetUtils.getCanonicalUri(getUri(), getDefaultPort()); } /** * Get the default port for this file system. * @return the default port or 0 if there isn't one */ protected int getDefaultPort() { return 0; } /** * Get a canonical service name for this file system. The token cache is * the only user of this value, and uses it to lookup this filesystem's * service tokens. The token cache will not attempt to acquire tokens if the * service is null. * @return a service string that uniquely identifies this file system, null * if the filesystem does not implement tokens * @see SecurityUtil#buildDTServiceName(URI, int) */ public String getCanonicalServiceName() { return SecurityUtil.buildDTServiceName(getUri(), getDefaultPort()); } /** @deprecated call #getUri() instead.*/ public String getName() { return getUri().toString(); } /** @deprecated call #get(URI,Configuration) instead. */ public static FileSystem getNamed(String name, Configuration conf) throws IOException { return get(URI.create(fixName(name)), conf); } /** Update old-format filesystem names, for back-compatibility. This should * eventually be replaced with a checkName() method that throws an exception * for old-format names. */ private static String fixName(String name) { // convert old-format name to new-format name if (name.equals("local")) { // "local" is now "file:///". LOG.warn("\"local\" is a deprecated filesystem name." 
+" Use \"file:///\" instead."); name = "file:///"; } else if (name.indexOf('/')==-1) { // unqualified is "hdfs://" LOG.warn("\""+name+"\" is a deprecated filesystem name." +" Use \"hdfs://"+name+"/\" instead."); name = "hdfs://"+name; } return name; } /** * Get the local file syste * @param conf the configuration to configure the file system with * @return a LocalFileSystem */ public static LocalFileSystem getLocal(Configuration conf) throws IOException { return (LocalFileSystem)get(LocalFileSystem.NAME, conf); } /** Returns the FileSystem for this URI's scheme and authority. The scheme * of the URI determines a configuration property name, * <tt>fs.<i>scheme</i>.class</tt> whose value names the FileSystem class. * The entire URI is passed to the FileSystem instance's initialize method. */ public static FileSystem get(URI uri, Configuration conf) throws IOException { String scheme = uri.getScheme(); String authority = uri.getAuthority(); if (scheme == null) { // no scheme: use default FS return get(conf); } if (authority == null) { // no authority URI defaultUri = getDefaultUri(conf); if (scheme.equals(defaultUri.getScheme()) // if scheme matches default && defaultUri.getAuthority() != null) { // & default has authority return get(defaultUri, conf); // return default } } String disableCacheName = String.format("fs.%s.impl.disable.cache", scheme); if (conf.getBoolean(disableCacheName, false)) { return createFileSystem(uri, conf); } return CACHE.get(uri, conf); } private static class ClientFinalizer extends Thread { public synchronized void run() { try { FileSystem.closeAll(); } catch (IOException e) { LOG.info("FileSystem.closeAll() threw an exception:\n" + e); } } } private static final ClientFinalizer clientFinalizer = new ClientFinalizer(); /** * Close all cached filesystems. Be sure those filesystems are not * used anymore. 
* * @throws IOException */ public static void closeAll() throws IOException { LOG.debug("Starting clear of FileSystem cache with " + CACHE.size() + " elements."); CACHE.closeAll(); LOG.debug("Done clearing cache"); } /** * Close all cached filesystems for a given UGI. Be sure those filesystems * are not used anymore. * @param ugi * @throws IOException */ public static void closeAllForUGI(UserGroupInformation ugi) throws IOException { CACHE.closeAll(ugi); } /** Make sure that a path specifies a FileSystem. */ public Path makeQualified(Path path) { checkPath(path); return path.makeQualified(this); } /** create a file with the provided permission * The permission of the file is set to be the provided permission as in * setPermission, not permission&~umask * * It is implemented using two RPCs. It is understood that it is inefficient, * but the implementation is thread-safe. The other option is to change the * value of umask in configuration to be 0, but it is not thread-safe. * * @param fs file system handle * @param file the name of the file to be created * @param permission the permission of the file * @return an output stream * @throws IOException */ public static FSDataOutputStream create(FileSystem fs, Path file, FsPermission permission) throws IOException { // create the file with default permission FSDataOutputStream out = fs.create(file); // set its permission to the supplied one fs.setPermission(file, permission); return out; } /** create a directory with the provided permission * The permission of the directory is set to be the provided permission as in * setPermission, not permission&~umask * * @see #create(FileSystem, Path, FsPermission) * * @param fs file system handle * @param dir the name of the directory to be created * @param permission the permission of the directory * @return true if the directory creation succeeds; false otherwise * @throws IOException */ public static boolean mkdirs(FileSystem fs, Path dir, FsPermission permission) throws 
IOException { // create the directory using the default permission boolean result = fs.mkdirs(dir); // set its permission to be the supplied one fs.setPermission(dir, permission); return result; } /////////////////////////////////////////////////////////////// // FileSystem /////////////////////////////////////////////////////////////// protected FileSystem() { super(null); } /** Check that a Path belongs to this FileSystem. */ protected void checkPath(Path path) { URI uri = path.toUri(); String thatScheme = uri.getScheme(); if (thatScheme == null) // fs is relative return; URI thisUri = getCanonicalUri(); String thisScheme = thisUri.getScheme(); //authority and scheme are not case sensitive if (thisScheme.equalsIgnoreCase(thatScheme)) {// schemes match String thisAuthority = thisUri.getAuthority(); String thatAuthority = uri.getAuthority(); if (thatAuthority == null && // path's authority is null thisAuthority != null) { // fs has an authority URI defaultUri = getDefaultUri(getConf()); if (thisScheme.equalsIgnoreCase(defaultUri.getScheme())) { uri = defaultUri; // schemes match, so use this uri instead } else { uri = null; // can't determine auth of the path } } if (uri != null) { // canonicalize uri before comparing with this fs uri = NetUtils.getCanonicalUri(uri, getDefaultPort()); thatAuthority = uri.getAuthority(); if (thisAuthority == thatAuthority || // authorities match (thisAuthority != null && thisAuthority.equalsIgnoreCase(thatAuthority))) return; } } throw new IllegalArgumentException("Wrong FS: "+path+ ", expected: "+this.getUri()); } /** * Return an array containing hostnames, offset and size of * portions of the given file. For a nonexistent * file or regions, null will be returned. * * This call is most helpful with DFS, where it returns * hostnames of machines that contain the given file. * * The FileSystem will simply return an elt containing 'localhost'. 
*/ public BlockLocation[] getFileBlockLocations(FileStatus file, long start, long len) throws IOException { if (file == null) { return null; } if ( (start<0) || (len < 0) ) { throw new IllegalArgumentException("Invalid start or len parameter"); } if (file.getLen() < start) { return new BlockLocation[0]; } String[] name = { "localhost:50010" }; String[] host = { "localhost" }; return new BlockLocation[] { new BlockLocation(name, host, 0, file.getLen()) }; } /** * Opens an FSDataInputStream at the indicated Path. * @param f the file name to open * @param bufferSize the size of the buffer to be used. */ public abstract FSDataInputStream open(Path f, int bufferSize) throws IOException; /** * Opens an FSDataInputStream at the indicated Path. * @param f the file to open */ public FSDataInputStream open(Path f) throws IOException { return open(f, getConf().getInt("io.file.buffer.size", 4096)); } /** * Opens an FSDataOutputStream at the indicated Path. * Files are overwritten by default. */ public FSDataOutputStream create(Path f) throws IOException { return create(f, true); } /** * Opens an FSDataOutputStream at the indicated Path. */ public FSDataOutputStream create(Path f, boolean overwrite) throws IOException { return create(f, overwrite, getConf().getInt("io.file.buffer.size", 4096), getDefaultReplication(), getDefaultBlockSize()); } /** * Create an FSDataOutputStream at the indicated Path with write-progress * reporting. * Files are overwritten by default. */ public FSDataOutputStream create(Path f, Progressable progress) throws IOException { return create(f, true, getConf().getInt("io.file.buffer.size", 4096), getDefaultReplication(), getDefaultBlockSize(), progress); } /** * Opens an FSDataOutputStream at the indicated Path. * Files are overwritten by default. 
*/ public FSDataOutputStream create(Path f, short replication) throws IOException { return create(f, true, getConf().getInt("io.file.buffer.size", 4096), replication, getDefaultBlockSize()); } /** * Opens an FSDataOutputStream at the indicated Path with write-progress * reporting. * Files are overwritten by default. */ public FSDataOutputStream create(Path f, short replication, Progressable progress) throws IOException { return create(f, true, getConf().getInt("io.file.buffer.size", 4096), replication, getDefaultBlockSize(), progress); } /** * Opens an FSDataOutputStream at the indicated Path. * @param f the file name to open * @param overwrite if a file with this name already exists, then if true, * the file will be overwritten, and if false an error will be thrown. * @param bufferSize the size of the buffer to be used. */ public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize ) throws IOException { return create(f, overwrite, bufferSize, getDefaultReplication(), getDefaultBlockSize()); } /** * Opens an FSDataOutputStream at the indicated Path with write-progress * reporting. * @param f the file name to open * @param overwrite if a file with this name already exists, then if true, * the file will be overwritten, and if false an error will be thrown. * @param bufferSize the size of the buffer to be used. */ public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize, Progressable progress ) throws IOException { return create(f, overwrite, bufferSize, getDefaultReplication(), getDefaultBlockSize(), progress); } /** * Opens an FSDataOutputStream at the indicated Path. * @param f the file name to open * @param overwrite if a file with this name already exists, then if true, * the file will be overwritten, and if false an error will be thrown. * @param bufferSize the size of the buffer to be used. * @param replication required block replication for the file. 
*/ public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize, short replication, long blockSize ) throws IOException { return create(f, overwrite, bufferSize, replication, blockSize, null); } /** * Opens an FSDataOutputStream at the indicated Path with write-progress * reporting. * @param f the file name to open * @param overwrite if a file with this name already exists, then if true, * the file will be overwritten, and if false an error will be thrown. * @param bufferSize the size of the buffer to be used. * @param replication required block replication for the file. */ public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress ) throws IOException { return this.create(f, FsPermission.getDefault(), overwrite, bufferSize, replication, blockSize, progress); } /** * Opens an FSDataOutputStream at the indicated Path with write-progress * reporting. * @param f the file name to open * @param permission * @param overwrite if a file with this name already exists, then if true, * the file will be overwritten, and if false an error will be thrown. * @param bufferSize the size of the buffer to be used. * @param replication required block replication for the file. * @param blockSize * @param progress * @throws IOException * @see #setPermission(Path, FsPermission) */ public abstract FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException; /** * Opens an FSDataOutputStream at the indicated Path with write-progress * reporting. Same as create(), except fails if parent directory doesn't * already exist. * @param f the file name to open * @param overwrite if a file with this name already exists, then if true, * the file will be overwritten, and if false an error will be thrown. * @param bufferSize the size of the buffer to be used. 
* @param replication required block replication for the file. * @param blockSize * @param progress * @throws IOException * @see #setPermission(Path, FsPermission) * @deprecated API only for 0.20-append */ @Deprecated public FSDataOutputStream createNonRecursive(Path f, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException { return this.createNonRecursive(f, FsPermission.getDefault(), overwrite, bufferSize, replication, blockSize, progress); } /** * Opens an FSDataOutputStream at the indicated Path with write-progress * reporting. Same as create(), except fails if parent directory doesn't * already exist. * @param f the file name to open * @param permission * @param overwrite if a file with this name already exists, then if true, * the file will be overwritten, and if false an error will be thrown. * @param bufferSize the size of the buffer to be used. * @param replication required block replication for the file. * @param blockSize * @param progress * @throws IOException * @see #setPermission(Path, FsPermission) * @deprecated API only for 0.20-append */ @Deprecated public FSDataOutputStream createNonRecursive(Path f, FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException { throw new IOException("createNonRecursive unsupported for this filesystem " + this.getClass()); } /** * Creates the given Path as a brand-new zero-length file. If * create fails, or if it already existed, return false. */ public boolean createNewFile(Path f) throws IOException { if (exists(f)) { return false; } else { create(f, false, getConf().getInt("io.file.buffer.size", 4096)).close(); return true; } } /** * Append to an existing file (optional operation). * Same as append(f, getConf().getInt("io.file.buffer.size", 4096), null) * @param f the existing file to be appended. 
* @throws IOException */ public FSDataOutputStream append(Path f) throws IOException { return append(f, getConf().getInt("io.file.buffer.size", 4096), null); } /** * Append to an existing file (optional operation). * Same as append(f, bufferSize, null). * @param f the existing file to be appended. * @param bufferSize the size of the buffer to be used. * @throws IOException */ public FSDataOutputStream append(Path f, int bufferSize) throws IOException { return append(f, bufferSize, null); } /** * Append to an existing file (optional operation). * @param f the existing file to be appended. * @param bufferSize the size of the buffer to be used. * @param progress for reporting progress if it is not null. * @throws IOException */ public abstract FSDataOutputStream append(Path f, int bufferSize, Progressable progress) throws IOException; /** * Get replication. * * @deprecated Use getFileStatus() instead * @param src file name * @return file replication * @throws IOException */ @Deprecated public short getReplication(Path src) throws IOException { return getFileStatus(src).getReplication(); } /** * Set replication for an existing file. * * @param src file name * @param replication new replication * @throws IOException * @return true if successful; * false if file does not exist or is a directory */ public boolean setReplication(Path src, short replication) throws IOException { return true; } /** * Renames Path src to Path dst. Can take place on local fs * or remote DFS. */ public abstract boolean rename(Path src, Path dst) throws IOException; /** Delete a file. */ /** @deprecated Use delete(Path, boolean) instead */ @Deprecated public abstract boolean delete(Path f) throws IOException; /** Delete a file. * * @param f the path to delete. * @param recursive if path is a directory and set to * true, the directory is deleted else throws an exception. In * case of a file the recursive can be set to either true or false. * @return true if delete is successful else false. 
* @throws IOException */ public abstract boolean delete(Path f, boolean recursive) throws IOException; /** * Mark a path to be deleted when FileSystem is closed. * When the JVM shuts down, * all FileSystem objects will be closed automatically. * Then, * the marked path will be deleted as a result of closing the FileSystem. * * The path has to exist in the file system. * * @param f the path to delete. * @return true if deleteOnExit is successful, otherwise false. * @throws IOException */ public boolean deleteOnExit(Path f) throws IOException { if (!exists(f)) { return false; } synchronized (deleteOnExit) { deleteOnExit.add(f); } return true; } /** * Delete all files that were marked as delete-on-exit. This recursively * deletes all files in the specified paths. */ protected void processDeleteOnExit() { synchronized (deleteOnExit) { for (Iterator<Path> iter = deleteOnExit.iterator(); iter.hasNext();) { Path path = iter.next(); try { delete(path, true); } catch (IOException e) { LOG.info("Ignoring failure to deleteOnExit for path " + path); } iter.remove(); } } } /** Check if exists. * @param f source file */ public boolean exists(Path f) throws IOException { try { return getFileStatus(f) != null; } catch (FileNotFoundException e) { return false; } } /** True iff the named path is a directory. */ /** @deprecated Use getFileStatus() instead */ @Deprecated public boolean isDirectory(Path f) throws IOException { try { return getFileStatus(f).isDir(); } catch (FileNotFoundException e) { return false; // f does not exist } } /** True iff the named path is a regular file. */ public boolean isFile(Path f) throws IOException { try { return !getFileStatus(f).isDir(); } catch (FileNotFoundException e) { return false; // f does not exist } } /** The number of bytes in a file. 
*/ /** @deprecated Use getFileStatus() instead */ @Deprecated public long getLength(Path f) throws IOException { return getFileStatus(f).getLen(); } /** Return the {@link ContentSummary} of a given {@link Path}. */ public ContentSummary getContentSummary(Path f) throws IOException { FileStatus status = getFileStatus(f); if (!status.isDir()) { // f is a file return new ContentSummary(status.getLen(), 1, 0); } // f is a directory long[] summary = {0, 0, 1}; for(FileStatus s : listStatus(f)) { ContentSummary c = s.isDir() ? getContentSummary(s.getPath()) : new ContentSummary(s.getLen(), 1, 0); summary[0] += c.getLength(); summary[1] += c.getFileCount(); summary[2] += c.getDirectoryCount(); } return new ContentSummary(summary[0], summary[1], summary[2]); } final private static PathFilter DEFAULT_FILTER = new PathFilter() { public boolean accept(Path file) { return true; } }; /** * List the statuses of the files/directories in the given path if the path is * a directory. * * @param f * given path * @return the statuses of the files/directories in the given patch * returns null, if Path f does not exist in the FileSystem * @throws IOException */ public abstract FileStatus[] listStatus(Path f) throws IOException; /* * Filter files/directories in the given path using the user-supplied path * filter. Results are added to the given array <code>results</code>. */ private void listStatus(ArrayList<FileStatus> results, Path f, PathFilter filter) throws IOException { FileStatus listing[] = listStatus(f); if (listing != null) { for (int i = 0; i < listing.length; i++) { if (filter.accept(listing[i].getPath())) { results.add(listing[i]); } } } } /** * Filter files/directories in the given path using the user-supplied path * filter. 
* * @param f * a path name * @param filter * the user-supplied path filter * @return an array of FileStatus objects for the files under the given path * after applying the filter * @throws IOException * if encounter any problem while fetching the status */ public FileStatus[] listStatus(Path f, PathFilter filter) throws IOException { ArrayList<FileStatus> results = new ArrayList<FileStatus>(); listStatus(results, f, filter); return results.toArray(new FileStatus[results.size()]); } /** * Filter files/directories in the given list of paths using default * path filter. * * @param files * a list of paths * @return a list of statuses for the files under the given paths after * applying the filter default Path filter * @exception IOException */ public FileStatus[] listStatus(Path[] files) throws IOException { return listStatus(files, DEFAULT_FILTER); } /** * Filter files/directories in the given list of paths using user-supplied * path filter. * * @param files * a list of paths * @param filter * the user-supplied path filter * @return a list of statuses for the files under the given paths after * applying the filter * @exception IOException */ public FileStatus[] listStatus(Path[] files, PathFilter filter) throws IOException { ArrayList<FileStatus> results = new ArrayList<FileStatus>(); for (int i = 0; i < files.length; i++) { listStatus(results, files[i], filter); } return results.toArray(new FileStatus[results.size()]); } /** * <p>Return all the files that match filePattern and are not checksum * files. Results are sorted by their names. * * <p> * A filename pattern is composed of <i>regular</i> characters and * <i>special pattern matching</i> characters, which are: * * <dl> * <dd> * <dl> * <p> * <dt> <tt> ? </tt> * <dd> Matches any single character. * * <p> * <dt> <tt> * </tt> * <dd> Matches zero or more characters. * * <p> * <dt> <tt> [<i>abc</i>] </tt> * <dd> Matches a single character from character set * <tt>{<i>a,b,c</i>}</tt>. 
* * <p> * <dt> <tt> [<i>a</i>-<i>b</i>] </tt> * <dd> Matches a single character from the character range * <tt>{<i>a...b</i>}</tt>. Note that character <tt><i>a</i></tt> must be * lexicographically less than or equal to character <tt><i>b</i></tt>. * * <p> * <dt> <tt> [^<i>a</i>] </tt> * <dd> Matches a single character that is not from character set or range * <tt>{<i>a</i>}</tt>. Note that the <tt>^</tt> character must occur * immediately to the right of the opening bracket. * * <p> * <dt> <tt> \<i>c</i> </tt> * <dd> Removes (escapes) any special meaning of character <i>c</i>. * * <p> * <dt> <tt> {ab,cd} </tt> * <dd> Matches a string from the string set <tt>{<i>ab, cd</i>} </tt> * * <p> * <dt> <tt> {ab,c{de,fh}} </tt> * <dd> Matches a string from the string set <tt>{<i>ab, cde, cfh</i>}</tt> * * </dl> * </dd> * </dl> * * @param pathPattern a regular expression specifying a pth pattern * @return an array of paths that match the path pattern * @throws IOException */ public FileStatus[] globStatus(Path pathPattern) throws IOException { return globStatus(pathPattern, DEFAULT_FILTER); } /** * Return an array of FileStatus objects whose path names match pathPattern * and is accepted by the user-supplied path filter. Results are sorted by * their path names. * Return null if pathPattern has no glob and the path does not exist. * Return an empty array if pathPattern has a glob and no path matches it. 
* * @param pathPattern * a regular expression specifying the path pattern * @param filter * a user-supplied path filter * @return an array of FileStatus objects * @throws IOException if any I/O error occurs when fetching file status */ public FileStatus[] globStatus(Path pathPattern, PathFilter filter) throws IOException { String filename = pathPattern.toUri().getPath(); List<String> filePatterns = GlobExpander.expand(filename); if (filePatterns.size() == 1) { return globStatusInternal(pathPattern, filter); } else { List<FileStatus> results = new ArrayList<FileStatus>(); for (String filePattern : filePatterns) { FileStatus[] files = globStatusInternal(new Path(filePattern), filter); for (FileStatus file : files) { results.add(file); } } return results.toArray(new FileStatus[results.size()]); } } private FileStatus[] globStatusInternal(Path pathPattern, PathFilter filter) throws IOException { Path[] parents = new Path[1]; int level = 0; String filename = pathPattern.toUri().getPath(); // path has only zero component if ("".equals(filename) || Path.SEPARATOR.equals(filename)) { return getFileStatus(new Path[]{pathPattern}); } // path has at least one component String[] components = filename.split(Path.SEPARATOR); // get the first component if (pathPattern.isAbsolute()) { parents[0] = new Path(Path.SEPARATOR); level = 1; } else { parents[0] = new Path(Path.CUR_DIR); } // glob the paths that match the parent path, i.e., [0, components.length-1] boolean[] hasGlob = new boolean[]{false}; Path[] parentPaths = globPathsLevel(parents, components, level, hasGlob); FileStatus[] results; if (parentPaths == null || parentPaths.length == 0) { results = null; } else { // Now work on the last component of the path GlobFilter fp = new GlobFilter(components[components.length - 1], filter); if (fp.hasPattern()) { // last component has a pattern // list parent directories and then glob the results results = listStatus(parentPaths, fp); hasGlob[0] = true; } else { // last component 
does not have a pattern // get all the path names ArrayList<Path> filteredPaths = new ArrayList<Path>(parentPaths.length); for (int i = 0; i < parentPaths.length; i++) { parentPaths[i] = new Path(parentPaths[i], components[components.length - 1]); if (fp.accept(parentPaths[i])) { filteredPaths.add(parentPaths[i]); } } // get all their statuses results = getFileStatus( filteredPaths.toArray(new Path[filteredPaths.size()])); } } // Decide if the pathPattern contains a glob or not if (results == null) { if (hasGlob[0]) { results = new FileStatus[0]; } } else { if (results.length == 0 ) { if (!hasGlob[0]) { results = null; } } else { Arrays.sort(results); } } return results; } /* * For a path of N components, return a list of paths that match the * components [<code>level</code>, <code>N-1</code>]. */ private Path[] globPathsLevel(Path[] parents, String[] filePattern, int level, boolean[] hasGlob) throws IOException { if (level == filePattern.length - 1) return parents; if (parents == null || parents.length == 0) { return null; } GlobFilter fp = new GlobFilter(filePattern[level]); if (fp.hasPattern()) { parents = FileUtil.stat2Paths(listStatus(parents, fp)); hasGlob[0] = true; } else { for (int i = 0; i < parents.length; i++) { parents[i] = new Path(parents[i], filePattern[level]); } } return globPathsLevel(parents, filePattern, level + 1, hasGlob); } /** Return the current user's home directory in this filesystem. * The default implementation returns "/user/$USER/". */ public Path getHomeDirectory() { return new Path("/user/"+System.getProperty("user.name")) .makeQualified(this); } /** * Get a new delegation token for this file system. * @param renewer the account name that is allowed to renew the token. * @return a new delegation token * @throws IOException */ public Token<?> getDelegationToken(String renewer) throws IOException { return null; } /** * Set the current working directory for the given file system. All relative * paths will be resolved relative to it. 
* * @param new_dir */ public abstract void setWorkingDirectory(Path new_dir); /** * Get the current working directory for the given file system * @return the directory pathname */ public abstract Path getWorkingDirectory(); /** * Call {@link #mkdirs(Path, FsPermission)} with default permission. */ public boolean mkdirs(Path f) throws IOException { return mkdirs(f, FsPermission.getDefault()); } /** * Make the given file and all non-existent parents into * directories. Has the semantics of Unix 'mkdir -p'. * Existence of the directory hierarchy is not an error. */ public abstract boolean mkdirs(Path f, FsPermission permission ) throws IOException; /** * The src file is on the local disk. Add it to FS at * the given dst name and the source is kept intact afterwards */ public void copyFromLocalFile(Path src, Path dst) throws IOException { copyFromLocalFile(false, src, dst); } /** * The src files is on the local disk. Add it to FS at * the given dst name, removing the source afterwards. */ public void moveFromLocalFile(Path[] srcs, Path dst) throws IOException { copyFromLocalFile(true, true, srcs, dst); } /** * The src file is on the local disk. Add it to FS at * the given dst name, removing the source afterwards. */ public void moveFromLocalFile(Path src, Path dst) throws IOException { copyFromLocalFile(true, src, dst); } /** * The src file is on the local disk. Add it to FS at * the given dst name. * delSrc indicates if the source should be removed */ public void copyFromLocalFile(boolean delSrc, Path src, Path dst) throws IOException { copyFromLocalFile(delSrc, true, src, dst); } /** * The src files are on the local disk. Add it to FS at * the given dst name. * delSrc indicates if the source should be removed */ public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path[] srcs, Path dst) throws IOException { Configuration conf = getConf(); FileUtil.copy(getLocal(conf), srcs, this, dst, delSrc, overwrite, conf); } /** * The src file is on the local disk. 
Add it to FS at * the given dst name. * delSrc indicates if the source should be removed */ public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, Path dst) throws IOException { Configuration conf = getConf(); FileUtil.copy(getLocal(conf), src, this, dst, delSrc, overwrite, conf); } /** * The src file is under FS, and the dst is on the local disk. * Copy it from FS control to the local dst name. */ public void copyToLocalFile(Path src, Path dst) throws IOException { copyToLocalFile(false, src, dst); } /** * The src file is under FS, and the dst is on the local disk. * Copy it from FS control to the local dst name. * Remove the source afterwards */ public void moveToLocalFile(Path src, Path dst) throws IOException { copyToLocalFile(true, src, dst); } /** * The src file is under FS, and the dst is on the local disk. * Copy it from FS control to the local dst name. * delSrc indicates if the src will be removed or not. */ public void copyToLocalFile(boolean delSrc, Path src, Path dst) throws IOException { FileUtil.copy(this, src, getLocal(getConf()), dst, delSrc, getConf()); } /** * Returns a local File that the user can write output to. The caller * provides both the eventual FS target name and the local working * file. If the FS is local, we write directly into the target. If * the FS is remote, we write into the tmp local area. */ public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile) throws IOException { return tmpLocalFile; } /** * Called when we're all done writing to the target. A local FS will * do nothing, because we've written to exactly the right place. A remote * FS will copy the contents of tmpLocalFile to the correct target at * fsOutputFile. */ public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile) throws IOException { moveFromLocalFile(tmpLocalFile, fsOutputFile); } /** * No more filesystem operations are needed. Will * release any held locks. 
*/ public void close() throws IOException { // delete all files that were marked as delete-on-exit. processDeleteOnExit(); CACHE.remove(this.key, this); LOG.debug("Removing filesystem for " + getUri()); } /** Return the total size of all files in the filesystem.*/ public long getUsed() throws IOException{ long used = 0; FileStatus[] files = listStatus(new Path("/")); for(FileStatus file:files){ used += file.getLen(); } return used; } /** * Get the block size for a particular file. * @param f the filename * @return the number of bytes in a block */ /** @deprecated Use getFileStatus() instead */ @Deprecated public long getBlockSize(Path f) throws IOException { return getFileStatus(f).getBlockSize(); } /** Return the number of bytes that large input files should be optimally * be split into to minimize i/o time. */ public long getDefaultBlockSize() { // default to 32MB: large enough to minimize the impact of seeks return getConf().getLong("fs.local.block.size", 32 * 1024 * 1024); } /** * Get the default replication. */ public short getDefaultReplication() { return 1; } /** * Return a file status object that represents the path. * @param f The path we want information from * @return a FileStatus object * @throws FileNotFoundException when the path does not exist; * IOException see specific implementation */ public abstract FileStatus getFileStatus(Path f) throws IOException; /** * Get the checksum of a file. * * @param f The file path * @return The file checksum. The default return value is null, * which indicates that no checksum algorithm is implemented * in the corresponding FileSystem. */ public FileChecksum getFileChecksum(Path f) throws IOException { return null; } /** * Set the verify checksum flag. This is only applicable if the * corresponding FileSystem supports checksum. By default doesn't do anything. 
* @param verifyChecksum */ public void setVerifyChecksum(boolean verifyChecksum) { //doesn't do anything } /** * Return a list of file status objects that corresponds to the list of paths * excluding those non-existent paths. * * @param paths * the list of paths we want information from * @return a list of FileStatus objects * @throws IOException * see specific implementation */ private FileStatus[] getFileStatus(Path[] paths) throws IOException { if (paths == null) { return null; } ArrayList<FileStatus> results = new ArrayList<FileStatus>(paths.length); for (int i = 0; i < paths.length; i++) { try { results.add(getFileStatus(paths[i])); } catch (FileNotFoundException e) { // do nothing } } return results.toArray(new FileStatus[results.size()]); } /** * Set permission of a path. * @param p * @param permission */ public void setPermission(Path p, FsPermission permission ) throws IOException { } /** * Set owner of a path (i.e. a file or a directory). * The parameters username and groupname cannot both be null. * @param p The path * @param username If it is null, the original username remains unchanged. * @param groupname If it is null, the original groupname remains unchanged. */ public void setOwner(Path p, String username, String groupname ) throws IOException { } /** * Set access time of a file * @param p The path * @param mtime Set the modification time of this file. * The number of milliseconds since Jan 1, 1970. * A value of -1 means that this call should not set modification time. * @param atime Set the access time of this file. * The number of milliseconds since Jan 1, 1970. * A value of -1 means that this call should not set access time. */ public void setTimes(Path p, long mtime, long atime ) throws IOException { } private static FileSystem createFileSystem(URI uri, Configuration conf ) throws IOException { Class<?> clazz = conf.getClass("fs." 
+ uri.getScheme() + ".impl", null); LOG.debug("Creating filesystem for " + uri); if (clazz == null) { throw new IOException("No FileSystem for scheme: " + uri.getScheme()); } FileSystem fs = (FileSystem)ReflectionUtils.newInstance(clazz, conf); fs.initialize(uri, conf); return fs; } /** Caching FileSystem objects */ static class Cache { private final Map<Key, FileSystem> map = new HashMap<Key, FileSystem>(); FileSystem get(URI uri, Configuration conf) throws IOException{ Key key = new Key(uri, conf); FileSystem fs = null; synchronized (this) { fs = map.get(key); } if (fs != null) { return fs; } fs = createFileSystem(uri, conf); synchronized (this) { // refetch the lock again FileSystem oldfs = map.get(key); if (oldfs != null) { // a file system is created while lock is releasing fs.close(); // close the new file system return oldfs; // return the old file system } // now insert the new file system into the map if (map.isEmpty() && !clientFinalizer.isAlive()) { Runtime.getRuntime().addShutdownHook(clientFinalizer); } fs.key = key; map.put(key, fs); return fs; } } synchronized void remove(Key key, FileSystem fs) { if (map.containsKey(key) && fs == map.get(key)) { map.remove(key); if (map.isEmpty() && !clientFinalizer.isAlive()) { if (!Runtime.getRuntime().removeShutdownHook(clientFinalizer)) { LOG.info("Could not cancel cleanup thread, though no " + "FileSystems are open"); } } } } synchronized void closeAll() throws IOException { List<IOException> exceptions = new ArrayList<IOException>(); for(; !map.isEmpty(); ) { Map.Entry<Key, FileSystem> e = map.entrySet().iterator().next(); final Key key = e.getKey(); final FileSystem fs = e.getValue(); //remove from cache remove(key, fs); if (fs != null) { try { fs.close(); } catch(IOException ioe) { exceptions.add(ioe); } } } if (!exceptions.isEmpty()) { throw MultipleIOException.createIOException(exceptions); } } synchronized void closeAll(UserGroupInformation ugi) throws IOException { List<FileSystem> targetFSList = new 
ArrayList<FileSystem>(); //Make a pass over the list and collect the filesystems to close //we cannot close inline since close() removes the entry from the Map for (Map.Entry<Key, FileSystem> entry : map.entrySet()) { final Key key = entry.getKey(); final FileSystem fs = entry.getValue(); if (ugi.equals(key.ugi) && fs != null) { targetFSList.add(fs); } } List<IOException> exceptions = new ArrayList<IOException>(); //now make a pass over the target list and close each for (FileSystem fs : targetFSList) { try { fs.close(); } catch(IOException ioe) { exceptions.add(ioe); } } if (!exceptions.isEmpty()) { throw MultipleIOException.createIOException(exceptions); } } /** FileSystem.Cache.Key */ static class Key { final String scheme; final String authority; final UserGroupInformation ugi; Key(URI uri, Configuration conf) throws IOException { scheme = uri.getScheme()==null?"":uri.getScheme().toLowerCase(); authority = uri.getAuthority()==null?"":uri.getAuthority().toLowerCase(); this.ugi = UserGroupInformation.getCurrentUser(); } /** {@inheritDoc} */ public int hashCode() { return (scheme + authority).hashCode() + ugi.hashCode(); } static boolean isEqual(Object a, Object b) { return a == b || (a != null && a.equals(b)); } /** {@inheritDoc} */ public boolean equals(Object obj) { if (obj == this) { return true; } if (obj != null && obj instanceof Key) { Key that = (Key)obj; return isEqual(this.scheme, that.scheme) && isEqual(this.authority, that.authority) && isEqual(this.ugi, that.ugi); } return false; } /** {@inheritDoc} */ public String toString() { return "("+ugi.toString() + ")@" + scheme + "://" + authority; } } /** * Get the number of file systems in the cache. 
* @return the number of cached file systems */ int size() { return map.size(); } } public static final class Statistics { private final String scheme; private AtomicLong bytesRead = new AtomicLong(); private AtomicLong bytesWritten = new AtomicLong(); private AtomicInteger readOps = new AtomicInteger(); private AtomicInteger largeReadOps = new AtomicInteger(); private AtomicInteger writeOps = new AtomicInteger(); public Statistics(String scheme) { this.scheme = scheme; } /** * Increment the bytes read in the statistics * @param newBytes the additional bytes read */ public void incrementBytesRead(long newBytes) { bytesRead.getAndAdd(newBytes); } /** * Increment the bytes written in the statistics * @param newBytes the additional bytes written */ public void incrementBytesWritten(long newBytes) { bytesWritten.getAndAdd(newBytes); } /** * Increment the number of read operations * @param count number of read operations */ public void incrementReadOps(int count) { readOps.getAndAdd(count); } /** * Increment the number of large read operations * @param count number of large read operations */ public void incrementLargeReadOps(int count) { largeReadOps.getAndAdd(count); } /** * Increment the number of write operations * @param count number of write operations */ public void incrementWriteOps(int count) { writeOps.getAndAdd(count); } /** * Get the total number of bytes read * @return the number of bytes */ public long getBytesRead() { return bytesRead.get(); } /** * Get the total number of bytes written * @return the number of bytes */ public long getBytesWritten() { return bytesWritten.get(); } /** * Get the number of file system read operations such as list files * @return number of read operations */ public int getReadOps() { return readOps.get() + largeReadOps.get(); } /** * Get the number of large file system read operations such as list files * under a large directory * @return number of large read operations */ public int getLargeReadOps() { return 
largeReadOps.get(); } /** * Get the number of file system write operations such as create, append * rename etc. * @return number of write operations */ public int getWriteOps() { return writeOps.get(); } public String toString() { return bytesRead + " bytes read, " + bytesWritten + " bytes written, " + readOps + " read ops, " + largeReadOps + " large read ops, " + writeOps + " write ops"; } /** * Reset the counts of bytes to 0. */ public void reset() { bytesWritten.set(0); bytesRead.set(0); } /** * Get the uri scheme associated with this statistics object. * @return the schema associated with this set of statistics */ public String getScheme() { return scheme; } } /** * Get the Map of Statistics object indexed by URI Scheme. * @return a Map having a key as URI scheme and value as Statistics object * @deprecated use {@link #getAllStatistics} instead */ public static synchronized Map<String, Statistics> getStatistics() { Map<String, Statistics> result = new HashMap<String, Statistics>(); for(Statistics stat: statisticsTable.values()) { result.put(stat.getScheme(), stat); } return result; } /** * Return the FileSystem classes that have Statistics */ public static synchronized List<Statistics> getAllStatistics() { return new ArrayList<Statistics>(statisticsTable.values()); } /** * Get the statistics for a particular file system * @param cls the class to lookup * @return a statistics object */ public static synchronized Statistics getStatistics(String scheme, Class<? extends FileSystem> cls) { Statistics result = statisticsTable.get(cls); if (result == null) { result = new Statistics(scheme); statisticsTable.put(cls, result); } return result; } public static synchronized void clearStatistics() { for(Statistics stat: statisticsTable.values()) { stat.reset(); } } public static synchronized void printStatistics() throws IOException { for (Map.Entry<Class<? 
extends FileSystem>, Statistics> pair: statisticsTable.entrySet()) { System.out.println(" FileSystem " + pair.getKey().getName() + ": " + pair.getValue()); } } }
apache-2.0
siosio/intellij-community
java/java-tests/testData/inspection/stringRepeat/afterRepeatDiffKnown.java
235
// "Replace with 'String.repeat()'" "true" class Test { String testRepeat(String s, StringBuilder sb, int digits) { if ((s.length() < digits) && (sb.length() > 0)) { sb.append("0".repeat(digits - s.length())); } } }
apache-2.0
corochoone/elasticsearch
src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java
14815
/* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch licenses this file to you under * the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.elasticsearch.common.lucene.search.function; import org.apache.lucene.index.AtomicReaderContext; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; import org.apache.lucene.search.*; import org.apache.lucene.util.Bits; import org.apache.lucene.util.ToStringUtils; import org.elasticsearch.common.lucene.docset.DocIdSets; import java.io.IOException; import java.util.*; /** * A query that allows for a pluggable boost function / filter. If it matches * the filter, it will be boosted by the formula. */ public class FiltersFunctionScoreQuery extends Query { public static class FilterFunction { public final Filter filter; public final ScoreFunction function; public FilterFunction(Filter filter, ScoreFunction function) { this.filter = filter; this.function = function; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; FilterFunction that = (FilterFunction) o; if (filter != null ? !filter.equals(that.filter) : that.filter != null) return false; if (function != null ? 
!function.equals(that.function) : that.function != null) return false; return true; } @Override public int hashCode() { int result = filter != null ? filter.hashCode() : 0; result = 31 * result + (function != null ? function.hashCode() : 0); return result; } } public static enum ScoreMode { First, Avg, Max, Sum, Min, Multiply } Query subQuery; final FilterFunction[] filterFunctions; final ScoreMode scoreMode; final float maxBoost; protected CombineFunction combineFunction; public FiltersFunctionScoreQuery(Query subQuery, ScoreMode scoreMode, FilterFunction[] filterFunctions, float maxBoost) { this.subQuery = subQuery; this.scoreMode = scoreMode; this.filterFunctions = filterFunctions; this.maxBoost = maxBoost; combineFunction = CombineFunction.MULT; } public FiltersFunctionScoreQuery setCombineFunction(CombineFunction combineFunction) { this.combineFunction = combineFunction; return this; } public Query getSubQuery() { return subQuery; } public FilterFunction[] getFilterFunctions() { return filterFunctions; } @Override public Query rewrite(IndexReader reader) throws IOException { Query newQ = subQuery.rewrite(reader); if (newQ == subQuery) return this; FiltersFunctionScoreQuery bq = (FiltersFunctionScoreQuery) this.clone(); bq.subQuery = newQ; return bq; } @Override public void extractTerms(Set<Term> terms) { subQuery.extractTerms(terms); } @Override public Weight createWeight(IndexSearcher searcher) throws IOException { Weight subQueryWeight = subQuery.createWeight(searcher); return new CustomBoostFactorWeight(subQueryWeight, filterFunctions.length); } class CustomBoostFactorWeight extends Weight { final Weight subQueryWeight; final Bits[] docSets; public CustomBoostFactorWeight(Weight subQueryWeight, int filterFunctionLength) throws IOException { this.subQueryWeight = subQueryWeight; this.docSets = new Bits[filterFunctionLength]; } public Query getQuery() { return FiltersFunctionScoreQuery.this; } @Override public float getValueForNormalization() throws 
IOException { float sum = subQueryWeight.getValueForNormalization(); sum *= getBoost() * getBoost(); return sum; } @Override public void normalize(float norm, float topLevelBoost) { subQueryWeight.normalize(norm, topLevelBoost * getBoost()); } @Override public Scorer scorer(AtomicReaderContext context, Bits acceptDocs) throws IOException { // we ignore scoreDocsInOrder parameter, because we need to score in // order if documents are scored with a script. The // ShardLookup depends on in order scoring. Scorer subQueryScorer = subQueryWeight.scorer(context, acceptDocs); if (subQueryScorer == null) { return null; } for (int i = 0; i < filterFunctions.length; i++) { FilterFunction filterFunction = filterFunctions[i]; filterFunction.function.setNextReader(context); docSets[i] = DocIdSets.toSafeBits(context.reader(), filterFunction.filter.getDocIdSet(context, acceptDocs)); } return new CustomBoostFactorScorer(this, subQueryScorer, scoreMode, filterFunctions, maxBoost, docSets, combineFunction); } @Override public Explanation explain(AtomicReaderContext context, int doc) throws IOException { Explanation subQueryExpl = subQueryWeight.explain(context, doc); if (!subQueryExpl.isMatch()) { return subQueryExpl; } // First: Gather explanations for all filters List<ComplexExplanation> filterExplanations = new ArrayList<>(); for (FilterFunction filterFunction : filterFunctions) { Bits docSet = DocIdSets.toSafeBits(context.reader(), filterFunction.filter.getDocIdSet(context, context.reader().getLiveDocs())); if (docSet.get(doc)) { filterFunction.function.setNextReader(context); Explanation functionExplanation = filterFunction.function.explainScore(doc, subQueryExpl); double factor = functionExplanation.getValue(); float sc = CombineFunction.toFloat(factor); ComplexExplanation filterExplanation = new ComplexExplanation(true, sc, "function score, product of:"); filterExplanation.addDetail(new Explanation(1.0f, "match filter: " + filterFunction.filter.toString())); 
filterExplanation.addDetail(functionExplanation); filterExplanations.add(filterExplanation); } } if (filterExplanations.size() == 0) { float sc = getBoost() * subQueryExpl.getValue(); Explanation res = new ComplexExplanation(true, sc, "function score, no filter match, product of:"); res.addDetail(subQueryExpl); res.addDetail(new Explanation(getBoost(), "queryBoost")); return res; } // Second: Compute the factor that would have been computed by the // filters double factor = 1.0; switch (scoreMode) { case First: factor = filterExplanations.get(0).getValue(); break; case Max: factor = Double.NEGATIVE_INFINITY; for (int i = 0; i < filterExplanations.size(); i++) { factor = Math.max(filterExplanations.get(i).getValue(), factor); } break; case Min: factor = Double.POSITIVE_INFINITY; for (int i = 0; i < filterExplanations.size(); i++) { factor = Math.min(filterExplanations.get(i).getValue(), factor); } break; case Multiply: for (int i = 0; i < filterExplanations.size(); i++) { factor *= filterExplanations.get(i).getValue(); } break; default: // Avg / Total double totalFactor = 0.0f; int count = 0; for (int i = 0; i < filterExplanations.size(); i++) { totalFactor += filterExplanations.get(i).getValue(); count++; } if (count != 0) { factor = totalFactor; if (scoreMode == ScoreMode.Avg) { factor /= count; } } } ComplexExplanation factorExplanaition = new ComplexExplanation(true, CombineFunction.toFloat(factor), "function score, score mode [" + scoreMode.toString().toLowerCase(Locale.ROOT) + "]"); for (int i = 0; i < filterExplanations.size(); i++) { factorExplanaition.addDetail(filterExplanations.get(i)); } return combineFunction.explain(getBoost(), subQueryExpl, factorExplanaition, maxBoost); } } static class CustomBoostFactorScorer extends Scorer { private final float subQueryBoost; private final Scorer scorer; private final FilterFunction[] filterFunctions; private final ScoreMode scoreMode; private final float maxBoost; private final Bits[] docSets; private final 
CombineFunction scoreCombiner; private CustomBoostFactorScorer(CustomBoostFactorWeight w, Scorer scorer, ScoreMode scoreMode, FilterFunction[] filterFunctions, float maxBoost, Bits[] docSets, CombineFunction scoreCombiner) throws IOException { super(w); this.subQueryBoost = w.getQuery().getBoost(); this.scorer = scorer; this.scoreMode = scoreMode; this.filterFunctions = filterFunctions; this.maxBoost = maxBoost; this.docSets = docSets; this.scoreCombiner = scoreCombiner; } @Override public int docID() { return scorer.docID(); } @Override public int advance(int target) throws IOException { return scorer.advance(target); } @Override public int nextDoc() throws IOException { return scorer.nextDoc(); } @Override public float score() throws IOException { int docId = scorer.docID(); double factor = 1.0f; float subQueryScore = scorer.score(); if (scoreMode == ScoreMode.First) { for (int i = 0; i < filterFunctions.length; i++) { if (docSets[i].get(docId)) { factor = filterFunctions[i].function.score(docId, subQueryScore); break; } } } else if (scoreMode == ScoreMode.Max) { double maxFactor = Double.NEGATIVE_INFINITY; for (int i = 0; i < filterFunctions.length; i++) { if (docSets[i].get(docId)) { maxFactor = Math.max(filterFunctions[i].function.score(docId, subQueryScore), maxFactor); } } if (maxFactor != Float.NEGATIVE_INFINITY) { factor = maxFactor; } } else if (scoreMode == ScoreMode.Min) { double minFactor = Double.POSITIVE_INFINITY; for (int i = 0; i < filterFunctions.length; i++) { if (docSets[i].get(docId)) { minFactor = Math.min(filterFunctions[i].function.score(docId, subQueryScore), minFactor); } } if (minFactor != Float.POSITIVE_INFINITY) { factor = minFactor; } } else if (scoreMode == ScoreMode.Multiply) { for (int i = 0; i < filterFunctions.length; i++) { if (docSets[i].get(docId)) { factor *= filterFunctions[i].function.score(docId, subQueryScore); } } } else { // Avg / Total double totalFactor = 0.0f; int count = 0; for (int i = 0; i < filterFunctions.length; 
i++) { if (docSets[i].get(docId)) { totalFactor += filterFunctions[i].function.score(docId, subQueryScore); count++; } } if (count != 0) { factor = totalFactor; if (scoreMode == ScoreMode.Avg) { factor /= count; } } } return scoreCombiner.combine(subQueryBoost, subQueryScore, factor, maxBoost); } @Override public int freq() throws IOException { return scorer.freq(); } @Override public long cost() { return scorer.cost(); } } public String toString(String field) { StringBuilder sb = new StringBuilder(); sb.append("function score (").append(subQuery.toString(field)).append(", functions: ["); for (FilterFunction filterFunction : filterFunctions) { sb.append("{filter(").append(filterFunction.filter).append("), function [").append(filterFunction.function).append("]}"); } sb.append("])"); sb.append(ToStringUtils.boost(getBoost())); return sb.toString(); } public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; FiltersFunctionScoreQuery other = (FiltersFunctionScoreQuery) o; if (this.getBoost() != other.getBoost()) return false; if (!this.subQuery.equals(other.subQuery)) { return false; } return Arrays.equals(this.filterFunctions, other.filterFunctions); } public int hashCode() { return subQuery.hashCode() + 31 * Arrays.hashCode(filterFunctions) ^ Float.floatToIntBits(getBoost()); } }
apache-2.0
FauxFaux/jdk9-jdk
test/java/net/Socks/SocksIPv6Test.java
6314
/* * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ /* @test * @bug 7100957 * @modules jdk.httpserver * @summary Java doesn't correctly handle the SOCKS protocol when used over IPv6. 
* @run testng SocksIPv6Test */ import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.OutputStreamWriter; import java.net.Authenticator; import java.net.InetSocketAddress; import java.net.URL; import java.net.Proxy; import java.lang.Override; import java.net.InetAddress; import java.net.Inet6Address; import java.net.ServerSocket; import java.net.SocketException; import java.net.NetworkInterface; import java.net.UnknownHostException; import java.util.Collections; import java.util.List; import com.sun.net.httpserver.*; import java.io.BufferedWriter; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; import static org.testng.Assert.*; public class SocksIPv6Test { private HttpServer server; private SocksServer socks; private String response = "Hello."; private static boolean shouldRun = false; @BeforeClass public void setUp() throws Exception { shouldRun = ensureInet6AddressFamily() && ensureIPv6OnLoopback(); server = HttpServer.create(new InetSocketAddress(0), 0); server.createContext("/", ex -> { ex.sendResponseHeaders(200, response.length()); try (BufferedWriter writer = new BufferedWriter( new OutputStreamWriter(ex.getResponseBody(), "UTF-8"))) { writer.write(response); } ex.close(); }); server.start(); socks = new SocksServer(0, false); socks.addUser("user", "pass"); socks.start(); Authenticator.setDefault(new Authenticator() { @Override protected java.net.PasswordAuthentication getPasswordAuthentication() { return new java.net.PasswordAuthentication( "user", "pass".toCharArray()); } }); } private boolean ensureIPv6OnLoopback() throws Exception { boolean ipv6 = false; List<NetworkInterface> nics = Collections.list(NetworkInterface.getNetworkInterfaces()); for (NetworkInterface nic : nics) { if (!nic.isLoopback()) { continue; } List<InetAddress> addrs = Collections.list(nic.getInetAddresses()); for (InetAddress addr 
: addrs) { if (addr instanceof Inet6Address) { ipv6 = true; break; } } } if (!ipv6) System.out.println("IPv6 is not enabled on loopback. Skipping test suite."); return ipv6; } private boolean ensureInet6AddressFamily() throws IOException { try (ServerSocket s = new ServerSocket()) { s.bind(new InetSocketAddress("::1", 0)); return true; } catch (SocketException e) { System.out.println("Inet 6 address family is not available. Skipping test suite."); } return false; } @Test(groups = "unit") public void testSocksOverIPv6() throws Exception { if (!shouldRun) return; Proxy proxy = new Proxy(Proxy.Type.SOCKS, new InetSocketAddress("::1", socks.getPort())); URL url = new URL("http://[::1]:" + server.getAddress().getPort()); java.net.URLConnection conn = url.openConnection(proxy); String actual = ""; try (BufferedReader reader = new BufferedReader( new InputStreamReader(conn.getInputStream()))) { actual = reader.readLine(); } assertEquals(actual, response); } @Test(groups = "unit") public void testSocksOverIPv6Hostname() throws Exception { if (!shouldRun) return; String ipv6Hostname = InetAddress.getByName("::1").getHostName(); String ipv4Hostname = InetAddress.getByName("127.0.0.1").getHostName(); if (ipv6Hostname.equals(InetAddress.getByName("::1").getHostAddress())) { System.out.println("Unable to get the hostname of the IPv6 loopback " + "address. Skipping test case."); return; } if (ipv6Hostname.equals(ipv4Hostname)) { System.out.println("IPv6 and IPv4 loopback addresses map to the" + " same hostname. 
Skipping test case."); return; } Proxy proxy = new Proxy(Proxy.Type.SOCKS, new InetSocketAddress(ipv6Hostname, socks.getPort())); URL url = new URL("http://" + ipv6Hostname + ":" + server.getAddress().getPort()); java.net.URLConnection conn = url.openConnection(proxy); String actual = ""; try (BufferedReader reader = new BufferedReader( new InputStreamReader(conn.getInputStream()))) { actual = reader.readLine(); } assertEquals(actual, response); } @AfterClass public void tearDown() { if (server != null) { server.stop(1); } if (socks != null) { socks.terminate(); } } }
gpl-2.0
JoeChien23/hadoop
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
100519
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.conf; import com.google.common.annotations.VisibleForTesting; import java.io.BufferedInputStream; import java.io.DataInput; import java.io.DataOutput; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.OutputStream; import java.io.OutputStreamWriter; import java.io.Reader; import java.io.Writer; import java.lang.ref.WeakReference; import java.net.InetSocketAddress; import java.net.URL; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Enumeration; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.ListIterator; import java.util.Map; import java.util.Map.Entry; import java.util.Properties; import java.util.Set; import java.util.StringTokenizer; import java.util.WeakHashMap; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.regex.PatternSyntaxException; import 
java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; import javax.xml.parsers.ParserConfigurationException; import javax.xml.transform.Transformer; import javax.xml.transform.TransformerException; import javax.xml.transform.TransformerFactory; import javax.xml.transform.dom.DOMSource; import javax.xml.transform.stream.StreamResult; import com.google.common.base.Charsets; import org.apache.commons.collections.map.UnmodifiableMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.alias.CredentialProvider; import org.apache.hadoop.security.alias.CredentialProvider.CredentialEntry; import org.apache.hadoop.security.alias.CredentialProviderFactory; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringInterner; import org.apache.hadoop.util.StringUtils; import org.codehaus.jackson.JsonFactory; import org.codehaus.jackson.JsonGenerator; import org.w3c.dom.DOMException; import org.w3c.dom.Document; import org.w3c.dom.Element; import org.w3c.dom.Node; import org.w3c.dom.NodeList; import org.w3c.dom.Text; import org.xml.sax.SAXException; import com.google.common.base.Preconditions; /** * Provides access to configuration parameters. * * <h4 id="Resources">Resources</h4> * * <p>Configurations are specified by resources. A resource contains a set of * name/value pairs as XML data. 
Each resource is named by either a * <code>String</code> or by a {@link Path}. If named by a <code>String</code>, * then the classpath is examined for a file with that name. If named by a * <code>Path</code>, then the local filesystem is examined directly, without * referring to the classpath. * * <p>Unless explicitly turned off, Hadoop by default specifies two * resources, loaded in-order from the classpath: <ol> * <li><tt> * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml"> * core-default.xml</a></tt>: Read-only defaults for hadoop.</li> * <li><tt>core-site.xml</tt>: Site-specific configuration for a given hadoop * installation.</li> * </ol> * Applications may add additional resources, which are loaded * subsequent to these resources in the order they are added. * * <h4 id="FinalParams">Final Parameters</h4> * * <p>Configuration parameters may be declared <i>final</i>. * Once a resource declares a value final, no subsequently-loaded * resource can alter that value. * For example, one might define a final parameter with: * <tt><pre> * &lt;property&gt; * &lt;name&gt;dfs.hosts.include&lt;/name&gt; * &lt;value&gt;/etc/hadoop/conf/hosts.include&lt;/value&gt; * <b>&lt;final&gt;true&lt;/final&gt;</b> * &lt;/property&gt;</pre></tt> * * Administrators typically define parameters as final in * <tt>core-site.xml</tt> for values that user applications may not alter. * * <h4 id="VariableExpansion">Variable Expansion</h4> * * <p>Value strings are first processed for <i>variable expansion</i>. 
The * available properties are:<ol> * <li>Other properties defined in this Configuration; and, if a name is * undefined here,</li> * <li>Properties in {@link System#getProperties()}.</li> * </ol> * * <p>For example, if a configuration resource contains the following property * definitions: * <tt><pre> * &lt;property&gt; * &lt;name&gt;basedir&lt;/name&gt; * &lt;value&gt;/user/${<i>user.name</i>}&lt;/value&gt; * &lt;/property&gt; * * &lt;property&gt; * &lt;name&gt;tempdir&lt;/name&gt; * &lt;value&gt;${<i>basedir</i>}/tmp&lt;/value&gt; * &lt;/property&gt;</pre></tt> * * When <tt>conf.get("tempdir")</tt> is called, then <tt>${<i>basedir</i>}</tt> * will be resolved to another property in this Configuration, while * <tt>${<i>user.name</i>}</tt> would then ordinarily be resolved to the value * of the System property with that name. * By default, warnings will be given to any deprecated configuration * parameters and these are suppressible by configuring * <tt>log4j.logger.org.apache.hadoop.conf.Configuration.deprecation</tt> in * log4j.properties file. */ @InterfaceAudience.Public @InterfaceStability.Stable public class Configuration implements Iterable<Map.Entry<String,String>>, Writable { private static final Log LOG = LogFactory.getLog(Configuration.class); private static final Log LOG_DEPRECATION = LogFactory.getLog("org.apache.hadoop.conf.Configuration.deprecation"); private boolean quietmode = true; private static final String DEFAULT_STRING_CHECK = "testingforemptydefaultvalue"; private boolean allowNullValueProperties = false; private static class Resource { private final Object resource; private final String name; public Resource(Object resource) { this(resource, resource.toString()); } public Resource(Object resource, String name) { this.resource = resource; this.name = name; } public String getName(){ return name; } public Object getResource() { return resource; } @Override public String toString() { return name; } } /** * List of configuration resources. 
*/ private ArrayList<Resource> resources = new ArrayList<Resource>(); /** * The value reported as the setting resource when a key is set * by code rather than a file resource by dumpConfiguration. */ static final String UNKNOWN_RESOURCE = "Unknown"; /** * List of configuration parameters marked <b>final</b>. */ private Set<String> finalParameters = Collections.newSetFromMap( new ConcurrentHashMap<String, Boolean>()); private boolean loadDefaults = true; /** * Configuration objects */ private static final WeakHashMap<Configuration,Object> REGISTRY = new WeakHashMap<Configuration,Object>(); /** * List of default Resources. Resources are loaded in the order of the list * entries */ private static final CopyOnWriteArrayList<String> defaultResources = new CopyOnWriteArrayList<String>(); private static final Map<ClassLoader, Map<String, WeakReference<Class<?>>>> CACHE_CLASSES = new WeakHashMap<ClassLoader, Map<String, WeakReference<Class<?>>>>(); /** * Sentinel value to store negative cache results in {@link #CACHE_CLASSES}. */ private static final Class<?> NEGATIVE_CACHE_SENTINEL = NegativeCacheSentinel.class; /** * Stores the mapping of key to the resource which modifies or loads * the key most recently */ private Map<String, String[]> updatingResource; /** * Class to keep the information about the keys which replace the deprecated * ones. * * This class stores the new keys which replace the deprecated keys and also * gives a provision to have a custom message for each of the deprecated key * that is being replaced. It also provides method to get the appropriate * warning message which can be logged whenever the deprecated key is used. 
*/ private static class DeprecatedKeyInfo { private final String[] newKeys; private final String customMessage; private final AtomicBoolean accessed = new AtomicBoolean(false); DeprecatedKeyInfo(String[] newKeys, String customMessage) { this.newKeys = newKeys; this.customMessage = customMessage; } /** * Method to provide the warning message. It gives the custom message if * non-null, and default message otherwise. * @param key the associated deprecated key. * @return message that is to be logged when a deprecated key is used. */ private final String getWarningMessage(String key) { String warningMessage; if(customMessage == null) { StringBuilder message = new StringBuilder(key); String deprecatedKeySuffix = " is deprecated. Instead, use "; message.append(deprecatedKeySuffix); for (int i = 0; i < newKeys.length; i++) { message.append(newKeys[i]); if(i != newKeys.length-1) { message.append(", "); } } warningMessage = message.toString(); } else { warningMessage = customMessage; } return warningMessage; } boolean getAndSetAccessed() { return accessed.getAndSet(true); } public void clearAccessed() { accessed.set(false); } } /** * A pending addition to the global set of deprecated keys. 
*/ public static class DeprecationDelta { private final String key; private final String[] newKeys; private final String customMessage; DeprecationDelta(String key, String[] newKeys, String customMessage) { Preconditions.checkNotNull(key); Preconditions.checkNotNull(newKeys); Preconditions.checkArgument(newKeys.length > 0); this.key = key; this.newKeys = newKeys; this.customMessage = customMessage; } public DeprecationDelta(String key, String newKey, String customMessage) { this(key, new String[] { newKey }, customMessage); } public DeprecationDelta(String key, String newKey) { this(key, new String[] { newKey }, null); } public String getKey() { return key; } public String[] getNewKeys() { return newKeys; } public String getCustomMessage() { return customMessage; } } /** * The set of all keys which are deprecated. * * DeprecationContext objects are immutable. */ private static class DeprecationContext { /** * Stores the deprecated keys, the new keys which replace the deprecated keys * and custom message(if any provided). */ private final Map<String, DeprecatedKeyInfo> deprecatedKeyMap; /** * Stores a mapping from superseding keys to the keys which they deprecate. */ private final Map<String, String> reverseDeprecatedKeyMap; /** * Create a new DeprecationContext by copying a previous DeprecationContext * and adding some deltas. * * @param other The previous deprecation context to copy, or null to start * from nothing. * @param deltas The deltas to apply. 
*/ @SuppressWarnings("unchecked") DeprecationContext(DeprecationContext other, DeprecationDelta[] deltas) { HashMap<String, DeprecatedKeyInfo> newDeprecatedKeyMap = new HashMap<String, DeprecatedKeyInfo>(); HashMap<String, String> newReverseDeprecatedKeyMap = new HashMap<String, String>(); if (other != null) { for (Entry<String, DeprecatedKeyInfo> entry : other.deprecatedKeyMap.entrySet()) { newDeprecatedKeyMap.put(entry.getKey(), entry.getValue()); } for (Entry<String, String> entry : other.reverseDeprecatedKeyMap.entrySet()) { newReverseDeprecatedKeyMap.put(entry.getKey(), entry.getValue()); } } for (DeprecationDelta delta : deltas) { if (!newDeprecatedKeyMap.containsKey(delta.getKey())) { DeprecatedKeyInfo newKeyInfo = new DeprecatedKeyInfo(delta.getNewKeys(), delta.getCustomMessage()); newDeprecatedKeyMap.put(delta.key, newKeyInfo); for (String newKey : delta.getNewKeys()) { newReverseDeprecatedKeyMap.put(newKey, delta.key); } } } this.deprecatedKeyMap = UnmodifiableMap.decorate(newDeprecatedKeyMap); this.reverseDeprecatedKeyMap = UnmodifiableMap.decorate(newReverseDeprecatedKeyMap); } Map<String, DeprecatedKeyInfo> getDeprecatedKeyMap() { return deprecatedKeyMap; } Map<String, String> getReverseDeprecatedKeyMap() { return reverseDeprecatedKeyMap; } } private static DeprecationDelta[] defaultDeprecations = new DeprecationDelta[] { new DeprecationDelta("topology.script.file.name", CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY), new DeprecationDelta("topology.script.number.args", CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_NUMBER_ARGS_KEY), new DeprecationDelta("hadoop.configured.node.mapping", CommonConfigurationKeys.NET_TOPOLOGY_CONFIGURED_NODE_MAPPING_KEY), new DeprecationDelta("topology.node.switch.mapping.impl", CommonConfigurationKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY), new DeprecationDelta("dfs.df.interval", CommonConfigurationKeys.FS_DF_INTERVAL_KEY), new DeprecationDelta("hadoop.native.lib", 
CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY), new DeprecationDelta("fs.default.name", CommonConfigurationKeys.FS_DEFAULT_NAME_KEY), new DeprecationDelta("dfs.umaskmode", CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY), new DeprecationDelta("dfs.nfs.exports.allowed.hosts", CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY) }; /** * The global DeprecationContext. */ private static AtomicReference<DeprecationContext> deprecationContext = new AtomicReference<DeprecationContext>( new DeprecationContext(null, defaultDeprecations)); /** * Adds a set of deprecated keys to the global deprecations. * * This method is lockless. It works by means of creating a new * DeprecationContext based on the old one, and then atomically swapping in * the new context. If someone else updated the context in between us reading * the old context and swapping in the new one, we try again until we win the * race. * * @param deltas The deprecations to add. */ public static void addDeprecations(DeprecationDelta[] deltas) { DeprecationContext prev, next; do { prev = deprecationContext.get(); next = new DeprecationContext(prev, deltas); } while (!deprecationContext.compareAndSet(prev, next)); } /** * Adds the deprecated key to the global deprecation map. * It does not override any existing entries in the deprecation map. * This is to be used only by the developers in order to add deprecation of * keys, and attempts to call this method after loading resources once, * would lead to <tt>UnsupportedOperationException</tt> * * If a key is deprecated in favor of multiple keys, they are all treated as * aliases of each other, and setting any one of them resets all the others * to the new value. * * If you have multiple deprecation entries to add, it is more efficient to * use #addDeprecations(DeprecationDelta[] deltas) instead. 
* * @param key * @param newKeys * @param customMessage * @deprecated use {@link #addDeprecation(String key, String newKey, String customMessage)} instead */ @Deprecated public static void addDeprecation(String key, String[] newKeys, String customMessage) { addDeprecations(new DeprecationDelta[] { new DeprecationDelta(key, newKeys, customMessage) }); } /** * Adds the deprecated key to the global deprecation map. * It does not override any existing entries in the deprecation map. * This is to be used only by the developers in order to add deprecation of * keys, and attempts to call this method after loading resources once, * would lead to <tt>UnsupportedOperationException</tt> * * If you have multiple deprecation entries to add, it is more efficient to * use #addDeprecations(DeprecationDelta[] deltas) instead. * * @param key * @param newKey * @param customMessage */ public static void addDeprecation(String key, String newKey, String customMessage) { addDeprecation(key, new String[] {newKey}, customMessage); } /** * Adds the deprecated key to the global deprecation map when no custom * message is provided. * It does not override any existing entries in the deprecation map. * This is to be used only by the developers in order to add deprecation of * keys, and attempts to call this method after loading resources once, * would lead to <tt>UnsupportedOperationException</tt> * * If a key is deprecated in favor of multiple keys, they are all treated as * aliases of each other, and setting any one of them resets all the others * to the new value. * * If you have multiple deprecation entries to add, it is more efficient to * use #addDeprecations(DeprecationDelta[] deltas) instead. 
* * @param key Key that is to be deprecated * @param newKeys list of keys that take up the values of deprecated key * @deprecated use {@link #addDeprecation(String key, String newKey)} instead */ @Deprecated public static void addDeprecation(String key, String[] newKeys) { addDeprecation(key, newKeys, null); } /** * Adds the deprecated key to the global deprecation map when no custom * message is provided. * It does not override any existing entries in the deprecation map. * This is to be used only by the developers in order to add deprecation of * keys, and attempts to call this method after loading resources once, * would lead to <tt>UnsupportedOperationException</tt> * * If you have multiple deprecation entries to add, it is more efficient to * use #addDeprecations(DeprecationDelta[] deltas) instead. * * @param key Key that is to be deprecated * @param newKey key that takes up the value of deprecated key */ public static void addDeprecation(String key, String newKey) { addDeprecation(key, new String[] {newKey}, null); } /** * checks whether the given <code>key</code> is deprecated. * * @param key the parameter which is to be checked for deprecation * @return <code>true</code> if the key is deprecated and * <code>false</code> otherwise. */ public static boolean isDeprecated(String key) { return deprecationContext.get().getDeprecatedKeyMap().containsKey(key); } /** * Sets all deprecated properties that are not currently set but have a * corresponding new property that is set. Useful for iterating the * properties when all deprecated properties for currently set properties * need to be present. 
*/ public void setDeprecatedProperties() { DeprecationContext deprecations = deprecationContext.get(); Properties props = getProps(); Properties overlay = getOverlay(); for (Map.Entry<String, DeprecatedKeyInfo> entry : deprecations.getDeprecatedKeyMap().entrySet()) { String depKey = entry.getKey(); if (!overlay.contains(depKey)) { for (String newKey : entry.getValue().newKeys) { String val = overlay.getProperty(newKey); if (val != null) { props.setProperty(depKey, val); overlay.setProperty(depKey, val); break; } } } } } /** * Checks for the presence of the property <code>name</code> in the * deprecation map. Returns the first of the list of new keys if present * in the deprecation map or the <code>name</code> itself. If the property * is not presently set but the property map contains an entry for the * deprecated key, the value of the deprecated key is set as the value for * the provided property name. * * @param name the property name * @return the first property in the list of properties mapping * the <code>name</code> or the <code>name</code> itself. 
*/ private String[] handleDeprecation(DeprecationContext deprecations, String name) { if (null != name) { name = name.trim(); } ArrayList<String > names = new ArrayList<String>(); if (isDeprecated(name)) { DeprecatedKeyInfo keyInfo = deprecations.getDeprecatedKeyMap().get(name); warnOnceIfDeprecated(deprecations, name); for (String newKey : keyInfo.newKeys) { if(newKey != null) { names.add(newKey); } } } if(names.size() == 0) { names.add(name); } for(String n : names) { String deprecatedKey = deprecations.getReverseDeprecatedKeyMap().get(n); if (deprecatedKey != null && !getOverlay().containsKey(n) && getOverlay().containsKey(deprecatedKey)) { getProps().setProperty(n, getOverlay().getProperty(deprecatedKey)); getOverlay().setProperty(n, getOverlay().getProperty(deprecatedKey)); } } return names.toArray(new String[names.size()]); } private void handleDeprecation() { LOG.debug("Handling deprecation for all properties in config..."); DeprecationContext deprecations = deprecationContext.get(); Set<Object> keys = new HashSet<Object>(); keys.addAll(getProps().keySet()); for (Object item: keys) { LOG.debug("Handling deprecation for " + (String)item); handleDeprecation(deprecations, (String)item); } } static{ //print deprecation warning if hadoop-site.xml is found in classpath ClassLoader cL = Thread.currentThread().getContextClassLoader(); if (cL == null) { cL = Configuration.class.getClassLoader(); } if(cL.getResource("hadoop-site.xml")!=null) { LOG.warn("DEPRECATED: hadoop-site.xml found in the classpath. " + "Usage of hadoop-site.xml is deprecated. 
Instead use core-site.xml, " + "mapred-site.xml and hdfs-site.xml to override properties of " + "core-default.xml, mapred-default.xml and hdfs-default.xml " + "respectively"); } addDefaultResource("core-default.xml"); addDefaultResource("core-site.xml"); } private Properties properties; private Properties overlay; private ClassLoader classLoader; { classLoader = Thread.currentThread().getContextClassLoader(); if (classLoader == null) { classLoader = Configuration.class.getClassLoader(); } } /** A new configuration. */ public Configuration() { this(true); } /** A new configuration where the behavior of reading from the default * resources can be turned off. * * If the parameter {@code loadDefaults} is false, the new instance * will not load resources from the default files. * @param loadDefaults specifies whether to load from the default files */ public Configuration(boolean loadDefaults) { this.loadDefaults = loadDefaults; updatingResource = new ConcurrentHashMap<String, String[]>(); synchronized(Configuration.class) { REGISTRY.put(this, null); } } /** * A new configuration with the same settings cloned from another. * * @param other the configuration from which to clone settings. */ @SuppressWarnings("unchecked") public Configuration(Configuration other) { this.resources = (ArrayList<Resource>) other.resources.clone(); synchronized(other) { if (other.properties != null) { this.properties = (Properties)other.properties.clone(); } if (other.overlay!=null) { this.overlay = (Properties)other.overlay.clone(); } this.updatingResource = new ConcurrentHashMap<String, String[]>( other.updatingResource); this.finalParameters = Collections.newSetFromMap( new ConcurrentHashMap<String, Boolean>()); this.finalParameters.addAll(other.finalParameters); } synchronized(Configuration.class) { REGISTRY.put(this, null); } this.classLoader = other.classLoader; this.loadDefaults = other.loadDefaults; setQuietMode(other.getQuietMode()); } /** * Add a default resource. 
Resources are loaded in the order of the resources * added. * @param name file name. File should be present in the classpath. */ public static synchronized void addDefaultResource(String name) { if(!defaultResources.contains(name)) { defaultResources.add(name); for(Configuration conf : REGISTRY.keySet()) { if(conf.loadDefaults) { conf.reloadConfiguration(); } } } } /** * Add a configuration resource. * * The properties of this resource will override properties of previously * added resources, unless they were marked <a href="#Final">final</a>. * * @param name resource to be added, the classpath is examined for a file * with that name. */ public void addResource(String name) { addResourceObject(new Resource(name)); } /** * Add a configuration resource. * * The properties of this resource will override properties of previously * added resources, unless they were marked <a href="#Final">final</a>. * * @param url url of the resource to be added, the local filesystem is * examined directly to find the resource, without referring to * the classpath. */ public void addResource(URL url) { addResourceObject(new Resource(url)); } /** * Add a configuration resource. * * The properties of this resource will override properties of previously * added resources, unless they were marked <a href="#Final">final</a>. * * @param file file-path of resource to be added, the local filesystem is * examined directly to find the resource, without referring to * the classpath. */ public void addResource(Path file) { addResourceObject(new Resource(file)); } /** * Add a configuration resource. * * The properties of this resource will override properties of previously * added resources, unless they were marked <a href="#Final">final</a>. * * WARNING: The contents of the InputStream will be cached, by this method. * So use this sparingly because it does increase the memory consumption. * * @param in InputStream to deserialize the object from. 
In will be read from * when a get or set is called next. After it is read the stream will be * closed. */ public void addResource(InputStream in) { addResourceObject(new Resource(in)); } /** * Add a configuration resource. * * The properties of this resource will override properties of previously * added resources, unless they were marked <a href="#Final">final</a>. * * @param in InputStream to deserialize the object from. * @param name the name of the resource because InputStream.toString is not * very descriptive some times. */ public void addResource(InputStream in, String name) { addResourceObject(new Resource(in, name)); } /** * Add a configuration resource. * * The properties of this resource will override properties of previously * added resources, unless they were marked <a href="#Final">final</a>. * * @param conf Configuration object from which to load properties */ public void addResource(Configuration conf) { addResourceObject(new Resource(conf.getProps())); } /** * Reload configuration from previously added resources. * * This method will clear all the configuration read from the added * resources, and final parameters. This will make the resources to * be read again before accessing the values. Values that are added * via set methods will overlay values read from the resources. */ public synchronized void reloadConfiguration() { properties = null; // trigger reload finalParameters.clear(); // clear site-limits } private synchronized void addResourceObject(Resource resource) { resources.add(resource); // add to resources reloadConfiguration(); } private static final int MAX_SUBST = 20; private static final int SUB_START_IDX = 0; private static final int SUB_END_IDX = SUB_START_IDX + 1; /** * This is a manual implementation of the following regex * "\\$\\{[^\\}\\$\u0020]+\\}". It can be 15x more efficient than * a regex matcher as demonstrated by HADOOP-11506. 
 * This is noticeable with
 * Hadoop apps building on the assumption Configuration#get is an O(1)
 * hash table lookup, especially when the eval is a long string.
 *
 * @param eval a string that may contain variables requiring expansion.
 * @return a 2-element int array res such that
 * eval.substring(res[0], res[1]) is "var" for the left-most occurrence of
 * ${var} in eval. If no variable is found -1, -1 is returned.
 */
private static int[] findSubVariable(String eval) {
  // result[SUB_START_IDX], result[SUB_END_IDX]; {-1, -1} means "no match".
  int[] result = {-1, -1};

  int matchStart;
  int leftBrace;

  // scanning for a brace first because it's less frequent than $
  // that can occur in nested class names
  //
  match_loop:
  for (matchStart = 1, leftBrace = eval.indexOf('{', matchStart);
       // minimum left brace position (follows '$')
       leftBrace > 0
       // right brace of a smallest valid expression "${c}"
       && leftBrace + "{c".length() < eval.length();
       leftBrace = eval.indexOf('{', matchStart)) {
    int matchedLen = 0;
    if (eval.charAt(leftBrace - 1) == '$') {
      int subStart = leftBrace + 1; // after '{'
      // Scan the variable name; '}' with a non-empty name is a match,
      // while ' ' or '$' aborts this candidate and resumes the outer scan.
      for (int i = subStart; i < eval.length(); i++) {
        switch (eval.charAt(i)) {
          case '}':
            if (matchedLen > 0) { // match
              result[SUB_START_IDX] = subStart;
              result[SUB_END_IDX] = subStart + matchedLen;
              break match_loop;
            }
            // fall through to skip 1 char
          case ' ':
          case '$':
            matchStart = i + 1;
            continue match_loop;
          default:
            matchedLen++;
        }
      }
      // scanned from "${" to the end of eval, and no reset via ' ', '$':
      // no match!
      break match_loop;
    } else {
      // not a start of a variable
      //
      matchStart = leftBrace + 1;
    }
  }
  return result;
}

/**
 * Attempts to repeatedly expand the value {@code expr} by replacing the
 * left-most substring of the form "${var}" in the following precedence order
 * <ol>
 *   <li>by the value of the Java system property "var" if defined</li>
 *   <li>by the value of the configuration key "var" if defined</li>
 * </ol>
 *
 * If var is unbounded the current state of expansion "prefix${var}suffix" is
 * returned.
 *
 * @param expr the literal value of a config key
 * @return null if expr is null, otherwise the value resulting from expanding
 * expr using the algorithm above.
 * @throws IllegalStateException when more than
 * {@link Configuration#MAX_SUBST} replacements are required
 */
private String substituteVars(String expr) {
  if (expr == null) {
    return null;
  }
  String eval = expr;
  // Each pass replaces the left-most ${var}; the pass count is capped at
  // MAX_SUBST so that cyclic definitions (a=${b}, b=${a}) terminate.
  for (int s = 0; s < MAX_SUBST; s++) {
    final int[] varBounds = findSubVariable(eval);
    if (varBounds[SUB_START_IDX] == -1) {
      return eval; // no ${...} left to expand
    }
    final String var = eval.substring(varBounds[SUB_START_IDX],
        varBounds[SUB_END_IDX]);
    String val = null;
    try {
      // System properties take precedence over configuration keys.
      val = System.getProperty(var);
    } catch(SecurityException se) {
      LOG.warn("Unexpected SecurityException in Configuration", se);
    }
    if (val == null) {
      // Fall back to the raw (unexpanded) configuration value.
      val = getRaw(var);
    }
    if (val == null) {
      return eval; // return literal ${var}: var is unbound
    }
    final int dollar = varBounds[SUB_START_IDX] - "${".length();
    final int afterRightBrace = varBounds[SUB_END_IDX] + "}".length();
    // substitute
    eval = eval.substring(0, dollar) + val + eval.substring(afterRightBrace);
  }
  throw new IllegalStateException("Variable substitution depth too large: "
      + MAX_SUBST + " " + expr);
}

/**
 * Get the value of the <code>name</code> property, <code>null</code> if
 * no such property exists. If the key is deprecated, it returns the value of
 * the first key which replaces the deprecated key and is not null.
 *
 * Values are processed for <a href="#VariableExpansion">variable expansion</a>
 * before being returned.
 *
 * @param name the property name, will be trimmed before get value.
 * @return the value of the <code>name</code> or its replacing property,
 *         or null if no such property exists.
 */
public String get(String name) {
  String[] names = handleDeprecation(deprecationContext.get(), name);
  String result = null;
  // When deprecation maps a key to several names, the value looked up for
  // the last name in the array wins.
  for(String n : names) {
    result = substituteVars(getProps().getProperty(n));
  }
  return result;
}

/**
 * Set Configuration to allow keys without values during setup.
Intended * for use during testing. * * @param val If true, will allow Configuration to store keys without values */ @VisibleForTesting public void setAllowNullValueProperties( boolean val ) { this.allowNullValueProperties = val; } /** * Return existence of the <code>name</code> property, but only for * names which have no valid value, usually non-existent or commented * out in XML. * * @param name the property name * @return true if the property <code>name</code> exists without value */ @VisibleForTesting public boolean onlyKeyExists(String name) { String[] names = handleDeprecation(deprecationContext.get(), name); for(String n : names) { if ( getProps().getProperty(n,DEFAULT_STRING_CHECK) .equals(DEFAULT_STRING_CHECK) ) { return true; } } return false; } /** * Get the value of the <code>name</code> property as a trimmed <code>String</code>, * <code>null</code> if no such property exists. * If the key is deprecated, it returns the value of * the first key which replaces the deprecated key and is not null * * Values are processed for <a href="#VariableExpansion">variable expansion</a> * before being returned. * * @param name the property name. * @return the value of the <code>name</code> or its replacing property, * or null if no such property exists. */ public String getTrimmed(String name) { String value = get(name); if (null == value) { return null; } else { return value.trim(); } } /** * Get the value of the <code>name</code> property as a trimmed <code>String</code>, * <code>defaultValue</code> if no such property exists. * See @{Configuration#getTrimmed} for more details. * * @param name the property name. * @param defaultValue the property default value. * @return the value of the <code>name</code> or defaultValue * if it is not set. */ public String getTrimmed(String name, String defaultValue) { String ret = getTrimmed(name); return ret == null ? 
defaultValue : ret; } /** * Get the value of the <code>name</code> property, without doing * <a href="#VariableExpansion">variable expansion</a>.If the key is * deprecated, it returns the value of the first key which replaces * the deprecated key and is not null. * * @param name the property name. * @return the value of the <code>name</code> property or * its replacing property and null if no such property exists. */ public String getRaw(String name) { String[] names = handleDeprecation(deprecationContext.get(), name); String result = null; for(String n : names) { result = getProps().getProperty(n); } return result; } /** * Returns alternative names (non-deprecated keys or previously-set deprecated keys) * for a given non-deprecated key. * If the given key is deprecated, return null. * * @param name property name. * @return alternative names. */ private String[] getAlternativeNames(String name) { String altNames[] = null; DeprecatedKeyInfo keyInfo = null; DeprecationContext cur = deprecationContext.get(); String depKey = cur.getReverseDeprecatedKeyMap().get(name); if(depKey != null) { keyInfo = cur.getDeprecatedKeyMap().get(depKey); if(keyInfo.newKeys.length > 0) { if(getProps().containsKey(depKey)) { //if deprecated key is previously set explicitly List<String> list = new ArrayList<String>(); list.addAll(Arrays.asList(keyInfo.newKeys)); list.add(depKey); altNames = list.toArray(new String[list.size()]); } else { altNames = keyInfo.newKeys; } } } return altNames; } /** * Set the <code>value</code> of the <code>name</code> property. If * <code>name</code> is deprecated or there is a deprecated name associated to it, * it sets the value to both names. Name will be trimmed before put into * configuration. * * @param name property name. * @param value property value. */ public void set(String name, String value) { set(name, value, null); } /** * Set the <code>value</code> of the <code>name</code> property. 
If * <code>name</code> is deprecated, it also sets the <code>value</code> to * the keys that replace the deprecated key. Name will be trimmed before put * into configuration. * * @param name property name. * @param value property value. * @param source the place that this configuration value came from * (For debugging). * @throws IllegalArgumentException when the value or name is null. */ public void set(String name, String value, String source) { Preconditions.checkArgument( name != null, "Property name must not be null"); Preconditions.checkArgument( value != null, "The value of property " + name + " must not be null"); name = name.trim(); DeprecationContext deprecations = deprecationContext.get(); if (deprecations.getDeprecatedKeyMap().isEmpty()) { getProps(); } getOverlay().setProperty(name, value); getProps().setProperty(name, value); String newSource = (source == null ? "programatically" : source); if (!isDeprecated(name)) { updatingResource.put(name, new String[] {newSource}); String[] altNames = getAlternativeNames(name); if(altNames != null) { for(String n: altNames) { if(!n.equals(name)) { getOverlay().setProperty(n, value); getProps().setProperty(n, value); updatingResource.put(n, new String[] {newSource}); } } } } else { String[] names = handleDeprecation(deprecationContext.get(), name); String altSource = "because " + name + " is deprecated"; for(String n : names) { getOverlay().setProperty(n, value); getProps().setProperty(n, value); updatingResource.put(n, new String[] {altSource}); } } } private void warnOnceIfDeprecated(DeprecationContext deprecations, String name) { DeprecatedKeyInfo keyInfo = deprecations.getDeprecatedKeyMap().get(name); if (keyInfo != null && !keyInfo.getAndSetAccessed()) { LOG_DEPRECATION.info(keyInfo.getWarningMessage(name)); } } /** * Unset a previously set property. 
*/ public synchronized void unset(String name) { String[] names = null; if (!isDeprecated(name)) { names = getAlternativeNames(name); if(names == null) { names = new String[]{name}; } } else { names = handleDeprecation(deprecationContext.get(), name); } for(String n: names) { getOverlay().remove(n); getProps().remove(n); } } /** * Sets a property if it is currently unset. * @param name the property name * @param value the new value */ public synchronized void setIfUnset(String name, String value) { if (get(name) == null) { set(name, value); } } private synchronized Properties getOverlay() { if (overlay==null){ overlay=new Properties(); } return overlay; } /** * Get the value of the <code>name</code>. If the key is deprecated, * it returns the value of the first key which replaces the deprecated key * and is not null. * If no such property exists, * then <code>defaultValue</code> is returned. * * @param name property name, will be trimmed before get value. * @param defaultValue default value. * @return property value, or <code>defaultValue</code> if the property * doesn't exist. */ public String get(String name, String defaultValue) { String[] names = handleDeprecation(deprecationContext.get(), name); String result = null; for(String n : names) { result = substituteVars(getProps().getProperty(n, defaultValue)); } return result; } /** * Get the value of the <code>name</code> property as an <code>int</code>. * * If no such property exists, the provided default value is returned, * or if the specified value is not a valid <code>int</code>, * then an error is thrown. * * @param name property name. * @param defaultValue default value. * @throws NumberFormatException when the value is invalid * @return property value as an <code>int</code>, * or <code>defaultValue</code>. 
 */
public int getInt(String name, int defaultValue) {
  String valueString = getTrimmed(name);
  if (valueString == null)
    return defaultValue;
  // Values of the form 0x.../0X... (optionally preceded by '-') are
  // parsed as hexadecimal; getHexDigits returns null for anything else.
  String hexString = getHexDigits(valueString);
  if (hexString != null) {
    return Integer.parseInt(hexString, 16);
  }
  return Integer.parseInt(valueString);
}

/**
 * Get the value of the <code>name</code> property as a set of comma-delimited
 * <code>int</code> values.
 *
 * If no such property exists, an empty array is returned.
 *
 * @param name property name
 * @return property value interpreted as an array of comma-delimited
 *         <code>int</code> values
 */
public int[] getInts(String name) {
  String[] strings = getTrimmedStrings(name);
  int[] ints = new int[strings.length];
  for (int i = 0; i < strings.length; i++) {
    ints[i] = Integer.parseInt(strings[i]);
  }
  return ints;
}

/**
 * Set the value of the <code>name</code> property to an <code>int</code>.
 *
 * @param name property name.
 * @param value <code>int</code> value of the property.
 */
public void setInt(String name, int value) {
  set(name, Integer.toString(value));
}

/**
 * Get the value of the <code>name</code> property as a <code>long</code>.
 * If no such property exists, the provided default value is returned,
 * or if the specified value is not a valid <code>long</code>,
 * then an error is thrown.
 *
 * @param name property name.
 * @param defaultValue default value.
 * @throws NumberFormatException when the value is invalid
 * @return property value as a <code>long</code>,
 *         or <code>defaultValue</code>.
 */
public long getLong(String name, long defaultValue) {
  String valueString = getTrimmed(name);
  if (valueString == null)
    return defaultValue;
  // Same hexadecimal handling as getInt above.
  String hexString = getHexDigits(valueString);
  if (hexString != null) {
    return Long.parseLong(hexString, 16);
  }
  return Long.parseLong(valueString);
}

/**
 * Get the value of the <code>name</code> property as a <code>long</code> or
 * human readable format.
If no such property exists, the provided default * value is returned, or if the specified value is not a valid * <code>long</code> or human readable format, then an error is thrown. You * can use the following suffix (case insensitive): k(kilo), m(mega), g(giga), * t(tera), p(peta), e(exa) * * @param name property name. * @param defaultValue default value. * @throws NumberFormatException when the value is invalid * @return property value as a <code>long</code>, * or <code>defaultValue</code>. */ public long getLongBytes(String name, long defaultValue) { String valueString = getTrimmed(name); if (valueString == null) return defaultValue; return StringUtils.TraditionalBinaryPrefix.string2long(valueString); } private String getHexDigits(String value) { boolean negative = false; String str = value; String hexString = null; if (value.startsWith("-")) { negative = true; str = value.substring(1); } if (str.startsWith("0x") || str.startsWith("0X")) { hexString = str.substring(2); if (negative) { hexString = "-" + hexString; } return hexString; } return null; } /** * Set the value of the <code>name</code> property to a <code>long</code>. * * @param name property name. * @param value <code>long</code> value of the property. */ public void setLong(String name, long value) { set(name, Long.toString(value)); } /** * Get the value of the <code>name</code> property as a <code>float</code>. * If no such property exists, the provided default value is returned, * or if the specified value is not a valid <code>float</code>, * then an error is thrown. * * @param name property name. * @param defaultValue default value. * @throws NumberFormatException when the value is invalid * @return property value as a <code>float</code>, * or <code>defaultValue</code>. 
*/ public float getFloat(String name, float defaultValue) { String valueString = getTrimmed(name); if (valueString == null) return defaultValue; return Float.parseFloat(valueString); } /** * Set the value of the <code>name</code> property to a <code>float</code>. * * @param name property name. * @param value property value. */ public void setFloat(String name, float value) { set(name,Float.toString(value)); } /** * Get the value of the <code>name</code> property as a <code>double</code>. * If no such property exists, the provided default value is returned, * or if the specified value is not a valid <code>double</code>, * then an error is thrown. * * @param name property name. * @param defaultValue default value. * @throws NumberFormatException when the value is invalid * @return property value as a <code>double</code>, * or <code>defaultValue</code>. */ public double getDouble(String name, double defaultValue) { String valueString = getTrimmed(name); if (valueString == null) return defaultValue; return Double.parseDouble(valueString); } /** * Set the value of the <code>name</code> property to a <code>double</code>. * * @param name property name. * @param value property value. */ public void setDouble(String name, double value) { set(name,Double.toString(value)); } /** * Get the value of the <code>name</code> property as a <code>boolean</code>. * If no such property is specified, or if the specified value is not a valid * <code>boolean</code>, then <code>defaultValue</code> is returned. * * @param name property name. * @param defaultValue default value. * @return property value as a <code>boolean</code>, * or <code>defaultValue</code>. 
*/ public boolean getBoolean(String name, boolean defaultValue) { String valueString = getTrimmed(name); if (null == valueString || valueString.isEmpty()) { return defaultValue; } if (StringUtils.equalsIgnoreCase("true", valueString)) return true; else if (StringUtils.equalsIgnoreCase("false", valueString)) return false; else return defaultValue; } /** * Set the value of the <code>name</code> property to a <code>boolean</code>. * * @param name property name. * @param value <code>boolean</code> value of the property. */ public void setBoolean(String name, boolean value) { set(name, Boolean.toString(value)); } /** * Set the given property, if it is currently unset. * @param name property name * @param value new value */ public void setBooleanIfUnset(String name, boolean value) { setIfUnset(name, Boolean.toString(value)); } /** * Set the value of the <code>name</code> property to the given type. This * is equivalent to <code>set(&lt;name&gt;, value.toString())</code>. * @param name property name * @param value new value */ public <T extends Enum<T>> void setEnum(String name, T value) { set(name, value.toString()); } /** * Return value matching this enumerated type. * Note that the returned value is trimmed by this method. * @param name Property name * @param defaultValue Value returned if no mapping exists * @throws IllegalArgumentException If mapping is illegal for the type * provided */ public <T extends Enum<T>> T getEnum(String name, T defaultValue) { final String val = getTrimmed(name); return null == val ? 
defaultValue : Enum.valueOf(defaultValue.getDeclaringClass(), val); } enum ParsedTimeDuration { NS { TimeUnit unit() { return TimeUnit.NANOSECONDS; } String suffix() { return "ns"; } }, US { TimeUnit unit() { return TimeUnit.MICROSECONDS; } String suffix() { return "us"; } }, MS { TimeUnit unit() { return TimeUnit.MILLISECONDS; } String suffix() { return "ms"; } }, S { TimeUnit unit() { return TimeUnit.SECONDS; } String suffix() { return "s"; } }, M { TimeUnit unit() { return TimeUnit.MINUTES; } String suffix() { return "m"; } }, H { TimeUnit unit() { return TimeUnit.HOURS; } String suffix() { return "h"; } }, D { TimeUnit unit() { return TimeUnit.DAYS; } String suffix() { return "d"; } }; abstract TimeUnit unit(); abstract String suffix(); static ParsedTimeDuration unitFor(String s) { for (ParsedTimeDuration ptd : values()) { // iteration order is in decl order, so SECONDS matched last if (s.endsWith(ptd.suffix())) { return ptd; } } return null; } static ParsedTimeDuration unitFor(TimeUnit unit) { for (ParsedTimeDuration ptd : values()) { if (ptd.unit() == unit) { return ptd; } } return null; } } /** * Set the value of <code>name</code> to the given time duration. This * is equivalent to <code>set(&lt;name&gt;, value + &lt;time suffix&gt;)</code>. * @param name Property name * @param value Time duration * @param unit Unit of time */ public void setTimeDuration(String name, long value, TimeUnit unit) { set(name, value + ParsedTimeDuration.unitFor(unit).suffix()); } /** * Return time duration in the given time unit. Valid units are encoded in * properties as suffixes: nanoseconds (ns), microseconds (us), milliseconds * (ms), seconds (s), minutes (m), hours (h), and days (d). * @param name Property name * @param defaultValue Value returned if no mapping exists. * @param unit Unit to convert the stored property, if it exists. 
* @throws NumberFormatException If the property stripped of its unit is not * a number */ public long getTimeDuration(String name, long defaultValue, TimeUnit unit) { String vStr = get(name); if (null == vStr) { return defaultValue; } vStr = vStr.trim(); ParsedTimeDuration vUnit = ParsedTimeDuration.unitFor(vStr); if (null == vUnit) { LOG.warn("No unit for " + name + "(" + vStr + ") assuming " + unit); vUnit = ParsedTimeDuration.unitFor(unit); } else { vStr = vStr.substring(0, vStr.lastIndexOf(vUnit.suffix())); } return unit.convert(Long.parseLong(vStr), vUnit.unit()); } /** * Get the value of the <code>name</code> property as a <code>Pattern</code>. * If no such property is specified, or if the specified value is not a valid * <code>Pattern</code>, then <code>DefaultValue</code> is returned. * Note that the returned value is NOT trimmed by this method. * * @param name property name * @param defaultValue default value * @return property value as a compiled Pattern, or defaultValue */ public Pattern getPattern(String name, Pattern defaultValue) { String valString = get(name); if (null == valString || valString.isEmpty()) { return defaultValue; } try { return Pattern.compile(valString); } catch (PatternSyntaxException pse) { LOG.warn("Regular expression '" + valString + "' for property '" + name + "' not valid. Using default", pse); return defaultValue; } } /** * Set the given property to <code>Pattern</code>. * If the pattern is passed as null, sets the empty pattern which results in * further calls to getPattern(...) returning the default value. * * @param name property name * @param pattern new value */ public void setPattern(String name, Pattern pattern) { assert pattern != null : "Pattern cannot be null"; set(name, pattern.pattern()); } /** * Gets information about why a property was set. Typically this is the * path to the resource objects (file, URL, etc.) 
the property came from, but * it can also indicate that it was set programatically, or because of the * command line. * * @param name - The property name to get the source of. * @return null - If the property or its source wasn't found. Otherwise, * returns a list of the sources of the resource. The older sources are * the first ones in the list. So for example if a configuration is set from * the command line, and then written out to a file that is read back in the * first entry would indicate that it was set from the command line, while * the second one would indicate the file that the new configuration was read * in from. */ @InterfaceStability.Unstable public synchronized String[] getPropertySources(String name) { if (properties == null) { // If properties is null, it means a resource was newly added // but the props were cleared so as to load it upon future // requests. So lets force a load by asking a properties list. getProps(); } // Return a null right away if our properties still // haven't loaded or the resource mapping isn't defined if (properties == null || updatingResource == null) { return null; } else { String[] source = updatingResource.get(name); if(source == null) { return null; } else { return Arrays.copyOf(source, source.length); } } } /** * A class that represents a set of positive integer ranges. It parses * strings of the form: "2-3,5,7-" where ranges are separated by comma and * the lower/upper bounds are separated by dash. Either the lower or upper * bound may be omitted meaning all values up to or over. So the string * above means 2, 3, 5, and 7, 8, 9, ... 
 */
public static class IntegerRanges implements Iterable<Integer>{
  // A single inclusive [start, end] range of integers.
  private static class Range {
    int start;
    int end;
  }

  // Iterates over every integer contained in a list of Ranges, in order.
  private static class RangeNumberIterator implements Iterator<Integer> {
    Iterator<Range> internal; // remaining ranges not yet entered
    int at;   // next value to emit from the current range
    int end;  // inclusive upper bound of the current range

    public RangeNumberIterator(List<Range> ranges) {
      if (ranges != null) {
        internal = ranges.iterator();
      }
      // at > end marks the "current range" as empty until the first
      // Range is pulled from the internal iterator.
      at = -1;
      end = -2;
    }

    @Override
    public boolean hasNext() {
      if (at <= end) {
        return true;
      } else if (internal != null){
        return internal.hasNext();
      }
      return false;
    }

    @Override
    public Integer next() {
      if (at <= end) {
        at++;
        return at - 1;
      } else if (internal != null){
        Range found = internal.next();
        if (found != null) {
          at = found.start;
          end = found.end;
          at++;
          return at - 1;
        }
      }
      // NOTE(review): returns null when exhausted instead of throwing
      // NoSuchElementException as the Iterator contract specifies —
      // callers appear to rely on hasNext(), so left as-is.
      return null;
    }

    @Override
    public void remove() {
      throw new UnsupportedOperationException();
    }
  };

  // Parsed ranges, kept in the order they appeared in the input string.
  List<Range> ranges = new ArrayList<Range>();

  public IntegerRanges() {
  }

  /**
   * Parse a comma separated list of ranges such as "2-3,5,7-".
   * A missing lower bound defaults to 0 and a missing upper bound to
   * Integer.MAX_VALUE (see convertToInt).
   */
  public IntegerRanges(String newValue) {
    StringTokenizer itr = new StringTokenizer(newValue, ",");
    while (itr.hasMoreTokens()) {
      String rng = itr.nextToken().trim();
      // limit 3 so a malformed "a-b-c" yields 3 parts and is rejected below
      String[] parts = rng.split("-", 3);
      if (parts.length < 1 || parts.length > 2) {
        throw new IllegalArgumentException("integer range badly formed: " +
                                           rng);
      }
      Range r = new Range();
      r.start = convertToInt(parts[0], 0);
      if (parts.length == 2) {
        r.end = convertToInt(parts[1], Integer.MAX_VALUE);
      } else {
        r.end = r.start;
      }
      if (r.start > r.end) {
        throw new IllegalArgumentException("IntegerRange from " + r.start +
                                           " to " + r.end +
                                           " is invalid");
      }
      ranges.add(r);
    }
  }

  /**
   * Convert a string to an int treating empty strings as the default value.
* @param value the string value * @param defaultValue the value for if the string is empty * @return the desired integer */ private static int convertToInt(String value, int defaultValue) { String trim = value.trim(); if (trim.length() == 0) { return defaultValue; } return Integer.parseInt(trim); } /** * Is the given value in the set of ranges * @param value the value to check * @return is the value in the ranges? */ public boolean isIncluded(int value) { for(Range r: ranges) { if (r.start <= value && value <= r.end) { return true; } } return false; } /** * @return true if there are no values in this range, else false. */ public boolean isEmpty() { return ranges == null || ranges.isEmpty(); } @Override public String toString() { StringBuilder result = new StringBuilder(); boolean first = true; for(Range r: ranges) { if (first) { first = false; } else { result.append(','); } result.append(r.start); result.append('-'); result.append(r.end); } return result.toString(); } @Override public Iterator<Integer> iterator() { return new RangeNumberIterator(ranges); } } /** * Parse the given attribute as a set of integer ranges * @param name the attribute name * @param defaultValue the default value if it is not set * @return a new set of ranges from the configured value */ public IntegerRanges getRange(String name, String defaultValue) { return new IntegerRanges(get(name, defaultValue)); } /** * Get the comma delimited values of the <code>name</code> property as * a collection of <code>String</code>s. * If no such property is specified then empty collection is returned. * <p> * This is an optimized version of {@link #getStrings(String)} * * @param name property name. * @return property value as a collection of <code>String</code>s. 
*/ public Collection<String> getStringCollection(String name) { String valueString = get(name); return StringUtils.getStringCollection(valueString); } /** * Get the comma delimited values of the <code>name</code> property as * an array of <code>String</code>s. * If no such property is specified then <code>null</code> is returned. * * @param name property name. * @return property value as an array of <code>String</code>s, * or <code>null</code>. */ public String[] getStrings(String name) { String valueString = get(name); return StringUtils.getStrings(valueString); } /** * Get the comma delimited values of the <code>name</code> property as * an array of <code>String</code>s. * If no such property is specified then default value is returned. * * @param name property name. * @param defaultValue The default value * @return property value as an array of <code>String</code>s, * or default value. */ public String[] getStrings(String name, String... defaultValue) { String valueString = get(name); if (valueString == null) { return defaultValue; } else { return StringUtils.getStrings(valueString); } } /** * Get the comma delimited values of the <code>name</code> property as * a collection of <code>String</code>s, trimmed of the leading and trailing whitespace. * If no such property is specified then empty <code>Collection</code> is returned. * * @param name property name. * @return property value as a collection of <code>String</code>s, or empty <code>Collection</code> */ public Collection<String> getTrimmedStringCollection(String name) { String valueString = get(name); if (null == valueString) { Collection<String> empty = new ArrayList<String>(); return empty; } return StringUtils.getTrimmedStringCollection(valueString); } /** * Get the comma delimited values of the <code>name</code> property as * an array of <code>String</code>s, trimmed of the leading and trailing whitespace. * If no such property is specified then an empty array is returned. * * @param name property name. 
* @return property value as an array of trimmed <code>String</code>s, * or empty array. */ public String[] getTrimmedStrings(String name) { String valueString = get(name); return StringUtils.getTrimmedStrings(valueString); } /** * Get the comma delimited values of the <code>name</code> property as * an array of <code>String</code>s, trimmed of the leading and trailing whitespace. * If no such property is specified then default value is returned. * * @param name property name. * @param defaultValue The default value * @return property value as an array of trimmed <code>String</code>s, * or default value. */ public String[] getTrimmedStrings(String name, String... defaultValue) { String valueString = get(name); if (null == valueString) { return defaultValue; } else { return StringUtils.getTrimmedStrings(valueString); } } /** * Set the array of string values for the <code>name</code> property as * as comma delimited values. * * @param name property name. * @param values The values */ public void setStrings(String name, String... values) { set(name, StringUtils.arrayToString(values)); } /** * Get the value for a known password configuration element. * In order to enable the elimination of clear text passwords in config, * this method attempts to resolve the property name as an alias through * the CredentialProvider API and conditionally fallsback to config. * @param name property name * @return password */ public char[] getPassword(String name) throws IOException { char[] pass = null; pass = getPasswordFromCredentialProviders(name); if (pass == null) { pass = getPasswordFromConfig(name); } return pass; } /** * Try and resolve the provided element name as a credential provider * alias. 
* @param name alias of the provisioned credential * @return password or null if not found * @throws IOException */ protected char[] getPasswordFromCredentialProviders(String name) throws IOException { char[] pass = null; try { List<CredentialProvider> providers = CredentialProviderFactory.getProviders(this); if (providers != null) { for (CredentialProvider provider : providers) { try { CredentialEntry entry = provider.getCredentialEntry(name); if (entry != null) { pass = entry.getCredential(); break; } } catch (IOException ioe) { throw new IOException("Can't get key " + name + " from key provider" + "of type: " + provider.getClass().getName() + ".", ioe); } } } } catch (IOException ioe) { throw new IOException("Configuration problem with provider path.", ioe); } return pass; } /** * Fallback to clear text passwords in configuration. * @param name * @return clear text password or null */ protected char[] getPasswordFromConfig(String name) { char[] pass = null; if (getBoolean(CredentialProvider.CLEAR_TEXT_FALLBACK, true)) { String passStr = get(name); if (passStr != null) { pass = passStr.toCharArray(); } } return pass; } /** * Get the socket address for <code>hostProperty</code> as a * <code>InetSocketAddress</code>. If <code>hostProperty</code> is * <code>null</code>, <code>addressProperty</code> will be used. This * is useful for cases where we want to differentiate between host * bind address and address clients should use to establish connection. * * @param hostProperty bind host property name. * @param addressProperty address property name. 
* @param defaultAddressValue the default value * @param defaultPort the default port * @return InetSocketAddress */ public InetSocketAddress getSocketAddr( String hostProperty, String addressProperty, String defaultAddressValue, int defaultPort) { InetSocketAddress bindAddr = getSocketAddr( addressProperty, defaultAddressValue, defaultPort); final String host = get(hostProperty); if (host == null || host.isEmpty()) { return bindAddr; } return NetUtils.createSocketAddr( host, bindAddr.getPort(), hostProperty); } /** * Get the socket address for <code>name</code> property as a * <code>InetSocketAddress</code>. * @param name property name. * @param defaultAddress the default value * @param defaultPort the default port * @return InetSocketAddress */ public InetSocketAddress getSocketAddr( String name, String defaultAddress, int defaultPort) { final String address = getTrimmed(name, defaultAddress); return NetUtils.createSocketAddr(address, defaultPort, name); } /** * Set the socket address for the <code>name</code> property as * a <code>host:port</code>. */ public void setSocketAddr(String name, InetSocketAddress addr) { set(name, NetUtils.getHostPortString(addr)); } /** * Set the socket address a client can use to connect for the * <code>name</code> property as a <code>host:port</code>. The wildcard * address is replaced with the local host's address. If the host and address * properties are configured the host component of the address will be combined * with the port component of the addr to generate the address. 
This is to allow * optional control over which host name is used in multi-home bind-host * cases where a host can have multiple names * @param hostProperty the bind-host configuration name * @param addressProperty the service address configuration name * @param defaultAddressValue the service default address configuration value * @param addr InetSocketAddress of the service listener * @return InetSocketAddress for clients to connect */ public InetSocketAddress updateConnectAddr( String hostProperty, String addressProperty, String defaultAddressValue, InetSocketAddress addr) { final String host = get(hostProperty); final String connectHostPort = getTrimmed(addressProperty, defaultAddressValue); if (host == null || host.isEmpty() || connectHostPort == null || connectHostPort.isEmpty()) { //not our case, fall back to original logic return updateConnectAddr(addressProperty, addr); } final String connectHost = connectHostPort.split(":")[0]; // Create connect address using client address hostname and server port. return updateConnectAddr(addressProperty, NetUtils.createSocketAddrForHost( connectHost, addr.getPort())); } /** * Set the socket address a client can use to connect for the * <code>name</code> property as a <code>host:port</code>. The wildcard * address is replaced with the local host's address. * @param name property name. * @param addr InetSocketAddress of a listener to store in the given property * @return InetSocketAddress for clients to connect */ public InetSocketAddress updateConnectAddr(String name, InetSocketAddress addr) { final InetSocketAddress connectAddr = NetUtils.getConnectAddress(addr); setSocketAddr(name, connectAddr); return connectAddr; } /** * Load a class by name. * * @param name the class name. * @return the class object. * @throws ClassNotFoundException if the class is not found. 
*/ public Class<?> getClassByName(String name) throws ClassNotFoundException { Class<?> ret = getClassByNameOrNull(name); if (ret == null) { throw new ClassNotFoundException("Class " + name + " not found"); } return ret; } /** * Load a class by name, returning null rather than throwing an exception * if it couldn't be loaded. This is to avoid the overhead of creating * an exception. * * @param name the class name * @return the class object, or null if it could not be found. */ public Class<?> getClassByNameOrNull(String name) { Map<String, WeakReference<Class<?>>> map; synchronized (CACHE_CLASSES) { map = CACHE_CLASSES.get(classLoader); if (map == null) { map = Collections.synchronizedMap( new WeakHashMap<String, WeakReference<Class<?>>>()); CACHE_CLASSES.put(classLoader, map); } } Class<?> clazz = null; WeakReference<Class<?>> ref = map.get(name); if (ref != null) { clazz = ref.get(); } if (clazz == null) { try { clazz = Class.forName(name, true, classLoader); } catch (ClassNotFoundException e) { // Leave a marker that the class isn't found map.put(name, new WeakReference<Class<?>>(NEGATIVE_CACHE_SENTINEL)); return null; } // two putters can race here, but they'll put the same class map.put(name, new WeakReference<Class<?>>(clazz)); return clazz; } else if (clazz == NEGATIVE_CACHE_SENTINEL) { return null; // not found } else { // cache hit return clazz; } } /** * Get the value of the <code>name</code> property * as an array of <code>Class</code>. * The value of the property specifies a list of comma separated class names. * If no such property is specified, then <code>defaultValue</code> is * returned. * * @param name the property name. * @param defaultValue default value. * @return property value as a <code>Class[]</code>, * or <code>defaultValue</code>. */ public Class<?>[] getClasses(String name, Class<?> ... 
defaultValue) { String[] classnames = getTrimmedStrings(name); if (classnames == null) return defaultValue; try { Class<?>[] classes = new Class<?>[classnames.length]; for(int i = 0; i < classnames.length; i++) { classes[i] = getClassByName(classnames[i]); } return classes; } catch (ClassNotFoundException e) { throw new RuntimeException(e); } } /** * Get the value of the <code>name</code> property as a <code>Class</code>. * If no such property is specified, then <code>defaultValue</code> is * returned. * * @param name the class name. * @param defaultValue default value. * @return property value as a <code>Class</code>, * or <code>defaultValue</code>. */ public Class<?> getClass(String name, Class<?> defaultValue) { String valueString = getTrimmed(name); if (valueString == null) return defaultValue; try { return getClassByName(valueString); } catch (ClassNotFoundException e) { throw new RuntimeException(e); } } /** * Get the value of the <code>name</code> property as a <code>Class</code> * implementing the interface specified by <code>xface</code>. * * If no such property is specified, then <code>defaultValue</code> is * returned. * * An exception is thrown if the returned class does not implement the named * interface. * * @param name the class name. * @param defaultValue default value. * @param xface the interface implemented by the named class. * @return property value as a <code>Class</code>, * or <code>defaultValue</code>. */ public <U> Class<? extends U> getClass(String name, Class<? 
extends U> defaultValue, Class<U> xface) { try { Class<?> theClass = getClass(name, defaultValue); if (theClass != null && !xface.isAssignableFrom(theClass)) throw new RuntimeException(theClass+" not "+xface.getName()); else if (theClass != null) return theClass.asSubclass(xface); else return null; } catch (Exception e) { throw new RuntimeException(e); } } /** * Get the value of the <code>name</code> property as a <code>List</code> * of objects implementing the interface specified by <code>xface</code>. * * An exception is thrown if any of the classes does not exist, or if it does * not implement the named interface. * * @param name the property name. * @param xface the interface implemented by the classes named by * <code>name</code>. * @return a <code>List</code> of objects implementing <code>xface</code>. */ @SuppressWarnings("unchecked") public <U> List<U> getInstances(String name, Class<U> xface) { List<U> ret = new ArrayList<U>(); Class<?>[] classes = getClasses(name); for (Class<?> cl: classes) { if (!xface.isAssignableFrom(cl)) { throw new RuntimeException(cl + " does not implement " + xface); } ret.add((U)ReflectionUtils.newInstance(cl, this)); } return ret; } /** * Set the value of the <code>name</code> property to the name of a * <code>theClass</code> implementing the given interface <code>xface</code>. * * An exception is thrown if <code>theClass</code> does not implement the * interface <code>xface</code>. * * @param name property name. * @param theClass property value. * @param xface the interface implemented by the named class. */ public void setClass(String name, Class<?> theClass, Class<?> xface) { if (!xface.isAssignableFrom(theClass)) throw new RuntimeException(theClass+" not "+xface.getName()); set(name, theClass.getName()); } /** * Get a local file under a directory named by <i>dirsProp</i> with * the given <i>path</i>. If <i>dirsProp</i> contains multiple directories, * then one is chosen based on <i>path</i>'s hash code. 
If the selected * directory does not exist, an attempt is made to create it. * * @param dirsProp directory in which to locate the file. * @param path file-path. * @return local file under the directory with the given path. */ public Path getLocalPath(String dirsProp, String path) throws IOException { String[] dirs = getTrimmedStrings(dirsProp); int hashCode = path.hashCode(); FileSystem fs = FileSystem.getLocal(this); for (int i = 0; i < dirs.length; i++) { // try each local dir int index = (hashCode+i & Integer.MAX_VALUE) % dirs.length; Path file = new Path(dirs[index], path); Path dir = file.getParent(); if (fs.mkdirs(dir) || fs.exists(dir)) { return file; } } LOG.warn("Could not make " + path + " in local directories from " + dirsProp); for(int i=0; i < dirs.length; i++) { int index = (hashCode+i & Integer.MAX_VALUE) % dirs.length; LOG.warn(dirsProp + "[" + index + "]=" + dirs[index]); } throw new IOException("No valid local directories in property: "+dirsProp); } /** * Get a local file name under a directory named in <i>dirsProp</i> with * the given <i>path</i>. If <i>dirsProp</i> contains multiple directories, * then one is chosen based on <i>path</i>'s hash code. If the selected * directory does not exist, an attempt is made to create it. * * @param dirsProp directory in which to locate the file. * @param path file-path. * @return local file under the directory with the given path. */ public File getFile(String dirsProp, String path) throws IOException { String[] dirs = getTrimmedStrings(dirsProp); int hashCode = path.hashCode(); for (int i = 0; i < dirs.length; i++) { // try each local dir int index = (hashCode+i & Integer.MAX_VALUE) % dirs.length; File file = new File(dirs[index], path); File dir = file.getParentFile(); if (dir.exists() || dir.mkdirs()) { return file; } } throw new IOException("No valid local directories in property: "+dirsProp); } /** * Get the {@link URL} for the named resource. * * @param name resource name. 
* @return the url for the named resource. */ public URL getResource(String name) { return classLoader.getResource(name); } /** * Get an input stream attached to the configuration resource with the * given <code>name</code>. * * @param name configuration resource name. * @return an input stream attached to the resource. */ public InputStream getConfResourceAsInputStream(String name) { try { URL url= getResource(name); if (url == null) { LOG.info(name + " not found"); return null; } else { LOG.info("found resource " + name + " at " + url); } return url.openStream(); } catch (Exception e) { return null; } } /** * Get a {@link Reader} attached to the configuration resource with the * given <code>name</code>. * * @param name configuration resource name. * @return a reader attached to the resource. */ public Reader getConfResourceAsReader(String name) { try { URL url= getResource(name); if (url == null) { LOG.info(name + " not found"); return null; } else { LOG.info("found resource " + name + " at " + url); } return new InputStreamReader(url.openStream(), Charsets.UTF_8); } catch (Exception e) { return null; } } /** * Get the set of parameters marked final. * * @return final parameter set. */ public Set<String> getFinalParameters() { Set<String> setFinalParams = Collections.newSetFromMap( new ConcurrentHashMap<String, Boolean>()); setFinalParams.addAll(finalParameters); return setFinalParams; } protected synchronized Properties getProps() { if (properties == null) { properties = new Properties(); Map<String, String[]> backup = new ConcurrentHashMap<String, String[]>(updatingResource); loadResources(properties, resources, quietmode); if (overlay != null) { properties.putAll(overlay); for (Map.Entry<Object,Object> item: overlay.entrySet()) { String key = (String)item.getKey(); String[] source = backup.get(key); if(source != null) { updatingResource.put(key, source); } } } } return properties; } /** * Return the number of keys in the configuration. 
* * @return number of keys in the configuration. */ public int size() { return getProps().size(); } /** * Clears all keys from the configuration. */ public void clear() { getProps().clear(); getOverlay().clear(); } /** * Get an {@link Iterator} to go through the list of <code>String</code> * key-value pairs in the configuration. * * @return an iterator over the entries. */ @Override public Iterator<Map.Entry<String, String>> iterator() { // Get a copy of just the string to string pairs. After the old object // methods that allow non-strings to be put into configurations are removed, // we could replace properties with a Map<String,String> and get rid of this // code. Map<String,String> result = new HashMap<String,String>(); for(Map.Entry<Object,Object> item: getProps().entrySet()) { if (item.getKey() instanceof String && item.getValue() instanceof String) { result.put((String) item.getKey(), (String) item.getValue()); } } return result.entrySet().iterator(); } private Document parse(DocumentBuilder builder, URL url) throws IOException, SAXException { if (!quietmode) { LOG.debug("parsing URL " + url); } if (url == null) { return null; } return parse(builder, url.openStream(), url.toString()); } private Document parse(DocumentBuilder builder, InputStream is, String systemId) throws IOException, SAXException { if (!quietmode) { LOG.debug("parsing input stream " + is); } if (is == null) { return null; } try { return (systemId == null) ? 
builder.parse(is) : builder.parse(is, systemId); } finally { is.close(); } } private void loadResources(Properties properties, ArrayList<Resource> resources, boolean quiet) { if(loadDefaults) { for (String resource : defaultResources) { loadResource(properties, new Resource(resource), quiet); } //support the hadoop-site.xml as a deprecated case if(getResource("hadoop-site.xml")!=null) { loadResource(properties, new Resource("hadoop-site.xml"), quiet); } } for (int i = 0; i < resources.size(); i++) { Resource ret = loadResource(properties, resources.get(i), quiet); if (ret != null) { resources.set(i, ret); } } } private Resource loadResource(Properties properties, Resource wrapper, boolean quiet) { String name = UNKNOWN_RESOURCE; try { Object resource = wrapper.getResource(); name = wrapper.getName(); DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory.newInstance(); //ignore all comments inside the xml file docBuilderFactory.setIgnoringComments(true); //allow includes in the xml file docBuilderFactory.setNamespaceAware(true); try { docBuilderFactory.setXIncludeAware(true); } catch (UnsupportedOperationException e) { LOG.error("Failed to set setXIncludeAware(true) for parser " + docBuilderFactory + ":" + e, e); } DocumentBuilder builder = docBuilderFactory.newDocumentBuilder(); Document doc = null; Element root = null; boolean returnCachedProperties = false; if (resource instanceof URL) { // an URL resource doc = parse(builder, (URL)resource); } else if (resource instanceof String) { // a CLASSPATH resource URL url = getResource((String)resource); doc = parse(builder, url); } else if (resource instanceof Path) { // a file resource // Can't use FileSystem API or we get an infinite loop // since FileSystem uses Configuration API. Use java.io.File instead. 
File file = new File(((Path)resource).toUri().getPath()) .getAbsoluteFile(); if (file.exists()) { if (!quiet) { LOG.debug("parsing File " + file); } doc = parse(builder, new BufferedInputStream( new FileInputStream(file)), ((Path)resource).toString()); } } else if (resource instanceof InputStream) { doc = parse(builder, (InputStream) resource, null); returnCachedProperties = true; } else if (resource instanceof Properties) { overlay(properties, (Properties)resource); } else if (resource instanceof Element) { root = (Element)resource; } if (root == null) { if (doc == null) { if (quiet) { return null; } throw new RuntimeException(resource + " not found"); } root = doc.getDocumentElement(); } Properties toAddTo = properties; if(returnCachedProperties) { toAddTo = new Properties(); } if (!"configuration".equals(root.getTagName())) LOG.fatal("bad conf file: top-level element not <configuration>"); NodeList props = root.getChildNodes(); DeprecationContext deprecations = deprecationContext.get(); for (int i = 0; i < props.getLength(); i++) { Node propNode = props.item(i); if (!(propNode instanceof Element)) continue; Element prop = (Element)propNode; if ("configuration".equals(prop.getTagName())) { loadResource(toAddTo, new Resource(prop, name), quiet); continue; } if (!"property".equals(prop.getTagName())) LOG.warn("bad conf file: element not <property>"); NodeList fields = prop.getChildNodes(); String attr = null; String value = null; boolean finalParameter = false; LinkedList<String> source = new LinkedList<String>(); for (int j = 0; j < fields.getLength(); j++) { Node fieldNode = fields.item(j); if (!(fieldNode instanceof Element)) continue; Element field = (Element)fieldNode; if ("name".equals(field.getTagName()) && field.hasChildNodes()) attr = StringInterner.weakIntern( ((Text)field.getFirstChild()).getData().trim()); if ("value".equals(field.getTagName()) && field.hasChildNodes()) value = StringInterner.weakIntern( ((Text)field.getFirstChild()).getData()); if 
("final".equals(field.getTagName()) && field.hasChildNodes()) finalParameter = "true".equals(((Text)field.getFirstChild()).getData()); if ("source".equals(field.getTagName()) && field.hasChildNodes()) source.add(StringInterner.weakIntern( ((Text)field.getFirstChild()).getData())); } source.add(name); // Ignore this parameter if it has already been marked as 'final' if (attr != null) { if (deprecations.getDeprecatedKeyMap().containsKey(attr)) { DeprecatedKeyInfo keyInfo = deprecations.getDeprecatedKeyMap().get(attr); keyInfo.clearAccessed(); for (String key:keyInfo.newKeys) { // update new keys with deprecated key's value loadProperty(toAddTo, name, key, value, finalParameter, source.toArray(new String[source.size()])); } } else { loadProperty(toAddTo, name, attr, value, finalParameter, source.toArray(new String[source.size()])); } } } if (returnCachedProperties) { overlay(properties, toAddTo); return new Resource(toAddTo, name); } return null; } catch (IOException e) { LOG.fatal("error parsing conf " + name, e); throw new RuntimeException(e); } catch (DOMException e) { LOG.fatal("error parsing conf " + name, e); throw new RuntimeException(e); } catch (SAXException e) { LOG.fatal("error parsing conf " + name, e); throw new RuntimeException(e); } catch (ParserConfigurationException e) { LOG.fatal("error parsing conf " + name , e); throw new RuntimeException(e); } } private void overlay(Properties to, Properties from) { for (Entry<Object, Object> entry: from.entrySet()) { to.put(entry.getKey(), entry.getValue()); } } private void loadProperty(Properties properties, String name, String attr, String value, boolean finalParameter, String[] source) { if (value != null || allowNullValueProperties) { if (!finalParameters.contains(attr)) { if (value==null && allowNullValueProperties) { value = DEFAULT_STRING_CHECK; } properties.setProperty(attr, value); if(source != null) { updatingResource.put(attr, source); } } else if (!value.equals(properties.getProperty(attr))) { 
LOG.warn(name+":an attempt to override final parameter: "+attr +"; Ignoring."); } } if (finalParameter && attr != null) { finalParameters.add(attr); } } /** * Write out the non-default properties in this configuration to the given * {@link OutputStream} using UTF-8 encoding. * * @param out the output stream to write to. */ public void writeXml(OutputStream out) throws IOException { writeXml(new OutputStreamWriter(out, "UTF-8")); } /** * Write out the non-default properties in this configuration to the given * {@link Writer}. * * @param out the writer to write to. */ public void writeXml(Writer out) throws IOException { Document doc = asXmlDocument(); try { DOMSource source = new DOMSource(doc); StreamResult result = new StreamResult(out); TransformerFactory transFactory = TransformerFactory.newInstance(); Transformer transformer = transFactory.newTransformer(); // Important to not hold Configuration log while writing result, since // 'out' may be an HDFS stream which needs to lock this configuration // from another thread. transformer.transform(source, result); } catch (TransformerException te) { throw new IOException(te); } } /** * Return the XML DOM corresponding to this Configuration. 
*/ private synchronized Document asXmlDocument() throws IOException { Document doc; try { doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().newDocument(); } catch (ParserConfigurationException pe) { throw new IOException(pe); } Element conf = doc.createElement("configuration"); doc.appendChild(conf); conf.appendChild(doc.createTextNode("\n")); handleDeprecation(); //ensure properties is set and deprecation is handled for (Enumeration<Object> e = properties.keys(); e.hasMoreElements();) { String name = (String)e.nextElement(); Object object = properties.get(name); String value = null; if (object instanceof String) { value = (String) object; }else { continue; } Element propNode = doc.createElement("property"); conf.appendChild(propNode); Element nameNode = doc.createElement("name"); nameNode.appendChild(doc.createTextNode(name)); propNode.appendChild(nameNode); Element valueNode = doc.createElement("value"); valueNode.appendChild(doc.createTextNode(value)); propNode.appendChild(valueNode); if (updatingResource != null) { String[] sources = updatingResource.get(name); if(sources != null) { for(String s : sources) { Element sourceNode = doc.createElement("source"); sourceNode.appendChild(doc.createTextNode(s)); propNode.appendChild(sourceNode); } } } conf.appendChild(doc.createTextNode("\n")); } return doc; } /** * Writes out all the parameters and their properties (final and resource) to * the given {@link Writer} * The format of the output would be * { "properties" : [ {key1,value1,key1.isFinal,key1.resource}, {key2,value2, * key2.isFinal,key2.resource}... ] } * It does not output the parameters of the configuration object which is * loaded from an input stream. 
* @param out the Writer to write to * @throws IOException */ public static void dumpConfiguration(Configuration config, Writer out) throws IOException { JsonFactory dumpFactory = new JsonFactory(); JsonGenerator dumpGenerator = dumpFactory.createJsonGenerator(out); dumpGenerator.writeStartObject(); dumpGenerator.writeFieldName("properties"); dumpGenerator.writeStartArray(); dumpGenerator.flush(); synchronized (config) { for (Map.Entry<Object,Object> item: config.getProps().entrySet()) { dumpGenerator.writeStartObject(); dumpGenerator.writeStringField("key", (String) item.getKey()); dumpGenerator.writeStringField("value", config.get((String) item.getKey())); dumpGenerator.writeBooleanField("isFinal", config.finalParameters.contains(item.getKey())); String[] resources = config.updatingResource.get(item.getKey()); String resource = UNKNOWN_RESOURCE; if(resources != null && resources.length > 0) { resource = resources[0]; } dumpGenerator.writeStringField("resource", resource); dumpGenerator.writeEndObject(); } } dumpGenerator.writeEndArray(); dumpGenerator.writeEndObject(); dumpGenerator.flush(); } /** * Get the {@link ClassLoader} for this job. * * @return the correct class loader. */ public ClassLoader getClassLoader() { return classLoader; } /** * Set the class loader that will be used to load the various objects. * * @param classLoader the new class loader. */ public void setClassLoader(ClassLoader classLoader) { this.classLoader = classLoader; } @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("Configuration: "); if(loadDefaults) { toString(defaultResources, sb); if(resources.size()>0) { sb.append(", "); } } toString(resources, sb); return sb.toString(); } private <T> void toString(List<T> resources, StringBuilder sb) { ListIterator<T> i = resources.listIterator(); while (i.hasNext()) { if (i.nextIndex() != 0) { sb.append(", "); } sb.append(i.next()); } } /** * Set the quietness-mode. 
* * In the quiet-mode, error and informational messages might not be logged. * * @param quietmode <code>true</code> to set quiet-mode on, <code>false</code> * to turn it off. */ public synchronized void setQuietMode(boolean quietmode) { this.quietmode = quietmode; } synchronized boolean getQuietMode() { return this.quietmode; } /** For debugging. List non-default properties to the terminal and exit. */ public static void main(String[] args) throws Exception { new Configuration().writeXml(System.out); } @Override public void readFields(DataInput in) throws IOException { clear(); int size = WritableUtils.readVInt(in); for(int i=0; i < size; ++i) { String key = org.apache.hadoop.io.Text.readString(in); String value = org.apache.hadoop.io.Text.readString(in); set(key, value); String sources[] = WritableUtils.readCompressedStringArray(in); if(sources != null) { updatingResource.put(key, sources); } } } //@Override @Override public void write(DataOutput out) throws IOException { Properties props = getProps(); WritableUtils.writeVInt(out, props.size()); for(Map.Entry<Object, Object> item: props.entrySet()) { org.apache.hadoop.io.Text.writeString(out, (String) item.getKey()); org.apache.hadoop.io.Text.writeString(out, (String) item.getValue()); WritableUtils.writeCompressedStringArray(out, updatingResource.get(item.getKey())); } } /** * get keys matching the the regex * @param regex * @return Map<String,String> with matching keys */ public Map<String,String> getValByRegex(String regex) { Pattern p = Pattern.compile(regex); Map<String,String> result = new HashMap<String,String>(); Matcher m; for(Map.Entry<Object,Object> item: getProps().entrySet()) { if (item.getKey() instanceof String && item.getValue() instanceof String) { m = p.matcher((String)item.getKey()); if(m.find()) { // match result.put((String) item.getKey(), substituteVars(getProps().getProperty((String) item.getKey()))); } } } return result; } /** * A unique class which is used as a sentinel value in the 
caching * for getClassByName. {@see Configuration#getClassByNameOrNull(String)} */ private static abstract class NegativeCacheSentinel {} public static void dumpDeprecatedKeys() { DeprecationContext deprecations = deprecationContext.get(); for (Map.Entry<String, DeprecatedKeyInfo> entry : deprecations.getDeprecatedKeyMap().entrySet()) { StringBuilder newKeys = new StringBuilder(); for (String newKey : entry.getValue().newKeys) { newKeys.append(newKey).append("\t"); } System.out.println(entry.getKey() + "\t" + newKeys.toString()); } } /** * Returns whether or not a deprecated name has been warned. If the name is not * deprecated then always return false */ public static boolean hasWarnedDeprecation(String name) { DeprecationContext deprecations = deprecationContext.get(); if(deprecations.getDeprecatedKeyMap().containsKey(name)) { if(deprecations.getDeprecatedKeyMap().get(name).accessed.get()) { return true; } } return false; } }
apache-2.0
gregjones60/keycloak
model/api/src/main/java/org/keycloak/models/utils/reflection/PropertyQuery.java
5968
package org.keycloak.models.utils.reflection;

import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * <p> Queries a target class for properties that match certain criteria. A property may either be a private or public
 * field, declared by the target class or inherited from a superclass, or a public method declared by the target class
 * or inherited from any of its superclasses. For properties that are exposed via a method, the property must be a
 * JavaBean style property, i.e. it must provide both an accessor and mutator method according to the JavaBean
 * specification. </p> <p/> <p> This class is not thread-safe, however the result returned by the getResultList() method
 * is. </p>
 *
 * @see PropertyQueries
 * @see PropertyCriteria
 */
public class PropertyQuery<V> {

    private final Class<?> targetClass;

    private final List<PropertyCriteria> criteria;

    PropertyQuery(Class<?> targetClass) {
        if (targetClass == null) {
            throw new IllegalArgumentException("targetClass parameter may not be null");
        }
        this.targetClass = targetClass;
        this.criteria = new ArrayList<PropertyCriteria>();
    }

    /**
     * Add a criteria to query
     *
     * @param criteria the criteria to add
     */
    public PropertyQuery<V> addCriteria(PropertyCriteria criteria) {
        this.criteria.add(criteria);
        return this;
    }

    /**
     * Get the first result from the query, causing the query to be run.
     *
     * @return the first result, or null if there are no results
     */
    public Property<V> getFirstResult() {
        Map<String, Property<V>> results = getResultList();
        return results.isEmpty() ? null : results.values().iterator().next();
    }

    /**
     * Get the first result from the query that is not marked as read only, causing the query to be run.
     *
     * @return the first writable result, or null if there are no results
     */
    public Property<V> getFirstWritableResult() {
        Map<String, Property<V>> results = getWritableResultList();
        return results.isEmpty() ? null : results.values().iterator().next();
    }

    /**
     * Get a single result from the query, causing the query to be run. An exception is thrown if the query does not
     * return exactly one result.
     *
     * @return the single result
     *
     * @throws RuntimeException if the query does not return exactly one result
     */
    public Property<V> getSingleResult() {
        return requireSingleResult(getResultList());
    }

    /**
     * Get a single result from the query that is not marked as read only, causing the query to be run. An exception is
     * thrown if the query does not return exactly one result.
     *
     * @return the single writable result
     *
     * @throws RuntimeException if the query does not return exactly one result
     */
    public Property<V> getWritableSingleResult() {
        return requireSingleResult(getWritableResultList());
    }

    /**
     * Shared validation for the two single-result accessors: exactly one match is
     * returned, zero or multiple matches raise with a descriptive message.
     *
     * @param results the query results to validate
     *
     * @return the single property contained in {@code results}
     *
     * @throws RuntimeException if {@code results} does not contain exactly one entry
     */
    private Property<V> requireSingleResult(Map<String, Property<V>> results) {
        if (results.size() == 1) {
            return results.values().iterator().next();
        } else if (results.isEmpty()) {
            throw new RuntimeException(
                    "Expected one property match, but the criteria did not match any properties on "
                            + targetClass.getName());
        } else {
            throw new RuntimeException("Expected one property match, but the criteria matched "
                    + results.size() + " properties on " + targetClass.getName());
        }
    }

    /**
     * Get the result from the query, causing the query to be run.
     *
     * @return the results, or an empty list if there are no results
     */
    public Map<String, Property<V>> getResultList() {
        return getResultList(false);
    }

    /**
     * Get the non read only results from the query, causing the query to be run.
     *
     * @return the results, or an empty list if there are no results
     */
    public Map<String, Property<V>> getWritableResultList() {
        return getResultList(true);
    }

    /**
     * Get the result from the query, causing the query to be run.
     *
     * @param writable if this query should only return properties that are not read only
     *
     * @return the results, or an empty list if there are no results
     */
    private Map<String, Property<V>> getResultList(boolean writable) {
        Map<String, Property<V>> properties = new HashMap<String, Property<V>>();

        // First check public accessor methods (we ignore private methods)
        for (Method method : targetClass.getMethods()) {
            // Only JavaBean-style accessors are candidates.
            if (!(method.getName().startsWith("is") || method.getName().startsWith("get"))) {
                continue;
            }
            // Every registered criteria must accept the method.
            boolean match = true;
            for (PropertyCriteria c : criteria) {
                if (!c.methodMatches(method)) {
                    match = false;
                    break;
                }
            }
            if (match) {
                MethodProperty<V> property = Properties.<V>createProperty(method);
                if (!writable || !property.isReadOnly()) {
                    properties.put(property.getName(), property);
                }
            }
        }
        return Collections.unmodifiableMap(properties);
    }
}
apache-2.0
signed/intellij-community
java/java-impl/src/com/intellij/externalSystem/JavaProjectDataService.java
5477
/*
 * Copyright 2000-2013 JetBrains s.r.o.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.intellij.externalSystem;

import com.intellij.openapi.externalSystem.model.DataNode;
import com.intellij.openapi.externalSystem.model.Key;
import com.intellij.openapi.externalSystem.model.ProjectKeys;
import com.intellij.openapi.externalSystem.model.project.ProjectData;
import com.intellij.openapi.externalSystem.service.project.IdeModifiableModelsProvider;
import com.intellij.openapi.externalSystem.service.project.manage.AbstractProjectDataService;
import com.intellij.openapi.externalSystem.util.DisposeAwareProjectChange;
import com.intellij.openapi.externalSystem.util.ExternalSystemApiUtil;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.projectRoots.JavaSdk;
import com.intellij.openapi.projectRoots.JavaSdkVersion;
import com.intellij.openapi.projectRoots.ProjectJdkTable;
import com.intellij.openapi.projectRoots.Sdk;
import com.intellij.openapi.roots.LanguageLevelProjectExtension;
import com.intellij.openapi.roots.ProjectRootManager;
import com.intellij.pom.java.LanguageLevel;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;

import java.util.Collection;
import java.util.List;

/**
 * Applies imported {@link JavaProjectData} (JDK version and language level) to an IDE project.
 *
 * @author Denis Zhdanov
 * @since 4/15/13 12:09 PM
 */
public class JavaProjectDataService extends AbstractProjectDataService<JavaProjectData, Project> {

  @NotNull
  @Override
  public Key<JavaProjectData> getTargetDataKey() {
    return JavaProjectData.KEY;
  }

  /**
   * Imports the single {@link JavaProjectData} node: aligns the project SDK with the
   * requested JDK version (only if the current SDK is absent or too old) and then
   * raises the project language level if necessary.
   *
   * @throws IllegalArgumentException if more than one java project data node is given
   */
  @Override
  public void importData(@NotNull final Collection<DataNode<JavaProjectData>> toImport,
                         @Nullable final ProjectData projectData,
                         @NotNull final Project project,
                         @NotNull final IdeModifiableModelsProvider modelsProvider) {
    if (toImport.isEmpty() || projectData == null) {
      return;
    }
    if (toImport.size() != 1) {
      throw new IllegalArgumentException(String.format("Expected to get a single project but got %d: %s", toImport.size(), toImport));
    }

    final DataNode<JavaProjectData> javaProjectDataNode = toImport.iterator().next();
    final DataNode<ProjectData> projectDataNode = ExternalSystemApiUtil.findParent(javaProjectDataNode, ProjectKeys.PROJECT);
    assert projectDataNode != null;

    // Only apply project-level settings when the external project maps 1:1 onto the IDE project.
    if (!ExternalSystemApiUtil.isOneToOneMapping(project, projectDataNode.getData())) {
      return;
    }

    JavaProjectData javaProjectData = javaProjectDataNode.getData();

    // JDK: replace the project SDK only when there is none or it is older than requested.
    JavaSdkVersion version = javaProjectData.getJdkVersion();
    JavaSdk javaSdk = JavaSdk.getInstance();
    ProjectRootManager rootManager = ProjectRootManager.getInstance(project);
    Sdk sdk = rootManager.getProjectSdk();
    JavaSdkVersion currentVersion = sdk == null ? null : javaSdk.getVersion(sdk);
    if (currentVersion == null || !currentVersion.isAtLeast(version)) {
      updateSdk(project, version);
    }

    // Language level.
    setLanguageLevel(javaProjectData.getLanguageLevel(), project);
  }

  /**
   * Sets the project SDK to a registered JDK that satisfies {@code version} (if one exists)
   * and lowers the project language level to the SDK's maximum when it currently exceeds it.
   */
  private static void updateSdk(@NotNull final Project project, @NotNull final JavaSdkVersion version) {
    final Sdk sdk = findJdk(version);
    if (sdk == null) return;
    ExternalSystemApiUtil.executeProjectChangeAction(new DisposeAwareProjectChange(project) {
      @Override
      public void execute() {
        ProjectRootManager.getInstance(project).setProjectSdk(sdk);
        LanguageLevel level = version.getMaxLanguageLevel();
        LanguageLevelProjectExtension languageLevelExtension = LanguageLevelProjectExtension.getInstance(project);
        if (level.compareTo(languageLevelExtension.getLanguageLevel()) < 0) {
          languageLevelExtension.setLanguageLevel(level);
        }
      }
    });
  }

  /**
   * Finds a registered JDK for the requested version: an exact version match wins;
   * otherwise the first JDK whose maximum language level covers the requested one
   * is remembered as a fallback candidate.
   *
   * @return a matching or capable SDK, or {@code null} if none is registered
   */
  @Nullable
  private static Sdk findJdk(@NotNull JavaSdkVersion version) {
    JavaSdk javaSdk = JavaSdk.getInstance();
    List<Sdk> javaSdks = ProjectJdkTable.getInstance().getSdksOfType(javaSdk);
    LanguageLevel requiredLevel = version.getMaxLanguageLevel();
    Sdk candidate = null;
    for (Sdk sdk : javaSdks) {
      JavaSdkVersion v = javaSdk.getVersion(sdk);
      if (v == version) {
        return sdk;
      }
      // BUGFIX: the original compared 'version' against itself (always true), so any SDK
      // became a candidate. Compare the SDK's own capability against the required level.
      if (candidate == null && v != null && v.getMaxLanguageLevel().isAtLeast(requiredLevel)) {
        candidate = sdk;
      }
    }
    return candidate;
  }

  /** Raises the project language level to {@code languageLevel} unless it is already at least that high. */
  @SuppressWarnings("MethodMayBeStatic")
  public void setLanguageLevel(@NotNull final LanguageLevel languageLevel, @NotNull Project project) {
    final LanguageLevelProjectExtension languageLevelExtension = LanguageLevelProjectExtension.getInstance(project);
    if (languageLevelExtension.getLanguageLevel().isAtLeast(languageLevel)) {
      return;
    }
    ExternalSystemApiUtil.executeProjectChangeAction(new DisposeAwareProjectChange(project) {
      @Override
      public void execute() {
        languageLevelExtension.setLanguageLevel(languageLevel);
      }
    });
  }
}
apache-2.0
sitexa/vaadin
server/tests/src/com/vaadin/data/util/BeanItemContainerTest.java
35538
package com.vaadin.data.util; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import org.easymock.Capture; import org.easymock.EasyMock; import org.junit.Assert; import com.vaadin.data.Container; import com.vaadin.data.Container.Indexed.ItemAddEvent; import com.vaadin.data.Container.Indexed.ItemRemoveEvent; import com.vaadin.data.Container.ItemSetChangeListener; import com.vaadin.data.Item; import com.vaadin.data.util.NestedMethodPropertyTest.Address; import com.vaadin.data.util.filter.Compare; /** * Test basic functionality of BeanItemContainer. * * Most sorting related tests are in {@link BeanItemContainerSortTest}. */ public class BeanItemContainerTest extends AbstractBeanContainerTestBase { // basics from the common container test private Map<String, ClassName> nameToBean = new LinkedHashMap<String, ClassName>(); private BeanItemContainer<ClassName> getContainer() { return new BeanItemContainer<ClassName>(ClassName.class); } @Override public void setUp() { nameToBean.clear(); for (int i = 0; i < sampleData.length; i++) { ClassName className = new ClassName(sampleData[i], i); nameToBean.put(sampleData[i], className); } } @Override @SuppressWarnings("unchecked") protected void initializeContainer(Container container) { BeanItemContainer<ClassName> beanItemContainer = (BeanItemContainer<ClassName>) container; beanItemContainer.removeAllItems(); Iterator<ClassName> it = nameToBean.values().iterator(); while (it.hasNext()) { beanItemContainer.addBean(it.next()); } } @Override protected void validateContainer(Container container, Object expectedFirstItemId, Object expectedLastItemId, Object itemIdInSet, Object itemIdNotInSet, boolean checkGetItemNull, int expectedSize) { Object notInSet = nameToBean.get(itemIdNotInSet); if (notInSet == null && itemIdNotInSet != null) { notInSet = new ClassName(String.valueOf(itemIdNotInSet), 9999); 
} super.validateContainer(container, nameToBean.get(expectedFirstItemId), nameToBean.get(expectedLastItemId), nameToBean.get(itemIdInSet), notInSet, checkGetItemNull, expectedSize); } @Override protected boolean isFilteredOutItemNull() { return false; } public void testGetType_existingProperty_typeReturned() { BeanItemContainer<ClassName> container = getContainer(); Assert.assertEquals( "Unexpected type is returned for property 'simpleName'", String.class, container.getType("simpleName")); } public void testGetType_notExistingProperty_nullReturned() { BeanItemContainer<ClassName> container = getContainer(); Assert.assertNull("Not null type is returned for property ''", container.getType("")); } public void testBasicOperations() { testBasicContainerOperations(getContainer()); } public void testFiltering() { testContainerFiltering(getContainer()); } public void testSorting() { testContainerSorting(getContainer()); } public void testSortingAndFiltering() { testContainerSortingAndFiltering(getContainer()); } // duplicated from parent class and modified - adding items to // BeanItemContainer differs from other containers public void testContainerOrdered() { BeanItemContainer<String> container = new BeanItemContainer<String>( String.class); String id = "test1"; Item item = container.addBean(id); assertNotNull(item); assertEquals(id, container.firstItemId()); assertEquals(id, container.lastItemId()); // isFirstId assertTrue(container.isFirstId(id)); assertTrue(container.isFirstId(container.firstItemId())); // isLastId assertTrue(container.isLastId(id)); assertTrue(container.isLastId(container.lastItemId())); // Add a new item before the first // addItemAfter String newFirstId = "newFirst"; item = container.addItemAfter(null, newFirstId); assertNotNull(item); assertNotNull(container.getItem(newFirstId)); // isFirstId assertTrue(container.isFirstId(newFirstId)); assertTrue(container.isFirstId(container.firstItemId())); // isLastId assertTrue(container.isLastId(id)); 
assertTrue(container.isLastId(container.lastItemId())); // nextItemId assertEquals(id, container.nextItemId(newFirstId)); assertNull(container.nextItemId(id)); assertNull(container.nextItemId("not-in-container")); // prevItemId assertEquals(newFirstId, container.prevItemId(id)); assertNull(container.prevItemId(newFirstId)); assertNull(container.prevItemId("not-in-container")); // addItemAfter(Object) String newSecondItemId = "newSecond"; item = container.addItemAfter(newFirstId, newSecondItemId); // order is now: newFirstId, newSecondItemId, id assertNotNull(item); assertNotNull(container.getItem(newSecondItemId)); assertEquals(id, container.nextItemId(newSecondItemId)); assertEquals(newFirstId, container.prevItemId(newSecondItemId)); // addItemAfter(Object,Object) String fourthId = "id of the fourth item"; Item fourth = container.addItemAfter(newFirstId, fourthId); // order is now: newFirstId, fourthId, newSecondItemId, id assertNotNull(fourth); assertEquals(fourth, container.getItem(fourthId)); assertEquals(newSecondItemId, container.nextItemId(fourthId)); assertEquals(newFirstId, container.prevItemId(fourthId)); // addItemAfter(Object,Object) Object fifthId = "fifth"; Item fifth = container.addItemAfter(null, fifthId); // order is now: fifthId, newFirstId, fourthId, newSecondItemId, id assertNotNull(fifth); assertEquals(fifth, container.getItem(fifthId)); assertEquals(newFirstId, container.nextItemId(fifthId)); assertNull(container.prevItemId(fifthId)); } public void testContainerIndexed() { testContainerIndexed(getContainer(), nameToBean.get(sampleData[2]), 2, false, new ClassName("org.vaadin.test.Test", 8888), true); } @SuppressWarnings("deprecation") public void testCollectionConstructors() { List<ClassName> classNames = new ArrayList<ClassName>(); classNames.add(new ClassName("a.b.c.Def", 1)); classNames.add(new ClassName("a.b.c.Fed", 2)); classNames.add(new ClassName("b.c.d.Def", 3)); // note that this constructor is problematic, users should use the // 
version that // takes the bean class as a parameter BeanItemContainer<ClassName> container = new BeanItemContainer<ClassName>( classNames); Assert.assertEquals(3, container.size()); Assert.assertEquals(classNames.get(0), container.firstItemId()); Assert.assertEquals(classNames.get(1), container.getIdByIndex(1)); Assert.assertEquals(classNames.get(2), container.lastItemId()); BeanItemContainer<ClassName> container2 = new BeanItemContainer<ClassName>( ClassName.class, classNames); Assert.assertEquals(3, container2.size()); Assert.assertEquals(classNames.get(0), container2.firstItemId()); Assert.assertEquals(classNames.get(1), container2.getIdByIndex(1)); Assert.assertEquals(classNames.get(2), container2.lastItemId()); } // this only applies to the collection constructor with no type parameter @SuppressWarnings("deprecation") public void testEmptyCollectionConstructor() { try { new BeanItemContainer<ClassName>((Collection<ClassName>) null); Assert.fail("Initializing BeanItemContainer from a null collection should not work!"); } catch (IllegalArgumentException e) { // success } try { new BeanItemContainer<ClassName>(new ArrayList<ClassName>()); Assert.fail("Initializing BeanItemContainer from an empty collection should not work!"); } catch (IllegalArgumentException e) { // success } } public void testItemSetChangeListeners() { BeanItemContainer<ClassName> container = getContainer(); ItemSetChangeCounter counter = new ItemSetChangeCounter(); container.addListener(counter); ClassName cn1 = new ClassName("com.example.Test", 1111); ClassName cn2 = new ClassName("com.example.Test2", 2222); initializeContainer(container); counter.reset(); container.addBean(cn1); counter.assertOnce(); initializeContainer(container); counter.reset(); container.addItem(cn1); counter.assertOnce(); // no notification if already in container container.addItem(cn1); counter.assertNone(); container.addItem(cn2); counter.assertOnce(); initializeContainer(container); counter.reset(); 
container.addItemAfter(null, cn1); counter.assertOnce(); Assert.assertEquals( "com.example.Test", container.getContainerProperty(container.firstItemId(), FULLY_QUALIFIED_NAME).getValue()); initializeContainer(container); counter.reset(); container.addItemAfter(container.firstItemId(), cn1); counter.assertOnce(); Assert.assertEquals( "com.example.Test", container.getContainerProperty(container.getIdByIndex(1), FULLY_QUALIFIED_NAME).getValue()); initializeContainer(container); counter.reset(); container.addItemAfter(container.lastItemId(), cn1); counter.assertOnce(); Assert.assertEquals( "com.example.Test", container.getContainerProperty(container.lastItemId(), FULLY_QUALIFIED_NAME).getValue()); initializeContainer(container); counter.reset(); container.addItemAt(0, cn1); counter.assertOnce(); Assert.assertEquals( "com.example.Test", container.getContainerProperty(container.firstItemId(), FULLY_QUALIFIED_NAME).getValue()); initializeContainer(container); counter.reset(); container.addItemAt(1, cn1); counter.assertOnce(); Assert.assertEquals( "com.example.Test", container.getContainerProperty(container.getIdByIndex(1), FULLY_QUALIFIED_NAME).getValue()); initializeContainer(container); counter.reset(); container.addItemAt(container.size(), cn1); counter.assertOnce(); Assert.assertEquals( "com.example.Test", container.getContainerProperty(container.lastItemId(), FULLY_QUALIFIED_NAME).getValue()); initializeContainer(container); counter.reset(); container.removeItem(nameToBean.get(sampleData[0])); counter.assertOnce(); initializeContainer(container); counter.reset(); // no notification for removing a non-existing item container.removeItem(cn1); counter.assertNone(); initializeContainer(container); counter.reset(); container.removeAllItems(); counter.assertOnce(); // already empty container.removeAllItems(); counter.assertNone(); } public void testItemSetChangeListenersFiltering() { BeanItemContainer<ClassName> container = getContainer(); ItemSetChangeCounter counter = 
new ItemSetChangeCounter(); container.addListener(counter); ClassName cn1 = new ClassName("com.example.Test", 1111); ClassName cn2 = new ClassName("com.example.Test2", 2222); ClassName other = new ClassName("com.example.Other", 3333); // simply adding or removing container filters should cause event // (content changes) initializeContainer(container); counter.reset(); container.addContainerFilter(SIMPLE_NAME, "a", true, false); counter.assertOnce(); container.removeContainerFilters(SIMPLE_NAME); counter.assertOnce(); initializeContainer(container); counter.reset(); container.addContainerFilter(SIMPLE_NAME, "a", true, false); counter.assertOnce(); container.removeAllContainerFilters(); counter.assertOnce(); // perform operations while filtering container initializeContainer(container); counter.reset(); container.addContainerFilter(FULLY_QUALIFIED_NAME, "Test", true, false); counter.assertOnce(); // passes filter container.addBean(cn1); counter.assertOnce(); // passes filter but already in the container container.addBean(cn1); counter.assertNone(); initializeContainer(container); counter.reset(); // passes filter container.addItem(cn1); counter.assertOnce(); // already in the container container.addItem(cn1); counter.assertNone(); container.addItem(cn2); counter.assertOnce(); // does not pass filter container.addItem(other); counter.assertNone(); initializeContainer(container); counter.reset(); container.addItemAfter(null, cn1); counter.assertOnce(); Assert.assertEquals( "com.example.Test", container.getContainerProperty(container.firstItemId(), FULLY_QUALIFIED_NAME).getValue()); initializeContainer(container); counter.reset(); container.addItemAfter(container.firstItemId(), cn1); counter.assertOnce(); Assert.assertEquals( "com.example.Test", container.getContainerProperty(container.getIdByIndex(1), FULLY_QUALIFIED_NAME).getValue()); initializeContainer(container); counter.reset(); container.addItemAfter(container.lastItemId(), cn1); counter.assertOnce(); 
Assert.assertEquals( "com.example.Test", container.getContainerProperty(container.lastItemId(), FULLY_QUALIFIED_NAME).getValue()); initializeContainer(container); counter.reset(); container.addItemAt(0, cn1); counter.assertOnce(); Assert.assertEquals( "com.example.Test", container.getContainerProperty(container.firstItemId(), FULLY_QUALIFIED_NAME).getValue()); initializeContainer(container); counter.reset(); container.addItemAt(1, cn1); counter.assertOnce(); Assert.assertEquals( "com.example.Test", container.getContainerProperty(container.getIdByIndex(1), FULLY_QUALIFIED_NAME).getValue()); initializeContainer(container); counter.reset(); container.addItemAt(container.size(), cn1); counter.assertOnce(); Assert.assertEquals( "com.example.Test", container.getContainerProperty(container.lastItemId(), FULLY_QUALIFIED_NAME).getValue()); // does not pass filter // note: testAddRemoveWhileFiltering() checks position for these after // removing filter etc, here concentrating on listeners initializeContainer(container); counter.reset(); container.addItemAfter(null, other); counter.assertNone(); initializeContainer(container); counter.reset(); container.addItemAfter(container.firstItemId(), other); counter.assertNone(); initializeContainer(container); counter.reset(); container.addItemAfter(container.lastItemId(), other); counter.assertNone(); initializeContainer(container); counter.reset(); container.addItemAt(0, other); counter.assertNone(); initializeContainer(container); counter.reset(); container.addItemAt(1, other); counter.assertNone(); initializeContainer(container); counter.reset(); container.addItemAt(container.size(), other); counter.assertNone(); // passes filter initializeContainer(container); counter.reset(); container.addItem(cn1); counter.assertOnce(); container.removeItem(cn1); counter.assertOnce(); // does not pass filter initializeContainer(container); counter.reset(); // not visible container.removeItem(nameToBean.get(sampleData[0])); counter.assertNone(); 
container.removeAllItems(); counter.assertOnce(); // no visible items container.removeAllItems(); counter.assertNone(); } public void testAddRemoveWhileFiltering() { BeanItemContainer<Person> container = new BeanItemContainer<Person>( Person.class); Person john = new Person("John"); Person jane = new Person("Jane"); Person matthew = new Person("Matthew"); Person jack = new Person("Jack"); Person michael = new Person("Michael"); Person william = new Person("William"); Person julia = new Person("Julia"); Person george = new Person("George"); Person mark = new Person("Mark"); container.addBean(john); container.addBean(jane); container.addBean(matthew); assertEquals(3, container.size()); // john, jane, matthew container.addContainerFilter("name", "j", true, true); assertEquals(2, container.size()); // john, jane, (matthew) // add a bean that passes the filter container.addBean(jack); assertEquals(3, container.size()); assertEquals(jack, container.lastItemId()); // john, jane, (matthew), jack // add beans that do not pass the filter container.addBean(michael); // john, jane, (matthew), jack, (michael) container.addItemAfter(null, william); // (william), john, jane, (matthew), jack, (michael) // add after an item that is shown container.addItemAfter(john, george); // (william), john, (george), jane, (matthew), jack, (michael) assertEquals(3, container.size()); assertEquals(john, container.firstItemId()); // add after an item that is not shown does nothing container.addItemAfter(william, julia); // (william), john, (george), jane, (matthew), jack, (michael) assertEquals(3, container.size()); assertEquals(john, container.firstItemId()); container.addItemAt(1, julia); // (william), john, julia, (george), jane, (matthew), jack, (michael) container.addItemAt(2, mark); // (william), john, julia, (mark), (george), jane, (matthew), jack, // (michael) container.removeItem(matthew); // (william), john, julia, (mark), (george), jane, jack, (michael) assertEquals(4, 
container.size()); assertEquals(jack, container.lastItemId()); container.removeContainerFilters("name"); assertEquals(8, container.size()); assertEquals(william, container.firstItemId()); assertEquals(john, container.nextItemId(william)); assertEquals(julia, container.nextItemId(john)); assertEquals(mark, container.nextItemId(julia)); assertEquals(george, container.nextItemId(mark)); assertEquals(jane, container.nextItemId(george)); assertEquals(jack, container.nextItemId(jane)); assertEquals(michael, container.lastItemId()); } public void testRefilterOnPropertyModification() { BeanItemContainer<Person> container = new BeanItemContainer<Person>( Person.class); Person john = new Person("John"); Person jane = new Person("Jane"); Person matthew = new Person("Matthew"); container.addBean(john); container.addBean(jane); container.addBean(matthew); assertEquals(3, container.size()); // john, jane, matthew container.addContainerFilter("name", "j", true, true); assertEquals(2, container.size()); // john, jane, (matthew) // #6053 currently, modification of an item that is not visible does not // trigger refiltering - should it? 
// matthew.setName("Julia"); // assertEquals(3, container.size()); // john, jane, julia john.setName("Mark"); assertEquals(2, container.size()); // (mark), jane, julia container.removeAllContainerFilters(); assertEquals(3, container.size()); } public void testAddAll() { BeanItemContainer<Person> container = new BeanItemContainer<Person>( Person.class); Person john = new Person("John"); Person jane = new Person("Jane"); Person matthew = new Person("Matthew"); container.addBean(john); container.addBean(jane); container.addBean(matthew); assertEquals(3, container.size()); // john, jane, matthew Person jack = new Person("Jack"); Person michael = new Person("Michael"); // addAll container.addAll(Arrays.asList(jack, michael)); // john, jane, matthew, jack, michael assertEquals(5, container.size()); assertEquals(jane, container.nextItemId(john)); assertEquals(matthew, container.nextItemId(jane)); assertEquals(jack, container.nextItemId(matthew)); assertEquals(michael, container.nextItemId(jack)); } public void testUnsupportedMethods() { BeanItemContainer<Person> container = new BeanItemContainer<Person>( Person.class); container.addBean(new Person("John")); try { container.addItem(); Assert.fail(); } catch (UnsupportedOperationException e) { // should get exception } try { container.addItemAfter(new Person("Jane")); Assert.fail(); } catch (UnsupportedOperationException e) { // should get exception } try { container.addItemAt(0); Assert.fail(); } catch (UnsupportedOperationException e) { // should get exception } try { container.addContainerProperty("lastName", String.class, ""); Assert.fail(); } catch (UnsupportedOperationException e) { // should get exception } assertEquals(1, container.size()); } public void testRemoveContainerProperty() { BeanItemContainer<Person> container = new BeanItemContainer<Person>( Person.class); Person john = new Person("John"); container.addBean(john); Assert.assertEquals("John", container .getContainerProperty(john, "name").getValue()); 
Assert.assertTrue(container.removeContainerProperty("name")); Assert.assertNull(container.getContainerProperty(john, "name")); Assert.assertNotNull(container.getItem(john)); // property removed also from item Assert.assertNull(container.getItem(john).getItemProperty("name")); } public void testAddNullBean() { BeanItemContainer<Person> container = new BeanItemContainer<Person>( Person.class); Person john = new Person("John"); container.addBean(john); assertNull(container.addItem(null)); assertNull(container.addItemAfter(null, null)); assertNull(container.addItemAfter(john, null)); assertNull(container.addItemAt(0, null)); assertEquals(1, container.size()); } public void testBeanIdResolver() { BeanItemContainer<Person> container = new BeanItemContainer<Person>( Person.class); Person john = new Person("John"); assertSame(john, container.getBeanIdResolver().getIdForBean(john)); } public void testNullBeanClass() { try { new BeanItemContainer<Object>((Class<Object>) null); } catch (IllegalArgumentException e) { // should get exception } } public void testAddNestedContainerProperty() { BeanItemContainer<NestedMethodPropertyTest.Person> container = new BeanItemContainer<NestedMethodPropertyTest.Person>( NestedMethodPropertyTest.Person.class); NestedMethodPropertyTest.Person john = new NestedMethodPropertyTest.Person( "John", new NestedMethodPropertyTest.Address("Ruukinkatu 2-4", 20540)); container.addBean(john); assertTrue(container.addNestedContainerProperty("address.street")); assertEquals("Ruukinkatu 2-4", container.getContainerProperty(john, "address.street") .getValue()); } public void testNestedContainerPropertyWithNullBean() { BeanItemContainer<NestedMethodPropertyTest.Person> container = new BeanItemContainer<NestedMethodPropertyTest.Person>( NestedMethodPropertyTest.Person.class); NestedMethodPropertyTest.Person john = new NestedMethodPropertyTest.Person( "John", null); assertNotNull(container.addBean(john)); assertTrue(container 
.addNestedContainerProperty("address.postalCodeObject")); assertTrue(container.addNestedContainerProperty("address.street")); // the nested properties should return null assertNull(container.getContainerProperty(john, "address.street") .getValue()); } public void testItemAddedEvent() { BeanItemContainer<Person> container = new BeanItemContainer<Person>( Person.class); Person bean = new Person("John"); ItemSetChangeListener addListener = createListenerMockFor(container); addListener.containerItemSetChange(EasyMock.isA(ItemAddEvent.class)); EasyMock.replay(addListener); container.addItem(bean); EasyMock.verify(addListener); } public void testItemAddedEvent_AddedItem() { BeanItemContainer<Person> container = new BeanItemContainer<Person>( Person.class); Person bean = new Person("John"); ItemSetChangeListener addListener = createListenerMockFor(container); Capture<ItemAddEvent> capturedEvent = captureAddEvent(addListener); EasyMock.replay(addListener); container.addItem(bean); assertEquals(bean, capturedEvent.getValue().getFirstItemId()); } public void testItemAddedEvent_addItemAt_IndexOfAddedItem() { BeanItemContainer<Person> container = new BeanItemContainer<Person>( Person.class); Person bean = new Person("John"); container.addItem(bean); ItemSetChangeListener addListener = createListenerMockFor(container); Capture<ItemAddEvent> capturedEvent = captureAddEvent(addListener); EasyMock.replay(addListener); container.addItemAt(1, new Person("")); assertEquals(1, capturedEvent.getValue().getFirstIndex()); } public void testItemAddedEvent_addItemAfter_IndexOfAddedItem() { BeanItemContainer<Person> container = new BeanItemContainer<Person>( Person.class); Person bean = new Person("John"); container.addItem(bean); ItemSetChangeListener addListener = createListenerMockFor(container); Capture<ItemAddEvent> capturedEvent = captureAddEvent(addListener); EasyMock.replay(addListener); container.addItemAfter(bean, new Person("")); assertEquals(1, 
capturedEvent.getValue().getFirstIndex()); } public void testItemAddedEvent_amountOfAddedItems() { BeanItemContainer<Person> container = new BeanItemContainer<Person>( Person.class); ItemSetChangeListener addListener = createListenerMockFor(container); Capture<ItemAddEvent> capturedEvent = captureAddEvent(addListener); EasyMock.replay(addListener); List<Person> beans = Arrays.asList(new Person("Jack"), new Person( "John")); container.addAll(beans); assertEquals(2, capturedEvent.getValue().getAddedItemsCount()); } public void testItemAddedEvent_someItemsAreFiltered_amountOfAddedItemsIsReducedByAmountOfFilteredItems() { BeanItemContainer<Person> container = new BeanItemContainer<Person>( Person.class); ItemSetChangeListener addListener = createListenerMockFor(container); Capture<ItemAddEvent> capturedEvent = captureAddEvent(addListener); EasyMock.replay(addListener); List<Person> beans = Arrays.asList(new Person("Jack"), new Person( "John")); container.addFilter(new Compare.Equal("name", "John")); container.addAll(beans); assertEquals(1, capturedEvent.getValue().getAddedItemsCount()); } public void testItemAddedEvent_someItemsAreFiltered_addedItemIsTheFirstVisibleItem() { BeanItemContainer<Person> container = new BeanItemContainer<Person>( Person.class); Person bean = new Person("John"); ItemSetChangeListener addListener = createListenerMockFor(container); Capture<ItemAddEvent> capturedEvent = captureAddEvent(addListener); EasyMock.replay(addListener); List<Person> beans = Arrays.asList(new Person("Jack"), bean); container.addFilter(new Compare.Equal("name", "John")); container.addAll(beans); assertEquals(bean, capturedEvent.getValue().getFirstItemId()); } public void testItemRemovedEvent() { BeanItemContainer<Person> container = new BeanItemContainer<Person>( Person.class); Person bean = new Person("John"); container.addItem(bean); ItemSetChangeListener removeListener = createListenerMockFor(container); removeListener.containerItemSetChange(EasyMock 
.isA(ItemRemoveEvent.class)); EasyMock.replay(removeListener); container.removeItem(bean); EasyMock.verify(removeListener); } public void testItemRemovedEvent_RemovedItem() { BeanItemContainer<Person> container = new BeanItemContainer<Person>( Person.class); Person bean = new Person("John"); container.addItem(bean); ItemSetChangeListener removeListener = createListenerMockFor(container); Capture<ItemRemoveEvent> capturedEvent = captureRemoveEvent(removeListener); EasyMock.replay(removeListener); container.removeItem(bean); assertEquals(bean, capturedEvent.getValue().getFirstItemId()); } public void testItemRemovedEvent_indexOfRemovedItem() { BeanItemContainer<Person> container = new BeanItemContainer<Person>( Person.class); container.addItem(new Person("Jack")); Person secondBean = new Person("John"); container.addItem(secondBean); ItemSetChangeListener removeListener = createListenerMockFor(container); Capture<ItemRemoveEvent> capturedEvent = captureRemoveEvent(removeListener); EasyMock.replay(removeListener); container.removeItem(secondBean); assertEquals(1, capturedEvent.getValue().getFirstIndex()); } public void testItemRemovedEvent_amountOfRemovedItems() { BeanItemContainer<Person> container = new BeanItemContainer<Person>( Person.class); container.addItem(new Person("Jack")); container.addItem(new Person("John")); ItemSetChangeListener removeListener = createListenerMockFor(container); Capture<ItemRemoveEvent> capturedEvent = captureRemoveEvent(removeListener); EasyMock.replay(removeListener); container.removeAllItems(); assertEquals(2, capturedEvent.getValue().getRemovedItemsCount()); } private Capture<ItemAddEvent> captureAddEvent( ItemSetChangeListener addListener) { Capture<ItemAddEvent> capturedEvent = new Capture<ItemAddEvent>(); addListener.containerItemSetChange(EasyMock.capture(capturedEvent)); return capturedEvent; } private Capture<ItemRemoveEvent> captureRemoveEvent( ItemSetChangeListener removeListener) { Capture<ItemRemoveEvent> capturedEvent = 
new Capture<ItemRemoveEvent>(); removeListener.containerItemSetChange(EasyMock.capture(capturedEvent)); return capturedEvent; } private ItemSetChangeListener createListenerMockFor( BeanItemContainer<Person> container) { ItemSetChangeListener listener = EasyMock .createNiceMock(ItemSetChangeListener.class); container.addItemSetChangeListener(listener); return listener; } public void testAddNestedContainerBeanBeforeData() { BeanItemContainer<NestedMethodPropertyTest.Person> container = new BeanItemContainer<NestedMethodPropertyTest.Person>( NestedMethodPropertyTest.Person.class); container.addNestedContainerBean("address"); assertTrue(container.getContainerPropertyIds().contains( "address.street")); NestedMethodPropertyTest.Person john = new NestedMethodPropertyTest.Person( "John", new Address("streetname", 12345)); container.addBean(john); assertTrue(container.getItem(john).getItemPropertyIds() .contains("address.street")); assertEquals("streetname", container.getItem(john).getItemProperty("address.street") .getValue()); } public void testAddNestedContainerBeanAfterData() { BeanItemContainer<NestedMethodPropertyTest.Person> container = new BeanItemContainer<NestedMethodPropertyTest.Person>( NestedMethodPropertyTest.Person.class); NestedMethodPropertyTest.Person john = new NestedMethodPropertyTest.Person( "John", new Address("streetname", 12345)); container.addBean(john); container.addNestedContainerBean("address"); assertTrue(container.getContainerPropertyIds().contains( "address.street")); assertTrue(container.getItem(john).getItemPropertyIds() .contains("address.street")); assertEquals("streetname", container.getItem(john).getItemProperty("address.street") .getValue()); } }
apache-2.0
guozhangwang/kafka
trogdor/src/main/java/org/apache/kafka/trogdor/workload/NullPayloadGenerator.java
1177
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.kafka.trogdor.workload;

import com.fasterxml.jackson.annotation.JsonCreator;

/**
 * A PayloadGenerator which always generates a null payload.
 *
 * <p>Useful for workloads that want to produce records with no value at all
 * (e.g. tombstones), regardless of the record's position in the sequence.
 * The generator is stateless and therefore trivially thread-safe.
 */
public class NullPayloadGenerator implements PayloadGenerator {
    /** Jackson creator; there is no configuration to deserialize for this generator. */
    @JsonCreator
    public NullPayloadGenerator() {
    }

    /**
     * Returns {@code null} for every position, i.e. the payload is always absent.
     *
     * @param position ignored; the result does not depend on it.
     * @return always {@code null}.
     */
    @Override
    public byte[] generate(long position) {
        return null;
    }
}
apache-2.0
mohamed--abdel-maksoud/chromium.src
components/devtools_bridge/test/android/javatests/src/org/chromium/components/devtools_bridge/SocketTunnelClient.java
10277
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package org.chromium.components.devtools_bridge; import android.net.LocalServerSocket; import android.net.LocalSocket; import android.util.Log; import java.io.IOException; import java.util.HashMap; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicReference; /** * Listens LocalServerSocket and tunnels all connections to the SocketTunnelServer. */ public class SocketTunnelClient extends SocketTunnelBase { private static final String TAG = "SocketTunnelClient"; private enum State { INITIAL, RUNNING, STOPPED } private final AtomicReference<State> mState = new AtomicReference<State>(State.INITIAL); private final LocalServerSocket mSocket; private final ExecutorService mThreadPool = Executors.newCachedThreadPool(); // Connections with opened server to client stream. Always accesses on signaling thread. private final Map<Integer, Connection> mServerConnections = new HashMap<Integer, Connection>(); // Accepted connections are kept here until server returns SERVER_OPEN_ACK or SERVER_CLOSE. // New connections are added in the listening loop, checked and removed on signaling thread. // So add/read/remove synchronized through message round trip. private final ConcurrentMap<Integer, Connection> mPendingConnections = new ConcurrentHashMap<Integer, Connection>(); private final IdRegistry mIdRegistry = new IdRegistry(MIN_CONNECTION_ID, MAX_CONNECTION_ID, 2); /** * This class responsible for generating valid connection IDs. It count usage of connection: * one user for client to server stream and one for server to client one. When both are closed * it's safe to reuse ID. 
*/ private static final class IdRegistry { private final int[] mLocks; private final int mMin; private final int mMax; private final int mMaxLocks; private final Object mLock = new Object(); public IdRegistry(int minId, int maxId, int maxLocks) { assert minId < maxId; assert maxLocks > 0; mMin = minId; mMax = maxId; mMaxLocks = maxLocks; mLocks = new int[maxId - minId + 1]; } public void lock(int id) { synchronized (mLock) { int index = toIndex(id); if (mLocks[index] == 0 || mLocks[index] == mMaxLocks) { throw new RuntimeException(); } mLocks[index]++; } } public void release(int id) { synchronized (mLock) { int index = toIndex(id); if (mLocks[index] == 0) { throw new RuntimeException("Releasing unlocked id " + Integer.toString(id)); } mLocks[index]--; } } public boolean isLocked(int id) { synchronized (mLock) { return mLocks[toIndex(id)] > 0; } } public int generate() throws NoIdAvailableException { synchronized (mLock) { for (int id = mMin; id != mMax; id++) { int index = toIndex(id); if (mLocks[index] == 0) { mLocks[index] = 1; return id; } } } throw new NoIdAvailableException(); } private int toIndex(int id) { if (id < mMin || id > mMax) { throw new RuntimeException(); } return id - mMin; } } private static class NoIdAvailableException extends Exception {} public SocketTunnelClient(String socketName) throws IOException { mSocket = new LocalServerSocket(socketName); } public boolean hasConnections() { return mServerConnections.size() + mPendingConnections.size() > 0; } @Override public AbstractDataChannel unbind() { AbstractDataChannel dataChannel = super.unbind(); if (mState.compareAndSet(State.RUNNING, State.STOPPED)) { terminateAllConnections(); closeSocket(); } return dataChannel; } @Override public void dispose() { if (mState.compareAndSet(State.INITIAL, State.STOPPED)) { closeSocket(); } assert mState.get() == State.STOPPED; mThreadPool.shutdown(); } @Override protected void onReceivedDataPacket(int connectionId, byte[] data) throws ProtocolError { 
checkCalledOnSignalingThread(); if (!mServerConnections.containsKey(connectionId)) throw new ProtocolError("Unknows connection id"); mServerConnections.get(connectionId).onReceivedDataPacket(data); } @Override protected void onReceivedControlPacket(int connectionId, byte opCode) throws ProtocolError { switch (opCode) { case SERVER_OPEN_ACK: onServerOpenAck(connectionId); break; case SERVER_CLOSE: onServerClose(connectionId); break; default: throw new ProtocolError("Invalid opCode"); } } private void onServerOpenAck(int connectionId) throws ProtocolError { checkCalledOnSignalingThread(); if (mServerConnections.containsKey(connectionId)) { throw new ProtocolError("Connection already acknowledged"); } if (!mPendingConnections.containsKey(connectionId)) { throw new ProtocolError("Unknow connection id"); } // Check/get is safe since it can be only removed on this thread. Connection connection = mPendingConnections.get(connectionId); mPendingConnections.remove(connectionId); mServerConnections.put(connectionId, connection); // Lock for client to server stream. mIdRegistry.lock(connectionId); mThreadPool.execute(connection); } private void onServerClose(int connectionId) throws ProtocolError { checkCalledOnSignalingThread(); if (mServerConnections.containsKey(connectionId)) { Connection connection = mServerConnections.get(connectionId); mServerConnections.remove(connectionId); mIdRegistry.release(connectionId); // Release sever to client stream. connection.closedByServer(); } else if (mPendingConnections.containsKey(connectionId)) { Connection connection = mPendingConnections.get(connectionId); mPendingConnections.remove(connectionId); connection.closedByServer(); sendToDataChannel(buildControlPacket(connectionId, CLIENT_CLOSE)); mIdRegistry.release(connectionId); // Release sever to client stream. 
} else { throw new ProtocolError("Closing unknown connection"); } } @Override protected void onDataChannelOpened() { if (!mState.compareAndSet(State.INITIAL, State.RUNNING)) { throw new InvalidStateException(); } mThreadPool.execute(new Runnable() { @Override public void run() { runListenLoop(); } }); } @Override protected void onDataChannelClosed() { // All new connections will be rejected. if (!mState.compareAndSet(State.RUNNING, State.STOPPED)) { throw new InvalidStateException(); } closeSocket(); } private void terminateAllConnections() { for (Connection connection : mServerConnections.values()) { connection.terminate(); } for (Connection connection : mPendingConnections.values()) { connection.terminate(); } closeSocket(); } private void closeSocket() { try { mSocket.close(); } catch (IOException e) { Log.d(TAG, "Failed to close socket: " + e); onSocketException(e, -1); } } private void runListenLoop() { try { while (true) { LocalSocket socket = mSocket.accept(); State state = mState.get(); if (mState.get() == State.RUNNING) { // Make sure no socket processed when stopped. clientOpenConnection(socket); } else { socket.close(); } } } catch (IOException e) { if (mState.get() != State.RUNNING) { onSocketException(e, -1); } // Else exception expected (socket closed). } } private void clientOpenConnection(LocalSocket socket) throws IOException { try { int id = mIdRegistry.generate(); // id generated locked for server to client stream. 
Connection connection = new Connection(id, socket); mPendingConnections.put(id, connection); sendToDataChannel(buildControlPacket(id, CLIENT_OPEN)); } catch (NoIdAvailableException e) { socket.close(); } } private final class Connection extends ConnectionBase implements Runnable { public Connection(int id, LocalSocket socket) { super(id, socket); } public void closedByServer() { shutdownOutput(); } @Override public void run() { assert mIdRegistry.isLocked(mId); runReadingLoop(); shutdownInput(); sendToDataChannel(buildControlPacket(mId, CLIENT_CLOSE)); mIdRegistry.release(mId); // Unlock for client to server stream. } } /** * Method called in inappropriate state. */ public static class InvalidStateException extends RuntimeException {} }
bsd-3-clause
daffycricket/tarotdroid
libFacebookSDK/src/main/java/com/facebook/NonCachingTokenCachingStrategy.java
1162
/**
 * Copyright 2012 Facebook
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.facebook;

import android.os.Bundle;

/**
 * Implements a trivial {@link TokenCachingStrategy} that does not actually cache any tokens.
 * It is intended for use when an access token may be used on a temporary basis but should not be
 * cached for future use (for instance, when handling a deep link).
 *
 * <p>All three operations are deliberate no-ops; the class is stateless.
 */
public class NonCachingTokenCachingStrategy extends TokenCachingStrategy {
    /** Always behaves as an empty cache: returns {@code null} so no token is restored. */
    @Override
    public Bundle load() {
        return null;
    }

    /** Intentionally a no-op: the supplied token state is discarded, never persisted. */
    @Override
    public void save(Bundle bundle) {
    }

    /** Intentionally a no-op: there is no persisted state to remove. */
    @Override
    public void clear() {
    }
}
gpl-2.0
tkaefer/camunda-bpm-platform
engine/src/test/java/org/camunda/bpm/engine/test/jobexecutor/JobExecutorTestCase.java
1745
/* Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.camunda.bpm.engine.test.jobexecutor;

import java.util.Date;

import org.camunda.bpm.engine.impl.persistence.entity.MessageEntity;
import org.camunda.bpm.engine.impl.persistence.entity.TimerEntity;
import org.camunda.bpm.engine.impl.test.PluggableProcessEngineTestCase;

/**
 * Base class for job-executor tests. Registers a {@link TweetHandler} with the
 * process engine for the duration of each test and offers factory methods for
 * jobs that target it.
 *
 * @author Tom Baeyens
 */
public class JobExecutorTestCase extends PluggableProcessEngineTestCase {

  // Handler shared by all jobs the subclasses create.
  protected TweetHandler tweetHandler = new TweetHandler();

  public void setUp() throws Exception {
    // Make the tweet handler available to the job executor before each test runs.
    processEngineConfiguration.getJobHandlers().put(tweetHandler.getType(), tweetHandler);
  }

  public void tearDown() throws Exception {
    // Deregister again so the handler does not leak into other tests sharing the engine.
    processEngineConfiguration.getJobHandlers().remove(tweetHandler.getType());
  }

  /** Builds an immediately-runnable message job carrying {@code msg} as its configuration. */
  protected MessageEntity createTweetMessage(String msg) {
    MessageEntity entity = new MessageEntity();
    entity.setJobHandlerType("tweet");
    entity.setJobHandlerConfiguration(msg);
    return entity;
  }

  /** Builds a timer job that fires at {@code duedate} and carries {@code msg}. */
  protected TimerEntity createTweetTimer(String msg, Date duedate) {
    TimerEntity entity = new TimerEntity();
    entity.setDuedate(duedate);
    entity.setJobHandlerType("tweet");
    entity.setJobHandlerConfiguration(msg);
    return entity;
  }
}
apache-2.0
jruchcolo/rice-cd
rice-middleware/kns/src/main/java/org/kuali/rice/kns/maintenance/package-info.java
689
/** * Copyright 2005-2015 The Kuali Foundation * * Licensed under the Educational Community License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.opensource.org/licenses/ecl2.php * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ @Deprecated package org.kuali.rice.kns.maintenance;
apache-2.0
geothomasp/kualico-rice-kc
rice-middleware/ksb/api/src/main/java/org/kuali/rice/ksb/api/bus/ServiceDefinition.java
5651
/**
 * Copyright 2005-2015 The Kuali Foundation
 *
 * Licensed under the Educational Community License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.opensource.org/licenses/ecl2.php
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.kuali.rice.ksb.api.bus;

import java.net.URL;

import javax.xml.namespace.QName;

import org.kuali.rice.core.api.config.ConfigurationException;
import org.kuali.rice.core.api.security.credentials.CredentialsType;
import org.kuali.rice.ksb.api.registry.ServiceRegistry;

/**
 * Defines the common parameters for the publication and export of a service
 * to the {@link ServiceBus} and {@link ServiceRegistry}. Implementations of
 * this interface may define additional properties that are required for the
 * publication of services of that particular type.
 *
 * @author Kuali Rice Team (rice.collab@kuali.org)
 */
public interface ServiceDefinition {

    /**
     * Validates the service definition after creation of the service definition.
     * It's intended that portions of the KSB that handle publication and export
     * of services to the bus will execute this prior to successful export of
     * the service.
     *
     * @throws ConfigurationException if this service definition is not
     * configured properly
     */
    void validate();

    /**
     * Establishes and returns an {@link Endpoint} to this service which
     * generates the {@link ServiceConfiguration} for this service definition
     * as well as including the actual service implementation as provided by
     * {@link #getService()}.
     *
     * <p>The {@link #validate()} method should be invoked prior to executing
     * this method in order to ensure that the appropriate internal state for
     * the {@link ServiceDefinition} has been established.
     *
     * @return the established endpoint, should never return null
     */
    Endpoint establishEndpoint();

    /**
     * Return the actual service implementation to publish and export to the
     * service bus.
     *
     * @return the service to publish
     */
    Object getService();

    /**
     * Returns the qualified name for this service.
     *
     * @return the qualified name for this service
     */
    QName getServiceName();

    /**
     * Returns the URL of the endpoint which provides this service.
     *
     * @return the endpoint URL of the service
     */
    URL getEndpointUrl();

    /**
     * Returns the {@link ClassLoader} that should be set as the context
     * classloader on the thread prior to any invocations on the service.
     *
     * @return the classloader for this service
     */
    ClassLoader getServiceClassLoader();

    /**
     * Returns the url path to export the service under.
     *
     * @return the url path to export the service under
     */
    String getServicePath();

    /**
     * Returns the id of the specific instance of the application which owns this service.
     *
     * @return the id of the specific instance of the application which owns this service
     */
    String getInstanceId();

    /**
     * Returns the id of the application which owns this service.
     *
     * @return the id of the application which owns this service
     */
    String getApplicationId();

    /**
     * Returns the version of this service.
     *
     * @return the version of this service
     */
    String getServiceVersion();

    /**
     * Returns the type of this service.
     *
     * @return the type of this service
     */
    String getType();

    /**
     * Return true if this service uses queue-style messaging (each message is
     * consumed by exactly one listener), false if it uses topic-style messaging
     * (each message is broadcast to all listeners).
     *
     * @return true if this service uses queue-style messaging, false if it uses
     * topic-style messaging
     */
    boolean isQueue();

    /**
     * Returns the processing priority for messages that are sent to this service.
     *
     * @return the message processing priority for this service
     */
    Integer getPriority();

    /**
     * Returns the retry attempts to use when processing messages sent to this
     * service.
     *
     * @return the retry attempts for this service
     */
    Integer getRetryAttempts();

    /**
     * Returns the maximum amount of milliseconds a message to this service can
     * live and attempt to be processed successfully by this service before it's
     * forced into processing by its exception handler.
     *
     * @return the maximum lifetime for this message; if null then this message has
     * an infinite lifetime
     */
    Long getMillisToLive();

    /**
     * Returns the name of the exception handler to invoke whenever messages to
     * this service fail to be sent. If null, the default message exception
     * handler will be used.
     *
     * @return the name of the message exception handler for this service, or
     * null if the default handler should be used
     */
    String getMessageExceptionHandler();

    /**
     * Returns true if this service is secured by standard KSB security features.
     *
     * @return true if this service is secured, false otherwise
     */
    Boolean getBusSecurity();

    /**
     * Returns the type of security credentials that should be used when
     * attempting to authorize access to this service.
     *
     * @return the type of security credentials to use when accessing this service
     */
    CredentialsType getCredentialsType();

    /**
     * Returns whether the service is secured with basic authentication.
     *
     * @return true if this service is secured with basic authentication
     */
    boolean isBasicAuthentication();
}
apache-2.0
lincoln-lil/flink
flink-runtime/src/test/java/org/apache/flink/runtime/operators/testutils/Match.java
1924
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.util.Objects;

/**
 * Utility class for keeping track of matches in join operator tests.
 *
 * <p>Either side of the pair may be {@code null} (outer-join semantics). Two matches
 * are equal when both sides are (null-safely) equal.
 *
 * @see MatchRemovingJoiner
 */
public class Match {

    private final String left;
    private final String right;

    public Match(String left, String right) {
        this.left = left;
        this.right = right;
    }

    /**
     * Null-safe, type-safe equality on both sides.
     *
     * <p>Fixes the original implementation, which cast {@code obj} without an
     * {@code instanceof} check (ClassCastException for foreign types, NPE for
     * {@code null}) and dereferenced potentially-null fields (NPE when only one
     * of the two objects had a null side, or when both sides were null).
     */
    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (!(obj instanceof Match)) {
            return false;
        }
        Match other = (Match) obj;
        return Objects.equals(left, other.left) && Objects.equals(right, other.right);
    }

    /**
     * Hash consistent with {@link #equals(Object)}; null-safe (the original threw
     * NPE when both sides were null).
     */
    @Override
    public int hashCode() {
        return Objects.hash(left, right);
    }

    @Override
    public String toString() {
        return left + ", " + right;
    }
}
apache-2.0
apache/flink
flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/generated/AggsHandleFunction.java
1506
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.table.runtime.generated;

import org.apache.flink.table.data.RowData;
import org.apache.flink.table.functions.AggregateFunction;

/**
 * Base interface for code-generated handlers that evaluate every
 * {@link AggregateFunction} of an aggregation together.
 *
 * <p>Aggregate operators use this interface as their single entry point for
 * operating all {@link AggregateFunction}s of an aggregation, rather than
 * invoking each function individually.
 */
public interface AggsHandleFunction extends AggsHandleFunctionBase {

    /**
     * Computes the final aggregation result from the current accumulators.
     *
     * @return the final result of the current accumulators, packed into a
     *     single {@link RowData} row.
     */
    RowData getValue() throws Exception;
}
apache-2.0
lincoln-lil/flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/functions/python/PythonTableFunction.java
4509
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.table.functions.python;

import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
import org.apache.flink.table.catalog.DataTypeFactory;
import org.apache.flink.table.functions.TableFunction;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.inference.TypeInference;
import org.apache.flink.table.types.inference.TypeStrategies;
import org.apache.flink.table.types.utils.TypeConversions;
import org.apache.flink.types.Row;

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

/**
 * A {@link TableFunction} that wraps a user-defined Python table function.
 *
 * <p>The Python function itself is carried as a serialized payload and is
 * executed by the Python runtime; {@link #eval(Object...)} is therefore only a
 * placeholder on the Java side.
 */
@Internal
public class PythonTableFunction extends TableFunction<Row> implements PythonFunction {

    private static final long serialVersionUID = 1L;

    private final String name;
    private final byte[] serializedScalarFunction;
    private final TypeInformation[] inputTypes;
    private final RowTypeInfo resultType;
    private final PythonFunctionKind pythonFunctionKind;
    private final boolean deterministic;
    private final PythonEnv pythonEnv;
    private final boolean takesRowAsInput;

    /**
     * Creates the wrapper.
     *
     * @param name the display name of the function
     * @param serializedScalarFunction the serialized Python function payload
     * @param inputTypes legacy input type information, may be {@code null}
     * @param resultType the row type produced by the function
     * @param pythonFunctionKind the kind of Python function
     * @param deterministic whether the function is deterministic
     * @param takesRowAsInput whether the function takes a whole row as input
     * @param pythonEnv the Python execution environment of the function
     */
    public PythonTableFunction(
            String name,
            byte[] serializedScalarFunction,
            TypeInformation[] inputTypes,
            RowTypeInfo resultType,
            PythonFunctionKind pythonFunctionKind,
            boolean deterministic,
            boolean takesRowAsInput,
            PythonEnv pythonEnv) {
        this.name = name;
        this.serializedScalarFunction = serializedScalarFunction;
        this.inputTypes = inputTypes;
        this.resultType = resultType;
        this.pythonFunctionKind = pythonFunctionKind;
        this.deterministic = deterministic;
        this.pythonEnv = pythonEnv;
        this.takesRowAsInput = takesRowAsInput;
    }

    /** Placeholder only — the actual evaluation happens in the Python runtime. */
    public void eval(Object... args) {
        throw new UnsupportedOperationException(
                "This method is a placeholder and should not be called.");
    }

    @Override
    public byte[] getSerializedPythonFunction() {
        return serializedScalarFunction;
    }

    @Override
    public PythonEnv getPythonEnv() {
        return pythonEnv;
    }

    @Override
    public PythonFunctionKind getPythonFunctionKind() {
        return pythonFunctionKind;
    }

    @Override
    public boolean takesRowAsInput() {
        return takesRowAsInput;
    }

    @Override
    public boolean isDeterministic() {
        return deterministic;
    }

    @Override
    public TypeInformation[] getParameterTypes(Class[] signature) {
        // Fall back to the reflective defaults only when no explicit legacy
        // input types were supplied.
        if (inputTypes == null) {
            return super.getParameterTypes(signature);
        }
        return inputTypes;
    }

    @Override
    public TypeInformation<Row> getResultType() {
        return resultType;
    }

    @Override
    public TypeInference getTypeInference(DataTypeFactory typeFactory) {
        final TypeInference.Builder builder = TypeInference.newBuilder();
        if (inputTypes != null) {
            // Pin the argument types when legacy input types are present.
            final List<DataType> argumentDataTypes =
                    Stream.of(inputTypes)
                            .map(TypeConversions::fromLegacyInfoToDataType)
                            .collect(Collectors.toList());
            builder.typedArguments(argumentDataTypes);
        }
        final DataType outputType = TypeConversions.fromLegacyInfoToDataType(resultType);
        builder.outputTypeStrategy(TypeStrategies.explicit(outputType));
        return builder.build();
    }

    @Override
    public String toString() {
        return name;
    }
}
apache-2.0
jdahlstrom/vaadin.react
client/src/main/java/com/vaadin/client/Paintable.java
1031
/*
 * Copyright 2000-2014 Vaadin Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package com.vaadin.client;

/**
 * Interface implemented by client-side widgets (or paintable parts) that
 * receive state updates from their server-side counterparts in the form of
 * {@link UIDL}.
 *
 * <p>
 * Changes can be communicated back to the server through the
 * {@link ApplicationConnection#updateVariable()} methods.
 */
@Deprecated
public interface Paintable {

    /**
     * Applies a server-originated {@link UIDL} update to this paintable.
     *
     * @param uidl the update payload received from the server
     * @param client the connection used to communicate with the server
     */
    public void updateFromUIDL(UIDL uidl, ApplicationConnection client);
}
apache-2.0
maduhu/cw-omnibus
EmPubLite-AndroidStudio/T17-Alarm/EmPubLite/app/src/main/java/com/commonsware/empublite/DownloadCheckService.java
3350
package com.commonsware.empublite;

import android.app.IntentService;
import android.content.Intent;
import android.util.Log;
import com.commonsware.cwac.wakeful.WakefulIntentService;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import de.greenrobot.event.EventBus;
import retrofit.RestAdapter;

/**
 * Wakeful background service that checks for a newer edition of the book,
 * downloads it as a ZIP, unpacks it under the app's files directory, and
 * posts a {@link BookUpdatedEvent} when a new edition has been installed.
 */
public class DownloadCheckService extends WakefulIntentService {
  private static final String OUR_BOOK_DATE="20120418";
  private static final String UPDATE_FILENAME="book.zip";
  public static final String UPDATE_BASEDIR="updates";

  public DownloadCheckService() {
    super("DownloadCheckService");
  }

  /**
   * Performs the update check while a wakelock is held: downloads and unpacks
   * the update (if any), then notifies listeners via the event bus. Any
   * failure is logged; a later alarm will retry the check.
   */
  @Override
  protected void doWakefulWork(Intent intent) {
    try {
      String url=getUpdateUrl();

      if (url != null) {
        File book=download(url);
        File updateDir=new File(getFilesDir(), UPDATE_BASEDIR);

        updateDir.mkdirs();
        unzip(book, updateDir);
        book.delete();
        EventBus.getDefault().post(new BookUpdatedEvent());
      }
    }
    catch (Exception e) {
      Log.e(getClass().getSimpleName(), "Exception downloading update", e);
    }
  }

  /**
   * Asks the server for update metadata.
   *
   * @return the URL of a newer edition, or null if our edition is current
   *     (dates are yyyyMMdd strings, so lexicographic compareTo() is a valid
   *     chronological comparison)
   */
  private String getUpdateUrl() {
    RestAdapter restAdapter=
        new RestAdapter.Builder().setEndpoint("https://commonsware.com")
                                 .build();
    BookUpdateInterface updateInterface=
        restAdapter.create(BookUpdateInterface.class);
    BookUpdateInfo info=updateInterface.update();

    if (info.updatedOn.compareTo(OUR_BOOK_DATE) > 0) {
      return(info.updateUrl);
    }

    return(null);
  }

  /**
   * Downloads the update ZIP to a private file, replacing any stale copy.
   *
   * <p>Fixes over the previous version: the HTTP response stream is now
   * closed, the buffered stream is flushed before the file descriptor is
   * synced (previously sync ran in finally before flush on the error path),
   * and the connection is disconnected even if opening the stream fails.
   *
   * @param url the update URL reported by the server
   * @return the downloaded ZIP file
   * @throws IOException on any download or file error
   */
  private File download(String url) throws MalformedURLException, IOException {
    File output=new File(getFilesDir(), UPDATE_FILENAME);

    if (output.exists()) {
      output.delete();
    }

    HttpURLConnection c=
        (HttpURLConnection)new URL(url).openConnection();

    try {
      FileOutputStream fos=new FileOutputStream(output.getPath());
      BufferedOutputStream out=new BufferedOutputStream(fos);
      InputStream in=null;

      try {
        in=c.getInputStream();

        byte[] buffer=new byte[16384];
        int len;

        while ((len=in.read(buffer)) > 0) {
          out.write(buffer, 0, len);
        }

        out.flush();
        fos.getFD().sync(); // ensure bytes hit disk before we return
      }
      finally {
        if (in != null) {
          try {
            in.close();
          }
          catch (IOException ignored) {
            // best-effort close of the response stream
          }
        }

        out.close();
      }
    }
    finally {
      c.disconnect();
    }

    return(output);
  }

  /**
   * Unpacks a ZIP archive into the destination directory.
   *
   * <p>Fixes over the previous version: entry names are validated against
   * the destination's canonical path (blocks "zip slip" path traversal via
   * entries like {@code ../../evil}), directory entries are handled instead
   * of crashing, the ZIP stream is closed even on exceptions, and the copy
   * buffer is allocated once rather than per entry.
   *
   * @param src the ZIP file to unpack
   * @param dest the directory to unpack into (created if missing)
   * @throws IOException on extraction failure or a malicious entry name
   */
  private static void unzip(File src, File dest) throws IOException {
    InputStream is=new FileInputStream(src);
    ZipInputStream zis=new ZipInputStream(new BufferedInputStream(is));

    try {
      dest.mkdirs();

      String destPath=dest.getCanonicalPath()+File.separator;
      byte[] buffer=new byte[16384];
      ZipEntry ze;

      while ((ze=zis.getNextEntry()) != null) {
        File outFile=new File(dest, ze.getName());

        // zip-slip guard: refuse entries that escape the destination dir
        if (!outFile.getCanonicalPath().startsWith(destPath)) {
          throw new IOException("Blocked zip entry outside target dir: "
              + ze.getName());
        }

        if (ze.isDirectory()) {
          outFile.mkdirs();
          zis.closeEntry();
          continue;
        }

        File parent=outFile.getParentFile();

        if (parent != null) {
          parent.mkdirs();
        }

        FileOutputStream fos=new FileOutputStream(outFile);
        BufferedOutputStream out=new BufferedOutputStream(fos);

        try {
          int count;

          while ((count=zis.read(buffer)) != -1) {
            out.write(buffer, 0, count);
          }

          out.flush();
          fos.getFD().sync();
        }
        finally {
          out.close();
        }

        zis.closeEntry();
      }
    }
    finally {
      zis.close();
    }
  }
}
apache-2.0
xiao-chen/hadoop
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
84286
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.timelineservice.storage; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import java.io.IOException; import java.util.Arrays; import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.NavigableMap; import java.util.NavigableSet; import java.util.Set; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity; import 
org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetricOperation; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric.Type; import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants; import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext; import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve; import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters; import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext; import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineCompareFilter; import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineCompareOp; import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineExistsFilter; import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList; import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineKeyValueFilter; import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineKeyValuesFilter; import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelinePrefixFilter; import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList.Operator; import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field; import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumn; import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumnPrefix; import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey; import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTableRW; import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnRWHelper; import org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnName; import org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnNameConverter; import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineSchemaUtils; import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter; import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator; import org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter; import org.junit.After; import org.junit.AfterClass; import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; /** * Tests for apps stored in TimelineStorage. */ public class TestHBaseTimelineStorageApps { private static HBaseTestingUtility util; private HBaseTimelineReaderImpl reader; private static final long CURRENT_TIME = System.currentTimeMillis(); @BeforeClass public static void setupBeforeClass() throws Exception { util = new HBaseTestingUtility(); util.startMiniCluster(); DataGeneratorForTest.createSchema(util.getConfiguration()); DataGeneratorForTest.loadApps(util, CURRENT_TIME); } @Before public void init() throws Exception { reader = new HBaseTimelineReaderImpl(); reader.init(util.getConfiguration()); reader.start(); } @After public void stop() throws Exception { if (reader != null) { reader.stop(); reader.close(); } } private static void matchMetrics(Map<Long, Number> m1, Map<Long, Number> m2) { assertEquals(m1.size(), m2.size()); for (Map.Entry<Long, Number> entry : m2.entrySet()) { Number val = m1.get(entry.getKey()); assertNotNull(val); assertEquals(val.longValue(), entry.getValue().longValue()); } } private boolean isApplicationRowKeyCorrect(byte[] rowKey, String cluster, String user, String flow, Long runid, String appName) { ApplicationRowKey key = ApplicationRowKey.parseRowKey(rowKey); assertEquals(cluster, 
key.getClusterId()); assertEquals(user, key.getUserId()); assertEquals(flow, key.getFlowName()); assertEquals(runid, key.getFlowRunId()); assertEquals(appName, key.getAppId()); return true; } @Test public void testWriteNullApplicationToHBase() throws Exception { TimelineEntities te = new TimelineEntities(); ApplicationEntity entity = new ApplicationEntity(); String appId = "application_1000178881110_2002"; entity.setId(appId); long cTime = 1425016501000L; entity.setCreatedTime(cTime); // add the info map in Timeline Entity Map<String, Object> infoMap = new HashMap<String, Object>(); infoMap.put("in fo M apK ey1", "infoMapValue1"); infoMap.put("infoMapKey2", 10); entity.addInfo(infoMap); te.addEntity(entity); HBaseTimelineWriterImpl hbi = null; try { Configuration c1 = util.getConfiguration(); hbi = new HBaseTimelineWriterImpl(); hbi.init(c1); hbi.start(); String cluster = "cluster_check_null_application"; String user = "user1check_null_application"; //set the flow name to null String flow = null; String flowVersion = "AB7822C10F1111"; long runid = 1002345678919L; hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion, runid, appId), te, UserGroupInformation.createRemoteUser(user)); hbi.stop(); // retrieve the row Scan scan = new Scan(); scan.setStartRow(Bytes.toBytes(cluster)); scan.setStopRow(Bytes.toBytes(cluster + "1")); Connection conn = ConnectionFactory.createConnection(c1); ResultScanner resultScanner = new ApplicationTableRW() .getResultScanner(c1, conn, scan); assertTrue(resultScanner != null); // try to iterate over results int count = 0; for (Result rr = resultScanner.next(); rr != null; rr = resultScanner.next()) { count++; } // there should be no rows written // no exceptions thrown during write assertEquals(0, count); } finally { if (hbi != null) { hbi.stop(); hbi.close(); } } } @Test public void testWriteApplicationToHBase() throws Exception { TimelineEntities te = new TimelineEntities(); ApplicationEntity entity = new 
ApplicationEntity(); String appId = "application_1000178881110_2002"; entity.setId(appId); Long cTime = 1425016501000L; entity.setCreatedTime(cTime); // add the info map in Timeline Entity Map<String, Object> infoMap = new HashMap<String, Object>(); infoMap.put("infoMapKey1", "infoMapValue1"); infoMap.put("infoMapKey2", 10); entity.addInfo(infoMap); // add the isRelatedToEntity info String key = "task"; String value = "is_related_to_entity_id_here"; Set<String> isRelatedToSet = new HashSet<String>(); isRelatedToSet.add(value); Map<String, Set<String>> isRelatedTo = new HashMap<String, Set<String>>(); isRelatedTo.put(key, isRelatedToSet); entity.setIsRelatedToEntities(isRelatedTo); // add the relatesTo info key = "container"; value = "relates_to_entity_id_here"; Set<String> relatesToSet = new HashSet<String>(); relatesToSet.add(value); value = "relates_to_entity_id_here_Second"; relatesToSet.add(value); Map<String, Set<String>> relatesTo = new HashMap<String, Set<String>>(); relatesTo.put(key, relatesToSet); entity.setRelatesToEntities(relatesTo); // add some config entries Map<String, String> conf = new HashMap<String, String>(); conf.put("config_param1", "value1"); conf.put("config_param2", "value2"); entity.addConfigs(conf); // add metrics Set<TimelineMetric> metrics = new HashSet<>(); TimelineMetric m1 = new TimelineMetric(); m1.setId("MAP_SLOT_MILLIS"); Map<Long, Number> metricValues = new HashMap<Long, Number>(); metricValues.put(CURRENT_TIME - 120000, 100000000); metricValues.put(CURRENT_TIME - 100000, 200000000); metricValues.put(CURRENT_TIME - 80000, 300000000); metricValues.put(CURRENT_TIME - 60000, 400000000); metricValues.put(CURRENT_TIME - 40000, 50000000000L); metricValues.put(CURRENT_TIME - 20000, 60000000000L); m1.setType(Type.TIME_SERIES); m1.setValues(metricValues); metrics.add(m1); entity.addMetrics(metrics); // add aggregated metrics TimelineEntity aggEntity = new TimelineEntity(); String type = TimelineEntityType.YARN_APPLICATION.toString(); 
aggEntity.setId(appId); aggEntity.setType(type); long cTime2 = 1425016502000L; aggEntity.setCreatedTime(cTime2); TimelineMetric aggMetric = new TimelineMetric(); aggMetric.setId("MEM_USAGE"); Map<Long, Number> aggMetricValues = new HashMap<Long, Number>(); long aggTs = CURRENT_TIME; aggMetricValues.put(aggTs - 120000, 102400000L); aggMetric.setType(Type.SINGLE_VALUE); aggMetric.setRealtimeAggregationOp(TimelineMetricOperation.SUM); aggMetric.setValues(aggMetricValues); Set<TimelineMetric> aggMetrics = new HashSet<>(); aggMetrics.add(aggMetric); entity.addMetrics(aggMetrics); te.addEntity(entity); HBaseTimelineWriterImpl hbi = null; try { Configuration c1 = util.getConfiguration(); hbi = new HBaseTimelineWriterImpl(); hbi.init(c1); hbi.start(); String cluster = "cluster_test_write_app"; String user = "user1"; String flow = "s!ome_f\tlow _n am!e"; String flowVersion = "AB7822C10F1111"; long runid = 1002345678919L; hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion, runid, appId), te, UserGroupInformation.createRemoteUser(user)); // Write entity again, this time without created time. 
entity = new ApplicationEntity(); appId = "application_1000178881110_2002"; entity.setId(appId); // add the info map in Timeline Entity Map<String, Object> infoMap1 = new HashMap<>(); infoMap1.put("infoMapKey3", "infoMapValue1"); entity.addInfo(infoMap1); te = new TimelineEntities(); te.addEntity(entity); hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion, runid, appId), te, UserGroupInformation.createRemoteUser(user)); hbi.stop(); infoMap.putAll(infoMap1); // retrieve the row ApplicationRowKey applicationRowKey = new ApplicationRowKey(cluster, user, flow, runid, appId); byte[] rowKey = applicationRowKey.getRowKey(); Get get = new Get(rowKey); get.setMaxVersions(Integer.MAX_VALUE); Connection conn = ConnectionFactory.createConnection(c1); Result result = new ApplicationTableRW().getResult(c1, conn, get); assertTrue(result != null); assertEquals(17, result.size()); // check the row key byte[] row1 = result.getRow(); assertTrue(isApplicationRowKeyCorrect(row1, cluster, user, flow, runid, appId)); // check info column family String id1 = ColumnRWHelper.readResult(result, ApplicationColumn.ID).toString(); assertEquals(appId, id1); Long cTime1 = (Long) ColumnRWHelper.readResult(result, ApplicationColumn.CREATED_TIME); assertEquals(cTime, cTime1); Map<String, Object> infoColumns = ColumnRWHelper.readResults( result, ApplicationColumnPrefix.INFO, new StringKeyConverter()); assertEquals(infoMap, infoColumns); // Remember isRelatedTo is of type Map<String, Set<String>> for (Map.Entry<String, Set<String>> isRelatedToEntry : isRelatedTo .entrySet()) { Object isRelatedToValue = ColumnRWHelper.readResult( result, ApplicationColumnPrefix.IS_RELATED_TO, isRelatedToEntry.getKey()); String compoundValue = isRelatedToValue.toString(); // id7?id9?id6 Set<String> isRelatedToValues = new HashSet<String>(Separator.VALUES.splitEncoded(compoundValue)); assertEquals(isRelatedTo.get(isRelatedToEntry.getKey()).size(), isRelatedToValues.size()); for (String v : 
isRelatedToEntry.getValue()) { assertTrue(isRelatedToValues.contains(v)); } } // RelatesTo for (Map.Entry<String, Set<String>> relatesToEntry : relatesTo .entrySet()) { String compoundValue = ColumnRWHelper.readResult(result, ApplicationColumnPrefix.RELATES_TO, relatesToEntry.getKey()) .toString(); // id3?id4?id5 Set<String> relatesToValues = new HashSet<String>(Separator.VALUES.splitEncoded(compoundValue)); assertEquals(relatesTo.get(relatesToEntry.getKey()).size(), relatesToValues.size()); for (String v : relatesToEntry.getValue()) { assertTrue(relatesToValues.contains(v)); } } KeyConverter<String> stringKeyConverter = new StringKeyConverter(); // Configuration Map<String, Object> configColumns = ColumnRWHelper.readResults( result, ApplicationColumnPrefix.CONFIG, stringKeyConverter); assertEquals(conf, configColumns); NavigableMap<String, NavigableMap<Long, Number>> metricsResult = ColumnRWHelper.readResultsWithTimestamps( result, ApplicationColumnPrefix.METRIC, stringKeyConverter); NavigableMap<Long, Number> metricMap = metricsResult.get(m1.getId()); matchMetrics(metricValues, metricMap); // read the timeline entity using the reader this time. In metrics limit // specify Integer MAX_VALUE. A TIME_SERIES will be returned(if more than // one value exists for a metric). TimelineEntity e1 = reader.getEntity( new TimelineReaderContext(cluster, user, flow, runid, appId, entity.getType(), entity.getId()), new TimelineDataToRetrieve(null, null, EnumSet.of(TimelineReader.Field.ALL), Integer.MAX_VALUE, null, null)); assertNotNull(e1); // verify attributes assertEquals(appId, e1.getId()); assertEquals(TimelineEntityType.YARN_APPLICATION.toString(), e1.getType()); assertEquals(cTime, e1.getCreatedTime()); Map<String, Object> infoMap2 = e1.getInfo(); // fromid key is added by storage. Remove it for comparision. 
infoMap2.remove("FROM_ID"); assertEquals(infoMap, infoMap2); Map<String, Set<String>> isRelatedTo2 = e1.getIsRelatedToEntities(); assertEquals(isRelatedTo, isRelatedTo2); Map<String, Set<String>> relatesTo2 = e1.getRelatesToEntities(); assertEquals(relatesTo, relatesTo2); Map<String, String> conf2 = e1.getConfigs(); assertEquals(conf, conf2); Set<TimelineMetric> metrics2 = e1.getMetrics(); assertEquals(2, metrics2.size()); for (TimelineMetric metric2 : metrics2) { Map<Long, Number> metricValues2 = metric2.getValues(); assertTrue(metric2.getId().equals("MAP_SLOT_MILLIS") || metric2.getId().equals("MEM_USAGE")); if (metric2.getId().equals("MAP_SLOT_MILLIS")) { assertEquals(6, metricValues2.size()); matchMetrics(metricValues, metricValues2); } if (metric2.getId().equals("MEM_USAGE")) { assertEquals(1, metricValues2.size()); matchMetrics(aggMetricValues, metricValues2); } } // In metrics limit specify a value of 3. No more than 3 values for a // metric will be returned. e1 = reader.getEntity(new TimelineReaderContext(cluster, user, flow, runid, appId, entity.getType(), entity.getId()), new TimelineDataToRetrieve(null, null, EnumSet.of(TimelineReader.Field.ALL), 3, null, null)); assertNotNull(e1); assertEquals(appId, e1.getId()); assertEquals(TimelineEntityType.YARN_APPLICATION.toString(), e1.getType()); assertEquals(conf, e1.getConfigs()); metrics2 = e1.getMetrics(); assertEquals(2, metrics2.size()); for (TimelineMetric metric2 : metrics2) { Map<Long, Number> metricValues2 = metric2.getValues(); assertTrue(metricValues2.size() <= 3); assertTrue(metric2.getId().equals("MAP_SLOT_MILLIS") || metric2.getId().equals("MEM_USAGE")); } // Check if single value(latest value) instead of time series is returned // if metricslimit is not set(null), irrespective of number of metric // values. 
e1 = reader.getEntity( new TimelineReaderContext(cluster, user, flow, runid, appId, entity.getType(), entity.getId()), new TimelineDataToRetrieve( null, null, EnumSet.of(TimelineReader.Field.ALL), null, null, null)); assertNotNull(e1); assertEquals(appId, e1.getId()); assertEquals(TimelineEntityType.YARN_APPLICATION.toString(), e1.getType()); assertEquals(cTime, e1.getCreatedTime()); infoMap2 = e1.getInfo(); // fromid key is added by storage. Remove it for comparison. infoMap2.remove("FROM_ID"); assertEquals(infoMap, e1.getInfo()); assertEquals(isRelatedTo, e1.getIsRelatedToEntities()); assertEquals(relatesTo, e1.getRelatesToEntities()); assertEquals(conf, e1.getConfigs()); assertEquals(2, e1.getMetrics().size()); for (TimelineMetric metric : e1.getMetrics()) { assertEquals(1, metric.getValues().size()); assertEquals(TimelineMetric.Type.SINGLE_VALUE, metric.getType()); assertTrue(metric.getId().equals("MAP_SLOT_MILLIS") || metric.getId().equals("MEM_USAGE")); assertEquals(1, metric.getValues().size()); if (metric.getId().equals("MAP_SLOT_MILLIS")) { assertTrue(metric.getValues().containsKey(CURRENT_TIME - 20000)); assertEquals(metricValues.get(CURRENT_TIME - 20000), metric.getValues().get(CURRENT_TIME - 20000)); } if (metric.getId().equals("MEM_USAGE")) { assertTrue(metric.getValues().containsKey(aggTs - 120000)); assertEquals(aggMetricValues.get(aggTs - 120000), metric.getValues().get(aggTs - 120000)); } } } finally { if (hbi != null) { hbi.stop(); hbi.close(); } } } @Test public void testEvents() throws IOException { TimelineEvent event = new TimelineEvent(); String eventId = ApplicationMetricsConstants.CREATED_EVENT_TYPE; event.setId(eventId); Long expTs = 1436512802000L; event.setTimestamp(expTs); String expKey = "foo_event"; Object expVal = "test"; event.addInfo(expKey, expVal); final TimelineEntity entity = new ApplicationEntity(); entity.setId(HBaseTimelineSchemaUtils.convertApplicationIdToString( ApplicationId.newInstance(0, 1))); entity.addEvent(event); 
// ---------------------------------------------------------------------------
// NOTE(review): tail of an event-write test whose signature lies above this
// chunk. It writes an application entity carrying one event through
// HBaseTimelineWriterImpl, verifies the event by scanning the HBase
// application table directly, then re-reads the entity via the timeline
// reader. `entity`, `eventId`, `expTs`, `expKey`, `expVal` are defined above.
// ---------------------------------------------------------------------------
TimelineEntities entities = new TimelineEntities(); entities.addEntity(entity); HBaseTimelineWriterImpl hbi = null; try { Configuration c1 = util.getConfiguration(); hbi = new HBaseTimelineWriterImpl(); hbi.init(c1); hbi.start(); String cluster = "cluster_test_events"; String user = "user2"; String flow = "other_flow_name"; String flowVersion = "1111F01C2287BA"; long runid = 1009876543218L; String appName = "application_123465899910_1001"; hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion, runid, appName), entities, UserGroupInformation.createRemoteUser(user)); hbi.stop();
// retrieve the row
ApplicationRowKey applicationRowKey = new ApplicationRowKey(cluster, user, flow, runid, appName); byte[] rowKey = applicationRowKey.getRowKey(); Get get = new Get(rowKey); get.setMaxVersions(Integer.MAX_VALUE); Connection conn = ConnectionFactory.createConnection(c1); Result result = new ApplicationTableRW().getResult(c1, conn, get); assertTrue(result != null);
// check the row key
byte[] row1 = result.getRow(); assertTrue(isApplicationRowKeyCorrect(row1, cluster, user, flow, runid, appName)); Map<EventColumnName, Object> eventsResult = ColumnRWHelper.readResults(result, ApplicationColumnPrefix.EVENT, new EventColumnNameConverter());
// there should be only one event
assertEquals(1, eventsResult.size()); for (Map.Entry<EventColumnName, Object> e : eventsResult.entrySet()) { EventColumnName eventColumnName = e.getKey();
// the qualifier is a compound key
// hence match individual values
assertEquals(eventId, eventColumnName.getId()); assertEquals(expTs, eventColumnName.getTimestamp()); assertEquals(expKey, eventColumnName.getInfoKey()); Object value = e.getValue();
// there should be only one timestamp and value
assertEquals(expVal, value.toString()); }
// read the timeline entity using the reader this time
TimelineEntity e1 = reader.getEntity( new TimelineReaderContext(cluster, user, flow, runid, appName, entity.getType(), entity.getId()), new
TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null, null, null)); TimelineEntity e2 = reader.getEntity( new TimelineReaderContext(cluster, user, null, null, appName, entity.getType(), entity.getId()), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null, null, null)); assertNotNull(e1); assertNotNull(e2); assertEquals(e1, e2);
// check the events
NavigableSet<TimelineEvent> events = e1.getEvents();
// there should be only one event
assertEquals(1, events.size()); for (TimelineEvent e : events) { assertEquals(eventId, e.getId()); assertEquals(expTs, Long.valueOf(e.getTimestamp())); Map<String, Object> info = e.getInfo(); assertEquals(1, info.size()); for (Map.Entry<String, Object> infoEntry : info.entrySet()) { assertEquals(expKey, infoEntry.getKey()); assertEquals(expVal, infoEntry.getValue()); } } } finally { if (hbi != null) { hbi.stop(); hbi.close(); } } }
// ---------------------------------------------------------------------------
// Verifies the HBase writer rejects non-integral (floating point) metric
// values with an IOException, both for an application entity and for a
// generic entity. Both write attempts below are expected to throw.
// ---------------------------------------------------------------------------
@Test public void testNonIntegralMetricValues() throws IOException { TimelineEntities teApp = new TimelineEntities(); ApplicationEntity entityApp = new ApplicationEntity(); String appId = "application_1000178881110_2002"; entityApp.setId(appId); entityApp.setCreatedTime(1425016501000L);
// add metrics with floating point values
Set<TimelineMetric> metricsApp = new HashSet<>(); TimelineMetric mApp = new TimelineMetric(); mApp.setId("MAP_SLOT_MILLIS"); Map<Long, Number> metricAppValues = new HashMap<Long, Number>(); long ts = System.currentTimeMillis(); metricAppValues.put(ts - 20, 10.5); metricAppValues.put(ts - 10, 20.5); mApp.setType(Type.TIME_SERIES); mApp.setValues(metricAppValues); metricsApp.add(mApp); entityApp.addMetrics(metricsApp); teApp.addEntity(entityApp); TimelineEntities teEntity = new TimelineEntities(); TimelineEntity entity = new TimelineEntity(); entity.setId("hello"); entity.setType("world"); entity.setCreatedTime(1425016501000L);
// add metrics with floating point values
Set<TimelineMetric> metricsEntity = new HashSet<>(); TimelineMetric mEntity = new
TimelineMetric(); mEntity.setId("MAP_SLOT_MILLIS"); mEntity.addValue(ts - 20, 10.5); metricsEntity.add(mEntity); entity.addMetrics(metricsEntity); teEntity.addEntity(entity); HBaseTimelineWriterImpl hbi = null; try { Configuration c1 = util.getConfiguration(); hbi = new HBaseTimelineWriterImpl(); hbi.init(c1); hbi.start();
// Writing application entity.
TimelineCollectorContext context = new TimelineCollectorContext("c1", "u1", "f1", "v1", 1002345678919L, appId); UserGroupInformation user = UserGroupInformation.createRemoteUser("u1"); try { hbi.write(context, teApp, user); Assert.fail("Expected an exception as metric values are non integral"); } catch (IOException e) {}
// Writing generic entity.
try { hbi.write(context, teEntity, user); Assert.fail("Expected an exception as metric values are non integral"); } catch (IOException e) {} hbi.stop(); } finally { if (hbi != null) { hbi.stop(); hbi.close(); } } }
// ---------------------------------------------------------------------------
// Reads back one application and then all applications of the flow with
// Field.ALL, checking aggregate counts of configs/metrics/info/events and
// the relatesTo/isRelatedTo sets across the three expected applications
// written by the test fixture.
// ---------------------------------------------------------------------------
@Test public void testReadApps() throws Exception { TimelineEntity entity = reader.getEntity( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, "application_1111111111_2222", TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null, null, null)); assertNotNull(entity); assertEquals(3, entity.getConfigs().size()); assertEquals(1, entity.getIsRelatedToEntities().size()); Set<TimelineEntity> entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null, null, null)); assertEquals(3, entities.size()); int cfgCnt = 0; int metricCnt = 0; int infoCnt = 0; int eventCnt = 0; int relatesToCnt = 0; int isRelatedToCnt = 0; for (TimelineEntity timelineEntity : entities) { cfgCnt += (timelineEntity.getConfigs() == null) ?
0 : timelineEntity.getConfigs().size(); metricCnt += (timelineEntity.getMetrics() == null) ? 0 : timelineEntity.getMetrics().size(); infoCnt += (timelineEntity.getInfo() == null) ? 0 : timelineEntity.getInfo().size(); eventCnt += (timelineEntity.getEvents() == null) ? 0 : timelineEntity.getEvents().size(); relatesToCnt += (timelineEntity.getRelatesToEntities() == null) ? 0 : timelineEntity.getRelatesToEntities().size(); isRelatedToCnt += (timelineEntity.getIsRelatedToEntities() == null) ? 0 : timelineEntity.getIsRelatedToEntities().size(); } assertEquals(5, cfgCnt); assertEquals(3, metricCnt); assertEquals(8, infoCnt); assertEquals(4, eventCnt); assertEquals(4, relatesToCnt); assertEquals(4, isRelatedToCnt); }
// ---------------------------------------------------------------------------
// Filters applications by created-time windows (createdTimeBegin /
// createTimeEnd) and checks exactly the expected application ids come back.
// Continues on the next source line.
// ---------------------------------------------------------------------------
@Test public void testFilterAppsByCreatedTime() throws Exception { Set<TimelineEntity> entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().createdTimeBegin(1425016502000L) .createTimeEnd(1425016502040L).build(), new TimelineDataToRetrieve()); assertEquals(3, entities.size()); for (TimelineEntity entity : entities) { if (!entity.getId().equals("application_1111111111_2222") && !entity.getId().equals("application_1111111111_3333") && !entity.getId().equals("application_1111111111_4444")) { Assert.fail("Entities with ids' application_1111111111_2222, " + "application_1111111111_3333 and application_1111111111_4444" + " should be present"); } } entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().createdTimeBegin(1425016502015L) .build(), new TimelineDataToRetrieve()); assertEquals(2, entities.size()); for (TimelineEntity entity : entities) { if (!entity.getId().equals("application_1111111111_3333") && !entity.getId().equals("application_1111111111_4444"))
{ Assert.fail("Apps with ids' application_1111111111_3333 and" + " application_1111111111_4444 should be present"); } } entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().createTimeEnd(1425016502015L) .build(), new TimelineDataToRetrieve()); assertEquals(1, entities.size()); for (TimelineEntity entity : entities) { if (!entity.getId().equals("application_1111111111_2222")) { Assert.fail("App with id application_1111111111_2222 should" + " be present"); } } } @Test public void testReadAppsDefaultView() throws Exception { TimelineEntity e1 = reader.getEntity( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, "application_1111111111_2222", TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineDataToRetrieve()); assertNotNull(e1); assertEquals(1, e1.getInfo().size()); assertTrue(e1.getConfigs().isEmpty() && e1.getMetrics().isEmpty() && e1.getIsRelatedToEntities().isEmpty() && e1.getRelatesToEntities().isEmpty()); Set<TimelineEntity> es1 = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().build(), new TimelineDataToRetrieve()); assertEquals(3, es1.size()); for (TimelineEntity e : es1) { assertEquals(1, e1.getInfo().size()); assertTrue(e.getConfigs().isEmpty() && e.getMetrics().isEmpty() && e.getIsRelatedToEntities().isEmpty() && e.getRelatesToEntities().isEmpty()); } } @Test public void testReadAppsByFields() throws Exception { TimelineEntity e1 = reader.getEntity( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, "application_1111111111_2222", TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineDataToRetrieve( null, null, EnumSet.of(Field.INFO, Field.CONFIGS), null, null, null)); 
// Tail of testReadAppsByFields: with Field.INFO+CONFIGS only, expect 3
// configs and no isRelatedTo entries; then with Field.IS_RELATED_TO+METRICS,
// expect the aggregate counts below across the 3 applications.
assertNotNull(e1); assertEquals(3, e1.getConfigs().size()); assertEquals(0, e1.getIsRelatedToEntities().size()); Set<TimelineEntity> es1 = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().build(), new TimelineDataToRetrieve( null, null, EnumSet.of(Field.IS_RELATED_TO, Field.METRICS), null, null, null)); assertEquals(3, es1.size()); int metricsCnt = 0; int isRelatedToCnt = 0; int infoCnt = 0; for (TimelineEntity entity : es1) { metricsCnt += entity.getMetrics().size(); isRelatedToCnt += entity.getIsRelatedToEntities().size(); infoCnt += entity.getInfo().size(); } assertEquals(3, infoCnt); assertEquals(4, isRelatedToCnt); assertEquals(3, metricsCnt); }
// ---------------------------------------------------------------------------
// Exercises isRelatedTo filters: OR / AND combinations of
// TimelineKeyValuesFilter (EQUAL / NOT_EQUAL) and verifies exactly the
// expected application ids match each filter list (irt..irt6).
// ---------------------------------------------------------------------------
@Test public void testReadAppsIsRelatedTo() throws Exception { TimelineFilterList irt = new TimelineFilterList(Operator.OR); irt.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.EQUAL, "task", new HashSet<Object>(Arrays.asList("relatedto1")))); irt.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.EQUAL, "task2", new HashSet<Object>(Arrays.asList("relatedto4")))); Set<TimelineEntity> entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().isRelatedTo(irt).build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null, null, null)); assertEquals(2, entities.size()); int isRelatedToCnt = 0; for (TimelineEntity timelineEntity : entities) { isRelatedToCnt += timelineEntity.getIsRelatedToEntities().size(); if (!timelineEntity.getId().equals("application_1111111111_2222") && !timelineEntity.getId().equals("application_1111111111_3333")) { Assert.fail("Entity ids' should have been application_1111111111_2222" + " and application_1111111111_3333"); } } assertEquals(3, isRelatedToCnt);
// irt1 (AND): EQUAL relatedto3 plus NOT_EQUAL relatedto5 should match only
// application_1111111111_4444; with a default view isRelatedTo is empty.
TimelineFilterList
irt1 = new TimelineFilterList(); irt1.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.EQUAL, "task1", new HashSet<Object>(Arrays.asList("relatedto3")))); irt1.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.NOT_EQUAL, "task1", new HashSet<Object>(Arrays.asList("relatedto5")))); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().isRelatedTo(irt1).build(), new TimelineDataToRetrieve()); assertEquals(1, entities.size()); isRelatedToCnt = 0; for (TimelineEntity timelineEntity : entities) { isRelatedToCnt += timelineEntity.getIsRelatedToEntities().size(); if (!timelineEntity.getId().equals("application_1111111111_4444")) { Assert.fail("Entity id should have been application_1111111111_4444"); } } assertEquals(0, isRelatedToCnt); TimelineFilterList irt2 = new TimelineFilterList(Operator.OR); irt2.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.EQUAL, "task", new HashSet<Object>(Arrays.asList("relatedto1")))); irt2.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.EQUAL, "task2", new HashSet<Object>(Arrays.asList("relatedto4")))); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().isRelatedTo(irt2).build(), new TimelineDataToRetrieve()); assertEquals(2, entities.size()); isRelatedToCnt = 0; for (TimelineEntity timelineEntity : entities) { isRelatedToCnt += timelineEntity.getIsRelatedToEntities().size(); if (!timelineEntity.getId().equals("application_1111111111_2222") && !timelineEntity.getId().equals("application_1111111111_3333")) { Assert.fail("Entity ids' should have been application_1111111111_2222" + " and application_1111111111_3333"); } } assertEquals(0, isRelatedToCnt); TimelineFilterList irt3 = new
TimelineFilterList(); irt3.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.EQUAL, "task1", new HashSet<Object>(Arrays.asList("relatedto3", "relatedto5")))); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().isRelatedTo(irt3).build(), new TimelineDataToRetrieve()); assertEquals(1, entities.size()); isRelatedToCnt = 0; for (TimelineEntity timelineEntity : entities) { isRelatedToCnt += timelineEntity.getIsRelatedToEntities().size(); if (!timelineEntity.getId().equals("application_1111111111_3333")) { Assert.fail("Entity id should have been application_1111111111_3333"); } } assertEquals(0, isRelatedToCnt);
// irt4/irt5: filters containing a key or value absent from the fixture data
// must match nothing.
TimelineFilterList irt4 = new TimelineFilterList(); irt4.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.EQUAL, "task1", new HashSet<Object>(Arrays.asList("relatedto3")))); irt4.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.EQUAL, "dummy_task", new HashSet<Object>(Arrays.asList("relatedto5")))); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().isRelatedTo(irt4).build(), new TimelineDataToRetrieve()); assertEquals(0, entities.size()); TimelineFilterList irt5 = new TimelineFilterList(); irt5.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.EQUAL, "task1", new HashSet<Object>(Arrays.asList("relatedto3", "relatedto7")))); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().isRelatedTo(irt5).build(), new TimelineDataToRetrieve()); assertEquals(0, entities.size()); TimelineFilterList list1 = new TimelineFilterList(); list1.addFilter(new TimelineKeyValuesFilter(
TimelineCompareOp.EQUAL, "task", new HashSet<Object>(Arrays.asList("relatedto1")))); list1.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.EQUAL, "dummy_task", new HashSet<Object>(Arrays.asList("relatedto4")))); TimelineFilterList list2 = new TimelineFilterList(); list2.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.EQUAL, "task2", new HashSet<Object>(Arrays.asList("relatedto4")))); TimelineFilterList irt6 = new TimelineFilterList(Operator.OR, list1, list2); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().isRelatedTo(irt6).build(), new TimelineDataToRetrieve()); assertEquals(1, entities.size()); isRelatedToCnt = 0; for (TimelineEntity timelineEntity : entities) { isRelatedToCnt += timelineEntity.getIsRelatedToEntities().size(); if (!timelineEntity.getId().equals("application_1111111111_3333")) { Assert.fail("Entity id should have been application_1111111111_3333"); } } assertEquals(0, isRelatedToCnt); }
// ---------------------------------------------------------------------------
// Exercises relatesTo filters, mirroring testReadAppsIsRelatedTo: OR / AND
// TimelineKeyValuesFilter combinations (rt..rt7) against the container
// relations written by the fixture. Continues on the next source lines.
// ---------------------------------------------------------------------------
@Test public void testReadAppsRelatesTo() throws Exception { TimelineFilterList rt = new TimelineFilterList(Operator.OR); rt.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.EQUAL, "container2", new HashSet<Object>(Arrays.asList("relatesto7")))); rt.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.EQUAL, "container1", new HashSet<Object>(Arrays.asList("relatesto4")))); Set<TimelineEntity> entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().relatesTo(rt).build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null, null, null)); assertEquals(2, entities.size()); int relatesToCnt = 0; for (TimelineEntity timelineEntity : entities) { relatesToCnt += timelineEntity.getRelatesToEntities().size(); if
// Continuation of testReadAppsRelatesTo: the OR filter should match apps
// 2222 and 4444 (3 relatesTo entries with Field.ALL); rt1 (AND with
// NOT_EQUAL) should match only 3333, with empty relatesTo in default view.
(!timelineEntity.getId().equals("application_1111111111_2222") && !timelineEntity.getId().equals("application_1111111111_4444")) { Assert.fail("Entity ids' should have been application_1111111111_2222" + " and application_1111111111_4444"); } } assertEquals(3, relatesToCnt); TimelineFilterList rt1 = new TimelineFilterList(); rt1.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.EQUAL, "container", new HashSet<Object>(Arrays.asList("relatesto1")))); rt1.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.NOT_EQUAL, "container", new HashSet<Object>(Arrays.asList("relatesto3")))); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().relatesTo(rt1).build(), new TimelineDataToRetrieve()); assertEquals(1, entities.size()); relatesToCnt = 0; for (TimelineEntity timelineEntity : entities) { relatesToCnt += timelineEntity.getRelatesToEntities().size(); if (!timelineEntity.getId().equals("application_1111111111_3333")) { Assert.fail("Entity id should have been application_1111111111_3333"); } } assertEquals(0, relatesToCnt); TimelineFilterList rt2 = new TimelineFilterList(Operator.OR); rt2.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.EQUAL, "container2", new HashSet<Object>(Arrays.asList("relatesto7")))); rt2.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.EQUAL, "container1", new HashSet<Object>(Arrays.asList("relatesto4")))); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().relatesTo(rt2).build(), new TimelineDataToRetrieve()); assertEquals(2, entities.size()); relatesToCnt = 0; for (TimelineEntity timelineEntity : entities) { relatesToCnt += timelineEntity.getRelatesToEntities().size(); if
(!timelineEntity.getId().equals("application_1111111111_2222") && !timelineEntity.getId().equals("application_1111111111_4444")) { Assert.fail("Entity ids' should have been application_1111111111_2222" + " and application_1111111111_4444"); } } assertEquals(0, relatesToCnt); TimelineFilterList rt3 = new TimelineFilterList(); rt3.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.EQUAL, "container", new HashSet<Object>(Arrays.asList("relatesto1", "relatesto3")))); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().relatesTo(rt3).build(), new TimelineDataToRetrieve()); assertEquals(1, entities.size()); relatesToCnt = 0; for (TimelineEntity timelineEntity : entities) { relatesToCnt += timelineEntity.getRelatesToEntities().size(); if (!timelineEntity.getId().equals("application_1111111111_2222")) { Assert.fail("Entity id should have been application_1111111111_2222"); } } assertEquals(0, relatesToCnt); TimelineFilterList rt4 = new TimelineFilterList(); rt4.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.EQUAL, "container", new HashSet<Object>(Arrays.asList("relatesto1")))); rt4.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.EQUAL, "dummy_container", new HashSet<Object>(Arrays.asList("relatesto5")))); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().relatesTo(rt4).build(), new TimelineDataToRetrieve()); assertEquals(0, entities.size());
// NOTE(review): "relatedto1" below looks like a typo for "relatesto1" (all
// other relatesTo fixture values use the "relatesto" prefix). The expected
// result is 0 matches either way, so behavior is unchanged — confirm intent
// before "fixing" it.
TimelineFilterList rt5 = new TimelineFilterList(); rt5.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.EQUAL, "container", new HashSet<Object>(Arrays.asList("relatedto1", "relatesto8")))); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name",
1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().relatesTo(rt5).build(), new TimelineDataToRetrieve()); assertEquals(0, entities.size()); TimelineFilterList list1 = new TimelineFilterList(); list1.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.EQUAL, "container2", new HashSet<Object>(Arrays.asList("relatesto7")))); list1.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.EQUAL, "dummy_container", new HashSet<Object>(Arrays.asList("relatesto4")))); TimelineFilterList list2 = new TimelineFilterList(); list2.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.EQUAL, "container1", new HashSet<Object>(Arrays.asList("relatesto4")))); TimelineFilterList rt6 = new TimelineFilterList(Operator.OR, list1, list2); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().relatesTo(rt6).build(), new TimelineDataToRetrieve()); assertEquals(1, entities.size()); relatesToCnt = 0; for (TimelineEntity timelineEntity : entities) { relatesToCnt += timelineEntity.getRelatesToEntities().size(); if (!timelineEntity.getId().equals("application_1111111111_2222")) { Assert.fail("Entity id should have been application_1111111111_2222"); } } assertEquals(0, relatesToCnt); TimelineFilterList list3 = new TimelineFilterList(); list3.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.EQUAL, "container", new HashSet<Object>(Arrays.asList("relatesto1")))); list3.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.EQUAL, "container1", new HashSet<Object>(Arrays.asList("relatesto4")))); TimelineFilterList list4 = new TimelineFilterList(); list4.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.EQUAL, "container", new HashSet<Object>(Arrays.asList("relatesto1")))); list4.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.EQUAL, "container", new
HashSet<Object>(Arrays.asList("relatesto2")))); TimelineFilterList combinedList = new TimelineFilterList(Operator.OR, list3, list4); TimelineFilterList rt7 = new TimelineFilterList(Operator.AND, combinedList, new TimelineKeyValuesFilter( TimelineCompareOp.NOT_EQUAL, "container", new HashSet<Object>(Arrays.asList("relatesto3")))); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().relatesTo(rt7).build(), new TimelineDataToRetrieve()); assertEquals(1, entities.size()); relatesToCnt = 0; for (TimelineEntity timelineEntity : entities) { relatesToCnt += timelineEntity.getRelatesToEntities().size(); if (!timelineEntity.getId().equals("application_1111111111_3333")) { Assert.fail("Entity id should have been application_1111111111_3333"); } } assertEquals(0, relatesToCnt); }
// ---------------------------------------------------------------------------
// Combines relatesTo, isRelatedTo and event filters in one query with the
// default view; only application_1111111111_4444 should match, and events /
// relation sets must be empty in the default view. Continues on next line.
// ---------------------------------------------------------------------------
@Test public void testReadAppsRelationsAndEventFiltersDefaultView() throws Exception { TimelineFilterList eventFilter = new TimelineFilterList(); eventFilter.addFilter(new TimelineExistsFilter(TimelineCompareOp.NOT_EQUAL, "end_event")); TimelineFilterList relatesTo = new TimelineFilterList(Operator.OR); relatesTo.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.EQUAL, "container2", new HashSet<Object>(Arrays.asList("relatesto7")))); relatesTo.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.EQUAL, "container1", new HashSet<Object>(Arrays.asList("relatesto4")))); TimelineFilterList isRelatedTo = new TimelineFilterList(); isRelatedTo.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.EQUAL, "task1", new HashSet<Object>(Arrays.asList("relatedto3")))); isRelatedTo.addFilter(new TimelineKeyValuesFilter( TimelineCompareOp.NOT_EQUAL, "task1", new HashSet<Object>(Arrays.asList("relatedto5")))); Set<TimelineEntity> entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null,
// Tail of testReadAppsRelationsAndEventFiltersDefaultView: exactly one match
// (application_1111111111_4444) and all event/relation counts are 0 because
// the default view omits those fields.
TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().relatesTo(relatesTo) .isRelatedTo(isRelatedTo).eventFilters(eventFilter).build(), new TimelineDataToRetrieve()); assertEquals(1, entities.size()); int eventCnt = 0; int isRelatedToCnt = 0; int relatesToCnt = 0; for (TimelineEntity timelineEntity : entities) { eventCnt += timelineEntity.getEvents().size(); isRelatedToCnt += timelineEntity.getIsRelatedToEntities().size(); relatesToCnt += timelineEntity.getRelatesToEntities().size(); if (!timelineEntity.getId().equals("application_1111111111_4444")) { Assert.fail("Entity id should have been application_1111111111_4444"); } } assertEquals(0, eventCnt); assertEquals(0, isRelatedToCnt); assertEquals(0, relatesToCnt); }
// ---------------------------------------------------------------------------
// Exercises config filters (TimelineKeyValueFilter, EQUAL / NOT_EQUAL, with
// and without the key-must-exist flag) and checks entity and config counts
// for each filter list (confFilterList..confFilterList5). Spans the next
// source lines.
// ---------------------------------------------------------------------------
@Test public void testReadAppsConfigFilters() throws Exception { TimelineFilterList list1 = new TimelineFilterList(); list1.addFilter(new TimelineKeyValueFilter( TimelineCompareOp.EQUAL, "cfg_param1", "value1")); list1.addFilter(new TimelineKeyValueFilter( TimelineCompareOp.EQUAL, "cfg_param2", "value2")); TimelineFilterList list2 = new TimelineFilterList(); list2.addFilter(new TimelineKeyValueFilter( TimelineCompareOp.EQUAL, "cfg_param1", "value3")); list2.addFilter(new TimelineKeyValueFilter( TimelineCompareOp.EQUAL, "config_param2", "value2")); TimelineFilterList confFilterList = new TimelineFilterList(Operator.OR, list1, list2); Set<TimelineEntity> entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().configFilters(confFilterList) .build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS), null, null, null)); assertEquals(2, entities.size()); int cfgCnt = 0; for (TimelineEntity entity : entities) { cfgCnt += entity.getConfigs().size(); } assertEquals(5, cfgCnt); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1",
"some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().configFilters(confFilterList) .build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null, null, null)); assertEquals(2, entities.size()); cfgCnt = 0; for (TimelineEntity entity : entities) { cfgCnt += entity.getConfigs().size(); } assertEquals(5, cfgCnt); TimelineFilterList confFilterList1 = new TimelineFilterList( new TimelineKeyValueFilter( TimelineCompareOp.NOT_EQUAL, "cfg_param1", "value1")); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().configFilters(confFilterList1) .build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS), null, null, null)); assertEquals(1, entities.size()); cfgCnt = 0; for (TimelineEntity entity : entities) { cfgCnt += entity.getConfigs().size(); } assertEquals(3, cfgCnt); TimelineFilterList confFilterList2 = new TimelineFilterList( new TimelineKeyValueFilter( TimelineCompareOp.NOT_EQUAL, "cfg_param1", "value1"), new TimelineKeyValueFilter( TimelineCompareOp.NOT_EQUAL, "config_param2", "value2")); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().configFilters(confFilterList2) .build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS), null, null, null)); assertEquals(0, entities.size()); TimelineFilterList confFilterList3 = new TimelineFilterList( new TimelineKeyValueFilter( TimelineCompareOp.EQUAL, "dummy_config", "value1")); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new
TimelineEntityFilters.Builder().configFilters(confFilterList3) .build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS), null, null, null)); assertEquals(0, entities.size()); TimelineFilterList confFilterList4 = new TimelineFilterList( new TimelineKeyValueFilter( TimelineCompareOp.NOT_EQUAL, "dummy_config", "value1")); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().configFilters(confFilterList4) .build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS), null, null, null)); assertEquals(0, entities.size()); TimelineFilterList confFilterList5 = new TimelineFilterList( new TimelineKeyValueFilter( TimelineCompareOp.NOT_EQUAL, "dummy_config", "value1", false)); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().configFilters(confFilterList5) .build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS), null, null, null)); assertEquals(3, entities.size()); }
// ---------------------------------------------------------------------------
// Exercises event-existence filters (TimelineExistsFilter, EQUAL /
// NOT_EQUAL, AND / OR lists ef..ef5) and verifies both the matching app ids
// and the event counts (events only populated when Field.ALL is requested).
// Spans the next source lines.
// ---------------------------------------------------------------------------
@Test public void testReadAppsEventFilters() throws Exception { TimelineFilterList ef = new TimelineFilterList(); ef.addFilter(new TimelineExistsFilter( TimelineCompareOp.EQUAL, "update_event")); ef.addFilter(new TimelineExistsFilter( TimelineCompareOp.NOT_EQUAL, "end_event")); Set<TimelineEntity> entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().eventFilters(ef).build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null, null, null)); assertEquals(1, entities.size()); int eventCnt = 0; for (TimelineEntity timelineEntity : entities) { eventCnt += timelineEntity.getEvents().size();
if (!timelineEntity.getId().equals("application_1111111111_4444")) { Assert.fail("Entity id should have been application_1111111111_4444"); } } assertEquals(1, eventCnt); TimelineFilterList ef1 = new TimelineFilterList(); ef1.addFilter(new TimelineExistsFilter( TimelineCompareOp.EQUAL, "update_event")); ef1.addFilter(new TimelineExistsFilter( TimelineCompareOp.NOT_EQUAL, "end_event")); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().eventFilters(ef1).build(), new TimelineDataToRetrieve()); assertEquals(1, entities.size()); eventCnt = 0; for (TimelineEntity timelineEntity : entities) { eventCnt += timelineEntity.getEvents().size(); if (!timelineEntity.getId().equals("application_1111111111_4444")) { Assert.fail("Entity id should have been application_1111111111_4444"); } } assertEquals(0, eventCnt); TimelineFilterList ef2 = new TimelineFilterList(); ef2.addFilter(new TimelineExistsFilter( TimelineCompareOp.NOT_EQUAL, "end_event")); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().eventFilters(ef2).build(), new TimelineDataToRetrieve()); assertEquals(2, entities.size()); eventCnt = 0; for (TimelineEntity timelineEntity : entities) { eventCnt += timelineEntity.getEvents().size(); if (!timelineEntity.getId().equals("application_1111111111_2222") && !timelineEntity.getId().equals("application_1111111111_4444")) { Assert.fail("Entity ids' should have been application_1111111111_2222" + " and application_1111111111_4444"); } } assertEquals(0, eventCnt); TimelineFilterList ef3 = new TimelineFilterList(); ef3.addFilter(new TimelineExistsFilter( TimelineCompareOp.EQUAL, "update_event")); ef3.addFilter(new TimelineExistsFilter( TimelineCompareOp.EQUAL,
// Continuation of testReadAppsEventFilters: ef3 (requires a nonexistent
// "dummy_event") matches nothing; ef4 (OR of two AND lists) and ef5 (two
// NOT_EQUAL filters) each match only application_1111111111_2222.
"dummy_event")); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().eventFilters(ef3).build(), new TimelineDataToRetrieve()); assertEquals(0, entities.size()); TimelineFilterList list1 = new TimelineFilterList(); list1.addFilter(new TimelineExistsFilter( TimelineCompareOp.EQUAL, "update_event")); list1.addFilter(new TimelineExistsFilter( TimelineCompareOp.EQUAL, "dummy_event")); TimelineFilterList list2 = new TimelineFilterList(); list2.addFilter(new TimelineExistsFilter( TimelineCompareOp.EQUAL, "start_event")); TimelineFilterList ef4 = new TimelineFilterList(Operator.OR, list1, list2); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().eventFilters(ef4).build(), new TimelineDataToRetrieve()); assertEquals(1, entities.size()); eventCnt = 0; for (TimelineEntity timelineEntity : entities) { eventCnt += timelineEntity.getEvents().size(); if (!timelineEntity.getId().equals("application_1111111111_2222")) { Assert.fail("Entity id should have been application_1111111111_2222"); } } assertEquals(0, eventCnt); TimelineFilterList ef5 = new TimelineFilterList(); ef5.addFilter(new TimelineExistsFilter( TimelineCompareOp.NOT_EQUAL, "update_event")); ef5.addFilter(new TimelineExistsFilter( TimelineCompareOp.NOT_EQUAL, "end_event")); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().eventFilters(ef5).build(), new TimelineDataToRetrieve()); assertEquals(1, entities.size()); eventCnt = 0; for (TimelineEntity timelineEntity : entities) { eventCnt += timelineEntity.getEvents().size(); if
(!timelineEntity.getId().equals("application_1111111111_2222")) { Assert.fail("Entity id should have been application_1111111111_2222"); } } assertEquals(0, eventCnt); }
// ---------------------------------------------------------------------------
// Restricts retrieved configs with a TimelinePrefixFilter ("cfg_") and
// checks every returned config key carries that prefix.
// ---------------------------------------------------------------------------
@Test public void testReadAppsConfigPrefix() throws Exception { TimelineFilterList list = new TimelineFilterList(Operator.OR, new TimelinePrefixFilter(TimelineCompareOp.EQUAL, "cfg_")); TimelineEntity e1 = reader.getEntity( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, "application_1111111111_2222", TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineDataToRetrieve(list, null, null, null, null, null)); assertNotNull(e1); assertEquals(1, e1.getConfigs().size()); Set<TimelineEntity> es1 = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null) , new TimelineEntityFilters.Builder().build(), new TimelineDataToRetrieve(list, null, null, null, null, null)); int cfgCnt = 0; for (TimelineEntity entity : es1) { cfgCnt += entity.getConfigs().size(); for (String confKey : entity.getConfigs().keySet()) { assertTrue("Config key returned should start with cfg_", confKey.startsWith("cfg_")); } } assertEquals(3, cfgCnt); }
// ---------------------------------------------------------------------------
// Combines config filters with a confsToRetrieve prefix list: filtering can
// match on one prefix ("cfg_") while retrieval returns keys of another
// ("config_"). Continues on the next source lines.
// ---------------------------------------------------------------------------
@Test public void testReadAppsConfigFilterPrefix() throws Exception { TimelineFilterList confFilterList = new TimelineFilterList(); confFilterList.addFilter(new TimelineKeyValueFilter( TimelineCompareOp.EQUAL, "cfg_param1", "value1")); TimelineFilterList list = new TimelineFilterList(Operator.OR, new TimelinePrefixFilter(TimelineCompareOp.EQUAL, "cfg_")); Set<TimelineEntity> entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().configFilters(confFilterList) .build(), new TimelineDataToRetrieve(list, null, null, null, null, null)); assertEquals(1, entities.size()); int cfgCnt = 0; for
(TimelineEntity entity : entities) { cfgCnt += entity.getConfigs().size(); for (String confKey : entity.getConfigs().keySet()) { assertTrue("Config key returned should start with cfg_", confKey.startsWith("cfg_")); } } assertEquals(2, cfgCnt); TimelineFilterList list1 = new TimelineFilterList(); list1.addFilter(new TimelineKeyValueFilter( TimelineCompareOp.EQUAL, "cfg_param1", "value1")); list1.addFilter(new TimelineKeyValueFilter( TimelineCompareOp.EQUAL, "cfg_param2", "value2")); TimelineFilterList list2 = new TimelineFilterList(); list2.addFilter(new TimelineKeyValueFilter( TimelineCompareOp.EQUAL, "cfg_param1", "value3")); list2.addFilter(new TimelineKeyValueFilter( TimelineCompareOp.EQUAL, "config_param2", "value2")); TimelineFilterList confsToRetrieve = new TimelineFilterList(Operator.OR, new TimelinePrefixFilter(TimelineCompareOp.EQUAL, "config_")); TimelineFilterList confFilterList1 = new TimelineFilterList(Operator.OR, list1, list2); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().configFilters(confFilterList1) .build(), new TimelineDataToRetrieve(confsToRetrieve, null, null, null, null, null)); assertEquals(2, entities.size()); cfgCnt = 0; for (TimelineEntity entity : entities) { cfgCnt += entity.getConfigs().size(); for (String confKey : entity.getConfigs().keySet()) { assertTrue("Config key returned should start with config_", confKey.startsWith("config_")); } } assertEquals(2, cfgCnt); }
// ---------------------------------------------------------------------------
// Exercises metric filters (TimelineCompareFilter with GREATER_OR_EQUAL /
// LESS_THAN / EQUAL / NOT_EQUAL, OR'd lists) and checks entity and metric
// counts. NOTE(review): this method runs past the end of the visible chunk;
// the trailing fragment is kept verbatim.
// ---------------------------------------------------------------------------
@Test public void testReadAppsMetricFilters() throws Exception { TimelineFilterList list1 = new TimelineFilterList(); list1.addFilter(new TimelineCompareFilter( TimelineCompareOp.GREATER_OR_EQUAL, "MAP1_SLOT_MILLIS", 50000000900L)); TimelineFilterList list2 = new TimelineFilterList(); list2.addFilter(new TimelineCompareFilter( TimelineCompareOp.LESS_THAN, "MAP_SLOT_MILLIS", 80000000000L)); list2.addFilter(new
TimelineCompareFilter( TimelineCompareOp.EQUAL, "MAP1_BYTES", 50)); TimelineFilterList metricFilterList = new TimelineFilterList(Operator.OR, list1, list2); Set<TimelineEntity> entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().metricFilters(metricFilterList) .build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS), null, null, null)); assertEquals(2, entities.size()); int metricCnt = 0; for (TimelineEntity entity : entities) { metricCnt += entity.getMetrics().size(); } assertEquals(3, metricCnt); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().metricFilters(metricFilterList) .build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null, null, null)); assertEquals(2, entities.size()); metricCnt = 0; for (TimelineEntity entity : entities) { metricCnt += entity.getMetrics().size(); } assertEquals(3, metricCnt); TimelineFilterList metricFilterList1 = new TimelineFilterList( new TimelineCompareFilter( TimelineCompareOp.LESS_OR_EQUAL, "MAP_SLOT_MILLIS", 80000000000L), new TimelineCompareFilter( TimelineCompareOp.NOT_EQUAL, "MAP1_BYTES", 30)); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().metricFilters(metricFilterList1) .build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS), null, null, null)); assertEquals(1, entities.size()); metricCnt = 0; for (TimelineEntity entity : entities) { metricCnt += entity.getMetrics().size(); } assertEquals(2, metricCnt); TimelineFilterList metricFilterList2 = new TimelineFilterList( new TimelineCompareFilter(
TimelineCompareOp.LESS_THAN, "MAP_SLOT_MILLIS", 40000000000L), new TimelineCompareFilter( TimelineCompareOp.NOT_EQUAL, "MAP1_BYTES", 30)); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().metricFilters(metricFilterList2) .build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS), null, null, null)); assertEquals(0, entities.size()); TimelineFilterList metricFilterList3 = new TimelineFilterList( new TimelineCompareFilter( TimelineCompareOp.EQUAL, "dummy_metric", 5)); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().metricFilters(metricFilterList3) .build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS), null, null, null)); assertEquals(0, entities.size()); TimelineFilterList metricFilterList4 = new TimelineFilterList( new TimelineCompareFilter( TimelineCompareOp.NOT_EQUAL, "dummy_metric", 5)); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().metricFilters(metricFilterList4) .build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS), null, null, null)); assertEquals(0, entities.size()); TimelineFilterList metricFilterList5 = new TimelineFilterList( new TimelineCompareFilter( TimelineCompareOp.NOT_EQUAL, "dummy_metric", 5, false)); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().metricFilters(metricFilterList5) .build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS), 
null, null, null)); assertEquals(3, entities.size()); } @Test public void testReadAppsMetricPrefix() throws Exception { TimelineFilterList list = new TimelineFilterList(Operator.OR, new TimelinePrefixFilter(TimelineCompareOp.EQUAL, "MAP1_")); TimelineEntity e1 = reader.getEntity( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, "application_1111111111_2222", TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineDataToRetrieve(null, list, null, null, null, null)); assertNotNull(e1); assertEquals(1, e1.getMetrics().size()); Set<TimelineEntity> es1 = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().build(), new TimelineDataToRetrieve(null, list, null, null, null, null)); int metricCnt = 0; for (TimelineEntity entity : es1) { metricCnt += entity.getMetrics().size(); for (TimelineMetric metric : entity.getMetrics()) { assertTrue("Metric Id returned should start with MAP1_", metric.getId().startsWith("MAP1_")); } } assertEquals(2, metricCnt); } @Test public void testReadAppsMetricFilterPrefix() throws Exception { TimelineFilterList list = new TimelineFilterList(Operator.OR, new TimelinePrefixFilter(TimelineCompareOp.EQUAL, "MAP1_")); TimelineFilterList metricFilterList = new TimelineFilterList(); metricFilterList.addFilter(new TimelineCompareFilter( TimelineCompareOp.GREATER_OR_EQUAL, "MAP1_SLOT_MILLIS", 0L)); Set<TimelineEntity> entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().metricFilters(metricFilterList) .build(), new TimelineDataToRetrieve(null, list, null, null, null, null)); int metricCnt = 0; assertEquals(1, entities.size()); for (TimelineEntity entity : entities) { metricCnt += entity.getMetrics().size(); } 
assertEquals(1, metricCnt); TimelineFilterList list1 = new TimelineFilterList(); list1.addFilter(new TimelineCompareFilter( TimelineCompareOp.GREATER_OR_EQUAL, "MAP1_SLOT_MILLIS", 50000000900L)); TimelineFilterList list2 = new TimelineFilterList(); list2.addFilter(new TimelineCompareFilter( TimelineCompareOp.LESS_THAN, "MAP_SLOT_MILLIS", 80000000000L)); list2.addFilter(new TimelineCompareFilter( TimelineCompareOp.EQUAL, "MAP1_BYTES", 50)); TimelineFilterList metricsToRetrieve = new TimelineFilterList(Operator.OR, new TimelinePrefixFilter(TimelineCompareOp.EQUAL, "MAP1_")); TimelineFilterList metricFilterList1 = new TimelineFilterList(Operator.OR, list1, list2); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().metricFilters(metricFilterList1) .build(), new TimelineDataToRetrieve(null, metricsToRetrieve, null, null, null, null)); metricCnt = 0; assertEquals(2, entities.size()); for (TimelineEntity entity : entities) { metricCnt += entity.getMetrics().size(); for (TimelineMetric metric : entity.getMetrics()) { assertTrue("Metric Id returned should start with MAP1_", metric.getId().startsWith("MAP1_")); } } assertEquals(2, metricCnt); entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().metricFilters(metricFilterList1) .build(), new TimelineDataToRetrieve(null, metricsToRetrieve, EnumSet.of(Field.METRICS), Integer.MAX_VALUE, null, null)); metricCnt = 0; int metricValCnt = 0; assertEquals(2, entities.size()); for (TimelineEntity entity : entities) { metricCnt += entity.getMetrics().size(); for (TimelineMetric metric : entity.getMetrics()) { metricValCnt += metric.getValues().size(); assertTrue("Metric Id returned should start with MAP1_", 
metric.getId().startsWith("MAP1_")); } } assertEquals(2, metricCnt); assertEquals(7, metricValCnt); } @Test public void testReadAppsMetricTimeRange() throws Exception { Set<TimelineEntity> entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS), 100, null, null)); assertEquals(3, entities.size()); int metricTimeSeriesCnt = 0; int metricCnt = 0; for (TimelineEntity entity : entities) { metricCnt += entity.getMetrics().size(); for (TimelineMetric m : entity.getMetrics()) { metricTimeSeriesCnt += m.getValues().size(); } } assertEquals(3, metricCnt); assertEquals(13, metricTimeSeriesCnt); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS), 100, CURRENT_TIME - 40000, CURRENT_TIME)); assertEquals(3, entities.size()); metricCnt = 0; metricTimeSeriesCnt = 0; for (TimelineEntity entity : entities) { metricCnt += entity.getMetrics().size(); for (TimelineMetric m : entity.getMetrics()) { for (Long ts : m.getValues().keySet()) { assertTrue(ts >= CURRENT_TIME - 40000 && ts <= CURRENT_TIME); } metricTimeSeriesCnt += m.getValues().size(); } } assertEquals(3, metricCnt); assertEquals(5, metricTimeSeriesCnt); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS), null, CURRENT_TIME - 40000, CURRENT_TIME)); assertEquals(3, entities.size()); metricCnt = 0; metricTimeSeriesCnt = 0; for (TimelineEntity entity : 
entities) { metricCnt += entity.getMetrics().size(); for (TimelineMetric m : entity.getMetrics()) { for (Long ts : m.getValues().keySet()) { assertTrue(ts >= CURRENT_TIME - 40000 && ts <= CURRENT_TIME); } metricTimeSeriesCnt += m.getValues().size(); } } assertEquals(3, metricCnt); assertEquals(3, metricTimeSeriesCnt); TimelineEntity entity = reader.getEntity(new TimelineReaderContext( "cluster1", "user1", "some_flow_name", 1002345678919L, "application_1111111111_2222", TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS), 100, CURRENT_TIME - 40000, CURRENT_TIME)); assertNotNull(entity); assertEquals(2, entity.getMetrics().size()); metricTimeSeriesCnt = 0; for (TimelineMetric m : entity.getMetrics()) { for (Long ts : m.getValues().keySet()) { assertTrue(ts >= CURRENT_TIME - 40000 && ts <= CURRENT_TIME); } metricTimeSeriesCnt += m.getValues().size(); } assertEquals(3, metricTimeSeriesCnt); } @Test public void testReadAppsInfoFilters() throws Exception { TimelineFilterList list1 = new TimelineFilterList(); list1.addFilter(new TimelineKeyValueFilter( TimelineCompareOp.EQUAL, "infoMapKey3", 85.85)); list1.addFilter(new TimelineKeyValueFilter( TimelineCompareOp.EQUAL, "infoMapKey1", "infoMapValue2")); TimelineFilterList list2 = new TimelineFilterList(); list2.addFilter(new TimelineKeyValueFilter( TimelineCompareOp.EQUAL, "infoMapKey1", "infoMapValue1")); list2.addFilter(new TimelineKeyValueFilter( TimelineCompareOp.EQUAL, "infoMapKey2", 10)); TimelineFilterList infoFilterList = new TimelineFilterList(Operator.OR, list1, list2); Set<TimelineEntity> entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().infoFilters(infoFilterList).build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null, null, null)); assertEquals(2, entities.size()); 
int infoCnt = 0; for (TimelineEntity entity : entities) { infoCnt += entity.getInfo().size(); } assertEquals(7, infoCnt); TimelineFilterList infoFilterList1 = new TimelineFilterList( new TimelineKeyValueFilter( TimelineCompareOp.NOT_EQUAL, "infoMapKey1", "infoMapValue1")); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().infoFilters(infoFilterList1) .build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null, null, null)); assertEquals(1, entities.size()); infoCnt = 0; for (TimelineEntity entity : entities) { infoCnt += entity.getInfo().size(); } assertEquals(4, infoCnt); TimelineFilterList infoFilterList2 = new TimelineFilterList( new TimelineKeyValueFilter( TimelineCompareOp.NOT_EQUAL, "infoMapKey1", "infoMapValue2"), new TimelineKeyValueFilter( TimelineCompareOp.NOT_EQUAL, "infoMapKey3", 85.85)); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().infoFilters(infoFilterList2) .build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null, null, null)); assertEquals(0, entities.size()); TimelineFilterList infoFilterList3 = new TimelineFilterList( new TimelineKeyValueFilter( TimelineCompareOp.EQUAL, "dummy_info", "some_value")); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().infoFilters(infoFilterList3) .build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null, null, null)); assertEquals(0, entities.size()); TimelineFilterList infoFilterList4 = new TimelineFilterList( new TimelineKeyValueFilter( TimelineCompareOp.NOT_EQUAL, "dummy_info", 
"some_value")); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().infoFilters(infoFilterList4) .build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null, null, null)); assertEquals(0, entities.size()); TimelineFilterList infoFilterList5 = new TimelineFilterList( new TimelineKeyValueFilter( TimelineCompareOp.NOT_EQUAL, "dummy_info", "some_value", false)); entities = reader.getEntities( new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().infoFilters(infoFilterList5) .build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null, null, null)); assertEquals(3, entities.size()); } @AfterClass public static void tearDownAfterClass() throws Exception { if (util != null) { util.shutdownMiniCluster(); } } }
apache-2.0
bhutchinson/rice
rice-middleware/core/impl/src/main/java/org/kuali/rice/core/impl/config/property/JAXBConfigImpl.java
28126
/** * Copyright 2005-2015 The Kuali Foundation * * Licensed under the Educational Community License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.opensource.org/licenses/ecl2.php * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.kuali.rice.core.impl.config.property; import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.Random; import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; import java.util.regex.Matcher; import java.util.regex.Pattern; import javax.xml.bind.JAXBContext; import javax.xml.bind.JAXBException; import javax.xml.bind.Unmarshaller; import javax.xml.bind.UnmarshallerHandler; import javax.xml.parsers.ParserConfigurationException; import javax.xml.parsers.SAXParserFactory; import org.apache.commons.lang.StringUtils; import org.apache.log4j.Logger; import org.kuali.rice.core.api.config.ConfigurationException; import org.kuali.rice.core.api.config.property.Config; import org.kuali.rice.core.api.util.RiceUtilities; import org.kuali.rice.core.framework.config.property.AbstractBaseConfig; import org.kuali.rice.core.util.ImmutableProperties; import org.xml.sax.Attributes; import org.xml.sax.InputSource; import org.xml.sax.SAXException; import org.xml.sax.XMLFilter; import org.xml.sax.helpers.XMLFilterImpl; /** * This implementation of the Config interface uses JAXB to parse the config file and maintains an * internal copy of all properties in 
their "raw" form (without any nested properties resolved). * This allows properties to be added in stages and still alter values of properties previously read * in. It also has settings for whether system properties should override all properties or only * serve as default when the property has not been defined. * * @author Kuali Rice Team (rice.collab@kuali.org) * */ public class JAXBConfigImpl extends AbstractBaseConfig { private static final Logger LOG = Logger.getLogger(JAXBConfigImpl.class); private static final String IMPORT_NAME = "config.location"; private static final String INDENT = " "; private static final String PLACEHOLDER_REGEX = "\\$\\{([^{}]+)\\}"; // keep the same random private static final Random RANDOM = new Random(); private final List<String> fileLocs = new ArrayList<String>(); private final Map<String, Object> objects = new LinkedHashMap<String, Object>(); private final Properties rawProperties = new Properties(); private final Properties resolvedProperties = new Properties(); // compile pattern for regex once private final Pattern pattern = Pattern.compile(PLACEHOLDER_REGEX); private boolean systemOverride; public JAXBConfigImpl() {} public JAXBConfigImpl(Config config) { this.copyConfig(config); } public JAXBConfigImpl(String fileLoc, Config config) { this.copyConfig(config); this.fileLocs.add(fileLoc); } public JAXBConfigImpl(List<String> fileLocs, Config config) { this.copyConfig(config); this.fileLocs.addAll(fileLocs); } public JAXBConfigImpl(String fileLoc) { this.fileLocs.add(fileLoc); } public JAXBConfigImpl(List<String> fileLocs) { this.fileLocs.addAll(fileLocs); } public JAXBConfigImpl(Properties properties) { this.putProperties(properties); } public JAXBConfigImpl(String fileLoc, Properties properties) { this.fileLocs.add(fileLoc); this.putProperties(properties); } public JAXBConfigImpl(List<String> fileLocs, Properties properties) { this.fileLocs.addAll(fileLocs); this.putProperties(properties); } 
/*****************************************************/ /* * We need the ability to take a config object and copy the raw + cached data into this config object. */ private void copyConfig(Config config) { if (config == null) { return; } this.putProperties(config.getProperties()); if (config.getObjects() != null) { this.objects.putAll(config.getObjects()); } } @Override public Object getObject(String key) { return objects.get(key); } @Override public Map<String, Object> getObjects() { return Collections.unmodifiableMap(objects); } @Override public Properties getProperties() { return new ImmutableProperties(resolvedProperties); } @Override public String getProperty(String key) { return resolvedProperties.getProperty(key); } /** * Provide an Immutable view of the raw properties for debugging purposes */ public Properties getRawProperties() { return new ImmutableProperties(rawProperties); } /** * * This overrides the property. Takes the place of the now deprecated overrideProperty * * @see Config#putProperty(java.lang.String, java.lang.String) */ @Override public void putProperty(String key, String value) { this.setProperty(key, replaceVariable(key, value)); resolveRawToCache(); } @Override public void putProperties(Properties properties) { // Nothing to do if (properties == null) { return; } // Cycle through the keys, using Rice's convention for expanding variables as we go replaceVariables(properties); // Still need to resolve placeholders in addition to expanding variables resolveRawToCache(); } /** * Expand variables and invoke this.setProperty() for each property in the properties object * passed in */ protected void replaceVariables(Properties properties) { replaceVariables("", properties); } protected void replaceVariables(String prefix, Properties properties) { SortedSet<String> keys = new TreeSet<String>(properties.stringPropertyNames()); for (String key : keys) { String originalValue = properties.getProperty(key); String replacedValue = replaceVariable(key, 
originalValue); logPropertyChange("", key, null, originalValue, replacedValue); this.setProperty(prefix, key, replacedValue); } } protected Unmarshaller getUnmarshaller() { try { Class<org.kuali.rice.core.impl.config.property.Config> c = org.kuali.rice.core.impl.config.property.Config.class; JAXBContext jaxbContext = JAXBContext.newInstance(c); return jaxbContext.createUnmarshaller(); } catch (JAXBException e) { throw new ConfigurationException("Error initializing JAXB for config", e); } } @Override public void parseConfig() throws IOException { LOG.info("----------------Loading Rice Configuration----------------"); if (fileLocs.isEmpty()) { // Nothing to do LOG.info("No config files specified"); return; } // Get a reference to an unmarshaller Unmarshaller unmarshaller = getUnmarshaller(); // Add host.ip and host.name configureBuiltIns(); // Parse all of the indicated config files, but do not resolve any right hand side variables for (String s : fileLocs) { parseConfig(s, unmarshaller, 0); } // now that all properties have been loaded, resolve the right hand side from // the raw properties into the resolved properties. This will also replace properties // defined in the files with system properties if systemOverride==true. 
resolveRawToCache(); LOG.info("----------------Rice Configuration Loaded-----------------"); logPropertyValues(resolvedProperties); } protected void logPropertyValues(Properties p) { LOG.info("Loaded " + p.size() + " properties"); if (LOG.isDebugEnabled()) { String s = getPropertyValuesAsString(p); LOG.debug("Displaying " + p.size() + " properties\n\n" + s + "\n"); } } protected String getPropertyValuesAsString(Properties p) { StringBuilder sb = new StringBuilder(); SortedSet<String> keys = new TreeSet<String>(p.stringPropertyNames()); for (String key : keys) { String rawValue = p.getProperty(key); String logValue = flatten(ConfigLogger.getDisplaySafeValue(key, rawValue)); sb.append(key); sb.append("="); sb.append("["); sb.append(logValue); sb.append("]\n"); } return sb.toString(); } protected String flatten(String s) { if (s == null) { return null; } else { return s.replace("\n", " ").replace("\r", " "); } } protected InputStream getInputStream(String filename) throws IOException { // have to check for empty filename because getResource will return non-null if passed "" if (StringUtils.isNotEmpty(filename)) { return RiceUtilities.getResourceAsStream(filename); } else { return null; } } protected void parseConfig(String filename, Unmarshaller unmarshaller, int depth) throws IOException { // Open an InputStream to the resource InputStream in = getInputStream(filename); // Setup an indentation prefix based on the recursive depth final String prefix = StringUtils.repeat(INDENT, depth); // If we couldn't open an input stream we are done if (in == null) { LOG.warn(prefix + "+ Skipping non-existent location [" + filename + "]"); return; } // Load properties from the InputStream if (isPropertiesFile(filename)) { // Handle normal Java .properties file loadProperties(in, prefix, filename); } else { // Handle Rice style XML files (These are not in the same format as Java XML properties files) loadRiceXML(in, prefix, filename, depth, unmarshaller); } } protected void 
loadRiceXML(InputStream in, String prefix, String filename, int depth, Unmarshaller unmarshaller) throws IOException { LOG.info(prefix + "+ Parsing config: [" + filename + "]"); org.kuali.rice.core.impl.config.property.Config config = unmarshalQuietly(unmarshaller, in); for (Param p : config.getParamList()) { if (p.getName().equals(IMPORT_NAME)) { doImport(p, unmarshaller, depth); } else if (p.isSystem()) { doSystem(p); } else if (p.isOverride() || !rawProperties.containsKey(p.getName())) { doSetProperty(prefix + " --- ", p); } } LOG.info(prefix + "- Parsed config: [" + filename + "]"); } protected void loadProperties(InputStream in, String prefix, String filename) throws IOException { LOG.info(prefix + "+ Loading properties: [" + filename + "]"); Properties properties = new Properties(); properties.load(in); replaceVariables(prefix + " --- ", properties); LOG.info(prefix + "- Loaded properties: [" + filename + "]"); } protected boolean isPropertiesFile(String filename) { String lower = StringUtils.lowerCase(filename); return StringUtils.endsWith(lower, ".properties"); } protected void doSetProperty(Param p) { doSetProperty("", p); } protected void doSetProperty(String prefix, Param p) { String name = p.getName(); if (p.isRandom()) { String randStr = String.valueOf(generateRandomInteger(p.getValue())); this.setProperty(prefix, p.getName(), randStr); LOG.info(prefix + "generating random string " + randStr + " for property " + p.getName()); } else { /* * myProp = dog We have a case where you might want myProp = ${myProp}:someOtherStuff:${foo} This would normally overwrite the existing myProp with * ${myProp}:someOtherStuff:${foo} but what we want is: myProp = dog:someOtherStuff:${foo} so that we put the existing value of myProp into the new value. Basically how * path works. 
*/ String value = replaceVariable(name, p.getValue()); this.setProperty(prefix, name, value); } } protected void doSystem(Param p) { doSystem("", p); } protected void doSystem(String prefix, Param p) { // If override is false and the system property is already set, we can't override it boolean skip = !p.isOverride() && System.getProperty(p.getName()) != null; if (skip) { return; } // Set both a system property and a local config property String name = p.getName(); if (p.isRandom()) { String randStr = String.valueOf(generateRandomInteger(p.getValue())); System.setProperty(name, randStr); this.setProperty(prefix + " ", p.getName(), randStr); LOG.info(prefix + " --- " + "generating random string " + randStr + " for system property " + p.getName()); } else { // Resolve and set system params immediately so they can override existing system params. // Update rawProperties with the resolved value as well. (to prevent possible mismatch) HashSet<String> set = new HashSet<String>(); set.add(p.getName()); String value = parseValue(p.getValue(), set); System.setProperty(name, value); this.setProperty(prefix + " ", name, value); } } protected void doImport(Param p, Unmarshaller unmarshaller, int depth) throws IOException { String configLocation = StringUtils.trim(parseValue(p.getValue(), new HashSet<String>())); parseConfig(configLocation, unmarshaller, depth + 1); } /** * This will set the property. No logic checking so what you pass in gets set. We use this as a * focal point for debugging the raw config changes. */ protected void setProperty(String name, String value) { setProperty("", name, value); } protected void setProperty(String prefix, String name, String value) { String oldValue = rawProperties.getProperty(name); String msg = (prefix == null) ? 
"Raw Config Override: " : prefix + "Raw Config Override: "; logPropertyChange(msg, name, null, oldValue, value); rawProperties.setProperty(name, value); } protected String resolve(String key) { return resolve(key, null); } /** * This method will determine the value for a property by looking it up in the raw properties. * If the property value contains a nested property (foo=${nested}) it will start the recursion * by calling parseValue(). It will also check for a system property of the same name and, based * on the value of systemOverride, 'override with' the system property or 'default to' the * system property if not found in the raw properties. This method only determines the resolved * value, it does not modify the properties in the resolved or raw properties objects. * * @param key they key of the property for which to determine the value * @param keySet contains all keys used so far in this recursion. used to check for circular * references. * @return */ protected String resolve(String key, Set<String> keySet) { // check if we have already resolved this key and have circular reference if (keySet != null && keySet.contains(key)) { throw new ConfigurationException("Circular reference in config: " + key); } String value = this.rawProperties.getProperty(key); if ((value == null || systemOverride) && System.getProperties().containsKey(key)) { value = System.getProperty(key); } if (value != null && value.contains("${")) { if (keySet == null) { keySet = new HashSet<String>(); } keySet.add(key); value = parseValue(value, keySet); keySet.remove(key); } if (value == null) { value = ""; LOG.debug("Property key: '" + key + "' is not available and hence set to empty"); } return value; } /** * This method parses the value string to find all nested properties (foo=${nested}) and * replaces them with the value returned from calling resolve(). It does this in a new string * and does not modify the raw or resolved properties objects. 
* * @param value the string to search for nest properties * @param keySet contains all keys used so far in this recursion. used to check for circular * references. * @return */ protected String parseValue(String value, Set<String> keySet) { String result = value; Matcher matcher = pattern.matcher(value); while (matcher.find()) { // get the first, outermost ${} in the string. removes the ${} as well. String key = matcher.group(1); String resolved = resolve(key, keySet); result = matcher.replaceFirst(Matcher.quoteReplacement(resolved)); matcher = matcher.reset(result); } return result; } /** * This method is used when reading in new properties to check if there is a direct reference to * the key in the value. This emulates operating system environment variable setting behavior * and replaces the reference in the value with the current value of the property from the * rawProperties. * * <pre> * ex: * path=/usr/bin;${someVar} * path=${path};/some/other/path * * resolves to: * path=/usr/bin;${someVar};/some/other/path * </pre> * * It does not resolve the the value from rawProperties as it could contain nested properties * that might change later. If the property does not exist in the rawProperties it will check * for a default system property now to prevent a circular reference error. * * @param name the property name * @param value the value to check for nested property of the same name * @return */ protected String replaceVariable(String name, String value) { String regex = "(?:\\$\\{" + name + "\\})"; String temporary = null; // Look for a property in the map first and use that. If system override is true // then it will get overridden during the resolve phase. If the value is null // we need to check the system now so we don't throw an error. 
if (value.contains("${" + name + "}")) { if ((temporary = rawProperties.getProperty(name)) == null) { temporary = System.getProperty(name); } if (temporary != null) { return value.replaceAll(regex, Matcher.quoteReplacement(temporary)); } } return value; } /** * This method iterates through the raw properties and stores their resolved values in the * resolved properties map, which acts as a cache so we don't have to run the recursion every * time getProperty() is called. */ protected void resolveRawToCache() { // Make sure we have something to do if (rawProperties.size() == 0) { return; } // Store the existing resolved properties in another object Properties oldProps = new Properties(new ImmutableProperties(resolvedProperties)); // Clear the resolved properties object resolvedProperties.clear(); // Setup sorted property keys SortedSet<String> keys = new TreeSet<String>(rawProperties.stringPropertyNames()); // Cycle through the properties resolving values as we go for (String key : keys) { // Fully resolve the value for this key String newValue = resolve(key); // Extract the old value for this key String oldValue = oldProps.getProperty(key); // Extract the raw value for this key String rawValue = rawProperties.getProperty(key); // Log what happened (if anything) in terms of an existing property being overridden logPropertyChange("Resolved Config Override: ", key, rawValue, oldValue, newValue); // Store the fully resolved property value resolvedProperties.setProperty(key, newValue); } } protected void logPropertyChange(String msg, String key, String rawValue, String oldValue, String newValue) { // If INFO level logging is not enabled, we are done if (!LOG.isInfoEnabled()) { return; } // There was no previous value, we are done if (oldValue == null) { return; } // There was a previous value, but it's the same as the new value, we are done if (StringUtils.equals(oldValue, newValue)) { return; } // Create some log friendly strings String displayOld = 
flatten(ConfigLogger.getDisplaySafeValue(key, oldValue)); String displayNew = flatten(ConfigLogger.getDisplaySafeValue(key, newValue)); String displayRaw = flatten(rawValue); // Log what happened to this property value if (StringUtils.contains(rawValue, "$")) { LOG.info(msg + key + "(" + displayRaw + ")=[" + displayOld + "]->[" + displayNew + "]"); } else { LOG.info(msg + key + "=[" + displayOld + "]->[" + displayNew + "]"); } } /** * Configures built-in properties. */ protected void configureBuiltIns() { this.setProperty("host.ip", RiceUtilities.getIpNumber()); this.setProperty("host.name", RiceUtilities.getHostName()); } /** * Generates a random integer in the range specified by the specifier, in the format: min-max * * @param rangeSpec a range specification, 'min-max' * @return a random integer in the range specified by the specifier, in the format: min-max */ protected int generateRandomInteger(String rangeSpec) { return generateRandomInteger("", rangeSpec); } /** * Generates a random integer in the range specified by the specifier, in the format: min-max * * @param rangeSpec a range specification, 'min-max' * @return a random integer in the range specified by the specifier, in the format: min-max */ protected int generateRandomInteger(String prefix, String rangeSpec) { String[] range = rangeSpec.split("-"); if (range.length != 2) { throw new IllegalArgumentException("Invalid range specifier: " + rangeSpec); } int from = Integer.parseInt(range[0].trim()); int to = Integer.parseInt(range[1].trim()); if (from > to) { int tmp = from; from = to; to = tmp; } int num; // not very random huh... if (from == to) { num = from; LOG.info(prefix + " --- from==to, so not generating random value for property."); } else { num = from + RANDOM.nextInt((to - from) + 1); } return num; } public boolean isSystemOverride() { return systemOverride; } /** * If set to true then system properties will always be checked first, disregarding any values * in the config. 
* * The default is false. * * @param systemOverride */ public void setSystemOverride(boolean systemOverride) { this.systemOverride = systemOverride; } protected org.kuali.rice.core.impl.config.property.Config unmarshal(Unmarshaller unmarshaller, InputStream in) throws SAXException, ParserConfigurationException, IOException, IllegalStateException, JAXBException { SAXParserFactory spf = SAXParserFactory.newInstance(); spf.setNamespaceAware(true); XMLFilter filter = new ConfigNamespaceURIFilter(); filter.setParent(spf.newSAXParser().getXMLReader()); UnmarshallerHandler handler = unmarshaller.getUnmarshallerHandler(); filter.setContentHandler(handler); filter.parse(new InputSource(in)); return (org.kuali.rice.core.impl.config.property.Config) handler.getResult(); } protected org.kuali.rice.core.impl.config.property.Config unmarshalQuietly(Unmarshaller unmarshaller, InputStream in) { try { return unmarshal(unmarshaller, in); } catch (Exception e) { throw new IllegalStateException(e); } } /** * This is a SAX filter that adds the config xml namespace to the document if the document does * not have a namespace (for backwards compatibility). This filter assumes unqualified * attributes and does not modify their namespace (if any). * * This could be broken out into a more generic class if Rice makes more use of JAXB. 
* * @author Kuali Rice Team (kuali-rice@googlegroups.com) * */ public class ConfigNamespaceURIFilter extends XMLFilterImpl { public static final String CONFIG_URI = "http://rice.kuali.org/core/impl/config"; @Override public void startElement(String uri, String localName, String qName, Attributes atts) throws SAXException { if (StringUtils.isBlank(uri)) { uri = CONFIG_URI; } super.startElement(uri, localName, qName, atts); } @Override public void endElement(String uri, String localName, String qName) throws SAXException { if (StringUtils.isBlank(uri)) { uri = CONFIG_URI; } super.endElement(uri, localName, qName); } } @Override public void putObject(String key, Object value) { this.objects.put(key, value); } @Override public void putObjects(Map<String, Object> objects) { this.objects.putAll(objects); } @Override public void removeObject(String key) { this.objects.remove(key); } @Override public void removeProperty(String key) { this.rawProperties.remove(key); resolveRawToCache(); } @Override public void putConfig(Config config) { this.copyConfig(config); } @Override public String toString() { return String.valueOf(resolvedProperties); } }
apache-2.0
samaitra/jena
jena-core/src/main/java/org/apache/jena/rdfxml/xmlinput/DOM2Model.java
4470
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.jena.rdfxml.xmlinput; import javax.xml.transform.Source; import javax.xml.transform.Transformer; import javax.xml.transform.TransformerFactory; import javax.xml.transform.dom.DOMSource; import javax.xml.transform.sax.SAXResult; import org.apache.jena.rdf.model.Model ; import org.apache.jena.shared.JenaException ; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.w3c.dom.Node; import org.xml.sax.SAXParseException; /** * Transform DOM nodes of RDF.XML into Jena Models. Known not to work with Java * 1.4.1. */ public class DOM2Model extends SAX2Model { static Logger logger = LoggerFactory.getLogger(DOM2Model.class) ; /** * Create a new DOM2Model. * * @param base * The retrieval URL, or the base URI to be used while parsing. * @param m * A Jena Model in which to put the triples, this can be null. If * it is null, then use {@link SAX2RDF#getHandlers}or * {@link SAX2RDF#setHandlersWith}to provide a {@link StatementHandler}, * and usually an {@link org.xml.sax.ErrorHandler} * @throws SAXParseException */ static public DOM2Model createD2M(String base, Model m) throws SAXParseException { return new DOM2Model(base, m, "", true) ; } /** * Create a new DOM2Model. 
This is particularly intended for when parsing a * non-root element within an XML document. In which case the application * needs to find this value in the outer context. Optionally, namespace * prefixes can be passed from the outer context using * {@link SAX2RDF#startPrefixMapping}. * * @param base * The retrieval URL, or the base URI to be used while parsing. * @param m * A Jena Model in which to put the triples, this can be null. If * it is null, then use {@link SAX2RDF#getHandlers}or * {@link SAX2RDF#setHandlersWith}to provide a {@link StatementHandler}, * and usually an {@link org.xml.sax.ErrorHandler} * @param lang * The current value of <code>xml:lang</code> when parsing * starts, usually "". * @throws SAXParseException */ static public DOM2Model createD2M(String base, Model m, String lang) throws SAXParseException { return new DOM2Model(base, m, lang, true) ; } DOM2Model(String base, Model m, String lang, boolean dummy) throws SAXParseException { super(base, m, lang); } /** * Parse a DOM Node with the RDF/XML parser, loading the triples into the * associated Model. Known not to work with Java 1.4.1. * * @param document */ public void load(Node document) { Source input = new DOMSource(document); // Make a SAXResult object using this handler SAXResult output = new SAXResult(this); output.setLexicalHandler(this); // Run transform TransformerFactory xformFactory = TransformerFactory.newInstance(); try { Transformer idTransform = xformFactory.newTransformer(); idTransform.transform(input, output); } catch (FatalParsingErrorException e) { // Old code ignored this, // given difficult bug report, don't be silent. logger.error("Unexpected exception in DOM2Model", e) ; } catch (RuntimeException rte) { throw rte; } catch (Exception nrte) { throw new JenaException(nrte); } finally { close(); } } }
apache-2.0
cshannon/activemq-artemis
tests/integration-tests/src/test/java/org/apache/activemq/artemis/tests/integration/paging/PagingSyncTest.java
4212
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.activemq.artemis.tests.integration.paging; import java.nio.ByteBuffer; import java.util.HashMap; import org.apache.activemq.artemis.api.core.ActiveMQBuffer; import org.apache.activemq.artemis.api.core.SimpleString; import org.apache.activemq.artemis.api.core.client.ClientMessage; import org.apache.activemq.artemis.api.core.client.ClientProducer; import org.apache.activemq.artemis.api.core.client.ClientSession; import org.apache.activemq.artemis.api.core.client.ClientSessionFactory; import org.apache.activemq.artemis.api.core.client.ServerLocator; import org.apache.activemq.artemis.core.config.Configuration; import org.apache.activemq.artemis.core.server.ActiveMQServer; import org.apache.activemq.artemis.api.core.RoutingType; import org.apache.activemq.artemis.core.server.impl.AddressInfo; import org.apache.activemq.artemis.core.settings.impl.AddressSettings; import org.apache.activemq.artemis.tests.util.ActiveMQTestBase; import org.junit.Test; /** * A PagingOrderTest. * <br> * PagingTest has a lot of tests already. 
I decided to create a newer one more specialized on Ordering and counters */ public class PagingSyncTest extends ActiveMQTestBase { private static final int PAGE_MAX = 100 * 1024; private static final int PAGE_SIZE = 10 * 1024; // Attributes ---------------------------------------------------- // Static -------------------------------------------------------- static final SimpleString ADDRESS = new SimpleString("SimpleAddress"); @Test public void testOrder1() throws Throwable { boolean persistentMessages = true; Configuration config = createDefaultInVMConfig().setJournalSyncNonTransactional(false); ActiveMQServer server = createServer(true, config, PAGE_SIZE, PAGE_MAX, new HashMap<String, AddressSettings>()); server.start(); final int messageSize = 1024; final int numberOfMessages = 500; ServerLocator locator = createInVMNonHALocator().setClientFailureCheckPeriod(1000).setConnectionTTL(2000).setReconnectAttempts(0).setBlockOnNonDurableSend(false).setBlockOnDurableSend(false).setBlockOnAcknowledge(false).setConsumerWindowSize(1024 * 1024); ClientSessionFactory sf = createSessionFactory(locator); ClientSession session = sf.createSession(false, false, false); server.addAddressInfo(new AddressInfo(ADDRESS, RoutingType.ANYCAST)); server.createQueue(ADDRESS, RoutingType.ANYCAST, ADDRESS, null, true, false); ClientProducer producer = session.createProducer(PagingTest.ADDRESS); byte[] body = new byte[messageSize]; ByteBuffer bb = ByteBuffer.wrap(body); for (int j = 1; j <= messageSize; j++) { bb.put(getSamplebyte(j)); } for (int i = 0; i < numberOfMessages; i++) { ClientMessage message = session.createMessage(persistentMessages); ActiveMQBuffer bodyLocal = message.getBodyBuffer(); bodyLocal.writeBytes(body); message.putIntProperty(new SimpleString("id"), i); producer.send(message); } session.commit(); session.close(); } // Package protected --------------------------------------------- // Protected ----------------------------------------------------- // Private 
------------------------------------------------------- // Inner classes ------------------------------------------------- }
apache-2.0
jianglili007/jena
jena-core/src/main/java/org/apache/jena/rdf/model/ModelMaker.java
4652
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.jena.rdf.model;

import org.apache.jena.graph.* ;
import org.apache.jena.shared.AlreadyExistsException ;
import org.apache.jena.shared.DoesNotExistException ;
import org.apache.jena.util.iterator.* ;

/**
    A ModelMaker contains a collection of named models, methods for creating
    new models [both named and anonymous] and opening previously-named
    models, removing models, and accessing a single "default" Model for
    this Maker.

    <p>Additional constraints are placed on a ModelMaker as compared to its
    ancestor <code>ModelSource</code>. ModelMakers do not arbitrarily forget
    their contents - once they contain a named model, that model stays inside
    the ModelMaker until that ModelMaker goes away, and maybe for longer
    (eg if the ModelMaker fronted a database or directory). And new models
    can be added to a ModelMaker.
*/
public interface ModelMaker extends ModelSource
    {
    /**
        Create a new Model associated with the given name. If there is no such
        association, create one and return it. If one exists but <code>strict</code>
        is false, return the associated Model. Otherwise throw an AlreadyExistsException.

        @param name the name to give to the new Model
        @param strict true to cause existing bindings to throw an exception
        @exception AlreadyExistsException if that name is already bound.
    */
    public Model createModel( String name, boolean strict );

    /**
        Create a Model with the given name if no such model exists. Otherwise,
        answer the existing model. Equivalent to
        <br><code>createModel( name, false )</code>.

        @param name the name of the Model to create or reuse
        @return the Model bound to <code>name</code>
    */
    public Model createModel( String name );

    /**
        Find an existing Model that this factory knows about under the given
        name. If such a Model exists, return it. Otherwise, if <code>strict</code>
        is false, create a new Model, associate it with the name, and return it.
        Otherwise throw a DoesNotExistException.

        <p>When called with <code>strict=false</code>, is equivalent to the
        ancestor <code>openModel(String)</code> method.

        @param name the name of the Model to find and return
        @param strict false to create a new one if one doesn't already exist
        @exception DoesNotExistException if there's no such named Model
    */
    public Model openModel( String name, boolean strict );

    /**
        Remove the association between the name and the Model. create
        will now be able to create a Model with that name, and open will no
        longer be able to find it. Throws an exception if there's no such Model.
        The Model itself is not touched.

        @param name the name to disassociate
        @exception DoesNotExistException if the name is unbound
    */
    public void removeModel( String name );

    /**
        return true iff the factory has a Model with the given name

        @param name the name of the Model to look for
        @return true iff there's a Model with that name
    */
    public boolean hasModel( String name );

    /**
        Close the factory - no more requests need be honoured, and any clean-up
        can be done.
    */
    public void close();

    /**
        Answer a GraphMaker that makes graphs the same way this ModelMaker
        makes models. In general this will be an underlying GraphMaker.

        @return the GraphMaker backing this ModelMaker
    */
    public GraphMaker getGraphMaker();

    /**
        Answer an [extended] iterator where each element is the name of a model in
        the maker, and the complete sequence exhausts the set of names. No
        particular order is expected from the list.

        @return an extended iterator over the names of models known to this Maker.
    */
    public ExtendedIterator<String> listModels();
    }
apache-2.0
lincoln-lil/flink
flink-runtime/src/test/java/org/apache/flink/runtime/client/SerializedJobExecutionResultTest.java
5325
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.runtime.client; import org.apache.flink.api.common.JobExecutionResult; import org.apache.flink.api.common.JobID; import org.apache.flink.core.testutils.CommonTestUtils; import org.apache.flink.runtime.operators.testutils.ExpectedTestException; import org.apache.flink.util.ExceptionUtils; import org.apache.flink.util.FlinkRuntimeException; import org.apache.flink.util.OptionalFailure; import org.apache.flink.util.SerializedValue; import org.apache.flink.util.TestLogger; import org.junit.Test; import java.util.HashMap; import java.util.Map; import java.util.concurrent.TimeUnit; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; /** Tests for the SerializedJobExecutionResult */ public class SerializedJobExecutionResultTest extends TestLogger { @Test public void testSerialization() throws Exception { final ClassLoader classloader = getClass().getClassLoader(); JobID origJobId = new JobID(); long origTime = 65927436589267L; Map<String, SerializedValue<OptionalFailure<Object>>> origMap = new HashMap<>(); origMap.put("name1", new 
SerializedValue<>(OptionalFailure.of(723L))); origMap.put("name2", new SerializedValue<>(OptionalFailure.of("peter"))); origMap.put( "name3", new SerializedValue<>(OptionalFailure.ofFailure(new ExpectedTestException()))); SerializedJobExecutionResult result = new SerializedJobExecutionResult(origJobId, origTime, origMap); // serialize and deserialize the object SerializedJobExecutionResult cloned = CommonTestUtils.createCopySerializable(result); assertEquals(origJobId, cloned.getJobId()); assertEquals(origTime, cloned.getNetRuntime()); assertEquals(origTime, cloned.getNetRuntime(TimeUnit.MILLISECONDS)); assertEquals(origMap, cloned.getSerializedAccumulatorResults()); // convert to deserialized result JobExecutionResult jResult = result.toJobExecutionResult(classloader); JobExecutionResult jResultCopied = result.toJobExecutionResult(classloader); assertEquals(origJobId, jResult.getJobID()); assertEquals(origJobId, jResultCopied.getJobID()); assertEquals(origTime, jResult.getNetRuntime()); assertEquals(origTime, jResult.getNetRuntime(TimeUnit.MILLISECONDS)); assertEquals(origTime, jResultCopied.getNetRuntime()); assertEquals(origTime, jResultCopied.getNetRuntime(TimeUnit.MILLISECONDS)); for (Map.Entry<String, SerializedValue<OptionalFailure<Object>>> entry : origMap.entrySet()) { String name = entry.getKey(); OptionalFailure<Object> value = entry.getValue().deserializeValue(classloader); if (value.isFailure()) { try { jResult.getAccumulatorResult(name); fail("expected failure"); } catch (FlinkRuntimeException ex) { assertTrue( ExceptionUtils.findThrowable(ex, ExpectedTestException.class) .isPresent()); } try { jResultCopied.getAccumulatorResult(name); fail("expected failure"); } catch (FlinkRuntimeException ex) { assertTrue( ExceptionUtils.findThrowable(ex, ExpectedTestException.class) .isPresent()); } } else { assertEquals(value.get(), jResult.getAccumulatorResult(name)); assertEquals(value.get(), jResultCopied.getAccumulatorResult(name)); } } } @Test public void 
testSerializationWithNullValues() throws Exception { SerializedJobExecutionResult result = new SerializedJobExecutionResult(null, 0L, null); SerializedJobExecutionResult cloned = CommonTestUtils.createCopySerializable(result); assertNull(cloned.getJobId()); assertEquals(0L, cloned.getNetRuntime()); assertNull(cloned.getSerializedAccumulatorResults()); JobExecutionResult jResult = result.toJobExecutionResult(getClass().getClassLoader()); assertNull(jResult.getJobID()); assertTrue(jResult.getAllAccumulatorResults().isEmpty()); } }
apache-2.0
bhutchinson/rice
rice-middleware/core/api/src/main/java/org/kuali/rice/core/api/criteria/PropertyPathPredicate.java
1165
/**
 * Copyright 2005-2015 The Kuali Foundation
 *
 * Licensed under the Educational Community License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.opensource.org/licenses/ecl2.php
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.kuali.rice.core.api.criteria;

/**
 * A predicate which contains a property path. The property path is used
 * to identify what portion of an object model that the predicate applies
 * to.
 *
 * @author Kuali Rice Team (rice.collab@kuali.org)
 *
 */
public interface PropertyPathPredicate extends Predicate {

    /**
     * Returns the property path for this predicate which represents the
     * portion of the object model to which the predicate applies.
     *
     * @return the property path
     */
    String getPropertyPath();

}
apache-2.0
wwjiang007/flink
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/FileChannelMemoryMappedBoundedDataTest.java
1593
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.runtime.io.network.partition;

import java.io.IOException;
import java.nio.file.Path;

/** Tests that read the BoundedBlockingSubpartition with multiple threads in parallel. */
public class FileChannelMemoryMappedBoundedDataTest extends BoundedDataTestBase {

    // Tells the shared test base that this BoundedData implementation is region-based,
    // enabling the region-specific test paths.
    @Override
    protected boolean isRegionBased() {
        return true;
    }

    // Factory hook: builds the implementation under test backed by the given temp file.
    @Override
    protected BoundedData createBoundedData(Path tempFilePath) throws IOException {
        return FileChannelMemoryMappedBoundedData.create(tempFilePath);
    }

    // Factory hook for the region-sized variant exercised by the base class's region tests.
    @Override
    protected BoundedData createBoundedDataWithRegion(Path tempFilePath, int regionSize)
            throws IOException {
        return FileChannelMemoryMappedBoundedData.createWithRegionSize(tempFilePath, regionSize);
    }
}
apache-2.0