repo_name
stringlengths
5
108
path
stringlengths
6
333
size
stringlengths
1
6
content
stringlengths
4
977k
license
stringclasses
15 values
steveloughran/hadoop-hdfs
src/test/hdfs/org/apache/hadoop/cli/CLITestCmdDFS.java
1301
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * <p/> * http://www.apache.org/licenses/LICENSE-2.0 * <p/> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.cli; import org.apache.hadoop.cli.util.*; import org.apache.hadoop.hdfs.tools.DFSAdmin; public class CLITestCmdDFS extends CLITestCmd { public CLITestCmdDFS(String str, CLICommandTypes type) { super(str, type); } @Override public CommandExecutor getExecutor(String tag) throws IllegalArgumentException { if (getType() instanceof CLICommandDFSAdmin) return new FSCmdExecutor(tag, new DFSAdmin()); return super.getExecutor(tag); } }
apache-2.0
Deepnekroz/kaa
server/common/server-shared/src/main/java/org/kaaproject/kaa/server/thrift/NeighborConnection.java
10749
/**
 * Copyright 2014-2016 CyberVision, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.kaaproject.kaa.server.thrift;

import java.net.InetSocketAddress;
import java.util.Collection;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

import org.apache.thrift.TException;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.protocol.TMultiplexedProtocol;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.transport.TTransport;
import org.kaaproject.kaa.server.common.thrift.KaaThriftService;
import org.kaaproject.kaa.server.common.thrift.gen.operations.OperationsThriftService;
import org.kaaproject.kaa.server.common.thrift.gen.operations.OperationsThriftService.Iface;
import org.kaaproject.kaa.server.common.zk.gen.ConnectionInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.base.Function;
import com.twitter.common.quantity.Amount;
import com.twitter.common.quantity.Time;
import com.twitter.common.thrift.Thrift;
import com.twitter.common.thrift.ThriftFactory;

/**
 * Neighbor Connection Class. Holds a Thrift connection pool to a specific
 * Operations server and provides {@link #sendMessages(Collection)} for
 * asynchronous delivery of event messages to that neighbor.
 *
 * @author Andrey Panasenko
 * @author Andrew Shvayka
 */
public final class NeighborConnection<T extends NeighborTemplate<V>, V> {

    /** The Constant LOG. */
    private static final Logger LOG = LoggerFactory.getLogger(NeighborConnection.class);

    /** Default SOCKET_TIMEOUT on an opened connection, in seconds. */
    private static final long DEFAULT_SOCKET_TIMEOUT_CONNECTION_TO_NEIGHBOR = 20;

    /** Default maximum capacity of the event message queue. */
    private static final int DEFAULT_EVENT_MESSAGE_QUEUE_LENGTH = 1024 * 1024;

    /** ID of the connection in thriftHost:thriftPort format. */
    private final String id;

    /** ConnectionInfo of the neighbor Operations server. */
    private final ConnectionInfo connectionInfo;

    /** Size of the worker pool and the per-endpoint connection limit. */
    private final int maxNumberConnection;

    /** Strategy object that processes messages and handles server errors. */
    private final T template;

    /** Actual SOCKET_TIMEOUT (seconds) on opened connections; default used when not set. */
    private final long socketTimeout;

    /** Actual maximum capacity of the event message queue. */
    private final int messageQueueLength = DEFAULT_EVENT_MESSAGE_QUEUE_LENGTH;

    /** Thrift client factory; built lazily in start(). */
    private ThriftFactory<OperationsThriftService.Iface> clientFactory;

    /** Thrift connection pool; built lazily in start(). */
    private Thrift<OperationsThriftService.Iface> thrift;

    /** Blocking queue of pending event messages. */
    private LinkedBlockingQueue<V> messageQueue;

    /** Fixed thread pool that runs the event workers. */
    private ExecutorService executor;

    /** Futures of the submitted event workers, used for cancellation. */
    private List<Future<?>> workers;

    /** Guarded by the synchronized start()/shutdown() methods. */
    private boolean started;

    /**
     * EventWorker Class. Sends event messages asynchronously; blocks in
     * poll() while the message queue is empty.
     */
    public class EventWorker implements Runnable {

        private final T template;
        private final UUID uniqueId = UUID.randomUUID();
        // One client per worker; safe because workers are created in start()
        // after the thrift pool has been built.
        private OperationsThriftService.Iface client = getClient();
        private boolean operate = true;

        public EventWorker(T template) {
            super();
            this.template = template;
        }

        /*
         * (non-Javadoc)
         *
         * @see java.lang.Runnable#run()
         */
        @Override
        public void run() {
            LinkedList<V> messages = new LinkedList<>(); // NOSONAR
            while (operate) {
                try {
                    // Wait up to an hour for the first message, then drain
                    // whatever else has accumulated and send it as one batch.
                    V event = messageQueue.poll(1, TimeUnit.HOURS);
                    if (event != null) {
                        messages.push(event);
                        messageQueue.drainTo(messages);
                        template.process(client, messages);
                        LOG.debug("EventWorker [{}:<{}>] {} messages sent", id, uniqueId, messages.size());
                        messages.clear();
                    }
                } catch (TException te) {
                    LOG.error("EventWorker [{}:{}] error sending event messages pack. ", id, uniqueId, te);
                    template.onServerError(id, te);
                } catch (InterruptedException e) {
                    // Interruption is the shutdown signal (see cancelWorkers()).
                    LOG.info("EventWorker [{}<{}>] terminated: ", id, uniqueId, e);
                    operate = false;
                }
            }
        }
    }

    /**
     * Creates a neighbor connection with an explicit socket timeout.
     *
     * @param connectionInfo      neighbor Operations server connection info
     * @param maxNumberConnection worker/connection pool size
     * @param socketTimeout       socket timeout in seconds
     * @param template            message-processing strategy
     */
    public NeighborConnection(ConnectionInfo connectionInfo, int maxNumberConnection, long socketTimeout, T template) {
        this.connectionInfo = connectionInfo;
        this.maxNumberConnection = maxNumberConnection;
        this.socketTimeout = socketTimeout;
        this.template = template;
        this.id = Neighbors.getServerID(connectionInfo);
    }

    /**
     * Creates a neighbor connection with the default socket timeout.
     */
    public NeighborConnection(ConnectionInfo connectionInfo, int maxNumberNeighborConnections, T template) {
        this(connectionInfo, maxNumberNeighborConnections, DEFAULT_SOCKET_TIMEOUT_CONNECTION_TO_NEIGHBOR, template);
    }

    /**
     * Cancel event workers.
     */
    private void cancelWorkers() {
        for (Future<?> f : workers) {
            f.cancel(true);
        }
        workers.clear();
    }

    /**
     * Return Thrift service client interface.
     *
     * @return OperationsThriftService.Iface
     */
    public OperationsThriftService.Iface getClient() {
        return thrift.builder().disableStats().withRequestTimeout(Amount.of(socketTimeout, Time.SECONDS)).create();
    }

    /**
     * Starts the connection: builds the Thrift pool and launches the event
     * workers. Idempotent — a second call only logs a debug message.
     */
    public synchronized void start() {
        if (!started) {
            executor = Executors.newFixedThreadPool(maxNumberConnection);
            messageQueue = new LinkedBlockingQueue<>(messageQueueLength);
            workers = new LinkedList<>();
            clientFactory = ThriftFactory.create(OperationsThriftService.Iface.class);
            InetSocketAddress address = new InetSocketAddress(connectionInfo.getThriftHost().toString(), connectionInfo.getThriftPort());
            Set<InetSocketAddress> backends = new HashSet<InetSocketAddress>();
            backends.add(address);
            thrift = clientFactory.withMaxConnectionsPerEndpoint(maxNumberConnection)
                    .withSocketTimeout(Amount.of(socketTimeout, Time.SECONDS))
                    .withClientFactory(new Function<TTransport, OperationsThriftService.Iface>() {
                        @Override
                        public Iface apply(TTransport transport) {
                            // Multiplexed protocol: route calls to the operations service.
                            TProtocol protocol = new TBinaryProtocol(transport);
                            TMultiplexedProtocol mprotocol = new TMultiplexedProtocol(protocol,
                                    KaaThriftService.OPERATIONS_SERVICE.getServiceName());
                            return new OperationsThriftService.Client(mprotocol);
                        }
                    }).build(backends);
            for (int i = 0; i < maxNumberConnection; i++) {
                EventWorker worker = new EventWorker(template);
                workers.add(executor.submit(worker));
            }
            started = true;
        } else {
            LOG.debug("Neighbor Connection {} is already started", getId());
        }
    }

    /**
     * Stops neighbor Operations server connections. Idempotent.
     */
    public synchronized void shutdown() {
        if (started) {
            cancelWorkers();
            executor.shutdown();
            try {
                executor.awaitTermination(1, TimeUnit.SECONDS);
            } catch (InterruptedException e) {
                LOG.error("Neighbor Connection {} error terminates ExecutorService", getId(), e);
            }
            thrift.close();
            started = false;
        } else {
            LOG.debug("Neighbor Connection {} is already stopped or was not started yet", getId());
        }
    }

    /**
     * Enqueues event messages for delivery to the neighbor Operations server.
     * Blocks up to one minute per message when the queue is full.
     *
     * @param messages messages to enqueue
     * @throws InterruptedException if the queue stays full for more than one
     *                              minute or the thread is interrupted
     */
    public void sendMessages(Collection<V> messages) throws InterruptedException {
        for (V e : messages) {
            if (!messageQueue.offer(e, 1, TimeUnit.MINUTES)) {
                LOG.error("NeighborConnection [{}] event messages queue is full more than 1 minute. Operation impossible.", getId());
                // Fixed: message previously claimed "10 minutes" while the
                // offer above only waits 1 minute.
                throw new InterruptedException("Event messages queue is full more than 1 minute");
            }
        }
    }

    /**
     * Neighbor Operations Server ID getter.
     *
     * @return the id
     */
    public String getId() {
        return id;
    }

    /**
     * Return Neighbor Operations Server ConnectionInfo.
     *
     * @return the connectionInfo
     */
    public ConnectionInfo getConnectionInfo() {
        return connectionInfo;
    }

    /**
     * SOCKET_TIMEOUT on opened connection getter.
     *
     * @return the socketTimeout
     */
    public long getSocketTimeout() {
        return socketTimeout;
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + ((id == null) ? 0 : id.hashCode());
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null) {
            return false;
        }
        if (getClass() != obj.getClass()) {
            return false;
        }
        @SuppressWarnings("rawtypes")
        NeighborConnection other = (NeighborConnection) obj;
        if (id == null) {
            if (other.id != null) {
                return false;
            }
        } else if (!id.equals(other.id)) {
            return false;
        }
        return true;
    }

    /*
     * (non-Javadoc)
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        return "NeighborConnection [Id=" + id + "]";
    }
}
apache-2.0
yafengguo/Apache-beam
sdks/java/core/src/main/java/org/apache/beam/sdk/coders/VarIntCoder.java
2987
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.beam.sdk.coders;

import com.fasterxml.jackson.annotation.JsonCreator;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.UTFDataFormatException;
import org.apache.beam.sdk.util.VarInt;

/**
 * A {@link Coder} for {@link Integer Integers} that uses a variable-length
 * encoding of 1 to 5 bytes. Negative values always occupy the full 5 bytes,
 * so {@link BigEndianIntegerCoder} may be a better fit when values are
 * frequently large or negative.
 */
public class VarIntCoder extends AtomicCoder<Integer> {

  /** Shared singleton — the coder is stateless. */
  private static final VarIntCoder INSTANCE = new VarIntCoder();

  /** Returns the singleton {@link VarIntCoder}. */
  @JsonCreator
  public static VarIntCoder of() {
    return INSTANCE;
  }

  private VarIntCoder() {}

  /**
   * Writes {@code value} to {@code outStream} using var-int encoding.
   *
   * @throws CoderException if {@code value} is null
   */
  @Override
  public void encode(Integer value, OutputStream outStream, Context context)
      throws IOException, CoderException {
    if (value == null) {
      throw new CoderException("cannot encode a null Integer");
    }
    VarInt.encode(value.intValue(), outStream);
  }

  /**
   * Reads one var-int-encoded {@link Integer} from {@code inStream},
   * rebranding malformed-input exceptions as {@link CoderException}.
   */
  @Override
  public Integer decode(InputStream inStream, Context context)
      throws IOException, CoderException {
    try {
      return VarInt.decodeInt(inStream);
    } catch (EOFException | UTFDataFormatException e) {
      // These indicate a decoding problem rather than a transport failure,
      // so surface them as coder errors.
      throw new CoderException(e);
    }
  }

  /**
   * {@inheritDoc}
   *
   * @return {@code true}. {@link VarIntCoder} is injective.
   */
  @Override
  public boolean consistentWithEquals() {
    return true;
  }

  /**
   * {@inheritDoc}
   *
   * @return {@code true}. {@link #getEncodedElementByteSize} is cheap.
   */
  @Override
  public boolean isRegisterByteSizeObserverCheap(Integer value, Context context) {
    return true;
  }

  /**
   * Returns the exact number of bytes the var-int encoding of {@code value}
   * will occupy (1–5).
   *
   * @throws CoderException if {@code value} is null
   */
  @Override
  protected long getEncodedElementByteSize(Integer value, Context context) throws Exception {
    if (value == null) {
      throw new CoderException("cannot encode a null Integer");
    }
    return VarInt.getLength(value.longValue());
  }
}
apache-2.0
zstackorg/zstack
core/src/main/java/org/zstack/core/rest/RESTApiFacadeImpl.java
12737
package org.zstack.core.rest;

import org.apache.commons.lang.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.context.annotation.ClassPathScanningCandidateComponentProvider;
import org.springframework.core.type.filter.AssignableTypeFilter;
import org.zstack.core.CoreGlobalProperty;
import org.zstack.core.cloudbus.CloudBus;
import org.zstack.core.cloudbus.CloudBusEventListener;
import org.zstack.core.cloudbus.MessageSafe;
import org.zstack.core.cloudbus.ResourceDestinationMaker;
import org.zstack.core.thread.PeriodicTask;
import org.zstack.core.thread.ThreadFacade;
import org.zstack.header.AbstractService;
import org.zstack.header.Component;
import org.zstack.header.apimediator.ApiMediatorConstant;
import org.zstack.header.exception.CloudRuntimeException;
import org.zstack.header.message.*;
import org.zstack.header.rest.RESTApiFacade;
import org.zstack.header.rest.RestAPIResponse;
import org.zstack.header.rest.RestAPIState;
import org.zstack.header.rest.RestAPIVO;
import org.zstack.header.search.APISearchMessage;
import org.zstack.utils.ExceptionDSL;
import org.zstack.utils.Utils;
import org.zstack.utils.logging.CLogger;

import javax.persistence.EntityManager;
import javax.persistence.EntityManagerFactory;
import javax.persistence.EntityTransaction;
import javax.persistence.Query;
import java.util.*;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

/**
 * REST API facade: persists incoming API requests as {@link RestAPIVO}
 * records, dispatches them on the cloud bus, records their results when the
 * corresponding {@link APIEvent} arrives, and periodically cleans up old
 * {@link RestAPIVO} history rows.
 */
public class RESTApiFacadeImpl extends AbstractService implements RESTApiFacade, CloudBusEventListener, Component {
    private static final CLogger logger = Utils.getLogger(RESTApiFacadeImpl.class);

    private EntityManagerFactory entityManagerFactory;
    private Set<String> basePkgNames;
    // UUIDs of requests whose completion events we are still waiting for.
    private List<String> processingRequests = Collections.synchronizedList(new ArrayList<String>(100));
    private Future<Void> restAPIVOCleanTask = null;
    // Upper bound on the serialized API result stored in the DB; clamped to [1000, 64000].
    private final static int restResultMaxLength = initMaxRestResultLength();

    @Autowired
    private ResourceDestinationMaker destMaker;
    @Autowired
    private CloudBus bus;
    @Autowired
    private ThreadFacade thdf;

    /** Dispatches bus messages; only {@link DeleteRestApiVOMsg} is handled here. */
    @Override
    @MessageSafe
    public void handleMessage(Message msg) {
        if (msg instanceof DeleteRestApiVOMsg) {
            handle((DeleteRestApiVOMsg) msg);
        } else {
            bus.dealWithUnknownMessage(msg);
        }
    }

    @Override
    public String getId() {
        return bus.makeLocalServiceId(RESTApiConstant.SERVICE_ID);
    }

    /**
     * Deletes RestAPIVO rows older than the message's retention day, in
     * batches of 1000, each batch in its own transaction.
     */
    private void handle(final DeleteRestApiVOMsg msg) {
        int ret = 1;
        int delete = 0;
        EntityManager mgr = getEntityManager();
        EntityTransaction tran = mgr.getTransaction();
        // Cutoff in epoch seconds: now minus the retention period.
        Long time = TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis()) - TimeUnit.DAYS.toSeconds(msg.getRetentionDay());
        long start = System.currentTimeMillis();
        try {
            while (ret > 0) {
                String sql = String.format("delete from RestAPIVO where unix_timestamp(lastOpDate) <= %d limit 1000", time);
                tran.begin();
                Query query = mgr.createNativeQuery(sql);
                ret = query.executeUpdate();
                tran.commit();
                delete = delete + ret;
                if (delete == 0) {
                    logger.debug("no RestApiVO history to clean");
                    return;
                }
            }
            logger.debug(String.format("delete %d days ago RestApiVO history %d, cost %d ms", msg.getRetentionDay(), delete, System.currentTimeMillis() - start));
        } catch (Exception e) {
            // Only roll back an active transaction; an unconditional rollback
            // would throw IllegalStateException and mask the original error.
            if (tran.isActive()) {
                tran.rollback();
            }
            logger.warn(String.format("unable to delete RestApiVO history because %s", e));
        } finally {
            mgr.close();
        }
    }

    /**
     * Scans the configured base packages for concrete {@link APIEvent}
     * subclasses and subscribes this facade to all of them on the bus.
     */
    void init() throws ClassNotFoundException, InstantiationException, IllegalAccessException {
        Set<APIEvent> boundEvents = new HashSet<APIEvent>(100);
        ClassPathScanningCandidateComponentProvider scanner = new ClassPathScanningCandidateComponentProvider(true);
        scanner.resetFilters(false);
        scanner.addIncludeFilter(new AssignableTypeFilter(APIEvent.class));
        for (String pkg : getBasePkgNames()) {
            for (BeanDefinition bd : scanner.findCandidateComponents(pkg)) {
                Class<?> clazz = Class.forName(bd.getBeanClassName());
                if (clazz == APIEvent.class) {
                    continue;
                }
                APIEvent evt = (APIEvent) clazz.newInstance();
                boundEvents.add(evt);
            }
        }
        bus.subscribeEvent(this, boundEvents.toArray(new APIEvent[boundEvents.size()]));
    }

    /** Clamps the configured result-length limit into the [1000, 64000] range. */
    private static int initMaxRestResultLength() {
        int limit = CoreGlobalProperty.REST_API_RESULT_MAX_LENGTH;
        limit = Math.min(limit, 64000);
        return Math.max(limit, 1000);
    }

    public void setEntityManagerFactory(EntityManagerFactory entityManagerFactory) {
        this.entityManagerFactory = entityManagerFactory;
    }

    public EntityManagerFactory getEntityManagerFactory() {
        return entityManagerFactory;
    }

    /**
     * Persists a new RestAPIVO in the Processing state for the given API
     * message and returns the refreshed entity.
     */
    private RestAPIVO persist(APIMessage msg) {
        RestAPIVO vo = new RestAPIVO();
        vo.setUuid(msg.getId());
        vo.setApiMessageName(msg.getMessageName());
        vo.setState(RestAPIState.Processing);
        EntityManager mgr = getEntityManager();
        EntityTransaction tran = mgr.getTransaction();
        try {
            tran.begin();
            mgr.persist(vo);
            mgr.flush();
            mgr.refresh(vo);
            tran.commit();
            return vo;
        } catch (Exception e) {
            ExceptionDSL.exceptionSafe(tran::rollback);
            throw new CloudRuntimeException(e);
        } finally {
            ExceptionDSL.exceptionSafe(mgr::close);
        }
    }

    /**
     * Fire-and-poll entry point: persists the request, sends it on the bus,
     * and returns a response in the Processing state. The caller later polls
     * {@link #getResult(String)} with the returned UUID.
     */
    @Override
    public RestAPIResponse send(APIMessage msg) {
        assert !(msg instanceof APIListMessage) && !(msg instanceof APISearchMessage) : "You must invoke call(APIMessage) for APIListMessage or APISearchMsg, the message you pass is " + msg.getMessageName();

        RestAPIResponse rsp = new RestAPIResponse();
        RestAPIVO vo = persist(msg);
        processingRequests.add(vo.getUuid());
        rsp.setCreatedDate(vo.getCreateDate());
        rsp.setState(vo.getState().toString());
        rsp.setUuid(vo.getUuid());
        msg.setServiceId(ApiMediatorConstant.SERVICE_ID);
        bus.send(msg);
        return rsp;
    }

    /**
     * Synchronous entry point: blocks on the bus call and returns a Done
     * response carrying the serialized reply. No RestAPIVO is persisted.
     */
    @Override
    public RestAPIResponse call(APIMessage msg) {
        RestAPIResponse rsp = new RestAPIResponse();
        rsp.setCreatedDate(new Date());
        msg.setServiceId(ApiMediatorConstant.SERVICE_ID);
        MessageReply reply = bus.call(msg);
        rsp.setFinishedDate(new Date());
        rsp.setState(RestAPIState.Done.toString());
        rsp.setResult(RESTApiDecoder.dump(reply));
        return rsp;
    }

    /** Loads a RestAPIVO by UUID, or null when none exists. */
    private RestAPIVO find(String uuid) {
        EntityManager mgr = getEntityManager();
        EntityTransaction tran = mgr.getTransaction();
        try {
            tran.begin();
            RestAPIVO vo = mgr.find(RestAPIVO.class, uuid);
            tran.commit();
            return vo;
        } catch (Exception e) {
            // Guard: rollback on an inactive transaction would itself throw.
            if (tran.isActive()) {
                tran.rollback();
            }
            throw new CloudRuntimeException(e);
        } finally {
            mgr.close();
        }
    }

    /**
     * Returns the current status/result of a previously {@link #send sent}
     * request, or null when the UUID is unknown.
     */
    @Override
    public RestAPIResponse getResult(String uuid) {
        RestAPIVO vo = find(uuid);
        if (vo == null) {
            return null;
        }

        RestAPIResponse rsp = new RestAPIResponse();
        rsp.setCreatedDate(vo.getCreateDate());
        rsp.setFinishedDate(vo.getLastOpDate());
        rsp.setResult(vo.getResult());
        rsp.setState(vo.getState().toString());
        rsp.setUuid(vo.getUuid());
        return rsp;
    }

    private synchronized EntityManager getEntityManager() {
        return entityManagerFactory.createEntityManager();
    }

    /**
     * Marks the RestAPIVO matching the event's API id as Done and stores the
     * (length-capped) serialized result.
     *
     * @return true if a row was updated
     */
    private boolean update(APIEvent e) {
        String sql = "update RestAPIVO r set r.result = :result, r.state = :state where r.uuid = :uuid";
        EntityManager mgr = getEntityManager();
        EntityTransaction tran = mgr.getTransaction();
        try {
            tran.begin();
            Query query = mgr.createQuery(sql);
            query.setParameter("result", getApiResult(e));
            query.setParameter("state", RestAPIState.Done);
            query.setParameter("uuid", e.getApiId());
            int ret = query.executeUpdate();
            tran.commit();
            return ret > 0;
        } catch (Exception ex) {
            // Guard: rollback on an inactive transaction would itself throw.
            if (tran.isActive()) {
                tran.rollback();
            }
            throw new CloudRuntimeException(ex);
        } finally {
            mgr.close();
        }
    }

    /** Serializes the event, truncating to restResultMaxLength characters. */
    private static String getApiResult(APIEvent e) {
        String apiResult = RESTApiDecoder.dump(e);
        apiResult = StringUtils.length(apiResult) > restResultMaxLength ? StringUtils.left(apiResult, restResultMaxLength) : apiResult;
        return apiResult;
    }

    /**
     * Bus event callback: when an APIEvent completes one of our in-flight
     * requests, record its result. Always returns false so other listeners
     * still see the event.
     */
    @Override
    public boolean handleEvent(Event e) {
        try {
            if (e instanceof APIEvent) {
                APIEvent ae = (APIEvent) e;
                if (processingRequests.contains(ae.getApiId())) {
                    boolean ret = update(ae);
                    processingRequests.remove(ae.getApiId());
                    if (!ret) {
                        logger.warn(String.format("Cannot find RestAPIVO[uuid:%s], something wrong happened", ae.getApiId()));
                    }
                }
            } else {
                bus.dealWithUnknownMessage(e);
            }
        } catch (Exception ex) {
            logger.warn(ex.getMessage(), ex);
        }
        return false;
    }

    /** Base packages scanned for APIEvent subclasses; defaults to org.zstack. */
    public Set<String> getBasePkgNames() {
        if (basePkgNames == null) {
            basePkgNames = new HashSet<String>();
            basePkgNames.add("org.zstack");
        }
        return basePkgNames;
    }

    /** Cancels any running clean task and reschedules it with current config. */
    public void refreshIntervalClean() {
        if (restAPIVOCleanTask != null) {
            restAPIVOCleanTask.cancel(true);
        }
        startIntervalClean();
    }

    /** Validates config and, unless retention is -1 (disabled), schedules the periodic clean task. */
    private void startIntervalClean() {
        checkParams();
        if (RESTApiGlobalProperty.RESTAPIVO_RETENTION_DAY == -1) {
            logger.debug("ResetApiVO retention day -1 ,not clean");
            return;
        }
        restAPIVOCleanTask = thdf.submitPeriodicTask(restAPIVOCleanTask(), RESTApiGlobalProperty.CLEAN_RESTAPIVO_DELAY);
    }

    /** Rejects out-of-range clean-task configuration values. */
    private void checkParams() {
        if (RESTApiGlobalProperty.CLEAN_RESTAPIVO_DELAY < 0 || RESTApiGlobalProperty.CLEAN_RESTAPIVO_DELAY > 3600) {
            throw new IllegalArgumentException("RestApiVO period clean task delay time must >= 0s and <= 3600s");
        }
        if (RESTApiGlobalProperty.CLEAN_INTERVAL_SECOND < 86400 || RESTApiGlobalProperty.CLEAN_INTERVAL_SECOND > 864000) {
            throw new IllegalArgumentException("RestApiVO period clean task interval must >= 86400s and <= 864000s");
        }
        if (RESTApiGlobalProperty.RESTAPIVO_RETENTION_DAY < -1 || RESTApiGlobalProperty.RESTAPIVO_RETENTION_DAY > 365) {
            throw new IllegalArgumentException("RestApiVO retention day must >= -1 day and <= 365 day, if set -1, will not clean RestApiVO");
        }
    }

    /**
     * Builds the periodic task that, when this node owns the clean key,
     * sends a DeleteRestApiVOMsg to trigger history cleanup.
     */
    private PeriodicTask restAPIVOCleanTask() {
        return new PeriodicTask() {
            @Override
            public void run() {
                // Only one management node should trigger the cleanup.
                if (!destMaker.isManagedByUs(RESTApiConstant.CleanRestAPIVOKey)) {
                    logger.debug("Not send DeleteRestApiVOMsg because not managed by us");
                    return;
                }
                DeleteRestApiVOMsg msg = new DeleteRestApiVOMsg();
                msg.setRetentionDay(RESTApiGlobalProperty.RESTAPIVO_RETENTION_DAY);
                bus.makeTargetServiceIdByResourceUuid(msg, RESTApiConstant.SERVICE_ID, RESTApiConstant.CleanRestAPIVOKey);
                bus.send(msg);
            }

            @Override
            public TimeUnit getTimeUnit() {
                return TimeUnit.SECONDS;
            }

            @Override
            public long getInterval() {
                return RESTApiGlobalProperty.CLEAN_INTERVAL_SECOND;
            }

            @Override
            public String getName() {
                return "clean-RestApiVO-periodic-Task";
            }
        };
    }

    @Override
    public boolean start() {
        startIntervalClean();
        return true;
    }

    @Override
    public boolean stop() {
        return true;
    }
}
apache-2.0
chubbymaggie/binnavi
src/main/java/com/google/security/zynamics/binnavi/Gui/Debug/ToolbarPanel/Implementations/CStopTraceListener.java
5299
/*
Copyright 2011-2016 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.google.security.zynamics.binnavi.Gui.Debug.ToolbarPanel.Implementations;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import com.google.common.base.Preconditions;
import com.google.security.zynamics.binnavi.debug.connection.packets.replies.EchoBreakpointsRemovedReply;
import com.google.security.zynamics.binnavi.debug.debugger.DebugEventListenerAdapter;
import com.google.security.zynamics.binnavi.debug.debugger.DebuggerHelpers;
import com.google.security.zynamics.binnavi.debug.debugger.interfaces.IDebugEventListener;
import com.google.security.zynamics.binnavi.debug.debugger.interfaces.IDebugger;
import com.google.security.zynamics.binnavi.debug.models.breakpoints.BreakpointAddress;
import com.google.security.zynamics.binnavi.debug.models.trace.TraceLogger;
import com.google.security.zynamics.binnavi.disassembly.RelocatedAddress;
import com.google.security.zynamics.zylib.general.Pair;
import com.google.security.zynamics.zylib.gui.ProgressDialogs.CEndlessHelperThread;

/**
 * This class is used to display a progress dialog until the echo breakpoints of a trace are
 * removed.
 */
public final class CStopTraceListener extends CEndlessHelperThread {
  /**
   * Debugger used to remove the breakpoints.
   */
  private final IDebugger m_debugger;

  /**
   * Logger which is monitored by the trace listener.
   */
  private final TraceLogger m_logger;

  /**
   * List of addresses removed by the trace logger during its finalization step. Initially this list
   * is null.
   *
   * Written by the worker thread in runExpensiveCommand() and read by the debugger-event thread in
   * receivedReply(); volatile so the handoff is visible across threads.
   */
  private volatile Set<BreakpointAddress> m_removedAddresses = null;

  /**
   * Flag that determines whether the stop trace process has been completed.
   *
   * Set from the debugger-event thread and polled by the worker thread's wait loop; volatile is
   * required or the loop may never observe the update.
   */
  private volatile boolean m_isDone = false;

  /**
   * Received echo breakpoint removal replies before the listener knows what echo breakpoints were
   * removed by the logger.
   *
   * NOTE(review): this list is appended to on the debugger-event thread and iterated on the worker
   * thread; the handoff relies on m_removedAddresses being published first — confirm the reply
   * ordering guarantees of the debug connection.
   */
  private final List<EchoBreakpointsRemovedReply> m_bufferedReplies =
      new ArrayList<EchoBreakpointsRemovedReply>();

  /**
   * Listener that waits for the reply that signals that the echo breakpoints were removed.
   */
  private final IDebugEventListener m_debuggerListener = new DebugEventListenerAdapter() {
    @Override
    public void debuggerClosed(final int code) {
      // If the debugger goes away there is nothing left to wait for.
      m_isDone = true;
    }

    @Override
    public void receivedReply(final EchoBreakpointsRemovedReply reply) {
      if (m_removedAddresses == null) {
        // The logger has not told us yet which breakpoints it removed;
        // buffer the reply and check it later in runExpensiveCommand().
        m_bufferedReplies.add(reply);
      } else {
        m_isDone = checkReply(reply);
      }
    }
  };

  /**
   * Creates a new trace listener object.
   *
   * @param debugger Debugger used to remove the breakpoints.
   * @param logger The trace logger whose echo breakpoints are cleared.
   */
  public CStopTraceListener(final IDebugger debugger, final TraceLogger logger) {
    Preconditions.checkNotNull(logger, "IE01565: Logger argument can not be null");

    m_debugger = debugger;
    m_logger = logger;

    debugger.addListener(m_debuggerListener);
  }

  /**
   * Checks whether a given reply is the reply the trace listener has been waiting for.
   *
   * @param reply The reply to check.
   *
   * @return True, if the reply is the one the listener has been waiting for. False, otherwise.
   *
   * @throws IllegalStateException if the reply contains an address the logger did not remove.
   */
  private boolean checkReply(final EchoBreakpointsRemovedReply reply) {
    final List<Pair<RelocatedAddress, Integer>> receivedAddresses = reply.getAddresses();

    if (receivedAddresses.size() != m_removedAddresses.size()) {
      // Counts differ: scan for an address the logger never removed and fail loudly on it.
      for (final Pair<RelocatedAddress, Integer> pair : receivedAddresses) {
        final RelocatedAddress receivedAddress = pair.first();
        if (!m_removedAddresses.contains(
            DebuggerHelpers.getBreakpointAddress(m_debugger, receivedAddress))) {
          throw new IllegalStateException(
              "IE00680: the number of breakpoints removed differs from the number of received breakpoints in the debugger reply\n"
                  + "The first breakpoint address missmatch is:"
                  + DebuggerHelpers.getBreakpointAddress(m_debugger, receivedAddress));
        }
      }
    }
    return true;
  }

  /**
   * Stops the trace logger, validates any buffered removal replies, and then waits until the
   * removal of all echo breakpoints has been confirmed (or the debugger closed).
   */
  @Override
  protected void runExpensiveCommand() throws Exception {
    // Publish the set of removed addresses before draining the buffered replies.
    m_removedAddresses = new HashSet<BreakpointAddress>(m_logger.stop());

    for (final EchoBreakpointsRemovedReply reply : m_bufferedReplies) {
      m_isDone |= checkReply(reply);
    }

    while (!m_isDone) {
      try {
        Thread.sleep(100);
      } catch (final InterruptedException exception) {
        // restore the interrupted status of the thread.
        // http://www.ibm.com/developerworks/java/library/j-jtp05236/index.html
        java.lang.Thread.currentThread().interrupt();
      }
    }

    m_debugger.removeListener(m_debuggerListener);
  }
}
apache-2.0
sdutry/struts2-jquery
struts2-jquery-datatables-plugin/src/main/java/com/jgeppert/struts2/jquery/datatables/views/freemarker/tags/DatatablesModel.java
1581
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package com.jgeppert.struts2.jquery.datatables.views.freemarker.tags; import com.jgeppert.struts2.jquery.datatables.components.Datatables; import com.opensymphony.xwork2.util.ValueStack; import org.apache.struts2.components.Component; import org.apache.struts2.views.freemarker.tags.TagModel; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; /** * @author <a href="http://www.jgeppert.com">Johannes Geppert</a> * @see Datatables */ public class DatatablesModel extends TagModel { public DatatablesModel(ValueStack stack, HttpServletRequest req, HttpServletResponse res) { super(stack, req, res); } @Override protected Component getBean() { return new Datatables(stack, req, res); } }
apache-2.0
ichaki5748/netty
transport/src/main/java/io/netty/channel/socket/DefaultDatagramChannelConfig.java
13898
/*
 * Copyright 2012 The Netty Project
 *
 * The Netty Project licenses this file to you under the Apache License,
 * version 2.0 (the "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at:
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */
package io.netty.channel.socket;

import io.netty.buffer.ByteBufAllocator;
import io.netty.channel.ChannelException;
import io.netty.channel.ChannelOption;
import io.netty.channel.DefaultChannelConfig;
import io.netty.channel.FixedRecvByteBufAllocator;
import io.netty.channel.MessageSizeEstimator;
import io.netty.channel.RecvByteBufAllocator;
import io.netty.channel.WriteBufferWaterMark;
import io.netty.util.internal.PlatformDependent;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;

import java.io.IOException;
import java.net.DatagramSocket;
import java.net.InetAddress;
import java.net.MulticastSocket;
import java.net.NetworkInterface;
import java.net.SocketException;
import java.util.Map;
import java.util.Objects;

import static io.netty.channel.ChannelOption.*;

/**
 * The default {@link DatagramChannelConfig} implementation.
 *
 * <p>All socket-level options delegate to the wrapped {@link DatagramSocket};
 * multicast-specific options ({@code IP_MULTICAST_*}) are only supported when
 * the wrapped socket is a {@link MulticastSocket} and otherwise throw
 * {@link UnsupportedOperationException}. {@link SocketException}s from the
 * underlying socket are rethrown wrapped in {@link ChannelException}.
 */
public class DefaultDatagramChannelConfig extends DefaultChannelConfig implements DatagramChannelConfig {

    private static final InternalLogger logger =
            InternalLoggerFactory.getInstance(DefaultDatagramChannelConfig.class);

    private final DatagramSocket javaSocket;
    private volatile boolean activeOnOpen;

    /**
     * Creates a new instance.
     *
     * @param channel    the channel this configuration belongs to
     * @param javaSocket the underlying JDK socket all options delegate to; must not be {@code null}
     */
    public DefaultDatagramChannelConfig(DatagramChannel channel, DatagramSocket javaSocket) {
        super(channel, new FixedRecvByteBufAllocator(2048));
        // Same NPE type and message as the previous hand-rolled check.
        this.javaSocket = Objects.requireNonNull(javaSocket, "javaSocket");
    }

    @Override
    @SuppressWarnings("deprecation")
    public Map<ChannelOption<?>, Object> getOptions() {
        return getOptions(
                super.getOptions(),
                SO_BROADCAST, SO_RCVBUF, SO_SNDBUF, SO_REUSEADDR, IP_MULTICAST_LOOP_DISABLED,
                IP_MULTICAST_ADDR, IP_MULTICAST_IF, IP_MULTICAST_TTL, IP_TOS,
                DATAGRAM_CHANNEL_ACTIVE_ON_REGISTRATION);
    }

    @Override
    @SuppressWarnings({ "unchecked", "deprecation" })
    public <T> T getOption(ChannelOption<T> option) {
        if (option == SO_BROADCAST) {
            return (T) Boolean.valueOf(isBroadcast());
        }
        if (option == SO_RCVBUF) {
            return (T) Integer.valueOf(getReceiveBufferSize());
        }
        if (option == SO_SNDBUF) {
            return (T) Integer.valueOf(getSendBufferSize());
        }
        if (option == SO_REUSEADDR) {
            return (T) Boolean.valueOf(isReuseAddress());
        }
        if (option == IP_MULTICAST_LOOP_DISABLED) {
            return (T) Boolean.valueOf(isLoopbackModeDisabled());
        }
        if (option == IP_MULTICAST_ADDR) {
            return (T) getInterface();
        }
        if (option == IP_MULTICAST_IF) {
            return (T) getNetworkInterface();
        }
        if (option == IP_MULTICAST_TTL) {
            return (T) Integer.valueOf(getTimeToLive());
        }
        if (option == IP_TOS) {
            return (T) Integer.valueOf(getTrafficClass());
        }
        if (option == DATAGRAM_CHANNEL_ACTIVE_ON_REGISTRATION) {
            return (T) Boolean.valueOf(activeOnOpen);
        }
        return super.getOption(option);
    }

    @Override
    @SuppressWarnings("deprecation")
    public <T> boolean setOption(ChannelOption<T> option, T value) {
        validate(option, value);

        if (option == SO_BROADCAST) {
            setBroadcast((Boolean) value);
        } else if (option == SO_RCVBUF) {
            setReceiveBufferSize((Integer) value);
        } else if (option == SO_SNDBUF) {
            setSendBufferSize((Integer) value);
        } else if (option == SO_REUSEADDR) {
            setReuseAddress((Boolean) value);
        } else if (option == IP_MULTICAST_LOOP_DISABLED) {
            setLoopbackModeDisabled((Boolean) value);
        } else if (option == IP_MULTICAST_ADDR) {
            setInterface((InetAddress) value);
        } else if (option == IP_MULTICAST_IF) {
            setNetworkInterface((NetworkInterface) value);
        } else if (option == IP_MULTICAST_TTL) {
            setTimeToLive((Integer) value);
        } else if (option == IP_TOS) {
            setTrafficClass((Integer) value);
        } else if (option == DATAGRAM_CHANNEL_ACTIVE_ON_REGISTRATION) {
            setActiveOnOpen((Boolean) value);
        } else {
            return super.setOption(option, value);
        }

        return true;
    }

    // May only be toggled before the channel is registered with an event loop.
    private void setActiveOnOpen(boolean activeOnOpen) {
        if (channel.isRegistered()) {
            throw new IllegalStateException("Can only be changed before channel was registered");
        }
        this.activeOnOpen = activeOnOpen;
    }

    @Override
    public boolean isBroadcast() {
        try {
            return javaSocket.getBroadcast();
        } catch (SocketException e) {
            throw new ChannelException(e);
        }
    }

    @Override
    public DatagramChannelConfig setBroadcast(boolean broadcast) {
        try {
            // See: https://github.com/netty/netty/issues/576
            if (broadcast &&
                !javaSocket.getLocalAddress().isAnyLocalAddress() &&
                !PlatformDependent.isWindows() && !PlatformDependent.isRoot()) {
                // Warn a user about the fact that a non-root user can't receive a
                // broadcast packet on *nix if the socket is bound on non-wildcard address.
                logger.warn(
                        "A non-root user can't receive a broadcast packet if the socket " +
                        "is not bound to a wildcard address; setting the SO_BROADCAST flag " +
                        "anyway as requested on the socket which is bound to " +
                        javaSocket.getLocalSocketAddress() + '.');
            }

            javaSocket.setBroadcast(broadcast);
        } catch (SocketException e) {
            throw new ChannelException(e);
        }
        return this;
    }

    @Override
    public InetAddress getInterface() {
        if (javaSocket instanceof MulticastSocket) {
            try {
                return ((MulticastSocket) javaSocket).getInterface();
            } catch (SocketException e) {
                throw new ChannelException(e);
            }
        } else {
            throw new UnsupportedOperationException();
        }
    }

    @Override
    public DatagramChannelConfig setInterface(InetAddress interfaceAddress) {
        if (javaSocket instanceof MulticastSocket) {
            try {
                ((MulticastSocket) javaSocket).setInterface(interfaceAddress);
            } catch (SocketException e) {
                throw new ChannelException(e);
            }
        } else {
            throw new UnsupportedOperationException();
        }
        return this;
    }

    @Override
    public boolean isLoopbackModeDisabled() {
        if (javaSocket instanceof MulticastSocket) {
            try {
                // MulticastSocket.getLoopbackMode() returns true when loopback is disabled.
                return ((MulticastSocket) javaSocket).getLoopbackMode();
            } catch (SocketException e) {
                throw new ChannelException(e);
            }
        } else {
            throw new UnsupportedOperationException();
        }
    }

    @Override
    public DatagramChannelConfig setLoopbackModeDisabled(boolean loopbackModeDisabled) {
        if (javaSocket instanceof MulticastSocket) {
            try {
                ((MulticastSocket) javaSocket).setLoopbackMode(loopbackModeDisabled);
            } catch (SocketException e) {
                throw new ChannelException(e);
            }
        } else {
            throw new UnsupportedOperationException();
        }
        return this;
    }

    @Override
    public NetworkInterface getNetworkInterface() {
        if (javaSocket instanceof MulticastSocket) {
            try {
                return ((MulticastSocket) javaSocket).getNetworkInterface();
            } catch (SocketException e) {
                throw new ChannelException(e);
            }
        } else {
            throw new UnsupportedOperationException();
        }
    }

    @Override
    public DatagramChannelConfig setNetworkInterface(NetworkInterface networkInterface) {
        if (javaSocket instanceof MulticastSocket) {
            try {
                ((MulticastSocket) javaSocket).setNetworkInterface(networkInterface);
            } catch (SocketException e) {
                throw new ChannelException(e);
            }
        } else {
            throw new UnsupportedOperationException();
        }
        return this;
    }

    @Override
    public boolean isReuseAddress() {
        try {
            return javaSocket.getReuseAddress();
        } catch (SocketException e) {
            throw new ChannelException(e);
        }
    }

    @Override
    public DatagramChannelConfig setReuseAddress(boolean reuseAddress) {
        try {
            javaSocket.setReuseAddress(reuseAddress);
        } catch (SocketException e) {
            throw new ChannelException(e);
        }
        return this;
    }

    @Override
    public int getReceiveBufferSize() {
        try {
            return javaSocket.getReceiveBufferSize();
        } catch (SocketException e) {
            throw new ChannelException(e);
        }
    }

    @Override
    public DatagramChannelConfig setReceiveBufferSize(int receiveBufferSize) {
        try {
            javaSocket.setReceiveBufferSize(receiveBufferSize);
        } catch (SocketException e) {
            throw new ChannelException(e);
        }
        return this;
    }

    @Override
    public int getSendBufferSize() {
        try {
            return javaSocket.getSendBufferSize();
        } catch (SocketException e) {
            throw new ChannelException(e);
        }
    }

    @Override
    public DatagramChannelConfig setSendBufferSize(int sendBufferSize) {
        try {
            javaSocket.setSendBufferSize(sendBufferSize);
        } catch (SocketException e) {
            throw new ChannelException(e);
        }
        return this;
    }

    @Override
    public int getTimeToLive() {
        if (javaSocket instanceof MulticastSocket) {
            try {
                return ((MulticastSocket) javaSocket).getTimeToLive();
            } catch (IOException e) {
                throw new ChannelException(e);
            }
        } else {
            throw new UnsupportedOperationException();
        }
    }

    @Override
    public DatagramChannelConfig setTimeToLive(int ttl) {
        if (javaSocket instanceof MulticastSocket) {
            try {
                ((MulticastSocket) javaSocket).setTimeToLive(ttl);
            } catch (IOException e) {
                throw new ChannelException(e);
            }
        } else {
            throw new UnsupportedOperationException();
        }
        return this;
    }

    @Override
    public int getTrafficClass() {
        try {
            return javaSocket.getTrafficClass();
        } catch (SocketException e) {
            throw new ChannelException(e);
        }
    }

    @Override
    public DatagramChannelConfig setTrafficClass(int trafficClass) {
        try {
            javaSocket.setTrafficClass(trafficClass);
        } catch (SocketException e) {
            throw new ChannelException(e);
        }
        return this;
    }

    @Override
    public DatagramChannelConfig setWriteSpinCount(int writeSpinCount) {
        super.setWriteSpinCount(writeSpinCount);
        return this;
    }

    @Override
    public DatagramChannelConfig setConnectTimeoutMillis(int connectTimeoutMillis) {
        super.setConnectTimeoutMillis(connectTimeoutMillis);
        return this;
    }

    @Override
    @Deprecated
    public DatagramChannelConfig setMaxMessagesPerRead(int maxMessagesPerRead) {
        super.setMaxMessagesPerRead(maxMessagesPerRead);
        return this;
    }

    @Override
    public DatagramChannelConfig setAllocator(ByteBufAllocator allocator) {
        super.setAllocator(allocator);
        return this;
    }

    @Override
    public DatagramChannelConfig setRecvByteBufAllocator(RecvByteBufAllocator allocator) {
        super.setRecvByteBufAllocator(allocator);
        return this;
    }

    @Override
    public DatagramChannelConfig setAutoRead(boolean autoRead) {
        super.setAutoRead(autoRead);
        return this;
    }

    @Override
    public DatagramChannelConfig setAutoClose(boolean autoClose) {
        super.setAutoClose(autoClose);
        return this;
    }

    @Override
    public DatagramChannelConfig setWriteBufferHighWaterMark(int writeBufferHighWaterMark) {
        super.setWriteBufferHighWaterMark(writeBufferHighWaterMark);
        return this;
    }

    @Override
    public DatagramChannelConfig setWriteBufferLowWaterMark(int writeBufferLowWaterMark) {
        super.setWriteBufferLowWaterMark(writeBufferLowWaterMark);
        return this;
    }

    @Override
    public DatagramChannelConfig setWriteBufferWaterMark(WriteBufferWaterMark writeBufferWaterMark) {
        super.setWriteBufferWaterMark(writeBufferWaterMark);
        return this;
    }

    @Override
    public DatagramChannelConfig setMessageSizeEstimator(MessageSizeEstimator estimator) {
        super.setMessageSizeEstimator(estimator);
        return this;
    }
}
apache-2.0
ashank/Office-365-SDK-for-Android
sdk/office365-mail-calendar-contact-sdk/odata/engine/src/main/java/com/msopentech/odatajclient/engine/data/metadata/edm/ComplexTypeDeserializer.java
4530
/**
 * Copyright © Microsoft Open Technologies, Inc.
 *
 * All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
 * OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION
 * ANY IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A
 * PARTICULAR PURPOSE, MERCHANTABILITY OR NON-INFRINGEMENT.
 *
 * See the Apache License, Version 2.0 for the specific language
 * governing permissions and limitations under the License.
 */
package com.msopentech.odatajclient.engine.data.metadata.edm;

import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.JsonToken;
import com.fasterxml.jackson.databind.DeserializationContext;
import com.msopentech.odatajclient.engine.data.metadata.edm.v4.Annotation;
import com.msopentech.odatajclient.engine.utils.ODataVersion;
import java.io.IOException;
import org.apache.commons.lang3.BooleanUtils;

/**
 * Jackson deserializer that materializes a {@code ComplexType} metadata element.
 *
 * <p>The concrete target (V3 vs. V4 complex type) is chosen from the client's
 * working OData version; fields that only exist in V4 ("Abstract", "BaseType",
 * "OpenType", "NavigationProperty", "Annotation") are cast to the V4 type when
 * encountered. Unknown field names are skipped.
 */
public class ComplexTypeDeserializer extends AbstractEdmDeserializer<AbstractComplexType> {

    @Override
    protected AbstractComplexType doDeserialize(final JsonParser jp, final DeserializationContext ctxt)
            throws IOException, JsonProcessingException {

        // Instantiate the version-appropriate container up front.
        final AbstractComplexType complexType = ODataVersion.V3 == client.getWorkingVersion()
                ? new com.msopentech.odatajclient.engine.data.metadata.edm.v3.ComplexType()
                : new com.msopentech.odatajclient.engine.data.metadata.edm.v4.ComplexType();

        // Walk the object token-by-token until its closing brace.
        for (; jp.getCurrentToken() != JsonToken.END_OBJECT; jp.nextToken()) {
            if (jp.getCurrentToken() != JsonToken.FIELD_NAME) {
                continue;
            }

            final String fieldName = jp.getCurrentName();

            if ("Name".equals(fieldName)) {
                complexType.setName(jp.nextTextValue());
            } else if ("Abstract".equals(fieldName)) {
                ((com.msopentech.odatajclient.engine.data.metadata.edm.v4.ComplexType) complexType).
                        setAbstractEntityType(BooleanUtils.toBoolean(jp.nextTextValue()));
            } else if ("BaseType".equals(fieldName)) {
                ((com.msopentech.odatajclient.engine.data.metadata.edm.v4.ComplexType) complexType).
                        setBaseType(jp.nextTextValue());
            } else if ("OpenType".equals(fieldName)) {
                ((com.msopentech.odatajclient.engine.data.metadata.edm.v4.ComplexType) complexType).
                        setOpenType(BooleanUtils.toBoolean(jp.nextTextValue()));
            } else if ("Property".equals(fieldName)) {
                jp.nextToken();
                // Properties are version-specific: dispatch on the runtime type built above.
                if (complexType instanceof com.msopentech.odatajclient.engine.data.metadata.edm.v3.ComplexType) {
                    ((com.msopentech.odatajclient.engine.data.metadata.edm.v3.ComplexType) complexType).
                            getProperties().add(jp.getCodec().readValue(jp,
                                    com.msopentech.odatajclient.engine.data.metadata.edm.v3.Property.class));
                } else {
                    ((com.msopentech.odatajclient.engine.data.metadata.edm.v4.ComplexType) complexType).
                            getProperties().add(jp.getCodec().readValue(jp,
                                    com.msopentech.odatajclient.engine.data.metadata.edm.v4.Property.class));
                }
            } else if ("NavigationProperty".equals(fieldName)) {
                jp.nextToken();
                ((com.msopentech.odatajclient.engine.data.metadata.edm.v4.ComplexType) complexType).
                        getNavigationProperties().add(jp.getCodec().readValue(jp,
                                com.msopentech.odatajclient.engine.data.metadata.edm.v4.NavigationProperty.class));
            } else if ("Annotation".equals(fieldName)) {
                jp.nextToken();
                ((com.msopentech.odatajclient.engine.data.metadata.edm.v4.ComplexType) complexType).
                        setAnnotation(jp.getCodec().readValue(jp, Annotation.class));
            }
        }

        return complexType;
    }
}
apache-2.0
DwayneJengSage/BridgePF
test/org/sagebionetworks/bridge/models/surveys/TestSurvey.java
10293
package org.sagebionetworks.bridge.models.surveys;

import static org.sagebionetworks.bridge.TestConstants.TEST_STUDY_IDENTIFIER;

import java.util.List;
import java.util.UUID;

import org.joda.time.DateTime;
import org.joda.time.LocalDate;
import org.sagebionetworks.bridge.TestUtils;
import org.sagebionetworks.bridge.dynamodb.DynamoSurvey;
import org.sagebionetworks.bridge.dynamodb.DynamoSurveyQuestion;
import org.sagebionetworks.bridge.time.DateUtils;
import org.sagebionetworks.bridge.models.surveys.SurveyRule.Operator;

import com.amazonaws.services.dynamodbv2.datamodeling.DynamoDBIgnore;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.google.common.collect.Lists;

/**
 * Surveys are complicated. Here's an example survey with nearly every type of question.
 */
public class TestSurvey extends DynamoSurvey {

    /** Returns the first question in the survey whose constraints use the given data type, or null. */
    public static SurveyQuestion selectBy(Survey survey, DataType type) {
        for (SurveyQuestion question : survey.getUnmodifiableQuestionList()) {
            if (question.getConstraints().getDataType() == type) {
                return question;
            }
        }
        return null;
    }

    // Multi-select integer question with image-backed options.
    private DynamoSurveyQuestion multiValueQuestion = new DynamoSurveyQuestion() {
        {
            Image terrible = new Image("http://terrible.svg", 600, 300);
            Image poor = new Image("http://poor.svg", 600, 300);
            Image ok = new Image("http://ok.svg", 600, 300);
            Image good = new Image("http://good.svg", 600, 300);
            Image great = new Image("http://great.svg", 600, 300);
            MultiValueConstraints mvc = new MultiValueConstraints(DataType.INTEGER);
            List<SurveyQuestionOption> options = Lists.newArrayList(
                    new SurveyQuestionOption("Terrible", null, "1", terrible),
                    new SurveyQuestionOption("Poor", null, "2", poor),
                    new SurveyQuestionOption("OK", null, "3", ok),
                    new SurveyQuestionOption("Good", null, "4", good),
                    new SurveyQuestionOption("Great", null, "5", great)
            );
            mvc.setEnumeration(options);
            mvc.setAllowOther(false);
            mvc.setAllowMultiple(true);
            setConstraints(mvc);
            setPrompt("How do you feel today?");
            setPromptDetail("Is that how you really feel?");
            setIdentifier("feeling");
            setUiHint(UIHint.LIST);
            setGuid(UUID.randomUUID().toString());
        }
    };

    // Free-text question constrained by a phone-number pattern.
    private DynamoSurveyQuestion stringQuestion = new DynamoSurveyQuestion() {
        {
            StringConstraints c = new StringConstraints();
            c.setMinLength(2);
            c.setMaxLength(255);
            c.setPattern("\\d{3}-\\d{3}-\\d{4}");
            c.setPatternErrorMessage("Provide phone number in format ###-###-####");
            setPrompt("Please enter an emergency phone number");
            setPromptDetail("This should be for someone besides yourself.");
            setIdentifier("name");
            setUiHint(UIHint.TEXTFIELD);
            setConstraints(c);
            setGuid(UUID.randomUUID().toString());
        }
    };

    private DynamoSurveyQuestion booleanQuestion = new DynamoSurveyQuestion() {
        {
            BooleanConstraints c = new BooleanConstraints();
            setPrompt("Do you have high blood pressure?");
            setIdentifier("high_bp");
            setPromptDetail("Be honest: do you have high blood pressue?");
            setUiHint(UIHint.CHECKBOX);
            setConstraints(c);
            setGuid(UUID.randomUUID().toString());
        }
    };

    // Date-only question bounded between a fixed date and "now".
    private DynamoSurveyQuestion dateQuestion = new DynamoSurveyQuestion() {
        {
            DateConstraints c = new DateConstraints();
            c.setEarliestValue(LocalDate.parse("2010-10-10"));
            c.setLatestValue(new LocalDate(DateUtils.getCurrentMillisFromEpoch()));
            setPrompt("When did you last have a medical check-up?");
            setIdentifier("last_checkup");
            setUiHint(UIHint.DATEPICKER);
            setConstraints(c);
            setGuid(UUID.randomUUID().toString());
        }
    };

    private DynamoSurveyQuestion dateTimeQuestion = new DynamoSurveyQuestion() {
        {
            DateTimeConstraints c = new DateTimeConstraints();
            c.setAllowFuture(true);
            c.setEarliestValue(new DateTime(DateUtils.convertToMillisFromEpoch("2010-10-10T00:00:00.000Z")));
            c.setLatestValue(new DateTime());
            setPrompt("When is your next medical check-up scheduled?");
            setIdentifier("last_reading");
            setUiHint(UIHint.DATETIMEPICKER);
            setConstraints(c);
            setGuid(UUID.randomUUID().toString());
        }
    };

    private DynamoSurveyQuestion decimalQuestion = new DynamoSurveyQuestion() {
        {
            DecimalConstraints c = new DecimalConstraints();
            c.setMinValue(0.0d);
            c.setMaxValue(10.0d);
            c.setStep(0.1d);
            setPrompt("What dosage (in grams) do you take of deleuterium each day?");
            setIdentifier("deleuterium_dosage");
            setUiHint(UIHint.SLIDER);
            setConstraints(c);
            setGuid(UUID.randomUUID().toString());
        }
    };

    private DynamoSurveyQuestion durationQuestion = new DynamoSurveyQuestion() {
        {
            DurationConstraints c = new DurationConstraints();
            setPrompt("How log does your appointment take, on average?");
            setIdentifier("time_for_appt");
            setUiHint(UIHint.NUMBERFIELD);
            setConstraints(c);
            setGuid(UUID.randomUUID().toString());
        }
    };

    // Integer question carrying two skip-to rules (one value-based, one "declined").
    private DynamoSurveyQuestion integerQuestion = new DynamoSurveyQuestion() {
        {
            IntegerConstraints c = new IntegerConstraints();
            c.setMinValue(0d);
            c.setMaxValue(4d);
            c.getRules().add(new SurveyRule.Builder().withOperator(Operator.LE).withValue(2)
                    .withSkipToTarget("name").build());
            c.getRules().add(new SurveyRule.Builder().withOperator(Operator.DE)
                    .withSkipToTarget("name").build());
            setPrompt("How many times a day do you take your blood pressure?");
            setIdentifier("bp_x_day");
            setUiHint(UIHint.NUMBERFIELD);
            setConstraints(c);
            setGuid(UUID.randomUUID().toString());
        }
    };

    private DynamoSurveyQuestion timeQuestion = new DynamoSurveyQuestion() {
        {
            TimeConstraints c = new TimeConstraints();
            setPrompt("What times of the day do you take deleuterium?");
            setIdentifier("deleuterium_x_day");
            setUiHint(UIHint.TIMEPICKER);
            setConstraints(c);
            setGuid(UUID.randomUUID().toString());
        }
    };

    private DynamoSurveyQuestion yearMonthQuestion = new DynamoSurveyQuestion() {
        {
            YearMonthConstraints c = new YearMonthConstraints();
            c.setAllowFuture(true);
            setPrompt("What year and month did you get a diagnosis?");
            setIdentifier("diagnosis-year-month");
            setUiHint(UIHint.YEARMONTH);
            setConstraints(c);
            setGuid(UUID.randomUUID().toString());
        }
    };

    private DynamoSurveyQuestion postalCodeQuestion = new DynamoSurveyQuestion() {
        {
            PostalCodeConstraints pcc = new PostalCodeConstraints();
            pcc.setCountryCode(CountryCode.US);
            setPrompt("What are the first 3 digits of your zip code?");
            setIdentifier("postal-code");
            setUiHint(UIHint.POSTALCODE);
            setConstraints(pcc);
            setGuid(UUID.randomUUID().toString());
        }
    };

    /**
     * Builds a fully populated survey. When {@code makeNew} is true, identifiers that
     * would mark the survey as persisted (guid, version, createdOn, published flag, and
     * every element guid) are cleared so the instance looks like an unsaved survey.
     */
    public TestSurvey(Class<?> cls, boolean makeNew) {
        setGuid(UUID.randomUUID().toString());
        setName("General Blood Pressure Survey");
        setIdentifier(TestUtils.randomName(cls));
        setModifiedOn(DateUtils.getCurrentMillisFromEpoch());
        setCreatedOn(DateUtils.getCurrentMillisFromEpoch());
        setVersion(2L);
        setPublished(true);
        setSchemaRevision(42);
        setStudyIdentifier(TEST_STUDY_IDENTIFIER);

        List<SurveyElement> elements = getElements();
        elements.add(booleanQuestion);
        elements.add(dateQuestion);
        elements.add(dateTimeQuestion);
        elements.add(decimalQuestion);
        elements.add(integerQuestion);
        elements.add(durationQuestion);
        elements.add(timeQuestion);
        elements.add(multiValueQuestion);
        elements.add(stringQuestion);
        elements.add(yearMonthQuestion);
        elements.add(postalCodeQuestion);

        if (makeNew) {
            setGuid(null);
            setPublished(false);
            setVersion(null);
            setCreatedOn(0L);
            for (SurveyElement element : getElements()) {
                element.setGuid(null);
            }
        }
    }

    @DynamoDBIgnore
    @JsonIgnore
    public SurveyQuestion getMultiValueQuestion() {
        return multiValueQuestion;
    }

    @DynamoDBIgnore
    @JsonIgnore
    public SurveyQuestion getStringQuestion() {
        return stringQuestion;
    }

    @DynamoDBIgnore
    @JsonIgnore
    public SurveyQuestion getBooleanQuestion() {
        return booleanQuestion;
    }

    @DynamoDBIgnore
    @JsonIgnore
    public SurveyQuestion getDateQuestion() {
        return dateQuestion;
    }

    @DynamoDBIgnore
    @JsonIgnore
    public SurveyQuestion getDateTimeQuestion() {
        return dateTimeQuestion;
    }

    @DynamoDBIgnore
    @JsonIgnore
    public SurveyQuestion getDecimalQuestion() {
        return decimalQuestion;
    }

    @DynamoDBIgnore
    @JsonIgnore
    public SurveyQuestion getIntegerQuestion() {
        return integerQuestion;
    }

    @DynamoDBIgnore
    @JsonIgnore
    public SurveyQuestion getDurationQuestion() {
        return durationQuestion;
    }

    @DynamoDBIgnore
    @JsonIgnore
    public SurveyQuestion getTimeQuestion() {
        return timeQuestion;
    }

    @DynamoDBIgnore
    @JsonIgnore
    public SurveyQuestion getYearMonthQuestion() {
        return yearMonthQuestion;
    }
}
apache-2.0
deepakddixit/incubator-geode
extensions/geode-modules-test/src/main/java/org/apache/geode/modules/session/EmbeddedTomcat.java
5528
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
 * agreements. See the NOTICE file distributed with this work for additional information regarding
 * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the License. You may obtain a
 * copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License
 * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing permissions and limitations under
 * the License.
 */
package org.apache.geode.modules.session;

import java.io.File;
import java.net.InetAddress;
import java.net.MalformedURLException;

import javax.servlet.ServletException;

import org.apache.catalina.Context;
import org.apache.catalina.Engine;
import org.apache.catalina.Host;
import org.apache.catalina.LifecycleException;
import org.apache.catalina.Valve;
import org.apache.catalina.connector.Connector;
import org.apache.catalina.core.StandardEngine;
import org.apache.catalina.core.StandardService;
import org.apache.catalina.core.StandardWrapper;
import org.apache.catalina.loader.WebappLoader;
import org.apache.catalina.realm.MemoryRealm;
import org.apache.catalina.startup.Embedded;
import org.apache.catalina.valves.ValveBase;
import org.apache.juli.logging.Log;
import org.apache.juli.logging.LogFactory;

import org.apache.geode.modules.session.catalina.JvmRouteBinderValve;

/**
 * Test helper that boots an embedded Tomcat instance with a single root context on
 * 127.0.0.1, an HTTP connector on the given port, and a {@link JvmRouteBinderValve}
 * installed for session failover testing.
 */
public class EmbeddedTomcat {

  private String contextPath = null;

  private Embedded container = null;

  private final Log logger = LogFactory.getLog(getClass());

  /**
   * The directory to create the Tomcat server configuration under.
   */
  private String catalinaHome = "tomcat";

  /**
   * The port to run the Tomcat server on.
   */
  private int port = 8089;

  /**
   * The classes directory for the web application being run.
   */
  private String classesDir = "target/classes";

  private Context rootContext = null;

  private Engine engine;

  /**
   * The web resources directory for the web application being run.
   */
  private String webappDir = "";

  /**
   * Assembles (but does not start) the embedded container: realm, webapp loader,
   * root context, localhost host, engine with the given jvmRoute, HTTP connector,
   * and the JVMRoute session-failover valve.
   *
   * @param contextPath logical context path recorded for callers of {@link #getPath()}
   * @param port        port the HTTP connector listens on
   * @param jvmRoute    jvmRoute assigned to the engine (used for session failover)
   */
  public EmbeddedTomcat(String contextPath, int port, String jvmRoute) throws MalformedURLException {
    this.contextPath = contextPath;
    this.port = port;

    // create server
    container = new Embedded();
    container.setCatalinaHome(catalinaHome);
    container.setRealm(new MemoryRealm());

    // create webapp loader
    WebappLoader loader = new WebappLoader(this.getClass().getClassLoader());
    if (classesDir != null) {
      loader.addRepository(new File(classesDir).toURI().toURL().toString());
    }

    rootContext = container.createContext("", webappDir);
    rootContext.setLoader(loader);
    rootContext.setReloadable(true);
    // Otherwise we get NPE when instantiating servlets
    rootContext.setIgnoreAnnotations(true);

    // create host
    Host localHost = container.createHost("127.0.0.1", new File("").getAbsolutePath());
    localHost.addChild(rootContext);
    localHost.setDeployOnStartup(true);

    // create engine
    engine = container.createEngine();
    engine.setName("localEngine");
    engine.addChild(localHost);
    engine.setDefaultHost(localHost.getName());
    engine.setJvmRoute(jvmRoute);
    engine.setService(new StandardService());
    container.addEngine(engine);

    // create http connector
    Connector httpConnector = container.createConnector((InetAddress) null, port, false);
    container.addConnector(httpConnector);
    container.setAwait(true);

    // Create the JVMRoute valve for session failover
    ValveBase valve = new JvmRouteBinderValve();
    ((StandardEngine) engine).addValve(valve);
  }

  /**
   * Starts the embedded Tomcat server and registers a JVM shutdown hook that stops it.
   */
  public void startContainer() throws LifecycleException {
    // start server
    container.start();

    // add shutdown hook to stop server
    Runtime.getRuntime().addShutdownHook(new Thread() {
      @Override
      public void run() {
        stopContainer();
      }
    });
  }

  /**
   * Stops the embedded Tomcat server. Failures are logged (with the full stack trace)
   * rather than propagated, since this also runs from the shutdown hook.
   */
  public void stopContainer() {
    try {
      if (container != null) {
        container.stop();
        logger.info("Stopped container");
      }
    } catch (LifecycleException exception) {
      // Log the throwable itself so the stack trace is preserved, not just the message.
      logger.warn("Cannot Stop Tomcat: " + exception.getMessage(), exception);
    }
  }

  /**
   * Registers a servlet on the root context under the given URL pattern.
   *
   * @param path  URL pattern to map the servlet to
   * @param name  servlet name
   * @param clazz fully qualified servlet class name
   * @return the wrapper created for the servlet
   */
  public StandardWrapper addServlet(String path, String name, String clazz) throws ServletException {
    StandardWrapper servlet = (StandardWrapper) rootContext.createWrapper();
    servlet.setName(name);
    servlet.setServletClass(clazz);
    servlet.setLoadOnStartup(1);

    rootContext.addChild(servlet);
    rootContext.addServletMapping(path, name);

    servlet.setParent(rootContext);
    // servlet.load();

    return servlet;
  }

  public Embedded getEmbedded() {
    return container;
  }

  public Context getRootContext() {
    return rootContext;
  }

  public String getPath() {
    return contextPath;
  }

  public void setPath(String path) {
    this.contextPath = path;
  }

  public int getPort() {
    return port;
  }

  public void addValve(Valve valve) {
    ((StandardEngine) engine).addValve(valve);
  }

  public void removeValve(Valve valve) {
    ((StandardEngine) engine).removeValve(valve);
  }
}
apache-2.0
Seinlin/gerrit
gerrit-server/src/main/java/com/google/gerrit/server/mail/OutgoingEmail.java
15161
// Copyright (C) 2009 The Android Open Source Project // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package com.google.gerrit.server.mail; import com.google.common.collect.Sets; import com.google.gerrit.common.errors.EmailException; import com.google.gerrit.reviewdb.client.Account; import com.google.gerrit.reviewdb.client.UserIdentity; import com.google.gerrit.server.account.AccountState; import com.google.gerrit.server.mail.EmailHeader.AddressList; import com.google.gerrit.server.validators.OutgoingEmailValidationListener; import com.google.gerrit.server.validators.ValidationException; import com.google.gwtorm.server.OrmException; import org.apache.commons.lang.StringUtils; import org.apache.velocity.Template; import org.apache.velocity.VelocityContext; import org.apache.velocity.context.InternalContextAdapterImpl; import org.apache.velocity.runtime.RuntimeInstance; import org.apache.velocity.runtime.parser.node.SimpleNode; import org.eclipse.jgit.util.SystemReader; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.StringReader; import java.io.StringWriter; import java.net.MalformedURLException; import java.net.URL; import java.util.Collection; import java.util.Date; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.Map; import java.util.Set; /** Sends an email to one or more interested parties. 
*/
public abstract class OutgoingEmail {
  private static final Logger log = LoggerFactory.getLogger(OutgoingEmail.class);

  private static final String HDR_TO = "To";
  private static final String HDR_CC = "CC";

  protected String messageClass;
  // Accounts scheduled to receive this message (deduplicated by account id).
  private final HashSet<Account.Id> rcptTo = new HashSet<>();
  // SMTP headers, in insertion order (backed by a LinkedHashMap).
  private final Map<String, EmailHeader> headers;
  // Envelope recipients actually handed to the SMTP sender.
  private final Set<Address> smtpRcptTo = Sets.newHashSet();
  private Address smtpFromAddress;
  private StringBuilder body;
  protected VelocityContext velocityContext;
  protected final EmailArguments args;
  // Account this message is sent on behalf of; may be null for server-generated mail.
  protected Account.Id fromId;

  /** Creates a new message of the given message class (used for headers and templates). */
  protected OutgoingEmail(EmailArguments ea, String mc) {
    args = ea;
    messageClass = mc;
    headers = new LinkedHashMap<>();
  }

  /** Sets the account this message is sent on behalf of. */
  public void setFrom(final Account.Id id) {
    fromId = id;
  }

  /**
   * Format and enqueue the message for delivery.
   *
   * @throws EmailException
   */
  public void send() throws EmailException {
    if (!args.emailSender.isEnabled()) {
      // Server has explicitly disabled email sending.
      //
      return;
    }

    init();
    format();
    appendText(velocifyFile("Footer.vm"));
    if (shouldSendMessage()) {
      if (fromId != null) {
        final Account fromUser = args.accountCache.get(fromId).getAccount();

        if (fromUser.getGeneralPreferences().isCopySelfOnEmails()) {
          // If we are impersonating a user, make sure they receive a CC of
          // this message so they can always review and audit what we sent
          // on their behalf to others.
          //
          add(RecipientType.CC, fromId);
        } else if (rcptTo.remove(fromId)) {
          // If they don't want a copy, but we queued one up anyway,
          // drop them from the recipient lists.
          //
          final String fromEmail = fromUser.getPreferredEmail();
          for (Iterator<Address> i = smtpRcptTo.iterator(); i.hasNext();) {
            if (i.next().email.equals(fromEmail)) {
              i.remove();
            }
          }
          for (EmailHeader hdr : headers.values()) {
            if (hdr instanceof AddressList) {
              ((AddressList) hdr).remove(fromEmail);
            }
          }
          if (smtpRcptTo.isEmpty()) {
            // Nobody left to deliver to after removing the sender.
            return;
          }
        }
      }

      OutgoingEmailValidationListener.Args va = new OutgoingEmailValidationListener.Args();
      va.messageClass = messageClass;
      va.smtpFromAddress = smtpFromAddress;
      va.smtpRcptTo = smtpRcptTo;
      va.headers = headers;
      va.body = body.toString();

      for (OutgoingEmailValidationListener validator : args.outgoingEmailValidationListeners) {
        try {
          validator.validateOutgoingEmail(va);
        } catch (ValidationException e) {
          // A rejecting validator silently drops the message.
          // NOTE(review): the rejection is not logged anywhere — consider
          // whether a debug log would help diagnose missing mail.
          return;
        }
      }

      // Send the (possibly validator-mutated) copies of the envelope data.
      args.emailSender.send(va.smtpFromAddress, va.smtpRcptTo, va.headers, va.body);
    }
  }

  /** Format the message body by calling {@link #appendText(String)}. */
  protected abstract void format() throws EmailException;

  /**
   * Setup the message headers and envelope (TO, CC, BCC).
   *
   * @throws EmailException if an error occurred.
   */
  protected void init() throws EmailException {
    setupVelocityContext();

    smtpFromAddress = args.fromAddressGenerator.from(fromId);
    setHeader("Date", new Date());
    headers.put("From", new EmailHeader.AddressList(smtpFromAddress));
    headers.put(HDR_TO, new EmailHeader.AddressList());
    headers.put(HDR_CC, new EmailHeader.AddressList());
    // Placeholder; presumably filled in later by the sending layer — TODO confirm.
    setHeader("Message-ID", "");

    if (fromId != null) {
      // If we have a user that this message is supposedly caused by
      // but the From header on the email does not match the user as
      // it is a generic header for this Gerrit server, include the
      // Reply-To header with the current user's email address.
      //
      final Address a = toAddress(fromId);
      if (a != null && !smtpFromAddress.email.equals(a.email)) {
        setHeader("Reply-To", a.email);
      }
    }

    setHeader("X-Gerrit-MessageType", messageClass);
    body = new StringBuilder();

    if (fromId != null && args.fromAddressGenerator.isGenericAddress(fromId)) {
      // From header is generic, so name the real author in the body instead.
      appendText(getFromLine());
    }
  }

  /** Builds the "From Name &lt;email&gt;:" body prefix naming the acting user. */
  protected String getFromLine() {
    final Account account = args.accountCache.get(fromId).getAccount();
    final String name = account.getFullName();
    final String email = account.getPreferredEmail();

    StringBuilder f = new StringBuilder();
    if ((name != null && !name.isEmpty())
        || (email != null && !email.isEmpty())) {
      f.append("From");
      if (name != null && !name.isEmpty()) {
        f.append(" ").append(name);
      }
      if (email != null && !email.isEmpty()) {
        f.append(" <").append(email).append(">");
      }
      f.append(":\n\n");
    }
    return f.toString();
  }

  /** Host name of this Gerrit server, from the canonical URL if configured. */
  public String getGerritHost() {
    if (getGerritUrl() != null) {
      try {
        return new URL(getGerritUrl()).getHost();
      } catch (MalformedURLException e) {
        // Try something else.
      }
    }

    // Fall back onto whatever the local operating system thinks
    // this server is called. We hopefully didn't get here as a
    // good admin would have configured the canonical url.
    //
    return SystemReader.getInstance().getHostname();
  }

  /** URL of the user settings page, or null if no server URL is configured. */
  public String getSettingsUrl() {
    if (getGerritUrl() != null) {
      final StringBuilder r = new StringBuilder();
      r.append(getGerritUrl());
      r.append("settings");
      return r.toString();
    }
    return null;
  }

  /** Configured base URL of this server; may be null. */
  public String getGerritUrl() {
    return args.urlProvider.get();
  }

  /** Set a header in the outgoing message using a template. */
  protected void setVHeader(final String name, final String value)
      throws EmailException {
    setHeader(name, velocify(value));
  }

  /** Set a header in the outgoing message. */
  protected void setHeader(final String name, final String value) {
    headers.put(name, new EmailHeader.String(value));
  }

  /** Set a date-valued header in the outgoing message. */
  protected void setHeader(final String name, final Date date) {
    headers.put(name, new EmailHeader.Date(date));
  }

  /** Append text to the outgoing email body. */
  protected void appendText(final String text) {
    if (text != null) {
      body.append(text);
    }
  }

  /** Lookup a human readable name for an account, usually the "full name". */
  protected String getNameFor(final Account.Id accountId) {
    if (accountId == null) {
      // Server-generated message: use the server's own identity.
      return args.gerritPersonIdent.getName();
    }

    final Account userAccount = args.accountCache.get(accountId).getAccount();
    String name = userAccount.getFullName();
    if (name == null) {
      name = userAccount.getPreferredEmail();
    }
    if (name == null) {
      name = args.anonymousCowardName + " #" + accountId;
    }
    return name;
  }

  /**
   * Gets the human readable name and email for an account;
   * if neither are available, returns the Anonymous Coward name.
   *
   * @param accountId user to fetch.
   * @return name/email of account, or Anonymous Coward if unset.
   */
  public String getNameEmailFor(Account.Id accountId) {
    AccountState who = args.accountCache.get(accountId);
    String name = who.getAccount().getFullName();
    String email = who.getAccount().getPreferredEmail();

    if (name != null && email != null) {
      return name + " <" + email + ">";
    } else if (name != null) {
      return name;
    } else if (email != null) {
      return email;
    } else /* (name == null && email == null) */{
      return args.anonymousCowardName + " #" + accountId;
    }
  }

  /**
   * Gets the human readable name and email for an account;
   * if both are unavailable, returns the username. If no
   * username is set, this function returns null.
   *
   * @param accountId user to fetch.
   * @return name/email of account, username, or null if unset.
   */
  public String getUserNameEmailFor(Account.Id accountId) {
    AccountState who = args.accountCache.get(accountId);
    String name = who.getAccount().getFullName();
    String email = who.getAccount().getPreferredEmail();

    if (name != null && email != null) {
      return name + " <" + email + ">";
    } else if (email != null) {
      return email;
    } else if (name != null) {
      return name;
    }
    String username = who.getUserName();
    if (username != null) {
      return username;
    }
    return null;
  }

  /** Whether the assembled message is worth handing to the SMTP sender. */
  protected boolean shouldSendMessage() {
    if (body.length() == 0) {
      // If we have no message body, don't send.
      log.warn("Skipping delivery of email with no body");
      return false;
    }

    if (smtpRcptTo.isEmpty()) {
      // If we have nobody to send this message to, then all of our
      // selection filters previously for this type of message were
      // unable to match a destination. Don't bother sending it.
      log.info("Skipping delivery of email with no recipients");
      return false;
    }

    if (smtpRcptTo.size() == 1
        && rcptTo.size() == 1
        && rcptTo.contains(fromId)) {
      // If the only recipient is also the sender, don't bother.
      //
      return false;
    }

    return true;
  }

  /** Schedule this message for delivery to the listed accounts. */
  protected void add(final RecipientType rt, final Collection<Account.Id> list) {
    for (final Account.Id id : list) {
      add(rt, id);
    }
  }

  /** Schedule delivery to the account behind a user identity, if any. */
  protected void add(final RecipientType rt, final UserIdentity who) {
    if (who != null && who.getAccount() != null) {
      add(rt, who.getAccount());
    }
  }

  /** Schedule delivery of this message to the given account. */
  protected void add(final RecipientType rt, final Account.Id to) {
    try {
      if (!rcptTo.contains(to) && isVisibleTo(to)) {
        rcptTo.add(to);
        add(rt, toAddress(to));
      }
    } catch (OrmException e) {
      // Best-effort: a failed visibility check only skips this recipient.
      log.error("Error reading database for account: " + to, e);
    }
  }

  /**
   * @param to account.
   * @throws OrmException
   * @return whether this email is visible to the given account.
   */
  protected boolean isVisibleTo(final Account.Id to) throws OrmException {
    return true;
  }

  /** Schedule delivery of this message to the given account. */
  protected void add(final RecipientType rt, final Address addr) {
    if (addr != null && addr.email != null && addr.email.length() > 0) {
      if (args.emailSender.canEmail(addr.email)) {
        if (smtpRcptTo.add(addr)) {
          // First time we've seen this address: mirror it into the headers.
          switch (rt) {
            case TO:
              ((EmailHeader.AddressList) headers.get(HDR_TO)).add(addr);
              break;
            case CC:
              ((EmailHeader.AddressList) headers.get(HDR_CC)).add(addr);
              break;
            case BCC:
              // BCC recipients are envelope-only; no header entry.
              break;
          }
        }
      } else {
        log.warn("Not emailing " + addr.email + " (prohibited by allowrcpt)");
      }
    }
  }

  /** Address for an account, or null if the account is inactive or has no email. */
  private Address toAddress(final Account.Id id) {
    final Account a = args.accountCache.get(id).getAccount();
    final String e = a.getPreferredEmail();
    if (!a.isActive() || e == null) {
      return null;
    }
    return new Address(a.getFullName(), e);
  }

  /** Populates the Velocity context shared by all templates of this message. */
  protected void setupVelocityContext() {
    velocityContext = new VelocityContext();
    velocityContext.put("email", this);
    velocityContext.put("messageClass", messageClass);
    velocityContext.put("StringUtils", StringUtils.class);
  }

  /** Renders an inline Velocity template string against the message context. */
  protected String velocify(String template) throws EmailException {
    try {
      RuntimeInstance runtime = args.velocityRuntime;
      String templateName = "OutgoingEmail";
      SimpleNode tree = runtime.parse(new StringReader(template), templateName);
      InternalContextAdapterImpl ica = new InternalContextAdapterImpl(velocityContext);
      ica.pushCurrentTemplateName(templateName);
      try {
        tree.init(ica, runtime);
        StringWriter w = new StringWriter();
        tree.render(ica, w);
        return w.toString();
      } finally {
        // Always pop, even if render throws, to keep the adapter stack balanced.
        ica.popCurrentTemplateName();
      }
    } catch (Exception e) {
      throw new EmailException("Cannot format velocity template: " + template, e);
    }
  }

  /** Renders a named Velocity template resource against the message context. */
  protected String velocifyFile(String name) throws EmailException {
    try {
      RuntimeInstance runtime = args.velocityRuntime;
      if (runtime.getLoaderNameForResource(name) == null) {
        // Not found as-is: fall back to the bundled template directory.
        name = "com/google/gerrit/server/mail/" + name;
      }
      Template template = runtime.getTemplate(name, "UTF-8");
      StringWriter w = new StringWriter();
      template.merge(velocityContext, w);
      return w.toString();
    } catch (Exception e) {
      throw new EmailException("Cannot format velocity template " + name, e);
    }
  }

  /** Joins the items' string forms with the given separator (null items become ""). */
  public String joinStrings(Iterable<Object> in, String joiner) {
    return joinStrings(in.iterator(), joiner);
  }

  /** Joins the items' string forms with the given separator (null items become ""). */
  public String joinStrings(Iterator<Object> in, String joiner) {
    if (!in.hasNext()) {
      return "";
    }

    Object first = in.next();
    if (!in.hasNext()) {
      return safeToString(first);
    }

    StringBuilder r = new StringBuilder();
    r.append(safeToString(first));
    while (in.hasNext()) {
      r.append(joiner).append(safeToString(in.next()));
    }
    return r.toString();
  }

  /** Null-safe toString: null maps to the empty string. */
  private static String safeToString(Object obj) {
    return obj != null ? obj.toString() : "";
  }
}
apache-2.0
floodlight/loxigen-artifacts
openflowj/gen-src/main/java/org/projectfloodlight/openflow/protocol/ver15/OFOxmIpv4DstMaskedVer15.java
10876
// Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
// Copyright (c) 2011, 2012 Open Networking Foundation
// Copyright (c) 2012, 2013 Big Switch Networks, Inc.
// This library was generated by the LoxiGen Compiler.
// See the file LICENSE.txt which should have been included in the source distribution

// Automatically generated by LOXI from template of_class.java
// Do not modify

package org.projectfloodlight.openflow.protocol.ver15;

import org.projectfloodlight.openflow.protocol.*;
import org.projectfloodlight.openflow.protocol.action.*;
import org.projectfloodlight.openflow.protocol.actionid.*;
import org.projectfloodlight.openflow.protocol.bsntlv.*;
import org.projectfloodlight.openflow.protocol.errormsg.*;
import org.projectfloodlight.openflow.protocol.meterband.*;
import org.projectfloodlight.openflow.protocol.instruction.*;
import org.projectfloodlight.openflow.protocol.instructionid.*;
import org.projectfloodlight.openflow.protocol.match.*;
import org.projectfloodlight.openflow.protocol.stat.*;
import org.projectfloodlight.openflow.protocol.oxm.*;
import org.projectfloodlight.openflow.protocol.oxs.*;
import org.projectfloodlight.openflow.protocol.queueprop.*;
import org.projectfloodlight.openflow.types.*;
import org.projectfloodlight.openflow.util.*;
import org.projectfloodlight.openflow.exceptions.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Set;
import io.netty.buffer.ByteBuf;
import com.google.common.hash.PrimitiveSink;
import com.google.common.hash.Funnel;

/**
 * Immutable OXM TLV for the masked IPV4_DST match field at OpenFlow 1.5.
 * Wire form: the fixed 4-byte header 0x80001908 followed by the 4-byte
 * value and the 4-byte mask (see {@code Reader}/{@code Writer} below).
 */
class OFOxmIpv4DstMaskedVer15 implements OFOxmIpv4DstMasked {
    private static final Logger logger = LoggerFactory.getLogger(OFOxmIpv4DstMaskedVer15.class);
    // version: 1.5
    final static byte WIRE_VERSION = 6;
    final static int LENGTH = 12;

        private final static IPv4Address DEFAULT_VALUE = IPv4Address.NONE;
        private final static IPv4Address DEFAULT_VALUE_MASK = IPv4Address.NONE;

    // OF message fields
    private final IPv4Address value;
    private final IPv4Address mask;
//
    // Immutable default instance
    final static OFOxmIpv4DstMaskedVer15 DEFAULT = new OFOxmIpv4DstMaskedVer15(
        DEFAULT_VALUE, DEFAULT_VALUE_MASK
    );

    // package private constructor - used by readers, builders, and factory
    OFOxmIpv4DstMaskedVer15(IPv4Address value, IPv4Address mask) {
        if(value == null) {
            throw new NullPointerException("OFOxmIpv4DstMaskedVer15: property value cannot be null");
        }
        if(mask == null) {
            throw new NullPointerException("OFOxmIpv4DstMaskedVer15: property mask cannot be null");
        }
        this.value = value;
        this.mask = mask;
    }

    // Accessors for OF message fields
    @Override
    public long getTypeLen() {
        return 0x80001908L;
    }

    @Override
    public IPv4Address getValue() {
        return value;
    }

    @Override
    public IPv4Address getMask() {
        return mask;
    }

    @Override
    public MatchField<IPv4Address> getMatchField() {
        return MatchField.IPV4_DST;
    }

    @Override
    public boolean isMasked() {
        return true;
    }

    // Canonical form: exact-match mask collapses to the unmasked OXM,
    // an all-wildcard mask collapses to null (match anything).
    public OFOxm<IPv4Address> getCanonical() {
        if (IPv4Address.NO_MASK.equals(mask)) {
            return new OFOxmIpv4DstVer15(value);
        } else if(IPv4Address.FULL_MASK.equals(mask)) {
            return null;
        } else {
            return this;
        }
    }

    @Override
    public OFVersion getVersion() {
        return OFVersion.OF_15;
    }

    public OFOxmIpv4DstMasked.Builder createBuilder() {
        return new BuilderWithParent(this);
    }

    // Builder that falls back to an existing message's fields for unset properties.
    static class BuilderWithParent implements OFOxmIpv4DstMasked.Builder {
        final OFOxmIpv4DstMaskedVer15 parentMessage;

        // OF message fields
        private boolean valueSet;
        private IPv4Address value;
        private boolean maskSet;
        private IPv4Address mask;

        BuilderWithParent(OFOxmIpv4DstMaskedVer15 parentMessage) {
            this.parentMessage = parentMessage;
        }

        @Override
        public long getTypeLen() {
            return 0x80001908L;
        }

        @Override
        public IPv4Address getValue() {
            return value;
        }

        @Override
        public OFOxmIpv4DstMasked.Builder setValue(IPv4Address value) {
            this.value = value;
            this.valueSet = true;
            return this;
        }

        @Override
        public IPv4Address getMask() {
            return mask;
        }

        @Override
        public OFOxmIpv4DstMasked.Builder setMask(IPv4Address mask) {
            this.mask = mask;
            this.maskSet = true;
            return this;
        }

        @Override
        public MatchField<IPv4Address> getMatchField() {
            return MatchField.IPV4_DST;
        }

        @Override
        public boolean isMasked() {
            return true;
        }

        @Override
        public OFOxm<IPv4Address> getCanonical()throws UnsupportedOperationException {
            throw new UnsupportedOperationException("Property canonical not supported in version 1.5");
        }

        @Override
        public OFVersion getVersion() {
            return OFVersion.OF_15;
        }

        @Override
        public OFOxmIpv4DstMasked build() {
                IPv4Address value = this.valueSet ? this.value : parentMessage.value;
                if(value == null)
                    throw new NullPointerException("Property value must not be null");
                IPv4Address mask = this.maskSet ? this.mask : parentMessage.mask;
                if(mask == null)
                    throw new NullPointerException("Property mask must not be null");
                //
                return new OFOxmIpv4DstMaskedVer15(
                    value,
                    mask
                );
        }
    }

    // Builder that falls back to the defaults (IPv4Address.NONE) for unset properties.
    static class Builder implements OFOxmIpv4DstMasked.Builder {
        // OF message fields
        private boolean valueSet;
        private IPv4Address value;
        private boolean maskSet;
        private IPv4Address mask;

        @Override
        public long getTypeLen() {
            return 0x80001908L;
        }

        @Override
        public IPv4Address getValue() {
            return value;
        }

        @Override
        public OFOxmIpv4DstMasked.Builder setValue(IPv4Address value) {
            this.value = value;
            this.valueSet = true;
            return this;
        }

        @Override
        public IPv4Address getMask() {
            return mask;
        }

        @Override
        public OFOxmIpv4DstMasked.Builder setMask(IPv4Address mask) {
            this.mask = mask;
            this.maskSet = true;
            return this;
        }

        @Override
        public MatchField<IPv4Address> getMatchField() {
            return MatchField.IPV4_DST;
        }

        @Override
        public boolean isMasked() {
            return true;
        }

        @Override
        public OFOxm<IPv4Address> getCanonical()throws UnsupportedOperationException {
            throw new UnsupportedOperationException("Property canonical not supported in version 1.5");
        }

        @Override
        public OFVersion getVersion() {
            return OFVersion.OF_15;
        }

//
        @Override
        public OFOxmIpv4DstMasked build() {
            IPv4Address value = this.valueSet ? this.value : DEFAULT_VALUE;
            if(value == null)
                throw new NullPointerException("Property value must not be null");
            IPv4Address mask = this.maskSet ? this.mask : DEFAULT_VALUE_MASK;
            if(mask == null)
                throw new NullPointerException("Property mask must not be null");
            return new OFOxmIpv4DstMaskedVer15(
                    value,
                    mask
                );
        }
    }

    final static Reader READER = new Reader();
    static class Reader implements OFMessageReader<OFOxmIpv4DstMasked> {
        @Override
        public OFOxmIpv4DstMasked readFrom(ByteBuf bb) throws OFParseError {
            // fixed value property typeLen == 0x80001908L
            int typeLen = bb.readInt();
            if(typeLen != (int) 0x80001908)
                throw new OFParseError("Wrong typeLen: Expected=0x80001908L(0x80001908L), got="+typeLen);
            IPv4Address value = IPv4Address.read4Bytes(bb);
            IPv4Address mask = IPv4Address.read4Bytes(bb);

            OFOxmIpv4DstMaskedVer15 oxmIpv4DstMaskedVer15 = new OFOxmIpv4DstMaskedVer15(
                    value,
                      mask
                    );
            if(logger.isTraceEnabled())
                logger.trace("readFrom - read={}", oxmIpv4DstMaskedVer15);
            return oxmIpv4DstMaskedVer15;
        }
    }

    public void putTo(PrimitiveSink sink) {
        FUNNEL.funnel(this, sink);
    }

    final static OFOxmIpv4DstMaskedVer15Funnel FUNNEL = new OFOxmIpv4DstMaskedVer15Funnel();
    static class OFOxmIpv4DstMaskedVer15Funnel implements Funnel<OFOxmIpv4DstMaskedVer15> {
        private static final long serialVersionUID = 1L;
        @Override
        public void funnel(OFOxmIpv4DstMaskedVer15 message, PrimitiveSink sink) {
            // fixed value property typeLen = 0x80001908L
            sink.putInt((int) 0x80001908);
            message.value.putTo(sink);
            message.mask.putTo(sink);
        }
    }

    public void writeTo(ByteBuf bb) {
        WRITER.write(bb, this);
    }

    final static Writer WRITER = new Writer();
    static class Writer implements OFMessageWriter<OFOxmIpv4DstMaskedVer15> {
        @Override
        public void write(ByteBuf bb, OFOxmIpv4DstMaskedVer15 message) {
            // fixed value property typeLen = 0x80001908L
            bb.writeInt((int) 0x80001908);
            message.value.write4Bytes(bb);
            message.mask.write4Bytes(bb);
        }
    }

    @Override
    public String toString() {
        StringBuilder b = new StringBuilder("OFOxmIpv4DstMaskedVer15(");
        b.append("value=").append(value);
        b.append(", ");
        b.append("mask=").append(mask);
        b.append(")");
        return b.toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (getClass() != obj.getClass())
            return false;
        OFOxmIpv4DstMaskedVer15 other = (OFOxmIpv4DstMaskedVer15) obj;

        if (value == null) {
            if (other.value != null)
                return false;
        } else if (!value.equals(other.value))
            return false;
        if (mask == null) {
            if (other.mask != null)
                return false;
        } else if (!mask.equals(other.mask))
            return false;
        return true;
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;

        result = prime * result + ((value == null) ? 0 : value.hashCode());
        result = prime * result + ((mask == null) ? 0 : mask.hashCode());
        return result;
    }
}
apache-2.0
rancherio/cattle
modules/caas/common/src/main/java/io/cattle/platform/containersync/model/ContainerEventEvent.java
644
package io.cattle.platform.containersync.model;

import io.cattle.platform.core.addon.ContainerEvent;
import io.cattle.platform.core.constants.ClusterConstants;
import io.cattle.platform.eventing.model.EventVO;
import io.cattle.platform.framework.event.FrameworkEvents;

/**
 * Framework event that carries a {@link ContainerEvent} payload, addressed
 * to the cluster the container event originated from.
 */
public class ContainerEventEvent extends EventVO<ContainerEvent, Object> {

    /** Creates an empty event carrying only the container-event name. */
    public ContainerEventEvent() {
        setName(FrameworkEvents.CONTAINER_EVENT);
    }

    /**
     * Creates a fully populated event: the payload itself plus the cluster
     * resource the event is routed against.
     */
    public ContainerEventEvent(ContainerEvent data) {
        setName(FrameworkEvents.CONTAINER_EVENT);
        setData(data);
        setResourceType(ClusterConstants.TYPE);
        setResourceId(data.getClusterId().toString());
    }
}
apache-2.0
DariusX/camel
core/camel-core-engine/src/main/java/org/apache/camel/reifier/dataformat/ProtobufDataFormatReifier.java
1607
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.camel.reifier.dataformat;

import java.util.Map;

import org.apache.camel.CamelContext;
import org.apache.camel.model.DataFormatDefinition;
import org.apache.camel.model.dataformat.ProtobufDataFormat;

/**
 * Reifier that turns a {@link ProtobufDataFormat} model definition into the
 * property map used to configure the runtime protobuf data format.
 */
public class ProtobufDataFormatReifier extends DataFormatReifier<ProtobufDataFormat> {

    // Narrows the generic definition to ProtobufDataFormat for the base class.
    public ProtobufDataFormatReifier(CamelContext camelContext, DataFormatDefinition definition) {
        super(camelContext, (ProtobufDataFormat)definition);
    }

    /**
     * Copies the protobuf-specific options from the model definition into
     * {@code properties}. Values may be null; presumably the consumer skips
     * unset entries — TODO confirm against DataFormatReifier.
     */
    @Override
    protected void prepareDataFormatConfig(Map<String, Object> properties) {
        properties.put("instanceClass", definition.getInstanceClass());
        properties.put("contentTypeFormat", definition.getContentTypeFormat());
        properties.put("defaultInstance", definition.getDefaultInstance());
    }
}
apache-2.0
goodwinnk/intellij-community
platform/remote-servers/impl/src/com/intellij/remoteServer/impl/configuration/deployment/DeployToServerConfigurationTypesRegistrar.java
1605
// Copyright 2000-2018 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file. package com.intellij.remoteServer.impl.configuration.deployment; import com.intellij.execution.configurations.ConfigurationType; import com.intellij.ide.ApplicationInitializedListener; import com.intellij.openapi.extensions.ExtensionPoint; import com.intellij.remoteServer.ServerType; import org.jetbrains.annotations.NotNull; /** * @author nik */ public class DeployToServerConfigurationTypesRegistrar implements ApplicationInitializedListener { @Override public void componentsInitialized() { //todo[nik] improve this: configuration types should be loaded lazily ExtensionPoint<ConfigurationType> point = ConfigurationType.CONFIGURATION_TYPE_EP.getPoint(null); for (ServerType serverType : ServerType.EP_NAME.getExtensionList()) { point.registerExtension(new DeployToServerConfigurationType(serverType)); } } @NotNull public static DeployToServerConfigurationType getDeployConfigurationType(@NotNull ServerType<?> serverType) { for (ConfigurationType type : ConfigurationType.CONFIGURATION_TYPE_EP.getExtensionList()) { if (type instanceof DeployToServerConfigurationType) { DeployToServerConfigurationType configurationType = (DeployToServerConfigurationType)type; if (configurationType.getServerType().equals(serverType)) { return configurationType; } } } throw new IllegalArgumentException("Cannot find run configuration type for " + serverType.getClass()); } }
apache-2.0
mduerig/jackrabbit-oak
oak-core/src/main/java/org/apache/jackrabbit/oak/Oak.java
38295
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.jackrabbit.oak; import static com.google.common.base.Preconditions.checkNotNull; import static com.google.common.base.Preconditions.checkState; import static com.google.common.collect.Lists.newArrayList; import static java.util.Collections.emptyMap; import static org.apache.jackrabbit.oak.spi.whiteboard.WhiteboardUtils.registerMBean; import java.io.Closeable; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.SynchronousQueue; import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import javax.annotation.Nonnull; import javax.jcr.NoSuchWorkspaceException; import javax.management.JMException; import javax.management.MBeanServer; import javax.management.ObjectName; import 
javax.management.StandardMBean; import javax.security.auth.login.LoginException; import com.google.common.base.Function; import com.google.common.base.Supplier; import com.google.common.collect.ImmutableList; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import com.google.common.io.Closer; import org.apache.jackrabbit.oak.api.CommitFailedException; import org.apache.jackrabbit.oak.api.ContentRepository; import org.apache.jackrabbit.oak.api.ContentSession; import org.apache.jackrabbit.oak.api.Descriptors; import org.apache.jackrabbit.oak.api.Root; import org.apache.jackrabbit.oak.api.jmx.QueryEngineSettingsMBean; import org.apache.jackrabbit.oak.api.jmx.RepositoryManagementMBean; import org.apache.jackrabbit.oak.commons.concurrent.ExecutorCloser; import org.apache.jackrabbit.oak.commons.jmx.AnnotatedStandardMBean; import org.apache.jackrabbit.oak.core.ContentRepositoryImpl; import org.apache.jackrabbit.oak.management.RepositoryManager; import org.apache.jackrabbit.oak.plugins.atomic.AtomicCounterEditorProvider; import org.apache.jackrabbit.oak.plugins.commit.ConflictHook; import org.apache.jackrabbit.oak.plugins.commit.ConflictValidatorProvider; import org.apache.jackrabbit.oak.plugins.index.AsyncIndexUpdate; import org.apache.jackrabbit.oak.plugins.index.CompositeIndexEditorProvider; import org.apache.jackrabbit.oak.plugins.index.IndexConstants; import org.apache.jackrabbit.oak.plugins.index.IndexEditorProvider; import org.apache.jackrabbit.oak.plugins.index.IndexMBeanRegistration; import org.apache.jackrabbit.oak.plugins.index.IndexUpdateProvider; import org.apache.jackrabbit.oak.plugins.index.counter.NodeCounterEditorProvider; import org.apache.jackrabbit.oak.plugins.index.counter.jmx.NodeCounter; import org.apache.jackrabbit.oak.plugins.index.counter.jmx.NodeCounterMBean; import org.apache.jackrabbit.oak.plugins.index.counter.jmx.NodeCounterOld; import org.apache.jackrabbit.oak.plugins.index.nodetype.NodeTypeIndexProvider; 
import org.apache.jackrabbit.oak.plugins.index.property.OrderedPropertyIndexEditorProvider; import org.apache.jackrabbit.oak.plugins.index.property.PropertyIndexEditorProvider; import org.apache.jackrabbit.oak.plugins.index.property.PropertyIndexProvider; import org.apache.jackrabbit.oak.plugins.index.property.jmx.PropertyIndexAsyncReindex; import org.apache.jackrabbit.oak.plugins.index.property.jmx.PropertyIndexAsyncReindexMBean; import org.apache.jackrabbit.oak.plugins.index.reference.ReferenceEditorProvider; import org.apache.jackrabbit.oak.plugins.index.reference.ReferenceIndexProvider; import org.apache.jackrabbit.oak.plugins.itemsave.ItemSaveValidatorProvider; import org.apache.jackrabbit.oak.plugins.memory.MemoryNodeStore; import org.apache.jackrabbit.oak.plugins.name.NameValidatorProvider; import org.apache.jackrabbit.oak.plugins.name.NamespaceEditorProvider; import org.apache.jackrabbit.oak.plugins.nodetype.TypeEditorProvider; import org.apache.jackrabbit.oak.plugins.observation.ChangeCollectorProvider; import org.apache.jackrabbit.oak.plugins.version.VersionHook; import org.apache.jackrabbit.oak.query.QueryEngineSettings; import org.apache.jackrabbit.oak.query.stats.QueryStatsMBean; import org.apache.jackrabbit.oak.security.SecurityProviderImpl; import org.apache.jackrabbit.oak.spi.commit.CompositeConflictHandler; import org.apache.jackrabbit.oak.spi.commit.CommitHook; import org.apache.jackrabbit.oak.spi.commit.CommitInfo; import org.apache.jackrabbit.oak.spi.commit.CompositeEditorProvider; import org.apache.jackrabbit.oak.spi.commit.CompositeHook; import org.apache.jackrabbit.oak.spi.commit.ConflictHandler; import org.apache.jackrabbit.oak.spi.commit.ConflictHandlers; import org.apache.jackrabbit.oak.spi.commit.Editor; import org.apache.jackrabbit.oak.spi.commit.EditorHook; import org.apache.jackrabbit.oak.spi.commit.EditorProvider; import org.apache.jackrabbit.oak.spi.commit.Observable; import org.apache.jackrabbit.oak.spi.commit.Observer; import 
org.apache.jackrabbit.oak.spi.commit.ThreeWayConflictHandler; import org.apache.jackrabbit.oak.spi.lifecycle.CompositeInitializer; import org.apache.jackrabbit.oak.spi.lifecycle.RepositoryInitializer; import org.apache.jackrabbit.oak.spi.lifecycle.WorkspaceInitializer; import org.apache.jackrabbit.oak.spi.query.CompositeQueryIndexProvider; import org.apache.jackrabbit.oak.spi.query.QueryIndexProvider; import org.apache.jackrabbit.oak.spi.query.QueryIndexProviderAware; import org.apache.jackrabbit.oak.spi.query.QueryLimits; import org.apache.jackrabbit.oak.spi.security.SecurityConfiguration; import org.apache.jackrabbit.oak.spi.security.SecurityProvider; import org.apache.jackrabbit.oak.spi.state.Clusterable; import org.apache.jackrabbit.oak.spi.state.NodeBuilder; import org.apache.jackrabbit.oak.spi.state.NodeState; import org.apache.jackrabbit.oak.spi.state.NodeStore; import org.apache.jackrabbit.oak.spi.whiteboard.CompositeRegistration; import org.apache.jackrabbit.oak.spi.whiteboard.DefaultWhiteboard; import org.apache.jackrabbit.oak.spi.whiteboard.Registration; import org.apache.jackrabbit.oak.spi.whiteboard.Tracker; import org.apache.jackrabbit.oak.spi.whiteboard.Whiteboard; import org.apache.jackrabbit.oak.spi.whiteboard.WhiteboardAware; import org.apache.jackrabbit.oak.spi.whiteboard.WhiteboardUtils; import org.apache.jackrabbit.oak.spi.descriptors.AggregatingDescriptors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Builder class for constructing {@link ContentRepository} instances with * a set of specified plugin components. This class acts as a public facade * that hides the internal implementation classes and the details of how * they get instantiated and wired together. 
* * @since Oak 0.6 */ public class Oak { private static final Logger LOG = LoggerFactory.getLogger(Oak.class); /** * Constant for the default workspace name */ public static final String DEFAULT_WORKSPACE_NAME = "default"; private final NodeStore store; private final List<RepositoryInitializer> initializers = newArrayList(); private AnnotatedQueryEngineSettings queryEngineSettings = new AnnotatedQueryEngineSettings(); private final List<QueryIndexProvider> queryIndexProviders = newArrayList(); private final List<IndexEditorProvider> indexEditorProviders = newArrayList(); private final List<CommitHook> commitHooks = newArrayList(); private final List<Observer> observers = Lists.newArrayList(); private List<EditorProvider> editorProviders = newArrayList(); private CompositeConflictHandler conflictHandler; private SecurityProvider securityProvider; private ScheduledExecutorService scheduledExecutor; private Executor executor; private final Closer closer = Closer.create(); private ContentRepository contentRepository; private Clusterable clusterable; /** * Default {@code ScheduledExecutorService} used for scheduling background tasks. * This default spawns up to 32 background thread on an as need basis. Idle * threads are pruned after one minute. * @return fresh ScheduledExecutorService */ public static ScheduledExecutorService defaultScheduledExecutor() { ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(32, new ThreadFactory() { private final AtomicInteger counter = new AtomicInteger(); @Override public Thread newThread(@Nonnull Runnable r) { Thread thread = new Thread(r, createName()); thread.setDaemon(true); return thread; } private String createName() { return "oak-scheduled-executor-" + counter.getAndIncrement(); } }); executor.setKeepAliveTime(1, TimeUnit.MINUTES); executor.allowCoreThreadTimeOut(true); return executor; } /** * Default {@code ExecutorService} used for scheduling concurrent tasks. 
* This default spawns as many threads as required with a priority of * {@code Thread.MIN_PRIORITY}. Idle threads are pruned after one minute. * @return fresh ExecutorService */ public static ExecutorService defaultExecutorService() { ThreadPoolExecutor executor = new ThreadPoolExecutor(0, Integer.MAX_VALUE, 60L, TimeUnit.SECONDS, new SynchronousQueue<Runnable>(), new ThreadFactory() { private final AtomicInteger counter = new AtomicInteger(); @Override public Thread newThread(@Nonnull Runnable r) { Thread thread = new Thread(r, createName()); thread.setDaemon(true); thread.setPriority(Thread.MIN_PRIORITY); return thread; } private String createName() { return "oak-executor-" + counter.getAndIncrement(); } }); executor.setKeepAliveTime(1, TimeUnit.MINUTES); executor.allowCoreThreadTimeOut(true); return executor; } private synchronized ScheduledExecutorService getScheduledExecutor() { if (scheduledExecutor == null) { scheduledExecutor = defaultScheduledExecutor(); closer.register(new ExecutorCloser(scheduledExecutor)); } return scheduledExecutor; } private synchronized Executor getExecutor() { if (executor == null) { ExecutorService executorService = defaultExecutorService(); executor = executorService; closer.register(new ExecutorCloser(executorService)); } return executor; } private MBeanServer mbeanServer; private String defaultWorkspaceName = DEFAULT_WORKSPACE_NAME; @SuppressWarnings("unchecked") private static <T> T getValue( Map<?, ?> properties, String name, Class<T> type, T def) { Object value = properties.get(name); if (type.isInstance(value)) { return (T) value; } else { return def; } } private static <T> T getValue( Map<?, ?> properties, String name, Class<T> type) { return getValue(properties, name, type, null); } private Whiteboard whiteboard = new DefaultWhiteboard() { @Override public <T> Registration register( final Class<T> type, T service, Map<?, ?> properties) { final Registration registration = super.register(type, service, properties); final 
Closer observerSubscription = Closer.create(); Future<?> future = null; if (type == Runnable.class) { Runnable runnable = (Runnable) service; Long period = getValue(properties, "scheduler.period", Long.class); if (period != null) { Boolean concurrent = getValue( properties, "scheduler.concurrent", Boolean.class, Boolean.FALSE); if (concurrent) { future = getScheduledExecutor().scheduleAtFixedRate( runnable, period, period, TimeUnit.SECONDS); } else { future = getScheduledExecutor().scheduleWithFixedDelay( runnable, period, period, TimeUnit.SECONDS); } } } else if (type == Observer.class && store instanceof Observable) { observerSubscription.register(((Observable) store).addObserver((Observer) service)); } ObjectName objectName = null; Object name = properties.get("jmx.objectname"); if (mbeanServer != null && name != null) { try { if (name instanceof ObjectName) { objectName = (ObjectName) name; } else { objectName = new ObjectName(String.valueOf(name)); } if (type.getName().equals(service.getClass().getName().concat("MBean")) || service instanceof StandardMBean){ mbeanServer.registerMBean(service, objectName); } else { //Wrap the MBean in std MBean mbeanServer.registerMBean(new StandardMBean(service, type), objectName); } } catch (JMException e) { LOG.warn("Unexpected exception while registering MBean of type [{}] " + "against name [{}]", type, objectName, e); } } final Future<?> f = future; final ObjectName on = objectName; return new Registration() { @Override public void unregister() { if (f != null) { f.cancel(false); } if (on != null) { try { mbeanServer.unregisterMBean(on); } catch (JMException e) { LOG.warn("Unexpected exception while unregistering MBean of type {} " + "against name {} ", type, on, e); } } try { observerSubscription.close(); } catch (IOException e) { LOG.warn("Unexpected IOException while unsubscribing observer", e); } registration.unregister(); } }; } }; /** * Map containing the (names -> delayInSecods) of the background indexing * tasks 
that need to be started with this repository. A {@code null} value * means no background tasks will run. */ private Map<String, Long> asyncTasks; private boolean failOnMissingIndexProvider; public Oak(NodeStore store) { this.store = checkNotNull(store); } public Oak() { this(new MemoryNodeStore()); // this(new DocumentMK.Builder().open()); // this(new LogWrapper(new DocumentMK.Builder().open())); } /** * Define the current repository as being a {@link Clusterable} one. * * @param c * @return */ @Nonnull public Oak with(@Nonnull Clusterable c) { this.clusterable = checkNotNull(c); return this; } /** * Sets the default workspace name that should be used in case of login * with {@code null} workspace name. If this method has not been called * some internal default value will be used. * * @param defaultWorkspaceName The name of the default workspace. * @return this builder. */ @Nonnull public Oak with(@Nonnull String defaultWorkspaceName) { this.defaultWorkspaceName = checkNotNull(defaultWorkspaceName); return this; } @Nonnull public Oak with(@Nonnull RepositoryInitializer initializer) { initializers.add(checkNotNull(initializer)); return this; } @Nonnull public Oak with(@Nonnull QueryLimits settings) { QueryEngineSettings s = new QueryEngineSettings(); s.setFailTraversal(settings.getFailTraversal()); s.setFullTextComparisonWithoutIndex(settings.getFullTextComparisonWithoutIndex()); s.setLimitInMemory(settings.getLimitInMemory()); s.setLimitReads(settings.getLimitReads()); this.queryEngineSettings = new AnnotatedQueryEngineSettings(s); return this; } /** * Associates the given query index provider with the repository to * be created. * * @param provider query index provider * @return this builder */ @Nonnull public Oak with(@Nonnull QueryIndexProvider provider) { queryIndexProviders.add(checkNotNull(provider)); return this; } /** * Associates the given index hook provider with the repository to * be created. 
* * @param provider index hook provider * @return this builder */ @Nonnull public Oak with(@Nonnull IndexEditorProvider provider) { indexEditorProviders.add(checkNotNull(provider)); return this; } /** * Associates the given commit hook with the repository to be created. * * @param hook commit hook * @return this builder */ @Nonnull public Oak with(@Nonnull CommitHook hook) { checkNotNull(hook); withEditorHook(); commitHooks.add(hook); return this; } /** * Turns all currently tracked editors to an editor commit hook and * associates that hook with the repository to be created. This way * a sequence of {@code with()} calls that alternates between editors * and other commit hooks will have all the editors in the correct * order while still being able to leverage the performance gains of * multiple editors iterating over the changes simultaneously. */ private void withEditorHook() { if (!editorProviders.isEmpty()) { commitHooks.add(new EditorHook( CompositeEditorProvider.compose(editorProviders))); editorProviders = newArrayList(); } } /** * Associates the given editor provider with the repository to be created. * * @param provider editor provider * @return this builder */ @Nonnull public Oak with(@Nonnull EditorProvider provider) { editorProviders.add(checkNotNull(provider)); return this; } /** * Associates the given editor with the repository to be created. 
* * @param editor editor * @return this builder */ @Nonnull public Oak with(@Nonnull final Editor editor) { checkNotNull(editor); return with(new EditorProvider() { @Override @Nonnull public Editor getRootEditor( NodeState before, NodeState after, NodeBuilder builder, CommitInfo info) { return editor; } }); } @Nonnull public Oak with(@Nonnull SecurityProvider securityProvider) { this.securityProvider = checkNotNull(securityProvider); if (securityProvider instanceof WhiteboardAware) { ((WhiteboardAware) securityProvider).setWhiteboard(whiteboard); } for (SecurityConfiguration sc : securityProvider.getConfigurations()) { RepositoryInitializer ri = sc.getRepositoryInitializer(); if (ri != RepositoryInitializer.DEFAULT) { initializers.add(ri); } for (ThreeWayConflictHandler tch : sc.getConflictHandlers()) { with(tch); } } return this; } /** * Associates the given conflict handler with the repository to be created. * * @param conflictHandler conflict handler * @return this builder * @deprecated Use {@link #with(ThreeWayConflictHandler)} instead */ @Deprecated @Nonnull public Oak with(@Nonnull ConflictHandler conflictHandler) { return with(ConflictHandlers.wrap(conflictHandler)); } @Nonnull public Oak with(@Nonnull ThreeWayConflictHandler conflictHandler) { checkNotNull(conflictHandler); withEditorHook(); if (this.conflictHandler == null) { if (conflictHandler instanceof CompositeConflictHandler) { this.conflictHandler = (CompositeConflictHandler) conflictHandler; } else { this.conflictHandler = new CompositeConflictHandler(); this.conflictHandler.addHandler(conflictHandler); } commitHooks.add(new ConflictHook(conflictHandler)); } else { this.conflictHandler.addHandler(conflictHandler); } return this; } @Nonnull public Oak with(@Nonnull ScheduledExecutorService scheduledExecutor) { this.scheduledExecutor = checkNotNull(scheduledExecutor); return this; } @Nonnull public Oak with(@Nonnull Executor executor) { this.executor = checkNotNull(executor); return this; } @Nonnull 
public Oak with(@Nonnull MBeanServer mbeanServer) { this.mbeanServer = checkNotNull(mbeanServer); return this; } @Nonnull public Oak with(@Nonnull Whiteboard whiteboard) { this.whiteboard = checkNotNull(whiteboard); if (securityProvider instanceof WhiteboardAware) { ((WhiteboardAware) securityProvider).setWhiteboard(whiteboard); } QueryEngineSettings queryEngineSettings = WhiteboardUtils.getService(whiteboard, QueryEngineSettings.class); if (queryEngineSettings != null) { this.queryEngineSettings = new AnnotatedQueryEngineSettings(queryEngineSettings); } return this; } @Nonnull public Oak with(@Nonnull Observer observer) { observers.add(checkNotNull(observer)); return this; } /** * <p> * Enable the asynchronous (background) indexing behavior. * </p> * <p> * Please note that when enabling the background indexer, you need to take * care of calling * <code>#shutdown</code> on the <code>executor</code> provided for this Oak instance. * </p> * @deprecated Use {@link Oak#withAsyncIndexing(String, long)} instead */ @Deprecated public Oak withAsyncIndexing() { return withAsyncIndexing("async", 5); } public Oak withFailOnMissingIndexProvider(){ failOnMissingIndexProvider = true; return this; } public Oak withAtomicCounter() { return with(new AtomicCounterEditorProvider( new Supplier<Clusterable>() { @Override public Clusterable get() { return clusterable; } }, new Supplier<ScheduledExecutorService>() { @Override public ScheduledExecutorService get() { return scheduledExecutor; } }, new Supplier<NodeStore>() { @Override public NodeStore get() { return store; } }, new Supplier<Whiteboard>() { @Override public Whiteboard get() { return whiteboard; } })); } /** * <p> * Enable the asynchronous (background) indexing behavior for the provided * task name. * </p> * <p> * Please note that when enabling the background indexer, you need to take * care of calling * <code>#shutdown</code> on the <code>executor</code> provided for this Oak instance. 
* </p> */ public Oak withAsyncIndexing(@Nonnull String name, long delayInSeconds) { if (this.asyncTasks == null) { asyncTasks = new HashMap<String, Long>(); } checkState(delayInSeconds > 0, "delayInSeconds value must be > 0"); asyncTasks.put(AsyncIndexUpdate.checkValidName(name), delayInSeconds); return this; } @Nonnull public Whiteboard getWhiteboard() { return this.whiteboard; } /** * Returns the content repository instance created with the given * configuration. If the repository doesn't exist yet, a new instance will * be created and returned for each subsequent call of this method. * * @return content repository */ public ContentRepository createContentRepository() { if (contentRepository == null) { contentRepository = createNewContentRepository(); } return contentRepository; } private ContentRepository createNewContentRepository() { final RepoStateCheckHook repoStateCheckHook = new RepoStateCheckHook(); final List<Registration> regs = Lists.newArrayList(); regs.add(whiteboard.register(Executor.class, getExecutor(), Collections.emptyMap())); IndexEditorProvider indexEditors = CompositeIndexEditorProvider.compose(indexEditorProviders); OakInitializer.initialize(store, new CompositeInitializer(initializers), indexEditors); QueryIndexProvider indexProvider = CompositeQueryIndexProvider.compose(queryIndexProviders); commitHooks.add(repoStateCheckHook); List<CommitHook> initHooks = new ArrayList<CommitHook>(commitHooks); initHooks.add(new EditorHook(CompositeEditorProvider .compose(editorProviders))); if (asyncTasks != null) { IndexMBeanRegistration indexRegistration = new IndexMBeanRegistration( whiteboard); regs.add(indexRegistration); for (Entry<String, Long> t : asyncTasks.entrySet()) { AsyncIndexUpdate task = new AsyncIndexUpdate(t.getKey(), store, indexEditors); indexRegistration.registerAsyncIndexer(task, t.getValue()); closer.register(task); } PropertyIndexAsyncReindex asyncPI = new PropertyIndexAsyncReindex( new 
AsyncIndexUpdate(IndexConstants.ASYNC_REINDEX_VALUE, store, indexEditors, true), getExecutor()); regs.add(registerMBean(whiteboard, PropertyIndexAsyncReindexMBean.class, asyncPI, PropertyIndexAsyncReindexMBean.TYPE, "async")); } if (NodeCounter.USE_OLD_COUNTER) { regs.add(registerMBean(whiteboard, NodeCounterMBean.class, new NodeCounterOld(store), NodeCounterMBean.TYPE, "nodeCounter")); } else { regs.add(registerMBean(whiteboard, NodeCounterMBean.class, new NodeCounter(store), NodeCounterMBean.TYPE, "nodeCounter")); } regs.add(registerMBean(whiteboard, QueryEngineSettingsMBean.class, queryEngineSettings, QueryEngineSettingsMBean.TYPE, "settings")); regs.add(registerMBean(whiteboard, QueryStatsMBean.class, queryEngineSettings.getQueryStats(), QueryStatsMBean.TYPE, "Oak Query Statistics (Extended)")); // FIXME: OAK-810 move to proper workspace initialization // initialize default workspace Iterable<WorkspaceInitializer> workspaceInitializers = Iterables.transform(securityProvider.getConfigurations(), new Function<SecurityConfiguration, WorkspaceInitializer>() { @Override public WorkspaceInitializer apply(SecurityConfiguration sc) { WorkspaceInitializer wi = sc.getWorkspaceInitializer(); if (wi instanceof QueryIndexProviderAware){ ((QueryIndexProviderAware) wi).setQueryIndexProvider(indexProvider); } return wi; } }); OakInitializer.initialize( workspaceInitializers, store, defaultWorkspaceName, indexEditors); // add index hooks later to prevent the OakInitializer to do excessive indexing with(new IndexUpdateProvider(indexEditors, failOnMissingIndexProvider)); withEditorHook(); // Register observer last to prevent sending events while initialising for (Observer observer : observers) { regs.add(whiteboard.register(Observer.class, observer, emptyMap())); } RepositoryManager repositoryManager = new RepositoryManager(whiteboard); regs.add(registerMBean(whiteboard, RepositoryManagementMBean.class, repositoryManager, RepositoryManagementMBean.TYPE, 
repositoryManager.getName())); CommitHook composite = CompositeHook.compose(commitHooks); regs.add(whiteboard.register(CommitHook.class, composite, Collections.emptyMap())); final Tracker<Descriptors> t = whiteboard.track(Descriptors.class); return new ContentRepositoryImpl( store, composite, defaultWorkspaceName, queryEngineSettings.unwrap(), indexProvider, securityProvider, new AggregatingDescriptors(t)) { @Override public void close() throws IOException { super.close(); repoStateCheckHook.close(); new CompositeRegistration(regs).unregister(); closer.close(); } }; } /** * Creates a content repository with the given configuration * and logs in to the default workspace with no credentials, * returning the resulting content session. * <p> * This method exists mostly as a convenience for one-off tests, * as there's no way to create other sessions for accessing the * same repository. * <p> * There is typically no need to explicitly close the returned * session unless the repository has explicitly been configured * to reserve some resources until all sessions have been closed. * The repository will be garbage collected once the session is no * longer used. * * @return content session */ public ContentSession createContentSession() { try { return createContentRepository().login(null, null); } catch (NoSuchWorkspaceException e) { throw new IllegalStateException("Default workspace not found", e); } catch (LoginException e) { throw new IllegalStateException("Anonymous login not allowed", e); } } /** * Creates a content repository with the given configuration * and returns a {@link Root} instance after logging in to the * default workspace with no credentials. * <p> * This method exists mostly as a convenience for one-off tests, as * the returned root is the only way to access the session or the * repository. 
* <p> * Note that since there is no way to close the underlying content * session, this method should only be used when no components that * require sessions to be closed have been configured. The repository * and the session will be garbage collected once the root is no longer * used. * * @return root instance */ public Root createRoot() { return createContentSession().getLatestRoot(); } /** * CommitHook to ensure that commit only go through till repository is not * closed. Once repository is closed the commits would be failed */ private static class RepoStateCheckHook implements CommitHook, Closeable { private volatile boolean closed; @Nonnull @Override public NodeState processCommit(NodeState before, NodeState after, CommitInfo info) throws CommitFailedException { if (closed){ throw new CommitFailedException( CommitFailedException.OAK, 2, "ContentRepository closed"); } return after; } @Override public void close() throws IOException { this.closed = true; } } /** * Settings of the query engine. This instance is an AnnotatedStandardMBean. */ private static final class AnnotatedQueryEngineSettings extends AnnotatedStandardMBean implements QueryEngineSettingsMBean { private final QueryEngineSettings settings; /** * Create a new query engine settings object. Creating the object is * relatively slow, and at runtime, as few such objects as possible should * be created (ideally, only one per Oak instance). Creating new instances * also means they can not be configured using JMX, as one would expect. */ private AnnotatedQueryEngineSettings(QueryEngineSettings settings) { super(QueryEngineSettingsMBean.class); this.settings = settings; } /** * Create a new query engine settings object. Creating the object is * relatively slow, and at runtime, as few such objects as possible should * be created (ideally, only one per Oak instance). Creating new instances * also means they can not be configured using JMX, as one would expect. 
*/ private AnnotatedQueryEngineSettings() { this(new QueryEngineSettings()); } @Override public long getLimitInMemory() { return settings.getLimitInMemory(); } @Override public void setLimitInMemory(long limitInMemory) { settings.setLimitInMemory(limitInMemory); } @Override public long getLimitReads() { return settings.getLimitReads(); } @Override public void setLimitReads(long limitReads) { settings.setLimitReads(limitReads); } @Override public boolean getFailTraversal() { return settings.getFailTraversal(); } @Override public void setFailTraversal(boolean failQueriesWithoutIndex) { settings.setFailTraversal(failQueriesWithoutIndex); } @Override public boolean isFastQuerySize() { return settings.isFastQuerySize(); } @Override public void setFastQuerySize(boolean fastQuerySize) { settings.setFastQuerySize(fastQuerySize); } public QueryStatsMBean getQueryStats() { return settings.getQueryStats(); } public QueryEngineSettings unwrap() { return settings; } @Override public String toString() { return settings.toString(); } } public static class OakDefaultComponents { public static final OakDefaultComponents INSTANCE = new OakDefaultComponents(); private final Iterable<CommitHook> commitHooks = ImmutableList.of(new VersionHook()); private final Iterable<RepositoryInitializer> repositoryInitializers = ImmutableList .of(new InitialContent()); private final Iterable<EditorProvider> editorProviders = ImmutableList.of( new ItemSaveValidatorProvider(), new NameValidatorProvider(), new NamespaceEditorProvider(), new TypeEditorProvider(), new ConflictValidatorProvider(), new ChangeCollectorProvider()); private final Iterable<IndexEditorProvider> indexEditorProviders = ImmutableList.of( new ReferenceEditorProvider(), new PropertyIndexEditorProvider(), new NodeCounterEditorProvider(), new OrderedPropertyIndexEditorProvider()); private final Iterable<QueryIndexProvider> queryIndexProviders = ImmutableList .of(new ReferenceIndexProvider(), new PropertyIndexProvider(), new 
NodeTypeIndexProvider()); private final SecurityProvider securityProvider = new SecurityProviderImpl(); private OakDefaultComponents() { } public Iterable<CommitHook> commitHooks() { return commitHooks; } public Iterable<RepositoryInitializer> repositoryInitializers() { return repositoryInitializers; } public Iterable<EditorProvider> editorProviders() { return editorProviders; } public Iterable<IndexEditorProvider> indexEditorProviders() { return indexEditorProviders; } public Iterable<QueryIndexProvider> queryIndexProviders() { return queryIndexProviders; } public SecurityProvider securityProvider() { return securityProvider; } } }
apache-2.0
gradle/gradle
subprojects/messaging/src/main/java/org/gradle/internal/serialize/AbstractEncoder.java
2819
/*
 * Copyright 2013 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.gradle.internal.serialize;

import javax.annotation.Nullable;
import java.io.IOException;
import java.io.OutputStream;

/**
 * Skeletal {@link Encoder} implementation. Supplies default encodings for the
 * convenience operations, expressed in terms of the primitive write methods
 * that concrete subclasses provide.
 */
public abstract class AbstractEncoder implements Encoder {
    // Lazily created stream adapter; at most one instance per encoder.
    private EncoderStream stream;

    /**
     * Returns an {@link OutputStream} view of this encoder, creating it on first use.
     * Bytes written to the stream are forwarded to this encoder's byte-writing methods.
     */
    @Override
    public OutputStream getOutputStream() {
        EncoderStream result = stream;
        if (result == null) {
            result = new EncoderStream();
            stream = result;
        }
        return result;
    }

    /** Writes the entire array as raw bytes, without any length prefix. */
    @Override
    public void writeBytes(byte[] bytes) throws IOException {
        writeBytes(bytes, 0, bytes.length);
    }

    /** Writes the entire array as a length-prefixed binary value. */
    @Override
    public void writeBinary(byte[] bytes) throws IOException {
        writeBinary(bytes, 0, bytes.length);
    }

    /** Writes a length-prefixed binary value: the count as a small int, then the raw bytes. */
    @Override
    public void writeBinary(byte[] bytes, int offset, int count) throws IOException {
        writeSmallInt(count);
        writeBytes(bytes, offset, count);
    }

    /** Chunked encoding is not supported by this base implementation. */
    @Override
    public void encodeChunked(EncodeAction<Encoder> writeAction) throws Exception {
        throw new UnsupportedOperationException();
    }

    /** Default "small" int encoding: identical to a plain int. */
    @Override
    public void writeSmallInt(int value) throws IOException {
        writeInt(value);
    }

    /** Default "small" long encoding: identical to a plain long. */
    @Override
    public void writeSmallLong(long value) throws IOException {
        writeLong(value);
    }

    /** Writes a presence flag, followed by the value only when it is non-null. */
    @Override
    public void writeNullableSmallInt(@Nullable Integer value) throws IOException {
        boolean present = value != null;
        writeBoolean(present);
        if (present) {
            writeSmallInt(value);
        }
    }

    /** Writes a presence flag, followed by the string only when it is non-null. */
    @Override
    public void writeNullableString(@Nullable CharSequence value) throws IOException {
        boolean present = value != null;
        writeBoolean(present);
        if (present) {
            writeString(value.toString());
        }
    }

    /** Adapts this encoder to the {@link OutputStream} contract. */
    private class EncoderStream extends OutputStream {
        @Override
        public void write(int b) throws IOException {
            writeByte((byte) b);
        }

        @Override
        public void write(byte[] buffer) throws IOException {
            writeBytes(buffer);
        }

        @Override
        public void write(byte[] buffer, int offset, int length) throws IOException {
            writeBytes(buffer, offset, length);
        }
    }
}
apache-2.0
newkek/incubator-tinkerpop
gremlin-core/src/main/java/org/apache/tinkerpop/gremlin/process/traversal/dsl/graph/GraphTraversal.java
82136
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.tinkerpop.gremlin.process.traversal.dsl.graph; import org.apache.tinkerpop.gremlin.process.computer.VertexProgram; import org.apache.tinkerpop.gremlin.process.computer.traversal.step.map.PageRankVertexProgramStep; import org.apache.tinkerpop.gremlin.process.computer.traversal.step.map.PeerPressureVertexProgramStep; import org.apache.tinkerpop.gremlin.process.computer.traversal.step.map.ProgramVertexProgramStep; import org.apache.tinkerpop.gremlin.process.traversal.Order; import org.apache.tinkerpop.gremlin.process.traversal.P; import org.apache.tinkerpop.gremlin.process.traversal.Path; import org.apache.tinkerpop.gremlin.process.traversal.Pop; import org.apache.tinkerpop.gremlin.process.traversal.Scope; import org.apache.tinkerpop.gremlin.process.traversal.Step; import org.apache.tinkerpop.gremlin.process.traversal.Traversal; import org.apache.tinkerpop.gremlin.process.traversal.Traverser; import org.apache.tinkerpop.gremlin.process.traversal.lambda.ColumnTraversal; import org.apache.tinkerpop.gremlin.process.traversal.lambda.FunctionTraverser; import org.apache.tinkerpop.gremlin.process.traversal.lambda.LoopTraversal; import 
org.apache.tinkerpop.gremlin.process.traversal.lambda.PredicateTraverser; import org.apache.tinkerpop.gremlin.process.traversal.lambda.TrueTraversal; import org.apache.tinkerpop.gremlin.process.traversal.step.ByModulating; import org.apache.tinkerpop.gremlin.process.traversal.step.Mutating; import org.apache.tinkerpop.gremlin.process.traversal.step.TimesModulating; import org.apache.tinkerpop.gremlin.process.traversal.step.TraversalOptionParent; import org.apache.tinkerpop.gremlin.process.traversal.step.branch.BranchStep; import org.apache.tinkerpop.gremlin.process.traversal.step.branch.ChooseStep; import org.apache.tinkerpop.gremlin.process.traversal.step.branch.LocalStep; import org.apache.tinkerpop.gremlin.process.traversal.step.branch.RepeatStep; import org.apache.tinkerpop.gremlin.process.traversal.step.branch.UnionStep; import org.apache.tinkerpop.gremlin.process.traversal.step.filter.AndStep; import org.apache.tinkerpop.gremlin.process.traversal.step.filter.CoinStep; import org.apache.tinkerpop.gremlin.process.traversal.step.filter.ConnectiveStep; import org.apache.tinkerpop.gremlin.process.traversal.step.filter.CyclicPathStep; import org.apache.tinkerpop.gremlin.process.traversal.step.filter.DedupGlobalStep; import org.apache.tinkerpop.gremlin.process.traversal.step.filter.DropStep; import org.apache.tinkerpop.gremlin.process.traversal.step.filter.HasStep; import org.apache.tinkerpop.gremlin.process.traversal.step.filter.IsStep; import org.apache.tinkerpop.gremlin.process.traversal.step.filter.LambdaFilterStep; import org.apache.tinkerpop.gremlin.process.traversal.step.filter.NotStep; import org.apache.tinkerpop.gremlin.process.traversal.step.filter.OrStep; import org.apache.tinkerpop.gremlin.process.traversal.step.filter.RangeGlobalStep; import org.apache.tinkerpop.gremlin.process.traversal.step.filter.SampleGlobalStep; import org.apache.tinkerpop.gremlin.process.traversal.step.filter.SimplePathStep; import 
org.apache.tinkerpop.gremlin.process.traversal.step.filter.TailGlobalStep; import org.apache.tinkerpop.gremlin.process.traversal.step.filter.TimeLimitStep; import org.apache.tinkerpop.gremlin.process.traversal.step.filter.TraversalFilterStep; import org.apache.tinkerpop.gremlin.process.traversal.step.filter.WherePredicateStep; import org.apache.tinkerpop.gremlin.process.traversal.step.filter.WhereTraversalStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.AddEdgeStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.AddVertexStartStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.AddVertexStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.CoalesceStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.ConstantStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.CountGlobalStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.CountLocalStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.DedupLocalStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.EdgeOtherVertexStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.EdgeVertexStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.FoldStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.GraphStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.GroupCountStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.GroupStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.GroupStepV3d0; import org.apache.tinkerpop.gremlin.process.traversal.step.map.IdStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.LabelStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.LambdaCollectingBarrierStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.LambdaFlatMapStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.LambdaMapStep; import 
org.apache.tinkerpop.gremlin.process.traversal.step.map.LoopsStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.MatchStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.MaxGlobalStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.MaxLocalStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.MeanGlobalStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.MeanLocalStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.MinGlobalStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.MinLocalStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.NoOpBarrierStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.OrderGlobalStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.OrderLocalStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.PathStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.ProjectStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.PropertiesStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.PropertyKeyStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.PropertyMapStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.PropertyValueStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.RangeLocalStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.SackStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.SampleLocalStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.SelectOneStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.SelectStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.SumGlobalStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.SumLocalStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.TailLocalStep; import 
org.apache.tinkerpop.gremlin.process.traversal.step.map.TraversalFlatMapStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.TraversalMapStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.TreeStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.UnfoldStep; import org.apache.tinkerpop.gremlin.process.traversal.step.map.VertexStep; import org.apache.tinkerpop.gremlin.process.traversal.step.sideEffect.AddPropertyStep; import org.apache.tinkerpop.gremlin.process.traversal.step.sideEffect.AggregateStep; import org.apache.tinkerpop.gremlin.process.traversal.step.sideEffect.GroupCountSideEffectStep; import org.apache.tinkerpop.gremlin.process.traversal.step.sideEffect.GroupSideEffectStep; import org.apache.tinkerpop.gremlin.process.traversal.step.sideEffect.GroupSideEffectStepV3d0; import org.apache.tinkerpop.gremlin.process.traversal.step.sideEffect.IdentityStep; import org.apache.tinkerpop.gremlin.process.traversal.step.sideEffect.InjectStep; import org.apache.tinkerpop.gremlin.process.traversal.step.sideEffect.LambdaSideEffectStep; import org.apache.tinkerpop.gremlin.process.traversal.step.sideEffect.ProfileSideEffectStep; import org.apache.tinkerpop.gremlin.process.traversal.step.sideEffect.SackValueStep; import org.apache.tinkerpop.gremlin.process.traversal.step.sideEffect.SideEffectCapStep; import org.apache.tinkerpop.gremlin.process.traversal.step.sideEffect.StartStep; import org.apache.tinkerpop.gremlin.process.traversal.step.sideEffect.StoreStep; import org.apache.tinkerpop.gremlin.process.traversal.step.sideEffect.SubgraphStep; import org.apache.tinkerpop.gremlin.process.traversal.step.sideEffect.TraversalSideEffectStep; import org.apache.tinkerpop.gremlin.process.traversal.step.sideEffect.TreeSideEffectStep; import org.apache.tinkerpop.gremlin.process.traversal.step.util.HasContainer; import org.apache.tinkerpop.gremlin.process.traversal.step.util.Tree; import 
org.apache.tinkerpop.gremlin.process.traversal.traverser.util.TraverserSet; import org.apache.tinkerpop.gremlin.process.traversal.util.TraversalHelper; import org.apache.tinkerpop.gremlin.process.traversal.util.TraversalMetrics; import org.apache.tinkerpop.gremlin.structure.Column; import org.apache.tinkerpop.gremlin.structure.Direction; import org.apache.tinkerpop.gremlin.structure.Edge; import org.apache.tinkerpop.gremlin.structure.Element; import org.apache.tinkerpop.gremlin.structure.Graph; import org.apache.tinkerpop.gremlin.structure.Property; import org.apache.tinkerpop.gremlin.structure.PropertyType; import org.apache.tinkerpop.gremlin.structure.T; import org.apache.tinkerpop.gremlin.structure.Vertex; import org.apache.tinkerpop.gremlin.structure.VertexProperty; import org.apache.tinkerpop.gremlin.util.function.ConstantSupplier; import java.util.Arrays; import java.util.Collection; import java.util.Comparator; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Predicate; /** * @author Marko A. Rodriguez (http://markorodriguez.com) */ public interface GraphTraversal<S, E> extends Traversal<S, E> { public interface Admin<S, E> extends Traversal.Admin<S, E>, GraphTraversal<S, E> { @Override public default <E2> GraphTraversal.Admin<S, E2> addStep(final Step<?, E2> step) { return (GraphTraversal.Admin<S, E2>) Traversal.Admin.super.addStep((Step) step); } @Override public default GraphTraversal<S, E> iterate() { return GraphTraversal.super.iterate(); } @Override public GraphTraversal.Admin<S, E> clone(); } @Override public default GraphTraversal.Admin<S, E> asAdmin() { return (GraphTraversal.Admin<S, E>) this; } ///////////////////// MAP STEPS ///////////////////// /** * Map a traverser referencing an object of type <code>E</code> to an object of type <code>E2</code>. 
* * @param function the lambda expression that does the functional mapping * @return the traversal with an appended {@link LambdaMapStep}. */ public default <E2> GraphTraversal<S, E2> map(final Function<Traverser<E>, E2> function) { this.asAdmin().getBytecode().addStep(Symbols.map, function); return this.asAdmin().addStep(new LambdaMapStep<>(this.asAdmin(), function)); } public default <E2> GraphTraversal<S, E2> map(final Traversal<?, E2> mapTraversal) { this.asAdmin().getBytecode().addStep(Symbols.map, mapTraversal); return this.asAdmin().addStep(new TraversalMapStep<>(this.asAdmin(), mapTraversal)); } /** * Map a {@link Traverser} referencing an object of type <code>E</code> to an iterator of objects of type <code>E2</code>. * The resultant iterator is drained one-by-one before a new <code>E</code> object is pulled in for processing. * * @param function the lambda expression that does the functional mapping * @param <E2> the type of the returned iterator objects * @return the traversal with an appended {@link LambdaFlatMapStep}. */ public default <E2> GraphTraversal<S, E2> flatMap(final Function<Traverser<E>, Iterator<E2>> function) { this.asAdmin().getBytecode().addStep(Symbols.flatMap, function); return this.asAdmin().addStep(new LambdaFlatMapStep<>(this.asAdmin(), function)); } /** * Map a {@link Traverser} referencing an object of type <code>E</code> to an iterator of objects of type <code>E2</code>. * The internal traversal is drained one-by-one before a new <code>E</code> object is pulled in for processing. * * @param flatMapTraversal the traversal generating objects of type <code>E2</code> * @param <E2> the end type of the internal traversal * @return the traversal with an appended {@link TraversalFlatMapStep}. 
*/ public default <E2> GraphTraversal<S, E2> flatMap(final Traversal<?, E2> flatMapTraversal) { this.asAdmin().getBytecode().addStep(Symbols.flatMap, flatMapTraversal); return this.asAdmin().addStep(new TraversalFlatMapStep<>(this.asAdmin(), flatMapTraversal)); } /** * Map the {@link Element} to its {@link Element#id}. * * @return the traversal with an appended {@link IdStep}. */ public default GraphTraversal<S, Object> id() { this.asAdmin().getBytecode().addStep(Symbols.id); return this.asAdmin().addStep(new IdStep<>(this.asAdmin())); } /** * Map the {@link Element} to its {@link Element#label}. * * @return the traversal with an appended {@link LabelStep}. */ public default GraphTraversal<S, String> label() { this.asAdmin().getBytecode().addStep(Symbols.label); return this.asAdmin().addStep(new LabelStep<>(this.asAdmin())); } /** * Map the <code>E</code> object to itself. In other words, a "no op." * * @return the traversal with an appended {@link IdentityStep}. */ public default GraphTraversal<S, E> identity() { this.asAdmin().getBytecode().addStep(Symbols.identity); return this.asAdmin().addStep(new IdentityStep<>(this.asAdmin())); } /** * Map any object to a fixed <code>E</code> value. * * @return the traversal with an appended {@link ConstantStep}. */ public default <E2> GraphTraversal<S, E2> constant(final E2 e) { this.asAdmin().getBytecode().addStep(Symbols.constant, e); return this.asAdmin().addStep(new ConstantStep<E, E2>(this.asAdmin(), e)); } public default GraphTraversal<S, Vertex> V(final Object... vertexIdsOrElements) { this.asAdmin().getBytecode().addStep(Symbols.V, vertexIdsOrElements); return this.asAdmin().addStep(new GraphStep<>(this.asAdmin(), Vertex.class, false, vertexIdsOrElements)); } /** * Map the {@link Vertex} to its adjacent vertices given a direction and edge labels. 
* * @param direction the direction to traverse from the current vertex * @param edgeLabels the edge labels to traverse * @return the traversal with an appended {@link VertexStep}. */ public default GraphTraversal<S, Vertex> to(final Direction direction, final String... edgeLabels) { this.asAdmin().getBytecode().addStep(Symbols.to, direction, edgeLabels); return this.asAdmin().addStep(new VertexStep<>(this.asAdmin(), Vertex.class, direction, edgeLabels)); } /** * Map the {@link Vertex} to its outgoing adjacent vertices given the edge labels. * * @param edgeLabels the edge labels to traverse * @return the traversal with an appended {@link VertexStep}. */ public default GraphTraversal<S, Vertex> out(final String... edgeLabels) { this.asAdmin().getBytecode().addStep(Symbols.out, edgeLabels); return this.asAdmin().addStep(new VertexStep<>(this.asAdmin(), Vertex.class, Direction.OUT, edgeLabels)); } /** * Map the {@link Vertex} to its incoming adjacent vertices given the edge labels. * * @param edgeLabels the edge labels to traverse * @return the traversal with an appended {@link VertexStep}. */ public default GraphTraversal<S, Vertex> in(final String... edgeLabels) { this.asAdmin().getBytecode().addStep(Symbols.in, edgeLabels); return this.asAdmin().addStep(new VertexStep<>(this.asAdmin(), Vertex.class, Direction.IN, edgeLabels)); } /** * Map the {@link Vertex} to its adjacent vertices given the edge labels. * * @param edgeLabels the edge labels to traverse * @return the traversal with an appended {@link VertexStep}. */ public default GraphTraversal<S, Vertex> both(final String... edgeLabels) { this.asAdmin().getBytecode().addStep(Symbols.both, edgeLabels); return this.asAdmin().addStep(new VertexStep<>(this.asAdmin(), Vertex.class, Direction.BOTH, edgeLabels)); } /** * Map the {@link Vertex} to its incident edges given the direction and edge labels. 
* * @param direction the direction to traverse from the current vertex * @param edgeLabels the edge labels to traverse * @return the traversal with an appended {@link VertexStep}. */ public default GraphTraversal<S, Edge> toE(final Direction direction, final String... edgeLabels) { this.asAdmin().getBytecode().addStep(Symbols.toE, direction, edgeLabels); return this.asAdmin().addStep(new VertexStep<>(this.asAdmin(), Edge.class, direction, edgeLabels)); } /** * Map the {@link Vertex} to its outgoing incident edges given the edge labels. * * @param edgeLabels the edge labels to traverse * @return the traversal with an appended {@link VertexStep}. */ public default GraphTraversal<S, Edge> outE(final String... edgeLabels) { this.asAdmin().getBytecode().addStep(Symbols.outE, edgeLabels); return this.asAdmin().addStep(new VertexStep<>(this.asAdmin(), Edge.class, Direction.OUT, edgeLabels)); } /** * Map the {@link Vertex} to its incoming incident edges given the edge labels. * * @param edgeLabels the edge labels to traverse * @return the traversal with an appended {@link VertexStep}. */ public default GraphTraversal<S, Edge> inE(final String... edgeLabels) { this.asAdmin().getBytecode().addStep(Symbols.inE, edgeLabels); return this.asAdmin().addStep(new VertexStep<>(this.asAdmin(), Edge.class, Direction.IN, edgeLabels)); } /** * Map the {@link Vertex} to its incident edges given the edge labels. * * @param edgeLabels the edge labels to traverse * @return the traversal with an appended {@link VertexStep}. */ public default GraphTraversal<S, Edge> bothE(final String... edgeLabels) { this.asAdmin().getBytecode().addStep(Symbols.bothE, edgeLabels); return this.asAdmin().addStep(new VertexStep<>(this.asAdmin(), Edge.class, Direction.BOTH, edgeLabels)); } /** * Map the {@link Edge} to its incident vertices given the direction. * * @param direction the direction to traverser from the current edge * @return the traversal with an appended {@link EdgeVertexStep}. 
*/ public default GraphTraversal<S, Vertex> toV(final Direction direction) { this.asAdmin().getBytecode().addStep(Symbols.toV, direction); return this.asAdmin().addStep(new EdgeVertexStep(this.asAdmin(), direction)); } /** * Map the {@link Edge} to its incoming/head incident {@link Vertex}. * * @return the traversal with an appended {@link EdgeVertexStep}. */ public default GraphTraversal<S, Vertex> inV() { this.asAdmin().getBytecode().addStep(Symbols.inV); return this.asAdmin().addStep(new EdgeVertexStep(this.asAdmin(), Direction.IN)); } /** * Map the {@link Edge} to its outgoing/tail incident {@link Vertex}. * * @return the traversal with an appended {@link EdgeVertexStep}. */ public default GraphTraversal<S, Vertex> outV() { this.asAdmin().getBytecode().addStep(Symbols.outV); return this.asAdmin().addStep(new EdgeVertexStep(this.asAdmin(), Direction.OUT)); } /** * Map the {@link Edge} to its incident vertices. * * @return the traversal with an appended {@link EdgeVertexStep}. */ public default GraphTraversal<S, Vertex> bothV() { this.asAdmin().getBytecode().addStep(Symbols.bothV); return this.asAdmin().addStep(new EdgeVertexStep(this.asAdmin(), Direction.BOTH)); } /** * Map the {@link Edge} to the incident vertex that was not just traversed from in the path history. * * @return the traversal with an appended {@link EdgeOtherVertexStep}. */ public default GraphTraversal<S, Vertex> otherV() { this.asAdmin().getBytecode().addStep(Symbols.otherV); return this.asAdmin().addStep(new EdgeOtherVertexStep(this.asAdmin())); } /** * Order all the objects in the traversal up to this point and then emit them one-by-one in their ordered sequence. * * @return the traversal with an appended {@link OrderGlobalStep}. */ public default GraphTraversal<S, E> order() { this.asAdmin().getBytecode().addStep(Symbols.order); return this.asAdmin().addStep(new OrderGlobalStep<>(this.asAdmin())); } /** * Order either the {@link Scope#local} object (e.g. a list, map, etc.) 
or the entire {@link Scope#global} traversal stream. * * @param scope whether the ordering is the current local object or the entire global stream. * @return the traversal with an appended {@link OrderGlobalStep} or {@link OrderLocalStep}. */ public default GraphTraversal<S, E> order(final Scope scope) { this.asAdmin().getBytecode().addStep(Symbols.order, scope); return this.asAdmin().addStep(scope.equals(Scope.global) ? new OrderGlobalStep<>(this.asAdmin()) : new OrderLocalStep<>(this.asAdmin())); } /** * Map the {@link Element} to its associated properties given the provide property keys. * If no property keys are provided, then all properties are emitted. * * @param propertyKeys the properties to retrieve * @param <E2> the value type of the returned properties * @return the traversal with an appended {@link PropertiesStep}. */ public default <E2> GraphTraversal<S, ? extends Property<E2>> properties(final String... propertyKeys) { this.asAdmin().getBytecode().addStep(Symbols.properties, propertyKeys); return this.asAdmin().addStep(new PropertiesStep<>(this.asAdmin(), PropertyType.PROPERTY, propertyKeys)); } /** * Map the {@link Element} to the values of the associated properties given the provide property keys. * If no property keys are provided, then all property values are emitted. * * @param propertyKeys the properties to retrieve their value from * @param <E2> the value type of the properties * @return the traversal with an appended {@link PropertiesStep}. */ public default <E2> GraphTraversal<S, E2> values(final String... propertyKeys) { this.asAdmin().getBytecode().addStep(Symbols.values, propertyKeys); return this.asAdmin().addStep(new PropertiesStep<>(this.asAdmin(), PropertyType.VALUE, propertyKeys)); } /** * Map the {@link Element} to a {@link Map} of the properties key'd according to their {@link Property#key}. * If no property keys are provided, then all properties are retrieved. 
* * @param propertyKeys the properties to retrieve * @param <E2> the value type of the returned properties * @return the traversal with an appended {@link PropertyMapStep}. */ public default <E2> GraphTraversal<S, Map<String, E2>> propertyMap(final String... propertyKeys) { this.asAdmin().getBytecode().addStep(Symbols.propertyMap, propertyKeys); return this.asAdmin().addStep(new PropertyMapStep<>(this.asAdmin(), false, PropertyType.PROPERTY, propertyKeys)); } /** * Map the {@link Element} to a {@link Map} of the property values key'd according to their {@link Property#key}. * If no property keys are provided, then all property values are retrieved. * * @param propertyKeys the properties to retrieve * @param <E2> the value type of the returned properties * @return the traversal with an appended {@link PropertyMapStep}. */ public default <E2> GraphTraversal<S, Map<String, E2>> valueMap(final String... propertyKeys) { this.asAdmin().getBytecode().addStep(Symbols.valueMap, propertyKeys); return this.asAdmin().addStep(new PropertyMapStep<>(this.asAdmin(), false, PropertyType.VALUE, propertyKeys)); } /** * Map the {@link Element} to a {@link Map} of the property values key'd according to their {@link Property#key}. * If no property keys are provided, then all property values are retrieved. * * @param includeTokens whether to include {@link T} tokens in the emitted map. * @param propertyKeys the properties to retrieve * @param <E2> the value type of the returned properties * @return the traversal with an appended {@link PropertyMapStep}. */ public default <E2> GraphTraversal<S, Map<String, E2>> valueMap(final boolean includeTokens, final String... 
propertyKeys) { this.asAdmin().getBytecode().addStep(Symbols.valueMap, includeTokens, propertyKeys); return this.asAdmin().addStep(new PropertyMapStep<>(this.asAdmin(), includeTokens, PropertyType.VALUE, propertyKeys)); } public default <E2> GraphTraversal<S, Collection<E2>> select(final Column column) { this.asAdmin().getBytecode().addStep(Symbols.select, column); return this.asAdmin().addStep(new TraversalMapStep<>(this.asAdmin(), new ColumnTraversal(column))); } /** * @deprecated As of release 3.1.0, replaced by {@link GraphTraversal#select(Column)} */ @Deprecated public default <E2> GraphTraversal<S, E2> mapValues() { return this.select(Column.values).unfold(); } /** * @deprecated As of release 3.1.0, replaced by {@link GraphTraversal#select(Column)} */ @Deprecated public default <E2> GraphTraversal<S, E2> mapKeys() { return this.select(Column.keys).unfold(); } /** * Map the {@link Property} to its {@link Property#key}. * * @return the traversal with an appended {@link PropertyKeyStep}. */ public default GraphTraversal<S, String> key() { this.asAdmin().getBytecode().addStep(Symbols.key); return this.asAdmin().addStep(new PropertyKeyStep(this.asAdmin())); } /** * Map the {@link Property} to its {@link Property#value}. * * @return the traversal with an appended {@link PropertyValueStep}. */ public default <E2> GraphTraversal<S, E2> value() { this.asAdmin().getBytecode().addStep(Symbols.value); return this.asAdmin().addStep(new PropertyValueStep<>(this.asAdmin())); } /** * Map the {@link Traverser} to its {@link Path} history via {@link Traverser#path}. * * @return the traversal with an appended {@link PathStep}. */ public default GraphTraversal<S, Path> path() { this.asAdmin().getBytecode().addStep(Symbols.path); return this.asAdmin().addStep(new PathStep<>(this.asAdmin())); } /** * Map the {@link Traverser} to a {@link Map} of bindings as specified by the provided match traversals. 
* * @param matchTraversals the traversal that maintain variables which must hold for the life of the traverser * @param <E2> the type of the obejcts bound in the variables * @return the traversal with an appended {@link MatchStep}. */ public default <E2> GraphTraversal<S, Map<String, E2>> match(final Traversal<?, ?>... matchTraversals) { this.asAdmin().getBytecode().addStep(Symbols.match, matchTraversals); return this.asAdmin().addStep(new MatchStep<>(this.asAdmin(), ConnectiveStep.Connective.AND, matchTraversals)); } /** * Map the {@link Traverser} to its {@link Traverser#sack} value. * * @param <E2> the sack value type * @return the traversal with an appended {@link SackStep}. */ public default <E2> GraphTraversal<S, E2> sack() { this.asAdmin().getBytecode().addStep(Symbols.sack); return this.asAdmin().addStep(new SackStep<>(this.asAdmin())); } public default GraphTraversal<S, Integer> loops() { this.asAdmin().getBytecode().addStep(Symbols.loops); return this.asAdmin().addStep(new LoopsStep<>(this.asAdmin())); } public default <E2> GraphTraversal<S, Map<String, E2>> project(final String projectKey, final String... otherProjectKeys) { final String[] projectKeys = new String[otherProjectKeys.length + 1]; projectKeys[0] = projectKey; System.arraycopy(otherProjectKeys, 0, projectKeys, 1, otherProjectKeys.length); this.asAdmin().getBytecode().addStep(Symbols.project, projectKey, otherProjectKeys); return this.asAdmin().addStep(new ProjectStep<>(this.asAdmin(), projectKeys)); } /** * Map the {@link Traverser} to a {@link Map} projection of sideEffect values, map values, and/or path values. * * @param pop if there are multiple objects referenced in the path, the {@link Pop} to use. * @param selectKey1 the first key to project * @param selectKey2 the second key to project * @param otherSelectKeys the third+ keys to project * @param <E2> the type of the objects projected * @return the traversal with an appended {@link SelectStep}. 
*/ public default <E2> GraphTraversal<S, Map<String, E2>> select(final Pop pop, final String selectKey1, final String selectKey2, String... otherSelectKeys) { final String[] selectKeys = new String[otherSelectKeys.length + 2]; selectKeys[0] = selectKey1; selectKeys[1] = selectKey2; System.arraycopy(otherSelectKeys, 0, selectKeys, 2, otherSelectKeys.length); this.asAdmin().getBytecode().addStep(Symbols.select, pop, selectKey1, selectKey2, otherSelectKeys); return this.asAdmin().addStep(new SelectStep<>(this.asAdmin(), pop, selectKeys)); } /** * Map the {@link Traverser} to a {@link Map} projection of sideEffect values, map values, and/or path values. * * @param selectKey1 the first key to project * @param selectKey2 the second key to project * @param otherSelectKeys the third+ keys to project * @param <E2> the type of the objects projected * @return the traversal with an appended {@link SelectStep}. */ public default <E2> GraphTraversal<S, Map<String, E2>> select(final String selectKey1, final String selectKey2, String... 
otherSelectKeys) { final String[] selectKeys = new String[otherSelectKeys.length + 2]; selectKeys[0] = selectKey1; selectKeys[1] = selectKey2; System.arraycopy(otherSelectKeys, 0, selectKeys, 2, otherSelectKeys.length); this.asAdmin().getBytecode().addStep(Symbols.select, selectKey1, selectKey2, otherSelectKeys); return this.asAdmin().addStep(new SelectStep<>(this.asAdmin(), null, selectKeys)); } public default <E2> GraphTraversal<S, E2> select(final Pop pop, final String selectKey) { this.asAdmin().getBytecode().addStep(Symbols.select, pop, selectKey); return this.asAdmin().addStep(new SelectOneStep<>(this.asAdmin(), pop, selectKey)); } public default <E2> GraphTraversal<S, E2> select(final String selectKey) { this.asAdmin().getBytecode().addStep(Symbols.select, selectKey); return this.asAdmin().addStep(new SelectOneStep<>(this.asAdmin(), null, selectKey)); } public default <E2> GraphTraversal<S, E2> unfold() { this.asAdmin().getBytecode().addStep(Symbols.unfold); return this.asAdmin().addStep(new UnfoldStep<>(this.asAdmin())); } public default GraphTraversal<S, List<E>> fold() { this.asAdmin().getBytecode().addStep(Symbols.fold); return this.asAdmin().addStep(new FoldStep<>(this.asAdmin())); } public default <E2> GraphTraversal<S, E2> fold(final E2 seed, final BiFunction<E2, E, E2> foldFunction) { this.asAdmin().getBytecode().addStep(Symbols.fold, seed, foldFunction); return this.asAdmin().addStep(new FoldStep<>(this.asAdmin(), new ConstantSupplier<>(seed), foldFunction)); // TODO: User should provide supplier? } /** * Map the traversal stream to its reduction as a sum of the {@link Traverser#bulk} values (i.e. count the number of traversers up to this point). * * @return the traversal with an appended {@link CountGlobalStep}. 
*/ public default GraphTraversal<S, Long> count() { this.asAdmin().getBytecode().addStep(Symbols.count); return this.asAdmin().addStep(new CountGlobalStep<>(this.asAdmin())); } public default GraphTraversal<S, Long> count(final Scope scope) { this.asAdmin().getBytecode().addStep(Symbols.count, scope); return this.asAdmin().addStep(scope.equals(Scope.global) ? new CountGlobalStep<>(this.asAdmin()) : new CountLocalStep<>(this.asAdmin())); } /** * Map the traversal stream to its reduction as a sum of the {@link Traverser#get} values multiplied by their {@link Traverser#bulk} (i.e. sum the traverser values up to this point). * * @return the traversal with an appended {@link SumGlobalStep}. */ public default <E2 extends Number> GraphTraversal<S, E2> sum() { this.asAdmin().getBytecode().addStep(Symbols.sum); return this.asAdmin().addStep(new SumGlobalStep<>(this.asAdmin())); } public default <E2 extends Number> GraphTraversal<S, E2> sum(final Scope scope) { this.asAdmin().getBytecode().addStep(Symbols.sum, scope); return this.asAdmin().addStep(scope.equals(Scope.global) ? new SumGlobalStep<>(this.asAdmin()) : new SumLocalStep(this.asAdmin())); } public default <E2 extends Number> GraphTraversal<S, E2> max() { this.asAdmin().getBytecode().addStep(Symbols.max); return this.asAdmin().addStep(new MaxGlobalStep<>(this.asAdmin())); } public default <E2 extends Number> GraphTraversal<S, E2> max(final Scope scope) { this.asAdmin().getBytecode().addStep(Symbols.max, scope); return this.asAdmin().addStep(scope.equals(Scope.global) ? 
new MaxGlobalStep<>(this.asAdmin()) : new MaxLocalStep(this.asAdmin())); } public default <E2 extends Number> GraphTraversal<S, E2> min() { this.asAdmin().getBytecode().addStep(Symbols.min); return this.asAdmin().addStep(new MinGlobalStep<>(this.asAdmin())); } public default <E2 extends Number> GraphTraversal<S, E2> min(final Scope scope) { this.asAdmin().getBytecode().addStep(Symbols.min, scope); return this.asAdmin().addStep(scope.equals(Scope.global) ? new MinGlobalStep<E2>(this.asAdmin()) : new MinLocalStep<>(this.asAdmin())); } public default <E2 extends Number> GraphTraversal<S, E2> mean() { this.asAdmin().getBytecode().addStep(Symbols.mean); return this.asAdmin().addStep(new MeanGlobalStep<>(this.asAdmin())); } public default <E2 extends Number> GraphTraversal<S, E2> mean(final Scope scope) { this.asAdmin().getBytecode().addStep(Symbols.mean, scope); return this.asAdmin().addStep(scope.equals(Scope.global) ? new MeanGlobalStep<>(this.asAdmin()) : new MeanLocalStep(this.asAdmin())); } public default <K, V> GraphTraversal<S, Map<K, V>> group() { this.asAdmin().getBytecode().addStep(Symbols.group); return this.asAdmin().addStep(new GroupStep<>(this.asAdmin())); } /** * @deprecated As of release 3.1.0, replaced by {@link #group()} */ @Deprecated public default <K, V> GraphTraversal<S, Map<K, V>> groupV3d0() { this.asAdmin().getBytecode().addStep(Symbols.groupV3d0); return this.asAdmin().addStep(new GroupStepV3d0<>(this.asAdmin())); } public default <K> GraphTraversal<S, Map<K, Long>> groupCount() { this.asAdmin().getBytecode().addStep(Symbols.groupCount); return this.asAdmin().addStep(new GroupCountStep<>(this.asAdmin())); } public default GraphTraversal<S, Tree> tree() { this.asAdmin().getBytecode().addStep(Symbols.tree); return this.asAdmin().addStep(new TreeStep<>(this.asAdmin())); } public default GraphTraversal<S, Vertex> addV(final String vertexLabel) { this.asAdmin().getBytecode().addStep(Symbols.addV, vertexLabel); return this.asAdmin().addStep(new 
AddVertexStep<>(this.asAdmin(), vertexLabel)); } public default GraphTraversal<S, Vertex> addV() { this.asAdmin().getBytecode().addStep(Symbols.addV); return this.asAdmin().addStep(new AddVertexStep<>(this.asAdmin(), null)); } /** * @deprecated As of release 3.1.0, replaced by {@link #addV()} */ @Deprecated public default GraphTraversal<S, Vertex> addV(final Object... propertyKeyValues) { this.addV(); for (int i = 0; i < propertyKeyValues.length; i = i + 2) { this.property(propertyKeyValues[i], propertyKeyValues[i + 1]); } //((AddVertexStep) this.asAdmin().getEndStep()).addPropertyMutations(propertyKeyValues); return (GraphTraversal<S, Vertex>) this; } public default GraphTraversal<S, Edge> addE(final String edgeLabel) { this.asAdmin().getBytecode().addStep(Symbols.addE, edgeLabel); return this.asAdmin().addStep(new AddEdgeStep<>(this.asAdmin(), edgeLabel)); } public default GraphTraversal<S, E> to(final String toStepLabel) { this.asAdmin().getBytecode().addStep(Symbols.to, toStepLabel); ((AddEdgeStep) this.asAdmin().getEndStep()).addTo(__.select(toStepLabel)); return this; } public default GraphTraversal<S, E> from(final String fromStepLabel) { this.asAdmin().getBytecode().addStep(Symbols.from, fromStepLabel); ((AddEdgeStep) this.asAdmin().getEndStep()).addFrom(__.select(fromStepLabel)); return this; } public default GraphTraversal<S, E> to(final Traversal<E, Vertex> toVertex) { this.asAdmin().getBytecode().addStep(Symbols.to, toVertex); ((AddEdgeStep) this.asAdmin().getEndStep()).addTo(toVertex); return this; } public default GraphTraversal<S, E> from(final Traversal<E, Vertex> fromVertex) { this.asAdmin().getBytecode().addStep(Symbols.from, fromVertex); ((AddEdgeStep) this.asAdmin().getEndStep()).addFrom(fromVertex); return this; } /** * @deprecated As of release 3.1.0, replaced by {@link #addE(String)} */ @Deprecated public default GraphTraversal<S, Edge> addE(final Direction direction, final String firstVertexKeyOrEdgeLabel, final String 
edgeLabelOrSecondVertexKey, final Object... propertyKeyValues) { if (propertyKeyValues.length % 2 == 0) { // addOutE("createdBy", "a") this.addE(firstVertexKeyOrEdgeLabel); if (direction.equals(Direction.OUT)) this.to(edgeLabelOrSecondVertexKey); else this.from(edgeLabelOrSecondVertexKey); for (int i = 0; i < propertyKeyValues.length; i = i + 2) { this.property(propertyKeyValues[i], propertyKeyValues[i + 1]); } //((Mutating) this.asAdmin().getEndStep()).addPropertyMutations(propertyKeyValues); return (GraphTraversal<S, Edge>) this; } else { // addInE("a", "codeveloper", "b", "year", 2009) this.addE(edgeLabelOrSecondVertexKey); if (direction.equals(Direction.OUT)) this.from(firstVertexKeyOrEdgeLabel).to((String) propertyKeyValues[0]); else this.to(firstVertexKeyOrEdgeLabel).from((String) propertyKeyValues[0]); for (int i = 1; i < propertyKeyValues.length; i = i + 2) { this.property(propertyKeyValues[i], propertyKeyValues[i + 1]); } //((Mutating) this.asAdmin().getEndStep()).addPropertyMutations(Arrays.copyOfRange(propertyKeyValues, 1, propertyKeyValues.length)); return (GraphTraversal<S, Edge>) this; } } /** * @deprecated As of release 3.1.0, replaced by {@link #addE(String)} */ @Deprecated public default GraphTraversal<S, Edge> addOutE(final String firstVertexKeyOrEdgeLabel, final String edgeLabelOrSecondVertexKey, final Object... propertyKeyValues) { return this.addE(Direction.OUT, firstVertexKeyOrEdgeLabel, edgeLabelOrSecondVertexKey, propertyKeyValues); } /** * @deprecated As of release 3.1.0, replaced by {@link #addE(String)} */ @Deprecated public default GraphTraversal<S, Edge> addInE(final String firstVertexKeyOrEdgeLabel, final String edgeLabelOrSecondVertexKey, final Object... 
propertyKeyValues) {
    return this.addE(Direction.IN, firstVertexKeyOrEdgeLabel, edgeLabelOrSecondVertexKey, propertyKeyValues);
}

///////////////////// FILTER STEPS /////////////////////

/**
 * Filter traversers using the supplied predicate, which is evaluated against each {@link Traverser}.
 *
 * @param predicate the filter condition applied to each traverser
 * @return the traversal with an appended {@link LambdaFilterStep}
 */
public default GraphTraversal<S, E> filter(final Predicate<Traverser<E>> predicate) {
    this.asAdmin().getBytecode().addStep(Symbols.filter, predicate);
    return this.asAdmin().addStep(new LambdaFilterStep<>(this.asAdmin(), predicate));
}

/**
 * Filter traversers using the supplied traversal as the filter condition.
 *
 * @param filterTraversal the traversal used as the filter condition
 * @return the traversal with an appended {@link TraversalFilterStep}
 */
public default GraphTraversal<S, E> filter(final Traversal<?, ?> filterTraversal) {
    this.asAdmin().getBytecode().addStep(Symbols.filter, filterTraversal);
    return this.asAdmin().addStep(new TraversalFilterStep<>(this.asAdmin(), (Traversal) filterTraversal));
}

/**
 * Combine the supplied traversals as a logical OR filter condition.
 *
 * @param orTraversals the traversals combined with logical OR
 * @return the traversal with an appended {@link OrStep}
 */
public default GraphTraversal<S, E> or(final Traversal<?, ?>... orTraversals) {
    this.asAdmin().getBytecode().addStep(Symbols.or, orTraversals);
    return this.asAdmin().addStep(new OrStep(this.asAdmin(), orTraversals));
}

/**
 * Combine the supplied traversals as a logical AND filter condition.
 *
 * @param andTraversals the traversals combined with logical AND
 * @return the traversal with an appended {@link AndStep}
 */
public default GraphTraversal<S, E> and(final Traversal<?, ?>... andTraversals) {
    this.asAdmin().getBytecode().addStep(Symbols.and, andTraversals);
    return this.asAdmin().addStep(new AndStep(this.asAdmin(), andTraversals));
}

/**
 * Inject the provided objects into the traversal stream.
 *
 * @param injections the objects to add to the stream
 * @return the traversal with an appended {@link InjectStep}
 */
public default GraphTraversal<S, E> inject(final E... injections) {
    this.asAdmin().getBytecode().addStep(Symbols.inject, injections);
    return this.asAdmin().addStep(new InjectStep<>(this.asAdmin(), injections));
}

/**
 * Remove all duplicates in the traversal stream up to this point.
 *
 * @param scope       whether the deduplication is on the stream (global) or the current object (local).
 * @param dedupLabels if labels are provided, then the scope labels determine de-duplication. No labels implies current object.
 * @return the traversal with an appended {@link DedupGlobalStep}.
 */
public default GraphTraversal<S, E> dedup(final Scope scope, final String... dedupLabels) {
    this.asAdmin().getBytecode().addStep(Symbols.dedup, scope, dedupLabels);
    return this.asAdmin().addStep(scope.equals(Scope.global) ?
new DedupGlobalStep<>(this.asAdmin(), dedupLabels) : new DedupLocalStep(this.asAdmin())); } /** * Remove all duplicates in the traversal stream up to this point. * * @param dedupLabels if labels are provided, then the scoped object's labels determine de-duplication. No labels implies current object. * @return the traversal with an appended {@link DedupGlobalStep}. */ public default GraphTraversal<S, E> dedup(final String... dedupLabels) { this.asAdmin().getBytecode().addStep(Symbols.dedup, dedupLabels); return this.asAdmin().addStep(new DedupGlobalStep<>(this.asAdmin(), dedupLabels)); } public default GraphTraversal<S, E> where(final String startKey, final P<String> predicate) { this.asAdmin().getBytecode().addStep(Symbols.where, startKey, predicate); return this.asAdmin().addStep(new WherePredicateStep<>(this.asAdmin(), Optional.ofNullable(startKey), predicate)); } public default GraphTraversal<S, E> where(final P<String> predicate) { this.asAdmin().getBytecode().addStep(Symbols.where, predicate); return this.asAdmin().addStep(new WherePredicateStep<>(this.asAdmin(), Optional.empty(), predicate)); } public default GraphTraversal<S, E> where(final Traversal<?, ?> whereTraversal) { this.asAdmin().getBytecode().addStep(Symbols.where, whereTraversal); return TraversalHelper.getVariableLocations(whereTraversal.asAdmin()).isEmpty() ? 
this.asAdmin().addStep(new TraversalFilterStep<>(this.asAdmin(), (Traversal) whereTraversal)) : this.asAdmin().addStep(new WhereTraversalStep<>(this.asAdmin(), whereTraversal)); } public default GraphTraversal<S, E> has(final String propertyKey, final P<?> predicate) { this.asAdmin().getBytecode().addStep(Symbols.has, propertyKey, predicate); return this.asAdmin().addStep(new HasStep(this.asAdmin(), HasContainer.makeHasContainers(propertyKey, predicate))); } public default GraphTraversal<S, E> has(final T accessor, final P<?> predicate) { this.asAdmin().getBytecode().addStep(Symbols.has, accessor, predicate); return this.asAdmin().addStep(new HasStep(this.asAdmin(), HasContainer.makeHasContainers(accessor.getAccessor(), predicate))); } public default GraphTraversal<S, E> has(final String propertyKey, final Object value) { if (value instanceof P) return this.has(propertyKey, (P) value); else if (value instanceof Traversal) return this.has(propertyKey, (Traversal) value); else return this.has(propertyKey, P.eq(value)); } public default GraphTraversal<S, E> has(final T accessor, final Object value) { this.asAdmin().getBytecode().addStep(Symbols.has, accessor, value); return value instanceof P ? 
this.asAdmin().addStep(new HasStep(this.asAdmin(), HasContainer.makeHasContainers(accessor.getAccessor(), (P) value))) : this.asAdmin().addStep(new HasStep(this.asAdmin(), HasContainer.makeHasContainers(accessor.getAccessor(), P.eq(value)))); } public default GraphTraversal<S, E> has(final String label, final String propertyKey, final P<?> predicate) { this.asAdmin().getBytecode().addStep(Symbols.has, label, propertyKey, predicate); this.asAdmin().addStep(new HasStep(this.asAdmin(), HasContainer.makeHasContainers(T.label.getAccessor(), P.eq(label)))); return this.asAdmin().addStep(new HasStep(this.asAdmin(), HasContainer.makeHasContainers(propertyKey, predicate))); } public default GraphTraversal<S, E> has(final String label, final String propertyKey, final Object value) { this.asAdmin().getBytecode().addStep(Symbols.has, label, propertyKey, value); this.asAdmin().addStep(new HasStep(this.asAdmin(), HasContainer.makeHasContainers(T.label.getAccessor(), P.eq(label)))); return value instanceof P ? 
this.asAdmin().addStep(new HasStep(this.asAdmin(), HasContainer.makeHasContainers(propertyKey, (P) value))) : this.asAdmin().addStep(new HasStep(this.asAdmin(), HasContainer.makeHasContainers(propertyKey, P.eq(value)))); } public default GraphTraversal<S, E> has(final T accessor, final Traversal<?, ?> propertyTraversal) { this.asAdmin().getBytecode().addStep(Symbols.has, accessor, propertyTraversal); return this.asAdmin().addStep( new TraversalFilterStep<>(this.asAdmin(), propertyTraversal.asAdmin().addStep(0, new PropertiesStep(propertyTraversal.asAdmin(), PropertyType.VALUE, accessor.getAccessor())))); } public default GraphTraversal<S, E> has(final String propertyKey, final Traversal<?, ?> propertyTraversal) { this.asAdmin().getBytecode().addStep(Symbols.has, propertyKey, propertyTraversal); return this.asAdmin().addStep( new TraversalFilterStep<>(this.asAdmin(), propertyTraversal.asAdmin().addStep(0, new PropertiesStep(propertyTraversal.asAdmin(), PropertyType.VALUE, propertyKey)))); } public default GraphTraversal<S, E> has(final String propertyKey) { this.asAdmin().getBytecode().addStep(Symbols.has, propertyKey); return this.asAdmin().addStep(new TraversalFilterStep<>(this.asAdmin(), __.values(propertyKey))); } public default GraphTraversal<S, E> hasNot(final String propertyKey) { this.asAdmin().getBytecode().addStep(Symbols.hasNot, propertyKey); return this.asAdmin().addStep(new NotStep<>(this.asAdmin(), __.values(propertyKey))); } public default GraphTraversal<S, E> has(final T accessor, final Object value, final Object... values) { if (value instanceof Object[]) { final Object[] arr = (Object[]) value; if (values.length == 0) { if (arr.length == 1) { return has(accessor, P.eq(arr[0])); } return has(accessor, P.within(arr)); } } else if (values.length == 0) { return has(accessor, value instanceof P ? 
(P) value : P.eq(value)); } final Object[] objects = new Object[values.length + 1]; objects[0] = value; System.arraycopy(values, 0, objects, 1, values.length); return has(accessor, P.within(objects)); } public default GraphTraversal<S, E> hasLabel(final Object value, final Object... values) { return has(T.label, value, values); } public default GraphTraversal<S, E> hasId(final Object value, final Object... values) { return has(T.id, value, values); } public default GraphTraversal<S, E> hasKey(final Object value, final Object... values) { return has(T.key, value, values); } public default GraphTraversal<S, E> hasValue(final Object value, final Object... values) { return has(T.value, value, values); } public default GraphTraversal<S, E> is(final P<E> predicate) { this.asAdmin().getBytecode().addStep(Symbols.is, predicate); return this.asAdmin().addStep(new IsStep<>(this.asAdmin(), predicate)); } /** * Filter the <code>E</code> object if it is not {@link P#eq} to the provided value. * * @param value the value that the object must equal. * @return the traversal with an appended {@link IsStep}. */ public default GraphTraversal<S, E> is(final Object value) { this.asAdmin().getBytecode().addStep(Symbols.is, value); return value instanceof P ? this.asAdmin().addStep(new IsStep<>(this.asAdmin(), (P<E>) value)) : this.asAdmin().addStep(new IsStep<>(this.asAdmin(), P.eq((E) value))); } public default GraphTraversal<S, E> not(final Traversal<?, ?> notTraversal) { this.asAdmin().getBytecode().addStep(Symbols.not, notTraversal); return this.asAdmin().addStep(new NotStep<>(this.asAdmin(), (Traversal<E, ?>) notTraversal)); } /** * Filter the <code>E</code> object given a biased coin toss. * * @param probability the probability that the object will pass through * @return the traversal with an appended {@link CoinStep}. 
*/ public default GraphTraversal<S, E> coin(final double probability) { this.asAdmin().getBytecode().addStep(Symbols.coin, probability); return this.asAdmin().addStep(new CoinStep<>(this.asAdmin(), probability)); } public default GraphTraversal<S, E> range(final long low, final long high) { this.asAdmin().getBytecode().addStep(Symbols.range, low, high); return this.asAdmin().addStep(new RangeGlobalStep<>(this.asAdmin(), low, high)); } public default <E2> GraphTraversal<S, E2> range(final Scope scope, final long low, final long high) { this.asAdmin().getBytecode().addStep(Symbols.range, scope, low, high); return this.asAdmin().addStep(scope.equals(Scope.global) ? new RangeGlobalStep<>(this.asAdmin(), low, high) : new RangeLocalStep<>(this.asAdmin(), low, high)); } public default GraphTraversal<S, E> limit(final long limit) { this.asAdmin().getBytecode().addStep(Symbols.limit, limit); return this.asAdmin().addStep(new RangeGlobalStep<>(this.asAdmin(), 0, limit)); } public default <E2> GraphTraversal<S, E2> limit(final Scope scope, final long limit) { this.asAdmin().getBytecode().addStep(Symbols.limit, scope, limit); return this.asAdmin().addStep(scope.equals(Scope.global) ? new RangeGlobalStep<>(this.asAdmin(), 0, limit) : new RangeLocalStep<>(this.asAdmin(), 0, limit)); } public default GraphTraversal<S, E> tail() { this.asAdmin().getBytecode().addStep(Symbols.tail); return this.asAdmin().addStep(new TailGlobalStep<>(this.asAdmin(), 1)); } public default GraphTraversal<S, E> tail(final long limit) { this.asAdmin().getBytecode().addStep(Symbols.tail, limit); return this.asAdmin().addStep(new TailGlobalStep<>(this.asAdmin(), limit)); } public default <E2> GraphTraversal<S, E2> tail(final Scope scope) { this.asAdmin().getBytecode().addStep(Symbols.tail, scope); return this.asAdmin().addStep(scope.equals(Scope.global) ? 
new TailGlobalStep<>(this.asAdmin(), 1) : new TailLocalStep<>(this.asAdmin(), 1)); } public default <E2> GraphTraversal<S, E2> tail(final Scope scope, final long limit) { this.asAdmin().getBytecode().addStep(Symbols.tail, scope, limit); return this.asAdmin().addStep(scope.equals(Scope.global) ? new TailGlobalStep<>(this.asAdmin(), limit) : new TailLocalStep<>(this.asAdmin(), limit)); } /** * Once the first {@link Traverser} hits this step, a count down is started. Once the time limit is up, all remaining traversers are filtered out. * * @param timeLimit the count down time * @return the traversal with an appended {@link TimeLimitStep} */ public default GraphTraversal<S, E> timeLimit(final long timeLimit) { this.asAdmin().getBytecode().addStep(Symbols.timeLimit, timeLimit); return this.asAdmin().addStep(new TimeLimitStep<E>(this.asAdmin(), timeLimit)); } /** * Filter the <code>E</code> object if its {@link Traverser#path} is not {@link Path#isSimple}. * * @return the traversal with an appended {@link SimplePathStep}. */ public default GraphTraversal<S, E> simplePath() { this.asAdmin().getBytecode().addStep(Symbols.simplePath); return this.asAdmin().addStep(new SimplePathStep<>(this.asAdmin())); } /** * Filter the <code>E</code> object if its {@link Traverser#path} is {@link Path#isSimple}. * * @return the traversal with an appended {@link CyclicPathStep}. 
*/ public default GraphTraversal<S, E> cyclicPath() { this.asAdmin().getBytecode().addStep(Symbols.cyclicPath); return this.asAdmin().addStep(new CyclicPathStep<>(this.asAdmin())); } public default GraphTraversal<S, E> sample(final int amountToSample) { this.asAdmin().getBytecode().addStep(Symbols.sample, amountToSample); return this.asAdmin().addStep(new SampleGlobalStep<>(this.asAdmin(), amountToSample)); } public default GraphTraversal<S, E> sample(final Scope scope, final int amountToSample) { this.asAdmin().getBytecode().addStep(Symbols.sample, scope, amountToSample); return this.asAdmin().addStep(scope.equals(Scope.global) ? new SampleGlobalStep<>(this.asAdmin(), amountToSample) : new SampleLocalStep<>(this.asAdmin(), amountToSample)); } public default GraphTraversal<S, E> drop() { this.asAdmin().getBytecode().addStep(Symbols.drop); return this.asAdmin().addStep(new DropStep<>(this.asAdmin())); } ///////////////////// SIDE-EFFECT STEPS ///////////////////// public default GraphTraversal<S, E> sideEffect(final Consumer<Traverser<E>> consumer) { this.asAdmin().getBytecode().addStep(Symbols.sideEffect, consumer); return this.asAdmin().addStep(new LambdaSideEffectStep<>(this.asAdmin(), consumer)); } public default GraphTraversal<S, E> sideEffect(final Traversal<?, ?> sideEffectTraversal) { this.asAdmin().getBytecode().addStep(Symbols.sideEffect, sideEffectTraversal); return this.asAdmin().addStep(new TraversalSideEffectStep<>(this.asAdmin(), (Traversal) sideEffectTraversal)); } public default <E2> GraphTraversal<S, E2> cap(final String sideEffectKey, final String... 
sideEffectKeys) { this.asAdmin().getBytecode().addStep(Symbols.cap, sideEffectKey, sideEffectKeys); return this.asAdmin().addStep(new SideEffectCapStep<>(this.asAdmin(), sideEffectKey, sideEffectKeys)); } public default GraphTraversal<S, Edge> subgraph(final String sideEffectKey) { this.asAdmin().getBytecode().addStep(Symbols.subgraph, sideEffectKey); return this.asAdmin().addStep(new SubgraphStep(this.asAdmin(), sideEffectKey)); } public default GraphTraversal<S, E> aggregate(final String sideEffectKey) { this.asAdmin().getBytecode().addStep(Symbols.aggregate, sideEffectKey); return this.asAdmin().addStep(new AggregateStep<>(this.asAdmin(), sideEffectKey)); } public default GraphTraversal<S, E> group(final String sideEffectKey) { this.asAdmin().getBytecode().addStep(Symbols.group, sideEffectKey); return this.asAdmin().addStep(new GroupSideEffectStep<>(this.asAdmin(), sideEffectKey)); } /** * @deprecated As of release 3.1.0, replaced by {@link #group(String)}. */ public default GraphTraversal<S, E> groupV3d0(final String sideEffectKey) { this.asAdmin().getBytecode().addStep(Symbols.groupV3d0, sideEffectKey); return this.asAdmin().addStep(new GroupSideEffectStepV3d0<>(this.asAdmin(), sideEffectKey)); } public default GraphTraversal<S, E> groupCount(final String sideEffectKey) { this.asAdmin().getBytecode().addStep(Symbols.groupCount, sideEffectKey); return this.asAdmin().addStep(new GroupCountSideEffectStep<>(this.asAdmin(), sideEffectKey)); } public default GraphTraversal<S, E> tree(final String sideEffectKey) { this.asAdmin().getBytecode().addStep(Symbols.tree, sideEffectKey); return this.asAdmin().addStep(new TreeSideEffectStep<>(this.asAdmin(), sideEffectKey)); } public default <V, U> GraphTraversal<S, E> sack(final BiFunction<V, U, V> sackOperator) { this.asAdmin().getBytecode().addStep(Symbols.sack, sackOperator); return this.asAdmin().addStep(new SackValueStep<>(this.asAdmin(), sackOperator)); } /** * @deprecated As of release 3.1.0, replaced by {@link 
#sack(BiFunction)} with {@link #by(String)}. */ @Deprecated public default <V, U> GraphTraversal<S, E> sack(final BiFunction<V, U, V> sackOperator, final String elementPropertyKey) { return this.sack(sackOperator).by(elementPropertyKey); } public default GraphTraversal<S, E> store(final String sideEffectKey) { this.asAdmin().getBytecode().addStep(Symbols.store, sideEffectKey); return this.asAdmin().addStep(new StoreStep<>(this.asAdmin(), sideEffectKey)); } public default GraphTraversal<S, E> profile(final String sideEffectKey) { this.asAdmin().getBytecode().addStep(Traversal.Symbols.profile, sideEffectKey); return this.asAdmin().addStep(new ProfileSideEffectStep<>(this.asAdmin(), sideEffectKey)); } @Override public default GraphTraversal<S, TraversalMetrics> profile() { return (GraphTraversal<S, TraversalMetrics>) Traversal.super.profile(); } /** * Sets a {@link Property} value and related meta properties if supplied, if supported by the {@link Graph} * and if the {@link Element} is a {@link VertexProperty}. This method is the long-hand version of * {@link #property(Object, Object, Object...)} with the difference that the * {@link org.apache.tinkerpop.gremlin.structure.VertexProperty.Cardinality} can be supplied. * <p/> * Generally speaking, this method will append an {@link AddPropertyStep} to the {@link Traversal} but when * possible, this method will attempt to fold key/value pairs into an {@link AddVertexStep}, {@link AddEdgeStep} or * {@link AddVertexStartStep}. This potential optimization can only happen if cardinality is not supplied * and when meta-properties are not included. 
* * @param cardinality the specified cardinality of the property where {@code null} will allow the {@link Graph} * to use its default settings * @param key the key for the property * @param value the value for the property * @param keyValues any meta properties to be assigned to this property */ public default GraphTraversal<S, E> property(final VertexProperty.Cardinality cardinality, final Object key, final Object value, final Object... keyValues) { if (null == cardinality) this.asAdmin().getBytecode().addStep(Symbols.property, key, value, keyValues); else this.asAdmin().getBytecode().addStep(Symbols.property, cardinality, key, value, keyValues); // if it can be detected that this call to property() is related to an addV/E() then we can attempt to fold // the properties into that step to gain an optimization for those graphs that support such capabilities. if ((this.asAdmin().getEndStep() instanceof AddVertexStep || this.asAdmin().getEndStep() instanceof AddEdgeStep || this.asAdmin().getEndStep() instanceof AddVertexStartStep) && keyValues.length == 0 && null == cardinality) { ((Mutating) this.asAdmin().getEndStep()).addPropertyMutations(key, value); } else { this.asAdmin().addStep(new AddPropertyStep(this.asAdmin(), cardinality, key, value)); ((AddPropertyStep) this.asAdmin().getEndStep()).addPropertyMutations(keyValues); } return this; } /** * Sets the key and value of a {@link Property}. If the {@link Element} is a {@link VertexProperty} and the * {@link Graph} supports it, meta properties can be set. Use of this method assumes that the * {@link org.apache.tinkerpop.gremlin.structure.VertexProperty.Cardinality} is defaulted to {@code null} which * means that the default cardinality for the {@link Graph} will be used. * <p/> * This method is effectively calls * {@link #property(org.apache.tinkerpop.gremlin.structure.VertexProperty.Cardinality, Object, Object, Object...)} * as {@code property(null, key, value, keyValues}. 
* * @param key the key for the property * @param value the value for the property * @param keyValues any meta properties to be assigned to this property */ public default GraphTraversal<S, E> property(final Object key, final Object value, final Object... keyValues) { return key instanceof VertexProperty.Cardinality ? this.property((VertexProperty.Cardinality) key, value, keyValues[0], keyValues.length > 1 ? Arrays.copyOfRange(keyValues, 1, keyValues.length) : new Object[]{}) : this.property(null, key, value, keyValues); } ///////////////////// BRANCH STEPS ///////////////////// public default <M, E2> GraphTraversal<S, E2> branch(final Traversal<?, M> branchTraversal) { this.asAdmin().getBytecode().addStep(Symbols.branch, branchTraversal); final BranchStep<E, E2, M> branchStep = new BranchStep<>(this.asAdmin()); branchStep.setBranchTraversal((Traversal.Admin<E, M>) branchTraversal); return this.asAdmin().addStep(branchStep); } public default <M, E2> GraphTraversal<S, E2> branch(final Function<Traverser<E>, M> function) { this.asAdmin().getBytecode().addStep(Symbols.branch, function); final BranchStep<E, E2, M> branchStep = new BranchStep<>(this.asAdmin()); branchStep.setBranchTraversal((Traversal.Admin<E, M>) __.map(function)); return this.asAdmin().addStep(branchStep); } public default <M, E2> GraphTraversal<S, E2> choose(final Traversal<?, M> choiceTraversal) { this.asAdmin().getBytecode().addStep(Symbols.choose, choiceTraversal); return this.asAdmin().addStep(new ChooseStep<>(this.asAdmin(), (Traversal.Admin<E, M>) choiceTraversal)); } public default <E2> GraphTraversal<S, E2> choose(final Traversal<?, ?> traversalPredicate, final Traversal<?, E2> trueChoice, final Traversal<?, E2> falseChoice) { this.asAdmin().getBytecode().addStep(Symbols.choose, traversalPredicate, trueChoice, falseChoice); return this.asAdmin().addStep(new ChooseStep<E, E2, Boolean>(this.asAdmin(), (Traversal.Admin<E, ?>) traversalPredicate, (Traversal.Admin<E, E2>) trueChoice, 
(Traversal.Admin<E, E2>) falseChoice)); } public default <M, E2> GraphTraversal<S, E2> choose(final Function<E, M> choiceFunction) { this.asAdmin().getBytecode().addStep(Symbols.choose, choiceFunction); return this.asAdmin().addStep(new ChooseStep<>(this.asAdmin(), (Traversal.Admin<E, M>) __.map(new FunctionTraverser<>(choiceFunction)))); } public default <E2> GraphTraversal<S, E2> choose(final Predicate<E> choosePredicate, final Traversal<?, E2> trueChoice, final Traversal<?, E2> falseChoice) { this.asAdmin().getBytecode().addStep(Symbols.choose, choosePredicate, trueChoice, falseChoice); return this.asAdmin().addStep(new ChooseStep<E, E2, Boolean>(this.asAdmin(), (Traversal.Admin<E, ?>) __.filter(new PredicateTraverser<>(choosePredicate)), (Traversal.Admin<E, E2>) trueChoice, (Traversal.Admin<E, E2>) falseChoice)); } public default <E2> GraphTraversal<S, E2> optional(final Traversal<?, E2> optionalTraversal) { this.asAdmin().getBytecode().addStep(Symbols.optional, optionalTraversal); return this.asAdmin().addStep(new ChooseStep<>(this.asAdmin(), (Traversal.Admin<E, ?>) optionalTraversal, (Traversal.Admin<E, E2>) optionalTraversal.asAdmin().clone(), (Traversal.Admin<E, E2>) __.<E2>identity())); } public default <E2> GraphTraversal<S, E2> union(final Traversal<?, E2>... unionTraversals) { this.asAdmin().getBytecode().addStep(Symbols.union, unionTraversals); return this.asAdmin().addStep(new UnionStep(this.asAdmin(), Arrays.copyOf(unionTraversals, unionTraversals.length, Traversal.Admin[].class))); } public default <E2> GraphTraversal<S, E2> coalesce(final Traversal<?, E2>... 
coalesceTraversals) { this.asAdmin().getBytecode().addStep(Symbols.coalesce, coalesceTraversals); return this.asAdmin().addStep(new CoalesceStep(this.asAdmin(), Arrays.copyOf(coalesceTraversals, coalesceTraversals.length, Traversal.Admin[].class))); } public default GraphTraversal<S, E> repeat(final Traversal<?, E> repeatTraversal) { this.asAdmin().getBytecode().addStep(Symbols.repeat, repeatTraversal); return RepeatStep.addRepeatToTraversal(this, (Traversal.Admin<E, E>) repeatTraversal); } public default GraphTraversal<S, E> emit(final Traversal<?, ?> emitTraversal) { this.asAdmin().getBytecode().addStep(Symbols.emit, emitTraversal); return RepeatStep.addEmitToTraversal(this, (Traversal.Admin<E, ?>) emitTraversal); } public default GraphTraversal<S, E> emit(final Predicate<Traverser<E>> emitPredicate) { this.asAdmin().getBytecode().addStep(Symbols.emit, emitPredicate); return RepeatStep.addEmitToTraversal(this, (Traversal.Admin<E, ?>) __.filter(emitPredicate)); } public default GraphTraversal<S, E> emit() { this.asAdmin().getBytecode().addStep(Symbols.emit); return RepeatStep.addEmitToTraversal(this, TrueTraversal.instance()); } public default GraphTraversal<S, E> until(final Traversal<?, ?> untilTraversal) { this.asAdmin().getBytecode().addStep(Symbols.until, untilTraversal); return RepeatStep.addUntilToTraversal(this, (Traversal.Admin<E, ?>) untilTraversal); } public default GraphTraversal<S, E> until(final Predicate<Traverser<E>> untilPredicate) { this.asAdmin().getBytecode().addStep(Symbols.until, untilPredicate); return RepeatStep.addEmitToTraversal(this, (Traversal.Admin<E, ?>) __.filter(untilPredicate)); } public default GraphTraversal<S, E> times(final int maxLoops) { this.asAdmin().getBytecode().addStep(Symbols.times, maxLoops); if (this.asAdmin().getEndStep() instanceof TimesModulating) { ((TimesModulating) this.asAdmin().getEndStep()).modulateTimes(maxLoops); return this; } else return RepeatStep.addUntilToTraversal(this, new LoopTraversal<>(maxLoops)); 
}

/**
 * Apply the supplied traversal to each incoming object in local (per-object) scope.
 */
public default <E2> GraphTraversal<S, E2> local(final Traversal<?, E2> localTraversal) {
    this.asAdmin().getBytecode().addStep(Symbols.local, localTraversal);
    return this.asAdmin().addStep(new LocalStep<>(this.asAdmin(), localTraversal.asAdmin()));
}

/////////////////// VERTEX PROGRAM STEPS ////////////////

/**
 * Run the PageRank vertex program with the default alpha of 0.85.
 */
public default GraphTraversal<S, E> pageRank() {
    return this.pageRank(0.85d);
}

/**
 * Run the PageRank vertex program with the supplied alpha value.
 */
public default GraphTraversal<S, E> pageRank(final double alpha) {
    this.asAdmin().getBytecode().addStep(Symbols.pageRank, alpha);
    return this.asAdmin().addStep((Step<E, E>) new PageRankVertexProgramStep(this.asAdmin(), alpha));
}

/**
 * Run the peer-pressure vertex program.
 */
public default GraphTraversal<S, E> peerPressure() {
    this.asAdmin().getBytecode().addStep(Symbols.peerPressure);
    return this.asAdmin().addStep((Step<E, E>) new PeerPressureVertexProgramStep(this.asAdmin()));
}

/**
 * Run an arbitrary {@link VertexProgram} against the graph.
 * NOTE(review): unlike the other steps in this section, this records no bytecode — confirm intentional.
 */
public default GraphTraversal<S, E> program(final VertexProgram<?> vertexProgram) {
    return this.asAdmin().addStep((Step<E, E>) new ProgramVertexProgramStep(this.asAdmin(), vertexProgram));
}

///////////////////// UTILITY STEPS /////////////////////

/**
 * Attach one or more labels to the most recently added step so it can be referenced later.
 */
public default GraphTraversal<S, E> as(final String stepLabel, final String...
stepLabels) { this.asAdmin().getBytecode().addStep(Symbols.as, stepLabel, stepLabels); if (this.asAdmin().getSteps().size() == 0) this.asAdmin().addStep(new StartStep<>(this.asAdmin())); final Step<?, E> endStep = this.asAdmin().getEndStep(); endStep.addLabel(stepLabel); for (final String label : stepLabels) { endStep.addLabel(label); } return this; } public default GraphTraversal<S, E> barrier() { return this.barrier(Integer.MAX_VALUE); } public default GraphTraversal<S, E> barrier(final int maxBarrierSize) { this.asAdmin().getBytecode().addStep(Symbols.barrier, maxBarrierSize); return this.asAdmin().addStep(new NoOpBarrierStep<>(this.asAdmin(), maxBarrierSize)); } public default GraphTraversal<S, E> barrier(final Consumer<TraverserSet<Object>> barrierConsumer) { this.asAdmin().getBytecode().addStep(Symbols.barrier, barrierConsumer); return this.asAdmin().addStep(new LambdaCollectingBarrierStep<>(this.asAdmin(), (Consumer) barrierConsumer, Integer.MAX_VALUE)); } //// BY-MODULATORS public default GraphTraversal<S, E> by() { this.asAdmin().getBytecode().addStep(Symbols.by); ((ByModulating) this.asAdmin().getEndStep()).modulateBy(); return this; } public default GraphTraversal<S, E> by(final Traversal<?, ?> traversal) { this.asAdmin().getBytecode().addStep(Symbols.by, traversal); ((ByModulating) this.asAdmin().getEndStep()).modulateBy(traversal.asAdmin()); return this; } public default GraphTraversal<S, E> by(final T token) { this.asAdmin().getBytecode().addStep(Symbols.by, token); ((ByModulating) this.asAdmin().getEndStep()).modulateBy(token); return this; } public default GraphTraversal<S, E> by(final String key) { this.asAdmin().getBytecode().addStep(Symbols.by, key); ((ByModulating) this.asAdmin().getEndStep()).modulateBy(key); return this; } public default <V> GraphTraversal<S, E> by(final Function<V, Object> function) { this.asAdmin().getBytecode().addStep(Symbols.by, function); ((ByModulating) this.asAdmin().getEndStep()).modulateBy(function); return this; } 
//// COMPARATOR BY-MODULATORS public default <V> GraphTraversal<S, E> by(final Traversal<?, ?> traversal, final Comparator<V> comparator) { this.asAdmin().getBytecode().addStep(Symbols.by, traversal, comparator); ((ByModulating) this.asAdmin().getEndStep()).modulateBy(traversal.asAdmin(), comparator); return this; } public default GraphTraversal<S, E> by(final Comparator<E> comparator) { this.asAdmin().getBytecode().addStep(Symbols.by, comparator); ((ByModulating) this.asAdmin().getEndStep()).modulateBy(comparator); return this; } public default GraphTraversal<S, E> by(final Order order) { this.asAdmin().getBytecode().addStep(Symbols.by, order); ((ByModulating) this.asAdmin().getEndStep()).modulateBy(order); return this; } public default <V> GraphTraversal<S, E> by(final String key, final Comparator<V> comparator) { this.asAdmin().getBytecode().addStep(Symbols.by, key, comparator); ((ByModulating) this.asAdmin().getEndStep()).modulateBy(key, comparator); return this; } /*public default <V> GraphTraversal<S, E> by(final Column column, final Comparator<V> comparator) { ((ByModulating) this.asAdmin().getEndStep()).modulateBy(column, comparator); return this; } public default <V> GraphTraversal<S, E> by(final T token, final Comparator<V> comparator) { ((ByModulating) this.asAdmin().getEndStep()).modulateBy(token, comparator); return this; }*/ public default <U> GraphTraversal<S, E> by(final Function<U, Object> function, final Comparator comparator) { this.asAdmin().getBytecode().addStep(Symbols.by, function, comparator); ((ByModulating) this.asAdmin().getEndStep()).modulateBy(function, comparator); return this; } //// public default <M, E2> GraphTraversal<S, E> option(final M pickToken, final Traversal<E, E2> traversalOption) { this.asAdmin().getBytecode().addStep(Symbols.option, pickToken, traversalOption); ((TraversalOptionParent<M, E, E2>) this.asAdmin().getEndStep()).addGlobalChildOption(pickToken, traversalOption.asAdmin()); return this; } public default <E2> 
GraphTraversal<S, E> option(final Traversal<E, E2> traversalOption) { this.asAdmin().getBytecode().addStep(Symbols.option, traversalOption); return this.option(TraversalOptionParent.Pick.any, traversalOption.asAdmin()); } //// @Override public default GraphTraversal<S, E> iterate() { Traversal.super.iterate(); return this; } //// public static final class Symbols { private Symbols() { // static fields only } public static final String map = "map"; public static final String flatMap = "flatMap"; public static final String id = "id"; public static final String label = "label"; public static final String identity = "identity"; public static final String constant = "constant"; public static final String V = "V"; public static final String E = "E"; public static final String to = "to"; public static final String out = "out"; public static final String in = "in"; public static final String both = "both"; public static final String toE = "toE"; public static final String outE = "outE"; public static final String inE = "inE"; public static final String bothE = "bothE"; public static final String toV = "toV"; public static final String outV = "outV"; public static final String inV = "inV"; public static final String bothV = "bothV"; public static final String otherV = "otherV"; public static final String order = "order"; public static final String properties = "properties"; public static final String values = "values"; public static final String propertyMap = "propertyMap"; public static final String valueMap = "valueMap"; public static final String select = "select"; public static final String key = "key"; public static final String value = "value"; public static final String path = "path"; public static final String match = "match"; public static final String sack = "sack"; public static final String loops = "loops"; public static final String project = "project"; public static final String unfold = "unfold"; public static final String fold = "fold"; public static final 
String count = "count"; public static final String sum = "sum"; public static final String max = "max"; public static final String min = "min"; public static final String mean = "mean"; public static final String group = "group"; @Deprecated public static final String groupV3d0 = "groupV3d0"; public static final String groupCount = "groupCount"; public static final String tree = "tree"; public static final String addV = "addV"; public static final String addE = "addE"; public static final String from = "from"; public static final String filter = "filter"; public static final String or = "or"; public static final String and = "and"; public static final String inject = "inject"; public static final String dedup = "dedup"; public static final String where = "where"; public static final String has = "has"; public static final String hasNot = "hasNot"; public static final String hasLabel = "hasLabel"; public static final String hasId = "hasId"; public static final String hasKey = "hasKey"; public static final String hasValue = "hasValue"; public static final String is = "is"; public static final String not = "not"; public static final String range = "range"; public static final String limit = "limit"; public static final String tail = "tail"; public static final String coin = "coin"; public static final String timeLimit = "timeLimit"; public static final String simplePath = "simplePath"; public static final String cyclicPath = "cyclicPath"; public static final String sample = "sample"; public static final String drop = "drop"; public static final String sideEffect = "sideEffect"; public static final String cap = "cap"; public static final String property = "property"; public static final String store = "store"; public static final String aggregate = "aggregate"; public static final String subgraph = "subgraph"; public static final String barrier = "barrier"; public static final String local = "local"; public static final String emit = "emit"; public static final String 
repeat = "repeat"; public static final String until = "until"; public static final String branch = "branch"; public static final String union = "union"; public static final String coalesce = "coalesce"; public static final String choose = "choose"; public static final String optional = "optional"; public static final String pageRank = "pageRank"; public static final String peerPressure = "peerPressure"; public static final String program = "program"; public static final String by = "by"; public static final String times = "times"; public static final String as = "as"; public static final String option = "option"; } }
apache-2.0
igniterealtime/Smack
smack-core/src/test/java/org/jivesoftware/smack/packet/StreamErrorTest.java
6184
/** * * Copyright the original author or authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jivesoftware.smack.packet; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import java.io.IOException; import org.jivesoftware.smack.packet.StreamError.Condition; import org.jivesoftware.smack.parsing.SmackParsingException; import org.jivesoftware.smack.test.util.SmackTestUtil; import org.jivesoftware.smack.util.PacketParserUtils; import org.jivesoftware.smack.xml.XmlPullParser; import org.jivesoftware.smack.xml.XmlPullParserException; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.EnumSource; public class StreamErrorTest { @ParameterizedTest @EnumSource(SmackTestUtil.XmlPullParserKind.class) public void testParsingOfSimpleStreamError(SmackTestUtil.XmlPullParserKind parserKind) throws XmlPullParserException, IOException, SmackParsingException { final String xml = // Usually the stream:stream element has more attributes (to, version, ...) 
// We omit those, since they are not relevant for testing "<stream:stream from='im.example.com' id='++TR84Sm6A3hnt3Q065SnAbbk3Y=' xmlns:stream='http://etherx.jabber.org/streams'>" + "<stream:error>" + "<conflict xmlns='urn:ietf:params:xml:ns:xmpp-streams' /> +" + "</stream:error>" + "</stream:stream>"; XmlPullParser parser = SmackTestUtil.getParserFor(xml, "error", parserKind); StreamError error = PacketParserUtils.parseStreamError(parser); assertNotNull(error); assertEquals(Condition.conflict, error.getCondition()); } @ParameterizedTest @EnumSource(SmackTestUtil.XmlPullParserKind.class) public void testParsingOfStreamErrorWithText(SmackTestUtil.XmlPullParserKind parserKind) throws XmlPullParserException, IOException, SmackParsingException { final String xml = // Usually the stream:stream element has more attributes (to, version, ...) // We omit those, since they are not relevant for testing "<stream:stream from='im.example.com' id='++TR84Sm6A3hnt3Q065SnAbbk3Y=' xmlns:stream='http://etherx.jabber.org/streams'>" + "<stream:error>" + "<conflict xmlns='urn:ietf:params:xml:ns:xmpp-streams' />" + "<text xml:lang='' xmlns='urn:ietf:params:xml:ns:xmpp-streams'>" + "Replaced by new connection" + "</text>" + "</stream:error>" + "</stream:stream>"; XmlPullParser parser = SmackTestUtil.getParserFor(xml, "error", parserKind); StreamError error = PacketParserUtils.parseStreamError(parser); assertNotNull(error); assertEquals(Condition.conflict, error.getCondition()); assertEquals("Replaced by new connection", error.getDescriptiveText()); } @ParameterizedTest @EnumSource(SmackTestUtil.XmlPullParserKind.class) public void testParsingOfStreamErrorWithTextAndOptionalElement(SmackTestUtil.XmlPullParserKind parserKind) throws XmlPullParserException, IOException, SmackParsingException { final String xml = // Usually the stream:stream element has more attributes (to, version, ...) 
// We omit those, since they are not relevant for testing "<stream:stream from='im.example.com' id='++TR84Sm6A3hnt3Q065SnAbbk3Y=' xmlns:stream='http://etherx.jabber.org/streams'>" + "<stream:error>" + "<conflict xmlns='urn:ietf:params:xml:ns:xmpp-streams' />" + "<text xml:lang='' xmlns='urn:ietf:params:xml:ns:xmpp-streams'>" + "Replaced by new connection" + "</text>" + "<appSpecificElement xmlns='myns'>" + "Text contents of application-specific condition element: Foo Bar" + "</appSpecificElement>" + "</stream:error>" + "</stream:stream>"; XmlPullParser parser = SmackTestUtil.getParserFor(xml, "error", parserKind); StreamError error = PacketParserUtils.parseStreamError(parser); assertNotNull(error); assertEquals(Condition.conflict, error.getCondition()); assertEquals("Replaced by new connection", error.getDescriptiveText()); XmlElement appSpecificElement = error.getExtension("appSpecificElement", "myns"); assertNotNull(appSpecificElement); } @ParameterizedTest @EnumSource(SmackTestUtil.XmlPullParserKind.class) public void testStreamErrorXmlNotWellFormed(SmackTestUtil.XmlPullParserKind parserKind) throws XmlPullParserException, IOException, SmackParsingException { final String xml = // Usually the stream:stream element has more attributes (to, version, ...) // We omit those, since they are not relevant for testing "<stream:stream from='im.example.com' id='++TR84Sm6A3hnt3Q065SnAbbk3Y=' xmlns:stream='http://etherx.jabber.org/streams'>" + "<stream:error><xml-not-well-formed xmlns='urn:ietf:params:xml:ns:xmpp-streams'/></stream:error>" + "</stream:stream>"; XmlPullParser parser = SmackTestUtil.getParserFor(xml, "error", parserKind); StreamError error = PacketParserUtils.parseStreamError(parser); assertNotNull(error); assertEquals(Condition.not_well_formed, error.getCondition()); } }
apache-2.0
pieterjanpintens/grpc-java
grpclb/src/generated/main/java/io/grpc/grpclb/ClientStats.java
45158
// Generated by the protocol buffer compiler. DO NOT EDIT! // source: load_balancer.proto package io.grpc.grpclb; /** * <pre> * Contains client level statistics that are useful to load balancing. Each * count except the timestamp should be reset to zero after reporting the stats. * </pre> * * Protobuf type {@code grpc.lb.v1.ClientStats} */ public final class ClientStats extends com.google.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:grpc.lb.v1.ClientStats) ClientStatsOrBuilder { private static final long serialVersionUID = 0L; // Use ClientStats.newBuilder() to construct. private ClientStats(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) { super(builder); } private ClientStats() { numCallsStarted_ = 0L; numCallsFinished_ = 0L; numCallsFinishedWithClientFailedToSend_ = 0L; numCallsFinishedKnownReceived_ = 0L; callsFinishedWithDrop_ = java.util.Collections.emptyList(); } @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ClientStats( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { this(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownFieldProto3( input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { com.google.protobuf.Timestamp.Builder subBuilder = null; if (timestamp_ != null) { subBuilder = timestamp_.toBuilder(); } timestamp_ = input.readMessage(com.google.protobuf.Timestamp.parser(), extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(timestamp_); timestamp_ = subBuilder.buildPartial(); } break; } case 16: { numCallsStarted_ = input.readInt64(); 
break; } case 24: { numCallsFinished_ = input.readInt64(); break; } case 48: { numCallsFinishedWithClientFailedToSend_ = input.readInt64(); break; } case 56: { numCallsFinishedKnownReceived_ = input.readInt64(); break; } case 66: { if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) { callsFinishedWithDrop_ = new java.util.ArrayList<io.grpc.grpclb.ClientStatsPerToken>(); mutable_bitField0_ |= 0x00000020; } callsFinishedWithDrop_.add( input.readMessage(io.grpc.grpclb.ClientStatsPerToken.parser(), extensionRegistry)); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) { callsFinishedWithDrop_ = java.util.Collections.unmodifiableList(callsFinishedWithDrop_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return io.grpc.grpclb.LoadBalancerProto.internal_static_grpc_lb_v1_ClientStats_descriptor; } protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return io.grpc.grpclb.LoadBalancerProto.internal_static_grpc_lb_v1_ClientStats_fieldAccessorTable .ensureFieldAccessorsInitialized( io.grpc.grpclb.ClientStats.class, io.grpc.grpclb.ClientStats.Builder.class); } private int bitField0_; public static final int TIMESTAMP_FIELD_NUMBER = 1; private com.google.protobuf.Timestamp timestamp_; /** * <pre> * The timestamp of generating the report. * </pre> * * <code>.google.protobuf.Timestamp timestamp = 1;</code> */ public boolean hasTimestamp() { return timestamp_ != null; } /** * <pre> * The timestamp of generating the report. 
* </pre> * * <code>.google.protobuf.Timestamp timestamp = 1;</code> */ public com.google.protobuf.Timestamp getTimestamp() { return timestamp_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : timestamp_; } /** * <pre> * The timestamp of generating the report. * </pre> * * <code>.google.protobuf.Timestamp timestamp = 1;</code> */ public com.google.protobuf.TimestampOrBuilder getTimestampOrBuilder() { return getTimestamp(); } public static final int NUM_CALLS_STARTED_FIELD_NUMBER = 2; private long numCallsStarted_; /** * <pre> * The total number of RPCs that started. * </pre> * * <code>int64 num_calls_started = 2;</code> */ public long getNumCallsStarted() { return numCallsStarted_; } public static final int NUM_CALLS_FINISHED_FIELD_NUMBER = 3; private long numCallsFinished_; /** * <pre> * The total number of RPCs that finished. * </pre> * * <code>int64 num_calls_finished = 3;</code> */ public long getNumCallsFinished() { return numCallsFinished_; } public static final int NUM_CALLS_FINISHED_WITH_CLIENT_FAILED_TO_SEND_FIELD_NUMBER = 6; private long numCallsFinishedWithClientFailedToSend_; /** * <pre> * The total number of RPCs that failed to reach a server except dropped RPCs. * </pre> * * <code>int64 num_calls_finished_with_client_failed_to_send = 6;</code> */ public long getNumCallsFinishedWithClientFailedToSend() { return numCallsFinishedWithClientFailedToSend_; } public static final int NUM_CALLS_FINISHED_KNOWN_RECEIVED_FIELD_NUMBER = 7; private long numCallsFinishedKnownReceived_; /** * <pre> * The total number of RPCs that finished and are known to have been received * by a server. * </pre> * * <code>int64 num_calls_finished_known_received = 7;</code> */ public long getNumCallsFinishedKnownReceived() { return numCallsFinishedKnownReceived_; } public static final int CALLS_FINISHED_WITH_DROP_FIELD_NUMBER = 8; private java.util.List<io.grpc.grpclb.ClientStatsPerToken> callsFinishedWithDrop_; /** * <pre> * The list of dropped calls. 
* </pre> * * <code>repeated .grpc.lb.v1.ClientStatsPerToken calls_finished_with_drop = 8;</code> */ public java.util.List<io.grpc.grpclb.ClientStatsPerToken> getCallsFinishedWithDropList() { return callsFinishedWithDrop_; } /** * <pre> * The list of dropped calls. * </pre> * * <code>repeated .grpc.lb.v1.ClientStatsPerToken calls_finished_with_drop = 8;</code> */ public java.util.List<? extends io.grpc.grpclb.ClientStatsPerTokenOrBuilder> getCallsFinishedWithDropOrBuilderList() { return callsFinishedWithDrop_; } /** * <pre> * The list of dropped calls. * </pre> * * <code>repeated .grpc.lb.v1.ClientStatsPerToken calls_finished_with_drop = 8;</code> */ public int getCallsFinishedWithDropCount() { return callsFinishedWithDrop_.size(); } /** * <pre> * The list of dropped calls. * </pre> * * <code>repeated .grpc.lb.v1.ClientStatsPerToken calls_finished_with_drop = 8;</code> */ public io.grpc.grpclb.ClientStatsPerToken getCallsFinishedWithDrop(int index) { return callsFinishedWithDrop_.get(index); } /** * <pre> * The list of dropped calls. 
* </pre> * * <code>repeated .grpc.lb.v1.ClientStatsPerToken calls_finished_with_drop = 8;</code> */ public io.grpc.grpclb.ClientStatsPerTokenOrBuilder getCallsFinishedWithDropOrBuilder( int index) { return callsFinishedWithDrop_.get(index); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (timestamp_ != null) { output.writeMessage(1, getTimestamp()); } if (numCallsStarted_ != 0L) { output.writeInt64(2, numCallsStarted_); } if (numCallsFinished_ != 0L) { output.writeInt64(3, numCallsFinished_); } if (numCallsFinishedWithClientFailedToSend_ != 0L) { output.writeInt64(6, numCallsFinishedWithClientFailedToSend_); } if (numCallsFinishedKnownReceived_ != 0L) { output.writeInt64(7, numCallsFinishedKnownReceived_); } for (int i = 0; i < callsFinishedWithDrop_.size(); i++) { output.writeMessage(8, callsFinishedWithDrop_.get(i)); } unknownFields.writeTo(output); } public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (timestamp_ != null) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, getTimestamp()); } if (numCallsStarted_ != 0L) { size += com.google.protobuf.CodedOutputStream .computeInt64Size(2, numCallsStarted_); } if (numCallsFinished_ != 0L) { size += com.google.protobuf.CodedOutputStream .computeInt64Size(3, numCallsFinished_); } if (numCallsFinishedWithClientFailedToSend_ != 0L) { size += com.google.protobuf.CodedOutputStream .computeInt64Size(6, numCallsFinishedWithClientFailedToSend_); } if (numCallsFinishedKnownReceived_ != 0L) { size += com.google.protobuf.CodedOutputStream .computeInt64Size(7, numCallsFinishedKnownReceived_); } for (int i = 0; i < callsFinishedWithDrop_.size(); i++) { size += 
com.google.protobuf.CodedOutputStream .computeMessageSize(8, callsFinishedWithDrop_.get(i)); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof io.grpc.grpclb.ClientStats)) { return super.equals(obj); } io.grpc.grpclb.ClientStats other = (io.grpc.grpclb.ClientStats) obj; boolean result = true; result = result && (hasTimestamp() == other.hasTimestamp()); if (hasTimestamp()) { result = result && getTimestamp() .equals(other.getTimestamp()); } result = result && (getNumCallsStarted() == other.getNumCallsStarted()); result = result && (getNumCallsFinished() == other.getNumCallsFinished()); result = result && (getNumCallsFinishedWithClientFailedToSend() == other.getNumCallsFinishedWithClientFailedToSend()); result = result && (getNumCallsFinishedKnownReceived() == other.getNumCallsFinishedKnownReceived()); result = result && getCallsFinishedWithDropList() .equals(other.getCallsFinishedWithDropList()); result = result && unknownFields.equals(other.unknownFields); return result; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasTimestamp()) { hash = (37 * hash) + TIMESTAMP_FIELD_NUMBER; hash = (53 * hash) + getTimestamp().hashCode(); } hash = (37 * hash) + NUM_CALLS_STARTED_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashLong( getNumCallsStarted()); hash = (37 * hash) + NUM_CALLS_FINISHED_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashLong( getNumCallsFinished()); hash = (37 * hash) + NUM_CALLS_FINISHED_WITH_CLIENT_FAILED_TO_SEND_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashLong( getNumCallsFinishedWithClientFailedToSend()); hash = (37 * hash) + NUM_CALLS_FINISHED_KNOWN_RECEIVED_FIELD_NUMBER; hash = (53 * hash) + 
com.google.protobuf.Internal.hashLong( getNumCallsFinishedKnownReceived()); if (getCallsFinishedWithDropCount() > 0) { hash = (37 * hash) + CALLS_FINISHED_WITH_DROP_FIELD_NUMBER; hash = (53 * hash) + getCallsFinishedWithDropList().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static io.grpc.grpclb.ClientStats parseFrom( java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static io.grpc.grpclb.ClientStats parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static io.grpc.grpclb.ClientStats parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static io.grpc.grpclb.ClientStats parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static io.grpc.grpclb.ClientStats parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static io.grpc.grpclb.ClientStats parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static io.grpc.grpclb.ClientStats parseFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static io.grpc.grpclb.ClientStats parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return 
com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static io.grpc.grpclb.ClientStats parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static io.grpc.grpclb.ClientStats parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static io.grpc.grpclb.ClientStats parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static io.grpc.grpclb.ClientStats parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(io.grpc.grpclb.ClientStats prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * <pre> * Contains client level statistics that are useful to load balancing. Each * count except the timestamp should be reset to zero after reporting the stats. 
* </pre> * * Protobuf type {@code grpc.lb.v1.ClientStats} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements // @@protoc_insertion_point(builder_implements:grpc.lb.v1.ClientStats) io.grpc.grpclb.ClientStatsOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return io.grpc.grpclb.LoadBalancerProto.internal_static_grpc_lb_v1_ClientStats_descriptor; } protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return io.grpc.grpclb.LoadBalancerProto.internal_static_grpc_lb_v1_ClientStats_fieldAccessorTable .ensureFieldAccessorsInitialized( io.grpc.grpclb.ClientStats.class, io.grpc.grpclb.ClientStats.Builder.class); } // Construct using io.grpc.grpclb.ClientStats.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getCallsFinishedWithDropFieldBuilder(); } } public Builder clear() { super.clear(); if (timestampBuilder_ == null) { timestamp_ = null; } else { timestamp_ = null; timestampBuilder_ = null; } numCallsStarted_ = 0L; numCallsFinished_ = 0L; numCallsFinishedWithClientFailedToSend_ = 0L; numCallsFinishedKnownReceived_ = 0L; if (callsFinishedWithDropBuilder_ == null) { callsFinishedWithDrop_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000020); } else { callsFinishedWithDropBuilder_.clear(); } return this; } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return io.grpc.grpclb.LoadBalancerProto.internal_static_grpc_lb_v1_ClientStats_descriptor; } public io.grpc.grpclb.ClientStats getDefaultInstanceForType() { return io.grpc.grpclb.ClientStats.getDefaultInstance(); } public io.grpc.grpclb.ClientStats build() { 
io.grpc.grpclb.ClientStats result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public io.grpc.grpclb.ClientStats buildPartial() { io.grpc.grpclb.ClientStats result = new io.grpc.grpclb.ClientStats(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (timestampBuilder_ == null) { result.timestamp_ = timestamp_; } else { result.timestamp_ = timestampBuilder_.build(); } result.numCallsStarted_ = numCallsStarted_; result.numCallsFinished_ = numCallsFinished_; result.numCallsFinishedWithClientFailedToSend_ = numCallsFinishedWithClientFailedToSend_; result.numCallsFinishedKnownReceived_ = numCallsFinishedKnownReceived_; if (callsFinishedWithDropBuilder_ == null) { if (((bitField0_ & 0x00000020) == 0x00000020)) { callsFinishedWithDrop_ = java.util.Collections.unmodifiableList(callsFinishedWithDrop_); bitField0_ = (bitField0_ & ~0x00000020); } result.callsFinishedWithDrop_ = callsFinishedWithDrop_; } else { result.callsFinishedWithDrop_ = callsFinishedWithDropBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder clone() { return (Builder) super.clone(); } public Builder setField( com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return (Builder) super.setField(field, value); } public Builder clearField( com.google.protobuf.Descriptors.FieldDescriptor field) { return (Builder) super.clearField(field); } public Builder clearOneof( com.google.protobuf.Descriptors.OneofDescriptor oneof) { return (Builder) super.clearOneof(oneof); } public Builder setRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return (Builder) super.setRepeatedField(field, index, value); } public Builder addRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return (Builder) super.addRepeatedField(field, value); } public Builder 
mergeFrom(com.google.protobuf.Message other) { if (other instanceof io.grpc.grpclb.ClientStats) { return mergeFrom((io.grpc.grpclb.ClientStats)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(io.grpc.grpclb.ClientStats other) { if (other == io.grpc.grpclb.ClientStats.getDefaultInstance()) return this; if (other.hasTimestamp()) { mergeTimestamp(other.getTimestamp()); } if (other.getNumCallsStarted() != 0L) { setNumCallsStarted(other.getNumCallsStarted()); } if (other.getNumCallsFinished() != 0L) { setNumCallsFinished(other.getNumCallsFinished()); } if (other.getNumCallsFinishedWithClientFailedToSend() != 0L) { setNumCallsFinishedWithClientFailedToSend(other.getNumCallsFinishedWithClientFailedToSend()); } if (other.getNumCallsFinishedKnownReceived() != 0L) { setNumCallsFinishedKnownReceived(other.getNumCallsFinishedKnownReceived()); } if (callsFinishedWithDropBuilder_ == null) { if (!other.callsFinishedWithDrop_.isEmpty()) { if (callsFinishedWithDrop_.isEmpty()) { callsFinishedWithDrop_ = other.callsFinishedWithDrop_; bitField0_ = (bitField0_ & ~0x00000020); } else { ensureCallsFinishedWithDropIsMutable(); callsFinishedWithDrop_.addAll(other.callsFinishedWithDrop_); } onChanged(); } } else { if (!other.callsFinishedWithDrop_.isEmpty()) { if (callsFinishedWithDropBuilder_.isEmpty()) { callsFinishedWithDropBuilder_.dispose(); callsFinishedWithDropBuilder_ = null; callsFinishedWithDrop_ = other.callsFinishedWithDrop_; bitField0_ = (bitField0_ & ~0x00000020); callsFinishedWithDropBuilder_ = com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
getCallsFinishedWithDropFieldBuilder() : null; } else { callsFinishedWithDropBuilder_.addAllMessages(other.callsFinishedWithDrop_); } } } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { io.grpc.grpclb.ClientStats parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (io.grpc.grpclb.ClientStats) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private com.google.protobuf.Timestamp timestamp_ = null; private com.google.protobuf.SingleFieldBuilderV3< com.google.protobuf.Timestamp, com.google.protobuf.Timestamp.Builder, com.google.protobuf.TimestampOrBuilder> timestampBuilder_; /** * <pre> * The timestamp of generating the report. * </pre> * * <code>.google.protobuf.Timestamp timestamp = 1;</code> */ public boolean hasTimestamp() { return timestampBuilder_ != null || timestamp_ != null; } /** * <pre> * The timestamp of generating the report. * </pre> * * <code>.google.protobuf.Timestamp timestamp = 1;</code> */ public com.google.protobuf.Timestamp getTimestamp() { if (timestampBuilder_ == null) { return timestamp_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : timestamp_; } else { return timestampBuilder_.getMessage(); } } /** * <pre> * The timestamp of generating the report. 
* </pre> * * <code>.google.protobuf.Timestamp timestamp = 1;</code> */ public Builder setTimestamp(com.google.protobuf.Timestamp value) { if (timestampBuilder_ == null) { if (value == null) { throw new NullPointerException(); } timestamp_ = value; onChanged(); } else { timestampBuilder_.setMessage(value); } return this; } /** * <pre> * The timestamp of generating the report. * </pre> * * <code>.google.protobuf.Timestamp timestamp = 1;</code> */ public Builder setTimestamp( com.google.protobuf.Timestamp.Builder builderForValue) { if (timestampBuilder_ == null) { timestamp_ = builderForValue.build(); onChanged(); } else { timestampBuilder_.setMessage(builderForValue.build()); } return this; } /** * <pre> * The timestamp of generating the report. * </pre> * * <code>.google.protobuf.Timestamp timestamp = 1;</code> */ public Builder mergeTimestamp(com.google.protobuf.Timestamp value) { if (timestampBuilder_ == null) { if (timestamp_ != null) { timestamp_ = com.google.protobuf.Timestamp.newBuilder(timestamp_).mergeFrom(value).buildPartial(); } else { timestamp_ = value; } onChanged(); } else { timestampBuilder_.mergeFrom(value); } return this; } /** * <pre> * The timestamp of generating the report. * </pre> * * <code>.google.protobuf.Timestamp timestamp = 1;</code> */ public Builder clearTimestamp() { if (timestampBuilder_ == null) { timestamp_ = null; onChanged(); } else { timestamp_ = null; timestampBuilder_ = null; } return this; } /** * <pre> * The timestamp of generating the report. * </pre> * * <code>.google.protobuf.Timestamp timestamp = 1;</code> */ public com.google.protobuf.Timestamp.Builder getTimestampBuilder() { onChanged(); return getTimestampFieldBuilder().getBuilder(); } /** * <pre> * The timestamp of generating the report. 
* </pre> * * <code>.google.protobuf.Timestamp timestamp = 1;</code> */ public com.google.protobuf.TimestampOrBuilder getTimestampOrBuilder() { if (timestampBuilder_ != null) { return timestampBuilder_.getMessageOrBuilder(); } else { return timestamp_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : timestamp_; } } /** * <pre> * The timestamp of generating the report. * </pre> * * <code>.google.protobuf.Timestamp timestamp = 1;</code> */ private com.google.protobuf.SingleFieldBuilderV3< com.google.protobuf.Timestamp, com.google.protobuf.Timestamp.Builder, com.google.protobuf.TimestampOrBuilder> getTimestampFieldBuilder() { if (timestampBuilder_ == null) { timestampBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< com.google.protobuf.Timestamp, com.google.protobuf.Timestamp.Builder, com.google.protobuf.TimestampOrBuilder>( getTimestamp(), getParentForChildren(), isClean()); timestamp_ = null; } return timestampBuilder_; } private long numCallsStarted_ ; /** * <pre> * The total number of RPCs that started. * </pre> * * <code>int64 num_calls_started = 2;</code> */ public long getNumCallsStarted() { return numCallsStarted_; } /** * <pre> * The total number of RPCs that started. * </pre> * * <code>int64 num_calls_started = 2;</code> */ public Builder setNumCallsStarted(long value) { numCallsStarted_ = value; onChanged(); return this; } /** * <pre> * The total number of RPCs that started. * </pre> * * <code>int64 num_calls_started = 2;</code> */ public Builder clearNumCallsStarted() { numCallsStarted_ = 0L; onChanged(); return this; } private long numCallsFinished_ ; /** * <pre> * The total number of RPCs that finished. * </pre> * * <code>int64 num_calls_finished = 3;</code> */ public long getNumCallsFinished() { return numCallsFinished_; } /** * <pre> * The total number of RPCs that finished. 
* </pre> * * <code>int64 num_calls_finished = 3;</code> */ public Builder setNumCallsFinished(long value) { numCallsFinished_ = value; onChanged(); return this; } /** * <pre> * The total number of RPCs that finished. * </pre> * * <code>int64 num_calls_finished = 3;</code> */ public Builder clearNumCallsFinished() { numCallsFinished_ = 0L; onChanged(); return this; } private long numCallsFinishedWithClientFailedToSend_ ; /** * <pre> * The total number of RPCs that failed to reach a server except dropped RPCs. * </pre> * * <code>int64 num_calls_finished_with_client_failed_to_send = 6;</code> */ public long getNumCallsFinishedWithClientFailedToSend() { return numCallsFinishedWithClientFailedToSend_; } /** * <pre> * The total number of RPCs that failed to reach a server except dropped RPCs. * </pre> * * <code>int64 num_calls_finished_with_client_failed_to_send = 6;</code> */ public Builder setNumCallsFinishedWithClientFailedToSend(long value) { numCallsFinishedWithClientFailedToSend_ = value; onChanged(); return this; } /** * <pre> * The total number of RPCs that failed to reach a server except dropped RPCs. * </pre> * * <code>int64 num_calls_finished_with_client_failed_to_send = 6;</code> */ public Builder clearNumCallsFinishedWithClientFailedToSend() { numCallsFinishedWithClientFailedToSend_ = 0L; onChanged(); return this; } private long numCallsFinishedKnownReceived_ ; /** * <pre> * The total number of RPCs that finished and are known to have been received * by a server. * </pre> * * <code>int64 num_calls_finished_known_received = 7;</code> */ public long getNumCallsFinishedKnownReceived() { return numCallsFinishedKnownReceived_; } /** * <pre> * The total number of RPCs that finished and are known to have been received * by a server. 
* </pre> * * <code>int64 num_calls_finished_known_received = 7;</code> */ public Builder setNumCallsFinishedKnownReceived(long value) { numCallsFinishedKnownReceived_ = value; onChanged(); return this; } /** * <pre> * The total number of RPCs that finished and are known to have been received * by a server. * </pre> * * <code>int64 num_calls_finished_known_received = 7;</code> */ public Builder clearNumCallsFinishedKnownReceived() { numCallsFinishedKnownReceived_ = 0L; onChanged(); return this; } private java.util.List<io.grpc.grpclb.ClientStatsPerToken> callsFinishedWithDrop_ = java.util.Collections.emptyList(); private void ensureCallsFinishedWithDropIsMutable() { if (!((bitField0_ & 0x00000020) == 0x00000020)) { callsFinishedWithDrop_ = new java.util.ArrayList<io.grpc.grpclb.ClientStatsPerToken>(callsFinishedWithDrop_); bitField0_ |= 0x00000020; } } private com.google.protobuf.RepeatedFieldBuilderV3< io.grpc.grpclb.ClientStatsPerToken, io.grpc.grpclb.ClientStatsPerToken.Builder, io.grpc.grpclb.ClientStatsPerTokenOrBuilder> callsFinishedWithDropBuilder_; /** * <pre> * The list of dropped calls. * </pre> * * <code>repeated .grpc.lb.v1.ClientStatsPerToken calls_finished_with_drop = 8;</code> */ public java.util.List<io.grpc.grpclb.ClientStatsPerToken> getCallsFinishedWithDropList() { if (callsFinishedWithDropBuilder_ == null) { return java.util.Collections.unmodifiableList(callsFinishedWithDrop_); } else { return callsFinishedWithDropBuilder_.getMessageList(); } } /** * <pre> * The list of dropped calls. * </pre> * * <code>repeated .grpc.lb.v1.ClientStatsPerToken calls_finished_with_drop = 8;</code> */ public int getCallsFinishedWithDropCount() { if (callsFinishedWithDropBuilder_ == null) { return callsFinishedWithDrop_.size(); } else { return callsFinishedWithDropBuilder_.getCount(); } } /** * <pre> * The list of dropped calls. 
* </pre> * * <code>repeated .grpc.lb.v1.ClientStatsPerToken calls_finished_with_drop = 8;</code> */ public io.grpc.grpclb.ClientStatsPerToken getCallsFinishedWithDrop(int index) { if (callsFinishedWithDropBuilder_ == null) { return callsFinishedWithDrop_.get(index); } else { return callsFinishedWithDropBuilder_.getMessage(index); } } /** * <pre> * The list of dropped calls. * </pre> * * <code>repeated .grpc.lb.v1.ClientStatsPerToken calls_finished_with_drop = 8;</code> */ public Builder setCallsFinishedWithDrop( int index, io.grpc.grpclb.ClientStatsPerToken value) { if (callsFinishedWithDropBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureCallsFinishedWithDropIsMutable(); callsFinishedWithDrop_.set(index, value); onChanged(); } else { callsFinishedWithDropBuilder_.setMessage(index, value); } return this; } /** * <pre> * The list of dropped calls. * </pre> * * <code>repeated .grpc.lb.v1.ClientStatsPerToken calls_finished_with_drop = 8;</code> */ public Builder setCallsFinishedWithDrop( int index, io.grpc.grpclb.ClientStatsPerToken.Builder builderForValue) { if (callsFinishedWithDropBuilder_ == null) { ensureCallsFinishedWithDropIsMutable(); callsFinishedWithDrop_.set(index, builderForValue.build()); onChanged(); } else { callsFinishedWithDropBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * <pre> * The list of dropped calls. * </pre> * * <code>repeated .grpc.lb.v1.ClientStatsPerToken calls_finished_with_drop = 8;</code> */ public Builder addCallsFinishedWithDrop(io.grpc.grpclb.ClientStatsPerToken value) { if (callsFinishedWithDropBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureCallsFinishedWithDropIsMutable(); callsFinishedWithDrop_.add(value); onChanged(); } else { callsFinishedWithDropBuilder_.addMessage(value); } return this; } /** * <pre> * The list of dropped calls. 
* </pre> * * <code>repeated .grpc.lb.v1.ClientStatsPerToken calls_finished_with_drop = 8;</code> */ public Builder addCallsFinishedWithDrop( int index, io.grpc.grpclb.ClientStatsPerToken value) { if (callsFinishedWithDropBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureCallsFinishedWithDropIsMutable(); callsFinishedWithDrop_.add(index, value); onChanged(); } else { callsFinishedWithDropBuilder_.addMessage(index, value); } return this; } /** * <pre> * The list of dropped calls. * </pre> * * <code>repeated .grpc.lb.v1.ClientStatsPerToken calls_finished_with_drop = 8;</code> */ public Builder addCallsFinishedWithDrop( io.grpc.grpclb.ClientStatsPerToken.Builder builderForValue) { if (callsFinishedWithDropBuilder_ == null) { ensureCallsFinishedWithDropIsMutable(); callsFinishedWithDrop_.add(builderForValue.build()); onChanged(); } else { callsFinishedWithDropBuilder_.addMessage(builderForValue.build()); } return this; } /** * <pre> * The list of dropped calls. * </pre> * * <code>repeated .grpc.lb.v1.ClientStatsPerToken calls_finished_with_drop = 8;</code> */ public Builder addCallsFinishedWithDrop( int index, io.grpc.grpclb.ClientStatsPerToken.Builder builderForValue) { if (callsFinishedWithDropBuilder_ == null) { ensureCallsFinishedWithDropIsMutable(); callsFinishedWithDrop_.add(index, builderForValue.build()); onChanged(); } else { callsFinishedWithDropBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * <pre> * The list of dropped calls. * </pre> * * <code>repeated .grpc.lb.v1.ClientStatsPerToken calls_finished_with_drop = 8;</code> */ public Builder addAllCallsFinishedWithDrop( java.lang.Iterable<? 
extends io.grpc.grpclb.ClientStatsPerToken> values) { if (callsFinishedWithDropBuilder_ == null) { ensureCallsFinishedWithDropIsMutable(); com.google.protobuf.AbstractMessageLite.Builder.addAll( values, callsFinishedWithDrop_); onChanged(); } else { callsFinishedWithDropBuilder_.addAllMessages(values); } return this; } /** * <pre> * The list of dropped calls. * </pre> * * <code>repeated .grpc.lb.v1.ClientStatsPerToken calls_finished_with_drop = 8;</code> */ public Builder clearCallsFinishedWithDrop() { if (callsFinishedWithDropBuilder_ == null) { callsFinishedWithDrop_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000020); onChanged(); } else { callsFinishedWithDropBuilder_.clear(); } return this; } /** * <pre> * The list of dropped calls. * </pre> * * <code>repeated .grpc.lb.v1.ClientStatsPerToken calls_finished_with_drop = 8;</code> */ public Builder removeCallsFinishedWithDrop(int index) { if (callsFinishedWithDropBuilder_ == null) { ensureCallsFinishedWithDropIsMutable(); callsFinishedWithDrop_.remove(index); onChanged(); } else { callsFinishedWithDropBuilder_.remove(index); } return this; } /** * <pre> * The list of dropped calls. * </pre> * * <code>repeated .grpc.lb.v1.ClientStatsPerToken calls_finished_with_drop = 8;</code> */ public io.grpc.grpclb.ClientStatsPerToken.Builder getCallsFinishedWithDropBuilder( int index) { return getCallsFinishedWithDropFieldBuilder().getBuilder(index); } /** * <pre> * The list of dropped calls. * </pre> * * <code>repeated .grpc.lb.v1.ClientStatsPerToken calls_finished_with_drop = 8;</code> */ public io.grpc.grpclb.ClientStatsPerTokenOrBuilder getCallsFinishedWithDropOrBuilder( int index) { if (callsFinishedWithDropBuilder_ == null) { return callsFinishedWithDrop_.get(index); } else { return callsFinishedWithDropBuilder_.getMessageOrBuilder(index); } } /** * <pre> * The list of dropped calls. 
* </pre> * * <code>repeated .grpc.lb.v1.ClientStatsPerToken calls_finished_with_drop = 8;</code> */ public java.util.List<? extends io.grpc.grpclb.ClientStatsPerTokenOrBuilder> getCallsFinishedWithDropOrBuilderList() { if (callsFinishedWithDropBuilder_ != null) { return callsFinishedWithDropBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(callsFinishedWithDrop_); } } /** * <pre> * The list of dropped calls. * </pre> * * <code>repeated .grpc.lb.v1.ClientStatsPerToken calls_finished_with_drop = 8;</code> */ public io.grpc.grpclb.ClientStatsPerToken.Builder addCallsFinishedWithDropBuilder() { return getCallsFinishedWithDropFieldBuilder().addBuilder( io.grpc.grpclb.ClientStatsPerToken.getDefaultInstance()); } /** * <pre> * The list of dropped calls. * </pre> * * <code>repeated .grpc.lb.v1.ClientStatsPerToken calls_finished_with_drop = 8;</code> */ public io.grpc.grpclb.ClientStatsPerToken.Builder addCallsFinishedWithDropBuilder( int index) { return getCallsFinishedWithDropFieldBuilder().addBuilder( index, io.grpc.grpclb.ClientStatsPerToken.getDefaultInstance()); } /** * <pre> * The list of dropped calls. 
* </pre> * * <code>repeated .grpc.lb.v1.ClientStatsPerToken calls_finished_with_drop = 8;</code> */ public java.util.List<io.grpc.grpclb.ClientStatsPerToken.Builder> getCallsFinishedWithDropBuilderList() { return getCallsFinishedWithDropFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilderV3< io.grpc.grpclb.ClientStatsPerToken, io.grpc.grpclb.ClientStatsPerToken.Builder, io.grpc.grpclb.ClientStatsPerTokenOrBuilder> getCallsFinishedWithDropFieldBuilder() { if (callsFinishedWithDropBuilder_ == null) { callsFinishedWithDropBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< io.grpc.grpclb.ClientStatsPerToken, io.grpc.grpclb.ClientStatsPerToken.Builder, io.grpc.grpclb.ClientStatsPerTokenOrBuilder>( callsFinishedWithDrop_, ((bitField0_ & 0x00000020) == 0x00000020), getParentForChildren(), isClean()); callsFinishedWithDrop_ = null; } return callsFinishedWithDropBuilder_; } public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFieldsProto3(unknownFields); } public final Builder mergeUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:grpc.lb.v1.ClientStats) } // @@protoc_insertion_point(class_scope:grpc.lb.v1.ClientStats) private static final io.grpc.grpclb.ClientStats DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new io.grpc.grpclb.ClientStats(); } public static io.grpc.grpclb.ClientStats getDefaultInstance() { return DEFAULT_INSTANCE; } private static final com.google.protobuf.Parser<ClientStats> PARSER = new com.google.protobuf.AbstractParser<ClientStats>() { public ClientStats parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new ClientStats(input, extensionRegistry); } }; public static 
com.google.protobuf.Parser<ClientStats> parser() { return PARSER; } @java.lang.Override public com.google.protobuf.Parser<ClientStats> getParserForType() { return PARSER; } public io.grpc.grpclb.ClientStats getDefaultInstanceForType() { return DEFAULT_INSTANCE; } }
apache-2.0
jonathanchristison/fabric8
common-util/src/main/java/io/fabric8/common/util/ObjectUtils.java
1849
/** * Copyright 2005-2015 Red Hat, Inc. * * Red Hat licenses this file to you under the Apache License, version * 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. */ package io.fabric8.common.util; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.ObjectOutputStream; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class ObjectUtils { private static final Logger LOGGER = LoggerFactory.getLogger(ObjectUtils.class); public static byte[] toBytes(Object object) { byte[] result = null; if (object instanceof byte[]) { return (byte[]) object; } ByteArrayOutputStream baos = new ByteArrayOutputStream(); ObjectOutputStream oos = null; try { oos = new ObjectOutputStream(baos); oos.writeObject(object); result = baos.toByteArray(); } catch (IOException e) { LOGGER.error("Failed to serialize object {}.", object, e); } finally { if (oos != null) { try { oos.close(); } catch (IOException e) { } } if (baos != null) { try { baos.close(); } catch (IOException e) { } } } return result; } }
apache-2.0
WangTaoTheTonic/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobClient.java
27946
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.runtime.blob; import org.apache.flink.api.common.JobID; import org.apache.flink.configuration.BlobServerOptions; import org.apache.flink.configuration.Configuration; import org.apache.flink.core.fs.FSDataInputStream; import org.apache.flink.core.fs.FileSystem; import org.apache.flink.core.fs.Path; import org.apache.flink.runtime.instance.ActorGateway; import org.apache.flink.runtime.messages.JobManagerMessages; import org.apache.flink.runtime.net.SSLUtils; import org.apache.flink.util.InstantiationUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import scala.Option; import scala.concurrent.Await; import scala.concurrent.Future; import scala.concurrent.duration.FiniteDuration; import javax.net.ssl.SSLContext; import javax.net.ssl.SSLParameters; import javax.net.ssl.SSLSocket; import java.io.Closeable; import java.io.EOFException; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.net.InetSocketAddress; import java.net.Socket; import java.security.MessageDigest; import java.util.ArrayList; import java.util.Collections; import java.util.List; import static 
org.apache.flink.runtime.blob.BlobServerProtocol.BUFFER_SIZE; import static org.apache.flink.runtime.blob.BlobServerProtocol.CONTENT_ADDRESSABLE; import static org.apache.flink.runtime.blob.BlobServerProtocol.DELETE_OPERATION; import static org.apache.flink.runtime.blob.BlobServerProtocol.GET_OPERATION; import static org.apache.flink.runtime.blob.BlobServerProtocol.JOB_ID_SCOPE; import static org.apache.flink.runtime.blob.BlobServerProtocol.MAX_KEY_LENGTH; import static org.apache.flink.runtime.blob.BlobServerProtocol.NAME_ADDRESSABLE; import static org.apache.flink.runtime.blob.BlobServerProtocol.PUT_OPERATION; import static org.apache.flink.runtime.blob.BlobServerProtocol.RETURN_ERROR; import static org.apache.flink.runtime.blob.BlobServerProtocol.RETURN_OKAY; import static org.apache.flink.runtime.blob.BlobUtils.readFully; import static org.apache.flink.runtime.blob.BlobUtils.readLength; import static org.apache.flink.runtime.blob.BlobUtils.writeLength; /** * The BLOB client can communicate with the BLOB server and either upload (PUT), download (GET), * or delete (DELETE) BLOBs. */ public final class BlobClient implements Closeable { private static final Logger LOG = LoggerFactory.getLogger(BlobClient.class); /** The socket connection to the BLOB server. */ private Socket socket; /** * Instantiates a new BLOB client. 
* * @param serverAddress * the network address of the BLOB server * @param clientConfig * additional configuration like SSL parameters required to connect to the blob server * @throws IOException * thrown if the connection to the BLOB server could not be established */ public BlobClient(InetSocketAddress serverAddress, Configuration clientConfig) throws IOException { try { // Check if ssl is enabled SSLContext clientSSLContext = null; if (clientConfig != null && clientConfig.getBoolean(BlobServerOptions.SSL_ENABLED)) { clientSSLContext = SSLUtils.createSSLClientContext(clientConfig); } if (clientSSLContext != null) { LOG.info("Using ssl connection to the blob server"); SSLSocket sslSocket = (SSLSocket) clientSSLContext.getSocketFactory().createSocket( serverAddress.getAddress(), serverAddress.getPort()); // Enable hostname verification for remote SSL connections if (!serverAddress.getAddress().isLoopbackAddress()) { SSLParameters newSSLParameters = sslSocket.getSSLParameters(); SSLUtils.setSSLVerifyHostname(clientConfig, newSSLParameters); sslSocket.setSSLParameters(newSSLParameters); } this.socket = sslSocket; } else { this.socket = new Socket(); this.socket.connect(serverAddress); } } catch(Exception e) { BlobUtils.closeSilently(socket, LOG); throw new IOException("Could not connect to BlobServer at address " + serverAddress, e); } } @Override public void close() throws IOException { this.socket.close(); } public boolean isClosed() { return this.socket.isClosed(); } // -------------------------------------------------------------------------------------------- // GET // -------------------------------------------------------------------------------------------- /** * Downloads the BLOB identified by the given job ID and key from the BLOB server. If no such BLOB exists on the * server, a {@link FileNotFoundException} is thrown. 
* * @param jobID * the job ID identifying the BLOB to download * @param key * the key identifying the BLOB to download * @return an input stream to read the retrieved data from * @throws IOException * thrown if an I/O error occurs during the download */ public InputStream get(JobID jobID, String key) throws IOException { if (key.length() > MAX_KEY_LENGTH) { throw new IllegalArgumentException("Keys must not be longer than " + MAX_KEY_LENGTH); } if (this.socket.isClosed()) { throw new IllegalStateException("BLOB Client is not connected. " + "Client has been shut down or encountered an error before."); } if (LOG.isDebugEnabled()) { LOG.debug(String.format("GET BLOB %s / \"%s\" from %s", jobID, key, socket.getLocalSocketAddress())); } try { OutputStream os = this.socket.getOutputStream(); InputStream is = this.socket.getInputStream(); sendGetHeader(os, jobID, key, null); receiveAndCheckResponse(is); return new BlobInputStream(is, null); } catch (Throwable t) { BlobUtils.closeSilently(socket, LOG); throw new IOException("GET operation failed: " + t.getMessage(), t); } } /** * Downloads the BLOB identified by the given BLOB key from the BLOB server. If no such BLOB exists on the server, a * {@link FileNotFoundException} is thrown. * * @param blobKey * the BLOB key identifying the BLOB to download * @return an input stream to read the retrieved data from * @throws IOException * thrown if an I/O error occurs during the download */ public InputStream get(BlobKey blobKey) throws IOException { if (this.socket.isClosed()) { throw new IllegalStateException("BLOB Client is not connected. 
" + "Client has been shut down or encountered an error before."); } if (LOG.isDebugEnabled()) { LOG.debug(String.format("GET content addressable BLOB %s from %s", blobKey, socket.getLocalSocketAddress())); } try { OutputStream os = this.socket.getOutputStream(); InputStream is = this.socket.getInputStream(); // Send GET header sendGetHeader(os, null, null, blobKey); receiveAndCheckResponse(is); return new BlobInputStream(is, blobKey); } catch (Throwable t) { BlobUtils.closeSilently(socket, LOG); throw new IOException("GET operation failed: " + t.getMessage(), t); } } /** * Constructs and writes the header data for a GET operation to the given output stream. * * @param outputStream * the output stream to write the header data to * @param jobID * the job ID identifying the BLOB to download or <code>null</code> to indicate the BLOB key should be used * to identify the BLOB on the server instead * @param key * the key identifying the BLOB to download or <code>null</code> to indicate the BLOB key should be used to * identify the BLOB on the server instead * @param blobKey * the BLOB key to identify the BLOB to download if either the job ID or the regular key are * <code>null</code> * @throws IOException * thrown if an I/O error occurs while writing the header data to the output stream */ private void sendGetHeader(OutputStream outputStream, JobID jobID, String key, BlobKey blobKey) throws IOException { // Signal type of operation outputStream.write(GET_OPERATION); // Check if GET should be done in content-addressable manner if (jobID == null || key == null) { outputStream.write(CONTENT_ADDRESSABLE); blobKey.writeToOutputStream(outputStream); } else { outputStream.write(NAME_ADDRESSABLE); // Send job ID and key outputStream.write(jobID.getBytes()); byte[] keyBytes = key.getBytes(BlobUtils.DEFAULT_CHARSET); writeLength(keyBytes.length, outputStream); outputStream.write(keyBytes); } } private void receiveAndCheckResponse(InputStream is) throws IOException { int response = 
is.read(); if (response < 0) { throw new EOFException("Premature end of response"); } if (response == RETURN_ERROR) { Throwable cause = readExceptionFromStream(is); throw new IOException("Server side error: " + cause.getMessage(), cause); } else if (response != RETURN_OKAY) { throw new IOException("Unrecognized response"); } } // -------------------------------------------------------------------------------------------- // PUT // -------------------------------------------------------------------------------------------- /** * Uploads the data of the given byte array to the BLOB server in a content-addressable manner. * * @param value * the buffer to upload * @return the computed BLOB key identifying the BLOB on the server * @throws IOException * thrown if an I/O error occurs while uploading the data to the BLOB server */ public BlobKey put(byte[] value) throws IOException { return put(value, 0, value.length); } /** * Uploads data from the given byte array to the BLOB server in a content-addressable manner. * * @param value * the buffer to upload data from * @param offset * the read offset within the buffer * @param len * the number of bytes to upload from the buffer * @return the computed BLOB key identifying the BLOB on the server * @throws IOException * thrown if an I/O error occurs while uploading the data to the BLOB server */ public BlobKey put(byte[] value, int offset, int len) throws IOException { return putBuffer(null, null, value, offset, len); } /** * Uploads the data of the given byte array to the BLOB server and stores it under the given job ID and key. 
 *
 * @param jobId
 *        the job ID to identify the uploaded data
 * @param key
 *        the key to identify the uploaded data
 * @param value
 *        the buffer to upload
 * @throws IOException
 *         thrown if an I/O error occurs while uploading the data to the BLOB server
 */
public void put(JobID jobId, String key, byte[] value) throws IOException {
    // Convenience overload: upload the entire buffer.
    put(jobId, key, value, 0, value.length);
}

/**
 * Uploads data from the given byte array to the BLOB server and stores it under the given job ID and key.
 *
 * @param jobId
 *        the job ID to identify the uploaded data
 * @param key
 *        the key to identify the uploaded data
 * @param value
 *        the buffer to upload data from
 * @param offset
 *        the read offset within the buffer
 * @param len
 *        the number of bytes to upload from the buffer
 * @throws IOException
 *         thrown if an I/O error occurs while uploading the data to the BLOB server
 */
public void put(JobID jobId, String key, byte[] value, int offset, int len) throws IOException {
    // Key length is bounded by the wire protocol; reject early before any bytes are sent.
    if (key.length() > MAX_KEY_LENGTH) {
        throw new IllegalArgumentException("Keys must not be longer than " + MAX_KEY_LENGTH);
    }

    putBuffer(jobId, key, value, offset, len);
}

/**
 * Uploads data from the given input stream to the BLOB server and stores it under the given job ID and key.
 *
 * @param jobId
 *        the job ID to identify the uploaded data
 * @param key
 *        the key to identify the uploaded data
 * @param inputStream
 *        the input stream to read the data from
 * @throws IOException
 *         thrown if an I/O error occurs while reading the data from the input stream or uploading the data to the
 *         BLOB server
 */
public void put(JobID jobId, String key, InputStream inputStream) throws IOException {
    // Key length is bounded by the wire protocol; reject early before any bytes are sent.
    if (key.length() > MAX_KEY_LENGTH) {
        throw new IllegalArgumentException("Keys must not be longer than " + MAX_KEY_LENGTH);
    }

    putInputStream(jobId, key, inputStream);
}

/**
 * Uploads the data from the given input stream to the BLOB server in a content-addressable manner.
 *
 * @param inputStream
 *        the input stream to read the data from
 * @return the computed BLOB key identifying the BLOB on the server
 * @throws IOException
 *         thrown if an I/O error occurs while reading the data from the input stream or uploading the data to the
 *         BLOB server
 */
public BlobKey put(InputStream inputStream) throws IOException {
    // null jobId/key selects the content-addressable path; the server returns the key.
    return putInputStream(null, null, inputStream);
}

/**
 * Uploads data from the given byte buffer to the BLOB server.
 *
 * @param jobId
 *        the ID of the job the BLOB belongs to or <code>null</code> to store the BLOB in a content-addressable
 *        manner
 * @param key
 *        the key to identify the BLOB on the server or <code>null</code> to store the BLOB in a content-addressable
 *        manner
 * @param value
 *        the buffer to read the data from
 * @param offset
 *        the read offset within the buffer
 * @param len
 *        the number of bytes to read from the buffer
 * @return the computed BLOB key if the BLOB has been stored in a content-addressable manner, <code>null</code>
 *         otherwise
 * @throws IOException
 *         thrown if an I/O error occurs while uploading the data to the BLOB server
 */
private BlobKey putBuffer(JobID jobId, String key, byte[] value, int offset, int len) throws IOException {
    if (this.socket.isClosed()) {
        throw new IllegalStateException("BLOB Client is not connected. " +
                "Client has been shut down or encountered an error before.");
    }

    if (LOG.isDebugEnabled()) {
        // NOTE(review): these debug messages log socket.getLocalSocketAddress() — presumably the
        // server (remote) address was intended; confirm before relying on the logged address.
        if (jobId == null) {
            LOG.debug(String.format("PUT content addressable BLOB buffer (%d bytes) to %s",
                    len, socket.getLocalSocketAddress()));
        }
        else {
            LOG.debug(String.format("PUT BLOB buffer (%d bytes) under %s / \"%s\" to %s",
                    len, jobId, key, socket.getLocalSocketAddress()));
        }
    }

    try {
        final OutputStream os = this.socket.getOutputStream();
        // The digest is only needed for content-addressable PUTs, where the locally computed
        // key is compared against the key the server returns.
        final MessageDigest md = jobId == null ? BlobUtils.createMessageDigest() : null;

        // Send the PUT header
        sendPutHeader(os, jobId, key);

        // Send the value in iterations of BUFFER_SIZE; each chunk is preceded by its length.
        int remainingBytes = len;

        while (remainingBytes > 0) {
            final int bytesToSend = Math.min(BUFFER_SIZE, remainingBytes);
            writeLength(bytesToSend, os);

            os.write(value, offset, bytesToSend);

            // Update the message digest if necessary
            if (md != null) {
                md.update(value, offset, bytesToSend);
            }

            remainingBytes -= bytesToSend;
            offset += bytesToSend;
        }
        // send -1 as the stream end marker
        writeLength(-1, os);

        // Receive blob key and compare
        final InputStream is = this.socket.getInputStream();
        return receivePutResponseAndCompare(is, md);
    }
    catch (Throwable t) {
        // On any failure the connection state is unknown; close the socket so the client
        // cannot be reused with a desynchronized protocol stream.
        BlobUtils.closeSilently(socket, LOG);
        throw new IOException("PUT operation failed: " + t.getMessage(), t);
    }
}

/**
 * Uploads data from the given input stream to the BLOB server.
 *
 * @param jobId
 *        the ID of the job the BLOB belongs to or <code>null</code> to store the BLOB in a content-addressable
 *        manner
 * @param key
 *        the key to identify the BLOB on the server or <code>null</code> to store the BLOB in a content-addressable
 *        manner
 * @param inputStream
 *        the input stream to read the data from
 * @return the computed BLOB key if the BLOB has been stored in a content-addressable manner, <code>null</code>
 *         otherwise
 * @throws IOException
 *         thrown if an I/O error occurs while uploading the data to the BLOB server
 */
private BlobKey putInputStream(JobID jobId, String key, InputStream inputStream) throws IOException {
    if (this.socket.isClosed()) {
        throw new IllegalStateException("BLOB Client is not connected. " +
                "Client has been shut down or encountered an error before.");
    }

    if (LOG.isDebugEnabled()) {
        // NOTE(review): logs the local socket address, same as putBuffer — see note there.
        if (jobId == null) {
            LOG.debug(String.format("PUT content addressable BLOB stream to %s",
                    socket.getLocalSocketAddress()));
        }
        else {
            LOG.debug(String.format("PUT BLOB stream under %s / \"%s\" to %s",
                    jobId, key, socket.getLocalSocketAddress()));
        }
    }

    try {
        final OutputStream os = this.socket.getOutputStream();
        // Digest only for content-addressable PUTs (jobId == null).
        final MessageDigest md = jobId == null ? BlobUtils.createMessageDigest() : null;
        final byte[] xferBuf = new byte[BUFFER_SIZE];

        // Send the PUT header
        sendPutHeader(os, jobId, key);

        // Stream the data in length-prefixed chunks until the source is exhausted.
        while (true) {
            final int read = inputStream.read(xferBuf);
            if (read < 0) {
                // we are done. send a -1 as the stream end marker and be done
                writeLength(-1, os);
                break;
            }
            if (read > 0) {
                writeLength(read, os);
                os.write(xferBuf, 0, read);
                if (md != null) {
                    md.update(xferBuf, 0, read);
                }
            }
        }

        // Receive blob key and compare
        final InputStream is = this.socket.getInputStream();
        return receivePutResponseAndCompare(is, md);
    }
    catch (Throwable t) {
        // Close the socket on failure — the protocol stream may be desynchronized.
        BlobUtils.closeSilently(socket, LOG);
        throw new IOException("PUT operation failed: " + t.getMessage(), t);
    }
}

/**
 * Reads the server's response to a PUT request and, for content-addressable uploads, verifies that the
 * BLOB key returned by the server matches the locally computed digest.
 *
 * @param is
 *        the input stream to read the response from
 * @param md
 *        the locally maintained message digest, or <code>null</code> for name-addressable uploads
 * @return the verified BLOB key for content-addressable uploads, <code>null</code> otherwise
 * @throws IOException
 *         thrown if the response signals an error, is truncated, or the keys do not match
 */
private BlobKey receivePutResponseAndCompare(InputStream is, MessageDigest md) throws IOException {
    int response = is.read();
    if (response < 0) {
        throw new EOFException("Premature end of response");
    }
    else if (response == RETURN_OKAY) {
        if (md == null) {
            // not content addressable — nothing to verify, no key to return
            return null;
        }

        BlobKey remoteKey = BlobKey.readFromInputStream(is);
        BlobKey localKey = new BlobKey(md.digest());

        // A mismatch means the bytes were altered in transit (or on the server).
        if (!localKey.equals(remoteKey)) {
            throw new IOException("Detected data corruption during transfer");
        }

        return localKey;
    }
    else if (response == RETURN_ERROR) {
        Throwable cause = readExceptionFromStream(is);
        throw new IOException("Server side error: " + cause.getMessage(), cause);
    }
    else {
        throw new IOException("Unrecognized response: " + response + '.');
    }
}

/**
 * Constructs and writes the header data for a PUT request to the given output stream.
 * NOTE: If the jobId and key are null, we send the data to the content addressable section.
 *
 * @param outputStream
 *        the output stream to write the PUT header data to
 * @param jobID
 *        the ID of job the BLOB belongs to or <code>null</code> to indicate the upload of a
 *        content-addressable BLOB
 * @param key
 *        the key of the BLOB to upload or <code>null</code> to indicate the upload of a content-addressable BLOB
 * @throws IOException
 *         thrown if an I/O error occurs while writing the header data to the output stream
 */
private void sendPutHeader(OutputStream outputStream, JobID jobID, String key) throws IOException {
    // sanity check that either both are null or both are not null
    if ((jobID != null || key != null) && !(jobID != null && key != null)) {
        throw new IllegalArgumentException();
    }

    // Signal type of operation
    outputStream.write(PUT_OPERATION);

    // Check if PUT should be done in content-addressable manner
    if (jobID == null) {
        outputStream.write(CONTENT_ADDRESSABLE);
    }
    else {
        outputStream.write(NAME_ADDRESSABLE);
        // Send job ID and the key (key is length-prefixed; the job ID has a fixed width)
        byte[] idBytes = jobID.getBytes();
        byte[] keyBytes = key.getBytes(BlobUtils.DEFAULT_CHARSET);
        outputStream.write(idBytes);
        writeLength(keyBytes.length, outputStream);
        outputStream.write(keyBytes);
    }
}

// --------------------------------------------------------------------------------------------
//  DELETE
// --------------------------------------------------------------------------------------------

/**
 * Deletes the BLOB identified by the given BLOB key from the BLOB server.
 *
 * @param key
 *        the key to identify the BLOB
 * @throws IOException
 *         thrown if an I/O error occurs while transferring the request to
 *         the BLOB server or if the BLOB server cannot delete the file
 */
public void delete(BlobKey key) throws IOException {
    if (key == null) {
        throw new IllegalArgumentException("BLOB key must not be null");
    }

    deleteInternal(null, null, key);
}

/**
 * Deletes the BLOB identified by the given job ID and key from the BLOB server.
 *
 * @param jobId
 *        the job ID to identify the BLOB
 * @param key
 *        the key to identify the BLOB
 * @throws IOException
 *         thrown if an I/O error occurs while transferring the request to the BLOB server
 */
public void delete(JobID jobId, String key) throws IOException {
    if (jobId == null) {
        throw new IllegalArgumentException("JobID must not be null");
    }
    if (key == null) {
        throw new IllegalArgumentException("Key must not be null");
    }
    if (key.length() > MAX_KEY_LENGTH) {
        throw new IllegalArgumentException("Keys must not be longer than " + MAX_KEY_LENGTH);
    }

    deleteInternal(jobId, key, null);
}

/**
 * Deletes all BLOBs belonging to the job with the given ID from the BLOB server
 *
 * @param jobId
 *        the job ID to identify the BLOBs to be deleted
 * @throws IOException
 *         thrown if an I/O error occurs while transferring the request to the BLOB server
 */
public void deleteAll(JobID jobId) throws IOException {
    if (jobId == null) {
        throw new IllegalArgumentException("Argument jobID must not be null");
    }

    deleteInternal(jobId, null, null);
}

/**
 * Delete one or multiple BLOBs from the BLOB server.
 *
 * @param jobId The job ID to identify the BLOB(s) to be deleted.
 * @param key The key to identify the specific BLOB to delete or <code>null</code> to delete
 *            all BLOBs associated with the job id.
 * @param bKey The blob key to identify a specific content addressable BLOB. This parameter
 *             is exclusive with jobId and key.
 * @throws IOException Thrown if an I/O error occurs while transferring the request to the BLOB server.
 */
private void deleteInternal(JobID jobId, String key, BlobKey bKey) throws IOException {
    // exactly one addressing mode must be given: either a job ID (optionally with a key)
    // or a content-addressable blob key
    if ((jobId != null && bKey != null) || (jobId == null && bKey == null)) {
        throw new IllegalArgumentException();
    }

    try {
        final OutputStream outputStream = this.socket.getOutputStream();
        final InputStream inputStream = this.socket.getInputStream();

        // Signal type of operation
        outputStream.write(DELETE_OPERATION);

        // Check if DELETE should be done in content-addressable manner
        if (jobId == null) {
            // delete blob key
            outputStream.write(CONTENT_ADDRESSABLE);
            bKey.writeToOutputStream(outputStream);
        }
        else if (key != null) {
            // delete BLOB for jobID and name key
            outputStream.write(NAME_ADDRESSABLE);
            // Send job ID and the key
            byte[] idBytes = jobId.getBytes();
            byte[] keyBytes = key.getBytes(BlobUtils.DEFAULT_CHARSET);
            outputStream.write(idBytes);
            writeLength(keyBytes.length, outputStream);
            outputStream.write(keyBytes);
        }
        else {
            // delete all blobs for JobID
            outputStream.write(JOB_ID_SCOPE);
            byte[] idBytes = jobId.getBytes();
            outputStream.write(idBytes);
        }

        int response = inputStream.read();
        if (response < 0) {
            throw new EOFException("Premature end of response");
        }
        if (response == RETURN_ERROR) {
            Throwable cause = readExceptionFromStream(inputStream);
            throw new IOException("Server side error: " + cause.getMessage(), cause);
        }
        else if (response != RETURN_OKAY) {
            throw new IOException("Unrecognized response");
        }
    }
    catch (Throwable t) {
        // Close the socket on failure — the protocol stream may be desynchronized.
        BlobUtils.closeSilently(socket, LOG);
        throw new IOException("DELETE operation failed: " + t.getMessage(), t);
    }
}

/**
 * Retrieves the {@link BlobServer} address from the JobManager and uploads
 * the JAR files to it.
 *
 * @param jobManager Server address of the {@link BlobServer}
 * @param askTimeout Ask timeout for blob server address retrieval
 * @param clientConfig Any additional configuration for the blob client
 * @param jars List of JAR files to upload
 * @throws IOException Thrown if the address retrieval or upload fails
 */
public static List<BlobKey> uploadJarFiles(
        ActorGateway jobManager,
        FiniteDuration askTimeout,
        Configuration clientConfig,
        List<Path> jars) throws IOException {
    if (jars.isEmpty()) {
        return Collections.emptyList();
    }
    else {
        // Ask the JobManager for the port of its blob server, then connect directly.
        Object msg = JobManagerMessages.getRequestBlobManagerPort();
        Future<Object> futureBlobPort = jobManager.ask(msg, askTimeout);

        try {
            // Retrieve address
            Object result = Await.result(futureBlobPort, askTimeout);
            if (result instanceof Integer) {
                int port = (Integer) result;
                LOG.info("Blob client connecting to " + jobManager.path());

                // Fall back to "localhost" when the actor address carries no host part.
                Option<String> jmHost = jobManager.actor().path().address().host();
                String jmHostname = jmHost.isDefined() ? jmHost.get() : "localhost";
                InetSocketAddress serverAddress = new InetSocketAddress(jmHostname, port);

                // Now, upload
                return uploadJarFiles(serverAddress, clientConfig, jars);
            }
            else {
                throw new Exception("Expected port number (int) as answer, received " + result);
            }
        }
        catch (Exception e) {
            throw new IOException("Could not retrieve the JobManager's blob port.", e);
        }
    }
}

/**
 * Uploads the JAR files to a {@link BlobServer} at the given address.
 *
 * @param serverAddress Server address of the {@link BlobServer}
 * @param clientConfig Any additional configuration for the blob client
 * @param jars List of JAR files to upload
 * @throws IOException Thrown if the upload fails
 */
public static List<BlobKey> uploadJarFiles(InetSocketAddress serverAddress,
        Configuration clientConfig, List<Path> jars) throws IOException {
    if (jars.isEmpty()) {
        return Collections.emptyList();
    }
    else {
        List<BlobKey> blobKeys = new ArrayList<>();

        // One client connection is reused for all jars and closed afterwards.
        try (BlobClient blobClient = new BlobClient(serverAddress, clientConfig)) {
            for (final Path jar : jars) {
                final FileSystem fs = jar.getFileSystem();
                FSDataInputStream is = null;
                try {
                    is = fs.open(jar);
                    final BlobKey key = blobClient.put(is);
                    blobKeys.add(key);
                }
                finally {
                    if (is != null) {
                        is.close();
                    }
                }
            }
        }

        return blobKeys;
    }
}

// --------------------------------------------------------------------------------------------
//  Miscellaneous
// --------------------------------------------------------------------------------------------

/**
 * Reads a length-prefixed, Java-serialized {@link Throwable} from the given stream, as sent by the
 * server for {@code RETURN_ERROR} responses.
 *
 * @param in the input stream to read the serialized exception from
 * @return the deserialized server-side exception
 * @throws IOException thrown if reading or deserializing the exception fails
 */
private static Throwable readExceptionFromStream(InputStream in) throws IOException {
    int len = readLength(in);
    byte[] bytes = new byte[len];
    readFully(in, bytes, 0, len, "Error message");

    try {
        return (Throwable) InstantiationUtil.deserializeObject(bytes, ClassLoader.getSystemClassLoader());
    }
    catch (ClassNotFoundException e) {
        // should never occur
        throw new IOException("Could not transfer error message", e);
    }
}
}
apache-2.0
massdosage/citrine-scheduler
src/main/java/fm/last/citrine/model/Task.java
11457
/* * Copyright 2010 Last.fm * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package fm.last.citrine.model; import java.util.HashSet; import java.util.Set; import javax.persistence.Basic; import javax.persistence.CascadeType; import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.FetchType; import javax.persistence.GeneratedValue; import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.JoinColumn; import javax.persistence.JoinTable; import javax.persistence.ManyToMany; import javax.persistence.Table; import javax.persistence.Transient; import javax.persistence.Version; import org.apache.log4j.Logger; import org.hibernate.annotations.AccessType; /** * Class that represents a Task. It contains values that are used to trigger Quartz Jobs at a certain time as well as * values that are needed by Tasks to run. 
*/ @Entity @Table(name = TableConstants.TABLE_TASKS) public class Task { private static Logger log = Logger.getLogger(Task.class); private static final boolean DEFAULT_STOP_ON_ERROR = false; private static final boolean DEFAULT_ERROR_IF_RUNNING = true; private static final boolean DEFAULT_ENABLED = true; private long id; private int version; private String name; private String description; private Integer priority; // not used yet private String timerSchedule; private String groupName; private String command; private String beanName; private boolean enabled = DEFAULT_ENABLED; private boolean stopOnError = DEFAULT_STOP_ON_ERROR; private boolean errorIfRunning = DEFAULT_ERROR_IF_RUNNING; private Set<Task> parentTasks = new HashSet<Task>(); private Set<Task> childTasks = new HashSet<Task>(); private Notification notification; public Task(String name, String groupName, String beanName, boolean enabled, boolean stopOnError, String command, String timerSchedule) { this.name = name; this.timerSchedule = timerSchedule; this.enabled = enabled; this.stopOnError = stopOnError; this.groupName = groupName; this.command = command; this.beanName = beanName; this.notification = new Notification(); } public Task(String name, String groupName, String beanName) { this(name, groupName, beanName, DEFAULT_ENABLED, DEFAULT_STOP_ON_ERROR, null, null); } public Task(String name) { this(name, null, null); } public Task() { this(null); } @Transient public Set<Long> getParentTaskIds(boolean recursive) { Set<Long> parentIds = new HashSet<Long>(); for (Task parent : this.getParentTasks()) { parentIds.add(parent.getId()); if (recursive) { parentIds.addAll(parent.getParentTaskIds(recursive)); } } return parentIds; } @Transient public Set<Long> getChildTaskIds(boolean recursive) { Set<Long> childIds = new HashSet<Long>(); for (Task child : this.getChildTasks()) { childIds.add(child.getId()); if (recursive) { childIds.addAll(child.getChildTaskIds(recursive)); } } return childIds; } @Id 
@GeneratedValue(strategy = GenerationType.IDENTITY) public long getId() { return id; } public void setId(long id) { this.id = id; } @Version public int getVersion() { return version; } public void setVersion(int version) { this.version = version; } @Basic @Column(nullable = false) public String getName() { return name; } public void setName(String name) { this.name = name; } @Column(length = 4000) public String getDescription() { return description; } public void setDescription(String description) { this.description = description; } @Basic @Column(nullable = true) public Integer getPriority() { return priority; } public void setPriority(Integer priority) { this.priority = priority; } @Basic public String getTimerSchedule() { return timerSchedule; } public void setTimerSchedule(String timerSchedule) { this.timerSchedule = timerSchedule; } @Basic public boolean isEnabled() { return enabled; } public void setEnabled(boolean enabled) { this.enabled = enabled; } /** * Indicates whether child tasks should run if an error occurs while running this tasks. Currently this is implemented * in an "AND" fashion - if a child has two parents with this flag set, and only one of them fails, the child will * still run (i.e. BOTH parents must fail to stop the child running). * * @return Whether child tasks should be run if an error occurs in this task. */ @Basic @Column(nullable = true) public boolean isStopOnError() { // to implement "OR" behaviour as opposed to the description above, when the first parent finishes, it would // have to set some sort of flag (a failed batchrun for the child?) to prevent the child running when the // second parent finishes, this is considerably trickier than AND return stopOnError; } public void setStopOnError(boolean stopOnError) { this.stopOnError = stopOnError; } /** * @return Whether this Task should throw an error if an attempt is made to start it while it is already running. 
*/ @Basic public boolean isErrorIfRunning() { return errorIfRunning; } /** * @param errorIfRunning Whether this Task should throw an error if an attempt is made to start it while it is already * running. */ public void setErrorIfRunning(boolean errorIfRunning) { this.errorIfRunning = errorIfRunning; } @Basic @Column(nullable = false) public String getGroupName() { return groupName; } public void setGroupName(String groupName) { this.groupName = groupName; } @Basic public String getCommand() { return command; } public void setCommand(String command) { this.command = command; } @Basic @Column(nullable = false) public String getBeanName() { return beanName; } public void setBeanName(String beanName) { this.beanName = beanName; } @ManyToMany(fetch = FetchType.EAGER, cascade = { CascadeType.PERSIST, CascadeType.MERGE }, mappedBy = "childTasks", targetEntity = fm.last.citrine.model.Task.class) public Set<Task> getParentTasks() { return parentTasks; } public void setParentTasks(Set<Task> dependingTasks) { this.parentTasks = dependingTasks; } @AccessType("field") @ManyToMany(fetch = FetchType.EAGER, cascade = { CascadeType.PERSIST, CascadeType.MERGE }, targetEntity = fm.last.citrine.model.Task.class) @JoinTable(name = TableConstants.TABLE_TASK_CHILD_TASK, joinColumns = { @JoinColumn(name = "task_id") }, inverseJoinColumns = { @JoinColumn(name = "child_task_id") }) public Set<Task> getChildTasks() { return childTasks; } public void setChildTasks(Set<Task> children) { childTasks.clear(); if (children != null) { for (Task child : children) { addChildTask(child); } } } public boolean addChildTask(Task childTask) { childTask.addParentTask(this); return this.childTasks.add(childTask); } // do not allow parent Tasks to be added directly, force task graph to be built from adding child // tasks to ensure links between two sets are always correct private boolean addParentTask(Task parentTask) { return this.parentTasks.add(parentTask); } public boolean removeChildTask(Task childTask) 
{ return this.childTasks.remove(childTask); } public boolean removeParentTask(Task parentTask) { return this.parentTasks.remove(parentTask); } /** * @return Whether this Task has at least one child Task or not. */ public boolean hasChild() { return this.childTasks.size() > 0; } /** * @return Whether this Task has at least one parent Task or not. */ public boolean hasParent() { return this.parentTasks.size() > 0; } /** * @return the notification */ public Notification getNotification() { return notification; } /** * @param notification the notification to set */ public void setNotification(Notification notification) { this.notification = notification; } /* * (non-Javadoc) * @see java.lang.Object#hashCode() */ @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + ((name == null) ? 0 : name.hashCode()); result = prime * result + ((groupName == null) ? 0 : groupName.hashCode()); result = prime * result + ((beanName == null) ? 0 : beanName.hashCode()); result = prime * result + ((command == null) ? 0 : command.hashCode()); result = prime * result + ((description == null) ? 0 : description.hashCode()); result = prime * result + (enabled ? 1231 : 1237); result = prime * result + (stopOnError ? 1231 : 1237); result = prime * result + (errorIfRunning ? 1231 : 1237); result = prime * result + ((priority == null) ? 0 : priority.hashCode()); result = prime * result + ((timerSchedule == null) ? 
0 : timerSchedule.hashCode()); return result; } /* * (non-Javadoc) * @see java.lang.Object#equals(java.lang.Object) */ @Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null) { return false; } if (!(obj instanceof Task)) { return false; } final Task other = (Task) obj; if (name == null) { if (other.name != null) { return false; } } else if (!name.equals(other.name)) { return false; } if (groupName == null) { if (other.groupName != null) { return false; } } else if (!groupName.equals(other.groupName)) { return false; } if (description == null) { if (other.description != null) { return false; } } else if (!description.equals(other.description)) { return false; } if (timerSchedule == null) { if (other.timerSchedule != null) { return false; } } else if (!timerSchedule.equals(other.timerSchedule)) { return false; } if (beanName == null) { if (other.beanName != null) { return false; } } else if (!beanName.equals(other.beanName)) { return false; } if (command == null) { if (other.command != null) { return false; } } else if (!command.equals(other.command)) { return false; } if (enabled != other.enabled) { return false; } if (stopOnError != other.stopOnError) { return false; } if (errorIfRunning != other.errorIfRunning) { return false; } if (priority == null) { if (other.priority != null) { return false; } } else if (!priority.equals(other.priority)) { return false; } return true; } public String toString() { return "id=" + id + ",name=" + name; } }
apache-2.0
mapr/incubator-tez
tez-runtime-library/src/test/java/org/apache/tez/dag/library/vertexmanager/TestShuffleVertexManager.java
53260
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.tez.dag.library.vertexmanager; import com.google.common.collect.Maps; import org.apache.hadoop.conf.Configuration; import org.apache.tez.common.ReflectionUtils; import org.apache.tez.common.TezUtils; import org.apache.tez.dag.api.EdgeManagerPlugin; import org.apache.tez.dag.api.EdgeManagerPluginContext; import org.apache.tez.dag.api.EdgeManagerPluginDescriptor; import org.apache.tez.dag.api.EdgeProperty; import org.apache.tez.dag.api.EdgeProperty.SchedulingType; import org.apache.tez.dag.api.InputDescriptor; import org.apache.tez.dag.api.OutputDescriptor; import org.apache.tez.dag.api.TezUncheckedException; import org.apache.tez.dag.api.UserPayload; import org.apache.tez.dag.api.VertexLocationHint; import org.apache.tez.dag.api.VertexManagerPluginContext; import org.apache.tez.dag.api.VertexManagerPluginContext.TaskWithLocationHint; import org.apache.tez.dag.api.VertexManagerPluginDescriptor; import org.apache.tez.dag.api.event.VertexState; import org.apache.tez.dag.api.event.VertexStateUpdate; import org.apache.tez.runtime.api.events.DataMovementEvent; import org.apache.tez.runtime.api.events.VertexManagerEvent; import 
org.apache.tez.runtime.library.shuffle.impl.ShuffleUserPayloads.VertexManagerEventPayloadProto; import org.junit.Assert; import org.junit.Test; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; import java.io.IOException; import java.nio.ByteBuffer; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Map.Entry; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.anyList; import static org.mockito.Mockito.anyMap; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class TestShuffleVertexManager { @SuppressWarnings({ "unchecked", "rawtypes" }) @Test(timeout = 5000) public void testShuffleVertexManagerAutoParallelism() throws Exception { Configuration conf = new Configuration(); conf.setBoolean( ShuffleVertexManager.TEZ_SHUFFLE_VERTEX_MANAGER_ENABLE_AUTO_PARALLEL, true); conf.setLong(ShuffleVertexManager.TEZ_SHUFFLE_VERTEX_MANAGER_DESIRED_TASK_INPUT_SIZE, 1000L); ShuffleVertexManager manager = null; HashMap<String, EdgeProperty> mockInputVertices = new HashMap<String, EdgeProperty>(); String mockSrcVertexId1 = "Vertex1"; EdgeProperty eProp1 = EdgeProperty.create( EdgeProperty.DataMovementType.SCATTER_GATHER, EdgeProperty.DataSourceType.PERSISTED, SchedulingType.SEQUENTIAL, OutputDescriptor.create("out"), InputDescriptor.create("in")); String mockSrcVertexId2 = "Vertex2"; EdgeProperty eProp2 = EdgeProperty.create( EdgeProperty.DataMovementType.SCATTER_GATHER, EdgeProperty.DataSourceType.PERSISTED, SchedulingType.SEQUENTIAL, OutputDescriptor.create("out"), InputDescriptor.create("in")); String mockSrcVertexId3 = "Vertex3"; EdgeProperty eProp3 = EdgeProperty.create( EdgeProperty.DataMovementType.BROADCAST, 
EdgeProperty.DataSourceType.PERSISTED, SchedulingType.SEQUENTIAL, OutputDescriptor.create("out"), InputDescriptor.create("in")); final String mockManagedVertexId = "Vertex4"; mockInputVertices.put(mockSrcVertexId1, eProp1); mockInputVertices.put(mockSrcVertexId2, eProp2); mockInputVertices.put(mockSrcVertexId3, eProp3); final VertexManagerPluginContext mockContext = mock(VertexManagerPluginContext.class); when(mockContext.getInputVertexEdgeProperties()).thenReturn(mockInputVertices); when(mockContext.getVertexName()).thenReturn(mockManagedVertexId); when(mockContext.getVertexNumTasks(mockManagedVertexId)).thenReturn(4); //Check via setters ShuffleVertexManager.ShuffleVertexManagerConfigBuilder configurer = ShuffleVertexManager .createConfigBuilder(null); VertexManagerPluginDescriptor pluginDesc = configurer.setAutoReduceParallelism(true) .setDesiredTaskInputSize(1000l) .setMinTaskParallelism(10).setSlowStartMaxSrcCompletionFraction(0.5f).build(); when(mockContext.getUserPayload()).thenReturn(pluginDesc.getUserPayload()); manager = ReflectionUtils.createClazzInstance(pluginDesc.getClassName(), new Class[]{VertexManagerPluginContext.class}, new Object[]{mockContext}); manager.initialize(); verify(mockContext, times(1)).vertexReconfigurationPlanned(); // Tez notified of reconfig Assert.assertTrue(manager.enableAutoParallelism == true); Assert.assertTrue(manager.desiredTaskInputDataSize == 1000l); Assert.assertTrue(manager.minTaskParallelism == 10); Assert.assertTrue(manager.slowStartMinSrcCompletionFraction == 0.25f); Assert.assertTrue(manager.slowStartMaxSrcCompletionFraction == 0.5f); configurer = ShuffleVertexManager.createConfigBuilder(null); pluginDesc = configurer.setAutoReduceParallelism(false).build(); when(mockContext.getUserPayload()).thenReturn(pluginDesc.getUserPayload()); manager = ReflectionUtils.createClazzInstance(pluginDesc.getClassName(), new Class[]{VertexManagerPluginContext.class}, new Object[]{mockContext}); manager.initialize(); 
verify(mockContext, times(1)).vertexReconfigurationPlanned(); // Tez not notified of reconfig Assert.assertTrue(manager.enableAutoParallelism == false); Assert.assertTrue(manager.desiredTaskInputDataSize == ShuffleVertexManager.TEZ_SHUFFLE_VERTEX_MANAGER_DESIRED_TASK_INPUT_SIZE_DEFAULT); Assert.assertTrue(manager.minTaskParallelism == 1); Assert.assertTrue(manager.slowStartMinSrcCompletionFraction == ShuffleVertexManager.TEZ_SHUFFLE_VERTEX_MANAGER_MIN_SRC_FRACTION_DEFAULT); Assert.assertTrue(manager.slowStartMaxSrcCompletionFraction == ShuffleVertexManager.TEZ_SHUFFLE_VERTEX_MANAGER_MAX_SRC_FRACTION_DEFAULT); final HashSet<Integer> scheduledTasks = new HashSet<Integer>(); doAnswer(new Answer() { public Object answer(InvocationOnMock invocation) { Object[] args = invocation.getArguments(); scheduledTasks.clear(); List<TaskWithLocationHint> tasks = (List<TaskWithLocationHint>)args[0]; for (TaskWithLocationHint task : tasks) { scheduledTasks.add(task.getTaskIndex()); } return null; }}).when(mockContext).scheduleVertexTasks(anyList()); final Map<String, EdgeManagerPlugin> newEdgeManagers = new HashMap<String, EdgeManagerPlugin>(); doAnswer(new Answer() { public Object answer(InvocationOnMock invocation) throws Exception { when(mockContext.getVertexNumTasks(mockManagedVertexId)).thenReturn(2); newEdgeManagers.clear(); for (Entry<String, EdgeManagerPluginDescriptor> entry : ((Map<String, EdgeManagerPluginDescriptor>)invocation.getArguments()[2]).entrySet()) { final UserPayload userPayload = entry.getValue().getUserPayload(); EdgeManagerPluginContext emContext = new EdgeManagerPluginContext() { @Override public UserPayload getUserPayload() { return userPayload == null ? 
null : userPayload; } @Override public String getSourceVertexName() { return null; } @Override public String getDestinationVertexName() { return null; } @Override public int getSourceVertexNumTasks() { return 2; } @Override public int getDestinationVertexNumTasks() { return 2; } }; EdgeManagerPlugin edgeManager = ReflectionUtils .createClazzInstance(entry.getValue().getClassName(), new Class[]{EdgeManagerPluginContext.class}, new Object[]{emContext}); edgeManager.initialize(); newEdgeManagers.put(entry.getKey(), edgeManager); } return null; }}).when(mockContext).setVertexParallelism(eq(2), any(VertexLocationHint.class), anyMap(), anyMap()); // check initialization manager = createManager(conf, mockContext, 0.1f, 0.1f); // Tez notified of reconfig verify(mockContext, times(2)).vertexReconfigurationPlanned(); Assert.assertTrue(manager.bipartiteSources == 2); // source vertices have 0 tasks. when(mockContext.getVertexNumTasks(mockSrcVertexId1)).thenReturn(0); when(mockContext.getVertexNumTasks(mockSrcVertexId2)).thenReturn(0); when(mockContext.getVertexNumTasks(mockSrcVertexId3)).thenReturn(1); // check waiting for notification before scheduling manager.onVertexStarted(null); Assert.assertFalse(manager.pendingTasks.isEmpty()); // source vertices have 0 tasks. so only 1 notification needed. 
triggers scheduling manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId3, VertexState.CONFIGURED)); Assert.assertTrue(manager.pendingTasks.isEmpty()); verify(mockContext, times(1)).doneReconfiguringVertex(); // reconfig done Assert.assertTrue(scheduledTasks.size() == 4); // all tasks scheduled scheduledTasks.clear(); // TODO TEZ-1714 locking verify(mockContext, times(1)).vertexManagerDone(); // notified after scheduling all tasks // check scheduling only after onVertexStarted manager = createManager(conf, mockContext, 0.1f, 0.1f); // Tez notified of reconfig verify(mockContext, times(3)).vertexReconfigurationPlanned(); Assert.assertTrue(manager.bipartiteSources == 2); // source vertices have 0 tasks. so only 1 notification needed. does not trigger scheduling manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId3, VertexState.CONFIGURED)); verify(mockContext, times(1)).doneReconfiguringVertex(); // reconfig done Assert.assertTrue(scheduledTasks.size() == 0); // no tasks scheduled manager.onVertexStarted(null); verify(mockContext, times(2)).doneReconfiguringVertex(); // reconfig done Assert.assertTrue(manager.pendingTasks.isEmpty()); Assert.assertTrue(scheduledTasks.size() == 4); // all tasks scheduled when(mockContext.getVertexNumTasks(mockSrcVertexId1)).thenReturn(2); when(mockContext.getVertexNumTasks(mockSrcVertexId2)).thenReturn(2); ByteBuffer payload = VertexManagerEventPayloadProto.newBuilder().setOutputSize(5000L).build().toByteString().asReadOnlyByteBuffer(); VertexManagerEvent vmEvent = VertexManagerEvent.create("Vertex", payload); // parallelism not change due to large data size manager = createManager(conf, mockContext, 0.1f, 0.1f); verify(mockContext, times(4)).vertexReconfigurationPlanned(); // Tez notified of reconfig manager.onVertexStarted(null); Assert.assertTrue(manager.pendingTasks.size() == 4); // no tasks scheduled Assert.assertTrue(manager.totalNumBipartiteSourceTasks == 4); 
manager.onVertexManagerEventReceived(vmEvent); manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId1, VertexState.CONFIGURED)); manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId2, VertexState.CONFIGURED)); manager.onSourceTaskCompleted(mockSrcVertexId1, new Integer(0)); verify(mockContext, times(0)).setVertexParallelism(anyInt(), any(VertexLocationHint.class), anyMap(), anyMap()); verify(mockContext, times(2)).doneReconfiguringVertex(); // trigger scheduling manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId3, VertexState.CONFIGURED)); verify(mockContext, times(0)).setVertexParallelism(anyInt(), any(VertexLocationHint.class), anyMap(), anyMap()); verify(mockContext, times(3)).doneReconfiguringVertex(); // reconfig done Assert.assertEquals(0, manager.pendingTasks.size()); // all tasks scheduled Assert.assertEquals(4, scheduledTasks.size()); // TODO TEZ-1714 locking verify(mockContext, times(2)).vertexManagerDone(); // notified after scheduling all tasks Assert.assertEquals(1, manager.numBipartiteSourceTasksCompleted); Assert.assertEquals(5000L, manager.completedSourceTasksOutputSize); /** * Test for TEZ-978 * Delay determining parallelism until enough data has been received. */ scheduledTasks.clear(); payload = VertexManagerEventPayloadProto.newBuilder().setOutputSize(1L).build().toByteString().asReadOnlyByteBuffer(); vmEvent = VertexManagerEvent.create("Vertex", payload); //min/max fraction of 0.01/0.75 would ensure that we hit determineParallelism code path on receiving first event itself. 
manager = createManager(conf, mockContext, 0.01f, 0.75f); manager.onVertexStarted(null); Assert.assertEquals(4, manager.pendingTasks.size()); // no tasks scheduled Assert.assertEquals(4, manager.totalNumBipartiteSourceTasks); Assert.assertEquals(0, manager.numBipartiteSourceTasksCompleted); //First task in src1 completed with small payload manager.onVertexManagerEventReceived(vmEvent); //small payload manager.onSourceTaskCompleted(mockSrcVertexId1, new Integer(0)); Assert.assertTrue(manager.determineParallelismAndApply() == false); Assert.assertEquals(4, manager.pendingTasks.size()); Assert.assertEquals(0, scheduledTasks.size()); // no tasks scheduled Assert.assertEquals(1, manager.numBipartiteSourceTasksCompleted); Assert.assertEquals(1, manager.numVertexManagerEventsReceived); Assert.assertEquals(1L, manager.completedSourceTasksOutputSize); //Second task in src1 completed with small payload manager.onVertexManagerEventReceived(vmEvent); //small payload manager.onSourceTaskCompleted(mockSrcVertexId1, new Integer(0)); //Still overall data gathered has not reached threshold; So, ensure parallelism can be determined later Assert.assertTrue(manager.determineParallelismAndApply() == false); Assert.assertEquals(4, manager.pendingTasks.size()); Assert.assertEquals(0, scheduledTasks.size()); // no tasks scheduled Assert.assertEquals(1, manager.numBipartiteSourceTasksCompleted); Assert.assertEquals(2, manager.numVertexManagerEventsReceived); Assert.assertEquals(2L, manager.completedSourceTasksOutputSize); //First task in src2 completed (with larger payload) to trigger determining parallelism payload = VertexManagerEventPayloadProto.newBuilder().setOutputSize(1200L).build().toByteString() .asReadOnlyByteBuffer(); vmEvent = VertexManagerEvent.create("Vertex", payload); manager.onVertexManagerEventReceived(vmEvent); Assert.assertTrue(manager.determineParallelismAndApply()); //ensure parallelism is determined verify(mockContext, times(1)).setVertexParallelism(eq(2), 
any(VertexLocationHint.class), anyMap(), anyMap()); manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId1, VertexState.CONFIGURED)); manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId2, VertexState.CONFIGURED)); manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId3, VertexState.CONFIGURED)); manager.onSourceTaskCompleted(mockSrcVertexId2, new Integer(0)); Assert.assertEquals(1, manager.pendingTasks.size()); Assert.assertEquals(1, scheduledTasks.size()); Assert.assertEquals(2, manager.numBipartiteSourceTasksCompleted); Assert.assertEquals(3, manager.numVertexManagerEventsReceived); Assert.assertEquals(1202L, manager.completedSourceTasksOutputSize); //Test for max fraction. Min fraction is just instruction to framework, but honor max fraction when(mockContext.getVertexNumTasks(mockSrcVertexId1)).thenReturn(20); when(mockContext.getVertexNumTasks(mockSrcVertexId2)).thenReturn(20); when(mockContext.getVertexNumTasks(mockManagedVertexId)).thenReturn(40); scheduledTasks.clear(); payload = VertexManagerEventPayloadProto.newBuilder().setOutputSize(100L).build().toByteString() .asReadOnlyByteBuffer(); vmEvent = VertexManagerEvent.create("Vertex", payload); //min/max fraction of 0.0/0.2 manager = createManager(conf, mockContext, 0.0f, 0.2f); manager.onVertexStarted(null); manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId1, VertexState.CONFIGURED)); manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId2, VertexState.CONFIGURED)); manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId3, VertexState.CONFIGURED)); Assert.assertEquals(40, manager.pendingTasks.size()); // no tasks scheduled Assert.assertEquals(40, manager.totalNumBipartiteSourceTasks); Assert.assertEquals(0, manager.numBipartiteSourceTasksCompleted); //send 7 events with payload size as 100 for(int i=0;i<7;i++) { manager.onVertexManagerEventReceived(vmEvent); //small payload manager.onSourceTaskCompleted(mockSrcVertexId1, new 
Integer(i)); //should not change parallelism verify(mockContext, times(0)).setVertexParallelism(eq(4), any(VertexLocationHint.class), anyMap(), anyMap()); } //send 8th event with payload size as 100 manager.onVertexManagerEventReceived(vmEvent); manager.onSourceTaskCompleted(mockSrcVertexId2, new Integer(8)); //Since max threshold (40 * 0.2 = 8) is met, vertex manager should determine parallelism verify(mockContext, times(1)).setVertexParallelism(eq(4), any(VertexLocationHint.class), anyMap(), anyMap()); //reset context for next test when(mockContext.getVertexNumTasks(mockSrcVertexId1)).thenReturn(2); when(mockContext.getVertexNumTasks(mockSrcVertexId2)).thenReturn(2); when(mockContext.getVertexNumTasks(mockManagedVertexId)).thenReturn(4); // parallelism changed due to small data size scheduledTasks.clear(); payload = VertexManagerEventPayloadProto.newBuilder().setOutputSize(500L).build().toByteString().asReadOnlyByteBuffer(); vmEvent = VertexManagerEvent.create("Vertex", payload); manager = createManager(conf, mockContext, 0.5f, 0.5f); manager.onVertexStarted(null); manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId1, VertexState.CONFIGURED)); manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId2, VertexState.CONFIGURED)); manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId3, VertexState.CONFIGURED)); Assert.assertEquals(4, manager.pendingTasks.size()); // no tasks scheduled Assert.assertEquals(4, manager.totalNumBipartiteSourceTasks); // task completion from non-bipartite stage does nothing manager.onSourceTaskCompleted(mockSrcVertexId3, new Integer(0)); Assert.assertEquals(4, manager.pendingTasks.size()); // no tasks scheduled Assert.assertEquals(4, manager.totalNumBipartiteSourceTasks); Assert.assertEquals(0, manager.numBipartiteSourceTasksCompleted); manager.onVertexManagerEventReceived(vmEvent); manager.onSourceTaskCompleted(mockSrcVertexId1, new Integer(0)); Assert.assertEquals(4, manager.pendingTasks.size()); 
Assert.assertEquals(0, scheduledTasks.size()); // no tasks scheduled Assert.assertEquals(1, manager.numBipartiteSourceTasksCompleted); Assert.assertEquals(1, manager.numVertexManagerEventsReceived); Assert.assertEquals(500L, manager.completedSourceTasksOutputSize); // ignore duplicate completion manager.onSourceTaskCompleted(mockSrcVertexId1, new Integer(0)); Assert.assertEquals(4, manager.pendingTasks.size()); Assert.assertEquals(0, scheduledTasks.size()); // no tasks scheduled Assert.assertEquals(1, manager.numBipartiteSourceTasksCompleted); Assert.assertEquals(500L, manager.completedSourceTasksOutputSize); manager.onVertexManagerEventReceived(vmEvent); manager.onSourceTaskCompleted(mockSrcVertexId2, new Integer(1)); // managedVertex tasks reduced verify(mockContext, times(2)).setVertexParallelism(eq(2), any(VertexLocationHint.class), anyMap(), anyMap()); Assert.assertEquals(2, newEdgeManagers.size()); // TODO improve tests for parallelism Assert.assertEquals(0, manager.pendingTasks.size()); // all tasks scheduled Assert.assertEquals(2, scheduledTasks.size()); Assert.assertTrue(scheduledTasks.contains(new Integer(0))); Assert.assertTrue(scheduledTasks.contains(new Integer(1))); Assert.assertEquals(2, manager.numBipartiteSourceTasksCompleted); Assert.assertEquals(2, manager.numVertexManagerEventsReceived); Assert.assertEquals(1000L, manager.completedSourceTasksOutputSize); // more completions dont cause recalculation of parallelism manager.onSourceTaskCompleted(mockSrcVertexId2, new Integer(0)); verify(mockContext, times(2)).setVertexParallelism(eq(2), any(VertexLocationHint.class), anyMap(), anyMap()); Assert.assertEquals(2, newEdgeManagers.size()); EdgeManagerPlugin edgeManager = newEdgeManagers.values().iterator().next(); Map<Integer, List<Integer>> targets = Maps.newHashMap(); DataMovementEvent dmEvent = DataMovementEvent.create(1, ByteBuffer.wrap(new byte[0])); // 4 source task outputs - same as original number of partitions Assert.assertEquals(4, 
edgeManager.getNumSourceTaskPhysicalOutputs(0)); // 4 destination task inputs - 2 source tasks + 2 merged partitions Assert.assertEquals(4, edgeManager.getNumDestinationTaskPhysicalInputs(0)); edgeManager.routeDataMovementEventToDestination(dmEvent, 1, dmEvent.getSourceIndex(), targets); Assert.assertEquals(1, targets.size()); Map.Entry<Integer, List<Integer>> e = targets.entrySet().iterator().next(); Assert.assertEquals(0, e.getKey().intValue()); Assert.assertEquals(1, e.getValue().size()); Assert.assertEquals(3, e.getValue().get(0).intValue()); targets.clear(); dmEvent = DataMovementEvent.create(2, ByteBuffer.wrap(new byte[0])); edgeManager.routeDataMovementEventToDestination(dmEvent, 0, dmEvent.getSourceIndex(), targets); Assert.assertEquals(1, targets.size()); e = targets.entrySet().iterator().next(); Assert.assertEquals(1, e.getKey().intValue()); Assert.assertEquals(1, e.getValue().size()); Assert.assertEquals(0, e.getValue().get(0).intValue()); targets.clear(); edgeManager.routeInputSourceTaskFailedEventToDestination(2, targets); Assert.assertEquals(2, targets.size()); for (Map.Entry<Integer, List<Integer>> entry : targets.entrySet()) { Assert.assertTrue(entry.getKey().intValue() == 0 || entry.getKey().intValue() == 1); Assert.assertEquals(2, entry.getValue().size()); Assert.assertEquals(4, entry.getValue().get(0).intValue()); Assert.assertEquals(5, entry.getValue().get(1).intValue()); } } @SuppressWarnings({ "unchecked", "rawtypes" }) @Test(timeout = 5000) public void testShuffleVertexManagerSlowStart() { Configuration conf = new Configuration(); ShuffleVertexManager manager = null; HashMap<String, EdgeProperty> mockInputVertices = new HashMap<String, EdgeProperty>(); String mockSrcVertexId1 = "Vertex1"; EdgeProperty eProp1 = EdgeProperty.create( EdgeProperty.DataMovementType.SCATTER_GATHER, EdgeProperty.DataSourceType.PERSISTED, SchedulingType.SEQUENTIAL, OutputDescriptor.create("out"), InputDescriptor.create("in")); String mockSrcVertexId2 = "Vertex2"; 
// (slow-start test, continued) Second scatter-gather (bipartite) source edge.
EdgeProperty eProp2 = EdgeProperty.create(
    EdgeProperty.DataMovementType.SCATTER_GATHER,
    EdgeProperty.DataSourceType.PERSISTED,
    SchedulingType.SEQUENTIAL,
    OutputDescriptor.create("out"),
    InputDescriptor.create("in"));
// Broadcast edge: not bipartite, so it must not count towards slow-start progress.
String mockSrcVertexId3 = "Vertex3";
EdgeProperty eProp3 = EdgeProperty.create(
    EdgeProperty.DataMovementType.BROADCAST,
    EdgeProperty.DataSourceType.PERSISTED,
    SchedulingType.SEQUENTIAL,
    OutputDescriptor.create("out"),
    InputDescriptor.create("in"));
String mockManagedVertexId = "Vertex4";
// Mocked plugin context for the managed vertex (3 tasks to schedule).
VertexManagerPluginContext mockContext = mock(VertexManagerPluginContext.class);
when(mockContext.getInputVertexEdgeProperties()).thenReturn(mockInputVertices);
when(mockContext.getVertexName()).thenReturn(mockManagedVertexId);
when(mockContext.getVertexNumTasks(mockManagedVertexId)).thenReturn(3);
// fail if there is no bipartite src vertex
mockInputVertices.put(mockSrcVertexId3, eProp3);
try {
  manager = createManager(conf, mockContext, 0.1f, 0.1f);
  Assert.assertFalse(true); // createManager must have thrown above
} catch (TezUncheckedException e) {
  Assert.assertTrue(e.getMessage().contains(
      "Atleast 1 bipartite source should exist"));
}
mockInputVertices.put(mockSrcVertexId1, eProp1);
mockInputVertices.put(mockSrcVertexId2, eProp2);
// check initialization
manager = createManager(conf, mockContext, 0.1f, 0.1f);
Assert.assertTrue(manager.bipartiteSources == 2);
// Capture every scheduling request. Note the Answer clears the set first, so
// scheduledTasks reflects only the most recent scheduleVertexTasks() call.
final HashSet<Integer> scheduledTasks = new HashSet<Integer>();
doAnswer(new Answer() {
  public Object answer(InvocationOnMock invocation) {
    Object[] args = invocation.getArguments();
    scheduledTasks.clear();
    List<TaskWithLocationHint> tasks = (List<TaskWithLocationHint>)args[0];
    for (TaskWithLocationHint task : tasks) {
      scheduledTasks.add(task.getTaskIndex());
    }
    return null;
  }}).when(mockContext).scheduleVertexTasks(anyList());
// source vertices have 0 tasks. immediate start of all managed tasks
when(mockContext.getVertexNumTasks(mockSrcVertexId1)).thenReturn(0);
when(mockContext.getVertexNumTasks(mockSrcVertexId2)).thenReturn(0);
manager.onVertexStarted(null);
Assert.assertTrue(manager.pendingTasks.isEmpty());
Assert.assertTrue(scheduledTasks.size() == 3); // all tasks scheduled
when(mockContext.getVertexNumTasks(mockSrcVertexId1)).thenReturn(2);
when(mockContext.getVertexNumTasks(mockSrcVertexId2)).thenReturn(2);
try {
  // source vertex have some tasks. min < 0.
  manager = createManager(conf, mockContext, -0.1f, 0);
  Assert.assertTrue(false); // should not come here
} catch (IllegalArgumentException e) {
  Assert.assertTrue(e.getMessage().contains(
      "Invalid values for slowStartMinSrcCompletionFraction"));
}
try {
  // source vertex have some tasks. min > max
  manager = createManager(conf, mockContext, 0.5f, 0.3f);
  Assert.assertTrue(false); // should not come here
} catch (IllegalArgumentException e) {
  Assert.assertTrue(e.getMessage().contains(
      "Invalid values for slowStartMinSrcCompletionFraction"));
}
// source vertex have some tasks. min, max == 0
manager = createManager(conf, mockContext, 0, 0);
manager.onVertexStarted(null);
Assert.assertTrue(manager.totalNumBipartiteSourceTasks == 4);
Assert.assertTrue(manager.totalTasksToSchedule == 3);
Assert.assertTrue(manager.numBipartiteSourceTasksCompleted == 0);
// all source vertices need to be configured
manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId1, VertexState.CONFIGURED));
manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId2, VertexState.CONFIGURED));
manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId3, VertexState.CONFIGURED));
Assert.assertTrue(manager.pendingTasks.isEmpty());
Assert.assertTrue(scheduledTasks.size() == 3); // all tasks scheduled
// min, max > 0 and min == max
manager = createManager(conf, mockContext, 0.25f, 0.25f);
manager.onVertexStarted(null);
manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId1, VertexState.CONFIGURED));
manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId2, VertexState.CONFIGURED));
manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId3, VertexState.CONFIGURED));
Assert.assertTrue(manager.pendingTasks.size() == 3); // no tasks scheduled
Assert.assertTrue(manager.totalNumBipartiteSourceTasks == 4);
// task completion from non-bipartite stage does nothing
manager.onSourceTaskCompleted(mockSrcVertexId3, new Integer(0));
Assert.assertTrue(manager.pendingTasks.size() == 3); // no tasks scheduled
Assert.assertTrue(manager.totalNumBipartiteSourceTasks == 4);
Assert.assertTrue(manager.numBipartiteSourceTasksCompleted == 0);
// one bipartite completion (1 of 4 = 0.25) meets min == max -> everything starts
manager.onSourceTaskCompleted(mockSrcVertexId1, new Integer(0));
Assert.assertTrue(manager.pendingTasks.isEmpty());
Assert.assertTrue(scheduledTasks.size() == 3); // all tasks scheduled
Assert.assertTrue(manager.numBipartiteSourceTasksCompleted == 1);
// min, max > 0 and min == max == absolute max 1.0
manager = createManager(conf, mockContext, 1.0f, 1.0f);
manager.onVertexStarted(null);
manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId1, VertexState.CONFIGURED));
manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId2, VertexState.CONFIGURED));
manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId3, VertexState.CONFIGURED));
Assert.assertTrue(manager.pendingTasks.size() == 3); // no tasks scheduled
Assert.assertTrue(manager.totalNumBipartiteSourceTasks == 4);
// task completion from non-bipartite stage does nothing
manager.onSourceTaskCompleted(mockSrcVertexId3, new Integer(0));
Assert.assertTrue(manager.pendingTasks.size() == 3); // no tasks scheduled
Assert.assertTrue(manager.totalNumBipartiteSourceTasks == 4);
Assert.assertTrue(manager.numBipartiteSourceTasksCompleted == 0);
// with min == max == 1.0 nothing may start until all 4 bipartite tasks finish
manager.onSourceTaskCompleted(mockSrcVertexId1, new Integer(0));
Assert.assertTrue(manager.pendingTasks.size() == 3);
Assert.assertTrue(manager.numBipartiteSourceTasksCompleted == 1);
manager.onSourceTaskCompleted(mockSrcVertexId1, new Integer(1));
Assert.assertTrue(manager.pendingTasks.size() == 3);
Assert.assertTrue(manager.numBipartiteSourceTasksCompleted == 2);
manager.onSourceTaskCompleted(mockSrcVertexId2, new Integer(0));
Assert.assertTrue(manager.pendingTasks.size() == 3);
Assert.assertTrue(manager.numBipartiteSourceTasksCompleted == 3);
manager.onSourceTaskCompleted(mockSrcVertexId2, new Integer(1));
Assert.assertTrue(manager.pendingTasks.isEmpty());
Assert.assertTrue(scheduledTasks.size() == 3); // all tasks scheduled
Assert.assertTrue(manager.numBipartiteSourceTasksCompleted == 4);
// min, max > 0 and min == max
manager = createManager(conf, mockContext, 1.0f, 1.0f);
manager.onVertexStarted(null);
manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId1, VertexState.CONFIGURED));
manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId2, VertexState.CONFIGURED));
manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId3, VertexState.CONFIGURED));
Assert.assertTrue(manager.pendingTasks.size() == 3); // no tasks scheduled
Assert.assertTrue(manager.totalNumBipartiteSourceTasks == 4);
// task completion from non-bipartite stage does nothing
manager.onSourceTaskCompleted(mockSrcVertexId3, new Integer(0));
Assert.assertTrue(manager.pendingTasks.size() == 3); // no tasks scheduled
Assert.assertTrue(manager.totalNumBipartiteSourceTasks == 4);
Assert.assertTrue(manager.numBipartiteSourceTasksCompleted == 0);
manager.onSourceTaskCompleted(mockSrcVertexId1, new Integer(0));
Assert.assertTrue(manager.pendingTasks.size() == 3);
Assert.assertTrue(manager.numBipartiteSourceTasksCompleted == 1);
manager.onSourceTaskCompleted(mockSrcVertexId1, new Integer(1));
Assert.assertTrue(manager.pendingTasks.size() == 3);
Assert.assertTrue(manager.numBipartiteSourceTasksCompleted == 2);
manager.onSourceTaskCompleted(mockSrcVertexId2, new Integer(0));
Assert.assertTrue(manager.pendingTasks.size() == 3);
Assert.assertTrue(manager.numBipartiteSourceTasksCompleted == 3);
manager.onSourceTaskCompleted(mockSrcVertexId2, new Integer(1));
Assert.assertTrue(manager.pendingTasks.isEmpty());
Assert.assertTrue(scheduledTasks.size() == 3); // all tasks scheduled
Assert.assertTrue(manager.numBipartiteSourceTasksCompleted == 4);
// min, max > and min < max
manager = createManager(conf, mockContext, 0.25f, 0.75f);
manager.onVertexStarted(null);
manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId1, VertexState.CONFIGURED));
manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId2, VertexState.CONFIGURED));
manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId3, VertexState.CONFIGURED));
Assert.assertTrue(manager.pendingTasks.size() == 3); // no tasks scheduled
Assert.assertTrue(manager.totalNumBipartiteSourceTasks == 4);
manager.onSourceTaskCompleted(mockSrcVertexId1, new Integer(0));
manager.onSourceTaskCompleted(mockSrcVertexId2, new Integer(1));
Assert.assertTrue(manager.pendingTasks.size() == 2);
Assert.assertTrue(scheduledTasks.size() == 1); // 1 task scheduled
Assert.assertTrue(manager.numBipartiteSourceTasksCompleted == 2);
// completion of same task again should not get counted
manager.onSourceTaskCompleted(mockSrcVertexId2, new Integer(1));
Assert.assertTrue(manager.pendingTasks.size() == 2);
Assert.assertTrue(scheduledTasks.size() == 1); // 1 task scheduled
Assert.assertTrue(manager.numBipartiteSourceTasksCompleted == 2);
manager.onSourceTaskCompleted(mockSrcVertexId2, new Integer(0));
Assert.assertTrue(manager.pendingTasks.size() == 0);
Assert.assertTrue(scheduledTasks.size() == 2); // 2 tasks scheduled
Assert.assertTrue(manager.numBipartiteSourceTasksCompleted == 3);
scheduledTasks.clear();
manager.onSourceTaskCompleted(mockSrcVertexId1, new Integer(1));
// we are done. no action
Assert.assertTrue(manager.pendingTasks.size() == 0);
Assert.assertTrue(scheduledTasks.size() == 0); // no task scheduled
Assert.assertTrue(manager.numBipartiteSourceTasksCompleted == 4);
// min, max > and min < max
manager = createManager(conf, mockContext, 0.25f, 1.0f);
manager.onVertexStarted(null);
manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId1, VertexState.CONFIGURED));
manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId2, VertexState.CONFIGURED));
manager.onVertexStateUpdated(new VertexStateUpdate(mockSrcVertexId3, VertexState.CONFIGURED));
Assert.assertTrue(manager.pendingTasks.size() == 3); // no tasks scheduled
Assert.assertTrue(manager.totalNumBipartiteSourceTasks == 4);
manager.onSourceTaskCompleted(mockSrcVertexId1, new Integer(0));
manager.onSourceTaskCompleted(mockSrcVertexId2, new Integer(1));
Assert.assertTrue(manager.pendingTasks.size() == 2);
Assert.assertTrue(scheduledTasks.size() == 1); // 1 task scheduled
Assert.assertTrue(manager.numBipartiteSourceTasksCompleted == 2);
manager.onSourceTaskCompleted(mockSrcVertexId2, new Integer(0));
Assert.assertTrue(manager.pendingTasks.size() == 1);
Assert.assertTrue(scheduledTasks.size() == 1); // 1 task scheduled
Assert.assertTrue(manager.numBipartiteSourceTasksCompleted == 3);
manager.onSourceTaskCompleted(mockSrcVertexId1, new Integer(1));
Assert.assertTrue(manager.pendingTasks.size() == 0);
Assert.assertTrue(scheduledTasks.size() == 1); // no task scheduled
Assert.assertTrue(manager.numBipartiteSourceTasksCompleted == 4);
}

/**
 * Tasks should be scheduled only when all source vertices are configured completely
 */
@Test(timeout = 5000)
public void test_Tez1649_with_scatter_gather_edges() {
  Configuration conf = new Configuration();
  conf.setBoolean(
      ShuffleVertexManager.TEZ_SHUFFLE_VERTEX_MANAGER_ENABLE_AUTO_PARALLEL, true);
  conf.setLong(ShuffleVertexManager.TEZ_SHUFFLE_VERTEX_MANAGER_DESIRED_TASK_INPUT_SIZE, 1000L);
  ShuffleVertexManager manager = null;
  // Three scatter-gather (bipartite) sources feeding R2.
  HashMap<String, EdgeProperty> mockInputVertices_R2 = new HashMap<String, EdgeProperty>();
  String r1 = "R1";
  EdgeProperty eProp1 = EdgeProperty.create(
      EdgeProperty.DataMovementType.SCATTER_GATHER,
      EdgeProperty.DataSourceType.PERSISTED,
      SchedulingType.SEQUENTIAL,
      OutputDescriptor.create("out"),
      InputDescriptor.create("in"));
  String m2 = "M2";
  EdgeProperty eProp2 = EdgeProperty.create(
      EdgeProperty.DataMovementType.SCATTER_GATHER,
      EdgeProperty.DataSourceType.PERSISTED,
      SchedulingType.SEQUENTIAL,
      OutputDescriptor.create("out"),
      InputDescriptor.create("in"));
  String m3 = "M3";
  EdgeProperty eProp3 = EdgeProperty.create(
      EdgeProperty.DataMovementType.SCATTER_GATHER,
      EdgeProperty.DataSourceType.PERSISTED,
      SchedulingType.SEQUENTIAL,
      OutputDescriptor.create("out"),
      InputDescriptor.create("in"));
  final String mockManagedVertexId_R2 = "R2";
  mockInputVertices_R2.put(r1, eProp1);
  mockInputVertices_R2.put(m2, eProp2);
  mockInputVertices_R2.put(m3, eProp3);
  final VertexManagerPluginContext mockContext_R2 = mock(VertexManagerPluginContext.class);
  when(mockContext_R2.getInputVertexEdgeProperties()).thenReturn(mockInputVertices_R2);
  when(mockContext_R2.getVertexName()).thenReturn(mockManagedVertexId_R2);
when(mockContext_R2.getVertexNumTasks(mockManagedVertexId_R2)).thenReturn(3);
when(mockContext_R2.getVertexNumTasks(r1)).thenReturn(3);
when(mockContext_R2.getVertexNumTasks(m2)).thenReturn(3);
when(mockContext_R2.getVertexNumTasks(m3)).thenReturn(3);
// Captures the EdgeManagerPlugins installed when parallelism is reduced to 2.
final Map<String, EdgeManagerPlugin> edgeManagerR2 = new HashMap<String, EdgeManagerPlugin>();
doAnswer(new Answer() {
  public Object answer(InvocationOnMock invocation) throws Exception {
    // After setVertexParallelism(2, ...) the managed vertex reports 2 tasks.
    when(mockContext_R2.getVertexNumTasks(mockManagedVertexId_R2)).thenReturn(2);
    edgeManagerR2.clear();
    // Argument 2 of setVertexParallelism is the per-edge descriptor map.
    for (Entry<String, EdgeManagerPluginDescriptor> entry :
        ((Map<String, EdgeManagerPluginDescriptor>)invocation.getArguments()[2]).entrySet()) {
      final UserPayload userPayload = entry.getValue().getUserPayload();
      // Minimal context: only the payload and the 2/2 task counts matter here.
      EdgeManagerPluginContext emContext = new EdgeManagerPluginContext() {
        @Override
        public UserPayload getUserPayload() {
          return userPayload == null ? null : userPayload;
        }
        @Override
        public String getSourceVertexName() {
          return null;
        }
        @Override
        public String getDestinationVertexName() {
          return null;
        }
        @Override
        public int getSourceVertexNumTasks() {
          return 2;
        }
        @Override
        public int getDestinationVertexNumTasks() {
          return 2;
        }
      };
      EdgeManagerPlugin edgeManager = ReflectionUtils
          .createClazzInstance(entry.getValue().getClassName(),
              new Class[]{EdgeManagerPluginContext.class}, new Object[]{emContext});
      edgeManager.initialize();
      edgeManagerR2.put(entry.getKey(), edgeManager);
    }
    return null;
  }}).when(mockContext_R2).setVertexParallelism(eq(2),
      any(VertexLocationHint.class), anyMap(), anyMap());
ByteBuffer payload = VertexManagerEventPayloadProto.newBuilder().setOutputSize(50L).build().toByteString().asReadOnlyByteBuffer();
VertexManagerEvent vmEvent = VertexManagerEvent.create("Vertex", payload);
// check initialization
manager = createManager(conf, mockContext_R2, 0.001f, 0.001f);
Assert.assertTrue(manager.bipartiteSources == 3);
// Records only the most recent scheduleVertexTasks() request (cleared each call).
final HashSet<Integer> scheduledTasks = new HashSet<Integer>();
doAnswer(new Answer() {
  public Object answer(InvocationOnMock invocation) {
    Object[] args = invocation.getArguments();
    scheduledTasks.clear();
    List<TaskWithLocationHint> tasks = (List<TaskWithLocationHint>)args[0];
    for (TaskWithLocationHint task : tasks) {
      scheduledTasks.add(task.getTaskIndex());
    }
    return null;
  }}).when(mockContext_R2).scheduleVertexTasks(anyList());
manager.onVertexStarted(null);
manager.onVertexStateUpdated(new VertexStateUpdate(m2, VertexState.CONFIGURED));
manager.onVertexStateUpdated(new VertexStateUpdate(m3, VertexState.CONFIGURED));
manager.onVertexManagerEventReceived(vmEvent);
Assert.assertEquals(3, manager.pendingTasks.size()); // no tasks scheduled
Assert.assertEquals(9, manager.totalNumBipartiteSourceTasks);
Assert.assertEquals(0, manager.numBipartiteSourceTasksCompleted);
Assert.assertTrue(manager.pendingTasks.size() == 3); // no tasks scheduled
Assert.assertTrue(manager.totalNumBipartiteSourceTasks == 9);
//Send events for all tasks of m3.
manager.onSourceTaskCompleted(m3, new Integer(0));
manager.onSourceTaskCompleted(m3, new Integer(1));
manager.onSourceTaskCompleted(m3, new Integer(2));
Assert.assertTrue(manager.pendingTasks.size() == 3); // no tasks scheduled
Assert.assertTrue(manager.totalNumBipartiteSourceTasks == 9);
//Send an event for m2. But still we need to wait for at least 1 event from r1.
manager.onSourceTaskCompleted(m2, new Integer(0));
Assert.assertTrue(manager.pendingTasks.size() == 3); // no tasks scheduled
Assert.assertTrue(manager.totalNumBipartiteSourceTasks == 9);
//Ensure that setVertexParallelism is not called for R2.
verify(mockContext_R2, times(0)).setVertexParallelism(anyInt(), any(VertexLocationHint.class), anyMap(), anyMap()); // complete configuration of r1 triggers the scheduling manager.onVertexStateUpdated(new VertexStateUpdate(r1, VertexState.CONFIGURED)); verify(mockContext_R2, times(1)).setVertexParallelism(eq(1), any(VertexLocationHint.class), anyMap(), anyMap()); Assert.assertTrue(manager.pendingTasks.size() == 0); // all tasks scheduled Assert.assertTrue(scheduledTasks.size() == 3); //try with zero task vertices scheduledTasks.clear(); when(mockContext_R2.getInputVertexEdgeProperties()).thenReturn(mockInputVertices_R2); when(mockContext_R2.getVertexName()).thenReturn(mockManagedVertexId_R2); when(mockContext_R2.getVertexNumTasks(mockManagedVertexId_R2)).thenReturn(3); when(mockContext_R2.getVertexNumTasks(r1)).thenReturn(0); when(mockContext_R2.getVertexNumTasks(m2)).thenReturn(0); when(mockContext_R2.getVertexNumTasks(m3)).thenReturn(3); manager = createManager(conf, mockContext_R2, 0.001f, 0.001f); manager.onVertexStarted(null); Assert.assertEquals(3, manager.pendingTasks.size()); // no tasks scheduled Assert.assertEquals(3, manager.totalNumBipartiteSourceTasks); Assert.assertEquals(0, manager.numBipartiteSourceTasksCompleted); Assert.assertTrue(manager.pendingTasks.size() == 3); // no tasks scheduled Assert.assertTrue(manager.totalNumBipartiteSourceTasks == 3); // Only need completed configuration notification from m3 manager.onVertexStateUpdated(new VertexStateUpdate(m3, VertexState.CONFIGURED)); manager.onSourceTaskCompleted(m3, new Integer(0)); Assert.assertTrue(manager.pendingTasks.size() == 0); // all tasks scheduled Assert.assertTrue(scheduledTasks.size() == 3); } @Test(timeout = 5000) public void test_Tez1649_with_mixed_edges() { Configuration conf = new Configuration(); conf.setBoolean( ShuffleVertexManager.TEZ_SHUFFLE_VERTEX_MANAGER_ENABLE_AUTO_PARALLEL, true); conf.setLong(ShuffleVertexManager.TEZ_SHUFFLE_VERTEX_MANAGER_DESIRED_TASK_INPUT_SIZE, 
1000L); ShuffleVertexManager manager = null; HashMap<String, EdgeProperty> mockInputVertices = new HashMap<String, EdgeProperty>(); String r1 = "R1"; EdgeProperty eProp1 = EdgeProperty.create( EdgeProperty.DataMovementType.SCATTER_GATHER, EdgeProperty.DataSourceType.PERSISTED, SchedulingType.SEQUENTIAL, OutputDescriptor.create("out"), InputDescriptor.create("in")); String m2 = "M2"; EdgeProperty eProp2 = EdgeProperty.create( EdgeProperty.DataMovementType.BROADCAST, EdgeProperty.DataSourceType.PERSISTED, SchedulingType.SEQUENTIAL, OutputDescriptor.create("out"), InputDescriptor.create("in")); String m3 = "M3"; EdgeProperty eProp3 = EdgeProperty.create( EdgeProperty.DataMovementType.BROADCAST, EdgeProperty.DataSourceType.PERSISTED, SchedulingType.SEQUENTIAL, OutputDescriptor.create("out"), InputDescriptor.create("in")); final String mockManagedVertexId = "R2"; mockInputVertices.put(r1, eProp1); mockInputVertices.put(m2, eProp2); mockInputVertices.put(m3, eProp3); VertexManagerPluginContext mockContext = mock(VertexManagerPluginContext.class); when(mockContext.getInputVertexEdgeProperties()).thenReturn(mockInputVertices); when(mockContext.getVertexName()).thenReturn(mockManagedVertexId); when(mockContext.getVertexNumTasks(mockManagedVertexId)).thenReturn(3); when(mockContext.getVertexNumTasks(r1)).thenReturn(3); when(mockContext.getVertexNumTasks(m2)).thenReturn(3); when(mockContext.getVertexNumTasks(m3)).thenReturn(3); // check initialization manager = createManager(conf, mockContext, 0.001f, 0.001f); Assert.assertTrue(manager.bipartiteSources == 1); final HashSet<Integer> scheduledTasks = new HashSet<Integer>(); doAnswer(new Answer() { public Object answer(InvocationOnMock invocation) { Object[] args = invocation.getArguments(); scheduledTasks.clear(); List<TaskWithLocationHint> tasks = (List<TaskWithLocationHint>)args[0]; for (TaskWithLocationHint task : tasks) { scheduledTasks.add(task.getTaskIndex()); } return null; 
}}).when(mockContext).scheduleVertexTasks(anyList()); manager.onVertexStarted(null); manager.onVertexStateUpdated(new VertexStateUpdate(r1, VertexState.CONFIGURED)); manager.onVertexStateUpdated(new VertexStateUpdate(m2, VertexState.CONFIGURED)); Assert.assertEquals(3, manager.pendingTasks.size()); // no tasks scheduled Assert.assertEquals(3, manager.totalNumBipartiteSourceTasks); Assert.assertEquals(0, manager.numBipartiteSourceTasksCompleted); //Send events for 2 tasks of r1. manager.onSourceTaskCompleted(r1, new Integer(0)); manager.onSourceTaskCompleted(r1, new Integer(1)); Assert.assertTrue(manager.pendingTasks.size() == 3); // no tasks scheduled Assert.assertTrue(manager.totalNumBipartiteSourceTasks == 3); //Send an event for m2. manager.onSourceTaskCompleted(m2, new Integer(0)); Assert.assertTrue(manager.pendingTasks.size() == 3); // no tasks scheduled Assert.assertTrue(manager.totalNumBipartiteSourceTasks == 3); //Send an event for m2. manager.onVertexStateUpdated(new VertexStateUpdate(m3, VertexState.CONFIGURED)); Assert.assertTrue(manager.pendingTasks.size() == 0); // all tasks scheduled Assert.assertTrue(scheduledTasks.size() == 3); //Scenario when numBipartiteSourceTasksCompleted == totalNumBipartiteSourceTasks. 
//Still, wait for a configuration to be completed from other edges scheduledTasks.clear(); manager = createManager(conf, mockContext, 0.001f, 0.001f); manager.onVertexStarted(null); manager.onVertexStateUpdated(new VertexStateUpdate(r1, VertexState.CONFIGURED)); when(mockContext.getInputVertexEdgeProperties()).thenReturn(mockInputVertices); when(mockContext.getVertexName()).thenReturn(mockManagedVertexId); when(mockContext.getVertexNumTasks(mockManagedVertexId)).thenReturn(3); when(mockContext.getVertexNumTasks(r1)).thenReturn(3); when(mockContext.getVertexNumTasks(m2)).thenReturn(3); when(mockContext.getVertexNumTasks(m3)).thenReturn(3); Assert.assertTrue(manager.pendingTasks.size() == 3); // no tasks scheduled Assert.assertTrue(manager.totalNumBipartiteSourceTasks == 3); manager.onSourceTaskCompleted(r1, new Integer(0)); manager.onSourceTaskCompleted(r1, new Integer(1)); manager.onSourceTaskCompleted(r1, new Integer(2)); //Tasks from non-scatter edges of m2 and m3 are not complete. Assert.assertTrue(manager.pendingTasks.size() == 3); // no tasks scheduled manager.onVertexStateUpdated(new VertexStateUpdate(m2, VertexState.CONFIGURED)); manager.onVertexStateUpdated(new VertexStateUpdate(m3, VertexState.CONFIGURED)); //Got an event from other edges. 
Schedule all Assert.assertTrue(manager.pendingTasks.size() == 0); // all tasks scheduled Assert.assertTrue(scheduledTasks.size() == 3); //try with a zero task vertex (with non-scatter-gather edges) scheduledTasks.clear(); manager = createManager(conf, mockContext, 0.001f, 0.001f); manager.onVertexStarted(null); when(mockContext.getInputVertexEdgeProperties()).thenReturn(mockInputVertices); when(mockContext.getVertexName()).thenReturn(mockManagedVertexId); when(mockContext.getVertexNumTasks(mockManagedVertexId)).thenReturn(3); when(mockContext.getVertexNumTasks(r1)).thenReturn(3); //scatter gather when(mockContext.getVertexNumTasks(m2)).thenReturn(0); //broadcast when(mockContext.getVertexNumTasks(m3)).thenReturn(3); //broadcast manager = createManager(conf, mockContext, 0.001f, 0.001f); manager.onVertexStarted(null); manager.onVertexStateUpdated(new VertexStateUpdate(r1, VertexState.CONFIGURED)); Assert.assertEquals(3, manager.pendingTasks.size()); // no tasks scheduled Assert.assertEquals(3, manager.totalNumBipartiteSourceTasks); Assert.assertEquals(0, manager.numBipartiteSourceTasksCompleted); //Send 2 events for tasks of r1. manager.onSourceTaskCompleted(r1, new Integer(0)); manager.onSourceTaskCompleted(r1, new Integer(1)); Assert.assertTrue(manager.pendingTasks.size() == 3); // no tasks scheduled Assert.assertTrue(scheduledTasks.size() == 0); // event from m3 triggers scheduling. 
no need for m2 since it has 0 tasks manager.onVertexStateUpdated(new VertexStateUpdate(m3, VertexState.CONFIGURED)); Assert.assertTrue(manager.pendingTasks.size() == 0); // all tasks scheduled Assert.assertTrue(scheduledTasks.size() == 3); //try with all zero task vertices in non-SG edges scheduledTasks.clear(); manager = createManager(conf, mockContext, 0.001f, 0.001f); manager.onVertexStarted(null); when(mockContext.getInputVertexEdgeProperties()).thenReturn(mockInputVertices); when(mockContext.getVertexName()).thenReturn(mockManagedVertexId); when(mockContext.getVertexNumTasks(mockManagedVertexId)).thenReturn(3); when(mockContext.getVertexNumTasks(r1)).thenReturn(3); //scatter gather when(mockContext.getVertexNumTasks(m2)).thenReturn(0); //broadcast when(mockContext.getVertexNumTasks(m3)).thenReturn(0); //broadcast //Send 1 events for tasks of r1. manager.onVertexStateUpdated(new VertexStateUpdate(r1, VertexState.CONFIGURED)); manager.onSourceTaskCompleted(r1, new Integer(0)); Assert.assertTrue(manager.pendingTasks.size() == 0); // all tasks scheduled Assert.assertTrue(scheduledTasks.size() == 3); } private ShuffleVertexManager createManager(Configuration conf, VertexManagerPluginContext context, float min, float max) { conf.setFloat(ShuffleVertexManager.TEZ_SHUFFLE_VERTEX_MANAGER_MIN_SRC_FRACTION, min); conf.setFloat(ShuffleVertexManager.TEZ_SHUFFLE_VERTEX_MANAGER_MAX_SRC_FRACTION, max); UserPayload payload; try { payload = TezUtils.createUserPayloadFromConf(conf); } catch (IOException e) { throw new RuntimeException(e); } when(context.getUserPayload()).thenReturn(payload); ShuffleVertexManager manager = new ShuffleVertexManager(context); manager.initialize(); return manager; } }
apache-2.0
Sage-Bionetworks/Synapse-Repository-Services
client/synapseJavaClient/src/test/java/org/sagebionetworks/client/ClientVersionInfoTest.java
322
package org.sagebionetworks.client;

import static org.junit.Assert.assertNotNull;

import org.junit.Test;

/**
 * Unit test for {@link ClientVersionInfo}: the client version string must
 * always be resolvable.
 */
public class ClientVersionInfoTest {

	@Test
	public void testGetVersion() {
		final String clientVersion = ClientVersionInfo.getClientVersionInfo();
		// Echo the resolved version so it shows up in the test output for debugging.
		System.out.println(clientVersion);
		assertNotNull(clientVersion);
	}
}
apache-2.0
l0s/jersey-hmac-auth
sample-jersey2/src/main/java/com/bazaarvoice/auth/hmac/sample/server/PizzaApplication.java
1197
package com.bazaarvoice.auth.hmac.sample.server; import org.glassfish.hk2.api.TypeLiteral; import org.glassfish.hk2.utilities.Binder; import org.glassfish.hk2.utilities.binding.AbstractBinder; import org.glassfish.jersey.server.ResourceConfig; import com.bazaarvoice.auth.hmac.server.Authenticator; import com.bazaarvoice.auth.hmac.server.HmacAuthFeature; /** * Jersey 2.x JAX-RS application that demonstrates HMAC authentication. */ public class PizzaApplication<P> extends ResourceConfig { private final Binder pizzaApplicationBinder = new AbstractBinder() { protected void configure() { // The P parameter is to trick HK2 into injecting the Authenticator where it is needed. bind(PizzaAuthenticator.class).to(new TypeLiteral<Authenticator<P>>() {}); } }; public PizzaApplication() { // features // specify your principal type here register(new HmacAuthFeature<String>()); // dependencies register(getPizzaApplicationBinder()); // resources register(PizzaResource2.class); } protected Binder getPizzaApplicationBinder() { return pizzaApplicationBinder; } }
apache-2.0
sekikn/ambari
ambari-server/src/main/java/org/apache/ambari/server/api/resources/AlertHistoryResourceDefinition.java
1414
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.ambari.server.api.resources;

import org.apache.ambari.server.controller.spi.Resource;

/**
 * Resource Definition for {@link Resource.Type#AlertHistory} types.
 */
public class AlertHistoryResourceDefinition extends BaseResourceDefinition {

  /** REST name for this resource; alert history uses the same token for both plural and singular. */
  private static final String RESOURCE_NAME = "alert_history";

  /**
   * Constructor.
   */
  public AlertHistoryResourceDefinition() {
    super(Resource.Type.AlertHistory);
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public String getPluralName() {
    return RESOURCE_NAME;
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public String getSingularName() {
    return RESOURCE_NAME;
  }
}
apache-2.0
pinnamur/titanium_mobile
android/titanium/src/java/org/appcelerator/titanium/util/TiPlatformHelper.java
7625
/**
 * Appcelerator Titanium Mobile
 * Copyright (c) 2009-2014 by Appcelerator, Inc. All Rights Reserved.
 * Licensed under the terms of the Apache Public License
 * Please see the LICENSE included with this distribution for details.
 */
package org.appcelerator.titanium.util;

import java.lang.reflect.Method;
import java.util.Currency;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import java.util.StringTokenizer;

import org.appcelerator.kroll.common.Log;
import org.appcelerator.titanium.ITiAppInfo;
import org.appcelerator.titanium.TiApplication;

import android.Manifest;
import android.app.Activity;
import android.content.Context;
import android.content.pm.PackageManager;
import android.content.res.Resources;
import android.net.DhcpInfo;
import android.net.wifi.WifiInfo;
import android.net.wifi.WifiManager;
import android.text.format.Formatter;
import android.util.DisplayMetrics;

import com.appcelerator.analytics.APSAnalyticsHelper;

/**
 * Singleton helper exposing platform-level information to the Titanium runtime:
 * display metrics, locale/currency lookups, and wifi network details.
 * Obtain via {@link #getInstance()}.
 */
public class TiPlatformHelper extends APSAnalyticsHelper
{
	public static final String TAG = "TiPlatformHelper";

	// Lookup caches. Wrapped in synchronizedMap because lookups may happen off the
	// main thread (NOTE(review): assumed from the synchronized wrappers — confirm callers).
	private static final Map<String, Locale> locales = java.util.Collections.synchronizedMap(new HashMap<String, Locale>());
	private static final Map<Locale, String> currencyCodes = java.util.Collections.synchronizedMap(new HashMap<Locale, String>());
	private static final Map<Locale, String> currencySymbols = java.util.Collections.synchronizedMap(new HashMap<Locale, String>());
	private static final Map<String, String> currencySymbolsByCode = java.util.Collections.synchronizedMap(new HashMap<String, String>());

	// Display scaling state, populated once by intializeDisplayMetrics().
	public static float applicationScaleFactor = 1.0F;
	public static int applicationLogicalDensity = DisplayMetrics.DENSITY_MEDIUM;
	private static boolean applicationDisplayInfoInitialized = false;

	// Lazy-initialization holder idiom: INSTANCE is created on first access to InstanceHolder.
	private static class InstanceHolder
	{
		private static final TiPlatformHelper INSTANCE = new TiPlatformHelper();
	}

	/** Returns the process-wide singleton instance. */
	public static final TiPlatformHelper getInstance()
	{
		return InstanceHolder.INSTANCE;
	}

	// Private constructor: instances are only created through the holder above.
	private TiPlatformHelper()
	{
	}

	/** Initializes the analytics helper with the application's GUID and context. */
	public void initialize()
	{
		APSAnalyticsHelper.getInstance().init(TiApplication.getInstance().getAppGUID(), TiApplication.getInstance());
	}

	/**
	 * Populates {@link #applicationScaleFactor} and {@link #applicationLogicalDensity}
	 * from the given activity's display, once per process.
	 * (Method name typo "intialize" is preserved — it is part of the public API.)
	 */
	public synchronized void intializeDisplayMetrics(Activity activity)
	{
		if (!applicationDisplayInfoInitialized) {
			DisplayMetrics dm = new DisplayMetrics();
			activity.getWindowManager().getDefaultDisplay().getMetrics(dm);

			// Note: this isn't public API, so there should be lots of error checking here
			try {
				Method gciMethod = Resources.class.getMethod("getCompatibilityInfo");
				Object compatInfo = gciMethod.invoke(activity.getResources());
				applicationScaleFactor = (Float) compatInfo.getClass().getField("applicationScale").get(compatInfo);
			} catch (Exception e) {
				// Reflection failed (hidden API changed/removed) — fall back to the reported density.
				Log.w(TAG, "Unable to get application scale factor, using reported density and its factor", Log.DEBUG_MODE);
			}

			if (applicationScaleFactor == 1.0f) {
				applicationLogicalDensity = dm.densityDpi;
			} else if (applicationScaleFactor > 1.0f) {
				applicationLogicalDensity = DisplayMetrics.DENSITY_MEDIUM;
			} else {
				applicationLogicalDensity = DisplayMetrics.DENSITY_LOW;
			}

			applicationDisplayInfoInitialized = true;
		}
	}

	/** Returns the application info from the Titanium application singleton. */
	public ITiAppInfo getAppInfo()
	{
		return TiApplication.getInstance().getAppInfo();
	}

	/** Returns the default locale as a BCP-47-style tag, e.g. "en-US" (underscores replaced with '-'). */
	public String getLocale()
	{
		return Locale.getDefault().toString().replace("_", "-");
	}

	/**
	 * Parses a locale code such as "en-US", "en_US" or "en__POSIX" into a {@link Locale},
	 * caching the result. Returns null for null input.
	 *
	 * NOTE(review): StringTokenizer treats its delimiter argument as a SET of characters,
	 * so "__" and "_" behave identically; the branch structure below relies on the
	 * startsWith/contains checks (not the delimiter) to distinguish the cases.
	 */
	public Locale getLocale(String localeCode)
	{
		if (localeCode == null) {
			return null;
		}
		String code = localeCode.replace('-', '_');
		if (locales.containsKey(code)) {
			return locales.get(code);
		}

		String language = "", country = "", variant = "";
		if (code.startsWith("__")) {
			// This is weird, just a variant. Whatever, give it a shot.
			StringTokenizer tokens = new StringTokenizer(code, "__");
			if (tokens.hasMoreElements()) {
				variant = tokens.nextToken();
			}
		} else if (code.startsWith("_")) {
			// No language specified, but country specified and maybe variant.
			StringTokenizer tokens = new StringTokenizer(code, "_");
			if (tokens.hasMoreElements()) {
				country = tokens.nextToken();
			}
			if (tokens.hasMoreElements()) {
				variant = tokens.nextToken();
			}
		} else if (code.contains("__")) {
			// this is language__variant
			StringTokenizer tokens = new StringTokenizer(code, "__");
			if (tokens.hasMoreElements()) {
				language = tokens.nextToken();
			}
			if (tokens.hasMoreElements()) {
				variant = tokens.nextToken();
			}
		} else {
			// language, optionally followed by country and variant.
			StringTokenizer tokens = new StringTokenizer(code, "__");
			if (tokens.hasMoreElements()) {
				language = tokens.nextToken();
			}
			if (tokens.hasMoreElements()) {
				country = tokens.nextToken();
			}
			if (tokens.hasMoreElements()) {
				variant = tokens.nextToken();
			}
		}

		Locale l = new Locale(language, country, variant);
		locales.put(code, l);
		return l;
	}

	/** Returns the ISO 4217 currency code for the locale, caching the lookup. */
	public String getCurrencyCode(Locale locale)
	{
		String code;
		if (currencyCodes.containsKey(locale)) {
			code = currencyCodes.get(locale);
		} else {
			code = Currency.getInstance(locale).getCurrencyCode();
			currencyCodes.put(locale, code);
		}
		return code;
	}

	/** Returns the currency symbol for the locale (localized to that locale), caching the lookup. */
	public String getCurrencySymbol(Locale locale)
	{
		String symbol;
		if (currencySymbols.containsKey(locale)) {
			symbol = currencySymbols.get(locale);
		} else {
			symbol = Currency.getInstance(locale).getSymbol(locale);
			currencySymbols.put(locale, symbol);
		}
		return symbol;
	}

	/** Returns the symbol for an ISO 4217 currency code (default-locale rendering), caching the lookup. */
	public String getCurrencySymbol(String currencyCode)
	{
		String symbol;
		if (currencySymbolsByCode.containsKey(currencyCode)) {
			symbol = currencySymbolsByCode.get(currencyCode);
		} else {
			symbol = Currency.getInstance(currencyCode).getSymbol();
			currencySymbolsByCode.put(currencyCode, symbol);
		}
		return symbol;
	}

	/**
	 * Returns the device's wifi IP address, or null if the ACCESS_WIFI_STATE
	 * permission is missing or the wifi state cannot be read.
	 */
	public String getIpAddress()
	{
		String ipAddress = null;
		TiApplication tiApp = TiApplication.getInstance();

		if (tiApp.getRootActivity().checkCallingOrSelfPermission(Manifest.permission.ACCESS_WIFI_STATE) == PackageManager.PERMISSION_GRANTED) {
			WifiManager wifiManager = (WifiManager) tiApp.getRootActivity().getSystemService(Context.WIFI_SERVICE);
			if (wifiManager != null) {
				WifiInfo wifiInfo = wifiManager.getConnectionInfo();
				if (wifiInfo != null) {
					ipAddress = Formatter.formatIpAddress(wifiInfo.getIpAddress());
					Log.d(TAG, "Found IP address: " + ipAddress, Log.DEBUG_MODE);
				} else {
					Log.e(TAG, "Unable to access WifiInfo, failed to get IP address");
				}
			} else {
				Log.e(TAG, "Unable to access the WifiManager, failed to get IP address");
			}
		} else {
			Log.e(TAG, "Must have android.permission.ACCESS_WIFI_STATE, failed to get IP address");
		}

		return ipAddress;
	}

	/**
	 * Returns the wifi network's netmask from DHCP, or null if the
	 * ACCESS_WIFI_STATE permission is missing or DHCP info is unavailable.
	 */
	public String getNetmask()
	{
		String netmask = null;
		TiApplication tiApp = TiApplication.getInstance();

		if (tiApp.getRootActivity().checkCallingOrSelfPermission(Manifest.permission.ACCESS_WIFI_STATE) == PackageManager.PERMISSION_GRANTED) {
			WifiManager wifiManager = (WifiManager) tiApp.getRootActivity().getSystemService(Context.WIFI_SERVICE);
			if (wifiManager != null) {
				DhcpInfo dhcpInfo = wifiManager.getDhcpInfo();
				if (dhcpInfo != null) {
					netmask = Formatter.formatIpAddress(dhcpInfo.netmask);
					Log.d(TAG, "Found netmask: " + netmask, Log.DEBUG_MODE);
				} else {
					Log.e(TAG, "Unable to access DhcpInfo, failed to get netmask");
				}
			} else {
				Log.e(TAG, "Unable to access the WifiManager, failed to get netmask");
			}
		} else {
			Log.e(TAG, "Must have android.permission.ACCESS_WIFI_STATE, failed to get netmask");
		}

		return netmask;
	}
}
apache-2.0
CC4401-TeraCity/TeraCity
engine/src/main/java/org/terasology/persistence/typeHandling/extensionTypes/CollisionGroupTypeHandler.java
1434
/* * Copyright 2013 MovingBlocks * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.terasology.persistence.typeHandling.extensionTypes; import org.terasology.registry.CoreRegistry; import org.terasology.persistence.typeHandling.StringRepresentationTypeHandler; import org.terasology.physics.CollisionGroup; import org.terasology.physics.CollisionGroupManager; /** * @author Immortius */ public class CollisionGroupTypeHandler extends StringRepresentationTypeHandler<CollisionGroup> { private CollisionGroupManager groupManager; public CollisionGroupTypeHandler() { groupManager = CoreRegistry.get(CollisionGroupManager.class); } @Override public String getAsString(CollisionGroup item) { return item.getName(); } @Override public CollisionGroup getFromString(String representation) { return groupManager.getCollisionGroup(representation); } }
apache-2.0
clarkyzl/flink
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/buffer/BufferPool.java
2265
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.runtime.io.network.buffer;

/**
 * A dynamically sized buffer pool.
 *
 * <p>The pool guarantees a minimum number of memory segments, may be capped at a maximum,
 * and its current size can be adjusted at runtime via {@link #setNumBuffers(int)}.
 */
public interface BufferPool extends BufferProvider, BufferRecycler {

    /**
     * Destroys this buffer pool.
     *
     * <p>If not all buffers are available, they are recycled lazily as soon as they are recycled.
     */
    void lazyDestroy();

    /** Checks whether this buffer pool has been destroyed. */
    @Override
    boolean isDestroyed();

    /** Returns the number of guaranteed (minimum number of) memory segments of this buffer pool. */
    int getNumberOfRequiredMemorySegments();

    /**
     * Returns the maximum number of memory segments this buffer pool should use.
     *
     * @return maximum number of memory segments to use or <tt>-1</tt> if unlimited
     */
    int getMaxNumberOfMemorySegments();

    /**
     * Returns the current size of this buffer pool.
     *
     * <p>The size of the buffer pool can change dynamically at runtime.
     */
    int getNumBuffers();

    /**
     * Sets the current size of this buffer pool.
     *
     * <p>The size needs to be greater or equal to the guaranteed number of memory segments.
     */
    void setNumBuffers(int numBuffers);

    /** Returns the number memory segments, which are currently held by this buffer pool. */
    int getNumberOfAvailableMemorySegments();

    /**
     * Returns the number of used buffers of this buffer pool.
     *
     * <p>"Best effort" — the value may be stale under concurrent use
     * (NOTE(review): inferred from the method name; confirm with implementations).
     */
    int bestEffortGetNumOfUsedBuffers();
}
apache-2.0
RichJackson/turbo-laser
src/integration-test/java/uk/ac/kcl/utils/PostGresTestUtils.java
7975
package uk.ac.kcl.utils; import org.apache.log4j.Logger; import org.junit.Ignore; import org.junit.runner.RunWith; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Qualifier; import org.springframework.context.annotation.Configuration; import org.springframework.context.annotation.Import; import org.springframework.context.annotation.Profile; import org.springframework.core.io.ClassPathResource; import org.springframework.core.io.Resource; import org.springframework.jdbc.core.JdbcTemplate; import org.springframework.jdbc.datasource.init.ResourceDatabasePopulator; import org.springframework.test.context.TestPropertySource; import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; import uk.ac.kcl.batch.BatchConfigurer; import uk.ac.kcl.batch.JobConfiguration; import uk.ac.kcl.it.postgres.GATEWithoutScheduling; import uk.ac.kcl.utils.DbmsTestUtils; import uk.ac.kcl.utils.TestUtils; import javax.annotation.PostConstruct; import javax.sql.DataSource; import java.util.Map; import java.util.Random; /* * Copyright 2016 King's College London, Richard Jackson <richgjackson@gmail.com>. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ /** * * @author rich */ @RunWith(SpringJUnit4ClassRunner.class) @TestPropertySource({ "classpath:postgres_db.properties", "classpath:postgres_test.properties",}) @Configuration @Import({JobConfiguration.class,TestUtils.class, BatchConfigurer.class}) @Profile("postgres") @Ignore public class PostGresTestUtils implements DbmsTestUtils { final static Logger logger = Logger.getLogger(GATEWithoutScheduling.class); long today = System.currentTimeMillis(); Random random = new Random(); @Autowired @Qualifier("sourceDataSource") public DataSource sourceDataSource; @Autowired @Qualifier("targetDataSource") public DataSource targetDataSource; @Autowired @Qualifier("jobRepositoryDataSource") public DataSource jobRepositoryDataSource; private JdbcTemplate sourceTemplate; private JdbcTemplate targetTemplate; private ResourceDatabasePopulator rdp = new ResourceDatabasePopulator(); @PostConstruct public void init(){ this.sourceTemplate = new JdbcTemplate(sourceDataSource); this.targetTemplate = new JdbcTemplate(targetDataSource); JdbcTemplate jobRepoTemplate = new JdbcTemplate(jobRepositoryDataSource); } public void createTikaTable() { //// for postgres sourceTemplate.execute("DROP TABLE IF EXISTS tblInputDocs"); sourceTemplate.execute("CREATE TABLE tblInputDocs" + "( ID SERIAL PRIMARY KEY" + ", srcColumnFieldName text " + ", srcTableName text " + ", primaryKeyFieldName text " + ", primaryKeyFieldValue integer " + ", updateTime TIMESTAMP " + ", someText bytea" + ", anotherTime TIMESTAMP )"); targetTemplate.execute("DROP TABLE IF EXISTS tblOutputDocs"); targetTemplate.execute("CREATE TABLE tblOutputDocs " + "( ID SERIAL PRIMARY KEY" + ", srcColumnFieldName text " + ", srcTableName text " + ", primaryKeyFieldName text " + ", primaryKeyFieldValue integer " + ", updateTime TIMESTAMP " + ", output text )"); } public void createBasicInputTable(){ sourceTemplate.execute("DROP TABLE IF EXISTS tblInputDocs"); sourceTemplate.execute("CREATE TABLE tblInputDocs" + "( ID SERIAL PRIMARY KEY" 
+ ", srcColumnFieldName text " + ", srcTableName text " + ", primaryKeyFieldName text " + ", primaryKeyFieldValue integer " + ", updateTime TIMESTAMP " + ", someText TEXT" + ", anotherTime TIMESTAMP )"); } public void createBasicOutputTable(){ targetTemplate.execute("DROP TABLE IF EXISTS tblOutputDocs"); targetTemplate.execute("CREATE TABLE tblOutputDocs " + "( ID SERIAL PRIMARY KEY" + ", srcColumnFieldName text " + ", srcTableName text " + ", primaryKeyFieldName text " + ", primaryKeyFieldValue integer " + ", updateTime TIMESTAMP " + ", output text " + ", anotherTime TIMESTAMP)" ); } public void createMultiLineTextTable(){ createBasicInputTable(); sourceTemplate.execute("DROP TABLE IF EXISTS tblDocLines"); sourceTemplate.execute("CREATE TABLE tblDocLines" + "( ID SERIAL PRIMARY KEY" + ", primaryKeyFieldValue integer " + ", updateTime TIMESTAMP " + ", LINE_ID integer " + ", LINE_TEXT text )" ); } public void createJobRepository(){ Resource dropTablesResource = new ClassPathResource("org/springframework/batch/core/schema-drop-postgresql" + ".sql"); Resource makeTablesResource = new ClassPathResource("org/springframework/batch/core/schema-postgresql.sql"); rdp.addScript(dropTablesResource); rdp.addScript(makeTablesResource); rdp.execute(jobRepositoryDataSource); } @Override public void createDeIdInputTable() { createBasicInputTable(); sourceTemplate.execute("DROP VIEW IF EXISTS vwidentifiers"); sourceTemplate.execute("DROP TABLE IF EXISTS tblIdentifiers"); sourceTemplate.execute("CREATE TABLE tblIdentifiers " + "( ID SERIAL PRIMARY KEY" + ", primaryKeyFieldValue BIGINT " + ", NAME TEXT " + ", ADDRESS TEXT " + ", POSTCODE TEXT " + ", DATE_OF_BIRTH TIMESTAMP )"); sourceTemplate.execute("create view vwIdentifiers AS\n" + " select primarykeyfieldvalue, address as identifier from tblidentifiers\n" + " UNION\n" + " select primarykeyfieldvalue, name as identifier from tblidentifiers\n" + " UNION\n" + " select primarykeyfieldvalue, postcode as identifier from 
tblidentifiers"); } @Override public int countRowsInOutputTable() { JdbcTemplate jdbcTemplate = new JdbcTemplate(targetDataSource); return jdbcTemplate.queryForObject("SELECT COUNT(*) FROM tblOutputDocs", Integer.class); } @Override public void createDocManInputTable() { sourceTemplate.execute("DROP TABLE IF EXISTS tblInputDocs"); sourceTemplate.execute("CREATE TABLE tblInputDocs" + "( ID SERIAL PRIMARY KEY" + ", srcColumnFieldName text " + ", srcTableName text " + ", primaryKeyFieldName text " + ", primaryKeyFieldValue integer " + ", updateTime TIMESTAMP " + ", someText TEXT" + ", path TEXT )"); } @Override public Map<String,Object> getRowInOutputTable(int primaryKey) { JdbcTemplate jdbcTemplate = new JdbcTemplate(targetDataSource); return jdbcTemplate.queryForMap("SELECT * FROM tblOutputDocs WHERE primaryKeyFieldValue = " + Integer.toString(primaryKey)); } }
apache-2.0
abergmeier-dsfishlabs/bazel
src/java_tools/junitrunner/java/com/google/testing/junit/runner/junit4/JUnit4TestStackTraceListener.java
1973
// Copyright 2015 The Bazel Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package com.google.testing.junit.runner.junit4; import com.google.testing.junit.runner.internal.SignalHandlers; import com.google.testing.junit.runner.internal.StackTraces; import com.google.testing.junit.runner.internal.Stderr; import org.junit.runner.Description; import org.junit.runner.notification.RunListener; import sun.misc.Signal; import sun.misc.SignalHandler; import java.io.PrintStream; import javax.inject.Inject; import javax.inject.Singleton; /** * A listener than dumps all stack traces when the test receives a SIGTERM. */ @Singleton class JUnit4TestStackTraceListener extends RunListener { private final SignalHandlers signalHandlers; private final PrintStream errPrintStream; @Inject public JUnit4TestStackTraceListener( SignalHandlers signalHandlers, @Stderr PrintStream errPrintStream) { this.signalHandlers = signalHandlers; this.errPrintStream = errPrintStream; } @Override public void testRunStarted(Description description) throws Exception { signalHandlers.installHandler(new Signal("TERM"), new WriteStackTraceSignalHandler()); } private class WriteStackTraceSignalHandler implements SignalHandler { @Override public void handle(Signal signal) { errPrintStream.println("Dumping stack traces for all threads\n"); StackTraces.printAll(errPrintStream); } } }
apache-2.0
CC4401-TeraCity/TeraCity
engine/src/main/java/org/terasology/engine/modes/loadProcesses/ProcessBlockPrefabs.java
1475
/* * Copyright 2013 MovingBlocks * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.terasology.engine.modes.loadProcesses; import org.terasology.registry.CoreRegistry; import org.terasology.entitySystem.entity.EntityManager; import org.terasology.world.block.BlockManager; import org.terasology.world.block.internal.BlockManagerImpl; import org.terasology.world.block.internal.BlockPrefabManager; /** * @author Immortius */ public class ProcessBlockPrefabs extends SingleStepLoadProcess { @Override public String getMessage() { return "Initialising Block Type Entities"; } @Override public boolean step() { BlockManagerImpl blockManager = (BlockManagerImpl) CoreRegistry.get(BlockManager.class); blockManager.subscribe(new BlockPrefabManager(CoreRegistry.get(EntityManager.class), blockManager)); return true; } @Override public int getExpectedCost() { return 1; } }
apache-2.0
googleapis/google-api-java-client-services
clients/google-api-services-sheets/v4/1.30.1/com/google/api/services/sheets/v4/model/BaselineValueFormat.java
8065
/*
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
 * in compliance with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License
 * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing permissions and limitations under
 * the License.
 */
/*
 * This code was generated by https://github.com/googleapis/google-api-java-client-services/
 * Modify at your own risk.
 */

package com.google.api.services.sheets.v4.model;

/**
 * Formatting options for baseline value.
 *
 * <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is
 * transmitted over HTTP when working with the Google Sheets API. For a detailed explanation see:
 * <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a>
 * </p>
 *
 * <p>NOTE: generated code — do not edit by hand; regenerate from the API discovery document.</p>
 *
 * @author Google, Inc.
 */
@SuppressWarnings("javadoc")
public final class BaselineValueFormat extends com.google.api.client.json.GenericJson {

  /**
   * The comparison type of key value with baseline value.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.lang.String comparisonType;

  /**
   * Description which is appended after the baseline value. This field is optional.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.lang.String description;

  /**
   * Color to be used, in case baseline value represents a negative change for key value. This field
   * is optional.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private Color negativeColor;

  /**
   * Color to be used, in case baseline value represents a negative change for key value. This field
   * is optional. If negative_color is also set, this field takes precedence.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private ColorStyle negativeColorStyle;

  /**
   * Specifies the horizontal text positioning of baseline value. This field is optional. If not
   * specified, default positioning is used.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private TextPosition position;

  /**
   * Color to be used, in case baseline value represents a positive change for key value. This field
   * is optional.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private Color positiveColor;

  /**
   * Color to be used, in case baseline value represents a positive change for key value. This field
   * is optional. If positive_color is also set, this field takes precedence.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private ColorStyle positiveColorStyle;

  /**
   * Text formatting options for baseline value.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private TextFormat textFormat;

  /**
   * The comparison type of key value with baseline value.
   * @return value or {@code null} for none
   */
  public java.lang.String getComparisonType() {
    return comparisonType;
  }

  /**
   * The comparison type of key value with baseline value.
   * @param comparisonType comparisonType or {@code null} for none
   */
  public BaselineValueFormat setComparisonType(java.lang.String comparisonType) {
    this.comparisonType = comparisonType;
    return this;
  }

  /**
   * Description which is appended after the baseline value. This field is optional.
   * @return value or {@code null} for none
   */
  public java.lang.String getDescription() {
    return description;
  }

  /**
   * Description which is appended after the baseline value. This field is optional.
   * @param description description or {@code null} for none
   */
  public BaselineValueFormat setDescription(java.lang.String description) {
    this.description = description;
    return this;
  }

  /**
   * Color to be used, in case baseline value represents a negative change for key value. This field
   * is optional.
   * @return value or {@code null} for none
   */
  public Color getNegativeColor() {
    return negativeColor;
  }

  /**
   * Color to be used, in case baseline value represents a negative change for key value. This field
   * is optional.
   * @param negativeColor negativeColor or {@code null} for none
   */
  public BaselineValueFormat setNegativeColor(Color negativeColor) {
    this.negativeColor = negativeColor;
    return this;
  }

  /**
   * Color to be used, in case baseline value represents a negative change for key value. This field
   * is optional. If negative_color is also set, this field takes precedence.
   * @return value or {@code null} for none
   */
  public ColorStyle getNegativeColorStyle() {
    return negativeColorStyle;
  }

  /**
   * Color to be used, in case baseline value represents a negative change for key value. This field
   * is optional. If negative_color is also set, this field takes precedence.
   * @param negativeColorStyle negativeColorStyle or {@code null} for none
   */
  public BaselineValueFormat setNegativeColorStyle(ColorStyle negativeColorStyle) {
    this.negativeColorStyle = negativeColorStyle;
    return this;
  }

  /**
   * Specifies the horizontal text positioning of baseline value. This field is optional. If not
   * specified, default positioning is used.
   * @return value or {@code null} for none
   */
  public TextPosition getPosition() {
    return position;
  }

  /**
   * Specifies the horizontal text positioning of baseline value. This field is optional. If not
   * specified, default positioning is used.
   * @param position position or {@code null} for none
   */
  public BaselineValueFormat setPosition(TextPosition position) {
    this.position = position;
    return this;
  }

  /**
   * Color to be used, in case baseline value represents a positive change for key value. This field
   * is optional.
   * @return value or {@code null} for none
   */
  public Color getPositiveColor() {
    return positiveColor;
  }

  /**
   * Color to be used, in case baseline value represents a positive change for key value. This field
   * is optional.
   * @param positiveColor positiveColor or {@code null} for none
   */
  public BaselineValueFormat setPositiveColor(Color positiveColor) {
    this.positiveColor = positiveColor;
    return this;
  }

  /**
   * Color to be used, in case baseline value represents a positive change for key value. This field
   * is optional. If positive_color is also set, this field takes precedence.
   * @return value or {@code null} for none
   */
  public ColorStyle getPositiveColorStyle() {
    return positiveColorStyle;
  }

  /**
   * Color to be used, in case baseline value represents a positive change for key value. This field
   * is optional. If positive_color is also set, this field takes precedence.
   * @param positiveColorStyle positiveColorStyle or {@code null} for none
   */
  public BaselineValueFormat setPositiveColorStyle(ColorStyle positiveColorStyle) {
    this.positiveColorStyle = positiveColorStyle;
    return this;
  }

  /**
   * Text formatting options for baseline value.
   * @return value or {@code null} for none
   */
  public TextFormat getTextFormat() {
    return textFormat;
  }

  /**
   * Text formatting options for baseline value.
   * @param textFormat textFormat or {@code null} for none
   */
  public BaselineValueFormat setTextFormat(TextFormat textFormat) {
    this.textFormat = textFormat;
    return this;
  }

  // Generic setter used by the JSON parser for unknown keys; narrows the return type.
  @Override
  public BaselineValueFormat set(String fieldName, Object value) {
    return (BaselineValueFormat) super.set(fieldName, value);
  }

  // Shallow clone with a covariant return type.
  @Override
  public BaselineValueFormat clone() {
    return (BaselineValueFormat) super.clone();
  }
}
apache-2.0
svstanev/presto
presto-main/src/main/java/com/facebook/presto/sql/planner/iterative/rule/MergeLimitWithTopN.java
2039
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.facebook.presto.sql.planner.iterative.rule;

import com.facebook.presto.Session;
import com.facebook.presto.sql.planner.PlanNodeIdAllocator;
import com.facebook.presto.sql.planner.SymbolAllocator;
import com.facebook.presto.sql.planner.iterative.Lookup;
import com.facebook.presto.sql.planner.iterative.Rule;
import com.facebook.presto.sql.planner.plan.LimitNode;
import com.facebook.presto.sql.planner.plan.PlanNode;
import com.facebook.presto.sql.planner.plan.TopNNode;

import java.util.Optional;

/**
 * Collapses a {@code Limit} sitting directly above a {@code TopN} into a single
 * {@code TopN} whose row count is the smaller of the two. The step of the
 * resulting node follows the limit: a partial limit yields a PARTIAL TopN,
 * otherwise a SINGLE TopN.
 */
public class MergeLimitWithTopN
        implements Rule
{
    @Override
    public Optional<PlanNode> apply(PlanNode node, Lookup lookup, PlanNodeIdAllocator idAllocator, SymbolAllocator symbolAllocator, Session session)
    {
        // Only fires on a Limit node...
        if (!(node instanceof LimitNode)) {
            return Optional.empty();
        }
        LimitNode limit = (LimitNode) node;

        // ...whose (resolved) child is a TopN.
        PlanNode resolvedSource = lookup.resolve(limit.getSource());
        if (!(resolvedSource instanceof TopNNode)) {
            return Optional.empty();
        }
        TopNNode topN = (TopNNode) resolvedSource;

        TopNNode.Step step = limit.isPartial() ? TopNNode.Step.PARTIAL : TopNNode.Step.SINGLE;
        TopNNode merged = new TopNNode(
                limit.getId(),
                topN.getSource(),
                Math.min(limit.getCount(), topN.getCount()),
                topN.getOrderBy(),
                topN.getOrderings(),
                step);
        return Optional.of(merged);
    }
}
apache-2.0
shivpun/spring-framework
spring-aop/src/main/java/org/springframework/aop/framework/AbstractSingletonProxyFactoryBean.java
8032
/*
 * Copyright 2002-2015 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.aop.framework;

import org.springframework.aop.TargetSource;
import org.springframework.aop.framework.adapter.AdvisorAdapterRegistry;
import org.springframework.aop.framework.adapter.GlobalAdvisorAdapterRegistry;
import org.springframework.aop.target.SingletonTargetSource;
import org.springframework.beans.factory.BeanClassLoaderAware;
import org.springframework.beans.factory.FactoryBean;
import org.springframework.beans.factory.FactoryBeanNotInitializedException;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.util.ClassUtils;

/**
 * Convenient superclass for {@link FactoryBean} types that produce singleton-scoped
 * proxy objects.
 *
 * <p>Manages pre- and post-interceptors (references, rather than
 * interceptor names, as in {@link ProxyFactoryBean}) and provides
 * consistent interface management.
 *
 * <p>Subclasses supply the "main" interceptor via {@link #createMainInterceptor()};
 * the proxy itself is assembled once, eagerly, in {@link #afterPropertiesSet()}.
 *
 * @author Juergen Hoeller
 * @since 2.0
 */
@SuppressWarnings("serial")
public abstract class AbstractSingletonProxyFactoryBean extends ProxyConfig
		implements FactoryBean<Object>, BeanClassLoaderAware, InitializingBean {

	// The object (or TargetSource) to be proxied.
	private Object target;

	// Interfaces to expose; null means "auto-detect from the target".
	private Class<?>[] proxyInterfaces;

	private Object[] preInterceptors;

	private Object[] postInterceptors;

	/** Default is global AdvisorAdapterRegistry */
	private AdvisorAdapterRegistry advisorAdapterRegistry = GlobalAdvisorAdapterRegistry.getInstance();

	private transient ClassLoader proxyClassLoader;

	// The singleton proxy, created once in afterPropertiesSet().
	private Object proxy;


	/**
	 * Set the target object, that is, the bean to be wrapped with a transactional proxy.
	 * <p>The target may be any object, in which case a SingletonTargetSource will
	 * be created. If it is a TargetSource, no wrapper TargetSource is created:
	 * This enables the use of a pooling or prototype TargetSource etc.
	 * @see org.springframework.aop.TargetSource
	 * @see org.springframework.aop.target.SingletonTargetSource
	 * @see org.springframework.aop.target.LazyInitTargetSource
	 * @see org.springframework.aop.target.PrototypeTargetSource
	 * @see org.springframework.aop.target.CommonsPool2TargetSource
	 */
	public void setTarget(Object target) {
		this.target = target;
	}

	/**
	 * Specify the set of interfaces being proxied.
	 * <p>If not specified (the default), the AOP infrastructure works
	 * out which interfaces need proxying by analyzing the target,
	 * proxying all the interfaces that the target object implements.
	 */
	public void setProxyInterfaces(Class<?>[] proxyInterfaces) {
		this.proxyInterfaces = proxyInterfaces;
	}

	/**
	 * Set additional interceptors (or advisors) to be applied before the
	 * implicit transaction interceptor, e.g. a PerformanceMonitorInterceptor.
	 * <p>You may specify any AOP Alliance MethodInterceptors or other
	 * Spring AOP Advices, as well as Spring AOP Advisors.
	 * @see org.springframework.aop.interceptor.PerformanceMonitorInterceptor
	 */
	public void setPreInterceptors(Object[] preInterceptors) {
		this.preInterceptors = preInterceptors;
	}

	/**
	 * Set additional interceptors (or advisors) to be applied after the
	 * implicit transaction interceptor.
	 * <p>You may specify any AOP Alliance MethodInterceptors or other
	 * Spring AOP Advices, as well as Spring AOP Advisors.
	 */
	public void setPostInterceptors(Object[] postInterceptors) {
		this.postInterceptors = postInterceptors;
	}

	/**
	 * Specify the AdvisorAdapterRegistry to use.
	 * Default is the global AdvisorAdapterRegistry.
	 * @see org.springframework.aop.framework.adapter.GlobalAdvisorAdapterRegistry
	 */
	public void setAdvisorAdapterRegistry(AdvisorAdapterRegistry advisorAdapterRegistry) {
		this.advisorAdapterRegistry = advisorAdapterRegistry;
	}

	/**
	 * Set the ClassLoader to generate the proxy class in.
	 * <p>Default is the bean ClassLoader, i.e. the ClassLoader used by the
	 * containing BeanFactory for loading all bean classes. This can be
	 * overridden here for specific proxies.
	 */
	public void setProxyClassLoader(ClassLoader classLoader) {
		this.proxyClassLoader = classLoader;
	}

	@Override
	public void setBeanClassLoader(ClassLoader classLoader) {
		// Only adopt the bean ClassLoader if no explicit proxy ClassLoader was set.
		if (this.proxyClassLoader == null) {
			this.proxyClassLoader = classLoader;
		}
	}


	@Override
	public void afterPropertiesSet() {
		if (this.target == null) {
			throw new IllegalArgumentException("Property 'target' is required");
		}
		if (this.target instanceof String) {
			throw new IllegalArgumentException("'target' needs to be a bean reference, not a bean name as value");
		}
		if (this.proxyClassLoader == null) {
			this.proxyClassLoader = ClassUtils.getDefaultClassLoader();
		}

		ProxyFactory proxyFactory = new ProxyFactory();

		// Advisor ordering matters: pre-interceptors run before the main
		// interceptor, post-interceptors after it.
		if (this.preInterceptors != null) {
			for (Object interceptor : this.preInterceptors) {
				proxyFactory.addAdvisor(this.advisorAdapterRegistry.wrap(interceptor));
			}
		}

		// Add the main interceptor (typically an Advisor).
		proxyFactory.addAdvisor(this.advisorAdapterRegistry.wrap(createMainInterceptor()));

		if (this.postInterceptors != null) {
			for (Object interceptor : this.postInterceptors) {
				proxyFactory.addAdvisor(this.advisorAdapterRegistry.wrap(interceptor));
			}
		}

		// Inherit ProxyConfig settings (proxyTargetClass, optimize, etc.) from this bean.
		proxyFactory.copyFrom(this);

		TargetSource targetSource = createTargetSource(this.target);
		proxyFactory.setTargetSource(targetSource);

		if (this.proxyInterfaces != null) {
			proxyFactory.setInterfaces(this.proxyInterfaces);
		}
		else if (!isProxyTargetClass()) {
			// Rely on AOP infrastructure to tell us what interfaces to proxy.
			proxyFactory.setInterfaces(
					ClassUtils.getAllInterfacesForClass(targetSource.getTargetClass(), this.proxyClassLoader));
		}

		postProcessProxyFactory(proxyFactory);

		this.proxy = proxyFactory.getProxy(this.proxyClassLoader);
	}

	/**
	 * Determine a TargetSource for the given target (or TargetSource).
	 * @param target target. If this is an implementation of TargetSource it is
	 * used as our TargetSource; otherwise it is wrapped in a SingletonTargetSource.
	 * @return a TargetSource for this object
	 */
	protected TargetSource createTargetSource(Object target) {
		if (target instanceof TargetSource) {
			return (TargetSource) target;
		}
		else {
			return new SingletonTargetSource(target);
		}
	}

	/**
	 * A hook for subclasses to post-process the {@link ProxyFactory}
	 * before creating the proxy instance with it.
	 * @param proxyFactory the AOP ProxyFactory about to be used
	 * @since 4.2
	 */
	protected void postProcessProxyFactory(ProxyFactory proxyFactory) {
	}


	@Override
	public Object getObject() {
		if (this.proxy == null) {
			throw new FactoryBeanNotInitializedException();
		}
		return this.proxy;
	}

	@Override
	public Class<?> getObjectType() {
		// Best-effort type prediction: the actual proxy class is only known
		// after initialization, so fall back to declared interfaces / target type.
		if (this.proxy != null) {
			return this.proxy.getClass();
		}
		if (this.proxyInterfaces != null && this.proxyInterfaces.length == 1) {
			return this.proxyInterfaces[0];
		}
		if (this.target instanceof TargetSource) {
			return ((TargetSource) this.target).getTargetClass();
		}
		if (this.target != null) {
			return this.target.getClass();
		}
		return null;
	}

	@Override
	public final boolean isSingleton() {
		return true;
	}


	/**
	 * Create the "main" interceptor for this proxy factory bean.
	 * Typically an Advisor, but can also be any type of Advice.
	 * <p>Pre-interceptors will be applied before, post-interceptors
	 * will be applied after this interceptor.
	 */
	protected abstract Object createMainInterceptor();

}
apache-2.0
pperboires/PocDrools
drools-compiler/src/test/java/org/drools/compiler/Java5Test.java
1951
package org.drools.compiler;

import java.io.InputStreamReader;

import org.drools.DroolsTestCase;
import org.drools.rule.builder.dialect.java.JavaDialectConfiguration;

import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;

import static org.junit.Assert.*;

/**
 * Exercises compilation of a Java 5 rule file with the different
 * Java dialect compilers.
 */
public class Java5Test extends DroolsTestCase {

    /** Compiles the Java 5 rule with the Eclipse compiler; skipped on a 1.4 JDK. */
    @Test
    public void testJava5Rule() throws Exception {
        final String specVersion = System.getProperty( "java.specification.version" );

        //do not execute tests under JDK 1.4
        //otherwise the compiled version cannot be interpreted
        if ( specVersion.equals( "1.4" ) ) {
            System.out.println( "Skipping Java 1.5 tests - current JDK not compatible" );
            return;
        }

        final PackageBuilderConfiguration configuration = new PackageBuilderConfiguration();
        final JavaDialectConfiguration dialect =
                (JavaDialectConfiguration) configuration.getDialectConfiguration( "java" );
        dialect.setCompiler( JavaDialectConfiguration.ECLIPSE );
        dialect.setJavaLanguageLevel( "1.5" );

        final PackageBuilder packageBuilder = new PackageBuilder( configuration );
        packageBuilder.addPackageFromDrl( openRuleFile() );
        if ( packageBuilder.hasErrors() ) {
            fail( packageBuilder.getErrors().toString() );
        }
    }

    /** Building the Java 5 rule with Janino at its default level must report errors. */
    @Test
    public void testJava14Defaults() throws Exception {
        final PackageBuilderConfiguration configuration = new PackageBuilderConfiguration();
        final JavaDialectConfiguration dialect =
                (JavaDialectConfiguration) configuration.getDialectConfiguration( "java" );
        dialect.setCompiler( JavaDialectConfiguration.JANINO );

        final PackageBuilder packageBuilder = new PackageBuilder( configuration );
        packageBuilder.addPackageFromDrl( openRuleFile() );
        assertTrue( packageBuilder.hasErrors() );
    }

    /** Opens the shared Java 5 rule fixture from the classpath. */
    private InputStreamReader openRuleFile() {
        return new InputStreamReader( this.getClass().getResourceAsStream( "java5_rule.drl" ) );
    }
}
apache-2.0
emeroad/pinpoint
agent-sdk/src/main/java/com/navercorp/pinpoint/sdk/v1/concurrent/TraceForkJoinPool.java
3423
/*
 * Copyright 2021 NAVER Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.navercorp.pinpoint.sdk.v1.concurrent;

import com.navercorp.pinpoint.sdk.v1.concurrent.wrapper.DefaultCommandWrapper;
import com.navercorp.pinpoint.sdk.v1.concurrent.wrapper.CommandWrapper;

import java.util.Collection;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.ForkJoinTask;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

/**
 * Not open yet.
 * <p>
 * A {@link ForkJoinPool} that routes every submitted {@link Runnable}/{@link Callable}
 * through a {@link CommandWrapper} so trace context can be propagated to worker threads.
 * {@link ForkJoinTask} submissions pass through {@link #wrap(ForkJoinTask)}, which is
 * currently a no-op (see TODO).
 */
class TraceForkJoinPool extends ForkJoinPool {

    protected CommandWrapper commandWrapper = new DefaultCommandWrapper();

    private TraceForkJoinPool() {
    }

    private TraceForkJoinPool(int parallelism) {
        super(parallelism);
    }

    private TraceForkJoinPool(int parallelism, ForkJoinWorkerThreadFactory factory,
                              Thread.UncaughtExceptionHandler handler, boolean asyncMode) {
        super(parallelism, factory, handler, asyncMode);
    }

    /**
     * Hook for decorating a {@link ForkJoinTask} before submission.
     * Currently returns the task unchanged.
     */
    protected <T> ForkJoinTask<T> wrap(ForkJoinTask<T> task) {
        // TODO How to delegate ForkJoinTask?
        return task;
    }

    // Previously invoke(ForkJoinTask) was not overridden, so tasks on that path
    // bypassed wrap(); overridden here for consistency with execute/submit.
    @Override
    public <T> T invoke(ForkJoinTask<T> task) {
        task = wrap(task);
        return super.invoke(task);
    }

    @Override
    public <T> ForkJoinTask<T> submit(ForkJoinTask<T> task) {
        task = wrap(task);
        return super.submit(task);
    }

    @Override
    public <T> ForkJoinTask<T> submit(Callable<T> task) {
        task = commandWrapper.wrap(task);
        return super.submit(task);
    }

    @Override
    public <T> ForkJoinTask<T> submit(Runnable task, T result) {
        task = commandWrapper.wrap(task);
        return super.submit(task, result);
    }

    @Override
    public ForkJoinTask<?> submit(Runnable task) {
        task = commandWrapper.wrap(task);
        return super.submit(task);
    }

    @Override
    public <T> T invokeAny(Collection<? extends Callable<T>> tasks) throws InterruptedException, ExecutionException {
        tasks = commandWrapper.wrap(tasks);
        return super.invokeAny(tasks);
    }

    @Override
    public <T> T invokeAny(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit)
            throws InterruptedException, ExecutionException, TimeoutException {
        tasks = commandWrapper.wrap(tasks);
        return super.invokeAny(tasks, timeout, unit);
    }

    // BUG FIX: the no-timeout invokeAll overload was not overridden, so tasks
    // submitted through it were executed unwrapped (losing the trace context).
    @Override
    public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) {
        tasks = commandWrapper.wrap(tasks);
        return super.invokeAll(tasks);
    }

    @Override
    public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit)
            throws InterruptedException {
        tasks = commandWrapper.wrap(tasks);
        return super.invokeAll(tasks, timeout, unit);
    }

    @Override
    public void execute(ForkJoinTask<?> task) {
        task = wrap(task);
        super.execute(task);
    }

    @Override
    public void execute(Runnable task) {
        task = commandWrapper.wrap(task);
        super.execute(task);
    }
}
apache-2.0
roberthafner/flowable-engine
modules/flowable-engine/src/main/java/org/activiti/engine/impl/interceptor/CommandContextCloseListener.java
2068
package org.activiti.engine.impl.interceptor;

import org.activiti.engine.impl.cfg.TransactionContext;

/**
 * A listener that can be used to be notified of lifecycle events of the {@link CommandContext}.
 *
 * @author Joram Barrez
 */
public interface CommandContextCloseListener {

    /**
     * Called when the {@link CommandContext} is being closed, but no 'close logic' has been executed.
     *
     * At this point, the {@link TransactionContext} (if applicable) has not yet been committed/rolled back
     * and none of the {@link Session} instances have been flushed.
     *
     * If an exception happens and it is not caught in this method:
     * - The {@link Session} instances will *not* be flushed
     * - The {@link TransactionContext} will be rolled back (if applicable)
     */
    void closing(CommandContext commandContext);

    /**
     * Called when the {@link Session} instances have been successfully flushed.
     * When an exception happened during the flushing of the sessions, this method will not be called.
     *
     * If an exception happens and it is not caught in this method:
     * - The {@link Session} instances will *not* be flushed
     * - The {@link TransactionContext} will be rolled back (if applicable)
     */
    void afterSessionsFlush(CommandContext commandContext);

    /**
     * Called when the {@link CommandContext} is successfully closed.
     *
     * At this point, the {@link TransactionContext} (if applicable) has been successfully committed
     * and no rollback has happened. All {@link Session} instances have been closed.
     *
     * Note that throwing an exception here does *not* affect the transaction.
     * The {@link CommandContext} will log the exception though.
     */
    void closed(CommandContext commandContext);

    /**
     * Called when the {@link CommandContext} has not been successfully closed due to an exception that happened.
     *
     * Note that throwing an exception here does *not* affect the transaction.
     * The {@link CommandContext} will log the exception though.
     */
    void closeFailure(CommandContext commandContext);

}
apache-2.0
jbank/aws-ant-tasks
src/main/java/com/amazonaws/ant/elasticbeanstalk/TerminateBeanstalkEnvironmentTask.java
2174
/* * Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ package com.amazonaws.ant.elasticbeanstalk; import org.apache.tools.ant.BuildException; import com.amazonaws.ant.AWSAntTask; import com.amazonaws.services.elasticbeanstalk.AWSElasticBeanstalkClient; import com.amazonaws.services.elasticbeanstalk.model.TerminateEnvironmentRequest; /** * This task will terminate an Elastic Beanstalk environment */ public class TerminateBeanstalkEnvironmentTask extends AWSAntTask { private String environmentName; /** * Set the name of the environment. Required * * @param environmentName * The environment to terminate. */ public void setEnvironmentName(String environmentName) { this.environmentName = environmentName; } private void checkParams() { if (environmentName == null) { throw new BuildException( "Missing parameter: environmentName is required"); } } public void execute() { System.out .println("Terminating environment " + environmentName + "..."); checkParams(); AWSElasticBeanstalkClient bcClient = getOrCreateClient(AWSElasticBeanstalkClient.class); try { bcClient.terminateEnvironment(new TerminateEnvironmentRequest() .withEnvironmentName(environmentName)); } catch (Exception e) { throw new BuildException("Could not terminate environment " + e.getMessage(), e); } System.out .println("The request to terminate the environment has been submitted."); } }
apache-2.0
fogbeam/cas_mirror
core/cas-server-core-services-registry/src/test/java/org/apereo/cas/AllServiceRegistryTestsSuite.java
1409
package org.apereo.cas;

import org.apereo.cas.services.DefaultChainingServiceRegistryTests;
import org.apereo.cas.services.ServiceRegistryInitializerEventListenerTests;
import org.apereo.cas.services.ServiceRegistryInitializerTests;
import org.apereo.cas.services.replication.DefaultRegisteredServiceReplicationStrategyTests;
import org.apereo.cas.services.resource.CreateResourceBasedRegisteredServiceWatcherTests;
import org.apereo.cas.services.resource.DefaultRegisteredServiceResourceNamingStrategyTests;
import org.apereo.cas.services.resource.DeleteResourceBasedRegisteredServiceWatcherTests;
import org.apereo.cas.services.resource.ModifyResourceBasedRegisteredServiceWatcherTests;

import org.junit.platform.suite.api.SelectClasses;
import org.junit.platform.suite.api.Suite;

/**
 * This is {@link AllServiceRegistryTestsSuite}: a JUnit platform suite that
 * aggregates the service-registry test classes (initializer, chaining,
 * replication and resource-watcher tests) into a single run.
 *
 * @author Misagh Moayyed
 * @since 6.0.0
 */
// Test classes executed as part of this suite; add new registry tests here.
@SelectClasses({
    ServiceRegistryInitializerTests.class,
    DefaultChainingServiceRegistryTests.class,
    DefaultRegisteredServiceReplicationStrategyTests.class,
    ServiceRegistryInitializerEventListenerTests.class,
    DefaultRegisteredServiceResourceNamingStrategyTests.class,
    DeleteResourceBasedRegisteredServiceWatcherTests.class,
    CreateResourceBasedRegisteredServiceWatcherTests.class,
    ModifyResourceBasedRegisteredServiceWatcherTests.class
})
@Suite
public class AllServiceRegistryTestsSuite {
}
apache-2.0
apache/incubator-taverna-workbench
taverna-report-view/src/main/java/org/apache/taverna/workbench/report/view/ReportOnWorkflowAction.java
6053
/**
 *
 */
package org.apache.taverna.workbench.report.view;
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.awt.event.ActionEvent;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;

import javax.swing.AbstractAction;
import javax.swing.JOptionPane;

import org.apache.taverna.visit.VisitReport;
import org.apache.taverna.visit.VisitReport.Status;
import org.apache.taverna.workbench.MainWindow;
import org.apache.taverna.workbench.edits.EditManager;
import org.apache.taverna.workbench.file.FileManager;
import org.apache.taverna.workbench.report.ReportManager;
import org.apache.taverna.workbench.ui.SwingWorkerCompletionWaiter;
import org.apache.taverna.workbench.ui.Workbench;
import org.apache.taverna.workflowmodel.Dataflow;

/**
 * Swing action that validates a workflow (either an explicitly supplied
 * {@link Dataflow} or the currently open one) and pops up a dialog
 * summarizing the validation status, then switches to the report view.
 *
 * @author alanrw
 */
public class ReportOnWorkflowAction extends AbstractAction {

	// Whether time-consuming (expensive) health checks are included in validation.
	private final boolean includeTimeConsuming;
	// Whether the user's choice about expensive checks should be remembered.
	private final boolean remember;
	// Workflow to validate; null means "use the currently open dataflow".
	private Dataflow specifiedDataflow;

	// Workbench component name of the report view to reveal after validation.
	private static final String namedComponent = "reportView";
	private final FileManager fileManager;
	private final ReportManager reportManager;
	private final Workbench workbench;
	private final EditManager editManager;

	/**
	 * Creates an action that validates the currently open workflow.
	 */
	public ReportOnWorkflowAction(String name, boolean includeTimeConsuming, boolean remember,
			EditManager editManager, FileManager fileManager, ReportManager reportManager,
			Workbench workbench) {
		super(name);
		this.includeTimeConsuming = includeTimeConsuming;
		this.remember = remember;
		this.editManager = editManager;
		this.fileManager = fileManager;
		this.reportManager = reportManager;
		this.workbench = workbench;
		this.specifiedDataflow = null;
	}

	/**
	 * Creates an action that validates a specific workflow.
	 */
	public ReportOnWorkflowAction(String name, Dataflow dataflow, boolean includeTimeConsuming,
			boolean remember, EditManager editManager, FileManager fileManager,
			ReportManager reportManager, Workbench workbench) {
		super(name);
		this.specifiedDataflow = dataflow;
		this.includeTimeConsuming = includeTimeConsuming;
		this.remember = remember;
		this.editManager = editManager;
		this.fileManager = fileManager;
		this.reportManager = reportManager;
		this.workbench = workbench;
	}

	/*
	 * (non-Javadoc)
	 *
	 * @see
	 * java.awt.event.ActionListener#actionPerformed(java.awt.event.ActionEvent)
	 */
	public void actionPerformed(ActionEvent e) {
		// Only report status if the user did not cancel validation.
		if (validateWorkflow()) {
			checkStatus();
		}
	}

	/**
	 * Check the status and pop up a warning if something is wrong.
	 */
	public void checkStatus() {
		Dataflow dataflow;
		if (specifiedDataflow == null) {
			dataflow = fileManager.getCurrentDataflow();
		} else {
			dataflow = specifiedDataflow;
		}
		Status status = reportManager.getStatus(dataflow);
		int messageType;
		String message;
		if (status.equals(Status.OK)) {
			messageType = JOptionPane.INFORMATION_MESSAGE;
			message = "Workflow validated OK.";
		} else {
			Map<Object, Set<VisitReport>> reports = reportManager.getReports(dataflow);
			int errorCount = 0;
			int warningCount = 0;

			// Count errors and warnings across all visit reports.
			for (Entry<Object, Set<VisitReport>> entry : reports.entrySet()) {
				for (VisitReport report : entry.getValue()) {
					if (report.getStatus().equals(Status.SEVERE)) {
						errorCount++;
					} else if (report.getStatus().equals(Status.WARNING)) {
						warningCount++;
					}
				}
			}
			if (status.equals(Status.WARNING)) {
				messageType = JOptionPane.WARNING_MESSAGE;
				message = "Validation reported ";
			} else {
				// SEVERE
				messageType = JOptionPane.ERROR_MESSAGE;
				message = "Validation reported ";
				if (errorCount == 1) {
					message += "one error";
				} else {
					message += errorCount + " errors";
				}
				if (warningCount != 0) {
					message += " and ";
				}
			}
			// Append the warning suffix shared by both the WARNING and SEVERE cases.
			if (warningCount == 1) {
				message += "one warning";
			} else if (warningCount > 0) {
				message += warningCount + " warnings";
			}
		}
		JOptionPane.showMessageDialog(MainWindow.getMainWindow(), message, "Workflow validation",
				messageType);
		workbench.getPerspectives().setWorkflowPerspective();
		workbench.makeNamedComponentVisible(namedComponent);
	}

	/**
	 * Perform validation on workflow.
	 *
	 * @return <code>true</code> if the validation was not cancelled.
	 */
	public boolean validateWorkflow() {
		Dataflow dataflow;
		if (specifiedDataflow == null) {
			dataflow = fileManager.getCurrentDataflow();
		} else {
			dataflow = specifiedDataflow;
		}
		ValidateSwingWorker validateSwingWorker = new ValidateSwingWorker(dataflow,
				includeTimeConsuming, remember, editManager, reportManager);
		ValidateInProgressDialog dialog = new ValidateInProgressDialog();
		validateSwingWorker.addPropertyChangeListener(new SwingWorkerCompletionWaiter(dialog));
		validateSwingWorker.execute();

		// Give a chance to the SwingWorker to finish so we do not have to
		// display the dialog if copying of the workflow is quick (so it won't
		// flicker on the screen)
		try {
			Thread.sleep(500);
		} catch (InterruptedException ex) {
			// BUG FIX: restore the interrupt status instead of silently
			// swallowing the interruption.
			Thread.currentThread().interrupt();
		}
		if (!validateSwingWorker.isDone()) {
			dialog.setVisible(true); // this will block the GUI
		}
		// See if the user cancelled the dialog.
		boolean userCancelled = dialog.hasUserCancelled();
		if (userCancelled) {
			validateSwingWorker.cancel(true);
		}
		return !userCancelled;
	}
}
apache-2.0
masaki-yamakawa/geode
geode-core/src/integrationTest/java/org/apache/geode/internal/cache/ha/HARegionQueueIntegrationTest.java
30321
/* * Licensed to the Apache Software Foundation (ASF) under one or more contributor license * agreements. See the NOTICE file distributed with this work for additional information regarding * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance with the License. You may obtain a * copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ package org.apache.geode.internal.cache.ha; import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.apache.geode.distributed.ConfigurationProperties.MCAST_PORT; import static org.apache.geode.test.awaitility.GeodeAwaitility.await; import static org.apache.geode.test.awaitility.GeodeAwaitility.getTimeout; import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.when; import static org.mockito.Mockito.withSettings; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Queue; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; import org.junit.After; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.mockito.Mockito; import org.mockito.junit.MockitoJUnit; import org.mockito.junit.MockitoRule; 
import org.mockito.quality.Strictness; import org.apache.geode.CancelCriterion; import org.apache.geode.cache.AttributesFactory; import org.apache.geode.cache.CacheFactory; import org.apache.geode.cache.DataPolicy; import org.apache.geode.cache.EvictionAction; import org.apache.geode.cache.EvictionAttributes; import org.apache.geode.cache.Region; import org.apache.geode.cache.RegionShortcut; import org.apache.geode.cache.Scope; import org.apache.geode.distributed.internal.membership.InternalDistributedMember; import org.apache.geode.internal.cache.CacheServerImpl; import org.apache.geode.internal.cache.CachedDeserializable; import org.apache.geode.internal.cache.EnumListenerEvent; import org.apache.geode.internal.cache.EventID; import org.apache.geode.internal.cache.HARegion; import org.apache.geode.internal.cache.InternalCache; import org.apache.geode.internal.cache.InternalRegionArguments; import org.apache.geode.internal.cache.LocalRegion; import org.apache.geode.internal.cache.VMCachedDeserializable; import org.apache.geode.internal.cache.tier.sockets.CacheClientNotifier; import org.apache.geode.internal.cache.tier.sockets.CacheServerStats; import org.apache.geode.internal.cache.tier.sockets.ClientProxyMembershipID; import org.apache.geode.internal.cache.tier.sockets.ClientRegistrationEventQueueManager; import org.apache.geode.internal.cache.tier.sockets.ClientUpdateMessage; import org.apache.geode.internal.cache.tier.sockets.ClientUpdateMessageImpl; import org.apache.geode.internal.cache.tier.sockets.ConnectionListener; import org.apache.geode.internal.cache.tier.sockets.HAEventWrapper; import org.apache.geode.internal.serialization.KnownVersion; import org.apache.geode.internal.statistics.StatisticsClock; import org.apache.geode.internal.util.BlobHelper; import org.apache.geode.internal.util.concurrent.StoppableReentrantReadWriteLock; import org.apache.geode.internal.util.concurrent.StoppableReentrantReadWriteLock.StoppableReadLock; import 
org.apache.geode.internal.util.concurrent.StoppableReentrantReadWriteLock.StoppableWriteLock; import org.apache.geode.test.junit.rules.ExecutorServiceRule; public class HARegionQueueIntegrationTest { private static final int NUM_QUEUES = 100; private static final EvictionAttributes OVERFLOW_TO_DISK = EvictionAttributes.createLIFOEntryAttributes(1000, EvictionAction.OVERFLOW_TO_DISK); private InternalCache cache; private Region dataRegion; private CacheClientNotifier ccn; private InternalDistributedMember member; @Rule public ExecutorServiceRule executorServiceRule = new ExecutorServiceRule(); @Rule public MockitoRule mockitoRule = MockitoJUnit.rule().strictness(Strictness.LENIENT); @Before public void setUp() throws Exception { cache = createCache(); dataRegion = createDataRegion(); ccn = createCacheClientNotifier(); member = createMember(); } @After public void tearDown() throws Exception { ccn.shutdown(0); cache.close(); } @Test public void verifyEndGiiQueueingEmptiesQueueAndHAContainer() throws Exception { HAContainerWrapper haContainerWrapper = (HAContainerWrapper) ccn.getHaContainer(); // create message and HAEventWrapper ClientUpdateMessage message = new ClientUpdateMessageImpl(EnumListenerEvent.AFTER_UPDATE, (LocalRegion) dataRegion, "key", "value".getBytes(), (byte) 0x01, null, new ClientProxyMembershipID(), new EventID(cache.getDistributedSystem())); HAEventWrapper wrapper = new HAEventWrapper(message); wrapper.setHAContainer(haContainerWrapper); wrapper.incrementPutInProgressCounter("test"); // Create and update HARegionQueues forcing one queue to startGiiQueueing int numQueues = 10; HARegionQueue targetQueue = createAndUpdateHARegionQueuesWithGiiQueueing(haContainerWrapper, wrapper, numQueues); // Verify HAContainerWrapper (1) and refCount (numQueues(10)) assertThat(haContainerWrapper).hasSize(1); HAEventWrapper wrapperInContainer = (HAEventWrapper) haContainerWrapper.getKey(wrapper); assertThat(wrapperInContainer.getReferenceCount()).isEqualTo(numQueues 
- 1); assertThat(wrapperInContainer.getPutInProgress()).isTrue(); // Verify that the HAEventWrapper in the giiQueue now has msg != null // We don't null this out while putInProgress > 0 (true) Queue giiQueue = targetQueue.getGiiQueue(); assertThat(giiQueue).hasSize(1); // Simulate that we have iterated through all interested proxies // and are now decrementing the PutInProgressCounter wrapperInContainer.decrementPutInProgressCounter(); // Simulate that other queues have processed this event, then // peek and process the event off the giiQueue for (int i = 0; i < numQueues - 1; ++i) { targetQueue.decAndRemoveFromHAContainer(wrapper); } HAEventWrapper giiQueueEntry = (HAEventWrapper) giiQueue.peek(); assertThat(giiQueueEntry).isNotNull(); assertThat(giiQueueEntry.getClientUpdateMessage()).isNotNull(); // endGiiQueueing and verify queue and HAContainer are empty targetQueue.endGiiQueueing(); assertThat(giiQueue).isEmpty(); assertThat(haContainerWrapper).isEmpty(); } @Test public void verifySequentialUpdateHAEventWrapperWithMap() throws Exception { HAContainerWrapper haContainerWrapper = (HAContainerWrapper) ccn.getHaContainer(); // Create a CachedDeserializable CachedDeserializable cd = createCachedDeserializable(haContainerWrapper); // Create and update HARegionQueues createAndUpdateHARegionQueuesSequentially(haContainerWrapper, cd, NUM_QUEUES); // Verify HAContainerWrapper verifyHAContainerWrapper(haContainerWrapper, cd, NUM_QUEUES); } @Test public void verifySimultaneousUpdateHAEventWrapperWithMap() throws Exception { HAContainerWrapper haContainerWrapper = (HAContainerWrapper) ccn.getHaContainer(); // Create a CachedDeserializable CachedDeserializable cd = createCachedDeserializable(haContainerWrapper); // Create and update HARegionQueues createAndUpdateHARegionQueuesSimultaneously(haContainerWrapper, cd, NUM_QUEUES); // Verify HAContainerWrapper verifyHAContainerWrapper(haContainerWrapper, cd, NUM_QUEUES); } @Test public void 
verifySequentialUpdateHAEventWrapperWithRegion() throws Exception { HAContainerWrapper haContainerWrapper = (HAContainerWrapper) ccn.getHaContainer(); // Create a CachedDeserializable CachedDeserializable cd = createCachedDeserializable(haContainerWrapper); // Create and update HARegionQueues createAndUpdateHARegionQueuesSequentially(haContainerWrapper, cd, NUM_QUEUES); // Verify HAContainerWrapper verifyHAContainerWrapper(haContainerWrapper, cd, NUM_QUEUES); } @Test public void verifySimultaneousUpdateHAEventWrapperWithRegion() throws Exception { HAContainerWrapper haContainerWrapper = (HAContainerWrapper) ccn.getHaContainer(); // Create a CachedDeserializable CachedDeserializable cd = createCachedDeserializable(haContainerWrapper); // Create and update HARegionQueues createAndUpdateHARegionQueuesSimultaneously(haContainerWrapper, cd, NUM_QUEUES); // Verify HAContainerWrapper verifyHAContainerWrapper(haContainerWrapper, cd, NUM_QUEUES); } @Test public void verifySimultaneousPutHAEventWrapperWithRegion() throws Exception { HAContainerWrapper haContainerWrapper = createHAContainerRegion(); int numQueues = 30; int numOperations = 1000; Set<HAEventWrapper> haEventWrappersToValidate = createAndPutHARegionQueuesSimultaneously(haContainerWrapper, numQueues, numOperations); assertThat(haContainerWrapper).hasSize(numOperations); for (HAEventWrapper haEventWrapperToValidate : haEventWrappersToValidate) { HAEventWrapper wrapperInContainer = (HAEventWrapper) haContainerWrapper.getKey(haEventWrapperToValidate); assertThat(wrapperInContainer.getReferenceCount()).isEqualTo(numQueues); } } @Test public void verifySequentialPutHAEventWrapperWithRegion() throws Exception { HAContainerWrapper haContainerWrapper = createHAContainerRegion(); ClientUpdateMessage message = new ClientUpdateMessageImpl(EnumListenerEvent.AFTER_UPDATE, (LocalRegion) dataRegion, "key", "value".getBytes(), (byte) 0x01, null, new ClientProxyMembershipID(), new EventID(new byte[] {1}, 1, 2)); HAEventWrapper 
haEventWrapper = new HAEventWrapper(message); haEventWrapper.setHAContainer(haContainerWrapper); int numQueues = 10; createAndPutHARegionQueuesSequentially(haContainerWrapper, haEventWrapper, numQueues); assertThat(haContainerWrapper).hasSize(1); HAEventWrapper wrapperInContainer = (HAEventWrapper) haContainerWrapper.getKey(haEventWrapper); assertThat(wrapperInContainer.getReferenceCount()).isEqualTo(numQueues); } @Test public void verifySimultaneousPutHAEventWrapperWithMap() throws Exception { HAContainerWrapper haContainerWrapper = (HAContainerWrapper) ccn.getHaContainer(); int numQueues = 30; int numOperations = 1000; Set<HAEventWrapper> haEventWrappersToValidate = createAndPutHARegionQueuesSimultaneously(haContainerWrapper, numQueues, numOperations); assertThat(haContainerWrapper).hasSize(numOperations); for (HAEventWrapper haEventWrapperToValidate : haEventWrappersToValidate) { HAEventWrapper wrapperInContainer = (HAEventWrapper) haContainerWrapper.getKey(haEventWrapperToValidate); assertThat(wrapperInContainer.getReferenceCount()).isEqualTo(numQueues); } } @Test public void verifySequentialPutHAEventWrapperWithMap() throws Exception { HAContainerWrapper haContainerWrapper = (HAContainerWrapper) ccn.getHaContainer(); ClientUpdateMessage message = new ClientUpdateMessageImpl(EnumListenerEvent.AFTER_UPDATE, (LocalRegion) dataRegion, "key", "value".getBytes(), (byte) 0x01, null, new ClientProxyMembershipID(), new EventID(new byte[] {1}, 1, 2)); HAEventWrapper haEventWrapper = new HAEventWrapper(message); haEventWrapper.setHAContainer(haContainerWrapper); int numQueues = 10; createAndPutHARegionQueuesSequentially(haContainerWrapper, haEventWrapper, numQueues); assertThat(haContainerWrapper).hasSize(1); HAEventWrapper wrapperInContainer = (HAEventWrapper) haContainerWrapper.getKey(haEventWrapper); assertThat(wrapperInContainer.getReferenceCount()).isEqualTo(numQueues); } @Test public void queueRemovalAndDispatchingConcurrently() throws Exception { 
HAContainerWrapper haContainerWrapper = (HAContainerWrapper) ccn.getHaContainer(); List<HARegionQueue> regionQueues = new ArrayList<>(); for (int i = 0; i < 2; ++i) { HARegion haRegion = createMockHARegion(); regionQueues.add(createHARegionQueue(haContainerWrapper, i, haRegion, false)); } for (int i = 0; i < 10000; ++i) { EventID eventID = new EventID(new byte[] {1}, 1, i); ClientUpdateMessage message = new ClientUpdateMessageImpl(EnumListenerEvent.AFTER_UPDATE, (LocalRegion) dataRegion, "key", "value".getBytes(), (byte) 0x01, null, new ClientProxyMembershipID(), eventID); HAEventWrapper wrapper = new HAEventWrapper(message); wrapper.setHAContainer(haContainerWrapper); wrapper.incrementPutInProgressCounter("test"); for (HARegionQueue queue : regionQueues) { queue.put(wrapper); } wrapper.decrementPutInProgressCounter(); List<Future<Void>> futures = new ArrayList<>(); for (HARegionQueue queue : regionQueues) { futures.add(executorServiceRule.submit(() -> { queue.peek(); queue.remove(); })); futures.add(executorServiceRule.submit(() -> { queue.removeDispatchedEvents(eventID); })); } for (Future<Void> future : futures) { future.get(getTimeout().toMillis(), MILLISECONDS); } } } @Test public void verifyPutEntryConditionallyInHAContainerNoOverwrite() throws Exception { // create message and HAEventWrapper EventID eventID = new EventID(cache.getDistributedSystem()); ClientUpdateMessage oldMessage = new ClientUpdateMessageImpl(EnumListenerEvent.AFTER_UPDATE, (LocalRegion) dataRegion, "key", "value".getBytes(), (byte) 0x01, null, new ClientProxyMembershipID(), eventID); HAEventWrapper originalWrapperInstance = new HAEventWrapper(oldMessage); originalWrapperInstance.incrementPutInProgressCounter("test"); HAContainerWrapper haContainerWrapper = new HAContainerMap(new ConcurrentHashMap()); originalWrapperInstance.setHAContainer(haContainerWrapper); HARegionQueue haRegionQueue = createHARegionQueue(haContainerWrapper, 0); haRegionQueue.putEventInHARegion(originalWrapperInstance, 
1L); // Simulate a QRM for this event haRegionQueue.region.destroy(1L); haRegionQueue.decAndRemoveFromHAContainer(originalWrapperInstance); ClientUpdateMessage newMessage = new ClientUpdateMessageImpl(EnumListenerEvent.AFTER_UPDATE, (LocalRegion) dataRegion, "key", "value".getBytes(), (byte) 0x01, null, new ClientProxyMembershipID(), eventID); HAEventWrapper newWrapperInstance = new HAEventWrapper(newMessage); newWrapperInstance.incrementPutInProgressCounter("test"); newWrapperInstance.setHAContainer(haContainerWrapper); haRegionQueue.putEventInHARegion(newWrapperInstance, 1L); // Add the original wrapper back in, and verify that it does not overwrite the new one // and that it increments the ref count on the container key. haRegionQueue.putEventInHARegion(originalWrapperInstance, 1L); assertThat(newWrapperInstance.getClientUpdateMessage()) .withFailMessage("Original message overwrote new message in container") .isEqualTo(haContainerWrapper.get(originalWrapperInstance)); assertThat(newWrapperInstance.getReferenceCount()) .withFailMessage("Reference count was not the expected value") .isEqualTo(2); assertThat(haContainerWrapper) .withFailMessage("Container size was not the expected value") .hasSize(1); } @Test public void removeDispatchedEventsViaQRMAndDestroyQueueSimultaneouslySingleDecrement() throws Exception { HAContainerWrapper haContainerWrapper = new HAContainerMap(new ConcurrentHashMap()); HARegion haRegion = createMockHARegion(); HARegionQueue haRegionQueue = createHARegionQueue(haContainerWrapper, 0, haRegion, false); EventID eventID = new EventID(cache.getDistributedSystem()); ClientUpdateMessage clientUpdateMessage = new ClientUpdateMessageImpl(EnumListenerEvent.AFTER_UPDATE, (LocalRegion) dataRegion, "key", "value".getBytes(), (byte) 0x01, null, new ClientProxyMembershipID(), eventID); HAEventWrapper haEventWrapper = new HAEventWrapper(clientUpdateMessage); haEventWrapper.incrementPutInProgressCounter("test"); 
haEventWrapper.setHAContainer(haContainerWrapper); haRegionQueue.put(haEventWrapper); List<Future<Void>> futures = new ArrayList<>(); // In one thread, simulate processing a queue removal message // by removing the dispatched event futures.add(executorServiceRule.submit(() -> { haRegionQueue.removeDispatchedEvents(eventID); })); // In another thread, simulate that the region is being destroyed, for instance // when a SocketTimeoutException is thrown and we are cleaning up futures.add(executorServiceRule.submit(() -> { haRegionQueue.destroy(); })); for (Future<Void> future : futures) { future.get(); } await().untilAsserted(() -> { assertThat(haEventWrapper.getReferenceCount()) .withFailMessage( "Expected HAEventWrapper reference count to be decremented to 0 by either the queue removal or destroy queue logic") .isZero(); }); } @Test public void removeDispatchedEventsViaMessageDispatcherAndDestroyQueueSimultaneouslySingleDecrement() throws Exception { HAContainerWrapper haContainerWrapper = new HAContainerMap(new ConcurrentHashMap()); HARegion haRegion = createMockHARegion(); HARegionQueue haRegionQueue = createHARegionQueue(haContainerWrapper, 0, haRegion, false); EventID eventID = new EventID(cache.getDistributedSystem()); ClientUpdateMessage clientUpdateMessage = new ClientUpdateMessageImpl(EnumListenerEvent.AFTER_UPDATE, (LocalRegion) dataRegion, "key", "value".getBytes(), (byte) 0x01, null, new ClientProxyMembershipID(), eventID); HAEventWrapper haEventWrapper = new HAEventWrapper(clientUpdateMessage); haEventWrapper.incrementPutInProgressCounter("test"); haEventWrapper.setHAContainer(haContainerWrapper); haRegionQueue.put(haEventWrapper); List<Future<Void>> futures = new ArrayList<>(); // In one thread, simulate processing a queue removal message // by removing the dispatched event futures.add(executorServiceRule.submit(() -> { // Simulate dispatching a message by peeking and removing the HAEventWrapper haRegionQueue.peek(); haRegionQueue.remove(); })); // In 
another thread, simulate that the region is being destroyed, for instance // when a SocketTimeoutException is thrown and we are cleaning up futures.add(executorServiceRule.submit(() -> { haRegionQueue.destroy(); })); for (Future<Void> future : futures) { future.get(); } await().untilAsserted(() -> { assertThat(haEventWrapper.getReferenceCount()) .withFailMessage( "Expected HAEventWrapper reference count to be decremented to 0 by either the message dispatcher or destroy queue logic") .isZero(); }); } private InternalCache createCache() { return (InternalCache) new CacheFactory().set(MCAST_PORT, "0").create(); } private Region createDataRegion() { return cache.createRegionFactory(RegionShortcut.REPLICATE).create("data"); } private CacheClientNotifier createCacheClientNotifier() { CacheClientNotifier ccn = CacheClientNotifier.getInstance(cache, mock(ClientRegistrationEventQueueManager.class), mock(StatisticsClock.class), mock(CacheServerStats.class), 100000, 100000, mock(ConnectionListener.class), null, false); return ccn; } private InternalDistributedMember createMember() { // Create an InternalDistributedMember InternalDistributedMember member = mock(InternalDistributedMember.class); when(member.getVersion()).thenReturn(KnownVersion.CURRENT); return member; } private HARegion createMockHARegion() { HARegion haRegion = mock(HARegion.class); Map<Object, Object> map = new ConcurrentHashMap<>(); when(haRegion.getGemFireCache()) .thenReturn(cache); when(haRegion.put(any(Object.class), any(Object.class))) .then(answer -> map.put(answer.getArgument(0), answer.getArgument(1))); when(haRegion.get(any(Object.class))) .then(answer -> map.get(answer.getArgument(0))); doAnswer(answer -> { map.remove(answer.getArgument(0)); return null; }).when(haRegion).localDestroy(any(Object.class)); return haRegion; } private HAContainerRegion createHAContainerRegion() throws IOException, ClassNotFoundException { return new HAContainerRegion(createHAContainerRegionRegion()); } private 
Region<Object, Object> createHAContainerRegionRegion() throws IOException, ClassNotFoundException { String regionName = CacheServerImpl.generateNameForClientMsgsRegion(0); AttributesFactory<Object, Object> factory = new AttributesFactory<>(); factory.setDataPolicy(DataPolicy.NORMAL); factory.setDiskStoreName(null); factory.setDiskSynchronous(true); factory.setEvictionAttributes(OVERFLOW_TO_DISK); factory.setStatisticsEnabled(true); factory.setScope(Scope.LOCAL); InternalRegionArguments arguments = new InternalRegionArguments() .setDestroyLockFlag(true) .setRecreateFlag(false) .setSnapshotInputStream(null) .setImageTarget(null) .setIsUsedForMetaRegion(true); return cache.createVMRegion(regionName, factory.create(), arguments); } private HARegionQueue createHARegionQueue(Map haContainer, int index, HARegion haRegion, boolean puttingGIIDataInQueue) throws InterruptedException, IOException, ClassNotFoundException { StoppableReentrantReadWriteLock giiLock = mock(StoppableReentrantReadWriteLock.class); StoppableReentrantReadWriteLock rwLock = new StoppableReentrantReadWriteLock(cache.getCancelCriterion()); when(giiLock.writeLock()).thenReturn(mock(StoppableWriteLock.class)); when(giiLock.readLock()).thenReturn(mock(StoppableReadLock.class)); return new HARegionQueue("haRegion+" + index, haRegion, cache, haContainer, null, (byte) 1, true, mock(HARegionQueueStats.class), giiLock, rwLock, mock(CancelCriterion.class), puttingGIIDataInQueue, mock(StatisticsClock.class)); } private HARegionQueue createHARegionQueue(Map haContainer, int index) throws InterruptedException, IOException, ClassNotFoundException { HARegion haRegion = mock(HARegion.class); when(haRegion.getGemFireCache()).thenReturn(cache); return createHARegionQueue(haContainer, index, haRegion, false); } private CachedDeserializable createCachedDeserializable(HAContainerWrapper haContainerWrapper) throws IOException { // Create ClientUpdateMessage and HAEventWrapper ClientUpdateMessage message = new 
ClientUpdateMessageImpl(EnumListenerEvent.AFTER_UPDATE, (LocalRegion) dataRegion, "key", "value".getBytes(), (byte) 0x01, null, new ClientProxyMembershipID(), new EventID(cache.getDistributedSystem())); HAEventWrapper wrapper = new HAEventWrapper(message); wrapper.setHAContainer(haContainerWrapper); // Create a CachedDeserializable // Note: The haContainerRegion must contain the wrapper and message to serialize it haContainerWrapper.putIfAbsent(wrapper, message); byte[] wrapperBytes = BlobHelper.serializeToBlob(wrapper); CachedDeserializable cd = new VMCachedDeserializable(wrapperBytes); haContainerWrapper.remove(wrapper); assertThat(haContainerWrapper).isEmpty(); return cd; } private void createAndUpdateHARegionQueuesSequentially(HAContainerWrapper haContainerWrapper, CachedDeserializable cd, int numQueues) throws InterruptedException, IOException, ClassNotFoundException { // Create some HARegionQueues for (int i = 0; i < numQueues; i++) { HARegionQueue haRegionQueue = createHARegionQueue(haContainerWrapper, i); haRegionQueue.updateHAEventWrapper(member, cd, "haRegion"); } } private HARegionQueue createAndUpdateHARegionQueuesWithGiiQueueing( HAContainerWrapper haContainerWrapper, HAEventWrapper wrapper, int numQueues) throws InterruptedException, IOException, ClassNotFoundException { HARegionQueue targetQueue = null; int startGiiQueueingIndex = numQueues / 2; // create HARegionQueues and startGiiQueuing on a region about half way through for (int i = 0; i < numQueues; i++) { HARegionQueue haRegionQueue = null; // start GII Queueing (targetRegionQueue) if (i == startGiiQueueingIndex) { HARegion haRegion = mock(HARegion.class); HARegionQueue giiHaRegionQueue = createHARegionQueue(haContainerWrapper, i, haRegion, false);; giiHaRegionQueue.startGiiQueueing(); targetQueue = giiHaRegionQueue; when(haRegion.put(any(Object.class), any(HAEventWrapper.class))) .then(answer -> { // Simulate that either a QRM or message dispatch has occurred immediately after the // put. 
// We want to ensure that the event is removed from the HAContainer if it is drained // from the giiQueue and the ref count has dropped to 0. HAEventWrapper haContainerKey = answer.getArgument(1); giiHaRegionQueue.decAndRemoveFromHAContainer(haContainerKey); return null; }); when(haRegion.getGemFireCache()).thenReturn(cache); haRegionQueue = giiHaRegionQueue; } else { haRegionQueue = createHARegionQueue(haContainerWrapper, i); } haRegionQueue.put(wrapper); } return targetQueue; } private Set<HAEventWrapper> createAndPutHARegionQueuesSimultaneously( HAContainerWrapper haContainerWrapper, int numQueues, int numOperations) throws InterruptedException, IOException, ClassNotFoundException { Collection<HARegionQueue> queues = new ConcurrentLinkedQueue<>(); AtomicInteger count = new AtomicInteger(); // create HARegionQueues for (int i = 0; i < numQueues; i++) { queues.add(createHARegionQueue(haContainerWrapper, i)); } Set<HAEventWrapper> testValidationWrapperSet = ConcurrentHashMap.newKeySet(); for (int i = 0; i < numOperations; i++) { count.set(i); ClientUpdateMessage message = new ClientUpdateMessageImpl(EnumListenerEvent.AFTER_CREATE, (LocalRegion) dataRegion, "key", "value".getBytes(), (byte) 0x01, null, new ClientProxyMembershipID(), new EventID(new byte[] {1}, 1, count.get())); queues.parallelStream().forEach(haRegionQueue -> { try { // In production (CacheClientNotifier.singletonRouteClientMessage), each queue has its // own HAEventWrapper object even though they hold the same ClientUpdateMessage, // so we create an object for each queue in here HAEventWrapper haEventWrapper = new HAEventWrapper(message); testValidationWrapperSet.add(haEventWrapper); haRegionQueue.put(haEventWrapper); } catch (InterruptedException iex) { throw new RuntimeException(iex); } }); } return testValidationWrapperSet; } private void createAndPutHARegionQueuesSequentially(HAContainerWrapper haContainerWrapper, HAEventWrapper haEventWrapper, int numQueues) throws InterruptedException, 
IOException, ClassNotFoundException { Collection<HARegionQueue> queues = new ArrayList<>(); // create HARegionQueues for (int i = 0; i < numQueues; i++) { queues.add(createHARegionQueue(haContainerWrapper, i)); } haEventWrapper.incrementPutInProgressCounter("test"); for (HARegionQueue queue : queues) { queue.put(haEventWrapper); } haEventWrapper.decrementPutInProgressCounter(); } private void createAndUpdateHARegionQueuesSimultaneously(HAContainerWrapper haContainerWrapper, CachedDeserializable cd, int numQueues) throws InterruptedException, IOException, ClassNotFoundException, TimeoutException, ExecutionException { // Create some HARegionQueues HARegionQueue[] haRegionQueues = new HARegionQueue[numQueues]; for (int i = 0; i < numQueues; i++) { haRegionQueues[i] = createHARegionQueue(haContainerWrapper, i); } // Create threads to simultaneously update the HAEventWrapper Collection<Future<Void>> futures = new ArrayList<>(); for (HARegionQueue haRegionQueue : haRegionQueues) { futures.add(executorServiceRule.submit(() -> { haRegionQueue.updateHAEventWrapper(member, cd, "haRegion"); })); } for (Future<Void> future : futures) { future.get(getTimeout().toMillis(), MILLISECONDS); } } private void verifyHAContainerWrapper(HAContainerWrapper haContainerWrapper, CachedDeserializable cd, int numQueues) { // Verify HAContainerRegion size assertThat(haContainerWrapper).hasSize(1); // Verify the refCount is correct HAEventWrapper wrapperInContainer = (HAEventWrapper) haContainerWrapper.getKey(cd.getDeserializedForReading()); assertThat(wrapperInContainer.getReferenceCount()).isEqualTo(numQueues); } private static <T> T mock(Class<T> classToMock) { return Mockito.mock(classToMock, withSettings().stubOnly()); } }
apache-2.0
nmldiegues/stibt
radargun/framework/src/test/java/org/radargun/fwk/DomConfigAttributesParsingTest.java
1059
package org.radargun.fwk; import org.radargun.config.DomConfigParser; import org.radargun.config.FixedSizeBenchmarkConfig; import org.radargun.config.MasterConfig; import org.radargun.config.ScalingBenchmarkConfig; import org.radargun.utils.TypedProperties; import org.testng.annotations.Test; import java.util.List; import static org.testng.AssertJUnit.assertEquals; /** * @author Mircea.Markus@jboss.com */ @Test public class DomConfigAttributesParsingTest { public void simpleTest() throws Exception { DomConfigParser parser = new DomConfigParser(); MasterConfig masterConfig = parser.parseConfig("config-attributes-benchmark.xml"); List<FixedSizeBenchmarkConfig> benchmarks = masterConfig.getBenchmarks(); assertEquals(benchmarks.size(),2); ScalingBenchmarkConfig sc = (ScalingBenchmarkConfig) benchmarks.get(0); TypedProperties configAttributes = sc.getConfigAttributes(); assertEquals(configAttributes.getProperty("a"), "va"); assertEquals(configAttributes.getIntProperty("i", -1), 1); } }
apache-2.0
datametica/calcite
core/src/main/java/org/apache/calcite/adapter/enumerable/EnumerableCollect.java
3785
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to you under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.calcite.adapter.enumerable; import org.apache.calcite.linq4j.tree.BlockBuilder; import org.apache.calcite.linq4j.tree.Expression; import org.apache.calcite.linq4j.tree.Expressions; import org.apache.calcite.plan.RelOptCluster; import org.apache.calcite.plan.RelTraitSet; import org.apache.calcite.rel.RelNode; import org.apache.calcite.rel.core.Collect; import org.apache.calcite.util.BuiltInMethod; /** Implementation of {@link org.apache.calcite.rel.core.Collect} in * {@link org.apache.calcite.adapter.enumerable.EnumerableConvention enumerable calling convention}. 
*/ public class EnumerableCollect extends Collect implements EnumerableRel { public EnumerableCollect(RelOptCluster cluster, RelTraitSet traitSet, RelNode child, String fieldName) { super(cluster, traitSet, child, fieldName); assert getConvention() instanceof EnumerableConvention; assert getConvention() == child.getConvention(); } @Override public EnumerableCollect copy(RelTraitSet traitSet, RelNode newInput) { return new EnumerableCollect(getCluster(), traitSet, newInput, fieldName); } @Override public Result implement(EnumerableRelImplementor implementor, Prefer pref) { final BlockBuilder builder = new BlockBuilder(); final EnumerableRel child = (EnumerableRel) getInput(); // REVIEW zabetak January 7, 2019: Even if we ask the implementor to provide a result // where records are represented as arrays (Prefer.ARRAY) this may not be respected. final Result result = implementor.visitChild(this, 0, child, Prefer.ARRAY); final PhysType physType = PhysTypeImpl.of( implementor.getTypeFactory(), getRowType(), JavaRowFormat.LIST); // final Enumerable child = <<child adapter>>; // final Enumerable<Object[]> converted = child.select(<<conversion code>>); // final List<Object[]> list = converted.toList(); Expression child_ = builder.append( "child", result.block); // In the internal representation of multisets , every element must be a record. In case the // result above is a scalar type we have to wrap it around a physical type capable of // representing records. For this reason the following conversion is necessary. // REVIEW zabetak January 7, 2019: If we can ensure that the input to this operator // has the correct physical type (e.g., respecting the Prefer.ARRAY above) then this conversion // can be removed. 
Expression conv_ = builder.append( "converted", result.physType.convertTo(child_, JavaRowFormat.ARRAY)); Expression list_ = builder.append("list", Expressions.call(conv_, BuiltInMethod.ENUMERABLE_TO_LIST.method)); builder.add( Expressions.return_(null, Expressions.call( BuiltInMethod.SINGLETON_ENUMERABLE.method, list_))); return implementor.result(physType, builder.toBlock()); } }
apache-2.0
papicella/snappy-store
gemfire-examples/src/main/java/helloworld/HelloWorldProducer.java
2092
/* * Copyright (c) 2010-2015 Pivotal Software, Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package helloworld; import com.gemstone.gemfire.cache.Cache; import com.gemstone.gemfire.cache.CacheFactory; import com.gemstone.gemfire.cache.Region; /** * This example shows two members with replicated regions. This member puts * entries into the replicated region. Please refer to the quickstart guide for * instructions on how to run this example. * * @author GemStone Systems, Inc. * * @since 6.5 */ public class HelloWorldProducer { public static void main(String[] args) throws Exception { System.out.println("\nConnecting to the distributed system and creating the cache."); // Create the cache which causes the cache-xml-file to be parsed Cache cache = new CacheFactory() .set("cache-xml-file", "xml/HelloWorld.xml") .create(); // Get the exampleRegion Region<String,String> exampleRegion = cache.getRegion("exampleRegion"); System.out.println("Example region, " + exampleRegion.getFullPath() + ", created in cache. "); System.out.println("Putting entry: Hello, World"); exampleRegion.put("Hello", "World"); System.out.println("Putting entry: Hello, Moon!"); exampleRegion.put("Hello", "Moon!"); // Close the cache and disconnect from GemFire distributed system System.out.println("\nClosing the cache and disconnecting."); cache.close(); System.out.println("\nPlease press Enter in the HelloWorldConsumer."); } }
apache-2.0
macs524/mybatis_learn
src/test/java/org/apache/ibatis/submitted/empty_row/ReturnInstanceForEmptyRowTest.java
5354
/** * Copyright 2009-2017 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ibatis.submitted.empty_row; import static org.junit.Assert.*; import java.io.Reader; import java.sql.Connection; import java.util.Map; import org.apache.ibatis.io.Resources; import org.apache.ibatis.jdbc.ScriptRunner; import org.apache.ibatis.session.SqlSession; import org.apache.ibatis.session.SqlSessionFactory; import org.apache.ibatis.session.SqlSessionFactoryBuilder; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; public class ReturnInstanceForEmptyRowTest { private static SqlSessionFactory sqlSessionFactory; @BeforeClass public static void setUp() throws Exception { // create an SqlSessionFactory Reader reader = Resources .getResourceAsReader("org/apache/ibatis/submitted/empty_row/mybatis-config.xml"); sqlSessionFactory = new SqlSessionFactoryBuilder().build(reader); reader.close(); // populate in-memory database SqlSession session = sqlSessionFactory.openSession(); Connection conn = session.getConnection(); reader = Resources .getResourceAsReader("org/apache/ibatis/submitted/empty_row/CreateDB.sql"); ScriptRunner runner = new ScriptRunner(conn); runner.setLogWriter(null); runner.runScript(reader); conn.close(); reader.close(); session.close(); } @Before public void resetCallSettersOnNulls() { sqlSessionFactory.getConfiguration().setCallSettersOnNulls(false); } @Test public void shouldSimpleTypeBeNull() { SqlSession 
sqlSession = sqlSessionFactory.openSession(); try { Mapper mapper = sqlSession.getMapper(Mapper.class); String result = mapper.getString(); assertNull(result); } finally { sqlSession.close(); } } @Test public void shouldObjectTypeNotBeNull() { SqlSession sqlSession = sqlSessionFactory.openSession(); try { Mapper mapper = sqlSession.getMapper(Mapper.class); Parent parent = mapper.getBean(1); assertNotNull(parent); } finally { sqlSession.close(); } } @Test public void shouldMapBeEmpty() { SqlSession sqlSession = sqlSessionFactory.openSession(); try { Mapper mapper = sqlSession.getMapper(Mapper.class); Map<String, String> map = mapper.getMap(1); assertNotNull(map); assertTrue(map.isEmpty()); } finally { sqlSession.close(); } } @Test public void shouldMapHaveColumnNamesIfCallSettersOnNullsEnabled() { sqlSessionFactory.getConfiguration().setCallSettersOnNulls(true); SqlSession sqlSession = sqlSessionFactory.openSession(); try { Mapper mapper = sqlSession.getMapper(Mapper.class); Map<String, String> map = mapper.getMap(1); assertEquals(2, map.size()); assertTrue(map.containsKey("COL1")); assertTrue(map.containsKey("COL2")); } finally { sqlSession.close(); } } @Test public void shouldAssociationNotBeNull() { SqlSession sqlSession = sqlSessionFactory.openSession(); try { Mapper mapper = sqlSession.getMapper(Mapper.class); Parent parent = mapper.getAssociation(1); assertNotNull(parent.getChild()); } finally { sqlSession.close(); } } @Test public void shouldAssociationBeNullIfNotNullColumnSpecified() { SqlSession sqlSession = sqlSessionFactory.openSession(); try { Mapper mapper = sqlSession.getMapper(Mapper.class); Parent parent = mapper.getAssociationWithNotNullColumn(1); assertNotNull(parent); assertNull(parent.getChild()); } finally { sqlSession.close(); } } @Test public void shouldNestedAssociationNotBeNull() { // #420 SqlSession sqlSession = sqlSessionFactory.openSession(); try { Mapper mapper = sqlSession.getMapper(Mapper.class); Parent parent = 
mapper.getNestedAssociation(); assertNotNull(parent.getChild().getGrandchild()); } finally { sqlSession.close(); } } @Test public void testCollection() { SqlSession sqlSession = sqlSessionFactory.openSession(); try { Mapper mapper = sqlSession.getMapper(Mapper.class); Parent parent = mapper.getCollection(1); assertEquals(1, parent.getChildren().size()); assertNotNull(parent.getChildren().get(0)); } finally { sqlSession.close(); } } @Test public void shouldSquashMultipleEmptyResults() { SqlSession sqlSession = sqlSessionFactory.openSession(); try { Mapper mapper = sqlSession.getMapper(Mapper.class); Parent parent = mapper.getTwoCollections(2); assertEquals(1, parent.getPets().size()); assertNotNull(parent.getPets().get(0)); } finally { sqlSession.close(); } } }
apache-2.0
mauimauer/cheapcast
src/main/java/at/maui/cheapcast/chromecast/model/ConnectionResponse.java
1355
/* * Copyright 2013 Sebastian Mauer * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package at.maui.cheapcast.chromecast.model; public class ConnectionResponse { private String URL, type; private int senderId, requestId; public String getURL() { return URL; } public void setURL(String URL) { this.URL = URL; } public String getType() { return type; } public void setType(String type) { this.type = type; } public int getSenderId() { return senderId; } public void setSenderId(int senderId) { this.senderId = senderId; } public int getRequestId() { return requestId; } public void setRequestId(int requestId) { this.requestId = requestId; } }
apache-2.0
mhurne/aws-sdk-java
aws-java-sdk-lambda/src/main/java/com/amazonaws/services/lambda/model/transform/GetFunctionConfigurationRequestMarshaller.java
5515
/*
 * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.lambda.model.transform;

import static com.amazonaws.util.StringUtils.UTF8;
import static com.amazonaws.util.StringUtils.COMMA_SEPARATOR;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.OutputStreamWriter;
import java.io.StringWriter;
import java.io.Writer;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.List;
import java.util.regex.Pattern;

import com.amazonaws.AmazonClientException;
import com.amazonaws.Request;
import com.amazonaws.DefaultRequest;
import com.amazonaws.http.HttpMethodName;
import com.amazonaws.services.lambda.model.*;
import com.amazonaws.transform.Marshaller;
import com.amazonaws.util.BinaryUtils;
import com.amazonaws.util.StringUtils;
import com.amazonaws.util.StringInputStream;
import com.amazonaws.util.json.*;

/**
 * Get Function Configuration Request Marshaller.
 *
 * Translates a {@code GetFunctionConfigurationRequest} into an HTTP GET request
 * against the Lambda REST path {@code /2015-03-31/functions/{FunctionName}/configuration},
 * with {@code Qualifier} passed as an optional query parameter.
 */
public class GetFunctionConfigurationRequestMarshaller implements Marshaller<Request<GetFunctionConfigurationRequest>, GetFunctionConfigurationRequest> {

    // Path portion of the URI template, with the query string stripped off.
    private static final String RESOURCE_PATH_TEMPLATE;
    // Query parameters with fixed literal values (name -> value).
    private static final Map<String, String> STATIC_QUERY_PARAMS;
    // Query parameters whose value is a template placeholder
    // (placeholder name -> query parameter name).
    private static final Map<String, String> DYNAMIC_QUERY_PARAMS;
    static {
        // Split the URI template into its path and query-string parts, and
        // classify each query parameter as static ("name=value") or dynamic
        // ("name={Placeholder}"). Runs once at class-load time.
        String path = "/2015-03-31/functions/{FunctionName}/configuration?Qualifier={Qualifier}";

        Map<String, String> staticMap = new HashMap<String, String>();
        Map<String, String> dynamicMap = new HashMap<String, String>();

        int index = path.indexOf("?");
        if (index != -1) {
            String queryString = path.substring(index + 1);
            path = path.substring(0, index);

            // Query parameters may be separated by ';' or '&'.
            for (String s : queryString.split("[;&]")) {
                index = s.indexOf("=");
                if (index != -1) {
                    String name = s.substring(0, index);
                    String value = s.substring(index + 1);

                    if (value.startsWith("{") && value.endsWith("}")) {
                        // Dynamic: keyed by the placeholder name (braces stripped).
                        dynamicMap.put(value.substring(1, value.length() - 1), name);
                    } else {
                        staticMap.put(name, value);
                    }
                }
            }
        }

        RESOURCE_PATH_TEMPLATE = path;
        STATIC_QUERY_PARAMS = Collections.unmodifiableMap(staticMap);
        DYNAMIC_QUERY_PARAMS = Collections.unmodifiableMap(dynamicMap);
    }

    /**
     * Marshalls the given request into an HTTP GET {@link Request}.
     *
     * @param getFunctionConfigurationRequest the request to marshall; must not be null
     * @return the populated HTTP request
     * @throws AmazonClientException if the request is null
     */
    public Request<GetFunctionConfigurationRequest> marshall(GetFunctionConfigurationRequest getFunctionConfigurationRequest) {
        if (getFunctionConfigurationRequest == null) {
            throw new AmazonClientException("Invalid argument passed to marshall(...)");
        }

        Request<GetFunctionConfigurationRequest> request = new DefaultRequest<GetFunctionConfigurationRequest>(getFunctionConfigurationRequest, "AWSLambda");
        String target = "AWSLambda.GetFunctionConfiguration";
        request.addHeader("X-Amz-Target", target);
        request.setHttpMethod(HttpMethodName.GET);
        String uriResourcePath = RESOURCE_PATH_TEMPLATE;

        // FunctionName: added as a query parameter if the template declared it
        // dynamic; otherwise substituted into the resource path.
        if (DYNAMIC_QUERY_PARAMS.containsKey("FunctionName")) {
            String name = DYNAMIC_QUERY_PARAMS.get("FunctionName");
            String value = (getFunctionConfigurationRequest.getFunctionName() == null) ? null : StringUtils.fromString(getFunctionConfigurationRequest.getFunctionName());
            if (!(value == null || value.isEmpty())) {
                request.addParameter(name, value);
            }
        } else {
            uriResourcePath = uriResourcePath.replace("{FunctionName}", (getFunctionConfigurationRequest.getFunctionName() == null) ? "" : StringUtils.fromString(getFunctionConfigurationRequest.getFunctionName()));
        }

        // Qualifier: same treatment as FunctionName; for this template it is a
        // dynamic query parameter, so it is omitted entirely when unset.
        if (DYNAMIC_QUERY_PARAMS.containsKey("Qualifier")) {
            String name = DYNAMIC_QUERY_PARAMS.get("Qualifier");
            String value = (getFunctionConfigurationRequest.getQualifier() == null) ? null : StringUtils.fromString(getFunctionConfigurationRequest.getQualifier());
            if (!(value == null || value.isEmpty())) {
                request.addParameter(name, value);
            }
        } else {
            uriResourcePath = uriResourcePath.replace("{Qualifier}", (getFunctionConfigurationRequest.getQualifier() == null) ? "" : StringUtils.fromString(getFunctionConfigurationRequest.getQualifier()));
        }

        // Collapse any double slashes left by empty path substitutions.
        request.setResourcePath(uriResourcePath.replaceAll("//", "/"));

        for (Map.Entry<String, String> entry : STATIC_QUERY_PARAMS.entrySet()) {
            request.addParameter(entry.getKey(), entry.getValue());
        }

        // GET carries no body; set empty content and a default content type.
        request.setContent(new ByteArrayInputStream(new byte[0]));
        if (!request.getHeaders().containsKey("Content-Type")) {
            request.addHeader("Content-Type", "application/x-amz-json-1.1");
        }

        return request;
    }
}
apache-2.0
rafaelgarrote/metamodel
core/src/main/java/org/apache/metamodel/data/DataSetHeader.java
1317
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.metamodel.data;

import java.io.Serializable;

import org.apache.metamodel.query.SelectItem;
import org.apache.metamodel.schema.Column;

/**
 * Represents the header of a {@link DataSet}, which define the
 * columns/SelectItems of it.
 */
public interface DataSetHeader extends Serializable {

    /** Returns all select items (columns) in this header, in order. */
    public SelectItem[] getSelectItems();

    /** Returns the number of select items in this header. */
    public int size();

    /** Returns the index of the given select item. */
    public int indexOf(SelectItem item);

    /** Returns the index of the select item matching the given column. */
    public int indexOf(Column column);

    /** Returns the select item at index {@code i}. */
    public SelectItem getSelectItem(int i);
}
apache-2.0
emre-aydin/hazelcast
hazelcast/src/test/java/com/hazelcast/internal/serialization/impl/VersionedInCurrentVersionTest.java
7193
/*
 * Copyright (c) 2008-2021, Hazelcast, Inc. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.hazelcast.internal.serialization.impl;

import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.serialization.DataSerializable;
import com.hazelcast.nio.serialization.impl.Versioned;
import com.hazelcast.test.annotation.ParallelJVMTest;
import com.hazelcast.test.annotation.QuickTest;
import com.hazelcast.test.starter.GuardianException;
import com.hazelcast.test.starter.HazelcastVersionLocator;
import com.hazelcast.internal.util.StringUtil;
import com.hazelcast.version.Version;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TemporaryFolder;
import org.junit.runner.RunWith;
import org.mockito.Mockito;
import org.powermock.core.classloader.annotations.PowerMockIgnore;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import org.reflections.Reflections;
import org.reflections.scanners.SubTypesScanner;
import org.reflections.util.ConfigurationBuilder;

import java.io.File;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;

import static com.hazelcast.instance.BuildInfoProvider.getBuildInfo;
import static com.hazelcast.internal.cluster.Versions.CURRENT_CLUSTER_VERSION;
import static com.hazelcast.internal.cluster.Versions.PREVIOUS_CLUSTER_VERSION;
import static com.hazelcast.test.HazelcastTestSupport.assumeThatNoJDK6;
import static com.hazelcast.test.HazelcastTestSupport.assumeThatNoJDK7;
import static com.hazelcast.test.ReflectionsHelper.REFLECTIONS;
import static com.hazelcast.test.ReflectionsHelper.filterNonConcreteClasses;
import static com.hazelcast.test.starter.HazelcastStarterUtils.rethrowGuardianException;
import static com.hazelcast.internal.util.EmptyStatement.ignore;
import static org.junit.Assert.fail;
import static org.junit.Assume.assumeNoException;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;

/**
 * Tests a common compatibility issue: when a (Identified)DataSerializable class first
 * becomes Versioned, it might miss checking input stream for UNKNOWN version (which is
 * the version of an incoming stream from a previous-version member) instead of using
 * in.getVersion.isUnknownOrLessThan(CURRENT).
 */
@RunWith(PowerMockRunner.class)
@PowerMockIgnore({"javax.net.ssl.*", "javax.security.*", "javax.management.*"})
@PrepareForTest(Version.class)
@Category({QuickTest.class, ParallelJVMTest.class})
public class VersionedInCurrentVersionTest {

    @Rule
    public TemporaryFolder temporaryFolder = new TemporaryFolder();

    // Concrete DataSerializable classes that became Versioned only in the
    // current release (i.e. were not Versioned in the previous version).
    private Set<Class<? extends Versioned>> versionedInCurrentVersion;

    @Before
    public void setup() throws Exception {
        // Classpath scanning used below is known to misbehave on old JDKs
        // in this suite's setup, hence the assumptions.
        assumeThatNoJDK6();
        assumeThatNoJDK7();

        Set<Class<? extends Versioned>> versionedClasses = REFLECTIONS.getSubTypesOf(Versioned.class);
        Set<Class<? extends DataSerializable>> dsClasses = REFLECTIONS.getSubTypesOf(DataSerializable.class);
        Set<Class<? extends Versioned>> versionedSincePreviousVersion = versionedClassesInPreviousVersion();

        // Keep only concrete classes that are both DataSerializable and newly
        // Versioned in the current version.
        filterNonConcreteClasses(versionedClasses);
        versionedClasses.removeAll(versionedSincePreviousVersion);
        versionedClasses.retainAll(dsClasses);
        versionedInCurrentVersion = versionedClasses;
    }

    @Test
    public void testNewVersionedClass_doesNotInvokeLessThan_whenReadingData() {
        List<Class<? extends Versioned>> failures = new ArrayList<Class<? extends Versioned>>();
        for (Class<? extends Versioned> versionedClass : versionedInCurrentVersion) {
            Versioned instance = createInstance(versionedClass);
            if (instance == null) {
                // may occur when there is no default constructor
                continue;
            }
            DataSerializable dataSerializable = (DataSerializable) instance;

            // Feed readData() an input whose version is a spied
            // CURRENT_CLUSTER_VERSION so we can detect isLessThan() calls.
            Version spy = org.powermock.api.mockito.PowerMockito.spy(CURRENT_CLUSTER_VERSION);
            ObjectDataInput mockInput = spy(ObjectDataInput.class);
            when(mockInput.getVersion()).thenReturn(spy);
            try {
                dataSerializable.readData(mockInput);
            } catch (Throwable t) {
                // Reading from a mock input is expected to blow up eventually;
                // only the isLessThan() invocation matters here.
                ignore(t);
            } finally {
                try {
                    // If verify() succeeds, the class DID call isLessThan(),
                    // which is the bug this test looks for.
                    Mockito.verify(spy).isLessThan(CURRENT_CLUSTER_VERSION);
                    failures.add(versionedClass);
                } catch (Throwable t) {
                    // expected when Version.isLessThan() was not invoked
                }
            }
        }
        if (!failures.isEmpty()) {
            StringBuilder failMessageBuilder = new StringBuilder();
            for (Class<? extends Versioned> failedClass : failures) {
                failMessageBuilder.append(StringUtil.LINE_SEPARATOR)
                        .append(failedClass.getName())
                        .append(" invoked in.getVersion().isLessThan(CURRENT_CLUSTER_VERSION) while reading data");
            }
            fail(failMessageBuilder.toString());
        }
    }

    // Instantiates via the no-arg constructor; returns null when the class
    // has none (such classes are skipped by the test).
    private <T extends Versioned> T createInstance(Class<T> klass) {
        try {
            return klass.newInstance();
        } catch (Exception e) {
            return null;
        }
    }

    // Scans the previous-version release artifacts for classes that were
    // already Versioned there.
    private Set<Class<? extends Versioned>> versionedClassesInPreviousVersion() throws Exception {
        File[] previousVersionArtifacts = getPreviousVersionArtifacts();
        URL[] artifactURLs = new URL[previousVersionArtifacts.length];
        for (int i = 0; i < previousVersionArtifacts.length; i++) {
            artifactURLs[i] = previousVersionArtifacts[i].toURI().toURL();
        }
        Reflections previousVersionReflections = new Reflections(new ConfigurationBuilder()
                .setUrls(artifactURLs)
                .addScanners(new SubTypesScanner())
        );
        return previousVersionReflections.getSubTypesOf(Versioned.class);
    }

    // Downloads (or locates) the previous-version artifacts into a temp
    // folder; the test is skipped when they cannot be obtained.
    private File[] getPreviousVersionArtifacts() throws Exception {
        File previousVersionFolder = temporaryFolder.newFolder();
        try {
            return HazelcastVersionLocator.locateVersion(
                    PREVIOUS_CLUSTER_VERSION.toString(),
                    previousVersionFolder,
                    getBuildInfo().isEnterprise());
        } catch (GuardianException e) {
            assumeNoException("The requested version could not be downloaded, most probably it has not been released yet", e);
            throw rethrowGuardianException(e);
        }
    }
}
apache-2.0
objectiser/camel
platforms/spring-boot/components-starter/camel-mvel-starter/src/main/java/org/apache/camel/language/mvel/springboot/MvelLanguageConfiguration.java
1852
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.camel.language.mvel.springboot;

import javax.annotation.Generated;
import org.apache.camel.spring.boot.LanguageConfigurationPropertiesCommon;
import org.springframework.boot.context.properties.ConfigurationProperties;

/**
 * To use MVEL scripts in Camel expressions or predicates.
 *
 * Generated by camel-package-maven-plugin - do not edit this file!
 */
@Generated("org.apache.camel.maven.packaging.SpringBootAutoConfigurationMojo")
@ConfigurationProperties(prefix = "camel.language.mvel")
public class MvelLanguageConfiguration
        extends
            LanguageConfigurationPropertiesCommon {

    /**
     * Whether to enable auto configuration of the mvel language. This is
     * enabled by default.
     */
    private Boolean enabled;
    /**
     * Whether to trim the value to remove leading and trailing whitespaces and
     * line breaks
     */
    private Boolean trim = true;

    // Accessors used by Spring Boot when binding "camel.language.mvel.*"
    // properties (see the @ConfigurationProperties prefix above).
    public Boolean getTrim() {
        return trim;
    }

    public void setTrim(Boolean trim) {
        this.trim = trim;
    }
}
apache-2.0
MaltheFriisberg/CIE
src/main/java/org/netmelody/cieye/spies/teamcity/jsondomain/Builds.java
299
package org.netmelody.cieye.spies.teamcity.jsondomain;

import java.util.ArrayList;
import java.util.List;

/**
 * JSON binding for a TeamCity "builds" response: a count plus the list of
 * individual builds (which may be absent in the payload).
 */
public final class Builds {
    public int count;
    public List<Build> build;

    /** The bound build list, or a fresh empty list when the field was absent. */
    public List<Build> build() {
        if (build == null) {
            return new ArrayList<Build>();
        }
        return build;
    }
}
apache-2.0
droolsjbpm/jbpm-wb
jbpm-wb-integration/jbpm-wb-integration-client/src/main/java/org/jbpm/workbench/wi/client/workitem/ServiceTaskRepositoryPerspective.java
1811
/* * Copyright 2018 Red Hat, Inc. and/or its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jbpm.workbench.wi.client.workitem; import javax.enterprise.context.Dependent; import org.uberfire.client.annotations.Perspective; import org.uberfire.client.annotations.WorkbenchPerspective; import org.uberfire.client.workbench.panels.impl.SimpleWorkbenchPanelPresenter; import org.uberfire.mvp.impl.DefaultPlaceRequest; import org.uberfire.workbench.model.PerspectiveDefinition; import org.uberfire.workbench.model.impl.PartDefinitionImpl; import org.uberfire.workbench.model.impl.PerspectiveDefinitionImpl; import com.google.gwt.user.client.ui.Composite; @Dependent @WorkbenchPerspective(identifier = ServiceTaskRepositoryPerspective.PERSPECTIVE_ID) public class ServiceTaskRepositoryPerspective extends Composite { public static final String PERSPECTIVE_ID = "ServiceTaskAdminPerspective"; @Perspective public PerspectiveDefinition getPerspective() { final PerspectiveDefinition p = new PerspectiveDefinitionImpl(SimpleWorkbenchPanelPresenter.class.getName()); p.setName(PERSPECTIVE_ID); p.getRoot().addPart(new PartDefinitionImpl(new DefaultPlaceRequest(ServiceTasksRepositoryListPresenter.SCREEN_ID))); return p; } }
apache-2.0
miniway/presto
presto-kafka/src/test/java/io/prestosql/plugin/kafka/util/EmbeddedZookeeper.java
2601
/* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.prestosql.plugin.kafka.util; import com.google.common.io.Files; import org.apache.zookeeper.server.NIOServerCnxnFactory; import org.apache.zookeeper.server.ServerCnxnFactory; import org.apache.zookeeper.server.ZooKeeperServer; import org.apache.zookeeper.server.persistence.FileTxnSnapLog; import java.io.Closeable; import java.io.File; import java.io.IOException; import java.util.concurrent.atomic.AtomicBoolean; import static com.google.common.io.MoreFiles.deleteRecursively; import static com.google.common.io.RecursiveDeleteOption.ALLOW_INSECURE; public class EmbeddedZookeeper implements Closeable { private final File zkDataDir; private final ZooKeeperServer zkServer; private final ServerCnxnFactory cnxnFactory; private final AtomicBoolean started = new AtomicBoolean(); private final AtomicBoolean stopped = new AtomicBoolean(); public EmbeddedZookeeper() throws IOException { zkDataDir = Files.createTempDir(); zkServer = new ZooKeeperServer(); FileTxnSnapLog ftxn = new FileTxnSnapLog(zkDataDir, zkDataDir); zkServer.setTxnLogFactory(ftxn); cnxnFactory = NIOServerCnxnFactory.createFactory(0, 0); } public void start() throws InterruptedException, IOException { if (!started.getAndSet(true)) { cnxnFactory.startup(zkServer); } } @Override public void close() throws IOException { if (started.get() && !stopped.getAndSet(true)) { cnxnFactory.shutdown(); try { cnxnFactory.join(); } catch (InterruptedException e) { 
Thread.currentThread().interrupt(); } if (zkServer.isRunning()) { zkServer.shutdown(); } deleteRecursively(zkDataDir.toPath(), ALLOW_INSECURE); } } public String getConnectString() { return "127.0.0.1:" + cnxnFactory.getLocalPort(); } }
apache-2.0
xiao-chen/hadoop
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
131317
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs; import java.io.*; import java.security.Permission; import java.security.PrivilegedExceptionAction; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Arrays; import java.util.Date; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Random; import java.util.Scanner; import java.util.TimeZone; import java.util.concurrent.atomic.AtomicInteger; import java.util.zip.DeflaterOutputStream; import java.util.zip.GZIPOutputStream; import com.google.common.base.Supplier; import com.google.common.collect.Lists; import org.apache.commons.lang3.RandomStringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.log4j.Level; import org.junit.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.*; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import 
org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica; import org.apache.hadoop.hdfs.server.namenode.FSDirectory; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.hdfs.tools.DFSAdmin; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.SequenceFile; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.compress.BZip2Codec; import org.apache.hadoop.io.compress.CompressionCodec; import org.apache.hadoop.net.ServerSocketUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.ToolRunner; import org.junit.rules.Timeout; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Rule; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS; import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT; import static org.apache.hadoop.fs.permission.AclEntryType.*; import static org.apache.hadoop.fs.permission.FsAction.*; import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.not; import static org.junit.Assert.*; import static org.hamcrest.core.StringContains.containsString; /** * This class tests commands from DFSShell. 
 */
public class TestDFSShell {
  private static final Logger LOG = LoggerFactory.getLogger(TestDFSShell.class);
  // NOTE(review): counter, SUCCESS/ERROR and the xattr name/value constants are
  // not referenced in this portion of the file — presumably used by tests
  // outside this view; confirm before removing.
  private static final AtomicInteger counter = new AtomicInteger();
  private final int SUCCESS = 0;
  private final int ERROR = 1;

  // Local filesystem directory used as scratch space for put/get round-trips.
  static final String TEST_ROOT_DIR = PathUtils.getTestDirName(TestDFSShell.class);

  private static final String RAW_A1 = "raw.a1";
  private static final String TRUSTED_A1 = "trusted.a1";
  private static final String USER_A1 = "user.a1";
  private static final byte[] RAW_A1_VALUE = new byte[]{0x32, 0x32, 0x32};
  private static final byte[] TRUSTED_A1_VALUE = new byte[]{0x31, 0x31, 0x31};
  private static final byte[] USER_A1_VALUE = new byte[]{0x31, 0x32, 0x33};
  private static final int BLOCK_SIZE = 1024;

  // Shared cluster/filesystem for the whole class (see @BeforeClass/@AfterClass).
  private static MiniDFSCluster miniCluster;
  private static DistributedFileSystem dfs;

  /**
   * Starts one shared 2-datanode MiniDFSCluster (permissions, xattrs, ACLs
   * enabled; small block size) used by most tests in this class.
   */
  @BeforeClass
  public static void setup() throws IOException {
    final Configuration conf = new Configuration();
    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    // set up the shared miniCluster directory so individual tests can launch
    // new clusters without conflict
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,
        GenericTestUtils.getTestDir("TestDFSShell").getAbsolutePath());
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 1000);
    miniCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    miniCluster.waitActive();
    dfs = miniCluster.getFileSystem();
  }

  /** Tears down the shared cluster after all tests have run. */
  @AfterClass
  public static void tearDown() {
    if (miniCluster != null) {
      miniCluster.shutdown(true, true);
    }
  }

  @Rule
  public Timeout globalTimeout = new Timeout(30 * 1000); // 30s

  /** Writes a small deterministic text file at {@code f} and asserts it exists. */
  static Path writeFile(FileSystem fs, Path f) throws IOException {
    DataOutputStream out = fs.create(f);
    out.writeBytes("dhruba: " + f);
    out.close();
    assertTrue(fs.exists(f));
    return f;
  }

  /** Writes a one-byte file at {@code f} and asserts it exists. */
  static Path writeByte(FileSystem fs, Path f) throws IOException {
    DataOutputStream out = fs.create(f);
    out.writeByte(1);
    out.close();
    assertTrue(fs.exists(f));
    return f;
  }

  /** Creates directory {@code p} and asserts it exists and is a directory. */
  static Path mkdir(FileSystem fs, Path p) throws IOException {
    assertTrue(fs.mkdirs(p));
    assertTrue(fs.exists(p));
    assertTrue(fs.getFileStatus(p).isDirectory());
    return p;
  }

  /** Recursively deletes {@code p} and asserts it is gone. */
  static void rmr(FileSystem fs, Path p) throws IOException {
    assertTrue(fs.delete(p, true));
    assertFalse(fs.exists(p));
  }

  /** Create a local file whose content contains its full path. */
  static File createLocalFile(File f) throws IOException {
    assertTrue(!f.exists());
    PrintWriter out = new PrintWriter(f);
    out.print("createLocalFile: " + f.getAbsolutePath());
    out.flush();
    out.close();
    assertTrue(f.exists());
    assertTrue(f.isFile());
    return f;
  }

  /** Create a local file of {@code fileLength} zero bytes (buffer is not randomized). */
  static File createLocalFileWithRandomData(int fileLength, File f)
      throws IOException {
    assertTrue(!f.exists());
    f.createNewFile();
    FileOutputStream out = new FileOutputStream(f.toString());
    byte[] buffer = new byte[fileLength];
    out.write(buffer);
    out.flush();
    out.close();
    return f;
  }

  /** Prints {@code s} prefixed with the caller's stack frame, for test tracing. */
  static void show(String s) {
    System.out.println(Thread.currentThread().getStackTrace()[2] + " " + s);
  }

  /** Round-trips a zero-length file through HDFS and back to the local fs. */
  @Test (timeout = 30000)
  public void testZeroSizeFile() throws IOException {
    //create a zero size file
    final File f1 = new File(TEST_ROOT_DIR, "f1");
    assertTrue(!f1.exists());
    assertTrue(f1.createNewFile());
    assertTrue(f1.exists());
    assertTrue(f1.isFile());
    assertEquals(0L, f1.length());

    //copy to remote
    final Path root = mkdir(dfs, new Path("/testZeroSizeFile/zeroSizeFile"));
    final Path remotef = new Path(root, "dst");
    show("copy local " + f1 + " to remote " + remotef);
    dfs.copyFromLocalFile(false, false, new Path(f1.getPath()), remotef);

    //getBlockSize() should not throw exception
    show("Block size = " + dfs.getFileStatus(remotef).getBlockSize());

    //copy back
    final File f2 = new File(TEST_ROOT_DIR, "f2");
    assertTrue(!f2.exists());
    dfs.copyToLocalFile(remotef, new Path(f2.getPath()));
    assertTrue(f2.exists());
    assertTrue(f2.isFile());
    assertEquals(0L, f2.length());

    f1.delete();
    f2.delete();
  }

  /** Non-recursive delete of a non-empty dir must fail; recursive must succeed. */
  @Test (timeout = 30000)
  public void testRecursiveRm() throws IOException {
    final Path parent = new Path("/testRecursiveRm", "parent");
    final Path child = new Path(parent, "child");
    dfs.mkdirs(child);
    try {
      dfs.delete(parent, false);
      fail("Should have failed because dir is not empty");
    } catch(IOException e) {
      //should have thrown an exception
    }
    dfs.delete(parent, true);
    assertFalse(dfs.exists(parent));
  }

  /**
   * Exercises "-du": plain listing, "-du -s" against a snapshot path, and
   * multi-path output formatting. Captures System.out to inspect shell output.
   */
  @Test (timeout = 30000)
  public void testDu() throws IOException {
    int replication = 2;
    PrintStream psBackup = System.out;
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    PrintStream psOut = new PrintStream(out);
    System.setOut(psOut);
    FsShell shell = new FsShell(dfs.getConf());
    try {
      final Path myPath = new Path("/testDu", "dir");
      assertTrue(dfs.mkdirs(myPath));
      assertTrue(dfs.exists(myPath));
      final Path myFile = new Path(myPath, "file");
      writeFile(dfs, myFile);
      assertTrue(dfs.exists(myFile));
      final Path myFile2 = new Path(myPath, "file2");
      writeFile(dfs, myFile2);
      assertTrue(dfs.exists(myFile2));
      Long myFileLength = dfs.getFileStatus(myFile).getLen();
      Long myFileDiskUsed = myFileLength * replication;
      Long myFile2Length = dfs.getFileStatus(myFile2).getLen();
      Long myFile2DiskUsed = myFile2Length * replication;

      String[] args = new String[2];
      args[0] = "-du";
      args[1] = myPath.toString();
      int val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertTrue(val == 0);
      String returnString = out.toString();
      out.reset();
      // Check if size matches as expected
      assertThat(returnString, containsString(myFileLength.toString()));
      assertThat(returnString, containsString(myFileDiskUsed.toString()));
      assertThat(returnString, containsString(myFile2Length.toString()));
      assertThat(returnString, containsString(myFile2DiskUsed.toString()));

      // Check that -du -s reports the state of the snapshot
      String snapshotName = "ss1";
      Path snapshotPath = new Path(myPath, ".snapshot/" + snapshotName);
      dfs.allowSnapshot(myPath);
      assertThat(dfs.createSnapshot(myPath, snapshotName), is(snapshotPath));
      assertThat(dfs.delete(myFile, false), is(true));
      assertThat(dfs.exists(myFile), is(false));

      args = new String[3];
      args[0] = "-du";
      args[1] = "-s";
      args[2] = snapshotPath.toString();
      val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertThat(val, is(0));
      returnString = out.toString();
      out.reset();
      // Snapshot still sees the deleted file, so both files are counted.
      Long combinedLength = myFileLength + myFile2Length;
      Long combinedDiskUsed = myFileDiskUsed + myFile2DiskUsed;
      assertThat(returnString, containsString(combinedLength.toString()));
      assertThat(returnString, containsString(combinedDiskUsed.toString()));

      // Check if output is rendered properly with multiple input paths
      final Path myFile3 = new Path(myPath, "file3");
      writeByte(dfs, myFile3);
      assertTrue(dfs.exists(myFile3));
      args = new String[3];
      args[0] = "-du";
      args[1] = myFile3.toString();
      args[2] = myFile2.toString();
      val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertEquals("Return code should be 0.", 0, val);
      returnString = out.toString();
      out.reset();
      assertTrue(returnString.contains("1 2 " + myFile3.toString()));
      assertTrue(returnString.contains("25 50 " + myFile2.toString()));
    } finally {
      System.setOut(psBackup);
    }
  }

  /**
   * Exercises "-du" variants against a snapshottable directory:
   * "-du", "-du -s", and the snapshot-excluding "-x" flag.
   */
  @Test (timeout = 180000)
  public void testDuSnapshots() throws IOException {
    final int replication = 2;
    final PrintStream psBackup = System.out;
    final ByteArrayOutputStream out = new ByteArrayOutputStream();
    final PrintStream psOut = new PrintStream(out);
    final FsShell shell = new FsShell(dfs.getConf());
    try {
      System.setOut(psOut);
      final Path parent = new Path("/testDuSnapshots");
      final Path dir = new Path(parent, "dir");
      mkdir(dfs, dir);
      final Path file = new Path(dir, "file");
      writeFile(dfs, file);
      final Path file2 = new Path(dir, "file2");
      writeFile(dfs, file2);
      final Long fileLength = dfs.getFileStatus(file).getLen();
      final Long fileDiskUsed = fileLength * replication;
      final Long file2Length = dfs.getFileStatus(file2).getLen();
      final Long file2DiskUsed = file2Length * replication;

      /*
       * Construct dir as follows:
       * /test/dir/file <- this will later be deleted after snapshot taken.
       * /test/dir/newfile <- this will be created after snapshot taken.
       * /test/dir/file2
       * Snapshot enabled on /test
       */

      // test -du on /test/dir
      int ret = -1;
      try {
        ret = shell.run(new String[] {"-du", dir.toString()});
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertEquals(0, ret);
      String returnString = out.toString();
      LOG.info("-du return is:\n" + returnString);
      // Check if size matches as expected
      assertTrue(returnString.contains(fileLength.toString()));
      assertTrue(returnString.contains(fileDiskUsed.toString()));
      assertTrue(returnString.contains(file2Length.toString()));
      assertTrue(returnString.contains(file2DiskUsed.toString()));
      out.reset();

      // take a snapshot, then remove file and add newFile
      final String snapshotName = "ss1";
      final Path snapshotPath = new Path(parent, ".snapshot/" + snapshotName);
      dfs.allowSnapshot(parent);
      assertThat(dfs.createSnapshot(parent, snapshotName), is(snapshotPath));
      rmr(dfs, file);
      final Path newFile = new Path(dir, "newfile");
      writeFile(dfs, newFile);
      final Long newFileLength = dfs.getFileStatus(newFile).getLen();
      final Long newFileDiskUsed = newFileLength * replication;

      // test -du -s on /test
      ret = -1;
      try {
        ret = shell.run(new String[] {"-du", "-s", parent.toString()});
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertEquals(0, ret);
      returnString = out.toString();
      LOG.info("-du -s return is:\n" + returnString);
      // Without -x the deleted-but-snapshotted file is still counted.
      Long combinedLength = fileLength + file2Length + newFileLength;
      Long combinedDiskUsed = fileDiskUsed + file2DiskUsed + newFileDiskUsed;
      assertTrue(returnString.contains(combinedLength.toString()));
      assertTrue(returnString.contains(combinedDiskUsed.toString()));
      out.reset();

      // test -du on /test
      ret = -1;
      try {
        ret = shell.run(new String[] {"-du", parent.toString()});
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertEquals(0, ret);
      returnString = out.toString();
      LOG.info("-du return is:\n" + returnString);
      assertTrue(returnString.contains(combinedLength.toString()));
      assertTrue(returnString.contains(combinedDiskUsed.toString()));
      out.reset();

      // test -du -s -x on /test
      ret = -1;
      try {
        ret = shell.run(new String[] {"-du", "-s", "-x", parent.toString()});
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertEquals(0, ret);
      returnString = out.toString();
      LOG.info("-du -s -x return is:\n" + returnString);
      // With -x, only the live files (file2 + newfile) are counted.
      Long exludeSnapshotLength = file2Length + newFileLength;
      Long excludeSnapshotDiskUsed = file2DiskUsed + newFileDiskUsed;
      assertTrue(returnString.contains(exludeSnapshotLength.toString()));
      assertTrue(returnString.contains(excludeSnapshotDiskUsed.toString()));
      out.reset();

      // test -du -x on /test
      ret = -1;
      try {
        ret = shell.run(new String[] {"-du", "-x", parent.toString()});
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertEquals(0, ret);
      returnString = out.toString();
      LOG.info("-du -x return is:\n" + returnString);
      assertTrue(returnString.contains(exludeSnapshotLength.toString()));
      assertTrue(returnString.contains(excludeSnapshotDiskUsed.toString()));
      out.reset();
    } finally {
      System.setOut(psBackup);
    }
  }

  /**
   * Exercises "-count" and "-count -x" against a snapshottable directory,
   * verifying DIR_COUNT/FILE_COUNT/CONTENT_SIZE with and without snapshots.
   */
  @Test (timeout = 180000)
  public void testCountSnapshots() throws IOException {
    final PrintStream psBackup = System.out;
    final ByteArrayOutputStream out = new ByteArrayOutputStream();
    final PrintStream psOut = new PrintStream(out);
    System.setOut(psOut);
    final FsShell shell = new FsShell(dfs.getConf());
    try {
      final Path parent = new Path("/testCountSnapshots");
      final Path dir = new Path(parent, "dir");
      mkdir(dfs, dir);
      final Path file = new Path(dir, "file");
      writeFile(dfs, file);
      final Path file2 = new Path(dir, "file2");
      writeFile(dfs, file2);
      final long fileLength = dfs.getFileStatus(file).getLen();
      final long file2Length = dfs.getFileStatus(file2).getLen();
      final Path dir2 = new Path(parent, "dir2");
      mkdir(dfs, dir2);

      /*
       * Construct dir as follows:
       * /test/dir/file <- this will later be deleted after snapshot taken.
       * /test/dir/newfile <- this will be created after snapshot taken.
       * /test/dir/file2
       * /test/dir2 <- this will later be deleted after snapshot taken.
       * Snapshot enabled on /test
       */

      // take a snapshot
      // then create /test/dir/newfile and remove /test/dir/file, /test/dir2
      final String snapshotName = "s1";
      final Path snapshotPath = new Path(parent, ".snapshot/" + snapshotName);
      dfs.allowSnapshot(parent);
      assertThat(dfs.createSnapshot(parent, snapshotName), is(snapshotPath));
      rmr(dfs, file);
      rmr(dfs, dir2);
      final Path newFile = new Path(dir, "new file");
      writeFile(dfs, newFile);
      final Long newFileLength = dfs.getFileStatus(newFile).getLen();

      // test -count on /test. Include header for easier debugging.
      int val = -1;
      try {
        val = shell.run(new String[] {"-count", "-v", parent.toString() });
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertEquals(0, val);
      String returnString = out.toString();
      LOG.info("-count return is:\n" + returnString);
      Scanner in = new Scanner(returnString);
      in.nextLine(); // skip the -v header line
      assertEquals(3, in.nextLong()); //DIR_COUNT
      assertEquals(3, in.nextLong()); //FILE_COUNT
      assertEquals(fileLength + file2Length + newFileLength,
          in.nextLong()); //CONTENT_SIZE
      out.reset();

      // test -count -x on /test. Include header for easier debugging.
      val = -1;
      try {
        val = shell.run(new String[] {"-count", "-x", "-v", parent.toString()});
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertEquals(0, val);
      returnString = out.toString();
      LOG.info("-count -x return is:\n" + returnString);
      in = new Scanner(returnString);
      in.nextLine(); // skip the -v header line
      assertEquals(2, in.nextLong()); //DIR_COUNT
      assertEquals(2, in.nextLong()); //FILE_COUNT
      assertEquals(file2Length + newFileLength, in.nextLong()); //CONTENT_SIZE
      out.reset();
    } finally {
      System.setOut(psBackup);
    }
  }

  /**
   * Tests put/copyFromLocal behavior when two clients race to create the same
   * destination, plus multi-source copyFromLocalFile/moveFromLocalFile. Uses a
   * SecurityManager hook to pause the first copy mid-transfer so the second
   * copy starts while the first is in flight.
   */
  @Test (timeout = 30000)
  public void testPut() throws IOException {
    // remove left over crc files:
    new File(TEST_ROOT_DIR, ".f1.crc").delete();
    new File(TEST_ROOT_DIR, ".f2.crc").delete();
    final File f1 = createLocalFile(new File(TEST_ROOT_DIR, "f1"));
    final File f2 = createLocalFile(new File(TEST_ROOT_DIR, "f2"));
    final Path root = mkdir(dfs, new Path("/testPut"));
    final Path dst = new Path(root, "dst");
    show("begin");

    final Thread copy2ndFileThread = new Thread() {
      @Override
      public void run() {
        try {
          show("copy local " + f2 + " to remote " + dst);
          dfs.copyFromLocalFile(false, false, new Path(f2.getPath()), dst);
        } catch (IOException ioe) {
          show("good " + StringUtils.stringifyException(ioe));
          return;
        }
        //should not be here, must got IOException
        assertTrue(false);
      }
    };

    //use SecurityManager to pause the copying of f1 and begin copying f2
    SecurityManager sm = System.getSecurityManager();
    System.out.println("SecurityManager = " + sm);
    System.setSecurityManager(new SecurityManager() {
      private boolean firstTime = true;

      @Override
      public void checkPermission(Permission perm) {
        if (firstTime) {
          Thread t = Thread.currentThread();
          if (!t.toString().contains("DataNode")) {
            String s = "" + Arrays.asList(t.getStackTrace());
            if (s.contains("FileUtil.copyContent")) {
              //pause at FileUtil.copyContent
              firstTime = false;
              copy2ndFileThread.start();
              try {Thread.sleep(5000);} catch (InterruptedException e) {}
            }
          }
        }
      }
    });
    show("copy local " + f1 + " to remote " + dst);
    dfs.copyFromLocalFile(false, false, new Path(f1.getPath()), dst);
    show("done");

    try {copy2ndFileThread.join();} catch (InterruptedException e) { }
    System.setSecurityManager(sm);

    // copy multiple files to destination directory
    final Path destmultiple = mkdir(dfs, new Path(root, "putmultiple"));
    Path[] srcs = new Path[2];
    srcs[0] = new Path(f1.getPath());
    srcs[1] = new Path(f2.getPath());
    dfs.copyFromLocalFile(false, false, srcs, destmultiple);
    srcs[0] = new Path(destmultiple,"f1");
    srcs[1] = new Path(destmultiple,"f2");
    assertTrue(dfs.exists(srcs[0]));
    assertTrue(dfs.exists(srcs[1]));

    // move multiple files to destination directory
    final Path destmultiple2 = mkdir(dfs, new Path(root, "movemultiple"));
    srcs[0] = new Path(f1.getPath());
    srcs[1] = new Path(f2.getPath());
    dfs.moveFromLocalFile(srcs, destmultiple2);
    // move deletes the local sources
    assertFalse(f1.exists());
    assertFalse(f2.exists());
    srcs[0] = new Path(destmultiple2, "f1");
    srcs[1] = new Path(destmultiple2, "f2");
    assertTrue(dfs.exists(srcs[0]));
    assertTrue(dfs.exists(srcs[1]));

    f1.delete();
    f2.delete();
  }

  /** check command error outputs and exit statuses.
   */
  @Test (timeout = 30000)
  public void testErrOutPut() throws Exception {
    PrintStream bak = null;
    try {
      Path root = new Path("/nonexistentfile");
      // Capture System.err so the shell's error messages can be inspected.
      bak = System.err;
      ByteArrayOutputStream out = new ByteArrayOutputStream();
      PrintStream tmp = new PrintStream(out);
      System.setErr(tmp);
      String[] argv = new String[2];
      argv[0] = "-cat";
      argv[1] = root.toUri().getPath();
      int ret = ToolRunner.run(new FsShell(), argv);
      assertEquals(" -cat returned 1 ", 1, ret);
      String returned = out.toString();
      // error message should be user-friendly, not a raw stack trace
      assertTrue("cat does not print exceptions ",
          (returned.lastIndexOf("Exception") == -1));
      out.reset();
      argv[0] = "-rm";
      argv[1] = root.toString();
      FsShell shell = new FsShell(dfs.getConf());
      ret = ToolRunner.run(shell, argv);
      assertEquals(" -rm returned 1 ", 1, ret);
      returned = out.toString();
      out.reset();
      assertTrue("rm prints reasonable error ",
          (returned.lastIndexOf("No such file or directory") != -1));
      argv[0] = "-rmr";
      argv[1] = root.toString();
      ret = ToolRunner.run(shell, argv);
      assertEquals(" -rmr returned 1", 1, ret);
      returned = out.toString();
      assertTrue("rmr prints reasonable error ",
          (returned.lastIndexOf("No such file or directory") != -1));
      out.reset();
      argv[0] = "-du";
      argv[1] = "/nonexistentfile";
      ret = ToolRunner.run(shell, argv);
      returned = out.toString();
      assertTrue(" -du prints reasonable error ",
          (returned.lastIndexOf("No such file or directory") != -1));
      out.reset();
      argv[0] = "-dus";
      argv[1] = "/nonexistentfile";
      ret = ToolRunner.run(shell, argv);
      returned = out.toString();
      assertTrue(" -dus prints reasonable error",
          (returned.lastIndexOf("No such file or directory") != -1));
      out.reset();
      argv[0] = "-ls";
      argv[1] = "/nonexistenfile";
      ret = ToolRunner.run(shell, argv);
      returned = out.toString();
      assertTrue(" -ls does not return Found 0 items",
          (returned.lastIndexOf("Found 0") == -1));
      out.reset();
      argv[0] = "-ls";
      argv[1] = "/nonexistentfile";
      ret = ToolRunner.run(shell, argv);
      assertEquals(" -lsr should fail ", 1, ret);
      out.reset();
      dfs.mkdirs(new Path("/testdir"));
      argv[0] = "-ls";
      argv[1] = "/testdir";
      ret = ToolRunner.run(shell, argv);
      returned = out.toString();
      assertTrue(" -ls does not print out anything ",
          (returned.lastIndexOf("Found 0") == -1));
      out.reset();
      argv[0] = "-ls";
      argv[1] = "/user/nonxistant/*";
      ret = ToolRunner.run(shell, argv);
      assertEquals(" -ls on nonexistent glob returns 1", 1, ret);
      out.reset();
      argv[0] = "-mkdir";
      argv[1] = "/testdir";
      ret = ToolRunner.run(shell, argv);
      returned = out.toString();
      assertEquals(" -mkdir returned 1 ", 1, ret);
      assertTrue(" -mkdir returned File exists",
          (returned.lastIndexOf("File exists") != -1));
      Path testFile = new Path("/testfile");
      OutputStream outtmp = dfs.create(testFile);
      outtmp.write(testFile.toString().getBytes());
      outtmp.close();
      out.reset();
      argv[0] = "-mkdir";
      argv[1] = "/testfile";
      ret = ToolRunner.run(shell, argv);
      returned = out.toString();
      assertEquals(" -mkdir returned 1", 1, ret);
      assertTrue(" -mkdir returned this is a file ",
          (returned.lastIndexOf("not a directory") != -1));
      out.reset();
      argv[0] = "-mkdir";
      argv[1] = "/testParent/testChild";
      ret = ToolRunner.run(shell, argv);
      returned = out.toString();
      assertEquals(" -mkdir returned 1", 1, ret);
      assertTrue(" -mkdir returned there is No file or directory but has testChild in the path",
          (returned.lastIndexOf("testChild") == -1));
      out.reset();
      argv = new String[3];
      argv[0] = "-mv";
      argv[1] = "/testfile";
      argv[2] = "/no-such-dir/file";
      ret = ToolRunner.run(shell, argv);
      assertEquals("mv failed to rename", 1, ret);
      out.reset();
      argv = new String[3];
      argv[0] = "-mv";
      argv[1] = "/testfile";
      argv[2] = "/testfiletest";
      ret = ToolRunner.run(shell, argv);
      returned = out.toString();
      assertTrue("no output from rename",
          (returned.lastIndexOf("Renamed") == -1));
      out.reset();
      argv[0] = "-mv";
      argv[1] = "/testfile";
      argv[2] = "/testfiletmp";
      ret = ToolRunner.run(shell, argv);
      returned = out.toString();
      assertTrue(" unix like output",
          (returned.lastIndexOf("No such file or") != -1));
      out.reset();
      argv = new String[1];
      argv[0] = "-du";
      dfs.mkdirs(dfs.getHomeDirectory());
      ret = ToolRunner.run(shell, argv);
      returned = out.toString();
      assertEquals(" no error ", 0, ret);
      assertTrue("empty path specified",
          (returned.lastIndexOf("empty string") == -1));
      out.reset();
      argv = new String[3];
      argv[0] = "-test";
      argv[1] = "-d";
      argv[2] = "/no/such/dir";
      ret = ToolRunner.run(shell, argv);
      returned = out.toString();
      assertEquals(" -test -d wrong result ", 1, ret);
      assertTrue(returned.isEmpty());
    } finally {
      if (bak != null) {
        System.setErr(bak);
      }
    }
  }

  /**
   * "-mv" to an hdfs:// URI with no explicit port should still resolve when
   * the NameNode listens on the default RPC port.
   */
  @Test
  public void testMoveWithTargetPortEmpty() throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
      // NameNode must be on the default RPC port for the port-less URI to match.
      cluster = new MiniDFSCluster.Builder(conf)
          .format(true)
          .numDataNodes(2)
          .nameNodePort(ServerSocketUtil.waitForPort(
              HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT, 60))
          .waitSafeMode(true)
          .build();
      FileSystem srcFs = cluster.getFileSystem();
      FsShell shell = new FsShell();
      shell.setConf(conf);
      String[] argv = new String[2];
      argv[0] = "-mkdir";
      argv[1] = "/testfile";
      ToolRunner.run(shell, argv);
      argv = new String[3];
      argv[0] = "-mv";
      argv[1] = srcFs.getUri() + "/testfile";
      argv[2] = "hdfs://" + srcFs.getUri().getHost() + "/testfile2";
      int ret = ToolRunner.run(shell, argv);
      assertEquals("mv should have succeeded", 0, ret);
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }

  /**
   * Runs shell commands against fully-qualified URIs spanning two separate
   * clusters (ls, rmr, du, put, cp, cat, chgrp/chown), then verifies that the
   * default "hdfs:///" scheme resolves against the configured filesystem.
   */
  @Test (timeout = 30000)
  public void testURIPaths() throws Exception {
    Configuration srcConf = new HdfsConfiguration();
    Configuration dstConf = new HdfsConfiguration();
    MiniDFSCluster srcCluster = null;
    MiniDFSCluster dstCluster = null;
    File bak = new File(PathUtils.getTestDir(getClass()), "testURIPaths");
    bak.mkdirs();
    try {
      srcCluster = new MiniDFSCluster.Builder(srcConf).numDataNodes(2).build();
      dstConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, bak.getAbsolutePath());
      dstCluster = new MiniDFSCluster.Builder(dstConf).numDataNodes(2).build();
      FileSystem srcFs = srcCluster.getFileSystem();
      FileSystem dstFs = dstCluster.getFileSystem();
      FsShell shell = new FsShell();
      shell.setConf(srcConf);
      //check for ls
      String[] argv = new String[2];
      argv[0] = "-ls";
      argv[1] = dstFs.getUri().toString() + "/";
      int ret = ToolRunner.run(shell, argv);
      assertEquals("ls works on remote uri ", 0, ret);
      //check for rm -r
      dstFs.mkdirs(new Path("/hadoopdir"));
      argv = new String[2];
      argv[0] = "-rmr";
      argv[1] = dstFs.getUri().toString() + "/hadoopdir";
      ret = ToolRunner.run(shell, argv);
      assertEquals("-rmr works on remote uri " + argv[1], 0, ret);
      //check du
      argv[0] = "-du";
      argv[1] = dstFs.getUri().toString() + "/";
      ret = ToolRunner.run(shell, argv);
      assertEquals("du works on remote uri ", 0, ret);
      //check put
      File furi = new File(TEST_ROOT_DIR, "furi");
      createLocalFile(furi);
      argv = new String[3];
      argv[0] = "-put";
      argv[1] = furi.toURI().toString();
      argv[2] = dstFs.getUri().toString() + "/furi";
      ret = ToolRunner.run(shell, argv);
      assertEquals(" put is working ", 0, ret);
      //check cp
      argv[0] = "-cp";
      argv[1] = dstFs.getUri().toString() + "/furi";
      argv[2] = srcFs.getUri().toString() + "/furi";
      ret = ToolRunner.run(shell, argv);
      assertEquals(" cp is working ", 0, ret);
      assertTrue(srcFs.exists(new Path("/furi")));
      //check cat
      argv = new String[2];
      argv[0] = "-cat";
      argv[1] = dstFs.getUri().toString() + "/furi";
      ret = ToolRunner.run(shell, argv);
      assertEquals(" cat is working ", 0, ret);
      //check chown
      dstFs.delete(new Path("/furi"), true);
      dstFs.delete(new Path("/hadoopdir"), true);
      String file = "/tmp/chownTest";
      Path path = new Path(file);
      Path parent = new Path("/tmp");
      Path root = new Path("/");
      TestDFSShell.writeFile(dstFs, path);
      runCmd(shell, "-chgrp", "-R", "herbivores", dstFs.getUri().toString() +"/*");
      confirmOwner(null, "herbivores", dstFs, parent, path);
      runCmd(shell, "-chown", "-R", ":reptiles", dstFs.getUri().toString() + "/");
      confirmOwner(null, "reptiles", dstFs, root, parent, path);
      //check if default hdfs:/// works
      argv[0] = "-cat";
      argv[1] = "hdfs:///furi";
      ret = ToolRunner.run(shell, argv);
      assertEquals(" default works for cat", 0, ret);
      argv[0] = "-ls";
      argv[1] = "hdfs:///";
      ret = ToolRunner.run(shell, argv);
      assertEquals("default works for ls ", 0, ret);
      argv[0] = "-rmr";
      argv[1] = "hdfs:///furi";
      ret = ToolRunner.run(shell, argv);
      assertEquals("default works for rm/rmr", 0, ret);
    } finally {
      if (null != srcCluster) {
        srcCluster.shutdown();
      }
      if (null != dstCluster) {
        dstCluster.shutdown();
      }
    }
  }

  /**
   * Test that -head displays first kilobyte of the file to stdout.
   */
  @Test (timeout = 30000)
  public void testHead() throws Exception {
    final int fileLen = 5 * BLOCK_SIZE;

    // create a text file with multiple KB bytes (and multiple blocks)
    final Path testFile = new Path("testHead", "file1");
    final String text = RandomStringUtils.randomAscii(fileLen);
    try (OutputStream pout = dfs.create(testFile)) {
      pout.write(text.getBytes());
    }
    final ByteArrayOutputStream out = new ByteArrayOutputStream();
    System.setOut(new PrintStream(out));
    final String[] argv = new String[]{"-head", testFile.toString()};
    final int ret = ToolRunner.run(new FsShell(dfs.getConf()), argv);

    assertEquals(Arrays.toString(argv) + " returned " + ret, 0, ret);
    assertEquals("-head returned " + out.size() + " bytes data, expected 1KB",
        1024, out.size());
    // -head prints the FIRST 1KB of the file content
    assertArrayEquals("Head output doesn't match input",
        text.substring(0, 1024).getBytes(), out.toByteArray());
    out.reset();
  }

  /**
   * Test that -tail displays last kilobyte of the file to stdout.
   */
  @Test (timeout = 30000)
  public void testTail() throws Exception {
    final int fileLen = 5 * BLOCK_SIZE;

    // create a text file with multiple KB bytes (and multiple blocks)
    final Path testFile = new Path("testTail", "file1");
    final String text = RandomStringUtils.randomAscii(fileLen);
    try (OutputStream pout = dfs.create(testFile)) {
      pout.write(text.getBytes());
    }
    final ByteArrayOutputStream out = new ByteArrayOutputStream();
    System.setOut(new PrintStream(out));
    final String[] argv = new String[]{"-tail", testFile.toString()};
    final int ret = ToolRunner.run(new FsShell(dfs.getConf()), argv);

    assertEquals(Arrays.toString(argv) + " returned " + ret, 0, ret);
    assertEquals("-tail returned " + out.size() + " bytes data, expected 1KB",
        1024, out.size());
    // tailed out last 1KB of the file content
    assertArrayEquals("Tail output doesn't match input",
        text.substring(fileLen - 1024).getBytes(), out.toByteArray());
    out.reset();
  }

  /**
   * Test that -tail -f outputs appended data as the file grows.
   */
  @Test(timeout = 30000)
  public void testTailWithFresh() throws Exception {
    final Path testFile = new Path("testTailWithFresh", "file1");
    dfs.create(testFile);

    final ByteArrayOutputStream out = new ByteArrayOutputStream();
    System.setOut(new PrintStream(out));
    // -f never returns, so the tail command runs on its own thread.
    final Thread tailer = new Thread() {
      @Override
      public void run() {
        final String[] argv = new String[]{"-tail", "-f",
            testFile.toString()};
        try {
          ToolRunner.run(new FsShell(dfs.getConf()), argv);
        } catch (Exception e) {
          LOG.error("Client that tails the test file fails", e);
        } finally {
          out.reset();
        }
      }
    };
    tailer.start();
    // wait till the tailer is sleeping
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return tailer.getState() == Thread.State.TIMED_WAITING;
      }
    }, 100, 10000);

    final String text = RandomStringUtils.randomAscii(BLOCK_SIZE / 2);
    try (OutputStream pout = dfs.create(testFile)) {
      pout.write(text.getBytes());
    }
    // The tailer should eventually show the file contents
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return Arrays.equals(text.getBytes(), out.toByteArray());
      }
    }, 100, 10000);
  }

  /** Runs the "-text" decoding tests on both HDFS and the local filesystem. */
  @Test (timeout = 30000)
  public void testText() throws Exception {
    final Configuration conf = dfs.getConf();
    textTest(new Path("/texttest").makeQualified(dfs.getUri(),
        dfs.getWorkingDirectory()), conf);

    final FileSystem lfs = FileSystem.getLocal(conf);
    textTest(new Path(TEST_ROOT_DIR, "texttest").makeQualified(lfs.getUri(),
        lfs.getWorkingDirectory()), conf);
  }

  /**
   * Verifies that "-text" decodes gzip, SequenceFile (even with a misleading
   * .gz extension), deflate, bzip2 and plain-text files under {@code root}.
   */
  private void textTest(Path root, Configuration conf) throws Exception {
    PrintStream bak = null;
    try {
      final FileSystem fs = root.getFileSystem(conf);
      fs.mkdirs(root);

      // Test the gzip type of files. Magic detection.
      OutputStream zout = new GZIPOutputStream(
          fs.create(new Path(root, "file.gz")));
      Random r = new Random();
      bak = System.out;
      ByteArrayOutputStream file = new ByteArrayOutputStream();
      for (int i = 0; i < 1024; ++i) {
        char c = Character.forDigit(r.nextInt(26) + 10, 36);
        file.write(c);
        zout.write(c);
      }
      zout.close();

      ByteArrayOutputStream out = new ByteArrayOutputStream();
      System.setOut(new PrintStream(out));

      String[] argv = new String[2];
      argv[0] = "-text";
      argv[1] = new Path(root, "file.gz").toString();
      int ret = ToolRunner.run(new FsShell(conf), argv);
      assertEquals("'-text " + argv[1] + " returned " + ret, 0, ret);
      assertTrue("Output doesn't match input",
          Arrays.equals(file.toByteArray(), out.toByteArray()));

      // Create a sequence file with a gz extension, to test proper
      // container detection. Magic detection.
      SequenceFile.Writer writer = SequenceFile.createWriter(
          conf,
          SequenceFile.Writer.file(new Path(root, "file.gz")),
          SequenceFile.Writer.keyClass(Text.class),
          SequenceFile.Writer.valueClass(Text.class));
      writer.append(new Text("Foo"), new Text("Bar"));
      writer.close();
      out = new ByteArrayOutputStream();
      System.setOut(new PrintStream(out));
      argv = new String[2];
      argv[0] = "-text";
      argv[1] = new Path(root, "file.gz").toString();
      ret = ToolRunner.run(new FsShell(conf), argv);
      assertEquals("'-text " + argv[1] + " returned " + ret, 0, ret);
      assertTrue("Output doesn't match input",
          Arrays.equals("Foo\tBar\n".getBytes(), out.toByteArray()));
      out.reset();

      // Test deflate. Extension-based detection.
      OutputStream dout = new DeflaterOutputStream(
          fs.create(new Path(root, "file.deflate")));
      byte[] outbytes = "foo".getBytes();
      dout.write(outbytes);
      dout.close();
      out = new ByteArrayOutputStream();
      System.setOut(new PrintStream(out));
      argv = new String[2];
      argv[0] = "-text";
      argv[1] = new Path(root, "file.deflate").toString();
      ret = ToolRunner.run(new FsShell(conf), argv);
      assertEquals("'-text " + argv[1] + " returned " + ret, 0, ret);
      assertTrue("Output doesn't match input",
          Arrays.equals(outbytes, out.toByteArray()));
      out.reset();

      // Test a simple codec. Extension based detection. We use
      // Bzip2 cause its non-native.
      CompressionCodec codec = ReflectionUtils.newInstance(BZip2Codec.class, conf);
      String extension = codec.getDefaultExtension();
      Path p = new Path(root, "file." + extension);
      OutputStream fout = new DataOutputStream(codec.createOutputStream(
          fs.create(p, true)));
      byte[] writebytes = "foo".getBytes();
      fout.write(writebytes);
      fout.close();
      out = new ByteArrayOutputStream();
      System.setOut(new PrintStream(out));
      argv = new String[2];
      argv[0] = "-text";
      argv[1] = new Path(root, p).toString();
      ret = ToolRunner.run(new FsShell(conf), argv);
      assertEquals("'-text " + argv[1] + " returned " + ret, 0, ret);
      assertTrue("Output doesn't match input",
          Arrays.equals(writebytes, out.toByteArray()));
      out.reset();

      // Test a plain text.
      OutputStream pout = fs.create(new Path(root, "file.txt"));
      writebytes = "bar".getBytes();
      pout.write(writebytes);
      pout.close();
      out = new ByteArrayOutputStream();
      System.setOut(new PrintStream(out));
      argv = new String[2];
      argv[0] = "-text";
      argv[1] = new Path(root, "file.txt").toString();
      ret = ToolRunner.run(new FsShell(conf), argv);
      assertEquals("'-text " + argv[1] + " returned " + ret, 0, ret);
      assertTrue("Output doesn't match input",
          Arrays.equals(writebytes, out.toByteArray()));
      out.reset();
    } finally {
      if (null != bak) {
        System.setOut(bak);
      }
    }
  }

  /**
   * "-copyToLocal" of a glob copies the whole tree; copying a nonexistent
   * source must not leave a zero-byte destination file behind.
   */
  @Test (timeout = 30000)
  public void testCopyToLocal() throws IOException {
    FsShell shell = new FsShell(dfs.getConf());
    String root = createTree(dfs, "copyToLocal");

    // Verify copying the tree
    {
      try {
        assertEquals(0,
            runCmd(shell, "-copyToLocal", root + "*", TEST_ROOT_DIR));
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }

      File localroot = new File(TEST_ROOT_DIR, "copyToLocal");
      File localroot2 = new File(TEST_ROOT_DIR, "copyToLocal2");

      File f1 = new File(localroot, "f1");
      assertTrue("Copying failed.", f1.isFile());
      File f2 = new File(localroot, "f2");
      assertTrue("Copying failed.", f2.isFile());
      File sub = new File(localroot, "sub");
      assertTrue("Copying failed.", sub.isDirectory());
      File f3 = new File(sub, "f3");
      assertTrue("Copying failed.", f3.isFile());
      File f4 = new File(sub, "f4");
      assertTrue("Copying failed.", f4.isFile());
      File f5 = new File(localroot2, "f1");
      assertTrue("Copying failed.", f5.isFile());

      f1.delete();
      f2.delete();
      f3.delete();
      f4.delete();
      f5.delete();
      sub.delete();
    }
    // Verify copying non existing sources do not create zero byte
    // destination files
    {
      String[] args = {"-copyToLocal", "nosuchfile", TEST_ROOT_DIR};
      try {
        assertEquals(1, shell.run(args));
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      File f6 = new File(TEST_ROOT_DIR, "nosuchfile");
      assertTrue(!f6.exists());
    }
  }

  /**
   * Builds a small directory tree under /test/&lt;name&gt; and /test/&lt;name&gt;2
   * for copy/count tests. Returns the root path ("/test/&lt;name&gt;").
   */
  static String createTree(FileSystem fs, String name) throws IOException {
    // create a tree
    //   ROOT
    //   |- f1
    //   |- f2
    //   + sub
    //      |- f3
    //      |- f4
    //   ROOT2
    //   |- f1
    String path = "/test/" + name;
    Path root = mkdir(fs, new Path(path));
    Path sub = mkdir(fs, new Path(root, "sub"));
    Path root2 = mkdir(fs, new Path(path + "2"));
    writeFile(fs, new Path(root, "f1"));
    writeFile(fs, new Path(root, "f2"));
    writeFile(fs, new Path(sub, "f3"));
    writeFile(fs, new Path(sub, "f4"));
    writeFile(fs, new Path(root2, "f1"));
    mkdir(fs, new Path(root2, "sub"));
    return path;
  }

  /** "-count" reports correct dir/file counts on HDFS and the local fs. */
  @Test (timeout = 30000)
  public void testCount() throws Exception {
    FsShell shell = new FsShell(dfs.getConf());
    String root = createTree(dfs, "count");

    // Verify the counts
    runCount(root, 2, 4, shell);
    runCount(root + "2", 2, 1, shell);
    runCount(root + "2/f1", 0, 1, shell);
    runCount(root + "2/sub", 1, 0, shell);

    final FileSystem localfs = FileSystem.getLocal(dfs.getConf());
    Path localpath = new Path(TEST_ROOT_DIR, "testcount");
    localpath = localpath.makeQualified(localfs.getUri(),
        localfs.getWorkingDirectory());
    localfs.mkdirs(localpath);

    final String localstr = localpath.toString();
    System.out.println("localstr=" + localstr);
    runCount(localstr, 1, 0, shell);
    // mixed HDFS + local arguments in one command must also work
    assertEquals(0, runCmd(shell, "-count", root, localstr));
  }

  /** getUsed(root) should sum the lengths of all files under root. */
  @Test(timeout = 30000)
  public void testTotalSizeOfAllFiles() throws Exception {
    final Path root = new Path("/testTotalSizeOfAllFiles");
    dfs.mkdirs(root);
    // create file under root
    FSDataOutputStream File1 = dfs.create(new Path(root, "File1"));
    File1.write("hi".getBytes());
    File1.close();
    // create file under sub-folder
    FSDataOutputStream File2 = dfs.create(new Path(root, "Folder1/File2"));
    File2.write("hi".getBytes());
    File2.close();
    // getUsed() should return total length of all the files in Filesystem
    assertEquals(4, dfs.getUsed(root));
  }

  /**
   * Runs "-count path" and asserts the reported directory and file counts.
   * Captures System.out to parse the shell's output.
   */
  private static void runCount(String path, long dirs, long files, FsShell shell
      ) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    PrintStream out = new PrintStream(bytes);
    PrintStream oldOut = System.out;
    System.setOut(out);
    Scanner in = null;
    String results = null;
    try {
      runCmd(shell, "-count", path);
      results = bytes.toString();
      in = new Scanner(results);
      assertEquals(dirs, in.nextLong());
      assertEquals(files, in.nextLong());
    } finally {
      System.setOut(oldOut);
      if (in!=null) in.close();
      IOUtils.closeStream(out);
      System.out.println("results:\n" + results);
    }
  }

  //throws IOException instead of Exception as shell.run() does.
  private static int runCmd(FsShell shell, String... args) throws IOException {
    StringBuilder cmdline = new StringBuilder("RUN:");
    for (String arg : args) cmdline.append(" " + arg);
    LOG.info(cmdline.toString());
    try {
      int exitCode;
      exitCode = shell.run(args);
      LOG.info("RUN: "+args[0]+" exit=" + exitCode);
      return exitCode;
    } catch (IOException e) {
      LOG.error("RUN: "+args[0]+" IOException="+e.getMessage());
      throw e;
    } catch (RuntimeException e) {
      LOG.error("RUN: "+args[0]+" RuntimeException="+e.getMessage());
      throw e;
    } catch (Exception e) {
      LOG.error("RUN: "+args[0]+" Exception="+e.getMessage());
      throw new IOException(StringUtils.stringifyException(e));
    }
  }

  /**
   * Test chmod.
*/
  void testChmod(Configuration conf, FileSystem fs, String chmodDir)
      throws IOException {
    FsShell shell = new FsShell();
    shell.setConf(conf);

    try {
      //first make dir
      Path dir = new Path(chmodDir);
      fs.delete(dir, true);
      fs.mkdirs(dir);

      confirmPermissionChange(/* Setting */ "u+rwx,g=rw,o-rwx",
                              /* Should give */ "rwxrw----", fs, shell, dir);

      //create an empty file
      Path file = new Path(chmodDir, "file");
      TestDFSShell.writeFile(fs, file);

      //test octal mode
      confirmPermissionChange("644", "rw-r--r--", fs, shell, file);

      //test recursive
      runCmd(shell, "-chmod", "-R", "a+rwX", chmodDir);
      assertEquals("rwxrwxrwx",
          fs.getFileStatus(dir).getPermission().toString());
      assertEquals("rw-rw-rw-",
          fs.getFileStatus(file).getPermission().toString());

      // Skip "sticky bit" tests on Windows.
      //
      if (!Path.WINDOWS) {
        // test sticky bit on directories
        Path dir2 = new Path(dir, "stickybit");
        fs.mkdirs(dir2);
        LOG.info("Testing sticky bit on: " + dir2);
        LOG.info("Sticky bit directory initial mode: " +
            fs.getFileStatus(dir2).getPermission());

        confirmPermissionChange("u=rwx,g=rx,o=rx", "rwxr-xr-x", fs, shell, dir2);

        // sticky bit explicit set
        confirmPermissionChange("+t", "rwxr-xr-t", fs, shell, dir2);

        // sticky bit explicit reset
        confirmPermissionChange("-t", "rwxr-xr-x", fs, shell, dir2);

        // "=t" clears all rwx bits; capital T means sticky without execute
        confirmPermissionChange("=t", "--------T", fs, shell, dir2);

        // reset all permissions
        confirmPermissionChange("0000", "---------", fs, shell, dir2);

        // turn on rw permissions for all
        confirmPermissionChange("1666", "rw-rw-rwT", fs, shell, dir2);

        // sticky bit explicit set along with x permission
        confirmPermissionChange("1777", "rwxrwxrwt", fs, shell, dir2);

        // sticky bit explicit reset
        confirmPermissionChange("0777", "rwxrwxrwx", fs, shell, dir2);

        // sticky bit explicit set
        confirmPermissionChange("1777", "rwxrwxrwt", fs, shell, dir2);

        // sticky bit implicit reset
        confirmPermissionChange("777", "rwxrwxrwx", fs, shell, dir2);

        fs.delete(dir2, true);
      } else {
        LOG.info("Skipped sticky bit tests on Windows");
      }

      fs.delete(dir, true);
    } finally {
      try {
        shell.close();
      } catch (IOException ignored) {}
    }
  }

  // Apply a new permission to a path and confirm that the new permission
  // is the one you were expecting
  private void confirmPermissionChange(String toApply, String expected,
      FileSystem fs, FsShell shell, Path dir2) throws IOException {
    LOG.info("Confirming permission change of " + toApply + " to " + expected);
    runCmd(shell, "-chmod", toApply, dir2.toString());
    String result = fs.getFileStatus(dir2).getPermission().toString();
    LOG.info("Permission change result: " + result);
    assertEquals(expected, result);
  }

  /** Asserts the owner and/or group of each path; null means "don't check". */
  private void confirmOwner(String owner, String group,
                            FileSystem fs, Path... paths) throws IOException {
    for(Path path : paths) {
      if (owner != null) {
        assertEquals(owner, fs.getFileStatus(path).getOwner());
      }
      if (group != null) {
        assertEquals(group, fs.getFileStatus(path).getGroup());
      }
    }
  }

  /** Runs the chmod suite on both the local FS and DFS, then chown/chgrp. */
  @Test (timeout = 30000)
  public void testFilePermissions() throws IOException {
    Configuration conf = new HdfsConfiguration();

    //test chmod on local fs
    FileSystem fs = FileSystem.getLocal(conf);
    testChmod(conf, fs,
        (new File(TEST_ROOT_DIR, "chmodTest")).getAbsolutePath());

    conf.set(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, "true");

    //test chmod on DFS
    fs = dfs;
    conf = dfs.getConf();
    testChmod(conf, fs, "/tmp/chmodTest");

    // test chown and chgrp on DFS:
    FsShell shell = new FsShell();
    shell.setConf(conf);

    /* For dfs, I am the super user and I can change owner of any file to
     * anything. "-R" option is already tested by chmod test above.
     */
    String file = "/tmp/chownTest";
    Path path = new Path(file);
    Path parent = new Path("/tmp");
    Path root = new Path("/");
    TestDFSShell.writeFile(fs, path);

    // nonexistent globs are tolerated; the existing paths still change group
    runCmd(shell, "-chgrp", "-R", "herbivores", "/*", "unknownFile*");
    confirmOwner(null, "herbivores", fs, parent, path);

    runCmd(shell, "-chgrp", "mammals", file);
    confirmOwner(null, "mammals", fs, path);

    // ":group" form changes only the group
    runCmd(shell, "-chown", "-R", ":reptiles", "/");
    confirmOwner(null, "reptiles", fs, root, parent, path);

    // "user:" form changes only the owner; missing files are skipped
    runCmd(shell, "-chown", "python:", "/nonExistentFile", file);
    confirmOwner("python", "reptiles", fs, path);

    runCmd(shell, "-chown", "-R", "hadoop:toys", "unknownFile", "/");
    confirmOwner("hadoop", "toys", fs, root, parent, path);

    // Test different characters in names
    runCmd(shell, "-chown", "hdfs.user", file);
    confirmOwner("hdfs.user", null, fs, path);

    runCmd(shell, "-chown", "_Hdfs.User-10:_hadoop.users--", file);
    confirmOwner("_Hdfs.User-10", "_hadoop.users--", fs, path);

    runCmd(shell, "-chown", "hdfs/hadoop-core@apache.org:asf-projects", file);
    confirmOwner("hdfs/hadoop-core@apache.org", "asf-projects", fs, path);

    runCmd(shell, "-chgrp", "hadoop-core@apache.org/100", file);
    confirmOwner(null, "hadoop-core@apache.org/100", fs, path);
  }

  /**
   * Tests various options of DFSShell.
   */
  @Test (timeout = 120000)
  public void testDFSShell() throws Exception {
    /* This tests some properties of ChecksumFileSystem as well.
     * Make sure that we create ChecksumDFS */
    FsShell shell = new FsShell(dfs.getConf());

    // First create a new directory with mkdirs
    Path myPath = new Path("/testDFSShell/mkdirs");
    assertTrue(dfs.mkdirs(myPath));
    assertTrue(dfs.exists(myPath));
    // mkdirs on an existing directory is a no-op and still succeeds
    assertTrue(dfs.mkdirs(myPath));

    // Second, create a file in that directory.
Path myFile = new Path("/testDFSShell/mkdirs/myFile");
    writeFile(dfs, myFile);
    assertTrue(dfs.exists(myFile));
    Path myFile2 = new Path("/testDFSShell/mkdirs/myFile2");
    writeFile(dfs, myFile2);
    assertTrue(dfs.exists(myFile2));

    // Verify that rm with a pattern
    {
      String[] args = new String[2];
      args[0] = "-rm";
      args[1] = "/testDFSShell/mkdirs/myFile*";
      int val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertTrue(val == 0);
      assertFalse(dfs.exists(myFile));
      assertFalse(dfs.exists(myFile2));

      //re-create the files for other tests
      writeFile(dfs, myFile);
      assertTrue(dfs.exists(myFile));
      writeFile(dfs, myFile2);
      assertTrue(dfs.exists(myFile2));
    }

    // Verify that we can read the file
    {
      String[] args = new String[3];
      args[0] = "-cat";
      args[1] = "/testDFSShell/mkdirs/myFile";
      args[2] = "/testDFSShell/mkdirs/myFile2";
      int val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run: " +
            StringUtils.stringifyException(e));
      }
      assertTrue(val == 0);
    }
    dfs.delete(myFile2, true);

    // Verify that we get an error while trying to read an nonexistent file
    {
      String[] args = new String[2];
      args[0] = "-cat";
      args[1] = "/testDFSShell/mkdirs/myFile1";
      int val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertTrue(val != 0);
    }

    // Verify that we get an error while trying to delete an nonexistent file
    {
      String[] args = new String[2];
      args[0] = "-rm";
      args[1] = "/testDFSShell/mkdirs/myFile1";
      int val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertTrue(val != 0);
    }

    // Verify that we succeed in removing the file we created
    {
      String[] args = new String[2];
      args[0] = "-rm";
      args[1] = "/testDFSShell/mkdirs/myFile";
      int val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertTrue(val == 0);
    }

    // Verify touch/test
    {
      String[] args;
      int val;

      // -test -e on a missing path fails with exit code 1
      args = new String[3];
      args[0] = "-test";
      args[1] = "-e";
      args[2] = "/testDFSShell/mkdirs/noFileHere";
      val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertEquals(1, val);

      args[1] = "-z";
      val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertEquals(1, val);

      // -touchz creates an empty file
      args = new String[2];
      args[0] = "-touchz";
      args[1] = "/testDFSShell/mkdirs/isFileHere";
      val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertEquals(0, val);

      // -touchz under a nonexistent parent directory fails
      args = new String[2];
      args[0] = "-touchz";
      args[1] = "/testDFSShell/mkdirs/thisDirNotExists/isFileHere";
      val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertEquals(1, val);

      args = new String[3];
      args[0] = "-test";
      args[1] = "-e";
      args[2] = "/testDFSShell/mkdirs/isFileHere";
      val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertEquals(0, val);

      // the touched path is a file, not a directory
      args[1] = "-d";
      val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertEquals(1, val);

      // and it is zero-length
      args[1] = "-z";
      val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertEquals(0, val);
    }

    // Verify that cp from a directory to a subdirectory fails
    {
      String[] args = new String[2];
      args[0] = "-mkdir";
      args[1] = "/testDFSShell/dir1";
      int val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertEquals(0, val);

      // this should fail
      String[] args1 = new String[3];
      args1[0] = "-cp";
      args1[1] = "/testDFSShell/dir1";
      args1[2] = "/testDFSShell/dir1/dir2";
      val = 0;
      try {
        val = shell.run(args1);
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertEquals(1, val);

      // this should succeed
      args1[0] = "-cp";
      args1[1] = "/testDFSShell/dir1";
      args1[2] = "/testDFSShell/dir1foo";
      val = -1;
      try {
        val = shell.run(args1);
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertEquals(0, val);

      // this should fail
      args1[0] = "-cp";
      args1[1] = "/";
      args1[2] = "/test";
      val = 0;
      try {
        val = shell.run(args1);
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertEquals(1, val);
    }

    // Verify -test -f negative case (missing file)
    {
      String[] args = new String[3];
      args[0] = "-test";
      args[1] = "-f";
      args[2] = "/testDFSShell/mkdirs/noFileHere";
      int val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertEquals(1, val);
    }

    // Verify -test -f negative case (directory rather than file)
    {
      String[] args = new String[3];
      args[0] = "-test";
      args[1] = "-f";
      args[2] = "/testDFSShell/mkdirs";
      int val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertEquals(1, val);
    }

    // Verify -test -f positive case
    {
      writeFile(dfs, myFile);
      assertTrue(dfs.exists(myFile));

      String[] args = new String[3];
      args[0] = "-test";
      args[1] = "-f";
      args[2] = myFile.toString();
      int val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertEquals(0, val);
    }

    // Verify -test -s negative case (missing file)
    {
      String[] args = new String[3];
      args[0] = "-test";
      args[1] = "-s";
      args[2] = "/testDFSShell/mkdirs/noFileHere";
      int val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertEquals(1, val);
    }

    // Verify -test -s negative case (zero length file)
    {
      String[] args = new String[3];
      args[0] = "-test";
      args[1] = "-s";
      args[2] = "/testDFSShell/mkdirs/isFileHere";
      int val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertEquals(1, val);
    }

    // Verify -test -s positive case (nonzero length file)
    {
      String[] args = new String[3];
      args[0] = "-test";
      args[1] = "-s";
      args[2] = myFile.toString();
      int val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertEquals(0, val);
    }

    // Verify -test -w/-r
    {
      Path permDir = new Path("/testDFSShell/permDir");
      Path permFile = new Path("/testDFSShell/permDir/permFile");
      mkdir(dfs, permDir);
      writeFile(dfs, permFile);

      // Verify -test -w positive case (dir exists and can write)
      final String[] wargs = new String[3];
      wargs[0] = "-test";
      wargs[1] = "-w";
      wargs[2] = permDir.toString();
      int val = -1;
      try {
        val = shell.run(wargs);
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertEquals(0, val);

      // Verify -test -r positive case (file exists and can read)
      final String[] rargs = new String[3];
      rargs[0] = "-test";
      rargs[1] = "-r";
      rargs[2] = permFile.toString();
      try {
        val = shell.run(rargs);
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
            e.getLocalizedMessage());
      }
      assertEquals(0, val);

      // Verify -test -r negative case (file exists but cannot read)
      runCmd(shell, "-chmod", "600", permFile.toString());

      // run the same check as a non-superuser so permissions are enforced
      UserGroupInformation smokeUser =
          UserGroupInformation.createUserForTesting("smokeUser",
              new String[] {"hadoop"});
      smokeUser.doAs(new PrivilegedExceptionAction<String>() {
        @Override
        public String run() throws Exception {
          FsShell shell = new FsShell(dfs.getConf());
          int exitCode = shell.run(rargs);
          assertEquals(1, exitCode);
          return null;
        }
      });

      // Verify -test -w negative case (dir exists but cannot write)
      runCmd(shell, "-chown", "-R", "not_allowed", permDir.toString());
      runCmd(shell, "-chmod", "-R", "700", permDir.toString());
      smokeUser.doAs(new PrivilegedExceptionAction<String>() {
        @Override
        public String run() throws Exception {
          FsShell shell = new FsShell(dfs.getConf());
          int exitCode = shell.run(wargs);
          assertEquals(1, exitCode);
          return null;
        }
      });

      // cleanup
      dfs.delete(permDir, true);
    }
  }

  /**
   * Collects a MaterializedReplica for every block reported by every
   * datanode in the cluster, so a test can later corrupt the on-disk data.
   */
  private static List<MaterializedReplica> getMaterializedReplicas(
      MiniDFSCluster cluster) throws IOException {
    List<MaterializedReplica> replicas = new ArrayList<>();
    String poolId = cluster.getNamesystem().getBlockPoolId();
    List<Map<DatanodeStorage, BlockListAsLongs>> blocks =
        cluster.getAllBlockReports(poolId);
    for(int i = 0; i < blocks.size(); i++) {
      Map<DatanodeStorage, BlockListAsLongs> map = blocks.get(i);
      for(Map.Entry<DatanodeStorage, BlockListAsLongs> e : map.entrySet()) {
        for(Block b : e.getValue()) {
          replicas.add(cluster.getMaterializedReplica(i,
              new ExtendedBlock(poolId, b)));
        }
      }
    }
    return replicas;
  }

  /**
   * Overwrites each replica with {@code content} whose first character is
   * incremented by one, so the data no longer matches the stored checksum.
   */
  private static void corrupt(
      List<MaterializedReplica> replicas, String content) throws IOException {
    StringBuilder sb = new StringBuilder(content);
    char c = content.charAt(0);
    sb.setCharAt(0, ++c);
    for(MaterializedReplica replica : replicas) {
      replica.corruptData(sb.toString().getBytes("UTF8"));
    }
  }

  static interface TestGetRunner {
    String run(int exitcode, String...
options) throws IOException;
  }

  /** Verifies AccessControlException surfaces as "Permission denied" on -ls. */
  @Test (timeout = 30000)
  public void testRemoteException() throws Exception {
    UserGroupInformation tmpUGI =
        UserGroupInformation.createUserForTesting("tmpname",
            new String[] {"mygroup"});
    PrintStream bak = null;
    try {
      Path p = new Path("/foo");
      dfs.mkdirs(p);
      // owner-only permissions so tmpUGI cannot list the directory
      dfs.setPermission(p, new FsPermission((short)0700));
      bak = System.err;
      tmpUGI.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          FsShell fshell = new FsShell(dfs.getConf());
          ByteArrayOutputStream out = new ByteArrayOutputStream();
          PrintStream tmp = new PrintStream(out);
          System.setErr(tmp);
          String[] args = new String[2];
          args[0] = "-ls";
          args[1] = "/foo";
          int ret = ToolRunner.run(fshell, args);
          assertEquals("returned should be 1", 1, ret);
          String str = out.toString();
          assertTrue("permission denied printed",
              str.indexOf("Permission denied") != -1);
          out.reset();
          return null;
        }
      });
    } finally {
      // always restore stderr
      if (bak != null) {
        System.setErr(bak);
      }
    }
  }

  /** Verifies -get behavior, including -ignoreCrc with corrupted replicas. */
  @Test (timeout = 30000)
  public void testGet() throws IOException {
    GenericTestUtils.setLogLevel(FSInputChecker.LOG, Level.ALL);

    final String fname = "testGet.txt";
    Path root = new Path("/test/get");
    final Path remotef = new Path(root, fname);
    final Configuration conf = new HdfsConfiguration();
    // Set short retry timeouts so this test runs faster
    conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);

    TestGetRunner runner = new TestGetRunner() {
      // each invocation writes to a fresh local destination file
      private int count = 0;
      private final FsShell shell = new FsShell(conf);

      public String run(int exitcode, String... options) throws IOException {
        String dst = new File(TEST_ROOT_DIR, fname + ++count)
            .getAbsolutePath();
        String[] args = new String[options.length + 3];
        args[0] = "-get";
        args[args.length - 2] = remotef.toString();
        args[args.length - 1] = dst;
        for(int i = 0; i < options.length; i++) {
          args[i + 1] = options[i];
        }
        show("args=" + Arrays.asList(args));

        try {
          assertEquals(exitcode, shell.run(args));
        } catch (Exception e) {
          assertTrue(StringUtils.stringifyException(e), false);
        }
        // on success, return what was fetched so callers can compare content
        return exitcode == 0? DFSTestUtil.readFile(new File(dst)): null;
      }
    };

    File localf = createLocalFile(new File(TEST_ROOT_DIR, fname));
    MiniDFSCluster cluster = null;
    DistributedFileSystem dfs = null;

    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true)
          .build();
      dfs = cluster.getFileSystem();

      mkdir(dfs, root);
      dfs.copyFromLocalFile(false, false, new Path(localf.getPath()), remotef);
      String localfcontent = DFSTestUtil.readFile(localf);

      assertEquals(localfcontent, runner.run(0));
      assertEquals(localfcontent, runner.run(0, "-ignoreCrc"));

      // find block files to modify later
      List<MaterializedReplica> replicas = getMaterializedReplicas(cluster);

      // Shut down miniCluster and then corrupt the block files by overwriting a
      // portion with junk data.  We must shut down the miniCluster so that threads
      // in the data node do not hold locks on the block files while we try to
      // write into them.  Particularly on Windows, the data node's use of the
      // FileChannel.transferTo method can cause block files to be memory mapped
      // in read-only mode during the transfer to a client, and this causes a
      // locking conflict.  The call to shutdown the miniCluster blocks until all
      // DataXceiver threads exit, preventing this problem.
      dfs.close();
      cluster.shutdown();

      show("replicas=" + replicas);
      corrupt(replicas, localfcontent);

      // Start the miniCluster again, but do not reformat, so prior files remain.
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(false)
          .build();
      dfs = cluster.getFileSystem();

      // a normal -get must now fail the checksum, while -ignoreCrc succeeds
      assertEquals(null, runner.run(1));
      String corruptedcontent = runner.run(0, "-ignoreCrc");
      assertEquals(localfcontent.substring(1), corruptedcontent.substring(1));
      assertEquals(localfcontent.charAt(0)+1, corruptedcontent.charAt(0));
    } finally {
      if (null != dfs) {
        try {
          dfs.close();
        } catch (Exception e) {
        }
      }
      if (null != cluster) {
        cluster.shutdown();
      }
      localf.delete();
    }
  }

  /**
   * Test -stat [format] <path>... prints statistics about the file/directory
   * at <path> in the specified format.
   */
  @Test (timeout = 30000)
  public void testStat() throws Exception {
    final SimpleDateFormat fmt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    fmt.setTimeZone(TimeZone.getTimeZone("UTC"));
    final Path testDir1 = new Path("testStat", "dir1");
    dfs.mkdirs(testDir1);
    final Path testFile2 = new Path(testDir1, "file2");
    DFSTestUtil.createFile(dfs, testFile2, 2 * BLOCK_SIZE, (short) 3, 0);
    final FileStatus status1 = dfs.getFileStatus(testDir1);
    final String mtime1 = fmt.format(new Date(status1.getModificationTime()));
    final String atime1 = fmt.format(new Date(status1.getAccessTime()));
    long now = Time.now();
    // give the file distinct, known modification and access times
    dfs.setTimes(testFile2, now + 3000, now + 6000);
    final FileStatus status2 = dfs.getFileStatus(testFile2);
    final String mtime2 = fmt.format(new Date(status2.getModificationTime()));
    final String atime2 = fmt.format(new Date(status2.getAccessTime()));
    final ByteArrayOutputStream out = new ByteArrayOutputStream();
    System.setOut(new PrintStream(out));

    doFsStat(dfs.getConf(), null);

    out.reset();
    doFsStat(dfs.getConf(), null, testDir1);
    // default format prints just the modification time
    assertEquals("Unexpected -stat output: " + out,
        out.toString(), String.format("%s%n", mtime1));

    out.reset();
    doFsStat(dfs.getConf(), null, testDir1, testFile2);
    assertEquals("Unexpected -stat output: " + out,
        out.toString(), String.format("%s%n%s%n", mtime1, mtime2));

    doFsStat(dfs.getConf(), "%F %u:%g %b %y %n");

    out.reset();
doFsStat(dfs.getConf(), "%F %a %A %u:%g %b %y %n", testDir1);
    assertTrue(out.toString(), out.toString().contains(mtime1));
    assertTrue(out.toString(), out.toString().contains("directory"));
    assertTrue(out.toString(), out.toString().contains(status1.getGroup()));
    assertTrue(out.toString(),
        out.toString().contains(status1.getPermission().toString()));

    // derive the octal permission string (%a) from the permission short
    int n = status1.getPermission().toShort();
    int octal = (n>>>9&1)*1000 + (n>>>6&7)*100 + (n>>>3&7)*10 + (n&7);
    assertTrue(out.toString(), out.toString().contains(String.valueOf(octal)));

    out.reset();
    doFsStat(dfs.getConf(), "%F %a %A %u:%g %b %x %y %n", testDir1, testFile2);

    n = status2.getPermission().toShort();
    octal = (n>>>9&1)*1000 + (n>>>6&7)*100 + (n>>>3&7)*10 + (n&7);
    assertTrue(out.toString(), out.toString().contains(mtime1));
    assertTrue(out.toString(), out.toString().contains(atime1));
    assertTrue(out.toString(), out.toString().contains("regular file"));
    assertTrue(out.toString(),
        out.toString().contains(status2.getPermission().toString()));
    assertTrue(out.toString(), out.toString().contains(String.valueOf(octal)));
    assertTrue(out.toString(), out.toString().contains(mtime2));
    assertTrue(out.toString(), out.toString().contains(atime2));
  }

  /**
   * Runs "fs -stat [format] files..."; with no files it asserts the command
   * fails for missing arguments, otherwise it asserts a zero exit status.
   */
  private static void doFsStat(Configuration conf, String format, Path... files)
      throws Exception {
    if (files == null || files.length == 0) {
      final String[] argv = (format == null ?
          new String[] {"-stat"} : new String[] {"-stat", format});
      assertEquals("Should have failed with missing arguments",
          -1, ToolRunner.run(new FsShell(conf), argv));
    } else {
      List<String> argv = new LinkedList<>();
      argv.add("-stat");
      if (format != null) {
        argv.add(format);
      }
      for (Path f : files) {
        argv.add(f.toString());
      }

      int ret = ToolRunner.run(new FsShell(conf), argv.toArray(new String[0]));
      assertEquals(argv + " returned non-zero status " + ret, 0, ret);
    }
  }

  /** -lsr should keep listing readable entries even when one sub-dir fails. */
  @Test (timeout = 30000)
  public void testLsr() throws Exception {
    final Configuration conf = dfs.getConf();
    final String root = createTree(dfs, "lsr");
    dfs.mkdirs(new Path(root, "zzz"));

    runLsr(new FsShell(conf), root, 0);

    // make "sub" unreadable so a restricted user hits a permission error
    final Path sub = new Path(root, "sub");
    dfs.setPermission(sub, new FsPermission((short)0));

    final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    final String tmpusername = ugi.getShortUserName() + "1";
    UserGroupInformation tmpUGI = UserGroupInformation.createUserForTesting(
        tmpusername, new String[] {tmpusername});
    String results = tmpUGI.doAs(new PrivilegedExceptionAction<String>() {
      @Override
      public String run() throws Exception {
        return runLsr(new FsShell(conf), root, 1);
      }
    });
    // "zzz" must still be listed despite the failure on "sub"
    assertTrue(results.contains("zzz"));
  }

  /** Runs "fs -lsr root", asserts the exit code, and returns the output. */
  private static String runLsr(final FsShell shell, String root, int returnvalue
      ) throws Exception {
    System.out.println("root=" + root + ", returnvalue=" + returnvalue);
    final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    final PrintStream out = new PrintStream(bytes);
    final PrintStream oldOut = System.out;
    final PrintStream oldErr = System.err;
    System.setOut(out);
    System.setErr(out);
    final String results;
    try {
      assertEquals(returnvalue, shell.run(new String[]{"-lsr", root}));
      results = bytes.toString();
    } finally {
      System.setOut(oldOut);
      System.setErr(oldErr);
      IOUtils.closeStream(out);
    }
    System.out.println("results:\n" + results);
    return results;
  }

  /**
   * default setting is file:// which is not a DFS
   * so DFSAdmin should throw and catch
InvalidArgumentException
   * and return -1 exit code.
   * @throws Exception
   */
  @Test (timeout = 30000)
  public void testInvalidShell() throws Exception {
    Configuration conf = new Configuration(); // default FS (non-DFS)
    DFSAdmin admin = new DFSAdmin();
    admin.setConf(conf);
    int res = admin.run(new String[] {"-refreshNodes"});
    assertEquals("expected to fail -1", res , -1);
  }

  // Preserve Copy Option is -ptopxa (timestamps, ownership, permission, XATTR,
  // ACLs)
  @Test (timeout = 120000)
  public void testCopyCommandsWithPreserveOption() throws Exception {
    FsShell shell = null;
    final String testdir = "/tmp/TestDFSShell-testCopyCommandsWithPreserveOption-"
        + counter.getAndIncrement();
    final Path hdfsTestDir = new Path(testdir);
    try {
      dfs.mkdirs(hdfsTestDir);
      Path src = new Path(hdfsTestDir, "srcfile");
      dfs.create(src).close();

      dfs.setAcl(src, Lists.newArrayList(
          aclEntry(ACCESS, USER, ALL),
          aclEntry(ACCESS, USER, "foo", ALL),
          aclEntry(ACCESS, GROUP, READ_EXECUTE),
          aclEntry(ACCESS, GROUP, "bar", READ_EXECUTE),
          aclEntry(ACCESS, OTHER, EXECUTE)));

      // capture the source attributes that each -p variant should preserve
      FileStatus status = dfs.getFileStatus(src);
      final long mtime = status.getModificationTime();
      final long atime = status.getAccessTime();
      final String owner = status.getOwner();
      final String group = status.getGroup();
      final FsPermission perm = status.getPermission();

      dfs.setXAttr(src, USER_A1, USER_A1_VALUE);
      dfs.setXAttr(src, TRUSTED_A1, TRUSTED_A1_VALUE);

      shell = new FsShell(dfs.getConf());

      // -p
      Path target1 = new Path(hdfsTestDir, "targetfile1");
      String[] argv = new String[] { "-cp", "-p", src.toUri().toString(),
          target1.toUri().toString() };
      int ret = ToolRunner.run(shell, argv);
      assertEquals("cp -p is not working", SUCCESS, ret);
      FileStatus targetStatus = dfs.getFileStatus(target1);
      assertEquals(mtime, targetStatus.getModificationTime());
      assertEquals(atime, targetStatus.getAccessTime());
      assertEquals(owner, targetStatus.getOwner());
      assertEquals(group, targetStatus.getGroup());
      FsPermission targetPerm = targetStatus.getPermission();
      assertTrue(perm.equals(targetPerm));
      // plain -p does not preserve xattrs or ACLs
      Map<String, byte[]> xattrs = dfs.getXAttrs(target1);
      assertTrue(xattrs.isEmpty());
      List<AclEntry> acls = dfs.getAclStatus(target1).getEntries();
      assertTrue(acls.isEmpty());
      assertFalse(targetStatus.hasAcl());

      // -ptop
      Path target2 = new Path(hdfsTestDir, "targetfile2");
      argv = new String[] { "-cp", "-ptop", src.toUri().toString(),
          target2.toUri().toString() };
      ret = ToolRunner.run(shell, argv);
      assertEquals("cp -ptop is not working", SUCCESS, ret);
      targetStatus = dfs.getFileStatus(target2);
      assertEquals(mtime, targetStatus.getModificationTime());
      assertEquals(atime, targetStatus.getAccessTime());
      assertEquals(owner, targetStatus.getOwner());
      assertEquals(group, targetStatus.getGroup());
      targetPerm = targetStatus.getPermission();
      assertTrue(perm.equals(targetPerm));
      xattrs = dfs.getXAttrs(target2);
      assertTrue(xattrs.isEmpty());
      acls = dfs.getAclStatus(target2).getEntries();
      assertTrue(acls.isEmpty());
      assertFalse(targetStatus.hasAcl());

      // -ptopx
      Path target3 = new Path(hdfsTestDir, "targetfile3");
      argv = new String[] { "-cp", "-ptopx", src.toUri().toString(),
          target3.toUri().toString() };
      ret = ToolRunner.run(shell, argv);
      assertEquals("cp -ptopx is not working", SUCCESS, ret);
      targetStatus = dfs.getFileStatus(target3);
      assertEquals(mtime, targetStatus.getModificationTime());
      assertEquals(atime, targetStatus.getAccessTime());
      assertEquals(owner, targetStatus.getOwner());
      assertEquals(group, targetStatus.getGroup());
      targetPerm = targetStatus.getPermission();
      assertTrue(perm.equals(targetPerm));
      // "x" preserves both xattrs; ACLs are still dropped
      xattrs = dfs.getXAttrs(target3);
      assertEquals(xattrs.size(), 2);
      assertArrayEquals(USER_A1_VALUE, xattrs.get(USER_A1));
      assertArrayEquals(TRUSTED_A1_VALUE, xattrs.get(TRUSTED_A1));
      acls = dfs.getAclStatus(target3).getEntries();
      assertTrue(acls.isEmpty());
      assertFalse(targetStatus.hasAcl());

      // -ptopa
      Path target4 = new Path(hdfsTestDir, "targetfile4");
      argv = new String[] { "-cp", "-ptopa", src.toUri().toString(),
          target4.toUri().toString() };
      ret = ToolRunner.run(shell, argv);
      assertEquals("cp -ptopa is not working", SUCCESS, ret);
      targetStatus = dfs.getFileStatus(target4);
      assertEquals(mtime, targetStatus.getModificationTime());
      assertEquals(atime, targetStatus.getAccessTime());
      assertEquals(owner, targetStatus.getOwner());
      assertEquals(group, targetStatus.getGroup());
      targetPerm = targetStatus.getPermission();
      assertTrue(perm.equals(targetPerm));
      // "a" preserves ACLs; xattrs are dropped
      xattrs = dfs.getXAttrs(target4);
      assertTrue(xattrs.isEmpty());
      acls = dfs.getAclStatus(target4).getEntries();
      assertFalse(acls.isEmpty());
      assertTrue(targetStatus.hasAcl());
      assertEquals(dfs.getAclStatus(src), dfs.getAclStatus(target4));

      // -ptoa (verify -pa option will preserve permissions also)
      Path target5 = new Path(hdfsTestDir, "targetfile5");
      argv = new String[] { "-cp", "-ptoa", src.toUri().toString(),
          target5.toUri().toString() };
      ret = ToolRunner.run(shell, argv);
      assertEquals("cp -ptoa is not working", SUCCESS, ret);
      targetStatus = dfs.getFileStatus(target5);
      assertEquals(mtime, targetStatus.getModificationTime());
      assertEquals(atime, targetStatus.getAccessTime());
      assertEquals(owner, targetStatus.getOwner());
      assertEquals(group, targetStatus.getGroup());
      targetPerm = targetStatus.getPermission();
      assertTrue(perm.equals(targetPerm));
      xattrs = dfs.getXAttrs(target5);
      assertTrue(xattrs.isEmpty());
      acls = dfs.getAclStatus(target5).getEntries();
      assertFalse(acls.isEmpty());
      assertTrue(targetStatus.hasAcl());
      assertEquals(dfs.getAclStatus(src), dfs.getAclStatus(target5));
    } finally {
      if (null != shell) {
        shell.close();
      }
    }
  }

  /** Checks cp behavior when source/destination use /.reserved/raw paths. */
  @Test (timeout = 120000)
  public void testCopyCommandsWithRawXAttrs() throws Exception {
    FsShell shell = null;
    final String testdir = "/tmp/TestDFSShell-testCopyCommandsWithRawXAttrs-"
        + counter.getAndIncrement();
    final Path hdfsTestDir = new Path(testdir);
    final Path rawHdfsTestDir = new Path("/.reserved/raw" + testdir);
    try {
      dfs.mkdirs(hdfsTestDir);
      final Path src = new Path(hdfsTestDir, "srcfile");
final String rawSrcBase = "/.reserved/raw" + testdir;
      final Path rawSrc = new Path(rawSrcBase, "srcfile");
      dfs.create(src).close();

      final Path srcDir = new Path(hdfsTestDir, "srcdir");
      final Path rawSrcDir = new Path("/.reserved/raw" + testdir, "srcdir");
      dfs.mkdirs(srcDir);
      final Path srcDirFile = new Path(srcDir, "srcfile");
      final Path rawSrcDirFile =
          new Path("/.reserved/raw" + srcDirFile);
      dfs.create(srcDirFile).close();

      final Path[] paths = { rawSrc, rawSrcDir, rawSrcDirFile };
      final String[] xattrNames = { USER_A1, RAW_A1 };
      final byte[][] xattrVals = { USER_A1_VALUE, RAW_A1_VALUE };

      // set both a user. and a raw. xattr on every source path
      for (int i = 0; i < paths.length; i++) {
        for (int j = 0; j < xattrNames.length; j++) {
          dfs.setXAttr(paths[i], xattrNames[j], xattrVals[j]);
        }
      }

      shell = new FsShell(dfs.getConf());

      /* Check that a file as the source path works ok. */
      doTestCopyCommandsWithRawXAttrs(shell, dfs, src, hdfsTestDir, false);
      doTestCopyCommandsWithRawXAttrs(shell, dfs, rawSrc, hdfsTestDir, false);
      doTestCopyCommandsWithRawXAttrs(shell, dfs, src, rawHdfsTestDir, false);
      doTestCopyCommandsWithRawXAttrs(shell, dfs, rawSrc, rawHdfsTestDir, true);

      /* Use a relative /.reserved/raw path. */
      final Path savedWd = dfs.getWorkingDirectory();
      try {
        dfs.setWorkingDirectory(new Path(rawSrcBase));
        final Path relRawSrc = new Path("../srcfile");
        final Path relRawHdfsTestDir = new Path("..");
        doTestCopyCommandsWithRawXAttrs(shell, dfs, relRawSrc,
            relRawHdfsTestDir, true);
      } finally {
        // restore the working directory for subsequent cases
        dfs.setWorkingDirectory(savedWd);
      }

      /* Check that a directory as the source path works ok. */
      doTestCopyCommandsWithRawXAttrs(shell, dfs, srcDir, hdfsTestDir, false);
      doTestCopyCommandsWithRawXAttrs(shell, dfs, rawSrcDir, hdfsTestDir,
          false);
      doTestCopyCommandsWithRawXAttrs(shell, dfs, srcDir, rawHdfsTestDir,
          false);
      doTestCopyCommandsWithRawXAttrs(shell, dfs, rawSrcDir, rawHdfsTestDir,
          true);

      /* Use relative in an absolute path. */
      final String relRawSrcDir = "./.reserved/../.reserved/raw/../raw" +
          testdir + "/srcdir";
      final String relRawDstDir = "./.reserved/../.reserved/raw/../raw" +
          testdir;
      doTestCopyCommandsWithRawXAttrs(shell, dfs, new Path(relRawSrcDir),
          new Path(relRawDstDir), true);
    } finally {
      if (null != shell) {
        shell.close();
      }
      dfs.delete(hdfsTestDir, true);
    }
  }

  /**
   * Copies {@code src} into {@code hdfsTestDir} with -p, -px and no args.
   * A mix of raw and non-raw src/dest must fail; otherwise the copy must
   * succeed and the target's xattrs are checked against {@code expectRaw}.
   */
  private void doTestCopyCommandsWithRawXAttrs(FsShell shell, FileSystem fs,
      Path src, Path hdfsTestDir, boolean expectRaw) throws Exception {
    Path target;
    boolean srcIsRaw;
    if (src.isAbsolute()) {
      srcIsRaw = src.toString().contains("/.reserved/raw");
    } else {
      // resolve relative paths against the working directory first
      srcIsRaw = new Path(fs.getWorkingDirectory(), src).
          toString().contains("/.reserved/raw");
    }
    final boolean destIsRaw = hdfsTestDir.toString().contains("/.reserved/raw");
    final boolean srcDestMismatch = srcIsRaw ^ destIsRaw;

    // -p (possibly preserve raw if src & dst are both /.reserved/raw)
    if (srcDestMismatch) {
      doCopyAndTest(shell, hdfsTestDir, src, "-p", ERROR);
    } else {
      target = doCopyAndTest(shell, hdfsTestDir, src, "-p", SUCCESS);
      checkXAttrs(fs, target, expectRaw, false);
    }

    // -px (possibly preserve raw, always preserve non-raw xattrs)
    if (srcDestMismatch) {
      doCopyAndTest(shell, hdfsTestDir, src, "-px", ERROR);
    } else {
      target = doCopyAndTest(shell, hdfsTestDir, src, "-px", SUCCESS);
      checkXAttrs(fs, target, expectRaw, true);
    }

    // no args (possibly preserve raw, never preserve non-raw xattrs)
    if (srcDestMismatch) {
      doCopyAndTest(shell, hdfsTestDir, src, null, ERROR);
    } else {
      target = doCopyAndTest(shell, hdfsTestDir, src, null, SUCCESS);
      checkXAttrs(fs, target, expectRaw, false);
    }
  }

  /**
   * Runs "fs -cp [cpArgs] src target" into a freshly named target file,
   * asserts the expected exit code and returns the target path.
   */
  private Path doCopyAndTest(FsShell shell, Path dest, Path src,
      String cpArgs, int expectedExitCode) throws Exception {
    final Path target = new Path(dest, "targetfile" +
        counter.getAndIncrement());
    final String[] argv = cpArgs == null ?
        new String[] { "-cp", src.toUri().toString(),
            target.toUri().toString() } :
        new String[] { "-cp", cpArgs, src.toUri().toString(),
            target.toUri().toString() };
    final int ret = ToolRunner.run(shell, argv);
    assertEquals("cp -p is not working", expectedExitCode, ret);
    return target;
  }

  /** Asserts exactly the expected raw./user. xattrs exist on {@code target}. */
  private void checkXAttrs(FileSystem fs, Path target, boolean expectRaw,
      boolean expectVanillaXAttrs) throws Exception {
    final Map<String, byte[]> xattrs = fs.getXAttrs(target);
    int expectedCount = 0;
    if (expectRaw) {
      assertArrayEquals("raw.a1 has incorrect value",
          RAW_A1_VALUE, xattrs.get(RAW_A1));
      expectedCount++;
    }
    if (expectVanillaXAttrs) {
      assertArrayEquals("user.a1 has incorrect value",
          USER_A1_VALUE, xattrs.get(USER_A1));
      expectedCount++;
    }
    assertEquals("xattrs size mismatch", expectedCount, xattrs.size());
  }

  // verify cp -ptopxa option will preserve directory attributes.
  @Test (timeout = 120000)
  public void testCopyCommandsToDirectoryWithPreserveOption()
      throws Exception {
    FsShell shell = null;
    final String testdir =
        "/tmp/TestDFSShell-testCopyCommandsToDirectoryWithPreserveOption-"
        + counter.getAndIncrement();
    final Path hdfsTestDir = new Path(testdir);
    try {
      dfs.mkdirs(hdfsTestDir);
      Path srcDir = new Path(hdfsTestDir, "srcDir");
      dfs.mkdirs(srcDir);

      dfs.setAcl(srcDir, Lists.newArrayList(
          aclEntry(ACCESS, USER, ALL),
          aclEntry(ACCESS, USER, "foo", ALL),
          aclEntry(ACCESS, GROUP, READ_EXECUTE),
          aclEntry(DEFAULT, GROUP, "bar", READ_EXECUTE),
          aclEntry(ACCESS, OTHER, EXECUTE)));
      // set sticky bit
      dfs.setPermission(srcDir,
          new FsPermission(ALL, READ_EXECUTE, EXECUTE, true));

      // Create a file in srcDir to check if modification time of
      // srcDir to be preserved after copying the file.
      // If cp -p command is to preserve modification time and then copy child
      // (srcFile), modification time will not be preserved.
      Path srcFile = new Path(srcDir, "srcFile");
      dfs.create(srcFile).close();

      // Snapshot the source directory attributes before copying.
      FileStatus status = dfs.getFileStatus(srcDir);
      final long mtime = status.getModificationTime();
      final long atime = status.getAccessTime();
      final String owner = status.getOwner();
      final String group = status.getGroup();
      final FsPermission perm = status.getPermission();

      dfs.setXAttr(srcDir, USER_A1, USER_A1_VALUE);
      dfs.setXAttr(srcDir, TRUSTED_A1, TRUSTED_A1_VALUE);

      shell = new FsShell(dfs.getConf());

      // -p : preserves times/owner/permission, but neither xattrs nor ACLs.
      Path targetDir1 = new Path(hdfsTestDir, "targetDir1");
      String[] argv = new String[] { "-cp", "-p", srcDir.toUri().toString(),
          targetDir1.toUri().toString() };
      int ret = ToolRunner.run(shell, argv);
      assertEquals("cp -p is not working", SUCCESS, ret);
      FileStatus targetStatus = dfs.getFileStatus(targetDir1);
      assertEquals(mtime, targetStatus.getModificationTime());
      assertEquals(atime, targetStatus.getAccessTime());
      assertEquals(owner, targetStatus.getOwner());
      assertEquals(group, targetStatus.getGroup());
      FsPermission targetPerm = targetStatus.getPermission();
      assertTrue(perm.equals(targetPerm));
      Map<String, byte[]> xattrs = dfs.getXAttrs(targetDir1);
      assertTrue(xattrs.isEmpty());
      List<AclEntry> acls = dfs.getAclStatus(targetDir1).getEntries();
      assertTrue(acls.isEmpty());
      assertFalse(targetStatus.hasAcl());

      // -ptop : explicit t(imes), o(wner), p(ermission) — same as plain -p.
      Path targetDir2 = new Path(hdfsTestDir, "targetDir2");
      argv = new String[] { "-cp", "-ptop", srcDir.toUri().toString(),
          targetDir2.toUri().toString() };
      ret = ToolRunner.run(shell, argv);
      assertEquals("cp -ptop is not working", SUCCESS, ret);
      targetStatus = dfs.getFileStatus(targetDir2);
      assertEquals(mtime, targetStatus.getModificationTime());
      assertEquals(atime, targetStatus.getAccessTime());
      assertEquals(owner, targetStatus.getOwner());
      assertEquals(group, targetStatus.getGroup());
      targetPerm = targetStatus.getPermission();
      assertTrue(perm.equals(targetPerm));
      xattrs = dfs.getXAttrs(targetDir2);
      assertTrue(xattrs.isEmpty());
      acls = dfs.getAclStatus(targetDir2).getEntries();
      assertTrue(acls.isEmpty());
      assertFalse(targetStatus.hasAcl());

      // -ptopx : additionally preserves xattrs (but still not ACLs).
      Path targetDir3 = new Path(hdfsTestDir, "targetDir3");
      argv = new String[] { "-cp", "-ptopx", srcDir.toUri().toString(),
          targetDir3.toUri().toString() };
      ret = ToolRunner.run(shell, argv);
      assertEquals("cp -ptopx is not working", SUCCESS, ret);
      targetStatus = dfs.getFileStatus(targetDir3);
      assertEquals(mtime, targetStatus.getModificationTime());
      assertEquals(atime, targetStatus.getAccessTime());
      assertEquals(owner, targetStatus.getOwner());
      assertEquals(group, targetStatus.getGroup());
      targetPerm = targetStatus.getPermission();
      assertTrue(perm.equals(targetPerm));
      xattrs = dfs.getXAttrs(targetDir3);
      assertEquals(xattrs.size(), 2);
      assertArrayEquals(USER_A1_VALUE, xattrs.get(USER_A1));
      assertArrayEquals(TRUSTED_A1_VALUE, xattrs.get(TRUSTED_A1));
      acls = dfs.getAclStatus(targetDir3).getEntries();
      assertTrue(acls.isEmpty());
      assertFalse(targetStatus.hasAcl());

      // -ptopa : additionally preserves ACLs (but not xattrs).
      Path targetDir4 = new Path(hdfsTestDir, "targetDir4");
      argv = new String[] { "-cp", "-ptopa", srcDir.toUri().toString(),
          targetDir4.toUri().toString() };
      ret = ToolRunner.run(shell, argv);
      assertEquals("cp -ptopa is not working", SUCCESS, ret);
      targetStatus = dfs.getFileStatus(targetDir4);
      assertEquals(mtime, targetStatus.getModificationTime());
      assertEquals(atime, targetStatus.getAccessTime());
      assertEquals(owner, targetStatus.getOwner());
      assertEquals(group, targetStatus.getGroup());
      targetPerm = targetStatus.getPermission();
      assertTrue(perm.equals(targetPerm));
      xattrs = dfs.getXAttrs(targetDir4);
      assertTrue(xattrs.isEmpty());
      acls = dfs.getAclStatus(targetDir4).getEntries();
      assertFalse(acls.isEmpty());
      assertTrue(targetStatus.hasAcl());
      assertEquals(dfs.getAclStatus(srcDir), dfs.getAclStatus(targetDir4));

      // -ptoa (verify -pa option will preserve permissions also)
      Path targetDir5 = new Path(hdfsTestDir, "targetDir5");
      argv = new String[] { "-cp", "-ptoa", srcDir.toUri().toString(),
          targetDir5.toUri().toString() };
      ret =
      ToolRunner.run(shell, argv);
      assertEquals("cp -ptoa is not working", SUCCESS, ret);
      targetStatus = dfs.getFileStatus(targetDir5);
      assertEquals(mtime, targetStatus.getModificationTime());
      assertEquals(atime, targetStatus.getAccessTime());
      assertEquals(owner, targetStatus.getOwner());
      assertEquals(group, targetStatus.getGroup());
      targetPerm = targetStatus.getPermission();
      assertTrue(perm.equals(targetPerm));
      xattrs = dfs.getXAttrs(targetDir5);
      assertTrue(xattrs.isEmpty());
      acls = dfs.getAclStatus(targetDir5).getEntries();
      assertFalse(acls.isEmpty());
      assertTrue(targetStatus.hasAcl());
      assertEquals(dfs.getAclStatus(srcDir), dfs.getAclStatus(targetDir5));
    } finally {
      if (shell != null) {
        shell.close();
      }
    }
  }

  // Verify cp -pa option will preserve both ACL and sticky bit.
  @Test (timeout = 120000)
  public void testCopyCommandsPreserveAclAndStickyBit() throws Exception {
    FsShell shell = null;
    final String testdir =
        "/tmp/TestDFSShell-testCopyCommandsPreserveAclAndStickyBit-" +
        counter.getAndIncrement();
    final Path hdfsTestDir = new Path(testdir);
    try {
      dfs.mkdirs(hdfsTestDir);
      Path src = new Path(hdfsTestDir, "srcfile");
      dfs.create(src).close();

      dfs.setAcl(src, Lists.newArrayList(
          aclEntry(ACCESS, USER, ALL),
          aclEntry(ACCESS, USER, "foo", ALL),
          aclEntry(ACCESS, GROUP, READ_EXECUTE),
          aclEntry(ACCESS, GROUP, "bar", READ_EXECUTE),
          aclEntry(ACCESS, OTHER, EXECUTE)));
      // set sticky bit
      dfs.setPermission(src,
          new FsPermission(ALL, READ_EXECUTE, EXECUTE, true));

      // Snapshot source attributes to compare against the copies.
      FileStatus status = dfs.getFileStatus(src);
      final long mtime = status.getModificationTime();
      final long atime = status.getAccessTime();
      final String owner = status.getOwner();
      final String group = status.getGroup();
      final FsPermission perm = status.getPermission();

      shell = new FsShell(dfs.getConf());

      // -p preserves sticky bit and doesn't preserve ACL
      Path target1 = new Path(hdfsTestDir, "targetfile1");
      String[] argv = new String[] { "-cp", "-p", src.toUri().toString(),
          target1.toUri().toString() };
      int ret =
      ToolRunner.run(shell, argv);
      assertEquals("cp is not working", SUCCESS, ret);
      FileStatus targetStatus = dfs.getFileStatus(target1);
      assertEquals(mtime, targetStatus.getModificationTime());
      assertEquals(atime, targetStatus.getAccessTime());
      assertEquals(owner, targetStatus.getOwner());
      assertEquals(group, targetStatus.getGroup());
      FsPermission targetPerm = targetStatus.getPermission();
      assertTrue(perm.equals(targetPerm));
      List<AclEntry> acls = dfs.getAclStatus(target1).getEntries();
      assertTrue(acls.isEmpty());
      assertFalse(targetStatus.hasAcl());

      // -ptopa preserves both sticky bit and ACL
      Path target2 = new Path(hdfsTestDir, "targetfile2");
      argv = new String[] { "-cp", "-ptopa", src.toUri().toString(),
          target2.toUri().toString() };
      ret = ToolRunner.run(shell, argv);
      assertEquals("cp -ptopa is not working", SUCCESS, ret);
      targetStatus = dfs.getFileStatus(target2);
      assertEquals(mtime, targetStatus.getModificationTime());
      assertEquals(atime, targetStatus.getAccessTime());
      assertEquals(owner, targetStatus.getOwner());
      assertEquals(group, targetStatus.getGroup());
      targetPerm = targetStatus.getPermission();
      assertTrue(perm.equals(targetPerm));
      acls = dfs.getAclStatus(target2).getEntries();
      assertFalse(acls.isEmpty());
      assertTrue(targetStatus.hasAcl());
      assertEquals(dfs.getAclStatus(src), dfs.getAclStatus(target2));
    } finally {
      if (null != shell) {
        shell.close();
      }
    }
  }

  // force Copy Option is -f
  @Test (timeout = 30000)
  public void testCopyCommandsWithForceOption() throws Exception {
    FsShell shell = null;
    final File localFile = new File(TEST_ROOT_DIR, "testFileForPut");
    final String localfilepath =
        new Path(localFile.getAbsolutePath()).toUri().toString();
    final String testdir = "/tmp/TestDFSShell-testCopyCommandsWithForceOption-"
        + counter.getAndIncrement();
    final Path hdfsTestDir = new Path(testdir);
    try {
      dfs.mkdirs(hdfsTestDir);
      localFile.createNewFile();
      // Pre-create the destination file so the non -f variants must fail.
      writeFile(dfs, new Path(testdir, "testFileForPut"));
      shell = new FsShell();

      // Tests for put
      String[] argv = new
      String[] { "-put", "-f", localfilepath, testdir };
      int res = ToolRunner.run(shell, argv);
      assertEquals("put -f is not working", SUCCESS, res);

      // Without -f the destination already exists, so put must fail.
      argv = new String[] { "-put", localfilepath, testdir };
      res = ToolRunner.run(shell, argv);
      assertEquals("put command itself is able to overwrite the file", ERROR,
          res);

      // Tests for copyFromLocal
      argv = new String[] { "-copyFromLocal", "-f", localfilepath, testdir };
      res = ToolRunner.run(shell, argv);
      assertEquals("copyFromLocal -f is not working", SUCCESS, res);

      argv = new String[] { "-copyFromLocal", localfilepath, testdir };
      res = ToolRunner.run(shell, argv);
      assertEquals(
          "copyFromLocal command itself is able to overwrite the file", ERROR,
          res);

      // Tests for cp
      argv = new String[] { "-cp", "-f", localfilepath, testdir };
      res = ToolRunner.run(shell, argv);
      assertEquals("cp -f is not working", SUCCESS, res);

      argv = new String[] { "-cp", localfilepath, testdir };
      res = ToolRunner.run(shell, argv);
      assertEquals("cp command itself is able to overwrite the file", ERROR,
          res);
    } finally {
      if (null != shell)
        shell.close();
      if (localFile.exists())
        localFile.delete();
    }
  }

  /* [refs HDFS-5033]
   *
   * return a "Permission Denied" message instead of "No such file or Directory"
   * when trying to put/copyFromLocal a file that doesn't have read access
   */
  @Test (timeout = 30000)
  public void testCopyFromLocalWithPermissionDenied() throws Exception {
    FsShell shell = null;
    PrintStream bak = null;

    // A local file that exists but is unreadable by the test process.
    final File localFile = new File(TEST_ROOT_DIR,
        "testFileWithNoReadPermissions");
    final String localfilepath =
        new Path(localFile.getAbsolutePath()).toUri().toString();
    final String testdir =
        "/tmp/TestDFSShell-CopyFromLocalWithPermissionDenied-"
        + counter.getAndIncrement();
    final Path hdfsTestDir = new Path(testdir);
    try {
      dfs.mkdirs(hdfsTestDir);
      localFile.createNewFile();
      localFile.setReadable(false);
      writeFile(dfs, new Path(testdir, "testFileForPut"));
      shell = new FsShell();

      // capture system error messages, snarfed from testErrOutPut()
      bak =
      System.err;
      ByteArrayOutputStream out = new ByteArrayOutputStream();
      PrintStream tmp = new PrintStream(out);
      System.setErr(tmp);

      // Tests for put
      String[] argv = new String[] { "-put", localfilepath, testdir };
      int res = ToolRunner.run(shell, argv);
      assertEquals("put is working", ERROR, res);
      String returned = out.toString();
      assertTrue(" outputs Permission denied error message",
          (returned.lastIndexOf("Permission denied") != -1));

      // Tests for copyFromLocal
      argv = new String[] { "-copyFromLocal", localfilepath, testdir };
      res = ToolRunner.run(shell, argv);
      assertEquals("copyFromLocal -f is working", ERROR, res);
      returned = out.toString();
      assertTrue(" outputs Permission denied error message",
          (returned.lastIndexOf("Permission denied") != -1));
    } finally {
      if (bak != null) {
        // Restore the real stderr captured above.
        System.setErr(bak);
      }
      if (null != shell)
        shell.close();
      if (localFile.exists())
        localFile.delete();
      dfs.delete(hdfsTestDir, true);
    }
  }

  /**
   * Test -setrep with a replication factor that is too low. We have to test
   * this here because the mini-miniCluster used with testHDFSConf.xml uses a
   * replication factor of 1 (for good reason).
   */
  @Test (timeout = 30000)
  public void testSetrepLow() throws Exception {
    // Dedicated cluster with a minimum replication of 2 so that
    // "-setrep 1" is rejected by the namenode.
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, 2);

    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    MiniDFSCluster cluster = builder.numDataNodes(2).format(true).build();
    FsShell shell = new FsShell(conf);

    cluster.waitActive();

    final String testdir = "/tmp/TestDFSShell-testSetrepLow";
    final Path hdfsFile = new Path(testdir, "testFileForSetrepLow");
    final PrintStream origOut = System.out;
    final PrintStream origErr = System.err;

    try {
      final FileSystem fs = cluster.getFileSystem();

      assertTrue("Unable to create test directory",
          fs.mkdirs(new Path(testdir)));

      fs.create(hdfsFile, true).close();

      // Capture the command output so we can examine it
      final ByteArrayOutputStream bao = new ByteArrayOutputStream();
      final PrintStream capture = new PrintStream(bao);

      System.setOut(capture);
      System.setErr(capture);

      final String[] argv = new String[] { "-setrep", "1",
          hdfsFile.toString() };

      try {
        assertEquals("Command did not return the expected exit code",
            1, shell.run(argv));
      } finally {
        // Restore stdout/stderr before asserting on the captured text.
        System.setOut(origOut);
        System.setErr(origErr);
      }

      assertTrue("Error message is not the expected error message"
          + bao.toString(), bao.toString().startsWith(
              "setrep: Requested replication factor of 1 is less than "
                  + "the required minimum of 2 for /tmp/TestDFSShell-"
                  + "testSetrepLow/testFileForSetrepLow"));
    } finally {
      shell.close();
      cluster.shutdown();
    }
  }

  // setrep for file and directory.
@Test (timeout = 30000) public void testSetrep() throws Exception { FsShell shell = null; final String testdir1 = "/tmp/TestDFSShell-testSetrep-" + counter.getAndIncrement(); final String testdir2 = testdir1 + "/nestedDir"; final Path hdfsFile1 = new Path(testdir1, "testFileForSetrep"); final Path hdfsFile2 = new Path(testdir2, "testFileForSetrep"); final Short oldRepFactor = new Short((short) 2); final Short newRepFactor = new Short((short) 3); try { String[] argv; assertThat(dfs.mkdirs(new Path(testdir2)), is(true)); shell = new FsShell(dfs.getConf()); dfs.create(hdfsFile1, true).close(); dfs.create(hdfsFile2, true).close(); // Tests for setrep on a file. argv = new String[] { "-setrep", newRepFactor.toString(), hdfsFile1.toString() }; assertThat(shell.run(argv), is(SUCCESS)); assertThat(dfs.getFileStatus(hdfsFile1).getReplication(), is(newRepFactor)); assertThat(dfs.getFileStatus(hdfsFile2).getReplication(), is(oldRepFactor)); // Tests for setrep // Tests for setrep on a directory and make sure it is applied recursively. argv = new String[] { "-setrep", newRepFactor.toString(), testdir1 }; assertThat(shell.run(argv), is(SUCCESS)); assertThat(dfs.getFileStatus(hdfsFile1).getReplication(), is(newRepFactor)); assertThat(dfs.getFileStatus(hdfsFile2).getReplication(), is(newRepFactor)); } finally { if (shell != null) { shell.close(); } } } /** * Delete a file optionally configuring trash on the server and client. 
   */
  private void deleteFileUsingTrash(
      boolean serverTrash, boolean clientTrash) throws Exception {
    // Run a miniCluster, optionally with trash enabled on the server
    Configuration serverConf = new HdfsConfiguration();
    if (serverTrash) {
      serverConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
    }

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(serverConf)
        .numDataNodes(1).format(true).build();
    Configuration clientConf = new Configuration(serverConf);

    // Create a client, optionally with trash enabled
    if (clientTrash) {
      clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
    } else {
      clientConf.setLong(FS_TRASH_INTERVAL_KEY, 0);
    }

    FsShell shell = new FsShell(clientConf);
    FileSystem fs = null;

    try {
      // Create and delete a file
      fs = cluster.getFileSystem();

      // Use a separate tmp dir for each invocation.
      final String testdir = "/tmp/TestDFSShell-deleteFileUsingTrash-" +
          counter.getAndIncrement();

      writeFile(fs, new Path(testdir, "foo"));
      final String testFile = testdir + "/foo";

      final String trashFile = shell.getCurrentTrashDir() + "/" + testFile;
      String[] argv = new String[] { "-rm", testFile };
      int res = ToolRunner.run(shell, argv);
      assertEquals("rm failed", 0, res);

      if (serverTrash) {
        // If the server config was set we should use it unconditionally
        assertTrue("File not in trash", fs.exists(new Path(trashFile)));
      } else if (clientTrash) {
        // If the server config was not set but the client config was
        // set then we should use it
        assertTrue("File not in trashed", fs.exists(new Path(trashFile)));
      } else {
        // If neither was set then we should not have trashed the file
        assertFalse("File was not removed", fs.exists(new Path(testFile)));
        assertFalse("File was trashed", fs.exists(new Path(trashFile)));
      }
    } finally {
      if (fs != null) {
        fs.close();
      }
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }

  @Test (timeout = 300000)
  public void testAppendToFile() throws Exception {
    // Appending two 1MB local files twice should yield a 4MB remote file.
    final int inputFileLength = 1024 * 1024;
    File testRoot = new File(TEST_ROOT_DIR, "testAppendtoFileDir");
    testRoot.mkdirs();

    File
    file1 = new File(testRoot, "file1");
    File file2 = new File(testRoot, "file2");
    createLocalFileWithRandomData(inputFileLength, file1);
    createLocalFileWithRandomData(inputFileLength, file2);

    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
        .build();
    cluster.waitActive();

    try {
      FileSystem dfs = cluster.getFileSystem();
      assertTrue("Not a HDFS: " + dfs.getUri(),
          dfs instanceof DistributedFileSystem);

      // Run appendToFile once, make sure that the target file is
      // created and is of the right size.
      Path remoteFile = new Path("/remoteFile");
      FsShell shell = new FsShell();
      shell.setConf(conf);
      String[] argv = new String[] {
          "-appendToFile", file1.toString(), file2.toString(),
          remoteFile.toString() };
      int res = ToolRunner.run(shell, argv);
      assertThat(res, is(0));
      assertThat(dfs.getFileStatus(remoteFile).getLen(),
          is((long) inputFileLength * 2));

      // Run the command once again and make sure that the target file
      // size has been doubled.
      res = ToolRunner.run(shell, argv);
      assertThat(res, is(0));
      assertThat(dfs.getFileStatus(remoteFile).getLen(),
          is((long) inputFileLength * 4));
    } finally {
      cluster.shutdown();
    }
  }

  @Test (timeout = 300000)
  public void testAppendToFileBadArgs() throws Exception {
    final int inputFileLength = 1024 * 1024;
    File testRoot = new File(TEST_ROOT_DIR, "testAppendToFileBadArgsDir");
    testRoot.mkdirs();

    File file1 = new File(testRoot, "file1");
    createLocalFileWithRandomData(inputFileLength, file1);

    // Run appendToFile with insufficient arguments.
    FsShell shell = new FsShell();
    shell.setConf(dfs.getConf());
    String[] argv = new String[] { "-appendToFile", file1.toString() };
    int res = ToolRunner.run(shell, argv);
    assertThat(res, not(0));

    // Mix stdin with other input files. Must fail.
    Path remoteFile = new Path("/remoteFile");
    argv = new String[] { "-appendToFile", file1.toString(), "-",
        remoteFile.toString() };
    res = ToolRunner.run(shell, argv);
    assertThat(res, not(0));
  }

  @Test (timeout = 30000)
  public void testSetXAttrPermission() throws Exception {
    // Verifies setfattr/getfattr enforce path permissions: without write
    // access a user cannot set or remove an xattr, and without read access
    // cannot read one.
    UserGroupInformation user = UserGroupInformation.
        createUserForTesting("user", new String[] {"mygroup"});

    PrintStream bak = null;
    try {
      Path p = new Path("/foo");
      dfs.mkdirs(p);
      // Save the real stderr; the shell's errors are captured below.
      bak = System.err;

      final FsShell fshell = new FsShell(dfs.getConf());
      final ByteArrayOutputStream out = new ByteArrayOutputStream();
      System.setErr(new PrintStream(out));

      // No permission to write xattr
      dfs.setPermission(p, new FsPermission((short) 0700));
      user.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          int ret = ToolRunner.run(fshell, new String[]{
              "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
          assertEquals("Returned should be 1", 1, ret);
          String str = out.toString();
          assertTrue("Permission denied printed",
              str.indexOf("Permission denied") != -1);
          out.reset();
          return null;
        }
      });

      // The owner (current user) can set the xattr.
      int ret = ToolRunner.run(fshell, new String[]{
          "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
      assertEquals("Returned should be 0", 0, ret);
      out.reset();

      // No permission to read and remove
      dfs.setPermission(p, new FsPermission((short) 0750));
      user.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          // Read
          int ret = ToolRunner.run(fshell, new String[]{
              "-getfattr", "-n", "user.a1", "/foo"});
          assertEquals("Returned should be 1", 1, ret);
          String str = out.toString();
          assertTrue("Permission denied printed",
              str.indexOf("Permission denied") != -1);
          out.reset();
          // Remove
          ret = ToolRunner.run(fshell, new String[]{
              "-setfattr", "-x", "user.a1", "/foo"});
          assertEquals("Returned should be 1", 1, ret);
          str = out.toString();
          assertTrue("Permission denied printed",
              str.indexOf("Permission denied") != -1);
          out.reset();
          return null;
        }
      });
    } finally {
      if
(bak != null) { System.setErr(bak); } } } /* HDFS-6413 xattr names erroneously handled as case-insensitive */ @Test (timeout = 30000) public void testSetXAttrCaseSensitivity() throws Exception { PrintStream bak = null; try { Path p = new Path("/mydir"); dfs.mkdirs(p); bak = System.err; final FsShell fshell = new FsShell(dfs.getConf()); final ByteArrayOutputStream out = new ByteArrayOutputStream(); System.setOut(new PrintStream(out)); doSetXattr(out, fshell, new String[] {"-setfattr", "-n", "User.Foo", "/mydir"}, new String[] {"-getfattr", "-d", "/mydir"}, new String[] {"user.Foo"}, new String[] {}); doSetXattr(out, fshell, new String[] {"-setfattr", "-n", "user.FOO", "/mydir"}, new String[] {"-getfattr", "-d", "/mydir"}, new String[] {"user.Foo", "user.FOO"}, new String[] {}); doSetXattr(out, fshell, new String[] {"-setfattr", "-n", "USER.foo", "/mydir"}, new String[] {"-getfattr", "-d", "/mydir"}, new String[] {"user.Foo", "user.FOO", "user.foo"}, new String[] {}); doSetXattr(out, fshell, new String[] {"-setfattr", "-n", "USER.fOo", "-v", "myval", "/mydir"}, new String[] {"-getfattr", "-d", "/mydir"}, new String[] {"user.Foo", "user.FOO", "user.foo", "user.fOo=\"myval\""}, new String[] {"user.Foo=", "user.FOO=", "user.foo="}); doSetXattr(out, fshell, new String[] {"-setfattr", "-x", "useR.foo", "/mydir"}, new String[] {"-getfattr", "-d", "/mydir"}, new String[] {"user.Foo", "user.FOO"}, new String[] {"foo"}); doSetXattr(out, fshell, new String[] {"-setfattr", "-x", "USER.FOO", "/mydir"}, new String[] {"-getfattr", "-d", "/mydir"}, new String[] {"user.Foo"}, new String[] {"FOO"}); doSetXattr(out, fshell, new String[] {"-setfattr", "-x", "useR.Foo", "/mydir"}, new String[] {"-getfattr", "-n", "User.Foo", "/mydir"}, new String[] {}, new String[] {"Foo"}); } finally { if (bak != null) { System.setOut(bak); } } } private void doSetXattr(ByteArrayOutputStream out, FsShell fshell, String[] setOp, String[] getOp, String[] expectArr, String[] dontExpectArr) throws 
      Exception {
    int ret = ToolRunner.run(fshell, setOp);
    out.reset();
    ret = ToolRunner.run(fshell, getOp);
    final String str = out.toString();
    // Every expected token must be present in the getfattr output.
    for (int i = 0; i < expectArr.length; i++) {
      final String expect = expectArr[i];
      final StringBuilder sb = new StringBuilder
          ("Incorrect results from getfattr. Expected: ");
      sb.append(expect).append(" Full Result: ");
      sb.append(str);
      assertTrue(sb.toString(),
          str.indexOf(expect) != -1);
    }

    // ...and none of the forbidden tokens may appear.
    for (int i = 0; i < dontExpectArr.length; i++) {
      String dontExpect = dontExpectArr[i];
      final StringBuilder sb = new StringBuilder
          ("Incorrect results from getfattr. Didn't Expect: ");
      sb.append(dontExpect).append(" Full Result: ");
      sb.append(str);
      assertTrue(sb.toString(),
          str.indexOf(dontExpect) == -1);
    }
    out.reset();
  }

  /**
   *
   * Test to make sure that user namespace xattrs can be set only if path has
   * access and for sticky directories, only owner/privileged user can write.
   * Trusted namespace xattrs can be set only with privileged users.
   *
   * As user1: Create a directory (/foo) as user1, chown it to user1 (and
   * user1's group), grant rwx to "other".
   *
   * As user2: Set an xattr (should pass with path access).
   *
   * As user1: Set an xattr (should pass).
   *
   * As user2: Read the xattr (should pass). Remove the xattr (should pass with
   * path access).
   *
   * As user1: Read the xattr (should pass). Remove the xattr (should pass).
   *
   * As user1: Change permissions only to owner
   *
   * As User2: Set an Xattr (Should fail set with no path access) Remove an
   * Xattr (Should fail with no path access)
   *
   * As SuperUser: Set an Xattr with Trusted (Should pass)
   */
  @Test (timeout = 30000)
  public void testSetXAttrPermissionAsDifferentOwner() throws Exception {
    final String root = "/testSetXAttrPermissionAsDifferentOwner";
    final String USER1 = "user1";
    final String GROUP1 = "supergroup";
    final UserGroupInformation user1 = UserGroupInformation.
        createUserForTesting(USER1, new String[] {GROUP1});
    final UserGroupInformation user2 = UserGroupInformation.
        createUserForTesting("user2", new String[] {"mygroup2"});
    final UserGroupInformation SUPERUSER =
        UserGroupInformation.getCurrentUser();
    PrintStream bak = null;
    try {
      dfs.mkdirs(new Path(root));
      dfs.setOwner(new Path(root), USER1, GROUP1);
      // Save the real stderr; shell errors are captured below.
      bak = System.err;

      final FsShell fshell = new FsShell(dfs.getConf());
      final ByteArrayOutputStream out = new ByteArrayOutputStream();
      System.setErr(new PrintStream(out));

      // Test 1. Let user1 be owner for /foo
      user1.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          final int ret = ToolRunner.run(fshell, new String[]{
              "-mkdir", root + "/foo"});
          assertEquals("Return should be 0", 0, ret);
          out.reset();
          return null;
        }
      });

      // Test 2. Give access to others
      user1.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          // Give access to "other"
          final int ret = ToolRunner.run(fshell, new String[]{
              "-chmod", "707", root + "/foo"});
          assertEquals("Return should be 0", 0, ret);
          out.reset();
          return null;
        }
      });

      // Test 3. Should be allowed to write xattr if there is a path access to
      // user (user2).
      user2.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          final int ret = ToolRunner.run(fshell, new String[]{
              "-setfattr", "-n", "user.a1", "-v", "1234", root + "/foo"});
          assertEquals("Returned should be 0", 0, ret);
          out.reset();
          return null;
        }
      });

      // Test 4. There should be permission to write xattr for
      // the owning user with write permissions.
      user1.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          final int ret = ToolRunner.run(fshell, new String[]{
              "-setfattr", "-n", "user.a1", "-v", "1234", root + "/foo"});
          assertEquals("Returned should be 0", 0, ret);
          out.reset();
          return null;
        }
      });

      // Test 5. There should be permission to read non-owning user (user2) if
      // there is path access to that user and also can remove.
      user2.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          // Read
          int ret = ToolRunner.run(fshell, new String[] {
              "-getfattr", "-n", "user.a1", root + "/foo" });
          assertEquals("Returned should be 0", 0, ret);
          out.reset();
          // Remove
          ret = ToolRunner.run(fshell, new String[] {
              "-setfattr", "-x", "user.a1", root + "/foo" });
          assertEquals("Returned should be 0", 0, ret);
          out.reset();
          return null;
        }
      });

      // Test 6. There should be permission to read/remove for
      // the owning user with path access.
      // NOTE(review): this doAs body is empty -- the read/remove assertions
      // described above are not implemented; confirm intent upstream.
      user1.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          return null;
        }
      });

      // Test 7. Change permission to have path access only to owner(user1)
      user1.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          // Revoke access from "other" (owner-only permissions)
          final int ret = ToolRunner.run(fshell, new String[]{
              "-chmod", "700", root + "/foo"});
          assertEquals("Return should be 0", 0, ret);
          out.reset();
          return null;
        }
      });

      // Test 8. There should be no permissions to set for
      // the non-owning user with no path access.
      user2.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          // set
          int ret = ToolRunner.run(fshell, new String[] {
              "-setfattr", "-n", "user.a2", root + "/foo" });
          assertEquals("Returned should be 1", 1, ret);
          final String str = out.toString();
          assertTrue("Permission denied printed",
              str.indexOf("Permission denied") != -1);
          out.reset();
          return null;
        }
      });

      // Test 9. There should be no permissions to remove for
      // the non-owning user with no path access.
      user2.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          // remove
          int ret = ToolRunner.run(fshell, new String[] {
              "-setfattr", "-x", "user.a2", root + "/foo" });
          assertEquals("Returned should be 1", 1, ret);
          final String str = out.toString();
          assertTrue("Permission denied printed",
              str.indexOf("Permission denied") != -1);
          out.reset();
          return null;
        }
      });

      // Test 10. Superuser should be allowed to set with trusted namespace
      SUPERUSER.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          // set
          int ret = ToolRunner.run(fshell, new String[] {
              "-setfattr", "-n", "trusted.a3", root + "/foo" });
          assertEquals("Returned should be 0", 0, ret);
          out.reset();
          return null;
        }
      });
    } finally {
      if (bak != null) {
        System.setErr(bak);
      }
    }
  }

  /*
   * 1. Test that CLI throws an exception and returns non-0 when user does
   * not have permission to read an xattr.
   * 2. Test that CLI throws an exception and returns non-0 when a non-existent
   * xattr is requested.
   */
  @Test (timeout = 120000)
  public void testGetFAttrErrors() throws Exception {
    final UserGroupInformation user = UserGroupInformation.
        createUserForTesting("user", new String[] {"mygroup"});
    PrintStream bakErr = null;
    try {
      final Path p = new Path("/testGetFAttrErrors");
      dfs.mkdirs(p);
      // Save the real stderr; shell errors are captured below.
      bakErr = System.err;

      final FsShell fshell = new FsShell(dfs.getConf());
      final ByteArrayOutputStream out = new ByteArrayOutputStream();
      System.setErr(new PrintStream(out));

      // No permission for "other".
      dfs.setPermission(p, new FsPermission((short) 0700));

      {
        // As the owner, setting the xattr must succeed.
        final int ret = ToolRunner.run(fshell, new String[] {
            "-setfattr", "-n", "user.a1", "-v", "1234", p.toString()});
        assertEquals("Returned should be 0", 0, ret);
        out.reset();
      }

      user.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          // The unprivileged user must not see the xattr value.
          int ret = ToolRunner.run(fshell, new String[] {
              "-getfattr", "-n", "user.a1", p.toString()});
          String str = out.toString();
          assertTrue("xattr value was incorrectly returned",
              str.indexOf("1234") == -1);
          out.reset();
          return null;
        }
      });

      {
        // Requesting a non-existent xattr must print the not-found error.
        final int ret = ToolRunner.run(fshell, new String[]{
            "-getfattr", "-n", "user.nonexistent", p.toString()});
        String str = out.toString();
        assertTrue("xattr value was incorrectly returned",
            str.indexOf(
                "getfattr: At least one of the attributes provided was not found")
                >= 0);
        out.reset();
      }
    } finally {
      if (bakErr != null) {
        System.setErr(bakErr);
      }
    }
  }

  /**
   * Test that the server trash configuration is respected when
   * the client configuration is not set.
   */
  @Test (timeout = 30000)
  public void testServerConfigRespected() throws Exception {
    deleteFileUsingTrash(true, false);
  }

  /**
   * Test that server trash configuration is respected even when the
   * client configuration is set.
   */
  @Test (timeout = 30000)
  public void testServerConfigRespectedWithClient() throws Exception {
    deleteFileUsingTrash(true, true);
  }

  /**
   * Test that the client trash configuration is respected when
   * the server configuration is not set.
   */
  @Test (timeout = 30000)
  public void testClientConfigRespected() throws Exception {
    deleteFileUsingTrash(false, true);
  }

  /**
   * Test that trash is disabled by default.
   */
  @Test (timeout = 30000)
  public void testNoTrashConfig() throws Exception {
    deleteFileUsingTrash(false, false);
  }

  @Test (timeout = 30000)
  public void testListReserved() throws IOException {
    // /.reserved must be stat-able and list exactly .inodes and raw,
    // but must not appear when listing the root.
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2).build();
    FileSystem fs = cluster.getFileSystem();
    FsShell shell = new FsShell();
    shell.setConf(conf);

    FileStatus test = fs.getFileStatus(new Path("/.reserved"));
    assertEquals(FSDirectory.DOT_RESERVED_STRING, test.getPath().getName());

    // Listing /.reserved/ should show 2 items: raw and .inodes
    FileStatus[] stats = fs.listStatus(new Path("/.reserved"));
    assertEquals(2, stats.length);
    assertEquals(FSDirectory.DOT_INODES_STRING, stats[0].getPath().getName());
    assertEquals(conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY),
        stats[0].getGroup());
    assertEquals("raw", stats[1].getPath().getName());
    assertEquals(conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY),
        stats[1].getGroup());

    // Listing / should not show /.reserved
    stats = fs.listStatus(new Path("/"));
    assertEquals(0, stats.length);

    // runCmd prints error into System.err, thus verify from there.
PrintStream syserr = System.err; final ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintStream ps = new PrintStream(baos); System.setErr(ps); try { runCmd(shell, "-ls", "/.reserved"); assertEquals(0, baos.toString().length()); runCmd(shell, "-ls", "/.reserved/raw/.reserved"); assertTrue(baos.toString().contains("No such file or directory")); } finally { System.setErr(syserr); cluster.shutdown(); } } @Test (timeout = 30000) public void testMkdirReserved() throws IOException { try { dfs.mkdirs(new Path("/.reserved")); fail("Can't mkdir /.reserved"); } catch (Exception e) { // Expected, HadoopIllegalArgumentException thrown from remote assertTrue(e.getMessage().contains("\".reserved\" is reserved")); } } @Test (timeout = 30000) public void testRmReserved() throws IOException { try { dfs.delete(new Path("/.reserved"), true); fail("Can't delete /.reserved"); } catch (Exception e) { // Expected, InvalidPathException thrown from remote assertTrue(e.getMessage().contains("Invalid path name /.reserved")); } } @Test //(timeout = 30000) public void testCopyReserved() throws IOException { final File localFile = new File(TEST_ROOT_DIR, "testFileForPut"); localFile.createNewFile(); final String localfilepath = new Path(localFile.getAbsolutePath()).toUri().toString(); try { dfs.copyFromLocalFile(new Path(localfilepath), new Path("/.reserved")); fail("Can't copyFromLocal to /.reserved"); } catch (Exception e) { // Expected, InvalidPathException thrown from remote assertTrue(e.getMessage().contains("Invalid path name /.reserved")); } final String testdir = GenericTestUtils.getTempPath( "TestDFSShell-testCopyReserved"); final Path hdfsTestDir = new Path(testdir); writeFile(dfs, new Path(testdir, "testFileForPut")); final Path src = new Path(hdfsTestDir, "srcfile"); dfs.create(src).close(); assertTrue(dfs.exists(src)); // runCmd prints error into System.err, thus verify from there. 
PrintStream syserr = System.err; final ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintStream ps = new PrintStream(baos); System.setErr(ps); try { FsShell shell = new FsShell(dfs.getConf()); runCmd(shell, "-cp", src.toString(), "/.reserved"); assertTrue(baos.toString().contains("Invalid path name /.reserved")); } finally { System.setErr(syserr); } } @Test (timeout = 30000) public void testChmodReserved() throws IOException { // runCmd prints error into System.err, thus verify from there. PrintStream syserr = System.err; final ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintStream ps = new PrintStream(baos); System.setErr(ps); try { FsShell shell = new FsShell(dfs.getConf()); runCmd(shell, "-chmod", "777", "/.reserved"); assertTrue(baos.toString().contains("Invalid path name /.reserved")); } finally { System.setErr(syserr); } } @Test (timeout = 30000) public void testChownReserved() throws IOException { // runCmd prints error into System.err, thus verify from there. 
PrintStream syserr = System.err; final ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintStream ps = new PrintStream(baos); System.setErr(ps); try { FsShell shell = new FsShell(dfs.getConf()); runCmd(shell, "-chown", "user1", "/.reserved"); assertTrue(baos.toString().contains("Invalid path name /.reserved")); } finally { System.setErr(syserr); } } @Test (timeout = 30000) public void testSymLinkReserved() throws IOException { try { dfs.createSymlink(new Path("/.reserved"), new Path("/rl1"), false); fail("Can't create symlink to /.reserved"); } catch (Exception e) { // Expected, InvalidPathException thrown from remote assertTrue(e.getMessage().contains("Invalid target name: /.reserved")); } } @Test (timeout = 30000) public void testSnapshotReserved() throws IOException { final Path reserved = new Path("/.reserved"); try { dfs.allowSnapshot(reserved); fail("Can't allow snapshot on /.reserved"); } catch (FileNotFoundException e) { assertTrue(e.getMessage().contains("Directory does not exist")); } try { dfs.createSnapshot(reserved, "snap"); fail("Can't create snapshot on /.reserved"); } catch (FileNotFoundException e) { assertTrue(e.getMessage().contains("Directory/File does not exist")); } } }
apache-2.0
apache/oodt
crawler/src/main/java/org/apache/oodt/cas/crawl/daemon/CrawlDaemon.java
5267
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.oodt.cas.crawl.daemon;

//OODT imports
import org.apache.oodt.cas.crawl.ProductCrawler;

//APACHE imports
import org.apache.xmlrpc.WebServer;

//JDK imports
import java.util.logging.Level;
import java.util.logging.Logger;

/**
 * A daemon utility class for {@link ProductCrawler}s that allows a regular
 * ProductCrawler to be run as a daemon, and statistics about crawling to be
 * kept. The daemon is an XML-RPC accessible web service (registered under the
 * handler name {@code crawldaemon}).
 *
 * @author mattmann
 * @version $Revision$
 * @deprecated soon be replaced by avro-rpc
 */
@Deprecated
public class CrawlDaemon {

    /** Divisor for converting milliseconds to seconds in log output. */
    public static final double DOUBLE = 1000.0;

    /* our log stream */
    private static final Logger LOG = Logger.getLogger(CrawlDaemon.class.getName());

    /*
     * Are we running or not? volatile: stop() is invoked from an XML-RPC
     * handler thread while startCrawling()'s loop reads this flag, so without
     * volatile the loop thread may never observe the shutdown request.
     */
    private volatile boolean running = true;

    /* wait interval in seconds between crawls */
    private long waitInterval = -1;

    /* number of times that the crawler has been called */
    private int numCrawls = 0;

    /* the amount of milliseconds spent crawling */
    private long milisCrawling = 0L;

    /* the product crawler that this daemon should use */
    private ProductCrawler crawler = null;

    /* the port that this crawl daemon should run on */
    private int daemonPort = 9999;

    /**
     * Constructs a new daemon.
     *
     * @param wait seconds to sleep between successive crawls
     * @param crawler the crawler to invoke on each cycle
     * @param port TCP port for the XML-RPC control web server
     */
    public CrawlDaemon(int wait, ProductCrawler crawler, int port) {
        this.waitInterval = wait;
        this.crawler = crawler;
        this.daemonPort = port;
    }

    /**
     * Starts the XML-RPC control server and enters the crawl loop. Blocks
     * until {@link #stop()} is called (via XML-RPC), then logs crawl
     * statistics and shuts the server down.
     */
    public void startCrawling() {
        // start up the web server so the daemon can be controlled remotely
        WebServer server = new WebServer(this.daemonPort);
        server.addHandler("crawldaemon", this);
        server.start();

        LOG.log(Level.INFO, "Crawl Daemon started by "
                + System.getProperty("user.name", "unknown"));

        while (running) {
            // okay, time to crawl: track wall-clock time per crawl
            long timeBefore = System.currentTimeMillis();
            crawler.crawl();
            long timeAfter = System.currentTimeMillis();
            milisCrawling += (timeAfter - timeBefore);
            numCrawls++;

            LOG.log(Level.INFO, "Sleeping for: [" + waitInterval + "] seconds");

            // take a nap
            try {
                // Thread.sleep is static: the original called it via
                // Thread.currentThread(), which misleadingly suggested a
                // per-thread operation.
                Thread.sleep(waitInterval * 1000L);
            } catch (InterruptedException ignore) {
                // deliberate best-effort: an interrupt merely cuts the nap
                // short; shutdown is driven solely by stop() flipping
                // 'running', not by thread interruption
            }
        }

        LOG.log(Level.INFO, "Crawl Daemon: Shutting down gracefully");
        LOG.log(Level.INFO, "Num Crawls: [" + this.numCrawls + "]");
        LOG.log(Level.INFO, "Total time spent crawling: ["
                + (this.milisCrawling / DOUBLE) + "] seconds");
        LOG.log(Level.INFO, "Average Crawl Time: ["
                + (this.getAverageCrawlTime() / DOUBLE) + "] seconds");
        server.shutdown();
    }

    /**
     * Average wall-clock time per crawl in milliseconds.
     *
     * @return the average crawl time, or {@code 0.0} if no crawl has
     *         completed yet (previously this returned NaN from 0/0)
     */
    public double getAverageCrawlTime() {
        if (numCrawls == 0) {
            return 0.0;
        }
        return ((double) milisCrawling) / numCrawls;
    }

    /**
     * @return the crawler
     */
    public ProductCrawler getCrawler() {
        return crawler;
    }

    /**
     * @param crawler
     *            the crawler to set
     */
    public void setCrawler(ProductCrawler crawler) {
        this.crawler = crawler;
    }

    /**
     * @return the milisCrawling (narrowed to int for XML-RPC transport)
     */
    public int getMilisCrawling() {
        return (int) milisCrawling;
    }

    /**
     * @param milisCrawling
     *            the milisCrawling to set
     */
    public void setMilisCrawling(long milisCrawling) {
        this.milisCrawling = milisCrawling;
    }

    /**
     * @return the numCrawls
     */
    public int getNumCrawls() {
        return numCrawls;
    }

    /**
     * @param numCrawls
     *            the numCrawls to set
     */
    public void setNumCrawls(int numCrawls) {
        this.numCrawls = numCrawls;
    }

    /**
     * @return the running
     */
    public boolean isRunning() {
        return running;
    }

    /**
     * Requests a graceful shutdown of the crawl loop. Safe to call from the
     * XML-RPC handler thread.
     *
     * @return always {@code false} (XML-RPC requires a serializable return)
     */
    public boolean stop() {
        this.running = false;
        return false;
    }

    /**
     * @return the waitInterval (narrowed to int for XML-RPC transport)
     */
    public int getWaitInterval() {
        return (int) waitInterval;
    }

    /**
     * @param waitInterval
     *            the waitInterval to set
     */
    public void setWaitInterval(long waitInterval) {
        this.waitInterval = waitInterval;
    }

    // Deliberately private and throwing: this class is meant to be embedded,
    // never launched as a standalone entry point.
    private static void main(String[] args) throws InstantiationException {
        throw new InstantiationException(
                "Don't call a crawl daemon by its main function!");
    }
}
apache-2.0
sankarh/hive
ql/src/java/org/apache/hadoop/hive/metastore/SynchronizedMetaStoreClient.java
5866
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hive.metastore;

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.ValidTxnList;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.CmRecycleResponse;
import org.apache.hadoop.hive.metastore.api.CmRecycleRequest;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.FireEventRequest;
import org.apache.hadoop.hive.metastore.api.FireEventResponse;
import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.LockRequest;
import org.apache.hadoop.hive.metastore.api.LockResponse;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
import org.apache.hadoop.hive.metastore.api.UnknownTableException;
import org.apache.hadoop.hive.metastore.api.WriteNotificationLogRequest;
import org.apache.hadoop.hive.metastore.api.WriteNotificationLogBatchRequest;
import org.apache.thrift.TException;

/**
 * A thin wrapper around an {@link IMetaStoreClient} that serializes access to
 * the underlying client by marking each delegating method {@code synchronized}
 * (on this wrapper instance). Every method simply forwards to the wrapped
 * client; no arguments or results are transformed.
 *
 * NOTE(review): {@code alter_partitions}, {@code isSameConfObj} and
 * {@code isCompatibleWith} are NOT synchronized, unlike the rest of the
 * class — confirm whether that asymmetry is intentional before relying on
 * full mutual exclusion across all operations.
 */
public final class SynchronizedMetaStoreClient {
  // The wrapped client; all calls delegate to it under this wrapper's monitor
  // (except the unsynchronized methods noted above).
  private final IMetaStoreClient client;

  public SynchronizedMetaStoreClient(IMetaStoreClient client) {
    this.client = client;
  }

  // ---- transaction lifecycle ----

  /** Opens a new transaction on behalf of the given user; returns its id. */
  public synchronized long openTxn(String user) throws TException {
    return client.openTxn(user);
  }

  /** Commits the transaction with the given id. */
  public synchronized void commitTxn(long txnid) throws TException {
    client.commitTxn(txnid);
  }

  /** Rolls back the transaction with the given id. */
  public synchronized void rollbackTxn(long txnid) throws TException {
    client.rollbackTxn(txnid);
  }

  /** Sends a heartbeat for the given transaction/lock pair to keep them alive. */
  public synchronized void heartbeat(long txnid, long lockid) throws TException {
    client.heartbeat(txnid, lockid);
  }

  /** Returns the valid-transaction list as seen from {@code currentTxn}. */
  public synchronized ValidTxnList getValidTxns(long currentTxn) throws TException {
    return client.getValidTxns(currentTxn);
  }

  // ---- locking ----

  /** Acquires the locks described by the request. */
  public synchronized LockResponse lock(LockRequest request) throws TException {
    return client.lock(request);
  }

  // ---- partition DDL ----

  /** Adds a single partition; returns the partition as stored. */
  public synchronized Partition add_partition(Partition partition) throws TException {
    return client.add_partition(partition);
  }

  /** Adds multiple partitions; returns the number added. */
  public synchronized int add_partitions(List<Partition> partitions) throws TException {
    return client.add_partitions(partitions);
  }

  /** Alters one partition, passing through the environment context and write-id list. */
  public synchronized void alter_partition(String catName, String dbName, String tblName,
      Partition newPart, EnvironmentContext environmentContext, String writeIdList)
          throws TException {
    client.alter_partition(catName, dbName, tblName, newPart, environmentContext, writeIdList);
  }

  // NOTE(review): unlike its single-partition sibling above, this method is
  // not synchronized — verify whether callers depend on that.
  public void alter_partitions(String catName, String dbName, String tblName,
                               List<Partition> partitions, EnvironmentContext environmentContext,
                               String writeIdList, long writeId) throws TException {
    client.alter_partitions(catName, dbName, tblName, partitions,
            environmentContext, writeIdList, writeId);
  }

  /** Checks the state of a previously requested lock. */
  public synchronized LockResponse checkLock(long lockid) throws TException {
    return client.checkLock(lockid);
  }

  /** Releases the given lock. */
  public synchronized void unlock(long lockid) throws TException {
    client.unlock(lockid);
  }

  /** Lists locks matching the request. */
  public synchronized ShowLocksResponse showLocks(ShowLocksRequest showLocksRequest) throws TException {
    return client.showLocks(showLocksRequest);
  }

  /** Fetches a partition by value list, with authorization info for the given user/groups. */
  public synchronized Partition getPartitionWithAuthInfo(String dbName, String tableName,
      List<String> pvals, String userName, List<String> groupNames)
      throws MetaException, UnknownTableException, NoSuchObjectException, TException {
    return client.getPartitionWithAuthInfo(dbName, tableName, pvals, userName, groupNames);
  }

  /** Creates and returns a partition with the given values, appending it to the table. */
  public synchronized Partition appendPartition(String db_name, String table_name,
      List<String> part_vals) throws InvalidObjectException, AlreadyExistsException,
      MetaException, TException {
    return client.appendPartition(db_name, table_name, part_vals);
  }

  // ---- notification / events ----

  /** Fires a listener event (e.g. for DML notifications) and returns the response. */
  public synchronized FireEventResponse fireListenerEvent(FireEventRequest rqst) throws TException {
    return client.fireListenerEvent(rqst);
  }

  /** Records a single write-notification log entry. */
  public synchronized void addWriteNotificationLog(WriteNotificationLogRequest rqst) throws TException {
    client.addWriteNotificationLog(rqst);
  }

  /** Records a batch of write-notification log entries. */
  public synchronized void addWriteNotificationLogInBatch(WriteNotificationLogBatchRequest rqst) throws TException {
    client.addWriteNotificationLogInBatch(rqst);
  }

  /** Moves a directory to the change-management path for recycling. */
  public synchronized CmRecycleResponse recycleDirToCmPath(CmRecycleRequest request) throws MetaException, TException {
    return client.recycleDirToCmPath(request);
  }

  /** Closes the underlying client connection. */
  public synchronized void close() {
    client.close();
  }

  // NOTE(review): the two configuration checks below are read-only probes and
  // are not synchronized — presumably safe, but confirm against the client
  // implementation's thread-safety guarantees.

  public boolean isSameConfObj(Configuration c) {
    return client.isSameConfObj(c);
  }

  public boolean isCompatibleWith(Configuration c) {
    return client.isCompatibleWith(c);
  }
}
apache-2.0
apache/sandesha
modules/core/src/main/java/org/apache/sandesha2/policy/builders/RMAssertionBuilder.java
1984
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.sandesha2.policy.builders; import javax.xml.namespace.QName; import org.apache.axiom.om.OMElement; import org.apache.neethi.Assertion; import org.apache.neethi.AssertionBuilderFactory; import org.apache.neethi.Policy; import org.apache.neethi.PolicyComponent; import org.apache.neethi.PolicyEngine; import org.apache.neethi.builders.AssertionBuilder; import org.apache.sandesha2.Sandesha2Constants; import org.apache.sandesha2.policy.SandeshaPolicyBean; public class RMAssertionBuilder implements AssertionBuilder<OMElement> { public Assertion build(OMElement element, AssertionBuilderFactory factory) throws IllegalArgumentException { SandeshaPolicyBean propertyBean = new SandeshaPolicyBean(); Policy policy = PolicyEngine.getPolicy(element.getFirstElement()); for (PolicyComponent component : policy.getPolicyComponents()) { ((SandeshaPropertyAssertion)component).apply(propertyBean); } return propertyBean; } public QName[] getKnownElements() { return new QName[] { new QName( Sandesha2Constants.Assertions.URI_RM_POLICY_NS, "RMAssertion") }; } }
apache-2.0
apache/curator
curator-x-async/src/main/java/org/apache/curator/x/async/modeled/versioned/Versioned.java
1868
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.curator.x.async.modeled.versioned; /** * A container for a model instance and a version. Can be used with the * {@link org.apache.curator.x.async.modeled.ModeledFramework#versioned()} APIs */ @FunctionalInterface public interface Versioned<T> { /** * Returns the contained model * * @return model */ T model(); /** * Returns the version of the model when it was read * * @return version */ default int version() { return -1; } /** * Return a new Versioned wrapper for the given model and version * * @param model model * @param version version * @return new Versioned wrapper */ static <T> Versioned<T> from(T model, int version) { return new Versioned<T>() { @Override public int version() { return version; } @Override public T model() { return model; } }; } }
apache-2.0
dimbleby/JGroups
src/org/jgroups/util/XMLSchemaGenerator.java
13129
package org.jgroups.util;

import org.jgroups.Version;
import org.jgroups.annotations.Property;
import org.jgroups.annotations.XmlAttribute;
import org.jgroups.annotations.XmlElement;
import org.jgroups.annotations.XmlInclude;
import org.jgroups.stack.Protocol;
import org.w3c.dom.DOMImplementation;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;

import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.transform.OutputKeys;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.net.URL;
import java.util.*;

/**
 * Iterates over all concrete Protocol classes and creates XML schema used for validation of configuration files.
 *
 * https://jira.jboss.org/jira/browse/JGRP-448
 *
 * @author Vladimir Blagojevic
 * @author Bela Ban
 *
 */
public class XMLSchemaGenerator {

    protected static final String PROT_PACKAGE="org.jgroups.protocols";

    protected static final String[] PACKAGES={"", "pbcast", "tom", "relay", "rules"};

    /**
     * Entry point. Usage: {@code XMLSchemaGenerator -o <output dir>}; writes
     * {@code jgroups-<major>.<minor>.xsd} into the output directory ("./" by default).
     */
    public static void main(String[] args) {

        String outputDir = "./";

        for (int i = 0; i < args.length; i++) {
            String arg = args[i];
            if ("-o".equals(arg)) {
                outputDir = args[++i];
            } else {
                System.out.println("XMLSchemaGenerator -o <path to newly created xsd schema file>");
                return;
            }
        }

        String version = Version.major + "." + Version.minor;
        File f = new File(outputDir, "jgroups-" + version + ".xsd");
        // try-with-resources: the original leaked the FileWriter whenever any
        // step between open and close threw
        try (FileWriter fw = new FileWriter(f, false)) {
            DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
            DocumentBuilder builder = factory.newDocumentBuilder();
            DOMImplementation impl = builder.getDOMImplementation();
            Document xmldoc = impl.createDocument("http://www.w3.org/2001/XMLSchema", "xs:schema", null);
            xmldoc.getDocumentElement().setAttribute("targetNamespace", "urn:org:jgroups");
            xmldoc.getDocumentElement().setAttribute("elementFormDefault", "qualified");
            xmldoc.getDocumentElement().setAttribute("attributeFormDefault", "qualified");
            xmldoc.getDocumentElement().setAttribute("version", version);

            // <xs:complexType name="ConfigType"> holds an unbounded choice of
            // one element per concrete protocol
            Element complexType = xmldoc.createElement("xs:complexType");
            complexType.setAttribute("name", "ConfigType");
            xmldoc.getDocumentElement().appendChild(complexType);

            Element allType = xmldoc.createElement("xs:choice");
            allType.setAttribute("maxOccurs", "unbounded");
            complexType.appendChild(allType);

            generateProtocolSchema(xmldoc, allType, PACKAGES);

            Element xsElement = xmldoc.createElement("xs:element");
            xsElement.setAttribute("name", "config");
            xsElement.setAttribute("type", "ConfigType");
            xmldoc.getDocumentElement().appendChild(xsElement);

            // serialize the DOM tree to the .xsd file, indented
            DOMSource domSource = new DOMSource(xmldoc);
            StreamResult streamResult = new StreamResult(fw);
            TransformerFactory tf = TransformerFactory.newInstance();
            Transformer serializer = tf.newTransformer();
            serializer.setOutputProperty(OutputKeys.METHOD, "xml");
            serializer.setOutputProperty(OutputKeys.INDENT, "yes");
            serializer.setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "2");
            serializer.transform(domSource, streamResult);

            fw.flush();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Finds every concrete, public Protocol subclass in the given sub-packages
     * of {@link #PROT_PACKAGE} and appends one schema element per class to
     * {@code parent}, in canonical-name order (for a stable schema).
     */
    protected static void generateProtocolSchema(Document xmldoc, Element parent, String... suffixes) throws Exception {
        for (String suffix : suffixes) {
            String package_name = PROT_PACKAGE + (suffix == null || suffix.isEmpty() ? "" : "." + suffix);
            Set<Class<?>> classes = getClasses(Protocol.class, package_name);
            List<Class<?>> sortedClasses = new ArrayList<>(classes);
            sortedClasses.sort(Comparator.comparing(Class::getCanonicalName));
            for (Class<?> clazz : sortedClasses)
                classToXML(xmldoc, parent, clazz, package_name);
        }
    }

    /**
     * Scans {@code packageName} on the context classloader (exploded
     * directories only, not jars) for public, concrete, non-anonymous classes
     * assignable to {@code assignableFrom}.
     */
    private static Set<Class<?>> getClasses(Class<?> assignableFrom, String packageName)
            throws IOException, ClassNotFoundException {
        ClassLoader loader = Thread.currentThread().getContextClassLoader();
        Set<Class<?>> classes = new HashSet<>();
        String path = packageName.replace('.', '/');
        URL resource = loader.getResource(path);
        if (resource != null) {
            String filePath = resource.getFile();
            if (filePath != null && new File(filePath).isDirectory()) {
                // list() may return null on an I/O error even for a directory
                String[] files = new File(filePath).list();
                if (files != null) {
                    for (String file : files) {
                        if (file.endsWith(".class")) {
                            // strip the trailing ".class"; indexOf would truncate
                            // wrongly for names containing ".class" in the middle
                            String name = packageName + '.'
                                    + file.substring(0, file.length() - ".class".length());
                            Class<?> clazz = Class.forName(name);
                            int mods = clazz.getModifiers();
                            boolean isConcreteClass = !Modifier.isAbstract(mods);
                            boolean is_public = Modifier.isPublic(mods);
                            boolean generate = is_public && isConcreteClass && !clazz.isAnonymousClass();
                            if (assignableFrom.isAssignableFrom(clazz) && generate)
                                classes.add(clazz);
                        }
                    }
                }
            }
        }
        return classes;
    }

    /**
     * Handles a single protocol class: processes its {@link XmlInclude}
     * annotation (adding xs:import/xs:include at the top of the schema), then
     * appends its element definition to {@code parent}.
     */
    private static void classToXML(Document xmldoc, Element parent, Class<?> clazz,
                                   String preAppendToSimpleClassName) throws Exception {
        XmlInclude incl = Util.getAnnotation(clazz, XmlInclude.class);
        if (incl != null) {
            String[] schemas = incl.schema();
            for (String schema : schemas) {
                Element incl_el = xmldoc.createElement(incl.type() == XmlInclude.Type.IMPORT ? "xs:import" : "xs:include");
                if (!incl.namespace().isEmpty())
                    incl_el.setAttribute("namespace", incl.namespace());
                incl_el.setAttribute("schemaLocation", schema);

                // imports/includes must precede other schema children
                Node first_child = xmldoc.getDocumentElement().getFirstChild();
                if (first_child == null)
                    xmldoc.getDocumentElement().appendChild(incl_el);
                else
                    xmldoc.getDocumentElement().insertBefore(incl_el, first_child);
            }
            if (!incl.alias().isEmpty())
                xmldoc.getDocumentElement().setAttribute("xmlns:" + incl.alias(), incl.namespace());
        }

        parent.appendChild(createXMLTree(xmldoc, clazz, preAppendToSimpleClassName));
    }

    /**
     * Builds the xs:element subtree for one protocol class: one xs:attribute
     * per {@code @Property}-annotated field or method (plus any class-level
     * {@code @XmlAttribute} names), sorted and de-duplicated via a TreeMap of
     * deferred writers so the output order is stable.
     */
    private static Element createXMLTree(final Document xmldoc, Class<?> clazz, String pkgname) throws Exception {

        Element classElement = xmldoc.createElement("xs:element");
        String elementName = pkgname + "." + clazz.getSimpleName();
        if (elementName.isEmpty()) {
            throw new IllegalArgumentException("Cannot create empty attribute name for element xs:element, class is " + clazz);
        }
        // protocols in the base package are addressed by simple name
        elementName = elementName.replace(PROT_PACKAGE + ".", "");
        classElement.setAttribute("name", elementName);

        final Element complexType = xmldoc.createElement("xs:complexType");
        classElement.appendChild(complexType);

        // the protocol has its own subtree
        XmlElement el = Util.getAnnotation(clazz, XmlElement.class);
        if (el != null) {
            Element choice = xmldoc.createElement("xs:choice");
            choice.setAttribute("minOccurs", "0");
            choice.setAttribute("maxOccurs", "unbounded");
            complexType.appendChild(choice);
            Element tmp = xmldoc.createElement("xs:element");
            tmp.setAttribute("name", el.name());
            tmp.setAttribute("type", el.type());
            choice.appendChild(tmp);
        }

        // attribute name -> deferred writer; TreeMap sorts and weeds out dupes
        Map<String, DelayingElementWriter> sortedElements = new TreeMap<>();

        XmlAttribute xml_attr = Util.getAnnotation(clazz, XmlAttribute.class);
        if (xml_attr != null) {
            String[] attrs = xml_attr.attrs();
            if (attrs.length > 0) {
                Set<String> set = new HashSet<>(Arrays.asList(attrs)); // to weed out dupes
                for (final String attr : set) {
                    sortedElements.put(attr, () -> {
                        Element attributeElement = xmldoc.createElement("xs:attribute");
                        attributeElement.setAttribute("name", attr);
                        attributeElement.setAttribute("type", "xs:string");
                        complexType.appendChild(attributeElement);
                    });
                }
            }
        }

        // iterate fields, walking up the class hierarchy to pick up inherited properties
        for (Class<?> clazzInLoop = clazz; clazzInLoop != null; clazzInLoop = clazzInLoop.getSuperclass()) {
            Field[] fields = clazzInLoop.getDeclaredFields();
            for (Field field : fields) {
                if (field.isAnnotationPresent(Property.class)) {
                    final String property;
                    final Property r = field.getAnnotation(Property.class);
                    // a deprecated @Property keeps the field name; otherwise
                    // an explicit name() wins over the field name
                    boolean annotationRedefinesName = !r.name().isEmpty() && r.deprecatedMessage().isEmpty();
                    if (annotationRedefinesName) {
                        property = r.name();
                    } else {
                        property = field.getName();
                    }
                    if (property == null || property.isEmpty()) {
                        throw new IllegalArgumentException("Cannot create empty attribute name for element xs:attribute, field is " + field);
                    }
                    sortedElements.put(property, () -> {
                        Element attributeElement = xmldoc.createElement("xs:attribute");
                        attributeElement.setAttribute("name", property);
                        // Agreement with Bela Ban on Jan-20-2009 (Go Obama!!!) to treat all types as
                        // xs:string since we do not know where users are going to use
                        // replacement tokens in configuration files. Therefore, the type becomes
                        // indeterminate.
                        attributeElement.setAttribute("type", "xs:string");
                        complexType.appendChild(attributeElement);

                        Element annotationElement = xmldoc.createElement("xs:annotation");
                        attributeElement.appendChild(annotationElement);

                        Element documentationElement = xmldoc.createElement("xs:documentation");
                        documentationElement.setTextContent(r.description());
                        annotationElement.appendChild(documentationElement);
                    });
                }
            }
        }

        // iterate methods; setter-style @Property methods are mapped to
        // attribute names derived from the method name when name() is absent
        Method[] methods = clazz.getMethods();
        for (Method method : methods) {
            if (method.isAnnotationPresent(Property.class)) {
                final Property annotation = method.getAnnotation(Property.class);
                final String name;
                if (annotation.name().isEmpty()) {
                    name = Util.methodNameToAttributeName(method.getName());
                } else {
                    name = annotation.name();
                }
                sortedElements.put(name, () -> {
                    Element attributeElement = xmldoc.createElement("xs:attribute");
                    attributeElement.setAttribute("name", name);
                    attributeElement.setAttribute("type", "xs:string");
                    complexType.appendChild(attributeElement);

                    String desc = annotation.description();
                    if (!desc.isEmpty()) {
                        Element annotationElement = xmldoc.createElement("xs:annotation");
                        attributeElement.appendChild(annotationElement);

                        Element documentationElement = xmldoc.createElement("xs:documentation");
                        documentationElement.setTextContent(annotation.description());
                        annotationElement.appendChild(documentationElement);
                    }
                });
            }
        }

        // write out ordered and duplicates weeded out elements
        for (Map.Entry<String, DelayingElementWriter> entry : sortedElements.entrySet()) {
            entry.getValue().writeElement();
        }

        return classElement;
    }

    /** Deferred element writer so attributes can be sorted before DOM insertion. */
    private interface DelayingElementWriter {
        void writeElement();
    }
}
apache-2.0
twitter/bookkeeper
hedwig-server/src/main/java/org/apache/hedwig/server/stats/HedwigServerStatsImpl.java
7406
package org.apache.hedwig.server.stats;

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.bookkeeper.stats.BaseStatsImpl;
import org.apache.bookkeeper.stats.OpStatsData;
import org.apache.bookkeeper.util.MathUtils;
import org.apache.hedwig.protocol.PubSubProtocol;
import org.apache.hedwig.protocol.PubSubProtocol.OperationType;
import org.apache.hedwig.util.Pair;

import com.google.protobuf.ByteString;
import com.twitter.common.stats.SampledStat;
import com.twitter.common.stats.Stats;

/**
 * This class implements the HedwigServerStatsLogger and HedwigServerStatsGetter interfaces.
 * It's a singleton responsible for stats for the entire server. Internals are implemented
 * using twitter.common.stats
 * TODO(Aniruddha): Add support for exporting ReadAheadCache stats.
 */
public class HedwigServerStatsImpl extends BaseStatsImpl
        implements HedwigServerStatsGetter, HedwigServerStatsLogger {

    // Maps a topic to the number of pending messages to be delivered. Maps to -1 if we don't have ownership
    // for a topic that we had earlier acquired.
    // One inner ConcurrentMap per PerTopicStatType; the outer map is populated once in the
    // constructor and never structurally modified afterwards.
    ConcurrentMap<PerTopicStatType, ConcurrentMap<ByteString, PerTopicStat>> perTopicLoggerMap
            = new ConcurrentHashMap<PerTopicStatType, ConcurrentMap<ByteString, PerTopicStat>>();

    /**
     * Builds the combined set of op-stat enum constants: the public protocol
     * {@link OperationType}s followed by the server-internal op types. The
     * combined array is handed to the {@link BaseStatsImpl} constructor.
     */
    static Enum[] allOpStatsEnums() {
        Enum[] publicOps = OperationType.values();
        Enum[] internalOps = HedwigServerInternalOpStatType.values();
        Enum[] stats = new Enum[publicOps.length + internalOps.length];
        System.arraycopy(publicOps, 0, stats, 0, publicOps.length);
        System.arraycopy(internalOps, 0, stats, publicOps.length, internalOps.length);
        return stats;
    }

    /**
     * @param name prefix used for every exported stat name (inherited {@code name}
     *             field — presumably set by the {@link BaseStatsImpl} super
     *             constructor; verify against that class).
     */
    public HedwigServerStatsImpl(String name) {
        super(name, allOpStatsEnums(), HedwigServerSimpleStatType.values());
        // Pre-create one per-topic map per stat type so getPerTopicLogger never sees null.
        for (PerTopicStatType type : PerTopicStatType.values()) {
            perTopicLoggerMap.put(type, new ConcurrentHashMap<ByteString, PerTopicStat>());
        }
        setUpStats();
    }

    /**
     * Set up any stats that are not already exported by our stat types.
     */
    private void setUpStats() {
        // Exports the maximum value for messages pending delivery across all topics.
        // NOTE(review): the sampled closures read the maps through
        // ServerStatsProvider's singleton rather than `this` — presumably the two
        // are the same instance; confirm against ServerStatsProvider.
        SampledStat<Long> localPendingStat = new SampledStat<Long>(name + "_max_pending_delivery", 0L) {
            @Override
            public Long doSample() {
                ConcurrentMap<ByteString, PerTopicStat> topicMap = ServerStatsProvider
                        .getStatsLoggerInstance().getPerTopicLogger(PerTopicStatType.LOCAL_PENDING);
                long maxPending = 0L;
                for (PerTopicStat _value : topicMap.values()) {
                    PerTopicPendingMessageStat value = (PerTopicPendingMessageStat)_value;
                    AtomicLong pending;
                    // A null pending counter means the topic has no pending count to report.
                    if (null == (pending = value.getPending())) {
                        continue;
                    }
                    maxPending = Math.max(maxPending, pending.get());
                }
                return maxPending;
            }
        };
        Stats.export(localPendingStat);

        // Export the max age of the last seen message across any region. Takes the maximum across all topics.
        //TODO(Aniruddha): Export this stat per region.
        SampledStat<Long> maxAgeCrossRegion = new SampledStat<Long>(name + "_max_age_cross_region", 0L) {
            @Override
            public Long doSample() {
                ConcurrentMap<ByteString, PerTopicStat> topicMap = ServerStatsProvider
                        .getStatsLoggerInstance().getPerTopicLogger(PerTopicStatType.CROSS_REGION);
                // Oldest "last seen" timestamp across every topic/region pair; the max age
                // is now() minus that minimum, clamped at zero.
                long minLastSeenMillis = Long.MAX_VALUE;
                for (PerTopicStat _value : topicMap.values()) {
                    PerTopicCrossRegionStat value = (PerTopicCrossRegionStat)_value;
                    ConcurrentMap<ByteString, Pair<PubSubProtocol.Message, Long>> regionMap = value.getRegionMap();
                    for (Pair<PubSubProtocol.Message, Long> regionValue : regionMap.values()) {
                        Long lastSeenTimestamp = regionValue.second();
                        if (null == lastSeenTimestamp) {
                            continue;
                        }
                        minLastSeenMillis = Math.min(minLastSeenMillis, lastSeenTimestamp);
                    }
                }
                // If no timestamps were found, minLastSeenMillis stays MAX_VALUE and the
                // subtraction goes negative — the clamp reports 0 in that case.
                return Math.max(0L, MathUtils.now() - minLastSeenMillis);
            }
        };
        Stats.export(maxAgeCrossRegion);
    }

    // The HedwigServerStatsGetter functions

    /** Returns a snapshot of the op stats recorded for the given operation type. */
    @Override
    public OpStatsData getOpStatsData(OperationType type) {
        return getOpStatsLogger(type).toOpStatsData();
    }

    /**
     * Looks up the per-topic stat object for (type, topic), optionally creating it.
     *
     * When {@code create} is true a fresh stat object is inserted via putIfAbsent,
     * so concurrent creators converge on a single winner. Returns null when the
     * entry does not exist and {@code create} is false (i.e. we don't own the topic).
     * NOTE(review): if a new PerTopicStatType is ever added without extending the
     * if/else chain below, {@code statToPut} stays null and putIfAbsent on a
     * ConcurrentHashMap would throw NPE — keep the chain in sync with the enum.
     */
    private PerTopicStat getPerTopicStat(PerTopicStatType type, ByteString topic, boolean create) {
        ConcurrentMap<ByteString, PerTopicStat> topicMap = perTopicLoggerMap.get(type);
        PerTopicStat curValue = null, statToPut = null;
        if (create) {
            if (type == PerTopicStatType.CROSS_REGION) {
                statToPut = new PerTopicCrossRegionStat(topic);
            } else if (type == PerTopicStatType.LOCAL_PENDING) {
                statToPut = new PerTopicPendingMessageStat(topic);
            }
            // Returns the previous mapping, or null if our statToPut was installed.
            curValue = topicMap.putIfAbsent(topic, statToPut);
        }
        if (null == curValue) {
            // Either we just installed statToPut (fetch it back), or create was false
            // and this get() resolves to the existing entry or null.
            curValue = topicMap.get(topic);
        }
        return curValue;
    }

    /** Records a sequence id for the topic; silently ignored if we don't own the topic. */
    @Override
    public void setPerTopicSeqId(PerTopicStatType type, ByteString topic, long seqId, boolean create) {
        PerTopicStat curValue = getPerTopicStat(type, topic, create);
        if (null == curValue) {
            // We don't have ownership of the topic.
            return;
        }
        curValue.setSeqId(seqId);
    }

    /** Records the last seen message for the topic; silently ignored if we don't own the topic. */
    @Override
    public void setPerTopicLastSeenMessage(PerTopicStatType type, ByteString topic, PubSubProtocol.Message message, boolean create) {
        PerTopicStat curValue = getPerTopicStat(type, topic, create);
        if (null == curValue) {
            // We don't have ownership of the topic.
            return;
        }
        curValue.setLastSeenMessage(message);
    }

    /** Returns the live (mutable, concurrent) topic->stat map for the given type. */
    @Override
    public ConcurrentMap<ByteString, PerTopicStat> getPerTopicLogger(PerTopicStatType type) {
        return perTopicLoggerMap.get(type);
    }

    /** Drops the per-topic stat entry, e.g. when topic ownership is released. */
    @Override
    public void removePerTopicLogger(PerTopicStatType type, ByteString topic) {
        ConcurrentMap<ByteString, PerTopicStat> topicMap = perTopicLoggerMap.get(type);
        topicMap.remove(topic);
    }

    // Simple counter accessors — each reads the corresponding simple-stat logger.

    @Override
    public long getNumRequestsReceived() {
        return getSimpleStatLogger(HedwigServerSimpleStatType.TOTAL_REQUESTS_RECEIVED).get();
    }

    @Override
    public long getNumRequestsRedirect() {
        return getSimpleStatLogger(HedwigServerSimpleStatType.TOTAL_REQUESTS_REDIRECT).get();
    }

    @Override
    public long getNumMessagesDelivered() {
        return getSimpleStatLogger(HedwigServerSimpleStatType.TOTAL_MESSAGES_DELIVERED).get();
    }

    @Override
    public long getNumTopics() {
        return getSimpleStatLogger(HedwigServerSimpleStatType.NUM_TOPICS).get();
    }

    @Override
    public long getPersistQueueSize() {
        return getSimpleStatLogger(HedwigServerSimpleStatType.PERSIST_QUEUE).get();
    }
}
apache-2.0
recruit-tech/redpen
redpen-core/src/main/java/cc/redpen/parser/BaseDocumentParser.java
4288
/**
 * redpen: a text inspection tool
 * Copyright (c) 2014-2015 Recruit Technologies Co., Ltd. and contributors
 * (see CONTRIBUTORS.md)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *         http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package cc.redpen.parser;

import cc.redpen.RedPenException;
import cc.redpen.model.Document;
import cc.redpen.model.Sentence;
import cc.redpen.tokenizer.RedPenTokenizer;

import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;

import static java.lang.Character.isWhitespace;

/**
 * Abstract Parser class containing common procedures to
 * implements the concrete Parser classes.
 */
public abstract class BaseDocumentParser implements DocumentParser {

    @Override
    public Document parse(InputStream is, SentenceExtractor sentenceExtractor,
                          RedPenTokenizer tokenizer) throws RedPenException {
        // No file name is available for a bare stream.
        return parse(is, Optional.empty(), sentenceExtractor, tokenizer);
    }

    @Override
    public Document parse(String content, SentenceExtractor sentenceExtractor,
                          RedPenTokenizer tokenizer) throws RedPenException {
        // Treat the in-memory string as a UTF-8 encoded stream.
        byte[] raw = content.getBytes(StandardCharsets.UTF_8);
        return parse(new ByteArrayInputStream(raw), sentenceExtractor, tokenizer);
    }

    @Override
    public Document parse(File file, SentenceExtractor sentenceExtractor,
                          RedPenTokenizer tokenizer) throws RedPenException {
        try (InputStream in = new FileInputStream(file)) {
            // Carry the file path along so errors can point at the source file.
            return parse(in, Optional.of(file.getPath()), sentenceExtractor, tokenizer);
        } catch (IOException e) {
            throw new RedPenException(e);
        }
    }

    /**
     * Given input stream, return Document instance from a stream.
     *
     * @param io                input stream containing input content
     * @param fileName          file name
     * @param sentenceExtractor SentenceExtractor object
     * @param tokenizer         tokenizer
     * @return a generated file content
     * @throws cc.redpen.RedPenException if Parser failed to parse input.
     */
    protected abstract Document parse(InputStream io, Optional<String> fileName,
                                      SentenceExtractor sentenceExtractor,
                                      RedPenTokenizer tokenizer) throws RedPenException;

    /**
     * create BufferedReader from InputStream is.
     *
     * @param is InputStream using to parse
     * @return BufferedReader created from InputStream
     */
    protected PreprocessingReader createReader(InputStream is) {
        InputStreamReader utf8Reader = new InputStreamReader(is, StandardCharsets.UTF_8);
        return new PreprocessingReader(utf8Reader, this);
    }

    /**
     * A sentence fragment that remembers, for each character of its content,
     * the (line, column) offset it came from in the original input.
     */
    protected static class ValueWithOffsets extends Sentence {
        /** Creates an empty fragment with no content and no offsets. */
        public ValueWithOffsets() {
            super("", 0);
        }

        public ValueWithOffsets(String content, List<LineOffset> offsetMap) {
            super(content, offsetMap, new ArrayList<>());
        }

        /** @return true when no content has been accumulated yet */
        public boolean isEmpty() {
            return "".equals(getContent());
        }

        /**
         * Appends text and its matching per-character offsets; returns this
         * instance so calls can be chained.
         */
        public ValueWithOffsets append(String line, List<LineOffset> offsets) {
            String combined = getContent() + line;
            setContent(combined);
            getOffsetMap().addAll(offsets);
            return this;
        }

        /**
         * Returns the half-open substring [start, end) together with the
         * corresponding slice of the offset map.
         */
        public ValueWithOffsets extract(int start, int end) {
            if (start == end) {
                return new ValueWithOffsets();
            }
            String piece = getContent().substring(start, end);
            List<LineOffset> pieceOffsets = getOffsetMap().subList(start, end);
            return new ValueWithOffsets(piece, pieceOffsets);
        }
    }

    /**
     * Returns the index of the first non-whitespace character at or after
     * {@code start}, or the line length when the rest of the line is blank.
     */
    protected int skipWhitespace(String line, int start) {
        int pos = start;
        while (pos < line.length() && isWhitespace(line.charAt(pos))) {
            pos++;
        }
        return pos;
    }
}
apache-2.0
Tycheo/coffeemud
com/planet_ink/coffee_mud/Items/Armor/GenEarring.java
6943
package com.planet_ink.coffee_mud.Items.Armor;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

import com.planet_ink.coffee_mud.core.interfaces.*;
import com.planet_ink.coffee_mud.core.*;
import com.planet_ink.coffee_mud.core.collections.*;
import com.planet_ink.coffee_mud.Abilities.interfaces.*;
import com.planet_ink.coffee_mud.Areas.interfaces.*;
import com.planet_ink.coffee_mud.Behaviors.interfaces.*;
import com.planet_ink.coffee_mud.CharClasses.interfaces.*;
import com.planet_ink.coffee_mud.Commands.interfaces.*;
import com.planet_ink.coffee_mud.Common.interfaces.*;
import com.planet_ink.coffee_mud.Exits.interfaces.*;
import com.planet_ink.coffee_mud.Items.Basic.GenItem;
import com.planet_ink.coffee_mud.Items.interfaces.*;
import com.planet_ink.coffee_mud.Libraries.interfaces.*;
import com.planet_ink.coffee_mud.Locales.interfaces.*;
import com.planet_ink.coffee_mud.MOBS.interfaces.*;
import com.planet_ink.coffee_mud.Races.interfaces.*;

/*
   Copyright 2004-2015 Bo Zimmerman

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

/**
 * An earring item that can only be worn in a pierced body part. A wearer's
 * piercings are represented as tattoos named "&lt;WEARLOC&gt;:...PIERCE..."; this
 * class counts available piercings per wear location and refuses to be worn
 * when none are free. When worn, the displayed name is extended with the
 * piercing location(s), e.g. " on both ears".
 */
public class GenEarring extends GenThinArmor
{
	@Override public String ID(){ return "GenEarring";}

	// Lazily-computed display suffix (" on ...") built in recoverPhyStats().
	// FIX: declared volatile — the field is read outside synchronized(this) in a
	// double-checked-locking pattern; without volatile the null-check and the
	// data it guards (wearLocs, below) have no safe publication.
	private volatile String wearLocDesc = null;
	// Maps a wear-location bitmask to the piercing name this earring occupies
	// there. Populated under synchronized(this) in recoverPhyStats(); other
	// GenEarring instances also remove entries from it (see the reservation
	// loop in recoverPhyStats).
	private final Map<Long,String> wearLocs = new TreeMap<Long,String>();

	public GenEarring()
	{
		super();
		setName("a pretty earring");
		setDisplayText("a pretty earring lies here");
		setDescription("It`s very pretty, and has a little clip for going in a pierced bodypart.");
		properWornBitmap=Wearable.WORN_EARS;
		wornLogicalAnd=true;
		basePhyStats().setArmor(0);
		basePhyStats().setWeight(1);
		basePhyStats().setAbility(0);
		baseGoldValue=40;
		layer=(short)-10;
		layerAttributes=Armor.LAYERMASK_MULTIWEAR;
		recoverPhyStats();
		material=RawMaterial.RESOURCE_GOLD;
	}

	/**
	 * Counts the GenEarrings this mob already wears at the given location
	 * (on the same layer / layer attributes as this item).
	 */
	protected int numWorn(final MOB mob, long wornCode)
	{
		int numWorn = 0;
		for(Item I : mob.fetchWornItems(wornCode, layer, layerAttributes))
			if(I instanceof GenEarring)
				numWorn++;
		return numWorn;
	}

	/**
	 * True when the mob has more piercings at the given wear location than
	 * earrings currently worn there. Piercings are tattoos whose name starts
	 * with the upper-cased wear-location name followed by ':' and containing
	 * "PIERCE".
	 */
	protected boolean hasFreePiercing(final MOB mob, long wornCode)
	{
		if(mob==null)
			return false;
		final Wearable.CODES codes = Wearable.CODES.instance();
		final String wearLocName = codes.nameup(wornCode);
		int availablePiercings=0;
		for(final Enumeration<MOB.Tattoo> e=mob.tattoos();e.hasMoreElements();)
		{
			final String tattooName=e.nextElement().tattooName.toUpperCase();
			if(tattooName.startsWith(wearLocName+":")
			&& (tattooName.substring(wearLocName.length()+1).indexOf("PIERCE")>=0))
				availablePiercings++;
		}
		if(availablePiercings==0)
			return false;
		return availablePiercings > numWorn(mob,wornCode);
	}

	/**
	 * Checks the free-piercing requirement against a combined wear-location
	 * bitmask: with wornLogicalAnd, EVERY location in the mask (except HELD)
	 * must have a free piercing; otherwise ANY location (or HELD) suffices.
	 */
	protected boolean hasFreePiercingFor(final MOB mob, long wornCodes)
	{
		final Wearable.CODES codes = Wearable.CODES.instance();
		if(super.wornLogicalAnd)
		{
			for(long code : codes.all())
				if((code != 0)
				&& (code != Wearable.WORN_HELD)
				&& CMath.bset(wornCodes,code)
				&& (!hasFreePiercing(mob, code)))
					return false;
			return true;
		}
		else
		{
			for(long code : codes.all())
				if((code != 0)
				&& CMath.bset(wornCodes,code)
				&&((code == Wearable.WORN_HELD)
					||(hasFreePiercing(mob, code))))
					return true;
			return false;
		}
	}

	/**
	 * Standard wearability check, plus the piercing requirement.
	 * A 'where' of 0 means "anywhere allowed" and defers to whereCantWear().
	 */
	@Override
	public boolean canWear(MOB mob, long where)
	{
		if(!super.canWear(mob, where))
			return false;
		if(where==0)
			return true;
		return hasFreePiercingFor(mob,where);
	}

	/**
	 * Returns the bitmask of locations this cannot currently be worn at;
	 * when the superclass allows everything, also rules out locations
	 * without a free piercing (returning 0 as soon as one qualifies).
	 */
	@Override
	public long whereCantWear(MOB mob)
	{
		long where=super.whereCantWear(mob);
		final Wearable.CODES codes = Wearable.CODES.instance();
		if(where == 0)
		{
			for(long code : codes.all())
				if((code != 0)
				&& fitsOn(code)
				&&(code!=Item.WORN_HELD)
				&&(!CMath.bset(where,code)))
				{
					if(hasFreePiercing(mob, code))
						return 0;
					else
						where = where | code;
				}
		}
		return where;
	}

	/**
	 * Recomputes physical stats and, when worn by a mob, lazily builds the
	 * " on <piercing...>" name suffix. The computation claims specific
	 * piercings by removing them from sibling earrings' wearLocs maps so two
	 * earrings never display the same piercing. Cleared (set back to null)
	 * whenever the item is not being worn.
	 * NOTE(review): if no piercing names are found, wearLocDesc stays null and
	 * the block re-runs on every call — presumably intentional retry; confirm.
	 */
	@Override
	public void recoverPhyStats()
	{
		super.recoverPhyStats();
		if((owner instanceof MOB)&&(!super.amWearingAt(Wearable.IN_INVENTORY)))
		{
			if(wearLocDesc == null)
			{
				synchronized(this)
				{
					if(wearLocDesc == null)
					{
						wearLocs.clear();
						final List<String> dispWearLocs=new LinkedList<String>();
						final MOB mob=(MOB)owner();
						final Wearable.CODES codes = Wearable.CODES.instance();
						// Other worn earrings that overlap this one's wear locations.
						final List<GenEarring> wornStuff = new ArrayList<GenEarring>(2);
						for(final Enumeration<Item> i = mob.items(); i.hasMoreElements();)
						{
							final Item I=i.nextElement();
							if((I instanceof GenEarring)
							&& (I!=this)
							&& (!I.amWearingAt(Item.IN_INVENTORY))
							&& ((I.rawWornCode() & this.rawWornCode()) != 0))
								wornStuff.add((GenEarring)I);
						}
						for(long wornCode : CMath.getSeperateBitMasks(myWornCode))
						{
							// Collect the piercing names this mob has at this location.
							final List<String> availablePiercingsThisLoc = new ArrayList<String>(2);
							final String wearLocName = codes.nameup(wornCode);
							for(final Enumeration<MOB.Tattoo> e=mob.tattoos();e.hasMoreElements();)
							{
								final String tattooName=e.nextElement().tattooName.toUpperCase();
								if(tattooName.startsWith(wearLocName+":")
								&& (tattooName.substring(wearLocName.length()+1).indexOf("PIERCE")>=0))
									availablePiercingsThisLoc.add(tattooName.substring(wearLocName.length()+1).toLowerCase());
							}
							final Long wornCodeL=Long.valueOf(wornCode);
							// Exclude piercings already claimed by sibling earrings,
							// releasing their claim (remove from their wearLocs) as we go.
							for(final GenEarring I : wornStuff)
								if((I.wearLocs!=null)
								&& ((I.rawWornCode() & wornCode)!=0)
								&& (I.wearLocs.containsKey(wornCodeL)))
									availablePiercingsThisLoc.remove(I.wearLocs.remove(wornCodeL));
							if(availablePiercingsThisLoc.size()>0)
							{
								final String loc=availablePiercingsThisLoc.get(0);
								if(!CMLib.english().startsWithAnArticle(loc))
									dispWearLocs.add("both "+loc);
								else
									dispWearLocs.add(loc);
								wearLocs.put(wornCodeL, loc);
							}
						}
						if(wearLocs.size() > 0)
							// Volatile write: publishes the wearLocs contents built above.
							wearLocDesc = " on "+CMLib.english().toEnglishStringList(dispWearLocs);
					}
				}
			}
			if((wearLocDesc != null) && (wearLocDesc.length()>0))
				phyStats().setName(name + wearLocDesc);
		}
		else
			this.wearLocDesc = null;
	}
}
apache-2.0
joshualitt/DataflowJavaSDK
sdk/src/test/java/com/google/cloud/dataflow/sdk/transforms/GroupByKeyTest.java
17314
/*
 * Copyright (C) 2015 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package com.google.cloud.dataflow.sdk.transforms;

import static com.google.cloud.dataflow.sdk.TestUtils.KvMatcher.isKv;
import static com.google.cloud.dataflow.sdk.transforms.display.DisplayDataMatchers.hasDisplayItem;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.collection.IsIterableContainingInAnyOrder.containsInAnyOrder;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;

import com.google.cloud.dataflow.sdk.Pipeline;
import com.google.cloud.dataflow.sdk.coders.BigEndianIntegerCoder;
import com.google.cloud.dataflow.sdk.coders.KvCoder;
import com.google.cloud.dataflow.sdk.coders.MapCoder;
import com.google.cloud.dataflow.sdk.coders.StringUtf8Coder;
import com.google.cloud.dataflow.sdk.options.DataflowPipelineOptions;
import com.google.cloud.dataflow.sdk.options.DirectPipelineOptions;
import com.google.cloud.dataflow.sdk.options.PipelineOptionsFactory;
import com.google.cloud.dataflow.sdk.runners.DataflowPipelineRunner;
import com.google.cloud.dataflow.sdk.runners.DirectPipelineRunner;
import com.google.cloud.dataflow.sdk.testing.DataflowAssert;
import com.google.cloud.dataflow.sdk.testing.RunnableOnService;
import com.google.cloud.dataflow.sdk.testing.TestPipeline;
import com.google.cloud.dataflow.sdk.transforms.display.DisplayData;
import com.google.cloud.dataflow.sdk.transforms.windowing.FixedWindows;
import com.google.cloud.dataflow.sdk.transforms.windowing.InvalidWindows;
import com.google.cloud.dataflow.sdk.transforms.windowing.OutputTimeFns;
import com.google.cloud.dataflow.sdk.transforms.windowing.Sessions;
import com.google.cloud.dataflow.sdk.transforms.windowing.Window;
import com.google.cloud.dataflow.sdk.util.NoopPathValidator;
import com.google.cloud.dataflow.sdk.util.WindowingStrategy;
import com.google.cloud.dataflow.sdk.values.KV;
import com.google.cloud.dataflow.sdk.values.PBegin;
import com.google.cloud.dataflow.sdk.values.PCollection;
import com.google.cloud.dataflow.sdk.values.TimestampedValue;
import com.google.cloud.dataflow.sdk.values.TypeDescriptor;

import org.joda.time.Duration;
import org.joda.time.Instant;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.ExpectedException;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;

import java.util.Arrays;
import java.util.List;
import java.util.Map;

/**
 * Tests for GroupByKey.
 *
 * <p>Covers basic grouping, grouping with windowing, validation errors
 * (non-deterministic key coders, unmergeable windows, unbounded global-window
 * input), window-fn propagation/invalidation, output-timestamp functions,
 * and display data — on both the direct and the Dataflow service runners.
 */
@RunWith(JUnit4.class)
@SuppressWarnings({"rawtypes", "unchecked"})
public class GroupByKeyTest {

  @Rule
  public ExpectedException thrown = ExpectedException.none();

  /** Basic grouping: duplicate keys collapse to one KV with all their values. */
  @Test
  @Category(RunnableOnService.class)
  public void testGroupByKey() {
    List<KV<String, Integer>> ungroupedPairs = Arrays.asList(
        KV.of("k1", 3),
        KV.of("k5", Integer.MAX_VALUE),
        KV.of("k5", Integer.MIN_VALUE),
        KV.of("k2", 66),
        KV.of("k1", 4),
        KV.of("k2", -33),
        KV.of("k3", 0));

    Pipeline p = TestPipeline.create();

    PCollection<KV<String, Integer>> input =
        p.apply(Create.of(ungroupedPairs)
            .withCoder(KvCoder.of(StringUtf8Coder.of(), BigEndianIntegerCoder.of())));

    PCollection<KV<String, Iterable<Integer>>> output =
        input.apply(GroupByKey.<String, Integer>create());

    DataflowAssert.that(output)
        .satisfies(new AssertThatHasExpectedContentsForTestGroupByKey());

    p.run();
  }

  // Serializable matcher for testGroupByKey: one group per key, values unordered.
  static class AssertThatHasExpectedContentsForTestGroupByKey
      implements SerializableFunction<Iterable<KV<String, Iterable<Integer>>>, Void> {
    @Override
    public Void apply(Iterable<KV<String, Iterable<Integer>>> actual) {
      assertThat(actual, containsInAnyOrder(
          isKv(is("k1"), containsInAnyOrder(3, 4)),
          isKv(is("k5"), containsInAnyOrder(Integer.MAX_VALUE,
                                            Integer.MIN_VALUE)),
          isKv(is("k2"), containsInAnyOrder(66, -33)),
          isKv(is("k3"), containsInAnyOrder(0))));
      return null;
    }
  }

  /** Grouping is per (key, window): the same key in different windows yields separate groups. */
  @Test
  @Category(RunnableOnService.class)
  public void testGroupByKeyAndWindows() {
    List<KV<String, Integer>> ungroupedPairs = Arrays.asList(
        KV.of("k1", 3),  // window [0, 5)
        KV.of("k5", Integer.MAX_VALUE), // window [0, 5)
        KV.of("k5", Integer.MIN_VALUE), // window [0, 5)
        KV.of("k2", 66), // window [0, 5)
        KV.of("k1", 4),  // window [5, 10)
        KV.of("k2", -33),  // window [5, 10)
        KV.of("k3", 0));  // window [5, 10)

    Pipeline p = TestPipeline.create();

    PCollection<KV<String, Integer>> input =
        p.apply(Create.timestamped(ungroupedPairs, Arrays.asList(1L, 2L, 3L, 4L, 5L, 6L, 7L))
            .withCoder(KvCoder.of(StringUtf8Coder.of(), BigEndianIntegerCoder.of())));
    PCollection<KV<String, Iterable<Integer>>> output =
        input.apply(Window.<KV<String, Integer>>into(FixedWindows.of(new Duration(5))))
             .apply(GroupByKey.<String, Integer>create());

    DataflowAssert.that(output)
        .satisfies(new AssertThatHasExpectedContentsForTestGroupByKeyAndWindows());

    p.run();
  }

  // Serializable matcher for testGroupByKeyAndWindows: k1/k2 appear once per window.
  static class AssertThatHasExpectedContentsForTestGroupByKeyAndWindows
      implements SerializableFunction<Iterable<KV<String, Iterable<Integer>>>, Void> {
    @Override
    public Void apply(Iterable<KV<String, Iterable<Integer>>> actual) {
      assertThat(actual, containsInAnyOrder(
          isKv(is("k1"), containsInAnyOrder(3)),
          isKv(is("k1"), containsInAnyOrder(4)),
          isKv(is("k5"), containsInAnyOrder(Integer.MAX_VALUE,
                                            Integer.MIN_VALUE)),
          isKv(is("k2"), containsInAnyOrder(66)),
          isKv(is("k2"), containsInAnyOrder(-33)),
          isKv(is("k3"), containsInAnyOrder(0))));
      return null;
    }
  }

  /** Grouping an empty input produces an empty output. */
  @Test
  @Category(RunnableOnService.class)
  public void testGroupByKeyEmpty() {
    List<KV<String, Integer>> ungroupedPairs = Arrays.asList();

    Pipeline p = TestPipeline.create();

    PCollection<KV<String, Integer>> input =
        p.apply(Create.of(ungroupedPairs)
            .withCoder(KvCoder.of(StringUtf8Coder.of(), BigEndianIntegerCoder.of())));

    PCollection<KV<String, Iterable<Integer>>> output =
        input.apply(GroupByKey.<String, Integer>create());

    DataflowAssert.that(output).empty();

    p.run();
  }

  /** A non-deterministic key coder (MapCoder) must be rejected at apply time. */
  @Test
  public void testGroupByKeyNonDeterministic() throws Exception {
    List<KV<Map<String, String>, Integer>> ungroupedPairs = Arrays.asList();

    Pipeline p = TestPipeline.create();

    PCollection<KV<Map<String, String>, Integer>> input =
        p.apply(Create.of(ungroupedPairs)
            .withCoder(
                KvCoder.of(MapCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of()),
                    BigEndianIntegerCoder.of())));

    thrown.expect(IllegalStateException.class);
    thrown.expectMessage("must be deterministic");
    input.apply(GroupByKey.<Map<String, String>, Integer>create());
  }

  /** A non-merging window fn (FixedWindows) passes through GroupByKey unchanged. */
  @Test
  public void testIdentityWindowFnPropagation() {
    Pipeline p = TestPipeline.create();

    List<KV<String, Integer>> ungroupedPairs = Arrays.asList();

    PCollection<KV<String, Integer>> input =
        p.apply(Create.of(ungroupedPairs)
            .withCoder(KvCoder.of(StringUtf8Coder.of(), BigEndianIntegerCoder.of())))
        .apply(Window.<KV<String, Integer>>into(FixedWindows.of(Duration.standardMinutes(1))));

    PCollection<KV<String, Iterable<Integer>>> output =
        input.apply(GroupByKey.<String, Integer>create());

    p.run();

    Assert.assertTrue(output.getWindowingStrategy().getWindowFn().isCompatible(
        FixedWindows.of(Duration.standardMinutes(1))));
  }

  /** A merging window fn (Sessions) becomes InvalidWindows after GroupByKey merges it. */
  @Test
  public void testWindowFnInvalidation() {
    Pipeline p = TestPipeline.create();

    List<KV<String, Integer>> ungroupedPairs = Arrays.asList();

    PCollection<KV<String, Integer>> input =
        p.apply(Create.of(ungroupedPairs)
            .withCoder(KvCoder.of(StringUtf8Coder.of(), BigEndianIntegerCoder.of())))
        .apply(Window.<KV<String, Integer>>into(
            Sessions.withGapDuration(Duration.standardMinutes(1))));

    PCollection<KV<String, Iterable<Integer>>> output =
        input.apply(GroupByKey.<String, Integer>create());

    p.run();

    Assert.assertTrue(
        output.getWindowingStrategy().getWindowFn().isCompatible(
            new InvalidWindows(
                "Invalid",
                Sessions.withGapDuration(
                    Duration.standardMinutes(1)))));
  }

  /**
   * Create a test pipeline that uses the {@link DataflowPipelineRunner} so that {@link GroupByKey}
   * is not expanded. This is used for verifying that even without expansion the proper errors show
   * up.
   */
  private Pipeline createTestServiceRunner() {
    DataflowPipelineOptions options = PipelineOptionsFactory.as(DataflowPipelineOptions.class);
    options.setRunner(DataflowPipelineRunner.class);
    options.setProject("someproject");
    options.setStagingLocation("gs://staging");
    // Avoid real GCS/service access from a unit test.
    options.setPathValidatorClass(NoopPathValidator.class);
    options.setDataflowClient(null);
    return Pipeline.create(options);
  }

  // Pipeline on the local direct runner, used by the error-validation tests below.
  private Pipeline createTestDirectRunner() {
    DirectPipelineOptions options = PipelineOptionsFactory.as(DirectPipelineOptions.class);
    options.setRunner(DirectPipelineRunner.class);
    return Pipeline.create(options);
  }

  /** Direct runner: a second GroupByKey after a merged Sessions window must be rejected. */
  @Test
  public void testInvalidWindowsDirect() {
    Pipeline p = createTestDirectRunner();

    List<KV<String, Integer>> ungroupedPairs = Arrays.asList();

    PCollection<KV<String, Integer>> input =
        p.apply(Create.of(ungroupedPairs)
            .withCoder(KvCoder.of(StringUtf8Coder.of(), BigEndianIntegerCoder.of())))
        .apply(Window.<KV<String, Integer>>into(
            Sessions.withGapDuration(Duration.standardMinutes(1))));

    thrown.expect(IllegalStateException.class);
    thrown.expectMessage("GroupByKey must have a valid Window merge function");
    input
        .apply("GroupByKey", GroupByKey.<String, Integer>create())
        .apply("GroupByKeyAgain", GroupByKey.<String, Iterable<Integer>>create());
  }

  /** Service runner: same invalid-windows rejection without GroupByKey expansion. */
  @Test
  public void testInvalidWindowsService() {
    Pipeline p = createTestServiceRunner();

    List<KV<String, Integer>> ungroupedPairs = Arrays.asList();

    PCollection<KV<String, Integer>> input =
        p.apply(Create.of(ungroupedPairs)
            .withCoder(KvCoder.of(StringUtf8Coder.of(), BigEndianIntegerCoder.of())))
        .apply(Window.<KV<String, Integer>>into(
            Sessions.withGapDuration(Duration.standardMinutes(1))));

    thrown.expect(IllegalStateException.class);
    thrown.expectMessage("GroupByKey must have a valid Window merge function");
    input
        .apply("GroupByKey", GroupByKey.<String, Integer>create())
        .apply("GroupByKeyAgain", GroupByKey.<String, Iterable<Integer>>create());
  }

  /** Window.remerge() restores the original (Sessions) window fn after each GroupByKey. */
  @Test
  public void testRemerge() {
    Pipeline p = TestPipeline.create();

    List<KV<String, Integer>> ungroupedPairs = Arrays.asList();

    PCollection<KV<String, Integer>> input =
        p.apply(Create.of(ungroupedPairs)
            .withCoder(KvCoder.of(StringUtf8Coder.of(), BigEndianIntegerCoder.of())))
        .apply(Window.<KV<String, Integer>>into(
            Sessions.withGapDuration(Duration.standardMinutes(1))));

    PCollection<KV<String, Iterable<Iterable<Integer>>>> middle = input
        .apply("GroupByKey", GroupByKey.<String, Integer>create())
        .apply("Remerge", Window.<KV<String, Iterable<Integer>>>remerge())
        .apply("GroupByKeyAgain", GroupByKey.<String, Iterable<Integer>>create())
        .apply("RemergeAgain", Window.<KV<String, Iterable<Iterable<Integer>>>>remerge());

    p.run();

    Assert.assertTrue(
        middle.getWindowingStrategy().getWindowFn().isCompatible(
            Sessions.withGapDuration(Duration.standardMinutes(1))));
  }

  /** Direct runner: unbounded input in the GlobalWindow without a trigger must be rejected. */
  @Test
  public void testGroupByKeyDirectUnbounded() {
    Pipeline p = createTestDirectRunner();

    // Synthesize an UNBOUNDED PCollection without reading from a real source.
    PCollection<KV<String, Integer>> input =
        p.apply(
            new PTransform<PBegin, PCollection<KV<String, Integer>>>() {
              @Override
              public PCollection<KV<String, Integer>> apply(PBegin input) {
                return PCollection.<KV<String, Integer>>createPrimitiveOutputInternal(
                        input.getPipeline(),
                        WindowingStrategy.globalDefault(),
                        PCollection.IsBounded.UNBOUNDED)
                    .setTypeDescriptorInternal(new TypeDescriptor<KV<String, Integer>>() {});
              }
            });

    thrown.expect(IllegalStateException.class);
    thrown.expectMessage(
        "GroupByKey cannot be applied to non-bounded PCollection in the GlobalWindow without "
            + "a trigger. Use a Window.into or Window.triggering transform prior to GroupByKey.");

    input.apply("GroupByKey", GroupByKey.<String, Integer>create());
  }

  /** Service runner: same unbounded/global-window rejection. */
  @Test
  public void testGroupByKeyServiceUnbounded() {
    Pipeline p = createTestServiceRunner();

    PCollection<KV<String, Integer>> input =
        p.apply(
            new PTransform<PBegin, PCollection<KV<String, Integer>>>() {
              @Override
              public PCollection<KV<String, Integer>> apply(PBegin input) {
                return PCollection.<KV<String, Integer>>createPrimitiveOutputInternal(
                        input.getPipeline(),
                        WindowingStrategy.globalDefault(),
                        PCollection.IsBounded.UNBOUNDED)
                    .setTypeDescriptorInternal(new TypeDescriptor<KV<String, Integer>>() {});
              }
            });

    thrown.expect(IllegalStateException.class);
    thrown.expectMessage(
        "GroupByKey cannot be applied to non-bounded PCollection in the GlobalWindow without "
            + "a trigger. Use a Window.into or Window.triggering transform prior to GroupByKey.");

    input.apply("GroupByKey", GroupByKey.<String, Integer>create());
  }

  /**
   * Tests that when two elements are combined via a GroupByKey their output timestamp agrees
   * with the windowing function customized to actually be the same as the default, the earlier of
   * the two values.
   */
  @Test
  @Category(RunnableOnService.class)
  public void testOutputTimeFnEarliest() {
    Pipeline pipeline = TestPipeline.create();

    pipeline.apply(
        Create.timestamped(
            TimestampedValue.of(KV.of(0, "hello"), new Instant(0)),
            TimestampedValue.of(KV.of(0, "goodbye"), new Instant(10))))
        .apply(Window.<KV<Integer, String>>into(FixedWindows.of(Duration.standardMinutes(10)))
            .withOutputTimeFn(OutputTimeFns.outputAtEarliestInputTimestamp()))
        .apply(GroupByKey.<Integer, String>create())
        .apply(ParDo.of(new AssertTimestamp(new Instant(0))));

    pipeline.run();
  }

  /**
   * Tests that when two elements are combined via a GroupByKey their output timestamp agrees
   * with the windowing function customized to use the latest value.
   */
  @Test
  @Category(RunnableOnService.class)
  public void testOutputTimeFnLatest() {
    Pipeline pipeline = TestPipeline.create();

    pipeline.apply(
        Create.timestamped(
            TimestampedValue.of(KV.of(0, "hello"), new Instant(0)),
            TimestampedValue.of(KV.of(0, "goodbye"), new Instant(10))))
        .apply(Window.<KV<Integer, String>>into(FixedWindows.of(Duration.standardMinutes(10)))
            .withOutputTimeFn(OutputTimeFns.outputAtLatestInputTimestamp()))
        .apply(GroupByKey.<Integer, String>create())
        .apply(ParDo.of(new AssertTimestamp(new Instant(10))));

    pipeline.run();
  }

  // DoFn that asserts every element it processes carries the expected timestamp.
  private static class AssertTimestamp<K, V> extends DoFn<KV<K, V>, Void> {
    private final Instant timestamp;

    public AssertTimestamp(Instant timestamp) {
      this.timestamp = timestamp;
    }

    @Override
    public void processElement(ProcessContext c) throws Exception {
      assertThat(c.timestamp(), equalTo(timestamp));
    }
  }

  /** The transform's default name is simply "GroupByKey". */
  @Test
  public void testGroupByKeyGetName() {
    Assert.assertEquals("GroupByKey", GroupByKey.<String, Integer>create().getName());
  }

  /** Display data: empty by default; the fewKeys variant exposes a "fewKeys" item. */
  @Test
  public void testDisplayData() {
    GroupByKey<String, String> groupByKey = GroupByKey.create();
    GroupByKey<String, String> groupByFewKeys = GroupByKey.create(true);

    DisplayData gbkDisplayData = DisplayData.from(groupByKey);
    DisplayData fewKeysDisplayData = DisplayData.from(groupByFewKeys);

    assertThat(gbkDisplayData.items(), empty());
    assertThat(fewKeysDisplayData, hasDisplayItem("fewKeys", true));
  }
}
apache-2.0
delavior/dubbo
dubbo-test/dubbo-test-benchmark/src/main/java/com/alibaba/dubbo/rpc/benchmark/RpcBenchmarkClient.java
1338
package com.alibaba.dubbo.rpc.benchmark;

import java.lang.reflect.InvocationTargetException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;

/**
 * Benchmark client entry point that reflectively instantiates the
 * {@link ClientRunnable} implementation named by the {@code classname}
 * entry of the benchmark properties (inherited from the superclass).
 */
public class RpcBenchmarkClient extends AbstractBenchmarkClient {

    public static void main(String[] args) throws Exception {
        new RpcBenchmarkClient().run(args);
    }

    /**
     * Creates one benchmark worker via reflection.
     *
     * @param targetIP   server host the worker should connect to
     * @param targetPort server port the worker should connect to
     * @param clientNums number of concurrent clients in this run
     * @param rpcTimeout RPC timeout passed through to the worker
     * @param barrier    barrier used to start all workers together
     * @param latch      latch the worker counts down on completion
     * @param startTime  start timestamp passed through to the worker
     * @param endTime    end timestamp passed through to the worker
     * @return a new {@link ClientRunnable} of the configured class
     * @throws IllegalArgumentException if the {@code classname} property is missing or blank
     * @throws ClassNotFoundException   if the configured class cannot be loaded
     */
    @SuppressWarnings("rawtypes")
    @Override
    public ClientRunnable getClientRunnable(String targetIP, int targetPort, int clientNums, int rpcTimeout,
                                            CyclicBarrier barrier, CountDownLatch latch, long startTime, long endTime)
            throws IllegalArgumentException, SecurityException, InstantiationException, IllegalAccessException,
            InvocationTargetException, NoSuchMethodException, ClassNotFoundException {
        String runnable = properties.getProperty("classname");
        if (runnable == null || runnable.trim().isEmpty()) {
            // Fail fast with a clear diagnostic instead of the confusing
            // NullPointerException that Class.forName(null) would raise.
            throw new IllegalArgumentException(
                    "Benchmark property 'classname' must name a ClientRunnable implementation");
        }
        Class[] parameterTypes = new Class[]{String.class, int.class, int.class, int.class,
                CyclicBarrier.class, CountDownLatch.class, long.class, long.class};
        Object[] parameters = new Object[]{targetIP, targetPort, clientNums, rpcTimeout,
                barrier, latch, startTime, endTime};
        return (ClientRunnable) Class.forName(runnable).getConstructor(parameterTypes).newInstance(parameters);
    }
}
apache-2.0
khuxtable/seaglass
src/main/java/com/seaglasslookandfeel/painter/SearchFieldPainter.java
5024
/*
 * Copyright (c) 2009 Kathryn Huxtable and Kenneth Orr.
 *
 * This file is part of the SeaGlass Pluggable Look and Feel.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * $Id$
 */
package com.seaglasslookandfeel.painter;

import java.awt.Color;
import java.awt.Graphics2D;
import java.awt.Shape;

import javax.swing.JComponent;

import com.seaglasslookandfeel.effect.SeaGlassInternalShadowEffect;
import com.seaglasslookandfeel.painter.util.ShapeGenerator.CornerSize;

/**
 * SearchFieldPainter implementation. Paints the background and border of a
 * search field in its various control states, including the inner drop shadow
 * and the two-ring focus indicator.
 */
public final class SearchFieldPainter extends AbstractCommonColorsPainter {

    /**
     * Control states. BACKGROUND_* states paint the fill; BORDER_* states
     * paint the outline (and, for BORDER_FOCUSED, the focus rings).
     */
    public static enum Which {
        BACKGROUND_DISABLED, BACKGROUND_ENABLED, BACKGROUND_SELECTED,

        BORDER_DISABLED, BORDER_FOCUSED, BORDER_ENABLED,
    }

    // Effect used to render the inner drop shadow along the top of the field.
    private SeaGlassInternalShadowEffect internalShadow = new SeaGlassInternalShadowEffect();

    // State this painter instance was created for.
    private Which state;
    private PaintContext ctx;
    // DISABLED for the *_DISABLED states, ENABLED otherwise (derived once in the constructor).
    private CommonControlState type;
    // True only for BORDER_FOCUSED; selects the focus-ring code path in paintBorder.
    private boolean focused;

    /**
     * Creates a new SearchFieldPainter object.
     *
     * @param state the control state to paint.
     */
    public SearchFieldPainter(Which state) {
        super();
        this.state = state;
        this.ctx = new PaintContext(AbstractRegionPainter.PaintContext.CacheMode.FIXED_SIZES);

        type = (state == Which.BACKGROUND_DISABLED || state == Which.BORDER_DISABLED) ? CommonControlState.DISABLED : CommonControlState.ENABLED;

        focused = (state == Which.BORDER_FOCUSED);
    }

    /**
     * {@inheritDoc}
     *
     * Shrinks the paint area by the focus insets, then dispatches to either
     * the background or border painter according to the configured state.
     */
    protected void doPaint(Graphics2D g, JComponent c, int width, int height, Object[] extendedCacheKeys) {
        int x = focusInsets.left;
        int y = focusInsets.top;

        width  -= focusInsets.left + focusInsets.right;
        height -= focusInsets.top + focusInsets.bottom;

        switch (state) {

        case BACKGROUND_DISABLED:
        case BACKGROUND_ENABLED:
        case BACKGROUND_SELECTED:
            paintBackground(g, c, x, y, width, height);
            break;

        case BORDER_DISABLED:
        case BORDER_ENABLED:
        case BORDER_FOCUSED:
            paintBorder(g, c, x, y, width, height);
            break;
        }
    }

    /**
     * {@inheritDoc}
     */
    protected PaintContext getPaintContext() {
        return ctx;
    }

    /**
     * Fills the field's rounded-rectangle background with the component's
     * background color; when disabled, the color is made half-transparent
     * (alpha 0x80).
     *
     * @param g      the Graphics2D context to paint with.
     * @param c      the component being painted.
     * @param x      the x offset of the paint area.
     * @param y      the y offset of the paint area.
     * @param width  the width of the paint area.
     * @param height the height of the paint area.
     */
    private void paintBackground(Graphics2D g, JComponent c, int x, int y, int width, int height) {
        Color color = c.getBackground();

        if (type == CommonControlState.DISABLED) {
            // Same RGB, half alpha: visually dims the disabled field.
            color = new Color(color.getRed(), color.getGreen(), color.getBlue(), 0x80);
        }

        // Inset by 1px on every side so the fill stays inside the border outline.
        Shape s = shapeGenerator.createRoundRectangle(x + 1, y + 1, width - 2, height - 2, CornerSize.ROUND_HEIGHT);

        g.setPaint(color);
        g.fill(s);
    }

    /**
     * Draws the field border: when focused, an outer and an inner focus ring
     * (2px and 1px outside the border); when enabled, an internal drop shadow
     * inside the field; and finally the border outline itself.
     *
     * @param g      the Graphics2D context to paint with.
     * @param c      the component being painted.
     * @param x      the x offset of the paint area.
     * @param y      the y offset of the paint area.
     * @param width  the width of the paint area.
     * @param height the height of the paint area.
     */
    private void paintBorder(Graphics2D g, JComponent c, int x, int y, int width, int height) {
        // Toolbar placement switches the focus/border colors to the toolbar variants.
        boolean useToolBarColors = isInToolBar(c);
        Shape   s;

        if (focused) {
            // Outer focus ring, 2px beyond the border.
            s = shapeGenerator.createRoundRectangle(x - 2, y - 2, width + 4 - 1, height + 4 - 1, CornerSize.ROUND_HEIGHT_DRAW);
            g.setPaint(getFocusPaint(s, FocusType.OUTER_FOCUS, useToolBarColors));
            g.draw(s);
            // Inner focus ring, 1px beyond the border.
            s = shapeGenerator.createRoundRectangle(x - 1, y - 1, width + 2 - 1, height + 2 - 1, CornerSize.ROUND_HEIGHT_DRAW);
            g.setPaint(getFocusPaint(s, FocusType.INNER_FOCUS, useToolBarColors));
            g.draw(s);
        }

        if (type != CommonControlState.DISABLED) {
            // Inner drop shadow is only painted for enabled fields.
            s = shapeGenerator.createInternalDropShadowRounded(x + 1, y + 1, width - 2, height - 2);
            internalShadow.fill(g, s, true, true);
        }

        g.setPaint(getTextBorderPaint(type, !focused && useToolBarColors));
        s = shapeGenerator.createRoundRectangle(x, y, width - 1, height - 1, CornerSize.ROUND_HEIGHT_DRAW);
        g.draw(s);
    }
}
apache-2.0
maheshika/carbon-data
components/data-services/org.wso2.carbon.dataservices.ui/src/main/java/org/wso2/carbon/dataservices/ui/beans/DataServiceConfigurationElement.java
1624
/*
 * Copyright 2005-2007 WSO2, Inc. (http://wso2.com)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.wso2.carbon.dataservices.ui.beans;

import org.apache.axiom.om.OMElement;

/**
 * Base class for data service configuration elements. Each element may carry
 * a set of required user roles and an optional XSD type, and must be able to
 * serialize itself to an XML representation via {@link #buildXML()}.
 */
public abstract class DataServiceConfigurationElement {

    // Roles required to use this element; null when none were set.
    // (Field name fixed from the original typo "requireddRoles" — the field is
    // private, so the rename is invisible to subclasses and callers.)
    private String requiredRoles;
    // Optional XSD type associated with this element; null when none was set.
    private String xsdType;

    public DataServiceConfigurationElement(String requiredRoles, String xsdType) {
        this.requiredRoles = requiredRoles;
        this.xsdType = xsdType;
    }

    public DataServiceConfigurationElement(String requiredRoles) {
        this.requiredRoles = requiredRoles;
    }

    public DataServiceConfigurationElement() {
    }

    /** @return the roles required for this element, or null if none were set */
    public String getRequiredRoles() {
        return requiredRoles;
    }

    /** @return the XSD type of this element, or null if none was set */
    public String getXsdType() {
        return xsdType;
    }

    public void setRequiredRoles(String userRoles) {
        this.requiredRoles = userRoles;
    }

    // NOTE(review): method name keeps the original lower-case "x"
    // ("setxsdType") for backward compatibility; do not rename without
    // auditing existing call sites.
    public void setxsdType(String xsType) {
        this.xsdType = xsType;
    }

    /**
     * Generates XML representation of Object
     *
     * @return OMElement
     */
    public abstract OMElement buildXML();
}
apache-2.0
vtkhir/kaa
server/node/src/main/java/org/kaaproject/kaa/server/operations/service/event/UserRouteInfo.java
3594
/*
 * Copyright 2014-2016 CyberVision, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.kaaproject.kaa.server.operations.service.event;

import org.kaaproject.kaa.server.operations.service.akka.messages.core.route.RouteOperation;

/**
 * Immutable value object describing a user route: the tenant and user it
 * belongs to, an optional server id, and the route operation to apply.
 */
public final class UserRouteInfo {

    private final String serverId;
    private final String tenantId;
    private final String userId;
    private final RouteOperation routeOperation;

    /**
     * Convenience constructor: no server id, ADD operation.
     *
     * @param tenantId the tenant id
     * @param userId   the user id
     */
    public UserRouteInfo(String tenantId, String userId) {
        this(tenantId, userId, null, RouteOperation.ADD);
    }

    /**
     * Create new instance of <code>UserRouteInfo</code>.
     *
     * @param tenantId       the tenant id
     * @param userId         the user id
     * @param serverId       the server id
     * @param routeOperation the route operation
     */
    public UserRouteInfo(String tenantId, String userId, String serverId, RouteOperation routeOperation) {
        this.tenantId = tenantId;
        this.userId = userId;
        this.serverId = serverId;
        this.routeOperation = routeOperation;
    }

    public String getServerId() {
        return serverId;
    }

    public String getTenantId() {
        return tenantId;
    }

    public String getUserId() {
        return userId;
    }

    public RouteOperation getRouteOperation() {
        return routeOperation;
    }

    @Override
    public String toString() {
        // Produces the same text as the original StringBuilder version.
        return "UserRouteInfo [serverId=" + serverId
                + ", tenantId=" + tenantId
                + ", userId=" + userId
                + ", routeOperation=" + routeOperation
                + "]";
    }

    @Override
    public int hashCode() {
        // Same 31-based accumulation (and field order) as the original,
        // so hash values are unchanged.
        final int prime = 31;
        int result = 1;
        result = prime * result + hashOf(routeOperation);
        result = prime * result + hashOf(serverId);
        result = prime * result + hashOf(tenantId);
        result = prime * result + hashOf(userId);
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        UserRouteInfo other = (UserRouteInfo) obj;
        // Enum compared by identity; the String fields null-safely by value.
        return routeOperation == other.routeOperation
                && sameValue(serverId, other.serverId)
                && sameValue(tenantId, other.tenantId)
                && sameValue(userId, other.userId);
    }

    // Null-safe hashCode: null contributes 0, as in the original.
    private static int hashOf(Object value) {
        return value == null ? 0 : value.hashCode();
    }

    // Null-safe equality: two nulls are equal, as in the original.
    private static boolean sameValue(Object left, Object right) {
        return left == null ? right == null : left.equals(right);
    }
}
apache-2.0
sekikn/ambari
ambari-server/src/test/java/org/apache/ambari/server/state/DesiredConfigTest.java
2698
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.ambari.server.state;

import java.util.Arrays;
import java.util.List;

import org.junit.Assert;
import org.junit.Test;

import nl.jqno.equalsverifier.EqualsVerifier;
import nl.jqno.equalsverifier.Warning;

/**
 * Tests desired config instances.
 */
public class DesiredConfigTest {

  /** Verifies basic property round-trips and host-override assignment. */
  @Test
  public void testDesiredConfig() throws Exception {
    DesiredConfig desiredConfig = new DesiredConfig();
    desiredConfig.setServiceName("service");
    desiredConfig.setTag("global");

    Assert.assertEquals("Expected service 'service'", "service", desiredConfig.getServiceName());
    Assert.assertEquals("Expected version 'global'", "global", desiredConfig.getTag());
    Assert.assertEquals("Expected no host overrides", 0, desiredConfig.getHostOverrides().size());

    List<DesiredConfig.HostOverride> overrides = Arrays.asList(
        new DesiredConfig.HostOverride("h1", "v2"),
        new DesiredConfig.HostOverride("h2", "v3"));

    desiredConfig.setHostOverrides(overrides);
    Assert.assertNotNull("Expected host overrides to be set", desiredConfig.getHostOverrides());
    Assert.assertEquals("Expected host override equality", overrides, desiredConfig.getHostOverrides());
  }

  /** Verifies the HostOverride constructor populates name and version tag. */
  @Test
  public void testHostOverride() throws Exception {
    DesiredConfig.HostOverride hostOverride = new DesiredConfig.HostOverride("h1", "v1");

    Assert.assertNotNull(hostOverride.getName());
    Assert.assertNotNull(hostOverride.getVersionTag());
    Assert.assertEquals("Expected override host 'h1'", "h1", hostOverride.getName());
    Assert.assertEquals("Expected override version 'v1'", "v1", hostOverride.getVersionTag());
  }

  /** Checks the equals/hashCode contract of DesiredConfig via EqualsVerifier. */
  @Test
  public void testEquals() throws Exception {
    EqualsVerifier
        .forClass(DesiredConfig.class)
        .usingGetClass()
        .suppress(Warning.NONFINAL_FIELDS)
        .verify();
  }

  /** Checks the equals/hashCode contract of HostOverride via EqualsVerifier. */
  @Test
  public void testHostOverride_Equals() throws Exception {
    EqualsVerifier
        .forClass(DesiredConfig.HostOverride.class)
        .usingGetClass()
        .verify();
  }
}
apache-2.0
Deepnekroz/kaa
server/common/dao/src/main/java/org/kaaproject/kaa/server/common/dao/model/EndpointUser.java
987
/**
 * Copyright 2014-2016 CyberVision, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.kaaproject.kaa.server.common.dao.model;

import java.util.List;

import org.kaaproject.kaa.common.dto.EndpointUserDto;
import org.kaaproject.kaa.common.dto.HasVersion;

/**
 * DAO-model contract for an endpoint user entity. Implementations can be
 * converted to an {@link EndpointUserDto} (via {@code ToDto}) and expose a
 * version field (via {@link HasVersion}).
 */
public interface EndpointUser extends ToDto<EndpointUserDto>, HasVersion {

    /**
     * @return the identifiers of the endpoints associated with this user
     *         (presumably endpoint entity ids — confirm against implementations)
     */
    List<String> getEndpointIds();

    /**
     * Replaces the list of endpoint identifiers associated with this user.
     *
     * @param endpointIds the endpoint identifiers to set
     */
    void setEndpointIds(List<String> endpointIds);

    /**
     * @return the identifier of this endpoint user entity
     */
    String getId();
}
apache-2.0
stanlyxiang/incubator-hawq
pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveLineBreakAccessor.java
2108
package org.apache.hawq.pxf.plugins.hive;

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

import org.apache.hawq.pxf.api.utilities.InputData;
import org.apache.hawq.pxf.plugins.hive.utilities.HiveUtilities;
import org.apache.hadoop.mapred.*;

import java.io.IOException;

import static org.apache.hawq.pxf.plugins.hive.utilities.HiveUtilities.PXF_HIVE_SERDES;

/**
 * Specialization of HiveAccessor for a Hive table stored as Text files.
 * Reads records line by line through Hadoop's {@code TextInputFormat} /
 * {@code LineRecordReader}.
 * Use together with {@link HiveInputFormatFragmenter}/{@link HiveStringPassResolver}.
 */
public class HiveLineBreakAccessor extends HiveAccessor {

    /**
     * Constructs a HiveLineBreakAccessor.
     *
     * Wires a {@code TextInputFormat} into the parent accessor, configures it
     * with the inherited {@code jobConf}, then parses the PXF user data
     * (expecting the LAZY_SIMPLE_SERDE serde) to initialize partition fields
     * and the filter-in-fragmenter flag.
     *
     * @param input input containing user data
     * @throws Exception if user data was wrong
     */
    public HiveLineBreakAccessor(InputData input) throws Exception {
        super(input, new TextInputFormat());
        ((TextInputFormat) inputFormat).configure(jobConf);
        HiveUserData hiveUserData = HiveUtilities.parseHiveUserData(input, PXF_HIVE_SERDES.LAZY_SIMPLE_SERDE);
        initPartitionFields(hiveUserData.getPartitionKeys());
        filterInFragmenter = hiveUserData.isFilterInFragmenter();
    }

    /**
     * Returns the record reader for one split of the text file.
     *
     * @param jobConf the Hadoop job configuration
     * @param split   the file split to read (must be a {@code FileSplit})
     * @return a {@code LineRecordReader} over the given split
     * @throws IOException if the reader cannot be created
     */
    @Override
    protected Object getReader(JobConf jobConf, InputSplit split) throws IOException {
        return new LineRecordReader(jobConf, (FileSplit) split);
    }
}
apache-2.0
gaohoward/activemq-artemis
tests/unit-tests/src/test/java/org/apache/activemq/artemis/tests/unit/core/server/impl/QueueImplTest.java
42768
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.activemq.artemis.tests.unit.core.server.impl; import java.util.ArrayList; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import org.apache.activemq.artemis.api.core.Message; import org.apache.activemq.artemis.api.core.SimpleString; import org.apache.activemq.artemis.api.core.client.ClientMessage; import org.apache.activemq.artemis.api.core.client.ClientProducer; import org.apache.activemq.artemis.api.core.client.ClientSession; import org.apache.activemq.artemis.api.core.client.ClientSessionFactory; import org.apache.activemq.artemis.api.core.client.ServerLocator; import org.apache.activemq.artemis.core.filter.Filter; import org.apache.activemq.artemis.core.filter.impl.FilterImpl; import org.apache.activemq.artemis.core.postoffice.impl.LocalQueueBinding; import org.apache.activemq.artemis.core.server.ActiveMQServer; import org.apache.activemq.artemis.core.server.ActiveMQServers; import 
org.apache.activemq.artemis.core.server.Consumer; import org.apache.activemq.artemis.core.server.HandleStatus; import org.apache.activemq.artemis.core.server.MessageReference; import org.apache.activemq.artemis.core.server.Queue; import org.apache.activemq.artemis.core.server.impl.QueueImpl; import org.apache.activemq.artemis.core.settings.impl.AddressSettings; import org.apache.activemq.artemis.selector.filter.Filterable; import org.apache.activemq.artemis.tests.unit.core.server.impl.fakes.FakeConsumer; import org.apache.activemq.artemis.tests.unit.core.server.impl.fakes.FakeFilter; import org.apache.activemq.artemis.tests.unit.core.server.impl.fakes.FakePostOffice; import org.apache.activemq.artemis.tests.util.ActiveMQTestBase; import org.apache.activemq.artemis.utils.ActiveMQThreadFactory; import org.apache.activemq.artemis.utils.FutureLatch; import org.apache.activemq.artemis.utils.actors.ArtemisExecutor; import org.apache.activemq.artemis.utils.collections.LinkedListIterator; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; public class QueueImplTest extends ActiveMQTestBase { // The tests ---------------------------------------------------------------- private ScheduledExecutorService scheduledExecutor; private ExecutorService executor; @Override @Before public void setUp() throws Exception { super.setUp(); scheduledExecutor = Executors.newSingleThreadScheduledExecutor(ActiveMQThreadFactory.defaultThreadFactory()); executor = Executors.newSingleThreadExecutor(ActiveMQThreadFactory.defaultThreadFactory()); } @Override @After public void tearDown() throws Exception { scheduledExecutor.shutdownNow(); executor.shutdownNow(); super.tearDown(); } private static final SimpleString queue1 = new SimpleString("queue1"); private static final SimpleString address1 = new SimpleString("address1"); @Test public void testName() { final SimpleString name = new SimpleString("oobblle"); QueueImpl queue = getNamedQueue(name); 
Assert.assertEquals(name, queue.getName()); } @Test public void testDurable() { QueueImpl queue = getNonDurableQueue(); Assert.assertFalse(queue.isDurable()); queue = getDurableQueue(); Assert.assertTrue(queue.isDurable()); } @Test public void testAddRemoveConsumer() throws Exception { Consumer cons1 = new FakeConsumer(); Consumer cons2 = new FakeConsumer(); Consumer cons3 = new FakeConsumer(); QueueImpl queue = getTemporaryQueue(); Assert.assertEquals(0, queue.getConsumerCount()); queue.addConsumer(cons1); Assert.assertEquals(1, queue.getConsumerCount()); queue.removeConsumer(cons1); Assert.assertEquals(0, queue.getConsumerCount()); queue.addConsumer(cons1); queue.addConsumer(cons2); queue.addConsumer(cons3); Assert.assertEquals(3, queue.getConsumerCount()); queue.removeConsumer(new FakeConsumer()); Assert.assertEquals(3, queue.getConsumerCount()); queue.removeConsumer(cons1); Assert.assertEquals(2, queue.getConsumerCount()); queue.removeConsumer(cons2); Assert.assertEquals(1, queue.getConsumerCount()); queue.removeConsumer(cons3); Assert.assertEquals(0, queue.getConsumerCount()); queue.removeConsumer(cons3); } @Test public void testGetFilter() { QueueImpl queue = getTemporaryQueue(); Assert.assertNull(queue.getFilter()); Filter filter = new Filter() { @Override public boolean match(final Message message) { return false; } @Override public boolean match(Map<String, String> map) { return false; } @Override public boolean match(Filterable filterable) { return false; } @Override public SimpleString getFilterString() { return null; } }; queue = getFilteredQueue(filter); Assert.assertEquals(filter, queue.getFilter()); } @Test public void testSimpleadd() { QueueImpl queue = getTemporaryQueue(); final int numMessages = 10; for (int i = 0; i < numMessages; i++) { MessageReference ref = generateReference(queue, i); queue.addTail(ref); } Assert.assertEquals(numMessages, getMessageCount(queue)); Assert.assertEquals(0, queue.getScheduledCount()); Assert.assertEquals(0, 
queue.getDeliveringCount()); } @Test public void testRate() throws InterruptedException { QueueImpl queue = getTemporaryQueue(); final int numMessages = 10; for (int i = 0; i < numMessages; i++) { MessageReference ref = generateReference(queue, i); queue.addTail(ref); } Thread.sleep(1000); float rate = queue.getRate(); Assert.assertTrue(rate <= 10.0f); System.out.println("Rate: " + rate); } @Test public void testSimpleNonDirectDelivery() throws Exception { QueueImpl queue = getTemporaryQueue(); final int numMessages = 10; List<MessageReference> refs = new ArrayList<>(); for (int i = 0; i < numMessages; i++) { MessageReference ref = generateReference(queue, i); refs.add(ref); queue.addTail(ref); } Assert.assertEquals(10, getMessageCount(queue)); Assert.assertEquals(0, queue.getScheduledCount()); Assert.assertEquals(0, queue.getDeliveringCount()); // Now add a consumer FakeConsumer consumer = new FakeConsumer(); queue.addConsumer(consumer); Assert.assertTrue(consumer.getReferences().isEmpty()); Assert.assertEquals(10, getMessageCount(queue)); Assert.assertEquals(0, queue.getScheduledCount()); queue.deliverNow(); assertRefListsIdenticalRefs(refs, consumer.getReferences()); Assert.assertEquals(numMessages, getMessageCount(queue)); Assert.assertEquals(0, queue.getScheduledCount()); Assert.assertEquals(numMessages, queue.getDeliveringCount()); } @Test public void testBusyConsumer() throws Exception { QueueImpl queue = getTemporaryQueue(); FakeConsumer consumer = new FakeConsumer(); consumer.setStatusImmediate(HandleStatus.BUSY); queue.addConsumer(consumer); final int numMessages = 10; List<MessageReference> refs = new ArrayList<>(); for (int i = 0; i < numMessages; i++) { MessageReference ref = generateReference(queue, i); refs.add(ref); queue.addTail(ref); } Assert.assertEquals(10, getMessageCount(queue)); Assert.assertEquals(0, queue.getScheduledCount()); Assert.assertEquals(0, queue.getDeliveringCount()); queue.deliverNow(); Assert.assertEquals(10, 
getMessageCount(queue)); Assert.assertEquals(0, queue.getScheduledCount()); Assert.assertEquals(0, queue.getDeliveringCount()); Assert.assertTrue(consumer.getReferences().isEmpty()); consumer.setStatusImmediate(HandleStatus.HANDLED); queue.deliverNow(); assertRefListsIdenticalRefs(refs, consumer.getReferences()); Assert.assertEquals(10, getMessageCount(queue)); Assert.assertEquals(0, queue.getScheduledCount()); Assert.assertEquals(10, queue.getDeliveringCount()); } @Test public void testBusyConsumerThenAddMoreMessages() throws Exception { QueueImpl queue = getTemporaryQueue(); FakeConsumer consumer = new FakeConsumer(); consumer.setStatusImmediate(HandleStatus.BUSY); queue.addConsumer(consumer); final int numMessages = 10; List<MessageReference> refs = new ArrayList<>(); for (int i = 0; i < numMessages; i++) { MessageReference ref = generateReference(queue, i); refs.add(ref); queue.addTail(ref); } Assert.assertEquals(10, getMessageCount(queue)); Assert.assertEquals(0, queue.getScheduledCount()); Assert.assertEquals(0, queue.getDeliveringCount()); queue.deliverNow(); Assert.assertEquals(10, getMessageCount(queue)); Assert.assertEquals(0, queue.getScheduledCount()); Assert.assertEquals(0, queue.getDeliveringCount()); Assert.assertTrue(consumer.getReferences().isEmpty()); for (int i = numMessages; i < numMessages * 2; i++) { MessageReference ref = generateReference(queue, i); refs.add(ref); queue.addTail(ref); } Assert.assertEquals(20, getMessageCount(queue)); Assert.assertEquals(0, queue.getScheduledCount()); Assert.assertEquals(0, queue.getDeliveringCount()); Assert.assertTrue(consumer.getReferences().isEmpty()); consumer.setStatusImmediate(HandleStatus.HANDLED); for (int i = numMessages * 2; i < numMessages * 3; i++) { MessageReference ref = generateReference(queue, i); refs.add(ref); queue.addTail(ref); } queue.deliverNow(); assertRefListsIdenticalRefs(refs, consumer.getReferences()); Assert.assertEquals(30, getMessageCount(queue)); Assert.assertEquals(0, 
queue.getScheduledCount()); Assert.assertEquals(30, queue.getDeliveringCount()); } @Test public void testaddHeadadd() throws Exception { QueueImpl queue = getTemporaryQueue(); final int numMessages = 10; List<MessageReference> refs1 = new ArrayList<>(); for (int i = 0; i < numMessages; i++) { MessageReference ref = generateReference(queue, i); refs1.add(ref); queue.addTail(ref); } LinkedList<MessageReference> refs2 = new LinkedList<>(); for (int i = 0; i < numMessages; i++) { MessageReference ref = generateReference(queue, i + numMessages); refs2.addFirst(ref); queue.addHead(ref, false); } List<MessageReference> refs3 = new ArrayList<>(); for (int i = 0; i < numMessages; i++) { MessageReference ref = generateReference(queue, i + 2 * numMessages); refs3.add(ref); queue.addTail(ref); } FakeConsumer consumer = new FakeConsumer(); queue.addConsumer(consumer); queue.deliverNow(); List<MessageReference> allRefs = new ArrayList<>(); allRefs.addAll(refs2); allRefs.addAll(refs1); allRefs.addAll(refs3); assertRefListsIdenticalRefs(allRefs, consumer.getReferences()); } @Test public void testChangeConsumersAndDeliver() throws Exception { QueueImpl queue = getTemporaryQueue(); final int numMessages = 10; List<MessageReference> refs = new ArrayList<>(); for (int i = 0; i < numMessages; i++) { MessageReference ref = generateReference(queue, i); refs.add(ref); queue.addTail(ref); } Assert.assertEquals(numMessages, getMessageCount(queue)); Assert.assertEquals(0, queue.getScheduledCount()); Assert.assertEquals(0, queue.getDeliveringCount()); FakeConsumer cons1 = new FakeConsumer(); queue.addConsumer(cons1); queue.deliverNow(); Assert.assertEquals(numMessages, getMessageCount(queue)); Assert.assertEquals(0, queue.getScheduledCount()); Assert.assertEquals(numMessages, queue.getDeliveringCount()); assertRefListsIdenticalRefs(refs, cons1.getReferences()); FakeConsumer cons2 = new FakeConsumer(); queue.addConsumer(cons2); Assert.assertEquals(2, queue.getConsumerCount()); 
cons1.getReferences().clear(); for (MessageReference ref : refs) { queue.acknowledge(ref); } refs.clear(); for (int i = 0; i < 2 * numMessages; i++) { MessageReference ref = generateReference(queue, i); refs.add(ref); queue.addTail(ref); } queue.deliverNow(); Assert.assertEquals(numMessages * 2, getMessageCount(queue)); Assert.assertEquals(0, queue.getScheduledCount()); Assert.assertEquals(numMessages * 2, queue.getDeliveringCount()); Assert.assertEquals(numMessages, cons1.getReferences().size()); Assert.assertEquals(numMessages, cons2.getReferences().size()); cons1.getReferences().clear(); cons2.getReferences().clear(); for (MessageReference ref : refs) { queue.acknowledge(ref); } refs.clear(); FakeConsumer cons3 = new FakeConsumer(); queue.addConsumer(cons3); Assert.assertEquals(3, queue.getConsumerCount()); for (int i = 0; i < 3 * numMessages; i++) { MessageReference ref = generateReference(queue, i); refs.add(ref); queue.addTail(ref); } queue.deliverNow(); Assert.assertEquals(numMessages * 3, getMessageCount(queue)); Assert.assertEquals(0, queue.getScheduledCount()); Assert.assertEquals(numMessages * 3, queue.getDeliveringCount()); Assert.assertEquals(numMessages, cons1.getReferences().size()); Assert.assertEquals(numMessages, cons2.getReferences().size()); Assert.assertEquals(numMessages, cons3.getReferences().size()); queue.removeConsumer(cons1); cons3.getReferences().clear(); cons2.getReferences().clear(); for (MessageReference ref : refs) { queue.acknowledge(ref); } refs.clear(); for (int i = 0; i < 2 * numMessages; i++) { MessageReference ref = generateReference(queue, i); refs.add(ref); queue.addTail(ref); } queue.deliverNow(); Assert.assertEquals(numMessages * 2, getMessageCount(queue)); Assert.assertEquals(0, queue.getScheduledCount()); Assert.assertEquals(numMessages * 2, queue.getDeliveringCount()); Assert.assertEquals(numMessages, cons2.getReferences().size()); Assert.assertEquals(numMessages, cons3.getReferences().size()); 
queue.removeConsumer(cons3); cons2.getReferences().clear(); for (MessageReference ref : refs) { queue.acknowledge(ref); } refs.clear(); for (int i = 0; i < numMessages; i++) { MessageReference ref = generateReference(queue, i); refs.add(ref); queue.addTail(ref); } queue.deliverNow(); Assert.assertEquals(numMessages, getMessageCount(queue)); Assert.assertEquals(0, queue.getScheduledCount()); Assert.assertEquals(numMessages, queue.getDeliveringCount()); Assert.assertEquals(numMessages, cons2.getReferences().size()); } @Test public void testRoundRobinWithQueueing() throws Exception { QueueImpl queue = getTemporaryQueue(); final int numMessages = 10; List<MessageReference> refs = new ArrayList<>(); queue.pause(); // Test first with queueing FakeConsumer cons1 = new FakeConsumer(); FakeConsumer cons2 = new FakeConsumer(); queue.addConsumer(cons1); queue.addConsumer(cons2); for (int i = 0; i < numMessages; i++) { MessageReference ref = generateReference(queue, i); refs.add(ref); queue.addTail(ref); } queue.resume(); // Need to make sure the consumers will receive the messages before we do these assertions long timeout = System.currentTimeMillis() + 5000; while (cons1.getReferences().size() != numMessages / 2 && timeout > System.currentTimeMillis()) { Thread.sleep(1); } while (cons2.getReferences().size() != numMessages / 2 && timeout > System.currentTimeMillis()) { Thread.sleep(1); } Assert.assertEquals(numMessages / 2, cons1.getReferences().size()); Assert.assertEquals(numMessages / 2, cons2.getReferences().size()); for (int i = 0; i < numMessages; i++) { MessageReference ref; ref = i % 2 == 0 ? 
cons1.getReferences().get(i / 2) : cons2.getReferences().get(i / 2); Assert.assertEquals(refs.get(i), ref); } } @Test public void testWithPriorities() throws Exception { QueueImpl queue = getTemporaryQueue(); final int numMessages = 10; List<MessageReference> refs = new ArrayList<>(); for (int i = 0; i < numMessages; i++) { MessageReference ref = generateReference(queue, i); ref.getMessage().setPriority((byte) i); refs.add(ref); queue.addTail(ref); } queue.deliverNow(); FakeConsumer consumer = new FakeConsumer(); queue.addConsumer(consumer); queue.deliverNow(); List<MessageReference> receivedRefs = consumer.getReferences(); // Should be in reverse order Assert.assertEquals(refs.size(), receivedRefs.size()); for (int i = 0; i < numMessages; i++) { Assert.assertEquals(refs.get(i), receivedRefs.get(9 - i)); } } @Test public void testConsumerWithFiltersDirect() throws Exception { testConsumerWithFilters(true); } @Test public void testConsumerWithFiltersQueueing() throws Exception { testConsumerWithFilters(false); } @Test public void testConsumerWithFilterAddAndRemove() { QueueImpl queue = getTemporaryQueue(); Filter filter = new FakeFilter("fruit", "orange"); FakeConsumer consumer = new FakeConsumer(filter); } @Test public void testIterator() { QueueImpl queue = getTemporaryQueue(); final int numMessages = 20; List<MessageReference> refs = new ArrayList<>(); for (int i = 0; i < numMessages; i++) { MessageReference ref = generateReference(queue, i); queue.addTail(ref); refs.add(ref); } Assert.assertEquals(numMessages, getMessageCount(queue)); Iterator<MessageReference> iterator = queue.iterator(); List<MessageReference> list = new ArrayList<>(); while (iterator.hasNext()) { list.add(iterator.next()); } assertRefListsIdenticalRefs(refs, list); } private void awaitExecution() { FutureLatch future = new FutureLatch(); executor.execute(future); future.await(10000); } @Test public void testConsumeWithFiltersAddAndRemoveConsumer() throws Exception { QueueImpl queue = 
getTemporaryQueue(); Filter filter = new FakeFilter("fruit", "orange"); FakeConsumer consumer = new FakeConsumer(filter); queue.addConsumer(consumer); List<MessageReference> refs = new ArrayList<>(); MessageReference ref1 = generateReference(queue, 1); ref1.getMessage().putStringProperty(new SimpleString("fruit"), new SimpleString("banana")); queue.addTail(ref1); MessageReference ref2 = generateReference(queue, 2); ref2.getMessage().putStringProperty(new SimpleString("fruit"), new SimpleString("orange")); queue.addTail(ref2); refs.add(ref2); Assert.assertEquals(2, getMessageCount(queue)); awaitExecution(); Assert.assertEquals(1, consumer.getReferences().size()); Assert.assertEquals(1, queue.getDeliveringCount()); assertRefListsIdenticalRefs(refs, consumer.getReferences()); queue.acknowledge(ref2); queue.removeConsumer(consumer); queue.addConsumer(consumer); queue.deliverNow(); refs.clear(); consumer.clearReferences(); MessageReference ref3 = generateReference(queue, 3); ref3.getMessage().putStringProperty(new SimpleString("fruit"), new SimpleString("banana")); queue.addTail(ref3); MessageReference ref4 = generateReference(queue, 4); ref4.getMessage().putStringProperty(new SimpleString("fruit"), new SimpleString("orange")); queue.addTail(ref4); refs.add(ref4); Assert.assertEquals(3, getMessageCount(queue)); awaitExecution(); Assert.assertEquals(1, consumer.getReferences().size()); Assert.assertEquals(1, queue.getDeliveringCount()); assertRefListsIdenticalRefs(refs, consumer.getReferences()); } @Test public void testBusyConsumerWithFilterFirstCallBusy() throws Exception { QueueImpl queue = getTemporaryQueue(); FakeConsumer consumer = new FakeConsumer(FilterImpl.createFilter("color = 'green'")); consumer.setStatusImmediate(HandleStatus.BUSY); queue.addConsumer(consumer); final int numMessages = 10; List<MessageReference> refs = new ArrayList<>(); for (int i = 0; i < numMessages; i++) { MessageReference ref = generateReference(queue, i); 
ref.getMessage().putStringProperty("color", "green"); refs.add(ref); queue.addTail(ref); } Assert.assertEquals(10, getMessageCount(queue)); Assert.assertEquals(0, queue.getScheduledCount()); Assert.assertEquals(0, queue.getDeliveringCount()); queue.deliverNow(); consumer.setStatusImmediate(null); queue.deliverNow(); List<MessageReference> receeivedRefs = consumer.getReferences(); int currId = 0; for (MessageReference receeivedRef : receeivedRefs) { Assert.assertEquals("messages received out of order", receeivedRef.getMessage().getMessageID(), currId++); } } @Test public void testBusyConsumerWithFilterThenAddMoreMessages() throws Exception { QueueImpl queue = getTemporaryQueue(); FakeConsumer consumer = new FakeConsumer(FilterImpl.createFilter("color = 'green'")); consumer.setStatusImmediate(HandleStatus.BUSY); queue.addConsumer(consumer); final int numMessages = 10; List<MessageReference> refs = new ArrayList<>(); for (int i = 0; i < numMessages; i++) { MessageReference ref = generateReference(queue, i); ref.getMessage().putStringProperty("color", "red"); refs.add(ref); queue.addTail(ref); } Assert.assertEquals(10, getMessageCount(queue)); Assert.assertEquals(0, queue.getScheduledCount()); Assert.assertEquals(0, queue.getDeliveringCount()); queue.deliverNow(); Assert.assertEquals(10, getMessageCount(queue)); Assert.assertEquals(0, queue.getScheduledCount()); Assert.assertEquals(0, queue.getDeliveringCount()); Assert.assertTrue(consumer.getReferences().isEmpty()); for (int i = numMessages; i < numMessages * 2; i++) { MessageReference ref = generateReference(queue, i); refs.add(ref); ref.getMessage().putStringProperty("color", "green"); queue.addTail(ref); } Assert.assertEquals(20, getMessageCount(queue)); Assert.assertEquals(0, queue.getScheduledCount()); Assert.assertEquals(0, queue.getDeliveringCount()); Assert.assertTrue(consumer.getReferences().isEmpty()); consumer.setStatusImmediate(null); for (int i = numMessages * 2; i < numMessages * 3; i++) { 
MessageReference ref = generateReference(queue, i); refs.add(ref); queue.addTail(ref); } queue.deliverNow(); Assert.assertEquals(numMessages, consumer.getReferences().size()); Assert.assertEquals(30, getMessageCount(queue)); Assert.assertEquals(0, queue.getScheduledCount()); Assert.assertEquals(10, queue.getDeliveringCount()); List<MessageReference> receeivedRefs = consumer.getReferences(); int currId = 10; for (MessageReference receeivedRef : receeivedRefs) { Assert.assertEquals("messages received out of order", receeivedRef.getMessage().getMessageID(), currId++); } } @Test public void testConsumerWithFilterThenAddMoreMessages() throws Exception { QueueImpl queue = getTemporaryQueue(); final int numMessages = 10; List<MessageReference> refs = new ArrayList<>(); for (int i = 0; i < numMessages; i++) { MessageReference ref = generateReference(queue, i); ref.getMessage().putStringProperty("color", "red"); refs.add(ref); queue.addTail(ref); } Assert.assertEquals(10, getMessageCount(queue)); Assert.assertEquals(0, queue.getScheduledCount()); Assert.assertEquals(0, queue.getDeliveringCount()); queue.deliverNow(); Assert.assertEquals(10, getMessageCount(queue)); Assert.assertEquals(0, queue.getScheduledCount()); Assert.assertEquals(0, queue.getDeliveringCount()); for (int i = numMessages; i < numMessages * 2; i++) { MessageReference ref = generateReference(queue, i); refs.add(ref); ref.getMessage().putStringProperty("color", "green"); queue.addTail(ref); } FakeConsumer consumer = new FakeConsumer(FilterImpl.createFilter("color = 'green'")); queue.addConsumer(consumer); queue.deliverNow(); Assert.assertEquals(20, getMessageCount(queue)); Assert.assertEquals(0, queue.getScheduledCount()); Assert.assertEquals(10, queue.getDeliveringCount()); for (int i = numMessages * 2; i < numMessages * 3; i++) { MessageReference ref = generateReference(queue, i); refs.add(ref); ref.getMessage().putStringProperty("color", "green"); queue.addTail(ref); } queue.deliverNow(); 
Assert.assertEquals(20, consumer.getReferences().size()); Assert.assertEquals(30, getMessageCount(queue)); Assert.assertEquals(0, queue.getScheduledCount()); Assert.assertEquals(20, queue.getDeliveringCount()); } // Private ------------------------------------------------------------------------------ private void testConsumerWithFilters(final boolean direct) throws Exception { QueueImpl queue = getTemporaryQueue(); Filter filter = new FakeFilter("fruit", "orange"); FakeConsumer consumer = new FakeConsumer(filter); if (direct) { queue.addConsumer(consumer); } List<MessageReference> refs = new ArrayList<>(); MessageReference ref1 = generateReference(queue, 1); ref1.getMessage().putStringProperty(new SimpleString("fruit"), new SimpleString("banana")); queue.addTail(ref1); MessageReference ref2 = generateReference(queue, 2); ref2.getMessage().putStringProperty(new SimpleString("cheese"), new SimpleString("stilton")); queue.addTail(ref2); MessageReference ref3 = generateReference(queue, 3); ref3.getMessage().putStringProperty(new SimpleString("cake"), new SimpleString("sponge")); queue.addTail(ref3); MessageReference ref4 = generateReference(queue, 4); ref4.getMessage().putStringProperty(new SimpleString("fruit"), new SimpleString("orange")); refs.add(ref4); queue.addTail(ref4); MessageReference ref5 = generateReference(queue, 5); ref5.getMessage().putStringProperty(new SimpleString("fruit"), new SimpleString("apple")); queue.addTail(ref5); MessageReference ref6 = generateReference(queue, 6); ref6.getMessage().putStringProperty(new SimpleString("fruit"), new SimpleString("orange")); refs.add(ref6); queue.addTail(ref6); if (!direct) { queue.addConsumer(consumer); queue.deliverNow(); } Assert.assertEquals(6, getMessageCount(queue)); awaitExecution(); Assert.assertEquals(2, consumer.getReferences().size()); Assert.assertEquals(2, queue.getDeliveringCount()); assertRefListsIdenticalRefs(refs, consumer.getReferences()); queue.acknowledge(ref5); queue.acknowledge(ref6); 
queue.removeConsumer(consumer); consumer = new FakeConsumer(); queue.addConsumer(consumer); queue.deliverNow(); Assert.assertEquals(4, getMessageCount(queue)); Assert.assertEquals(4, consumer.getReferences().size()); Assert.assertEquals(4, queue.getDeliveringCount()); } @Test public void testMessageOrder() throws Exception { FakeConsumer consumer = new FakeConsumer(); QueueImpl queue = getTemporaryQueue(); MessageReference messageReference = generateReference(queue, 1); MessageReference messageReference2 = generateReference(queue, 2); MessageReference messageReference3 = generateReference(queue, 3); queue.addHead(messageReference, false); queue.addTail(messageReference2); queue.addHead(messageReference3, false); Assert.assertEquals(0, consumer.getReferences().size()); queue.addConsumer(consumer); queue.deliverNow(); Assert.assertEquals(3, consumer.getReferences().size()); Assert.assertEquals(messageReference3, consumer.getReferences().get(0)); Assert.assertEquals(messageReference, consumer.getReferences().get(1)); Assert.assertEquals(messageReference2, consumer.getReferences().get(2)); } @Test public void testMessagesAdded() throws Exception { QueueImpl queue = getTemporaryQueue(); MessageReference messageReference = generateReference(queue, 1); MessageReference messageReference2 = generateReference(queue, 2); MessageReference messageReference3 = generateReference(queue, 3); queue.addTail(messageReference); queue.addTail(messageReference2); queue.addTail(messageReference3); Assert.assertEquals(getMessagesAdded(queue), 3); } @Test public void testGetReference() throws Exception { QueueImpl queue = getTemporaryQueue(); MessageReference messageReference = generateReference(queue, 1); MessageReference messageReference2 = generateReference(queue, 2); MessageReference messageReference3 = generateReference(queue, 3); queue.addHead(messageReference, false); queue.addHead(messageReference2, false); queue.addHead(messageReference3, false); 
Assert.assertEquals(queue.getReference(2), messageReference2); } @Test public void testGetNonExistentReference() throws Exception { QueueImpl queue = getTemporaryQueue(); MessageReference messageReference = generateReference(queue, 1); MessageReference messageReference2 = generateReference(queue, 2); MessageReference messageReference3 = generateReference(queue, 3); queue.addHead(messageReference, false); queue.addHead(messageReference2, false); queue.addHead(messageReference3, false); Assert.assertNull(queue.getReference(5)); } /** * Test the paused and resumed states with async deliveries. * * @throws Exception */ @Test public void testPauseAndResumeWithAsync() throws Exception { QueueImpl queue = getTemporaryQueue(); // pauses the queue queue.pause(); final int numMessages = 10; List<MessageReference> refs = new ArrayList<>(); for (int i = 0; i < numMessages; i++) { MessageReference ref = generateReference(queue, i); refs.add(ref); queue.addTail(ref); } // even as this queue is paused, it will receive the messages anyway Assert.assertEquals(10, getMessageCount(queue)); Assert.assertEquals(0, queue.getScheduledCount()); Assert.assertEquals(0, queue.getDeliveringCount()); // Now add a consumer FakeConsumer consumer = new FakeConsumer(); queue.addConsumer(consumer); Assert.assertTrue(consumer.getReferences().isEmpty()); Assert.assertEquals(10, getMessageCount(queue)); Assert.assertEquals(0, queue.getScheduledCount()); // explicit order of delivery queue.deliverNow(); // As the queue is paused, even an explicit order of delivery will not work. Assert.assertEquals(0, consumer.getReferences().size()); Assert.assertEquals(numMessages, getMessageCount(queue)); Assert.assertEquals(0, queue.getScheduledCount()); Assert.assertEquals(0, queue.getDeliveringCount()); // resuming work queue.resume(); awaitExecution(); // after resuming the delivery begins. 
assertRefListsIdenticalRefs(refs, consumer.getReferences()); Assert.assertEquals(numMessages, getMessageCount(queue)); Assert.assertEquals(0, queue.getScheduledCount()); Assert.assertEquals(numMessages, queue.getDeliveringCount()); } /** * Test the paused and resumed states with direct deliveries. * * @throws Exception */ @Test public void testPauseAndResumeWithDirect() throws Exception { QueueImpl queue = getTemporaryQueue(); // Now add a consumer FakeConsumer consumer = new FakeConsumer(); queue.addConsumer(consumer); // brings to queue to paused state queue.pause(); final int numMessages = 10; List<MessageReference> refs = new ArrayList<>(); for (int i = 0; i < numMessages; i++) { MessageReference ref = generateReference(queue, i); refs.add(ref); queue.addTail(ref); } // the queue even if it's paused will receive the message but won't forward // directly to the consumer until resumed. Assert.assertEquals(numMessages, getMessageCount(queue)); Assert.assertEquals(0, queue.getScheduledCount()); Assert.assertEquals(0, queue.getDeliveringCount()); Assert.assertTrue(consumer.getReferences().isEmpty()); // brings the queue to resumed state. 
queue.resume(); awaitExecution(); // resuming delivery of messages assertRefListsIdenticalRefs(refs, consumer.getReferences()); Assert.assertEquals(numMessages, getMessageCount(queue)); Assert.assertEquals(numMessages, queue.getDeliveringCount()); } @Test public void testResetMessagesAdded() throws Exception { QueueImpl queue = getTemporaryQueue(); MessageReference messageReference = generateReference(queue, 1); MessageReference messageReference2 = generateReference(queue, 2); queue.addTail(messageReference); queue.addTail(messageReference2); Assert.assertEquals(2, getMessagesAdded(queue)); queue.resetMessagesAdded(); Assert.assertEquals(0, getMessagesAdded(queue)); } class AddtoQueueRunner implements Runnable { QueueImpl queue; MessageReference messageReference; boolean added = false; CountDownLatch countDownLatch; boolean first; AddtoQueueRunner(final boolean first, final QueueImpl queue, final MessageReference messageReference, final CountDownLatch countDownLatch) { this.queue = queue; this.messageReference = messageReference; this.countDownLatch = countDownLatch; this.first = first; } @Override public void run() { if (first) { queue.addHead(messageReference, false); } else { queue.addTail(messageReference); } added = true; countDownLatch.countDown(); } } @Test public void testTotalIteratorOrder() throws Exception { final String MY_ADDRESS = "myAddress"; final String MY_QUEUE = "myQueue"; ActiveMQServer server = addServer(ActiveMQServers.newActiveMQServer(createDefaultInVMConfig(), true)); AddressSettings defaultSetting = new AddressSettings().setPageSizeBytes(10 * 1024).setMaxSizeBytes(20 * 1024); server.getAddressSettingsRepository().addMatch("#", defaultSetting); server.start(); ServerLocator locator = createInVMNonHALocator().setBlockOnNonDurableSend(true).setBlockOnDurableSend(true).setBlockOnAcknowledge(true); ClientSessionFactory factory = createSessionFactory(locator); ClientSession session = addClientSession(factory.createSession(false, true, true)); 
session.createQueue(MY_ADDRESS, MY_QUEUE, true); ClientProducer producer = addClientProducer(session.createProducer(MY_ADDRESS)); for (int i = 0; i < 50; i++) { ClientMessage message = session.createMessage(true); message.getBodyBuffer().writeBytes(new byte[1024]); message.putIntProperty("order", i); producer.send(message); } producer.close(); session.close(); factory.close(); locator.close(); Queue queue = ((LocalQueueBinding) server.getPostOffice().getBinding(new SimpleString(MY_QUEUE))).getQueue(); LinkedListIterator<MessageReference> totalIterator = queue.browserIterator(); try { int i = 0; while (totalIterator.hasNext()) { MessageReference ref = totalIterator.next(); Assert.assertEquals(i++, ref.getMessage().getIntProperty("order").intValue()); } } finally { totalIterator.close(); server.stop(); } } @Test public void testGroupMessageWithManyConsumers() throws Exception { final CountDownLatch firstMessageHandled = new CountDownLatch(1); final CountDownLatch finished = new CountDownLatch(2); final Consumer groupConsumer = new FakeConsumer() { int count = 0; @Override public synchronized HandleStatus handle(MessageReference reference) { if (count == 0) { //the first message is handled and will be used to determine this consumer //to be the group consumer count++; firstMessageHandled.countDown(); return HandleStatus.HANDLED; } else if (count <= 2) { //the next two attempts to send the second message will be done //attempting a direct delivery and an async one after that count++; finished.countDown(); return HandleStatus.BUSY; } else { //this shouldn't happen, because the last attempt to deliver //the second message should have stop the delivery loop: //it will succeed just to let the message being handled and //reduce the message count to 0 return HandleStatus.HANDLED; } } }; final Consumer noConsumer = new FakeConsumer() { @Override public synchronized HandleStatus handle(MessageReference reference) { Assert.fail("this consumer isn't allowed to consume any 
message"); throw new AssertionError(); } }; final QueueImpl queue = new QueueImpl(1, new SimpleString("address1"), QueueImplTest.queue1, null, null, false, true, false, scheduledExecutor, null, null, null, ArtemisExecutor.delegate(executor), null, null); queue.addConsumer(groupConsumer); queue.addConsumer(noConsumer); final MessageReference firstMessageReference = generateReference(queue, 1); final SimpleString groupName = SimpleString.toSimpleString("group"); firstMessageReference.getMessage().putStringProperty(Message.HDR_GROUP_ID, groupName); final MessageReference secondMessageReference = generateReference(queue, 2); secondMessageReference.getMessage().putStringProperty(Message.HDR_GROUP_ID, groupName); queue.addTail(firstMessageReference, true); Assert.assertTrue("first message isn't handled", firstMessageHandled.await(3000, TimeUnit.MILLISECONDS)); Assert.assertEquals("group consumer isn't correctly set", groupConsumer, queue.getGroups().get(groupName)); queue.addTail(secondMessageReference, true); final boolean atLeastTwoDeliverAttempts = finished.await(3000, TimeUnit.MILLISECONDS); Assert.assertTrue(atLeastTwoDeliverAttempts); Thread.sleep(1000); Assert.assertEquals("The second message should be in the queue", 1, queue.getMessageCount()); } private QueueImpl getNonDurableQueue() { return getQueue(QueueImplTest.queue1, false, false, null); } private QueueImpl getDurableQueue() { return getQueue(QueueImplTest.queue1, true, false, null); } private QueueImpl getNamedQueue(SimpleString name) { return getQueue(name, false, true, null); } private QueueImpl getFilteredQueue(Filter filter) { return getQueue(QueueImplTest.queue1, false, true, filter); } private QueueImpl getTemporaryQueue() { return getQueue(QueueImplTest.queue1, false, true, null); } private QueueImpl getQueue(SimpleString name, boolean durable, boolean temporary, Filter filter) { return new QueueImpl(1, QueueImplTest.address1, name, filter, null, durable, temporary, false, scheduledExecutor, new 
FakePostOffice(), null, null, ArtemisExecutor.delegate(executor), null, null); } }
apache-2.0
asereda-gs/immutables
value-fixture/src/org/immutables/fixture/jackson/NamingStrategy.java
956
/* Copyright 2016 Immutables Authors and Contributors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package org.immutables.fixture.jackson; import com.fasterxml.jackson.databind.annotation.JsonDeserialize; import org.immutables.value.Value; @JsonDeserialize(as = ImmutableNamingStrategy.class) @Value.Immutable @Value.Style(forceJacksonPropertyNames = false) public interface NamingStrategy { int abraCadabra(); boolean focusPocus(); }
apache-2.0
passion1014/metaworks_framework
core/broadleaf-profile-web/src/main/java/org/broadleafcommerce/profile/web/core/security/CustomerStateRequestProcessor.java
18733
/* * #%L * BroadleafCommerce Profile Web * %% * Copyright (C) 2009 - 2013 Broadleaf Commerce * %% * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ package org.broadleafcommerce.profile.web.core.security; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.broadleafcommerce.common.extension.ExtensionResultHolder; import org.broadleafcommerce.common.util.BLCRequestUtils; import org.broadleafcommerce.common.web.AbstractBroadleafWebRequestProcessor; import org.broadleafcommerce.common.web.BroadleafRequestCustomerResolverImpl; import org.broadleafcommerce.profile.core.domain.Customer; import org.broadleafcommerce.profile.core.service.CustomerService; import org.broadleafcommerce.profile.web.core.CustomerState; import org.broadleafcommerce.profile.web.core.CustomerStateRefresher; import org.springframework.context.ApplicationEvent; import org.springframework.context.ApplicationEventPublisher; import org.springframework.context.ApplicationEventPublisherAware; import org.springframework.security.authentication.AnonymousAuthenticationToken; import org.springframework.security.authentication.RememberMeAuthenticationToken; import org.springframework.security.authentication.UsernamePasswordAuthenticationToken; import org.springframework.security.core.Authentication; import org.springframework.security.core.context.SecurityContextHolder; import org.springframework.stereotype.Component; import org.springframework.web.context.request.WebRequest; 
import java.util.HashMap;
import java.util.Map;

import javax.annotation.Resource;

/**
 * Request processor that guarantees a {@link Customer} is associated with every request. Resolution
 * order (as implemented in {@link #process(WebRequest)}): an explicit override customer id stored in
 * session, then the Spring Security {@link Authentication}, and finally an anonymous customer that is
 * created (or restored from session) on demand. The resolved customer is published via
 * {@link CustomerState#setCustomer(Customer)} and placed into the {@code blRuleMap} request attribute
 * for content rule processing.
 *
 * @author Phillip Verheyden
 * @see {@link CustomerStateFilter}
 */
@Component("blCustomerStateRequestProcessor")
public class CustomerStateRequestProcessor extends AbstractBroadleafWebRequestProcessor implements ApplicationEventPublisherAware {

    /** Logger for this class and subclasses */
    protected final Log logger = LogFactory.getLog(getClass());

    /** Request attribute name under which the rule-processing map (containing the customer) is stored. */
    public static final String BLC_RULE_MAP_PARAM = "blRuleMap";

    @Resource(name="blCustomerService")
    protected CustomerService customerService;

    @Resource(name = "blCustomerMergeExtensionManager")
    protected CustomerMergeExtensionManager customerMergeExtensionManager;

    // Injected by Spring through ApplicationEventPublisherAware; used for login / remember-me events.
    protected ApplicationEventPublisher eventPublisher;

    // Session attribute names used to track customer state across requests.
    public static final String ANONYMOUS_CUSTOMER_SESSION_ATTRIBUTE_NAME = "_blc_anonymousCustomer";
    public static final String ANONYMOUS_CUSTOMER_ID_SESSION_ATTRIBUTE_NAME = "_blc_anonymousCustomerId";
    private static final String LAST_PUBLISHED_EVENT_SESSION_ATTRIBUTED_NAME = "_blc_lastPublishedEvent";
    public static final String OVERRIDE_CUSTOMER_SESSION_ATTR_NAME = "_blc_overrideCustomerId";
    public static final String ANONYMOUS_CUSTOMER_MERGED_SESSION_ATTRIBUTE_NAME = "_blc_anonymousCustomerMerged";

    /**
     * Resolves the current {@link Customer} for this request and exposes it through
     * {@link CustomerState} and the {@code blRuleMap} request attribute.
     *
     * @param request the current web request
     */
    @Override
    public void process(WebRequest request) {
        Customer customer = null;
        Long overrideId = null;
        if (BLCRequestUtils.isOKtoUseSession(request)) {
            overrideId = (Long) request.getAttribute(OVERRIDE_CUSTOMER_SESSION_ATTR_NAME, WebRequest.SCOPE_GLOBAL_SESSION);
        }
        if (overrideId != null) {
            // An explicit customer override takes precedence over any authentication state
            customer = customerService.readCustomerById(overrideId);
        } else {
            Authentication authentication = SecurityContextHolder.getContext().getAuthentication();
            if ((authentication != null) && !(authentication instanceof AnonymousAuthenticationToken)) {
                String userName = authentication.getName();
                customer = (Customer) BroadleafRequestCustomerResolverImpl.getRequestCustomerResolver().getCustomer(request);
                if (userName != null && (customer == null || !userName.equals(customer.getUsername()))) {
                    // can only get here if the authenticated user does not match the user in session
                    customer = customerService.readCustomerByUsername(userName);
                    if (logger.isDebugEnabled() && customer != null) {
                        logger.debug("Customer found by username " + userName);
                    }
                }
                if (customer != null) {
                    // Only publish a login / remember-me event once per session; the last published
                    // event is remembered in session so repeated requests do not re-fire it.
                    ApplicationEvent lastPublishedEvent = (ApplicationEvent) BLCRequestUtils.getSessionAttributeIfOk(request, LAST_PUBLISHED_EVENT_SESSION_ATTRIBUTED_NAME);
                    if (authentication instanceof RememberMeAuthenticationToken) {
                        // set transient property of customer
                        customer.setCookied(true);
                        boolean publishRememberMeEvent = true;
                        if (lastPublishedEvent != null && lastPublishedEvent instanceof CustomerAuthenticatedFromCookieEvent) {
                            CustomerAuthenticatedFromCookieEvent cookieEvent = (CustomerAuthenticatedFromCookieEvent) lastPublishedEvent;
                            if (userName.equals(cookieEvent.getCustomer().getUsername())) {
                                publishRememberMeEvent = false;
                            }
                        }
                        if (publishRememberMeEvent) {
                            CustomerAuthenticatedFromCookieEvent cookieEvent = new CustomerAuthenticatedFromCookieEvent(customer, this.getClass().getName());
                            eventPublisher.publishEvent(cookieEvent);
                            BLCRequestUtils.setSessionAttributeIfOk(request, LAST_PUBLISHED_EVENT_SESSION_ATTRIBUTED_NAME, cookieEvent);
                        }
                    } else if (authentication instanceof UsernamePasswordAuthenticationToken) {
                        customer.setLoggedIn(true);
                        boolean publishLoggedInEvent = true;
                        if (lastPublishedEvent != null && lastPublishedEvent instanceof CustomerLoggedInEvent) {
                            CustomerLoggedInEvent loggedInEvent = (CustomerLoggedInEvent) lastPublishedEvent;
                            if (userName.equals(loggedInEvent.getCustomer().getUsername())) {
                                publishLoggedInEvent = false;
                            }
                        }
                        if (publishLoggedInEvent) {
                            CustomerLoggedInEvent loggedInEvent = new CustomerLoggedInEvent(customer, this.getClass().getName());
                            eventPublisher.publishEvent(loggedInEvent);
                            BLCRequestUtils.setSessionAttributeIfOk(request, LAST_PUBLISHED_EVENT_SESSION_ATTRIBUTED_NAME, loggedInEvent);
                        }
                    } else {
                        // Unknown token type - give subclasses a chance to resolve the customer
                        customer = resolveAuthenticatedCustomer(authentication);
                    }
                }
            }
        }

        if (customer == null) {
            // This is an anonymous customer.
            // TODO: Handle a custom cookie (different than remember me) that is just for anonymous users.
            // This can be used to remember their cart from a previous visit.
            // Cookie logic probably needs to be configurable - with TCS as the exception.
            customer = resolveAnonymousCustomer(request);
        } else {
            //Does this customer need to have an anonymous customer's data merged into it?
            customer = mergeCustomerIfRequired(request, customer);
        }
        CustomerState.setCustomer(customer);

        // Setup customer for content rule processing
        @SuppressWarnings("unchecked")
        Map<String,Object> ruleMap = (Map<String, Object>) request.getAttribute(BLC_RULE_MAP_PARAM, WebRequest.SCOPE_REQUEST);
        if (ruleMap == null) {
            ruleMap = new HashMap<String,Object>();
        }
        ruleMap.put("customer", customer);
        request.setAttribute(BLC_RULE_MAP_PARAM, ruleMap, WebRequest.SCOPE_REQUEST);
    }

    /**
     * Allows the merging of anonymous customer data and / or session data, to the logged in customer, if required.
     * This is written to only require it to happen once (a session flag is set on the first invocation).
     *
     * @param request the current web request
     * @param customer the (non-anonymous) customer resolved for this request
     * @return the customer, possibly augmented with merged anonymous-customer data
     */
    protected Customer mergeCustomerIfRequired(WebRequest request, Customer customer) {
        if (BLCRequestUtils.isOKtoUseSession(request)) {
            //Don't call this if it has already been called
            if (request.getAttribute(getAnonymousCustomerMergedSessionAttributeName(), WebRequest.SCOPE_GLOBAL_SESSION) == null) {
                //Set this so we don't do this every time.
                request.setAttribute(getAnonymousCustomerMergedSessionAttributeName(), Boolean.TRUE, WebRequest.SCOPE_GLOBAL_SESSION);
                Customer anonymousCustomer = getAnonymousCustomer(request);
                customer = copyAnonymousCustomerInfoToCustomer(request, anonymousCustomer, customer);
            }
        }
        return customer;
    }

    /**
     * This allows the customer object to be augmented by information that may have been stored on the
     * anonymous customer or session. After login, a new instance of customer is created that is different from the
     * anonymous customer. In many cases, there are reasons that the anonymous customer may have had data associated with
     * them that is required on the new customer (customer attributes, promotions, promo codes, etc.).
     * The actual copying is delegated to the {@code customerMergeExtensionManager}; with no registered
     * extensions no data is copied. You should consider security when copying data from one customer to another.
     *
     * @param request the current web request
     * @param anonymous the anonymous customer from session (may be null)
     * @param customer the logged in customer
     * @return the (possibly saved) merged customer
     */
    protected Customer copyAnonymousCustomerInfoToCustomer(WebRequest request, Customer anonymous, Customer customer) {
        if (customerMergeExtensionManager != null) {
            ExtensionResultHolder<Customer> resultHolder = new ExtensionResultHolder<Customer>();
            resultHolder.setResult(customer);
            customerMergeExtensionManager.getProxy().merge(resultHolder, request, anonymous);

            if (resultHolder.getThrowable() != null) {
                // Re-throw extension failures; wrap checked throwables so callers see a RuntimeException
                if (resultHolder.getThrowable() instanceof RuntimeException) {
                    throw ((RuntimeException) resultHolder.getThrowable());
                } else {
                    throw new RuntimeException("An unexpected error occured merging the anonymous customer",
                            resultHolder.getThrowable());
                }
            }

            return customerService.saveCustomer(resultHolder.getResult());
        }
        return customer;
    }

    /**
     * Subclasses can extend to resolve other types of Authentication tokens. The default implementation
     * resolves nothing.
     *
     * @param authentication the non-anonymous authentication token that was not otherwise handled
     * @return the resolved customer, or null
     */
    public Customer resolveAuthenticatedCustomer(Authentication authentication) {
        return null;
    }

    /**
     * <p>Implementors can subclass to change how anonymous customers are created. Note that this method is intended to actually create the anonymous
     * customer if one does not exist. If you are looking to just get the current anonymous customer (if it exists) then instead use the
     * {@link #getAnonymousCustomer(WebRequest)} method.<p>
     *
     * <p>If no customer (nor customer id) is found in session, a new customer is created and stored in
     * session (it is not persisted to the database at this point). The returned customer is always
     * flagged as anonymous.</p>
     *
     * @param request the current web request
     * @return the anonymous customer, never null
     * @see {@link #getAnonymousCustomer(WebRequest)}
     * @see {@link #getAnonymousCustomerAttributeName()}
     * @see {@link #getAnonymousCustomerIdAttributeName()}
     */
    public Customer resolveAnonymousCustomer(WebRequest request) {
        Customer customer;
        customer = getAnonymousCustomer(request);

        //If there is no Customer object in session, AND no customer id in session, create a new customer
        //and store the entire customer in session (don't persist to DB just yet)
        if (customer == null) {
            customer = customerService.createNewCustomer();
            if (BLCRequestUtils.isOKtoUseSession(request)) {
                request.setAttribute(getAnonymousCustomerSessionAttributeName(), customer, WebRequest.SCOPE_GLOBAL_SESSION);
            }
        }
        customer.setAnonymous(true);

        return customer;
    }

    /**
     * Returns the anonymous customer that was saved in session. This first checks for a full customer in session (meaning
     * that the customer has not already been persisted) and returns that. If there is no full customer in session (and
     * there is instead just an anonymous customer ID) then this will look up the customer from the database using that and
     * return it.
     *
     * @param request the current request
     * @return the anonymous customer in session or null if there is no anonymous customer represented in session
     * @see {@link #getAnonymousCustomerSessionAttributeName()}
     * @see {@link #getAnonymousCustomerIdSessionAttributeName()}
     */
    public Customer getAnonymousCustomer(WebRequest request) {
        if (BLCRequestUtils.isOKtoUseSession(request)) {
            Customer anonymousCustomer = (Customer) request.getAttribute(getAnonymousCustomerSessionAttributeName(),
                    WebRequest.SCOPE_GLOBAL_SESSION);
            if (anonymousCustomer == null) {
                //Customer is not in session, see if we have just a customer ID in session (the anonymous customer might have
                //already been persisted)
                Long customerId = (Long) request.getAttribute(getAnonymousCustomerIdSessionAttributeName(), WebRequest.SCOPE_GLOBAL_SESSION);
                if (customerId != null) {
                    //we have a customer ID in session, look up the customer from the database to ensure we have an up-to-date
                    //customer to store in CustomerState
                    anonymousCustomer = customerService.readCustomerById(customerId);
                }
            }
            return anonymousCustomer;
        }
        return null;
    }

    /**
     * Returns the session attribute to store the anonymous customer.
     * Some implementations may wish to have a different anonymous customer instance (and as a result a different cart).
     *
     * The entire Customer should be stored in session ONLY if that Customer has not already been persisted to the database.
     * Once it has been persisted (like once the user has added something to the cart) then {@link #getAnonymousCustomerIdAttributeName()}
     * should be used instead.
     *
     * @return the session attribute for an anonymous {@link Customer} that has not been persisted to the database yet
     */
    public static String getAnonymousCustomerSessionAttributeName() {
        return ANONYMOUS_CUSTOMER_SESSION_ATTRIBUTE_NAME;
    }

    /**
     * <p>Returns the session attribute to store the anonymous customer ID. This session attribute should be used to track
     * anonymous customers that have not registered but have state in the database. When users first visit the Broadleaf
     * site, a new {@link Customer} is instantiated but is <b>only saved in session</b> and not persisted to the database. However,
     * once that user adds something to the cart, that {@link Customer} is now saved in the database and it no longer makes
     * sense to pull back a full {@link Customer} object from session, as any session-based {@link Customer} will be out of
     * date in regards to Hibernate (specifically with lists).</p>
     *
     * <p>So, once Broadleaf detects that the session-based {@link Customer} has been persisted, it should remove the session-based
     * {@link Customer} and then utilize just the customer ID from session.</p>
     *
     * @see {@link CustomerStateRefresher}
     */
    public static String getAnonymousCustomerIdSessionAttributeName() {
        return ANONYMOUS_CUSTOMER_ID_SESSION_ATTRIBUTE_NAME;
    }

    @Override
    public void setApplicationEventPublisher(ApplicationEventPublisher eventPublisher) {
        this.eventPublisher = eventPublisher;
    }

    /**
     * The request-scoped attribute that should store the {@link Customer}.
     *
     * <pre>
     * Customer customer = (Customer) request.getAttribute(CustomerStateRequestProcessor.getCustomerRequestAttributeName());
     * //this is equivalent to the above invocation
     * Customer customer = CustomerState.getCustomer();
     * </pre>
     * @return the request attribute name, as defined by the configured request customer resolver
     * @see {@link CustomerState}
     */
    public static String getCustomerRequestAttributeName() {
        return BroadleafRequestCustomerResolverImpl.getRequestCustomerResolver().getCustomerRequestAttributeName();
    }

    /**
     * This is the name of a session attribute that holds whether or not the anonymous customer has been merged into
     * the logged in customer. This is useful for tracking as often there is an anonymous customer that has customer
     * attributes or other data that is saved on the customer in the database or in transient properties. It is often
     * beneficial, after logging in, to copy certain properties to the logged in customer.
     * @return the session attribute name for the "merge already performed" flag
     */
    public static String getAnonymousCustomerMergedSessionAttributeName() {
        return ANONYMOUS_CUSTOMER_MERGED_SESSION_ATTRIBUTE_NAME;
    }
}
apache-2.0
xhoong/incubator-calcite
core/src/main/java/org/apache/calcite/sql/fun/SqlDatetimePlusOperator.java
2911
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to you under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.calcite.sql.fun; import org.apache.calcite.avatica.util.TimeUnit; import org.apache.calcite.rel.type.RelDataType; import org.apache.calcite.rel.type.RelDataTypeFactory; import org.apache.calcite.sql.SqlCall; import org.apache.calcite.sql.SqlKind; import org.apache.calcite.sql.SqlOperatorBinding; import org.apache.calcite.sql.SqlSpecialOperator; import org.apache.calcite.sql.SqlSyntax; import org.apache.calcite.sql.SqlWriter; import org.apache.calcite.sql.type.InferTypes; import org.apache.calcite.sql.type.IntervalSqlType; import org.apache.calcite.sql.type.OperandTypes; import org.apache.calcite.sql.type.ReturnTypes; import org.apache.calcite.sql.validate.SqlMonotonicity; /** * Operator that adds an INTERVAL to a DATETIME. 
*/ public class SqlDatetimePlusOperator extends SqlSpecialOperator { //~ Constructors ----------------------------------------------------------- SqlDatetimePlusOperator() { super("+", SqlKind.PLUS, 40, true, ReturnTypes.ARG2_NULLABLE, InferTypes.FIRST_KNOWN, OperandTypes.MINUS_DATE_OPERATOR); } //~ Methods ---------------------------------------------------------------- @Override public RelDataType inferReturnType(SqlOperatorBinding opBinding) { final RelDataTypeFactory typeFactory = opBinding.getTypeFactory(); final RelDataType leftType = opBinding.getOperandType(0); final IntervalSqlType unitType = (IntervalSqlType) opBinding.getOperandType(1); final TimeUnit timeUnit = unitType.getIntervalQualifier().getStartUnit(); return SqlTimestampAddFunction.deduceType(typeFactory, timeUnit, unitType, leftType); } public SqlSyntax getSyntax() { return SqlSyntax.SPECIAL; } public void unparse( SqlWriter writer, SqlCall call, int leftPrec, int rightPrec) { writer.getDialect().unparseSqlDatetimeArithmetic( writer, call, SqlKind.PLUS, leftPrec, rightPrec); } @Override public SqlMonotonicity getMonotonicity(SqlOperatorBinding call) { return SqlStdOperatorTable.PLUS.getMonotonicity(call); } } // End SqlDatetimePlusOperator.java
apache-2.0
android-ia/platform_tools_idea
platform/lang-impl/src/com/intellij/openapi/options/colors/pages/ANSIColoredConsoleColorsPage.java
6159
package com.intellij.openapi.options.colors.pages;

import com.intellij.execution.process.ConsoleHighlighter;
import com.intellij.execution.ui.ConsoleViewContentType;
import com.intellij.openapi.editor.colors.TextAttributesKey;
import com.intellij.openapi.fileTypes.FileTypes;
import com.intellij.openapi.fileTypes.PlainSyntaxHighlighter;
import com.intellij.openapi.fileTypes.SyntaxHighlighter;
import com.intellij.openapi.options.OptionsBundle;
import com.intellij.openapi.options.colors.AttributesDescriptor;
import com.intellij.openapi.options.colors.ColorDescriptor;
import com.intellij.openapi.options.colors.ColorSettingsPage;
import com.intellij.psi.codeStyle.DisplayPriority;
import com.intellij.psi.codeStyle.DisplayPrioritySortable;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;

import javax.swing.*;
import java.util.HashMap;
import java.util.Map;

/**
 * Color settings page for the console view, covering the standard streams
 * (stdout/stderr/stdin), log levels, and the ANSI color attributes.
 * The demo text below uses XML-like tags that are mapped to attribute keys via
 * {@link #getAdditionalHighlightingTagToDescriptorMap()}.
 *
 * @author oleg, Roman.Chernyatchik
 */
public class ANSIColoredConsoleColorsPage implements ColorSettingsPage, DisplayPrioritySortable {
  // Preview text shown in the settings UI; tags like <stdout>...</stdout> mark highlighted regions.
  private static final String DEMO_TEXT = "<stdsys>C:\\command.com</stdsys>\n" +
      "-<stdout> C:></stdout>\n" +
      "-<stdin> help</stdin>\n" +
      "<stderr>Bad command or file name</stderr>\n" +
      "\n" +
      "<logError>Log error</logError>\n" +
      "<logWarning>Log warning</logWarning>\n" +
      "<logExpired>An expired log entry</logExpired>\n" +
      "\n" +
      "# Process output highlighted using ANSI colors codes\n" +
      "<red>ANSI: red</red>\n" +
      "<green>ANSI: green</green>\n" +
      "<yellow>ANSI: yellow</yellow>\n" +
      "<blue>ANSI: blue</blue>\n" +
      "<magenta>ANSI: magenta</magenta>\n" +
      "<cyan>ANSI: cyan</cyan>\n" +
      "<gray>ANSI: gray</gray>\n" +
      "\n" +
      "<stdsys>Process finished with exit code 1</stdsys>\n";

  // User-visible attribute entries (label + key) listed on the settings page.
  private static final AttributesDescriptor[] ATTRS = {
      new AttributesDescriptor(OptionsBundle.message("options.general.color.descriptor.console.stdout"), ConsoleViewContentType.NORMAL_OUTPUT_KEY),
      new AttributesDescriptor(OptionsBundle.message("options.general.color.descriptor.console.stderr"), ConsoleViewContentType.ERROR_OUTPUT_KEY),
      new AttributesDescriptor(OptionsBundle.message("options.general.color.descriptor.console.stdin"), ConsoleViewContentType.USER_INPUT_KEY),
      new AttributesDescriptor(OptionsBundle.message("options.general.color.descriptor.console.system.output"), ConsoleViewContentType.SYSTEM_OUTPUT_KEY),
      new AttributesDescriptor(OptionsBundle.message("options.general.color.descriptor.console.logError"), ConsoleViewContentType.LOG_ERROR_OUTPUT_KEY),
      new AttributesDescriptor(OptionsBundle.message("options.general.color.descriptor.console.warning"), ConsoleViewContentType.LOG_WARNING_OUTPUT_KEY),
      new AttributesDescriptor(OptionsBundle.message("options.general.color.descriptor.console.expired"), ConsoleViewContentType.LOG_EXPIRED_ENTRY),
      new AttributesDescriptor(OptionsBundle.message("color.settings.console.red"), ConsoleHighlighter.RED),
      new AttributesDescriptor(OptionsBundle.message("color.settings.console.green"), ConsoleHighlighter.GREEN),
      new AttributesDescriptor(OptionsBundle.message("color.settings.console.yellow"), ConsoleHighlighter.YELLOW),
      new AttributesDescriptor(OptionsBundle.message("color.settings.console.blue"), ConsoleHighlighter.BLUE),
      new AttributesDescriptor(OptionsBundle.message("color.settings.console.magenta"), ConsoleHighlighter.MAGENTA),
      new AttributesDescriptor(OptionsBundle.message("color.settings.console.cyan"), ConsoleHighlighter.CYAN),
      new AttributesDescriptor(OptionsBundle.message("color.settings.console.gray"), ConsoleHighlighter.GRAY),
  };

  // Maps demo-text tag names to the attribute keys they preview; must stay in sync with DEMO_TEXT.
  private static final Map<String, TextAttributesKey> ADDITIONAL_HIGHLIGHT_DESCRIPTORS = new HashMap<String, TextAttributesKey>();

  static{
    ADDITIONAL_HIGHLIGHT_DESCRIPTORS.put("stdsys", ConsoleViewContentType.SYSTEM_OUTPUT_KEY);
    ADDITIONAL_HIGHLIGHT_DESCRIPTORS.put("stdout", ConsoleViewContentType.NORMAL_OUTPUT_KEY);
    ADDITIONAL_HIGHLIGHT_DESCRIPTORS.put("stdin", ConsoleViewContentType.USER_INPUT_KEY);
    ADDITIONAL_HIGHLIGHT_DESCRIPTORS.put("stderr", ConsoleViewContentType.ERROR_OUTPUT_KEY);

    ADDITIONAL_HIGHLIGHT_DESCRIPTORS.put("logError", ConsoleViewContentType.LOG_ERROR_OUTPUT_KEY);
    ADDITIONAL_HIGHLIGHT_DESCRIPTORS.put("logWarning", ConsoleViewContentType.LOG_WARNING_OUTPUT_KEY);
    ADDITIONAL_HIGHLIGHT_DESCRIPTORS.put("logExpired", ConsoleViewContentType.LOG_EXPIRED_ENTRY);

    ADDITIONAL_HIGHLIGHT_DESCRIPTORS.put("red", ConsoleHighlighter.RED);
    ADDITIONAL_HIGHLIGHT_DESCRIPTORS.put("green", ConsoleHighlighter.GREEN);
    ADDITIONAL_HIGHLIGHT_DESCRIPTORS.put("yellow", ConsoleHighlighter.YELLOW);
    ADDITIONAL_HIGHLIGHT_DESCRIPTORS.put("blue", ConsoleHighlighter.BLUE);
    ADDITIONAL_HIGHLIGHT_DESCRIPTORS.put("magenta", ConsoleHighlighter.MAGENTA);
    ADDITIONAL_HIGHLIGHT_DESCRIPTORS.put("cyan", ConsoleHighlighter.CYAN);
    ADDITIONAL_HIGHLIGHT_DESCRIPTORS.put("gray", ConsoleHighlighter.GRAY);
  }

  // The console only exposes one plain color (the background); everything else is an attribute above.
  private static final ColorDescriptor[] COLORS = {
      new ColorDescriptor(OptionsBundle.message("options.general.color.descriptor.console.background"),
                          ConsoleViewContentType.CONSOLE_BACKGROUND_KEY,
                          ColorDescriptor.Kind.BACKGROUND),
  };

  @Override
  @Nullable
  public Map<String, TextAttributesKey> getAdditionalHighlightingTagToDescriptorMap() {
    return ADDITIONAL_HIGHLIGHT_DESCRIPTORS;
  }

  @Override
  @NotNull
  public String getDisplayName() {
    return OptionsBundle.message("color.settings.console.name");
  }

  @Override
  @NotNull
  public Icon getIcon() {
    return FileTypes.PLAIN_TEXT.getIcon();
  }

  @Override
  @NotNull
  public AttributesDescriptor[] getAttributeDescriptors() {
    return ATTRS;
  }

  @Override
  @NotNull
  public ColorDescriptor[] getColorDescriptors() {
    return COLORS;
  }

  @Override
  @NotNull
  public SyntaxHighlighter getHighlighter() {
    // Plain text highlighting: the demo colors come purely from the tag map above.
    return new PlainSyntaxHighlighter();
  }

  @Override
  @NotNull
  public String getDemoText() {
    return DEMO_TEXT;
  }

  @Override
  public DisplayPriority getPriority() {
    return DisplayPriority.COMMON_SETTINGS;
  }
}
apache-2.0
ctripcorp/apollo
apollo-biz/src/main/java/com/ctrip/framework/apollo/biz/repository/AppNamespaceRepository.java
1896
/*
 * Copyright 2021 Apollo Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package com.ctrip.framework.apollo.biz.repository;

import com.ctrip.framework.apollo.common.entity.AppNamespace;
import org.springframework.data.jpa.repository.Modifying;
import org.springframework.data.jpa.repository.Query;
import org.springframework.data.repository.PagingAndSortingRepository;

import java.util.List;
import java.util.Set;

/**
 * Spring Data repository for {@link AppNamespace} entities. Finder methods are
 * derived from their names; deletes are soft deletes implemented as JPQL updates
 * that flip the {@code IsDeleted} flag.
 */
public interface AppNamespaceRepository extends PagingAndSortingRepository<AppNamespace, Long>{

  // Look up a single namespace by its owning app and name.
  AppNamespace findByAppIdAndName(String appId, String namespaceName);

  // Batch variant of the lookup above for a set of namespace names.
  List<AppNamespace> findByAppIdAndNameIn(String appId, Set<String> namespaceNames);

  // Public namespaces are unique by name across apps.
  AppNamespace findByNameAndIsPublicTrue(String namespaceName);

  List<AppNamespace> findByNameInAndIsPublicTrue(Set<String> namespaceNames);

  List<AppNamespace> findByAppIdAndIsPublic(String appId, boolean isPublic);

  List<AppNamespace> findByAppId(String appId);

  // Id-keyed paging: returns up to 500 rows with id > the given id, ordered ascending.
  List<AppNamespace> findFirst500ByIdGreaterThanOrderByIdAsc(long id);

  /**
   * Soft-deletes all namespaces of an app by setting {@code IsDeleted} and recording the operator.
   *
   * @return the number of rows updated
   */
  @Modifying
  @Query("UPDATE AppNamespace SET IsDeleted=1,DataChange_LastModifiedBy = ?2 WHERE AppId=?1")
  int batchDeleteByAppId(String appId, String operator);

  /**
   * Soft-deletes a single namespace of an app by setting {@code IsDeleted} and recording the operator.
   *
   * @return the number of rows updated
   */
  @Modifying
  @Query("UPDATE AppNamespace SET IsDeleted=1,DataChange_LastModifiedBy = ?3 WHERE AppId=?1 and Name = ?2")
  int delete(String appId, String namespaceName, String operator);
}
apache-2.0
esl/Smack
smack-extensions/src/main/java/org/jivesoftware/smackx/pubsub/PresenceState.java
868
/** * * Copyright the original author or authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jivesoftware.smackx.pubsub; /** * Defines the possible valid presence states for node subscription via * {@link SubscribeForm#getShowValues()}. * * @author Robin Collier */ public enum PresenceState { chat, online, away, xa, dnd }
apache-2.0
springrichclient/springrcp
spring-richclient-core/src/main/java/org/springframework/richclient/application/mdi/contextmenu/MinimizeAllCommand.java
1677
/*
 * Copyright 2002-2007 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package org.springframework.richclient.application.mdi.contextmenu;

import java.beans.PropertyVetoException;

import javax.swing.JDesktopPane;
import javax.swing.JInternalFrame;

import org.springframework.richclient.command.ActionCommand;

/**
 * Command that iconifies (minimizes) every <code>JInternalFrame</code> currently
 * contained in a given <code>JDesktopPane</code>. Frames that veto the
 * iconification are simply left in their current state.
 *
 * @author Peter De Bruycker
 */
public class MinimizeAllCommand extends ActionCommand {
    private static final String ID = "minimizeAllCommand";

    private final JDesktopPane desktopPane;

    /**
     * Creates the command for the given desktop pane.
     *
     * @param desktopPane the desktop whose internal frames will be minimized
     */
    public MinimizeAllCommand(JDesktopPane desktopPane) {
        super(ID);
        this.desktopPane = desktopPane;
    }

    /**
     * Attempts to iconify each internal frame on the desktop; a
     * {@link PropertyVetoException} from any individual frame is ignored so the
     * remaining frames are still processed.
     */
    protected void doExecuteCommand() {
        for (JInternalFrame frame : desktopPane.getAllFrames()) {
            try {
                frame.setIcon(true);
            }
            catch (PropertyVetoException ignored) {
                // the frame (or a listener) vetoed minimization - skip it
            }
        }
    }
}
apache-2.0
android-ia/platform_tools_idea
platform/platform-impl/src/com/intellij/ide/actions/ResizeToolWindowAction.java
10009
/*
 * Copyright 2000-2010 JetBrains s.r.o.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.intellij.ide.actions;

import com.intellij.openapi.actionSystem.ActionManager;
import com.intellij.openapi.actionSystem.AnAction;
import com.intellij.openapi.actionSystem.AnActionEvent;
import com.intellij.openapi.actionSystem.PlatformDataKeys;
import com.intellij.openapi.project.DumbAware;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.project.ProjectManager;
import com.intellij.openapi.project.ProjectManagerAdapter;
import com.intellij.openapi.ui.ShadowAction;
import com.intellij.openapi.util.registry.Registry;
import com.intellij.openapi.wm.*;
import com.intellij.openapi.wm.ex.ToolWindowEx;
import org.jetbrains.annotations.Nullable;

import javax.swing.*;
import java.awt.*;

/**
 * Base action that grows or shrinks a docked tool window in one direction.
 * Concrete subclasses ({@link Left}, {@link Right}, {@link Up}, {@link Down})
 * decide which edge is stretched and in which direction.
 */
public abstract class ResizeToolWindowAction extends AnAction implements DumbAware {

  private ToolWindow myLastWindow;
  private ToolWindowManager myLastManager;
  // Lazily created "W" label; its preferred size defines one scroll step in pixels.
  protected JLabel myScrollHelper;

  // Window this action is explicitly bound to, or null to act on the active one.
  private ToolWindow myToolWindow;
  private boolean myListenerInstalled;

  protected ResizeToolWindowAction() {
  }

  protected ResizeToolWindowAction(String text) {
    super(text);
  }

  protected ResizeToolWindowAction(String text, String description, Icon icon) {
    super(text, description, icon);
  }

  protected ResizeToolWindowAction(ToolWindow toolWindow, String originalAction, JComponent c) {
    myToolWindow = toolWindow;
    new ShadowAction(this, ActionManager.getInstance().getAction(originalAction), c);
  }

  @Override
  public final void update(AnActionEvent e) {
    Project project = PlatformDataKeys.PROJECT.getData(e.getDataContext());
    if (project == null) {
      setDisabled(e);
      return;
    }

    // Install the project-close hook once per action instance; it drops all
    // cached state when any project closes.
    if (!myListenerInstalled) {
      myListenerInstalled = true;
      ProjectManager.getInstance().addProjectManagerListener(new ProjectManagerAdapter() {
        @Override
        public void projectClosed(Project project) {
          setDisabled(null);
        }
      });
    }

    Component focusOwner = KeyboardFocusManager.getCurrentKeyboardFocusManager().getFocusOwner();
    if (focusOwner == null) {
      setDisabled(e);
      return;
    }

    Window ancestor = SwingUtilities.getWindowAncestor(focusOwner);
    if (!(ancestor instanceof IdeFrame) || ancestor instanceof IdeFrame.Child) {
      setDisabled(e);
      return;
    }

    ToolWindowManager manager = ToolWindowManager.getInstance(project);

    ToolWindow target = myToolWindow;
    if (target == null && manager.getActiveToolWindowId() == null) {
      setDisabled(e);
      return;
    }
    if (target == null) {
      target = manager.getToolWindow(manager.getActiveToolWindowId());
    }
    // Only visible, docked (non-floating), active windows can be resized.
    if (target == null
        || !target.isAvailable()
        || !target.isVisible()
        || target.getType() == ToolWindowType.FLOATING
        || !target.isActive()) {
      setDisabled(e);
      return;
    }

    update(e, target, manager);
    if (e.getPresentation().isEnabled()) {
      myLastWindow = target;
      myLastManager = manager;
    }
    else {
      setDisabled(e);
    }
  }

  private void setDisabled(@Nullable AnActionEvent e) {
    if (e != null) {
      e.getPresentation().setEnabled(false);
    }
    // NOTE(review): this also clears myToolWindow, so an action constructed for a
    // specific window falls back to the active one afterwards — preserved as-is.
    myLastWindow = null;
    myLastManager = null;
    myToolWindow = null;
  }

  protected abstract void update(AnActionEvent event, ToolWindow window, ToolWindowManager mgr);

  @Override
  public final void actionPerformed(AnActionEvent e) {
    actionPerformed(e, myLastWindow, myLastManager);
  }

  /**
   * Walks from the focus owner up to the tool window root, looking for a component
   * that wants to handle the stretch itself; falls back to a default implementation.
   * Returns null when the chosen scrollable does not need scrolling in the offered
   * direction.
   */
  @Nullable
  private ToolWindowScrollable getScrollable(ToolWindow wnd, boolean isHorizontalStretchingOffered) {
    KeyboardFocusManager focusManager = KeyboardFocusManager.getCurrentKeyboardFocusManager();

    ToolWindowScrollable found = null;
    for (Component comp = focusManager.getFocusOwner(); comp != null; comp = comp.getParent()) {
      if (!SwingUtilities.isDescendingFrom(comp, wnd.getComponent())) break;
      if (comp instanceof ToolWindowScrollable) {
        ToolWindowScrollable candidate = (ToolWindowScrollable)comp;
        boolean needed = isHorizontalStretchingOffered
                         ? candidate.isHorizontalScrollingNeeded()
                         : candidate.isVerticalScrollingNeeded();
        if (needed) {
          found = candidate;
          break;
        }
      }
    }

    if (found == null) {
      found = new DefaultToolWindowScrollable();
    }

    if (isHorizontalStretchingOffered && found.isHorizontalScrollingNeeded()) return found;
    if (!isHorizontalStretchingOffered && found.isVerticalScrollingNeeded()) return found;

    return null;
  }

  protected abstract void actionPerformed(AnActionEvent e, ToolWindow wnd, ToolWindowManager mgr);

  protected void stretch(ToolWindow wnd, boolean isHorizontalStretching, boolean isIncrementAction) {
    ToolWindowScrollable scrollable = getScrollable(wnd, isHorizontalStretching);
    if (scrollable == null) return;

    ToolWindowAnchor anchor = wnd.getAnchor();
    if (isHorizontalStretching && !anchor.isHorizontal()) {
      // A left-docked window grows rightwards on increment; a right-docked one grows leftwards.
      incWidth(wnd, scrollable.getNextHorizontalScroll(), (anchor == ToolWindowAnchor.LEFT) == isIncrementAction);
    }
    else if (!isHorizontalStretching && anchor.isHorizontal()) {
      incHeight(wnd, scrollable.getNextVerticalScroll(), (anchor == ToolWindowAnchor.TOP) != isIncrementAction);
    }
  }

  private static void incWidth(ToolWindow wnd, int value, boolean isPositive) {
    ((ToolWindowEx)wnd).stretchWidth(isPositive ? value : -value);
  }

  private static void incHeight(ToolWindow wnd, int value, boolean isPositive) {
    ((ToolWindowEx)wnd).stretchHeight(isPositive ? value : -value);
  }

  /** Stretches the left edge: enabled only for vertically-docked windows. */
  public static class Left extends ResizeToolWindowAction {

    public Left() {
    }

    public Left(String text) {
      super(text);
    }

    public Left(String text, String description, Icon icon) {
      super(text, description, icon);
    }

    public Left(ToolWindow toolWindow, JComponent c) {
      super(toolWindow, "ResizeToolWindowLeft", c);
    }

    @Override
    protected void update(AnActionEvent event, ToolWindow window, ToolWindowManager mgr) {
      event.getPresentation().setEnabled(!window.getAnchor().isHorizontal());
    }

    @Override
    protected void actionPerformed(AnActionEvent e, ToolWindow wnd, ToolWindowManager mgr) {
      stretch(wnd, true, false);
    }
  }

  /** Stretches the right edge: enabled only for vertically-docked windows. */
  public static class Right extends ResizeToolWindowAction {

    public Right() {
    }

    public Right(String text) {
      super(text);
    }

    public Right(String text, String description, Icon icon) {
      super(text, description, icon);
    }

    public Right(ToolWindow toolWindow, JComponent c) {
      super(toolWindow, "ResizeToolWindowRight", c);
    }

    @Override
    protected void update(AnActionEvent event, ToolWindow window, ToolWindowManager mgr) {
      event.getPresentation().setEnabled(!window.getAnchor().isHorizontal());
    }

    @Override
    protected void actionPerformed(AnActionEvent e, ToolWindow wnd, ToolWindowManager mgr) {
      stretch(wnd, true, true);
    }
  }

  /** Stretches the top edge: enabled only for horizontally-docked windows. */
  public static class Up extends ResizeToolWindowAction {

    public Up() {
    }

    public Up(String text) {
      super(text);
    }

    public Up(String text, String description, Icon icon) {
      super(text, description, icon);
    }

    public Up(ToolWindow toolWindow, JComponent c) {
      super(toolWindow, "ResizeToolWindowUp", c);
    }

    @Override
    protected void update(AnActionEvent event, ToolWindow window, ToolWindowManager mgr) {
      event.getPresentation().setEnabled(window.getAnchor().isHorizontal());
    }

    @Override
    protected void actionPerformed(AnActionEvent e, ToolWindow wnd, ToolWindowManager mgr) {
      stretch(wnd, false, true);
    }
  }

  /** Stretches the bottom edge: enabled only for horizontally-docked windows. */
  public static class Down extends ResizeToolWindowAction {

    public Down() {
    }

    public Down(String text) {
      super(text);
    }

    public Down(String text, String description, Icon icon) {
      super(text, description, icon);
    }

    public Down(ToolWindow toolWindow, JComponent c) {
      super(toolWindow, "ResizeToolWindowDown", c);
    }

    @Override
    protected void update(AnActionEvent event, ToolWindow window, ToolWindowManager mgr) {
      event.getPresentation().setEnabled(window.getAnchor().isHorizontal());
    }

    @Override
    protected void actionPerformed(AnActionEvent e, ToolWindow wnd, ToolWindowManager mgr) {
      stretch(wnd, false, false);
    }
  }

  /**
   * Fallback scrollable: always willing to scroll; step sizes come from the
   * registry values scaled by the reference character size.
   */
  private class DefaultToolWindowScrollable implements ToolWindowScrollable {

    public boolean isHorizontalScrollingNeeded() {
      return true;
    }

    public int getNextHorizontalScroll() {
      return getReferenceSize().width * Registry.intValue("ide.windowSystem.hScrollChars");
    }

    public boolean isVerticalScrollingNeeded() {
      return true;
    }

    public int getNextVerticalScroll() {
      return getReferenceSize().height * Registry.intValue("ide.windowSystem.vScrollChars");
    }
  }

  private Dimension getReferenceSize() {
    if (myScrollHelper == null) {
      // Swing components may only be created on the EDT; off-EDT callers get a stub size.
      if (SwingUtilities.isEventDispatchThread()) {
        myScrollHelper = new JLabel("W");
      }
      else {
        return new Dimension(1, 1);
      }
    }
    return myScrollHelper.getPreferredSize();
  }
}
apache-2.0
android-ia/platform_tools_idea
platform/platform-impl/src/com/intellij/ide/ui/laf/darcula/DarculaWelcomeScreenLabelUI.java
1834
/*
 * Copyright 2000-2012 JetBrains s.r.o.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.intellij.ide.ui.laf.darcula;

import com.intellij.ui.Gray;
import com.intellij.util.ui.UIUtil;
import sun.swing.SwingUtilities2;

import javax.swing.*;
import javax.swing.plaf.LabelUI;
import javax.swing.plaf.basic.BasicLabelUI;
import java.awt.*;

/**
 * Label UI for the Darcula welcome screen: text is painted with a 1px
 * embossed shadow, and disabled labels are not painted at all.
 *
 * @author Konstantin Bulenkov
 */
class DarculaWelcomeScreenLabelUI extends BasicLabelUI {

  @SuppressWarnings({"MethodOverridesStaticMethodOfSuperclass", "UnusedDeclaration"})
  public static LabelUI createUI(JComponent c) {
    return new DarculaWelcomeScreenLabelUI();
  }

  @Override
  public void paint(Graphics g, JComponent c) {
    // Disabled labels are intentionally invisible on the welcome screen.
    if (!c.isEnabled()) return;
    super.paint(g, c);
  }

  @Override
  protected void paintEnabledText(JLabel l, Graphics g, String s, int x, int y) {
    // Emboss effect: a translucent shadow one pixel below, then the text itself.
    // The shadow color depends on whether the label sits on the panel background.
    boolean onPanelBackground = l.getForeground().equals(UIUtil.getPanelBackground());
    g.setColor(onPanelBackground ? Gray._255.withAlpha(60) : Gray._0.withAlpha(150));
    SwingUtilities2.drawStringUnderlineCharAt(l, g, s, -1, x, y + 1);
    g.setColor(l.getForeground());
    SwingUtilities2.drawStringUnderlineCharAt(l, g, s, -1, x, y);
  }

  @Override
  public Dimension getPreferredSize(JComponent c) {
    // One extra pixel of height to make room for the shadow line.
    final Dimension size = super.getPreferredSize(c);
    return new Dimension(size.width, size.height + 1);
  }
}
apache-2.0
papicella/snappy-store
gemfirexd/core/src/main/java/com/pivotal/gemfirexd/internal/impl/sql/execute/DropGatewaySenderConstantAction.java
5270
/* * Copyright (c) 2010-2015 Pivotal Software, Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.pivotal.gemfirexd.internal.impl.sql.execute; import java.util.List; import java.util.Set; import com.gemstone.gemfire.cache.wan.GatewaySender; import com.pivotal.gemfirexd.internal.engine.Misc; import com.pivotal.gemfirexd.internal.engine.GfxdConstants; import com.pivotal.gemfirexd.internal.engine.store.GemFireContainer; import com.pivotal.gemfirexd.internal.engine.store.ServerGroupUtils; import com.pivotal.gemfirexd.internal.iapi.error.StandardException; import com.pivotal.gemfirexd.internal.iapi.reference.SQLState; import com.pivotal.gemfirexd.internal.iapi.sql.Activation; import com.pivotal.gemfirexd.internal.iapi.sql.conn.LanguageConnectionContext; import com.pivotal.gemfirexd.internal.iapi.sql.dictionary.SchemaDescriptor; import com.pivotal.gemfirexd.internal.iapi.sql.execute.ExecIndexRow; import com.pivotal.gemfirexd.internal.iapi.store.access.TransactionController; import com.pivotal.gemfirexd.internal.iapi.types.SQLVarchar; import com.pivotal.gemfirexd.internal.impl.sql.catalog.DataDictionaryImpl; import com.pivotal.gemfirexd.internal.impl.sql.catalog.TabInfoImpl; import com.pivotal.gemfirexd.internal.shared.common.sanity.SanityManager; public class DropGatewaySenderConstantAction extends DDLConstantAction { final String id; final boolean onlyIfExists; DropGatewaySenderConstantAction(String id, boolean 
onlyIfExists) { this.id = id; this.onlyIfExists = onlyIfExists; } @Override public boolean isDropIfExists() { return onlyIfExists; } // Override the getSchemaName/getObjectName to enable // DDL conflation of CREATE and DROP GATEWAYSENDER statements. @Override public final String getSchemaName() { // gateways have no schema, so return 'SYS' return SchemaDescriptor.STD_SYSTEM_SCHEMA_NAME; } @Override public final String getTableName() { return CreateGatewaySenderConstantAction.REGION_PREFIX_FOR_CONFLATION + id; } @Override public final boolean isDropStatement() { return true; } @Override public void executeConstantAction(Activation activation) throws StandardException { int rowsDeleted = 0; // If this node is not hosting data, return success, nothing to do if (!ServerGroupUtils.isDataStore()) { return; } // Check if GATEWAYSENDER is in use by a table List<GemFireContainer> containers = Misc.getMemStore().getAllContainers(); for (GemFireContainer container : containers) { if (container.getRegion() != null && container.isApplicationTable()) { Set<String> senderIds = container.getRegionAttributes() .getGatewaySenderIds(); if (senderIds != null && !senderIds.isEmpty() && senderIds.contains(id)) { throw StandardException .newException(SQLState.LANG_PROVIDER_HAS_DEPENDENT_OBJECT, "DROP", "GatewaySender " + id, "table", container.getQualifiedTableName()); } } } // Drop GATEWAYSENDER from catalog LanguageConnectionContext lcc = activation.getLanguageConnectionContext(); DataDictionaryImpl dd = (DataDictionaryImpl)lcc.getDataDictionary(); TransactionController tc = lcc.getTransactionExecute(); dd.startWriting(lcc); ExecIndexRow keyRow = dd.getExecutionFactory().getIndexableRow(1); TabInfoImpl ti = dd .getNonCoreTI(DataDictionaryImpl.GATEWAYSENDERS_CATALOG_NUM); keyRow.setColumn(1, new SQLVarchar(id)); rowsDeleted = ti.deleteRow(tc, keyRow, 0); if (rowsDeleted == 0) { // The GATEWAYSENDER wasn't in the catalog in the first place // Throw object-not-found exception if 
(onlyIfExists) { return; } else { throw StandardException.newException( SQLState.LANG_OBJECT_DOES_NOT_EXIST, "DROP GATEWAYSENDER", id); } } SanityManager.DEBUG_PRINT(GfxdConstants.TRACE_CONGLOM, "DropGatewaySender:: removed GatewaySender " + id + " from SYS table"); // Finally, remove the GATEWAYSENDER cache object GatewaySender sender = Misc.getGemFireCache().getGatewaySender(id); if (sender != null) { try { Misc.getGemFireCache().removeGatewaySender(sender); sender.destroy(); } catch (Exception ex) { throw StandardException.newException( SQLState.UNEXPECTED_EXCEPTION_FOR_ASYNC_LISTENER, ex, id, ex.toString()); } } } @Override public boolean isCancellable() { return false; } // OBJECT METHODS @Override public String toString() { return constructToString("DROP GATEWAYSENDER ", id); } }
apache-2.0
antoinesd/weld-core
tests-arquillian/src/test/java/org/jboss/weld/tests/metadata/scanning/redhat/Waldo.java
80
package org.jboss.weld.tests.metadata.scanning.redhat;

/**
 * Deliberately empty class. Presumably exists only as a discovery target for
 * the bean-archive metadata-scanning tests in this package — confirm against
 * the accompanying test configuration.
 */
public class Waldo {
}
apache-2.0
pm-ping/pf-datastore-mongodb
src/com/pingidentity/datastore/MongoDBDatastore.java
4763
package com.pingidentity.datastore;

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.sourceid.saml20.adapter.conf.Configuration;
import org.sourceid.saml20.adapter.conf.SimpleFieldList;

import com.mongodb.BasicDBObject;
import com.mongodb.DBCursor;
import com.mongodb.DBObject;
import com.mongodb.MongoTimeoutException;
import com.pingidentity.sources.CustomDataSourceDriver;
import com.pingidentity.sources.SourceDescriptor;

import static java.util.concurrent.TimeUnit.MILLISECONDS;

/**
 * PingFederate custom data-source driver backed by a MongoDB collection.
 * Supports connectivity testing, attribute retrieval via a simple
 * "attribute = value" filter, and field discovery from a sample document.
 */
public class MongoDBDatastore implements CustomDataSourceDriver {

    // Holds the parsed plugin configuration (collection handle, timeouts, filter key).
    private MongoDBDatastoreConfiguration config = new MongoDBDatastoreConfiguration();

    private final Log log = LogFactory.getLog(this.getClass());

    public MongoDBDatastore() {
    }

    /**
     * Verifies the MongoDB server is reachable.
     *
     * @return true when a probe query executes without error, false otherwise.
     */
    @Override
    public boolean testConnection() {
        log.debug("---[ Testing connectivity to MongoDB ]------");
        try {
            // Any query that completes proves the server is reachable.
            // Fix: the previous version returned false for a reachable but
            // EMPTY collection; emptiness is not a connectivity failure.
            config.mongoCollection.find().limit(1)
                    .maxTime(config.actionTimeout, MILLISECONDS).hasNext();
            return true;
        } catch (MongoTimeoutException ex) {
            log.debug("Caught MTE: " + ex.getMessage());
            return false;
        } catch (Exception e) {
            log.debug("Caught exception: " + e.getMessage());
            return false;
        }
    }

    /**
     * Looks up the first document matching the configured filter and returns the
     * requested attributes (missing attributes map to null).
     *
     * @param attributeNamesToFill attribute names requested by PingFederate
     * @param filterConfiguration  filter fields configured for this lookup
     * @return map of attribute name to value; empty on timeout, no match, or bad filter
     */
    @Override
    public Map<String, Object> retrieveValues(Collection<String> attributeNamesToFill,
                                              SimpleFieldList filterConfiguration) {
        log.debug("---[ Retrieving Values ]------");
        Map<String, Object> returnMap = new HashMap<String, Object>();

        BasicDBObject mongoDBQuery = buildMongoQueryFromFilter(filterConfiguration);
        if (mongoDBQuery == null) {
            // Fix: an unparsable filter used to fall through as find(null), which
            // matches an arbitrary document. Returning no values is safer.
            return returnMap;
        }

        try {
            DBCursor cursor = config.mongoCollection.find(mongoDBQuery).limit(1)
                    .maxTime(config.actionTimeout, MILLISECONDS);
            if (cursor.hasNext()) {
                DBObject entry = cursor.next();
                for (String attribute : attributeNamesToFill) {
                    log.debug("Checking for attribute: " + attribute);
                    if (entry.containsField(attribute)) {
                        log.debug(" - returning value: " + entry.get(attribute));
                        returnMap.put(attribute, entry.get(attribute));
                    } else {
                        log.debug(" - returning value: null");
                        returnMap.put(attribute, null);
                    }
                }
            } else {
                log.info("No object found");
            }
        } catch (MongoTimeoutException ex) {
            log.error("ERROR: Timeout occurred - " + ex.getMessage());
        }
        return returnMap;
    }

    /**
     * Discovers available attribute names by inspecting the keys of one sample
     * document. An empty collection yields an empty list.
     */
    @Override
    public List<String> getAvailableFields() {
        log.debug("---[ Retrieving Available Fields ]------");
        List<String> availableFields = new ArrayList<String>();
        try {
            DBCursor cursor = config.mongoCollection.find().limit(1)
                    .maxTime(config.actionTimeout, MILLISECONDS);
            if (cursor.hasNext()) {
                DBObject schema = cursor.next();
                for (String k : schema.keySet()) {
                    availableFields.add(k);
                }
            }
        } catch (MongoTimeoutException ex) {
            log.error("ERROR: Timeout occurred - " + ex.getMessage());
        }
        return sortList(availableFields);
    }

    /**
     * The getSourceDescriptor method returns the configuration details.
     */
    @Override
    public SourceDescriptor getSourceDescriptor() {
        return config.getSourceDescriptor(this);
    }

    /**
     * The configure method sets the configuration details.
     */
    @Override
    public void configure(Configuration configuration) {
        config.configure(configuration);
    }

    /**
     * Parses the configured filter expression. The grammar is deliberately
     * minimal for now: "attribute = value".
     *
     * @return the query object, or null when the filter is missing or invalid
     */
    private BasicDBObject buildMongoQueryFromFilter(SimpleFieldList filter) {
        log.debug("---[ Building the MongoDB query ]------");
        BasicDBObject mongoQuery = null;
        String rawFilter = filter.getFieldValue(config.filterKey);
        log.debug(" - Raw filter: " + rawFilter);
        // Fix: guard against a null filter value (previously an NPE), and limit
        // the split to 2 so values containing '=' are no longer truncated.
        if (rawFilter != null && rawFilter.contains("=")) {
            String[] filterComponents = rawFilter.split("=", 2);
            String attribute = filterComponents[0].trim();
            String value = filterComponents[1].trim();
            mongoQuery = new BasicDBObject(attribute, value);
        } else {
            log.error("Invalid filter: " + rawFilter);
        }
        return mongoQuery;
    }

    /** Returns a sorted copy of the given collection. */
    private static <T extends Comparable<? super T>> List<T> sortList(Collection<T> c) {
        List<T> list = new ArrayList<T>(c);
        java.util.Collections.sort(list);
        return list;
    }
}
apache-2.0
apache/zest-qi4j
extensions/entitystore-cassandra/src/main/java/org/apache/polygene/entitystore/cassandra/ClusterBuilder.java
3549
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package org.apache.polygene.entitystore.cassandra;

import com.datastax.driver.core.Cluster;
import java.net.InetSocketAddress;
import java.util.Arrays;
import java.util.Collection;
import java.util.stream.Collectors;
import org.apache.polygene.api.mixin.Mixins;

/**
 * Builds the Cassandra {@link Cluster} from the entity-store configuration.
 * Hostnames are a space- or comma-separated list of {@code host[:port]} entries.
 */
@Mixins( ClusterBuilder.DefaultBuilder.class )
public interface ClusterBuilder
{
    String DEFAULT_HOST_PORT = "localhost:9042";

    Cluster build( CassandraEntityStoreConfiguration config );

    class DefaultBuilder
        implements ClusterBuilder
    {
        /** Default CQL port, used when a hostname entry has no ":port" suffix. */
        private static final int DEFAULT_PORT = 9042;

        protected CassandraEntityStoreConfiguration config;

        @Override
        public Cluster build( CassandraEntityStoreConfiguration config )
        {
            this.config = config;
            String clusterName = clusterName( config );
            Collection<InetSocketAddress> connectionPoints = cassandraConnectionPoints();
            Cluster.Builder builder =
                Cluster.builder()
                       .withClusterName( clusterName )
                       .addContactPointsWithPorts( connectionPoints )
                       .withCredentials( username(), password() );
            builder = customConfiguration( builder );
            return builder.build();
        }

        /** Configured cluster name, defaulting to "polygene-cluster". */
        protected String clusterName( CassandraEntityStoreConfiguration config )
        {
            String clusterName = config.clusterName().get();
            if( clusterName == null )
            {
                clusterName = "polygene-cluster";
            }
            return clusterName;
        }

        protected String username()
        {
            return config.username().get();
        }

        protected String password()
        {
            return config.password().get();
        }

        /**
         * Parses the configured host list into socket addresses.
         * Fix: entries without an explicit port previously threw
         * ArrayIndexOutOfBoundsException, as did the empty tokens produced by
         * ", "-separated lists; both are now handled (missing port defaults to 9042).
         */
        protected Collection<InetSocketAddress> cassandraConnectionPoints()
        {
            return Arrays.stream( hostnames().split( "[ ,]" ) )
                         .map( String::trim )
                         .filter( entry -> !entry.isEmpty() )
                         .map( DefaultBuilder::toSocketAddress )
                         .collect( Collectors.toList() );
        }

        /** Converts one "host" or "host:port" entry into an address. */
        private static InetSocketAddress toSocketAddress( String entry )
        {
            int colon = entry.indexOf( ':' );
            if( colon < 0 )
            {
                return new InetSocketAddress( entry, DEFAULT_PORT );
            }
            return new InetSocketAddress( entry.substring( 0, colon ),
                                          Integer.parseInt( entry.substring( colon + 1 ) ) );
        }

        /** Configured host list, defaulting to {@link #DEFAULT_HOST_PORT}. */
        protected String hostnames()
        {
            String hostnames = config.hostnames().get();
            if( hostnames == null )
            {
                hostnames = DEFAULT_HOST_PORT;
            }
            return hostnames;
        }

        /** Extension hook for subclasses to tweak the driver builder. */
        protected Cluster.Builder customConfiguration( Cluster.Builder builder )
        {
            return builder;
        }
    }
}
apache-2.0
gab1one/imagej-ops
src/main/java/net/imagej/ops/special/chain/BCViaBC.java
2607
/* * #%L * ImageJ software for multidimensional image processing and analysis. * %% * Copyright (C) 2014 - 2017 Board of Regents of the University of * Wisconsin-Madison, University of Konstanz and Brian Northan. * %% * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * #L% */ package net.imagej.ops.special.chain; import net.imagej.ops.special.computer.AbstractBinaryComputerOp; import net.imagej.ops.special.computer.BinaryComputerOp; /** * Base class for {@link BinaryComputerOp}s that delegate to other * {@link BinaryComputerOp}s. 
* * @author Curtis Rueden * @param <I1> type of first input * @param <I2> type of second input * @param <O> type of output * @param <DI1> type of first input accepted by the worker op * @param <DI2> type of second input accepted by the worker op * @param <DO> type of output accepted by the worker op */ public abstract class BCViaBC<I1 extends DI1, I2 extends DI2, O extends DO, DI1, DI2, DO> extends AbstractBinaryComputerOp<I1, I2, O> implements DelegatingBinaryOp<I1, I2, O, DI1, DI2, DO, BinaryComputerOp<DI1, DI2, DO>> { private BinaryComputerOp<DI1, DI2, DO> worker; @Override public void initialize() { worker = createWorker(in1(), in2()); } @Override public void compute(final I1 input1, final I2 input2, final O output) { worker.compute(input1, input2, output); } }
bsd-2-clause
gab1one/imagej-ops
src/main/java/net/imagej/ops/imagemoments/centralmoments/DefaultCentralMoment20.java
3347
/* * #%L * ImageJ software for multidimensional image processing and analysis. * %% * Copyright (C) 2014 - 2017 Board of Regents of the University of * Wisconsin-Madison, University of Konstanz and Brian Northan. * %% * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * #L% */ package net.imagej.ops.imagemoments.centralmoments; import net.imagej.ops.Op; import net.imagej.ops.Ops; import net.imagej.ops.imagemoments.AbstractImageMomentOp; import net.imagej.ops.special.chain.RTs; import net.imagej.ops.special.function.UnaryFunctionOp; import net.imglib2.Cursor; import net.imglib2.IterableInterval; import net.imglib2.type.numeric.RealType; import org.scijava.plugin.Plugin; /** * {@link Op} to calculate the {@code imageMoments.centralMoment20}. 
* * @author Daniel Seebacher (University of Konstanz) * @author Christian Dietz (University of Konstanz) * @param <I> input type * @param <O> output type */ @Plugin(type = Ops.ImageMoments.CentralMoment20.class, label = "Image Moment: CentralMoment20") public class DefaultCentralMoment20<I extends RealType<I>, O extends RealType<O>> extends AbstractImageMomentOp<I, O> implements Ops.ImageMoments.CentralMoment20 { private UnaryFunctionOp<IterableInterval<I>, O> moment00Func; private UnaryFunctionOp<IterableInterval<I>, O> moment10Func; @Override public void initialize() { moment00Func = RTs.function(ops(), Ops.ImageMoments.Moment00.class, in()); moment10Func = RTs.function(ops(), Ops.ImageMoments.Moment10.class, in()); } @Override public void compute(final IterableInterval<I> input, final O output) { final double moment00 = moment00Func.calculate(input).getRealDouble(); final double moment10 = moment10Func.calculate(input).getRealDouble(); final double centerX = moment10 / moment00; double centralmoment20 = 0; final Cursor<I> it = input.localizingCursor(); while (it.hasNext()) { it.fwd(); final double x = it.getDoublePosition(0) - centerX; final double val = it.get().getRealDouble(); centralmoment20 += val * x * x; } output.setReal(centralmoment20); } }
bsd-2-clause
NoChanceSD/DarkBot
src/main/java/org/darkstorm/darkbot/mcwrapper/commands/ChatDelayCommand.java
435
package org.darkstorm.darkbot.mcwrapper.commands;

import org.darkstorm.darkbot.mcwrapper.MinecraftBotWrapper;

/**
 * Chat command that sets the bot's delay between chat messages.
 * Usage: chatdelay &lt;delay&gt; (digits only, enforced by the argument pattern).
 */
public class ChatDelayCommand extends AbstractCommand {
	public ChatDelayCommand(MinecraftBotWrapper bot) {
		super(bot, "chatdelay", "Change chat delay", "<delay>", "[0-9]+");
	}

	@Override
	public void execute(String[] args) {
		// Acknowledge first, then apply the new delay (order preserved from the
		// original implementation).
		controller.say("Set chat delay!");
		final int delay = Integer.parseInt(args[0]);
		bot.setMessageDelay(delay);
	}
}
bsd-2-clause
troyel/dhis2-core
dhis-2/dhis-services/dhis-service-core/src/main/java/org/hisp/dhis/color/DefaultColorService.java
2637
package org.hisp.dhis.color; /* * Copyright (c) 2004-2017, University of Oslo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * Neither the name of the HISP project nor the names of its contributors may * be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ import org.hisp.dhis.common.GenericIdentifiableObjectStore; import org.springframework.transaction.annotation.Transactional; /** * @author Lars Helge Overland */ @Transactional public class DefaultColorService implements ColorService { private GenericIdentifiableObjectStore<Color> colorStore; public void setColorStore( GenericIdentifiableObjectStore<Color> colorStore ) { this.colorStore = colorStore; } private GenericIdentifiableObjectStore<ColorSet> colorSetStore; public void setColorSetStore( GenericIdentifiableObjectStore<ColorSet> colorSetStore ) { this.colorSetStore = colorSetStore; } // ------------------------------------------------------------------------- // Dependencies // ------------------------------------------------------------------------- public Color getColor( String uid ) { return colorStore.getByUid( uid ); } public ColorSet getColorSet( String uid ) { return colorSetStore.getByUid( uid ); } }
bsd-3-clause
sh4nth/atlasdb-1
atlasdb-commons/src/main/java/com/palantir/common/base/AbstractBatchingVisitable.java
5296
/**
 * Copyright 2015 Palantir Technologies
 *
 * Licensed under the BSD-3 License (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://opensource.org/licenses/BSD-3-Clause
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.palantir.common.base;

import java.util.Collections;
import java.util.List;

import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;

/**
 * Skeletal {@link BatchingVisitable} implementation. Subclasses only implement
 * {@link #batchAcceptSizeHint(int, ConsistentVisitor)}; this class enforces the
 * batch-size contract: every page handed to the caller's visitor has exactly
 * {@code batchSize} elements, except possibly the final one.
 */
public abstract class AbstractBatchingVisitable<T> implements BatchingVisitable<T> {

    @Override
    final public <K extends Exception> boolean batchAccept(int batchSize,
            AbortingVisitor<? super List<T>, K> v) throws K {
        Preconditions.checkArgument(batchSize > 0);

        if (v instanceof ConsistentVisitor) {
            // Already wrapped (e.g. a nested batchAccept): reuse the wrapper so
            // its buffered elements are not re-chunked.
            @SuppressWarnings("unchecked")
            ConsistentVisitor<T, K> consistent = (ConsistentVisitor<T, K>) v;
            Preconditions.checkState(consistent.visitorAlwaysReturnedTrue,
                    "passed a visitor that has already said stop");
            batchAcceptSizeHint(batchSize, consistent);
            return consistent.visitorAlwaysReturnedTrue;
        }

        ConsistentVisitor<T, K> consistent =
                ConsistentVisitor.create(batchSize, v);
        batchAcceptSizeHint(batchSize, consistent);

        // Flush the leftover short page, if the delegate has not aborted.
        if (consistent.visitorAlwaysReturnedTrue && !consistent.buffer.isEmpty()) {
            Preconditions.checkState(consistent.buffer.size() < batchSize);
            return v.visit(Collections.unmodifiableList(consistent.buffer));
        }
        return consistent.visitorAlwaysReturnedTrue;
    }

    /**
     * The batch size passed to this method is purely a hint: the implementation
     * may page the underlying data however it likes. Batch-size consistency is
     * already taken care of by the {@link ConsistentVisitor}.
     */
    protected abstract <K extends Exception> void batchAcceptSizeHint(int batchSizeHint,
            ConsistentVisitor<T, K> v) throws K;

    /**
     * Wrapper that buffers incoming elements so the delegate visitor only ever
     * sees full pages; the single short tail page is flushed by
     * {@link AbstractBatchingVisitable#batchAccept}.
     */
    protected final static class ConsistentVisitor<T, K extends Exception>
            implements AbortingVisitor<List<T>, K> {
        final int batchSize;
        final AbortingVisitor<? super List<T>, K> v;

        List<T> buffer = Lists.newArrayList();
        boolean visitorAlwaysReturnedTrue = true;

        private ConsistentVisitor(int batchSize, AbortingVisitor<? super List<T>, K> av) {
            Preconditions.checkArgument(batchSize > 0);
            this.batchSize = batchSize;
            this.v = Preconditions.checkNotNull(av);
        }

        static <T, K extends Exception> ConsistentVisitor<T, K> create(int batchSize,
                AbortingVisitor<? super List<T>, K> v) {
            return new ConsistentVisitor<T, K>(batchSize, v);
        }

        public boolean visitOne(T item) throws K {
            return visit(ImmutableList.of(item));
        }

        @Override
        public boolean visit(List<T> list) throws K {
            if (!visitorAlwaysReturnedTrue) {
                throw new IllegalStateException("Cannot keep visiting if visitor returns false.");
            }
            // Fast path: nothing buffered and exactly one full page — pass it straight through.
            if (buffer.isEmpty() && list.size() == batchSize) {
                return visitBufferWithDelegate(Collections.unmodifiableList(list));
            }
            buffer.addAll(list);
            return buffer.size() < batchSize || processBufferBatches();
        }

        private boolean visitBufferWithDelegate(List<T> page) throws K {
            boolean keepGoing = v.visit(page);
            visitorAlwaysReturnedTrue &= keepGoing;
            return keepGoing;
        }

        private boolean processBufferBatches() throws K {
            List<List<T>> pages = Lists.partition(buffer, batchSize);
            for (List<T> page : pages) {
                if (page.size() != batchSize) {
                    continue; // the short tail stays buffered
                }
                if (!visitBufferWithDelegate(Collections.unmodifiableList(page))) {
                    return false;
                }
            }
            // Keep only the short tail (if any) for the next round.
            List<T> tail = pages.get(pages.size() - 1);
            if (tail.size() == batchSize) {
                buffer = Lists.newArrayList();
            } else {
                buffer = Lists.newArrayList(tail);
            }
            return true;
        }
    }
}
bsd-3-clause
arthurgwatidzo/dhis2-android-sdk
models/src/main/java/org/hisp/dhis/android/sdk/models/organisationunit/OrganisationUnit.java
2491
/*
 * Copyright (c) 2015, University of Oslo
 *
 * All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 * Neither the name of the HISP project nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
package org.hisp.dhis.android.sdk.models.organisationunit;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;

import org.hisp.dhis.android.sdk.models.common.base.BaseIdentifiableObject;

/**
 * A node in the DHIS2 organisation-unit hierarchy, deserialized from the
 * server's JSON representation. Unknown JSON properties are silently ignored.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
public final class OrganisationUnit extends BaseIdentifiableObject {

    // Display label of this organisation unit.
    @JsonProperty("label")
    private String label;

    // Depth of this unit in the hierarchy (root is level 1 by DHIS2 convention
    // — presumably; confirm against the server API).
    @JsonProperty("level")
    private int level;

    // Identifier of the parent organisation unit, if any.
    @JsonProperty("parent")
    private String parent;

    /** @return the display label of this unit. */
    public String getLabel() {
        return label;
    }

    /** @return the hierarchy level of this unit. */
    public int getLevel() {
        return level;
    }

    /** @return the parent unit's identifier, or {@code null} for a root unit. */
    public String getParent() {
        return parent;
    }

    /** Sets the display label of this unit. */
    public void setLabel(String label) {
        this.label = label;
    }

    /** Sets the hierarchy level of this unit. */
    public void setLevel(int level) {
        this.level = level;
    }

    /** Sets the parent unit's identifier. */
    public void setParent(String parent) {
        this.parent = parent;
    }
}
bsd-3-clause
dhis2/dhis2-core
dhis-2/dhis-web/dhis-web-commons/src/main/java/org/hisp/dhis/i18n/action/I18nAction.java
6142
/* * Copyright (c) 2004-2022, University of Oslo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * Neither the name of the HISP project nor the names of its contributors may * be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ package org.hisp.dhis.i18n.action; import static org.hisp.dhis.common.IdentifiableObjectUtils.CLASS_ALIAS; import java.util.ArrayList; import java.util.Hashtable; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.stream.Collectors; import org.hisp.dhis.common.IdentifiableObject; import org.hisp.dhis.common.IdentifiableObjectManager; import org.hisp.dhis.i18n.I18nLocaleService; import org.hisp.dhis.schema.Schema; import org.hisp.dhis.schema.SchemaService; import org.hisp.dhis.user.UserSettingKey; import org.hisp.dhis.user.UserSettingService; import org.hisp.dhis.util.TranslationUtils; import org.springframework.beans.factory.annotation.Autowired; import com.opensymphony.xwork2.Action; /** * @author Oyvind Brucker * @version $Id$ * @modifier Dang Duy Hieu * @since 2010-03-24 */ public class I18nAction implements Action { private String className; private String uid; private String returnUrl; private String message; private Locale currentLocale; private List<Locale> availableLocales = new ArrayList<>(); private Map<String, String> translations = new Hashtable<>(); private Map<String, String> referenceTranslations = new Hashtable<>(); private List<String> propertyNames = new ArrayList<>(); // ------------------------------------------------------------------------- // Dependencies // ------------------------------------------------------------------------- private UserSettingService userSettingService; public void setUserSettingService( UserSettingService userSettingService ) { this.userSettingService = userSettingService; } private IdentifiableObjectManager identifiableObjectManager; public void setIdentifiableObjectManager( IdentifiableObjectManager identifiableObjectManager ) { this.identifiableObjectManager = identifiableObjectManager; } @Autowired private I18nLocaleService i18nLocaleService; @Autowired private SchemaService schemaService; // ------------------------------------------------------------------------- // Input 
// ------------------------------------------------------------------------- public void setClassName( String className ) { this.className = className; } public void setUid( String uid ) { this.uid = uid; } public void setReturnUrl( String returnUrl ) { this.returnUrl = returnUrl; } public void setMessage( String message ) { this.message = message; } // ------------------------------------------------------------------------- // Output // ------------------------------------------------------------------------- public String getClassName() { return className; } public String getUid() { return uid; } public String getReturnUrl() { return returnUrl; } public String getMessage() { return message; } public Locale getCurrentLocale() { return currentLocale; } public List<Locale> getAvailableLocales() { return availableLocales; } public Map<String, String> getReferenceTranslations() { return referenceTranslations; } public Map<String, String> getTranslations() { return translations; } public List<String> getPropertyNames() { return propertyNames; } // ------------------------------------------------------------------------- // Action implementation // ------------------------------------------------------------------------- @Override public String execute() throws Exception { className = className != null && CLASS_ALIAS.containsKey( className ) ? 
CLASS_ALIAS.get( className ) : className; currentLocale = (Locale) userSettingService.getUserSetting( UserSettingKey.DB_LOCALE ); availableLocales = i18nLocaleService.getAllLocales(); IdentifiableObject object = identifiableObjectManager.getObject( uid, className ); translations = TranslationUtils.convertTranslations( object.getTranslations(), currentLocale ); Schema schema = schemaService.getSchema( object.getClass() ); referenceTranslations = TranslationUtils.getObjectPropertyValues( schema, object ); propertyNames = schema.getTranslatableProperties().stream().map( p -> p.getName() ) .collect( Collectors.toList() ); return SUCCESS; } }
bsd-3-clause
ric2b/Vivaldi-browser
chromium/chrome/android/javatests/src/org/chromium/chrome/browser/ntp/IncognitoDescriptionViewRenderTest.java
2524
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package org.chromium.chrome.browser.ntp;

import android.app.Activity;
import android.view.View;

import androidx.test.filters.MediumTest;

import org.junit.Rule;
import org.junit.Test;
import org.junit.runner.RunWith;

import org.chromium.base.test.params.ParameterAnnotations;
import org.chromium.base.test.params.ParameterAnnotations.UseRunnerDelegate;
import org.chromium.base.test.params.ParameterSet;
import org.chromium.base.test.params.ParameterizedRunner;
import org.chromium.base.test.util.Feature;
import org.chromium.chrome.R;
import org.chromium.chrome.test.ChromeJUnit4RunnerDelegate;
import org.chromium.chrome.test.util.ChromeRenderTestRule;
import org.chromium.content_public.browser.test.util.TestThreadUtils;
import org.chromium.ui.test.util.DummyUiActivityTestCase;
import org.chromium.ui.test.util.NightModeTestUtils;

import java.io.IOException;
import java.util.List;

/**
 * Render test of the incognito description shown on the incognito NTP.
 * Runs once per night-mode state (light and dark) via the parameterized runner
 * and compares the rendered view against golden images.
 */
@RunWith(ParameterizedRunner.class)
@UseRunnerDelegate(ChromeJUnit4RunnerDelegate.class)
public class IncognitoDescriptionViewRenderTest extends DummyUiActivityTestCase {
    // One parameter set per night-mode state; the runner constructs the test
    // class once for each set.
    @ParameterAnnotations.ClassParameter
    private static List<ParameterSet> sClassParams =
            new NightModeTestUtils.NightModeParams().getParameters();

    // Rule that performs the golden-image comparison; bump setRevision() when
    // the expected rendering legitimately changes.
    @Rule
    public ChromeRenderTestRule mRenderTestRule =
            ChromeRenderTestRule.Builder.withPublicCorpus().setRevision(1).build();

    public IncognitoDescriptionViewRenderTest(boolean nightModeEnabled) {
        // Apply the night-mode state to both the dummy activity and the
        // render rule so goldens are matched against the right variant.
        NightModeTestUtils.setUpNightModeForDummyUiActivity(nightModeEnabled);
        mRenderTestRule.setNightModeEnabled(nightModeEnabled);
    }

    @Override
    public void setUpTest() throws Exception {
        super.setUpTest();
        // Inflate the layout under test on the UI thread.
        TestThreadUtils.runOnUiThreadBlocking(() -> {
            Activity activity = getActivity();
            activity.setContentView(R.layout.incognito_description_layout);
        });
    }

    @Test
    @MediumTest
    @Feature({"RenderTest"})
    public void testRender_IncognitoDescriptionView() throws IOException {
        View view = getActivity().findViewById(android.R.id.content);
        // Use the incognito NTP background so the goldens match production.
        TestThreadUtils.runOnUiThreadBlocking(() -> {
            view.setBackgroundResource(R.color.ntp_bg_incognito);
        });
        mRenderTestRule.render(view, "incognito_description_view");
    }
}
bsd-3-clause
cmnbroad/Hadoop-BAM
src/main/java/org/seqdoop/hadoop_bam/SequencedFragment.java
14895
// Copyright (C) 2011-2012 CRS4. // // This file is part of Hadoop-BAM. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to // deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or // sell copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. 
package org.seqdoop.hadoop_bam;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;

import java.io.IOException;
import java.io.DataInput;
import java.io.DataOutput;
import java.util.Objects;

import org.seqdoop.hadoop_bam.FormatConstants.BaseQualityEncoding;

/**
 * A sequenced read (fragment) with its base qualities and optional Illumina
 * run metadata (instrument, run, flowcell, lane, tile, coordinates, etc.).
 * <p>
 * Implements Hadoop {@link Writable}; all metadata fields are nullable and
 * serialized conditionally via a presence bitmask.
 */
public class SequencedFragment implements Writable
{
	protected Text sequence = new Text();
	protected Text quality = new Text();

	protected String instrument;
	protected Integer runNumber;
	protected String flowcellId;
	protected Integer lane;
	protected Integer tile;
	protected Integer xpos;
	protected Integer ypos;
	protected Integer read;
	protected Boolean filterPassed;
	protected Integer controlNumber;
	protected String indexSequence;

	// Presence bitmask for serialization of nullable fields.
	protected static final int Instrument_Present    = 0x0001;
	protected static final int RunNumber_Present     = 0x0002;
	protected static final int FlowcellId_Present    = 0x0004;
	protected static final int Lane_Present          = 0x0008;
	protected static final int Tile_Present          = 0x0010;
	protected static final int Xpos_Present          = 0x0020;
	protected static final int Ypos_Present          = 0x0040;
	protected static final int Read_Present          = 0x0080;
	protected static final int FilterPassed_Present  = 0x0100;
	protected static final int ControlNumber_Present = 0x0200;
	protected static final int IndexSequence_Present = 0x0400;

	/** Resets this fragment to the empty state (for object reuse). */
	public void clear()
	{
		sequence.clear();
		quality.clear();
		instrument = null;
		runNumber = null;
		flowcellId = null;
		lane = null;
		tile = null;
		xpos = null;
		ypos = null;
		read = null;
		filterPassed = null;
		controlNumber = null;
		indexSequence = null;
	}

	/**
	 * Get sequence Text object.
	 * Trade encapsulation for efficiency. Here we expose the internal Text
	 * object so that data may be read and written directly from/to it.
	 *
	 * Sequence should always be written using CAPITAL letters and 'N' for unknown bases.
	 */
	public Text getSequence() { return sequence; }

	/**
	 * Get quality Text object.
	 * Trade encapsulation for efficiency. Here we expose the internal Text
	 * object so that data may be read and written directly from/to it.
	 *
	 * Quality should always be in ASCII-encoded Phred+33 format (sanger).
	 */
	public Text getQuality() { return quality; }

	public void setInstrument(String v) { instrument = v; }
	public void setRunNumber(Integer v) { runNumber = v; }
	public void setFlowcellId(String v) { flowcellId = v; }
	public void setLane(Integer v) { lane = v; }
	public void setTile(Integer v) { tile = v; }
	public void setXpos(Integer v) { xpos = v; }
	public void setYpos(Integer v) { ypos = v; }
	public void setRead(Integer v) { read = v; }
	public void setFilterPassed(Boolean v) { filterPassed = v; }
	public void setControlNumber(Integer v) { controlNumber = v; }
	public void setIndexSequence(String v) { indexSequence = v; }

	public void setSequence(Text seq)
	{
		if (seq == null)
			throw new IllegalArgumentException("can't have a null sequence");
		sequence = seq;
	}

	/**
	 * Set quality. Quality should be encoded in Sanger Phred+33 format.
	 */
	public void setQuality(Text qual)
	{
		if (qual == null)
			throw new IllegalArgumentException("can't have a null quality");
		quality = qual;
	}

	public String getInstrument() { return instrument; }
	public Integer getRunNumber() { return runNumber; }
	public String getFlowcellId() { return flowcellId; }
	public Integer getLane() { return lane; }
	public Integer getTile() { return tile; }
	public Integer getXpos() { return xpos; }
	public Integer getYpos() { return ypos; }
	public Integer getRead() { return read; }
	public Boolean getFilterPassed() { return filterPassed; }
	public Integer getControlNumber() { return controlNumber; }
	public String getIndexSequence() { return indexSequence; }

	/**
	 * Recreates a pseudo qseq record with the fields available.
	 * Note: a null filterPassed is rendered as 1 (passed).
	 */
	@Override
	public String toString()
	{
		String delim = "\t";
		StringBuilder builder = new StringBuilder(800);
		builder.append(instrument).append(delim);
		builder.append(runNumber).append(delim);
		builder.append(flowcellId).append(delim);
		builder.append(lane).append(delim);
		builder.append(tile).append(delim);
		builder.append(xpos).append(delim);
		builder.append(ypos).append(delim);
		builder.append(indexSequence).append(delim);
		builder.append(read).append(delim);
		builder.append(sequence).append(delim);
		builder.append(quality).append(delim);
		builder.append((filterPassed == null || filterPassed) ? 1 : 0);
		return builder.toString();
	}

	@Override
	public boolean equals(Object other)
	{
		// instanceof already rejects null.
		if (!(other instanceof SequencedFragment))
			return false;
		SequencedFragment otherFrag = (SequencedFragment) other;
		// sequence and quality can't be null; the rest are nullable.
		return sequence.equals(otherFrag.sequence)
		    && quality.equals(otherFrag.quality)
		    && Objects.equals(instrument, otherFrag.instrument)
		    && Objects.equals(runNumber, otherFrag.runNumber)
		    && Objects.equals(flowcellId, otherFrag.flowcellId)
		    && Objects.equals(lane, otherFrag.lane)
		    && Objects.equals(tile, otherFrag.tile)
		    && Objects.equals(xpos, otherFrag.xpos)
		    && Objects.equals(ypos, otherFrag.ypos)
		    && Objects.equals(read, otherFrag.read)
		    && Objects.equals(filterPassed, otherFrag.filterPassed)
		    && Objects.equals(controlNumber, otherFrag.controlNumber)
		    && Objects.equals(indexSequence, otherFrag.indexSequence);
	}

	@Override
	public int hashCode()
	{
		// Objects.hashCode(x) == (x != null ? x.hashCode() : 0), so the
		// resulting values are identical to the previous hand-rolled version.
		int result = sequence.hashCode();
		result = 31 * result + quality.hashCode();
		result = 31 * result + Objects.hashCode(instrument);
		result = 31 * result + Objects.hashCode(runNumber);
		result = 31 * result + Objects.hashCode(flowcellId);
		result = 31 * result + Objects.hashCode(lane);
		result = 31 * result + Objects.hashCode(tile);
		result = 31 * result + Objects.hashCode(xpos);
		result = 31 * result + Objects.hashCode(ypos);
		result = 31 * result + Objects.hashCode(read);
		result = 31 * result + Objects.hashCode(filterPassed);
		result = 31 * result + Objects.hashCode(controlNumber);
		result = 31 * result + Objects.hashCode(indexSequence);
		return result;
	}

	/**
	 * Convert quality scores in-place between Phred+64 (Illumina) and
	 * Phred+33 (Sanger) encodings.
	 *
	 * @throws FormatException if quality scores are out of the range
	 * allowed by the current encoding.
	 * @throws IllegalArgumentException if current and target quality encodings are the same.
	 */
	public static void convertQuality(Text quality, BaseQualityEncoding current, BaseQualityEncoding target)
	{
		if (current == target)
			throw new IllegalArgumentException("current and target quality encodinds are the same (" + current + ")");

		byte[] bytes = quality.getBytes();
		final int len = quality.getLength();
		final int illuminaSangerDistance = FormatConstants.ILLUMINA_OFFSET - FormatConstants.SANGER_OFFSET;

		if (current == BaseQualityEncoding.Illumina && target == BaseQualityEncoding.Sanger)
		{
			for (int i = 0; i < len; ++i)
			{
				if (bytes[i] < FormatConstants.ILLUMINA_OFFSET || bytes[i] > (FormatConstants.ILLUMINA_OFFSET + FormatConstants.ILLUMINA_MAX))
				{
					throw new FormatException(
					    "base quality score out of range for Illumina Phred+64 format (found " +
					    (bytes[i] - FormatConstants.ILLUMINA_OFFSET) +
					    " but acceptable range is [0," + FormatConstants.ILLUMINA_MAX + "]).\n" +
					    "Maybe qualities are encoded in Sanger format?\n");
				}
				bytes[i] -= illuminaSangerDistance;
			}
		}
		else if (current == BaseQualityEncoding.Sanger && target == BaseQualityEncoding.Illumina)
		{
			for (int i = 0; i < len; ++i)
			{
				if (bytes[i] < FormatConstants.SANGER_OFFSET || bytes[i] > (FormatConstants.SANGER_OFFSET + FormatConstants.SANGER_MAX))
				{
					// Fixed message: Sanger is Phred+33, not Phred+64.
					throw new FormatException(
					    "base quality score out of range for Sanger Phred+33 format (found " +
					    (bytes[i] - FormatConstants.SANGER_OFFSET) +
					    " but acceptable range is [0," + FormatConstants.SANGER_MAX + "]).\n" +
					    "Maybe qualities are encoded in Illumina format?\n");
				}
				bytes[i] += illuminaSangerDistance;
			}
		}
		else
			throw new IllegalArgumentException("unsupported BaseQualityEncoding transformation from " + current + " to " + target);
	}

	/**
	 * Verify that the given quality bytes are within the range allowed for the specified encoding.
	 *
	 * In theory, the Sanger encoding uses the entire
	 * range of characters from ASCII 33 to 126, giving a value range of [0,93]. However, values over 60 are
	 * unlikely in practice, and are more likely to be caused by mistaking a file that uses Illumina encoding
	 * for Sanger. So, we'll enforce the same range supported by Illumina encoding ([0,62]) for Sanger.
	 *
	 * @return -1 if all quality values are within range; otherwise the index
	 * of the first out-of-range value.
	 */
	public static int verifyQuality(Text quality, BaseQualityEncoding encoding)
	{
		// set allowed quality range
		int max, min;

		if (encoding == BaseQualityEncoding.Illumina)
		{
			max = FormatConstants.ILLUMINA_OFFSET + FormatConstants.ILLUMINA_MAX;
			min = FormatConstants.ILLUMINA_OFFSET;
		}
		else if (encoding == BaseQualityEncoding.Sanger)
		{
			max = FormatConstants.SANGER_OFFSET + FormatConstants.SANGER_MAX;
			min = FormatConstants.SANGER_OFFSET;
		}
		else
			throw new IllegalArgumentException("Unsupported base encoding quality " + encoding);

		// verify
		final byte[] bytes = quality.getBytes();
		final int len = quality.getLength();

		for (int i = 0; i < len; ++i)
		{
			if (bytes[i] < min || bytes[i] > max)
				return i;
		}
		return -1;
	}

	public void readFields(DataInput in) throws IOException
	{
		// TODO: reimplement with a serialization system (e.g. Avro)

		// serialization order:
		// 1) sequence
		// 2) quality
		// 3) int with flags indicating which fields are defined (see *_Present flags)
		// 4..end) the rest of the fields

		this.clear();

		sequence.readFields(in);
		quality.readFields(in);

		int presentFlags = WritableUtils.readVInt(in);

		if ( (presentFlags & Instrument_Present) != 0) instrument = WritableUtils.readString(in);
		if ( (presentFlags & RunNumber_Present) != 0) runNumber = WritableUtils.readVInt(in);
		if ( (presentFlags & FlowcellId_Present) != 0) flowcellId = WritableUtils.readString(in);
		if ( (presentFlags & Lane_Present) != 0) lane = WritableUtils.readVInt(in);
		if ( (presentFlags & Tile_Present) != 0) tile = WritableUtils.readVInt(in);
		if ( (presentFlags & Xpos_Present) != 0) xpos = WritableUtils.readVInt(in);
		if ( (presentFlags & Ypos_Present) != 0) ypos = WritableUtils.readVInt(in);
		if ( (presentFlags & Read_Present) != 0) read = WritableUtils.readVInt(in);
		if ( (presentFlags & FilterPassed_Present) != 0) filterPassed = WritableUtils.readVInt(in) == 1;
		if ( (presentFlags & ControlNumber_Present) != 0) controlNumber = WritableUtils.readVInt(in);
		if ( (presentFlags & IndexSequence_Present) != 0) indexSequence = WritableUtils.readString(in);
	}

	public void write(DataOutput out) throws IOException
	{
		// TODO: reimplement with a serialization system (e.g. Avro)

		sequence.write(out);
		quality.write(out);

		// Build the presence bitmask, then write each present field in the
		// same order readFields expects.
		int presentFlags = 0;
		if (instrument != null) presentFlags |= Instrument_Present;
		if (runNumber != null) presentFlags |= RunNumber_Present;
		if (flowcellId != null) presentFlags |= FlowcellId_Present;
		if (lane != null) presentFlags |= Lane_Present;
		if (tile != null) presentFlags |= Tile_Present;
		if (xpos != null) presentFlags |= Xpos_Present;
		if (ypos != null) presentFlags |= Ypos_Present;
		if (read != null) presentFlags |= Read_Present;
		if (filterPassed != null) presentFlags |= FilterPassed_Present;
		if (controlNumber != null) presentFlags |= ControlNumber_Present;
		if (indexSequence != null) presentFlags |= IndexSequence_Present;

		WritableUtils.writeVInt(out, presentFlags);

		if (instrument != null) WritableUtils.writeString(out, instrument);
		if (runNumber != null) WritableUtils.writeVInt(out, runNumber);
		if (flowcellId != null) WritableUtils.writeString(out, flowcellId);
		if (lane != null) WritableUtils.writeVInt(out, lane);
		if (tile != null) WritableUtils.writeVInt(out, tile);
		if (xpos != null) WritableUtils.writeVInt(out, xpos);
		if (ypos != null) WritableUtils.writeVInt(out, ypos);
		if (read != null) WritableUtils.writeVInt(out, read);
		if (filterPassed != null) WritableUtils.writeVInt(out, filterPassed ? 1 : 0);
		if (controlNumber != null) WritableUtils.writeVInt(out, controlNumber);
		if (indexSequence != null) WritableUtils.writeString(out, indexSequence);
	}
}
mit
jianghaolu/azure-sdk-for-java
azure-mgmt-graph-rbac/src/main/java/com/microsoft/azure/management/graphrbac/implementation/ActiveDirectoryApplicationImpl.java
14713
/** * Copyright (c) Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See License.txt in the project root for * license information. */ package com.microsoft.azure.management.graphrbac.implementation; import com.google.common.base.Function; import com.google.common.collect.Lists; import com.google.common.collect.Sets; import com.microsoft.azure.management.apigeneration.LangDefinition; import com.microsoft.azure.management.graphrbac.ActiveDirectoryApplication; import com.microsoft.azure.management.graphrbac.CertificateCredential; import com.microsoft.azure.management.graphrbac.PasswordCredential; import com.microsoft.azure.management.resources.fluentcore.model.implementation.CreatableUpdatableImpl; import rx.Observable; import rx.functions.Func1; import java.net.MalformedURLException; import java.net.URL; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Set; /** * Implementation for ServicePrincipal and its parent interfaces. 
*/ @LangDefinition(ContainerName = "/Microsoft.Azure.Management.Graph.RBAC.Fluent") class ActiveDirectoryApplicationImpl extends CreatableUpdatableImpl<ActiveDirectoryApplication, ApplicationInner, ActiveDirectoryApplicationImpl> implements ActiveDirectoryApplication, ActiveDirectoryApplication.Definition, ActiveDirectoryApplication.Update, HasCredential<ActiveDirectoryApplicationImpl> { private GraphRbacManager manager; private ApplicationCreateParametersInner createParameters; private ApplicationUpdateParametersInner updateParameters; private Map<String, PasswordCredential> cachedPasswordCredentials; private Map<String, CertificateCredential> cachedCertificateCredentials; ActiveDirectoryApplicationImpl(ApplicationInner innerObject, GraphRbacManager manager) { super(innerObject.displayName(), innerObject); this.manager = manager; this.createParameters = new ApplicationCreateParametersInner().withDisplayName(innerObject.displayName()); this.updateParameters = new ApplicationUpdateParametersInner().withDisplayName(innerObject.displayName()); } @Override public boolean isInCreateMode() { return id() == null; } @Override public Observable<ActiveDirectoryApplication> createResourceAsync() { if (createParameters.identifierUris() == null) { createParameters.withIdentifierUris(new ArrayList<String>()); createParameters.identifierUris().add(createParameters.homepage()); } return manager.inner().applications().createAsync(createParameters) .map(innerToFluentMap(this)) .flatMap(new Func1<ActiveDirectoryApplication, Observable<ActiveDirectoryApplication>>() { @Override public Observable<ActiveDirectoryApplication> call(ActiveDirectoryApplication application) { return refreshCredentialsAsync(); } }); } @Override public Observable<ActiveDirectoryApplication> updateResourceAsync() { return manager.inner().applications().patchAsync(id(), updateParameters) .flatMap(new Func1<Void, Observable<ActiveDirectoryApplication>>() { @Override public Observable<ActiveDirectoryApplication> 
call(Void aVoid) { return refreshAsync(); } }); } Observable<ActiveDirectoryApplication> refreshCredentialsAsync() { final Observable<ActiveDirectoryApplication> keyCredentials = manager.inner().applications().listKeyCredentialsAsync(id()) .flatMapIterable(new Func1<List<KeyCredentialInner>, Iterable<KeyCredentialInner>>() { @Override public Iterable<KeyCredentialInner> call(List<KeyCredentialInner> keyCredentialInners) { return keyCredentialInners; } }) .map(new Func1<KeyCredentialInner, CertificateCredential>() { @Override public CertificateCredential call(KeyCredentialInner keyCredentialInner) { return new CertificateCredentialImpl<ActiveDirectoryApplication>(keyCredentialInner); } }) .toMap(new Func1<CertificateCredential, String>() { @Override public String call(CertificateCredential certificateCredential) { return certificateCredential.name(); } }).map(new Func1<Map<String, CertificateCredential>, ActiveDirectoryApplication>() { @Override public ActiveDirectoryApplication call(Map<String, CertificateCredential> stringCertificateCredentialMap) { ActiveDirectoryApplicationImpl.this.cachedCertificateCredentials = stringCertificateCredentialMap; return ActiveDirectoryApplicationImpl.this; } }); final Observable<ActiveDirectoryApplication> passwordCredentials = manager.inner().applications().listPasswordCredentialsAsync(id()) .flatMapIterable(new Func1<List<PasswordCredentialInner>, Iterable<PasswordCredentialInner>>() { @Override public Iterable<PasswordCredentialInner> call(List<PasswordCredentialInner> passwordCredentialInners) { return passwordCredentialInners; } }) .map(new Func1<PasswordCredentialInner, PasswordCredential>() { @Override public PasswordCredential call(PasswordCredentialInner passwordCredentialInner) { return new PasswordCredentialImpl<ActiveDirectoryApplication>(passwordCredentialInner); } }) .toMap(new Func1<PasswordCredential, String>() { @Override public String call(PasswordCredential passwordCredential) { return passwordCredential.name(); 
} }).map(new Func1<Map<String, PasswordCredential>, ActiveDirectoryApplication>() { @Override public ActiveDirectoryApplication call(Map<String, PasswordCredential> stringPasswordCredentialMap) { ActiveDirectoryApplicationImpl.this.cachedPasswordCredentials = stringPasswordCredentialMap; return ActiveDirectoryApplicationImpl.this; } }); return keyCredentials.mergeWith(passwordCredentials).last(); } @Override public Observable<ActiveDirectoryApplication> refreshAsync() { return getInnerAsync() .map(innerToFluentMap(this)) .flatMap(new Func1<ActiveDirectoryApplication, Observable<ActiveDirectoryApplication>>() { @Override public Observable<ActiveDirectoryApplication> call(ActiveDirectoryApplication application) { return refreshCredentialsAsync(); } }); } @Override public String id() { return inner().objectId(); } @Override public String applicationId() { return inner().appId(); } @Override public List<String> applicationPermissions() { if (inner().appPermissions() == null) { return null; } return Collections.unmodifiableList(inner().appPermissions()); } @Override public boolean availableToOtherTenants() { return inner().availableToOtherTenants(); } @Override public Set<String> identifierUris() { if (inner().identifierUris() == null) { return null; } return Collections.unmodifiableSet(Sets.newHashSet(inner().identifierUris())); } @Override public Set<String> replyUrls() { if (inner().replyUrls() == null) { return null; } return Collections.unmodifiableSet(Sets.newHashSet(inner().replyUrls())); } @Override public URL signOnUrl() { try { return new URL(inner().homepage()); } catch (MalformedURLException e) { return null; } } @Override public Map<String, PasswordCredential> passwordCredentials() { if (cachedPasswordCredentials == null) { return null; } return Collections.unmodifiableMap(cachedPasswordCredentials); } @Override public Map<String, CertificateCredential> certificateCredentials() { if (cachedCertificateCredentials == null) { return null; } return 
Collections.unmodifiableMap(cachedCertificateCredentials); } @Override protected Observable<ApplicationInner> getInnerAsync() { return manager.inner().applications().getAsync(id()); } @Override public ActiveDirectoryApplicationImpl withSignOnUrl(String signOnUrl) { if (isInCreateMode()) { createParameters.withHomepage(signOnUrl); } else { updateParameters.withHomepage(signOnUrl); } return withReplyUrl(signOnUrl); } @Override public ActiveDirectoryApplicationImpl withReplyUrl(String replyUrl) { if (isInCreateMode()) { if (createParameters.replyUrls() == null) { createParameters.withReplyUrls(new ArrayList<String>()); } createParameters.replyUrls().add(replyUrl); } else { if (updateParameters.replyUrls() == null) { updateParameters.withReplyUrls(new ArrayList<>(replyUrls())); } updateParameters.replyUrls().add(replyUrl); } return this; } @Override public ActiveDirectoryApplicationImpl withoutReplyUrl(String replyUrl) { if (updateParameters.replyUrls() != null) { updateParameters.replyUrls().remove(replyUrl); } return this; } @Override public ActiveDirectoryApplicationImpl withIdentifierUrl(String identifierUrl) { if (isInCreateMode()) { if (createParameters.identifierUris() == null) { createParameters.withIdentifierUris(new ArrayList<String>()); } createParameters.identifierUris().add(identifierUrl); } else { if (updateParameters.identifierUris() == null) { updateParameters.withIdentifierUris(new ArrayList<>(identifierUris())); } updateParameters.identifierUris().add(identifierUrl); } return this; } @Override public Update withoutIdentifierUrl(String identifierUrl) { if (updateParameters.identifierUris() != null) { updateParameters.identifierUris().remove(identifierUrl); } return this; } @Override @SuppressWarnings("unchecked") public CertificateCredentialImpl defineCertificateCredential(String name) { return new CertificateCredentialImpl<>(name, this); } @Override @SuppressWarnings("unchecked") public PasswordCredentialImpl definePasswordCredential(String name) { 
return new PasswordCredentialImpl<>(name, this); } @Override public ActiveDirectoryApplicationImpl withoutCredential(final String name) { if (cachedPasswordCredentials.containsKey(name)) { cachedPasswordCredentials.remove(name); updateParameters.withPasswordCredentials(Lists.transform( new ArrayList<>(cachedPasswordCredentials.values()), new Function<PasswordCredential, PasswordCredentialInner>() { @Override public PasswordCredentialInner apply(PasswordCredential input) { return input.inner(); } })); } else if (cachedCertificateCredentials.containsKey(name)) { cachedCertificateCredentials.remove(name); updateParameters.withKeyCredentials(Lists.transform( new ArrayList<>(cachedCertificateCredentials.values()), new Function<CertificateCredential, KeyCredentialInner>() { @Override public KeyCredentialInner apply(CertificateCredential input) { return input.inner(); } })); } return this; } @Override public ActiveDirectoryApplicationImpl withCertificateCredential(CertificateCredentialImpl<?> credential) { if (isInCreateMode()) { if (createParameters.keyCredentials() == null) { createParameters.withKeyCredentials(new ArrayList<KeyCredentialInner>()); } createParameters.keyCredentials().add(credential.inner()); } else { if (updateParameters.keyCredentials() == null) { updateParameters.withKeyCredentials(new ArrayList<KeyCredentialInner>()); } updateParameters.keyCredentials().add(credential.inner()); } return this; } @Override public ActiveDirectoryApplicationImpl withPasswordCredential(PasswordCredentialImpl<?> credential) { if (isInCreateMode()) { if (createParameters.passwordCredentials() == null) { createParameters.withPasswordCredentials(new ArrayList<PasswordCredentialInner>()); } createParameters.passwordCredentials().add(credential.inner()); } else { if (updateParameters.passwordCredentials() == null) { updateParameters.withPasswordCredentials(new ArrayList<PasswordCredentialInner>()); } updateParameters.passwordCredentials().add(credential.inner()); } return this; 
} @Override public ActiveDirectoryApplicationImpl withAvailableToOtherTenants(boolean availableToOtherTenants) { if (isInCreateMode()) { createParameters.withAvailableToOtherTenants(availableToOtherTenants); } else { updateParameters.withAvailableToOtherTenants(availableToOtherTenants); } return this; } @Override public GraphRbacManager manager() { return manager; } }
mit
110035/kissy
tools/module-compiler/tests/com/google/javascript/jscomp/OptimizeReturnsTest.java
7564
/* * Copyright 2009 The Closure Compiler Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.javascript.jscomp; import com.google.common.base.Joiner; /** * Tests OptimizeReturns * @author johnlenz@google.com (John Lenz) */ public class OptimizeReturnsTest extends CompilerTestCase { @Override protected CompilerPass getProcessor(Compiler compiler) { return new OptimizeReturns(compiler); } private static final String EXTERNAL_SYMBOLS = "var extern;extern.externalMethod"; public OptimizeReturnsTest() { super(EXTERNAL_SYMBOLS); } @Override protected int getNumRepetitions() { // run pass once. return 1; } @Override protected void setUp() throws Exception { super.setUp(); super.enableLineNumberCheck(true); disableTypeCheck(); } /** * Combine source strings using '\n' as the separator. */ private static String newlineJoin(String ... 
parts) { return Joiner.on("\n").join(parts); } public void testNoRewriteUsedResult1() throws Exception { String source = newlineJoin( "function a(){return 1}", "var x = a()"); testSame(source); } public void testNoRewriteUsedResult2() throws Exception { String source = newlineJoin( "var a = function(){return 1}", "a(); var b = a()"); testSame(source); } public void testRewriteUnusedResult1() throws Exception { String source = newlineJoin( "function a(){return 1}", "a()"); String expected = newlineJoin( "function a(){return}", "a()"); test(source, expected); } public void testRewriteUnusedResult2() throws Exception { String source = newlineJoin( "var a; a = function(){return 1}", "a()"); String expected = newlineJoin( "var a; a = function(){return}", "a()"); test(source, expected); } public void testRewriteUnusedResult3() throws Exception { String source = newlineJoin( "var a = function(){return 1}", "a()"); String expected = newlineJoin( "var a = function(){return}", "a()"); test(source, expected); } public void testRewriteUnusedResult4a() throws Exception { String source = newlineJoin( "var a = function(){return a()}", "a()"); testSame(source); } public void testRewriteUnusedResult4b() throws Exception { String source = newlineJoin( "var a = function b(){return b()}", "a()"); testSame(source); } public void testRewriteUnusedResult4c() throws Exception { String source = newlineJoin( "function a(){return a()}", "a()"); testSame(source); } public void testRewriteUnusedResult5() throws Exception { String source = newlineJoin( "function a(){}", "a.prototype.foo = function(args) {return args};", "var o = new a;", "o.foo()"); String expected = newlineJoin( "function a(){}", "a.prototype.foo = function(args) {return};", "var o = new a;", "o.foo()"); test(source, expected); } public void testRewriteUnusedResult6() throws Exception { String source = newlineJoin( "function a(){return (g = 1)}", "a()"); String expected = newlineJoin( "function a(){g = 1;return}", "a()"); 
test(source, expected); } public void testRewriteUnusedResult7a() throws Exception { String source = newlineJoin( "function a() { return 1 }", "function b() { return a() }", "function c() { return b() }", "c();"); // TODO(johnlenz): It would be better if we do some kind of fixed point. String expected = newlineJoin( "function a() { return 1 }", "function b() { return a() }", "function c() { b(); return }", "c();"); test(source, expected); } public void testRewriteUnusedResult7b() throws Exception { String source = newlineJoin( "c();", "function c() { return b() }", "function b() { return a() }", "function a() { return 1 }"); // TODO(johnlenz): It would be better if we do some kind of fixed point. String expected = newlineJoin( "c();", "function c() { b(); return }", "function b() { a(); return }", "function a() { return }"); test(source, expected); } public void testRewriteUnusedResult8() throws Exception { String source = newlineJoin( "function a() { return c() }", "function b() { return a() }", "function c() { return b() }", "c();"); testSame(source); } public void testNoRewriteObjLit1() throws Exception { String source = newlineJoin( "var a = {b:function(){return 1;}}", "for(c in a) (a[c])();", "a.b()"); testSame(source); } public void testNoRewriteObjLit2() throws Exception { String source = newlineJoin( "var a = {b:function fn(){return 1;}}", "for(c in a) (a[c])();", "a.b()"); testSame(source); } public void testNoRewriteArrLit() throws Exception { String source = newlineJoin( "var a = [function(){return 1;}]", "(a[0])();"); testSame(source); } public void testPrototypeMethod1() throws Exception { String source = newlineJoin( "function c(){}", "c.prototype.a = function(){return 1}", "var x = new c;", "x.a()"); String result = newlineJoin( "function c(){}", "c.prototype.a = function(){return}", "var x = new c;", "x.a()"); test(source, result); } public void testPrototypeMethod2() throws Exception { String source = newlineJoin( "function c(){}", "c.prototype.a = 
function(){return 1}", "goog.reflect.object({a: 'v'})", "var x = new c;", "x.a()"); testSame(source); } public void testPrototypeMethod3() throws Exception { String source = newlineJoin( "function c(){}", "c.prototype.a = function(){return 1}", "var x = new c;", "for(var key in goog.reflect.object({a: 'v'})){ x[key](); }", "x.a()"); testSame(source); } public void testPrototypeMethod4() throws Exception { String source = newlineJoin( "function c(){}", "c.prototype.a = function(){return 1}", "var x = new c;", "for(var key in goog.reflect.object({a: 'v'})){ x[key](); }"); testSame(source); } public void testCallOrApply() throws Exception { // TODO(johnlenz): Add support for .call and .apply testSame("function a() {return 1}; a.call(new foo);"); testSame("function a() {return 1}; a.apply(new foo);"); } public void testRewriteUseSiteRemoval() throws Exception { String source = newlineJoin( "function a() { return {\"_id\" : 1} }", "a();"); String expected = newlineJoin( "function a() { return }", "a();"); test(source, expected); } }
mit
DDoS/SpongeForge
src/main/java/org/spongepowered/mod/mixin/core/client/gui/MixinGuiOverlayDebug.java
4600
/* * This file is part of Sponge, licensed under the MIT License (MIT). * * Copyright (c) SpongePowered <https://www.spongepowered.org> * Copyright (c) contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ package org.spongepowered.mod.mixin.core.client.gui; import net.minecraft.client.Minecraft; import net.minecraft.client.gui.GuiOverlayDebug; import net.minecraft.entity.Entity; import net.minecraft.util.BlockPos; import net.minecraft.util.MovingObjectPosition; import org.spongepowered.asm.mixin.Final; import org.spongepowered.asm.mixin.Mixin; import org.spongepowered.asm.mixin.Shadow; import org.spongepowered.asm.mixin.injection.At; import org.spongepowered.asm.mixin.injection.Inject; import org.spongepowered.asm.mixin.injection.callback.CallbackInfo; import org.spongepowered.asm.mixin.injection.callback.CallbackInfoReturnable; import org.spongepowered.common.network.message.MessageTrackerDataRequest; import org.spongepowered.common.network.message.SpongeMessageHandler; import org.spongepowered.mod.client.interfaces.IMixinGuiOverlayDebug; import org.spongepowered.mod.client.interfaces.IMixinMinecraft; import java.util.List; @Mixin(GuiOverlayDebug.class) public abstract class MixinGuiOverlayDebug implements IMixinGuiOverlayDebug { private String blockOwner = ""; private String blockNotifier = ""; private BlockPos cursorPos = new BlockPos(0, 0, 0); @Shadow @Final private Minecraft mc; @Shadow public abstract boolean isReducedDebug(); @Inject(method = "<init>", at = @At(value = "RETURN") ) public void onConstructDebugGui(Minecraft mc, CallbackInfo ci) { IMixinMinecraft spongeMc = (IMixinMinecraft) mc; spongeMc.setDebugGui((GuiOverlayDebug) (Object) this); } @Inject(method = "call()Ljava/util/List;", at = @At(value = "RETURN", ordinal = 1)) private void addOwnerInfo(CallbackInfoReturnable<List<String>> cir) { List<String> arraylist = cir.getReturnValue(); if (this.mc.objectMouseOver != null && this.mc.objectMouseOver.typeOfHit == MovingObjectPosition.MovingObjectType.BLOCK && this.mc.objectMouseOver.getBlockPos() != null) { BlockPos blockpos1 = this.mc.objectMouseOver.getBlockPos(); if (!blockpos1.equals(this.cursorPos)) { 
SpongeMessageHandler.getChannel().sendToServer( new MessageTrackerDataRequest(0, -1, blockpos1.getX(), blockpos1.getY(), blockpos1.getZ())); } arraylist.add("Block Owner: " + this.blockOwner); arraylist.add("Block Notifier: " + this.blockNotifier); this.cursorPos = this.mc.objectMouseOver.getBlockPos(); } else if (this.mc.objectMouseOver != null && this.mc.objectMouseOver.typeOfHit == MovingObjectPosition.MovingObjectType.ENTITY) { Entity target = this.mc.objectMouseOver.entityHit; BlockPos blockPos = target.getPosition(); if (!blockPos.equals(this.cursorPos)) { SpongeMessageHandler.getChannel().sendToServer( new MessageTrackerDataRequest(1, target.getEntityId(), blockPos.getX(), blockPos.getY(), blockPos.getZ())); } arraylist.add("Entity Owner: " + this.blockOwner); arraylist.add("Entity Notifier: " + this.blockNotifier); this.cursorPos = blockPos; } } @Override public void setPlayerTrackerData(String owner, String notifier) { this.blockOwner = owner; this.blockNotifier = notifier; } }
mit
tuura/workcraft
workcraft/WorkcraftCore/src/org/workcraft/serialisation/ModelDeserialiser.java
350
package org.workcraft.serialisation; import java.io.InputStream; import org.workcraft.dom.Model; import org.workcraft.exceptions.DeserialisationException; public interface ModelDeserialiser extends SerialFormat { DeserialisationResult deserialise(InputStream is, ReferenceResolver rr, Model underlyingModel) throws DeserialisationException; }
mit
hazendaz/waffle
Source/JNA/waffle-jna/src/test/java/waffle/jaas/UserPrincipalTest.java
3652
/* * MIT License * * Copyright (c) 2010-2021 The Waffle Project Contributors: https://github.com/Waffle/waffle/graphs/contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ package waffle.jaas; import static org.assertj.core.api.Assertions.assertThat; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; /** * The Class UserPrincipalTest. * * @author dblock[at]dblock[dot]org */ class UserPrincipalTest { /** The user principal. */ private UserPrincipal userPrincipal; /** * Equals_other object. */ @Test void equals_otherObject() { Assertions.assertNotEquals("", this.userPrincipal); } /** * Equals_same object. 
*/ @Test void equals_sameObject() { Assertions.assertEquals(this.userPrincipal, this.userPrincipal); } /** * Sets the up. */ @BeforeEach void setUp() { this.userPrincipal = new UserPrincipal("localhost\\Administrator"); } /** * Test equals_ symmetric. */ @Test void testEquals_Symmetric() { final UserPrincipal x = new UserPrincipal("localhost\\Administrator"); final UserPrincipal y = new UserPrincipal("localhost\\Administrator"); Assertions.assertEquals(x, y); Assertions.assertEquals(x.hashCode(), y.hashCode()); } /** * Test is serializable. * * @throws IOException * Signals that an I/O exception has occurred. * @throws ClassNotFoundException * the class not found exception */ @Test @SuppressWarnings("BanSerializableRead") void testIsSerializable() throws IOException, ClassNotFoundException { // serialize final ByteArrayOutputStream out = new ByteArrayOutputStream(); try (final ObjectOutputStream oos = new ObjectOutputStream(out)) { oos.writeObject(this.userPrincipal); } assertThat(out.toByteArray()).isNotEmpty(); // deserialize final InputStream in = new ByteArrayInputStream(out.toByteArray()); final ObjectInputStream ois = new ObjectInputStream(in); final UserPrincipal copy = (UserPrincipal) ois.readObject(); // test Assertions.assertEquals(this.userPrincipal, copy); Assertions.assertEquals(this.userPrincipal.getName(), copy.getName()); } }
mit