repo_name
stringlengths
5
108
path
stringlengths
6
333
size
stringlengths
1
6
content
stringlengths
4
977k
license
stringclasses
15 values
jsampson/mayfly
web/hibernate-demo/FooPersistence.java
691
import java.sql.Connection; import org.hibernate.Session; import org.hibernate.SessionFactory; import org.hibernate.cfg.AnnotationConfiguration; public class FooPersistence { Session session; public FooPersistence(Connection connection) { AnnotationConfiguration configuration = new AnnotationConfiguration(); configuration.setProperty("hibernate.dialect", "MayflyDialect"); configuration.addAnnotatedClass(Foo.class); SessionFactory factory = configuration.buildSessionFactory(); session = factory.openSession(connection); } public Foo getFoo(int id) { return (Foo) session.get(Foo.class, id); } }
apache-2.0
uin3566/Dota2Helper
youkuPlayerOpenSDK/src/main/java/com/youku/player/util/DetailMessage.java
2652
package com.youku.player.util; public interface DetailMessage { public final int LOAD_NEXT_PAGE_SUCCESS = 100; public final int LOAD_NEXT_PAGE_FAILED = 101; public final int REFRESH_SUCCESS = 102; public final int REFRESH_FAILED = 103; public final int GET_SERIES_SUCCESS = 104; public final int GET_VARIETY_SERIES_SUCCESS = 1040; public final int TITLE_RQC_CACHE_LOGIN = 105; public final int LAYOUT_INIT_FINISH = 106; public final int WAIT = 107; public final static int MSG_UP_SUCCESS = 10; public final static int MSG_UP_FAIL = 11; public final static int MSG_DOWN_SUCCESS = 20; public final static int MSG_DOWN_FAIL = 21; public final static int MSG_RQC_FAV_LOG = 201; public final static int MSG_FAV_SUCCESS = 202; public final static int MSG_FAV_FAIL = 203; public final static int MSG_RQC_CACHE_LOGIN = 204; public final static int MSG_RQC_CACHE_LOCAL_BACK = 2041; public final static int GET_LAYOUT_DATA_FAIL = 206; public final static int DETAIL_PLAY_TASK = 207; public final static int SERIES_ITEM_TO_PLAY = 301; public final static int SHOW_CURRENT_PLAY = 302; public final static int SHOW_CACHED_ITEM = 401; public final static int SEEK_TO_POINT = 501; public final static int CACHE_START_DOWNLAOD = 502; public final static int GO_CACHED_LIST = 503; public final static int GO_RELATED_VIDEO = 504; public final static int WEAK_NETWORK = 506; public final static int SHOW_NETWORK_ERROR_DIALOG = 507; public final static int CACHED_ALREADY = 508; public final static int GET_CACHED_LIST = 509; public final static int DOWN_lOAD_SUCCESS = 610; public final static int DOWN_lOAD_FAILED = 611; public final static int GET_HIS_FINISH = 612; public final static int MSG_GET_PLAY_INFO_SUCCESS = 6130; public final static int MSG_GET_PLAY_INFO_FAIL = 6131; public final static int MSG_UPDATE_COMMENT = 6132; public final static int MSG_CANNOT_CACHE = 20120; public final static int MSG_CANNOT_CACHE_VARIETY = 20121; public final static int MSG_VIDEO_PLAY_CHANGE = 2013;// 播放视频更改 public final 
static int UPDATE_CACHE_ITEM = 201304; //用在刷新插件上 public static final int PLUGIN_SHOW_AD_PLAY = 1; public static final int PLUGIN_SHOW_SMALL_PLAYER = 2; public static final int PLUGIN_SHOW_FULLSCREEN_PLAYER = 3; public static final int PLUGIN_SHOW_FULLSCREEN_ENDPAGE = 4; public static final int PLUGIN_SHOW_IMAGE_AD = 5; public static final int PLUGIN_SHOW_INVESTIGATE = 6; public static final int PLUGIN_SHOW_NOT_SET = 7; }
apache-2.0
MandyChen0114/iSight
iSight/app/src/main/java/edu/cmu/supermandy/isight/ui/PagerAdapter.java
1102
package edu.cmu.supermandy.isight.ui; import android.support.v4.app.Fragment; import android.support.v4.app.FragmentManager; import android.support.v4.app.FragmentStatePagerAdapter; /** * Created by Mandy on 4/4/16. */ public class PagerAdapter extends FragmentStatePagerAdapter { int mNumOfTabs; public PagerAdapter(FragmentManager fm, int NumOfTabs) { super(fm); this.mNumOfTabs = NumOfTabs; } @Override public Fragment getItem(int position) { switch (position) { case 0: Fragment_Test tab1 = new Fragment_Test(); return tab1; case 1: Fragment_Quiz tab2 = new Fragment_Quiz(); return tab2; case 2: Fragment_History tab3 = new Fragment_History(); return tab3; case 3: Fragment_Setting tab4 = new Fragment_Setting(); return tab4; default: return null; } } @Override public int getCount() { return mNumOfTabs; } }
apache-2.0
amannm/undertow
core/src/main/java/io/undertow/protocols/ssl/SslConduit.java
42506
/* * JBoss, Home of Professional Open Source. * Copyright 2014 Red Hat, Inc., and individual contributors * as indicated by the @author tags. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.undertow.protocols.ssl; import io.undertow.UndertowLogger; import org.xnio.Buffers; import org.xnio.ChannelListener; import org.xnio.ChannelListeners; import org.xnio.IoUtils; import org.xnio.Pool; import org.xnio.Pooled; import org.xnio.StreamConnection; import org.xnio.XnioIoThread; import org.xnio.XnioWorker; import org.xnio.channels.StreamSinkChannel; import org.xnio.channels.StreamSourceChannel; import org.xnio.conduits.ConduitReadableByteChannel; import org.xnio.conduits.ConduitStreamSinkChannel; import org.xnio.conduits.ConduitStreamSourceChannel; import org.xnio.conduits.ConduitWritableByteChannel; import org.xnio.conduits.Conduits; import org.xnio.conduits.ReadReadyHandler; import org.xnio.conduits.StreamSinkConduit; import org.xnio.conduits.StreamSourceConduit; import org.xnio.conduits.WriteReadyHandler; import javax.net.ssl.SSLEngine; import javax.net.ssl.SSLEngineResult; import javax.net.ssl.SSLException; import javax.net.ssl.SSLSession; import java.io.IOException; import java.io.InterruptedIOException; import java.nio.ByteBuffer; import java.nio.channels.ClosedChannelException; import java.nio.channels.FileChannel; import java.util.ArrayList; import java.util.List; import java.util.concurrent.TimeUnit; import static org.xnio.Bits.allAreClear; import static 
org.xnio.Bits.allAreSet; import static org.xnio.Bits.anyAreSet; /** * @author Stuart Douglas */ public class SslConduit implements StreamSourceConduit, StreamSinkConduit { /** * If this is set we are in the middle of a handshake, and we cannot * read any more data until we have written out our wrap result */ private static final int FLAG_READ_REQUIRES_WRITE = 1; /** * If this is set we are in the process of handshaking and we cannot write any * more data until we have read unwrapped data from the remote peer */ private static final int FLAG_WRITE_REQUIRES_READ = 1 << 1; /** * If reads are resumed. The underlying delegate may not be resumed if a write is required * to make progress. */ private static final int FLAG_READS_RESUMED = 1 << 2; /** * If writes are resumed, the underlying delegate may not be resumed if a read is required */ private static final int FLAG_WRITES_RESUMED = 1 << 3; /** * If there is data in the {@link #dataToUnwrap} buffer, and the last unwrap attempt did not result * in a buffer underflow */ private static final int FLAG_DATA_TO_UNWRAP = 1 << 4; /** * If the user has shutdown reads */ private static final int FLAG_READ_SHUTDOWN = 1 << 5; /** * If the user has shutdown writes */ private static final int FLAG_WRITE_SHUTDOWN = 1 << 6; /** * If the engine has been shut down */ private static final int FLAG_ENGINE_INBOUND_SHUTDOWN = 1 << 7; /** * If the engine has been shut down */ private static final int FLAG_ENGINE_OUTBOUND_SHUTDOWN = 1 << 8; private static final int FLAG_DELEGATE_SINK_SHUTDOWN = 1 << 9; private static final int FLAG_DELEGATE_SOURCE_SHUTDOWN = 1 << 10; private static final int FLAG_IN_HANDSHAKE = 1 << 11; private static final int FLAG_CLOSED = 1 << 12; private static final int FLAG_WRITE_CLOSED = 1 << 13; private static final int FLAG_READ_CLOSED = 1 << 14; public static final ByteBuffer EMPTY_BUFFER = ByteBuffer.allocate(0); private final UndertowSslConnection connection; private final StreamConnection delegate; private final 
SSLEngine engine; private final StreamSinkConduit sink; private final StreamSourceConduit source; private final Pool<ByteBuffer> bufferPool; private final Runnable handshakeCallback; private int state = 0; private volatile int outstandingTasks = 0; /** * Data that has been wrapped and is ready to be sent to the underlying channel. * * This will be null if there is no data */ private Pooled<ByteBuffer> wrappedData; /** * Data that has been read from the underlying channel, and needs to be unwrapped. * * This will be null if there is no data. If there is data the {@link #FLAG_DATA_TO_UNWRAP} * flag must still be checked, otherwise there may be situations where even though some data * has been read there is not enough to unwrap (i.e. the engine returned buffer underflow). */ private Pooled<ByteBuffer> dataToUnwrap; /** * Unwrapped data, ready to be delivered to the application. Will be null if there is no data. * * If possible we avoid allocating this buffer, and instead unwrap directly into the end users buffer. 
*/ private Pooled<ByteBuffer> unwrappedData; private SslWriteReadyHandler writeReadyHandler; private SslReadReadyHandler readReadyHandler; private boolean invokingReadListenerHandshake = false; SslConduit(UndertowSslConnection connection, StreamConnection delegate, SSLEngine engine, Pool<ByteBuffer> bufferPool, Runnable handshakeCallback) { this.connection = connection; this.delegate = delegate; this.handshakeCallback = handshakeCallback; this.sink = delegate.getSinkChannel().getConduit(); this.source = delegate.getSourceChannel().getConduit(); this.engine = engine; this.bufferPool = bufferPool; delegate.getSourceChannel().getConduit().setReadReadyHandler(readReadyHandler = new SslReadReadyHandler(null)); delegate.getSinkChannel().getConduit().setWriteReadyHandler(writeReadyHandler = new SslWriteReadyHandler(null)); if(engine.getUseClientMode()) { state = FLAG_IN_HANDSHAKE | FLAG_READ_REQUIRES_WRITE; } else { state = FLAG_IN_HANDSHAKE | FLAG_WRITE_REQUIRES_READ; } } @Override public void terminateReads() throws IOException { state |= FLAG_READ_SHUTDOWN; notifyReadClosed(); } @Override public boolean isReadShutdown() { return anyAreSet(state, FLAG_READ_SHUTDOWN); } @Override public void resumeReads() { if(anyAreSet(state, FLAG_READS_RESUMED)) { //already resumed return; } resumeReads(false); } @Override public void suspendReads() { state &= ~FLAG_READS_RESUMED; if(!allAreSet(state, FLAG_WRITES_RESUMED | FLAG_WRITE_REQUIRES_READ)) { delegate.getSourceChannel().suspendReads(); } } @Override public void wakeupReads() { resumeReads(true); } private void resumeReads(boolean wakeup) { state |= FLAG_READS_RESUMED; if(anyAreSet(state, FLAG_READ_REQUIRES_WRITE)) { delegate.getSinkChannel().resumeWrites(); } else { delegate.getSourceChannel().resumeReads(); if(anyAreSet(state, FLAG_DATA_TO_UNWRAP) || wakeup) { runReadListener(); } } } private void runReadListener() { try { delegate.getIoThread().execute(new Runnable() { @Override public void run() { 
readReadyHandler.readReady(); } }); } catch (Exception e) { //will only happen on shutdown IoUtils.safeClose(connection, delegate); UndertowLogger.REQUEST_IO_LOGGER.debugf(e, "Failed to queue read listener invocation"); } } private void runWriteListener() { try { delegate.getIoThread().execute(new Runnable() { @Override public void run() { writeReadyHandler.writeReady(); } }); } catch (Exception e) { //will only happen on shutdown IoUtils.safeClose(connection, delegate); UndertowLogger.REQUEST_IO_LOGGER.debugf(e, "Failed to queue read listener invocation"); } } @Override public boolean isReadResumed() { return anyAreSet(state, FLAG_READS_RESUMED); } @Override public void awaitReadable() throws IOException { synchronized (this) { if(outstandingTasks > 0) { try { wait(); return; } catch (InterruptedException e) { throw new InterruptedIOException(); } } } if(unwrappedData != null) { return; } if(anyAreSet(state, FLAG_DATA_TO_UNWRAP)) { return; } if(anyAreSet(state, FLAG_READ_REQUIRES_WRITE)) { awaitWritable(); return; } source.awaitReadable(); } @Override public void awaitReadable(long time, TimeUnit timeUnit) throws IOException { synchronized (this) { if(outstandingTasks > 0) { try { wait(timeUnit.toMillis(time)); return; } catch (InterruptedException e) { throw new InterruptedIOException(); } } } if(unwrappedData != null) { return; } if(anyAreSet(state, FLAG_DATA_TO_UNWRAP)) { return; } if(anyAreSet(state, FLAG_READ_REQUIRES_WRITE)) { awaitWritable(time, timeUnit); return; } source.awaitReadable(time, timeUnit); } @Override public XnioIoThread getReadThread() { return delegate.getIoThread(); } @Override public void setReadReadyHandler(ReadReadyHandler handler) { delegate.getSourceChannel().getConduit().setReadReadyHandler(readReadyHandler = new SslReadReadyHandler(handler)); } @Override public long transferFrom(FileChannel src, long position, long count) throws IOException { if(anyAreSet(state, FLAG_WRITE_SHUTDOWN)) { throw new ClosedChannelException(); } return 
src.transferTo(position, count, new ConduitWritableByteChannel(this)); } @Override public long transferFrom(StreamSourceChannel source, long count, ByteBuffer throughBuffer) throws IOException { if(anyAreSet(state, FLAG_WRITE_SHUTDOWN)) { throw new ClosedChannelException(); } return IoUtils.transfer(source, count, throughBuffer, new ConduitWritableByteChannel(this)); } @Override public int write(ByteBuffer src) throws IOException { if(anyAreSet(state, FLAG_WRITE_SHUTDOWN)) { throw new ClosedChannelException(); } return (int) doWrap(new ByteBuffer[]{src}, 0, 1); } @Override public long write(ByteBuffer[] srcs, int offs, int len) throws IOException { if(anyAreSet(state, FLAG_WRITE_SHUTDOWN)) { throw new ClosedChannelException(); } return doWrap(srcs, offs, len); } @Override public int writeFinal(ByteBuffer src) throws IOException { if(anyAreSet(state, FLAG_WRITE_SHUTDOWN)) { throw new ClosedChannelException(); } return Conduits.writeFinalBasic(this, src); } @Override public long writeFinal(ByteBuffer[] srcs, int offset, int length) throws IOException { return Conduits.writeFinalBasic(this, srcs, offset, length); } @Override public void terminateWrites() throws IOException { state |= FLAG_WRITE_SHUTDOWN; } @Override public boolean isWriteShutdown() { return false; //todo } @Override public void resumeWrites() { state |= FLAG_WRITES_RESUMED; if(anyAreSet(state, FLAG_WRITE_REQUIRES_READ)) { delegate.getSourceChannel().resumeReads(); } else { delegate.getSinkChannel().resumeWrites(); } } @Override public void suspendWrites() { state &= ~FLAG_WRITES_RESUMED; if(!allAreSet(state, FLAG_READS_RESUMED | FLAG_READ_REQUIRES_WRITE)) { delegate.getSinkChannel().suspendWrites(); } } @Override public void wakeupWrites() { resumeWrites(); getWriteThread().execute(new Runnable() { @Override public void run() { writeReadyHandler.writeReady(); } }); } @Override public boolean isWriteResumed() { return anyAreSet(state, FLAG_WRITES_RESUMED); } @Override public void awaitWritable() throws 
IOException { if(anyAreSet(state, FLAG_WRITE_SHUTDOWN)) { return; } if(outstandingTasks > 0) { synchronized (this) { if(outstandingTasks > 0) { try { this.wait(); return; } catch (InterruptedException e) { throw new InterruptedIOException(); } } } } if(anyAreSet(state, FLAG_WRITE_REQUIRES_READ)) { awaitReadable(); return; } sink.awaitWritable(); } @Override public void awaitWritable(long time, TimeUnit timeUnit) throws IOException { if(anyAreSet(state, FLAG_WRITE_SHUTDOWN)) { return; } if(outstandingTasks > 0) { synchronized (this) { if(outstandingTasks > 0) { try { this.wait(timeUnit.toMillis(time)); return; } catch (InterruptedException e) { throw new InterruptedIOException(); } } } } if(anyAreSet(state, FLAG_WRITE_REQUIRES_READ)) { awaitReadable(time, timeUnit); return; } sink.awaitWritable(); } @Override public XnioIoThread getWriteThread() { return delegate.getIoThread(); } @Override public void setWriteReadyHandler(WriteReadyHandler handler) { delegate.getSinkChannel().getConduit().setWriteReadyHandler(writeReadyHandler = new SslWriteReadyHandler(handler)); } @Override public void truncateWrites() throws IOException { notifyWriteClosed(); } @Override public boolean flush() throws IOException { if(anyAreSet(state, FLAG_DELEGATE_SINK_SHUTDOWN)) { return sink.flush(); } if(wrappedData != null) { doWrap(null, 0, 0); if(wrappedData != null) { return false; } } if(allAreSet(state, FLAG_WRITE_SHUTDOWN)) { if(allAreClear(state, FLAG_ENGINE_OUTBOUND_SHUTDOWN)) { state |= FLAG_ENGINE_OUTBOUND_SHUTDOWN; engine.closeOutbound(); doWrap(null, 0, 0); if(wrappedData != null) { return false; } } else if(wrappedData != null && allAreClear(state, FLAG_DELEGATE_SINK_SHUTDOWN)) { doWrap(null, 0, 0); if(wrappedData != null) { return false; } } if(allAreClear(state, FLAG_DELEGATE_SINK_SHUTDOWN)) { sink.terminateWrites(); state |= FLAG_DELEGATE_SINK_SHUTDOWN; } } return sink.flush(); } @Override public long transferTo(long position, long count, FileChannel target) throws IOException 
{ if(anyAreSet(state, FLAG_READ_SHUTDOWN)) { throw new ClosedChannelException(); } return target.transferFrom(new ConduitReadableByteChannel(this), position, count); } @Override public long transferTo(long count, ByteBuffer throughBuffer, StreamSinkChannel target) throws IOException { if(anyAreSet(state, FLAG_READ_SHUTDOWN)) { throw new ClosedChannelException(); } return IoUtils.transfer(new ConduitReadableByteChannel(this), count, throughBuffer, target); } @Override public int read(ByteBuffer dst) throws IOException { if(anyAreSet(state, FLAG_READ_SHUTDOWN)) { throw new ClosedChannelException(); } return (int) doUnwrap(new ByteBuffer[]{dst}, 0, 1); } @Override public long read(ByteBuffer[] dsts, int offs, int len) throws IOException { if(anyAreSet(state, FLAG_READ_SHUTDOWN)) { throw new ClosedChannelException(); } return doUnwrap(dsts, offs, len); } @Override public XnioWorker getWorker() { return delegate.getWorker(); } void notifyWriteClosed() { if(anyAreSet(state, FLAG_WRITE_CLOSED)) { return; } boolean runListener = isWriteResumed() && anyAreSet(state, FLAG_CLOSED); connection.writeClosed(); engine.closeOutbound(); state |= FLAG_WRITE_CLOSED | FLAG_ENGINE_OUTBOUND_SHUTDOWN; if(anyAreSet(state, FLAG_READ_CLOSED)) { closed(); } if(anyAreSet(state, FLAG_READ_REQUIRES_WRITE)) { notifyReadClosed(); } state &= ~FLAG_WRITE_REQUIRES_READ; //unclean shutdown, run the listener if(runListener) { runWriteListener(); } } void notifyReadClosed() { if(anyAreSet(state, FLAG_READ_CLOSED)) { return; } boolean runListener = isReadResumed() && anyAreSet(state, FLAG_CLOSED); connection.readClosed(); try { engine.closeInbound(); } catch (SSLException e) { UndertowLogger.REQUEST_IO_LOGGER.ioException(new IOException(e)); } state |= FLAG_READ_CLOSED | FLAG_ENGINE_INBOUND_SHUTDOWN; if(anyAreSet(state, FLAG_WRITE_CLOSED)) { closed(); } if(anyAreSet(state, FLAG_WRITE_REQUIRES_READ)) { notifyWriteClosed(); } if(runListener) { runReadListener(); } } public void startHandshake() throws 
SSLException { state |= FLAG_READ_REQUIRES_WRITE; engine.beginHandshake(); } public SSLSession getSslSession() { return engine.getSession(); } /** * Force the handshake to continue * * @throws IOException */ private void doHandshake() throws IOException { doUnwrap(null, 0, 0); doWrap(null, 0, 0); } /** * Unwrap channel data into the user buffers. If no user buffer is supplied (e.g. during handshaking) then the * unwrap will happen into the channels unwrap buffer. * * If some data has already been unwrapped it will simply be copied into the user buffers * and no unwrap will actually take place. * * @return true if the unwrap operation made progress, false otherwise * @throws SSLException */ private long doUnwrap(ByteBuffer[] userBuffers, int off, int len) throws IOException { if(anyAreSet(state, FLAG_CLOSED)) { throw new ClosedChannelException(); } if(outstandingTasks > 0) { return 0; } if(anyAreSet(state, FLAG_READ_REQUIRES_WRITE)) { doWrap(null, 0, 0); if(allAreClear(state, FLAG_WRITE_REQUIRES_READ)) { //unless a wrap is immediately required we just return return 0; } } Pooled<ByteBuffer> unwrappedData = this.unwrappedData; //copy any exiting data if(unwrappedData != null && userBuffers != null) { long copied = Buffers.copy(userBuffers, off, len, unwrappedData.getResource()); if(!unwrappedData.getResource().hasRemaining()) { unwrappedData.free(); this.unwrappedData = null; } return copied; } try { //we need to store how much data is in the unwrap buffer. 
If no progress can be made then we unset //the data to unwrap flag int dataToUnwrapLength = -1; //try and read some data if we don't already have some if(allAreClear(state, FLAG_DATA_TO_UNWRAP)) { if(dataToUnwrap == null) { dataToUnwrap = bufferPool.allocate(); } int res; try { res = source.read(dataToUnwrap.getResource()); } catch (IOException e) { dataToUnwrap.free(); dataToUnwrap = null; throw e; } dataToUnwrap.getResource().flip(); if(res == -1) { dataToUnwrap.free(); dataToUnwrap = null; notifyReadClosed(); return -1; } else if(res == 0 && engine.getHandshakeStatus() == SSLEngineResult.HandshakeStatus.FINISHED) { return 0; } } else { dataToUnwrapLength = dataToUnwrap.getResource().remaining(); } long original = 0; if(userBuffers != null) { original = Buffers.remaining(userBuffers); } //perform the actual unwrap operation //if possible this is done into the the user buffers, however //if none are supplied or this results in a buffer overflow then we allocate our own SSLEngineResult result; boolean unwrapBufferUsed = false; try { if (userBuffers != null) { result = engine.unwrap(this.dataToUnwrap.getResource(), userBuffers, off, len); if (result.getStatus() == SSLEngineResult.Status.BUFFER_OVERFLOW) { //not enough space in the user buffers //we use our own unwrappedData = bufferPool.allocate(); ByteBuffer[] d = new ByteBuffer[len + 1]; System.arraycopy(userBuffers, off, d, 0, len); d[len] = unwrappedData.getResource(); result = engine.unwrap(this.dataToUnwrap.getResource(), d); unwrapBufferUsed = true; } } else { unwrapBufferUsed = true; if (unwrappedData == null) { unwrappedData = bufferPool.allocate(); } else { unwrappedData.getResource().compact(); } result = engine.unwrap(this.dataToUnwrap.getResource(), unwrappedData.getResource()); } } finally { if(unwrapBufferUsed) { unwrappedData.getResource().flip(); if(!unwrappedData.getResource().hasRemaining()) { unwrappedData.free(); unwrappedData = null; } } this.unwrappedData = unwrappedData; } if 
(!handleHandshakeResult(result)) { if(this.dataToUnwrap.getResource().hasRemaining() && result.getStatus() != SSLEngineResult.Status.BUFFER_UNDERFLOW && dataToUnwrap.getResource().remaining() != dataToUnwrapLength) { state |= FLAG_DATA_TO_UNWRAP; } return 0; } if (result.getStatus() == SSLEngineResult.Status.CLOSED) { notifyReadClosed(); return -1; } if(result.getStatus() == SSLEngineResult.Status.BUFFER_UNDERFLOW) { state &= ~FLAG_DATA_TO_UNWRAP; } else if(result.getStatus() == SSLEngineResult.Status.BUFFER_OVERFLOW) { throw new IOException("overflow"); //todo: handle properly } else if(this.dataToUnwrap.getResource().hasRemaining() && dataToUnwrap.getResource().remaining() != dataToUnwrapLength) { state |= FLAG_DATA_TO_UNWRAP; } else { state &= ~FLAG_DATA_TO_UNWRAP; } if(userBuffers == null) { return 0; } else { return original - Buffers.remaining(userBuffers); } } finally { boolean requiresListenerInvocation = false; //if there is data in the buffer and reads are resumed we should re-run the listener if (unwrappedData != null && unwrappedData.getResource().hasRemaining()) { requiresListenerInvocation = true; } if(dataToUnwrap != null) { //if there is no data in the buffer we just free it if(!dataToUnwrap.getResource().hasRemaining()) { dataToUnwrap.free(); dataToUnwrap = null; state &= ~FLAG_DATA_TO_UNWRAP; } else if(allAreClear(state, FLAG_DATA_TO_UNWRAP)) { //if there is not enough data in the buffer we compact it to make room for more dataToUnwrap.getResource().compact(); } else { //there is more data, make sure we trigger a read listener invocation requiresListenerInvocation = true; } } //if we are in the read listener handshake we don't need to invoke //as it is about to be invoked anyway if(requiresListenerInvocation && anyAreSet(state, FLAG_READS_RESUMED) && !invokingReadListenerHandshake) { runReadListener(); } } } /** * Wraps the user data and attempts to send it to the remote client. 
If data has already been buffered then * this is attempted to be sent first. * * If the supplied buffers are null then a wrap operation is still attempted, which will happen during the * handshaking process. * @param userBuffers The buffers * @param off The offset * @param len The length * @return * @throws IOException */ private long doWrap(ByteBuffer[] userBuffers, int off, int len) throws IOException { if(anyAreSet(state, FLAG_CLOSED)) { throw new ClosedChannelException(); } if(outstandingTasks > 0) { return 0; } if(anyAreSet(state, FLAG_WRITE_REQUIRES_READ)) { doUnwrap(null, 0, 0); if(allAreClear(state, FLAG_READ_REQUIRES_WRITE)) { //unless a wrap is immediatly required we just return return 0; } } if(wrappedData != null) { int res = sink.write(wrappedData.getResource()); if(res == 0 || wrappedData.getResource().hasRemaining()) { return 0; } wrappedData.getResource().clear(); } else { wrappedData = bufferPool.allocate(); } try { SSLEngineResult result = null; while (result == null || (result.getHandshakeStatus() == SSLEngineResult.HandshakeStatus.NEED_WRAP && result.getStatus() != SSLEngineResult.Status.BUFFER_OVERFLOW)) { if (userBuffers == null) { result = engine.wrap(EMPTY_BUFFER, wrappedData.getResource()); } else { result = engine.wrap(userBuffers, off, len, wrappedData.getResource()); } } wrappedData.getResource().flip(); if (result.getStatus() == SSLEngineResult.Status.BUFFER_UNDERFLOW) { throw new IOException("underflow"); //todo: can this happen? 
} else if (result.getStatus() == SSLEngineResult.Status.BUFFER_OVERFLOW) { if(!wrappedData.getResource().hasRemaining()) { //if an earlier wrap suceeded we ignore this throw new IOException("overflow"); //todo: handle properly } } //attempt to write it out, if we fail we just return //we ignore the handshake status, as wrap will get called again if(wrappedData.getResource().hasRemaining()) { sink.write(wrappedData.getResource()); } //if it was not a complete write we just return if(wrappedData.getResource().hasRemaining()) { return result.bytesConsumed(); } if (!handleHandshakeResult(result)) { return 0; } if (result.getStatus() == SSLEngineResult.Status.CLOSED && userBuffers != null) { notifyWriteClosed(); throw new ClosedChannelException(); } return result.bytesConsumed(); } finally { //this can be cleared if the channel is fully closed if(wrappedData != null) { if (!wrappedData.getResource().hasRemaining()) { wrappedData.free(); wrappedData = null; } } } } private boolean handleHandshakeResult(SSLEngineResult result) throws IOException { switch (result.getHandshakeStatus()) { case NEED_TASK: { state |= FLAG_IN_HANDSHAKE; clearReadRequiresWrite(); clearWriteRequiresRead(); runTasks(); return false; } case NEED_UNWRAP: { clearReadRequiresWrite(); state |= FLAG_WRITE_REQUIRES_READ | FLAG_IN_HANDSHAKE; sink.suspendWrites(); if(anyAreSet(state, FLAG_WRITES_RESUMED)) { source.resumeReads(); } if (anyAreSet(state, FLAG_DATA_TO_UNWRAP) && anyAreSet(state, FLAG_WRITES_RESUMED | FLAG_READS_RESUMED)) { runReadListener(); } return false; } case NEED_WRAP: { clearWriteRequiresRead(); state |= FLAG_READ_REQUIRES_WRITE | FLAG_IN_HANDSHAKE; source.suspendReads(); if(anyAreSet(state, FLAG_READS_RESUMED)) { sink.resumeWrites(); } return false; } case FINISHED: { if(anyAreSet(state, FLAG_IN_HANDSHAKE)) { state &= ~FLAG_IN_HANDSHAKE; handshakeCallback.run(); } } } clearReadRequiresWrite(); clearWriteRequiresRead(); return true; } private void clearReadRequiresWrite() { 
if(anyAreSet(state, FLAG_READ_REQUIRES_WRITE)) { state &= ~FLAG_READ_REQUIRES_WRITE; if(anyAreSet(state, FLAG_READS_RESUMED)) { resumeReads(false); } if(allAreClear(state, FLAG_WRITES_RESUMED)) { sink.suspendWrites(); } } } private void clearWriteRequiresRead() { if(anyAreSet(state, FLAG_WRITE_REQUIRES_READ)) { state &= ~FLAG_WRITE_REQUIRES_READ; if(anyAreSet(state, FLAG_WRITES_RESUMED)) { wakeupWrites(); } if(allAreClear(state, FLAG_READS_RESUMED)) { source.suspendReads(); } } } private void closed() { if(anyAreSet(state, FLAG_CLOSED)) { return; } state |= FLAG_CLOSED | FLAG_DELEGATE_SINK_SHUTDOWN | FLAG_DELEGATE_SOURCE_SHUTDOWN | FLAG_WRITE_SHUTDOWN | FLAG_READ_SHUTDOWN; notifyReadClosed(); notifyWriteClosed(); if(dataToUnwrap != null) { dataToUnwrap.free(); dataToUnwrap = null; } if(unwrappedData != null) { unwrappedData.free(); unwrappedData = null; } if(wrappedData != null) { wrappedData.free(); wrappedData = null; } if(allAreClear(state, FLAG_ENGINE_OUTBOUND_SHUTDOWN)) { engine.closeOutbound(); } if(allAreClear(state, FLAG_ENGINE_INBOUND_SHUTDOWN)) { try { engine.closeInbound(); } catch (SSLException e) { UndertowLogger.REQUEST_LOGGER.ioException(e); } } IoUtils.safeClose(delegate); } /** * Execute all the tasks in the worker * * Once they are complete we notify any waiting threads and wakeup reads/writes as appropriate */ private void runTasks() { //don't run anything in the IO thread till the tasks are done delegate.getSinkChannel().suspendWrites(); delegate.getSourceChannel().suspendReads(); List<Runnable> tasks = new ArrayList<>(); Runnable t = engine.getDelegatedTask(); while (t != null) { tasks.add(t); t = engine.getDelegatedTask(); } synchronized (this) { outstandingTasks += tasks.size(); for (final Runnable task : tasks) { getWorker().execute(new Runnable() { @Override public void run() { try { task.run(); } finally { synchronized (SslConduit.this) { if (outstandingTasks == 1) { getWriteThread().execute(new Runnable() { @Override public void run() { 
synchronized (SslConduit.this) { SslConduit.this.notifyAll(); --outstandingTasks; try { doHandshake(); } catch (IOException e) { IoUtils.safeClose(connection); } if (anyAreSet(state, FLAG_READS_RESUMED)) { wakeupReads(); //wakeup, because we need to run an unwrap even if there is no data to be read } if (anyAreSet(state, FLAG_WRITES_RESUMED)) { resumeWrites(); //we don't need to wakeup, as the channel should be writable } } } }); } else { outstandingTasks--; } } } } }); } } } public SSLEngine getSSLEngine() { return engine; } /** * forcibly closes the connection */ public void close() { closed(); } /** * Read ready handler that deals with read-requires-write semantics */ private class SslReadReadyHandler implements ReadReadyHandler { private final ReadReadyHandler delegateHandler; private SslReadReadyHandler(ReadReadyHandler delegateHandler) { this.delegateHandler = delegateHandler; } @Override public void readReady() { if(anyAreSet(state, FLAG_WRITE_REQUIRES_READ) && !anyAreSet(state, FLAG_ENGINE_INBOUND_SHUTDOWN)) { try { invokingReadListenerHandshake = true; doHandshake(); } catch (IOException e) { UndertowLogger.REQUEST_LOGGER.ioException(e); IoUtils.safeClose(delegate); } finally { invokingReadListenerHandshake = false; } } boolean noProgress = false; int initialUnwrapped = -1; if (anyAreSet(state, FLAG_READS_RESUMED)) { if (delegateHandler == null) { final ChannelListener<? 
super ConduitStreamSourceChannel> readListener = connection.getSourceChannel().getReadListener(); if (readListener == null) { suspendReads(); } else { if(anyAreSet(state, FLAG_DATA_TO_UNWRAP)) { initialUnwrapped = dataToUnwrap.getResource().remaining(); } ChannelListeners.invokeChannelListener(connection.getSourceChannel(), readListener); if(anyAreSet(state, FLAG_DATA_TO_UNWRAP) && initialUnwrapped == dataToUnwrap.getResource().remaining()) { noProgress = true; } } } else { delegateHandler.readReady(); } } if(!anyAreSet(state, FLAG_READS_RESUMED | FLAG_WRITE_REQUIRES_READ)) { delegate.getSourceChannel().suspendReads(); } else if(anyAreSet(state, FLAG_READS_RESUMED) && (unwrappedData != null || anyAreSet(state, FLAG_DATA_TO_UNWRAP))) { if(anyAreSet(state, FLAG_READ_CLOSED)) { if(unwrappedData != null) { unwrappedData.free(); } if(dataToUnwrap != null) { dataToUnwrap.free(); } unwrappedData = null; dataToUnwrap = null; } else { //there is data in the buffers so we do a wakeup //as we may not get an actual read notification //if we need to write for the SSL engine to progress we don't invoke the read listener //otherwise it will run in a busy loop till the channel becomes writable //we also don't re-run if we have outstanding tasks if(!(anyAreSet(state, FLAG_READ_REQUIRES_WRITE) && wrappedData != null) && outstandingTasks == 0 && !noProgress) { runReadListener(); } } } } @Override public void forceTermination() { try { if (delegateHandler != null) { delegateHandler.forceTermination(); } } finally { IoUtils.safeClose(delegate); } } @Override public void terminated() { ChannelListeners.invokeChannelListener(connection.getSourceChannel(), connection.getSourceChannel().getCloseListener()); } } /** * write read handler that deals with write-requires-read semantics */ private class SslWriteReadyHandler implements WriteReadyHandler { private final WriteReadyHandler delegateHandler; private SslWriteReadyHandler(WriteReadyHandler delegateHandler) { this.delegateHandler = 
delegateHandler; } @Override public void forceTermination() { try { if (delegateHandler != null) { delegateHandler.forceTermination(); } } finally { IoUtils.safeClose(delegate); } } @Override public void terminated() { ChannelListeners.invokeChannelListener(connection.getSinkChannel(), connection.getSinkChannel().getCloseListener()); } @Override public void writeReady() { if(anyAreSet(state, FLAG_READ_REQUIRES_WRITE)) { if(anyAreSet(state, FLAG_READS_RESUMED)) { readReadyHandler.readReady(); } else { try { doHandshake(); } catch (IOException e) { UndertowLogger.REQUEST_LOGGER.ioException(e); IoUtils.safeClose(delegate); } } } if (anyAreSet(state, FLAG_WRITES_RESUMED)) { if(delegateHandler == null) { final ChannelListener<? super ConduitStreamSinkChannel> writeListener = connection.getSinkChannel().getWriteListener(); if (writeListener == null) { suspendWrites(); } else { ChannelListeners.invokeChannelListener(connection.getSinkChannel(), writeListener); } } else { delegateHandler.writeReady(); } } if(!anyAreSet(state, FLAG_WRITES_RESUMED | FLAG_READ_REQUIRES_WRITE)) { delegate.getSinkChannel().suspendWrites(); } } } @Override public String toString() { return "SslConduit{" + "state=" + state + ", outstandingTasks=" + outstandingTasks + ", wrappedData=" + wrappedData + ", dataToUnwrap=" + dataToUnwrap + ", unwrappedData=" + unwrappedData + '}'; } }
apache-2.0
vjr/snappydata
core/src/main/java/io/snappydata/util/com/clearspring/analytics/stream/membership/DataOutputBuffer.java
2903
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.snappydata.util.com.clearspring.analytics.stream.membership;

import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Arrays;

/**
 * An implementation of the DataOutputStream interface backed by a growable
 * in-memory byte buffer. This class is completely thread unsafe.
 */
public class DataOutputBuffer extends DataOutputStream {

    private static class Buffer extends ByteArrayOutputStream {

        /**
         * Returns a defensive copy of the valid bytes written so far,
         * sized exactly to {@link #getLength()}.
         */
        public byte[] getData() {
            return Arrays.copyOf(buf, getLength());
        }

        /** Returns the number of valid bytes currently in the buffer. */
        public int getLength() {
            return count;
        }

        /** Marks the buffer as empty; the backing array is retained for reuse. */
        @Override
        public void reset() {
            count = 0;
        }

        /**
         * Reads exactly {@code len} bytes from {@code in} directly into the
         * backing array, growing it (at least doubling) when needed.
         *
         * @throws IOException if the underlying read fails or hits EOF early
         */
        public void write(DataInput in, int len) throws IOException {
            int newcount = count + len;
            if (newcount > buf.length) {
                // Grow by doubling, but never to less than the required size.
                buf = Arrays.copyOf(buf, Math.max(buf.length << 1, newcount));
            }
            in.readFully(buf, count, len);
            count = newcount;
        }
    }

    private final Buffer buffer;

    /** Constructs a new empty buffer. */
    public DataOutputBuffer() {
        this(new Buffer());
    }

    private DataOutputBuffer(Buffer buffer) {
        super(buffer);
        this.buffer = buffer;
    }

    /**
     * Returns the current contents of the buffer. Data is only valid to
     * {@link #getLength()}.
     */
    public byte[] getData() {
        return buffer.getData();
    }

    /** Returns the length of the valid data currently in the buffer. */
    public int getLength() {
        return buffer.getLength();
    }

    /**
     * Resets the buffer to empty and returns {@code this} for call chaining.
     */
    public DataOutputBuffer reset() {
        this.written = 0; // keep DataOutputStream's byte counter in sync with the buffer
        buffer.reset();
        return this;
    }

    /** Writes {@code length} bytes from a DataInput directly into the buffer. */
    public void write(DataInput in, int length) throws IOException {
        buffer.write(in, length);
    }
}
apache-2.0
JavaSummer/JavaMainRepo
Students/Farcas Silviu Vlad/Assignment 7/javasmmr.zoowsome/src/javasmmr/zoowsome/controllers/Main.java
393
package javasmmr.zoowsome.controllers;

import java.util.ArrayList;
import javasmmr.zoowsome.models.animals.*;
import javasmmr.zoowsome.models.employees.*;
import javasmmr.zoowsome.repositories.*;
import javasmmr.zoowsome.views.MainMenuFrame;

/**
 * Application entry point: builds the main-menu frame and hands it to its
 * controller.
 */
public class Main {

    public static void main(String[] args) throws Exception {
        final MainMenuFrame frame = new MainMenuFrame("Main Menu");
        new MainMenuController(frame, false);
    }
}
apache-2.0
palmanojkumar/ehcache3
impl/src/main/java/org/ehcache/config/serializer/DefaultSerializationProviderConfiguration.java
1821
/* * Copyright Terracotta, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.ehcache.config.serializer; import org.ehcache.internal.classes.ClassInstanceProviderConfiguration; import org.ehcache.spi.serialization.DefaultSerializationProvider; import org.ehcache.spi.serialization.Serializer; import org.ehcache.spi.service.ServiceConfiguration; public class DefaultSerializationProviderConfiguration extends ClassInstanceProviderConfiguration<Serializer<?>> implements ServiceConfiguration<DefaultSerializationProvider> { @Override public Class<DefaultSerializationProvider> getServiceType() { return DefaultSerializationProvider.class; } public <T> DefaultSerializationProviderConfiguration addSerializerFor(Class<T> serializableClass, Class<? extends Serializer<T>> serializerClass) { if (serializableClass == null) { throw new NullPointerException("Serializable class cannot be null"); } if (serializerClass == null) { throw new NullPointerException("Serializer class cannot be null"); } String alias = serializableClass.getName(); if (getDefaults().containsKey(alias)) { throw new IllegalArgumentException("Duplicate serializer for class : " + alias); } getDefaults().put(alias, serializerClass); return this; } }
apache-2.0
robertgeiger/incubator-geode
gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/DistributedRegion.java
162027
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.gemstone.gemfire.internal.cache; import static com.gemstone.gemfire.internal.offheap.annotations.OffHeapIdentifier.ABSTRACT_REGION_ENTRY_FILL_IN_VALUE; import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.CopyOnWriteArraySet; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.Lock; import org.apache.logging.log4j.Logger; import com.gemstone.gemfire.CancelException; import com.gemstone.gemfire.InternalGemFireError; import com.gemstone.gemfire.InvalidDeltaException; import com.gemstone.gemfire.SystemFailure; import com.gemstone.gemfire.cache.CacheClosedException; import com.gemstone.gemfire.cache.CacheListener; import com.gemstone.gemfire.cache.CacheLoader; import com.gemstone.gemfire.cache.CacheLoaderException; import com.gemstone.gemfire.cache.CacheWriter; 
import com.gemstone.gemfire.cache.CacheWriterException; import com.gemstone.gemfire.cache.DataPolicy; import com.gemstone.gemfire.cache.DiskAccessException; import com.gemstone.gemfire.cache.EntryNotFoundException; import com.gemstone.gemfire.cache.LossAction; import com.gemstone.gemfire.cache.MembershipAttributes; import com.gemstone.gemfire.cache.Operation; import com.gemstone.gemfire.cache.RegionAccessException; import com.gemstone.gemfire.cache.RegionAttributes; import com.gemstone.gemfire.cache.RegionDestroyedException; import com.gemstone.gemfire.cache.RegionDistributionException; import com.gemstone.gemfire.cache.RegionMembershipListener; import com.gemstone.gemfire.cache.ResumptionAction; import com.gemstone.gemfire.cache.RoleException; import com.gemstone.gemfire.cache.TimeoutException; import com.gemstone.gemfire.cache.TransactionId; import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueImpl; import com.gemstone.gemfire.cache.execute.Function; import com.gemstone.gemfire.cache.execute.FunctionException; import com.gemstone.gemfire.cache.execute.ResultCollector; import com.gemstone.gemfire.cache.execute.ResultSender; import com.gemstone.gemfire.cache.persistence.PersistentReplicatesOfflineException; import com.gemstone.gemfire.cache.query.internal.IndexUpdater; import com.gemstone.gemfire.cache.wan.GatewaySender; import com.gemstone.gemfire.distributed.DistributedLockService; import com.gemstone.gemfire.distributed.DistributedMember; import com.gemstone.gemfire.distributed.LockServiceDestroyedException; import com.gemstone.gemfire.distributed.Role; import com.gemstone.gemfire.distributed.internal.DM; import com.gemstone.gemfire.distributed.internal.DistributionAdvisee; import com.gemstone.gemfire.distributed.internal.DistributionAdvisor; import com.gemstone.gemfire.distributed.internal.DistributionAdvisor.Profile; import com.gemstone.gemfire.distributed.internal.DistributionAdvisor.ProfileVisitor; import 
com.gemstone.gemfire.distributed.internal.DistributionConfig; import com.gemstone.gemfire.distributed.internal.MembershipListener; import com.gemstone.gemfire.distributed.internal.ReplyProcessor21; import com.gemstone.gemfire.distributed.internal.locks.DLockRemoteToken; import com.gemstone.gemfire.distributed.internal.locks.DLockService; import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedMember; import com.gemstone.gemfire.internal.Assert; import com.gemstone.gemfire.internal.cache.CacheDistributionAdvisor.CacheProfile; import com.gemstone.gemfire.internal.cache.InitialImageOperation.GIIStatus; import com.gemstone.gemfire.internal.cache.RemoteFetchVersionMessage.FetchVersionResponse; import com.gemstone.gemfire.internal.cache.control.InternalResourceManager.ResourceType; import com.gemstone.gemfire.internal.cache.control.MemoryEvent; import com.gemstone.gemfire.internal.cache.execute.DistributedRegionFunctionExecutor; import com.gemstone.gemfire.internal.cache.execute.DistributedRegionFunctionResultSender; import com.gemstone.gemfire.internal.cache.execute.DistributedRegionFunctionResultWaiter; import com.gemstone.gemfire.internal.cache.execute.FunctionStats; import com.gemstone.gemfire.internal.cache.execute.LocalResultCollector; import com.gemstone.gemfire.internal.cache.execute.RegionFunctionContextImpl; import com.gemstone.gemfire.internal.cache.execute.ServerToClientFunctionResultSender; import com.gemstone.gemfire.internal.cache.lru.LRUEntry; import com.gemstone.gemfire.internal.cache.persistence.CreatePersistentRegionProcessor; import com.gemstone.gemfire.internal.cache.persistence.PersistenceAdvisor; import com.gemstone.gemfire.internal.cache.persistence.PersistenceAdvisorImpl; import com.gemstone.gemfire.internal.cache.persistence.PersistentMemberID; import com.gemstone.gemfire.internal.cache.persistence.PersistentMemberManager; import com.gemstone.gemfire.internal.cache.persistence.PersistentMemberView; import 
com.gemstone.gemfire.internal.cache.tier.sockets.ClientProxyMembershipID; import com.gemstone.gemfire.internal.cache.tier.sockets.VersionedObjectList; import com.gemstone.gemfire.internal.cache.versions.ConcurrentCacheModificationException; import com.gemstone.gemfire.internal.cache.versions.RegionVersionVector; import com.gemstone.gemfire.internal.cache.versions.VersionSource; import com.gemstone.gemfire.internal.cache.versions.VersionTag; import com.gemstone.gemfire.internal.cache.wan.AbstractGatewaySender; import com.gemstone.gemfire.internal.cache.wan.AbstractGatewaySenderEventProcessor; import com.gemstone.gemfire.internal.cache.wan.AsyncEventQueueConfigurationException; import com.gemstone.gemfire.internal.cache.wan.GatewaySenderConfigurationException; import com.gemstone.gemfire.internal.cache.wan.parallel.ConcurrentParallelGatewaySenderQueue; import com.gemstone.gemfire.internal.i18n.LocalizedStrings; import com.gemstone.gemfire.internal.logging.LogService; import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage; import com.gemstone.gemfire.internal.offheap.OffHeapHelper; import com.gemstone.gemfire.internal.offheap.SimpleMemoryAllocatorImpl.Chunk; import com.gemstone.gemfire.internal.offheap.annotations.Released; import com.gemstone.gemfire.internal.offheap.annotations.Retained; import com.gemstone.gemfire.internal.sequencelog.RegionLogger; import com.gemstone.gemfire.internal.util.concurrent.StoppableCountDownLatch; import com.gemstone.org.jgroups.util.StringId; /** * * @author Eric Zoerner * @author Sudhir Menon */ @SuppressWarnings("deprecation") public class DistributedRegion extends LocalRegion implements CacheDistributionAdvisee { private static final Logger logger = LogService.getLogger(); /** causes cache profile to be added to afterRemoteRegionCreate notification for testing */ public static boolean TEST_HOOK_ADD_PROFILE = false; /** Used to sync accesses to this.dlockService to allow lazy construction */ private final Object 
dlockMonitor = new Object(); final CacheDistributionAdvisor distAdvisor; /** * @guarded.By {@link #dlockMonitor} */ private DistributedLockService dlockService; protected final AdvisorListener advisorListener = new AdvisorListener(); /** Set of currently missing required roles */ protected final HashSet missingRequiredRoles = new HashSet(); /** True if this region is currently missing any required roles */ protected volatile boolean isMissingRequiredRoles = false; /** * True if this region is has any required roles defined and the LossAction is * either NO_ACCESS or LIMITED_ACCESS. Reliability checks will only happen if * this is true. */ private final boolean requiresReliabilityCheck; /** * Provides a queue for reliable message delivery * * @since 5.0 */ protected final ReliableMessageQueue rmq; /** * Latch that is opened after initialization waits for required roles up to * the <a href="DistributedSystem#member-timeout">member-timeout </a>. */ protected final StoppableCountDownLatch initializationLatchAfterMemberTimeout; private final PersistenceAdvisor persistenceAdvisor; private final PersistentMemberID persistentId; /** * This boolean is set to false when this region * is non-persistent, but there are persistent members in the distributed system * to which all region modifications should be forwarded * see bug 45186 */ private volatile boolean generateVersionTag = true; /** Tests can set this to true and ignore reliability triggered reconnects */ public static boolean ignoreReconnect = false; /** * Lock to prevent multiple threads on this member from performing * a clear at the same time. 
*/ private final Object clearLock = new Object(); private static AtomicBoolean loggedNetworkPartitionWarning = new AtomicBoolean(false); /** Creates a new instance of DistributedRegion */ protected DistributedRegion(String regionName, RegionAttributes attrs, LocalRegion parentRegion, GemFireCacheImpl cache, InternalRegionArguments internalRegionArgs) { super(regionName, attrs, parentRegion, cache, internalRegionArgs); this.initializationLatchAfterMemberTimeout = new StoppableCountDownLatch( getCancelCriterion(), 1); this.distAdvisor = createDistributionAdvisor(internalRegionArgs); if (getDistributionManager().getConfig().getEnableNetworkPartitionDetection() && !isInternalRegion() && !attrs.getScope().isAck() && !doesNotDistribute() && attrs.getDataPolicy().withStorage()) { logger.warn(LocalizedMessage.create(LocalizedStrings.DistributedRegion_REGION_0_1_SPLITBRAIN_CONFIG_WARNING, new Object[] { regionName, attrs.getScope() })); } if (!getDistributionManager().getConfig().getEnableNetworkPartitionDetection() && attrs.getDataPolicy().withPersistence() && !loggedNetworkPartitionWarning.getAndSet(true)) { logger.warn(LocalizedMessage.create( LocalizedStrings.DistributedRegion_REGION_0_ENABLE_NETWORK_PARTITION_WARNING, new Object[] { regionName, attrs.getScope() })); } boolean setRequiresReliabilityCheck = attrs.getMembershipAttributes() .hasRequiredRoles() && // note that the following includes NO_ACCESS, LIMITED_ACCESS, !attrs.getMembershipAttributes().getLossAction().isAllAccess() && !attrs.getMembershipAttributes().getLossAction().isReconnect(); // this optimization is safe for as long as Roles and Required Roles are // immutable // if this VM fulfills all required roles, make requiresReliabilityCheck // false Set reqRoles = new HashSet(attrs.getMembershipAttributes() .getRequiredRoles()); reqRoles.removeAll(getSystem().getDistributedMember().getRoles()); if (reqRoles.isEmpty()) { setRequiresReliabilityCheck = false; } this.requiresReliabilityCheck = 
setRequiresReliabilityCheck; { ReliableMessageQueue tmp = null; if (this.requiresReliabilityCheck) { // if // (attrs.getMembershipAttributes().getLossAction().isAllAccessWithQueuing()) // { // tmp = cache.getReliableMessageQueueFactory().create(this); // } } this.rmq = tmp; } if(internalRegionArgs.isUsedForPartitionedRegionBucket()) { this.persistenceAdvisor = internalRegionArgs.getPersistenceAdvisor(); } else if (this.allowsPersistence()){ //TODO prpersist - using this lock service is a hack. Maybe? Or maybe //it's ok if we have one (rarely used) lock service for many operations? //What does the resource manager do? DistributedLockService dl = cache.getPartitionedRegionLockService(); try { //TODO prpersist - this is just a quick and dirty storage mechanism so that //I can test the storage. DiskRegionStats diskStats; PersistentMemberView storage; if(getDataPolicy().withPersistence()) { storage = getDiskRegion(); diskStats = getDiskRegion().getStats(); } else { storage = new InMemoryPersistentMemberView(); diskStats = null; } PersistentMemberManager memberManager = cache.getPersistentMemberManager(); this.persistenceAdvisor = new PersistenceAdvisorImpl(distAdvisor, dl, storage, this.getFullPath(), diskStats, memberManager); } catch (Exception e) { throw new InternalGemFireError("Couldn't recover persistence"); } } else { this.persistenceAdvisor = null; } if(this.persistenceAdvisor != null) { this.persistentId = persistenceAdvisor.generatePersistentID(); } else { this.persistentId = null; } } @Override public void createEventTracker() { this.eventTracker = new EventTracker(this); this.eventTracker.start(); } /** * Intended for used during construction of a DistributedRegion * * @return the advisor to be used by the region */ protected CacheDistributionAdvisor createDistributionAdvisor(InternalRegionArguments internalRegionArgs) { return CacheDistributionAdvisor.createCacheDistributionAdvisor(this); // Warning: potential early escape of object before full construction 
} /** * Does this region support persistence? */ public boolean allowsPersistence() { return true; } @Override public boolean requiresOneHopForMissingEntry(EntryEventImpl event) { // received from another member - don't use one-hop if (event.isOriginRemote()) { return false; } // local ops aren't distributed if (event.getOperation().isLocal()) { return false; } // if it already has a valid version tag it can go out with a DistributedCacheOperation if (event.getVersionTag() != null && event.getVersionTag().getRegionVersion() > 0) { return false; } // if we're not allowed to generate a version tag we need to send it to someone who can if (!this.generateVersionTag) { return true; } return this.concurrencyChecksEnabled && (this.srp == null) && !isTX() && this.scope.isDistributed() && !this.dataPolicy.withReplication(); } /** * @see LocalRegion#virtualPut(EntryEventImpl, boolean, boolean, Object, * boolean, long, boolean) */ @Override protected boolean virtualPut(EntryEventImpl event, boolean ifNew, boolean ifOld, Object expectedOldValue, boolean requireOldValue, long lastModified, boolean overwriteDestroyed) throws TimeoutException, CacheWriterException { final boolean isTraceEnabled = logger.isTraceEnabled(); Lock dlock = null; if (this.scope.isGlobal() && // lock only applies to global scope !event.isOriginRemote() && // only if operation originating locally !event.isNetSearch() && // search and load processor handles own locking !event.isNetLoad() && // @todo darrel/kirk: what about putAll? !event.isLocalLoad() && !event.isSingleHopPutOp()) { // Single Hop Op means dlock is already taken at origin node. 
dlock = this.getDistributedLockIfGlobal(event.getKey()); } if (isTraceEnabled) { logger.trace("virtualPut invoked for event {}", event); } try { if (!hasSeenEvent(event)) { if (this.requiresOneHopForMissingEntry(event)) { // bug #45704: see if a one-hop must be done for this operation RegionEntry re = getRegionEntry(event.getKey()); if (re == null /*|| re.isTombstone()*/ || !this.generateVersionTag) { if (!event.isBulkOpInProgress() || this.dataPolicy.withStorage()) { // putAll will send a single one-hop for empty regions. for other missing entries // we need to get a valid version number before modifying the local cache boolean didDistribute = RemotePutMessage.distribute(event, lastModified, false, false, expectedOldValue, requireOldValue, !this.generateVersionTag); if (!didDistribute && isTraceEnabled) { logger.trace("Unable to perform one-hop messaging"); } if (!this.generateVersionTag && !didDistribute) { throw new PersistentReplicatesOfflineException(); } if (didDistribute) { if (isTraceEnabled) { logger.trace("Event after remotePut operation: {}", event); } if (event.getVersionTag() == null) { // if the event wasn't applied by the one-hop replicate it will not have a version tag // and so should not be applied to this cache return false; } } } } } return super.virtualPut(event, ifNew, ifOld, expectedOldValue, requireOldValue, lastModified, overwriteDestroyed); } else { if (event.getDeltaBytes() != null && event.getRawNewValue() == null) { // This means that this event has delta bytes but no full value. // Request the full value of this event. // The value in this vm may not be same as this event's value. throw new InvalidDeltaException( "Cache encountered replay of event containing delta bytes for key " + event.getKey()); } // if the listeners have already seen this event, then it has already // been successfully applied to the cache. 
Distributed messages and // return if (isTraceEnabled) { logger.trace("DR.virtualPut: this cache has already seen this event {}", event); } // Gester, Fix 39014: when hasSeenEvent, put will still distribute // event, but putAll did not. We add the logic back here, not to put // back into DR.distributeUpdate() because we moved this part up into // LR.basicPutPart3 in purpose. Reviewed by Bruce. if (event.isBulkOpInProgress() && !event.isOriginRemote()) { event.getPutAllOperation().addEntry(event, true); } /* doing this so that other VMs will apply this no matter what. If it * is an "update" they will not apply it if they don't have the key. * Because this is probably a retry, it will never get applied to this * local AbstractRegionMap, and so will never be flipped to a 'create' */ event.makeCreate(); distributeUpdate(event, lastModified, ifNew, ifOld, expectedOldValue, requireOldValue); event.invokeCallbacks(this,true, true); return true; } } finally { if (dlock != null) { dlock.unlock(); } } } @Override protected RegionEntry basicPutEntry(EntryEventImpl event, long lastModified) throws TimeoutException, CacheWriterException { final boolean isTraceEnabled = logger.isTraceEnabled(); if (isTraceEnabled) { logger.trace("basicPutEntry invoked for event {}", event); } if (this.requiresOneHopForMissingEntry(event)) { // bug #45704: see if a one-hop must be done for this operation RegionEntry re = getRegionEntry(event.getKey()); if (re == null /*|| re.isTombstone()*/ || !this.generateVersionTag) { final boolean ifNew = false; final boolean ifOld = false; boolean didDistribute = RemotePutMessage.distribute(event, lastModified, ifNew, ifOld, null, false, !this.generateVersionTag); if (!this.generateVersionTag && !didDistribute) { throw new PersistentReplicatesOfflineException(); } if (didDistribute && isTraceEnabled) { logger.trace("Event after remotePut for basicPutEntry: {}", event); } } } return super.basicPutEntry(event, lastModified); } @Override public void 
performPutAllEntry(EntryEventImpl event) { /* * force shared data view so that we just do the virtual op, accruing things in the put all operation for later */ if(isTX()) { event.getPutAllOperation().addEntry(event); } else { getSharedDataView().putEntry(event, false, false, null, false, 0L, false); } } @Override public void performRemoveAllEntry(EntryEventImpl event) { // force shared data view so that we just do the virtual op, accruing things in the bulk operation for later if(isTX()) { event.getRemoveAllOperation().addEntry(event); } else { basicDestroy(event, true, null); //getSharedDataView().destroyExistingEntry(event, true, null); } } /** * distribution and listener notification */ @Override public void basicPutPart3(EntryEventImpl event, RegionEntry entry, boolean isInitialized, long lastModified, boolean invokeCallbacks, boolean ifNew, boolean ifOld, Object expectedOldValue, boolean requireOldValue) { distributeUpdate(event, lastModified, false, false, null, false); super.basicPutPart3(event, entry, isInitialized, lastModified, invokeCallbacks, ifNew, ifOld, expectedOldValue, requireOldValue); } /** distribute an update operation */ protected void distributeUpdate(EntryEventImpl event, long lastModified, boolean ifNew, boolean ifOld, Object expectedOldValue, boolean requireOldValue) { // an update from a netSearch is not distributed if (!event.isOriginRemote() && !event.isNetSearch() && !event.isBulkOpInProgress()) { boolean distribute = true; if (event.getInhibitDistribution()) { // this has already been distributed by a one-hop operation distribute = false; } if (distribute) { UpdateOperation op = new UpdateOperation(event, lastModified); if (logger.isTraceEnabled()) { logger.trace("distributing operation for event : {} : for region : {}", event, this.getName()); } op.distribute(); } } } protected void setGeneratedVersionTag(boolean generateVersionTag) { // there is at-least one other persistent member, so turn on concurrencyChecks 
enableConcurrencyChecks(); this.generateVersionTag = generateVersionTag; } protected boolean getGenerateVersionTag() { return this.generateVersionTag; } @Override protected boolean shouldGenerateVersionTag(RegionEntry entry, EntryEventImpl event) { if (logger.isTraceEnabled()) { logger.trace("shouldGenerateVersionTag this.generateVersionTag={} ccenabled={} dataPolicy={} event:{}", this.generateVersionTag, this.concurrencyChecksEnabled, this.dataPolicy, event); } if (!this.concurrencyChecksEnabled || this.dataPolicy == DataPolicy.EMPTY || !this.generateVersionTag) { return false; } if (this.srp != null) { // client return false; } if (event.getVersionTag() != null && !event.getVersionTag().isGatewayTag()) { return false; } if (event.getOperation().isLocal()) { // bug #45402 - localDestroy generated a version tag return false; } if (!event.isOriginRemote() && this.dataPolicy.withReplication()) { return true; } if (!this.dataPolicy.withReplication() && !this.dataPolicy.withPersistence()) { if (!entry.getVersionStamp().hasValidVersion()) { // do not generate a version stamp in a region that has no replication if it's not based // on an existing version from a replicate region return false; } return true; } if (!event.isOriginRemote() && event.getDistributedMember() != null) { if (!event.getDistributedMember().equals(this.getMyId())) { return event.getVersionTag() == null; // one-hop remote message } } return false; } /** * Throws RegionAccessException if required roles are missing and the * LossAction is NO_ACCESS * * @throws RegionAccessException * if required roles are missing and the LossAction is NO_ACCESS */ @Override protected void checkForNoAccess() { if (this.requiresReliabilityCheck && this.isMissingRequiredRoles) { if (getMembershipAttributes().getLossAction().isNoAccess()) { synchronized (this.missingRequiredRoles) { if (!this.isMissingRequiredRoles) return; Set roles = Collections.unmodifiableSet(new HashSet( this.missingRequiredRoles)); throw new 
RegionAccessException(LocalizedStrings.DistributedRegion_OPERATION_IS_DISALLOWED_BY_LOSSACTION_0_BECAUSE_THESE_REQUIRED_ROLES_ARE_MISSING_1.toLocalizedString(new Object[] {getMembershipAttributes().getLossAction(), roles}), getFullPath(), roles); } } } } /** * Throws RegionAccessException is required roles are missing and the * LossAction is either NO_ACCESS or LIMITED_ACCESS. * * @throws RegionAccessException * if required roles are missing and the LossAction is either * NO_ACCESS or LIMITED_ACCESS */ @Override protected void checkForLimitedOrNoAccess() { if (this.requiresReliabilityCheck && this.isMissingRequiredRoles) { if (getMembershipAttributes().getLossAction().isNoAccess() || getMembershipAttributes().getLossAction().isLimitedAccess()) { synchronized (this.missingRequiredRoles) { if (!this.isMissingRequiredRoles) return; Set roles = Collections.unmodifiableSet(new HashSet( this.missingRequiredRoles)); Assert.assertTrue(!roles.isEmpty()); throw new RegionAccessException(LocalizedStrings.DistributedRegion_OPERATION_IS_DISALLOWED_BY_LOSSACTION_0_BECAUSE_THESE_REQUIRED_ROLES_ARE_MISSING_1 .toLocalizedString(new Object[] { getMembershipAttributes().getLossAction(), roles}), getFullPath(), roles); } } } } @Override protected void handleReliableDistribution(ReliableDistributionData data, Set successfulRecipients) { handleReliableDistribution(data, successfulRecipients, Collections.EMPTY_SET, Collections.EMPTY_SET); } protected void handleReliableDistribution(ReliableDistributionData data, Set successfulRecipients, Set otherRecipients1, Set otherRecipients2) { if (this.requiresReliabilityCheck) { MembershipAttributes ra = getMembershipAttributes(); Set recipients = successfulRecipients; // determine the successful roles Set roles = new HashSet(); for (Iterator iter = recipients.iterator(); iter.hasNext();) { InternalDistributedMember mbr = (InternalDistributedMember)iter.next(); if (mbr != null) { roles.addAll(mbr.getRoles()); } } for (Iterator iter = 
otherRecipients1.iterator(); iter.hasNext();) { InternalDistributedMember mbr = (InternalDistributedMember)iter.next(); if (mbr != null) { roles.addAll(mbr.getRoles()); } } for (Iterator iter = otherRecipients2.iterator(); iter.hasNext();) { InternalDistributedMember mbr = (InternalDistributedMember)iter.next(); if (mbr != null) { roles.addAll(mbr.getRoles()); } } // determine the missing roles Set failedRoles = new HashSet(ra.getRequiredRoles()); failedRoles.removeAll(roles); if (failedRoles.isEmpty()) return; // if (rp.isAllAccessWithQueuing()) { // this.rmq.add(data, failedRoles); // } else { throw new RegionDistributionException(LocalizedStrings.DistributedRegion_OPERATION_DISTRIBUTION_MAY_HAVE_FAILED_TO_NOTIFY_THESE_REQUIRED_ROLES_0.toLocalizedString(failedRoles), getFullPath(), failedRoles); // } } } /** * * Called when we do a distributed operation and don't have anyone to * distributed it too. Since this is only called when no distribution was done * (i.e. no recipients) we do not check isMissingRequiredRoles because it * might not longer be true due to race conditions * * @return false if this region has at least one required role and queuing is * configured. Returns true if sending to no one is ok. * @throws RoleException * if a required role is missing and the LossAction is either * NO_ACCESS or LIMITED_ACCESS. * @since 5.0 */ protected boolean isNoDistributionOk() { if (this.requiresReliabilityCheck) { MembershipAttributes ra = getMembershipAttributes(); // if (ra.getLossAction().isAllAccessWithQueuing()) { // return !ra.hasRequiredRoles(); // } else { Set failedRoles = ra.getRequiredRoles(); throw new RegionDistributionException(LocalizedStrings.DistributedRegion_OPERATION_DISTRIBUTION_WAS_NOT_DONE_TO_THESE_REQUIRED_ROLES_0.toLocalizedString(failedRoles), getFullPath(), failedRoles); // } } return true; } /** * returns true if this Region does not distribute its operations to other * members. 
* @since 6.0
   * @see HARegion#localDestroyNoCallbacks(Object)
   */
  public boolean doesNotDistribute() {
    return false;
  }

  @Override
  public boolean shouldSyncForCrashedMember(InternalDistributedMember id) {
    // a region that does not distribute has nothing to synchronize
    return !doesNotDistribute() && super.shouldSyncForCrashedMember(id);
  }

  /**
   * Adjust the specified set of recipients by removing any of them that are
   * currently having their data queued.
   *
   * @param recipients
   *          the set of recipients that a message is to be distributed to.
   *          Recipients that are currently having their data queued will be
   *          removed from this set.
   * @return the set, possibly null, of recipients that are currently having
   *         their data queued.
   * @since 5.0
   */
  protected Set adjustForQueuing(Set recipients) {
    // NOTE(review): queuing support is disabled; the logic below is retained
    // commented-out for reference, so this method currently always returns null.
    Set result = null;
    // if (this.requiresReliabilityCheck) {
    // MembershipAttributes ra = getMembershipAttributes();
    // if (ra.getLossAction().isAllAccessWithQueuing()) {
    // Set currentQueuedRoles = this.rmq.getQueuingRoles();
    // if (currentQueuedRoles != null) {
    // // foreach recipient see if any of his roles are queued and if
    // // they are remove him from recipients and add him to result
    // Iterator it = recipients.iterator();
    // while (it.hasNext()) {
    // DistributedMember dm = (DistributedMember)it.next();
    // Set dmRoles = dm.getRoles();
    // if (!dmRoles.isEmpty()) {
    // if (intersects(dmRoles, currentQueuedRoles)) {
    // it.remove(); // fix for bug 34447
    // if (result == null) {
    // result = new HashSet();
    // }
    // result.add(dm);
    // }
    // }
    // }
    // }
    // }
    // }
    return result;
  }

  /**
   * Returns true if the two sets intersect
   *
   * @param a
   *          a non-null non-empty set
   * @param b
   *          a non-null non-empty set
   * @return true if sets a and b intersect; false if not
   * @since 5.0
   */
  public static boolean intersects(Set a, Set b) {
    Iterator it;
    Set target;
    // iterate the smaller set, probe the larger one
    if (a.size() <= b.size()) {
      it = a.iterator();
      target = b;
    }
    else {
      it = b.iterator();
      target = a;
    }
    while (it.hasNext()) {
      if (target.contains(it.next()))
        return true;
    }
    return false;
  }

  /** Accessor for the reliability-check flag computed from MembershipAttributes. */
  @Override
  public boolean requiresReliabilityCheck() {
    return this.requiresReliabilityCheck;
  }

  /**
   * Returns true if the ExpiryTask is currently allowed to expire.
   * <p>
   * If the region is in NO_ACCESS due to reliability configuration, then no
   * expiration actions are allowed.
   * <p>
   * If the region is in LIMITED_ACCESS due to reliability configuration, then
   * only non-distributed expiration actions are allowed.
   */
  @Override
  protected boolean isExpirationAllowed(ExpiryTask expiry) {
    if (this.requiresReliabilityCheck && this.isMissingRequiredRoles) {
      if (getMembershipAttributes().getLossAction().isNoAccess()) {
        return false;
      }
      if (getMembershipAttributes().getLossAction().isLimitedAccess()
          && expiry.isDistributedAction()) {
        return false;
      }
    }
    return true;
  }

  /**
   * Performs the resumption action when reliability is resumed.
   *
   * @return true if asynchronous resumption is triggered
   */
  protected boolean resumeReliability(InternalDistributedMember id,
      Set newlyAcquiredRoles) {
    boolean async = false;
    try {
      ResumptionAction ra = getMembershipAttributes().getResumptionAction();
      if (ra.isNone()) {
        if (logger.isDebugEnabled()) {
          logger.debug("Reliability resumption for action of none");
        }
        resumeExpiration();
      }
      else if (ra.isReinitialize()) {
        // reinitialize happens asynchronously on the waiting thread pool
        async = true;
        asyncResumeReliability(id, newlyAcquiredRoles);
      }
    }
    catch (Exception e) {
      logger.fatal(LocalizedMessage.create(LocalizedStrings.DistributedRegion_UNEXPECTED_EXCEPTION), e);
    }
    return async;
  }

  /**
   * Handles asynchronous ResumptionActions such as region reinitialize.
*/
  private void asyncResumeReliability(final InternalDistributedMember id,
      final Set newlyAcquiredRoles) throws RejectedExecutionException {
    final ResumptionAction ra = getMembershipAttributes().getResumptionAction();
    getDistributionManager().getWaitingThreadPool().execute(new Runnable() {
      public void run() {
        try {
          if (ra.isReinitialize()) {
            if (logger.isDebugEnabled()) {
              logger.debug("Reliability resumption for action of reinitialize");
            }
            if (!isDestroyed() && !cache.isClosed()) {
              RegionEventImpl event = new RegionEventImpl(
                  DistributedRegion.this, Operation.REGION_REINITIALIZE, null,
                  false, getMyId(), generateEventID());
              reinitialize(null, event);
            }
            synchronized (missingRequiredRoles) {
              // any number of threads may be waiting on missingRequiredRoles
              missingRequiredRoles.notifyAll();
              if (hasListener() && id != null) {
                // fire afterRoleGain event
                RoleEventImpl relEvent = new RoleEventImpl(
                    DistributedRegion.this, Operation.REGION_CREATE, null,
                    true, id, newlyAcquiredRoles);
                dispatchListenerEvent(EnumListenerEvent.AFTER_ROLE_GAIN,
                    relEvent);
              }
            }
          }
        }
        catch (Exception e) {
          logger.fatal(LocalizedMessage.create(LocalizedStrings.DistributedRegion_UNEXPECTED_EXCEPTION), e);
        }
      }
    });
  }

  /** Reschedules expiry tasks when reliability is resumed.
   */
  private void resumeExpiration() {
    boolean isNoAccess = getMembershipAttributes().getLossAction().isNoAccess();
    boolean isLimitedAccess = getMembershipAttributes().getLossAction()
        .isLimitedAccess();
    if (!(isNoAccess || isLimitedAccess)) {
      return; // early out: expiration was never affected by reliability
    }

    // entry expiration: reschedule if it was suppressed by NO_ACCESS, or by
    // LIMITED_ACCESS when the action is distributed
    if (getEntryTimeToLive().getTimeout() > 0
        && (isNoAccess || (isLimitedAccess && getEntryTimeToLive().getAction()
            .isDistributed()))) {
      rescheduleEntryExpiryTasks();
    }
    else if (getEntryIdleTimeout().getTimeout() > 0
        && (isNoAccess || (isLimitedAccess && getEntryIdleTimeout().getAction()
            .isDistributed()))) {
      rescheduleEntryExpiryTasks();
    }
    else if (getCustomEntryTimeToLive() != null || getCustomEntryIdleTimeout() != null) {
      // Force all entries to be rescheduled
      rescheduleEntryExpiryTasks();
    }

    // region-level expiration, same criteria as above
    if (getRegionTimeToLive().getTimeout() > 0
        && (isNoAccess || (isLimitedAccess && getRegionTimeToLive().getAction()
            .isDistributed()))) {
      addTTLExpiryTask();
    }
    if (getRegionIdleTimeout().getTimeout() > 0
        && (isNoAccess || (isLimitedAccess && getRegionIdleTimeout()
            .getAction().isDistributed()))) {
      addIdleExpiryTask();
    }
  }

  /**
   * A boolean used to indicate if it is the initialization time i.e. the
   * distributed Region is created for the first time. The variable is used at
   * the time of lost reliability.
   */
  private boolean isInitializingThread = false;

  /**
   * Called when reliability is lost. If MembershipAttributes are configured
   * with {@link LossAction#RECONNECT}then DistributedSystem reconnect will be
   * called asynchronously.
*
   * @return true if asynchronous resumption is triggered
   */
  protected boolean lostReliability(final InternalDistributedMember id,
      final Set newlyMissingRoles) {
    if (DistributedRegion.ignoreReconnect)
      return false;
    boolean async = false;
    try {
      if (getMembershipAttributes().getLossAction().isReconnect()) {
        async = true;
        if (isInitializingThread) {
          doLostReliability(true, id, newlyMissingRoles);
        }
        else {
          doLostReliability(false, id, newlyMissingRoles);
        }
        // we don't do this in the waiting pool because we're going to
        // disconnect
        // the distributed system, and it will wait for the pool to empty
        /*
         * moved to a new method called doLostReliablity. Thread t = new
         * Thread("Reconnect Distributed System") { public void run() { try { //
         * TODO: may need to check isReconnecting and checkReadiness...
         * initializationLatchAfterMemberTimeout.await(); // TODO:
         * call reconnect here
         * getSystem().tryReconnect((GemFireCache)getCache()); // added for
         * reconnect. synchronized (missingRequiredRoles) { // any number of
         * threads may be waiting on missingRequiredRoles
         * missingRequiredRoles.notifyAll(); // need to fire an event if id is
         * not null if (hasListener() && id != null) { RoleEventImpl relEvent =
         * new RoleEventImpl( DistributedRegion.this, Operation.CACHE_RECONNECT,
         * null, true, id, newlyMissingRoles); dispatchListenerEvent(
         * EnumListenerEvent.AFTER_ROLE_LOSS, relEvent); } } } catch (Exception
         * e) { } } };
         * t.setDaemon(true); t.start();
         */
      }
    }
    catch (CancelException cce) {
      throw cce;
    }
    catch (Exception e) {
      logger.fatal(LocalizedMessage.create(LocalizedStrings.DistributedRegion_UNEXPECTED_EXCEPTION), e);
    }
    return async;
  }

  /**
   * Performs the RECONNECT loss action. When not initializing, the reconnect
   * is done on a new daemon thread (the membership thread must not block);
   * when initializing, the reconnect is done inline on the calling thread.
   */
  private void doLostReliability(boolean isInitializing,
      final InternalDistributedMember id, final Set newlyMissingRoles) {
    try {
      if (!isInitializing) {
        // moved code to a new thread.
        Thread t = new Thread(LocalizedStrings.DistributedRegion_RECONNECT_DISTRIBUTED_SYSTEM.toLocalizedString()) {
          @Override
          public void run() {
            try {
              // TODO: may need to check isReconnecting and checkReadiness...
              if (logger.isDebugEnabled()) {
                logger.debug("Reliability loss with policy of reconnect and membership thread doing reconnect");
              }
              initializationLatchAfterMemberTimeout.await();
              getSystem().tryReconnect(false, "Role Loss", getCache());
              synchronized (missingRequiredRoles) {
                // any number of threads may be waiting on missingRequiredRoles
                missingRequiredRoles.notifyAll();
                // need to fire an event if id is not null
                if (hasListener() && id != null) {
                  RoleEventImpl relEvent = new RoleEventImpl(
                      DistributedRegion.this, Operation.CACHE_RECONNECT, null,
                      true, id, newlyMissingRoles);
                  dispatchListenerEvent(EnumListenerEvent.AFTER_ROLE_LOSS,
                      relEvent);
                }
              }
            }
            catch (Exception e) {
              logger.fatal(LocalizedMessage.create(LocalizedStrings.DistributedRegion_UNEXPECTED_EXCEPTION), e);
            }
          }
        };
        t.setDaemon(true);
        t.start();
      }
      else {
        getSystem().tryReconnect(false, "Role Loss", getCache()); // added for
        // reconnect.
        synchronized (missingRequiredRoles) {
          // any number of threads may be waiting on missingRequiredRoles
          missingRequiredRoles.notifyAll();
          // need to fire an event if id is not null
          if (hasListener() && id != null) {
            RoleEventImpl relEvent = new RoleEventImpl(DistributedRegion.this,
                Operation.CACHE_RECONNECT, null, true, id, newlyMissingRoles);
            dispatchListenerEvent(EnumListenerEvent.AFTER_ROLE_LOSS, relEvent);
          }
        }
        // } catch (CancelException cce){
        // }
      }
    }
    catch (CancelException ignor) {
      throw ignor;
    }
    catch (Exception e) {
      logger.fatal(LocalizedMessage.create(LocalizedStrings.DistributedRegion_UNEXPECTED_EXCEPTION), e);
    }
  }

  /** Checks for cache cancellation before checking region readiness. */
  protected void lockCheckReadiness() {
    // fix for bug 32610
    cache.getCancelCriterion().checkCancelInProgress(null);
    checkReadiness();
  }

  @Override
  public final Object validatedDestroy(Object key, EntryEventImpl event)
      throws TimeoutException, EntryNotFoundException, CacheWriterException {
    // for GLOBAL scope, hold the distributed lock for the duration of the destroy
    Lock dlock = this.getDistributedLockIfGlobal(key);
    try {
      return super.validatedDestroy(key, event);
    }
    finally {
      if (dlock != null) {
        dlock.unlock();
      }
    }
  }

  /**
   * @see LocalRegion#localDestroyNoCallbacks(Object)
   */
  @Override
  public void localDestroyNoCallbacks(Object key) {
    super.localDestroyNoCallbacks(key);
    if (getScope().isGlobal()) {
      try {
        this.getLockService().freeResources(key);
      }
      catch (LockServiceDestroyedException ignore) {
        // lock service already gone; nothing to free
      }
    }
  }

  /**
   * @see LocalRegion#localDestroy(Object, Object)
   */
  @Override
  public void localDestroy(Object key, Object aCallbackArgument)
      throws EntryNotFoundException {
    super.localDestroy(key, aCallbackArgument);
    if (getScope().isGlobal()) {
      try {
        this.getLockService().freeResources(key);
      }
      catch (LockServiceDestroyedException ignore) {
        // lock service already gone; nothing to free
      }
    }
  }

  /**
   * @see LocalRegion#invalidate(Object, Object)
   */
  @Override
  public void invalidate(Object key, Object aCallbackArgument)
      throws TimeoutException, EntryNotFoundException {
    validateKey(key);
    validateCallbackArg(aCallbackArgument);
    checkReadiness();
    checkForLimitedOrNoAccess();
    Lock dlock =
this.getDistributedLockIfGlobal(key); try { super.validatedInvalidate(key, aCallbackArgument); } finally { if (dlock != null) dlock.unlock(); } } @Override public Lock getRegionDistributedLock() throws IllegalStateException { lockCheckReadiness(); checkForLimitedOrNoAccess(); if (!this.scope.isGlobal()) { throw new IllegalStateException(LocalizedStrings.DistributedRegion_DISTRIBUTION_LOCKS_ARE_ONLY_SUPPORTED_FOR_REGIONS_WITH_GLOBAL_SCOPE_NOT_0.toLocalizedString(this.scope)); } return new RegionDistributedLock(); } @Override public Lock getDistributedLock(Object key) throws IllegalStateException { validateKey(key); lockCheckReadiness(); checkForLimitedOrNoAccess(); if (!this.scope.isGlobal()) { throw new IllegalStateException(LocalizedStrings.DistributedRegion_DISTRIBUTION_LOCKS_ARE_ONLY_SUPPORTED_FOR_REGIONS_WITH_GLOBAL_SCOPE_NOT_0.toLocalizedString(this.scope)); } if (isLockingSuspendedByCurrentThread()) { throw new IllegalStateException(LocalizedStrings.DistributedRegion_THIS_THREAD_HAS_SUSPENDED_ALL_LOCKING_FOR_THIS_REGION.toLocalizedString()); } return new DistributedLock(key); } /** * Called while NOT holding lock on parent's subregions * * @throws IllegalStateException * if region is not compatible with a region in another VM. 
*
   * @see LocalRegion#initialize(InputStream, InternalDistributedMember, InternalRegionArguments)
   */
  @Override
  protected void initialize(InputStream snapshotInputStream,
      InternalDistributedMember imageTarget,
      InternalRegionArguments internalRegionArgs) throws TimeoutException,
      IOException, ClassNotFoundException {
    Assert.assertTrue(!isInitialized());
    if (logger.isDebugEnabled()) {
      logger.debug("DistributedRegion.initialize BEGIN: {}", getFullPath());
    }

    // if we're versioning entries we need a region-level version vector
    if (this.scope.isDistributed() && this.concurrencyChecksEnabled) {
      createVersionVector();
    }

    if (this.scope.isGlobal()) {
      getLockService(); // create lock service eagerly now
    }

    final IndexUpdater indexUpdater = getIndexUpdater();
    boolean sqlfGIILockTaken = false;
    // this try block is to release the SQLF GII lock in finally
    // which should be done after bucket status will be set
    // properly in LocalRegion#initialize()
    try {
      try {
        try {
          // take the GII lock to avoid missing entries while updating the
          // index list for SQLFabric (#41330 and others)
          if (indexUpdater != null) {
            indexUpdater.lockForGII();
            sqlfGIILockTaken = true;
          }

          PersistentMemberID persistentId = null;
          boolean recoverFromDisk = isRecoveryNeeded();
          DiskRegion dskRgn = getDiskRegion();
          if (recoverFromDisk) {
            if (logger.isDebugEnabled()) {
              logger.debug("DistributedRegion.getInitialImageAndRecovery: Starting Recovery");
            }
            dskRgn.initializeOwner(this); // do recovery
            if (logger.isDebugEnabled()) {
              logger.debug("DistributedRegion.getInitialImageAndRecovery: Finished Recovery");
            }
            persistentId = dskRgn.getMyPersistentID();
          }

          // Create OQL indexes before starting GII.
          createOQLIndexes(internalRegionArgs, recoverFromDisk);

          if (getDataPolicy().withReplication()
              || getDataPolicy().withPreloaded()) {
            getInitialImageAndRecovery(snapshotInputStream, imageTarget,
                internalRegionArgs, recoverFromDisk, persistentId);
          }
          else {
            new CreateRegionProcessor(this).initializeRegion();
            if (snapshotInputStream != null) {
              releaseBeforeGetInitialImageLatch();
              loadSnapshotDuringInitialization(snapshotInputStream);
            }
          }
        }
        catch (DiskAccessException dae) {
          this.handleDiskAccessException(dae, true);
          throw dae;
        }

        initMembershipRoles();
        isInitializingThread = false;
        // makes sure all latches are released if they haven't been already
        super.initialize(null, null, null);
      }
      finally {
        if (this.eventTracker != null) {
          this.eventTracker.setInitialized();
        }
      }
    }
    finally {
      if (sqlfGIILockTaken) {
        indexUpdater.unlockForGII();
      }
    }
  }

  @Override
  public void initialized() {
    // announce this region's attributes to peers once initialization completes
    new UpdateAttributesProcessor(this).distribute(false);
  }

  /** True if GII was impacted by missing required roles */
  private boolean giiMissingRequiredRoles = false;

  /**
   * A reference counter to protect the memoryThresholdReached boolean
   */
  private final Set<DistributedMember> memoryThresholdReachedMembers =
      new CopyOnWriteArraySet<DistributedMember>();

  // NOTE(review): purpose not evident from this chunk — presumably the HDFS
  // event queue for this region; confirm against the rest of the class
  private ConcurrentParallelGatewaySenderQueue hdfsQueue;

  /** Sets and returns giiMissingRequiredRoles */
  private boolean checkInitialImageForReliability(
      InternalDistributedMember imageTarget,
      CacheDistributionAdvisor.InitialImageAdvice advice) {
    // assumption: required roles are interesting to GII only if Reinitialize...
    // NOTE(review): the original logic is disabled ("if (true)" commented out)
    // so this method always returns false; the commented code is kept for reference.
//    if (true)
      return false;
//    if (getMembershipAttributes().hasRequiredRoles()
//        && getMembershipAttributes().getResumptionAction().isReinitialize()) {
//      // are any required roles missing for GII with Reinitialize?
//      Set missingRR = new HashSet(getMembershipAttributes().getRequiredRoles());
//      missingRR.removeAll(getSystem().getDistributedMember().getRoles());
//      for (Iterator iter = advice.replicates.iterator(); iter.hasNext();) {
//        DistributedMember member = (DistributedMember)iter.next();
//        missingRR.removeAll(member.getRoles());
//      }
//      for (Iterator iter = advice.others.iterator(); iter.hasNext();) {
//        DistributedMember member = (DistributedMember)iter.next();
//        missingRR.removeAll(member.getRoles());
//      }
//      for (Iterator iter = advice.preloaded.iterator(); iter.hasNext();) {
//        DistributedMember member = (DistributedMember)iter.next();
//        missingRR.removeAll(member.getRoles());
//      }
//      if (!missingRR.isEmpty()) {
//        // entering immediate loss condition, which will cause reinit on resume
//        this.giiMissingRequiredRoles = true;
//      }
//    }
//    return this.giiMissingRequiredRoles;
  }

  /**
   * Performs disk recovery and/or the getInitialImage (GII) for this region,
   * exchanging profiles with peers and choosing a GII provider.
   */
  private void getInitialImageAndRecovery(InputStream snapshotInputStream,
      InternalDistributedMember imageSrc,
      InternalRegionArguments internalRegionArgs, boolean recoverFromDisk,
      PersistentMemberID persistentId) throws TimeoutException {
    logger.info(LocalizedMessage.create(LocalizedStrings.DistributedRegion_INITIALIZING_REGION_0, this.getName()));

    ImageState imgState = getImageState();
    imgState.init();
    boolean targetRecreated = internalRegionArgs.getRecreateFlag();
    Boolean isCBool = (Boolean)isConversion.get();
    boolean isForConversion = isCBool!=null?isCBool.booleanValue():false;

    // a snapshot load and disk recovery are mutually exclusive (except conversion)
    if (recoverFromDisk && snapshotInputStream != null && !isForConversion) {
      throw new InternalGemFireError(LocalizedStrings.DistributedRegion_IF_LOADING_A_SNAPSHOT_THEN_SHOULD_NOT_BE_RECOVERING_ISRECOVERING_0_SNAPSHOTSTREAM_1.toLocalizedString(new Object[] {Boolean.valueOf(recoverFromDisk), snapshotInputStream}));
    }

    ProfileExchangeProcessor targetProvider;
    if (dataPolicy.withPersistence()) {
      targetProvider = new CreatePersistentRegionProcessor(this,
          getPersistenceAdvisor(), recoverFromDisk);
    }
    else {
      // this will go in the advisor profile
      targetProvider = new CreateRegionProcessor(this);
    }
    imgState.setInRecovery(false);
    RegionVersionVector recovered_rvv = null;
    if (dataPolicy.withPersistence()) {
      recovered_rvv = (this.getVersionVector()==null?null:this.getVersionVector().getCloneForTransmission());
    }
    // initializeRegion will send out our profile
    targetProvider.initializeRegion();

    if(persistenceAdvisor != null) {
      persistenceAdvisor.initialize();
    }

    // Register listener here so that the remote members are known
    // since registering calls initializeCriticalMembers (which needs to know about
    // remote members
    if (!isInternalRegion()) {
      if (!this.isDestroyed) {
        cache.getResourceManager().addResourceListener(ResourceType.MEMORY, this);
      }
    }

    releaseBeforeGetInitialImageLatch();

    // allow GII to invoke test hooks. Do this just after releasing the
    // before-gii latch for bug #48962. See ConcurrentLeaveDuringGIIDUnitTest
    InitialImageOperation.beforeGetInitialImage(this);

    if (snapshotInputStream != null) {
      try {
        if (logger.isDebugEnabled()) {
          logger.debug("DistributedRegion.getInitialImageAndRecovery: About to load snapshot, isInitialized={}; {}", isInitialized(), getFullPath());
        }
        loadSnapshotDuringInitialization(snapshotInputStream);
      }
      catch (IOException e) {
        throw new RuntimeException(e); // @todo change this exception?
      }
      catch (ClassNotFoundException e) {
        throw new RuntimeException(e); // @todo change this exception?
      }
      cleanUpDestroyedTokensAndMarkGIIComplete(GIIStatus.NO_GII);
      return;
    }

    // No snapshot provided, use the imageTarget(s)

    // if we were given a recommended imageTarget, use that first, and
    // treat it like it is a replicate (regardless of whether it actually is
    // or not)

    InitialImageOperation iiop = new InitialImageOperation(this, this.entries);
    // [defunct] Special case GII for PR admin regions (which are always
    // replicates and always writers
    // bruce: this was commented out after adding the GIIAckRequest logic to
    // force
    // consistency before the gii operation begins
    // if (isUsedForPartitionedRegionAdmin() ||
    // isUsedForPartitionedRegionBucket()) {
    // releaseBeforeGetInitialImageLatch();
    // iiop.getFromAll(this.distAdvisor.adviseGeneric(), false);
    // cleanUpDestroyedTokens();
    // return;
    // }

    CacheDistributionAdvisor.InitialImageAdvice advice = null;
    boolean done = false;
    while(!done && !isDestroyed()) {
      advice = targetProvider.getInitialImageAdvice(advice);
      checkInitialImageForReliability(imageSrc, advice);
      boolean attemptGetFromOne =
        imageSrc != null // we were given a specific member
        || this.dataPolicy.withPreloaded()
           && !advice.preloaded.isEmpty() // this is a preloaded region
        || (!advice.replicates.isEmpty());
      // That is: if we have 0 or 1 giiProvider then we can do a getFromOne gii;
      // if we have 2 or more giiProviders then we must do a getFromAll gii.

      if (attemptGetFromOne) {
        if (recoverFromDisk) {
          if (LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER){
            CacheObserverHolder.getInstance().afterMarkingGIIStarted();
          }
        }
        {
          // If we have an imageSrc and the target is reinitializing mark the
          // getInitialImage so that it will wait until the target region is fully initialized
          // before responding to the get image request. Otherwise, the
          // source may respond with no data because it is still initializing,
          // e.g. loading a snapshot.
// Plan A: use specified imageSrc, if specified
          if (imageSrc != null) {
            try {
              GIIStatus ret = iiop.getFromOne(Collections.singleton(imageSrc),
                  targetRecreated, advice, recoverFromDisk, recovered_rvv);
              if (GIIStatus.didGII(ret)) {
                this.giiMissingRequiredRoles = false;
                cleanUpDestroyedTokensAndMarkGIIComplete(ret);
                done = true;
                return;
              }
            } finally {
              // only try the preferred source once; fall through to Plan C
              imageSrc = null;
            }
          }

          // Plan C: use a replicate, if one exists
          GIIStatus ret = iiop.getFromOne(advice.replicates, false, advice,
              recoverFromDisk, recovered_rvv);
          if (GIIStatus.didGII(ret)) {
            cleanUpDestroyedTokensAndMarkGIIComplete(ret);
            done = true;
            return;
          }

          // Plan D: if this is a PRELOADED region, fetch from another PRELOADED
          if (this.dataPolicy.isPreloaded()) {
            GIIStatus ret_preload = iiop.getFromOne(advice.preloaded, false,
                advice, recoverFromDisk, recovered_rvv);
            if (GIIStatus.didGII(ret_preload)) {
              cleanUpDestroyedTokensAndMarkGIIComplete(ret_preload);
              done = true;
              return;
            }
          } // isPreloaded
        }

        // If we got to this point, we failed in the GII. Cleanup
        // any partial image we received
        cleanUpAfterFailedGII(recoverFromDisk);

      } // attemptGetFromOne
      else {
        // no GII provider available: come up from disk recovery or empty
        if(!isDestroyed()) {
          if(recoverFromDisk) {
            logger.info(LocalizedMessage.create(LocalizedStrings.DistributedRegion_INITIALIZED_FROM_DISK,
                new Object[] {this.getFullPath(), persistentId, getPersistentID()}));
            if(persistentId != null) {
              RegionLogger.logRecovery(this.getFullPath(), persistentId,
                  getDistributionManager().getDistributionManagerId());
            }
          } else {
            RegionLogger.logCreate(this.getFullPath(),
                getDistributionManager().getDistributionManagerId());

            if (getPersistentID() != null) {
              RegionLogger.logPersistence(this.getFullPath(),
                  getDistributionManager().getDistributionManagerId(),
                  getPersistentID());
              logger.info(LocalizedMessage.create(LocalizedStrings.DistributedRegion_NEW_PERSISTENT_REGION_CREATED,
                  new Object[] {this.getFullPath(), getPersistentID()}));
            }
          }

          /* no more union GII
            // do union getInitialImage
            Set rest = new HashSet();
            rest.addAll(advice.others);
            rest.addAll(advice.preloaded);
            // push profile w/ recovery flag turned off at same time that we
            // do a union getInitialImage
            boolean pushProfile = recoverFromDisk;
            iiop.getFromAll(rest, pushProfile);
          */
          cleanUpDestroyedTokensAndMarkGIIComplete(GIIStatus.NO_GII);
          done = true;
          return;
        }
        break;
      }
    }

    return;
  }

  /** Pulls missing changes from the target member, wrt the given version source. */
  private void synchronizeWith(InternalDistributedMember target,
      VersionSource idToRecover) {
    InitialImageOperation op = new InitialImageOperation(this, this.entries);
    op.synchronizeWith(target, idToRecover, null);
  }

  /**
   * If this region has concurrency controls enabled this will pull any missing
   * changes from other replicates using InitialImageOperation and a filtered
   * chunking protocol.
*/
  public void synchronizeForLostMember(InternalDistributedMember lostMember,
      VersionSource lostVersionID) {
    // region synchronization is only meaningful with concurrency checks
    if (this.concurrencyChecksEnabled == false) {
      return;
    }
    CacheDistributionAdvisor advisor = getCacheDistributionAdvisor();
    Set<InternalDistributedMember> targets = advisor.adviseInitializedReplicates();
    for (InternalDistributedMember target: targets) {
      synchronizeWith(target, lostVersionID, lostMember);
    }
  }

  /**
   * synchronize with another member wrt messages from the given "lost" member.
   * This can be used when a primary bucket crashes to ensure that interrupted
   * message distribution is mended.
   */
  private void synchronizeWith(InternalDistributedMember target,
      VersionSource versionMember, InternalDistributedMember lostMember) {
    InitialImageOperation op = new InitialImageOperation(this, this.entries);
    op.synchronizeWith(target, versionMember, lostMember);
  }

  /**
   * invoked just before an initial image is requested from another member
   */
  /** remove any partial entries received in a failed GII */
  protected void cleanUpAfterFailedGII(boolean recoverFromDisk) {
    DiskRegion dskRgn = getDiskRegion();
    // if we have a persistent region, instead of deleting everything on disk,
    // we will just reset the "recovered from disk" flag. After
    // the next GII we will delete these entries if they do not come
    // in as part of the GII.
    if (recoverFromDisk && dskRgn != null && dskRgn.isBackup()) {
      dskRgn.resetRecoveredEntries(this);
      return;
    }

    if (!this.entries.isEmpty()) {
      closeEntries();
      if (getDiskRegion() != null) {
        getDiskRegion().clear(this, null);
      }
      // clear the left-members and version-tags sets in imageState
      getImageState().getLeftMembers();
      getImageState().getVersionTags();
      // Clear OQL indexes
      if (this.indexManager != null) {
        try {
          this.indexManager.rerunIndexCreationQuery();
        } catch (Exception ex){
          // best-effort: index rebuild failure is logged, not rethrown
          if (logger.isDebugEnabled()) {
            logger.debug("Exception while clearing indexes after GII failure.", ex);
          }
        }
      }
    }
  }

  /**
   * Registers the membership listener and computes the initially-missing
   * required roles from the advisor's current membership.
   */
  private void initMembershipRoles() {
    synchronized (this.advisorListener) {
      // hold sync to prevent listener from changing initial members
      Set others = this.distAdvisor
          .addMembershipListenerAndAdviseGeneric(this.advisorListener);
      this.advisorListener.addMembers(others);
      // initialize missing required roles with initial member info
      if (getMembershipAttributes().hasRequiredRoles()) {
        // AdvisorListener will also sync on missingRequiredRoles
        synchronized (this.missingRequiredRoles) {
          this.missingRequiredRoles.addAll(getMembershipAttributes()
              .getRequiredRoles());
          // remove all the roles we are playing since they will never be
          // missing
          this.missingRequiredRoles.removeAll(getSystem()
              .getDistributedMember().getRoles());
          for (Iterator iter = others.iterator(); iter.hasNext();) {
            DistributedMember other = (DistributedMember)iter.next();
            this.missingRequiredRoles.removeAll(other.getRoles());
          }
        }
      }
    }
    if (getMembershipAttributes().hasRequiredRoles()) {
      // wait up to memberTimeout for required roles...
//      boolean requiredRolesAreMissing = false;
      int memberTimeout = getSystem().getConfig().getMemberTimeout();
      if (logger.isDebugEnabled()) {
        logger.debug("Waiting up to {} for required roles.", memberTimeout);
      }
      try {
        if (this.giiMissingRequiredRoles) {
          // force reliability loss and possibly resumption
          isInitializingThread = true;
          synchronized (this.advisorListener) {
            synchronized (this.missingRequiredRoles) {
              // forcing state of loss because of bad GII
              this.isMissingRequiredRoles = true;
              getCachePerfStats().incReliableRegionsMissing(1);
              if (getMembershipAttributes().getLossAction().isAllAccess())
                getCachePerfStats().incReliableRegionsMissingFullAccess(1); // rahul
              else if (getMembershipAttributes().getLossAction()
                  .isLimitedAccess())
                getCachePerfStats().incReliableRegionsMissingLimitedAccess(1);
              else if (getMembershipAttributes().getLossAction().isNoAccess())
                getCachePerfStats().incReliableRegionsMissingNoAccess(1);
              // pur code to increment the stats.
              if (logger.isDebugEnabled()) {
                logger.debug("GetInitialImage had missing required roles.");
              }
              // TODO: will this work with RECONNECT and REINITIALIZE?
              isInitializingThread = true;
              lostReliability(null, null);
              if (this.missingRequiredRoles.isEmpty()) {
                // all required roles are present so force resumption
                this.isMissingRequiredRoles = false;
                getCachePerfStats().incReliableRegionsMissing(-1);
                if (getMembershipAttributes().getLossAction().isAllAccess())
                  getCachePerfStats().incReliableRegionsMissingFullAccess(-1); // rahul
                else if (getMembershipAttributes().getLossAction()
                    .isLimitedAccess())
                  getCachePerfStats()
                      .incReliableRegionsMissingLimitedAccess(-1);
                else if (getMembershipAttributes().getLossAction().isNoAccess())
                  getCachePerfStats().incReliableRegionsMissingNoAccess(-1);
                // pur code to increment the stats.
                boolean async = resumeReliability(null, null);
                if (async) {
                  advisorListener.destroyed = true;
                }
              }
            }
          }
        }
        else {
          if (!getSystem().isLoner()) {
            waitForRequiredRoles(memberTimeout);
          }
          synchronized (this.advisorListener) {
            synchronized (this.missingRequiredRoles) {
              if (this.missingRequiredRoles.isEmpty()) {
                Assert.assertTrue(!this.isMissingRequiredRoles);
                if (logger.isDebugEnabled()) {
                  logger.debug("Initialization completed with all required roles present.");
                }
              }
              else {
                // starting in state of loss...
                this.isMissingRequiredRoles = true;
                getCachePerfStats().incReliableRegionsMissing(1);
                if (getMembershipAttributes().getLossAction().isAllAccess())
                  getCachePerfStats().incReliableRegionsMissingFullAccess(1); // rahul
                else if (getMembershipAttributes().getLossAction()
                    .isLimitedAccess())
                  getCachePerfStats().incReliableRegionsMissingLimitedAccess(1);
                else if (getMembershipAttributes().getLossAction().isNoAccess())
                  getCachePerfStats().incReliableRegionsMissingNoAccess(1);

                if (logger.isDebugEnabled()) {
                  logger.debug("Initialization completed with missing required roles: {}", this.missingRequiredRoles);
                }
                isInitializingThread = true;
                lostReliability(null, null);
              }
            }
          }
        }
      }
      catch (RegionDestroyedException ignore) {
        // ignore to fix bug 34639 may be thrown by waitForRequiredRoles
      }
      catch (CancelException ignore) {
        // ignore to fix bug 34639 may be thrown by waitForRequiredRoles
        if (isInitializingThread) {
          throw ignore;
        }
      }
      catch (Exception e) {
        logger.fatal(LocalizedMessage.create(LocalizedStrings.DistributedRegion_UNEXPECTED_EXCEPTION), e);
      }

    }
    // open latch which will allow any threads in lostReliability to proceed
    this.initializationLatchAfterMemberTimeout.countDown();
  }

  /** Returns true when this region is persistent and its disk region was recreated. */
  private boolean isRecoveryNeeded() {
    return getDataPolicy().withPersistence() && getDiskRegion().isRecreated();
  }

  // called by InitialImageOperation to clean up destroyed tokens
  // release afterGetInitialImageInitializationLatch before unlocking
  // cleanUpLock
// Removes entries still carrying DESTROYED tokens after a getInitialImage
// (GII) and marks the image as complete.  The disk region is finalized first,
// then destroyed tokens are purged while holding the image-state GII lock;
// the after-GII latch is released before the GII lock is dropped (see the
// ordering comments below).
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="UL_UNRELEASED_LOCK")
private void cleanUpDestroyedTokensAndMarkGIIComplete(GIIStatus giiStatus) {
  //We need to clean up the disk before we release the after get initial image latch
  DiskRegion dskRgn = getDiskRegion();
  if (dskRgn != null && dskRgn.isBackup()) {
    dskRgn.finishInitializeOwner(this, giiStatus);
  }
  ImageState is = getImageState();
  is.lockGII();
  // clear the version tag and left-members sets
  is.getVersionTags();
  is.getLeftMembers();
  // remove DESTROYED tokens
  RegionVersionVector rvv = is.getClearRegionVersionVector();
  try {
    Iterator/*<Object>*/ keysIt = getImageState().getDestroyedEntries();
    while (keysIt.hasNext()) {
      this.entries.removeIfDestroyed(keysIt.next());
    }
    if (rvv != null) {
      // clear any entries received in the GII that are older than the RVV versions.
      // this can happen if entry chunks were received prior to the clear() being
      // processed
      clearEntries(rvv);
    }
    //need to do this before we release the afterGetInitialImageLatch
    if(persistenceAdvisor != null) {
      persistenceAdvisor.setOnline(GIIStatus.didGII(giiStatus), false,
          getPersistentID());
    }
  }
  finally {
    // release after gii lock first so basicDestroy will see isInitialized()
    // be true
    // when they get the cleanUp lock.
    try {
      releaseAfterGetInitialImageLatch();
    } finally { // make sure unlockGII is done for bug 40001
      is.unlockGII();
    }
  }
  if (LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER){
    CacheObserverHolder.getInstance().afterMarkingGIICompleted();
  }
  //"Initializing region {0}" which is not acompanied by a completed message.
  // Users think thread is stuck in some operation. Hence adding this log
  logger.info(LocalizedMessage.create(
      LocalizedStrings.DistributedRegion_INITIALIZING_REGION_COMPLETED_0,
      this.getName()));
}

/**
 * Destroys an entry, distributing the destroy to peers.  When a version tag
 * must be generated but cannot be generated locally, a one-hop
 * RemoteDestroyMessage is sent to a replicate first (bug #45704).
 *
 * @see LocalRegion#basicDestroy(EntryEventImpl, boolean, Object)
 */
@Override
protected void basicDestroy(EntryEventImpl event,
    boolean cacheWrite,
    Object expectedOldValue)
    throws EntryNotFoundException, CacheWriterException, TimeoutException {
  // disallow local destruction for mirrored keysvalues regions
  boolean invokeWriter = cacheWrite;
  boolean hasSeen = false;
  if (hasSeenEvent(event)) {
    hasSeen = true;
  }
  checkIfReplicatedAndLocalDestroy(event);
  try {
    if (this.requiresOneHopForMissingEntry(event)) {
      // bug #45704: see if a one-hop must be done for this operation
      RegionEntry re = getRegionEntry(event.getKey());
      if (re == null /*|| re.isTombstone()*/ || !this.generateVersionTag) {
        if (this.srp == null) {
          // only assert for non-client regions.
          Assert.assertTrue(!this.dataPolicy.withReplication()
              || !this.generateVersionTag);
        }
        if (!event.isBulkOpInProgress() || this.dataPolicy.withStorage()) {
          // removeAll will send a single one-hop for empty regions.
          // for other missing entries
          // we need to get a valid version number before modifying the local cache
          // TODO: deltaGII: verify that delegating to a peer when this region is also a client is acceptable
          boolean didDistribute = RemoteDestroyMessage.distribute(event,
              expectedOldValue, !this.generateVersionTag);
          if (!this.generateVersionTag && !didDistribute) {
            // a replicate must supply the version tag but none is online
            throw new PersistentReplicatesOfflineException();
          }
          if (didDistribute) {
            if (logger.isTraceEnabled()) {
              logger.trace("Event after remoteDestroy operation: {}", event);
            }
            invokeWriter = false; // remote cache invoked the writer
            if (event.getVersionTag() == null) {
              // if the event wasn't applied by the one-hop replicate it will not have a version tag
              // and so should not be applied to this cache
              return;
            }
          }
        }
      }
    }
    super.basicDestroy(event, invokeWriter, expectedOldValue);
    // if this is a destroy coming in from remote source, free up lock resources
    // if this is a local origin destroy, this will happen after lock is
    // released
    if (this.scope.isGlobal() && event.isOriginRemote()) {
      try {
        getLockService().freeResources(event.getKey());
      }
      catch (LockServiceDestroyedException ignore) {
        // service already gone; nothing left to free
      }
    }
    return;
  } finally {
    // a previously-seen event still needs distribution and callbacks
    if (hasSeen) {
      if (event.isBulkOpInProgress() && !event.isOriginRemote()) {
        event.getRemoveAllOperation().addEntry(event, true);
      }
      distributeDestroy(event, expectedOldValue);
      event.invokeCallbacks(this,true, false);
    }
  }
}

// Distributes the destroy to peers before delegating part-3 processing to
// LocalRegion.
@Override
void basicDestroyPart3(RegionEntry re, EntryEventImpl event,
    boolean inTokenMode, boolean duringRI, boolean invokeCallbacks,
    Object expectedOldValue) {
  distributeDestroy(event, expectedOldValue);
  super.basicDestroyPart3(re, event, inTokenMode, duringRI, invokeCallbacks,
      expectedOldValue);
}

// Sends a DestroyOperation to peers for a locally-originated, non-bulk,
// distributed destroy, unless distribution is explicitly inhibited.
void distributeDestroy(EntryEventImpl event, Object expectedOldValue) {
  if (event.isDistributed() && !event.isOriginRemote()
      && !event.isBulkOpInProgress()) {
    boolean distribute = !event.getInhibitDistribution();
    if (distribute) {
      DestroyOperation op = new DestroyOperation(event);
      op.distribute();
    }
  }
}

// Evicts an entry via LocalRegion; for global scope also frees any DLock
// resources held for the evicted key.
@Override
boolean evictDestroy(LRUEntry entry) {
  boolean evictDestroyWasDone = super.evictDestroy(entry);
  if (evictDestroyWasDone) {
    if (this.scope.isGlobal()) {
      try {
        getLockService().freeResources(entry.getKey());
      }
      catch (LockServiceDestroyedException ignore) {
        // service already gone; nothing left to free
      }
    }
  }
  return evictDestroyWasDone;
}

/**
 * Invalidates this region, distributing the invalidation to peers when
 * {@link #shouldDistributeInvalidateRegion} says so.  Local invalidation is
 * disallowed on replicated regions.
 *
 * @see LocalRegion#basicInvalidateRegion(RegionEventImpl)
 */
@Override
void basicInvalidateRegion(RegionEventImpl event) {
  // disallow local invalidation for replicated regions
  if (!event.isDistributed() && getScope().isDistributed()
      && getDataPolicy().withReplication()) {
    throw new IllegalStateException(LocalizedStrings.DistributedRegion_NOT_ALLOWED_TO_DO_A_LOCAL_INVALIDATION_ON_A_REPLICATED_REGION.toLocalizedString());
  }
  if (shouldDistributeInvalidateRegion(event)) {
    distributeInvalidateRegion(event);
  }
  super.basicInvalidateRegion(event);
}

/**
 * decide if InvalidateRegionOperation should be sent to peers. broken out so
 * that BucketRegion can override
 * @param event
 * @return true if {@link InvalidateRegionOperation} should be distributed, false otherwise
 */
protected boolean shouldDistributeInvalidateRegion(RegionEventImpl event) {
  return event.isDistributed() && !event.isOriginRemote();
}

/**
 * Distribute the invalidate of a region given its event.
 * This implementation sends the invalidate to peers.
 * @since 5.7
 */
protected void distributeInvalidateRegion(RegionEventImpl event) {
  new InvalidateRegionOperation(event).distribute();
}

/**
 * Destroys this region and distributes the destroy to peers; even a local
 * destroy/close is announced so peers can update their advisors.
 *
 * @see LocalRegion#basicDestroyRegion(RegionEventImpl, boolean, boolean,
 *      boolean)
 */
@Override
void basicDestroyRegion(RegionEventImpl event, boolean cacheWrite,
    boolean lock, boolean callbackEvents) throws CacheWriterException,
    TimeoutException {
  final String path = getFullPath();
  //Keep track of regions that are being destroyed. This helps avoid a race
  //when another member concurrently creates this region. See bug 42051.
  boolean isClose = event.getOperation().isClose();
  if(!isClose) {
    cache.beginDestroy(path, this);
  }
  try {
    super.basicDestroyRegion(event, cacheWrite, lock, callbackEvents);
    // send destroy region operation even if this is a localDestroyRegion (or
    // close)
    if (!event.isOriginRemote()) {
      distributeDestroyRegion(event, true);
    } else {
      if(!event.isReinitializing()) {
        // announce the local destroy to peers with a separate event
        RegionEventImpl localEvent = new RegionEventImpl(this,
            Operation.REGION_LOCAL_DESTROY, event.getCallbackArgument(),
            false, getMyId(), generateEventID()/* generate EventID */);
        distributeDestroyRegion(localEvent, false/*fixes bug 41111*/);
      }
    }
    notifyBridgeClients(event);
  }
  catch (CancelException e) {
    if (logger.isDebugEnabled()) {
      logger.debug("basicDestroyRegion short-circuited due to cancellation");
    }
  }
  finally {
    if(!isClose) {
      cache.endDestroy(path, this);
    }
    RegionLogger.logDestroy(path, getMyId(), getPersistentID(), isClose);
  }
}

// Distributes a DestroyRegionOperation to peers; the persistence tie-lock is
// released first so peers are not blocked on this departing member.
@Override
protected void distributeDestroyRegion(RegionEventImpl event,
    boolean notifyOfRegionDeparture) {
  if(persistenceAdvisor != null) {
    persistenceAdvisor.releaseTieLock();
  }
  new DestroyRegionOperation(event, notifyOfRegionDeparture).distribute();
}

/**
 * Invalidates an entry; when a version tag must be generated but cannot be
 * generated locally, a one-hop RemoteInvalidateMessage is sent to a
 * replicate first (bug #45704).
 *
 * Return true if invalidation occurred; false if it did not, for example if
 * it was already invalidated
 *
 * @see LocalRegion#basicInvalidate(EntryEventImpl)
 */
@Override
void basicInvalidate(EntryEventImpl event) throws EntryNotFoundException {
  boolean hasSeen = false;
  if (hasSeenEvent(event)) {
    hasSeen = true;
  }
  try {
    // disallow local invalidation for replicated regions
    if (event.isLocalInvalid() && !event.getOperation().isLocal()
        && getScope().isDistributed() && getDataPolicy().withReplication()) {
      throw new IllegalStateException(LocalizedStrings.DistributedRegion_NOT_ALLOWED_TO_DO_A_LOCAL_INVALIDATION_ON_A_REPLICATED_REGION.toLocalizedString());
    }
    if (this.requiresOneHopForMissingEntry(event)) {
      // bug #45704: see if a one-hop must be done for this operation
      RegionEntry re = getRegionEntry(event.getKey());
      if (re == null/* || re.isTombstone()*/ || !this.generateVersionTag) {
        if (this.srp == null) {
          // only assert for non-client regions.
          Assert.assertTrue(!this.dataPolicy.withReplication()
              || !this.generateVersionTag);
        }
        // TODO: deltaGII: verify that delegating to a peer when this region is also a client is acceptable
        boolean didDistribute = RemoteInvalidateMessage.distribute(event,
            !this.generateVersionTag);
        if (!this.generateVersionTag && !didDistribute) {
          // a replicate must supply the version tag but none is online
          throw new PersistentReplicatesOfflineException();
        }
        if (didDistribute) {
          if (logger.isDebugEnabled()) {
            logger.debug("Event after remoteInvalidate operation: {}", event);
          }
          if (event.getVersionTag() == null) {
            // if the event wasn't applied by the one-hop replicate it will not have a version tag
            // and so should not be applied to this cache
            return;
          }
        }
      }
    }
    super.basicInvalidate(event);
    return;
  } finally {
    // a previously-seen event still needs distribution and callbacks
    if (hasSeen) {
      distributeInvalidate(event);
      event.invokeCallbacks(this,true, false);
    }
  }
}

// Distributes the invalidate to peers before delegating part-3 processing to
// LocalRegion.
@Override
void basicInvalidatePart3(RegionEntry re, EntryEventImpl event,
    boolean invokeCallbacks) {
  distributeInvalidate(event);
  super.basicInvalidatePart3(re, event, invokeCallbacks);
}

// Sends an InvalidateOperation to peers for a locally-originated,
// distributed, non-transactional invalidate.
void distributeInvalidate(EntryEventImpl event) {
  if (!this.regionInvalid && event.isDistributed() && !event.isOriginRemote()
      && !isTX() /* only distribute if non-tx */) {
    // NOTE(review): the inner check duplicates part of the outer condition
    if (event.isDistributed() && !event.isOriginRemote()) {
      boolean distribute = !event.getInhibitDistribution();
      if (distribute) {
        InvalidateOperation op = new InvalidateOperation(event);
        op.distribute();
      }
    }
  }
}

// Applies a version-tag-only update locally (unless the event was already
// seen) and always distributes it to peers.
@Override
void basicUpdateEntryVersion(EntryEventImpl event)
    throws EntryNotFoundException {
  try {
    if (!hasSeenEvent(event)) {
      super.basicUpdateEntryVersion(event);
    }
    return;
  } finally {
    distributeUpdateEntryVersion(event);
  }
}

// Sends an UpdateEntryVersionOperation to peers for a locally-originated,
// distributed, non-transactional version update.
private void distributeUpdateEntryVersion(EntryEventImpl event) {
  if (!this.regionInvalid && event.isDistributed() && !event.isOriginRemote()
      && !isTX() /* only distribute if non-tx */) {
    // NOTE(review): the inner check duplicates part of the outer condition
    if (event.isDistributed() && !event.isOriginRemote()) {
      UpdateEntryVersionOperation op = new UpdateEntryVersionOperation(event);
      op.distribute();
    }
  }
}

// Clears the region under the distributed lock when the region has global
// scope.
@Override
protected void basicClear(RegionEventImpl ev) {
  Lock dlock = this.getRegionDistributedLockIfGlobal();
  try {
    super.basicClear(ev);
  } finally {
    if (dlock != null)
      dlock.unlock();
  }
}

// For a non-replicate region with concurrency checks, delegates the clear to
// a replicate peer (retrying on cancellation); otherwise clears locally.
@Override
void basicClear(RegionEventImpl regionEvent, boolean cacheWrite)  {
  if (this.concurrencyChecksEnabled && !this.dataPolicy.withReplication()) {
    boolean retry = false;
    do {
      // non-replicate regions must defer to a replicate for clear/invalidate of region
      Set<InternalDistributedMember> repls = this.distAdvisor.adviseReplicates();
      if (repls.size() > 0) {
        InternalDistributedMember mbr = repls.iterator().next();
        RemoteRegionOperation op = RemoteRegionOperation.clear(mbr, this);
        try {
          op.distribute();
          return;
        } catch (CancelException e) {
          this.stopper.checkCancelInProgress(e);
          retry = true;
        } catch (RemoteOperationException e) {
          this.stopper.checkCancelInProgress(e);
          retry = true;
        }
      }
    } while (retry);
  }
  // if no version vector or if no replicates are around, use the default mechanism
  super.basicClear(regionEvent, cacheWrite);
}

// Performs the clear, optionally under RVV-based write locks so concurrent
// version generation is paused across the distributed system.
@Override
void cmnClearRegion(RegionEventImpl regionEvent, boolean cacheWrite, boolean useRVV) {
  boolean enableRVV = useRVV && this.dataPolicy.withReplication()
      && this.concurrencyChecksEnabled && !getDistributionManager().isLoner();
  //Fix for 46338 - apparently multiple threads from the same VM are allowed
  //to suspend locking, which is what distributedLockForClear() does. We don't
  //want that to happen, so we'll synchronize to make sure only one thread on
  //this member performs a clear.
  synchronized(clearLock) {
    if (enableRVV) {
      distributedLockForClear();
      try {
        Set<InternalDistributedMember> participants =
            getCacheDistributionAdvisor().adviseInvalidateRegion();
        // pause all generation of versions and flush from the other members to this one
        try {
          obtainWriteLocksForClear(regionEvent, participants);
          clearRegionLocally(regionEvent, cacheWrite, null);
          if (!regionEvent.isOriginRemote() && regionEvent.isDistributed()) {
            DistributedClearOperation.clear(regionEvent, null, participants);
          }
        } finally {
          releaseWriteLocksForClear(regionEvent, participants);
        }
      } finally {
        distributedUnlockForClear();
      }
    } else {
      Set<InternalDistributedMember> participants =
          getCacheDistributionAdvisor().adviseInvalidateRegion();
      clearRegionLocally(regionEvent, cacheWrite, null);
      if (!regionEvent.isOriginRemote() && regionEvent.isDistributed()) {
        DistributedClearOperation.clear(regionEvent, null, participants);
      }
    }
  }
  // since clients do not maintain RVVs except for tombstone GC
  // we need to ensure that current ops reach the client queues
  // before queuing a clear, but there is no infrastructure for doing so
  notifyBridgeClients(regionEvent);
}

/**
 * Obtain a distributed lock for the clear operation.
 */
private void distributedLockForClear() {
  if (!this.scope.isGlobal()) {  // non-global regions must lock when using RVV
    try {
      getLockService().lock("_clearOperation", -1, -1);
    } catch(IllegalStateException e) {
      lockCheckReadiness();
      throw e;
    }
  }
}

/**
 * Release the distributed lock for the clear operation.
 */
private void distributedUnlockForClear() {
  if (!this.scope.isGlobal()) {
    try {
      getLockService().unlock("_clearOperation");
    } catch(IllegalStateException e) {
      lockCheckReadiness();
      throw e;
    }
  }
}

/** obtain locks preventing generation of new versions in other members
 * @param participants
 **/
private void obtainWriteLocksForClear(RegionEventImpl regionEvent,
    Set<InternalDistributedMember> participants) {
  lockLocallyForClear(getDistributionManager(), getMyId());
  DistributedClearOperation.lockAndFlushToOthers(regionEvent, participants);
}

/** pause local operations so that a clear() can be performed
 * and flush comm channels to the given member
 */
public void lockLocallyForClear(DM dm, InternalDistributedMember locker) {
  RegionVersionVector rvv = getVersionVector();
  if (rvv != null) {
    // block new operations from being applied to the region map
    rvv.lockForClear(getFullPath(), dm, locker);
    //Check for region destroyed after we have locked, to make sure
    //we don't continue a clear if the region has been destroyed.
    checkReadiness();
    // wait for current operations to
    if (!locker.equals(dm.getDistributionManagerId())) {
      Set<InternalDistributedMember> mbrs =
          getDistributionAdvisor().adviseCacheOp();
      StateFlushOperation.flushTo(mbrs, this);
    }
  }
}

/** releases the locks obtained in obtainWriteLocksForClear
 * @param participants
 */
private void releaseWriteLocksForClear(RegionEventImpl regionEvent,
    Set<InternalDistributedMember> participants) {
  getVersionVector().unlockForClear(getMyId());
  DistributedClearOperation.releaseLocks(regionEvent, participants);
}

/**
 * Wait for in progress clears that were initiated by this member.
 */
private void waitForInProgressClear() {
  RegionVersionVector rvv = getVersionVector();
  if (rvv != null) {
    synchronized(clearLock) {
      //do nothing;
      //DAN - I'm a little scared that the compiler might optimize
      //away this synchronization if we really do nothing. Hence
      //my fine log message below. This might not be necessary.
      if (logger.isDebugEnabled()) {
        logger.debug("Done waiting for clear");
      }
    }
  }
}

/**
 * Distribute Tombstone garbage-collection information to all peers with storage
 */
protected EventID distributeTombstoneGC(Set<Object> keysRemoved) {
  this.getCachePerfStats().incTombstoneGCCount();
  EventID eventId = new EventID(getSystem());
  DistributedTombstoneOperation gc =
      DistributedTombstoneOperation.gc(this, eventId);
  gc.distribute();
  notifyClientsOfTombstoneGC(getVersionVector().getTombstoneGCVector(),
      keysRemoved, eventId, null);
  return eventId;
}

// test hook for DistributedAckRegionCCEDUnitTest
public static boolean LOCALCLEAR_TESTHOOK;

// Local clear is unsupported on distributed replicated regions (except via
// the test hook above).
@Override
void basicLocalClear(RegionEventImpl rEvent) {
  if (getScope().isDistributed() && getDataPolicy().withReplication()
      && !LOCALCLEAR_TESTHOOK) {
    throw new UnsupportedOperationException(LocalizedStrings.DistributedRegion_LOCALCLEAR_IS_NOT_SUPPORTED_ON_DISTRIBUTED_REPLICATED_REGIONS.toLocalizedString());
  }
  super.basicLocalClear(rEvent);
}

// Returns the distribution configuration of the connected system.
public final DistributionConfig getDistributionConfig() {
  return getSystem().getDistributionManager().getConfig();
}

/**
 * Sends a list of queued messages to members playing a specified role
 *
 * @param list
 *          List of QueuedOperation instances to send. Any messages sent will
 *          be removed from this list
 * @param role
 *          the role that a recipient must be playing
 * @return true if at least one message made it to at least one guy playing
 *         the role
 */
boolean sendQueue(List list, Role role) {
  SendQueueOperation op = new SendQueueOperation(getDistributionManager(),
      this, list, role);
  return op.distribute();
}

/*
 * @see SearchLoadAndWriteProcessor#initialize(LocalRegion, Object, Object)
 */
public final CacheDistributionAdvisor getDistributionAdvisor() {
  return this.distAdvisor;
}

public final CacheDistributionAdvisor getCacheDistributionAdvisor() {
  return this.distAdvisor;
}

public final PersistenceAdvisor getPersistenceAdvisor() {
  return this.persistenceAdvisor;
}

public final PersistentMemberID getPersistentID() {
  return this.persistentId;
}

/** Returns the distribution profile; lazily creates one if needed */
public Profile getProfile() {
  return this.distAdvisor.createProfile();
}

// Populates a CacheProfile to advertise this region's attributes to peers
// via the distribution advisor.
public void fillInProfile(Profile p) {
  assert p instanceof CacheProfile;
  CacheProfile profile = (CacheProfile)p;
  profile.dataPolicy = getDataPolicy();
  profile.hasCacheLoader = basicGetLoader() != null;
  profile.hasCacheWriter = basicGetWriter() != null;
  profile.hasCacheListener = hasListener();
  Assert.assertTrue(this.scope.isDistributed());
  profile.scope = this.scope;
  profile.inRecovery = getImageState().getInRecovery();
  profile.isPersistent = getDataPolicy().withPersistence();
  profile.setSubscriptionAttributes(getSubscriptionAttributes());
  // Kishor : Below PDX check is added for rolling upgrade support. We are
  // removing Old wan in this checkin. PDX region are always gatewayEnabled
  // irrespective whether gatewayHub is configured or not.
  // Old version Pdx region always has this attribute true so to avoid region
  // attribute comparison across member we are setting it to true.
if (this.isPdxTypesRegion()) { profile.isGatewayEnabled = true; } else { profile.isGatewayEnabled = false; } profile.serialNumber = getSerialNumber(); profile.regionInitialized = this.isInitialized(); if (!this.isUsedForPartitionedRegionBucket()) { profile.memberUnInitialized = getCache().isUnInitializedMember( profile.getDistributedMember()); } else { profile.memberUnInitialized = false; } profile.persistentID = getPersistentID(); if(getPersistenceAdvisor() != null) { profile.persistenceInitialized = getPersistenceAdvisor().isOnline(); } profile.hasCacheServer = ((this.cache.getCacheServers().size() > 0)?true:false); profile.requiresOldValueInEvents = this.dataPolicy.withReplication() && this.filterProfile != null && this.filterProfile.hasCQs(); profile.gatewaySenderIds = getGatewaySenderIds(); profile.asyncEventQueueIds = getAsyncEventQueueIds(); profile.isOffHeap = getOffHeap(); } /** * Return the DistributedLockService associated with this Region. This method * will lazily create that service the first time it is invoked on this * region. */ public DistributedLockService getLockService() { synchronized (this.dlockMonitor) { // Assert.assertTrue(this.scope.isGlobal()); since 7.0 this is used for distributing clear() ops String svcName = getFullPath(); if (this.dlockService == null) { this.dlockService = DistributedLockService.getServiceNamed(svcName); if (this.dlockService == null) { this.dlockService = DLockService.create( getFullPath(), getSystem(), true /*distributed*/, false /*destroyOnDisconnect*/, // region destroy will destroy dls false /*automateFreeResources*/); // manual freeResources only } // handle is-lock-grantor region attribute... 
if (this.isLockGrantor) { this.dlockService.becomeLockGrantor(); } if (logger.isDebugEnabled()) { logger.debug("LockService for {} is using LockLease={}, LockTimeout=", svcName, getCache().getLockLease(), getCache().getLockTimeout()); } } return this.dlockService; } } /** * @see LocalRegion#isCurrentlyLockGrantor() */ @Override protected boolean isCurrentlyLockGrantor() { if (!this.scope.isGlobal()) return false; return getLockService().isLockGrantor(); } @Override public boolean isLockGrantor() { if (!this.scope.isGlobal()) return false; return this.isLockGrantor; } @Override public void becomeLockGrantor() { checkReadiness(); checkForLimitedOrNoAccess(); if (!this.scope.isGlobal()) { throw new IllegalStateException(LocalizedStrings.DistributedRegion_DISTRIBUTION_LOCKS_ARE_ONLY_SUPPORTED_FOR_REGIONS_WITH_GLOBAL_SCOPE_NOT_0.toLocalizedString(this.scope)); } DistributedLockService svc = getLockService(); try { super.becomeLockGrantor(); if (!svc.isLockGrantor()) { svc.becomeLockGrantor(); } } finally { if (!svc.isLockGrantor()) { if (logger.isDebugEnabled()) { logger.debug("isLockGrantor is false after becomeLockGrantor for {}", getFullPath()); } } } } /** @return the deserialized value */ @Override @Retained protected Object findObjectInSystem(KeyInfo keyInfo, boolean isCreate, TXStateInterface txState, boolean generateCallbacks, Object localValue, boolean disableCopyOnRead, boolean preferCD, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowReadFromHDFS) throws CacheLoaderException, TimeoutException { checkForLimitedOrNoAccess(); RegionEntry re = null; final Object key = keyInfo.getKey(); final Object aCallbackArgument = keyInfo.getCallbackArg(); Operation op; if (isCreate) { op = Operation.CREATE; } else { op = Operation.UPDATE; } long lastModified = 0L; boolean fromServer = false; EntryEventImpl event = null; @Retained Object result = null; boolean incrementUseCountForSqlf = false; try { { if (this.srp 
!= null) {
        // region is a client: try the server first
        EntryEventImpl holder = EntryEventImpl.createVersionTagHolder();
        try {
          Object value = this.srp.get(key, aCallbackArgument, holder);
          fromServer = value != null;
          if (fromServer) {
            event = EntryEventImpl.create(this, op, key, value,
                aCallbackArgument, false, getMyId(), generateCallbacks);
            event.setVersionTag(holder.getVersionTag());
            event.setFromServer(fromServer); // fix for bug 39358
            if (clientEvent != null && clientEvent.getVersionTag() == null) {
              clientEvent.setVersionTag(holder.getVersionTag());
            }
          }
        } finally {
          holder.release();
        }
      }
    }
    if (!fromServer) {
      // server miss (or no server): fall back to net-search / net-load
      //Do not generate Event ID
      event = EntryEventImpl.create(this, op, key, null /*newValue*/,
          aCallbackArgument, false, getMyId(), generateCallbacks);
      if (requestingClient != null) {
        event.setContext(requestingClient);
      }
      SearchLoadAndWriteProcessor processor =
          SearchLoadAndWriteProcessor.getProcessor();
      try {
        processor.initialize(this, key, aCallbackArgument);
        // processor fills in event
        processor.doSearchAndLoad(event, txState, localValue);
        if (clientEvent != null && clientEvent.getVersionTag() == null) {
          clientEvent.setVersionTag(event.getVersionTag());
        }
        lastModified = processor.getLastModified();
      } finally {
        processor.release();
      }
    }
    if (event.hasNewValue() && !isMemoryThresholdReachedForLoad()) {
      // cache the fetched/loaded value locally
      try {
        // Set eventId. Required for interested clients.
        event.setNewEventId(cache.getDistributedSystem());
        long startPut = CachePerfStats.getStatTime();
        validateKey(key);
//        if (event.getOperation().isLoad()) {
//          this.performedLoad(event, lastModified, txState);
//        }
        // this next step also distributes the object to other processes, if necessary
        try {
          // set the tail key so that the event is passed to GatewaySender queues.
          // if the tailKey is not set, the event gets filtered out in ParallelGatewaySenderQueue
          if (this instanceof BucketRegion) {
            if (((BucketRegion)this).getPartitionedRegion().isParallelWanEnabled())
              ((BucketRegion)this).handleWANEvent(event);
          }
          re = basicPutEntry(event, lastModified);
          incrementUseCountForSqlf = GemFireCacheImpl.sqlfSystem() ;
        } catch (ConcurrentCacheModificationException e) {
          // the cache was modified while we were searching for this entry and
          // the netsearch result was elided.  Return the current value from the cache
          re = getRegionEntry(key);
          if (re != null) {
            event.setNewValue(re.getValue(this)); // OFFHEAP: need to incrc, copy to heap to setNewValue, decrc
          }
        }
        if (!isTX()) {
          getCachePerfStats().endPut(startPut, event.isOriginRemote());
        }
      }
      catch (CacheWriterException cwe) {
        // a writer veto is not fatal here; return the value without caching it
        if (logger.isDebugEnabled()) {
          logger.debug("findObjectInSystem: writer exception putting entry {} : {}", event, cwe);
        }
      }
    }
    if (isCreate) {
      recordMiss(re, key);
    }

    if (preferCD) {
      if (event.hasDelta()) {
        result = event.getNewValue();
      } else {
        result = event.getRawNewValueAsHeapObject();
      }
    } else {
      result = event.getNewValue();
    }
    //For SQLFire , we need to increment the use count so that returned
    //object has use count 2
    if( incrementUseCountForSqlf && result instanceof Chunk) {
      ((Chunk)result).retain();
    }
    return result;
  } finally {
    if (event != null) {
      event.release();
    }
  }
}

// Lazily resolves the HDFS event queue backing this region's partitioned
// region.  NOTE(review): the lazy init is not synchronized — presumably
// first access is single-threaded or a benign race; confirm.
protected ConcurrentParallelGatewaySenderQueue getHDFSQueue() {
  if (this.hdfsQueue == null) {
    String asyncQId = this.getPartitionedRegion().getHDFSEventQueueName();
    final AsyncEventQueueImpl asyncQ =
        (AsyncEventQueueImpl)this.getCache().getAsyncEventQueue(asyncQId);
    final AbstractGatewaySender gatewaySender =
        (AbstractGatewaySender)asyncQ.getSender();
    AbstractGatewaySenderEventProcessor ep = gatewaySender.getEventProcessor();
    if (ep == null) return null;
    hdfsQueue = (ConcurrentParallelGatewaySenderQueue)ep.getQueue();
  }
  return hdfsQueue;
}

/** hook for subclasses to note that a cache load was performed
 * @see BucketRegion#performedLoad
 */
//  void performedLoad(EntryEventImpl event, long lastModifiedTime, TXState txState)
//    throws CacheWriterException {
//    // no action in DistributedRegion
//  }

/**
 * Runs the local cache writer and/or a net-write to peers before a destroy.
 *
 * @see LocalRegion#cacheWriteBeforeDestroy(EntryEventImpl, Object)
 * @return true if cacheWrite was performed
 */
@Override
boolean cacheWriteBeforeDestroy(EntryEventImpl event, Object expectedOldValue)
    throws CacheWriterException, EntryNotFoundException, TimeoutException {
  boolean result = false;
  if (event.isDistributed()) {
    CacheWriter localWriter = basicGetWriter();
    Set netWriteRecipients = localWriter == null ? this.distAdvisor
        .adviseNetWrite() : null;

    if ((localWriter != null ||
         (netWriteRecipients != null && !netWriteRecipients.isEmpty())) &&
        !event.inhibitAllNotifications()) {
      final long start = getCachePerfStats().startCacheWriterCall();
      try {
        event.setOldValueFromRegion();
        SearchLoadAndWriteProcessor processor =
            SearchLoadAndWriteProcessor.getProcessor();
        try {
          processor.initialize(this, event.getKey(), null);
          processor.doNetWrite(event, netWriteRecipients, localWriter,
              SearchLoadAndWriteProcessor.BEFOREDESTROY);
          result = true;
        } finally {
          processor.release();
        }
      } finally {
        getCachePerfStats().endCacheWriterCall(start);
      }
    }
    serverDestroy(event, expectedOldValue);
  }
  return result;
}

/**
 * Runs the local cache writer and/or a net-write to peers before a region
 * destroy.
 *
 * @see LocalRegion#cacheWriteBeforeRegionDestroy(RegionEventImpl)
 */
@Override
boolean cacheWriteBeforeRegionDestroy(RegionEventImpl event)
    throws CacheWriterException, TimeoutException {
  boolean result = false;
  if (event.getOperation().isDistributed()) {
    CacheWriter localWriter = basicGetWriter();
    Set netWriteRecipients = localWriter == null ?
this.distAdvisor .adviseNetWrite() : null; if (localWriter != null || (netWriteRecipients != null && !netWriteRecipients.isEmpty())) { final long start = getCachePerfStats().startCacheWriterCall(); try { SearchLoadAndWriteProcessor processor = SearchLoadAndWriteProcessor.getProcessor(); try { processor.initialize(this, "preDestroyRegion", null); processor.doNetWrite(event, netWriteRecipients, localWriter, SearchLoadAndWriteProcessor.BEFOREREGIONDESTROY); result = true; } finally { processor.release(); } } finally { getCachePerfStats().endCacheWriterCall(start); } } serverRegionDestroy(event); } return result; } protected void distributedRegionCleanup(RegionEventImpl event) { if (event == null || event.getOperation() != Operation.REGION_REINITIALIZE) { // only perform this if reinitialize is not due to resumption // (REGION_REINITIALIZE) // or if event is null then this was a failed initialize (create) // wake up any threads in waitForRequiredRoles... they will checkReadiness synchronized (this.missingRequiredRoles) { this.missingRequiredRoles.notifyAll(); } } if(persistenceAdvisor != null) { this.persistenceAdvisor.close(); // fix for bug 41094 } this.distAdvisor.close(); DLockService dls = null; //Fix for bug 46338. 
Wait for in progress clears before destroying the //lock service, because destroying the service immediately releases the dlock waitForInProgressClear(); synchronized (this.dlockMonitor) { if (this.dlockService != null) { dls = (DLockService)this.dlockService; } } if (dls != null) { try { dls.destroyAndRemove(); } catch (CancelException e) { // bug 37118 if (logger.isDebugEnabled()) { logger.debug("DLS destroy abridged due to shutdown", e); } } catch (Exception ex) { logger.warn(LocalizedMessage.create(LocalizedStrings.DistributedRegion_DLS_DESTROY_MAY_HAVE_FAILED_FOR_0, this.getFullPath()), ex); } } if (this.rmq != null) { this.rmq.close(); } //Fix for #48066 - make sure that region operations are completely //distributed to peers before destroying the region. long timeout = 1000L * getCache().getDistributedSystem().getConfig().getAckWaitThreshold(); Boolean flushOnClose = !Boolean.getBoolean("gemfire.no-flush-on-close"); // test hook if (!this.cache.forcedDisconnect() && flushOnClose && this.getDistributionManager().getMembershipManager() != null && this.getDistributionManager().getMembershipManager().isConnected()) { getDistributionAdvisor().forceNewMembershipVersion(); try { getDistributionAdvisor().waitForCurrentOperations(timeout); } catch (Exception e) { // log this but try to close the region so that listeners are invoked logger.warn(LocalizedMessage.create(LocalizedStrings.GemFireCache_0_ERROR_CLOSING_REGION_1, new Object[] { this, getFullPath() }), e); } } } /** * In addition to inherited code this method also invokes * RegionMembershipListeners */ @Override protected void postCreateRegion() { super.postCreateRegion(); // should we sync on this.distAdvisor first to prevent bug 44369? 
synchronized (this.advisorListener) { Set others = this.advisorListener.getInitialMembers(); CacheListener[] listeners = fetchCacheListenersField(); if (listeners != null) { for (int i = 0; i < listeners.length; i++) { if (listeners[i] instanceof RegionMembershipListener) { RegionMembershipListener rml = (RegionMembershipListener)listeners[i]; try { DistributedMember[] otherDms = new DistributedMember[others .size()]; others.toArray(otherDms); rml.initialMembers(this, otherDms); } catch (VirtualMachineError err) { SystemFailure.initiateFailure(err); // If this ever returns, rethrow the error. We're poisoned // now, so don't let this thread continue. throw err; } catch (Throwable t) { // Whenever you catch Error or Throwable, you must also // catch VirtualMachineError (see above). However, there is // _still_ a possibility that you are dealing with a cascading // error condition, so you also need to check to see if the JVM // is still usable: SystemFailure.checkFailure(); logger.error(LocalizedMessage.create(LocalizedStrings.DistributedRegion_EXCEPTION_OCCURRED_IN_REGIONMEMBERSHIPLISTENER), t); } } } } Set<String> allGatewaySenderIds = getAllGatewaySenderIds(); if (!allGatewaySenderIds.isEmpty()) { for (GatewaySender sender : cache.getAllGatewaySenders()) { if (sender.isParallel() && allGatewaySenderIds.contains(sender.getId())) { //Fix for Bug#51491. 
Once decided to support this configuration we have call addShadowPartitionedRegionForUserRR if (sender.getId().contains( AsyncEventQueueImpl.ASYNC_EVENT_QUEUE_PREFIX)) { throw new AsyncEventQueueConfigurationException( LocalizedStrings.ParallelAsyncEventQueue_0_CAN_NOT_BE_USED_WITH_REPLICATED_REGION_1.toLocalizedString(new Object[] { AsyncEventQueueImpl .getAsyncEventQueueIdFromSenderId(sender.getId()), this.getFullPath() })); } throw new GatewaySenderConfigurationException( LocalizedStrings.ParallelGatewaySender_0_CAN_NOT_BE_USED_WITH_REPLICATED_REGION_1 .toLocalizedString(new Object[] { sender.getId(), this.getFullPath() })); // if (sender.isRunning()) { // ConcurrentParallelGatewaySenderQueue parallelQueue = // (ConcurrentParallelGatewaySenderQueue)((ParallelGatewaySenderImpl)sender) // .getQueues().toArray(new RegionQueue[1])[0]; // parallelQueue.addShadowPartitionedRegionForUserRR(this); // } } } } } } /** * Free resources held by this region. This method is invoked after * isDestroyed has been set to true. 
*
 * @see LocalRegion#postDestroyRegion(boolean, RegionEventImpl)
 */
@Override
protected void postDestroyRegion(boolean destroyDiskRegion, RegionEventImpl event)
{
  // Tear down the distributed machinery (advisors, lock service, queues)
  // before delegating to LocalRegion for the local part of the destroy.
  distributedRegionCleanup(event);
  try {
    super.postDestroyRegion(destroyDiskRegion, event);
  }
  catch (CancelException e) {
    // I don't think this should ever happen: bulletproofing for bug 39454
    logger.warn("postDestroyRegion: encountered cancellation", e);
  }
  // rmq is the reliable-messaging queue; only destroy its state when the
  // disk region itself is being destroyed.
  if (this.rmq != null && destroyDiskRegion) {
    this.rmq.destroy();
  }
}

@Override
void cleanupFailedInitialization()
{
  // Region creation failed part-way: undo local state, then tell peers this
  // region is going away and release the distributed resources we acquired.
  super.cleanupFailedInitialization();
  try {
    RegionEventImpl ev = new RegionEventImpl(this, Operation.REGION_CLOSE,
        null, false, getMyId(), generateEventID());
    distributeDestroyRegion(ev, true);
    distributedRegionCleanup(null);
  } catch (RegionDestroyedException e) {
    // someone else must have concurrently destroyed the region (maybe a distributed destroy)
  } catch (CancelException e) {
    // cache or DS is closed, ignore
  } catch (VirtualMachineError e) {
    SystemFailure.initiateFailure(e);
    throw e;
  } catch (Throwable t) {
    logger.warn(LocalizedMessage.create(
        LocalizedStrings.DistributedRegion_ERROR_CLEANING_UP_FAILED_INITIALIZATION,
        this), t);
  }
}

/**
 * @see LocalRegion#handleCacheClose(Operation)
 */
@Override
void handleCacheClose(Operation op)
{
  try {
    super.handleCacheClose(op);
  } finally {
    // always release distributed resources, even if the superclass close fails
    distributedRegionCleanup(null);
  }
}

/**
 * invoke a cache writer before a put is performed elsewhere
 *
 * @see LocalRegion#cacheWriteBeforePut(EntryEventImpl, Set, CacheWriter, boolean, Object)
 */
@Override
protected void cacheWriteBeforePut(EntryEventImpl event, Set netWriteRecipients,
    CacheWriter localWriter, boolean requireOldValue, Object expectedOldValue)
    throws CacheWriterException, TimeoutException
{
  // Skip the writer machinery entirely when there is no local writer and no
  // remote writer candidate, or when the event suppresses all notifications.
  if ((localWriter != null
       || (netWriteRecipients != null && !netWriteRecipients.isEmpty()))
      && !event.inhibitAllNotifications()) {
    final boolean isNewKey = event.getOperation().isCreate();
    final long start = getCachePerfStats().startCacheWriterCall();
    try {
SearchLoadAndWriteProcessor processor =
          SearchLoadAndWriteProcessor.getProcessor();
      processor.initialize(this, "preUpdate", null);
      try {
        // BEFOREUPDATE vs BEFORECREATE selects which writer callback fires
        if (!isNewKey) {
          processor.doNetWrite(event, netWriteRecipients, localWriter,
              SearchLoadAndWriteProcessor.BEFOREUPDATE);
        }
        else {
          processor.doNetWrite(event, netWriteRecipients, localWriter,
              SearchLoadAndWriteProcessor.BEFORECREATE);
        }
      } finally {
        processor.release();
      }
    } finally {
      getCachePerfStats().endCacheWriterCall(start);
    }
  }
  // also give the server (client/server mode) a chance to observe the put
  serverPut(event, requireOldValue, expectedOldValue);
}

@Override
protected void cacheListenersChanged(boolean nowHasListener)
{
  if (nowHasListener) {
    this.advisorListener.initRMLWrappers();
  }
  // advertise the profile change (listener presence) to peers
  new UpdateAttributesProcessor(this).distribute();
}

@Override
protected void cacheWriterChanged(CacheWriter oldWriter)
{
  super.cacheWriterChanged(oldWriter);
  // XOR: distribute only when the writer flipped between null and non-null
  if (oldWriter == null ^ basicGetWriter() == null) {
    new UpdateAttributesProcessor(this).distribute();
  }
}

@Override
protected void cacheLoaderChanged(CacheLoader oldLoader)
{
  super.cacheLoaderChanged(oldLoader);
  // XOR: distribute only when the loader flipped between null and non-null
  if (oldLoader == null ^ basicGetLoader() == null) {
    new UpdateAttributesProcessor(this).distribute();
  }
}

public void addGatewaySenderId(String gatewaySenderId)
{
  super.addGatewaySenderId(gatewaySenderId);
  new UpdateAttributesProcessor(this).distribute();
}

public void removeGatewaySenderId(String gatewaySenderId)
{
  super.removeGatewaySenderId(gatewaySenderId);
  new UpdateAttributesProcessor(this).distribute();
}

public void addAsyncEventQueueId(String asyncEventQueueId)
{
  super.addAsyncEventQueueId(asyncEventQueueId);
  new UpdateAttributesProcessor(this).distribute();
}

public void removeAsyncEventQueueId(String asyncEventQueueId)
{
  super.removeAsyncEventQueueId(asyncEventQueueId);
  new UpdateAttributesProcessor(this).distribute();
}

/**
 * Verifies that this member's gateway-sender ids and async-event-queue ids
 * agree with the ids configured for the same region on other members;
 * throws GatewaySenderConfigurationException on mismatch.
 */
public void checkSameSenderIdsAvailableOnAllNodes()
{
  List senderIds = this.getCacheDistributionAdvisor()
      .adviseSameGatewaySenderIds(getGatewaySenderIds());
  if (!senderIds.isEmpty()) {
    throw new
GatewaySenderConfigurationException( LocalizedStrings.Region_REGION_0_HAS_1_GATEWAY_SENDER_IDS_ANOTHER_CACHE_HAS_THE_SAME_REGION_WITH_2_GATEWAY_SENDER_IDS_FOR_REGION_ACROSS_ALL_MEMBERS_IN_DS_GATEWAY_SENDER_IDS_SHOULD_BE_SAME .toLocalizedString(new Object[] { this.getName(), senderIds.get(0), senderIds.get(1) })); } List asycnQueueIds = this.getCacheDistributionAdvisor() .adviseSameAsyncEventQueueIds(getAsyncEventQueueIds()); if (!asycnQueueIds.isEmpty()) { throw new GatewaySenderConfigurationException( LocalizedStrings.Region_REGION_0_HAS_1_ASYNC_EVENT_QUEUE_IDS_ANOTHER_CACHE_HAS_THE_SAME_REGION_WITH_2_ASYNC_EVENT_QUEUE_IDS_FOR_REGION_ACROSS_ALL_MEMBERS_IN_DS_ASYNC_EVENT_QUEUE_IDS_SHOULD_BE_SAME .toLocalizedString(new Object[] { this.getName(), asycnQueueIds.get(0), asycnQueueIds.get(1) })); } } /** * Wraps call to dlock service in order to throw RegionDestroyedException if * dlock service throws IllegalStateException and isDestroyed is true. */ private boolean isLockingSuspendedByCurrentThread() { try { return getLockService().isLockingSuspendedByCurrentThread(); } catch (IllegalStateException e) { lockCheckReadiness(); throw e; } } /** * If this region's scope is GLOBAL, get a distributed lock on the given key, * and return the Lock. The sender is responsible for unlocking. * * @return the acquired Lock if the region is GLOBAL, otherwise null. 
*
 * @throws NullPointerException
 *           if key is null
 */
private Lock getDistributedLockIfGlobal(Object key) throws TimeoutException
{
  if (getScope().isGlobal()) {
    // A thread that has suspended locking already holds the region-wide lock;
    // no per-entry lock is needed in that state.
    if (isLockingSuspendedByCurrentThread())
      return null;
    long start = System.currentTimeMillis();
    long timeLeft = getCache().getLockTimeout(); // in seconds (see tryLock below)
    long lockTimeout = timeLeft; // -1 means "wait forever"
    StringId msg = null;
    Object[] msgArgs = null;
    while (timeLeft > 0 || lockTimeout == -1) {
      this.cache.getCancelCriterion().checkCancelInProgress(null);
      boolean interrupted = Thread.interrupted();
      try {
        Lock dlock = getDistributedLock(key);
        if (!dlock.tryLock(timeLeft, TimeUnit.SECONDS)) {
          msg = LocalizedStrings.DistributedRegion_ATTEMPT_TO_ACQUIRE_DISTRIBUTED_LOCK_FOR_0_FAILED_AFTER_WAITING_1_SECONDS;
          msgArgs = new Object[] {key,
              Long.valueOf((System.currentTimeMillis() - start) / 1000L)};
          break;
        }
        return dlock;
      }
      catch (InterruptedException ex) {
        interrupted = true;
        this.cache.getCancelCriterion().checkCancelInProgress(ex);
        // FIXME Why is it OK to keep going?
        if (lockTimeout > -1) {
          // recompute the remaining budget from the wall clock
          timeLeft = getCache().getLockTimeout()
              - ((System.currentTimeMillis() - start) / 1000L);
        }
      }
      finally {
        if (interrupted) {
          // restore the interrupt flag cleared by Thread.interrupted()
          Thread.currentThread().interrupt();
        }
      }
    } // while
    if (msg == null) {
      msg = LocalizedStrings.DistributedRegion_TIMED_OUT_AFTER_WAITING_0_SECONDS_FOR_THE_DISTRIBUTED_LOCK_FOR_1;
      msgArgs = new Object[] {Integer.valueOf(getCache().getLockTimeout()), key};
    }
    throw new TimeoutException(msg.toLocalizedString(msgArgs));
  }
  else {
    return null;
  }
}

/**
 * Checks if the entry is NOT a valid entry.
 *
 * @return true if entry is null, or is removed and not a tombstone
 */
protected boolean checkEntryNotValid(RegionEntry mapEntry)
{
  return (mapEntry == null || (mapEntry.isRemoved() && !mapEntry.isTombstone()));
}

/**
 * Get the best iterator for iterating over the contents of this
 * region. This method will either return an iterator that uses hash
 * ordering from the entry map, or, in the case of an overflow
 * region, an iterator that iterates over the entries in disk order.
*/ public Iterator<RegionEntry> getBestIterator(boolean includeValues) { DiskRegion dr = this.getDiskRegion(); if (DiskPage.DISK_PAGE_SIZE > 0 && includeValues && dr != null) { //Wait for the disk region to recover values first. dr.waitForAsyncRecovery(); if(dr.getNumOverflowOnDisk() > 0) { return new DiskSavyIterator(); } } return this.entries.regionEntries().iterator(); } // /** // * The maximum number of entries that can be put into the diskMap before // * some of them are read from disk and returned by this iterator. // * The larger this number the more memory this iterator is allowed to consume // * and the better it will do in optimally reading the pending entries. // */ // static final long MAX_PENDING_ENTRIES = Long.getLong("gemfire.MAX_PENDING_ENTRIES", 1000000).longValue(); /** * Should only be used if this region has entries on disk that are not in memory. * This currently happens for overflow and for recovery when values are not recovered. * The first iteration does a normal iteration of the regionEntries. * But if it finds an entry that is currently only on disk * it saves it in a list sorted by the location on disk. * Once the regionEntries iterator has nothing more to iterate * it starts iterating over, in disk order, the entries on disk. 
*/ private class DiskSavyIterator implements Iterator<RegionEntry> { private boolean usingIt = true; private Iterator<?> it = entries.regionEntries().iterator(); // iterator for nested ArrayLists private Iterator<RegionEntry> subIt = null; //private final ArrayList<DiskPosition> diskList = new ArrayList<DiskPosition>(/*@todo presize based on number of entries only on disk*/); // value will be either RegionEntry or an ArrayList<RegionEntry> // private long pendingCount = 0; private final java.util.TreeMap<DiskPage, Object> diskMap = new java.util.TreeMap<DiskPage, Object>(); // /** // * used to iterate over the fullest pages at the time we have // * added MAX_PENDING_ENTRIES to diskMap; // */ // private Iterator<Map.Entry<DiskPage, Object>> sortedDiskIt; public DiskSavyIterator() { } public boolean hasNext() { boolean result; if (this.subIt != null) { result = this.subIt.hasNext(); if (!result) { this.subIt = null; } else { return result; } } // if (this.sortedDiskIt != null) { // result = this.sortedDiskIt.hasNext(); // if (!result) { // this.sortedDiskIt = null; // } else { // return result; // } // } result = this.it.hasNext(); if (this.usingIt && !result) { this.usingIt = false; // long start = System.currentTimeMillis(); // Collections.sort(this.diskList); // long end = System.currentTimeMillis(); this.it = this.diskMap.values().iterator(); result = this.it.hasNext(); } return result; } public RegionEntry next() { for (;;) { if (this.subIt != null) { return this.subIt.next(); // } else if (this.sortedDiskIt != null) { // Map.Entry<DiskPage, Object> me = this.sortedDiskIt.next(); // // remove the page from the diskMap. 
// this.diskMap.remove(me.getKey()); // Object v = me.getValue(); // int size = 1; // if (v instanceof ArrayList) { // ArrayList al = (ArrayList)v; // size = al.size(); // // set up the iterator to start returning the entries on that page // this.subIt = al.iterator(); // v = this.subIt.next(); // } // // decrement pendingCount by the number of entries on the page // this.pendingCount -= size; // // return the first region entry on this page // return v; } if (this.usingIt) { RegionEntry re = (RegionEntry)this.it.next(); DiskPosition dp = new DiskPosition(); if (re.isOverflowedToDisk(DistributedRegion.this, dp)) { // add dp to sorted list DiskPage dPage = new DiskPage(dp); Object v = this.diskMap.get(dPage); if (v == null) { this.diskMap.put(dPage, re); } else if (v instanceof ArrayList) { ArrayList al = (ArrayList)v; al.add(re); } else { ArrayList al = new ArrayList(); al.add(v); al.add(re); this.diskMap.put(dPage, al); } if (!hasNext()) { assert false; // must be true } // this.pendingCount++; // if (this.usingIt && this.pendingCount >= MAX_PENDING_ENTRIES) { // // find the pages that have the most entries // int largestPage = 1; // ArrayList<Map.Entry<DiskPage, Object>> largestPages // = new ArrayList<Map.Entry<DiskPage, Object>>(); // for (Map.Entry<DiskPage, Object> me: this.diskMap.entrySet()) { // int meSize = 1; // if (me.getValue() instanceof ArrayList) { // meSize = ((ArrayList)me.getValue()).size(); // } // if (meSize > largestPage) { // largestPage = meSize; // largestPages.clear(); // throw away smaller pages // largestPages.add(me); // } else if (meSize == largestPage) { // largestPages.add(me); // } else { // // ignore this page // } // } // Collections.sort(largestPages, new Comparator // <Map.Entry<DiskPage, Object>>() { // /** // * Note: this comparator imposes orderings that are inconsistent // * with equals. 
// */ // public int compare(Map.Entry<DiskPage, Object> o1, Map.Entry<DiskPage, Object> o2) { // return o1.getKey().compareTo(o2.getKey()); // } // }); // this.sortedDiskIt = largestPages.iterator(); // // loop around and fetch first value from sortedDiskIt // } } else { return re; } } else { Object v = this.it.next(); if (v instanceof ArrayList) { ArrayList al = (ArrayList)v; this.subIt = al.iterator(); return this.subIt.next(); } else { return (RegionEntry) v; } } } } public void remove() { throw new UnsupportedOperationException(); } } public static class DiskPosition implements Comparable<DiskPosition> { private long oplogId; private long offset; DiskPosition() { } void setPosition(long oplogId, long offset) { this.oplogId = oplogId; this.offset = offset; } @Override public int hashCode() { return Long.valueOf(this.oplogId ^ this.offset).hashCode(); } @Override public boolean equals(Object o) { if (o instanceof DiskPosition) { DiskPosition other = (DiskPosition)o; return this.oplogId == other.oplogId && this.offset == other.offset; } else { return false; } } public int compareTo(DiskPosition o) { int result = Long.signum(this.oplogId - o.oplogId); if (result == 0) { result = Long.signum(this.offset - o.offset); } return result; } @Override public String toString() { StringBuffer sb = new StringBuffer(); sb.append("<").append(this.oplogId).append(":").append(this.offset).append(">"); return sb.toString(); } } static class DiskPage extends DiskPosition { static final long DISK_PAGE_SIZE = Long.getLong("gemfire.DISK_PAGE_SIZE", 8 * 1024L).longValue(); DiskPage(DiskPosition dp) { this.setPosition(dp.oplogId, dp.offset / DISK_PAGE_SIZE); } } /** * Returns the lock lease value to use for DistributedLock and * RegionDistributedLock. -1 is supported as non-expiring lock. 
*/ protected long getLockLeaseForLock() { if (getCache().getLockLease() == -1) { return -1; } return getCache().getLockLease() * 1000; } /** * Returns the lock timeout value to use for DistributedLock and * RegionDistributedLock. -1 is supported as a lock that never times out. */ protected long getLockTimeoutForLock(long time, TimeUnit unit) { if (time == -1) { return -1; } return TimeUnit.MILLISECONDS.convert(time, unit); } /** ******************* DistributedLock ************************************* */ private class DistributedLock implements Lock { private final Object key; public DistributedLock(Object key) { this.key = key; } public void lock() { try { boolean locked = basicTryLock(-1, TimeUnit.MILLISECONDS, false); if (!locked) { lockCheckReadiness(); } Assert.assertTrue(locked, "Failed to acquire DistributedLock"); } catch (IllegalStateException ex) { lockCheckReadiness(); throw ex; } catch (InterruptedException e) { Thread.currentThread().interrupt(); lockCheckReadiness(); Assert.assertTrue(false, "Failed to acquire DistributedLock"); } } public void lockInterruptibly() throws InterruptedException { try { boolean locked = basicTryLock(-1, TimeUnit.MILLISECONDS, true); if (!locked) { lockCheckReadiness(); } Assert.assertTrue(locked, "Failed to acquire DistributedLock"); } catch (IllegalStateException ex) { lockCheckReadiness(); throw ex; } } public boolean tryLock() { try { ReplyProcessor21.forceSevereAlertProcessing(); return getLockService().lock(this.key, 0, getLockLeaseForLock()); } catch (IllegalStateException ex) { lockCheckReadiness(); throw ex; } finally { ReplyProcessor21.unforceSevereAlertProcessing(); } } public boolean tryLock(long time, TimeUnit unit) throws InterruptedException { return basicTryLock(time, unit, true); } private boolean basicTryLock(long time, TimeUnit unit, boolean interruptible) throws InterruptedException { // if (Thread.interrupted()) throw new InterruptedException(); not necessary lockInterruptibly does this final DM dm = 
getDistributionManager(); long start = System.currentTimeMillis(); long timeoutMS = getLockTimeoutForLock(time, unit); long end; if (timeoutMS < 0) { timeoutMS = Long.MAX_VALUE; end = Long.MAX_VALUE; } else { end = start + timeoutMS; } long ackSAThreshold = getSystem().getConfig().getAckSevereAlertThreshold() * 1000; boolean suspected = false; boolean severeAlertIssued = false; DistributedMember lockHolder = null; long waitInterval; long ackWaitThreshold; if (ackSAThreshold > 0) { ackWaitThreshold = getSystem().getConfig().getAckWaitThreshold() * 1000; waitInterval = ackWaitThreshold; } else { waitInterval = timeoutMS; ackWaitThreshold = 0; } do { try { waitInterval = Math.min(end-System.currentTimeMillis(), waitInterval); ReplyProcessor21.forceSevereAlertProcessing(); final boolean gotLock; if (interruptible) { gotLock = getLockService().lockInterruptibly(this.key, waitInterval, getLockLeaseForLock()); } else { gotLock = getLockService().lock(this.key, waitInterval, getLockLeaseForLock()); } if (gotLock) { return true; } if (ackSAThreshold > 0) { long elapsed = System.currentTimeMillis() - start; if (elapsed > ackWaitThreshold) { if (!suspected) { // start suspect processing on the holder of the lock suspected = true; severeAlertIssued = false; // in case this is a new lock holder waitInterval = ackSAThreshold; DLockRemoteToken remoteToken = ((DLockService)getLockService()).queryLock(key); lockHolder = remoteToken.getLessee(); if (lockHolder != null) { dm.getMembershipManager() .suspectMember(lockHolder, "Has not released a global region entry lock in over " + ackWaitThreshold / 1000 + " seconds"); } } else if (elapsed > ackSAThreshold) { DLockRemoteToken remoteToken = ((DLockService)getLockService()).queryLock(key); if (lockHolder != null && remoteToken.getLessee() != null && lockHolder.equals(remoteToken.getLessee())) { if (!severeAlertIssued) { severeAlertIssued = true; 
logger.fatal(LocalizedMessage.create(LocalizedStrings.DistributedRegion_0_SECONDS_HAVE_ELAPSED_WAITING_FOR_GLOBAL_REGION_ENTRY_LOCK_HELD_BY_1, new Object[] {Long.valueOf(ackWaitThreshold+ackSAThreshold), lockHolder})); } } else { // the lock holder has changed suspected = false; waitInterval = ackWaitThreshold; lockHolder = null; } } } } // ackSAThreshold processing } catch (IllegalStateException ex) { lockCheckReadiness(); throw ex; } finally { ReplyProcessor21.unforceSevereAlertProcessing(); } } while (System.currentTimeMillis() < end); return false; } public void unlock() { try { ReplyProcessor21.forceSevereAlertProcessing(); getLockService().unlock(this.key); if (!DistributedRegion.this.entries.containsKey(key)) { getLockService().freeResources(key); } } catch (IllegalStateException ex) { lockCheckReadiness(); throw ex; } finally { ReplyProcessor21.unforceSevereAlertProcessing(); } } public Condition newCondition() { throw new UnsupportedOperationException(LocalizedStrings.DistributedRegion_NEWCONDITION_UNSUPPORTED.toLocalizedString()); } } /////////////////// RegionDistributedLock ////////////////// private class RegionDistributedLock implements Lock { public RegionDistributedLock() { } public void lock() { try { boolean locked = getLockService().suspendLocking(-1); Assert.assertTrue(locked, "Failed to acquire RegionDistributedLock"); } catch (IllegalStateException ex) { lockCheckReadiness(); throw ex; } } public void lockInterruptibly() throws InterruptedException { // if (Thread.interrupted()) throw new InterruptedException(); not necessary suspendLockingInterruptibly does this try { boolean locked = getLockService().suspendLockingInterruptibly(-1); Assert.assertTrue(locked, "Failed to acquire RegionDistributedLock"); } catch (IllegalStateException ex) { lockCheckReadiness(); throw ex; } } public boolean tryLock() { try { return getLockService().suspendLocking(0); } catch (IllegalStateException ex) { lockCheckReadiness(); throw ex; } } public boolean 
tryLock(long time, TimeUnit unit) throws InterruptedException { // if (Thread.interrupted()) throw new InterruptedException(); not necessary suspendLockingINterruptibly does this try { return getLockService().suspendLockingInterruptibly( getLockTimeoutForLock(time, unit)); } catch (IllegalStateException ex) { lockCheckReadiness(); throw ex; } } public void unlock() { try { getLockService().resumeLocking(); } catch (IllegalStateException ex) { lockCheckReadiness(); throw ex; } } public Condition newCondition() { throw new UnsupportedOperationException(LocalizedStrings.DistributedRegion_NEWCONDITION_UNSUPPORTED.toLocalizedString()); } } // - add in region locking for destroy and invalidate... /** * If this region's scope is GLOBAL, get the region distributed lock. The * sender is responsible for unlocking. * * @return the acquired Lock if the region is GLOBAL and not already suspend, * otherwise null. */ Lock getRegionDistributedLockIfGlobal() throws TimeoutException { if (getScope().isGlobal()) { if (isLockingSuspendedByCurrentThread()) return null; Lock dlock = getRegionDistributedLock(); dlock.lock(); return dlock; } return null; } /* * void localDestroyRegion(Object aCallbackArgument) { try { Lock dlock = * this.getRegionDistributedLockIfGlobal(); try { * super.localDestroyRegion(aCallbackArgument); } finally { if (dlock != null) { * dlock.unlock(); } } } catch (TimeoutException e) { throw new * GemFireCacheException("localDestroyRegion timed out", e); } } * * void destroyRegion(Object aCallbackArgument) throws CacheWriterException, * TimeoutException { Lock dlock = this.getRegionDistributedLockIfGlobal(); * try { super.destroyRegion(aCallbackArgument); } finally { if (dlock != * null) { dlock.unlock(); } } } * * void invalidateRegion(Object aCallbackArgument) throws TimeoutException { * Lock dlock = this.getRegionDistributedLockIfGlobal(); try { * super.invalidateRegion(aCallbackArgument); } finally { if (dlock != null) { * dlock.unlock(); } } } */ /** * 
Distribute the PutAllOp.
 * This implementation distributes it to peers.
 * @since 5.7
 */
@Override
public void postPutAllSend(DistributedPutAllOperation putAllOp,
    VersionedObjectList successfulPuts)
{
  if (putAllOp.putAllDataSize > 0) {
    putAllOp.distribute();
  }
  else {
    if (logger.isDebugEnabled()) {
      logger.debug("DR.postPutAll: no data to distribute");
    }
  }
}

@Override
public void postRemoveAllSend(DistributedRemoveAllOperation op,
    VersionedObjectList successfulOps)
{
  if (op.removeAllDataSize > 0) {
    op.distribute();
  }
  else {
    getCache().getLoggerI18n().fine("DR.postRemoveAll: no data to distribute");
  }
}

@Override
public VersionedObjectList basicPutAll(final Map<?, ?> map,
    final DistributedPutAllOperation putAllOp,
    final Map<Object, VersionTag> retryVersions)
{
  // GLOBAL scope: hold the region-wide distributed lock around the whole putAll
  Lock dlock = this.getRegionDistributedLockIfGlobal();
  try {
    return super.basicPutAll(map, putAllOp, retryVersions);
  }
  finally {
    if (dlock != null) {
      dlock.unlock();
    }
  }
}

@Override
public VersionedObjectList basicRemoveAll(final Collection<Object> keys,
    final DistributedRemoveAllOperation removeAllOp,
    final ArrayList<VersionTag> retryVersions)
{
  // GLOBAL scope: hold the region-wide distributed lock around the whole removeAll
  Lock dlock = this.getRegionDistributedLockIfGlobal();
  try {
    return super.basicRemoveAll(keys, removeAllOp, retryVersions);
  }
  finally {
    if (dlock != null) {
      dlock.unlock();
    }
  }
}

/** Returns true if any required roles are currently missing */
boolean isMissingRequiredRoles()
{
  return this.isMissingRequiredRoles;
}

/**
 * Returns the missing required roles after waiting up to the timeout
 *
 * @throws IllegalStateException
 *           if region is not configured with required roles
 * @throws InterruptedException TODO-javadocs
 */
public Set waitForRequiredRoles(long timeout) throws InterruptedException
{
  if (Thread.interrupted())
    throw new InterruptedException();
  checkReadiness();
  if (!getMembershipAttributes().hasRequiredRoles()) {
    throw new IllegalStateException(LocalizedStrings.DistributedRegion_REGION_HAS_NOT_BEEN_CONFIGURED_WITH_REQUIRED_ROLES.toLocalizedString());
  }
  if
(!this.isMissingRequiredRoles) { // should we delete this check? if (logger.isDebugEnabled()) { logger.debug("No missing required roles to wait for."); } return Collections.EMPTY_SET; // early-out: no missing required roles } if (timeout != 0) { // if timeout is zero then fall through past waits if (timeout == -1) { // infinite timeout while (this.isMissingRequiredRoles) { checkReadiness(); this.cache.getCancelCriterion().checkCancelInProgress(null); // bail if distribution has stopped synchronized (this.missingRequiredRoles) { // one more check while synced if (this.isMissingRequiredRoles) { if (logger.isDebugEnabled()) { logger.debug("About to wait for missing required roles."); } // TODO an infinite wait here might be a problem... this.missingRequiredRoles.wait(); // spurious wakeup ok } } } } else { // use the timeout long endTime = System.currentTimeMillis() + timeout; while (this.isMissingRequiredRoles) { checkReadiness(); this.cache.getCancelCriterion().checkCancelInProgress(null); // bail if distribution has stopped synchronized (this.missingRequiredRoles) { // one more check while synced if (this.isMissingRequiredRoles) { long timeToWait = endTime - System.currentTimeMillis(); if (timeToWait > 0) { if (logger.isDebugEnabled()) { logger.debug("About to wait up to {} milliseconds for missing required roles.", timeToWait); } this.missingRequiredRoles.wait(timeToWait); // spurious wakeup ok } else { break; } } } } } } // check readiness again: thread may have been notified at destroy time checkReadiness(); if (this.isMissingRequiredRoles) { // sync on missingRequiredRoles to prevent mods to required role status... synchronized (this.missingRequiredRoles) { return Collections.unmodifiableSet(new HashSet( this.missingRequiredRoles)); } } else { return Collections.EMPTY_SET; } } /** Returns true if the role is currently present this region's membership. 
*/ public boolean isRoleInRegionMembership(Role role) { checkReadiness(); return basicIsRoleInRegionMembership(role); } protected boolean basicIsRoleInRegionMembership(Role role) { if (getSystem().getDistributedMember().getRoles().contains(role)) { // since we are playing the role return true; } Set members = this.distAdvisor.adviseGeneric(); for (Iterator iter = members.iterator(); iter.hasNext();) { DistributedMember member = (DistributedMember)iter.next(); Set roles = member.getRoles(); if (roles.contains(role)) { return true; } } return false; } @Override public void remoteRegionInitialized(CacheProfile profile) { synchronized(this.advisorListener) { if (this.advisorListener.members == null && hasListener()) { Object callback = TEST_HOOK_ADD_PROFILE? profile : null; RegionEventImpl event = new RegionEventImpl(this, Operation.REGION_CREATE, callback, true, profile.peerMemberId); dispatchListenerEvent(EnumListenerEvent.AFTER_REMOTE_REGION_CREATE, event); } } } @Override protected void removeSenderFromAdvisor(InternalDistributedMember sender, int serial, boolean regionDestroyed) { getDistributionAdvisor().removeIdWithSerial(sender, serial, regionDestroyed); } /** doesn't throw RegionDestroyedException, used by CacheDistributionAdvisor */ public DistributionAdvisee getParentAdvisee() { return (DistributionAdvisee) basicGetParentRegion(); } /** * Used to get membership events from our advisor to implement * RegionMembershipListener invocations. 
* * @since 5.0 */ protected class AdvisorListener implements MembershipListener { private Set members = new HashSet(); protected boolean destroyed = false; protected synchronized void addMembers(Set newMembers) { this.members.addAll(newMembers); } protected synchronized Set getInitialMembers() { Set initMembers = this.members; this.members = null; return initMembers; } public void quorumLost(Set<InternalDistributedMember> failures, List<InternalDistributedMember> remaining) { } public void memberSuspect(InternalDistributedMember id, InternalDistributedMember whoSuspected) { } /** called when membership listeners are added after region creation */ protected synchronized void initRMLWrappers() { Set membersWithThisRegion = DistributedRegion.this.distAdvisor.adviseGeneric(); initPostCreateRegionMembershipListeners(membersWithThisRegion); } public synchronized void memberJoined(InternalDistributedMember id) { if (this.destroyed) { return; } if (this.members != null) { this.members.add(id); } // bug #44684 - do not notify listener of create until remote member is initialized // if (this.members == null && hasListener()) { // RegionEventImpl event = new RegionEventImpl(DistributedRegion.this, // Operation.REGION_CREATE, null, true, id); // dispatchListenerEvent(EnumListenerEvent.AFTER_REMOTE_REGION_CREATE, // event); // } if (getMembershipAttributes().hasRequiredRoles()) { // newlyAcquiredRoles is used for intersection and RoleEvent Set newlyAcquiredRoles = Collections.EMPTY_SET; synchronized (missingRequiredRoles) { if (isMissingRequiredRoles) { Set roles = id.getRoles(); newlyAcquiredRoles = new HashSet(missingRequiredRoles); newlyAcquiredRoles.retainAll(roles); // find the intersection if (!newlyAcquiredRoles.isEmpty()) { if (DistributedRegion.this.rmq != null) { Iterator it = newlyAcquiredRoles.iterator(); final DM dm = getDistributionManager(); while (it.hasNext()) { getCache().getCancelCriterion().checkCancelInProgress(null); final Role role = (Role)it.next(); try 
{ // do this in the waiting pool to make it async // @todo darrel/klund: add a single serial executor for // queue flush dm.getWaitingThreadPool().execute(new Runnable() { public void run() { DistributedRegion.this.rmq.roleReady(role); } }); break; } catch (RejectedExecutionException ex) { throw ex; } } // while } missingRequiredRoles.removeAll(newlyAcquiredRoles); if (this.members == null && missingRequiredRoles.isEmpty()) { isMissingRequiredRoles = false; getCachePerfStats().incReliableRegionsMissing(-1); if (getMembershipAttributes().getLossAction().isAllAccess()) getCachePerfStats().incReliableRegionsMissingFullAccess(-1); // rahul else if (getMembershipAttributes().getLossAction() .isLimitedAccess()) getCachePerfStats() .incReliableRegionsMissingLimitedAccess(-1); else if (getMembershipAttributes().getLossAction().isNoAccess()) getCachePerfStats().incReliableRegionsMissingNoAccess(-1); boolean async = resumeReliability(id, newlyAcquiredRoles); if (async) { this.destroyed = true; } } } } if (!this.destroyed) { // any number of threads may be waiting on missingRequiredRoles missingRequiredRoles.notifyAll(); } } if (!this.destroyed && this.members == null && hasListener()) { if (!newlyAcquiredRoles.isEmpty()) { // fire afterRoleGain event RoleEventImpl relEvent = new RoleEventImpl(DistributedRegion.this, Operation.REGION_CREATE, null, true, id, newlyAcquiredRoles); dispatchListenerEvent(EnumListenerEvent.AFTER_ROLE_GAIN, relEvent); } } } } public synchronized void memberDeparted(InternalDistributedMember id, boolean crashed) { if (this.destroyed) { return; } if (this.members != null) { this.members.remove(id); } if (this.members == null && hasListener()) { RegionEventImpl event = new RegionEventImpl(DistributedRegion.this, Operation.REGION_CLOSE, null, true, id); if (crashed) { dispatchListenerEvent(EnumListenerEvent.AFTER_REMOTE_REGION_CRASH, event); } else { // @todo darrel: it would be nice to know if what actual op was done // could be close, local destroy, 
or destroy (or load snap?) if (DestroyRegionOperation.isRegionDepartureNotificationOk()) { dispatchListenerEvent(EnumListenerEvent.AFTER_REMOTE_REGION_DEPARTURE, event); } } } if (getMembershipAttributes().hasRequiredRoles()) { Set newlyMissingRoles = Collections.EMPTY_SET; synchronized (missingRequiredRoles) { Set roles = id.getRoles(); for (Iterator iter = roles.iterator(); iter.hasNext();) { Role role = (Role)iter.next(); if (getMembershipAttributes().getRequiredRoles().contains(role) && !basicIsRoleInRegionMembership(role)) { if (newlyMissingRoles == Collections.EMPTY_SET) { newlyMissingRoles = new HashSet(); } newlyMissingRoles.add(role); if (this.members == null && !isMissingRequiredRoles) { isMissingRequiredRoles = true; getCachePerfStats().incReliableRegionsMissing(1); if (getMembershipAttributes().getLossAction().isAllAccess()) getCachePerfStats().incReliableRegionsMissingFullAccess(1); // rahul else if (getMembershipAttributes().getLossAction() .isLimitedAccess()) getCachePerfStats().incReliableRegionsMissingLimitedAccess(1); else if (getMembershipAttributes().getLossAction().isNoAccess()) getCachePerfStats().incReliableRegionsMissingNoAccess(1); boolean async = lostReliability(id, newlyMissingRoles); if (async) { this.destroyed = true; } } } } if (!this.destroyed) { missingRequiredRoles.addAll(newlyMissingRoles); // any number of threads may be waiting on missingRequiredRoles... missingRequiredRoles.notifyAll(); } } if (!this.destroyed && this.members == null && hasListener()) { if (!newlyMissingRoles.isEmpty()) { // fire afterRoleLoss event RoleEventImpl relEvent = new RoleEventImpl(DistributedRegion.this, Operation.REGION_CLOSE, null, true, id, newlyMissingRoles); dispatchListenerEvent(EnumListenerEvent.AFTER_ROLE_LOSS, relEvent); } } } } } /** * Used to bootstrap txState. 
* @param key * @return distributedRegions, * member with parimary bucket for partitionedRegions */ @Override public DistributedMember getOwnerForKey(KeyInfo key) { //Asif: fix for sqlfabric bug 42266 assert !this.isInternalRegion() || this.isMetaRegionWithTransactions(); if (!this.getAttributes().getDataPolicy().withStorage() || (this.concurrencyChecksEnabled && this.getAttributes() .getDataPolicy() == DataPolicy.NORMAL)) { // execute on random replicate return getRandomReplicate(); } // if we are non-persistent, forward transactions to // a persistent member if (this.concurrencyChecksEnabled && !generateVersionTag) { return getRandomPersistentReplicate(); } return super.getOwnerForKey(key); } /** * Execute the provided named function in all locations that contain the given * keys. So function can be executed on just one fabric node, executed in * parallel on a subset of nodes in parallel across all the nodes. * * @param function * @param args * @since 5.8 */ @Override public ResultCollector executeFunction( final DistributedRegionFunctionExecutor execution, final Function function, final Object args, final ResultCollector rc, final Set filter, final ServerToClientFunctionResultSender sender) { DistributedMember target = getTransactionalNode(); if (target != null) { if (target.equals(getMyId())) { return executeLocally(execution, function, args, 0, rc, filter, sender); } return executeOnReplicate(execution, function, args, rc, filter, target); } else if (this.getAttributes().getDataPolicy().withReplication() || this.getAttributes().getDataPolicy().withPreloaded()) { // execute locally final Set<InternalDistributedMember> singleMember = Collections .singleton(getMyId()); execution.validateExecution(function, singleMember); execution.setExecutionNodes(singleMember); return executeLocally(execution, function, args, 0, rc, filter, sender); } else { // select a random replicate target = getRandomReplicate(); if (target == null) { throw new 
FunctionException(LocalizedStrings .DistributedRegion_NO_REPLICATED_REGION_FOUND_FOR_EXECUTING_FUNCTION_0 .toLocalizedString(function.getId())); } } final LocalResultCollector<?, ?> localRC = execution .getLocalResultCollector(function, rc); return executeOnReplicate(execution, function, args, localRC, filter, target); } private ResultCollector executeOnReplicate( final DistributedRegionFunctionExecutor execution, final Function function, final Object args, ResultCollector rc, final Set filter, final DistributedMember target) { final Set singleMember = Collections.singleton(target); execution.validateExecution(function, singleMember); execution.setExecutionNodes(singleMember); HashMap<InternalDistributedMember, Object> memberArgs = new HashMap<InternalDistributedMember, Object>(); memberArgs.put((InternalDistributedMember)target, execution.getArgumentsForMember(target.getId())); ResultSender resultSender = new DistributedRegionFunctionResultSender(null, rc, function, execution.getServerResultSender()); DistributedRegionFunctionResultWaiter waiter = new DistributedRegionFunctionResultWaiter( this.getSystem(), this.getFullPath(), rc, function, filter, Collections.singleton(target), memberArgs, resultSender); rc = waiter.getFunctionResultFrom(Collections.singleton(target), function, execution); return rc; } /** * @return the node which a transaction is already is progress, null otherwise */ private DistributedMember getTransactionalNode() { if (cache.getTxManager().getTXState() != null) { return cache.getTxManager().getTXState().getTarget(); } return null; } /** * Implementation of {@link ProfileVisitor} that selects a random replicated * member from the available ones for this region. 
*/ static final class GetRandomReplicate implements ProfileVisitor<DistributedMember> { private boolean onlyPersistent = false; InternalDistributedMember member = null; private int randIndex = -1; public GetRandomReplicate() { } public GetRandomReplicate(boolean onlyPersistent) { this.onlyPersistent = onlyPersistent; } public boolean visit(DistributionAdvisor advisor, Profile profile, int profileIndex, int numProfiles, DistributedMember member) { final CacheProfile cp = (CacheProfile)profile; if (this.randIndex < 0) { this.randIndex = PartitionedRegion.rand.nextInt(numProfiles); } if (cp.dataPolicy.withReplication() && cp.regionInitialized && !cp.memberUnInitialized) { if (onlyPersistent && !cp.dataPolicy.withPersistence()) { return true; } // store the last replicated member in any case since in the worst case // there may be no replicated node after "randIndex" in which case the // last visited member will be used this.member = cp.getDistributedMember(); if (profileIndex >= this.randIndex) { return false; } } return true; } } /** * @return a random replicate, null if there are none */ public InternalDistributedMember getRandomReplicate() { /* [sumedh] The old code causes creation of a unnecessary HashSet * and population with all replicates (which may be large), then * copy into an array and then selection of a random one from that. * The new approach uses a much more efficient visitor instead. 
Set replicates = this.getCacheDistributionAdvisor().adviseReplicates(); if (replicates.isEmpty()) { return null; } return (InternalDistributedMember)(replicates .toArray()[new Random().nextInt(replicates.size())]); */ final GetRandomReplicate getReplicate = new GetRandomReplicate(); this.getCacheDistributionAdvisor().accept(getReplicate, null); return getReplicate.member; } /** * @return a random persistent replicate, null if there is none */ public InternalDistributedMember getRandomPersistentReplicate() { final GetRandomReplicate getPersistentReplicate = new GetRandomReplicate(true); this.getCacheDistributionAdvisor().accept(getPersistentReplicate, null); return getPersistentReplicate.member; } void executeOnRegion(DistributedRegionFunctionStreamingMessage msg, final Function function, final Object args, int prid, final Set filter, boolean isReExecute) throws IOException { final DM dm = getDistributionManager(); ResultSender resultSender = new DistributedRegionFunctionResultSender(dm, msg, function); final RegionFunctionContextImpl context = new RegionFunctionContextImpl( function.getId(), this, args, filter, null, null, resultSender, isReExecute); FunctionStats stats = FunctionStats.getFunctionStats(function.getId(), dm.getSystem()); try { long start = stats.startTime(); stats.startFunctionExecution(function.hasResult()); function.execute(context); stats.endFunctionExecution(start,function.hasResult()); } catch (FunctionException functionException) { if (logger.isDebugEnabled()) { logger.debug("FunctionException occured on remote node while executing Function: {}", function.getId(), functionException); } stats.endFunctionExecutionWithException(function.hasResult()); throw functionException; } catch (CacheClosedException cacheClosedexception) { if (logger.isDebugEnabled()) { logger.debug("CacheClosedException occured on remote node while executing Function: {}", function.getId(), cacheClosedexception); } throw cacheClosedexception; } catch (Exception exception) { 
if (logger.isDebugEnabled()) { logger.debug("Exception occured on remote node while executing Function: {}", function.getId(), exception); } stats.endFunctionExecutionWithException(function.hasResult()); throw new FunctionException(exception); } } ResultCollector executeLocally( final DistributedRegionFunctionExecutor execution, final Function function, final Object args, int prid, final ResultCollector rc, final Set filter, final ServerToClientFunctionResultSender sender) { final LocalResultCollector<?, ?> localRC = execution .getLocalResultCollector(function, rc); final DM dm = getDistributionManager(); final DistributedRegionFunctionResultSender resultSender = new DistributedRegionFunctionResultSender( dm, localRC, function, sender); final RegionFunctionContextImpl context = new RegionFunctionContextImpl( function.getId(), DistributedRegion.this, args, filter, null, null, resultSender, execution.isReExecute()); execution.executeFunctionOnLocalNode(function, context, resultSender, dm, isTX()); return localRC; } @Override protected void setMemoryThresholdFlag(MemoryEvent event) { Set<InternalDistributedMember> others = getCacheDistributionAdvisor().adviseGeneric(); if (event.isLocal() || others.contains(event.getMember())) { if (event.getState().isCritical() && !event.getPreviousState().isCritical() && (event.getType() == ResourceType.HEAP_MEMORY || (event.getType() == ResourceType.OFFHEAP_MEMORY && getOffHeap()))) { setMemoryThresholdReachedCounterTrue(event.getMember()); } else if (!event.getState().isCritical() && event.getPreviousState().isCritical() && (event.getType() == ResourceType.HEAP_MEMORY || (event.getType() == ResourceType.OFFHEAP_MEMORY && getOffHeap()))) { removeMemberFromCriticalList(event.getMember()); } } } @Override public void removeMemberFromCriticalList(DistributedMember member) { if (logger.isDebugEnabled()) { logger.debug("DR: removing member {} from critical member list", member); } synchronized(this.memoryThresholdReachedMembers) { 
this.memoryThresholdReachedMembers.remove(member); if (this.memoryThresholdReachedMembers.size() == 0) { memoryThresholdReached.set(false); } } } @Override public Set<DistributedMember> getMemoryThresholdReachedMembers() { synchronized (this.memoryThresholdReachedMembers) { return Collections.unmodifiableSet(this.memoryThresholdReachedMembers); } } @Override public void initialCriticalMembers(boolean localMemoryIsCritical, Set<InternalDistributedMember> critialMembers) { Set<InternalDistributedMember> others = getCacheDistributionAdvisor().adviseGeneric(); for (InternalDistributedMember idm: critialMembers) { if (others.contains(idm)) { setMemoryThresholdReachedCounterTrue(idm); } } } /** * @param idm member whose threshold has been exceeded */ private void setMemoryThresholdReachedCounterTrue(final DistributedMember idm) { synchronized(this.memoryThresholdReachedMembers) { this.memoryThresholdReachedMembers.add(idm); if (this.memoryThresholdReachedMembers.size() > 0) { memoryThresholdReached.set(true); } } } /** * Fetch Version for the given key from a remote replicate member. 
* @param key * @throws EntryNotFoundException if the entry is not found on replicate member * @return VersionTag for the key */ protected VersionTag fetchRemoteVersionTag(Object key) { VersionTag tag = null; assert this.dataPolicy != DataPolicy.REPLICATE; TransactionId txId = cache.getCacheTransactionManager().suspend(); try { boolean retry = true; InternalDistributedMember member = getRandomReplicate(); while (retry) { try { if (member == null) { break; } FetchVersionResponse response = RemoteFetchVersionMessage.send(member, this, key); tag = response.waitForResponse(); retry = false; } catch (RemoteOperationException e) { member = getRandomReplicate(); if (member != null) { if (logger.isDebugEnabled()) { logger.debug("Retrying RemoteFetchVersionMessage on member:{}", member); } } } } } finally { if (txId != null) { cache.getCacheTransactionManager().resume(txId); } } return tag; } /** * Test hook for bug 48578. Returns true if it sees a net loader. * Returns false if it does not have one. */ public boolean hasNetLoader() { return this.hasNetLoader(getCacheDistributionAdvisor()); } }
apache-2.0
shurun19851206/ignite
modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePortableCacheTestSuite.java
6450
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.ignite.testsuites;

import java.util.HashSet;
import junit.framework.TestSuite;
import org.apache.ignite.internal.processors.cache.GridCacheAffinityRoutingSelfTest;
import org.apache.ignite.internal.processors.cache.GridCacheEntryMemorySizeSelfTest;
import org.apache.ignite.internal.processors.cache.GridCacheMvccSelfTest;
import org.apache.ignite.internal.processors.cache.GridCacheOffHeapTieredAtomicSelfTest;
import org.apache.ignite.internal.processors.cache.GridCacheOffHeapTieredEvictionAtomicSelfTest;
import org.apache.ignite.internal.processors.cache.GridCacheOffHeapTieredEvictionSelfTest;
import org.apache.ignite.internal.processors.cache.GridCacheOffHeapTieredSelfTest;
import org.apache.ignite.internal.processors.cache.expiry.IgniteCacheAtomicLocalExpiryPolicyTest;
import org.apache.ignite.internal.processors.cache.expiry.IgniteCacheExpiryPolicyTestSuite;
import org.apache.ignite.internal.processors.cache.portable.GridPortableCacheEntryMemorySizeSelfTest;
import org.apache.ignite.internal.processors.cache.portable.datastreaming.DataStreamProcessorPortableSelfTest;
import org.apache.ignite.internal.processors.cache.portable.datastreaming.GridDataStreamerImplSelfTest;
import org.apache.ignite.internal.processors.cache.portable.distributed.dht.GridCacheAffinityRoutingPortableSelfTest;
import org.apache.ignite.internal.processors.cache.portable.distributed.dht.GridCacheAtomicPartitionedOnlyPortableDataStreamerMultiNodeSelfTest;
import org.apache.ignite.internal.processors.cache.portable.distributed.dht.GridCacheAtomicPartitionedOnlyPortableDataStreamerMultithreadedSelfTest;
import org.apache.ignite.internal.processors.cache.portable.distributed.dht.GridCacheAtomicPartitionedOnlyPortableMultiNodeSelfTest;
import org.apache.ignite.internal.processors.cache.portable.distributed.dht.GridCacheAtomicPartitionedOnlyPortableMultithreadedSelfTest;
import org.apache.ignite.internal.processors.cache.portable.distributed.dht.GridCacheMemoryModePortableSelfTest;
import org.apache.ignite.internal.processors.cache.portable.distributed.dht.GridCacheOffHeapTieredAtomicPortableSelfTest;
import org.apache.ignite.internal.processors.cache.portable.distributed.dht.GridCacheOffHeapTieredEvictionAtomicPortableSelfTest;
import org.apache.ignite.internal.processors.cache.portable.distributed.dht.GridCacheOffHeapTieredEvictionPortableSelfTest;
import org.apache.ignite.internal.processors.cache.portable.distributed.dht.GridCacheOffHeapTieredPortableSelfTest;
import org.apache.ignite.internal.processors.cache.portable.distributed.dht.GridCachePortablesNearPartitionedByteArrayValuesSelfTest;
import org.apache.ignite.internal.processors.cache.portable.distributed.dht.GridCachePortablesPartitionedOnlyByteArrayValuesSelfTest;
import org.apache.ignite.internal.processors.datastreamer.DataStreamProcessorSelfTest;
import org.apache.ignite.marshaller.portable.PortableMarshaller;
import org.apache.ignite.testframework.config.GridTestProperties;

/**
 * Cache suite with portable marshaller.
 *
 * <p>Re-runs the generic {@code IgniteCacheTestSuite} with {@link PortableMarshaller}
 * forced as the configured marshaller, excluding the tests that either have a
 * dedicated portable-specific version (added explicitly below) or are known not
 * to work with the portable marshaller yet.
 */
public class IgnitePortableCacheTestSuite extends TestSuite {
    /**
     * Builds the suite. Note the marshaller override is applied via
     * {@link GridTestProperties} <em>before</em> any nested suite is constructed,
     * so every contained test starts grids with {@link PortableMarshaller}.
     *
     * @return Suite.
     * @throws Exception In case of error.
     */
    public static TestSuite suite() throws Exception {
        // Force PortableMarshaller for all grids started by the nested suites.
        GridTestProperties.setProperty(GridTestProperties.MARSH_CLASS_NAME, PortableMarshaller.class.getName());

        TestSuite suite = new TestSuite("Portable Cache Test Suite");

        // Classes listed here are excluded from the generic cache suite below.
        HashSet<Class> ignoredTests = new HashSet<>();

        // Tests below have a special version for Portable Marshaller
        ignoredTests.add(DataStreamProcessorSelfTest.class);
        ignoredTests.add(GridCacheOffHeapTieredEvictionAtomicSelfTest.class);
        ignoredTests.add(GridCacheOffHeapTieredEvictionSelfTest.class);
        ignoredTests.add(GridCacheOffHeapTieredSelfTest.class);
        ignoredTests.add(GridCacheOffHeapTieredAtomicSelfTest.class);
        ignoredTests.add(GridCacheAffinityRoutingSelfTest.class);
        ignoredTests.add(IgniteCacheAtomicLocalExpiryPolicyTest.class);
        ignoredTests.add(GridCacheEntryMemorySizeSelfTest.class);

        // Tests that are not ready to be used with PortableMarshaller
        ignoredTests.add(GridCacheMvccSelfTest.class);

        // Generic suites, minus the exclusions above.
        suite.addTest(IgniteCacheTestSuite.suite(ignoredTests));
        suite.addTest(IgniteCacheExpiryPolicyTestSuite.suite());

        // Portable-specific replacements and additions.
        suite.addTestSuite(GridCacheMemoryModePortableSelfTest.class);
        suite.addTestSuite(GridCacheOffHeapTieredEvictionAtomicPortableSelfTest.class);
        suite.addTestSuite(GridCacheOffHeapTieredEvictionPortableSelfTest.class);
        suite.addTestSuite(GridCachePortablesPartitionedOnlyByteArrayValuesSelfTest.class);
        suite.addTestSuite(GridCachePortablesNearPartitionedByteArrayValuesSelfTest.class);
        suite.addTestSuite(GridCacheOffHeapTieredPortableSelfTest.class);
        suite.addTestSuite(GridCacheOffHeapTieredAtomicPortableSelfTest.class);
        suite.addTestSuite(GridDataStreamerImplSelfTest.class);
        suite.addTestSuite(DataStreamProcessorPortableSelfTest.class);
        suite.addTestSuite(GridCacheAtomicPartitionedOnlyPortableDataStreamerMultiNodeSelfTest.class);
        suite.addTestSuite(GridCacheAtomicPartitionedOnlyPortableDataStreamerMultithreadedSelfTest.class);
        suite.addTestSuite(GridCacheAtomicPartitionedOnlyPortableMultiNodeSelfTest.class);
        suite.addTestSuite(GridCacheAtomicPartitionedOnlyPortableMultithreadedSelfTest.class);
        suite.addTestSuite(GridCacheAffinityRoutingPortableSelfTest.class);
        suite.addTestSuite(GridPortableCacheEntryMemorySizeSelfTest.class);

        return suite;
    }
}
apache-2.0
etothepii/haddock
betfair/build/generated/jax-wsCache/BFExchangeService/com/betfair/publicapi/types/exchange/v5/GetCurrentBetsReq.java
6030
package com.betfair.publicapi.types.exchange.v5;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlType;


/**
 * NOTE: JAXB-generated artifact (derived from the Betfair exchange v5 WSDL/XSD
 * under {@code build/generated/jax-wsCache}). Do not hand-edit; regenerate from
 * the schema instead.
 *
 * Obtain all bets placed on a given market. Pass marketId = 0 to obtain bets for all markets. If
 * detailed is true then also return details of Matches when betStatus = M
 *
 * <p>Java class for GetCurrentBetsReq complex type.
 *
 * <p>The following schema fragment specifies the expected content contained within this class.
 *
 * <pre>
 * &lt;complexType name="GetCurrentBetsReq">
 *   &lt;complexContent>
 *     &lt;extension base="{http://www.betfair.com/publicapi/types/exchange/v5/}APIRequest">
 *       &lt;sequence>
 *         &lt;element name="betStatus" type="{http://www.betfair.com/publicapi/types/exchange/v5/}BetStatusEnum"/>
 *         &lt;element name="detailed" type="{http://www.w3.org/2001/XMLSchema}boolean"/>
 *         &lt;element name="locale" type="{http://www.w3.org/2001/XMLSchema}string"/>
 *         &lt;element name="timezone" type="{http://www.w3.org/2001/XMLSchema}string"/>
 *         &lt;element name="marketId" type="{http://www.w3.org/2001/XMLSchema}int"/>
 *         &lt;element name="orderBy" type="{http://www.betfair.com/publicapi/types/exchange/v5/}BetsOrderByEnum"/>
 *         &lt;element name="recordCount" type="{http://www.w3.org/2001/XMLSchema}int"/>
 *         &lt;element name="startRecord" type="{http://www.w3.org/2001/XMLSchema}int"/>
 *         &lt;element name="noTotalRecordCount" type="{http://www.w3.org/2001/XMLSchema}boolean"/>
 *       &lt;/sequence>
 *     &lt;/extension>
 *   &lt;/complexContent>
 * &lt;/complexType>
 * </pre>
 *
 *
 */
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "GetCurrentBetsReq", propOrder = {
    "betStatus",
    "detailed",
    "locale",
    "timezone",
    "marketId",
    "orderBy",
    "recordCount",
    "startRecord",
    "noTotalRecordCount"
})
public class GetCurrentBetsReq
    extends APIRequest
{

    // Status of bets to fetch (e.g. matched/unmatched); required by the schema.
    @XmlElement(required = true)
    protected BetStatusEnum betStatus;
    // When true, match details are also returned (schema doc: applies to betStatus = M).
    protected boolean detailed;
    @XmlElement(required = true, nillable = true)
    protected String locale;
    @XmlElement(required = true, nillable = true)
    protected String timezone;
    // Market to query; 0 means all markets (see class javadoc).
    protected int marketId;
    @XmlElement(required = true)
    protected BetsOrderByEnum orderBy;
    // Paging controls: recordCount = page size, startRecord = offset.
    protected int recordCount;
    protected int startRecord;
    // When true, the service skips computing the total record count.
    protected boolean noTotalRecordCount;

    /**
     * Gets the value of the betStatus property.
     *
     * @return
     *     possible object is
     *     {@link BetStatusEnum }
     *
     */
    public BetStatusEnum getBetStatus() {
        return betStatus;
    }

    /**
     * Sets the value of the betStatus property.
     *
     * @param value
     *     allowed object is
     *     {@link BetStatusEnum }
     *
     */
    public void setBetStatus(BetStatusEnum value) {
        this.betStatus = value;
    }

    /**
     * Gets the value of the detailed property.
     *
     */
    public boolean isDetailed() {
        return detailed;
    }

    /**
     * Sets the value of the detailed property.
     *
     */
    public void setDetailed(boolean value) {
        this.detailed = value;
    }

    /**
     * Gets the value of the locale property.
     *
     * @return
     *     possible object is
     *     {@link String }
     *
     */
    public String getLocale() {
        return locale;
    }

    /**
     * Sets the value of the locale property.
     *
     * @param value
     *     allowed object is
     *     {@link String }
     *
     */
    public void setLocale(String value) {
        this.locale = value;
    }

    /**
     * Gets the value of the timezone property.
     *
     * @return
     *     possible object is
     *     {@link String }
     *
     */
    public String getTimezone() {
        return timezone;
    }

    /**
     * Sets the value of the timezone property.
     *
     * @param value
     *     allowed object is
     *     {@link String }
     *
     */
    public void setTimezone(String value) {
        this.timezone = value;
    }

    /**
     * Gets the value of the marketId property.
     *
     */
    public int getMarketId() {
        return marketId;
    }

    /**
     * Sets the value of the marketId property.
     *
     */
    public void setMarketId(int value) {
        this.marketId = value;
    }

    /**
     * Gets the value of the orderBy property.
     *
     * @return
     *     possible object is
     *     {@link BetsOrderByEnum }
     *
     */
    public BetsOrderByEnum getOrderBy() {
        return orderBy;
    }

    /**
     * Sets the value of the orderBy property.
     *
     * @param value
     *     allowed object is
     *     {@link BetsOrderByEnum }
     *
     */
    public void setOrderBy(BetsOrderByEnum value) {
        this.orderBy = value;
    }

    /**
     * Gets the value of the recordCount property.
     *
     */
    public int getRecordCount() {
        return recordCount;
    }

    /**
     * Sets the value of the recordCount property.
     *
     */
    public void setRecordCount(int value) {
        this.recordCount = value;
    }

    /**
     * Gets the value of the startRecord property.
     *
     */
    public int getStartRecord() {
        return startRecord;
    }

    /**
     * Sets the value of the startRecord property.
     *
     */
    public void setStartRecord(int value) {
        this.startRecord = value;
    }

    /**
     * Gets the value of the noTotalRecordCount property.
     *
     */
    public boolean isNoTotalRecordCount() {
        return noTotalRecordCount;
    }

    /**
     * Sets the value of the noTotalRecordCount property.
     *
     */
    public void setNoTotalRecordCount(boolean value) {
        this.noTotalRecordCount = value;
    }

}
apache-2.0
lemire/incubator-kylin
common/src/main/java/org/apache/kylin/common/util/LogTitlePrinter.java
1367
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.kylin.common.util;

/**
 * Writes an eye-catching boxed title to standard output, e.g. to mark the
 * start of a log section:
 *
 * <pre>
 * +-----...-----+
 * | my title    |
 * +-----...-----+
 * </pre>
 *
 * Created by Hongbin Ma(Binmahone) on 1/27/15.
 */
public class LogTitlePrinter {

    /** Horizontal rule printed above and below the title row. */
    private static final String BORDER =
            "+------------------------------------------------------------------------------------------------------+%n";

    /** Title row: left-aligned, padded to the border width. */
    private static final String TITLE_FORMAT = "| %-100s | %n";

    /**
     * Prints {@code title} framed by a top and bottom border on {@code System.out}.
     *
     * @param title text to display inside the box
     */
    public static void printTitle(String title) {
        System.out.format(BORDER);
        System.out.format(TITLE_FORMAT, title);
        System.out.format(BORDER);
    }
}
apache-2.0
nasser-munshi/Android
ICare/src/com/ftfl/icare/database/VaccineDataSource.java
5201
package com.ftfl.icare.database; import java.util.ArrayList; import java.util.List; import android.content.ContentValues; import android.content.Context; import android.database.Cursor; import android.database.SQLException; import android.database.sqlite.SQLiteDatabase; import android.util.Log; import com.ftfl.icare.util.ICareConstants; import com.ftfl.icare.util.Vaccine; public class VaccineDataSource { private SQLiteDatabase mICareDatabase; private ICareSQLiteHelper mICareDbHelper; List<Vaccine> mVaccinationList = new ArrayList<Vaccine>(); public VaccineDataSource(Context context) { mICareDbHelper = new ICareSQLiteHelper(context); } /* * open a method for writable database */ public void open() throws SQLException { mICareDatabase = mICareDbHelper.getWritableDatabase(); } /* * close database connection */ public void close() { mICareDbHelper.close(); } /* * insert data into the database. */ public boolean insert(Vaccine eVaccine) { this.open(); ContentValues cv = new ContentValues(); cv.put(ICareSQLiteHelper.COL_VACCINE_NAME, eVaccine.getName()); cv.put(ICareSQLiteHelper.COL_VACCINE_DATE, eVaccine.getDate()); cv.put(ICareSQLiteHelper.COL_VACCINE_TIME, eVaccine.getTime()); cv.put(ICareSQLiteHelper.COL_VACCINE_STATUS, eVaccine.getStatus()); cv.put(ICareSQLiteHelper.COL_VACCINE_PROFILE_ID, eVaccine.getProfileId()); long check = mICareDatabase.insert( ICareSQLiteHelper.TABLE_VACCINE, null, cv); mICareDatabase.close(); this.close(); if (check < 0) return false; else return true; } // Updating database by id public boolean updateData(int eVaccineId, Vaccine eVaccine) { this.open(); ContentValues cv = new ContentValues(); cv.put(ICareSQLiteHelper.COL_VACCINE_NAME, eVaccine.getName()); cv.put(ICareSQLiteHelper.COL_VACCINE_DATE, eVaccine.getDate()); cv.put(ICareSQLiteHelper.COL_VACCINE_TIME, eVaccine.getTime()); cv.put(ICareSQLiteHelper.COL_VACCINE_STATUS, eVaccine.getStatus()); cv.put(ICareSQLiteHelper.COL_VACCINE_PROFILE_ID, eVaccine.getProfileId()); int check = 
mICareDatabase.update( ICareSQLiteHelper.TABLE_VACCINE, cv, ICareSQLiteHelper.COL_VACCINE_ID + "=" + eVaccineId, null); mICareDatabase.close(); this.close(); if (check == 0) return false; else return true; } // delete data form database. public boolean deleteData(int eVaccineId) { this.open(); try { mICareDatabase.delete(ICareSQLiteHelper.TABLE_VACCINE, ICareSQLiteHelper.COL_VACCINE_ID + "=" + eVaccineId, null); } catch (Exception ex) { Log.e("ERROR", "data insertion problem"); return false; } this.close(); return true; } /* * using cursor for display data from database. */ public List<Vaccine> vaccineList() { this.open(); Cursor mCursor = mICareDatabase.query( ICareSQLiteHelper.TABLE_VACCINE, null, ICareSQLiteHelper.COL_VACCINE_PROFILE_ID + "=" + ICareConstants.SELECTED_PROFILE_ID, null, null, null, null); if (mCursor != null) { if (mCursor.moveToFirst()) { do { String id = mCursor.getString(mCursor .getColumnIndex(ICareSQLiteHelper.COL_VACCINE_ID)); String name = mCursor.getString(mCursor .getColumnIndex(ICareSQLiteHelper.COL_VACCINE_NAME)); String date = mCursor .getString(mCursor .getColumnIndex(ICareSQLiteHelper.COL_VACCINE_DATE)); String time = mCursor .getString(mCursor .getColumnIndex(ICareSQLiteHelper.COL_VACCINE_TIME)); String status = mCursor .getString(mCursor .getColumnIndex(ICareSQLiteHelper.COL_VACCINE_STATUS)); String profileId = mCursor .getString(mCursor .getColumnIndex(ICareSQLiteHelper.COL_VACCINE_PROFILE_ID)); mVaccinationList.add(new Vaccine(id, name, date, time, status, profileId)); } while (mCursor.moveToNext()); } mCursor.close(); } this.close(); return mVaccinationList; } /* * create a profile of ICareProfile. Here the data of the database according * to the given id is set to the profile and return a profile. 
*/ public Vaccine singleVaccineData(int eVaccineId) { this.open(); Vaccine singleVaccine; Cursor mCursor = mICareDatabase.query( ICareSQLiteHelper.TABLE_VACCINE, null, ICareSQLiteHelper.COL_VACCINE_ID + "=" + eVaccineId, null, null, null, null); mCursor.moveToFirst(); String id = mCursor.getString(mCursor .getColumnIndex(ICareSQLiteHelper.COL_VACCINE_ID)); String name = mCursor.getString(mCursor .getColumnIndex(ICareSQLiteHelper.COL_VACCINE_NAME)); String date = mCursor .getString(mCursor .getColumnIndex(ICareSQLiteHelper.COL_VACCINE_DATE)); String time = mCursor .getString(mCursor .getColumnIndex(ICareSQLiteHelper.COL_VACCINE_TIME)); String status = mCursor .getString(mCursor .getColumnIndex(ICareSQLiteHelper.COL_VACCINE_STATUS)); String profileId = mCursor .getString(mCursor .getColumnIndex(ICareSQLiteHelper.COL_VACCINE_PROFILE_ID)); mCursor.close(); singleVaccine = new Vaccine(id, name, date, time, status, profileId); this.close(); return singleVaccine; } }
apache-2.0
kiuby88/SeaCloudsPlatform
deployer/src/main/java/org/apache/brooklyn/entity/cloudfoundry/CloudFoundryEntity.java
2989
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.brooklyn.entity.cloudfoundry;

import org.apache.brooklyn.api.entity.Entity;
import org.apache.brooklyn.api.entity.ImplementedBy;
import org.apache.brooklyn.api.sensor.AttributeSensor;
import org.apache.brooklyn.config.ConfigKey;
import org.apache.brooklyn.core.config.ConfigKeys;
import org.apache.brooklyn.core.entity.Attributes;
import org.apache.brooklyn.core.entity.BrooklynConfigKeys;
import org.apache.brooklyn.core.entity.lifecycle.Lifecycle;
import org.apache.brooklyn.core.entity.trait.Startable;
import org.apache.brooklyn.core.sensor.Sensors;
import org.apache.brooklyn.location.cloudfoundry.CloudFoundryPaasLocation;
import org.apache.brooklyn.util.core.flags.SetFromFlag;
import org.apache.brooklyn.util.time.Duration;

/**
 * A startable Brooklyn entity that is deployed to a CloudFoundry PaaS
 * location. Declares the config keys and sensors shared by CloudFoundry
 * entities; the runtime behavior lives in {@link CloudFoundryEntityImpl}.
 */
@ImplementedBy(CloudFoundryEntityImpl.class)
public interface CloudFoundryEntity extends Entity, Startable {

    /** Maximum time to wait for the entity to start (standard Brooklyn key). */
    @SetFromFlag("startTimeout")
    ConfigKey<Duration> START_TIMEOUT = BrooklynConfigKeys.START_TIMEOUT;

    /**
     * Upper bound on the randomized delay applied before reconnecting sensors
     * on rebind; see the key's own description text below for the rationale.
     */
    @SetFromFlag("maxRebindSensorsDelay")
    ConfigKey<Duration> MAXIMUM_REBIND_SENSOR_CONNECT_DELAY = ConfigKeys
            .newConfigKey(Duration.class,
                    "cloudFoundryWebApp.maxSensorRebindDelay",
                    "The maximum delay to apply when reconnecting sensors when rebinding to " +
                            "this entity. Brooklyn will wait a random amount of time, up to the " +
                            "value of this config key, to avoid a thundering herd problem when " +
                            "the entity shares its machine with several others. Set to null or " +
                            "to 0 to disable any delay.",
                    Duration.TEN_SECONDS);

    /** The PaaS location this entity was deployed to. */
    AttributeSensor<CloudFoundryPaasLocation> PAAS_LOCATION = Sensors.newSensor(
            CloudFoundryPaasLocation.class, "cloudFoundryWebApp.paasLocation",
            "Location used to deploy the application");

    /** Whether the backing process has been confirmed as running. */
    AttributeSensor<Boolean> SERVICE_PROCESS_IS_RUNNING = Sensors.newBooleanSensor(
            "service.process.isRunning",
            "Whether the process for the service is confirmed as running");

    /** Actual lifecycle state (standard Brooklyn sensor, re-exposed here). */
    AttributeSensor<Lifecycle> SERVICE_STATE_ACTUAL = Attributes.SERVICE_STATE_ACTUAL;
}
apache-2.0
cwsus/esolutions
eSolutionsSecurity/src/main/java/com/cws/esolutions/security/processors/enums/AuditType.java
2630
/*
 * Copyright (c) 2009 - 2020 CaspersBox Web Services
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.cws.esolutions.security.processors.enums;
/*
 * Project: eSolutionsSecurity
 * Package: com.cws.esolutions.security.processors.enums
 * File: AuditType.java
 *
 * History
 *
 * Author               Date                            Comments
 * ----------------------------------------------------------------------------
 * cws-khuntly          11/23/2008 22:39:20             Created.
 */
/**
 * Enumerates every auditable action category recorded by the security
 * processors. Constants are grouped by functional area below.
 *
 * NOTE(review): do not reorder or insert constants mid-list — if ordinal
 * values are ever persisted, reordering would corrupt existing audit data.
 *
 * @author cws-khuntly
 * @version 1.0
 */
public enum AuditType
{
    // sentinel: no audit type recorded
    NONE,

    // authentication
    LOGON,
    LOGOFF,
    FORCELOGOFF,
    CHANGEPASS,
    RESETPASS,
    LOADSECURITY,
    VERIFYSECURITY,
    VERIFYRESET,

    // user management
    CREATEUSER,
    MODIFYUSER,
    DELETEUSER,
    SUSPENDUSER,
    PSUSPENDUSER,
    LISTUSERS,
    UNSUSPENDUSER,
    LOCKUSER,
    UNLOCKUSER,
    ADDSECURITY,
    SHOWAUDIT,
    SEARCHACCOUNTS,
    LOADACCOUNT,
    CHANGEKEYS,
    GETAUDITENTRIES,

    // file security (encryption/signing operations)
    DECRYPTFILE,
    ENCRYPTFILE,
    VERIFYFILE,
    SIGNFILE,

    // emailing
    SENDEMAIL,

    // service messaging
    SHOWMESSAGES,
    LOADMESSAGE,
    ADDSVCMESSAGE,
    EDITSVCMESSAGE,

    // app mgmt
    ADDAPP,
    UPDATEAPP,
    DELETEAPP,
    LISTAPPS,
    LOADAPP,
    GETFILES,
    DEPLOYAPP,

    // sysmgmt (remote server operations)
    KILL,
    EXECCMD,
    ADDSERVER,
    DELETESERVER,
    UPDATESERVER,
    LISTSERVERS,
    GETSERVER,
    TELNET,
    REMOTEDATE,
    NETSTAT,
    PROCESSLIST,
    STOP,
    START,
    RESTART,
    SUSPEND,

    // project mgmt
    ADDPROJECT,
    UPDATEPROJECT,
    LISTPROJECTS,
    LOADPROJECT,

    // platform mgmt
    ADDPLATFORM,
    DELETEPLATFORM,
    UPDATEPLATFORM,
    LISTPLATFORMS,
    LOADPLATFORM,

    // dns mgmt
    CREATEDNSRECORD,
    PUSHDNSRECORD,
    SITETXFR,
    LOADRECORD,

    // datacenter mgmt
    ADDDATACENTER,
    LISTDATACENTERS,
    LOADDATACENTER,
    UPDATEDATACENTER,

    // certificate mgmt
    LISTCSR,
    GENERATECERT,
    APPLYCERT,

    // added to satisfy service tests
    // DO NOT REMOVE
    JUNIT;
}
apache-2.0
apache/geronimo-yoko
yoko-spec-corba/src/main/java/org/omg/MessageRouting/UntypedReplyHandlerHelper.java
3825
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.omg.MessageRouting;

//
// IDL:omg.org/MessageRouting/UntypedReplyHandler:1.0
//
/**
 * IDL-generated CORBA helper for the UntypedReplyHandler interface.
 * Provides the standard helper operations: Any insertion/extraction,
 * TypeCode/repository-id access, stream read/write, and narrowing.
 * NOTE(review): generated code following the OMG helper pattern — the
 * fall-through empty catch blocks and stub re-wrapping below are intentional.
 */
final public class UntypedReplyHandlerHelper
{
    /** Stores the object reference (with its TypeCode) into an Any. */
    public static void
    insert(org.omg.CORBA.Any any, UntypedReplyHandler val)
    {
        any.insert_Object(val, type());
    }

    /**
     * Extracts an UntypedReplyHandler from an Any; throws BAD_OPERATION when
     * the Any does not hold an equivalent type.
     */
    public static UntypedReplyHandler
    extract(org.omg.CORBA.Any any)
    {
        if(any.type().equivalent(type()))
            return narrow(any.extract_Object());

        throw new org.omg.CORBA.BAD_OPERATION();
    }

    // Lazily-created TypeCode cache for this interface.
    private static org.omg.CORBA.TypeCode typeCode_;

    /** Returns the (lazily created) interface TypeCode. */
    public static org.omg.CORBA.TypeCode
    type()
    {
        if(typeCode_ == null)
        {
            org.omg.CORBA.ORB orb = org.omg.CORBA.ORB.init();
            typeCode_ = orb.create_interface_tc(id(), "UntypedReplyHandler");
        }

        return typeCode_;
    }

    /** Returns the repository id for this interface. */
    public static String
    id()
    {
        return "IDL:omg.org/MessageRouting/UntypedReplyHandler:1.0";
    }

    /**
     * Reads an object reference from the stream; falls back to wrapping the
     * delegate in a local stub when the reference is not directly castable.
     */
    public static UntypedReplyHandler
    read(org.omg.CORBA.portable.InputStream in)
    {
        org.omg.CORBA.Object _ob_v = in.read_Object();

        try
        {
            return (UntypedReplyHandler)_ob_v;
        }
        catch(ClassCastException ex)
        {
            // not a local instance — fall through and build a stub
        }

        org.omg.CORBA.portable.ObjectImpl _ob_impl;
        _ob_impl = (org.omg.CORBA.portable.ObjectImpl)_ob_v;
        _UntypedReplyHandlerStub _ob_stub = new _UntypedReplyHandlerStub();
        _ob_stub._set_delegate(_ob_impl._get_delegate());
        return _ob_stub;
    }

    /** Writes the object reference to the stream. */
    public static void
    write(org.omg.CORBA.portable.OutputStream out, UntypedReplyHandler val)
    {
        out.write_Object(val);
    }

    /**
     * Checked narrow: verifies the reference's type via _is_a before wrapping
     * it in a stub; throws BAD_PARAM when the reference is of the wrong type.
     * Returns null for a null input.
     */
    public static UntypedReplyHandler
    narrow(org.omg.CORBA.Object val)
    {
        if(val != null)
        {
            try
            {
                return (UntypedReplyHandler)val;
            }
            catch(ClassCastException ex)
            {
                // not directly castable — verify the type remotely below
            }

            if(val._is_a(id()))
            {
                org.omg.CORBA.portable.ObjectImpl _ob_impl;
                _UntypedReplyHandlerStub _ob_stub = new _UntypedReplyHandlerStub();
                _ob_impl = (org.omg.CORBA.portable.ObjectImpl)val;
                _ob_stub._set_delegate(_ob_impl._get_delegate());
                return _ob_stub;
            }

            throw new org.omg.CORBA.BAD_PARAM();
        }

        return null;
    }

    /**
     * Unchecked narrow: wraps the reference without the (possibly remote)
     * _is_a type check. Returns null for a null input.
     */
    public static UntypedReplyHandler
    unchecked_narrow(org.omg.CORBA.Object val)
    {
        if(val != null)
        {
            try
            {
                return (UntypedReplyHandler)val;
            }
            catch(ClassCastException ex)
            {
                // not directly castable — wrap the delegate in a stub
            }

            org.omg.CORBA.portable.ObjectImpl _ob_impl;
            _UntypedReplyHandlerStub _ob_stub = new _UntypedReplyHandlerStub();
            _ob_impl = (org.omg.CORBA.portable.ObjectImpl)val;
            _ob_stub._set_delegate(_ob_impl._get_delegate());
            return _ob_stub;
        }

        return null;
    }
}
apache-2.0
marques-work/gocd
spark/spark-spa/src/main/java/com/thoughtworks/go/spark/spa/StatusReportsController.java
8851
/* * Copyright 2021 ThoughtWorks, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.thoughtworks.go.spark.spa; import com.thoughtworks.go.config.exceptions.RecordNotFoundException; import com.thoughtworks.go.config.policy.SupportedEntity; import com.thoughtworks.go.domain.JobInstance; import com.thoughtworks.go.plugin.access.exceptions.SecretResolutionFailureException; import com.thoughtworks.go.server.exceptions.RulesViolationException; import com.thoughtworks.go.server.service.ElasticAgentPluginService; import com.thoughtworks.go.server.service.JobInstanceService; import com.thoughtworks.go.spark.Routes; import com.thoughtworks.go.spark.SparkController; import com.thoughtworks.go.spark.spring.SPAAuthenticationHelper; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.dao.DataRetrievalFailureException; import org.springframework.http.HttpStatus; import spark.ModelAndView; import spark.Request; import spark.Response; import spark.TemplateEngine; import java.util.HashMap; import java.util.Map; import static spark.Spark.*; public class StatusReportsController implements SparkController { private final SPAAuthenticationHelper authenticationHelper; private final TemplateEngine engine; private ElasticAgentPluginService elasticAgentPluginService; private JobInstanceService jobInstanceService; public static final String UNKNOWN_ERROR_MESSAGE = "Something went wrong while trying to fetch the Status Report. 
Please check the server and plugin logs for more details."; private static Logger LOGGER = LoggerFactory.getLogger(StatusReportsController.class); public StatusReportsController(SPAAuthenticationHelper authenticationHelper, TemplateEngine engine, ElasticAgentPluginService elasticAgentPluginService, JobInstanceService jobInstanceService) { this.authenticationHelper = authenticationHelper; this.engine = engine; this.elasticAgentPluginService = elasticAgentPluginService; this.jobInstanceService = jobInstanceService; } @Override public String controllerBasePath() { return Routes.StatusReports.SPA_BASE; } @Override public void setupRoutes() { path(controllerBasePath(), () -> { before("/:plugin_id", authenticationHelper::checkAdminUserAnd403); before("/:plugin_id/agent/:elastic_agent_id", (request, response) -> { authenticationHelper.checkUserHasPermissions(currentUsername(), getAction(request), SupportedEntity.ELASTIC_AGENT_PROFILE, request.params("elastic_agent_id")); }); before("/:plugin_id/cluster/:cluster_profile_id", (request, response) -> { authenticationHelper.checkUserHasPermissions(currentUsername(), getAction(request), SupportedEntity.CLUSTER_PROFILE, request.params("cluster_profile_id")); }); get("/:plugin_id", this::pluginStatusReport, engine); get("/:plugin_id/agent/:elastic_agent_id", this::agentStatusReport, engine); get("/:plugin_id/cluster/:cluster_profile_id", this::clusterStatusReport, engine); }); } public ModelAndView pluginStatusReport(Request request, Response response) { String pluginId = request.params("plugin_id"); try { String pluginStatusReport = elasticAgentPluginService.getPluginStatusReport(pluginId); Map<Object, Object> object = new HashMap<>() {{ put("viewTitle", "Plugin Status Report"); put("viewFromPlugin", pluginStatusReport); }}; return new ModelAndView(object, "status_reports/index.ftlh"); } catch (RecordNotFoundException e) { return errorPage(response, 404, "Plugin Status Report", e.getMessage()); } catch 
(UnsupportedOperationException e) { String message = String.format("Status Report for plugin with id: '%s' is not found.", pluginId); return errorPage(response, 404, "Plugin Status Report", message); } catch (RulesViolationException | SecretResolutionFailureException e) { LOGGER.error(e.getMessage(), e); return errorPage(response, 500, "Plugin Status Report", e.getMessage()); } catch (Exception e) { LOGGER.error(e.getMessage(), e); return errorPage(response, 500, "Plugin Status Report", UNKNOWN_ERROR_MESSAGE); } } public ModelAndView agentStatusReport(Request request, Response response) throws Exception { String pluginId = request.params("plugin_id"); String elasticAgentId = parseElasticAgentId(request); String jobIdString = request.queryParams("job_id"); long jobId; try { jobId = Long.parseLong(jobIdString); } catch (NumberFormatException e) { return errorPage(response, HttpStatus.UNPROCESSABLE_ENTITY.value(), "Agent Status Report", "Please provide a valid job_id for Agent Status Report."); } try { JobInstance jobInstance = jobInstanceService.buildById(jobId); String agentStatusReport = elasticAgentPluginService.getAgentStatusReport(pluginId, jobInstance.getIdentifier(), elasticAgentId); Map<Object, Object> object = new HashMap<>(); object.put("viewTitle", "Agent Status Report"); object.put("viewFromPlugin", agentStatusReport); return new ModelAndView(object, "status_reports/index.ftlh"); } catch (RecordNotFoundException e) { return errorPage(response, 404, "Agent Status Report", e.getMessage()); } catch (DataRetrievalFailureException | UnsupportedOperationException e) { String message = String.format("Status Report for plugin with id: '%s' for agent '%s' is not found.", pluginId, elasticAgentId); return errorPage(response, 404, "Agent Status Report", message); } catch (RulesViolationException | SecretResolutionFailureException e) { LOGGER.error(e.getMessage(), e); return errorPage(response, 500, "Agent Status Report", e.getMessage()); } catch (Exception e) { 
LOGGER.error(e.getMessage(), e); return errorPage(response, 500, "Agent Status Report", UNKNOWN_ERROR_MESSAGE); } } public ModelAndView clusterStatusReport(Request request, Response response) { String pluginId = request.params("plugin_id"); String clusterId = request.params("cluster_profile_id"); try { String clusterStatusReport = elasticAgentPluginService.getClusterStatusReport(pluginId, clusterId); Map<Object, Object> object = new HashMap<>() {{ put("viewTitle", "Cluster Status Report"); put("viewFromPlugin", clusterStatusReport); }}; return new ModelAndView(object, "status_reports/index.ftlh"); } catch (RecordNotFoundException e) { return errorPage(response, 404, "Cluster Status Report", e.getMessage()); } catch (DataRetrievalFailureException | UnsupportedOperationException e) { String message = String.format("Status Report for plugin with id: '%s' for cluster '%s' is not found.", pluginId, clusterId); return errorPage(response, 404, "Cluster Status Report", message); } catch (RulesViolationException | SecretResolutionFailureException e) { LOGGER.error(e.getMessage(), e); return errorPage(response, 500, "Cluster Status Report", e.getMessage()); } catch (Exception e) { LOGGER.error(e.getMessage(), e); return errorPage(response, 500, "Cluster Status Report", UNKNOWN_ERROR_MESSAGE); } } private ModelAndView errorPage(Response response, int statusCode, String viewTitle, String message) { response.status(statusCode); return new ModelAndView(Map.of("viewTitle", viewTitle, "message", message), "status_reports/error.ftlh"); } private String parseElasticAgentId(Request request) { String elasticAgentId = request.params("elastic_agent_id"); if ("unassigned".equalsIgnoreCase(elasticAgentId)) { return null; } return elasticAgentId; } }
apache-2.0
jsrudani/HadoopHDFSProject
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
14253
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.balancer;

import static org.junit.Assert.assertEquals;

import java.io.IOException;
import java.net.URI;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeoutException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSClusterWithNodeGroup;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithNodeGroup;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.NetworkTopologyWithNodeGroup;
import org.junit.Assert;
import org.junit.Test;

/**
 * This class tests if a balancer schedules tasks correctly when the cluster
 * uses a node-group-aware network topology. Each test spins up a
 * MiniDFSClusterWithNodeGroup, fills it to a target utilization, adds an
 * empty datanode, runs the balancer, and checks the resulting distribution.
 */
public class TestBalancerWithNodeGroup {
  private static final Log LOG = LogFactory.getLog(
  "org.apache.hadoop.hdfs.TestBalancerWithNodeGroup");

  // Simulated capacity of each datanode, in bytes.
  final private static long CAPACITY = 500L;
  // Rack and node-group topology labels used across the tests.
  final private static String RACK0 = "/rack0";
  final private static String RACK1 = "/rack1";
  final private static String NODEGROUP0 = "/nodegroup0";
  final private static String NODEGROUP1 = "/nodegroup1";
  final private static String NODEGROUP2 = "/nodegroup2";
  // Test file created to fill the cluster to the desired utilization.
  final static private String fileName = "/tmp.txt";
  final static private Path filePath = new Path(fileName);
  MiniDFSClusterWithNodeGroup cluster;
  ClientProtocol client;
  // Per-wait-loop timeout in milliseconds.
  static final long TIMEOUT = 20000L; //msec
  // Allowed relative error when waiting for capacity/usage heartbeats (0.5%).
  static final double CAPACITY_ALLOWED_VARIANCE = 0.005; // 0.5%
  // Allowed per-node deviation from average utilization after balancing.
  static final double BALANCE_ALLOWED_VARIANCE = 0.11; // 10%+delta
  static final int DEFAULT_BLOCK_SIZE = 10;

  static {
    // Shorten the balancer's block-move wait so the tests run quickly.
    Balancer.setBlockMoveWaitTime(1000L);
  }

  // Builds a conf with node-group-aware topology and block placement policy.
  static Configuration createConf() {
    Configuration conf = new HdfsConfiguration();
    TestBalancer.initConf(conf);
    conf.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY,
        NetworkTopologyWithNodeGroup.class.getName());
    conf.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
        BlockPlacementPolicyWithNodeGroup.class.getName());
    return conf;
  }

  /**
   * Wait until heartbeat gives expected results, within CAPACITY_ALLOWED_VARIANCE,
   * summed over all nodes.  Times out after TIMEOUT msec.
   * @param expectedUsedSpace
   * @param expectedTotalSpace
   * @throws IOException - if getStats() fails
   * @throws TimeoutException
   */
  private void waitForHeartBeat(long expectedUsedSpace, long expectedTotalSpace)
      throws IOException, TimeoutException {
    long timeout = TIMEOUT;
    long failtime = (timeout <= 0L) ? Long.MAX_VALUE
        : System.currentTimeMillis() + timeout;
    while (true) {
      // getStats()[0] is total capacity, [1] is used space.
      long[] status = client.getStats();
      double totalSpaceVariance = Math.abs((double)status[0] - expectedTotalSpace)
          / expectedTotalSpace;
      double usedSpaceVariance = Math.abs((double)status[1] - expectedUsedSpace)
          / expectedUsedSpace;
      if (totalSpaceVariance < CAPACITY_ALLOWED_VARIANCE
          && usedSpaceVariance < CAPACITY_ALLOWED_VARIANCE)
        break; //done

      if (System.currentTimeMillis() > failtime) {
        throw new TimeoutException("Cluster failed to reached expected values of "
            + "totalSpace (current: " + status[0]
            + ", expected: " + expectedTotalSpace
            + "), or usedSpace (current: " + status[1]
            + ", expected: " + expectedUsedSpace
            + "), in more than " + timeout + " msec.");
      }
      try {
        Thread.sleep(100L);
      } catch(InterruptedException ignored) {
      }
    }
  }

  /**
   * Wait until balanced: each datanode gives utilization within
   * BALANCE_ALLOWED_VARIANCE of average
   * @throws IOException
   * @throws TimeoutException
   */
  private void waitForBalancer(long totalUsedSpace, long totalCapacity)
      throws IOException, TimeoutException {
    long timeout = TIMEOUT;
    long failtime = (timeout <= 0L) ? Long.MAX_VALUE
        : System.currentTimeMillis() + timeout;
    final double avgUtilization = ((double)totalUsedSpace) / totalCapacity;
    boolean balanced;
    do {
      DatanodeInfo[] datanodeReport =
          client.getDatanodeReport(DatanodeReportType.ALL);
      assertEquals(datanodeReport.length, cluster.getDataNodes().size());
      balanced = true;
      for (DatanodeInfo datanode : datanodeReport) {
        double nodeUtilization = ((double)datanode.getDfsUsed())
            / datanode.getCapacity();
        if (Math.abs(avgUtilization - nodeUtilization) >
            BALANCE_ALLOWED_VARIANCE) {
          // One node is still out of range; time out or retry after a pause.
          balanced = false;
          if (System.currentTimeMillis() > failtime) {
            throw new TimeoutException(
                "Rebalancing expected avg utilization to become "
                + avgUtilization + ", but on datanode " + datanode
                + " it remains at " + nodeUtilization
                + " after more than " + TIMEOUT + " msec.");
          }
          try {
            Thread.sleep(100);
          } catch (InterruptedException ignored) {
          }
          break;
        }
      }
    } while (!balanced);
  }

  // Runs the balancer and requires it to fully balance the cluster.
  private void runBalancer(Configuration conf,
      long totalUsedSpace, long totalCapacity) throws Exception {
    waitForHeartBeat(totalUsedSpace, totalCapacity);

    // start rebalancing
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    // NOTE: "DEFALUT" is the (misspelled) name in Hadoop's Balancer API.
    final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, conf);
    assertEquals(Balancer.ReturnStatus.SUCCESS.code, r);

    waitForHeartBeat(totalUsedSpace, totalCapacity);
    LOG.info("Rebalancing with default factor.");
    waitForBalancer(totalUsedSpace, totalCapacity);
  }

  // Runs the balancer but only requires it to terminate (success or
  // no-move-progress), without asserting a fully balanced distribution.
  private void runBalancerCanFinish(Configuration conf,
      long totalUsedSpace, long totalCapacity) throws Exception {
    waitForHeartBeat(totalUsedSpace, totalCapacity);

    // start rebalancing
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, conf);
    Assert.assertTrue(r == Balancer.ReturnStatus.SUCCESS.code ||
        (r == Balancer.ReturnStatus.NO_MOVE_PROGRESS.code));
    waitForHeartBeat(totalUsedSpace, totalCapacity);
    LOG.info("Rebalancing with default factor.");
  }

  /**
   * Create a cluster with even distribution, and a new empty node is added to
   * the cluster, then test rack locality for balancer policy.
   */
  @Test(timeout=60000)
  public void testBalancerWithRackLocality() throws Exception {
    Configuration conf = createConf();
    long[] capacities = new long[]{CAPACITY, CAPACITY};
    String[] racks = new String[]{RACK0, RACK1};
    String[] nodeGroups = new String[]{NODEGROUP0, NODEGROUP1};

    int numOfDatanodes = capacities.length;
    assertEquals(numOfDatanodes, racks.length);
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
        .numDataNodes(capacities.length)
        .racks(racks)
        .simulatedCapacities(capacities);
    MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
    cluster = new MiniDFSClusterWithNodeGroup(builder);
    try {
      cluster.waitActive();
      client = NameNodeProxies.createProxy(conf,
          cluster.getFileSystem(0).getUri(),
          ClientProtocol.class).getProxy();

      long totalCapacity = TestBalancer.sum(capacities);

      // fill up the cluster to be 30% full
      long totalUsedSpace = totalCapacity * 3 / 10;
      TestBalancer.createFile(cluster, filePath,
          totalUsedSpace / numOfDatanodes, (short) numOfDatanodes, 0);

      long newCapacity = CAPACITY;
      String newRack = RACK1;
      String newNodeGroup = NODEGROUP2;
      // start up an empty node with the same capacity and on the same rack
      cluster.startDataNodes(conf, 1, true, null, new String[]{newRack},
          new long[] {newCapacity}, new String[]{newNodeGroup});

      totalCapacity += newCapacity;

      // run balancer and validate results
      runBalancerCanFinish(conf, totalUsedSpace, totalCapacity);

      DatanodeInfo[] datanodeReport =
          client.getDatanodeReport(DatanodeReportType.ALL);

      // Sum used capacity per rack; the balancer should not have moved data
      // across racks, so both racks should end with equal usage.
      Map<String, Integer> rackToUsedCapacity = new HashMap<String, Integer>();
      for (DatanodeInfo datanode: datanodeReport) {
        String rack = NetworkTopology.getFirstHalf(datanode.getNetworkLocation());
        int usedCapacity = (int) datanode.getDfsUsed();

        if (rackToUsedCapacity.get(rack) != null) {
          rackToUsedCapacity.put(rack, usedCapacity
              + rackToUsedCapacity.get(rack));
        } else {
          rackToUsedCapacity.put(rack, usedCapacity);
        }
      }
      assertEquals(rackToUsedCapacity.size(), 2);
      assertEquals(rackToUsedCapacity.get(RACK0),
          rackToUsedCapacity.get(RACK1));
    } finally {
      cluster.shutdown();
    }
  }

  /**
   * Create a cluster with even distribution, and a new empty node is added to
   * the cluster, then test node-group locality for balancer policy.
   */
  @Test(timeout=60000)
  public void testBalancerWithNodeGroup() throws Exception {
    Configuration conf = createConf();
    long[] capacities = new long[]{CAPACITY, CAPACITY, CAPACITY, CAPACITY};
    String[] racks = new String[]{RACK0, RACK0, RACK1, RACK1};
    String[] nodeGroups = new String[]{NODEGROUP0, NODEGROUP0,
        NODEGROUP1, NODEGROUP2};

    int numOfDatanodes = capacities.length;
    assertEquals(numOfDatanodes, racks.length);
    assertEquals(numOfDatanodes, nodeGroups.length);
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
        .numDataNodes(capacities.length)
        .racks(racks)
        .simulatedCapacities(capacities);
    MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
    cluster = new MiniDFSClusterWithNodeGroup(builder);
    try {
      cluster.waitActive();
      client = NameNodeProxies.createProxy(conf,
          cluster.getFileSystem(0).getUri(),
          ClientProtocol.class).getProxy();

      long totalCapacity = TestBalancer.sum(capacities);

      // fill up the cluster to be 20% full
      long totalUsedSpace = totalCapacity * 2 / 10;
      TestBalancer.createFile(cluster, filePath,
          totalUsedSpace / (numOfDatanodes/2), (short) (numOfDatanodes/2), 0);

      long newCapacity = CAPACITY;
      String newRack = RACK1;
      String newNodeGroup = NODEGROUP2;
      // start up an empty node with the same capacity and on NODEGROUP2
      cluster.startDataNodes(conf, 1, true, null, new String[]{newRack},
          new long[] {newCapacity}, new String[]{newNodeGroup});

      totalCapacity += newCapacity;

      // run balancer and validate results
      runBalancer(conf, totalUsedSpace, totalCapacity);
    } finally {
      cluster.shutdown();
    }
  }

  /**
   * Create a 4 nodes cluster: 2 nodes (n0, n1) in RACK0/NODEGROUP0, 1 node (n2)
   * in RACK1/NODEGROUP1 and 1 node (n3) in RACK1/NODEGROUP2. Fill the cluster
   * to 60% and 3 replicas, so n2 and n3 will have replica for all blocks according
   * to replica placement policy with NodeGroup. As a result, n2 and n3 will be
   * filled with 80% (60% x 4 / 3), and no blocks can be migrated from n2 and n3
   * to n0 or n1 as balancer policy with node group. Thus, we expect the balancer
   * to end in 5 iterations without move block process.
   */
  @Test(timeout=60000)
  public void testBalancerEndInNoMoveProgress() throws Exception {
    Configuration conf = createConf();
    long[] capacities = new long[]{CAPACITY, CAPACITY, CAPACITY, CAPACITY};
    String[] racks = new String[]{RACK0, RACK0, RACK1, RACK1};
    String[] nodeGroups = new String[]{NODEGROUP0, NODEGROUP0,
        NODEGROUP1, NODEGROUP2};

    int numOfDatanodes = capacities.length;
    assertEquals(numOfDatanodes, racks.length);
    assertEquals(numOfDatanodes, nodeGroups.length);
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
        .numDataNodes(capacities.length)
        .racks(racks)
        .simulatedCapacities(capacities);
    MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
    cluster = new MiniDFSClusterWithNodeGroup(builder);
    try {
      cluster.waitActive();
      client = NameNodeProxies.createProxy(conf,
          cluster.getFileSystem(0).getUri(),
          ClientProtocol.class).getProxy();

      long totalCapacity = TestBalancer.sum(capacities);

      // fill up the cluster to be 60% full
      long totalUsedSpace = totalCapacity * 6 / 10;
      TestBalancer.createFile(cluster, filePath,
          totalUsedSpace / 3, (short) (3), 0);

      // run balancer which can finish in 5 iterations with no block movement.
      runBalancerCanFinish(conf, totalUsedSpace, totalCapacity);
    } finally {
      cluster.shutdown();
    }
  }
}
apache-2.0
mgsx-dev/box2d-editor
core/src/net/mgsx/game/blueprint/ui/GraphView.java
15686
package net.mgsx.game.blueprint.ui; import com.badlogic.gdx.Gdx; import com.badlogic.gdx.Input; import com.badlogic.gdx.files.FileHandle; import com.badlogic.gdx.graphics.Color; import com.badlogic.gdx.graphics.g2d.Batch; import com.badlogic.gdx.graphics.glutils.ShapeRenderer; import com.badlogic.gdx.graphics.glutils.ShapeRenderer.ShapeType; import com.badlogic.gdx.math.Rectangle; import com.badlogic.gdx.math.Vector2; import com.badlogic.gdx.scenes.scene2d.Actor; import com.badlogic.gdx.scenes.scene2d.InputEvent; import com.badlogic.gdx.scenes.scene2d.InputListener; import com.badlogic.gdx.scenes.scene2d.Touchable; import com.badlogic.gdx.scenes.scene2d.ui.SelectBox; import com.badlogic.gdx.scenes.scene2d.ui.Skin; import com.badlogic.gdx.scenes.scene2d.ui.WidgetGroup; import com.badlogic.gdx.scenes.scene2d.utils.ChangeListener; import com.badlogic.gdx.scenes.scene2d.utils.DragListener; import com.badlogic.gdx.scenes.scene2d.utils.Layout; import com.badlogic.gdx.utils.Array; import com.badlogic.gdx.utils.ObjectMap; import com.badlogic.gdx.utils.ObjectMap.Entry; import net.mgsx.game.blueprint.annotations.Node; import net.mgsx.game.blueprint.events.GraphEvent.LinkAddedEvent; import net.mgsx.game.blueprint.events.GraphEvent.NodeMovedEvent; import net.mgsx.game.blueprint.model.Graph; import net.mgsx.game.blueprint.model.GraphNode; import net.mgsx.game.blueprint.model.Link; import net.mgsx.game.blueprint.model.Portlet; import net.mgsx.game.blueprint.storage.GraphIO; import net.mgsx.game.core.helpers.ReflectionHelper; public class GraphView extends WidgetGroup { public static enum LinkLayout{ DIRECT, ORTHO } public static class GraphViewConfig{ public final ObjectMap<Class, Color> colorByType = new ObjectMap<Class, Color>(); public FileHandle file; public void setTypeColor(Color color, Class... 
types) { for(Class type : types) colorByType.put(type, color); } public boolean isKeyDownSave(int key) { return isControlsPressed() && key == Input.Keys.S; } public static boolean isControlsPressed(){ return Gdx.input.isKeyPressed(Input.Keys.CONTROL_LEFT) || Gdx.input.isKeyPressed(Input.Keys.CONTROL_RIGHT); } } public LinkLayout linkLayout = LinkLayout.ORTHO; private Array<NodeFactory> nodeFactories = new Array<NodeFactory>(); protected final Graph graph; private ShapeRenderer renderer; protected final Skin skin; private Portlet dragPortlet; private boolean dragValid; private SelectBox<String> selector; protected final GraphViewConfig config; public GraphView(final Graph graph, final Skin skin) { this(graph, skin, new GraphViewConfig()); } public GraphView(final Graph graph, final Skin skin, final GraphViewConfig config) { super(); this.graph = graph; this.skin = skin; this.config = config; setScale(1f); setTransform(true); setTouchable(Touchable.enabled); for(GraphNode node : graph.nodes){ addNode(node); } renderer = new ShapeRenderer(); addListener(new InputListener(){ @Override public boolean keyDown(InputEvent event, int keycode) { boolean addNodePuredataLike = keycode == Input.Keys.NUM_1 && Gdx.input.isKeyPressed(Input.Keys.CONTROL_LEFT) && Gdx.input.isKeyPressed(Input.Keys.SHIFT_LEFT); boolean addNodeBlenderLike = keycode == Input.Keys.A && (Gdx.input.isKeyPressed(Input.Keys.SHIFT_LEFT) || Gdx.input.isKeyPressed(Input.Keys.SHIFT_RIGHT)); if(keycode == Input.Keys.ESCAPE){ if(selector != null){ selector.remove(); selector = null; } }else if(config.isKeyDownSave(keycode)){ if(config.file != null){ new GraphIO().save(graph, config.file); }else{ Gdx.app.error("Blueprint", "Warning: no file configured, skip saving."); } } else if(addNodePuredataLike || addNodeBlenderLike){ selector = new SelectBox<String>(skin); Array<String> items = new Array<String>(); items.add(""); final ObjectMap<String, NodeFactory> map = new ObjectMap<String, NodeFactory>(); for(NodeFactory 
factory : nodeFactories){ String key = factory.displayName(); map.put(key, factory); items.add(key); } selector.setItems(items); addActor(selector); selector.validate(); final Vector2 pos = screenToLocalCoordinates(new Vector2(Gdx.input.getX(), Gdx.input.getY())); selector.setBounds(pos.x, pos.y, selector.getPrefWidth(), selector.getPrefHeight()); selector.addListener(new ChangeListener() { @Override public void changed(ChangeEvent event, Actor actor) { NodeFactory factory = map.get(selector.getSelected()); Object object = factory.create(); selector.remove(); selector = null; addNode(graph.addNode(object, pos.x, pos.y)); } }); selector.showList(); } return super.keyDown(event, keycode); } }); addListener(new DragListener(){ private NodeView dragNode; private float px, py; private Portlet dropPortlet; @Override public boolean touchDown(InputEvent event, float x, float y, int pointer, int button) { getStage().setKeyboardFocus(GraphView.this); begin(x, y); return super.touchDown(event, x, y, pointer, button); } @Override public void dragStart(InputEvent event, float x, float y, int pointer) { //begin(x, y); } private void begin(float x, float y){ Actor actor = hit(x, y, isTouchable()); px = x; py = y; while(actor != null && actor != GraphView.this){ if(actor instanceof NodeView){ dragNode = (NodeView)actor; break; }else if(actor.getUserObject() instanceof Portlet){ dragPortlet = (Portlet)actor.getUserObject(); break; } actor = actor.getParent(); } } @Override public void drag(InputEvent event, float x, float y, int pointer) { dragValid = true; if(dragNode != null){ float newX = dragNode.getX() + x - px; float newY = dragNode.getY() + y - py; dragNode.setX(newX); dragNode.setY(newY); dragNode.node.position.set(newX, newY); // TODO events ! 
invalidateHierarchy(); getStage().cancelTouchFocusExcept(this, GraphView.this); }else if(dragPortlet != null){ if(dropPortlet != null) dropPortlet.actor.setColor(dropPortlet.color); dropPortlet = null; // TODO check cycles and compatibility ? Actor actor = hit(x, y, isTouchable()); while(actor != null && actor != GraphView.this){ if(actor instanceof NodeView){ // dragNode = (Node)actor; // break; }else if(actor.getUserObject() instanceof Portlet){ Portlet portlet = (Portlet)actor.getUserObject(); if(dragPortlet.outlet != null && portlet.inlet != null || dragPortlet.inlet != null && portlet.outlet != null){ dropPortlet = portlet; boolean outInCompatible = dragPortlet.inlet != null && dropPortlet.accessor.getType().isAssignableFrom(dragPortlet.accessor.getType()); boolean inOutCompatible = dragPortlet.outlet != null && dragPortlet.accessor.getType().isAssignableFrom(dropPortlet.accessor.getType()); dragValid = outInCompatible || inOutCompatible; if(dragValid){ dropPortlet.actor.getColor().add(Color.DARK_GRAY); }else{ dropPortlet.actor.getColor().mul(.5f); } break; } } actor = actor.getParent(); } getStage().cancelTouchFocusExcept(this, GraphView.this); }else{ } px = x; py = y; super.drag(event, x, y, pointer); } @Override public void dragStop(InputEvent event, float x, float y, int pointer) { end(); super.dragStop(event, x, y, pointer); } @Override public void touchUp(InputEvent event, float x, float y, int pointer, int button) { end(); super.touchUp(event, x, y, pointer, button); } private void end() { if(dragNode != null){ fire(new NodeMovedEvent(dragNode.node)); } dragNode = null; if(dropPortlet != null){ dropPortlet.actor.setColor(dropPortlet.color); } if(dragPortlet != null && dropPortlet != null){ if(dragValid){ Link link = null; if(dragPortlet.outlet != null && dropPortlet.inlet != null){ link = graph.addLink(dragPortlet, dropPortlet); }else if(dragPortlet.inlet != null && dropPortlet.outlet != null){ link = graph.addLink(dropPortlet, dragPortlet); } if(link 
!= null){ fire(new LinkAddedEvent(link)); } } } else if(dragPortlet != null){ // TODO create ? } dragPortlet = null; dropPortlet = null; } }); } private ObjectMap<GraphNode, NodeView> nodeViews = new ObjectMap<GraphNode, NodeView>(); protected NodeView createNodeView(GraphNode node){ return new NodeView(config, graph, node, skin); } public void load(){ if(config.file != null && config.file.exists()){ // TODO clear ? new GraphIO().load(graph, config.file, nodeFactories); for(GraphNode node : graph.nodes){ addNode(node); } } } private void addNode(GraphNode node){ NodeView nodeView = createNodeView(node); nodeView.setX(node.position.x); nodeView.setY(node.position.y); addActor(nodeView); nodeViews.put(node, nodeView); Color c = Color.WHITE; for(Entry<Class, Color> e : config.colorByType){ if(e.key.isAssignableFrom(node.object.getClass())){ c = e.value; break; } } nodeView.setColor(c); node.color.set(c); } Rectangle bounds = new Rectangle(); @Override public void layout() { bounds.set(0, 0, 0, 0); for(Actor actor : getChildren()){ if(actor instanceof Layout){ ((Layout) actor).layout(); actor.setSize(((Layout) actor).getPrefWidth(), ((Layout) actor).getPrefHeight()); } bounds.merge(actor.getX(), actor.getY()); bounds.merge(actor.getX() + actor.getWidth(), actor.getY() + actor.getHeight()); } for(Actor actor : getChildren()){ actor.setX(actor.getX() - bounds.x); actor.setY(actor.getY() - bounds.y); } bounds.width -= bounds.x; bounds.x = 0; bounds.height -= bounds.y; bounds.y = 0; } @Override public float getPrefWidth() { return bounds.width; } @Override public float getPrefHeight() { return bounds.height; } private Vector2 a = new Vector2(), b = new Vector2(), m = new Vector2(); private Rectangle ar = new Rectangle(), br = new Rectangle(); @Override public void draw(Batch batch, float parentAlpha) { super.draw(batch, parentAlpha); batch.end(); if (isTransform()) applyTransform(batch, computeTransform()); renderer.setProjectionMatrix(batch.getProjectionMatrix()); 
renderer.setTransformMatrix(batch.getTransformMatrix()); renderer.begin(ShapeType.Line); renderer.setColor(Color.CYAN); for(Link link : graph.links){ renderer.setColor(link.src.color); link.src.actor.localToAscendantCoordinates(this, a.set(link.getSrcPosition())); link.dst.actor.localToAscendantCoordinates(this, b.set(link.getDstPosition())); NodeView actor; actor = nodeViews.get(link.src.node); ar.set(actor.getX(), actor.getY(), actor.getWidth(), actor.getHeight()); actor = nodeViews.get(link.dst.node); br.set(actor.getX(), actor.getY(), actor.getWidth(), actor.getHeight()); m.set(a).add(b).scl(.5f); if(ar.y < br.y){ m.y = (br.y + ar.y + ar.height) / 2; }else{ m.y = (ar.y + br.y + br.height) / 2; } drawLink(renderer, a, b); } if(dragPortlet != null){ dragPortlet.actor.localToAscendantCoordinates(this, Link.getCenter(dragPortlet.actor, a)); getStage().screenToStageCoordinates(b.set(Gdx.input.getX(), Gdx.input.getY())); stageToLocalCoordinates(b); //.add(getCullingArea().x, getY()); m.set(a).add(b).scl(.5f); ar.set(m.x, m.y, 0, 0); br.set(m.x, m.y, 0, 0); if(dragValid){ renderer.setColor(Color.WHITE); }else{ renderer.setColor(Color.DARK_GRAY); } // renderer.line(a, b); if(dragPortlet.outlet != null) drawLink(renderer, a, b); else drawLink(renderer, b, a); } renderer.end(); if (isTransform()) resetTransform(batch); batch.begin(); } private void drawLink(ShapeRenderer renderer, Vector2 a, Vector2 b) { float arrowSize = 12; float leafMinSize = 32; float borderMinSize = 32; switch(linkLayout){ case DIRECT: renderer.line(a, b); break; default: case ORTHO: if(b.x - a.x > leafMinSize * 2){ renderer.line(a.x, a.y, m.x, a.y); renderer.line(m.x, a.y, m.x, b.y); renderer.line(m.x, b.y, b.x, b.y); }else if(Math.abs(a.x - b.x) < leafMinSize * 2 && Math.abs(a.y - b.y) < leafMinSize * 2){ renderer.line(a, b); }else{ if(br.y + br.height > ar.y && br.y < ar.y){ // renderer.setColor(Color.RED); renderer.line(a.x, a.y, a.x + leafMinSize, a.y); renderer.line(a.x + leafMinSize, a.y, a.x 
+ leafMinSize, ar.y + ar.height + borderMinSize); renderer.line(a.x + leafMinSize, ar.y + ar.height + borderMinSize, b.x - leafMinSize, ar.y + ar.height + borderMinSize); renderer.line(b.x - leafMinSize, ar.y + ar.height + borderMinSize, b.x - leafMinSize, b.y); renderer.line(b.x - leafMinSize, b.y, b.x, b.y); }else if(ar.y + ar.height > br.y && ar.y < br.y){ // renderer.setColor(Color.BLUE); renderer.line(a.x, a.y, a.x + leafMinSize, a.y); renderer.line(a.x + leafMinSize, a.y, a.x + leafMinSize, br.y + br.height + borderMinSize); renderer.line(a.x + leafMinSize, br.y + br.height + borderMinSize, b.x - leafMinSize, br.y + br.height + borderMinSize); renderer.line(b.x - leafMinSize, br.y + br.height + borderMinSize, b.x - leafMinSize, b.y); renderer.line(b.x - leafMinSize, b.y, b.x, b.y); }else if(ar.y < br.y){ // renderer.setColor(Color.BROWN); renderer.line(a.x, a.y, a.x + leafMinSize, a.y); renderer.line(a.x + leafMinSize, a.y, a.x + leafMinSize, m.y); renderer.line(a.x + leafMinSize, m.y, b.x - leafMinSize, m.y); renderer.line(b.x - leafMinSize, m.y, b.x - leafMinSize, b.y); renderer.line(b.x - leafMinSize, b.y, b.x, b.y); }else{ // renderer.setColor(Color.YELLOW); renderer.line(a.x, a.y, a.x + leafMinSize, a.y); renderer.line(a.x + leafMinSize, a.y, a.x + leafMinSize, m.y); renderer.line(a.x + leafMinSize, m.y, b.x - leafMinSize, m.y); renderer.line(b.x - leafMinSize, m.y, b.x - leafMinSize, b.y); renderer.line(b.x - leafMinSize, b.y, b.x, b.y); } } break; } renderer.circle(a.x, a.y, 2); renderer.circle(b.x, b.y, 6); // renderer.line(b.x, b.y, b.x - arrowSize, b.y - arrowSize); // renderer.line(b.x, b.y, b.x - arrowSize, b.y + arrowSize); } public void addNodeType(Class<?> ...types) { for(final Class<?> type : types) addType(type); } public <T> void addNodeType(Array<Class<? 
extends T>> types) { for(final Class<?> type : types) addType(type); } private void addType(final Class<?> type){ addNodeFactory(new NodeFactory() { @Override public String displayName() { Node node = type.getAnnotation(Node.class); if(node != null && !node.value().isEmpty()){ return node.value(); } return type.getSimpleName(); } @Override public Object create() { return ReflectionHelper.newInstance(type); } }); } public static interface NodeFactory { public Object create(); public String displayName(); } public void addNodeFactory(NodeFactory nodeFactory){ nodeFactories.add(nodeFactory); } }
apache-2.0
diennea/majordodo
majordodo-net/src/main/java/majordodo/network/netty/GenericNettyBrokerLocator.java
4536
/*
 Licensed to Diennea S.r.l. under one or more contributor
 license agreements. See the NOTICE file distributed with
 this work for additional information regarding copyright
 ownership. Diennea S.r.l. licenses this file to you under
 the Apache License, Version 2.0 (the
 "License"); you may not use this file except in compliance
 with the License.  You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing,
 software distributed under the License is distributed on an
 "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 KIND, either express or implied.  See the License for the
 specific language governing permissions and limitations
 under the License.

 */
package majordodo.network.netty;

import majordodo.network.BrokerLocator;
import majordodo.network.BrokerNotAvailableException;
import majordodo.network.BrokerRejectedConnectionException;
import majordodo.network.Channel;
import majordodo.network.ChannelEventListener;
import majordodo.network.ConnectionRequestInfo;
import majordodo.network.Message;
import java.net.InetSocketAddress;
import java.util.concurrent.TimeoutException;
import java.util.logging.Level;
import java.util.logging.Logger;
import majordodo.network.BrokerHostData;
import static majordodo.security.sasl.ClientAuthenticationUtils.performAuthentication;

/**
 * Network connection, based on Netty.
 * <p>
 * Base class for broker locators: subclasses supply the broker address via
 * {@link #getServer()}, and {@link #connect} performs the full client-side
 * handshake (TCP/SSL connect, SASL authentication, CONNECTION_REQUEST/ACK
 * exchange) over a {@link NettyChannel}.
 *
 * @author enrico.olivelli
 */
public abstract class GenericNettyBrokerLocator implements BrokerLocator {

    /**
     * Resolves the broker to connect to. May return null when no broker is
     * currently known; {@link #connect} turns that into a
     * {@link BrokerNotAvailableException}.
     */
    protected abstract BrokerHostData getServer();

    // When true, SSL certificate validation is relaxed (see NettyConnector).
    private boolean sslUnsecure;

    public boolean isSslUnsecure() {
        return sslUnsecure;
    }

    public void setSslUnsecure(boolean sslUnsecure) {
        this.sslUnsecure = sslUnsecure;
    }

    /**
     * Opens an authenticated channel to the broker returned by {@link #getServer()}.
     * <p>
     * Failure mapping:
     * <ul>
     * <li>no broker / TCP connect failure / reply timeout → {@link BrokerNotAvailableException}</li>
     * <li>authentication failure / non-ACK reply → {@link BrokerRejectedConnectionException}</li>
     * </ul>
     * On any failure the underlying connector is closed (see the finally block);
     * on success the caller owns the returned channel.
     *
     * @param messageReceiver listener wired into the new channel for inbound messages
     * @param workerInfo      identity, capacity and group data sent in the CONNECTION_REQUEST
     * @return the connected, authenticated channel
     */
    @Override
    public Channel connect(ChannelEventListener messageReceiver, ConnectionRequestInfo workerInfo)
            throws InterruptedException, BrokerNotAvailableException, BrokerRejectedConnectionException {
        // 'ok' flips to true only after the broker ACKs; it gates the cleanup below.
        boolean ok = false;
        NettyConnector connector = new NettyConnector(messageReceiver);
        try {
            BrokerHostData broker = getServer();
            // NOTE(review): logged at SEVERE although this is an informational
            // message — confirm whether INFO was intended.
            LOGGER.log(Level.SEVERE, "connect to broker {0}", broker);
            if (broker == null) {
                throw new BrokerNotAvailableException(new Exception("no broker available"));
            }
            InetSocketAddress addre = broker.getSocketAddress();
            connector.setPort(addre.getPort());
            String host = addre.getHostName();
            if (host == null) {
                // unresolved hostname: fall back to the literal IP address
                host = addre.getAddress().getHostAddress();
            }
            connector.setHost(host);
            connector.setSsl(broker.isSsl());
            connector.setSslUnsecure(sslUnsecure);
            NettyChannel channel;
            try {
                channel = connector.connect();
            } catch (final Exception e) {
                // any low-level connect error means the broker is unreachable
                throw new BrokerNotAvailableException(e);
            }
            // SASL authentication must complete before any application message.
            try {
                performAuthentication(channel, channel.getRemoteHost(), workerInfo.getSharedSecret());
            } catch (Exception err) {
                throw new BrokerRejectedConnectionException("auth failed:" + err, err);
            }
            // Announce this client's identity/capacity; the broker replies ACK or an error.
            Message acceptMessage = Message.CONNECTION_REQUEST(workerInfo.getWorkerId(),
                    workerInfo.getProcessId(), workerInfo.getLocation(), workerInfo.getSharedSecret(),
                    workerInfo.getRunningTaskIds(), workerInfo.getMaxThreads(),
                    workerInfo.getMaxThreadsByTaskType(), workerInfo.getGroups(),
                    workerInfo.getExcludedGroups(), workerInfo.getResourceLimits(),
                    workerInfo.getClientType(), workerInfo.getMaxThreadPerUserPerTaskTypePercent());
            try {
                // 10s timeout for the broker's reply to the connection request
                Message connectionResponse = channel.sendMessageWithReply(acceptMessage, 10000);
                if (connectionResponse.type == Message.TYPE_ACK) {
                    ok = true;
                    return channel;
                } else {
                    throw new BrokerRejectedConnectionException("Broker rejected connection, response message:" + connectionResponse);
                }
            } catch (TimeoutException err) {
                throw new BrokerNotAvailableException(err);
            }
        } finally {
            // Close the connector on any failure path; 'connector' is assigned
            // above from 'new', so the null check is purely defensive.
            if (!ok && connector != null) {
                connector.close();
            }
        }
    }
    private static final Logger LOGGER = Logger.getLogger(GenericNettyBrokerLocator.class.getName());
}
apache-2.0
HebaKhaled/bposs
src/com.mentor.nucleus.bp.ui.text.test/src/com/mentor/nucleus/bp/ui/text/test/i589Test/AllEditorsDirtyTest.java
2741
//===================================================================== // //File: $RCSfile: AllEditorsDirtyTest.java,v $ //Version: $Revision: 1.9 $ //Modified: $Date: 2013/05/10 06:01:48 $ // //(c) Copyright 2004-2014 by Mentor Graphics Corp. All rights reserved. // //===================================================================== // Licensed under the Apache License, Version 2.0 (the "License"); you may not // use this file except in compliance with the License. You may obtain a copy // of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the // License for the specific language governing permissions and limitations under // the License. //===================================================================== package com.mentor.nucleus.bp.ui.text.test.i589Test; import org.eclipse.core.runtime.CoreException; import org.eclipse.jface.text.IDocument; import org.eclipse.ui.IEditorReference; import org.eclipse.ui.IWorkbenchPage; import org.eclipse.ui.IWorkbenchWindow; import org.eclipse.ui.PlatformUI; import org.eclipse.ui.editors.text.TextEditor; import com.mentor.nucleus.bp.ui.text.activity.ActivityEditor; import com.mentor.nucleus.bp.ui.text.description.DescriptionEditor; import com.mentor.nucleus.bp.ui.text.test.UITextTest; public class AllEditorsDirtyTest extends UITextTest { public AllEditorsDirtyTest(String projectName, String name) throws CoreException { super(null, name); } public AllEditorsDirtyTest(String name) throws CoreException { super(null, name); } protected void tearDown() throws Exception { super.tearDown(); } public void testMakeAllEditorsDirty() { IWorkbenchWindow[] windows = PlatformUI.getWorkbench().getWorkbenchWindows(); for (int i = 0; i < windows.length; ++i) { IWorkbenchPage[] pages = 
windows[i].getPages(); for (int j = 0; j < pages.length; ++j) { IEditorReference[] editors = pages[j].getEditorReferences(); for (int k = 0; k < editors.length; ++k) { if (editors[k].getPart(false) instanceof ActivityEditor || editors[k].getPart(false) instanceof DescriptionEditor ) { TextEditor editor =(TextEditor)editors[k].getPart(false); IDocument doc =editor.getDocumentProvider().getDocument(editor.getEditorInput()); doc.set(editor.getTitle()); assertTrue( editor.isSaveOnCloseNeeded() ); assertTrue( editor.isDirty() ); } } } } } }
apache-2.0
smalldatalab/omh-shims
shim-server/src/test/java/org/openmhealth/shim/withings/mapper/WithingsDailyCaloriesBurnedDataPointMapperUnitTests.java
4148
/* * Copyright 2015 Open mHealth * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.openmhealth.shim.withings.mapper; import com.fasterxml.jackson.databind.JsonNode; import org.openmhealth.schema.domain.omh.*; import org.openmhealth.shim.common.mapper.DataPointMapperUnitTests; import org.springframework.core.io.ClassPathResource; import org.testng.annotations.BeforeTest; import org.testng.annotations.Test; import java.io.IOException; import java.time.OffsetDateTime; import java.util.List; import static java.util.Collections.singletonList; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; import static org.openmhealth.schema.domain.omh.DataPointModality.SENSED; import static org.openmhealth.shim.withings.mapper.WithingsDataPointMapper.RESOURCE_API_SOURCE_NAME; // TODO clean up public class WithingsDailyCaloriesBurnedDataPointMapperUnitTests extends DataPointMapperUnitTests { private JsonNode responseNode; private WithingsDailyCaloriesBurnedDataPointMapper mapper = new WithingsDailyCaloriesBurnedDataPointMapper(); @BeforeTest public void initializeResponseNode() throws IOException { ClassPathResource resource = new ClassPathResource("/org/openmhealth/shim/withings/mapper/withings-activity-measures.json"); responseNode = objectMapper.readTree(resource.getInputStream()); } @Test public void asDataPointsShouldReturnCorrectNumberOfDataPoints() { List<DataPoint<CaloriesBurned>> dataPoints = 
mapper.asDataPoints(singletonList(responseNode)); assertThat(dataPoints.size(), equalTo(4)); } @Test public void asDataPointsShouldReturnCorrectDataPoints() { List<DataPoint<CaloriesBurned>> dataPoints = mapper.asDataPoints(singletonList(responseNode)); testDailyCaloriesBurnedDataPoint(dataPoints.get(0), 139, "2015-06-18T00:00:00-07:00", "2015-06-19T00:00:00-07:00"); testDailyCaloriesBurnedDataPoint(dataPoints.get(1), 130, "2015-06-19T00:00:00-07:00", "2015-06-20T00:00:00-07:00"); testDailyCaloriesBurnedDataPoint(dataPoints.get(2), 241, "2015-06-20T00:00:00-07:00", "2015-06-21T00:00:00-07:00"); testDailyCaloriesBurnedDataPoint(dataPoints.get(3), 99, "2015-02-21T00:00:00-08:00", "2015-02-22T00:00:00-08:00"); } public void testDailyCaloriesBurnedDataPoint(DataPoint<CaloriesBurned> caloriesBurnedDataPoint, long expectedCaloriesBurnedValue, String expectedDateString, String expectedEndDateString) { CaloriesBurned.Builder expectedCaloriesBurnedBuilder = new CaloriesBurned.Builder(new KcalUnitValue(KcalUnit.KILOCALORIE, expectedCaloriesBurnedValue)); expectedCaloriesBurnedBuilder.setEffectiveTimeFrame( TimeInterval.ofStartDateTimeAndEndDateTime(OffsetDateTime.parse(expectedDateString), OffsetDateTime.parse(expectedEndDateString))); CaloriesBurned testCaloriesBurned = caloriesBurnedDataPoint.getBody(); CaloriesBurned expectedCaloriesBurned = expectedCaloriesBurnedBuilder.build(); assertThat(testCaloriesBurned, equalTo(expectedCaloriesBurned)); assertThat(caloriesBurnedDataPoint.getHeader().getAcquisitionProvenance().getModality(), equalTo(SENSED)); assertThat(caloriesBurnedDataPoint.getHeader().getAcquisitionProvenance().getSourceName(), equalTo( RESOURCE_API_SOURCE_NAME)); assertThat(caloriesBurnedDataPoint.getHeader().getBodySchemaId(), equalTo(CaloriesBurned.SCHEMA_ID)); } }
apache-2.0
mdogan/hazelcast
hazelcast/src/main/java/com/hazelcast/internal/services/ObjectNamespace.java
974
/*
 * Copyright (c) 2008-2020, Hazelcast, Inc. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.hazelcast.internal.services;

/**
 * An ObjectNamespace that makes identification of object within a service possible.
 * <p>
 * Extends {@link ServiceNamespace} with an object-level name, so an instance
 * identifies a single named object inside a service rather than the service
 * as a whole.
 */
public interface ObjectNamespace extends ServiceNamespace {

    /**
     * Gets the object name within the service.
     *
     * @return the object name within the service
     */
    String getObjectName();
}
apache-2.0
andrewvc/elasticsearch
src/test/java/org/elasticsearch/index/mapper/UidTests.java
1992
/* * Licensed to Elastic Search and Shay Banon under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. Elastic Search licenses this * file to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.elasticsearch.index.mapper; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.bytes.HashedBytesArray; import org.elasticsearch.test.ElasticsearchTestCase; import org.junit.Test; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; public class UidTests extends ElasticsearchTestCase { @Test public void testCreateAndSplitId() { BytesRef createUid = Uid.createUidAsBytes("foo", "bar"); HashedBytesArray[] splitUidIntoTypeAndId = Uid.splitUidIntoTypeAndId(createUid); assertThat("foo", equalTo(splitUidIntoTypeAndId[0].toUtf8())); assertThat("bar", equalTo(splitUidIntoTypeAndId[1].toUtf8())); // split also with an offset BytesRef ref = new BytesRef(createUid.length+10); ref.offset = 9; ref.length = createUid.length; System.arraycopy(createUid.bytes, createUid.offset, ref.bytes, ref.offset, ref.length); splitUidIntoTypeAndId = Uid.splitUidIntoTypeAndId(ref); assertThat("foo", equalTo(splitUidIntoTypeAndId[0].toUtf8())); assertThat("bar", equalTo(splitUidIntoTypeAndId[1].toUtf8())); } }
apache-2.0
rhuss/jolokia
agent/core/src/test/java/org/jolokia/detector/JBossDetectorTest.java
9007
package org.jolokia.detector; /* * Copyright 2009-2011 Roland Huss * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import java.lang.instrument.Instrumentation; import java.net.MalformedURLException; import java.net.URL; import java.util.*; import javax.management.*; import org.jolokia.backend.executor.MBeanServerExecutor; import org.jolokia.request.JmxRequest; import org.jolokia.request.JmxRequestBuilder; import org.jolokia.util.RequestType; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; import static org.easymock.EasyMock.*; import static org.testng.Assert.*; /** * @author roland * @since 02.09.11 */ public class JBossDetectorTest extends BaseDetectorTest { private JBossDetector detector; private MBeanServer server; private MBeanServerExecutor servers; @BeforeMethod public void setup() { detector = new JBossDetector(); server = createMock(MBeanServer.class); servers = getMBeanServerManager(server); } @Test public void simpleNotFound() throws MalformedObjectNameException { for (String name : new String[]{ "jboss.system:type=Server", "jboss.as:*", "jboss.modules:*" }) { expect(server.queryNames(new ObjectName(name), null)).andReturn(Collections.<ObjectName>emptySet()).anyTimes(); } replay(server); assertNull(detector.detect(servers)); verify(server); } @Test public void simpleFound() throws MalformedObjectNameException, InstanceNotFoundException, ReflectionException, AttributeNotFoundException, MBeanException, IntrospectionException { 
expect(server.queryNames(new ObjectName("jboss.as:management-root=server"),null)).andReturn(Collections.EMPTY_SET); ObjectName oName = prepareQuery("jboss.system:type=Server"); expect(server.isRegistered(oName)).andStubReturn(true); expect(server.getAttribute(oName, "Version")).andReturn("5.1.0"); replay(server); ServerHandle handle = detector.detect(servers); assertEquals(handle.getVersion(),"5.1.0"); assertEquals(handle.getVendor(),"RedHat"); assertEquals(handle.getProduct(),"jboss"); // Verify workaround reset(server); ObjectName memoryBean = new ObjectName("java.lang:type=Memory"); expect(server.isRegistered(memoryBean)).andStubReturn(true); replay(server); handle.preDispatch(servers, new JmxRequestBuilder(RequestType.READ, memoryBean).attribute("HeapMemoryUsage").<JmxRequest>build()); verify(server); } @Test public void version71() throws MalformedObjectNameException, IntrospectionException, InstanceNotFoundException, ReflectionException, AttributeNotFoundException, MBeanException { expect(server.queryNames(new ObjectName("jboss.system:type=Server"),null)).andReturn(Collections.<ObjectName>emptySet()); prepareQuery("jboss.as:*"); ObjectName oName = new ObjectName("jboss.as:management-root=server"); expect(server.getAttribute(oName,"productVersion")).andReturn(null); expect(server.getAttribute(oName,"releaseVersion")).andReturn("7.1.1.Final"); expect(server.getAttribute(oName,"productName")).andReturn(null); replay(server); ServerHandle handle = detector.detect(servers); assertEquals(handle.getVersion(),"7.1.1.Final"); assertEquals(handle.getVendor(),"RedHat"); assertEquals(handle.getProduct(),"jboss"); verifyNoWorkaround(handle); } @Test public void version101() throws MalformedObjectNameException, IntrospectionException, InstanceNotFoundException, ReflectionException, AttributeNotFoundException, MBeanException { expect(server.queryNames(new ObjectName("jboss.system:type=Server"),null)).andReturn(Collections.<ObjectName>emptySet()); prepareQuery("jboss.as:*"); 
ObjectName oName = new ObjectName("jboss.as:management-root=server"); expect(server.getAttribute(oName,"productVersion")).andReturn("10.1.0.Final"); expect(server.getAttribute(oName,"productName")).andReturn("WildFly Full"); replay(server); ServerHandle handle = detector.detect(servers); assertEquals(handle.getVersion(),"10.1.0.Final"); assertEquals(handle.getVendor(),"RedHat"); assertEquals(handle.getProduct(),"WildFly Full"); verifyNoWorkaround(handle); } private void verifyNoWorkaround(ServerHandle pHandle) throws MalformedObjectNameException { // Verify that no workaround is active reset(server); ObjectName memoryBean = new ObjectName("java.lang:type=Memory"); replay(server); pHandle.preDispatch(servers, new JmxRequestBuilder(RequestType.READ, memoryBean).attribute("HeapMemoryUsage").<JmxRequest>build()); verify(server); } @Test public void version7() throws MalformedObjectNameException, IntrospectionException, InstanceNotFoundException, ReflectionException { expect(server.queryNames(new ObjectName("jboss.system:type=Server"),null)).andReturn(Collections.<ObjectName>emptySet()); expect(server.queryNames(new ObjectName("jboss.as:*"),null)).andReturn(Collections.<ObjectName>emptySet()); prepareQuery("jboss.modules:*"); replay(server); ServerHandle handle = detector.detect(servers); assertEquals(handle.getVersion(),"7"); assertEquals(handle.getVendor(),"RedHat"); assertEquals(handle.getProduct(),"jboss"); // Verify that no workaround is active verifyNoWorkaround(handle); } private ObjectName prepareQuery(String pName) throws MalformedObjectNameException { ObjectName oName = new ObjectName(pName); Set<ObjectName> oNames = new HashSet<ObjectName>(Arrays.asList(oName)); expect(server.queryNames(oName,null)).andReturn(oNames); return oName; } @Test public void addMBeanServers() { replay(server); detector.addMBeanServers(new HashSet<MBeanServerConnection>()); } @Test(expectedExceptions = IllegalArgumentException.class) public void 
verifyIsClassLoadedArgumentChecksNullInstrumentation() { detector.isClassLoaded("xx", null); } @Test(expectedExceptions = IllegalArgumentException.class) public void verifyIsClassLoadedArgumentChecks2NullClassname() { Instrumentation inst = mock(Instrumentation.class); detector.isClassLoaded(null, inst); } @Test public void verifyIsClassLoadedNotLoaded() { Instrumentation inst = createMock(Instrumentation.class); expect(inst.getAllLoadedClasses()).andReturn(new Class[] {}).once(); replay(inst); assertFalse(detector.isClassLoaded("org.Dummy", inst)); verify(inst); } @Test public void verifyIsClassLoadedLoaded() { Instrumentation inst = createMock(Instrumentation.class); expect(inst.getAllLoadedClasses()).andReturn(new Class[] {JBossDetectorTest.class}).once(); replay(inst); assertTrue(detector.isClassLoaded(JBossDetectorTest.class.getName(), inst)); verify(inst); } @Test public void verifyJvmAgentStartup() throws MalformedURLException { Instrumentation inst = createMock(Instrumentation.class); expect(inst.getAllLoadedClasses()).andReturn(new Class[] {}).times(3); expect(inst.getAllLoadedClasses()).andReturn(new Class[] {JBossDetectorTest.class}).atLeastOnce(); ClassLoader cl = createMock(ClassLoader.class); expect(cl.getResource("org/jboss/modules/Main.class")).andReturn(new URL("http", "dummy", "")).anyTimes(); String prevPkgValue = System.setProperty("jboss.modules.system.pkgs", "blah"); String prevLogValue = System.setProperty("java.util.logging.manager", JBossDetectorTest.class.getName()); replay(inst,cl); try { detector.jvmAgentStartup(inst, cl); } finally { resetSysProp(prevLogValue, "java.util.logging.manager"); resetSysProp(prevPkgValue, "jboss.modules.system.pkgs"); } verify(inst); } protected void resetSysProp(String prevValue, String key) { if (prevValue == null) { System.getProperties().remove(key); } else { System.setProperty(key, prevValue); } } }
apache-2.0
ilantukh/ignite
modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxRemoteAdapter.java
40857
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.internal.processors.cache.distributed; import java.io.Externalizable; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; import java.util.UUID; import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; import java.util.stream.Collectors; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.failure.FailureContext; import org.apache.ignite.failure.FailureType; import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.NodeStoppingException; import org.apache.ignite.internal.pagemem.wal.WALPointer; import org.apache.ignite.internal.pagemem.wal.record.DataEntry; import org.apache.ignite.internal.pagemem.wal.record.DataRecord; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.CacheObject; import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.GridCacheEntryEx; import 
org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException; import org.apache.ignite.internal.processors.cache.GridCacheFilterFailedException; import org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate; import org.apache.ignite.internal.processors.cache.GridCacheOperation; import org.apache.ignite.internal.processors.cache.GridCacheReturn; import org.apache.ignite.internal.processors.cache.GridCacheReturnCompletableWrapper; import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; import org.apache.ignite.internal.processors.cache.GridCacheUpdateTxResult; import org.apache.ignite.internal.processors.cache.KeyCacheObject; import org.apache.ignite.internal.processors.cache.distributed.near.GridNearCacheEntry; import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx; import org.apache.ignite.internal.processors.cache.transactions.IgniteTxAdapter; import org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry; import org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey; import org.apache.ignite.internal.processors.cache.transactions.IgniteTxRemoteEx; import org.apache.ignite.internal.processors.cache.transactions.IgniteTxRemoteState; import org.apache.ignite.internal.processors.cache.transactions.IgniteTxState; import org.apache.ignite.internal.processors.cache.transactions.TxCounters; import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; import org.apache.ignite.internal.processors.cache.version.GridCacheVersionConflictContext; import org.apache.ignite.internal.transactions.IgniteTxHeuristicCheckedException; import org.apache.ignite.internal.util.future.GridFinishedFuture; import org.apache.ignite.internal.util.lang.GridTuple; import org.apache.ignite.internal.util.tostring.GridToStringBuilder; import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.F; import 
org.apache.ignite.internal.util.typedef.T2; import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteBiTuple; import org.apache.ignite.transactions.TransactionConcurrency; import org.apache.ignite.transactions.TransactionIsolation; import org.apache.ignite.transactions.TransactionState; import org.jetbrains.annotations.Nullable; import static org.apache.ignite.internal.processors.cache.GridCacheOperation.CREATE; import static org.apache.ignite.internal.processors.cache.GridCacheOperation.DELETE; import static org.apache.ignite.internal.processors.cache.GridCacheOperation.NOOP; import static org.apache.ignite.internal.processors.cache.GridCacheOperation.READ; import static org.apache.ignite.internal.processors.cache.GridCacheOperation.RELOAD; import static org.apache.ignite.internal.processors.cache.GridCacheOperation.UPDATE; import static org.apache.ignite.internal.processors.dr.GridDrType.DR_BACKUP; import static org.apache.ignite.internal.processors.dr.GridDrType.DR_NONE; import static org.apache.ignite.transactions.TransactionState.COMMITTED; import static org.apache.ignite.transactions.TransactionState.COMMITTING; import static org.apache.ignite.transactions.TransactionState.PREPARED; import static org.apache.ignite.transactions.TransactionState.PREPARING; import static org.apache.ignite.transactions.TransactionState.ROLLED_BACK; import static org.apache.ignite.transactions.TransactionState.ROLLING_BACK; import static org.apache.ignite.transactions.TransactionState.UNKNOWN; /** * Transaction created by system implicitly on remote nodes. */ public abstract class GridDistributedTxRemoteAdapter extends IgniteTxAdapter implements IgniteTxRemoteEx { /** */ private static final long serialVersionUID = 0L; /** Commit allowed field updater. 
*/ private static final AtomicIntegerFieldUpdater<GridDistributedTxRemoteAdapter> COMMIT_ALLOWED_UPD = AtomicIntegerFieldUpdater.newUpdater(GridDistributedTxRemoteAdapter.class, "commitAllowed"); /** Explicit versions. */ @GridToStringInclude private List<GridCacheVersion> explicitVers; /** Started flag. */ @GridToStringInclude private boolean started; /** {@code True} only if all write entries are locked by this transaction. */ @GridToStringInclude private volatile int commitAllowed; /** */ @GridToStringInclude protected IgniteTxRemoteState txState; /** Transaction label. */ @GridToStringInclude @Nullable private String txLbl; /** * Empty constructor required for {@link Externalizable}. */ public GridDistributedTxRemoteAdapter() { // No-op. } /** * @param ctx Cache registry. * @param nodeId Node ID. * @param xidVer XID version. * @param commitVer Commit version. * @param sys System flag. * @param plc IO policy. * @param concurrency Concurrency level (should be pessimistic). * @param isolation Transaction isolation. * @param invalidate Invalidate flag. * @param timeout Timeout. * @param txSize Expected transaction size. * @param subjId Subject ID. * @param taskNameHash Task name hash code. * @param txLbl Transaction label. */ public GridDistributedTxRemoteAdapter( GridCacheSharedContext<?, ?> ctx, UUID nodeId, GridCacheVersion xidVer, GridCacheVersion commitVer, boolean sys, byte plc, TransactionConcurrency concurrency, TransactionIsolation isolation, boolean invalidate, long timeout, int txSize, @Nullable UUID subjId, int taskNameHash, String txLbl ) { super( ctx, nodeId, xidVer, ctx.versions().last(), Thread.currentThread().getId(), sys, plc, concurrency, isolation, timeout, txSize, subjId, taskNameHash); this.invalidate = invalidate; this.txLbl = txLbl; commitVersion(commitVer); // Must set started flag after concurrency and isolation. 
started = true; } /** {@inheritDoc} */ @Override public IgniteTxState txState() { return txState; } /** {@inheritDoc} */ @Override public UUID eventNodeId() { return nodeId; } /** {@inheritDoc} */ @Override public UUID originatingNodeId() { return nodeId; } /** {@inheritDoc} */ @Override public boolean activeCachesDeploymentEnabled() { return false; } /** {@inheritDoc} */ @Override public void activeCachesDeploymentEnabled(boolean depEnabled) { throw new UnsupportedOperationException("Remote tx doesn't support deployment."); } /** {@inheritDoc} */ @Override public void addActiveCache(GridCacheContext cacheCtx, boolean recovery) throws IgniteCheckedException { txState.addActiveCache(cacheCtx, recovery, this); } /** * @return Checks if transaction has no entries. */ @Override public boolean empty() { return txState.empty(); } /** {@inheritDoc} */ @Override public void invalidate(boolean invalidate) { this.invalidate = invalidate; } /** {@inheritDoc} */ @Override public Map<IgniteTxKey, IgniteTxEntry> writeMap() { return txState.writeMap(); } /** {@inheritDoc} */ @Override public Map<IgniteTxKey, IgniteTxEntry> readMap() { return txState.readMap(); } /** {@inheritDoc} */ @Override public void seal() { // No-op. } /** {@inheritDoc} */ @Override public GridTuple<CacheObject> peek(GridCacheContext cacheCtx, boolean failFast, KeyCacheObject key) throws GridCacheFilterFailedException { assert false : "Method peek can only be called on user transaction: " + this; throw new IllegalStateException("Method peek can only be called on user transaction: " + this); } /** {@inheritDoc} */ @Override public IgniteTxEntry entry(IgniteTxKey key) { return txState.entry(key); } /** * Clears entry from transaction as it never happened. * * @param key key to be removed. */ public void clearEntry(IgniteTxKey key) { txState.clearEntry(key); } /** * @param baseVer Base version. * @param committedVers Committed versions. * @param rolledbackVers Rolled back versions. 
*/ @Override public void doneRemote(GridCacheVersion baseVer, Collection<GridCacheVersion> committedVers, Collection<GridCacheVersion> rolledbackVers, Collection<GridCacheVersion> pendingVers) { Map<IgniteTxKey, IgniteTxEntry> readMap = txState.readMap(); if (readMap != null && !readMap.isEmpty()) { for (IgniteTxEntry txEntry : readMap.values()) doneRemote(txEntry, baseVer, committedVers, rolledbackVers, pendingVers); } Map<IgniteTxKey, IgniteTxEntry> writeMap = txState.writeMap(); if (writeMap != null && !writeMap.isEmpty()) { for (IgniteTxEntry txEntry : writeMap.values()) doneRemote(txEntry, baseVer, committedVers, rolledbackVers, pendingVers); } } /** {@inheritDoc} */ @Override public void setPartitionUpdateCounters(long[] cntrs) { if (writeMap() != null && !writeMap().isEmpty() && cntrs != null && cntrs.length > 0) { int i = 0; for (IgniteTxEntry txEntry : writeMap().values()) { txEntry.updateCounter(cntrs[i]); ++i; } } } /** * Adds completed versions to an entry. * * @param txEntry Entry. * @param baseVer Base version for completed versions. * @param committedVers Completed versions relative to base version. * @param rolledbackVers Rolled back versions relative to base version. * @param pendingVers Pending versions. */ private void doneRemote(IgniteTxEntry txEntry, GridCacheVersion baseVer, Collection<GridCacheVersion> committedVers, Collection<GridCacheVersion> rolledbackVers, Collection<GridCacheVersion> pendingVers) { while (true) { GridDistributedCacheEntry entry = (GridDistributedCacheEntry)txEntry.cached(); try { // Handle explicit locks. GridCacheVersion doneVer = txEntry.explicitVersion() != null ? 
txEntry.explicitVersion() : xidVer; entry.doneRemote(doneVer, baseVer, pendingVers, committedVers, rolledbackVers, isSystemInvalidate()); break; } catch (GridCacheEntryRemovedException ignored) { assert entry.obsoleteVersion() != null; if (log.isDebugEnabled()) log.debug("Replacing obsolete entry in remote transaction [entry=" + entry + ", tx=" + this + ']'); // Replace the entry. txEntry.cached(txEntry.context().cache().entryEx(txEntry.key(), topologyVersion())); } } } /** {@inheritDoc} */ @Override public boolean onOwnerChanged(GridCacheEntryEx entry, GridCacheMvccCandidate owner) { if (!hasWriteKey(entry.txKey())) return false; try { commitIfLocked(); return true; } catch (IgniteCheckedException e) { U.error(log, "Failed to commit remote transaction: " + this, e); invalidate(true); systemInvalidate(true); rollbackRemoteTx(); return false; } } /** {@inheritDoc} */ @Override public boolean isStarted() { return started; } /** {@inheritDoc} */ @Override public boolean hasWriteKey(IgniteTxKey key) { return txState.hasWriteKey(key); } /** {@inheritDoc} */ @Override public Set<IgniteTxKey> readSet() { return txState.readSet(); } /** {@inheritDoc} */ @Override public Set<IgniteTxKey> writeSet() { return txState.writeSet(); } /** {@inheritDoc} */ @Override public Collection<IgniteTxEntry> allEntries() { return txState.allEntries(); } /** {@inheritDoc} */ @Override public Collection<IgniteTxEntry> writeEntries() { return txState.writeEntries(); } /** {@inheritDoc} */ @Override public Collection<IgniteTxEntry> readEntries() { return txState.readEntries(); } /** * @throws IgniteCheckedException If failed. */ public final void prepareRemoteTx() throws IgniteCheckedException { // If another thread is doing prepare or rollback. if (!state(PREPARING)) { // In optimistic mode prepare may be called multiple times. 
if (state() != PREPARING || !optimistic()) { if (log.isDebugEnabled()) log.debug("Invalid transaction state for prepare: " + this); return; } } try { cctx.tm().prepareTx(this, null); if (pessimistic() || isSystemInvalidate()) state(PREPARED); } catch (IgniteCheckedException e) { setRollbackOnly(); throw e; } } /** * @throws IgniteCheckedException If commit failed. */ private void commitIfLocked() throws IgniteCheckedException { if (state() == COMMITTING) { for (IgniteTxEntry txEntry : writeEntries()) { assert txEntry != null : "Missing transaction entry for tx: " + this; while (true) { GridCacheEntryEx entry = txEntry.cached(); assert entry != null : "Missing cached entry for transaction entry: " + txEntry; try { GridCacheVersion ver = txEntry.explicitVersion() != null ? txEntry.explicitVersion() : xidVer; // If locks haven't been acquired yet, keep waiting. if (!entry.lockedBy(ver)) { if (log.isDebugEnabled()) log.debug("Transaction does not own lock for entry (will wait) [entry=" + entry + ", tx=" + this + ']'); return; } break; // While. } catch (GridCacheEntryRemovedException ignore) { if (log.isDebugEnabled()) log.debug("Got removed entry while committing (will retry): " + txEntry); txEntry.cached(txEntry.context().cache().entryEx(txEntry.key(), topologyVersion())); } } } // Only one thread gets to commit. if (COMMIT_ALLOWED_UPD.compareAndSet(this, 0, 1)) { IgniteCheckedException err = null; Map<IgniteTxKey, IgniteTxEntry> writeMap = txState.writeMap(); GridCacheReturnCompletableWrapper wrapper = null; if (!F.isEmpty(writeMap) || mvccSnapshot != null) { GridCacheReturn ret = null; if (!near() && !local() && onePhaseCommit()) { if (needReturnValue()) { ret = new GridCacheReturn(null, cctx.localNodeId().equals(otherNodeId()), true, null, true); UUID origNodeId = otherNodeId(); // Originating node. cctx.tm().addCommittedTxReturn(this, wrapper = new GridCacheReturnCompletableWrapper( !cctx.localNodeId().equals(origNodeId) ? 
origNodeId : null)); } else cctx.tm().addCommittedTx(this, this.nearXidVersion(), null); } // Register this transaction as completed prior to write-phase to // ensure proper lock ordering for removed entries. cctx.tm().addCommittedTx(this); AffinityTopologyVersion topVer = topologyVersion(); WALPointer ptr = null; cctx.database().checkpointReadLock(); try { assert !txState.mvccEnabled() || mvccSnapshot != null : "Mvcc is not initialized: " + this; Collection<IgniteTxEntry> entries = near() || cctx.snapshot().needTxReadLogging() ? allEntries() : writeEntries(); // Data entry to write to WAL and associated with it TxEntry. List<T2<DataEntry, IgniteTxEntry>> dataEntries = null; batchStoreCommit(writeMap().values()); // Node that for near transactions we grab all entries. for (IgniteTxEntry txEntry : entries) { GridCacheContext cacheCtx = txEntry.context(); boolean replicate = cacheCtx.isDrEnabled(); while (true) { try { GridCacheEntryEx cached = txEntry.cached(); if (cached == null) txEntry.cached(cached = cacheCtx.cache().entryEx(txEntry.key(), topologyVersion())); if (near() && cacheCtx.dr().receiveEnabled()) { cached.markObsolete(xidVer); break; } GridNearCacheEntry nearCached = null; if (updateNearCache(cacheCtx, txEntry.key(), topVer)) nearCached = cacheCtx.dht().near().peekExx(txEntry.key()); if (!F.isEmpty(txEntry.entryProcessors())) txEntry.cached().unswap(false); IgniteBiTuple<GridCacheOperation, CacheObject> res = applyTransformClosures(txEntry, false, ret); GridCacheOperation op = res.get1(); CacheObject val = res.get2(); GridCacheVersion explicitVer = txEntry.conflictVersion(); if (explicitVer == null) explicitVer = writeVersion(); if (txEntry.ttl() == CU.TTL_ZERO) op = DELETE; boolean conflictNeedResolve = cacheCtx.conflictNeedResolve(); GridCacheVersionConflictContext conflictCtx = null; if (conflictNeedResolve) { IgniteBiTuple<GridCacheOperation, GridCacheVersionConflictContext> drRes = conflictResolve(op, txEntry, val, explicitVer, cached); assert 
drRes != null; conflictCtx = drRes.get2(); if (conflictCtx.isUseOld()) op = NOOP; else if (conflictCtx.isUseNew()) { txEntry.ttl(conflictCtx.ttl()); txEntry.conflictExpireTime(conflictCtx.expireTime()); } else if (conflictCtx.isMerge()) { op = drRes.get1(); val = txEntry.context().toCacheObject(conflictCtx.mergeValue()); explicitVer = writeVersion(); txEntry.ttl(conflictCtx.ttl()); txEntry.conflictExpireTime(conflictCtx.expireTime()); } } else // Nullify explicit version so that innerSet/innerRemove will work as usual. explicitVer = null; GridCacheVersion dhtVer = cached.isNear() ? writeVersion() : null; if (!near() && cacheCtx.group().persistenceEnabled() && cacheCtx.group().walEnabled() && op != NOOP && op != RELOAD && (op != READ || cctx.snapshot().needTxReadLogging())) { if (dataEntries == null) dataEntries = new ArrayList<>(entries.size()); dataEntries.add( new T2<>( new DataEntry( cacheCtx.cacheId(), txEntry.key(), val, op, nearXidVersion(), writeVersion(), 0, txEntry.key().partition(), txEntry.updateCounter() ), txEntry ) ); } if (op == CREATE || op == UPDATE) { // Invalidate only for near nodes (backups cannot be invalidated). if (isSystemInvalidate() || (isInvalidate() && cacheCtx.isNear())) cached.innerRemove(this, eventNodeId(), nodeId, false, true, true, txEntry.keepBinary(), txEntry.hasOldValue(), txEntry.oldValue(), topVer, null, replicate ? DR_BACKUP : DR_NONE, near() ? null : explicitVer, CU.subjectId(this, cctx), resolveTaskName(), dhtVer, txEntry.updateCounter()); else { assert val != null : txEntry; GridCacheUpdateTxResult updRes = cached.innerSet(this, eventNodeId(), nodeId, val, false, false, txEntry.ttl(), true, true, txEntry.keepBinary(), txEntry.hasOldValue(), txEntry.oldValue(), topVer, null, replicate ? DR_BACKUP : DR_NONE, txEntry.conflictExpireTime(), near() ? 
null : explicitVer, CU.subjectId(this, cctx), resolveTaskName(), dhtVer, txEntry.updateCounter()); txEntry.updateCounter(updRes.updateCounter()); if (updRes.loggedPointer() != null) ptr = updRes.loggedPointer(); // Keep near entry up to date. if (nearCached != null) { CacheObject val0 = cached.valueBytes(); nearCached.updateOrEvict(xidVer, val0, cached.expireTime(), cached.ttl(), nodeId, topVer); } } } else if (op == DELETE) { GridCacheUpdateTxResult updRes = cached.innerRemove(this, eventNodeId(), nodeId, false, true, true, txEntry.keepBinary(), txEntry.hasOldValue(), txEntry.oldValue(), topVer, null, replicate ? DR_BACKUP : DR_NONE, near() ? null : explicitVer, CU.subjectId(this, cctx), resolveTaskName(), dhtVer, txEntry.updateCounter()); txEntry.updateCounter(updRes.updateCounter()); if (updRes.loggedPointer() != null) ptr = updRes.loggedPointer(); // Keep near entry up to date. if (nearCached != null) nearCached.updateOrEvict(xidVer, null, 0, 0, nodeId, topVer); } else if (op == RELOAD) { CacheObject reloaded = cached.innerReload(); if (nearCached != null) { nearCached.innerReload(); nearCached.updateOrEvict(cached.version(), reloaded, cached.expireTime(), cached.ttl(), nodeId, topVer); } } else if (op == READ) { assert near(); if (log.isDebugEnabled()) log.debug("Ignoring READ entry when committing: " + txEntry); } // No-op. else { if (conflictCtx == null || !conflictCtx.isUseOld()) { if (txEntry.ttl() != CU.TTL_NOT_CHANGED) cached.updateTtl(null, txEntry.ttl()); if (nearCached != null) { CacheObject val0 = cached.valueBytes(); nearCached.updateOrEvict(xidVer, val0, cached.expireTime(), cached.ttl(), nodeId, topVer); } } } // Assert after setting values as we want to make sure // that if we replaced removed entries. assert txEntry.op() == READ || onePhaseCommit() || // If candidate is not there, then lock was explicit // and we simply allow the commit to proceed. 
!cached.hasLockCandidateUnsafe(xidVer) || cached.lockedByUnsafe(xidVer) : "Transaction does not own lock for commit [entry=" + cached + ", tx=" + this + ']'; // Break out of while loop. break; } catch (GridCacheEntryRemovedException ignored) { if (log.isDebugEnabled()) log.debug("Attempting to commit a removed entry (will retry): " + txEntry); // Renew cached entry. txEntry.cached(cacheCtx.cache().entryEx(txEntry.key(), topologyVersion())); } } } // Apply cache size deltas. applyTxSizes(); TxCounters txCntrs = txCounters(false); // Apply update counters. if (txCntrs != null) cctx.tm().txHandler().applyPartitionsUpdatesCounters(txCntrs.updateCounters()); cctx.mvccCaching().onTxFinished(this, true); if (!near() && !F.isEmpty(dataEntries) && cctx.wal() != null) { // Set new update counters for data entries received from persisted tx entries. List<DataEntry> entriesWithCounters = dataEntries.stream() .map(tuple -> tuple.get1().partitionCounter(tuple.get2().updateCounter())) .collect(Collectors.toList()); cctx.wal().log(new DataRecord(entriesWithCounters)); } if (ptr != null && !cctx.tm().logTxRecords()) cctx.wal().flush(ptr, false); } catch (Throwable ex) { state(UNKNOWN); if (X.hasCause(ex, NodeStoppingException.class)) { U.warn(log, "Failed to commit transaction, node is stopping [tx=" + CU.txString(this) + ", err=" + ex + ']'); return; } err = heuristicException(ex); try { uncommit(); } catch (Throwable e) { err.addSuppressed(e); } throw err; } finally { cctx.database().checkpointReadUnlock(); if (wrapper != null) wrapper.initialize(ret); } } cctx.tm().commitTx(this); state(COMMITTED); } } } /** {@inheritDoc} */ @Override public final void commitRemoteTx() throws IgniteCheckedException { if (optimistic()) state(PREPARED); if (!state(COMMITTING)) { TransactionState state = state(); // If other thread is doing commit, then no-op. 
if (state == COMMITTING || state == COMMITTED) return; if (log.isDebugEnabled()) log.debug("Failed to set COMMITTING transaction state (will rollback): " + this); setRollbackOnly(); if (!isSystemInvalidate()) throw new IgniteCheckedException("Invalid transaction state for commit [state=" + state + ", tx=" + this + ']'); rollbackRemoteTx(); return; } try { commitIfLocked(); } catch (IgniteTxHeuristicCheckedException e) { // Treat heuristic exception as critical. cctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e)); throw e; } } /** * Forces commit for this tx. * * @throws IgniteCheckedException If commit failed. */ public void forceCommit() throws IgniteCheckedException { commitIfLocked(); } /** {@inheritDoc} */ @Override public IgniteInternalFuture<IgniteInternalTx> commitAsync() { try { commitRemoteTx(); return new GridFinishedFuture<IgniteInternalTx>(this); } catch (IgniteCheckedException e) { return new GridFinishedFuture<>(e); } } /** {@inheritDoc} */ @Override public final IgniteInternalFuture<?> salvageTx() { try { systemInvalidate(true); prepareRemoteTx(); if (state() == PREPARING) { if (log.isDebugEnabled()) log.debug("Ignoring transaction in PREPARING state as it is currently handled " + "by another thread: " + this); return null; } doneRemote(xidVersion(), Collections.<GridCacheVersion>emptyList(), Collections.<GridCacheVersion>emptyList(), Collections.<GridCacheVersion>emptyList()); commitRemoteTx(); } catch (IgniteCheckedException e) { U.error(log, "Failed to invalidate transaction: " + xidVersion(), e); } return null; } /** {@inheritDoc} */ @Override public final void rollbackRemoteTx() { try { // Note that we don't evict near entries here - // they will be deleted by their corresponding transactions. 
if (state(ROLLING_BACK) || state() == UNKNOWN) { cctx.tm().rollbackTx(this, false, skipCompletedVersions()); TxCounters counters = txCounters(false); if (counters != null) cctx.tm().txHandler().applyPartitionsUpdatesCounters(counters.updateCounters(), true, false); state(ROLLED_BACK); cctx.mvccCaching().onTxFinished(this, false); } } catch (IgniteCheckedException | RuntimeException | Error e) { state(UNKNOWN); U.error(log, "Error during tx rollback.", e); if (e instanceof IgniteCheckedException) throw new IgniteException(e); else if (e instanceof RuntimeException) throw (RuntimeException) e; else throw (Error) e; } } /** {@inheritDoc} */ @Override public IgniteInternalFuture<IgniteInternalTx> rollbackAsync() { rollbackRemoteTx(); return new GridFinishedFuture<IgniteInternalTx>(this); } /** {@inheritDoc} */ @Override public Collection<GridCacheVersion> alternateVersions() { return explicitVers == null ? Collections.<GridCacheVersion>emptyList() : explicitVers; } /** {@inheritDoc} */ @Override public void commitError(Throwable e) { // No-op. } /** * Adds explicit version if there is one. * * @param e Transaction entry. */ protected void addExplicit(IgniteTxEntry e) { if (e.explicitVersion() != null) { if (explicitVers == null) explicitVers = new LinkedList<>(); if (!explicitVers.contains(e.explicitVersion())) { explicitVers.add(e.explicitVersion()); if (log.isDebugEnabled()) log.debug("Added explicit version to transaction [explicitVer=" + e.explicitVersion() + ", tx=" + this + ']'); // Register alternate version with TM. cctx.tm().addAlternateVersion(e.explicitVersion(), this); } } } /** {@inheritDoc} */ @Override public String label() { return txLbl; } /** {@inheritDoc} */ @Override public String toString() { return GridToStringBuilder.toString(GridDistributedTxRemoteAdapter.class, this, "super", super.toString()); } }
apache-2.0
grgrzybek/karaf
tooling/karaf-maven-plugin/src/it/test-run-bundle/src/main/java/test/Dummy.java
862
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package test; public class Dummy { // nothing }
apache-2.0
zstackorg/zstack
network/src/main/java/org/zstack/network/service/DhcpExtension.java
14549
package org.zstack.network.service; import org.springframework.beans.factory.annotation.Autowired; import org.zstack.core.componentloader.PluginRegistry; import org.zstack.core.db.Q; import org.zstack.core.db.SimpleQuery; import org.zstack.core.db.SimpleQuery.Op; import org.zstack.header.Component; import org.zstack.header.core.Completion; import org.zstack.header.core.NoErrorCompletion; import org.zstack.header.errorcode.ErrorCode; import org.zstack.header.exception.CloudRuntimeException; import org.zstack.header.network.l3.*; import org.zstack.header.network.service.DhcpStruct; import org.zstack.header.network.service.NetworkServiceDhcpBackend; import org.zstack.header.network.service.NetworkServiceProviderType; import org.zstack.header.network.service.NetworkServiceType; import org.zstack.header.vm.*; import org.zstack.header.vm.VmInstanceSpec.HostName; import org.zstack.network.l3.IpRangeHelper; import org.zstack.utils.CollectionUtils; import org.zstack.utils.Utils; import org.zstack.utils.function.Function; import org.zstack.utils.logging.CLogger; import org.zstack.utils.network.IPv6Constants; import org.zstack.utils.network.NetworkUtils; import java.util.*; import java.util.stream.Collectors; /** * Created with IntelliJ IDEA. * User: frank * Time: 7:53 PM * To change this template use File | Settings | File Templates. 
*/ public class DhcpExtension extends AbstractNetworkServiceExtension implements Component, VmDefaultL3NetworkChangedExtensionPoint { private static final CLogger logger = Utils.getLogger(DhcpExtension.class); @Autowired private PluginRegistry pluginRgty; private final Map<NetworkServiceProviderType, NetworkServiceDhcpBackend> dhcpBackends = new HashMap<NetworkServiceProviderType, NetworkServiceDhcpBackend>(); private final String RESULT = String.format("result.%s", DhcpExtension.class.getName()); public NetworkServiceType getNetworkServiceType() { return NetworkServiceType.DHCP; } private void doDhcp(final Iterator<Map.Entry<NetworkServiceDhcpBackend, List<DhcpStruct>>> it, final VmInstanceSpec spec, final Completion complete) { if (!it.hasNext()) { complete.success(); return; } Map.Entry<NetworkServiceDhcpBackend, List<DhcpStruct>> e = it.next(); NetworkServiceDhcpBackend bkd = e.getKey(); List<DhcpStruct> structs = e.getValue(); logger.debug(String.format("%s is applying DHCP service", bkd.getClass().getName())); bkd.applyDhcpService(structs, spec, new Completion(complete) { @Override public void success() { doDhcp(it, spec, complete); } @Override public void fail(ErrorCode errorCode) { complete.fail(errorCode); } }); } private void releaseDhcp(final Iterator<Map.Entry<NetworkServiceDhcpBackend, List<DhcpStruct>>> it, final VmInstanceSpec spec, final NoErrorCompletion completion) { if (!it.hasNext()) { completion.done(); return; } Map.Entry<NetworkServiceDhcpBackend, List<DhcpStruct>> e = it.next(); NetworkServiceDhcpBackend bkd = e.getKey(); List<DhcpStruct> structs = e.getValue(); logger.debug(String.format("%s is releasing DHCP service", bkd.getClass().getName())); bkd.releaseDhcpService(structs, spec, new NoErrorCompletion(completion) { @Override public void done() { releaseDhcp(it, spec, completion); } }); } @Override public void applyNetworkService(VmInstanceSpec spec, Map<String, Object> data, Completion complete) { Map<NetworkServiceDhcpBackend, 
List<DhcpStruct>> entries = workoutDhcp(spec); data.put(RESULT, entries); doDhcp(entries.entrySet().iterator(), spec, complete); } @Override public void releaseNetworkService(VmInstanceSpec spec, Map<String, Object> data, NoErrorCompletion completion) { Map<NetworkServiceDhcpBackend, List<DhcpStruct>> entries = (Map<NetworkServiceDhcpBackend, List<DhcpStruct>>) data.get(RESULT); if (entries == null) { entries = workoutDhcp(spec); } releaseDhcp(entries.entrySet().iterator(), spec, completion); } private void populateExtensions() { for (NetworkServiceDhcpBackend bkd : pluginRgty.getExtensionList(NetworkServiceDhcpBackend.class)) { NetworkServiceDhcpBackend old = dhcpBackends.get(bkd.getProviderType()); if (old != null) { throw new CloudRuntimeException(String.format("duplicate NetworkServiceDhcpBackend[%s, %s] for type[%s]", bkd.getClass().getName(), old.getClass().getName(), bkd.getProviderType())); } dhcpBackends.put(bkd.getProviderType(), bkd); } } public boolean isDualStackNicInSingleL3Network(VmNicInventory nic) { if (nic.getUsedIps().size() < 2) { return false; } return nic.getUsedIps().stream().map(UsedIpInventory::getL3NetworkUuid).distinct().count() == 1; } private DhcpStruct getDhcpStruct(VmInstanceInventory vm, List<VmInstanceSpec.HostName> hostNames, VmNicVO nic, UsedIpVO ip, boolean isDefaultNic) { String l3Uuid = nic.getL3NetworkUuid(); if (ip != null) { l3Uuid = ip.getL3NetworkUuid(); } L3NetworkInventory l3 = L3NetworkInventory.valueOf(dbf.findByUuid(l3Uuid, L3NetworkVO.class)); DhcpStruct struct = new DhcpStruct(); struct.setVmUuid(nic.getVmInstanceUuid()); String hostname = CollectionUtils.find(hostNames, new Function<String, HostName>() { @Override public String call(HostName arg) { return arg.getL3NetworkUuid().equals(l3.getUuid()) ? 
arg.getHostname() : null; } }); if (hostname != null && l3.getDnsDomain() != null) { hostname = String.format("%s.%s", hostname, l3.getDnsDomain()); } struct.setHostname(hostname); struct.setDnsDomain(l3.getDnsDomain()); struct.setL3Network(l3); struct.setDefaultL3Network(isDefaultNic); struct.setMac(nic.getMac()); struct.setMtu(new MtuGetter().getMtu(l3.getUuid())); struct.setNicType(nic.getType()); return struct; } private void setDualStackNicOfSingleL3Network(DhcpStruct struct, VmNicVO nic) { struct.setIpVersion(IPv6Constants.DUAL_STACK); List<UsedIpVO> sortedIps = nic.getUsedIps().stream().sorted(Comparator.comparingLong(UsedIpVO::getIpVersionl)).collect(Collectors.toList()); for (UsedIpVO ip : sortedIps) { if (ip.getIpVersion() == IPv6Constants.IPv4) { struct.setGateway(ip.getGateway()); struct.setIp(ip.getIp()); struct.setNetmask(ip.getNetmask()); if (struct.getHostname() == null) { struct.setHostname(ip.getIp().replaceAll("\\.", "-")); } } else { List<NormalIpRangeVO> iprs = Q.New(NormalIpRangeVO.class).eq(NormalIpRangeVO_.l3NetworkUuid, ip.getL3NetworkUuid()) .eq(NormalIpRangeVO_.ipVersion, ip.getIpVersion()).list(); if (iprs.get(0).getAddressMode().equals(IPv6Constants.SLAAC)) { continue; } struct.setGateway6(ip.getGateway()); struct.setIp6(ip.getIp()); struct.setRaMode(iprs.get(0).getAddressMode()); struct.setPrefixLength(iprs.get(0).getPrefixLen()); struct.setFirstIp(NetworkUtils.getSmallestIp(iprs.stream().map(IpRangeVO::getStartIp).collect(Collectors.toList()))); struct.setEndIP(NetworkUtils.getBiggesttIp(iprs.stream().map(IpRangeVO::getEndIp).collect(Collectors.toList()))); } } } private void setNicDhcp(DhcpStruct struct, UsedIpVO ip) { if (ip.getIpVersion() == IPv6Constants.IPv4) { struct.setGateway(ip.getGateway()); struct.setIp(ip.getIp()); struct.setNetmask(ip.getNetmask()); if (struct.getHostname() == null) { struct.setHostname(ip.getIp().replaceAll("\\.", "-")); } } else { List<NormalIpRangeVO> iprs = 
Q.New(NormalIpRangeVO.class).eq(NormalIpRangeVO_.l3NetworkUuid, ip.getL3NetworkUuid()) .eq(NormalIpRangeVO_.ipVersion, IPv6Constants.IPv6).list(); struct.setGateway6(ip.getGateway()); struct.setIp6(ip.getIp()); struct.setRaMode(iprs.get(0).getAddressMode()); struct.setPrefixLength(iprs.get(0).getPrefixLen()); struct.setFirstIp(NetworkUtils.getSmallestIp(iprs.stream().map(IpRangeVO::getStartIp).collect(Collectors.toList()))); struct.setEndIP(NetworkUtils.getBiggesttIp(iprs.stream().map(IpRangeVO::getEndIp).collect(Collectors.toList()))); } } public List<DhcpStruct> makeDhcpStruct(VmInstanceInventory vm, List<VmInstanceSpec.HostName> hostNames, List<VmNicVO> nics) { List<DhcpStruct> res = new ArrayList<>(); List<VmNicVO> defaultNics = nics.stream().filter(nic -> nic.getL3NetworkUuid().equals(vm.getDefaultL3NetworkUuid())).collect(Collectors.toList()); for (VmNicVO nic : nics) { boolean isDefaultNic = nic.equals(VmNicVO.findTheEarliestOne(defaultNics)); if (isDualStackNicInSingleL3Network(VmNicInventory.valueOf(nic))) { DhcpStruct struct = getDhcpStruct(vm, hostNames, nic, null, isDefaultNic); setDualStackNicOfSingleL3Network(struct, nic); res.add(struct); continue; } for (UsedIpVO ip : nic.getUsedIps()) { NormalIpRangeVO ipr = dbf.findByUuid(ip.getIpRangeUuid(), NormalIpRangeVO.class); if (ipr.getIpVersion() == IPv6Constants.IPv6 && (ipr.getAddressMode().equals(IPv6Constants.SLAAC))) { continue; } DhcpStruct struct = getDhcpStruct(vm, hostNames, nic, ip, isDefaultNic); struct.setIpVersion(ip.getIpVersion()); setNicDhcp(struct, ip); res.add(struct); } } return res; } private Map<NetworkServiceDhcpBackend, List<DhcpStruct>> workoutDhcp(VmInstanceSpec spec) { Map<NetworkServiceDhcpBackend, List<DhcpStruct>> map = new HashMap<NetworkServiceDhcpBackend, List<DhcpStruct>>(); Map<NetworkServiceProviderType, List<L3NetworkInventory>> providerMap = getNetworkServiceProviderMap(NetworkServiceType.DHCP, VmNicSpec.getL3NetworkInventoryOfSpec(spec.getL3Networks())); for 
(Map.Entry<NetworkServiceProviderType, List<L3NetworkInventory>> e : providerMap.entrySet()) { NetworkServiceProviderType ptype = e.getKey(); List<DhcpStruct> lst = new ArrayList<DhcpStruct>(); List<VmNicVO> nics = new ArrayList<>(); Map<String, L3NetworkInventory> l3Map = new HashMap<>(); for (L3NetworkInventory l3 : e.getValue()) { l3Map.put(l3.getUuid(), l3); } for (VmNicInventory inv : spec.getDestNics()) { VmNicVO vmNicVO = dbf.findByUuid(inv.getUuid(), VmNicVO.class); for (UsedIpVO ip : vmNicVO.getUsedIps()) { L3NetworkInventory l3 = l3Map.get(ip.getL3NetworkUuid()); if (l3 == null) { continue; } List<IpRangeInventory> iprs = IpRangeHelper.getNormalIpRanges(l3); if (iprs.isEmpty()) { continue; } if (!nics.contains(vmNicVO)) { nics.add(vmNicVO); } } } lst.addAll(makeDhcpStruct(spec.getVmInventory(), spec.getHostnames(), nics)); NetworkServiceDhcpBackend bkd = dhcpBackends.get(ptype); if (bkd == null) { throw new CloudRuntimeException(String.format("unable to find NetworkServiceDhcpBackend[provider type: %s]", ptype)); } map.put(bkd, lst); if (logger.isTraceEnabled()) { logger.trace(String.format("DHCP Backend[%s] is about to apply entries: \n%s", bkd.getClass().getName(), lst)); } } return map; } @Override public boolean start() { populateExtensions(); return true; } @Override public boolean stop() { return true; } @Override public void vmDefaultL3NetworkChanged(VmInstanceInventory vm, String previousL3, String nowL3) { List<String> l3Uuids = new ArrayList<String>(); if (previousL3 != null) { l3Uuids.add(previousL3); } if (nowL3 != null) { l3Uuids.add(nowL3); } SimpleQuery<L3NetworkVO> q = dbf.createQuery(L3NetworkVO.class); q.add(L3NetworkVO_.uuid, Op.IN, l3Uuids); List<L3NetworkVO> vos = q.list(); List<L3NetworkInventory> invs = L3NetworkInventory.valueOf(vos); Map<NetworkServiceProviderType, List<L3NetworkInventory>> providerMap = getNetworkServiceProviderMap(NetworkServiceType.DHCP, invs); for (Map.Entry<NetworkServiceProviderType, 
List<L3NetworkInventory>> e : providerMap.entrySet()) { NetworkServiceProviderType ptype = e.getKey(); NetworkServiceDhcpBackend bkd = dhcpBackends.get(ptype); if (bkd == null) { throw new CloudRuntimeException(String.format("unable to find NetworkServiceDhcpBackend[provider type: %s]", ptype)); } bkd.vmDefaultL3NetworkChanged(vm, previousL3, nowL3, new Completion(null) { @Override public void success() { // pass } @Override public void fail(ErrorCode errorCode) { logger.warn(String.format("unable to change the VM[uuid:%s]'s default L3 network in the DHCP backend, %s. You may need to reboot" + " the VM to use the new default L3 network setting", vm.getUuid(), errorCode)); } }); } } }
apache-2.0
ivankelly/bookkeeper
stream/statelib/src/test/java/org/apache/bookkeeper/statelib/impl/mvcc/TestMVCCAsyncBytesStoreImpl.java
17417
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.bookkeeper.statelib.impl.mvcc; import static com.google.common.base.Charsets.UTF_8; import static org.apache.bookkeeper.common.concurrent.FutureUtils.result; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import com.google.common.collect.Lists; import java.io.File; import java.net.URI; import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.stream.IntStream; import lombok.Cleanup; import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.api.kv.op.PutOp; import org.apache.bookkeeper.api.kv.options.Options; import org.apache.bookkeeper.api.kv.result.Code; import org.apache.bookkeeper.api.kv.result.KeyValue; import org.apache.bookkeeper.api.kv.result.PutResult; import org.apache.bookkeeper.common.coder.ByteArrayCoder; import org.apache.bookkeeper.common.concurrent.FutureUtils; import org.apache.bookkeeper.statelib.api.StateStoreSpec; import 
org.apache.bookkeeper.statelib.api.exceptions.MVCCStoreException; import org.apache.distributedlog.DLMTestUtil; import org.apache.distributedlog.TestDistributedLogBase; import org.apache.distributedlog.api.namespace.Namespace; import org.apache.distributedlog.api.namespace.NamespaceBuilder; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; /** * Unit test of {@link MVCCAsyncBytesStoreImpl}. */ @Slf4j public class TestMVCCAsyncBytesStoreImpl extends TestDistributedLogBase { @Rule public final TemporaryFolder testDir = new TemporaryFolder(); private static URI uri; private static Namespace namespace; @BeforeClass public static void setupCluster() throws Exception { TestDistributedLogBase.setupCluster(); uri = DLMTestUtil.createDLMURI(zkPort, "/mvcc"); conf.setPeriodicFlushFrequencyMilliSeconds(2); conf.setWriteLockEnabled(false); namespace = NamespaceBuilder.newBuilder() .conf(conf) .uri(uri) .clientId("test-mvcc-async-store") .build(); } @AfterClass public static void teardownCluster() throws Exception { if (null != namespace) { namespace.close(); } TestDistributedLogBase.teardownCluster(); } private String streamName; private File tempDir; private MVCCAsyncBytesStoreImpl store; @Before @Override public void setup() throws Exception { super.setup(); ensureURICreated(uri); tempDir = testDir.newFolder(); store = new MVCCAsyncBytesStoreImpl( () -> new MVCCStoreImpl<>(), () -> namespace); } private StateStoreSpec initSpec(String streamName) { return StateStoreSpec.builder() .name(streamName) .keyCoder(ByteArrayCoder.of()) .valCoder(ByteArrayCoder.of()) .stream(streamName) .localStateStoreDir(tempDir) .build(); } @After @Override public void teardown() throws Exception { if (null != streamName) { namespace.deleteLog(streamName); } if (null != store) { store.close(); } super.teardown(); } @Test(expected = 
NullPointerException.class) public void testInitMissingStreamName() throws Exception { this.streamName = "test-init-missing-stream-name"; StateStoreSpec spec = StateStoreSpec.builder() .name(streamName) .keyCoder(ByteArrayCoder.of()) .valCoder(ByteArrayCoder.of()) .localStateStoreDir(tempDir) .build(); result(store.init(spec)); } @Test public void testInit() throws Exception { this.streamName = "test-init"; StateStoreSpec spec = initSpec(streamName); result(store.init(spec)); assertTrue(store.ownWriteScheduler()); assertFalse(store.ownReadScheduler()); assertEquals(streamName, store.name()); } // // Put & Range Ops // private byte[] getKey(int i) { return String.format("key-%05d", i).getBytes(UTF_8); } private byte[] getValue(int i) { return String.format("value-%05d", i).getBytes(UTF_8); } private List<PutResult<byte[], byte[]>> writeKVs(int numPairs, boolean prevKv) throws Exception { List<CompletableFuture<PutResult<byte[], byte[]>>> results = Lists.newArrayListWithExpectedSize(numPairs); for (int i = 0; i < numPairs; i++) { results.add(writeKV(i, prevKv)); } return result(FutureUtils.collect(results)); } private CompletableFuture<PutResult<byte[], byte[]>> writeKV(int i, boolean prevKv) { PutOp<byte[], byte[]> op = store.getOpFactory().newPut( getKey(i), getValue(i), Options.putAndGet()); return store.put(op).whenComplete((value, cause) -> op.close()); } @Test public void testBasicOps() throws Exception { this.streamName = "test-basic-ops"; StateStoreSpec spec = initSpec(streamName); result(store.init(spec)); // normal put { assertNull(result(store.get(getKey(0)))); result(store.put(getKey(0), getValue(0))); assertArrayEquals(getValue(0), result(store.get(getKey(0)))); } // putIfAbsent { // failure case assertArrayEquals(getValue(0), result(store.putIfAbsent(getKey(0), getValue(99)))); assertArrayEquals(getValue(0), result(store.get(getKey(0)))); // success case byte[] key1 = getKey(1); assertNull(result(store.putIfAbsent(key1, getValue(1)))); 
assertArrayEquals(getValue(1), result(store.get(key1))); } // vPut { // key-not-found case int key = 2; int initialVal = 2; int casVal = 99; try { result(store.vPut(getKey(key), getValue(initialVal), 100L)); fail("key2 doesn't exist yet"); } catch (MVCCStoreException e) { assertEquals(Code.KEY_NOT_FOUND, e.getCode()); } // vPut(k, v, -1L) try { result(store.vPut(getKey(key), getValue(initialVal), -1L)); fail("key2 doesn't exist yet"); } catch (MVCCStoreException e) { assertEquals(Code.KEY_NOT_FOUND, e.getCode()); } // put(key2, v) assertNull(result(store.putIfAbsent(getKey(key), getValue(initialVal)))); // vPut(key2, v, 0) assertEquals(1L, result(store.vPut(getKey(key), getValue(casVal), 0)).longValue()); assertArrayEquals(getValue(casVal), result(store.get(getKey(key)))); } // rPut { // key-not-found case int key = 3; int initialVal = 3; int casVal = 99; try { result(store.rPut(getKey(key), getValue(initialVal), 100L)); fail("key2 doesn't exist yet"); } catch (MVCCStoreException e) { assertEquals(Code.KEY_NOT_FOUND, e.getCode()); } // vPut(k, v, -1L) try { result(store.rPut(getKey(key), getValue(initialVal), -1L)); fail("key2 doesn't exist yet"); } catch (MVCCStoreException e) { assertEquals(Code.KEY_NOT_FOUND, e.getCode()); } // put(key2, v) assertNull(result(store.putIfAbsent(getKey(key), getValue(initialVal)))); KeyValue<byte[], byte[]> kv = result(store.getKeyValue(getKey(key))); long revision = kv.modifiedRevision(); assertArrayEquals(getValue(initialVal), kv.value()); // vPut(key2, v, 0) assertEquals(revision + 1, result(store.rPut(getKey(key), getValue(casVal), revision)).longValue()); assertArrayEquals(getValue(casVal), result(store.get(getKey(key)))); } // delete(k) { // key not found assertNull(result(store.delete(getKey(99)))); // key exists int key = 0; assertArrayEquals(getValue(key), result(store.get(getKey(key)))); assertArrayEquals(getValue(key), result(store.delete(getKey(key)))); assertNull(result(store.get(getKey(key)))); } // delete(k, v) { // 
key not found assertNull(result(store.delete(getKey(99)))); // key exists int key = 1; assertArrayEquals(getValue(key), result(store.get(getKey(key)))); assertFalse(result(store.delete(getKey(key), getValue(99)))); assertArrayEquals(getValue(key), result(store.get(getKey(key)))); assertTrue(result(store.delete(getKey(key), getValue(key)))); assertNull(result(store.get(getKey(key)))); } // vDelete { int key = 2; @Cleanup KeyValue<byte[], byte[]> kv = result(store.getKeyValue(getKey(key))); long expectedVersion = kv.version(); try { result(store.vDelete(getKey(key), expectedVersion + 1)); fail("should fail to delete a key with wrong version"); } catch (MVCCStoreException e) { assertEquals(Code.BAD_REVISION, e.getCode()); } // vDelete(k, -1L) try { result(store.vDelete(getKey(key), -1L)); fail("Should fail to delete a key with version(-1)"); } catch (MVCCStoreException e) { assertEquals(Code.BAD_REVISION, e.getCode()); } // vDelete(key2, version) @Cleanup KeyValue<byte[], byte[]> deletedKv = (result(store.vDelete(getKey(key), expectedVersion))); assertNotNull(deletedKv); assertEquals(kv.createRevision(), deletedKv.createRevision()); assertEquals(kv.modifiedRevision(), deletedKv.modifiedRevision()); assertEquals(kv.version(), deletedKv.version()); assertArrayEquals(kv.value(), deletedKv.value()); assertNull(result(store.get(getKey(key)))); } // rPut { int key = 3; @Cleanup KeyValue<byte[], byte[]> kv = result(store.getKeyValue(getKey(key))); long expectedRevision = kv.modifiedRevision(); try { result(store.rDelete(getKey(key), expectedRevision + 1)); fail("should fail to delete a key with wrong revision"); } catch (MVCCStoreException e) { assertEquals(Code.BAD_REVISION, e.getCode()); } // rDelete(k, -1L) try { result(store.rDelete(getKey(key), -1L)); fail("Should fail to delete a key with revision(-1)"); } catch (MVCCStoreException e) { assertEquals(Code.BAD_REVISION, e.getCode()); } // rDelete(key2, revision) @Cleanup KeyValue<byte[], byte[]> deletedKv = 
(result(store.rDelete(getKey(key), expectedRevision))); assertNotNull(deletedKv); assertEquals(kv.createRevision(), deletedKv.createRevision()); assertEquals(kv.modifiedRevision(), deletedKv.modifiedRevision()); assertEquals(kv.version(), deletedKv.version()); assertArrayEquals(kv.value(), deletedKv.value()); assertNull(result(store.get(getKey(key)))); } // increment failure { int ki = 3; byte[] key = getKey(ki); result(store.put(key, getValue(ki))); try { result(store.increment(key, 100L)); fail("Can't increment a non-number key"); } catch (MVCCStoreException e) { assertEquals(Code.ILLEGAL_OP, e.getCode()); } } // increment success { int ki = 4; byte[] key = getKey(ki); for (int i = 0; i < 5; i++) { result(store.increment(key, 100L)); @Cleanup KeyValue<byte[], byte[]> kv = result(store.getKeyValue(key)); assertEquals(100L * (i + 1), kv.numberValue()); } } } @Test public void testPutGetDeleteRanges() throws Exception { this.streamName = "test-put-kvs"; StateStoreSpec spec = initSpec(streamName); result(store.init(spec)); int numPairs = 100; List<PutResult<byte[], byte[]>> kvs = writeKVs(numPairs, true); assertEquals(numPairs, kvs.size()); for (PutResult<byte[], byte[]> kv : kvs) { assertEquals(Code.OK, kv.code()); assertNull(kv.prevKv()); kv.close(); } verifyRange(20, 70, 2, 2, 0); List<KeyValue<byte[], byte[]>> prevKvs = result(store.deleteRange(getKey(20), getKey(70))); assertNotNull(prevKvs); verifyRecords( prevKvs, 20, 70, 2, 2, 0); prevKvs.forEach(KeyValue::close); prevKvs = result(store.range(getKey(20), getKey(70))); assertTrue(prevKvs.isEmpty()); } private void verifyRange(int startKey, int endKey, int startCreateRevision, int startModRevision, int expectedVersion) throws Exception { int count = endKey - startKey + 1; List<KeyValue<byte[], byte[]>> kvs = result(store.range(getKey(startKey), getKey(endKey))); assertEquals(count, kvs.size()); verifyRecords(kvs, startKey, endKey, startCreateRevision, startModRevision, expectedVersion); 
kvs.forEach(KeyValue::close); } private void verifyRecords(List<KeyValue<byte[], byte[]>> kvs, int startKey, int endKey, int startCreateRevision, int startModRevision, int expectedVersion) { int idx = startKey; for (KeyValue<byte[], byte[]> record : kvs) { assertArrayEquals(getKey(idx), record.key()); assertArrayEquals(getValue(idx), record.value()); // revision - starts from 1, but the first revision is used for nop barrier record. assertEquals(idx + startCreateRevision, record.createRevision()); assertEquals(idx + startModRevision, record.modifiedRevision()); assertEquals(expectedVersion, record.version()); ++idx; } assertEquals(endKey + 1, idx); } @Test public void testReplayJournal() throws Exception { this.streamName = "test-replay-journal"; StateStoreSpec spec = initSpec(streamName); result(store.init(spec)); int numKvs = 10; // putIfAbsent IntStream.range(0, numKvs) .forEach(i -> { try { result(store.putIfAbsent(getKey(i), getValue(100 + i))); } catch (Exception e) { log.error("Failed to put kv pair ({})", i, e); } }); log.info("Closing the store '{}' ...", streamName); // close the store store.close(); log.info("Closed the store '{}' ...", streamName); // open the store again to replay the journal. store = new MVCCAsyncBytesStoreImpl( () -> new MVCCStoreImpl<>(), () -> namespace); spec = StateStoreSpec.builder() .name(streamName) .keyCoder(ByteArrayCoder.of()) .valCoder(ByteArrayCoder.of()) .stream(streamName) .localStateStoreDir(testDir.newFolder()) .build(); result(store.init(spec)); // verify the key/value pairs for (int i = 0; i < numKvs; i++) { byte[] value = result(store.get(getKey(i))); assertNotNull(value); assertArrayEquals(getValue(100 + i), value); } } }
apache-2.0
google/ExoPlayer
library/common/src/test/java/com/google/android/exoplayer2/PlaybackParametersTest.java
1275
/* * Copyright 2021 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.android.exoplayer2; import static com.google.common.truth.Truth.assertThat; import androidx.test.ext.junit.runners.AndroidJUnit4; import org.junit.Test; import org.junit.runner.RunWith; /** Unit tests for {@link PlaybackParameters}. */ @RunWith(AndroidJUnit4.class) public class PlaybackParametersTest { @Test public void roundTripViaBundle_ofPlaybackParameters_yieldsEqualInstance() { PlaybackParameters playbackParameters = new PlaybackParameters(/* speed= */ 2.9f, /* pitch= */ 1.2f); assertThat(PlaybackParameters.CREATOR.fromBundle(playbackParameters.toBundle())) .isEqualTo(playbackParameters); } }
apache-2.0
monetate/druid
extensions-core/google-extensions/src/test/java/org/apache/druid/storage/google/GoogleStorageTest.java
3707
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.druid.storage.google; import com.google.api.client.googleapis.testing.auth.oauth2.MockGoogleCredential; import com.google.api.client.http.ByteArrayContent; import com.google.api.client.http.HttpRequestInitializer; import com.google.api.client.json.jackson2.JacksonFactory; import com.google.api.client.testing.http.MockHttpTransport; import com.google.api.client.testing.http.MockLowLevelHttpRequest; import com.google.api.client.testing.http.MockLowLevelHttpResponse; import com.google.api.services.storage.Storage; import com.google.common.base.Suppliers; import org.apache.druid.java.util.common.StringUtils; import org.junit.Assert; import org.junit.Test; import java.io.IOException; import java.io.InputStream; public class GoogleStorageTest { @Test public void testGet() throws IOException { String content = "abcdefghij"; MockLowLevelHttpResponse response = new MockLowLevelHttpResponse(); response.setContent(content); GoogleStorage googleStorage = makeGoogleStorage(response); InputStream is = googleStorage.get("bucket", "path"); String actual = GoogleTestUtils.readAsString(is); Assert.assertEquals(content, actual); } @Test public void testGetWithOffset() throws IOException { String 
content = "abcdefghij"; MockLowLevelHttpResponse response = new MockLowLevelHttpResponse(); response.setContent(content); GoogleStorage googleStorage = makeGoogleStorage(response); InputStream is = googleStorage.get("bucket", "path", 2); String actual = GoogleTestUtils.readAsString(is); Assert.assertEquals(content.substring(2), actual); } @Test public void testInsert() throws IOException { String content = "abcdefghij"; MockLowLevelHttpResponse response = new MockLowLevelHttpResponse(); response.addHeader("Location", "http://random-path"); response.setContent("{}"); MockHttpTransport transport = new MockHttpTransport.Builder().setLowLevelHttpResponse(response).build(); GoogleStorage googleStorage = makeGoogleStorage(transport); googleStorage.insert("bucket", "path", new ByteArrayContent("text/html", StringUtils.toUtf8(content))); MockLowLevelHttpRequest request = transport.getLowLevelHttpRequest(); String actual = request.getContentAsString(); Assert.assertEquals(content, actual); } private GoogleStorage makeGoogleStorage(MockLowLevelHttpResponse response) { MockHttpTransport transport = new MockHttpTransport.Builder().setLowLevelHttpResponse(response).build(); return makeGoogleStorage(transport); } private GoogleStorage makeGoogleStorage(MockHttpTransport transport) { HttpRequestInitializer initializer = new MockGoogleCredential.Builder().build(); Storage storage = new Storage(transport, JacksonFactory.getDefaultInstance(), initializer); return new GoogleStorage(Suppliers.ofInstance(storage)); } }
apache-2.0
emag/wildfly-swarm
core/container/src/main/java/org/wildfly/swarm/container/config/ConfigNode.java
6392
package org.wildfly.swarm.container.config;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import org.wildfly.swarm.spi.api.config.ConfigKey;
import org.wildfly.swarm.spi.api.config.ConfigTree;
import org.wildfly.swarm.spi.api.config.SimpleKey;

/**
 * A configuration node capable of having a direct value in addition to key/value children.
 *
 * <p>Nodes form a tree addressed by {@link ConfigKey} paths: each node may hold
 * its own scalar {@code value} and, simultaneously, named {@code children} nodes
 * keyed by {@link SimpleKey}.</p>
 *
 * @author Bob McWhirter
 */
public class ConfigNode implements ConfigTree {

    /** Creates an empty node with no value and no children. */
    public ConfigNode() {
    }

    /** Creates a node pre-populated with the given direct value. */
    ConfigNode(Object value) {
        this.value = value;
    }

    /**
     * Set the value of an immediate child.
     *
     * <p>A plain value is wrapped in a new leaf {@code ConfigNode}; a
     * {@code ConfigNode} is attached as-is (replacing any previous child).</p>
     *
     * @param key   The simple child key.
     * @param value The value to set.
     */
    public void child(SimpleKey key, Object value) {
        if (value instanceof ConfigNode) {
            this.children.put(key, (ConfigNode) value);
        } else {
            this.children.put(key, new ConfigNode(value));
        }
    }

    /**
     * Set the value of an immediate child.
     *
     * @param key   The simple child key.
     * @param value The value to set.
     */
    public void child(String key, Object value) {
        child(new SimpleKey(key), value);
    }

    /**
     * Set the value of a descendant, parsing {@code key} into a path first.
     *
     * <p>Any intermediate leafs will be created as-needed.</p>
     *
     * @param key   The possibly-complex key to a descendant.
     * @param value The value to set.
     */
    public void recursiveChild(String key, Object value) {
        recursiveChild(ConfigKey.parse(key), value);
    }

    /**
     * Set the value of a descendant.
     *
     * <p>Any intermediate leafs will be created as-needed.</p>
     *
     * @param key   The possibly-complex key to a descendant.
     * @param value The value to set.
     */
    public void recursiveChild(ConfigKey key, Object value) {
        SimpleKey head = key.head();
        if (head == ConfigKey.EMPTY) {
            // NOTE(review): no return after setting the value — for an empty key
            // control falls through to the child-handling below. Presumably
            // subkey(1) of an empty key is also EMPTY, which would then register
            // the value under the EMPTY head as well; verify callers never pass
            // an empty key here, or that this double registration is harmless.
            value(value);
        }
        ConfigKey rest = key.subkey(1);
        if (rest == ConfigKey.EMPTY) {
            // Last path segment: attach directly as an immediate child.
            child(head, value);
        } else {
            // Intermediate segment: create the hop on demand, then recurse.
            ConfigNode child = child(head);
            if (child == null) {
                child = new ConfigNode();
                child(head, child);
            }
            child.recursiveChild(rest, value);
        }
    }

    /**
     * Walk the tree along {@code key}.
     *
     * @param key The possibly-complex path to a descendant.
     * @return The node at that path, or {@code null} if any hop is missing.
     */
    ConfigNode descendant(ConfigKey key) {
        SimpleKey head = key.head();
        if (head == ConfigKey.EMPTY) {
            // Empty path addresses this node itself.
            return this;
        }
        ConfigKey rest = key.subkey(1);
        ConfigNode child = child(head);
        if (child == null) {
            return null;
        }
        return child.descendant(rest);
    }

    /**
     * Retrieve the immediate child node.
     *
     * @param key The child's key.
     * @return The node or {@code null} is none present.
     */
    ConfigNode child(SimpleKey key) {
        ConfigNode child = this.children.get(key);
        return child;
    }

    /**
     * Retrieve the immediate child node.
     *
     * @param key The child's key.
     * @return The node or {@code null} is none present.
     */
    ConfigNode child(String key) {
        return child(new SimpleKey(key));
    }

    /**
     * Retrieve all immediate children keys.
     *
     * @return All immediate children keys.
     */
    public Set<SimpleKey> childrenKeys() {
        return this.children.keySet();
    }

    /**
     * Retrieve all descendent keys.
     *
     * <p>If this node itself holds a value, {@link ConfigKey#EMPTY} is included
     * to represent it. Children keys are prefixed onto their own recursive keys.</p>
     *
     * @return A stream of all descendent keys.
     */
    public Stream<ConfigKey> allKeysRecursively() {
        Stream<ConfigKey> str = Stream.empty();
        if (this.value != null) {
            str = Stream.of(ConfigKey.EMPTY);
        }
        str = Stream.concat(str, this.children.entrySet()
                .stream()
                .flatMap((kv) -> {
                    ConfigKey key = kv.getKey();
                    Object value = kv.getValue();
                    if (value instanceof ConfigNode) {
                        return ((ConfigNode) value).allKeysRecursively()
                                .map(childKey -> key.append(childKey));
                    }
                    return Stream.empty();
                }));
        return str;
    }

    /**
     * Set the value on this node.
     *
     * @param value The value; must not itself be a {@code ConfigNode}.
     */
    void value(Object value) {
        if (value instanceof ConfigNode) {
            throw new RuntimeException("Cannot set config-node as a value of a tree config-node");
        }
        this.value = value;
    }

    /**
     * Retrieve a value.
     *
     * <p>An empty key addresses this node: if it has no direct value the node
     * itself is returned (so subtrees are navigable), otherwise the value.</p>
     *
     * @param key The possibly-complex key of the value to retrieve.
     * @return The value of {@code null} if none.
     */
    public Object valueOf(ConfigKey key) {
        SimpleKey head = key.head();
        if (head == ConfigKey.EMPTY) {
            // NOTE(review): `this.children` is initialized at declaration and never
            // nulled, so the second clause appears always true; the test is
            // effectively `this.value == null` — confirm before simplifying.
            if (this.value == null && this.children != null) {
                return this;
            }
            return this.value;
        }
        ConfigNode child = child(head);
        if (child != null) {
            ConfigKey rest = key.subkey(1);
            return child.valueOf(rest);
        }
        return null;
    }

    /**
     * @return {@code true} when every child key is purely numeric, i.e. the
     * children look like list indices. NOTE(review): an empty children map also
     * matches (allMatch on empty stream), so a childless node reads as list-like.
     */
    protected boolean isListLike() {
        return this.children.keySet().stream()
                .allMatch(e -> e.toString().matches("^[0-9]*$"));
    }

    /**
     * Project this node to a plain object: the direct value when present,
     * otherwise a {@link List} (numeric keys) or a {@link Map}.
     */
    public Object asObject() {
        if (this.value != null) {
            return this.value;
        }
        if (isListLike()) {
            return asList();
        }
        return asMap();
    }

    /** Project the children, in value order, as a list of plain objects. */
    public List asList() {
        return this.children.values().stream()
                .map(e -> e.asObject())
                .collect(Collectors.toList());
    }

    /** Project the children as a string-keyed map of plain objects. */
    public Map asMap() {
        Map map = new HashMap();
        this.children.entrySet()
                .forEach(entry -> {
                    map.put(entry.getKey().toString(), entry.getValue().asObject());
                });
        return map;
    }

    public String toString() {
        return "[ConfigNode: (" + System.identityHashCode(this.children) + ") children=" + this.children + "; value=" + this.value + "]";
    }

    // Children keyed by single path segment; never null.
    private Map<SimpleKey, ConfigNode> children = new HashMap<>();

    // Optional direct value of this node; may coexist with children.
    private Object value;
}
apache-2.0
palantir/atlasdb
timelock-impl/src/main/java/com/palantir/atlasdb/timelock/auth/api/Password.java
899
/*
 * (c) Copyright 2019 Palantir Technologies Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.palantir.atlasdb.timelock.auth.api;

import org.immutables.value.Value;

/**
 * Immutable wrapper around a raw password string used by timelock auth.
 *
 * <p>NOTE(review): the Immutables-generated {@code toString()} for this value
 * type includes {@code value()}, so logging a {@code Password} would expose the
 * secret — presumably callers never log it; consider marking the accessor
 * {@code @Value.Redacted} to be safe (verify against usages).</p>
 */
@Value.Immutable
public interface Password {
    /** The raw password text. */
    String value();

    /** Static factory wrapping {@code value} in an immutable {@code Password}. */
    static Password of(String value) {
        return ImmutablePassword.builder().value(value).build();
    }
}
apache-2.0
apache/ant-ivyde
org.apache.ivyde.eclipse/src/java/org/apache/ivyde/internal/eclipse/ui/preferences/MappingSetupPreferencePage.java
2561
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package org.apache.ivyde.internal.eclipse.ui.preferences; import org.apache.ivyde.internal.eclipse.IvyPlugin; import org.apache.ivyde.internal.eclipse.ui.MappingSetupEditor; import org.eclipse.jface.preference.PreferencePage; import org.eclipse.swt.SWT; import org.eclipse.swt.layout.GridData; import org.eclipse.swt.widgets.Composite; import org.eclipse.swt.widgets.Control; import org.eclipse.ui.IWorkbench; import org.eclipse.ui.IWorkbenchPreferencePage; public class MappingSetupPreferencePage extends PreferencePage implements IWorkbenchPreferencePage { /** the ID of the preference page */ public static final String PREFERENCE_PAGE_ID = "org.apache.ivyde.eclipse.ui.preferences.MappingSetupPreferencePage"; private MappingSetupEditor mappingSetupComposite; public MappingSetupPreferencePage() { setPreferenceStore(IvyPlugin.getDefault().getPreferenceStore()); } public void init(IWorkbench workbench) { setPreferenceStore(IvyPlugin.getDefault().getPreferenceStore()); } protected Control createContents(Composite parent) { mappingSetupComposite = new MappingSetupEditor(parent, SWT.NONE); mappingSetupComposite.setLayoutData(new GridData(GridData.FILL, GridData.FILL, true, true)); 
mappingSetupComposite.init(IvyPlugin.getPreferenceStoreHelper().getMappingSetup()); return mappingSetupComposite; } public boolean performOk() { IvyDEPreferenceStoreHelper helper = IvyPlugin.getPreferenceStoreHelper(); helper.setMappingSetup(mappingSetupComposite.getMappingSetup()); return true; } protected void performDefaults() { mappingSetupComposite.init(PreferenceInitializer.DEFAULT_MAPPING_SETUP); } }
apache-2.0
y1011/cas-server
cas-server-core-monitor/src/test/java/org/jasig/cas/monitor/SessionMonitorTests.java
3177
package org.jasig.cas.monitor; import org.jasig.cas.mock.MockService; import org.jasig.cas.ticket.ExpirationPolicy; import org.jasig.cas.ticket.TicketGrantingTicketImpl; import org.jasig.cas.ticket.UniqueTicketIdGenerator; import org.jasig.cas.ticket.registry.DefaultTicketRegistry; import org.jasig.cas.ticket.registry.TicketRegistry; import org.jasig.cas.ticket.support.HardTimeoutExpirationPolicy; import org.jasig.cas.util.DefaultUniqueTicketIdGenerator; import org.junit.Before; import org.junit.Test; import static org.junit.Assert.*; /** * Unit test for {@link SessionMonitor} class. * * @author Marvin S. Addison * @since 3.5.0 */ public class SessionMonitorTests { private static final ExpirationPolicy TEST_EXP_POLICY = new HardTimeoutExpirationPolicy(10000); private static final UniqueTicketIdGenerator GENERATOR = new DefaultUniqueTicketIdGenerator(); private DefaultTicketRegistry defaultRegistry; private SessionMonitor monitor; @Before public void setUp() { this.defaultRegistry = new DefaultTicketRegistry(); this.monitor = new SessionMonitor(); this.monitor.setTicketRegistry(this.defaultRegistry); } @Test public void verifyObserveOk() throws Exception { addTicketsToRegistry(this.defaultRegistry, 5, 10); final SessionStatus status = this.monitor.observe(); assertEquals(5, status.getSessionCount()); assertEquals(10, status.getServiceTicketCount()); assertEquals(StatusCode.OK, status.getCode()); } @Test public void verifyObserveWarnSessionsExceeded() throws Exception { addTicketsToRegistry(this.defaultRegistry, 10, 1); this.monitor.setSessionCountWarnThreshold(5); final SessionStatus status = this.monitor.observe(); assertEquals(StatusCode.WARN, status.getCode()); assertTrue(status.getDescription().contains("Session count")); } @Test public void verifyObserveWarnServiceTicketsExceeded() throws Exception { addTicketsToRegistry(this.defaultRegistry, 1, 10); this.monitor.setServiceTicketCountWarnThreshold(5); final SessionStatus status = this.monitor.observe(); 
assertEquals(StatusCode.WARN, status.getCode()); assertTrue(status.getDescription().contains("Service ticket count")); } private void addTicketsToRegistry(final TicketRegistry registry, final int tgtCount, final int stCount) { TicketGrantingTicketImpl ticket = null; for (int i = 0; i < tgtCount; i++) { ticket = new TicketGrantingTicketImpl( GENERATOR.getNewTicketId("TGT"), org.jasig.cas.authentication.TestUtils.getAuthentication(), TEST_EXP_POLICY); registry.addTicket(ticket); } if (ticket != null) { for (int i = 0; i < stCount; i++) { registry.addTicket(ticket.grantServiceTicket( GENERATOR.getNewTicketId("ST"), new MockService("junit"), TEST_EXP_POLICY, false, true)); } } } }
apache-2.0
52North/OpenSensorSearch
sir-it/src/test/java/org/n52/sir/oldIT/InsertSensorStatus.java
1768
/**
 * Copyright (C) 2012 52°North Initiative for Geospatial Open Source Software GmbH
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.n52.sir.oldIT;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.File;

import org.apache.xmlbeans.XmlObject;
import org.junit.Test;
import org.x52North.sir.x032.InsertSensorStatusRequestDocument;
import org.x52North.sir.x032.InsertSensorStatusResponseDocument;

/**
 * Sends the bundled InsertSensorStatus example request to the service and
 * checks that the response echoes the sensor ID and validates against the schema.
 *
 * @author Daniel Nüst
 */
public class InsertSensorStatus extends SirTest {

    @Test
    public void testPostExample() throws Exception {
        // Load and parse the canned example request document.
        File exampleFile = getPostExampleFile("InsertSensorStatus.xml");
        InsertSensorStatusRequestDocument request = InsertSensorStatusRequestDocument.Factory.parse(exampleFile);

        // POST it and re-parse the raw reply into a typed response document.
        XmlObject rawReply = client.xSendPostRequest(request);
        InsertSensorStatusResponseDocument reply =
                InsertSensorStatusResponseDocument.Factory.parse(rawReply.getDomNode());

        // The response must refer to the same sensor and be schema-valid.
        String requestedSensorId = request.getInsertSensorStatusRequest().getStatusDescription().getSensorIDInSIR();
        assertEquals(requestedSensorId, reply.getInsertSensorStatusResponse().getSensorIDInSIR());
        assertTrue(reply.validate());
    }

}
apache-2.0
prabushi/devstudio-tooling-esb
plugins/org.wso2.developerstudio.eclipse.gmf.esb/src/org/wso2/developerstudio/eclipse/gmf/esb/TransactionMediator.java
4390
/**
 * <copyright>
 * </copyright>
 *
 * $Id$
 */
package org.wso2.developerstudio.eclipse.gmf.esb;

/**
 * <!-- begin-user-doc -->
 * A representation of the model object '<em><b>Transaction Mediator</b></em>'.
 * <!-- end-user-doc -->
 *
 * <p>
 * The following features are supported:
 * </p>
 * <ul>
 *   <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.TransactionMediator#getAction <em>Action</em>}</li>
 *   <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.TransactionMediator#getInputConnector <em>Input Connector</em>}</li>
 *   <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.TransactionMediator#getOutputConnector <em>Output Connector</em>}</li>
 * </ul>
 *
 * @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getTransactionMediator()
 * @model
 * @generated
 */
public interface TransactionMediator extends Mediator {
    /**
     * Returns the value of the '<em><b>Action</b></em>' attribute.
     * The literals are from the enumeration {@link org.wso2.developerstudio.eclipse.gmf.esb.TransactionAction}.
     * <!-- begin-user-doc -->
     * <p>
     * The transaction operation this mediator performs (e.g. begin, commit,
     * rollback), drawn from {@link org.wso2.developerstudio.eclipse.gmf.esb.TransactionAction}.
     * </p>
     * <!-- end-user-doc -->
     * @return the value of the '<em>Action</em>' attribute.
     * @see org.wso2.developerstudio.eclipse.gmf.esb.TransactionAction
     * @see #setAction(TransactionAction)
     * @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getTransactionMediator_Action()
     * @model
     * @generated
     */
    TransactionAction getAction();

    /**
     * Sets the value of the '{@link org.wso2.developerstudio.eclipse.gmf.esb.TransactionMediator#getAction <em>Action</em>}' attribute.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @param value the new value of the '<em>Action</em>' attribute.
     * @see org.wso2.developerstudio.eclipse.gmf.esb.TransactionAction
     * @see #getAction()
     * @generated
     */
    void setAction(TransactionAction value);

    /**
     * Returns the value of the '<em><b>Input Connector</b></em>' containment reference.
     * <!-- begin-user-doc -->
     * <p>
     * The connector through which messages flow into this mediator on the
     * ESB diagram; owned (containment) by this mediator.
     * </p>
     * <!-- end-user-doc -->
     * @return the value of the '<em>Input Connector</em>' containment reference.
     * @see #setInputConnector(TransactionMediatorInputConnector)
     * @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getTransactionMediator_InputConnector()
     * @model containment="true"
     * @generated
     */
    TransactionMediatorInputConnector getInputConnector();

    /**
     * Sets the value of the '{@link org.wso2.developerstudio.eclipse.gmf.esb.TransactionMediator#getInputConnector <em>Input Connector</em>}' containment reference.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @param value the new value of the '<em>Input Connector</em>' containment reference.
     * @see #getInputConnector()
     * @generated
     */
    void setInputConnector(TransactionMediatorInputConnector value);

    /**
     * Returns the value of the '<em><b>Output Connector</b></em>' containment reference.
     * <!-- begin-user-doc -->
     * <p>
     * The connector through which messages leave this mediator on the
     * ESB diagram; owned (containment) by this mediator.
     * </p>
     * <!-- end-user-doc -->
     * @return the value of the '<em>Output Connector</em>' containment reference.
     * @see #setOutputConnector(TransactionMediatorOutputConnector)
     * @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getTransactionMediator_OutputConnector()
     * @model containment="true"
     * @generated
     */
    TransactionMediatorOutputConnector getOutputConnector();

    /**
     * Sets the value of the '{@link org.wso2.developerstudio.eclipse.gmf.esb.TransactionMediator#getOutputConnector <em>Output Connector</em>}' containment reference.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @param value the new value of the '<em>Output Connector</em>' containment reference.
     * @see #getOutputConnector()
     * @generated
     */
    void setOutputConnector(TransactionMediatorOutputConnector value);

} // TransactionMediator
apache-2.0
ern/elasticsearch
server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedIT.java
36667
/* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the Elastic License * 2.0 and the Server Side Public License, v 1; you may not use this file except * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.nested.Nested; import org.elasticsearch.search.aggregations.bucket.nested.ReverseNested; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.ValueCount; import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.search.aggregations.AggregationBuilders.count; import static org.elasticsearch.search.aggregations.AggregationBuilders.filter; import static 
org.elasticsearch.search.aggregations.AggregationBuilders.nested; import static org.elasticsearch.search.aggregations.AggregationBuilders.reverseNested; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.sameInstance; import static org.hamcrest.core.IsNull.notNullValue; @ESIntegTestCase.SuiteScopeTestCase public class ReverseNestedIT extends ESIntegTestCase { @Override public void setupSuiteScopeCluster() throws Exception { assertAcked( prepareCreate("idx1").setMapping( jsonBuilder().startObject() .startObject("properties") .startObject("field1") .field("type", "keyword") .endObject() .startObject("alias") .field("type", "alias") .field("path", "field1") .endObject() .startObject("nested1") .field("type", "nested") .startObject("properties") .startObject("field2") .field("type", "keyword") .endObject() .endObject() .endObject() .endObject() .endObject() ) ); assertAcked( prepareCreate("idx2").setMapping( jsonBuilder().startObject() .startObject("properties") .startObject("nested1") .field("type", "nested") .startObject("properties") .startObject("field1") .field("type", "keyword") .endObject() .startObject("nested2") .field("type", "nested") .startObject("properties") .startObject("field2") .field("type", "keyword") .endObject() .endObject() .endObject() .endObject() .endObject() .endObject() .endObject() ) ); insertIdx1(Arrays.asList("a", "b", "c"), Arrays.asList("1", "2", "3", "4")); insertIdx1(Arrays.asList("b", "c", "d"), Arrays.asList("4", "5", "6", "7")); 
insertIdx1(Arrays.asList("c", "d", "e"), Arrays.asList("7", "8", "9", "1")); refresh(); insertIdx1(Arrays.asList("a", "e"), Arrays.asList("7", "4", "1", "1")); insertIdx1(Arrays.asList("a", "c"), Arrays.asList("2", "1")); insertIdx1(Arrays.asList("a"), Arrays.asList("3", "4")); refresh(); insertIdx1(Arrays.asList("x", "c"), Arrays.asList("1", "8")); insertIdx1(Arrays.asList("y", "c"), Arrays.asList("6")); insertIdx1(Arrays.asList("z"), Arrays.asList("5", "9")); refresh(); insertIdx2( new String[][] { new String[] { "a", "0", "0", "1", "2" }, new String[] { "b", "0", "1", "1", "2" }, new String[] { "a", "0" } } ); insertIdx2(new String[][] { new String[] { "c", "1", "1", "2", "2" }, new String[] { "d", "3", "4" } }); refresh(); insertIdx2(new String[][] { new String[] { "a", "0", "0", "0", "0" }, new String[] { "b", "0", "0", "0", "0" } }); insertIdx2(new String[][] { new String[] { "e", "1", "2" }, new String[] { "f", "3", "4" } }); refresh(); ensureSearchable(); } private void insertIdx1(List<String> values1, List<String> values2) throws Exception { XContentBuilder source = jsonBuilder().startObject().array("field1", values1.toArray()).startArray("nested1"); for (String value1 : values2) { source.startObject().field("field2", value1).endObject(); } source.endArray().endObject(); indexRandom(false, client().prepareIndex("idx1").setRouting("1").setSource(source)); } private void insertIdx2(String[][] values) throws Exception { XContentBuilder source = jsonBuilder().startObject().startArray("nested1"); for (String[] value : values) { source.startObject().field("field1", value[0]).startArray("nested2"); for (int i = 1; i < value.length; i++) { source.startObject().field("field2", value[i]).endObject(); } source.endArray().endObject(); } source.endArray().endObject(); indexRandom(false, client().prepareIndex("idx2").setRouting("1").setSource(source)); } public void testSimpleReverseNestedToRoot() throws Exception { SearchResponse response = 
client().prepareSearch("idx1") .addAggregation( nested("nested1", "nested1").subAggregation( terms("field2").field("nested1.field2") .subAggregation( reverseNested("nested1_to_field1").subAggregation( terms("field1").field("field1").collectMode(randomFrom(SubAggCollectionMode.values())) ) ) ) ) .get(); assertSearchResponse(response); Nested nested = response.getAggregations().get("nested1"); assertThat(nested, notNullValue()); assertThat(nested.getName(), equalTo("nested1")); assertThat(nested.getDocCount(), equalTo(25L)); assertThat(nested.getAggregations().asList().isEmpty(), is(false)); Terms usernames = nested.getAggregations().get("field2"); assertThat(usernames, notNullValue()); assertThat(usernames.getBuckets().size(), equalTo(9)); List<Terms.Bucket> usernameBuckets = new ArrayList<>(usernames.getBuckets()); // nested.field2: 1 Terms.Bucket bucket = usernameBuckets.get(0); assertThat(bucket.getKeyAsString(), equalTo("1")); assertThat(bucket.getDocCount(), equalTo(6L)); ReverseNested reverseNested = bucket.getAggregations().get("nested1_to_field1"); assertThat(((InternalAggregation) reverseNested).getProperty("_count"), equalTo(5L)); Terms tags = reverseNested.getAggregations().get("field1"); assertThat(((InternalAggregation) reverseNested).getProperty("field1"), sameInstance(tags)); List<Terms.Bucket> tagsBuckets = new ArrayList<>(tags.getBuckets()); assertThat(tagsBuckets.size(), equalTo(6)); assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("c")); assertThat(tagsBuckets.get(0).getDocCount(), equalTo(4L)); assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("a")); assertThat(tagsBuckets.get(1).getDocCount(), equalTo(3L)); assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("e")); assertThat(tagsBuckets.get(2).getDocCount(), equalTo(2L)); assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("b")); assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); assertThat(tagsBuckets.get(4).getKeyAsString(), equalTo("d")); 
assertThat(tagsBuckets.get(4).getDocCount(), equalTo(1L)); assertThat(tagsBuckets.get(5).getKeyAsString(), equalTo("x")); assertThat(tagsBuckets.get(5).getDocCount(), equalTo(1L)); // nested.field2: 4 bucket = usernameBuckets.get(1); assertThat(bucket.getKeyAsString(), equalTo("4")); assertThat(bucket.getDocCount(), equalTo(4L)); reverseNested = bucket.getAggregations().get("nested1_to_field1"); tags = reverseNested.getAggregations().get("field1"); tagsBuckets = new ArrayList<>(tags.getBuckets()); assertThat(tagsBuckets.size(), equalTo(5)); assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a")); assertThat(tagsBuckets.get(0).getDocCount(), equalTo(3L)); assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b")); assertThat(tagsBuckets.get(1).getDocCount(), equalTo(2L)); assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("c")); assertThat(tagsBuckets.get(2).getDocCount(), equalTo(2L)); assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("d")); assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); assertThat(tagsBuckets.get(4).getKeyAsString(), equalTo("e")); assertThat(tagsBuckets.get(4).getDocCount(), equalTo(1L)); // nested.field2: 7 bucket = usernameBuckets.get(2); assertThat(bucket.getKeyAsString(), equalTo("7")); assertThat(bucket.getDocCount(), equalTo(3L)); reverseNested = bucket.getAggregations().get("nested1_to_field1"); tags = reverseNested.getAggregations().get("field1"); tagsBuckets = new ArrayList<>(tags.getBuckets()); assertThat(tagsBuckets.size(), equalTo(5)); assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("c")); assertThat(tagsBuckets.get(0).getDocCount(), equalTo(2L)); assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("d")); assertThat(tagsBuckets.get(1).getDocCount(), equalTo(2L)); assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("e")); assertThat(tagsBuckets.get(2).getDocCount(), equalTo(2L)); assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("a")); assertThat(tagsBuckets.get(3).getDocCount(), 
equalTo(1L)); assertThat(tagsBuckets.get(4).getKeyAsString(), equalTo("b")); assertThat(tagsBuckets.get(4).getDocCount(), equalTo(1L)); // nested.field2: 2 bucket = usernameBuckets.get(3); assertThat(bucket.getKeyAsString(), equalTo("2")); assertThat(bucket.getDocCount(), equalTo(2L)); reverseNested = bucket.getAggregations().get("nested1_to_field1"); tags = reverseNested.getAggregations().get("field1"); tagsBuckets = new ArrayList<>(tags.getBuckets()); assertThat(tagsBuckets.size(), equalTo(3)); assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a")); assertThat(tagsBuckets.get(0).getDocCount(), equalTo(2L)); assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("c")); assertThat(tagsBuckets.get(1).getDocCount(), equalTo(2L)); assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("b")); assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); // nested.field2: 3 bucket = usernameBuckets.get(4); assertThat(bucket.getKeyAsString(), equalTo("3")); assertThat(bucket.getDocCount(), equalTo(2L)); reverseNested = bucket.getAggregations().get("nested1_to_field1"); tags = reverseNested.getAggregations().get("field1"); tagsBuckets = new ArrayList<>(tags.getBuckets()); assertThat(tagsBuckets.size(), equalTo(3)); assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a")); assertThat(tagsBuckets.get(0).getDocCount(), equalTo(2L)); assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b")); assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L)); assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("c")); assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); // nested.field2: 5 bucket = usernameBuckets.get(5); assertThat(bucket.getKeyAsString(), equalTo("5")); assertThat(bucket.getDocCount(), equalTo(2L)); reverseNested = bucket.getAggregations().get("nested1_to_field1"); tags = reverseNested.getAggregations().get("field1"); tagsBuckets = new ArrayList<>(tags.getBuckets()); assertThat(tagsBuckets.size(), equalTo(4)); 
assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("b")); assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L)); assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("c")); assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L)); assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("d")); assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("z")); assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); // nested.field2: 6 bucket = usernameBuckets.get(6); assertThat(bucket.getKeyAsString(), equalTo("6")); assertThat(bucket.getDocCount(), equalTo(2L)); reverseNested = bucket.getAggregations().get("nested1_to_field1"); tags = reverseNested.getAggregations().get("field1"); tagsBuckets = new ArrayList<>(tags.getBuckets()); assertThat(tagsBuckets.size(), equalTo(4)); assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("c")); assertThat(tagsBuckets.get(0).getDocCount(), equalTo(2L)); assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b")); assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L)); assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("d")); assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("y")); assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); // nested.field2: 8 bucket = usernameBuckets.get(7); assertThat(bucket.getKeyAsString(), equalTo("8")); assertThat(bucket.getDocCount(), equalTo(2L)); reverseNested = bucket.getAggregations().get("nested1_to_field1"); tags = reverseNested.getAggregations().get("field1"); tagsBuckets = new ArrayList<>(tags.getBuckets()); assertThat(tagsBuckets.size(), equalTo(4)); assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("c")); assertThat(tagsBuckets.get(0).getDocCount(), equalTo(2L)); assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("d")); assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L)); assertThat(tagsBuckets.get(2).getKeyAsString(), 
equalTo("e")); assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("x")); assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); // nested.field2: 9 bucket = usernameBuckets.get(8); assertThat(bucket.getKeyAsString(), equalTo("9")); assertThat(bucket.getDocCount(), equalTo(2L)); reverseNested = bucket.getAggregations().get("nested1_to_field1"); tags = reverseNested.getAggregations().get("field1"); tagsBuckets = new ArrayList<>(tags.getBuckets()); assertThat(tagsBuckets.size(), equalTo(4)); assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("c")); assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L)); assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("d")); assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L)); assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("e")); assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("z")); assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); } public void testSimpleNested1ToRootToNested2() throws Exception { SearchResponse response = client().prepareSearch("idx2") .addAggregation( nested("nested1", "nested1").subAggregation( reverseNested("nested1_to_root").subAggregation(nested("root_to_nested2", "nested1.nested2")) ) ) .get(); assertSearchResponse(response); Nested nested = response.getAggregations().get("nested1"); assertThat(nested.getName(), equalTo("nested1")); assertThat(nested.getDocCount(), equalTo(9L)); ReverseNested reverseNested = nested.getAggregations().get("nested1_to_root"); assertThat(reverseNested.getName(), equalTo("nested1_to_root")); assertThat(reverseNested.getDocCount(), equalTo(4L)); nested = reverseNested.getAggregations().get("root_to_nested2"); assertThat(nested.getName(), equalTo("root_to_nested2")); assertThat(nested.getDocCount(), equalTo(27L)); } public void testSimpleReverseNestedToNested1() throws Exception { SearchResponse response = 
client().prepareSearch("idx2") .addAggregation( nested("nested1", "nested1.nested2").subAggregation( terms("field2").field("nested1.nested2.field2") .order(BucketOrder.key(true)) .collectMode(randomFrom(SubAggCollectionMode.values())) .size(10000) .subAggregation( reverseNested("nested1_to_field1").path("nested1") .subAggregation( terms("field1").field("nested1.field1") .order(BucketOrder.key(true)) .collectMode(randomFrom(SubAggCollectionMode.values())) ) ) ) ) .get(); assertSearchResponse(response); Nested nested = response.getAggregations().get("nested1"); assertThat(nested, notNullValue()); assertThat(nested.getName(), equalTo("nested1")); assertThat(nested.getDocCount(), equalTo(27L)); assertThat(nested.getAggregations().asList().isEmpty(), is(false)); Terms usernames = nested.getAggregations().get("field2"); assertThat(usernames, notNullValue()); assertThat(usernames.getBuckets().size(), equalTo(5)); List<Terms.Bucket> usernameBuckets = new ArrayList<>(usernames.getBuckets()); Terms.Bucket bucket = usernameBuckets.get(0); assertThat(bucket.getKeyAsString(), equalTo("0")); assertThat(bucket.getDocCount(), equalTo(12L)); ReverseNested reverseNested = bucket.getAggregations().get("nested1_to_field1"); assertThat(reverseNested.getDocCount(), equalTo(5L)); Terms tags = reverseNested.getAggregations().get("field1"); List<Terms.Bucket> tagsBuckets = new ArrayList<>(tags.getBuckets()); assertThat(tagsBuckets.size(), equalTo(2)); assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a")); assertThat(tagsBuckets.get(0).getDocCount(), equalTo(3L)); assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b")); assertThat(tagsBuckets.get(1).getDocCount(), equalTo(2L)); bucket = usernameBuckets.get(1); assertThat(bucket.getKeyAsString(), equalTo("1")); assertThat(bucket.getDocCount(), equalTo(6L)); reverseNested = bucket.getAggregations().get("nested1_to_field1"); assertThat(reverseNested.getDocCount(), equalTo(4L)); tags = 
reverseNested.getAggregations().get("field1"); tagsBuckets = new ArrayList<>(tags.getBuckets()); assertThat(tagsBuckets.size(), equalTo(4)); assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a")); assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L)); assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b")); assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L)); assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("c")); assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("e")); assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); bucket = usernameBuckets.get(2); assertThat(bucket.getKeyAsString(), equalTo("2")); assertThat(bucket.getDocCount(), equalTo(5L)); reverseNested = bucket.getAggregations().get("nested1_to_field1"); assertThat(reverseNested.getDocCount(), equalTo(4L)); tags = reverseNested.getAggregations().get("field1"); tagsBuckets = new ArrayList<>(tags.getBuckets()); assertThat(tagsBuckets.size(), equalTo(4)); assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a")); assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L)); assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b")); assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L)); assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("c")); assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("e")); assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); bucket = usernameBuckets.get(3); assertThat(bucket.getKeyAsString(), equalTo("3")); assertThat(bucket.getDocCount(), equalTo(2L)); reverseNested = bucket.getAggregations().get("nested1_to_field1"); assertThat(reverseNested.getDocCount(), equalTo(2L)); tags = reverseNested.getAggregations().get("field1"); tagsBuckets = new ArrayList<>(tags.getBuckets()); assertThat(tagsBuckets.size(), equalTo(2)); assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("d")); 
assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L)); assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("f")); bucket = usernameBuckets.get(4); assertThat(bucket.getKeyAsString(), equalTo("4")); assertThat(bucket.getDocCount(), equalTo(2L)); reverseNested = bucket.getAggregations().get("nested1_to_field1"); assertThat(reverseNested.getDocCount(), equalTo(2L)); tags = reverseNested.getAggregations().get("field1"); tagsBuckets = new ArrayList<>(tags.getBuckets()); assertThat(tagsBuckets.size(), equalTo(2)); assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("d")); assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L)); assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("f")); } public void testReverseNestedAggWithoutNestedAgg() { try { client().prepareSearch("idx2") .addAggregation( terms("field2").field("nested1.nested2.field2") .collectMode(randomFrom(SubAggCollectionMode.values())) .subAggregation( reverseNested("nested1_to_field1").subAggregation( terms("field1").field("nested1.field1").collectMode(randomFrom(SubAggCollectionMode.values())) ) ) ) .get(); fail("Expected SearchPhaseExecutionException"); } catch (SearchPhaseExecutionException e) { assertThat(e.getMessage(), is("all shards failed")); } } public void testNonExistingNestedField() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx2") .setQuery(matchAllQuery()) .addAggregation(nested("nested2", "nested1.nested2").subAggregation(reverseNested("incorrect").path("nested3"))) .get(); Nested nested = searchResponse.getAggregations().get("nested2"); assertThat(nested, notNullValue()); assertThat(nested.getName(), equalTo("nested2")); ReverseNested reverseNested = nested.getAggregations().get("incorrect"); assertThat(reverseNested.getDocCount(), is(0L)); // Test that parsing the reverse_nested agg doesn't fail, because the parent nested agg is unmapped: searchResponse = client().prepareSearch("idx1") .setQuery(matchAllQuery()) 
.addAggregation(nested("incorrect1", "incorrect1").subAggregation(reverseNested("incorrect2").path("incorrect2"))) .get(); nested = searchResponse.getAggregations().get("incorrect1"); assertThat(nested, notNullValue()); assertThat(nested.getName(), equalTo("incorrect1")); assertThat(nested.getDocCount(), is(0L)); } public void testSameParentDocHavingMultipleBuckets() throws Exception { XContentBuilder mapping = jsonBuilder().startObject() .startObject("_doc") .field("dynamic", "strict") .startObject("properties") .startObject("id") .field("type", "long") .endObject() .startObject("category") .field("type", "nested") .startObject("properties") .startObject("name") .field("type", "keyword") .endObject() .endObject() .endObject() .startObject("sku") .field("type", "nested") .startObject("properties") .startObject("sku_type") .field("type", "keyword") .endObject() .startObject("colors") .field("type", "nested") .startObject("properties") .startObject("name") .field("type", "keyword") .endObject() .endObject() .endObject() .endObject() .endObject() .endObject() .endObject() .endObject(); assertAcked( prepareCreate("idx3").setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)) .setMapping(mapping) ); client().prepareIndex("idx3") .setId("1") .setRefreshPolicy(IMMEDIATE) .setSource( jsonBuilder().startObject() .startArray("sku") .startObject() .field("sku_type", "bar1") .startArray("colors") .startObject() .field("name", "red") .endObject() .startObject() .field("name", "green") .endObject() .startObject() .field("name", "yellow") .endObject() .endArray() .endObject() .startObject() .field("sku_type", "bar1") .startArray("colors") .startObject() .field("name", "red") .endObject() .startObject() .field("name", "blue") .endObject() .startObject() .field("name", "white") .endObject() .endArray() .endObject() .startObject() .field("sku_type", "bar1") .startArray("colors") .startObject() .field("name", "black") .endObject() 
.startObject() .field("name", "blue") .endObject() .endArray() .endObject() .startObject() .field("sku_type", "bar2") .startArray("colors") .startObject() .field("name", "orange") .endObject() .endArray() .endObject() .startObject() .field("sku_type", "bar2") .startArray("colors") .startObject() .field("name", "pink") .endObject() .endArray() .endObject() .endArray() .startArray("category") .startObject() .field("name", "abc") .endObject() .startObject() .field("name", "klm") .endObject() .startObject() .field("name", "xyz") .endObject() .endArray() .endObject() ) .get(); SearchResponse response = client().prepareSearch("idx3") .addAggregation( nested("nested_0", "category").subAggregation( terms("group_by_category").field("category.name") .subAggregation( reverseNested("to_root").subAggregation( nested("nested_1", "sku").subAggregation( filter("filter_by_sku", termQuery("sku.sku_type", "bar1")).subAggregation( count("sku_count").field("sku.sku_type") ) ) ) ) ) ) .get(); assertNoFailures(response); assertHitCount(response, 1); Nested nested0 = response.getAggregations().get("nested_0"); assertThat(nested0.getDocCount(), equalTo(3L)); Terms terms = nested0.getAggregations().get("group_by_category"); assertThat(terms.getBuckets().size(), equalTo(3)); for (String bucketName : new String[] { "abc", "klm", "xyz" }) { logger.info("Checking results for bucket {}", bucketName); Terms.Bucket bucket = terms.getBucketByKey(bucketName); assertThat(bucket.getDocCount(), equalTo(1L)); ReverseNested toRoot = bucket.getAggregations().get("to_root"); assertThat(toRoot.getDocCount(), equalTo(1L)); Nested nested1 = toRoot.getAggregations().get("nested_1"); assertThat(nested1.getDocCount(), equalTo(5L)); Filter filterByBar = nested1.getAggregations().get("filter_by_sku"); assertThat(filterByBar.getDocCount(), equalTo(3L)); ValueCount barCount = filterByBar.getAggregations().get("sku_count"); assertThat(barCount.getValue(), equalTo(3L)); } response = client().prepareSearch("idx3") 
.addAggregation( nested("nested_0", "category").subAggregation( terms("group_by_category").field("category.name") .subAggregation( reverseNested("to_root").subAggregation( nested("nested_1", "sku").subAggregation( filter("filter_by_sku", termQuery("sku.sku_type", "bar1")).subAggregation( nested("nested_2", "sku.colors").subAggregation( filter("filter_sku_color", termQuery("sku.colors.name", "red")).subAggregation( reverseNested("reverse_to_sku").path("sku") .subAggregation(count("sku_count").field("sku.sku_type")) ) ) ) ) ) ) ) ) .get(); assertNoFailures(response); assertHitCount(response, 1); nested0 = response.getAggregations().get("nested_0"); assertThat(nested0.getDocCount(), equalTo(3L)); terms = nested0.getAggregations().get("group_by_category"); assertThat(terms.getBuckets().size(), equalTo(3)); for (String bucketName : new String[] { "abc", "klm", "xyz" }) { logger.info("Checking results for bucket {}", bucketName); Terms.Bucket bucket = terms.getBucketByKey(bucketName); assertThat(bucket.getDocCount(), equalTo(1L)); ReverseNested toRoot = bucket.getAggregations().get("to_root"); assertThat(toRoot.getDocCount(), equalTo(1L)); Nested nested1 = toRoot.getAggregations().get("nested_1"); assertThat(nested1.getDocCount(), equalTo(5L)); Filter filterByBar = nested1.getAggregations().get("filter_by_sku"); assertThat(filterByBar.getDocCount(), equalTo(3L)); Nested nested2 = filterByBar.getAggregations().get("nested_2"); assertThat(nested2.getDocCount(), equalTo(8L)); Filter filterBarColor = nested2.getAggregations().get("filter_sku_color"); assertThat(filterBarColor.getDocCount(), equalTo(2L)); ReverseNested reverseToBar = filterBarColor.getAggregations().get("reverse_to_sku"); assertThat(reverseToBar.getDocCount(), equalTo(2L)); ValueCount barCount = reverseToBar.getAggregations().get("sku_count"); assertThat(barCount.getValue(), equalTo(2L)); } } public void testFieldAlias() { SearchResponse response = client().prepareSearch("idx1") .addAggregation( 
nested("nested1", "nested1").subAggregation( terms("field2").field("nested1.field2") .subAggregation( reverseNested("nested1_to_field1").subAggregation( terms("field1").field("alias").collectMode(randomFrom(SubAggCollectionMode.values())) ) ) ) ) .get(); assertSearchResponse(response); Nested nested = response.getAggregations().get("nested1"); Terms nestedTerms = nested.getAggregations().get("field2"); Terms.Bucket bucket = nestedTerms.getBuckets().iterator().next(); ReverseNested reverseNested = bucket.getAggregations().get("nested1_to_field1"); Terms reverseNestedTerms = reverseNested.getAggregations().get("field1"); assertThat(((InternalAggregation) reverseNested).getProperty("field1"), sameInstance(reverseNestedTerms)); assertThat(reverseNestedTerms.getBuckets().size(), equalTo(6)); } }
apache-2.0
Darsstar/framework
client/src/main/java/com/vaadin/client/widget/grid/selection/MultiSelectionRenderer.java
28136
/* * Copyright 2000-2016 Vaadin Ltd. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package com.vaadin.client.widget.grid.selection; import java.util.Collection; import java.util.HashSet; import com.google.gwt.animation.client.AnimationScheduler; import com.google.gwt.animation.client.AnimationScheduler.AnimationCallback; import com.google.gwt.animation.client.AnimationScheduler.AnimationHandle; import com.google.gwt.core.client.GWT; import com.google.gwt.dom.client.BrowserEvents; import com.google.gwt.dom.client.Element; import com.google.gwt.dom.client.NativeEvent; import com.google.gwt.dom.client.TableElement; import com.google.gwt.dom.client.TableRowElement; import com.google.gwt.dom.client.TableSectionElement; import com.google.gwt.event.dom.client.ClickEvent; import com.google.gwt.event.dom.client.ClickHandler; import com.google.gwt.event.dom.client.MouseDownEvent; import com.google.gwt.event.dom.client.MouseDownHandler; import com.google.gwt.event.dom.client.TouchStartEvent; import com.google.gwt.event.dom.client.TouchStartHandler; import com.google.gwt.event.shared.HandlerRegistration; import com.google.gwt.user.client.Event; import com.google.gwt.user.client.Event.NativePreviewEvent; import com.google.gwt.user.client.Event.NativePreviewHandler; import com.google.gwt.user.client.ui.CheckBox; import com.vaadin.client.WidgetUtil; import com.vaadin.client.renderers.ClickableRenderer; import com.vaadin.client.widget.grid.CellReference; import 
com.vaadin.client.widget.grid.RendererCellReference; import com.vaadin.client.widget.grid.events.GridEnabledHandler; import com.vaadin.client.widget.grid.events.GridSelectionAllowedEvent; import com.vaadin.client.widget.grid.events.GridSelectionAllowedHandler; import com.vaadin.client.widgets.Escalator.AbstractRowContainer; import com.vaadin.client.widgets.Grid; /** * Renderer showing multi selection check boxes. * * @author Vaadin Ltd * @param <T> * the type of the associated grid * @since 7.4 */ public class MultiSelectionRenderer<T> extends ClickableRenderer<Boolean, CheckBox> { private static final String SELECTION_CHECKBOX_CLASSNAME = "-selection-checkbox"; /** The size of the autoscroll area, both top and bottom. */ private static final int SCROLL_AREA_GRADIENT_PX = 100; /** The maximum number of pixels per second to autoscroll. */ private static final int SCROLL_TOP_SPEED_PX_SEC = 500; /** * The minimum area where the grid doesn't scroll while the pointer is * pressed. */ private static final int MIN_NO_AUTOSCROLL_AREA_PX = 50; /** * Handler for MouseDown and TouchStart events for selection checkboxes. 
* * @since 7.5 */ private final class CheckBoxEventHandler implements MouseDownHandler, TouchStartHandler, ClickHandler, GridEnabledHandler, GridSelectionAllowedHandler { private final CheckBox checkBox; /** * @param checkBox * checkbox widget for this handler */ private CheckBoxEventHandler(CheckBox checkBox) { this.checkBox = checkBox; } @Override public void onMouseDown(MouseDownEvent event) { if (checkBox.isEnabled()) { if (event.getNativeButton() == NativeEvent.BUTTON_LEFT) { startDragSelect(event.getNativeEvent(), checkBox.getElement()); } } } @Override public void onTouchStart(TouchStartEvent event) { if (checkBox.isEnabled()) { startDragSelect(event.getNativeEvent(), checkBox.getElement()); } } @Override public void onClick(ClickEvent event) { // Clicking is already handled with MultiSelectionRenderer event.preventDefault(); event.stopPropagation(); } @Override public void onEnabled(boolean enabled) { updateEnable(); } @Override public void onSelectionAllowed(GridSelectionAllowedEvent event) { updateEnable(); } private void updateEnable() { checkBox.setEnabled(grid.isEnabled() && grid.getSelectionModel().isSelectionAllowed()); } } /** * This class's main objective is to listen when to stop autoscrolling, and * make sure everything stops accordingly. */ private class TouchEventHandler implements NativePreviewHandler { @Override public void onPreviewNativeEvent(final NativePreviewEvent event) { switch (event.getTypeInt()) { case Event.ONTOUCHSTART: { if (event.getNativeEvent().getTouches().length() == 1) { /* * Something has dropped a touchend/touchcancel and the * scroller is most probably running amok. Let's cancel it * and pretend that everything's going as expected * * Because this is a preview, this code is run before the * event handler in MultiSelectionRenderer.onBrowserEvent. * Therefore, we can simply kill everything and let that * method restart things as they should. 
*/ autoScrollHandler.stop(); /* * Related TODO: investigate why iOS seems to ignore a * touchend/touchcancel when frames are dropped, and/or if * something can be done about that. */ } break; } case Event.ONTOUCHMOVE: event.cancel(); break; case Event.ONTOUCHEND: case Event.ONTOUCHCANCEL: /* * Remember: targetElement is always where touchstart started, * not where the finger is pointing currently. */ final Element targetElement = Element .as(event.getNativeEvent().getEventTarget()); if (isInFirstColumn(targetElement)) { removeNativeHandler(); event.cancel(); } break; } } private boolean isInFirstColumn(final Element element) { if (element == null) { return false; } final Element tbody = getTbodyElement(); if (tbody == null || !tbody.isOrHasChild(element)) { return false; } /* * The null-parent in the while clause is in the case where element * is an immediate tr child in the tbody. Should never happen in * internal code, but hey... */ Element cursor = element; while (cursor.getParentElement() != null && cursor.getParentElement().getParentElement() != tbody) { cursor = cursor.getParentElement(); } final Element tr = cursor.getParentElement(); return tr.getFirstChildElement().equals(cursor); } } /** * This class's responsibility is to * <ul> * <li>scroll the table while a pointer is kept in a scrolling zone and * <li>select rows whenever a pointer is "activated" on a selection cell * </ul> * <p> * <em>Techical note:</em> This class is an AnimationCallback because we * need a timer: when the finger is kept in place while the grid scrolls, we * still need to be able to make new selections. So, instead of relying on * events (which won't be fired, since the pointer isn't necessarily * moving), we do this check on each frame while the pointer is "active" * (mouse is pressed, finger is on screen). 
*/ private class AutoScrollerAndSelector implements AnimationCallback { /** * If the acceleration gradient area is smaller than this, autoscrolling * will be disabled (it becomes too quick to accelerate to be usable). */ private static final int GRADIENT_MIN_THRESHOLD_PX = 10; /** * The speed at which the gradient area recovers, once scrolling in that * direction has started. */ private static final int SCROLL_AREA_REBOUND_PX_PER_SEC = 1; private static final double SCROLL_AREA_REBOUND_PX_PER_MS = SCROLL_AREA_REBOUND_PX_PER_SEC / 1000.0d; /** * The lowest y-coordinate on the {@link Event#getClientY() client} from * where we need to start scrolling towards the top. */ private int topBound = -1; /** * The highest y-coordinate on the {@link Event#getClientY() client} * from where we need to scrolling towards the bottom. */ private int bottomBound = -1; /** * <code>true</code> if the pointer is selecting, <code>false</code> if * the pointer is deselecting. */ private final boolean selectionPaint; /** * The area where the selection acceleration takes place. If &lt; * {@link #GRADIENT_MIN_THRESHOLD_PX}, autoscrolling is disabled */ private final int gradientArea; /** * The number of pixels per seconds we currently are scrolling (negative * is towards the top, positive is towards the bottom). */ private double scrollSpeed = 0; private double prevTimestamp = 0; /** * This field stores fractions of pixels to scroll, to make sure that * we're able to scroll less than one px per frame. */ private double pixelsToScroll = 0.0d; /** Should this animator be running. */ private boolean running = false; /** The handle in which this instance is running. */ private AnimationHandle handle; /** The pointer's pageX coordinate of the first click. */ private int initialPageX = -1; /** The pointer's pageY coordinate. */ private int pageY; /** The logical index of the row that was most recently modified. 
*/ private int lastModifiedLogicalRow = -1; /** @see #doScrollAreaChecks(int) */ private int finalTopBound; /** @see #doScrollAreaChecks(int) */ private int finalBottomBound; private boolean scrollAreaShouldRebound = false; private final int bodyAbsoluteTop; private final int bodyAbsoluteBottom; public AutoScrollerAndSelector(final int topBound, final int bottomBound, final int gradientArea, final boolean selectionPaint) { finalTopBound = topBound; finalBottomBound = bottomBound; this.gradientArea = gradientArea; this.selectionPaint = selectionPaint; bodyAbsoluteTop = getBodyClientTop(); bodyAbsoluteBottom = getBodyClientBottom(); } @Override public void execute(final double timestamp) { final double timeDiff = timestamp - prevTimestamp; prevTimestamp = timestamp; reboundScrollArea(timeDiff); pixelsToScroll += scrollSpeed * (timeDiff / 1000.0d); final int intPixelsToScroll = (int) pixelsToScroll; pixelsToScroll -= intPixelsToScroll; if (intPixelsToScroll != 0) { grid.setScrollTop(grid.getScrollTop() + intPixelsToScroll); } int constrainedPageY = Math.max(bodyAbsoluteTop, Math.min(bodyAbsoluteBottom, pageY)); int logicalRow = getLogicalRowIndex(grid, WidgetUtil .getElementFromPoint(initialPageX, constrainedPageY)); int incrementOrDecrement = (logicalRow > lastModifiedLogicalRow) ? 1 : -1; /* * Both pageY and initialPageX have their initialized (and * unupdated) values while the cursor hasn't moved since the first * invocation. This will lead to logicalRow being -1, until the * pointer has been moved. 
*/ while (logicalRow != -1 && lastModifiedLogicalRow != logicalRow) { lastModifiedLogicalRow += incrementOrDecrement; setSelected(lastModifiedLogicalRow, selectionPaint); } reschedule(); } /** * If the scroll are has been offset by the pointer starting out there, * move it back a bit */ private void reboundScrollArea(double timeDiff) { if (!scrollAreaShouldRebound) { return; } int reboundPx = (int) Math .ceil(SCROLL_AREA_REBOUND_PX_PER_MS * timeDiff); if (topBound < finalTopBound) { topBound += reboundPx; topBound = Math.min(topBound, finalTopBound); updateScrollSpeed(pageY); } else if (bottomBound > finalBottomBound) { bottomBound -= reboundPx; bottomBound = Math.max(bottomBound, finalBottomBound); updateScrollSpeed(pageY); } } private void updateScrollSpeed(final int pointerPageY) { final double ratio; if (pointerPageY < topBound) { final double distance = pointerPageY - topBound; ratio = Math.max(-1, distance / gradientArea); } else if (pointerPageY > bottomBound) { final double distance = pointerPageY - bottomBound; ratio = Math.min(1, distance / gradientArea); } else { ratio = 0; } scrollSpeed = ratio * SCROLL_TOP_SPEED_PX_SEC; } public void start(int logicalRowIndex) { running = true; setSelected(logicalRowIndex, selectionPaint); lastModifiedLogicalRow = logicalRowIndex; reschedule(); } public void stop() { running = false; if (handle != null) { handle.cancel(); handle = null; } } private void reschedule() { if (running && gradientArea >= GRADIENT_MIN_THRESHOLD_PX) { handle = AnimationScheduler.get().requestAnimationFrame(this, grid.getElement()); } } public void updatePointerCoords(int pageX, int pageY) { doScrollAreaChecks(pageY); updateScrollSpeed(pageY); this.pageY = pageY; if (initialPageX == -1) { initialPageX = pageX; } } /** * This method checks whether the first pointer event started in an area * that would start scrolling immediately, and does some actions * accordingly. 
* <p> * If it is, that scroll area will be offset "beyond" the pointer (above * if pointer is towards the top, otherwise below). * <p> * <span style="font-size:smaller">*) This behavior will change in * future patches (henrik paul 2.7.2014)</span> */ private void doScrollAreaChecks(int pageY) { /* * The first run makes sure that neither scroll position is * underneath the finger, but offset to either direction from * underneath the pointer. */ if (topBound == -1) { topBound = Math.min(finalTopBound, pageY); bottomBound = Math.max(finalBottomBound, pageY); } else { /* * Subsequent runs make sure that the scroll area grows (but doesn't * shrink) with the finger, but no further than the final bound. */ int oldTopBound = topBound; if (topBound < finalTopBound) { topBound = Math.max(topBound, Math.min(finalTopBound, pageY)); } int oldBottomBound = bottomBound; if (bottomBound > finalBottomBound) { bottomBound = Math.min(bottomBound, Math.max(finalBottomBound, pageY)); } final boolean topDidNotMove = oldTopBound == topBound; final boolean bottomDidNotMove = oldBottomBound == bottomBound; final boolean wasVerticalMovement = pageY != this.pageY; scrollAreaShouldRebound = (topDidNotMove && bottomDidNotMove && wasVerticalMovement); } } } /** * This class makes sure that pointer movemenets are registered and * delegated to the autoscroller so that it can: * <ul> * <li>modify the speed in which we autoscroll. * <li>"paint" a new row with the selection. * </ul> * Essentially, when a pointer is pressed on the selection column, a native * preview handler is registered (so that selection gestures can happen * outside of the selection column). The handler itself makes sure that it's * detached when the pointer is "lifted". 
*/ private class AutoScrollHandler { private AutoScrollerAndSelector autoScroller; /** The registration info for {@link #scrollPreviewHandler} */ private HandlerRegistration handlerRegistration; private final NativePreviewHandler scrollPreviewHandler = event -> { if (autoScroller == null) { stop(); return; } final NativeEvent nativeEvent = event.getNativeEvent(); int pageY = 0; int pageX = 0; switch (event.getTypeInt()) { case Event.ONMOUSEMOVE: case Event.ONTOUCHMOVE: pageY = WidgetUtil.getTouchOrMouseClientY(nativeEvent); pageX = WidgetUtil.getTouchOrMouseClientX(nativeEvent); autoScroller.updatePointerCoords(pageX, pageY); break; case Event.ONMOUSEUP: case Event.ONTOUCHEND: case Event.ONTOUCHCANCEL: stop(); break; } }; /** * The top bound, as calculated from the {@link Event#getClientY() * client} coordinates. */ private int topBound = -1; /** * The bottom bound, as calculated from the {@link Event#getClientY() * client} coordinates. */ private int bottomBound = -1; /** The size of the autoscroll acceleration area. */ private int gradientArea; public void start(int logicalRowIndex) { /* * bounds are updated whenever the autoscroll cycle starts, to make * sure that the widget hasn't changed in size, moved around, or * whatnot. 
*/ updateScrollBounds(); assert handlerRegistration == null : "handlerRegistration was not null"; assert autoScroller == null : "autoScroller was not null"; handlerRegistration = Event .addNativePreviewHandler(scrollPreviewHandler); autoScroller = new AutoScrollerAndSelector(topBound, bottomBound, gradientArea, !isSelected(logicalRowIndex)); autoScroller.start(logicalRowIndex); } private void updateScrollBounds() { final int topBorder = getBodyClientTop(); final int bottomBorder = getBodyClientBottom(); topBound = topBorder + SCROLL_AREA_GRADIENT_PX; bottomBound = bottomBorder - SCROLL_AREA_GRADIENT_PX; gradientArea = SCROLL_AREA_GRADIENT_PX; // modify bounds if they're too tightly packed if (bottomBound - topBound < MIN_NO_AUTOSCROLL_AREA_PX) { int adjustment = MIN_NO_AUTOSCROLL_AREA_PX - (bottomBound - topBound); topBound -= adjustment / 2; bottomBound += adjustment / 2; gradientArea -= adjustment / 2; } } public void stop() { if (handlerRegistration != null) { handlerRegistration.removeHandler(); handlerRegistration = null; } if (autoScroller != null) { autoScroller.stop(); autoScroller = null; } removeNativeHandler(); } } private final Grid<T> grid; private HandlerRegistration nativePreviewHandlerRegistration; private final AutoScrollHandler autoScrollHandler = new AutoScrollHandler(); public MultiSelectionRenderer(final Grid<T> grid) { this.grid = grid; } @Override public void destroy() { if (nativePreviewHandlerRegistration != null) { removeNativeHandler(); } } @Override public CheckBox createWidget() { final CheckBox checkBox = GWT.create(CheckBox.class); checkBox.setStylePrimaryName( grid.getStylePrimaryName() + SELECTION_CHECKBOX_CLASSNAME); CheckBoxEventHandler handler = new CheckBoxEventHandler(checkBox); // label of checkbox should only be visible for assistive devices checkBox.addStyleName("v-assistive-device-only-label"); // Sink events checkBox.sinkBitlessEvent(BrowserEvents.MOUSEDOWN); checkBox.sinkBitlessEvent(BrowserEvents.TOUCHSTART); 
checkBox.sinkBitlessEvent(BrowserEvents.CLICK); // Add handlers checkBox.addMouseDownHandler(handler); checkBox.addTouchStartHandler(handler); checkBox.addClickHandler(handler); grid.addEnabledHandler(handler); grid.addSelectionAllowedHandler(handler); return checkBox; } @Override public void render(final RendererCellReference cell, final Boolean data, CheckBox checkBox) { checkBox.setValue(data, false); // this should be a temp fix. checkBox.setText("Selects row number " + getDOMRowIndex(cell) + "."); checkBox.setEnabled(grid.isEnabled() && !grid.isEditorActive()); } private int getDOMRowIndex(RendererCellReference cell){ // getRowIndex starts with zero, that's why we add an additional 1. // getDOMRowIndex should include getHeaderRows as well, this number // should be equals to aria-rowindex. return cell.getGrid().getHeaderRowCount() + cell.getRowIndex() + 1; } @Override public Collection<String> getConsumedEvents() { final HashSet<String> events = new HashSet<>(); /* * this column's first interest is only to attach a NativePreventHandler * that does all the magic. These events are the beginning of that * cycle. 
*/ events.add(BrowserEvents.MOUSEDOWN); events.add(BrowserEvents.TOUCHSTART); return events; } @Override public boolean onBrowserEvent(final CellReference<?> cell, final NativeEvent event) { if (BrowserEvents.TOUCHSTART.equals(event.getType()) || (BrowserEvents.MOUSEDOWN.equals(event.getType()) && event.getButton() == NativeEvent.BUTTON_LEFT)) { startDragSelect(event, Element.as(event.getEventTarget())); return true; } else { throw new IllegalStateException( "received unexpected event: " + event.getType()); } } private void startDragSelect(NativeEvent event, final Element target) { injectNativeHandler(); int logicalRowIndex = getLogicalRowIndex(grid, target); autoScrollHandler.start(logicalRowIndex); event.preventDefault(); event.stopPropagation(); } private void injectNativeHandler() { removeNativeHandler(); nativePreviewHandlerRegistration = Event .addNativePreviewHandler(new TouchEventHandler()); } private void removeNativeHandler() { if (nativePreviewHandlerRegistration != null) { nativePreviewHandlerRegistration.removeHandler(); nativePreviewHandlerRegistration = null; } } private int getLogicalRowIndex(Grid<T> grid, final Element target) { if (target == null) { return -1; } /* * We can't simply go backwards until we find a <tr> first element, * because of the table-in-table scenario. We need to, unfortunately, go * up from our known root. 
*/ final Element tbody = getTbodyElement(); Element tr = tbody.getFirstChildElement(); while (tr != null) { if (tr.isOrHasChild(target)) { final Element td = tr.getFirstChildElement(); assert td != null : "Cell has disappeared"; final Element checkbox = td.getFirstChildElement(); assert checkbox != null : "Checkbox has disappeared"; return ((AbstractRowContainer) grid.getEscalator().getBody()) .getLogicalRowIndex((TableRowElement) tr); } tr = tr.getNextSiblingElement(); } return -1; } private TableElement getTableElement() { final Element root = grid.getElement(); final Element tablewrapper = Element.as(root.getChild(2)); if (tablewrapper != null) { return TableElement.as(tablewrapper.getFirstChildElement()); } else { return null; } } private TableSectionElement getTbodyElement() { TableElement table = getTableElement(); if (table != null) { return table.getTBodies().getItem(0); } else { return null; } } private TableSectionElement getTheadElement() { TableElement table = getTableElement(); if (table != null) { return table.getTHead(); } else { return null; } } private TableSectionElement getTfootElement() { TableElement table = getTableElement(); if (table != null) { return table.getTFoot(); } else { return null; } } /** Get the "top" of an element in relation to "client" coordinates. */ private int getClientTop(final Element e) { return e.getAbsoluteTop(); } private int getBodyClientBottom() { return getClientTop(getTfootElement()) - 1; } private int getBodyClientTop() { // Off by one pixel miscalculation. possibly border related. return getClientTop(grid.getElement()) + getTheadElement().getOffsetHeight() + 1; } protected boolean isSelected(final int logicalRow) { return grid.isSelected(grid.getDataSource().getRow(logicalRow)); } protected void setSelected(final int logicalRow, final boolean select) { T row = grid.getDataSource().getRow(logicalRow); if (select) { grid.select(row); } else { grid.deselect(row); } } }
apache-2.0
prabushi/devstudio-tooling-esb
plugins/org.wso2.developerstudio.visualdatamapper.diagram/src/org/wso2/developerstudio/datamapper/diagram/edit/parts/EqualEditPart.java
5756
package org.wso2.developerstudio.datamapper.diagram.edit.parts; import org.eclipse.draw2d.IFigure; import org.eclipse.draw2d.Label; import org.eclipse.draw2d.RectangleFigure; import org.eclipse.draw2d.RoundedRectangle; import org.eclipse.draw2d.Shape; import org.eclipse.draw2d.StackLayout; import org.eclipse.draw2d.TitleBarBorder; import org.eclipse.gef.EditPart; import org.eclipse.gef.EditPolicy; import org.eclipse.gef.Request; import org.eclipse.gef.commands.Command; import org.eclipse.gef.editpolicies.LayoutEditPolicy; import org.eclipse.gef.editpolicies.NonResizableEditPolicy; import org.eclipse.gef.requests.CreateRequest; import org.eclipse.gmf.runtime.diagram.ui.editpolicies.EditPolicyRoles; import org.eclipse.gmf.runtime.draw2d.ui.figures.ConstrainedToolbarLayout; import org.eclipse.gmf.runtime.gef.ui.figures.DefaultSizeNodeFigure; import org.eclipse.gmf.runtime.gef.ui.figures.NodeFigure; import org.eclipse.gmf.runtime.notation.View; import org.eclipse.swt.SWT; import org.eclipse.swt.graphics.Color; import org.eclipse.swt.graphics.Font; import org.wso2.developerstudio.datamapper.diagram.custom.edit.part.AbstractOperatorEditPart; /** * @generated NOT */ public class EqualEditPart extends AbstractOperatorEditPart { /** * @generated */ public static final int VISUAL_ID = 2005; /** * @generated */ protected IFigure contentPane; /** * @generated */ protected IFigure primaryShape; /** * @generated */ public EqualEditPart(View view) { super(view); } /** * @generated NOT */ @Override public boolean canAttachNote() { return false; } /** * @generated */ protected void createDefaultEditPolicies() { super.createDefaultEditPolicies(); installEditPolicy(EditPolicyRoles.SEMANTIC_ROLE, new org.wso2.developerstudio.datamapper.diagram.edit.policies.EqualItemSemanticEditPolicy()); installEditPolicy(EditPolicyRoles.CANONICAL_ROLE, new org.wso2.developerstudio.datamapper.diagram.edit.policies.EqualCanonicalEditPolicy()); installEditPolicy(EditPolicy.LAYOUT_ROLE, 
createLayoutEditPolicy()); } /** * @generated */ protected LayoutEditPolicy createLayoutEditPolicy() { org.eclipse.gmf.runtime.diagram.ui.editpolicies.LayoutEditPolicy lep = new org.eclipse.gmf.runtime.diagram.ui.editpolicies.LayoutEditPolicy() { protected EditPolicy createChildEditPolicy(EditPart child) { EditPolicy result = child.getEditPolicy(EditPolicy.PRIMARY_DRAG_ROLE); if (result == null) { result = new NonResizableEditPolicy(); } return result; } protected Command getMoveChildrenCommand(Request request) { return null; } protected Command getCreateCommand(CreateRequest request) { return null; } }; return lep; } /** * @generated NOT */ protected IFigure createNodeShape() { return primaryShape = new EqualFigure(); } /** * @generated */ public RectangleFigure getPrimaryShape() { return (RectangleFigure) primaryShape; } /** * @generated */ protected NodeFigure createNodePlate() { DefaultSizeNodeFigure result = new DefaultSizeNodeFigure(40, 40); return result; } /** * Creates figure for this edit part. * * Body of this method does not depend on settings in generation model * so you may safely remove <i>generated</i> tag and modify it. * * @generated */ protected NodeFigure createNodeFigure() { NodeFigure figure = createNodePlate(); figure.setLayoutManager(new StackLayout()); IFigure shape = createNodeShape(); figure.add(shape); contentPane = setupContentPane(shape); return figure; } /** * Default implementation treats passed figure as content pane. * Respects layout one may have set for generated figure. 
* @param nodeShape instance of generated figure class * @generated */ protected IFigure setupContentPane(IFigure nodeShape) { if (nodeShape.getLayoutManager() == null) { ConstrainedToolbarLayout layout = new ConstrainedToolbarLayout(); layout.setSpacing(5); nodeShape.setLayoutManager(layout); } return nodeShape; // use nodeShape itself as contentPane } /** * @generated */ public IFigure getContentPane() { if (contentPane != null) { return contentPane; } return super.getContentPane(); } /** * @generated */ protected void setForegroundColor(Color color) { if (primaryShape != null) { primaryShape.setForegroundColor(color); } } /** * @generated */ protected void setBackgroundColor(Color color) { if (primaryShape != null) { primaryShape.setBackgroundColor(color); } } /** * @generated */ protected void setLineWidth(int width) { if (primaryShape instanceof Shape) { ((Shape) primaryShape).setLineWidth(width); } } /** * @generated */ protected void setLineType(int style) { if (primaryShape instanceof Shape) { ((Shape) primaryShape).setLineStyle(style); } } public class EqualFigure extends RoundedRectangle { public EqualFigure() { this.setBackgroundColor(THIS_BACK); TitleBarBorder titleBarBorder = new TitleBarBorder("Equal"); titleBarBorder.setBackgroundColor(new Color(null, 183, 183, 183)); titleBarBorder.setTextColor(new Color(null, 0, 0, 0)); titleBarBorder.setFont(new Font(null, "Arial", 10, SWT.NORMAL)); this.setBorder(titleBarBorder); /* RoundedRectangleBorder border = new RoundedRectangleBorder(8, 8); border.setColor(new Color(null, 255, 0, 0));*/ this.setBorder(titleBarBorder); } public String getIconPath() { return "icons/ico20/log-mediator.gif"; } public String getNodeName() { return "Equal"; } public IFigure getToolTip() { return new Label("Equal Operation"); } } static final Color THIS_BACK = DataMapperColorConstants.connectorColor; }
apache-2.0
MarkRunWu/buck
src/com/facebook/buck/apple/PlistProcessStep.java
2840
/* * Copyright 2014-present Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. You may obtain * a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ package com.facebook.buck.apple; import com.facebook.buck.io.ProjectFilesystem; import com.facebook.buck.step.ExecutionContext; import com.facebook.buck.step.Step; import com.google.common.collect.ImmutableMap; import com.dd.plist.NSDictionary; import com.dd.plist.NSObject; import com.dd.plist.PropertyListParser; import java.io.IOException; import java.io.InputStream; import java.io.BufferedInputStream; import java.nio.file.Path; public class PlistProcessStep implements Step { private final Path input; private final Path output; private final ImmutableMap<String, NSObject> additionalKeys; private final ImmutableMap<String, NSObject> overrideKeys; public PlistProcessStep( Path input, Path output, ImmutableMap<String, NSObject> additionalKeys, ImmutableMap<String, NSObject> overrideKeys) { this.input = input; this.output = output; this.additionalKeys = additionalKeys; this.overrideKeys = overrideKeys; } @Override public int execute(ExecutionContext context) throws InterruptedException { ProjectFilesystem filesystem = context.getProjectFilesystem(); try (InputStream stream = filesystem.newFileInputStream(input); BufferedInputStream bufferedStream = new BufferedInputStream(stream)) { NSDictionary infoPlist; try { infoPlist = (NSDictionary) PropertyListParser.parse(bufferedStream); } catch (Exception e) { throw new IOException(e); } for (ImmutableMap.Entry<String, NSObject> entry : 
additionalKeys.entrySet()) { if (!infoPlist.containsKey(entry.getKey())) { infoPlist.put(entry.getKey(), entry.getValue()); } } infoPlist.putAll(overrideKeys); String serializedInfoPlist = infoPlist.toXMLPropertyList(); filesystem.writeContentsToPath( serializedInfoPlist, output); } catch (IOException e) { context.logError(e, "error parsing plist %s", input); return 1; } return 0; } @Override public String getShortName() { return "process-plist"; } @Override public String getDescription(ExecutionContext context) { return String.format("process-plist %s %s", input, output); } }
apache-2.0
ezbake/ezbake-platform-services
warehaus/service/src/main/java/ezbake/warehaus/AccumuloWarehaus.java
79271
/* Copyright (C) 2013-2015 Computer Sciences Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ezbake.warehaus; import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; import java.util.Calendar; import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.EnumSet; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Properties; import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import com.google.common.base.Function; import ezbake.base.thrift.*; import ezbake.common.properties.EzProperties; import ezbake.data.common.TimeUtil; import ezbake.data.iterator.EzBakeVisibilityFilter; import ezbake.security.client.EzSecurityTokenWrapper; import ezbake.security.client.EzbakeSecurityClient; import ezbake.services.centralPurge.thrift.ezCentralPurgeServiceConstants; import ezbake.services.provenance.thrift.PositionsToUris; import ezbake.services.provenance.thrift.ProvenanceService; import ezbake.services.provenance.thrift.ProvenanceServiceConstants; import ezbake.thrift.ThriftClientPool; import ezbake.security.permissions.PermissionUtils; import ezbake.security.serialize.VisibilitySerialization; import ezbake.security.serialize.thrift.VisibilityWrapper; import 
ezbake.thrift.ThriftUtils; import ezbake.util.AuditEvent; import ezbake.util.AuditEventType; import ezbake.util.AuditLogger; import ezbake.util.AuditLoggerConfigurator; import ezbakehelpers.accumulo.AccumuloHelper; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.collect.Sets; import org.apache.accumulo.core.client.AccumuloException; import org.apache.accumulo.core.client.AccumuloSecurityException; import org.apache.accumulo.core.client.BatchDeleter; import org.apache.accumulo.core.client.BatchScanner; import org.apache.accumulo.core.client.BatchWriter; import org.apache.accumulo.core.client.BatchWriterConfig; import org.apache.accumulo.core.client.Connector; import org.apache.accumulo.core.client.IteratorSetting; import org.apache.accumulo.core.client.MutationsRejectedException; import org.apache.accumulo.core.client.Scanner; import org.apache.accumulo.core.client.ScannerBase; import org.apache.accumulo.core.client.TableNotFoundException; import org.apache.accumulo.core.data.Key; import org.apache.accumulo.core.data.Mutation; import org.apache.accumulo.core.data.PartialKey; import org.apache.accumulo.core.data.Range; import org.apache.accumulo.core.data.Value; import org.apache.accumulo.core.iterators.user.TimestampFilter; import org.apache.accumulo.core.iterators.user.VersioningIterator; import org.apache.accumulo.core.security.ColumnVisibility; import org.apache.accumulo.core.security.TablePermission; import org.apache.hadoop.io.Text; import org.apache.thrift.TException; import org.apache.thrift.TProcessor; import org.apache.thrift.TSerializer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class AccumuloWarehaus extends ezbake.base.thrift.EzBakeBaseThriftService implements WarehausService.Iface, EzBakeBasePurgeService.Iface { private static final Logger logger = LoggerFactory.getLogger(AccumuloWarehaus.class); private EzbakeSecurityClient securityClient = null; private Connector connector; 
// private FileSystem hdfs; private String accumuloNamespace; private String purgeVisibility; private String purgeAppSecurityId; private static AuditLogger auditLogger; public AccumuloWarehaus() { } @SuppressWarnings({"unchecked", "rawtypes"}) @Override public TProcessor getThriftProcessor() { Properties properties = getConfigurationProperties(); AccumuloHelper accumulo = new AccumuloHelper(properties); try { EzProperties ezproperties = new EzProperties(getConfigurationProperties(), false); purgeVisibility = ezproperties.getProperty(WarehausConstants.PURGE_VISIBILITY_KEY); } catch (Exception e) { throw new RuntimeException(e); } if (purgeVisibility == null || purgeVisibility.trim().isEmpty()) { String msg = "The required Warehaus purge visibility configuration parameter '" + WarehausConstants.PURGE_VISIBILITY_KEY + "' could not be found."; logger.error(msg + ". Set the parameter in the Warehaus application configuration file before starting the Warehaus service."); throw new RuntimeException(msg); } accumuloNamespace = accumulo.getAccumuloNamespace(); if (logger.isDebugEnabled()) { logger.debug("Setting configuration..."); for (Object prop : properties.keySet()) { logger.debug("Property: {} = {}", prop, properties.get(prop)); } } // try { // this.hdfs = HDFSHelper.getFileSystemFromProperties(properties); // } catch (IOException e) { // throw new RuntimeException(e); // } try { this.connector = accumulo.getConnector(true); ensureTable(); } catch (Exception e) { throw new RuntimeException(e); } securityClient = new EzbakeSecurityClient(properties); AuditLoggerConfigurator.setAdditivity(true); auditLogger = AuditLogger.getAuditLogger(AccumuloWarehaus.class); return new WarehausService.Processor(this); } @Override public boolean ping() { try { boolean ping = connector.tableOperations().exists(WarehausConstants.TABLE_NAME); if (!ping) { logger.error("Ping: The warehaus table does not exist."); } boolean purgePing = 
connector.tableOperations().exists(WarehausConstants.PURGE_TABLE_NAME); if (!purgePing) { logger.error("Ping: The purge warehaus table does not exist."); } return ping && purgePing; } catch (Exception ex) { logger.error("Ping failed with an unexpected exception", ex); return false; } } @Override public IngestStatus insert(Repository data, Visibility visibility, EzSecurityToken security) throws TException { securityClient.validateReceivedToken(security); HashMap<String, String> auditArgs = Maps.newHashMap(); auditArgs.put("action", "insert"); auditArgs.put("uri", data.getUri()); auditLog(security, AuditEventType.FileObjectCreate, auditArgs); UpdateEntry entry = new UpdateEntry(); entry.setUri(data.getUri()); entry.setParsedData(data.getParsedData()); entry.setRawData(data.getRawData()); entry.setUpdateVisibility(data.isUpdateVisibility()); IngestStatus status = insertUpdate(entry, visibility, security); logger.debug("insert status : " + status); return status; } @Override public BinaryReplay getLatestRaw(String uri, EzSecurityToken security) throws TException, EntryNotInWarehausException { securityClient.validateReceivedToken(security); HashMap<String, String> auditArgs = Maps.newHashMap(); auditArgs.put("action", "getLatestRaw"); auditArgs.put("uri", uri); auditLog(security, AuditEventType.FileObjectAccess, auditArgs); return getLatest(uri, WarehausUtils.getUriPrefixFromUri(uri), GetDataType.RAW.toString(), security); } @Override public BinaryReplay getLatestParsed(String uri, EzSecurityToken security) throws TException, EntryNotInWarehausException { securityClient.validateReceivedToken(security); HashMap<String, String> auditArgs = Maps.newHashMap(); auditArgs.put("action", "getLatestParsed"); auditArgs.put("uri", uri); auditLog(security, AuditEventType.FileObjectAccess, auditArgs); return getLatest(uri, WarehausUtils.getUriPrefixFromUri(uri), GetDataType.PARSED.toString(), security); } @Override public BinaryReplay getRaw(String uri, long timestamp, 
EzSecurityToken security) throws TException, EntryNotInWarehausException { securityClient.validateReceivedToken(security); HashMap<String, String> auditArgs = Maps.newHashMap(); auditArgs.put("action", "getRaw"); auditArgs.put("uri", uri); auditArgs.put("timestamp", String.valueOf(timestamp)); auditLog(security, AuditEventType.FileObjectAccess, auditArgs); return getVersion(uri, WarehausUtils.getUriPrefixFromUri(uri), GetDataType.RAW.toString(), timestamp, security); } @Override public BinaryReplay getParsed(String uri, long timestamp, EzSecurityToken security) throws TException, EntryNotInWarehausException { securityClient.validateReceivedToken(security); HashMap<String, String> auditArgs = Maps.newHashMap(); auditArgs.put("action", "getParsed"); auditArgs.put("uri", uri); auditArgs.put("timestamp", String.valueOf(timestamp)); auditLog(security, AuditEventType.FileObjectAccess, auditArgs); return getVersion(uri, WarehausUtils.getUriPrefixFromUri(uri), GetDataType.PARSED.toString(), timestamp, security); } @Override public List<ezbake.warehaus.BinaryReplay> get(ezbake.warehaus.GetRequest getRequest, ezbake.base.thrift.EzSecurityToken security) throws MaxGetRequestSizeExceededException, org.apache.thrift.TException { securityClient.validateReceivedToken(security); List<BinaryReplay> retList; GetDataType getType = getRequest.getGetDataType(); String colQualifier = null; switch (getType) { case RAW: colQualifier = getType.toString(); break; case PARSED: colQualifier = getType.toString(); break; default: break; } List<Key> keys = Lists.newArrayList(); boolean getLatestErr = false; boolean getTimestampErr = false; HashMap<String, String> auditArgs = Maps.newHashMap(); auditArgs.put("action", "get"); for (RequestParameter param : getRequest.getRequestParams()) { long timestamp = 0; // if latest, a timestamp value must not be provided if (getRequest.isLatestVersion() && (param.getTimestamp() != null)) { getLatestErr = true; break; } // if not latest, a timestamp value 
must be provided if (!getRequest.isLatestVersion()) { if (param.getTimestamp() == null) { getTimestampErr = true; break; } else { timestamp = TimeUtil.convertFromThriftDateTime(param.getTimestamp()); } } String uri = param.getUri(); if (getType == GetDataType.VIEW) { colQualifier = param.getSpacename() + "_" + param.getView(); } Key key = new Key(WarehausUtils.getKey(uri), new Text(WarehausUtils.getUriPrefixFromUri(uri)), new Text(colQualifier), timestamp); keys.add(key); auditArgs.put("uri", uri); auditLog(security, AuditEventType.FileObjectAccess, auditArgs); } if (getLatestErr) { throw new TException("A uri timestamp value CAN NOT be provided when requesting the latest version"); } if (getTimestampErr) { throw new TException("A uri timestamp value MUST be provided when not requesting the latest version"); } if (getRequest.isLatestVersion()) { retList = getLatest(keys, security); } else { retList = getVersion(keys, security); } return retList; } @Override public List<DatedURI> replay(String uriPrefix, boolean replayOnlyLatest, DateTime start, DateTime finish, GetDataType type, EzSecurityToken security) throws TException { securityClient.validateReceivedToken(security); if (uriPrefix == null || "".equals(uriPrefix.trim())) { throw new TException("Cannot replay a null or empty URI prefix."); } if (type == GetDataType.VIEW) { throw new TException("Cannot replay data from a view"); } // Default to PARSED if the user did not provide a data type GetDataType typeToReplay = type == null ? GetDataType.PARSED : type; String auths = WarehausUtils.getAuthsListFromToken(security); HashMap<String, String> auditArgs = Maps.newHashMap(); auditArgs.put("action", "replay"); auditArgs.put("uriPrefix", uriPrefix); auditArgs.put("start", start != null ? "" + TimeUtil.convertFromThriftDateTime(start) : ""); auditArgs.put("finish", finish != null ? 
"" + TimeUtil.convertFromThriftDateTime(finish) : ""); auditLog(security, AuditEventType.FileObjectAccess, auditArgs); BatchScanner scanner = null; List<DatedURI> retVal = null; try { scanner = createScanner(auths); IteratorSetting iteratorSetting = new IteratorSetting(13, "warehausReplayVisibilityIterator", EzBakeVisibilityFilter.class); addEzBakeVisibilityFilter(scanner, security, EnumSet.of(Permission.READ), iteratorSetting); IteratorSetting is = new IteratorSetting(10, "replay", VersioningIterator.class); if (replayOnlyLatest) { VersioningIterator.setMaxVersions(is, 1); } else { VersioningIterator.setMaxVersions(is, Integer.MAX_VALUE); } scanner.addScanIterator(is); long startTime = 0, endTime = System.currentTimeMillis(); if (start != null) { startTime = TimeUtil.convertFromThriftDateTime(start); } if (finish != null) { endTime = TimeUtil.convertFromThriftDateTime(finish); } IteratorSetting tis = new IteratorSetting(20, "timestamp", TimestampFilter.class); TimestampFilter.setRange(tis, startTime, true, endTime, true); scanner.addScanIterator(tis); scanner.setRanges(Lists.newArrayList(new Range())); scanner.fetchColumn(new Text(uriPrefix), new Text(typeToReplay.toString())); retVal = Lists.newArrayList(); Map<String, Integer> uriToRetValPosition = Maps.newHashMap(); for (Entry<Key, Value> entry : scanner) { long ts = entry.getKey().getTimestamp(); String uri = WarehausUtils.getUriFromComputed(entry.getKey().getRow().toString()); DateTime currentDateTime = TimeUtil.convertToThriftDateTime(ts); Visibility visibility = VisibilitySerialization.deserializeVisibilityWrappedValue(entry.getValue()).getVisibilityMarkings(); DatedURI uriToAdd = new DatedURI(currentDateTime, uri, visibility); // If we're only replaying the latest, check if we've already inserted this URI into the list being returned. if (replayOnlyLatest) { // If we've seen this URI already, check the timestamp that we've seen, and if it's older than what we // currently have, replace it. 
Otherwise ignore it if (uriToRetValPosition.containsKey(uri)) { int position = uriToRetValPosition.get(uri); long oldTimeStamp = TimeUtil.convertFromThriftDateTime(retVal.get(position).getTimestamp()); if (oldTimeStamp < ts) { retVal.remove(position); uriToRetValPosition.put(uri, retVal.size()); retVal.add(uriToAdd); } } else { retVal.add(uriToAdd); } } else { retVal.add(uriToAdd); } } } catch (IOException e) { logger.error("Could not deserialize value from Accumulo", e); throw new TException("Could not retrieve data for request", e); } finally { if (scanner != null) { scanner.close(); } } Collections.sort(retVal, new Comparator<DatedURI>() { @Override public int compare(DatedURI o1, DatedURI o2) { return o1.getTimestamp().compareTo(o2.getTimestamp()); } }); return retVal; } @Override public int replayCount(String urn, DateTime start, DateTime finish, GetDataType type, EzSecurityToken security) throws TException { securityClient.validateReceivedToken(security); logger.info("Next Replay Call is for count"); return replay(urn, false, start, finish, type, security).size(); } @Override public List<Long> getVersions(String uri, EzSecurityToken security) throws TException { String auths = WarehausUtils.getAuthsListFromToken(security); HashMap<String, String> auditArgs = Maps.newHashMap(); auditArgs.put("action", "getVersions"); auditArgs.put("uri", uri); auditLog(security, AuditEventType.FileObjectAccess, auditArgs); List<Long> forReturn = Lists.newLinkedList(); BatchScanner scanner = null; try { scanner = createScanner(auths); IteratorSetting iteratorSetting = new IteratorSetting(16, "warehausVersionsVisibilityIterator", EzBakeVisibilityFilter.class); addEzBakeVisibilityFilter(scanner, security, EnumSet.of(Permission.READ), iteratorSetting); IteratorSetting is = new IteratorSetting(10, VersioningIterator.class); VersioningIterator.setMaxVersions(is, Integer.MAX_VALUE); scanner.addScanIterator(is); Key key = new Key(WarehausUtils.getKey(uri), new 
Text(WarehausUtils.getUriPrefixFromUri(uri))); scanner.setRanges(Lists.newArrayList(new Range(key, true, key.followingKey(PartialKey.ROW_COLFAM), false))); for (Entry<Key, Value> entry : scanner) { long ts = entry.getKey().getTimestamp(); if (!forReturn.contains(ts)) { forReturn.add(ts); } } } finally { if (scanner != null) { scanner.close(); } } return forReturn; } @Override public IngestStatus insertView(ByteBuffer data, ViewId id, Visibility visibility, EzSecurityToken security) throws TException { securityClient.validateReceivedToken(security); IngestStatus status = new IngestStatus(); Long timestamp = Calendar.getInstance().getTimeInMillis(); status.setTimestamp(timestamp); try { checkWritePermission(id.getUri(), visibility, security, false); } catch (EzBakeAccessDenied ad) { status.setStatus(IngestStatusEnum.FAIL); status.setFailedURIs(Lists.newArrayList(id.getUri())); status.setFailureReason(ad.getMessage()); logger.debug("insertView status : " + status); return status; } String accessorId = confirmToken(security); HashMap<String, String> auditArgs = Maps.newHashMap(); auditArgs.put("action", "insertView"); auditArgs.put("uri", id.getUri()); auditArgs.put("accessorId", accessorId); auditLog(security, AuditEventType.FileObjectCreate, auditArgs); Mutation dataMutator = new Mutation(WarehausUtils.getKey(id.getUri())); try { dataMutator.put(new Text(WarehausUtils.getUriPrefixFromUri(id.getUri())), new Text(id.getSpacename() + "_" + id.getView()), new ColumnVisibility(PermissionUtils.getVisibilityString(visibility)), timestamp, VisibilitySerialization.serializeVisibilityWithDataToValue(visibility, new TSerializer().serialize(new VersionControl(data, accessorId)))); } catch (IOException e) { logger.error("Could not serialize value to insert into Accumulo", e); throw new TException("Could not insert data into the Warehaus", e); } BatchWriter writer = null; try { writer = createWriter(); writeMutation(dataMutator, writer); flushWriter(writer); } finally { 
closeWriter(writer); } status.setStatus(IngestStatusEnum.SUCCESS); logger.debug("insertView status : " + status); return status; } @Override public BinaryReplay getLatestView(ViewId id, EzSecurityToken security) throws TException, EntryNotInWarehausException { securityClient.validateReceivedToken(security); HashMap<String, String> auditArgs = Maps.newHashMap(); auditArgs.put("action", "getLatestView"); auditArgs.put("uri", id.getUri()); auditLog(security, AuditEventType.FileObjectAccess, auditArgs); return getLatest(id.getUri(), WarehausUtils.getUriPrefixFromUri(id.getUri()), id.getSpacename() + "_" + id.getView(), security); } @Override public BinaryReplay getView(ViewId id, long timestamp, EzSecurityToken security) throws TException, EntryNotInWarehausException { securityClient.validateReceivedToken(security); HashMap<String, String> auditArgs = Maps.newHashMap(); auditArgs.put("action", "getView"); auditArgs.put("uri", id.getUri()); auditArgs.put("timestamp", String.valueOf(timestamp)); auditLog(security, AuditEventType.FileObjectAccess, auditArgs); return getVersion(id.getUri(), WarehausUtils.getUriPrefixFromUri(id.getUri()), id.getSpacename() + "_" + id.getView(), timestamp, security); } @Override public void importFromHadoop(String filename, Visibility visibility, EzSecurityToken security) throws TException { // logRequest("importFromHadoop", filename, confirmToken(security), WarehausUtils.getAuthsListFromToken(security)); // InputStream is; // try { // is = hdfs.open(new Path(filename)); // } catch (IOException e) { // throw new TException(e); // } // ByteArrayOutputStream baos = new ByteArrayOutputStream(); // try { // int character = is.read(); // while (character > -1) { // baos.write(character); // character = is.read(); // } // } catch (IOException e) { // throw new TException(e); // } finally { // Closeables.closeQuietly(is); // } // Repository data = new Repository(); // new TDeserializer().deserialize(data, baos.toByteArray()); // insert(data, 
visibility, security); throw new TException("This endpoint is not implemented"); } @Override public IngestStatus updateEntry(UpdateEntry update, Visibility visibility, EzSecurityToken security) throws TException { HashMap<String, String> auditArgs = Maps.newHashMap(); auditArgs.put("action", "updateEntry"); auditArgs.put("uri", update.getUri()); auditLog(security, AuditEventType.FileObjectModify, auditArgs); IngestStatus status = insertUpdate(update, visibility, security); logger.debug("updateEntry status : " + status); return status; } private IngestStatus insertUpdate(UpdateEntry update, Visibility visibility, EzSecurityToken security) throws TException { try { checkWritePermission(update.getUri(), visibility, security, update.isUpdateVisibility()); } catch (EzBakeAccessDenied ad) { IngestStatus status = new IngestStatus(); status.setTimestamp(Calendar.getInstance().getTimeInMillis()); status.setStatus(IngestStatusEnum.FAIL); status.setFailedURIs(Lists.newArrayList(update.getUri())); status.setFailureReason(ad.getMessage()); return status; } String id = confirmToken(security); Map<String, VersionControl> parsed = Maps.newHashMap(); Map<String, VersionControl> raw = Maps.newHashMap(); Map<String, Boolean> updateVisibilityFlagMap = Maps.newHashMap(); Map<String, Visibility> visibilityMap = Maps.newHashMap(); if (update.isSetParsedData()) { VersionControl vc = new VersionControl(ByteBuffer.wrap(update.getParsedData()), id); parsed.put(update.getUri(), vc); } if (update.isSetRawData()) { VersionControl vc = new VersionControl(ByteBuffer.wrap(update.getRawData()), id); raw.put(update.getUri(), vc); } updateVisibilityFlagMap.put(update.getUri(), update.isUpdateVisibility()); visibilityMap.put(update.getUri(), visibility); return updateEntries(Lists.newArrayList(update.getUri()), parsed, raw, updateVisibilityFlagMap, visibilityMap, security); } @Override public IngestStatus put(ezbake.warehaus.PutRequest putRequest, EzSecurityToken security) throws TException { String 
// --- tail of the enclosing put operation; the method signature is above this chunk ---
id = confirmToken(security);
Map<String, VersionControl> parsedMap = Maps.newHashMap();
Map<String, VersionControl> rawMap = Maps.newHashMap();
Map<String, Boolean> updateVisibilityMap = Maps.newHashMap();
List<String> uriList = Lists.newArrayList();
Map<String, Visibility> visibilityMap = Maps.newHashMap();
HashMap<String, String> auditArgs = Maps.newHashMap();
auditArgs.put("action", "put");
// Unpack each request entry into per-URI lookup maps consumed by updateEntries().
for (PutUpdateEntry putEntry : putRequest.getEntries()) {
    UpdateEntry update = putEntry.getEntry();
    uriList.add(update.getUri());
    updateVisibilityMap.put(update.getUri(), update.isUpdateVisibility());
    visibilityMap.put(update.getUri(), putEntry.getVisibility());
    if (update.isSetParsedData()) {
        VersionControl vc = new VersionControl(ByteBuffer.wrap(update.getParsedData()), id);
        parsedMap.put(update.getUri(), vc);
    }
    if (update.isSetRawData()) {
        VersionControl vc = new VersionControl(ByteBuffer.wrap(update.getRawData()), id);
        rawMap.put(update.getUri(), vc);
    }
    // One audit record per entry; the shared map's "uri" slot is overwritten each pass.
    auditArgs.put("uri", update.getUri());
    auditLog(security, AuditEventType.FileObjectModify, auditArgs);
}
IngestStatus status = updateEntries(uriList, parsedMap, rawMap, updateVisibilityMap, visibilityMap, security);
logger.debug("put status : " + status);
return status;
}

/**
 * Writes a batch of parsed/raw updates (and optional visibility re-labels) to the
 * warehaus table using batch scans/writes rather than per-URI round trips.
 *
 * @param uriList all URIs touched by this request (inserts or updates)
 * @param parsedMap URI to new PARSED payload, for entries that carry parsed data
 * @param rawMap URI to new RAW payload, for entries that carry raw data
 * @param updateVisibilityMap URI to flag: should existing versions be re-labeled
 * @param visibilityMap URI to visibility applied to new (and optionally old) versions
 * @param security caller's token; drives both scan authorizations and write checks
 * @return IngestStatus of SUCCESS, PARTIAL or FAIL, listing any denied URIs
 * @throws TException on Accumulo or serialization failures
 */
private IngestStatus updateEntries(List<String> uriList, Map<String, VersionControl> parsedMap, Map<String, VersionControl> rawMap, Map<String, Boolean> updateVisibilityMap, Map<String, Visibility> visibilityMap, EzSecurityToken security) throws TException {
    String userAuths = WarehausUtils.getAuthsListFromToken(security);
    String id = confirmToken(security);
    long timestamp = Calendar.getInstance().getTimeInMillis();
    Set<GetDataType> dataTypes = Sets.newHashSet(GetDataType.values());
    Map<String, Mutation> mutationMap = Maps.newHashMap();
    List<Range> ranges = Lists.newArrayList();
    List<String> writableURIs = Lists.newArrayList();
    List<String> accessDenied = Lists.newArrayList();
    IngestStatus status = new IngestStatus();
    BatchScanner scanner = null;
    BatchWriter writer = null;
    // Below code is mostly organized to avoid scanning on a uri basis
    // and instead take advantage of batch scans for improved performance.
    // A rough order of tasks -
    // 1. update visibilities, when requested and different from old ones
    // 2. add new rows for parsed/raw types, version index
    try {
        writer = createWriter();
        // One range per (uri, data type) pair so a single batch scan covers everything.
        for (String uri : uriList) {
            for (GetDataType type : dataTypes) {
                Key key = new Key(WarehausUtils.getKey(uri), new Text(WarehausUtils.getUriPrefixFromUri(uri)), new Text(type.toString()));
                ranges.add(new Range(key, true, key.followingKey(PartialKey.ROW_COLFAM_COLQUAL), false));
            }
        }
        if (!ranges.isEmpty()) {
            try {
                scanner = createScanner(userAuths);
                scanner.setRanges(ranges);
                // need existing group auths
                IteratorSetting iteratorSetting = new IteratorSetting(21, "warehausEntriesVisibilityIterator", EzBakeVisibilityFilter.class);
                addEzBakeVisibilityFilter(scanner, security, EnumSet.of(Permission.READ, Permission.MANAGE_VISIBILITY, Permission.WRITE), iteratorSetting);
                for (Entry<Key, Value> entry : scanner) {
                    String uri = WarehausUtils.getUriFromKey(entry.getKey());
                    // Any entry surviving the WRITE-permission filter is writable by the caller.
                    writableURIs.add(uri);
                    // update visibility of old entry if the flag is set.
                    if (updateVisibilityMap.get(uri)) {
                        long oldTimeStamp = entry.getKey().getTimestamp();
                        Visibility visibilityForUpdate = visibilityMap.get(uri);
                        ColumnVisibility oldVisibility = new ColumnVisibility(entry.getKey().getColumnVisibility());
                        // Update if new visibility is different than the old one.
                        // NOTE(review): this compares the thrift Visibility's toString() to
                        // Accumulo's ColumnVisibility.toString(); their formats likely never
                        // match, so this "unchanged" short-circuit may never fire and old
                        // versions get rewritten unconditionally -- TODO confirm.
                        if (visibilityForUpdate.toString().equals(oldVisibility.toString())) {
                            continue;
                        }
                        VisibilityWrapper wrapper = VisibilitySerialization.deserializeVisibilityWrappedValue(entry.getValue());
                        VersionControl value = ThriftUtils.deserialize(VersionControl.class, wrapper.getValue());
                        // Delete to ensure removal of entry with old visibility.
                        // Update only the visibility, leave everything else (incl. timestamp) as is.
                        Mutation mutation = mutationMap.get(uri);
                        if (mutation == null) {
                            mutation = new Mutation(WarehausUtils.getKey(uri));
                        }
                        mutation.putDelete(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier(), oldVisibility, oldTimeStamp);
                        mutation.put(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier(), new ColumnVisibility(PermissionUtils.getVisibilityString(visibilityForUpdate)), oldTimeStamp, VisibilitySerialization.serializeVisibilityWithDataToValue(visibilityForUpdate, ThriftUtils.serialize(new VersionControl(ByteBuffer.wrap(value.getPacket()), id))));
                        mutationMap.put(uri, mutation);
                    }
                }
            } finally {
                if (scanner != null) {
                    scanner.close();
                }
            }
        }
        // Scan for existing URIs since writableURIs would only contain
        // those that user can write to in case of updates.
        // This will let distinguish between inserts and updates.
        List<String> existingURIs = Lists.newArrayList();
        if (!ranges.isEmpty()) {
            try {
                scanner = createScanner(userAuths);
                scanner.setRanges(ranges);
                for (Entry<Key, Value> entry : scanner) {
                    String uri = WarehausUtils.getUriFromKey(entry.getKey());
                    existingURIs.add(uri);
                }
            } finally {
                if (scanner != null) {
                    scanner.close();
                }
            }
        }
        // Add new updates to parsed/raw data
        for (String uri : uriList) {
            // updates should be writable: an existing row that didn't pass the
            // WRITE-permission filter above means the caller may not touch it.
            if (existingURIs.contains(uri) && !writableURIs.contains(uri)) {
                accessDenied.add(uri);
                continue;
            }
            Visibility visibilityForUpdate = visibilityMap.get(uri);
            try {
                checkWritePermission(uri, visibilityForUpdate, security, false);
            } catch (EzBakeAccessDenied ad) {
                accessDenied.add(uri);
                continue;
            }
            Mutation mutation = mutationMap.get(uri);
            if (mutation == null) {
                mutation = new Mutation(WarehausUtils.getKey(uri));
            }
            if (parsedMap.containsKey(uri)) {
                mutation.put(new Text(WarehausUtils.getUriPrefixFromUri(uri)), new Text(GetDataType.PARSED.toString()), new ColumnVisibility(PermissionUtils.getVisibilityString(visibilityForUpdate)), timestamp, VisibilitySerialization.serializeVisibilityWithDataToValue(visibilityForUpdate, ThriftUtils.serialize(new VersionControl(ByteBuffer.wrap(parsedMap.get(uri).getPacket()), id))));
            }
            if (rawMap.containsKey(uri)) {
                mutation.put(new Text(WarehausUtils.getUriPrefixFromUri(uri)), new Text(GetDataType.RAW.toString()), new ColumnVisibility(PermissionUtils.getVisibilityString(visibilityForUpdate)), timestamp, VisibilitySerialization.serializeVisibilityWithDataToValue(visibilityForUpdate, ThriftUtils.serialize(new VersionControl(ByteBuffer.wrap(rawMap.get(uri).getPacket()), id))));
            }
            writeMutation(mutation, writer);
        }
        flushWriter(writer);
    } catch (IOException e) {
        logger.error("Could not deserialize value from Accumulo", e);
        throw new TException("Could not retrieve data for request", e);
    } finally {
        closeWriter(writer);
    }
    status.setTimestamp(timestamp);
    status.setStatus(IngestStatusEnum.SUCCESS);
    if (!accessDenied.isEmpty()) {
        // FAIL when every URI was denied, PARTIAL when only some were.
        if (accessDenied.size() == uriList.size()) {
            status.setStatus(IngestStatusEnum.FAIL);
        } else {
            status.setStatus(IngestStatusEnum.PARTIAL);
        }
        status.setFailedURIs(accessDenied);
        status.setFailureReason("Given user token does not have the " + "required authorizations to update documents with listed URIs");
        return status;
    }
    return status;
}

/**
 * @throws EntryNotInWarehausException If the document identified by the
 *         given URI was not found.
 * @throws TException If an error occurs during the fetching of the entry.
 */
@Override
public ezbake.warehaus.EntryDetail getEntryDetails(String uri, ezbake.base.thrift.EzSecurityToken security) throws org.apache.thrift.TException {
    securityClient.validateReceivedToken(security);
    String auths = WarehausUtils.getAuthsListFromToken(security);
    HashMap<String, String> auditArgs = Maps.newHashMap();
    auditArgs.put("action", "getEntryDetails");
    auditArgs.put("uri", uri);
    auditLog(security, AuditEventType.FileObjectAccess, auditArgs);
    // Timestamps already emitted; de-duplicates versions seen under multiple qualifiers.
    List<Long> forReturn = Lists.newLinkedList();
    List<VersionDetail> versions = Lists.newLinkedList();
    BatchScanner scanner = null;
    int count = 0;
    try {
        scanner = createScanner(auths);
        IteratorSetting iteratorSetting = new IteratorSetting(27, "warehausEntryDetailVisibilityIterator", EzBakeVisibilityFilter.class);
        addEzBakeVisibilityFilter(scanner, security, EnumSet.of(Permission.READ), iteratorSetting);
        // Return every stored version, not just the table's configured maximum.
        IteratorSetting is = new IteratorSetting(10, VersioningIterator.class);
        VersioningIterator.setMaxVersions(is, Integer.MAX_VALUE);
        scanner.addScanIterator(is);
        Key key = new Key(WarehausUtils.getKey(uri), new Text(WarehausUtils.getUriPrefixFromUri(uri)));
        scanner.setRanges(Lists.newArrayList(new Range(key, true, key.followingKey(PartialKey.ROW_COLFAM), false)));
        for (Entry<Key, Value> entry : scanner) {
            count++;
            long ts = entry.getKey().getTimestamp();
            if (!forReturn.contains(ts)) {
                forReturn.add(ts);
                VersionDetail vd = new VersionDetail();
                vd.setUri(uri);
                vd.setTimestamp(ts);
                VisibilityWrapper wrapper = VisibilitySerialization.deserializeVisibilityWrappedValue(entry.getValue());
                vd.setVisibility(wrapper.getVisibilityMarkings());
                VersionControl versionData = ThriftUtils.deserialize(VersionControl.class, wrapper.getValue());
                // VersionControl.name carries the writer's id (see confirmToken()).
                vd.setSecurityId(versionData.getName());
                versions.add(vd);
            }
        }
    } catch (IOException e) {
        logger.error("Could not deserialize the data from Accumulo associated with " + uri + ".", e);
        throw new TException("Could not deserialize the data from Accumulo associated with " + uri + ".", e);
    } finally {
        if (scanner != null) {
            scanner.close();
        }
    }
    if (count == 0) {
        logger.debug("The following document URI was not found in the warehouse: " + uri);
        throw new EntryNotInWarehausException("The following document URI was not found in the warehouse: " + uri);
    }
    EntryDetail entryDetail = new EntryDetail();
    entryDetail.setUri(uri);
    entryDetail.setVersions(versions);
    return entryDetail;
}

/*********************************************************
/*
/* Helper Functions
/*
*********************************************************/

/**
 * Validates the security token with the security service and returns either the application security ID or
 * the user DN associated with the token.
 *
 * @param security security token to validate
 * @return the ID (either application security ID or user DN) associated with the token
 * @throws TException if token validation fails
 */
private String confirmToken(EzSecurityToken security) throws TException {
    securityClient.validateReceivedToken(security);
    String id;
    if (security.getType() == TokenType.APP) {
        id = security.getValidity().getIssuedTo();
    } else {
        id = security.getTokenPrincipal().getPrincipal();
    }
    return id;
}

/**
 * <p>
 * Ensures that the warehaus table is in accumulo and attempts to create
 * it if it does not exist.
 * Likewise, checks that the warehaus purge table is in accumulo and will
 * recreate the table if it does not exist.
 * </p>
 *
 * @throws TException If an error occurs while checking for the accumulo
 *         namespace, warehaus table or warehaus purge table. If an
 *         error occurs while creating the accumulo namespace, warehaus
 *         table or warehaus purge table.
 */
private void ensureTable() throws TException {
    // Namespace first: the tables below live inside it.
    try {
        if (!connector.namespaceOperations().exists(accumuloNamespace)) {
            logger.warn("The accumulo namespace '" + accumuloNamespace + "' does not exist. An attempt to create namespace will start now.");
            connector.namespaceOperations().create(accumuloNamespace);
            logger.warn("The accumulo namespace '" + accumuloNamespace + "' was created.");
        }
    } catch (Exception e) {
        logger.error("An error occurred while checking for the existence of or while creating the accumulo namespace '" + accumuloNamespace + "'.");
        throw new TException(e);
    }
    try {
        if (!connector.tableOperations().exists(WarehausConstants.TABLE_NAME)) {
            logger.warn("The warehaus table '" + WarehausConstants.TABLE_NAME + "' does not exist. An attempt to create the table will start now.");
            connector.tableOperations().create(WarehausConstants.TABLE_NAME, false);
            logger.warn("The warehaus table '" + WarehausConstants.TABLE_NAME + "' was created.");
            logger.info("Adding table splits");
            // Pre-split the new table using the comma-separated split points from configuration.
            String splitsAsString = getConfigurationProperties().getProperty(WarehausConstants.WAREHAUS_SPLITS_KEY, WarehausConstants.DEFAULT_WAREHAUS_SPLITS);
            String splitsArray[] = splitsAsString.split(",");
            List<Text> splitsAsText = Lists.transform(Arrays.asList(splitsArray), new Function<String, Text>() {
                @Override
                public Text apply(String input) {
                    return new Text(input);
                }
            });
            SortedSet<Text> splits = new TreeSet<>(splitsAsText);
            connector.tableOperations().addSplits(WarehausConstants.TABLE_NAME, splits);
        }
    } catch (Exception e) {
        logger.error("An error occurred while checking for the existence of or while creating the warehaus table '" + WarehausConstants.TABLE_NAME + "'.", e);
        throw new TException(e);
    }
    try {
        if (!connector.tableOperations().exists(WarehausConstants.PURGE_TABLE_NAME)) {
            logger.warn("The warehaus purge table '" + WarehausConstants.PURGE_TABLE_NAME + "' does not exist. An attempt to create the table will start now.");
            connector.tableOperations().create(WarehausConstants.PURGE_TABLE_NAME, true);
            logger.warn("The warehaus purge table '" + WarehausConstants.PURGE_TABLE_NAME + "' was created.");
        }
    } catch (Exception e) {
        logger.error("An error occurred while checking for the existence of or while creating the warehaus purge table '" + WarehausConstants.PURGE_TABLE_NAME + "'.", e);
        throw new TException(e);
    }
    try {
        // The ezbatch user gets read access so batch jobs can scan the warehaus table.
        String ezBatchUser = getConfigurationProperties().getProperty("ezbatch.user", "ezbake");
        connector.securityOperations().grantTablePermission(ezBatchUser, WarehausConstants.TABLE_NAME, TablePermission.READ);
        logger.info("READ permission granted to ezbatch user");
    } catch (Exception e) {
        logger.error("An error occurred while trying to give the ezbatch user access");
        throw new TException(e);
    }
}

/** Creates a batch scanner over the warehaus table with the given authorizations. */
private BatchScanner createScanner(String auths) throws TException {
    try {
        return connector.createBatchScanner(WarehausConstants.TABLE_NAME, WarehausUtils.getAuthsFromString(auths), WarehausConstants.QUERY_THREADS);
    } catch (TableNotFoundException e) {
        throw new TException(e);
    }
}

/** Creates a batch deleter over the warehaus table, sized from the configured batch-writer properties. */
private BatchDeleter createDeleter(String auths) throws TException {
    try {
        EzProperties properties = new EzProperties(getConfigurationProperties(), false);
        long maxMemory = properties.getLong(WarehausConstants.BATCH_WRITER_MAX_MEMORY_KEY, WarehausConstants.DEFAULT_WRITER_MAX_MEMORY);
        long latency = properties.getLong(WarehausConstants.BATCH_WRITER_LATENCY_MS_KEY, WarehausConstants.DEFAULT_LATENCY);
        int threads = properties.getInteger(WarehausConstants.BATCH_WRITER_WRITE_THREADS_KEY, WarehausConstants.DEFAULT_WRITE_THREADS);
        BatchWriterConfig config = new BatchWriterConfig().setMaxLatency(latency, TimeUnit.MILLISECONDS).setMaxMemory(maxMemory).setMaxWriteThreads(threads);
        return connector.createBatchDeleter(WarehausConstants.TABLE_NAME, WarehausUtils.getAuthsFromString(auths), WarehausConstants.QUERY_THREADS, config);
    } catch (TableNotFoundException e) {
        throw new TException(e);
    }
}

/** Creates a batch writer for the warehaus table using configured memory/latency/thread limits. */
private BatchWriter createWriter() throws TException {
    try {
        EzProperties properties = new EzProperties(getConfigurationProperties(), false);
        long maxMemory = properties.getLong(WarehausConstants.BATCH_WRITER_MAX_MEMORY_KEY, WarehausConstants.DEFAULT_WRITER_MAX_MEMORY);
        long latency = properties.getLong(WarehausConstants.BATCH_WRITER_LATENCY_MS_KEY, WarehausConstants.DEFAULT_LATENCY);
        int threads = properties.getInteger(WarehausConstants.BATCH_WRITER_WRITE_THREADS_KEY, WarehausConstants.DEFAULT_WRITE_THREADS);
        BatchWriterConfig config = new BatchWriterConfig().setMaxLatency(latency, TimeUnit.MILLISECONDS).setMaxMemory(maxMemory).setMaxWriteThreads(threads);
        BatchWriter writer = connector.createBatchWriter(WarehausConstants.TABLE_NAME, config);
        logger.debug("Writer initialized with max memory of {}, latency of {}, and {} threads", maxMemory, latency, threads);
        return writer;
    } catch (TableNotFoundException e) {
        logger.error("Could not initialize batch writer because table is missing", e);
        throw new RuntimeException(e);
    }
}

/** Queues the mutation on the writer, converting Accumulo rejections into TException. */
private void writeMutation(Mutation mutator, BatchWriter writer) throws TException {
    try {
        writer.addMutation(mutator);
    } catch (MutationsRejectedException e) {
        throw new TException(e);
    }
}

/** Flushes the writer if non-null; rejected mutations surface as TException. */
private void flushWriter(BatchWriter writer) throws TException {
    try {
        if (writer != null) { // shouldn't normally be null, but anyway
            writer.flush();
        }
    } catch (MutationsRejectedException e) {
        throw new TException(e);
    }
}

/** Closes the writer if non-null; rejected mutations surface as TException. */
private void closeWriter(BatchWriter writer) throws TException {
    try {
        if (writer != null) {
            writer.close();
        }
    } catch (MutationsRejectedException e) {
        throw new TException(e);
    }
}

/** Attaches the EzBake visibility filter (caller auths plus required permissions) to the scanner. */
private void addEzBakeVisibilityFilter(ScannerBase scanner, EzSecurityToken token, Set<Permission> permissions, IteratorSetting iteratorSetting) throws TException {
    iteratorSetting.clearOptions();
    EzBakeVisibilityFilter.setOptions(iteratorSetting, token.getAuthorizations(), permissions);
    scanner.addScanIterator(iteratorSetting);
}

private
void checkWritePermission(String uri, Visibility visibility, EzSecurityToken token, boolean updateVisibility) throws EzBakeAccessDenied {
    // WRITE is always required; MANAGE_VISIBILITY only when the caller wants to re-label.
    if (!PermissionUtils.getPermissions(token.getAuthorizations(), visibility, true).contains(Permission.WRITE)) {
        throw new EzBakeAccessDenied().setMessage("Given user token does not have the " + "required authorizations to add/update document with uri " + uri);
    }
    if (updateVisibility && !PermissionUtils.getPermissions(token.getAuthorizations(), visibility, true).contains(Permission.MANAGE_VISIBILITY)) {
        throw new EzBakeAccessDenied().setMessage("Given user token does not have the " + "required authorizations to add/update visibilty of document with uri " + uri);
    }
}

/**
 * Emits one audit event of the given type with one event arg per map entry.
 * No-op when no audit logger is configured.
 */
private void auditLog(EzSecurityToken userToken, AuditEventType eventType, Map<String, String> args) {
    AuditEvent event = new AuditEvent(eventType, userToken);
    for (String argName : args.keySet()) {
        event.arg(argName, args.get(argName));
    }
    if (auditLogger != null) {
        auditLogger.logEvent(event);
    }
}

/**
 * ******************************************************
 * /*
 * /* Code Consolidation
 * /*
 * *******************************************************
 */

/**
 * Fetches the single latest readable version for one (uri, family, qualifier) cell.
 *
 * @throws EntryNotInWarehausException when no readable entry exists for the key
 */
private BinaryReplay getLatest(String uri, String columnFamily, String columnQualifier, EzSecurityToken security) throws TException, EntryNotInWarehausException {
    List<Key> keys = Lists.newArrayList(new Key(WarehausUtils.getKey(uri), new Text(columnFamily), new Text(columnQualifier)));
    List<BinaryReplay> results = null;
    try {
        results = getLatest(keys, security);
        if (results.size() == 0) {
            throw new EntryNotInWarehausException(String.format("No entry found in warehaus for uri %s, and data type %s", uri, columnQualifier));
        }
        return results.get(0);
    } catch (MaxGetRequestSizeExceededException ex) {
        // should not really happen when fetching one specific key.
        logger.error("Batch scan max memory exceeded error occured.", ex);
        throw new TException(ex);
    }
}

/**
 * Batch-fetches the latest readable version for each requested key, enforcing the
 * configured maximum total response size.
 */
private List<BinaryReplay> getLatest(List<Key> keys, EzSecurityToken security) throws TException, MaxGetRequestSizeExceededException {
    List<BinaryReplay> results = Lists.newArrayList();
    if (keys == null || keys.size() == 0) {
        return results;
    }
    String auths = WarehausUtils.getAuthsListFromToken(security);
    BatchScanner scanner = null;
    try {
        scanner = createScanner(auths);
        IteratorSetting iteratorSetting = new IteratorSetting(33, "warehausLatestVisibilityIterator", EzBakeVisibilityFilter.class);
        addEzBakeVisibilityFilter(scanner, security, EnumSet.of(Permission.READ), iteratorSetting);
        List<Range> ranges = Lists.newArrayList();
        for (Key key : keys) {
            Range range = new Range(key, true, key.followingKey(PartialKey.ROW_COLFAM_COLQUAL), false);
            ranges.add(range);
        }
        scanner.setRanges(ranges);
        Map<String, Long> uriTimestamps = Maps.newHashMap();
        Map<String, BinaryReplay> uriRetVals = Maps.newHashMap();
        long currentScanSize = 0l;
        long maxBatchScanSize = new EzProperties(this.getConfigurationProperties(), false).getLong(
                WarehausConstants.BATCH_SCANNER_MAX_MEMORY_KEY, WarehausConstants.DEFAULT_SCANNER_MAX_MEMORY);
        for (Entry<Key, Value> latest : scanner) {
            if (latest != null) {
                String uri = WarehausUtils.getUriFromKey(latest.getKey());
                long ts = latest.getKey().getTimestamp();
                // Do some kludgy stuff with timestamp checks to ensure
                // we're only grabbing the latest.
                // Ideally, we'd like to use a VersioningIterator with
                // maxVersions set to 1 that gets us the latest, but that
                // won't always work because updateEntry() doesn't always
                // update all versions of a uri with the same visibility.
                if ((!uriTimestamps.containsKey(uri)) || (uriTimestamps.containsKey(uri) && uriTimestamps.get(uri).longValue() < ts)) {
                    BinaryReplay forReturn = new BinaryReplay();
                    VisibilityWrapper visibilityAndValue = VisibilitySerialization.deserializeVisibilityWrappedValue(latest.getValue());
                    VersionControl versionData = ThriftUtils.deserialize(VersionControl.class, visibilityAndValue.getValue());
                    forReturn.setPacket(versionData.getPacket());
                    forReturn.setTimestamp(TimeUtil.convertToThriftDateTime(ts));
                    forReturn.setUri(uri);
                    forReturn.setVisibility(visibilityAndValue.getVisibilityMarkings());
                    // if max batch scan size exceeded, break
                    int len = ThriftUtils.serialize(forReturn).length;
                    currentScanSize = currentScanSize + len;
                    if (currentScanSize > maxBatchScanSize) {
                        throw new MaxGetRequestSizeExceededException("Max get request size of " + maxBatchScanSize + " exceeded. " + "Configure the " + WarehausConstants.BATCH_SCANNER_MAX_MEMORY_KEY + " property appropriately and re-try");
                    }
                    uriRetVals.put(uri, forReturn);
                    uriTimestamps.put(uri, ts);
                }
            }
        }
        results.addAll(uriRetVals.values());
    } catch (IOException e) {
        logger.error("Could not deserialize value from Accumulo", e);
        throw new TException("Could not retrieve data for request", e);
    } finally {
        if (scanner != null) {
            scanner.close();
        }
    }
    return results;
}

/**
 * Fetches one specific version (identified by its timestamp) for a
 * (uri, family, qualifier) cell.
 *
 * @throws EntryNotInWarehausException when no readable entry exists at that timestamp
 */
private BinaryReplay getVersion(String uri, String columnFamily, String columnQualifier, long timestamp, EzSecurityToken security) throws TException, EntryNotInWarehausException {
    List<Key> keys = Lists.newArrayList(new Key(WarehausUtils.getKey(uri), new Text(columnFamily), new Text(columnQualifier), timestamp));
    List<BinaryReplay> results = null;
    try {
        results = getVersion(keys, security);
        if (results.size() == 0) {
            throw new EntryNotInWarehausException(String.format("No entry found in warehaus for uri %s, and data type %s, at time %s", uri, columnQualifier, timestamp));
        }
        return results.get(0);
    } catch (MaxGetRequestSizeExceededException ex) {
        // should not really happen when fetching one specific key.
        logger.error("Batch scan max memory exceeded error occured.", ex);
        throw new TException(ex);
    }
}

/**
 * Batch-fetches the exact versions named by each key's timestamp, enforcing the
 * configured maximum total response size.
 */
private List<BinaryReplay> getVersion(List<Key> keys, EzSecurityToken security) throws TException, MaxGetRequestSizeExceededException {
    String auths = WarehausUtils.getAuthsListFromToken(security);
    List<BinaryReplay> results = Lists.newArrayList();
    if (keys == null || keys.size() == 0) {
        return results;
    }
    Map<String, Long> uriTimestamps = Maps.newHashMap();
    BatchScanner scanner = null;
    try {
        scanner = createScanner(auths);
        IteratorSetting iteratorSetting = new IteratorSetting(41, "warehausVersionVisibilityIterator", EzBakeVisibilityFilter.class);
        addEzBakeVisibilityFilter(scanner, security, EnumSet.of(Permission.READ), iteratorSetting);
        List<Range> ranges = Lists.newArrayList();
        for (Key key : keys) {
            Range range = new Range(key, true, key.followingKey(PartialKey.ROW_COLFAM_COLQUAL), false);
            ranges.add(range);
            uriTimestamps.put(WarehausUtils.getUriFromKey(key), key.getTimestamp());
        }
        scanner.setRanges(ranges);
        // We don't know what maxVersions is going to be configured as on the accumulo cluster, so lets be safe
        // here and return MAX_VALUE versions for this scanner
        IteratorSetting is = new IteratorSetting(10, VersioningIterator.class);
        VersioningIterator.setMaxVersions(is, Integer.MAX_VALUE);
        scanner.addScanIterator(is);
        long currentScanSize = 0l;
        long maxBatchScanSize = new EzProperties(this.getConfigurationProperties(), false).getLong(
                WarehausConstants.BATCH_SCANNER_MAX_MEMORY_KEY, WarehausConstants.DEFAULT_SCANNER_MAX_MEMORY);
        for (Entry<Key, Value> entry : scanner) {
            if (entry != null) {
                String uri = WarehausUtils.getUriFromKey(entry.getKey());
                long ts = entry.getKey().getTimestamp();
                // I REALLY wanted to use the TimestampFilter iterator provided with Accumulo here, but it does not work at
                // millisecond granularity. Which is ridiculous since Accumulo stores timestamps with millisecond granularity.
                // Doesn't make a lot of sense...does it? /rant
                // So here's some kludgy stuff to filter by timestamp.
                if (ts == uriTimestamps.get(uri).longValue()) {
                    BinaryReplay forReturn = new BinaryReplay();
                    VisibilityWrapper visibilityAndValue = VisibilitySerialization.deserializeVisibilityWrappedValue(entry.getValue());
                    VersionControl versionData = ThriftUtils.deserialize(VersionControl.class, visibilityAndValue.getValue());
                    forReturn.setPacket(versionData.getPacket());
                    forReturn.setTimestamp(TimeUtil.convertToThriftDateTime(ts));
                    forReturn.setUri(uri);
                    forReturn.setVisibility(visibilityAndValue.getVisibilityMarkings());
                    // if max batch scan size exceeded, break
                    int len = ThriftUtils.serialize(forReturn).length;
                    currentScanSize = currentScanSize + len;
                    if (currentScanSize > maxBatchScanSize) {
                        throw new MaxGetRequestSizeExceededException("Max get request size of " + maxBatchScanSize + " exceeded. " + "Configure the " + WarehausConstants.BATCH_SCANNER_MAX_MEMORY_KEY + " property appropriately and re-try");
                    }
                    results.add(forReturn);
                }
            }
        }
    } catch (IOException e) {
        logger.error("Could not deserialize value from Accumulo", e);
        throw new TException("Could not retrieve data for request", e);
    } finally {
        if (scanner != null) {
            scanner.close();
        }
    }
    return results;
}

/** Drops both warehaus tables and recreates them via ensureTable(). */
protected void resetTable() throws AccumuloException, AccumuloSecurityException, TableNotFoundException, TException {
    connector.tableOperations().delete(WarehausConstants.TABLE_NAME);
    connector.tableOperations().delete(WarehausConstants.PURGE_TABLE_NAME);
    ensureTable();
}

/**
 * Start a purge of the given items. This method will begin purging items
 * that match the given a list of ids to purge and will call back to the
 * purgeCallbackService when the purge has completed. The return of this
 * function without exception indicates that the application has taken
 * responsibility of purging documents matching purgeIds from its data sets.
 * It does not indicate completion of the purge.
* <p/>
 * Returns the state of the new purge request.
 *
 * @param purgeCallbackService ezDiscovery path of the purge service to call
 *        back.
 * @param purgeId Unique id to use for this purge request. Required.
 * @param idsToPurge A set containing all the items to purge. This should
 *        be sent to the data access layer to perform the purge.
 * @param initiatorToken Security token for the service or user that
 *        initiated the purge.
 * @throws PurgeException If the purgeId is null or empty.
 * @throws TException If an error occurred during the processing of the
 *         purge.
 */
@Override
public PurgeState beginPurge(String purgeCallbackService, long purgeId, Set<Long> idsToPurge, EzSecurityToken initiatorToken) throws PurgeException, EzSecurityTokenException, TException {
    if (initiatorToken == null) {
        throw new TException("The security token was not provided when requesting the warehaus purge.");
    }
    if (!isPurgeAppSecurityId(initiatorToken)) {
        throw new TException("A warehaus purge may only be initiated by the Central Purge Service.");
    }
    HashMap<String, String> auditArgs = Maps.newHashMap();
    auditArgs.put("action", "beginPurge");
    auditArgs.put("purgeId", Long.toString(purgeId));
    auditLog(initiatorToken, AuditEventType.FileObjectDelete, auditArgs);
    PurgeState purgeState = createDefaultPurgeState(purgeId);
    purgeState.setPurgeStatus(PurgeStatus.STARTING);
    insertPurgeStatus(purgeState);
    // The actual purge runs on a background thread; this call returns once the
    // STARTING state has been recorded.
    ExecutorService executorService = Executors.newSingleThreadExecutor();
    executorService.execute(new WarehousePurger(purgeId, idsToPurge, initiatorToken));
    executorService.shutdown();
    return purgeState;
}

/** Virus purges are handled identically to regular purges. */
@Override
public PurgeState beginVirusPurge(String purgeCallbackService, long purgeId, Set<Long> idsToPurge, EzSecurityToken initiatorToken) throws PurgeException, EzSecurityTokenException, TException {
    return this.beginPurge(purgeCallbackService, purgeId, idsToPurge, initiatorToken);
}

/**
 * <p>
 * Returns the most recent state of a given purge request.
 * </p>
 *
 * @param purgeId Unique id to use for this purge request
 * @return Status of the given purge, UNKNOWN_ID if it was not found
 */
@Override
public PurgeState purgeStatus(EzSecurityToken token, long purgeId) throws EzSecurityTokenException, TException {
    if (token == null) {
        throw new TException("The security token was not provided when requesting the warehaus purge status.");
    }
    if (!isPurgeAppSecurityId(token)) {
        throw new TException("A warehaus purge status may only be initiated by the Central Purge Service.");
    }
    HashMap<String, String> auditArgs = Maps.newHashMap();
    auditArgs.put("action", "purgeStatus");
    auditArgs.put("purgeId", Long.toString(purgeId));
    auditLog(token, AuditEventType.FileObjectAccess, auditArgs);
    PurgeState state = createDefaultPurgeState(purgeId);
    state.setPurgeStatus(PurgeStatus.UNKNOWN_ID);
    Scanner scanner = null;
    try {
        scanner = connector.createScanner(
                WarehausConstants.PURGE_TABLE_NAME,
                WarehausUtils.getAuthsFromString(WarehausUtils.getAuthsListFromToken(token)));
        IteratorSetting is = new IteratorSetting(10, VersioningIterator.class);
        VersioningIterator.setMaxVersions(is, Integer.MAX_VALUE);
        scanner.addScanIterator(is);
        scanner.setRange(new Range(new Text(String.valueOf(purgeId))));
        int entriesFoundCount = 0;
        // The last entry iterated wins; more than one entry is unexpected and logged below.
        for (Entry<Key, Value> entry : scanner) {
            entriesFoundCount++;
            state = ThriftUtils.deserialize(PurgeState.class, entry.getValue().get());
        }
        if (entriesFoundCount > 1) {
            logger.warn("A total of {} entries were found in the warehaus purge db when searching for the purge id #{}. Expected only the return of the most recent record.", entriesFoundCount, purgeId);
        }
    } catch (TableNotFoundException e) {
        throw new TException(e);
    } finally {
        if (scanner != null) {
            scanner.close();
        }
    }
    return state;
}

/**
 * <p>
 * Cancelling a purge from the warehouse may not occur. As a result, the
 * cancel status will be set to {@link CancelStatus#CANNOT_CANCEL}.
 * </p>
 */
@Override
public PurgeState cancelPurge(EzSecurityToken token, long purgeId) throws EzSecurityTokenException, TException {
    if (token == null) {
        throw new TException("The security token was not provided when requesting the warehaus purge cancellation.");
    }
    if (!isPurgeAppSecurityId(token)) {
        throw new TException("A warehaus purge cancellation may only be initiated by the Central Purge Service.");
    }
    HashMap<String, String> auditArgs = Maps.newHashMap();
    auditArgs.put("action", "cancelPurge");
    auditArgs.put("purgeId", Long.toString(purgeId));
    auditLog(token, AuditEventType.FileObjectModify, auditArgs);
    PurgeState state = this.purgeStatus(token, purgeId);
    // A purge that has not started yet can still be cancelled; anything further
    // along is marked CANNOT_CANCEL.
    if (this.mayCancelPurgeProceed(state)) {
        state.setCancelStatus(CancelStatus.CANCELED);
        state.setPurgeStatus(PurgeStatus.FINISHED_COMPLETE);
    } else {
        state.setCancelStatus(CancelStatus.CANNOT_CANCEL);
    }
    state.setTimeStamp(TimeUtil.getCurrentThriftDateTime());
    this.insertPurgeStatus(state);
    return state;
}

/**
 * <p>
 * Answers true if the given PurgeState is in a state where it may proceed
 * with a cancellation and false if it is not.
 * </p>
 *
 * @param state The PurgeState which is evaluated to determine if purge
 *        cancellation is permitted. If this is null then false is returned.
 */
boolean mayCancelPurgeProceed(PurgeState state) {
    return state != null
            && CancelStatus.NOT_CANCELED.equals(state.getCancelStatus())
            && (PurgeStatus.WAITING_TO_START.equals(state.getPurgeStatus())
            || PurgeStatus.STARTING.equals(state.getPurgeStatus()));
}

/**
 * <p/>
 * Creates a PurgeState record with default values. By default, the
 * purge status is set to {@link PurgeStatus#WAITING_TO_START} and the
 * cancel status is set to {@link CancelStatus#NOT_CANCELED}.
 *
 * @param purgeId The purge id value to which the purgeId attribute
 *        is set.
 * @return A new instance of a PurgeState containing the given purgeId
 *         and the default values.
 */
PurgeState createDefaultPurgeState(long purgeId) {
    PurgeState state = new PurgeState();
    state.setCancelStatus(CancelStatus.NOT_CANCELED);
    state.setNotPurged(new TreeSet<Long>());
    state.setPurged(new TreeSet<Long>());
    state.setPurgeId(purgeId);
    state.setSuggestedPollPeriod(2000);
    state.setTimeStamp(TimeUtil.getCurrentThriftDateTime());
    state.setPurgeStatus(PurgeStatus.WAITING_TO_START);
    return state;
}

/**
 * <p>
 * Inserts a purge status record for the given collection of URIs. The state
 * of the purge is determined by the status given in the purgeState parameter.
 * </p>
 *
 * @param purgeState The state of the purge. This will be persisted for each
 *        URI given in the uris parameter. Required. The purgeId is required.
 * @throws TException If an error occurs while writing to the purge table.
 *         If either purgeId or purgeStatus are empty or null.
 */
void insertPurgeStatus(PurgeState purgeState) throws TException {
    if (purgeState == null) {
        throw new TException("The purge state is required for inserting a warehaus purge record.");
    }
    Visibility visibility = new Visibility();
    visibility.setFormalVisibility(this.purgeVisibility);
    BatchWriter writer = null;
    writer = createPurgeWriter();
    // Row key is the purge id; empty family/qualifier, one serialized PurgeState per version.
    Mutation m = new Mutation(new Text(String.valueOf(purgeState.getPurgeId())));
    try {
        m.put(new Text(""), new Text(""), new ColumnVisibility(visibility.getFormalVisibility()), Calendar.getInstance().getTimeInMillis(), new Value(ThriftUtils.serialize(purgeState)));
        writer.addMutation(m);
    } catch (MutationsRejectedException e) {
        logger.error("The write to the warehaus purge table failed for Purge Id '" + purgeState.getPurgeId() + "'.", e);
        throw new TException(e);
    } finally {
        try {
            flushWriter(writer);
        } finally {
            closeWriter(writer);
        }
    }
}

/**
 * <p>
 * Removes the warehaus entries identified by the given list of URIs.
 * </p>
 *
 * @param uriList A collection of URIs that uniquely identify the warehaus
 *        entries to remove. If this is null or empty then no processing
 *        occurs.
* @param initiatorToken The security token. Required. * @throws Exception If an error occurs while deleting the warehaus entries. */ public void remove(Collection<String> uriList, EzSecurityToken initiatorToken) throws Exception { if (uriList == null || uriList.isEmpty()) return; BatchDeleter deleter = null; List<Range> ranges = Lists.newArrayList(); try { for (String uri : uriList) { ranges.add(Range.exact(WarehausUtils.getKey(uri))); } deleter = createDeleter(WarehausUtils.getAuthsListFromToken(initiatorToken)); deleter.setRanges(ranges); deleter.delete(); } finally { if (deleter != null) { deleter.close(); } } } /** * <p> * Given a set of purge bit ids, return the corresponding URI for each * purge bit id as one collection. To see the mapping of purge bit id * to URI then call #getUriMapping. * </p> * * @param idsToPurge All of he id of URIs used by the purge service that * are referenced in a single purge request. Required. * @param securityToken The security token. Required. * @return * @throws TException If an error occurs when translating the bitvector * from the provenance service. */ private Collection<String> getUris(Set<Long> idsToPurge, EzSecurityToken securityToken) throws TException { Map<Long, String> map = this.getUriMapping(idsToPurge, securityToken); Collection<String> uris = map.values(); return uris == null ? new ArrayList<String>() : uris; } /** * <p> * Given a set of ids, return a mapping of ids to URIs. * </p> * * @param idsToPurge All of he id of URIs used by the purge service that * are referenced in a single purge request. Required. * @param securityToken The security token. Required. * @return A map where the value is the URI. * @throws TException If an error occurs when translating the bitvector * from the provenance service. 
*/ private Map<Long, String> getUriMapping(Set<Long> idsToPurge, EzSecurityToken securityToken) throws TException { ThriftClientPool pool = new ThriftClientPool(this.getConfigurationProperties()); ProvenanceService.Client client = null; try { client = pool.getClient(ProvenanceServiceConstants.SERVICE_NAME, ProvenanceService.Client.class); } finally { if (pool != null) pool.close(); } ArrayList<Long> idsToPurgeList = new ArrayList<>(); idsToPurgeList.addAll(idsToPurge); EzSecurityTokenWrapper chainedToken = securityClient.fetchDerivedTokenForApp(securityToken, pool.getSecurityId(ProvenanceServiceConstants.SERVICE_NAME)); PositionsToUris uriPositions = client.getDocumentUriFromId(chainedToken, idsToPurgeList); return uriPositions.getMapping(); } /** * <p> * Answers true if the given security token has an application security id * that is equal to the application security id from the purge service. If * they are not equivalent then false is returned. * </p> * * @param securityToken The security token that is checked to determine if * it is from the purge service. Required. * @return True if the token has an application security id that matches * purge service's application security id and false if not. */ private boolean isPurgeAppSecurityId(EzSecurityToken securityToken) throws EzSecurityTokenException { EzSecurityTokenWrapper securityWrapper = new EzSecurityTokenWrapper(securityToken); securityClient.validateReceivedToken(securityToken); return securityWrapper.getSecurityId().equals(this.getPurgeAppSecurityId()); } /** * <p> * Returns the application securityId for the purge service. * </p> * <p/> * This can be moved to the {@link #getThriftProcessor()} method. * * @return The application security id for the purge service. 
*/
    private String getPurgeAppSecurityId() {
        // Lazily resolved and cached; the pool is only needed for the first lookup.
        // NOTE(review): not synchronized — concurrent first calls may each create a
        // pool, but both compute the same id, so the race is benign.
        if (this.purgeAppSecurityId == null) {
            ThriftClientPool pool = new ThriftClientPool(this.getConfigurationProperties());
            purgeAppSecurityId = pool.getSecurityId(ezCentralPurgeServiceConstants.SERVICE_NAME);
            pool.close();
        }
        return purgeAppSecurityId;
    }

    /**
     * <p>
     * Create a writer instance for the purge table.
     * </p>
     *
     * @return An accumulo batch writer.
     * @throws RuntimeException If the purge table could not be found (the
     *             underlying TableNotFoundException is wrapped; note the
     *             declared TException is not actually thrown here).
     */
    private BatchWriter createPurgeWriter() throws TException {
        try {
            EzProperties properties = new EzProperties(getConfigurationProperties(), false);
            // Writer tuning comes from configuration, falling back to the defaults.
            BatchWriterConfig writerConfig = new BatchWriterConfig()
                    .setMaxLatency(properties.getLong(WarehausConstants.BATCH_WRITER_LATENCY_MS_KEY, WarehausConstants.DEFAULT_LATENCY), TimeUnit.MILLISECONDS)
                    .setMaxMemory(properties.getLong(WarehausConstants.BATCH_WRITER_MAX_MEMORY_KEY, WarehausConstants.DEFAULT_WRITER_MAX_MEMORY))
                    .setMaxWriteThreads(properties.getInteger(WarehausConstants.BATCH_WRITER_WRITE_THREADS_KEY, WarehausConstants.DEFAULT_WRITE_THREADS));
            return connector.createBatchWriter(WarehausConstants.PURGE_TABLE_NAME, writerConfig);
        } catch (TableNotFoundException e) {
            logger.error("A batch writer could not be initialized for the '" + WarehausConstants.PURGE_TABLE_NAME + "' table because it is missing.", e);
            throw new RuntimeException(e);
        }
    }

    /**
     * Background job that removes one purge request's entries from the warehaus.
     */
    private class WarehousePurger implements Runnable {

        // Identifier of the purge request this job services.
        private long purgeId;
        // Token of the caller that initiated the purge.
        private EzSecurityToken initiatorToken;
        // Purge-service ids whose URIs should be removed.
        private Set<Long> idsToPurge;
        // Visibility applied when recording purge bookkeeping.
        private Visibility visibility;

        WarehousePurger(long purgeId, Set<Long> idsToPurge, EzSecurityToken initiatorToken) {
            this.purgeId = purgeId;
            this.idsToPurge = idsToPurge;
            this.initiatorToken = initiatorToken;
            this.visibility = new Visibility();
            this.visibility.setFormalVisibility(purgeVisibility);
        }

        /**
         * <p>
         * Answers true if, based on the given purge state, the purge may proceed
         * and false if it may not.
         * </p>
         *
         * @param state The purge state which is evaluated.
*/
        private boolean mayPurgeProceed(PurgeState state) {
            // A purge may not run once it has been canceled (or is being canceled)
            // or has already reached a terminal FINISHED_* status.
            return !(CancelStatus.CANCELED.equals(state.getCancelStatus())
                    || CancelStatus.CANCEL_IN_PROGRESS.equals(state.getCancelStatus())
                    || PurgeStatus.FINISHED_COMPLETE.equals(state.getPurgeStatus())
                    || PurgeStatus.FINISHED_INCOMPLETE.equals(state.getPurgeStatus()));
        }

        /**
         * <p>
         * Executes the warehouse purge.
         * </p>
         */
        @Override
        public void run() {
            try {
                PurgeState state = purgeStatus(initiatorToken, this.purgeId);
                if (this.mayPurgeProceed(state)) {
                    if (idsToPurge == null || idsToPurge.size() == 0) {
                        // Nothing to delete: record the request as finished.
                        logger.info("No warehouse entries were given for purge request #{}. Marking the purge as finished.", this.purgeId);
                        state.setPurgeStatus(PurgeStatus.FINISHED_COMPLETE);
                    } else {
                        try {
                            // Translate the purge ids to URIs, then delete those rows.
                            Collection<String> uriList = getUris(idsToPurge, initiatorToken);
                            remove(uriList, initiatorToken);
                            state.setPurged(idsToPurge);
                            state.setPurgeStatus(PurgeStatus.FINISHED_COMPLETE);
                        } catch (Exception e) {
                            // Deliberately broad: a worker-thread failure is recorded in
                            // the purge state instead of being propagated.
                            logger.error("The delete of the URIs from the warehouse table failed for purge request #{}.", this.purgeId, e);
                            state.setNotPurged(idsToPurge);
                            state.setPurgeStatus(PurgeStatus.ERROR);
                        }
                    }
                    // Persist the outcome (success or error) with a fresh timestamp.
                    state.setTimeStamp(TimeUtil.getCurrentThriftDateTime());
                    insertPurgeStatus(state);
                } else {
                    logger.info("The purge request #{} was skipped for warehouse because the state is not valid for a purge. The purge and cancel statuses, respectively, are: {} and {}.", this.purgeId, state.getPurgeStatus(), state.getCancelStatus());
                }
            } catch (TException e) {
                logger.error("The purge request #{} encountered an error that prevented the warehouse purge from completing properly.", this.purgeId, e);
            }
        }
    }
}
apache-2.0
Sethtroll/runelite
runelite-client/src/main/java/net/runelite/client/plugins/worldmap/MinigameLocation.java
4495
/* * Copyright (c) 2018, Magic fTail * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
package net.runelite.client.plugins.worldmap;

import lombok.Getter;
import net.runelite.api.coords.WorldPoint;

/**
 * Minigame markers drawn on the world map. Each constant pairs the tooltip
 * text shown on hover with the world coordinate where the icon is placed.
 */
@Getter
enum MinigameLocation
{
	BARBARIAN_ASSAULT("Barbarian Assault", new WorldPoint(2531, 3569, 0)),
	BURGH_DE_ROTT_RAMBLE("Burgh de Rott Ramble", new WorldPoint(3434, 3487, 0)),
	CASTLE_WARS("Castle Wars", new WorldPoint(2439, 3092, 0)),
	CASTLE_WARS_PORTAL("Castle Wars Portal", new WorldPoint(3140, 3626, 0)),
	DUEL_ARENA("Duel Arena", new WorldPoint(3313, 3238, 0)),
	MAGE_ARENA("Mage Arena", new WorldPoint(3095, 3957, 0)),
	NIGHTMARE_ZONE("Nightmare Zone", new WorldPoint(2606, 3115, 0)),
	PEST_CONTROL_NOVICE("Pest Control Novice", new WorldPoint(2660, 2637, 0)),
	PEST_CONTROL_INTERMEDIATE("Pest Control Intermediate", new WorldPoint(2638, 2641, 0)),
	PEST_CONTROL_VETERAN("Pest Control Veteran", new WorldPoint(2632, 2648, 0)),
	TEMPLE_TREKKING("Temple Trekking", new WorldPoint(3479, 3240, 0)),
	TZHAAR_FIGHT_CAVE("TzHaar Fight Cave", new WorldPoint(2437, 5168, 0)),
	TZHAAR_FIGHT_PIT("TzHaar Fight Pit", new WorldPoint(2398, 5177, 0)),
	LAST_MAN_STANDING("Last Man Standing", new WorldPoint(3138, 3635, 0)),
	INFERNO("Inferno", new WorldPoint(2495, 5118, 0)),
	BRIMHAVEN_AGILITY_ARENA("Brimhaven Agility Arena", new WorldPoint(2809, 3191, 0)),
	FISHING_TRAWLER("Fishing Trawler", new WorldPoint(2667, 3163, 0)),
	GNOME_BALL("Gnome Ball", new WorldPoint(2381, 3488, 0)),
	GNOME_RESTAURANT("Gnome Restaurant", new WorldPoint(2436, 3502, 0)),
	IMPETUOUS_IMPULSES("Impetuous Impulses", new WorldPoint(2425, 4445, 0)),
	MAGE_TRAINING_ARENA("Mage Training Arena", new WorldPoint(3362, 3318, 0)),
	PYRAMID_PLUNDER("Pyramid Plunder", new WorldPoint(3288, 2787, 0)),
	RANGING_GUILD("Ranging Guild", new WorldPoint(2671, 3419, 0)),
	ROGUES_DEN("Rogues' Den", new WorldPoint(2905, 3537, 0)),
	SORCERESSS_GARDEN("Sorceress's Garden", new WorldPoint(3285, 3180, 0)),
	TROUBLE_BREWING("Trouble Brewing", new WorldPoint(3811, 3021, 0)),
	VOLCANIC_MINE("Volcanic Mine", new WorldPoint(3812, 3810, 0)),
	TAI_BWO_WANNAI_CLEANUP("Tai Bwo Wannai Cleanup", new WorldPoint(2795, 3066, 0)),
	BURTHORPE_GAMES_ROOM("Burthorpe Games Room", new WorldPoint(2900, 3565, 0)),
	RAT_PITS_PORT_SARIM("Rat Pits", new WorldPoint(3015, 3232, 0)),
	RAT_PITS_VARROCK("Rat Pits", new WorldPoint(3266, 3400, 0)),
	RAT_PITS_ARDOUGNE("Rat Pits", new WorldPoint(2561, 3318, 0)),
	RAT_PITS_KELDAGRIM("Rat Pits", new WorldPoint(2913, 10188, 0)),
	TEARS_OF_GUTHIX("Tears of Guthix", new WorldPoint(3257, 9517, 0)),
	CLAN_WARS("Clan Wars", new WorldPoint(3133, 3621, 0)),
	ANIMATION_ROOM("Animation Room", new WorldPoint(2853, 3537, 0)),
	DUMMY_ROOM("Dummy Room", new WorldPoint(2857, 3551, 0)),
	CATAPULT_ROOM("Catapult Room", new WorldPoint(2842, 3545, 0)),
	SHOT_PUT_ROOM("Shot Put Room", new WorldPoint(2863, 3550, 0)),
	HALLOWED_SEPULCHRE("Hallowed Sepulchre", new WorldPoint(3653, 3386, 1)),
	THE_GAUNTLET("The Gauntlet", new WorldPoint(3223, 12505, 1));

	// Tooltip text shown when hovering the map icon.
	private final String tooltip;
	// World coordinate (x, y, plane) where the icon is drawn.
	private final WorldPoint location;

	MinigameLocation(String tooltip, WorldPoint location)
	{
		this.tooltip = tooltip;
		this.location = location;
	}
}
bsd-2-clause
sandor-balazs/nosql-java
oracle/src/main/java/com/github/sandor_balazs/nosql_java/web/rest/dto/ManagedUserDTO.java
1749
package com.github.sandor_balazs.nosql_java.web.rest.dto;

import java.time.ZonedDateTime;

import com.github.sandor_balazs.nosql_java.domain.User;

/**
 * A DTO extending the UserDTO, which is meant to be used in the user management UI.
 * It exposes the audit fields (id, creation and last-modification metadata) that
 * the plain UserDTO does not carry.
 */
public class ManagedUserDTO extends UserDTO {

    private Long id;

    private ZonedDateTime createdDate;

    private String lastModifiedBy;

    private ZonedDateTime lastModifiedDate;

    /** Default constructor, required for JSON (de)serialization. */
    public ManagedUserDTO() {
    }

    /** Copies both the UserDTO fields and the audit fields from the domain entity. */
    public ManagedUserDTO(User user) {
        super(user);
        id = user.getId();
        createdDate = user.getCreatedDate();
        lastModifiedBy = user.getLastModifiedBy();
        lastModifiedDate = user.getLastModifiedDate();
    }

    public Long getId() {
        return id;
    }

    public void setId(Long id) {
        this.id = id;
    }

    public ZonedDateTime getCreatedDate() {
        return createdDate;
    }

    public void setCreatedDate(ZonedDateTime createdDate) {
        this.createdDate = createdDate;
    }

    public String getLastModifiedBy() {
        return lastModifiedBy;
    }

    public void setLastModifiedBy(String lastModifiedBy) {
        this.lastModifiedBy = lastModifiedBy;
    }

    public ZonedDateTime getLastModifiedDate() {
        return lastModifiedDate;
    }

    public void setLastModifiedDate(ZonedDateTime lastModifiedDate) {
        this.lastModifiedDate = lastModifiedDate;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("ManagedUserDTO{");
        sb.append("id=").append(id);
        sb.append(", createdDate=").append(createdDate);
        sb.append(", lastModifiedBy='").append(lastModifiedBy).append('\'');
        sb.append(", lastModifiedDate=").append(lastModifiedDate);
        sb.append("} ").append(super.toString());
        return sb.toString();
    }
}
bsd-2-clause
Holdo/perun
perun-web-gui/src/main/java/cz/metacentrum/perun/webgui/tabs/servicestabs/ServicesTabItem.java
6192
package cz.metacentrum.perun.webgui.tabs.servicestabs;

import com.google.gwt.cell.client.FieldUpdater;
import com.google.gwt.event.dom.client.ClickEvent;
import com.google.gwt.event.dom.client.ClickHandler;
import com.google.gwt.resources.client.ImageResource;
import com.google.gwt.user.cellview.client.CellTable;
import com.google.gwt.user.client.ui.*;
import cz.metacentrum.perun.webgui.client.PerunWebSession;
import cz.metacentrum.perun.webgui.client.UiElements;
import cz.metacentrum.perun.webgui.client.localization.ButtonTranslation;
import cz.metacentrum.perun.webgui.client.mainmenu.MainMenu;
import cz.metacentrum.perun.webgui.client.resources.ButtonType;
import cz.metacentrum.perun.webgui.client.resources.PerunSearchEvent;
import cz.metacentrum.perun.webgui.client.resources.SmallIcons;
import cz.metacentrum.perun.webgui.json.JsonCallbackEvents;
import cz.metacentrum.perun.webgui.json.JsonUtils;
import cz.metacentrum.perun.webgui.json.servicesManager.DeleteService;
import cz.metacentrum.perun.webgui.json.servicesManager.GetServices;
import cz.metacentrum.perun.webgui.model.Service;
import cz.metacentrum.perun.webgui.tabs.ServicesTabs;
import cz.metacentrum.perun.webgui.tabs.TabItem;
import cz.metacentrum.perun.webgui.tabs.TabItemWithUrl;
import cz.metacentrum.perun.webgui.tabs.UrlMapper;
import cz.metacentrum.perun.webgui.widgets.CustomButton;
import cz.metacentrum.perun.webgui.widgets.ExtendedSuggestBox;
import cz.metacentrum.perun.webgui.widgets.TabMenu;

import java.util.ArrayList;
import java.util.Map;

/**
 * Services management for Perun Admin
 *
 * @author Pavel Zlamal <256627@mail.muni.cz>
 * @author Vaclav Mach <374430@mail.muni.cz>
 */
public class ServicesTabItem implements TabItem, TabItemWithUrl{

	/**
	 * Perun web session
	 */
	private PerunWebSession session = PerunWebSession.getInstance();

	/**
	 * Content widget - should be simple panel
	 */
	private SimplePanel contentWidget = new SimplePanel();

	/**
	 * Title widget
	 */
	private Label titleWidget = new Label("Services");

	/**
	 * Creates a tab instance
	 */
	public ServicesTabItem(){}

	public boolean isPrepared(){
		return true;
	}

	/**
	 * Builds the page: a menu (create / delete / filter) above a table of all
	 * services; clicking a row opens the service's detail tab.
	 */
	public Widget draw() {

		// create widget for the whole page
		VerticalPanel mainTab = new VerticalPanel();
		mainTab.setSize("100%", "100%");

		// create widget for menu on page
		TabMenu tabMenu = new TabMenu();

		// get services
		final GetServices services = new GetServices();
		final JsonCallbackEvents events = JsonCallbackEvents.refreshTableEvents(services);

		// get the table of services with custom field updater (lines are clickable and open service details)
		CellTable<Service> table = services.getTable(new FieldUpdater<Service, String>() {
			// when user click on a row -> open new tab
			public void update(int index, Service object, String value) {
				session.getTabManager().addTab(new ServiceDetailTabItem(object));
			}
		});

		// create button
		tabMenu.addWidget(TabMenu.getPredefinedButton(ButtonType.CREATE, ButtonTranslation.INSTANCE.createService(), new ClickHandler() {
			@Override
			public void onClick(ClickEvent clickEvent) {
				session.getTabManager().addTabToCurrentTab(new CreateServiceTabItem());
			}
		}));

		final CustomButton deleteButton = TabMenu.getPredefinedButton(ButtonType.DELETE, ButtonTranslation.INSTANCE.deleteSelectedServices());
		deleteButton.addClickHandler(new ClickHandler() {
			@Override
			public void onClick(ClickEvent event) {
				// get selected items
				final ArrayList<Service> itemsToRemove = services.getTableSelectedList();
				UiElements.showDeleteConfirm(itemsToRemove, new ClickHandler() {
					@Override
					public void onClick(ClickEvent clickEvent) {
						// TODO - SHOULD HAVE ONLY ONE CALLBACK TO CORE
						for (int i=0; i<itemsToRemove.size(); i++ ) {
							DeleteService request;
							// only the last delete refreshes the table, so the
							// refresh happens once after the whole batch
							if(i == itemsToRemove.size()-1){
								request = new DeleteService(JsonCallbackEvents.disableButtonEvents(deleteButton, events));
							}else{
								request = new DeleteService(JsonCallbackEvents.disableButtonEvents(deleteButton));
							}
							request.deleteService(itemsToRemove.get(i).getId());
						}
					}
				});
			}
		});
		tabMenu.addWidget(deleteButton);

		tabMenu.addFilterWidget(new ExtendedSuggestBox(services.getOracle()), new PerunSearchEvent() {
			@Override
			public void searchFor(String text) {
				services.filterTable(text);
			}
		}, "Filter services by name");

		// add menu to page itself
		mainTab.add(tabMenu);
		mainTab.setCellHeight(tabMenu, "30px");

		// add styling to table with services
		table.addStyleName("perun-table");
		ScrollPanel sp = new ScrollPanel(table);
		sp.addStyleName("perun-tableScrollPanel");
		mainTab.add(sp);

		// delete button is enabled only while some row is selected
		deleteButton.setEnabled(false);
		JsonUtils.addTableManagedButton(services, table, deleteButton);

		session.getUiElements().resizePerunTable(sp, 350, this);

		this.contentWidget.setWidget(mainTab);

		return getWidget();
	}

	public Widget getWidget() {
		return this.contentWidget;
	}

	public Widget getTitle() {
		return this.titleWidget;
	}

	public ImageResource getIcon() {
		return SmallIcons.INSTANCE.trafficLightsIcon();
	}

	@Override
	public int hashCode() {
		final int prime = 1109;
		int result = 1;
		result = prime * result + 122341;
		return result;
	}

	@Override
	public boolean equals(Object obj) {
		if (this == obj)
			return true;
		if (obj == null)
			return false;
		if (getClass() != obj.getClass())
			return false;
		return true;
	}

	public boolean multipleInstancesEnabled() {
		return false;
	}

	public void open() {
		session.getUiElements().getMenu().openMenu(MainMenu.PERUN_ADMIN, true);
		session.getUiElements().getBreadcrumbs().setLocation(MainMenu.PERUN_ADMIN, "Services", getUrlWithParameters());
	}

	public boolean isAuthorized() {
		// FIX: replaced the verbose if/else returning boolean literals;
		// only Perun administrators may manage services.
		return session.isPerunAdmin();
	}

	public final static String URL = "list";

	public String getUrl() {
		return URL;
	}

	public String getUrlWithParameters() {
		return ServicesTabs.URL + UrlMapper.TAB_NAME_SEPARATOR + getUrl();
	}

	static public ServicesTabItem load(Map<String, String> parameters) {
		return new ServicesTabItem();
	}
}
bsd-2-clause
adamd/z
src/com/adamldavis/z/java/GroovyPlay.java
4720
/** Copyright 2012, Adam L. Davis, all rights reserved. */
package com.adamldavis.z.java;

import static java.util.Arrays.asList;
import groovy.lang.Binding;
import groovy.lang.GroovyShell;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;

import com.adamldavis.z.editor.Playground;

/**
 * Runs a Groovy script with line-level tracing: each recognized statement
 * (assignment, ==/!=, return, def, println) is rewritten to also record a
 * "lineNumber:statement" entry into the shared {@code out} list via injected
 * {@code repl*} helper methods.
 */
public class GroovyPlay implements Runnable {

	// The user script to instrument and evaluate.
	private File script;

	// Shell whose binding exposes script_path and the _out_ trace list.
	private GroovyShell shell;

	// Trace lines collected by the injected repl* helpers; synchronized because
	// the script may run on another thread while getOut() is read.
	private final List<String> out = Collections
			.synchronizedList(new LinkedList<String>());

	// Maps each statement pattern to the name of the repl* helper to inject.
	private final Map<Pattern, String> methodMap = new HashMap<Pattern, String>();

	// Statement-recognition patterns; [^/]* stops each match at a // comment.
	Pattern assign = Pattern.compile("([\\w\\.]+) = ([^/]*)");
	Pattern eq = Pattern.compile("\\(([^\\s]+) == ([^/]*)\\)");
	Pattern neq = Pattern.compile("\\((\\w+) != ([^/]*)\\)");
	Pattern ret = Pattern.compile("return ([^/]*)");
	Pattern params1 = Pattern.compile("def (\\w+)\\((\\w+)\\)");
	Pattern params2 = Pattern.compile("def (\\w+)\\((\\w+), (\\w+)\\)");
	Pattern println = Pattern.compile("println ([^/]*)");

	// Order matters: the first pattern that matches a line wins.
	final List<Pattern> patterns = asList(assign, eq, neq, ret, params1,
			params2, println);

	final List<Pattern> paramPatterns = asList(params1, params2);

	/**
	 * Prepares a shell bound to the given script.
	 *
	 * @param script the Groovy script file to instrument and run
	 */
	public GroovyPlay(File script) {
		Binding binding = new Binding();
		binding.setVariable("script_path", script.getAbsolutePath());
		binding.setVariable("_out_", out);
		shell = new GroovyShell(binding);
		this.script = script;
		methodMap.put(assign, "replAssign");
		methodMap.put(eq, "replEq");
		methodMap.put(neq, "replNeq");
		methodMap.put(ret, "replReturn");
		methodMap.put(params1, "replParams");
		methodMap.put(params2, "replParams");
		methodMap.put(println, "replPrintln");
	}

	/** Instruments the script and evaluates the instrumented copy. */
	@Override
	public void run() {
		try {
			shell.evaluate(modify(script));
		} catch (IOException e) {
			throw new RuntimeException(e);
		}
	}

	/**
	 * Writes an instrumented copy of the script to a temp file: every line
	 * matching one of the patterns is rewritten by {@link #handle}, and the
	 * repl* helper definitions are appended at the end.
	 *
	 * @param script the original script
	 * @return the instrumented temp file (may be null if an IOException
	 *         occurred — NOTE(review): run() would then fail; confirm intended)
	 */
	private File modify(File script) {
		File file = null;
		try {
			file = File.createTempFile("modified", ".groovy");
			List<String> code = FileUtils.readLines(script);
			List<String> result = new LinkedList<String>();
			int i = 0; // line#
			for (String line : code) {
				boolean foundMatch = false;
				for (Pattern patt : patterns) {
					if (patt.matcher(line).find()) {
						result.add(handle(line, patt, i));
						foundMatch = true;
						break;
					}
				}
				if (!foundMatch) {
					result.add(line);
				}
				i++;
			}
			// Injected helpers: each records "line:statement" and, where the
			// original expression's value is needed, re-evaluates/returns it.
			result.add("def replAssign(a, b, n) {_out_.add(\"$n:$a = $b\" as String); }");
			result.add("def replNeq(a, b, n) {_out_.add(\"$n:$a != $b\" as String); a != b}");
			result.add("def replEq(a, b, n) {_out_.add(\"$n:$a == $b\" as String); a == b}");
			result.add("def replReturn(a, n) {_out_.add(\"$n:return $a\" as String); a}");
			result.add("def replParams(n, m, ... a) {_out_.add(n + ':def ' + m + ' ' + a.join(', '))}");
			result.add("def replPrintln(a, n) {_out_.add(\"$n:println $a\" as String); println a}");
			FileUtils.writeLines(file, result);
		} catch (IOException e) {
			e.printStackTrace();
		}
		return file;
	}

	// converts the LOC to use a repl
	private String handle(String line, Pattern pattern, int n) {
		String methodName = methodMap.get(pattern);
		Matcher m = pattern.matcher(line);
		m.find();
		int start = m.start(), end = m.end();

		if (pattern == ret || pattern == println) {
			// replace the statement with a call that both logs and yields it
			return line.substring(0, start) + methodName + "(" + m.group(1)
					+ "," + n + ")";
		} else if (pattern == assign) {
			// refers to left side only, so side-effects don't occur twice
			return line + ";" + methodName + "('" + m.group(1) + "',"
					+ m.group(1) + "," + n + ");";
		} else if (pattern == params1) {
			// log the method definition with its single parameter
			return line + methodName + "(" + n + ",'" + m.group(1) + "',"
					+ m.group(2) + ");";
		} else if (pattern == params2) {
			// log the method definition with its two parameters
			return line + methodName + "(" + n + ",'" + m.group(1) + "',"
					+ m.group(2) + "," + m.group(3) + ");";
		} else {
			// comparison: wrap in a logging call that returns the comparison
			return line.substring(0, start) + "(" + methodName + "("
					+ m.group(1) + "," + m.group(2) + "," + n + "))"
					+ line.substring(end);
		}
	}

	/** @return the collected "line:statement" trace entries. */
	public List<String> getOut() {
		return out;
	}

	/** Test main. */
	public static void main(String[] args) {
		try {
			File file = File.createTempFile("temp", ".groovy");
			IOUtils.copy(Playground.class.getResourceAsStream("/lcm.groovy"),
					new FileOutputStream(file));
			file = new GroovyPlay(file).modify(file);
			IOUtils.copy(new FileInputStream(file), System.out);
		} catch (IOException e) {
			e.printStackTrace();
		}
	}
}
bsd-2-clause
vsch/commonmark-java
flexmark-util-ast/src/main/java/com/vladsch/flexmark/util/ast/package-info.java
39
package com.vladsch.flexmark.util.ast;
bsd-2-clause
vsch/commonmark-java
flexmark-ext-toc/src/main/java/com/vladsch/flexmark/ext/toc/SimTocBlock.java
2636
package com.vladsch.flexmark.ext.toc; import com.vladsch.flexmark.util.sequence.BasedSequence; import org.jetbrains.annotations.NotNull; /** * A simulated toc block node */ public class SimTocBlock extends TocBlockBase { protected BasedSequence anchorMarker = BasedSequence.NULL; protected BasedSequence openingTitleMarker = BasedSequence.NULL; protected BasedSequence title = BasedSequence.NULL; protected BasedSequence closingTitleMarker = BasedSequence.NULL; @Override public void getAstExtra(@NotNull StringBuilder out) { super.getAstExtra(out); segmentSpanChars(out, anchorMarker, "anchorMarker"); segmentSpanChars(out, openingTitleMarker, "openingTitleMarker"); segmentSpanChars(out, title, "title"); segmentSpanChars(out, closingTitleMarker, "closingTitleMarker"); } @NotNull @Override public BasedSequence[] getSegments() { BasedSequence[] nodeSegments = new BasedSequence[] { openingMarker, tocKeyword, style, closingMarker, anchorMarker, openingTitleMarker, title, closingTitleMarker }; if (lineSegments.size() == 0) return nodeSegments; BasedSequence[] allSegments = new BasedSequence[lineSegments.size() + nodeSegments.length]; lineSegments.toArray(allSegments); System.arraycopy(allSegments, 0, allSegments, nodeSegments.length, lineSegments.size()); return allSegments; } public SimTocBlock(BasedSequence chars) { this(chars, null, null); } public SimTocBlock(BasedSequence chars, BasedSequence styleChars, BasedSequence titleChars) { super(chars, styleChars, true); int anchorPos = chars.indexOf('#', closingMarker.getEndOffset() - chars.getStartOffset()); if (anchorPos == -1) { throw new IllegalStateException("Invalid TOC block sequence"); } anchorMarker = chars.subSequence(anchorPos, anchorPos + 1); if (titleChars != null) { if (titleChars.length() < 2) { throw new IllegalStateException("Invalid TOC block title sequence"); } openingTitleMarker = titleChars.subSequence(0, 1); title = titleChars.midSequence(1, -1); closingTitleMarker = titleChars.endSequence(1); } } public 
BasedSequence getAnchorMarker() { return anchorMarker; } public BasedSequence getOpeningTitleMarker() { return openingTitleMarker; } public BasedSequence getTitle() { return title; } public BasedSequence getClosingTitleMarker() { return closingTitleMarker; } }
bsd-2-clause
fullcontact/ez-vcard
src/main/java/ezvcard/parameter/VCardParameterCaseClasses.java
2566
package ezvcard.parameter;

import java.lang.reflect.Constructor;

import ezvcard.VCardVersion;
import ezvcard.util.CaseClasses;

/*
 Copyright (c) 2012-2015, Michael Angstadt
 All rights reserved.

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice, this
 list of conditions and the following disclaimer.
 2. Redistributions in binary form must reproduce the above copyright notice,
 this list of conditions and the following disclaimer in the documentation
 and/or other materials provided with the distribution.

 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * Manages the list of pre-defined values for a parameter that consists of a
 * single String value.
 * @author Michael Angstadt
 * @param <T> the parameter class
 */
public class VCardParameterCaseClasses<T extends VCardParameter> extends CaseClasses<T, String> {
	public VCardParameterCaseClasses(Class<T> clazz) {
		super(clazz);
	}

	/**
	 * Instantiates a parameter object for the given string value via
	 * reflection, trying the (String) constructor first and falling back to
	 * the (String, VCardVersion...) constructor.
	 */
	@Override
	protected T create(String value) {
		//reflection: return new ClassName(value);
		try {
			//try (String) constructor
			Constructor<T> constructor = clazz.getDeclaredConstructor(String.class);
			constructor.setAccessible(true);
			return constructor.newInstance(value);
		} catch (Exception e) {
			try {
				//try (String, VCardVersion...) constructor
				Constructor<T> constructor = clazz.getDeclaredConstructor(String.class, VCardVersion[].class);
				constructor.setAccessible(true);
				return constructor.newInstance(value, new VCardVersion[] {});
			} catch (Exception e2) {
				// NOTE(review): the first failure 'e' is discarded here; only
				// the fallback failure is propagated to the caller.
				throw new RuntimeException(e2);
			}
		}
	}

	/**
	 * Two parameters match when their string values are equal, ignoring case.
	 */
	@Override
	protected boolean matches(T object, String value) {
		return object.getValue().equalsIgnoreCase(value);
	}
}
bsd-2-clause
ngs-doo/dsl-json
library/src/main/java/com/dslplatform/json/StringConverter.java
3698
package com.dslplatform.json;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

/**
 * JSON (de)serialization helpers for String values: reusable reader/writer
 * singletons plus static serialize/deserialize methods for single values and
 * collections. All "Nullable" variants map JSON null to Java null.
 */
public abstract class StringConverter {

	// Reads a JSON string, mapping JSON null to Java null.
	public static final JsonReader.ReadObject<String> READER = new JsonReader.ReadObject<String>() {
		@Nullable
		@Override
		public String read(JsonReader reader) throws IOException {
			if (reader.wasNull()) return null;
			return reader.readString();
		}
	};
	// Writes a String, mapping Java null to JSON null.
	public static final JsonWriter.WriteObject<String> WRITER = new JsonWriter.WriteObject<String>() {
		@Override
		public void write(JsonWriter writer, @Nullable String value) {
			serializeNullable(value, writer);
		}
	};
	// Writes any CharSequence (e.g. StringBuilder), mapping null to JSON null.
	public static final JsonWriter.WriteObject<CharSequence> WRITER_CHARS = new JsonWriter.WriteObject<CharSequence>() {
		@Override
		public void write(JsonWriter writer, @Nullable CharSequence value) {
			if (value == null) writer.writeNull();
			else writer.writeString(value);
		}
	};
	// Reads a JSON string into a fresh StringBuilder (null-aware).
	public static final JsonReader.ReadObject<StringBuilder> READER_BUILDER = new JsonReader.ReadObject<StringBuilder>() {
		@Nullable
		@Override
		public StringBuilder read(JsonReader reader) throws IOException {
			if (reader.wasNull()) return null;
			StringBuilder builder = new StringBuilder();
			return reader.appendString(builder);
		}
	};
	// Reads a JSON string into a fresh StringBuffer (null-aware).
	public static final JsonReader.ReadObject<StringBuffer> READER_BUFFER = new JsonReader.ReadObject<StringBuffer>() {
		@Nullable
		@Override
		public StringBuffer read(JsonReader reader) throws IOException {
			if (reader.wasNull()) return null;
			StringBuffer builder = new StringBuffer();
			return reader.appendString(builder);
		}
	};

	// "Short" variant kept for API compatibility; same behavior as serializeNullable.
	public static void serializeShortNullable(@Nullable final String value, final JsonWriter sw) {
		if (value == null) {
			sw.writeNull();
		} else {
			sw.writeString(value);
		}
	}

	public static void serializeShort(final String value, final JsonWriter sw) {
		sw.writeString(value);
	}

	public static void serializeNullable(@Nullable final String value, final JsonWriter sw) {
		if (value == null) {
			sw.writeNull();
		} else {
			sw.writeString(value);
		}
	}

	public static void serialize(final String value, final JsonWriter sw) {
		sw.writeString(value);
	}

	public static String deserialize(final JsonReader reader) throws IOException {
		return reader.readString();
	}

	@Nullable
	public static String deserializeNullable(final JsonReader reader) throws IOException {
		// 'n' signals the start of the JSON literal null.
		if (reader.last() == 'n') {
			if (!reader.wasNull()) throw reader.newParseErrorAt("Expecting 'null' for null constant", 0);
			return null;
		}
		return reader.readString();
	}

	@SuppressWarnings("unchecked")
	public static ArrayList<String> deserializeCollection(final JsonReader reader) throws IOException {
		return reader.deserializeCollection(READER);
	}

	public static void deserializeCollection(final JsonReader reader, final Collection<String> res) throws IOException {
		reader.deserializeCollection(READER, res);
	}

	@SuppressWarnings("unchecked")
	public static ArrayList<String> deserializeNullableCollection(final JsonReader reader) throws IOException {
		return reader.deserializeNullableCollection(READER);
	}

	public static void deserializeNullableCollection(final JsonReader reader, final Collection<String> res) throws IOException {
		reader.deserializeNullableCollection(READER, res);
	}

	/**
	 * Writes a list of strings as a JSON array, emitting commas between
	 * elements (first element written outside the loop to avoid a trailing
	 * or leading comma).
	 */
	public static void serialize(final List<String> list, final JsonWriter writer) {
		writer.writeByte(JsonWriter.ARRAY_START);
		if (list.size() != 0) {
			writer.writeString(list.get(0));
			for (int i = 1; i < list.size(); i++) {
				writer.writeByte(JsonWriter.COMMA);
				writer.writeString(list.get(i));
			}
		}
		writer.writeByte(JsonWriter.ARRAY_END);
	}
}
bsd-3-clause
rvt/cnctools
cnctools/src/main/java/com/rvantwisk/cnctools/misc/ProjectModel.java
10361
/* * Copyright (c) 2013, R. van Twisk * All rights reserved. * Licensed under the The BSD 3-Clause License; * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://opensource.org/licenses/BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the aic-util nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
package com.rvantwisk.cnctools.misc;

import com.dooapp.xstreamfx.*;
import com.rvantwisk.cnctools.data.*;
import com.rvantwisk.cnctools.data.tools.BallMill;
import com.rvantwisk.cnctools.data.tools.EndMill;
import com.sun.javafx.collections.ObservableListWrapper;
import com.thoughtworks.xstream.XStream;
import com.thoughtworks.xstream.converters.Converter;
import com.thoughtworks.xstream.io.HierarchicalStreamWriter;
import com.thoughtworks.xstream.io.xml.XppDomDriver;
import javafx.beans.property.*;
import javafx.collections.FXCollections;
import javafx.collections.ObservableList;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.*;
import java.util.ArrayList;
import java.util.List;

/**
 * Application data model: holds the observable lists of projects, tools and post
 * processors, and persists each of them to its own XML file using XStream.
 * <p>
 * Serialization notes: JavaFX property wrappers are handled by the custom
 * converters registered in {@link #getXStream()}, and {@code gcode} elements are
 * written as CDATA sections so embedded newlines survive a round trip.
 */
public class ProjectModel {

    private static final Logger logger = LoggerFactory.getLogger(ProjectModel.class);

    // Platform dependent line separator, re-appended when reading files back line-by-line.
    private static final String SEPARATOR = System.getProperty("line.separator");

    private static final String PROJECTS_XML = "projects.xml";
    private static final String TOOLS_XML = "tools.xml";
    private static final String POSTPROCESSORS_XML = "postprocessors.xml";

    final private ObservableList<Project> projectsProperty = FXCollections.observableArrayList();
    final private ObservableList<ToolParameter> toolDBProperty = FXCollections.observableArrayList();
    final private ObservableList<CNCToolsPostProcessConfig> postProcessors = FXCollections.observableArrayList();

    // Lazily created facade over the tool list; transient so it is never serialized.
    private transient ToolDBManager toolDBManager;

    /**
     * Returns the tool DB manager, creating it on first use.
     *
     * @return manager wrapping the observable tool list
     */
    public ToolDBManager getToolDBManager() {
        if (toolDBManager == null) {
            toolDBManager = new ToolDBManager(toolDBProperty);
        }
        return toolDBManager;
    }

    /** @return the observable list of projects */
    public ObservableList<Project> projectsProperty() {
        return projectsProperty;
    }

    /** @return the observable list of tools */
    public ObservableList<ToolParameter> toolDBProperty() {
        return toolDBProperty;
    }

    /** @return the observable list of post processor configurations */
    public ObservableList<CNCToolsPostProcessConfig> postProcessorsProperty() {
        return postProcessors;
    }

    /**
     * Creates a new project and adds it to the model.
     *
     * @param projectname name of the new project
     * @param description free-form project description
     */
    public void addProject(String projectname, String description) {
        Project project = new Project(projectname, description);
        projectsProperty.add(project);
    }

    /**
     * Adds an existing project to the model.
     *
     * @param p the project to add
     */
    public void addProject(final Project p) {
        projectsProperty.add(p);
    }

    /**
     * Builds a fully configured XStream instance used for all (de)serialization:
     * CDATA wrapping for {@code gcode}, converters for JavaFX property wrappers,
     * and short element aliases for the persisted types.
     *
     * @return a freshly configured XStream instance
     */
    private static XStream getXStream() {
        XStream xStream = new XStream(new XppDomDriver() {
            @Override
            public HierarchicalStreamWriter createWriter(Writer out) {
                // Emit <gcode> elements as CDATA so multi-line G-code is preserved verbatim.
                CdataWrapper cdw = new CdataWrapper(out);
                cdw.setToWrap(new String[]{"gcode"});
                return cdw;
            }
        });

        // Converters for the JavaFX property wrapper types.
        xStream.registerConverter(new StringPropertyConverter(xStream.getMapper()));
        xStream.registerConverter(new BooleanPropertyConverter(xStream.getMapper()));
        xStream.registerConverter(new DoublePropertyConverter(xStream.getMapper()));
        xStream.registerConverter(new LongPropertyConverter(xStream.getMapper()));
        xStream.registerConverter(new IntegerPropertyConverter(xStream.getMapper()));

        // ObservableListWrapper carries a change-listener helper that must not be serialized.
        xStream.omitField(ObservableListWrapper.class, "listenerHelper");

        // JavaFX aliases
        xStream.alias("IntProp", SimpleIntegerProperty.class);
        xStream.alias("StrProp", SimpleStringProperty.class);
        xStream.alias("DblProp", SimpleDoubleProperty.class);
        xStream.alias("Boolprop", SimpleBooleanProperty.class);
        xStream.alias("ObjProp", SimpleObjectProperty.class);
        xStream.alias("LongProp", SimpleLongProperty.class);
        xStream.alias("OListWrapper", ObservableListWrapper.class);

        // Program properties aliases
        xStream.alias("Task", TaskRunnable.class);
        xStream.alias("Project", Project.class);
        xStream.alias("StockToolParameter", StockToolParameter.class);
        xStream.alias("EndMill", EndMill.class);
        xStream.alias("BallMill", BallMill.class);
        xStream.alias("AvailableTask", AbstractTask.class);
        xStream.alias("ToolParameter", ToolParameter.class);

        return xStream;
    }

    /** Saves all projects to {@value #PROJECTS_XML}. */
    public void saveProjects() {
        save(projectsProperty, PROJECTS_XML, "saveProjects");
    }

    /** Saves the tool library to {@value #TOOLS_XML}. */
    public void saveToolDB() {
        save(toolDBProperty, TOOLS_XML, "saveToolDB");
    }

    /** Saves the post processor configurations to {@value #POSTPROCESSORS_XML}. */
    public void savePostProcessors() {
        save(postProcessors, POSTPROCESSORS_XML, "savePostProcessors");
    }

    /**
     * Serializes the given items to an XML file. Failures are logged, never
     * thrown, so a save problem cannot crash the UI.
     *
     * @param items     items to serialize
     * @param fileName  destination file name
     * @param operation operation name used in the error log
     */
    private void save(final List<?> items, final String fileName, final String operation) {
        try {
            // Copy into a plain ArrayList: the observable wrapper itself is not serialized.
            String xml = getXStream().toXML(new ArrayList<>(items));
            FileUtil.saveFile(xml, new File(fileName));
        } catch (Exception e) { // catches ANY exception
            logger.error(operation, e);
        }
    }

    /** Loads the tool library from {@value #TOOLS_XML}, replacing the current contents on success. */
    public void loadToolsFromDB() {
        List<ToolParameter> tools = load(TOOLS_XML, "Error loading tools DB");
        if (tools != null) {
            toolDBProperty.clear();
            toolDBProperty.addAll(tools);
        }
    }

    /** Loads the projects from {@value #PROJECTS_XML}, replacing the current contents on success. */
    public void loadProjectsFromDB() {
        List<Project> projects = load(PROJECTS_XML, "Error loading projects DB");
        if (projects != null) {
            projectsProperty.clear();
            projectsProperty.addAll(projects);
        }
    }

    /** Loads the post processors from {@value #POSTPROCESSORS_XML}, replacing the current contents on success. */
    public void loadPostProcessors() {
        List<CNCToolsPostProcessConfig> data = load(POSTPROCESSORS_XML, "Error loading post processors from DB");
        if (data != null) {
            postProcessors.clear();
            postProcessors.addAll(data);
        }
    }

    /**
     * Deserializes a list of items from an XML file.
     *
     * @param fileName     file to read
     * @param errorMessage message used in the error log on failure
     * @param <T>          element type of the expected list
     * @return the deserialized list, or {@code null} when reading or parsing failed
     */
    @SuppressWarnings("unchecked")
    private <T> List<T> load(final String fileName, final String errorMessage) {
        try {
            File file = new File(fileName);
            return (List<T>) getXStream().fromXML(readFileIntoString(file));
        } catch (Exception e) { // catches ANY exception
            logger.error(errorMessage, e);
            return null;
        }
    }

    /**
     * Reads a file fully into a String.
     * <p>
     * Note: a line separator is appended after every line, including the last one.
     *
     * @param file the file to read
     * @return the file's contents
     * @throws IOException when the file cannot be opened or read
     */
    public static String readFileIntoString(File file) throws IOException {
        StringBuilder sb = new StringBuilder();
        try (BufferedReader br = new BufferedReader(new FileReader(file.getPath()))) {
            String sCurrentLine;
            while ((sCurrentLine = br.readLine()) != null) {
                sb.append(sCurrentLine).append(SEPARATOR);
            }
        }
        return sb.toString();
    }

    /**
     * Creates a deep copy of a java bean by serializing it to XML and back.
     * WARNING: Ensure you are doing this on beans only!
     *
     * @param obj the bean to copy
     * @param <T> the bean's type
     * @return the copied bean
     */
    @SuppressWarnings("unchecked")
    public static <T extends Object> T deepCopy(final Object obj) {
        return (T) getXStream().fromXML(getXStream().toXML(obj));
    }
}
bsd-3-clause
msf-oca-his/dhis-core
dhis-2/dhis-support/dhis-support-system/src/main/java/org/hisp/dhis/system/log/Log4JLogConfigInitializer.java
6474
package org.hisp.dhis.system.log;

/*
 * Copyright (c) 2004-2018, University of Oslo
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 * Neither the name of the HISP project nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

import com.google.common.collect.Lists;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
import org.apache.log4j.RollingFileAppender;
import org.hisp.dhis.external.location.LocationManager;
import org.springframework.beans.factory.annotation.Autowired;

import java.io.File;
import java.util.List;

import static org.apache.commons.lang3.StringUtils.isNotBlank;

/**
 * Sets up additional rolling file log destinations under the DHIS2 external
 * ("home") directory. Skipped entirely when no external directory is set or
 * when an external log4j configuration is supplied via a system property.
 *
 * @author Lars Helge Overland
 */
public class Log4JLogConfigInitializer
    implements LogConfigInitializer
{
    /**
     * Conversion pattern for all file appenders. A new {@link PatternLayout} is
     * created per appender (see {@link #getRollingFileAppender(String)}) because
     * log4j 1.x layouts buffer formatted output in an internal StringBuffer and
     * are therefore not safe to share between appenders that can be invoked
     * concurrently from different threads.
     */
    private static final String PATTERN = "* %-5p %d{ISO8601} %m (%F [%t])%n";

    private static final String MAX_FILE_SIZE = "50MB";

    private static final String LOG_DIR = "logs";

    private static final String ANALYTICS_TABLE_LOGGER_FILENAME = "dhis-analytics-table.log";

    private static final String DATA_EXCHANGE_LOGGER_FILENAME = "dhis-data-exchange.log";

    private static final String DATA_SYNC_LOGGER_FILENAME = "dhis-data-sync.log";

    private static final String METADATA_SYNC_LOGGER_FILENAME = "dhis-metadata-sync.log";

    private static final String GENERAL_LOGGER_FILENAME = "dhis.log";

    private static final String PUSH_ANALYSIS_LOGGER_FILENAME = "dhis-push-analysis.log";

    private static final String LOG4J_CONF_PROP = "log4j.configuration";

    private static final Log log = LogFactory.getLog( Log4JLogConfigInitializer.class );

    @Autowired
    private LocationManager locationManager;

    @Override
    public void initConfig()
    {
        // Nothing to do without an external home directory to write log files into.
        if ( !locationManager.externalDirectorySet() )
        {
            log.warn( "Could not initialize additional log configuration, external home directory not set" );
            return;
        }

        // An externally supplied log4j configuration takes precedence over this default setup.
        if ( isNotBlank( System.getProperty( LOG4J_CONF_PROP ) ) )
        {
            log.info( "Aborting default log config, external config set through system prop " + LOG4J_CONF_PROP + ": " + System.getProperty( LOG4J_CONF_PROP ) );
            return;
        }

        locationManager.buildDirectory( LOG_DIR );

        configureLoggers( ANALYTICS_TABLE_LOGGER_FILENAME, Lists.newArrayList( "org.hisp.dhis.resourcetable", "org.hisp.dhis.analytics.table" ) );

        configureLoggers( DATA_EXCHANGE_LOGGER_FILENAME, Lists.newArrayList( "org.hisp.dhis.dxf2" ) );

        configureLoggers( DATA_SYNC_LOGGER_FILENAME, Lists.newArrayList( "org.hisp.dhis.dxf2.synch" ) );

        configureLoggers( METADATA_SYNC_LOGGER_FILENAME, Lists.newArrayList( "org.hisp.dhis.dxf2.metadata" ) );

        configureLoggers( PUSH_ANALYSIS_LOGGER_FILENAME, Lists.newArrayList( "org.hisp.dhis.pushanalysis" ) );

        configureRootLogger( GENERAL_LOGGER_FILENAME );
    }

    /**
     * Configures rolling file loggers. All given loggers share one appender and
     * thus write to the same file; that is safe since a single appender
     * serializes its own writes.
     *
     * @param filename the filename to output logging to.
     * @param loggers the logger names.
     */
    private void configureLoggers( String filename, List<String> loggers )
    {
        String file = getLogFile( filename );

        RollingFileAppender appender = getRollingFileAppender( file );

        for ( String loggerName : loggers )
        {
            Logger logger = Logger.getRootLogger().getLoggerRepository().getLogger( loggerName );

            logger.addAppender( appender );

            log.info( "Added logger: " + loggerName + " using file: " + file );
        }
    }

    /**
     * Configures a root file logger.
     *
     * @param filename the filename to output logging to.
     */
    private void configureRootLogger( String filename )
    {
        String file = getLogFile( filename );

        RollingFileAppender appender = getRollingFileAppender( file );

        Logger.getRootLogger().addAppender( appender );

        log.info( "Added root logger using file: " + file );
    }

    /**
     * Returns a rolling file appender with a dedicated, non-shared layout.
     *
     * @param file the file to output to, including path and filename.
     * @return a configured and activated appender.
     */
    private RollingFileAppender getRollingFileAppender( String file )
    {
        RollingFileAppender appender = new RollingFileAppender();

        appender.setThreshold( Level.INFO );
        appender.setFile( file );
        appender.setMaxFileSize( MAX_FILE_SIZE );

        // Dedicated layout instance per appender: PatternLayout is not thread-safe
        // when shared across appenders (internal format buffer), see PATTERN.
        appender.setLayout( new PatternLayout( PATTERN ) );

        appender.activateOptions();

        return appender;
    }

    /**
     * Returns a file including path and filename.
     *
     * @param filename the filename to use for the file path.
     * @return the absolute path of the log file inside the external log directory.
     */
    private String getLogFile( String filename )
    {
        return locationManager.getExternalDirectoryPath() + File.separator + LOG_DIR + File.separator + filename;
    }
}
bsd-3-clause
eoogbe/api-client-staging
generated/java/gapic-google-cloud-video-intelligence-v1beta2/src/main/java/com/google/cloud/videointelligence/v1beta2/stub/GrpcVideoIntelligenceServiceStub.java
5221
/*
 * Copyright 2017, Google Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.google.cloud.videointelligence.v1beta2.stub;

import com.google.api.core.BetaApi;
import com.google.api.gax.core.BackgroundResource;
import com.google.api.gax.core.BackgroundResourceAggregation;
import com.google.api.gax.grpc.GrpcCallableFactory;
import com.google.api.gax.rpc.ClientContext;
import com.google.api.gax.rpc.OperationCallable;
import com.google.api.gax.rpc.UnaryCallable;
import com.google.cloud.videointelligence.v1beta2.AnnotateVideoProgress;
import com.google.cloud.videointelligence.v1beta2.AnnotateVideoRequest;
import com.google.cloud.videointelligence.v1beta2.AnnotateVideoResponse;
import com.google.cloud.videointelligence.v1beta2.VideoIntelligenceServiceSettings;
import com.google.longrunning.Operation;
import com.google.longrunning.stub.GrpcOperationsStub;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import javax.annotation.Generated;

// AUTO-GENERATED DOCUMENTATION AND CLASS
/**
 * gRPC stub implementation for Google Cloud Video Intelligence API.
 *
 * <p>This class is for advanced usage and reflects the underlying API directly.
 */
@Generated("by GAPIC v0.0.5")
@BetaApi
public class GrpcVideoIntelligenceServiceStub extends VideoIntelligenceServiceStub {
  // Transport-level ("direct") callable for the AnnotateVideo RPC. No retry/timeout
  // decoration is applied here; that is added in the constructor from the settings.
  private static final UnaryCallable<AnnotateVideoRequest, Operation> directAnnotateVideoCallable =
      GrpcCallableFactory.createDirectCallable(
          io.grpc.MethodDescriptor.create(
              io.grpc.MethodDescriptor.MethodType.UNARY,
              "google.cloud.videointelligence.v1beta2.VideoIntelligenceService/AnnotateVideo",
              io.grpc.protobuf.ProtoUtils.marshaller(AnnotateVideoRequest.getDefaultInstance()),
              io.grpc.protobuf.ProtoUtils.marshaller(Operation.getDefaultInstance())));

  // Aggregates all closeable resources (channels, executors) for the lifecycle
  // methods (close/shutdown/awaitTermination) implemented below.
  private final BackgroundResource backgroundResources;
  // Stub used to poll the long-running operations service for AnnotateVideo results.
  private final GrpcOperationsStub operationsStub;

  private final UnaryCallable<AnnotateVideoRequest, Operation> annotateVideoCallable;
  private final OperationCallable<
          AnnotateVideoRequest, AnnotateVideoResponse, AnnotateVideoProgress, Operation>
      annotateVideoOperationCallable;

  // Factory: builds both the client context and the stub from the given settings.
  public static final GrpcVideoIntelligenceServiceStub create(
      VideoIntelligenceServiceSettings settings) throws IOException {
    return new GrpcVideoIntelligenceServiceStub(settings, ClientContext.create(settings));
  }

  // Factory: uses default settings; only the pre-built client context comes from the caller.
  public static final GrpcVideoIntelligenceServiceStub create(ClientContext clientContext)
      throws IOException {
    return new GrpcVideoIntelligenceServiceStub(
        VideoIntelligenceServiceSettings.newBuilder().build(), clientContext);
  }

  /**
   * Constructs an instance of GrpcVideoIntelligenceServiceStub, using the given settings. This is
   * protected so that it is easy to make a subclass, but otherwise, the static factory methods
   * should be preferred.
   */
  protected GrpcVideoIntelligenceServiceStub(
      VideoIntelligenceServiceSettings settings, ClientContext clientContext) throws IOException {
    this.operationsStub = GrpcOperationsStub.create(clientContext);

    // Wrap the direct callable with the retry/timeout behavior configured in the settings.
    this.annotateVideoCallable =
        GrpcCallableFactory.create(
            directAnnotateVideoCallable,
            settings.annotateVideoSettings().getInitialCallSettings(),
            clientContext);
    // Operation-aware variant: resolves the returned long-running Operation into
    // AnnotateVideoResponse/AnnotateVideoProgress via the operations stub.
    this.annotateVideoOperationCallable =
        GrpcCallableFactory.create(
            directAnnotateVideoCallable,
            settings.annotateVideoSettings(),
            clientContext,
            this.operationsStub);

    backgroundResources = new BackgroundResourceAggregation(clientContext.getBackgroundResources());
  }

  public GrpcOperationsStub getOperationsStub() {
    return operationsStub;
  }

  public OperationCallable<
          AnnotateVideoRequest, AnnotateVideoResponse, AnnotateVideoProgress, Operation>
      annotateVideoOperationCallable() {
    return annotateVideoOperationCallable;
  }

  public UnaryCallable<AnnotateVideoRequest, Operation> annotateVideoCallable() {
    return annotateVideoCallable;
  }

  // All lifecycle methods below delegate to the aggregated background resources.
  @Override
  public final void close() throws Exception {
    shutdown();
  }

  @Override
  public void shutdown() {
    backgroundResources.shutdown();
  }

  @Override
  public boolean isShutdown() {
    return backgroundResources.isShutdown();
  }

  @Override
  public boolean isTerminated() {
    return backgroundResources.isTerminated();
  }

  @Override
  public void shutdownNow() {
    backgroundResources.shutdownNow();
  }

  @Override
  public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException {
    return backgroundResources.awaitTermination(duration, unit);
  }
}
bsd-3-clause
msf-oca-his/dhis-core
dhis-2/dhis-api/src/main/java/org/hisp/dhis/validation/ValidationRuleService.java
7339
package org.hisp.dhis.validation;

/*
 * Copyright (c) 2004-2018, University of Oslo
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 * Neither the name of the HISP project nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

import org.hisp.dhis.dataelement.DataElement;
import org.hisp.dhis.dataset.DataSet;

import java.util.Collection;
import java.util.List;
import java.util.Set;

/**
 * Service for managing {@link ValidationRule} and {@link ValidationRuleGroup}
 * persistence and lookup.
 *
 * @author Margrethe Store
 */
public interface ValidationRuleService
{
    String ID = ValidationRuleService.class.getName();

    // -------------------------------------------------------------------------
    // ValidationRule
    // -------------------------------------------------------------------------

    /**
     * Save a ValidationRule to the database.
     *
     * @param validationRule the ValidationRule to save.
     * @return the generated unique identifier for the ValidationRule.
     */
    int saveValidationRule( ValidationRule validationRule );

    /**
     * Update a ValidationRule to the database.
     *
     * @param validationRule the ValidationRule to update.
     */
    void updateValidationRule( ValidationRule validationRule );

    /**
     * Delete a validation rule with the given identifiers from the database.
     *
     * @param validationRule the ValidationRule to delete.
     */
    void deleteValidationRule( ValidationRule validationRule );

    /**
     * Get ValidationRule with the given identifier.
     *
     * @param id the unique identifier of the ValidationRule.
     * @return the ValidationRule or null if it doesn't exist.
     */
    ValidationRule getValidationRule( int id );

    /**
     * Get ValidationRule with the given uid.
     *
     * @param uid the unique identifier of the ValidationRule.
     * @return the ValidationRule or null if it doesn't exist.
     */
    ValidationRule getValidationRule( String uid );

    /**
     * Get all validation rules.
     *
     * @return a List of ValidationRule or null if there are no validation rules.
     */
    List<ValidationRule> getAllValidationRules();

    /**
     * Get all validation rules for form validation.
     *
     * @return a List of ValidationRule or null if there are none for form
     * validation.
     */
    List<ValidationRule> getAllFormValidationRules();

    /**
     * Get a validation rule with the given name.
     *
     * @param name the name of the validation rule.
     * @return the ValidationRule or null if it doesn't exist.
     */
    ValidationRule getValidationRuleByName( String name );

    /**
     * Get data elements part of the left side and right side expressions of the
     * given validation rule.
     *
     * @param validationRule the validation rule.
     * @return a set of data elements.
     */
    Set<DataElement> getDataElements( ValidationRule validationRule );

    /**
     * Returns all form validation rules for validating a data set.
     *
     * @param dataSet the data set to validate.
     * @return all validation rules which apply to that data set.
     */
    Collection<ValidationRule> getValidationRulesForDataSet( DataSet dataSet );

    /**
     * Returns all ValidationRules which have associated ValidationNotificationTemplates.
     *
     * @return a List of ValidationRule.
     */
    List<ValidationRule> getValidationRulesWithNotificationTemplates();

    // -------------------------------------------------------------------------
    // ValidationRuleGroup
    // -------------------------------------------------------------------------

    /**
     * Adds a ValidationRuleGroup to the database.
     *
     * @param validationRuleGroup the ValidationRuleGroup to add.
     * @return the generated unique identifier for the ValidationRuleGroup.
     */
    int addValidationRuleGroup( ValidationRuleGroup validationRuleGroup );

    /**
     * Delete a ValidationRuleGroup with the given identifiers from the database.
     *
     * @param validationRuleGroup the ValidationRuleGroup to delete.
     */
    void deleteValidationRuleGroup( ValidationRuleGroup validationRuleGroup );

    /**
     * Update a ValidationRuleGroup with the given identifiers.
     *
     * @param validationRuleGroup the ValidationRule to update.
     */
    void updateValidationRuleGroup( ValidationRuleGroup validationRuleGroup );

    /**
     * Get ValidationRuleGroup with the given identifier.
     *
     * @param id the unique identifier of the ValidationRuleGroup.
     * @return the ValidationRuleGroup or null if it doesn't exist.
     */
    ValidationRuleGroup getValidationRuleGroup( int id );

    /**
     * Get ValidationRuleGroup with the given uid.
     *
     * @param uid the unique identifier of the ValidationRuleGroup.
     * @return the ValidationRuleGroup or null if it doesn't exist.
     */
    ValidationRuleGroup getValidationRuleGroup( String uid );

    /**
     * Get all ValidationRuleGroups.
     *
     * @return a List of ValidationRuleGroup or null if it there are no ValidationRuleGroups.
     */
    List<ValidationRuleGroup> getAllValidationRuleGroups();

    /**
     * Get a ValidationRuleGroup with the given name.
     *
     * @param name the name of the ValidationRuleGroup.
     * @return the ValidationRuleGroup or null if it doesn't exist.
     */
    ValidationRuleGroup getValidationRuleGroupByName( String name );

    /**
     * Get validation rules within the given record range.
     *
     * @param first index of the first record to return (zero-based).
     * @param max maximum number of records to return.
     * @return a List of ValidationRule.
     */
    List<ValidationRule> getValidationRulesBetween( int first, int max );

    /**
     * Get validation rules matching the given name, within the given record range.
     *
     * @param name the name to match.
     * @param first index of the first record to return (zero-based).
     * @param max maximum number of records to return.
     * @return a List of ValidationRule.
     */
    List<ValidationRule> getValidationRulesBetweenByName( String name, int first, int max );

    /**
     * Get the total number of validation rules.
     *
     * @return the number of validation rules.
     */
    int getValidationRuleCount();

    /**
     * Get the number of validation rules matching the given name.
     *
     * @param name the name to match.
     * @return the number of matching validation rules.
     */
    int getValidationRuleCountByName( String name );

    /**
     * Get validation rule groups within the given record range.
     *
     * @param first index of the first record to return (zero-based).
     * @param max maximum number of records to return.
     * @return a List of ValidationRuleGroup.
     */
    List<ValidationRuleGroup> getValidationRuleGroupsBetween( int first, int max );

    /**
     * Get validation rule groups matching the given name, within the given record range.
     *
     * @param name the name to match.
     * @param first index of the first record to return (zero-based).
     * @param max maximum number of records to return.
     * @return a List of ValidationRuleGroup.
     */
    List<ValidationRuleGroup> getValidationRuleGroupsBetweenByName( String name, int first, int max );

    /**
     * Get the total number of validation rule groups.
     *
     * @return the number of validation rule groups.
     */
    int getValidationRuleGroupCount();

    /**
     * Get the number of validation rule groups matching the given name.
     *
     * @param name the name to match.
     * @return the number of matching validation rule groups.
     */
    int getValidationRuleGroupCountByName( String name );
}
bsd-3-clause
LWJGL-CI/lwjgl3
modules/lwjgl/openxr/src/generated/java/org/lwjgl/openxr/XrSpatialAnchorPersistenceNameMSFT.java
12829
/*
 * Copyright LWJGL. All rights reserved.
 * License terms: https://www.lwjgl.org/license
 * MACHINE GENERATED FILE, DO NOT EDIT
 */
package org.lwjgl.openxr;

import javax.annotation.*;

import java.nio.*;

import org.lwjgl.*;
import org.lwjgl.system.*;

import static org.lwjgl.system.Checks.*;
import static org.lwjgl.system.MemoryUtil.*;
import static org.lwjgl.system.MemoryStack.*;

import static org.lwjgl.openxr.MSFTSpatialAnchorPersistence.*;

/**
 * The name to identify a Spatial anchor persistence operations.
 *
 * <h5>Description</h5>
 *
 * <p>If an {@link XrSpatialAnchorPersistenceNameMSFT} with an empty {@code name} value is passed to any function as a parameter, that function <b>must</b> return {@link MSFTSpatialAnchorPersistence#XR_ERROR_SPATIAL_ANCHOR_NAME_INVALID_MSFT ERROR_SPATIAL_ANCHOR_NAME_INVALID_MSFT}.</p>
 *
 * <h5>Valid Usage (Implicit)</h5>
 *
 * <ul>
 * <li>The {@link MSFTSpatialAnchorPersistence XR_MSFT_spatial_anchor_persistence} extension <b>must</b> be enabled prior to using {@link XrSpatialAnchorPersistenceNameMSFT}</li>
 * <li>{@code name} <b>must</b> be a null-terminated UTF-8 string whose length is less than or equal to {@link MSFTSpatialAnchorPersistence#XR_MAX_SPATIAL_ANCHOR_NAME_SIZE_MSFT MAX_SPATIAL_ANCHOR_NAME_SIZE_MSFT}</li>
 * </ul>
 *
 * <h5>See Also</h5>
 *
 * <p>{@link XrSpatialAnchorFromPersistedAnchorCreateInfoMSFT}, {@link XrSpatialAnchorPersistenceInfoMSFT}, {@link MSFTSpatialAnchorPersistence#xrClearSpatialAnchorStoreMSFT ClearSpatialAnchorStoreMSFT}, {@link MSFTSpatialAnchorPersistence#xrEnumeratePersistedSpatialAnchorNamesMSFT EnumeratePersistedSpatialAnchorNamesMSFT}, {@link MSFTSpatialAnchorPersistence#xrUnpersistSpatialAnchorMSFT UnpersistSpatialAnchorMSFT}</p>
 *
 * <h3>Layout</h3>
 *
 * <pre><code>
 * struct XrSpatialAnchorPersistenceNameMSFT {
 *     char {@link #name}[XR_MAX_SPATIAL_ANCHOR_NAME_SIZE_MSFT];
 * }</code></pre>
 */
public class XrSpatialAnchorPersistenceNameMSFT extends Struct implements NativeResource {

    /** The struct size in bytes. */
    public static final int SIZEOF;

    /** The struct alignment in bytes. */
    public static final int ALIGNOF;

    /** The struct member offsets. */
    public static final int NAME;

    static {
        // Compute the native layout once: a single inline
        // char[XR_MAX_SPATIAL_ANCHOR_NAME_SIZE_MSFT] member.
        Layout layout = __struct(
            __array(1, XR_MAX_SPATIAL_ANCHOR_NAME_SIZE_MSFT)
        );

        SIZEOF = layout.getSize();
        ALIGNOF = layout.getAlignment();

        NAME = layout.offsetof(0);
    }

    /**
     * Creates a {@code XrSpatialAnchorPersistenceNameMSFT} instance at the current position of the specified {@link ByteBuffer} container. Changes to the buffer's content will be
     * visible to the struct instance and vice versa.
     *
     * <p>The created instance holds a strong reference to the container object.</p>
     */
    public XrSpatialAnchorPersistenceNameMSFT(ByteBuffer container) {
        super(memAddress(container), __checkContainer(container, SIZEOF));
    }

    @Override
    public int sizeof() { return SIZEOF; }

    /** a null terminated character array of size {@link MSFTSpatialAnchorPersistence#XR_MAX_SPATIAL_ANCHOR_NAME_SIZE_MSFT MAX_SPATIAL_ANCHOR_NAME_SIZE_MSFT}. */
    @NativeType("char[XR_MAX_SPATIAL_ANCHOR_NAME_SIZE_MSFT]")
    public ByteBuffer name() { return nname(address()); }
    /** a null terminated character array of size {@link MSFTSpatialAnchorPersistence#XR_MAX_SPATIAL_ANCHOR_NAME_SIZE_MSFT MAX_SPATIAL_ANCHOR_NAME_SIZE_MSFT}. */
    @NativeType("char[XR_MAX_SPATIAL_ANCHOR_NAME_SIZE_MSFT]")
    public String nameString() { return nnameString(address()); }

    /** Copies the specified encoded string to the {@link #name} field. */
    public XrSpatialAnchorPersistenceNameMSFT name(@NativeType("char[XR_MAX_SPATIAL_ANCHOR_NAME_SIZE_MSFT]") ByteBuffer value) { nname(address(), value); return this; }

    /**
     * Copies the specified struct data to this struct.
     *
     * @param src the source struct
     *
     * @return this struct
     */
    public XrSpatialAnchorPersistenceNameMSFT set(XrSpatialAnchorPersistenceNameMSFT src) {
        memCopy(src.address(), address(), SIZEOF);
        return this;
    }

    // -----------------------------------

    /** Returns a new {@code XrSpatialAnchorPersistenceNameMSFT} instance allocated with {@link MemoryUtil#memAlloc memAlloc}. The instance must be explicitly freed. */
    public static XrSpatialAnchorPersistenceNameMSFT malloc() {
        return wrap(XrSpatialAnchorPersistenceNameMSFT.class, nmemAllocChecked(SIZEOF));
    }

    /** Returns a new {@code XrSpatialAnchorPersistenceNameMSFT} instance allocated with {@link MemoryUtil#memCalloc memCalloc}. The instance must be explicitly freed. */
    public static XrSpatialAnchorPersistenceNameMSFT calloc() {
        return wrap(XrSpatialAnchorPersistenceNameMSFT.class, nmemCallocChecked(1, SIZEOF));
    }

    /** Returns a new {@code XrSpatialAnchorPersistenceNameMSFT} instance allocated with {@link BufferUtils}. */
    public static XrSpatialAnchorPersistenceNameMSFT create() {
        // Heap ByteBuffer-backed allocation; freed by the GC along with the container.
        ByteBuffer container = BufferUtils.createByteBuffer(SIZEOF);
        return wrap(XrSpatialAnchorPersistenceNameMSFT.class, memAddress(container), container);
    }

    /** Returns a new {@code XrSpatialAnchorPersistenceNameMSFT} instance for the specified memory address. */
    public static XrSpatialAnchorPersistenceNameMSFT create(long address) {
        return wrap(XrSpatialAnchorPersistenceNameMSFT.class, address);
    }

    /** Like {@link #create(long) create}, but returns {@code null} if {@code address} is {@code NULL}. */
    @Nullable
    public static XrSpatialAnchorPersistenceNameMSFT createSafe(long address) {
        return address == NULL ? null : wrap(XrSpatialAnchorPersistenceNameMSFT.class, address);
    }

    /**
     * Returns a new {@link XrSpatialAnchorPersistenceNameMSFT.Buffer} instance allocated with {@link MemoryUtil#memAlloc memAlloc}. The instance must be explicitly freed.
     *
     * @param capacity the buffer capacity
     */
    public static XrSpatialAnchorPersistenceNameMSFT.Buffer malloc(int capacity) {
        return wrap(Buffer.class, nmemAllocChecked(__checkMalloc(capacity, SIZEOF)), capacity);
    }

    /**
     * Returns a new {@link XrSpatialAnchorPersistenceNameMSFT.Buffer} instance allocated with {@link MemoryUtil#memCalloc memCalloc}. The instance must be explicitly freed.
     *
     * @param capacity the buffer capacity
     */
    public static XrSpatialAnchorPersistenceNameMSFT.Buffer calloc(int capacity) {
        return wrap(Buffer.class, nmemCallocChecked(capacity, SIZEOF), capacity);
    }

    /**
     * Returns a new {@link XrSpatialAnchorPersistenceNameMSFT.Buffer} instance allocated with {@link BufferUtils}.
     *
     * @param capacity the buffer capacity
     */
    public static XrSpatialAnchorPersistenceNameMSFT.Buffer create(int capacity) {
        ByteBuffer container = __create(capacity, SIZEOF);
        return wrap(Buffer.class, memAddress(container), capacity, container);
    }

    /**
     * Create a {@link XrSpatialAnchorPersistenceNameMSFT.Buffer} instance at the specified memory.
     *
     * @param address  the memory address
     * @param capacity the buffer capacity
     */
    public static XrSpatialAnchorPersistenceNameMSFT.Buffer create(long address, int capacity) {
        return wrap(Buffer.class, address, capacity);
    }

    /** Like {@link #create(long, int) create}, but returns {@code null} if {@code address} is {@code NULL}. */
    @Nullable
    public static XrSpatialAnchorPersistenceNameMSFT.Buffer createSafe(long address, int capacity) {
        return address == NULL ? null : wrap(Buffer.class, address, capacity);
    }

    /**
     * Returns a new {@code XrSpatialAnchorPersistenceNameMSFT} instance allocated on the specified {@link MemoryStack}.
     *
     * @param stack the stack from which to allocate
     */
    public static XrSpatialAnchorPersistenceNameMSFT malloc(MemoryStack stack) {
        return wrap(XrSpatialAnchorPersistenceNameMSFT.class, stack.nmalloc(ALIGNOF, SIZEOF));
    }

    /**
     * Returns a new {@code XrSpatialAnchorPersistenceNameMSFT} instance allocated on the specified {@link MemoryStack} and initializes all its bits to zero.
     *
     * @param stack the stack from which to allocate
     */
    public static XrSpatialAnchorPersistenceNameMSFT calloc(MemoryStack stack) {
        return wrap(XrSpatialAnchorPersistenceNameMSFT.class, stack.ncalloc(ALIGNOF, 1, SIZEOF));
    }

    /**
     * Returns a new {@link XrSpatialAnchorPersistenceNameMSFT.Buffer} instance allocated on the specified {@link MemoryStack}.
     *
     * @param stack    the stack from which to allocate
     * @param capacity the buffer capacity
     */
    public static XrSpatialAnchorPersistenceNameMSFT.Buffer malloc(int capacity, MemoryStack stack) {
        return wrap(Buffer.class, stack.nmalloc(ALIGNOF, capacity * SIZEOF), capacity);
    }

    /**
     * Returns a new {@link XrSpatialAnchorPersistenceNameMSFT.Buffer} instance allocated on the specified {@link MemoryStack} and initializes all its bits to zero.
     *
     * @param stack    the stack from which to allocate
     * @param capacity the buffer capacity
     */
    public static XrSpatialAnchorPersistenceNameMSFT.Buffer calloc(int capacity, MemoryStack stack) {
        return wrap(Buffer.class, stack.ncalloc(ALIGNOF, capacity, SIZEOF), capacity);
    }

    // -----------------------------------

    /** Unsafe version of {@link #name}. */
    public static ByteBuffer nname(long struct) { return memByteBuffer(struct + XrSpatialAnchorPersistenceNameMSFT.NAME, XR_MAX_SPATIAL_ANCHOR_NAME_SIZE_MSFT); }
    /** Unsafe version of {@link #nameString}. */
    public static String nnameString(long struct) { return memUTF8(struct + XrSpatialAnchorPersistenceNameMSFT.NAME); }
    /** Unsafe version of {@link #name(ByteBuffer) name}. */
    public static void nname(long struct, ByteBuffer value) {
        if (CHECKS) {
            // Enforce the OpenXR contract: value must be NUL-terminated and must
            // fit within the fixed-size char array.
            checkNT1(value);
            checkGT(value, XR_MAX_SPATIAL_ANCHOR_NAME_SIZE_MSFT);
        }
        memCopy(memAddress(value), struct + XrSpatialAnchorPersistenceNameMSFT.NAME, value.remaining());
    }

    // -----------------------------------

    /** An array of {@link XrSpatialAnchorPersistenceNameMSFT} structs. */
    public static class Buffer extends StructBuffer<XrSpatialAnchorPersistenceNameMSFT, Buffer> implements NativeResource {

        // Flyweight used by StructBuffer to create element views; address -1 is a
        // placeholder that is never dereferenced.
        private static final XrSpatialAnchorPersistenceNameMSFT ELEMENT_FACTORY = XrSpatialAnchorPersistenceNameMSFT.create(-1L);

        /**
         * Creates a new {@code XrSpatialAnchorPersistenceNameMSFT.Buffer} instance backed by the specified container.
         *
         * Changes to the container's content will be visible to the struct buffer instance and vice versa. The two buffers' position, limit, and mark values
         * will be independent. The new buffer's position will be zero, its capacity and its limit will be the number of bytes remaining in this buffer divided
         * by {@link XrSpatialAnchorPersistenceNameMSFT#SIZEOF}, and its mark will be undefined.
         *
         * <p>The created buffer instance holds a strong reference to the container object.</p>
         */
        public Buffer(ByteBuffer container) {
            super(container, container.remaining() / SIZEOF);
        }

        public Buffer(long address, int cap) {
            super(address, null, -1, 0, cap, cap);
        }

        Buffer(long address, @Nullable ByteBuffer container, int mark, int pos, int lim, int cap) {
            super(address, container, mark, pos, lim, cap);
        }

        @Override
        protected Buffer self() {
            return this;
        }

        @Override
        protected XrSpatialAnchorPersistenceNameMSFT getElementFactory() {
            return ELEMENT_FACTORY;
        }

        /** @return a {@link ByteBuffer} view of the {@link XrSpatialAnchorPersistenceNameMSFT#name} field. */
        @NativeType("char[XR_MAX_SPATIAL_ANCHOR_NAME_SIZE_MSFT]")
        public ByteBuffer name() { return XrSpatialAnchorPersistenceNameMSFT.nname(address()); }
        /** @return the null-terminated string stored in the {@link XrSpatialAnchorPersistenceNameMSFT#name} field. */
        @NativeType("char[XR_MAX_SPATIAL_ANCHOR_NAME_SIZE_MSFT]")
        public String nameString() { return XrSpatialAnchorPersistenceNameMSFT.nnameString(address()); }

        /** Copies the specified encoded string to the {@link XrSpatialAnchorPersistenceNameMSFT#name} field. */
        public XrSpatialAnchorPersistenceNameMSFT.Buffer name(@NativeType("char[XR_MAX_SPATIAL_ANCHOR_NAME_SIZE_MSFT]") ByteBuffer value) { XrSpatialAnchorPersistenceNameMSFT.nname(address(), value); return this; }

    }

}
bsd-3-clause
vasyl-khomko/k-9
k9mail/src/androidTest/java/com/fsck/k9/mailstore/MessageViewInfoExtractorTest.java
8785
package com.fsck.k9.mailstore;

import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Locale;
import java.util.TimeZone;

import android.support.test.InstrumentationRegistry;
import android.support.test.runner.AndroidJUnit4;

import com.fsck.k9.activity.K9ActivityCommon;
import com.fsck.k9.mail.Address;
import com.fsck.k9.mail.Message.RecipientType;
import com.fsck.k9.mail.MessagingException;
import com.fsck.k9.mail.Part;
import com.fsck.k9.mail.internet.MessageExtractor;
import com.fsck.k9.mail.internet.MimeBodyPart;
import com.fsck.k9.mail.internet.MimeMessage;
import com.fsck.k9.mail.internet.MimeMessageHelper;
import com.fsck.k9.mail.internet.MimeMultipart;
import com.fsck.k9.mail.internet.TextBody;
import com.fsck.k9.mail.internet.Viewable;
import com.fsck.k9.mailstore.MessageViewInfoExtractor.ViewableExtractedText;

import org.junit.Test;
import org.junit.runner.RunWith;

import static com.fsck.k9.mailstore.MessageViewInfoExtractor.extractTextFromViewables;
import static junit.framework.Assert.assertEquals;

/**
 * Instrumentation tests for {@link MessageViewInfoExtractor#extractTextFromViewables}.
 *
 * <p>Each test builds a {@link MimeMessage} in memory, runs
 * {@link MessageExtractor#findViewablesAndAttachments} to collect the viewable parts, and then
 * asserts on the plain-text and HTML representations produced by the extractor.</p>
 */
@RunWith(AndroidJUnit4.class)
public class MessageViewInfoExtractorTest {

    /** A single text/plain body is wrapped in a &lt;pre&gt; block with HTML-escaped content. */
    @Test
    public void testSimplePlainTextMessage() throws MessagingException {
        String bodyText = "K-9 Mail rocks :>";

        // Create text/plain body
        TextBody body = new TextBody(bodyText);

        // Create message
        MimeMessage message = new MimeMessage();
        MimeMessageHelper.setBody(message, body);

        // Extract text
        List<Part> outputNonViewableParts = new ArrayList<>();
        ArrayList<Viewable> outputViewableParts = new ArrayList<>();
        MessageExtractor.findViewablesAndAttachments(message, outputViewableParts, outputNonViewableParts);
        ViewableExtractedText container =
                extractTextFromViewables(InstrumentationRegistry.getTargetContext(), outputViewableParts);

        String expectedText = bodyText;
        String expectedHtml =
                "<pre class=\"k9mail\">" +
                "K-9 Mail rocks :&gt;" +
                "</pre>";

        assertEquals(expectedText, container.text);
        assertEquals(expectedHtml, container.html);
    }

    /** A text/html body is passed through as-is; the text form has entities decoded and tags stripped. */
    @Test
    public void testSimpleHtmlMessage() throws MessagingException {
        String bodyText = "<strong>K-9 Mail</strong> rocks :&gt;";

        // Create text/html body
        TextBody body = new TextBody(bodyText);

        // Create message
        MimeMessage message = new MimeMessage();
        message.setHeader("Content-Type", "text/html");
        MimeMessageHelper.setBody(message, body);

        // Extract text
        List<Part> outputNonViewableParts = new ArrayList<>();
        ArrayList<Viewable> outputViewableParts = new ArrayList<>();
        MessageExtractor.findViewablesAndAttachments(message, outputViewableParts, outputNonViewableParts);
        ViewableExtractedText container =
                extractTextFromViewables(InstrumentationRegistry.getTargetContext(), outputViewableParts);

        String expectedText = "K-9 Mail rocks :>";
        String expectedHtml = bodyText;

        assertEquals(expectedText, container.text);
        assertEquals(expectedHtml, container.html);
    }

    /** Two text/plain parts in a multipart/mixed message are joined with a divider. */
    @Test
    public void testMultipartPlainTextMessage() throws MessagingException {
        String bodyText1 = "text body 1";
        String bodyText2 = "text body 2";

        // Create text/plain bodies
        TextBody body1 = new TextBody(bodyText1);
        TextBody body2 = new TextBody(bodyText2);

        // Create multipart/mixed part
        MimeMultipart multipart = new MimeMultipart();
        MimeBodyPart bodyPart1 = new MimeBodyPart(body1, "text/plain");
        MimeBodyPart bodyPart2 = new MimeBodyPart(body2, "text/plain");
        multipart.addBodyPart(bodyPart1);
        multipart.addBodyPart(bodyPart2);

        // Create message
        MimeMessage message = new MimeMessage();
        MimeMessageHelper.setBody(message, multipart);

        // Extract text
        List<Part> outputNonViewableParts = new ArrayList<>();
        ArrayList<Viewable> outputViewableParts = new ArrayList<>();
        MessageExtractor.findViewablesAndAttachments(message, outputViewableParts, outputNonViewableParts);
        ViewableExtractedText container =
                extractTextFromViewables(InstrumentationRegistry.getTargetContext(), outputViewableParts);

        String expectedText = bodyText1 + "\r\n\r\n" +
                "------------------------------------------------------------------------\r\n\r\n" +
                bodyText2;
        String expectedHtml =
                "<pre class=\"k9mail\">" +
                bodyText1 +
                "</pre>" +
                "<p style=\"margin-top: 2.5em; margin-bottom: 1em; " +
                "border-bottom: 1px solid #000\"></p>" +
                "<pre class=\"k9mail\">" +
                bodyText2 +
                "</pre>";

        assertEquals(expectedText, container.text);
        assertEquals(expectedHtml, container.html);
    }

    /**
     * A text/plain part followed by an inline message/rfc822 attachment: the inner message's
     * headers are rendered as a table (HTML) / header lines (text) before its body.
     *
     * <p>Locale and time zone are pinned so that the formatted "Sent:" date in the expected
     * strings is stable across devices.</p>
     */
    @Test
    public void testTextPlusRfc822Message() throws MessagingException {
        K9ActivityCommon.setLanguage(InstrumentationRegistry.getTargetContext(), "en");
        Locale.setDefault(Locale.US);
        TimeZone.setDefault(TimeZone.getTimeZone("GMT+01:00"));

        String bodyText = "Some text here";
        String innerBodyText = "Hey there. I'm inside a message/rfc822 (inline) attachment.";

        // Create text/plain body
        TextBody textBody = new TextBody(bodyText);

        // Create inner text/plain body
        TextBody innerBody = new TextBody(innerBodyText);

        // Create message/rfc822 body
        MimeMessage innerMessage = new MimeMessage();
        // Deprecated Date(int, int, int) ctor: year is offset from 1900, month is 0-based,
        // so (112, 2, 17) is 2012-03-17 — matching the "Sent:" strings asserted below.
        innerMessage.addSentDate(new Date(112, 2, 17), false);
        innerMessage.setRecipients(RecipientType.TO, new Address[] { new Address("to@example.com") });
        innerMessage.setSubject("Subject");
        innerMessage.setFrom(new Address("from@example.com"));
        MimeMessageHelper.setBody(innerMessage, innerBody);

        // Create multipart/mixed part
        MimeMultipart multipart = new MimeMultipart();
        MimeBodyPart bodyPart1 = new MimeBodyPart(textBody, "text/plain");
        MimeBodyPart bodyPart2 = new MimeBodyPart(innerMessage, "message/rfc822");
        bodyPart2.setHeader("Content-Disposition", "inline; filename=\"message.eml\"");
        multipart.addBodyPart(bodyPart1);
        multipart.addBodyPart(bodyPart2);

        // Create message
        MimeMessage message = new MimeMessage();
        MimeMessageHelper.setBody(message, multipart);

        // Extract text
        List<Part> outputNonViewableParts = new ArrayList<>();
        ArrayList<Viewable> outputViewableParts = new ArrayList<>();
        MessageExtractor.findViewablesAndAttachments(message, outputViewableParts, outputNonViewableParts);
        ViewableExtractedText container =
                extractTextFromViewables(InstrumentationRegistry.getTargetContext(), outputViewableParts);

        String expectedText = bodyText + "\r\n\r\n" +
                "----- message.eml ------------------------------------------------------" + "\r\n\r\n" +
                "From: from@example.com" + "\r\n" +
                "To: to@example.com" + "\r\n" +
                "Sent: Sat Mar 17 00:00:00 GMT+01:00 2012" + "\r\n" +
                "Subject: Subject" + "\r\n" +
                "\r\n" +
                innerBodyText;
        String expectedHtml =
                "<pre class=\"k9mail\">" +
                bodyText +
                "</pre>" +
                "<p style=\"margin-top: 2.5em; margin-bottom: 1em; border-bottom: " +
                "1px solid #000\">message.eml</p>" +
                "<table style=\"border: 0\">" +
                "<tr>" +
                "<th style=\"text-align: left; vertical-align: top;\">From:</th>" +
                "<td>from@example.com</td>" +
                "</tr><tr>" +
                "<th style=\"text-align: left; vertical-align: top;\">To:</th>" +
                "<td>to@example.com</td>" +
                "</tr><tr>" +
                "<th style=\"text-align: left; vertical-align: top;\">Sent:</th>" +
                "<td>Sat Mar 17 00:00:00 GMT+01:00 2012</td>" +
                "</tr><tr>" +
                "<th style=\"text-align: left; vertical-align: top;\">Subject:</th>" +
                "<td>Subject</td>" +
                "</tr>" +
                "</table>" +
                "<pre class=\"k9mail\">" +
                innerBodyText +
                "</pre>";

        assertEquals(expectedText, container.text);
        assertEquals(expectedHtml, container.html);
    }
}
bsd-3-clause
pscadiz/pmd-4.2.6-gds
src/net/sourceforge/pmd/dfa/variableaccess/VariableAccess.java
1458
/*
 * Created on 14.07.2004
 */
package net.sourceforge.pmd.dfa.variableaccess;

/**
 * Records a single access to a variable during data-flow analysis: a
 * definition (assignment), a reference (read) or an undefinition (the
 * variable leaving scope).
 *
 * <p>Instances are immutable.</p>
 *
 * @author raik
 */
public class VariableAccess {

    /** The variable is assigned a value. */
    public static final int DEFINITION = 0;
    /** The variable's value is read. */
    public static final int REFERENCING = 1;
    /** The variable becomes undefined (e.g. goes out of scope). */
    public static final int UNDEFINITION = 2;

    private final int accessType;
    private final String variableName;

    /**
     * @param accessType one of {@link #DEFINITION}, {@link #REFERENCING} or
     *                   {@link #UNDEFINITION}
     * @param varName    the accessed variable; anything from the first '.' on is
     *                   discarded, so a field access like {@code foo.bar} is
     *                   recorded as an access of {@code foo}
     */
    public VariableAccess(int accessType, String varName) {
        this.accessType = accessType;
        // Strip a trailing field/method path: "foo.bar" counts as an access of "foo".
        int dotPos = varName.indexOf('.');
        variableName = dotPos < 0 ? varName : varName.substring(0, dotPos);
    }

    // TODO completely encapsulate this somehow?
    /** @return the raw access type constant */
    public int getAccessType() {
        return accessType;
    }

    /** @return whether this access is of the given type */
    public boolean accessTypeMatches(int otherType) {
        return accessType == otherType;
    }

    /** @return {@code true} if this access is a definition */
    public boolean isDefinition() {
        return this.accessType == DEFINITION;
    }

    /** @return {@code true} if this access is a reference */
    public boolean isReference() {
        return this.accessType == REFERENCING;
    }

    /** @return {@code true} if this access is an undefinition */
    public boolean isUndefinition() {
        return this.accessType == UNDEFINITION;
    }

    /** @return the (possibly dot-stripped) variable name */
    public String getVariableName() {
        return variableName;
    }

    @Override
    public String toString() {
        switch (accessType) {
            case DEFINITION:
                return "Definition(" + variableName + ")";
            case REFERENCING:
                return "Reference(" + variableName + ")";
            case UNDEFINITION:
                return "Undefinition(" + variableName + ")";
            default:
                // Unreachable for instances built with the public constants.
                throw new RuntimeException("Access type was never set");
        }
    }
}
bsd-3-clause
ern/rsf
rsf-core/ponderutilcore/src/uk/org/ponder/stringutil/ByteToCharConverter.java
7013
package uk.org.ponder.stringutil; /** This abstract class forms part of the fast and better instrumented * uk.org.ponder.streamutil.DirectInputStreamReader architecture. It is * intended as a base class for specific byte to character decoders * (such as ByteToCharUTF8), and abstracts away the non-stream and * non-encoding specific tasks of working out whether anything needs * doing or not, and if so how much and where it is. */ public abstract class ByteToCharConverter { public ByteToCharConverter() {} /** Return code from <code>convert()</code> indicating conversion stopped because * there was no space left in the output buffer. */ public static final int STOP_OUTPUT_EXHAUSTED = 0; /** Return code from <code>convert()</code> indicating conversion stopped because * there was no space left in the input buffer. */ public static final int STOP_INPUT_EXHAUSTED = 1; /** Return code from <code>convert()</code> indicating conversion stopped because * there was no input left in the input buffer, but also that no partial input sequence * was left in it. */ public static final int STOP_INPUT_EXHAUSTED_EXACTLY = 2; /** Convert as many bytes from <code>inbuffer</code> to characters in <code>outbuffer</code> * as possible. The return codes from this method are listed above, indicating * which out of the input and the output was actually exhausted. * @return int */ public abstract int convert(); /** Returns the name of the byte to character (UTF-16) encoding performed by this * converter * @return the character encoding */ public abstract String getCharacterEncoding(); /** Returns the maximum possible number of characters that could be decoded from * an input byte sequence of the specified length. Currently disused. * @param inputsize The number of input bytes for which the maximum decoded characters * are required. 
* @return int */ public abstract int getMaxOutput(int inputsize); /* * Offset of next character to be output */ protected int outbufferpos; protected int outbufferlimit; protected char[] outbuffer; protected int totalbytesin; /* * Offset of next byte to be converted */ protected int inbufferpos; protected int inbufferlimit; protected byte[] inbuffer; /* * Length of bad input that caused a MalformedInputException. */ protected int input_sequence_length; protected int output_sequence_length; /* * Number of lines that have gone by */ protected int linenumber; private EncodingErrorHandler errorhandler; // The following four methods require public access since they are used // from above by DirectInputStreamReader, // resulting from a possible factorisation error in all of this logic. // See Felixified I/O routines involving crank() etc. public int getOutputBufferPos() { return outbufferpos; } public int getInputBufferLimit() { return inbufferlimit; } public byte[] getInputBuffer() { return inbuffer; } public void increaseInputBufferLimit(int bytesread) { inbufferlimit += bytesread; } /** Sets the output buffer to which decoded character data should be written. * @param outbuffer A character buffer to which character data can be written. * @param outbufferpos The position within the buffer to which the character data * can be written. * @param outbufferlimit The index of the logical end of the buffer. If data * is written exactly up to this point, the buffer will be considered full and * decoding will stop until another buffer is supplied. */ public void setOutputBuffer(char[] outbuffer, int outbufferpos, int outbufferlimit) { this.outbuffer = outbuffer; this.outbufferpos = outbufferpos; this.outbufferlimit = outbufferlimit; } /** Sets the error handler that will be used to report errors encountered in the * byte encoding of the data. * @param errorhandler An interface through which decoding errors may be reported. 
*/ public void setEncodingErrorHandler(EncodingErrorHandler errorhandler) { this.errorhandler = errorhandler; } /** Reorganise the input buffer by rotating the current input point to the beginning, * ready to receive more input after <code>inbufferlimit</code> */ public void swizzInputBuffer() { System.arraycopy(inbuffer, inbufferpos, inbuffer, 0, inbufferlimit - inbufferpos); // totalbytesin += inbufferpos; inbufferlimit = inbufferlimit - inbufferpos; inbufferpos = 0; } /** Trigger an encoding error to be delivered to any registered EncodingErrorHandler. * There is one sort of error that can only be detected from the * outside of this class, namely an incomplete input sequence but no * further input available. For this reason this method has been * given public access to allow an error report to be triggered * externally * @param errortype A String reprenting the type of the error that has occurred. * This string will be passed on via the EncodingErrorHandler interface.*/ public void handleEncodingError(String errortype) { if (errorhandler != null) { int max_sequence_available = inbufferlimit - inbufferpos; // do not surprise our clients by returning pointers to nonexistent bytes // should the error be invoked by DirectInputStreamReader as a result of // incomplete final sequence. if (max_sequence_available > input_sequence_length) max_sequence_available = input_sequence_length; errorhandler.reportEncodingError (errortype, linenumber, totalbytesin, inbuffer, inbufferpos, max_sequence_available); } } /** Ensure that the current input buffer is big enough to accommodate the specified * number of input bytes, by reallocating it if necessary. This method does not * preserve the buffer contents. * @param buffersize The required input buffer size. 
*/ public void ensureInputBuffer(int buffersize) { if (inbuffer == null || inbuffer.length < buffersize) { inbuffer = new byte[buffersize]; } } /** Destroy all the state stored in this converter, so it holds no resources * and is ready to begin conversion again. */ public void blastState() { inbufferpos = 0; inbufferlimit = 0; input_sequence_length = 0; outbufferpos = 0; outbufferlimit = 0; output_sequence_length = 0; outbuffer = null; inbuffer = null; linenumber = 1; totalbytesin = 0; errorhandler = null; } /** Returns the number of bytes needed to complete the current input sequence. * @return the number of bytes needed toc complete the current input sequence. * positive if we need more bytes to complete the current sequence, zero if we have exactly * used up all input, negative if there is more input remaining. */ public int missing_bytes() { return inbufferpos + input_sequence_length - inbufferlimit; } }
bsd-3-clause
cybergarage/cybergarage-x3d
core/src/main/java/org/cybergarage/x3d/parser/vrml97/VRML97ParserTokenManager.java
115770
/* Generated By:JavaCC: Do not edit this line. VRML97ParserTokenManager.java */ package org.cybergarage.x3d.parser.vrml97; import org.cybergarage.x3d.field.*; import org.cybergarage.x3d.node.*; import org.cybergarage.x3d.parser.*; public class VRML97ParserTokenManager implements VRML97ParserConstants { public java.io.PrintStream debugStream = System.out; public void setDebugStream(java.io.PrintStream ds) { debugStream = ds; } private final int jjStopStringLiteralDfa_0(int pos, long active0, long active1, long active2, long active3) { switch (pos) { case 0: if ((active1 & 0xa0080400L) != 0L || (active2 & 0x600000010L) != 0L) { jjmatchedKind = 227; return 26; } if ((active0 & 0x80000L) != 0L) return 17; if ((active0 & 0xfffffffffff798c0L) != 0L || (active1 & 0xffffffff5ff7fbffL) != 0L || (active2 & 0xfffffff9ffffffefL) != 0L || (active3 & 0x3ffffffffL) != 0L) { jjmatchedKind = 227; return 6; } return -1; case 1: if ((active0 & 0x40000L) != 0L || (active1 & 0x8000L) != 0L) return 6; if ((active0 & 0xfffffffffff398c0L) != 0L || (active1 & 0xffffffffffff7fffL) != 0L || (active2 & 0xffffffffffffffffL) != 0L || (active3 & 0x3ffffffffL) != 0L) { jjmatchedKind = 227; jjmatchedPos = 1; return 6; } return -1; case 2: if ((active0 & 0xfffbf7ffff7218c0L) != 0L || (active1 & 0xdffff7fbffef7fd3L) != 0L || (active2 & 0xffffffffffffffffL) != 0L || (active3 & 0x3ffffffffL) != 0L) { if (jjmatchedPos != 2) { jjmatchedKind = 227; jjmatchedPos = 2; } return 6; } if ((active0 & 0x4080000818000L) != 0L || (active1 & 0x200008040010002cL) != 0L) return 6; return -1; case 3: if ((active0 & 0xdbf3fffdff721040L) != 0L || (active1 & 0xdffffffbffef7fdbL) != 0L || (active2 & 0xffffffffffffefffL) != 0L || (active3 & 0x1dffe7bffL) != 0L) { if (jjmatchedPos != 3) { jjmatchedKind = 227; jjmatchedPos = 3; } return 6; } if ((active0 & 0x2408000200000880L) != 0L || (active2 & 0x1000L) != 0L || (active3 & 0x220018400L) != 0L) return 6; return -1; case 4: if ((active0 & 0xd833fff9ff700040L) != 0L || 
(active1 & 0x5f37bef9fecf7fdaL) != 0L || (active2 & 0xff7ffff7fff7eefeL) != 0L || (active3 & 0xcf3ffbefL) != 0L) { if (jjmatchedPos != 4) { jjmatchedKind = 227; jjmatchedPos = 4; } return 6; } if ((active0 & 0x3c0000400021000L) != 0L || (active1 & 0x80c8410201200001L) != 0L || (active2 & 0x80000800080101L) != 0L || (active3 & 0x110c00010L) != 0L) return 6; return -1; case 5: if ((active0 & 0x5800088000400000L) != 0L || (active1 & 0x5610008080801040L) != 0L || (active2 & 0xc01209104001c000L) != 0L || (active3 & 0x1a81L) != 0L) return 6; if ((active0 & 0x8233f779ff300040L) != 0L || (active1 & 0x9e7be797e6f6f9aL) != 0L || (active2 & 0x3f6df6e7bff62efeL) != 0L || (active3 & 0xdfbfe16eL) != 0L) { if (jjmatchedPos != 5) { jjmatchedKind = 227; jjmatchedPos = 5; } return 6; } return -1; case 6: if ((active0 & 0x9213f5b93f300040L) != 0L || (active1 & 0xbe49e317eef6b9aL) != 0L || (active2 & 0x390c0605bff72efeL) != 0L || (active3 & 0xdfbfe16fL) != 0L) { if (jjmatchedPos != 6) { jjmatchedKind = 227; jjmatchedPos = 6; } return 6; } if ((active0 & 0x200240c0000000L) != 0L || (active1 & 0x3204800000400L) != 0L || (active2 & 0x661f0e200000000L) != 0L) return 6; return -1; case 7: if ((active0 & 0x921350a957300000L) != 0L || (active1 & 0xac48e312aef6802L) != 0L || (active2 & 0x1040201baf72efcL) != 0L || (active3 & 0xdb9fe12fL) != 0L) { if (jjmatchedPos != 7) { jjmatchedKind = 227; jjmatchedPos = 7; } return 6; } if ((active0 & 0xa51028000040L) != 0L || (active1 & 0x120100054000398L) != 0L || (active2 & 0x3808040405000002L) != 0L || (active3 & 0x4200040L) != 0L) return 6; return -1; case 8: if ((active0 & 0x11008901000000L) != 0L || (active1 & 0x2020050800L) != 0L || (active2 & 0x820L) != 0L || (active3 & 0x82106006L) != 0L) return 6; if ((active0 & 0x9202502056300000L) != 0L || (active1 & 0xbc48e110aea6082L) != 0L || (active2 & 0x1040201baf726dcL) != 0L || (active3 & 0x598f8129L) != 0L) { jjmatchedKind = 227; jjmatchedPos = 8; return 6; } return -1; case 9: if ((active0 & 
0x8000002014000000L) != 0L || (active1 & 0xc002000a002002L) != 0L || (active2 & 0x104020000800400L) != 0L || (active3 & 0x20020L) != 0L) return 6; if ((active0 & 0x1202500042300000L) != 0L || (active1 & 0xb048c1100ea4080L) != 0L || (active2 & 0x1ba7722dcL) != 0L || (active3 & 0x598d8109L) != 0L) { if (jjmatchedPos != 9) { jjmatchedKind = 227; jjmatchedPos = 9; } return 6; } return -1; case 10: if ((active0 & 0x500002100000L) != 0L || (active1 & 0x2000c0000400000L) != 0L || (active2 & 0x2700000L) != 0L || (active3 & 0x19080108L) != 0L) return 6; if ((active0 & 0x1202000040200000L) != 0L || (active1 & 0x904801100aa4082L) != 0L || (active2 & 0x1b80722dcL) != 0L || (active3 & 0x40858001L) != 0L) { jjmatchedKind = 227; jjmatchedPos = 10; return 6; } return -1; case 11: if ((active0 & 0x1000000000000000L) != 0L || (active1 & 0x800100000000L) != 0L || (active2 & 0x1800400c4L) != 0L || (active3 & 0x1L) != 0L) return 6; if ((active0 & 0x202000040200000L) != 0L || (active1 & 0x904001000aa4082L) != 0L || (active2 & 0x38032218L) != 0L || (active3 & 0x40858000L) != 0L) { jjmatchedKind = 227; jjmatchedPos = 11; return 6; } return -1; case 12: if ((active1 & 0x100000000080000L) != 0L || (active2 & 0x18L) != 0L || (active3 & 0x40000L) != 0L) return 6; if ((active0 & 0x202000040200000L) != 0L || (active1 & 0x804001000a24082L) != 0L || (active2 & 0x38032200L) != 0L || (active3 & 0x40818000L) != 0L) { jjmatchedKind = 227; jjmatchedPos = 12; return 6; } return -1; case 13: if ((active0 & 0x200000040000000L) != 0L || (active1 & 0x1000824002L) != 0L || (active2 & 0x38032000L) != 0L || (active3 & 0x40818000L) != 0L) { jjmatchedKind = 227; jjmatchedPos = 13; return 6; } if ((active0 & 0x2000000200000L) != 0L || (active1 & 0x804000000200080L) != 0L || (active2 & 0x200L) != 0L) return 6; return -1; case 14: if ((active0 & 0x200000040000000L) != 0L || (active1 & 0x24002L) != 0L || (active2 & 0x28030000L) != 0L || (active3 & 0x40818000L) != 0L) { jjmatchedKind = 227; jjmatchedPos = 14; return 
6; } if ((active1 & 0x1000800000L) != 0L || (active2 & 0x10002000L) != 0L) return 6; return -1; case 15: if ((active0 & 0x200000000000000L) != 0L || (active1 & 0x2L) != 0L || (active2 & 0x28030000L) != 0L || (active3 & 0x8000L) != 0L) { jjmatchedKind = 227; jjmatchedPos = 15; return 6; } if ((active0 & 0x40000000L) != 0L || (active1 & 0x24000L) != 0L || (active3 & 0x40810000L) != 0L) return 6; return -1; case 16: if ((active1 & 0x2L) != 0L || (active2 & 0x28030000L) != 0L) { jjmatchedKind = 227; jjmatchedPos = 16; return 6; } if ((active0 & 0x200000000000000L) != 0L || (active3 & 0x8000L) != 0L) return 6; return -1; case 17: if ((active2 & 0x20010000L) != 0L) return 6; if ((active1 & 0x2L) != 0L || (active2 & 0x8020000L) != 0L) { jjmatchedKind = 227; jjmatchedPos = 17; return 6; } return -1; case 18: if ((active1 & 0x2L) != 0L || (active2 & 0x8020000L) != 0L) { jjmatchedKind = 227; jjmatchedPos = 18; return 6; } return -1; case 19: if ((active2 & 0x8000000L) != 0L) return 6; if ((active1 & 0x2L) != 0L || (active2 & 0x20000L) != 0L) { jjmatchedKind = 227; jjmatchedPos = 19; return 6; } return -1; case 20: if ((active1 & 0x2L) != 0L || (active2 & 0x20000L) != 0L) { jjmatchedKind = 227; jjmatchedPos = 20; return 6; } return -1; case 21: if ((active2 & 0x20000L) != 0L) { jjmatchedKind = 227; jjmatchedPos = 21; return 6; } if ((active1 & 0x2L) != 0L) return 6; return -1; default : return -1; } } private final int jjStartNfa_0(int pos, long active0, long active1, long active2, long active3) { return jjMoveNfa_0(jjStopStringLiteralDfa_0(pos, active0, active1, active2, active3), pos + 1); } private final int jjStopAtPos(int pos, int kind) { jjmatchedKind = kind; jjmatchedPos = pos; return pos + 1; } private final int jjStartNfaWithStates_0(int pos, int kind, int state) { jjmatchedKind = kind; jjmatchedPos = pos; try { curChar = input_stream.readChar(); } catch(java.io.IOException e) { return pos + 1; } return jjMoveNfa_0(state, pos + 1); } private final int 
jjMoveStringLiteralDfa0_0() { switch(curChar) { case 44: return jjStopAtPos(0, 10); case 46: return jjStartNfaWithStates_0(0, 19, 17); case 65: return jjMoveStringLiteralDfa1_0(0x110400000L, 0x0L, 0x0L, 0x0L); case 66: return jjMoveStringLiteralDfa1_0(0x5002000000000L, 0x0L, 0x0L, 0x0L); case 67: return jjMoveStringLiteralDfa1_0(0x8690000000000000L, 0x92L, 0x0L, 0x0L); case 68: return jjMoveStringLiteralDfa1_0(0x8000L, 0x4000L, 0x0L, 0x0L); case 69: return jjMoveStringLiteralDfa1_0(0x0L, 0x20080000L, 0x0L, 0x0L); case 70: return jjMoveStringLiteralDfa1_0(0x1000L, 0x2400000000L, 0x0L, 0x0L); case 71: return jjMoveStringLiteralDfa1_0(0x0L, 0x400000000000L, 0x0L, 0x0L); case 73: return jjMoveStringLiteralDfa1_0(0x0L, 0x1804800000000000L, 0x0L, 0x0L); case 76: return jjMoveStringLiteralDfa1_0(0x0L, 0x2000000000000000L, 0x0L, 0x0L); case 77: return jjMoveStringLiteralDfa1_0(0x0L, 0x0L, 0x7fc00000000082L, 0x0L); case 78: return jjMoveStringLiteralDfa1_0(0x80L, 0x0L, 0x14200L, 0x0L); case 79: return jjMoveStringLiteralDfa1_0(0x0L, 0x0L, 0x20000L, 0x0L); case 80: return jjMoveStringLiteralDfa1_0(0x0L, 0x0L, 0x1c940000L, 0x0L); case 82: return jjMoveStringLiteralDfa1_0(0x20000L, 0x0L, 0x0L, 0x0L); case 83: return jjMoveStringLiteralDfa1_0(0x0L, 0x0L, 0x80803ff060000000L, 0x93L); case 84: return jjMoveStringLiteralDfa1_0(0x40800L, 0x0L, 0x0L, 0x1b8400L); case 85: return jjMoveStringLiteralDfa1_0(0x10000L, 0x0L, 0x0L, 0x0L); case 86: return jjMoveStringLiteralDfa1_0(0x0L, 0x0L, 0x0L, 0x42000000L); case 87: return jjMoveStringLiteralDfa1_0(0x0L, 0x0L, 0x0L, 0x80000000L); case 91: return jjStopAtPos(0, 13); case 93: return jjStopAtPos(0, 14); case 97: return jjMoveStringLiteralDfa1_0(0x2000000100000L, 0x22000L, 0x2000400L, 0x20L); case 98: return jjMoveStringLiteralDfa1_0(0x500000c00c000000L, 0x40000000L, 0x0L, 0x4L); case 99: return jjMoveStringLiteralDfa1_0(0x120000000000040L, 0x44c8000100700000L, 0x0L, 0x40208L); case 100: return jjMoveStringLiteralDfa1_0(0x2000000L, 
0x40800L, 0x80000004L, 0x0L); case 101: return jjMoveStringLiteralDfa1_0(0x0L, 0x80000400L, 0x600000010L, 0x0L); case 102: return jjMoveStringLiteralDfa1_0(0x10000000000L, 0x8800000000L, 0x800000000L, 0x10004000L); case 103: return jjMoveStringLiteralDfa1_0(0x500000000000L, 0x0L, 0x0L, 0x40L); case 104: return jjMoveStringLiteralDfa1_0(0x800000000000000L, 0x20000000000L, 0x800L, 0x0L); case 105: return jjMoveStringLiteralDfa1_0(0x0L, 0x10000L, 0x80000L, 0x200000000L); case 106: return jjMoveStringLiteralDfa1_0(0x0L, 0x4000000000L, 0x0L, 0x20000000L); case 107: return jjMoveStringLiteralDfa1_0(0x0L, 0xcL, 0x0L, 0x0L); case 108: return jjMoveStringLiteralDfa1_0(0x20200000000L, 0x140000000000L, 0x1000001L, 0x1000L); case 109: return jjMoveStringLiteralDfa1_0(0x20000000L, 0x300L, 0x1e00000100600000L, 0x2000L); case 110: return jjMoveStringLiteralDfa1_0(0x0L, 0x210000000800000L, 0x0L, 0x0L); case 111: return jjMoveStringLiteralDfa1_0(0x0L, 0x9000L, 0x0L, 0x8000000L); case 112: return jjMoveStringLiteralDfa1_0(0x40000401000000L, 0x1L, 0x2000000000000000L, 0x4000000L); case 114: return jjMoveStringLiteralDfa1_0(0x40000200000L, 0x8003000000000040L, 0x0L, 0x200000L); case 115: return jjMoveStringLiteralDfa1_0(0x2008a01800000000L, 0x210201000000L, 0x4100000000000128L, 0xc00800L); case 116: return jjMoveStringLiteralDfa1_0(0x800c0000000L, 0x120080000000020L, 0x1040L, 0x101000000L); case 117: return jjMoveStringLiteralDfa1_0(0x800000L, 0x0L, 0x0L, 0x0L); case 118: return jjMoveStringLiteralDfa1_0(0x0L, 0x1000000000L, 0xa000L, 0x0L); case 119: return jjMoveStringLiteralDfa1_0(0x0L, 0x0L, 0x0L, 0x100L); case 120: return jjMoveStringLiteralDfa1_0(0x0L, 0x6000000L, 0x0L, 0x0L); case 122: return jjMoveStringLiteralDfa1_0(0x0L, 0x18000000L, 0x0L, 0x0L); case 123: return jjStopAtPos(0, 8); case 125: return jjStopAtPos(0, 9); default : return jjMoveNfa_0(0, 0); } } private final int jjMoveStringLiteralDfa1_0(long active0, long active1, long active2, long active3) { try { curChar = 
input_stream.readChar(); } catch(java.io.IOException e) { jjStopStringLiteralDfa_0(0, active0, active1, active2, active3); return 1; } switch(curChar) { case 65: return jjMoveStringLiteralDfa2_0(active0, 0x1000L, active1, 0L, active2, 0L, active3, 0L); case 68: return jjMoveStringLiteralDfa2_0(active0, 0L, active1, 0xa000000L, active2, 0L, active3, 0L); case 69: return jjMoveStringLiteralDfa2_0(active0, 0x8000L, active1, 0L, active2, 0L, active3, 0L); case 70: return jjMoveStringLiteralDfa2_0(active0, 0L, active1, 0L, active2, 0x7ffff000000000L, active3, 0L); case 79: if ((active0 & 0x40000L) != 0L) return jjStartNfaWithStates_0(1, 18, 6); return jjMoveStringLiteralDfa2_0(active0, 0x20000L, active1, 0x2000000000000000L, active2, 0L, active3, 0L); case 82: return jjMoveStringLiteralDfa2_0(active0, 0x800L, active1, 0L, active2, 0L, active3, 0L); case 83: return jjMoveStringLiteralDfa2_0(active0, 0x10000L, active1, 0x14000000L, active2, 0L, active3, 0L); case 85: return jjMoveStringLiteralDfa2_0(active0, 0x80L, active1, 0L, active2, 0L, active3, 0L); case 97: return jjMoveStringLiteralDfa2_0(active0, 0x6021000000L, active1, 0x8000108000000240L, active2, 0xa00000000400202L, active3, 0x2000L); case 98: return jjMoveStringLiteralDfa2_0(active0, 0xc000000L, active1, 0L, active2, 0L, active3, 0L); case 99: return jjMoveStringLiteralDfa2_0(active0, 0L, active1, 0x100000L, active2, 0x60000000L, active3, 0xc00000L); case 100: return jjMoveStringLiteralDfa2_0(active0, 0x100000L, active1, 0L, active2, 0L, active3, 0L); case 101: return jjMoveStringLiteralDfa2_0(active0, 0x8000200c2200000L, active1, 0x412304004000000cL, active2, 0x8801L, active3, 0x19444L); case 102: return jjMoveStringLiteralDfa2_0(active0, 0L, active1, 0x1000L, active2, 0L, active3, 0L); case 104: return jjMoveStringLiteralDfa2_0(active0, 0x40L, active1, 0L, active2, 0x20L, active3, 0x310L); case 105: return jjMoveStringLiteralDfa2_0(active0, 0x2009040400000000L, active1, 0x1000044900L, active2, 
// NOTE(review): machine-generated lexer code -- the jj* naming (jjmatchedKind,
// jjmatchedPos, jjStartNfaWithStates_0, jjStopStringLiteralDfa_0) is presumably
// JavaCC TokenManager output; do not hand-edit, regenerate from the grammar instead.
// Pattern visible in this chunk: each jjMoveStringLiteralDfaN_0(old*, active*) step
// handles input position N. The active0..active3 longs are bitsets of still-viable
// token kinds (64 kinds per word); they are intersected with the caller's masks and,
// if all four become zero, matching falls back via jjStartNfa_0. Otherwise the next
// char is read (IOException => jjStopStringLiteralDfa_0 and return N+1), a switch on
// the char code either records a completed literal (jjStartNfaWithStates_0(pos, kind,
// state) / jjmatchedKind+jjmatchedPos) or recurses to position N+1 with narrowed masks.
// This span: remainder of the position-1 dispatch, then jjMoveStringLiteralDfa2_0.
0x1400000880242004L, active3, 0x152020000L); case 107: return jjMoveStringLiteralDfa2_0(active0, 0xa00000000000L, active1, 0L, active2, 0L, active3, 0L); case 108: return jjMoveStringLiteralDfa2_0(active0, 0L, active1, 0x80000L, active2, 0x100000L, active3, 0L); case 109: return jjMoveStringLiteralDfa2_0(active0, 0L, active1, 0x800000020000L, active2, 0x80010L, active3, 0L); case 110: if ((active1 & 0x8000L) != 0L) return jjStartNfaWithStates_0(1, 79, 6); return jjMoveStringLiteralDfa2_0(active0, 0x400000L, active1, 0x1804000080010400L, active2, 0L, active3, 0x200000000L); case 111: return jjMoveStringLiteralDfa2_0(active0, 0xd7b4088200000000L, active1, 0x6d80a2c01a00023L, active2, 0x408000000d814080L, active3, 0x84284000L); case 112: return jjMoveStringLiteralDfa2_0(active0, 0x10000000L, active1, 0x200200000000L, active2, 0x8100000000000108L, active3, 0x23L); case 114: return jjMoveStringLiteralDfa2_0(active0, 0x40510000800000L, active1, 0x400100400000L, active2, 0x2000000010020040L, active3, 0x9100000L); case 116: return jjMoveStringLiteralDfa2_0(active0, 0x1800000000L, active1, 0x10000000000L, active2, 0x2000000L, active3, 0x800L); case 117: return jjMoveStringLiteralDfa2_0(active0, 0x100000000L, active1, 0x4000002000L, active2, 0x100000000L, active3, 0x20000008L); case 118: return jjMoveStringLiteralDfa2_0(active0, 0L, active1, 0L, active2, 0x600000400L, active3, 0L); case 119: return jjMoveStringLiteralDfa2_0(active0, 0L, active1, 0L, active2, 0L, active3, 0x80L); case 120: return jjMoveStringLiteralDfa2_0(active0, 0x2000000000000L, active1, 0x20000000L, active2, 0L, active3, 0L); case 121: return jjMoveStringLiteralDfa2_0(active0, 0L, active1, 0x90L, active2, 0x1000L, active3, 0x40000L); default : break; } return jjStartNfa_0(0, active0, active1, active2, active3); } private final int jjMoveStringLiteralDfa2_0(long old0, long active0, long old1, long active1, long old2, long active2, long old3, long active3) { if (((active0 &= old0) | (active1 &= old1) |
// Dispatch on the 3rd character (position 2) of a candidate string literal.
(active2 &= old2) | (active3 &= old3)) == 0L) return jjStartNfa_0(0, old0, old1, old2, old3); try { curChar = input_stream.readChar(); } catch(java.io.IOException e) { jjStopStringLiteralDfa_0(1, active0, active1, active2, active3); return 2; } switch(curChar) { case 66: return jjMoveStringLiteralDfa3_0(active0, 0L, active1, 0L, active2, 0x1000000000L, active3, 0L); case 67: return jjMoveStringLiteralDfa3_0(active0, 0L, active1, 0L, active2, 0x402000000000L, active3, 0L); case 68: if ((active1 & 0x2000000000000000L) != 0L) return jjStartNfaWithStates_0(2, 125, 6); break; case 69: if ((active0 & 0x10000L) != 0L) return jjStartNfaWithStates_0(2, 16, 6); break; case 70: if ((active0 & 0x8000L) != 0L) return jjStartNfaWithStates_0(2, 15, 6); return jjMoveStringLiteralDfa3_0(active0, 0L, active1, 0L, active2, 0x804000000000L, active3, 0L); case 73: return jjMoveStringLiteralDfa3_0(active0, 0L, active1, 0L, active2, 0x1008000000000L, active3, 0L); case 76: return jjMoveStringLiteralDfa3_0(active0, 0x1080L, active1, 0L, active2, 0L, active3, 0L); case 78: return jjMoveStringLiteralDfa3_0(active0, 0L, active1, 0L, active2, 0x2010000000000L, active3, 0L); case 82: return jjMoveStringLiteralDfa3_0(active0, 0L, active1, 0L, active2, 0x4020000000000L, active3, 0L); case 83: return jjMoveStringLiteralDfa3_0(active0, 0L, active1, 0L, active2, 0x8040000000000L, active3, 0L); case 84: return jjMoveStringLiteralDfa3_0(active0, 0L, active1, 0L, active2, 0x10080000000000L, active3, 0L); case 85: return jjMoveStringLiteralDfa3_0(active0, 0x20800L, active1, 0L, active2, 0L, active3, 0L); case 86: return jjMoveStringLiteralDfa3_0(active0, 0L, active1, 0L, active2, 0x60300000000000L, active3, 0L); case 97: return jjMoveStringLiteralDfa3_0(active0, 0x800000000L, active1, 0xa00000000400L, active2, 0x100000020180c40L, active3, 0x1d00014L); case 98: return jjMoveStringLiteralDfa3_0(active0, 0L, active1, 0x20000L, active2, 0L, active3, 0L); case 99: return jjMoveStringLiteralDfa3_0(active0,
0x6000400000L, active1, 0L, active2, 0x1008000L, active3, 0x40000L); case 100: return jjMoveStringLiteralDfa3_0(active0, 0x2000000100100000L, active1, 0x804000080000040L, active2, 0L, active3, 0L); case 101: return jjMoveStringLiteralDfa3_0(active0, 0L, active1, 0x480000L, active2, 0xe00000108L, active3, 0x12000000L); case 102: return jjMoveStringLiteralDfa3_0(active0, 0x20000000000L, active1, 0x40000001000L, active2, 0x4L, active3, 0x200000000L); case 103: if ((active1 & 0x400000000L) != 0L) return jjStartNfaWithStates_0(2, 98, 6); return jjMoveStringLiteralDfa3_0(active0, 0x40000000000L, active1, 0x840000000L, active2, 0L, active3, 0L); case 104: return jjMoveStringLiteralDfa3_0(active0, 0L, active1, 0L, active2, 0x8000000000000000L, active3, 0x1L); case 105: return jjMoveStringLiteralDfa3_0(active0, 0x802000000000040L, active1, 0x20a000001L, active2, 0x2000000004820030L, active3, 0x8000180L); case 108: if ((active0 & 0x800000L) != 0L) return jjStartNfaWithStates_0(2, 23, 6); return jjMoveStringLiteralDfa3_0(active0, 0x3b1000000000000L, active1, 0x1080000001200090L, active2, 0L, active3, 0L); case 109: return jjMoveStringLiteralDfa3_0(active0, 0x200000L, active1, 0x8000000000L, active2, 0L, active3, 0x20020000L); case 110: return jjMoveStringLiteralDfa3_0(active0, 0x400000000000000L, active1, 0xc400102000000100L, active2, 0x1400000000200000L, active3, 0x5000L); case 111: return jjMoveStringLiteralDfa3_0(active0, 0x804051120c000000L, active1, 0x48400100000002L, active2, 0x10000000L, active3, 0x242L); case 112: if ((active1 & 0x20L) != 0L) { jjmatchedKind = 69; jjmatchedPos = 2; } return jjMoveStringLiteralDfa3_0(active0, 0x80010000000L, active1, 0x3080014000000L, active2, 0x1000L, active3, 0x20L); case 114: return jjMoveStringLiteralDfa3_0(active0, 0x1000000L, active1, 0x210020000844000L, active2, 0xc0014000L, active3, 0x80000800L); case 115: return jjMoveStringLiteralDfa3_0(active0, 0x2000000L, active1, 0x5000000800L, active2, 0x108002000L, active3, 0x44000000L);
// (generated lexer code, continued) Tail of jjMoveStringLiteralDfa2_0's dispatch on
// the 3rd char, then jjMoveStringLiteralDfa3_0 handling position 3: same template --
// intersect active* masks with caller's, bail to jjStartNfa_0 when empty, read next
// char (IOException => jjStopStringLiteralDfa_0, return 3), then switch on char code.
case 116: return jjMoveStringLiteralDfa3_0(active0, 0x5000008420000000L, active1, 0x20012000L, active2, 0x2000002L, active3, 0x100200008L); case 117: return jjMoveStringLiteralDfa3_0(active0, 0L, active1, 0L, active2, 0x4080000000000000L, active3, 0x80000L); case 118: return jjMoveStringLiteralDfa3_0(active0, 0L, active1, 0L, active2, 0x281L, active3, 0L); case 119: if ((active1 & 0x100000L) != 0L) return jjStartNfaWithStates_0(2, 84, 6); break; case 120: if ((active0 & 0x4000000000000L) != 0L) return jjStartNfaWithStates_0(2, 50, 6); return jjMoveStringLiteralDfa3_0(active0, 0xc0000000L, active1, 0x120000000000200L, active2, 0xa00000000440000L, active3, 0x1a400L); case 121: if ((active1 & 0x4L) != 0L) { jjmatchedKind = 66; jjmatchedPos = 2; } return jjMoveStringLiteralDfa3_0(active0, 0xa00000000000L, active1, 0x10000000008L, active2, 0L, active3, 0L); case 122: return jjMoveStringLiteralDfa3_0(active0, 0x8000000000000L, active1, 0L, active2, 0L, active3, 0L); default : break; } return jjStartNfa_0(1, active0, active1, active2, active3); } private final int jjMoveStringLiteralDfa3_0(long old0, long active0, long old1, long active1, long old2, long active2, long old3, long active3) { if (((active0 &= old0) | (active1 &= old1) | (active2 &= old2) | (active3 &= old3)) == 0L) return jjStartNfa_0(1, old0, old1, old2, old3); try { curChar = input_stream.readChar(); } catch(java.io.IOException e) { jjStopStringLiteralDfa_0(2, active0, active1, active2, active3); return 3; } switch(curChar) { case 65: return jjMoveStringLiteralDfa4_0(active0, 0x200000000000L, active1, 0x300L, active2, 0L, active3, 0L); case 66: return jjMoveStringLiteralDfa4_0(active0, 0L, active1, 0L, active2, 0x600000000000000L, active3, 0L); case 67: return jjMoveStringLiteralDfa4_0(active0, 0x800000100000L, active1, 0x120000080000000L, active2, 0L, active3, 0L); case 69: if ((active0 & 0x800L) != 0L) return jjStartNfaWithStates_0(3, 11, 6); return jjMoveStringLiteralDfa4_0(active0, 0L, active1, 0L,
active2, 0L, active3, 0x2000L); case 70: return jjMoveStringLiteralDfa4_0(active0, 0L, active1, 0L, active2, 0x1800000000000000L, active3, 0L); case 76: if ((active0 & 0x80L) != 0L) return jjStartNfaWithStates_0(3, 7, 6); break; case 79: return jjMoveStringLiteralDfa4_0(active0, 0L, active1, 0L, active2, 0L, active3, 0x8L); case 80: return jjMoveStringLiteralDfa4_0(active0, 0L, active1, 0L, active2, 0x600000L, active3, 0L); case 83: return jjMoveStringLiteralDfa4_0(active0, 0x1000L, active1, 0L, active2, 0L, active3, 0L); case 84: return jjMoveStringLiteralDfa4_0(active0, 0x20000L, active1, 0x80800000000L, active2, 0L, active3, 0L); case 85: return jjMoveStringLiteralDfa4_0(active0, 0x80000000000L, active1, 0L, active2, 0L, active3, 0L); case 86: return jjMoveStringLiteralDfa4_0(active0, 0L, active1, 0x8L, active2, 0L, active3, 0L); case 97: return jjMoveStringLiteralDfa4_0(active0, 0x1000000L, active1, 0x14400000L, active2, 0x1000000L, active3, 0x200000L); case 98: return jjMoveStringLiteralDfa4_0(active0, 0L, active1, 0x400L, active2, 0L, active3, 0L); case 99: return jjMoveStringLiteralDfa4_0(active0, 0x402000000L, active1, 0x200000000000L, active2, 0x8L, active3, 0x80100L); case 100: return jjMoveStringLiteralDfa4_0(active0, 0L, active1, 0L, active2, 0x800L, active3, 0L); case 101: if ((active0 & 0x8000000000000L) != 0L) return jjStartNfaWithStates_0(3, 51, 6); else if ((active0 & 0x400000000000000L) != 0L) return jjStartNfaWithStates_0(3, 58, 6); else if ((active0 & 0x2000000000000000L) != 0L) return jjStartNfaWithStates_0(3, 61, 6); else if ((active2 & 0x1000L) != 0L) return jjStartNfaWithStates_0(3, 140, 6); return jjMoveStringLiteralDfa4_0(active0, 0x30000000L, active1, 0x807000000054000L, active2, 0x8060300082060103L, active3, 0x8020021L); case 102: return jjMoveStringLiteralDfa4_0(active0, 0L, active1, 0L, active2, 0x4L, active3, 0L); case 103: return jjMoveStringLiteralDfa4_0(active0, 0x800000000000000L, active1, 0x8000900000000000L, active2, 0x80000L,
active3, 0x1000L); case 104: return jjMoveStringLiteralDfa4_0(active0, 0x40000400000L, active1, 0L, active2, 0L, active3, 0L); case 105: return jjMoveStringLiteralDfa4_0(active0, 0x100000000L, active1, 0x10000290410200d0L, active2, 0x10080048002280L, active3, 0x44000a00L); case 107: return jjMoveStringLiteralDfa4_0(active0, 0x6000000000L, active1, 0x800L, active2, 0L, active3, 0L); case 108: return jjMoveStringLiteralDfa4_0(active0, 0x31000000000040L, active1, 0x10000000000L, active2, 0x804820000000L, active3, 0x190c40000L); case 109: return jjMoveStringLiteralDfa4_0(active0, 0L, active1, 0x21000000a800000L, active2, 0x14000L, active3, 0x44L); case 110: return jjMoveStringLiteralDfa4_0(active0, 0x10000000000L, active1, 0x200000001L, active2, 0x81008604900060L, active3, 0x1100000L); case 111: if ((active3 & 0x200000000L) != 0L) return jjStartNfaWithStates_0(3, 225, 6); return jjMoveStringLiteralDfa4_0(active0, 0x380000000200000L, active1, 0x80000000202000L, active2, 0x2006433000000000L, active3, 0L); case 112: if ((active0 & 0x200000000L) != 0L) return jjStartNfaWithStates_0(3, 33, 6); else if ((active3 & 0x20000000L) != 0L) return jjStartNfaWithStates_0(3, 221, 6); return jjMoveStringLiteralDfa4_0(active0, 0x1000000000L, active1, 0L, active2, 0L, active3, 0x10L); case 114: return jjMoveStringLiteralDfa4_0(active0, 0x8000000800000000L, active1, 0x48000020000002L, active2, 0x4000000000000000L, active3, 0L); case 115: return jjMoveStringLiteralDfa4_0(active0, 0x2000000000000L, active1, 0x100001000L, active2, 0x10L, active3, 0L); case 116: if ((active3 & 0x400L) != 0L) { jjmatchedKind = 202; jjmatchedPos = 3; } return jjMoveStringLiteralDfa4_0(active0, 0x50000280c0000000L, active1, 0x4000046000000000L, active2, 0x108040100008400L, active3, 0x1c082L); case 117: return jjMoveStringLiteralDfa4_0(active0, 0x500000000000L, active1, 0x400000000000L, active2, 0L, active3, 0L); case 118: return jjMoveStringLiteralDfa4_0(active0, 0L, active1, 0x400000000080000L, active2, 0L,
// (generated lexer code, continued) Tail of jjMoveStringLiteralDfa3_0, then
// jjMoveStringLiteralDfa4_0 for position 4. Chains where a shorter literal is also a
// valid token set jjmatchedKind/jjmatchedPos as a provisional match before recursing.
active3, 0L); case 119: return jjMoveStringLiteralDfa4_0(active0, 0L, active1, 0L, active2, 0L, active3, 0x2000000L); case 120: return jjMoveStringLiteralDfa4_0(active0, 0x4000000c000000L, active1, 0L, active2, 0x10000000L, active3, 0L); default : break; } return jjStartNfa_0(2, active0, active1, active2, active3); } private final int jjMoveStringLiteralDfa4_0(long old0, long active0, long old1, long active1, long old2, long active2, long old3, long active3) { if (((active0 &= old0) | (active1 &= old1) | (active2 &= old2) | (active3 &= old3)) == 0L) return jjStartNfa_0(2, old0, old1, old2, old3); try { curChar = input_stream.readChar(); } catch(java.io.IOException e) { jjStopStringLiteralDfa_0(3, active0, active1, active2, active3); return 4; } switch(curChar) { case 65: return jjMoveStringLiteralDfa5_0(active0, 0L, active1, 0x800L, active2, 0L, active3, 0L); case 67: return jjMoveStringLiteralDfa5_0(active0, 0x4000000L, active1, 0L, active2, 0L, active3, 0L); case 69: if ((active0 & 0x1000L) != 0L) return jjStartNfaWithStates_0(4, 12, 6); else if ((active0 & 0x20000L) != 0L) return jjStartNfaWithStates_0(4, 17, 6); return jjMoveStringLiteralDfa5_0(active0, 0L, active1, 0L, active2, 0x100000000L, active3, 0L); case 76: return jjMoveStringLiteralDfa5_0(active0, 0L, active1, 0L, active2, 0L, active3, 0x2L); case 79: return jjMoveStringLiteralDfa5_0(active0, 0x2000000000000L, active1, 0x2000L, active2, 0L, active3, 0L); case 83: return jjMoveStringLiteralDfa5_0(active0, 0x8000000L, active1, 0x2000000000L, active2, 0L, active3, 0x24000L); case 84: return jjMoveStringLiteralDfa5_0(active0, 0x1000000000L, active1, 0x40000000000L, active2, 0L, active3, 0L); case 85: return jjMoveStringLiteralDfa5_0(active0, 0x24000000000L, active1, 0L, active2, 0L, active3, 0L); case 87: return jjMoveStringLiteralDfa5_0(active0, 0L, active1, 0L, active2, 0L, active3, 0x4L); case 97: return jjMoveStringLiteralDfa5_0(active0, 0x10000000L, active1, 0x213000080880008L, active2,
0x600000020014400L, active3, 0x20L); case 98: return jjMoveStringLiteralDfa5_0(active0, 0x1000000000000L, active1, 0x1000000000L, active2, 0x2000L, active3, 0x40000000L); case 99: return jjMoveStringLiteralDfa5_0(active0, 0L, active1, 0x14044000L, active2, 0x4060300080000000L, active3, 0x280L); case 100: if ((active1 & 0x1000000L) != 0L) return jjStartNfaWithStates_0(4, 88, 6); else if ((active1 & 0x8000000000000L) != 0L) { jjmatchedKind = 115; jjmatchedPos = 4; } else if ((active2 & 0x100L) != 0L) return jjStartNfaWithStates_0(4, 136, 6); else if ((active2 & 0x800000000L) != 0L) { jjmatchedKind = 163; jjmatchedPos = 4; } else if ((active2 & 0x80000000000000L) != 0L) return jjStartNfaWithStates_0(4, 183, 6); return jjMoveStringLiteralDfa5_0(active0, 0x8000000000000040L, active1, 0x40000000000002L, active2, 0x2010000000000L, active3, 0x90000000L); case 101: if ((active1 & 0x200000000L) != 0L) return jjStartNfaWithStates_0(4, 97, 6); else if ((active1 & 0x10000000000L) != 0L) return jjStartNfaWithStates_0(4, 104, 6); else if ((active1 & 0x8000000000000000L) != 0L) return jjStartNfaWithStates_0(4, 127, 6); else if ((active2 & 0x80000L) != 0L) return jjStartNfaWithStates_0(4, 147, 6); else if ((active3 & 0x10L) != 0L) return jjStartNfaWithStates_0(4, 196, 6); else if ((active3 & 0x400000L) != 0L) { jjmatchedKind = 214; jjmatchedPos = 4; } else if ((active3 & 0x100000000L) != 0L) return jjStartNfaWithStates_0(4, 224, 6); return jjMoveStringLiteralDfa5_0(active0, 0L, active1, 0x440080000a021000L, active2, 0x100080L, active3, 0x840040L); case 102: return jjMoveStringLiteralDfa5_0(active0, 0L, active1, 0L, active2, 0L, active3, 0x8L); case 103: return jjMoveStringLiteralDfa5_0(active0, 0x2000000000L, active1, 0L, active2, 0x200L, active3, 0L); case 104: if ((active0 & 0x400000000L) != 0L) return jjStartNfaWithStates_0(4, 34, 6); return jjMoveStringLiteralDfa5_0(active0, 0x800000000100000L, active1, 0L, active2, 0L, active3, 0x80100L); case 105: return
jjMoveStringLiteralDfa5_0(active0, 0x30000000000000L, active1, 0x204000000000L, active2, 0x100000010000020L, active3, 0L); case 108: if ((active2 & 0x1L) != 0L) return jjStartNfaWithStates_0(4, 128, 6); return jjMoveStringLiteralDfa5_0(active0, 0L, active1, 0x8000000400L, active2, 0x402000040800L, active3, 0L); case 109: return jjMoveStringLiteralDfa5_0(active0, 0x1000000L, active1, 0L, active2, 0x10080000000000L, active3, 0L); case 110: return jjMoveStringLiteralDfa5_0(active0, 0x700000000000L, active1, 0x1000000040010390L, active2, 0x2020000L, active3, 0x8000800L); case 111: return jjMoveStringLiteralDfa5_0(active0, 0x5000808100400000L, active1, 0x120080000000000L, active2, 0x805000608000L, active3, 0L); case 112: if ((active1 & 0x400000000000L) != 0L) return jjStartNfaWithStates_0(4, 110, 6); return jjMoveStringLiteralDfa5_0(active0, 0L, active1, 0L, active2, 0x40000000L, active3, 0x2000000L); case 114: if ((active0 & 0x80000000000000L) != 0L) { jjmatchedKind = 55; jjmatchedPos = 4; } else if ((active0 & 0x100000000000000L) != 0L) { jjmatchedKind = 56; jjmatchedPos = 4; } return jjMoveStringLiteralDfa5_0(active0, 0x200080022000000L, active1, 0x80000000200000L, active2, 0xb808040000000002L, active3, 0x1L); case 115: return jjMoveStringLiteralDfa5_0(active0, 0L, active1, 0x100400000L, active2, 0x50L, active3, 0x1100000L); case 116: if ((active1 & 0x1L) != 0L) return jjStartNfaWithStates_0(4, 64, 6); return jjMoveStringLiteralDfa5_0(active0, 0x50800000000L, active1, 0L, active2, 0x502860d800000L, active3, 0x4201000L); case 117: return jjMoveStringLiteralDfa5_0(active0, 0xc0000000L, active1, 0x100020000040L, active2, 0xcL, active3, 0x18000L); case 118: return jjMoveStringLiteralDfa5_0(active0, 0x200000L, active1, 0L, active2, 0L, active3, 0L); case 120: return jjMoveStringLiteralDfa5_0(active0, 0L, active1, 0x804000000000000L, active2, 0L, active3, 0x2000L); case 121: if ((active0 & 0x40000000000000L) != 0L) return jjStartNfaWithStates_0(4, 54, 6); return
// (generated lexer code, continued) Tail of jjMoveStringLiteralDfa4_0, then
// jjMoveStringLiteralDfa5_0 for position 5. Cases 50/51 here dispatch on the digit
// characters '2' and '3', so some keywords in this grammar contain digits.
jjMoveStringLiteralDfa5_0(active0, 0L, active1, 0x800000000L, active2, 0L, active3, 0L); case 122: return jjMoveStringLiteralDfa5_0(active0, 0L, active1, 0x20000000000L, active2, 0L, active3, 0L); default : break; } return jjStartNfa_0(3, active0, active1, active2, active3); } private final int jjMoveStringLiteralDfa5_0(long old0, long active0, long old1, long active1, long old2, long active2, long old3, long active3) { if (((active0 &= old0) | (active1 &= old1) | (active2 &= old2) | (active3 &= old3)) == 0L) return jjStartNfa_0(3, old0, old1, old2, old3); try { curChar = input_stream.readChar(); } catch(java.io.IOException e) { jjStopStringLiteralDfa_0(4, active0, active1, active2, active3); return 5; } switch(curChar) { case 50: return jjMoveStringLiteralDfa6_0(active0, 0L, active1, 0L, active2, 0x20100000000000L, active3, 0L); case 51: return jjMoveStringLiteralDfa6_0(active0, 0L, active1, 0L, active2, 0x41208000000000L, active3, 0L); case 66: return jjMoveStringLiteralDfa6_0(active0, 0L, active1, 0x80000000000L, active2, 0L, active3, 0L); case 67: return jjMoveStringLiteralDfa6_0(active0, 0x100000000L, active1, 0x40000000L, active2, 0L, active3, 0x100L); case 73: return jjMoveStringLiteralDfa6_0(active0, 0x200000000000000L, active1, 0xc0000000000000L, active2, 0x200000000L, active3, 0x80040000L); case 76: return jjMoveStringLiteralDfa6_0(active0, 0L, active1, 0L, active2, 0x800000L, active3, 0L); case 79: return jjMoveStringLiteralDfa6_0(active0, 0L, active1, 0L, active2, 0x400000000L, active3, 0x10800000L); case 80: return jjMoveStringLiteralDfa6_0(active0, 0L, active1, 0x200000L, active2, 0L, active3, 0L); case 83: return jjMoveStringLiteralDfa6_0(active0, 0L, active1, 0x100000000L, active2, 0x4100000L, active3, 0x80000L); case 84: return jjMoveStringLiteralDfa6_0(active0, 0x800000000L, active1, 0x800000000000L, active2, 0x40080L, active3, 0L); case 85: return jjMoveStringLiteralDfa6_0(active0, 0x50000000000L, active1, 0L, active2, 0L, active3, 0L); case 97:
return jjMoveStringLiteralDfa6_0(active0, 0L, active1, 0x100000000000L, active2, 0x104824000000200L, active3, 0L); case 99: return jjMoveStringLiteralDfa6_0(active0, 0L, active1, 0L, active2, 0x600000000000000L, active3, 0L); case 100: return jjMoveStringLiteralDfa6_0(active0, 0x20500000000000L, active1, 0x90L, active2, 0L, active3, 0L); case 101: if ((active1 & 0x1000000000000000L) != 0L) return jjStartNfaWithStates_0(5, 124, 6); else if ((active2 & 0x10000000000L) != 0L) return jjStartNfaWithStates_0(5, 168, 6); else if ((active2 & 0x80000000000L) != 0L) return jjStartNfaWithStates_0(5, 171, 6); else if ((active2 & 0x2000000000000L) != 0L) return jjStartNfaWithStates_0(5, 177, 6); else if ((active2 & 0x10000000000000L) != 0L) return jjStartNfaWithStates_0(5, 180, 6); else if ((active2 & 0x4000000000000000L) != 0L) return jjStartNfaWithStates_0(5, 190, 6); else if ((active2 & 0x8000000000000000L) != 0L) { jjmatchedKind = 191; jjmatchedPos = 5; } else if ((active3 & 0x200L) != 0L) return jjStartNfaWithStates_0(5, 201, 6); return jjMoveStringLiteralDfa6_0(active0, 0x5200000L, active1, 0x804000000400400L, active2, 0L, active3, 0x20001L); case 102: return jjMoveStringLiteralDfa6_0(active0, 0x2000000000000L, active1, 0x4000002000L, active2, 0L, active3, 0x100008L); case 103: if ((active3 & 0x800L) != 0L) return jjStartNfaWithStates_0(5, 203, 6); return jjMoveStringLiteralDfa6_0(active0, 0x200000000000L, active1, 0x300L, active2, 0L, active3, 0L); case 104: if ((active3 & 0x80L) != 0L) return jjStartNfaWithStates_0(5, 199, 6); else if ((active3 & 0x1000L) != 0L) return jjStartNfaWithStates_0(5, 204, 6); break; case 105: return jjMoveStringLiteralDfa6_0(active0, 0x800000102a100000L, active1, 0x1014000002L, active2, 0x2008040009002812L, active3, 0x44200006L); case 108: if ((active0 & 0x80000000000L) != 0L) return jjStartNfaWithStates_0(5, 43, 6); else if ((active1 & 0x10000000000000L) != 0L) { jjmatchedKind = 116; jjmatchedPos = 5; } else if ((active2 & 0x4000L) != 0L) {
jjmatchedKind = 142; jjmatchedPos = 5; } else if ((active2 & 0x1000000000L) != 0L) return jjStartNfaWithStates_0(5, 164, 6); return jjMoveStringLiteralDfa6_0(active0, 0x800000000000L, active1, 0x200000000800008L, active2, 0x10008L, active3, 0x1000000L); case 109: if ((active0 & 0x4000000000000000L) != 0L) { jjmatchedKind = 62; jjmatchedPos = 5; } return jjMoveStringLiteralDfa6_0(active0, 0x1000008000000000L, active1, 0L, active2, 0x10000000L, active3, 0L); case 110: return jjMoveStringLiteralDfa6_0(active0, 0L, active1, 0x20000a020800L, active2, 0x20L, active3, 0L); case 111: return jjMoveStringLiteralDfa6_0(active0, 0x1000000000000L, active1, 0x120060000000000L, active2, 0x1800402000000000L, active3, 0x2000000L); case 112: if ((active1 & 0x80000000L) != 0L) return jjStartNfaWithStates_0(5, 95, 6); return jjMoveStringLiteralDfa6_0(active0, 0L, active1, 0x800000000L, active2, 0x40L, active3, 0L); case 114: if ((active0 & 0x400000L) != 0L) return jjStartNfaWithStates_0(5, 22, 6); else if ((active1 & 0x4000000000000000L) != 0L) return jjStartNfaWithStates_0(5, 126, 6); else if ((active2 & 0x8000L) != 0L) return jjStartNfaWithStates_0(5, 143, 6); return jjMoveStringLiteralDfa6_0(active0, 0x260d0000040L, active1, 0L, active2, 0x20000400L, active3, 0x18020L); case 115: if ((active1 & 0x40L) != 0L) return jjStartNfaWithStates_0(5, 70, 6); return jjMoveStringLiteralDfa6_0(active0, 0x10000000000000L, active1, 0x20010000L, active2, 0x600004L, active3, 0L); case 116: if ((active0 & 0x800000000000000L) != 0L) return jjStartNfaWithStates_0(5, 59, 6); else if ((active1 & 0x1000L) != 0L) return jjStartNfaWithStates_0(5, 76, 6); else if ((active2 & 0x40000000L) != 0L) return jjStartNfaWithStates_0(5, 158, 6); return jjMoveStringLiteralDfa6_0(active0, 0L, active1, 0x30020000c4000L, active2, 0x80020000L, active3, 0x8006040L); case 117: return jjMoveStringLiteralDfa6_0(active0, 0L, active1, 0L, active2, 0x2000000L, active3, 0L); case 118: return jjMoveStringLiteralDfa6_0(active0, 0L,
// (generated lexer code, continued) Tail of jjMoveStringLiteralDfa5_0, then
// jjMoveStringLiteralDfa6_0 for position 6; identical mask-intersect/read/switch shape.
active1, 0L, active2, 0x100000000L, active3, 0L); case 120: if ((active1 & 0x400000000000000L) != 0L) return jjStartNfaWithStates_0(5, 122, 6); break; case 121: if ((active1 & 0x8000000000L) != 0L) return jjStartNfaWithStates_0(5, 103, 6); break; default : break; } return jjStartNfa_0(4, active0, active1, active2, active3); } private final int jjMoveStringLiteralDfa6_0(long old0, long active0, long old1, long active1, long old2, long active2, long old3, long active3) { if (((active0 &= old0) | (active1 &= old1) | (active2 &= old2) | (active3 &= old3)) == 0L) return jjStartNfa_0(4, old0, old1, old2, old3); try { curChar = input_stream.readChar(); } catch(java.io.IOException e) { jjStopStringLiteralDfa_0(5, active0, active1, active2, active3); return 6; } switch(curChar) { case 50: if ((active2 & 0x8000000000L) != 0L) return jjStartNfaWithStates_0(6, 167, 6); else if ((active2 & 0x1000000000000L) != 0L) return jjStartNfaWithStates_0(6, 176, 6); break; case 65: return jjMoveStringLiteralDfa7_0(active0, 0x100000000000L, active1, 0x400000L, active2, 0L, active3, 0x8L); case 67: return jjMoveStringLiteralDfa7_0(active0, 0x400000200000L, active1, 0L, active2, 0L, active3, 0L); case 73: return jjMoveStringLiteralDfa7_0(active0, 0L, active1, 0x200000000000000L, active2, 0x20010000L, active3, 0L); case 79: return jjMoveStringLiteralDfa7_0(active0, 0L, active1, 0L, active2, 0x80000000L, active3, 0L); case 80: return jjMoveStringLiteralDfa7_0(active0, 0L, active1, 0x800000L, active2, 0L, active3, 0L); case 82: return jjMoveStringLiteralDfa7_0(active0, 0x1002000000000000L, active1, 0x40000000000L, active2, 0L, active3, 0L); case 83: if ((active1 & 0x1000000000000L) != 0L) return jjStartNfaWithStates_0(6, 112, 6); return jjMoveStringLiteralDfa7_0(active0, 0L, active1, 0L, active2, 0x400L, active3, 0x1L); case 84: if ((active1 & 0x2000000000000L) != 0L) return jjStartNfaWithStates_0(6, 113, 6); break; case 85: return jjMoveStringLiteralDfa7_0(active0, 0x8000000000L, active1, 0L,
active2, 0L, active3, 0L); case 97: return jjMoveStringLiteralDfa7_0(active0, 0x1000030000000L, active1, 0x40000000L, active2, 0x10202004aL, active3, 0x9000020L); case 100: if ((active1 & 0x400L) != 0L) return jjStartNfaWithStates_0(6, 74, 6); return jjMoveStringLiteralDfa7_0(active0, 0L, active1, 0x804000000000000L, active2, 0L, active3, 0x4L); case 101: if ((active0 & 0x80000000L) != 0L) { jjmatchedKind = 31; jjmatchedPos = 6; } else if ((active0 & 0x20000000000000L) != 0L) return jjStartNfaWithStates_0(6, 53, 6); else if ((active1 & 0x800000000L) != 0L) return jjStartNfaWithStates_0(6, 99, 6); return jjMoveStringLiteralDfa7_0(active0, 0x40000040L, active1, 0x800100200090L, active2, 0x41400a4L, active3, 0x9a000L); case 102: if ((active2 & 0x100000000000L) != 0L) return jjStartNfaWithStates_0(6, 172, 6); else if ((active2 & 0x200000000000L) != 0L) return jjStartNfaWithStates_0(6, 173, 6); else if ((active2 & 0x20000000000000L) != 0L) return jjStartNfaWithStates_0(6, 181, 6); else if ((active2 & 0x40000000000000L) != 0L) return jjStartNfaWithStates_0(6, 182, 6); return jjMoveStringLiteralDfa7_0(active0, 0L, active1, 0x2000L, active2, 0L, active3, 0x10000000L); case 103: if ((active1 & 0x200000000000L) != 0L) return jjStartNfaWithStates_0(6, 109, 6); return jjMoveStringLiteralDfa7_0(active0, 0L, active1, 0x100000000800L, active2, 0x800L, active3, 0x2L); case 104: return jjMoveStringLiteralDfa7_0(active0, 0L, active1, 0L, active2, 0L, active3, 0x100L); case 105: return jjMoveStringLiteralDfa7_0(active0, 0x10000800000000L, active1, 0x200d4000L, active2, 0x10e00000L, active3, 0x2000000L); case 107: if ((active2 & 0x200000000000000L) != 0L) return jjStartNfaWithStates_0(6, 185, 6); else if ((active2 & 0x400000000000000L) != 0L) return jjStartNfaWithStates_0(6, 186, 6); break; case 108: if ((active0 & 0x4000000000L) != 0L) return jjStartNfaWithStates_0(6, 38, 6); else if ((active0 & 0x20000000000L) != 0L) return jjStartNfaWithStates_0(6, 41, 6); return
jjMoveStringLiteralDfa7_0(active0, 0x200100100000L, active1, 0x1000000300L, active2, 0x100000000002000L, active3, 0x40000000L); case 109: return jjMoveStringLiteralDfa7_0(active0, 0x1000000000L, active1, 0L, active2, 0L, active3, 0L); case 110: if ((active2 & 0x200000000L) != 0L) return jjStartNfaWithStates_0(6, 161, 6); return jjMoveStringLiteralDfa7_0(active0, 0x8200000004000000L, active1, 0xc0020014000002L, active2, 0x1808040000000000L, active3, 0x80060000L); case 111: return jjMoveStringLiteralDfa7_0(active0, 0x802000000000L, active1, 0x80000000000L, active2, 0x9000000L, active3, 0x4300000L); case 112: return jjMoveStringLiteralDfa7_0(active0, 0x2000000L, active1, 0L, active2, 0L, active3, 0L); case 114: if ((active2 & 0x2000000000L) != 0L) return jjStartNfaWithStates_0(6, 165, 6); else if ((active2 & 0x400000000000L) != 0L) return jjStartNfaWithStates_0(6, 174, 6); return jjMoveStringLiteralDfa7_0(active0, 0x50000000000L, active1, 0x120000000000000L, active2, 0L, active3, 0x800040L); case 115: return jjMoveStringLiteralDfa7_0(active0, 0L, active1, 0xa000000L, active2, 0L, active3, 0L); case 116: if ((active2 & 0x4000000000L) != 0L) return jjStartNfaWithStates_0(6, 166, 6); else if ((active2 & 0x800000000000L) != 0L) return jjStartNfaWithStates_0(6, 175, 6); return jjMoveStringLiteralDfa7_0(active0, 0x1000000L, active1, 0x20000L, active2, 0x2004020000000200L, active3, 0L); case 117: return jjMoveStringLiteralDfa7_0(active0, 0L, active1, 0x8L, active2, 0x400000000L, active3, 0L); case 118: return jjMoveStringLiteralDfa7_0(active0, 0L, active1, 0L, active2, 0x10L, active3, 0L); case 121: if ((active1 & 0x4000000000L) != 0L) return jjStartNfaWithStates_0(6, 102, 6); return jjMoveStringLiteralDfa7_0(active0, 0L, active1, 0x2000000000L, active2, 0L, active3, 0x4000L); case 122: return jjMoveStringLiteralDfa7_0(active0, 0x8000000L, active1, 0L, active2, 0L, active3, 0L); default : break; } return jjStartNfa_0(5, active0, active1, active2, active3); } private final
// (generated lexer code, continued) jjMoveStringLiteralDfa7_0 for position 7
// (the "private final" modifiers of this declaration sit at the end of the
// preceding chunk), followed by the start of jjMoveStringLiteralDfa8_0.
int jjMoveStringLiteralDfa7_0(long old0, long active0, long old1, long active1, long old2, long active2, long old3, long active3) { if (((active0 &= old0) | (active1 &= old1) | (active2 &= old2) | (active3 &= old3)) == 0L) return jjStartNfa_0(5, old0, old1, old2, old3); try { curChar = input_stream.readChar(); } catch(java.io.IOException e) { jjStopStringLiteralDfa_0(6, active0, active1, active2, active3); return 7; } switch(curChar) { case 67: return jjMoveStringLiteralDfa8_0(active0, 0L, active1, 0L, active2, 0x4L, active3, 0x8000L); case 70: return jjMoveStringLiteralDfa8_0(active0, 0L, active1, 0x4000000000000L, active2, 0L, active3, 0L); case 73: return jjMoveStringLiteralDfa8_0(active0, 0L, active1, 0x20000L, active2, 0L, active3, 0L); case 76: return jjMoveStringLiteralDfa8_0(active0, 0L, active1, 0x800000000000000L, active2, 0L, active3, 0L); case 84: return jjMoveStringLiteralDfa8_0(active0, 0x40000000L, active1, 0L, active2, 0L, active3, 0x10000L); case 86: return jjMoveStringLiteralDfa8_0(active0, 0L, active1, 0L, active2, 0L, active3, 0x10000000L); case 97: return jjMoveStringLiteralDfa8_0(active0, 0x9000000000000000L, active1, 0x2L, active2, 0L, active3, 0L); case 99: return jjMoveStringLiteralDfa8_0(active0, 0L, active1, 0x100000000L, active2, 0L, active3, 0L); case 100: if ((active1 & 0x20000000000000L) != 0L) { jjmatchedKind = 117; jjmatchedPos = 7; } return jjMoveStringLiteralDfa8_0(active0, 0x100000L, active1, 0x1c0000000000000L, active2, 0L, active3, 0L); case 101: if ((active0 & 0x8000000L) != 0L) return jjStartNfaWithStates_0(7, 27, 6); else if ((active0 & 0x1000000000L) != 0L) return jjStartNfaWithStates_0(7, 36, 6); else if ((active0 & 0x200000000000L) != 0L) return jjStartNfaWithStates_0(7, 45, 6); else if ((active1 & 0x8L) != 0L) return jjStartNfaWithStates_0(7, 67, 6); else if ((active1 & 0x100L) != 0L) return jjStartNfaWithStates_0(7, 72, 6); else if ((active1 & 0x200L) != 0L) return jjStartNfaWithStates_0(7, 73, 6); else if ((active1 &
0x100000000000L) != 0L) return jjStartNfaWithStates_0(7, 108, 6); return jjMoveStringLiteralDfa8_0(active0, 0x1000000L, active1, 0x800000L, active2, 0x10L, active3, 0x1L); case 102: return jjMoveStringLiteralDfa8_0(active0, 0L, active1, 0L, active2, 0L, active3, 0x80000000L); case 103: if ((active1 & 0x4000000L) != 0L) return jjStartNfaWithStates_0(7, 90, 6); else if ((active1 & 0x10000000L) != 0L) return jjStartNfaWithStates_0(7, 92, 6); else if ((active2 & 0x40000000000L) != 0L) return jjStartNfaWithStates_0(7, 170, 6); else if ((active2 & 0x8000000000000L) != 0L) return jjStartNfaWithStates_0(7, 179, 6); return jjMoveStringLiteralDfa8_0(active0, 0L, active1, 0L, active2, 0x800000L, active3, 0L); case 104: return jjMoveStringLiteralDfa8_0(active0, 0x200000L, active1, 0L, active2, 0x800L, active3, 0x2L); case 105: return jjMoveStringLiteralDfa8_0(active0, 0x100000000L, active1, 0x4100a000000L, active2, 0x104020000002600L, active3, 0x40800000L); case 108: if ((active0 & 0x20000000L) != 0L) return jjStartNfaWithStates_0(7, 29, 6); else if ((active0 & 0x10000000000L) != 0L) return jjStartNfaWithStates_0(7, 40, 6); else if ((active0 & 0x40000000000L) != 0L) return jjStartNfaWithStates_0(7, 42, 6); else if ((active2 & 0x2L) != 0L) return jjStartNfaWithStates_0(7, 129, 6); return jjMoveStringLiteralDfa8_0(active0, 0L, active1, 0x2000000800L, active2, 0x100000000L, active3, 0x4000L); case 109: return jjMoveStringLiteralDfa8_0(active0, 0x800000000L, active1, 0L, active2, 0L, active3, 0L); case 110: if ((active0 & 0x40L) != 0L) return jjStartNfaWithStates_0(7, 6, 6); else if ((active2 & 0x1000000L) != 0L) return jjStartNfaWithStates_0(7, 152, 6); else if ((active3 & 0x200000L) != 0L) return jjStartNfaWithStates_0(7, 213, 6); else if ((active3 & 0x4000000L) != 0L) return jjStartNfaWithStates_0(7, 218, 6); return jjMoveStringLiteralDfa8_0(active0, 0x100010000000L, active1, 0x200000000400000L, active2, 0x28110000L, active3, 0x2082028L); case 111: return
jjMoveStringLiteralDfa8_0(active0, 0x12400000000000L, active1, 0x200c4000L, active2, 0L, active3, 0x100L); case 112: if ((active1 & 0x40000000L) != 0L) return jjStartNfaWithStates_0(7, 94, 6); break; case 114: if ((active0 & 0x800000000000L) != 0L) return jjStartNfaWithStates_0(7, 47, 6); else if ((active1 & 0x10L) != 0L) { jjmatchedKind = 68; jjmatchedPos = 7; } return jjMoveStringLiteralDfa8_0(active0, 0x1008000000000L, active1, 0x200080L, active2, 0x48L, active3, 0x100000L); case 115: return jjMoveStringLiteralDfa8_0(active0, 0L, active1, 0x2000L, active2, 0x20L, active3, 0x20000L); case 116: if ((active2 & 0x4000000L) != 0L) return jjStartNfaWithStates_0(7, 154, 6); else if ((active2 & 0x400000000L) != 0L) return jjStartNfaWithStates_0(7, 162, 6); else if ((active2 & 0x800000000000000L) != 0L) return jjStartNfaWithStates_0(7, 187, 6); else if ((active2 & 0x1000000000000000L) != 0L) return jjStartNfaWithStates_0(7, 188, 6); return jjMoveStringLiteralDfa8_0(active0, 0x200000006000000L, active1, 0xa0000010000L, active2, 0x12620000L, active3, 0x9040004L); case 117: return jjMoveStringLiteralDfa8_0(active0, 0x2000000000L, active1, 0L, active2, 0x80000000L, active3, 0L); case 120: return jjMoveStringLiteralDfa8_0(active0, 0L, active1, 0x800000000000L, active2, 0x40080L, active3, 0L); case 121: if ((active2 & 0x2000000000000000L) != 0L) return jjStartNfaWithStates_0(7, 189, 6); else if ((active3 & 0x40L) != 0L) return jjStartNfaWithStates_0(7, 198, 6); break; default : break; } return jjStartNfa_0(6, active0, active1, active2, active3); } private final int jjMoveStringLiteralDfa8_0(long old0, long active0, long old1, long active1, long old2, long active2, long old3, long active3) { if (((active0 &= old0) | (active1 &= old1) | (active2 &= old2) | (active3 &= old3)) == 0L) return jjStartNfa_0(6, old0, old1, old2, old3); try { curChar = input_stream.readChar(); } catch(java.io.IOException e) { jjStopStringLiteralDfa_0(7, active0, active1, active2, active3); return 8; }
// (generated lexer code, continued) Body of jjMoveStringLiteralDfa8_0 (position 8),
// then jjMoveStringLiteralDfa9_0 (position 9). The trailing "jjMoveStringLiteralDfa10_0"
// declaration is cut off here and continues in the next chunk of the file.
switch(curChar) { case 67: return jjMoveStringLiteralDfa9_0(active0, 0L, active1, 0L, active2, 0x18L, active3, 0L); case 73: return jjMoveStringLiteralDfa9_0(active0, 0L, active1, 0x100000000000000L, active2, 0x8000000L, active3, 0L); case 83: return jjMoveStringLiteralDfa9_0(active0, 0L, active1, 0x80L, active2, 0L, active3, 0L); case 86: return jjMoveStringLiteralDfa9_0(active0, 0L, active1, 0x200000L, active2, 0L, active3, 0L); case 97: return jjMoveStringLiteralDfa9_0(active0, 0L, active1, 0x4020000000000L, active2, 0L, active3, 0L); case 99: return jjMoveStringLiteralDfa9_0(active0, 0x10000000L, active1, 0L, active2, 0L, active3, 0x20L); case 100: if ((active0 & 0x1000000000000L) != 0L) return jjStartNfaWithStates_0(8, 48, 6); return jjMoveStringLiteralDfa9_0(active0, 0x1000000000000000L, active1, 0x200000000000000L, active2, 0L, active3, 0L); case 101: if ((active0 & 0x800000000L) != 0L) return jjStartNfaWithStates_0(8, 35, 6); else if ((active1 & 0x800L) != 0L) return jjStartNfaWithStates_0(8, 75, 6); else if ((active1 & 0x2000000000L) != 0L) return jjStartNfaWithStates_0(8, 101, 6); else if ((active3 & 0x4000L) != 0L) return jjStartNfaWithStates_0(8, 206, 6); return jjMoveStringLiteralDfa9_0(active0, 0x200000004000000L, active1, 0xc0000000002000L, active2, 0x40L, active3, 0x840000L); case 103: return jjMoveStringLiteralDfa9_0(active0, 0x100000000000L, active1, 0x40000400000L, active2, 0L, active3, 0x8L); case 104: if ((active3 & 0x4L) != 0L) return jjStartNfaWithStates_0(8, 194, 6); return jjMoveStringLiteralDfa9_0(active0, 0L, active1, 0L, active2, 0x800000L, active3, 0L); case 105: return jjMoveStringLiteralDfa9_0(active0, 0x2200000L, active1, 0x800000000000000L, active2, 0x2620000L, active3, 0x19000100L); case 108: if ((active0 & 0x8000000000L) != 0L) return jjStartNfaWithStates_0(8, 39, 6); return jjMoveStringLiteralDfa9_0(active0, 0x400000000000L, active1, 0L, active2, 0L, active3, 0L); case 109: if ((active3 & 0x100000L) != 0L) return
jjStartNfaWithStates_0(8, 212, 6); break; case 110: if ((active0 & 0x10000000000000L) != 0L) return jjStartNfaWithStates_0(8, 52, 6); else if ((active1 & 0x40000L) != 0L) return jjStartNfaWithStates_0(8, 82, 6); else if ((active1 & 0x20000000L) != 0L) return jjStartNfaWithStates_0(8, 93, 6); return jjMoveStringLiteralDfa9_0(active0, 0x2000000000L, active1, 0xa4000L, active2, 0L, active3, 0x1L); case 111: if ((active3 & 0x80000000L) != 0L) return jjStartNfaWithStates_0(8, 223, 6); return jjMoveStringLiteralDfa9_0(active0, 0L, active1, 0xa000000L, active2, 0x4020000000204L, active3, 0x28000L); case 112: if ((active0 & 0x100000000L) != 0L) return jjStartNfaWithStates_0(8, 32, 6); break; case 114: if ((active0 & 0x1000000L) != 0L) return jjStartNfaWithStates_0(8, 24, 6); return jjMoveStringLiteralDfa9_0(active0, 0x40100000L, active1, 0x800000L, active2, 0L, active3, 0x10000L); case 115: if ((active2 & 0x20L) != 0L) return jjStartNfaWithStates_0(8, 133, 6); return jjMoveStringLiteralDfa9_0(active0, 0L, active1, 0L, active2, 0x100000L, active3, 0x80000L); case 116: if ((active2 & 0x800L) != 0L) return jjStartNfaWithStates_0(8, 139, 6); else if ((active3 & 0x2L) != 0L) return jjStartNfaWithStates_0(8, 193, 6); else if ((active3 & 0x2000L) != 0L) return jjStartNfaWithStates_0(8, 205, 6); else if ((active3 & 0x2000000L) != 0L) return jjStartNfaWithStates_0(8, 217, 6); return jjMoveStringLiteralDfa9_0(active0, 0x8002000000000000L, active1, 0x881100000002L, active2, 0xa0052080L, active3, 0x40000000L); case 117: return jjMoveStringLiteralDfa9_0(active0, 0L, active1, 0L, active2, 0x100000000L, active3, 0L); case 121: if ((active1 & 0x10000L) != 0L) return jjStartNfaWithStates_0(8, 80, 6); return jjMoveStringLiteralDfa9_0(active0, 0L, active1, 0L, active2, 0x10000000L, active3, 0L); case 122: return jjMoveStringLiteralDfa9_0(active0, 0L, active1, 0L, active2, 0x100000000000400L, active3, 0L); default : break; } return jjStartNfa_0(7, active0, active1, active2, active3); }
// Position-9 step; same generated template as the earlier jjMoveStringLiteralDfaN_0s.
private final int jjMoveStringLiteralDfa9_0(long old0, long active0, long old1, long active1, long old2, long active2, long old3, long active3) { if (((active0 &= old0) | (active1 &= old1) | (active2 &= old2) | (active3 &= old3)) == 0L) return jjStartNfa_0(7, old0, old1, old2, old3); try { curChar = input_stream.readChar(); } catch(java.io.IOException e) { jjStopStringLiteralDfa_0(8, active0, active1, active2, active3); return 9; } switch(curChar) { case 71: return jjMoveStringLiteralDfa10_0(active0, 0L, active1, 0x80000L, active2, 0L, active3, 0L); case 83: return jjMoveStringLiteralDfa10_0(active0, 0L, active1, 0L, active2, 0x10000000L, active3, 0L); case 86: return jjMoveStringLiteralDfa10_0(active0, 0L, active1, 0x800000L, active2, 0L, active3, 0L); case 97: return jjMoveStringLiteralDfa10_0(active0, 0x2000040000000L, active1, 0x4000L, active2, 0x100000000L, active3, 0x10000L); case 99: return jjMoveStringLiteralDfa10_0(active0, 0L, active1, 0x4000000000000L, active2, 0L, active3, 0x100L); case 100: if ((active0 & 0x2000000000L) != 0L) return jjStartNfaWithStates_0(9, 37, 6); break; case 101: if ((active0 & 0x10000000L) != 0L) return jjStartNfaWithStates_0(9, 28, 6); else if ((active0 & 0x8000000000000000L) != 0L) { jjmatchedKind = 63; jjmatchedPos = 9; } else if ((active2 & 0x400L) != 0L) return jjStartNfaWithStates_0(9, 138, 6); else if ((active2 & 0x100000000000000L) != 0L) return jjStartNfaWithStates_0(9, 184, 6); else if ((active3 & 0x20L) != 0L) return jjStartNfaWithStates_0(9, 197, 6); return jjMoveStringLiteralDfa10_0(active0, 0x100000L, active1, 0x200000000200082L, active2, 0x20010000L, active3, 0x10000000L); case 104: return jjMoveStringLiteralDfa10_0(active0, 0L, active1, 0x40000000000L, active2, 0L, active3, 0L); case 105: return jjMoveStringLiteralDfa10_0(active0, 0x1000000000000000L, active1, 0x100000000L, active2, 0L, active3, 0L); case 108: if ((active1 & 0x20000000000L) != 0L) return jjStartNfaWithStates_0(9, 105, 6); return
jjMoveStringLiteralDfa10_0(active0, 0x100000200000L, active1, 0x400000L, active2, 0x4L, active3, 0x8L); case 110: if ((active1 & 0x2000000L) != 0L) return jjStartNfaWithStates_0(9, 89, 6); else if ((active1 & 0x8000000L) != 0L) return jjStartNfaWithStates_0(9, 91, 6); else if ((active2 & 0x20000000000L) != 0L) return jjStartNfaWithStates_0(9, 169, 6); else if ((active2 & 0x4000000000000L) != 0L) return jjStartNfaWithStates_0(9, 178, 6); return jjMoveStringLiteralDfa10_0(active0, 0L, active1, 0x900000000000000L, active2, 0x8000240L, active3, 0x800000L); case 111: return jjMoveStringLiteralDfa10_0(active0, 0x400002000000L, active1, 0x80000000000L, active2, 0x2720018L, active3, 0x9088000L); case 112: return jjMoveStringLiteralDfa10_0(active0, 0L, active1, 0L, active2, 0x80000000L, active3, 0L); case 114: if ((active0 & 0x4000000L) != 0L) return jjStartNfaWithStates_0(9, 26, 6); else if ((active3 & 0x20000L) != 0L) return jjStartNfaWithStates_0(9, 209, 6); return jjMoveStringLiteralDfa10_0(active0, 0x200000000000000L, active1, 0L, active2, 0L, active3, 0x40000L); case 115: return jjMoveStringLiteralDfa10_0(active0, 0L, active1, 0L, active2, 0L, active3, 0x1L); case 116: if ((active1 & 0x2000L) != 0L) return jjStartNfaWithStates_0(9, 77, 6); else if ((active2 & 0x800000L) != 0L) return jjStartNfaWithStates_0(9, 151, 6); return jjMoveStringLiteralDfa10_0(active0, 0L, active1, 0x20000L, active2, 0L, active3, 0L); case 117: return jjMoveStringLiteralDfa10_0(active0, 0L, active1, 0x800000000000L, active2, 0x40080L, active3, 0L); case 120: if ((active1 & 0x40000000000000L) != 0L) return jjStartNfaWithStates_0(9, 118, 6); else if ((active1 & 0x80000000000000L) != 0L) return jjStartNfaWithStates_0(9, 119, 6); break; case 121: return jjMoveStringLiteralDfa10_0(active0, 0L, active1, 0x1000000000L, active2, 0x2000L, active3, 0x40000000L); default : break; } return jjStartNfa_0(8, active0, active1, active2, active3); } private final int jjMoveStringLiteralDfa10_0(long old0, long
active0, long old1, long active1, long old2, long active2, long old3, long active3) { if (((active0 &= old0) | (active1 &= old1) | (active2 &= old2) | (active3 &= old3)) == 0L) return jjStartNfa_0(8, old0, old1, old2, old3); try { curChar = input_stream.readChar(); } catch(java.io.IOException e) { jjStopStringLiteralDfa_0(9, active0, active1, active2, active3); return 10; } switch(curChar) { case 73: return jjMoveStringLiteralDfa11_0(active0, 0L, active1, 0x2L, active2, 0x200L, active3, 0L); case 76: return jjMoveStringLiteralDfa11_0(active0, 0L, active1, 0L, active2, 0x2000L, active3, 0L); case 82: return jjMoveStringLiteralDfa11_0(active0, 0L, active1, 0x1000000000L, active2, 0L, active3, 0L); case 83: return jjMoveStringLiteralDfa11_0(active0, 0L, active1, 0L, active2, 0L, active3, 0x40000000L); case 99: return jjMoveStringLiteralDfa11_0(active0, 0L, active1, 0L, active2, 0x40L, active3, 0L); case 100: return jjMoveStringLiteralDfa11_0(active0, 0x200000L, active1, 0x100000000000000L, active2, 0L, active3, 0L); case 101: if ((active0 & 0x100000000000L) != 0L) return jjStartNfaWithStates_0(10, 44, 6); else if ((active1 & 0x400000L) != 0L) return jjStartNfaWithStates_0(10, 86, 6); else if ((active3 & 0x8L) != 0L) return jjStartNfaWithStates_0(10, 195, 6); else if ((active3 & 0x100L) != 0L) return jjStartNfaWithStates_0(10, 200, 6); return jjMoveStringLiteralDfa11_0(active0, 0L, active1, 0x804000000820000L, active2, 0x10000000L, active3, 0L); case 108: return jjMoveStringLiteralDfa11_0(active0, 0L, active1, 0x4000L, active2, 0x18L, active3, 0L); case 109: if ((active1 & 0x80000000000L) != 0L) return jjStartNfaWithStates_0(10, 107, 6); break; case 110: if ((active0 & 0x100000L) != 0L) return jjStartNfaWithStates_0(10, 20, 6); else if ((active0 & 0x2000000L) != 0L) return jjStartNfaWithStates_0(10, 25, 6); else if ((active2 & 0x200000L) != 0L) return jjStartNfaWithStates_0(10, 149, 6); else if ((active2 & 0x400000L) != 0L) return jjStartNfaWithStates_0(10, 150, 6); 
else if ((active2 & 0x2000000L) != 0L) return jjStartNfaWithStates_0(10, 153, 6); else if ((active3 & 0x1000000L) != 0L) return jjStartNfaWithStates_0(10, 216, 6); else if ((active3 & 0x8000000L) != 0L) return jjStartNfaWithStates_0(10, 219, 6); return jjMoveStringLiteralDfa11_0(active0, 0x40000000L, active1, 0x80L, active2, 0x20000L, active3, 0x10000L); case 111: return jjMoveStringLiteralDfa11_0(active0, 0L, active1, 0x100000000L, active2, 0x4L, active3, 0x1L); case 112: return jjMoveStringLiteralDfa11_0(active0, 0x200000000000000L, active1, 0L, active2, 0L, active3, 0L); case 114: if ((active0 & 0x400000000000L) != 0L) return jjStartNfaWithStates_0(10, 46, 6); else if ((active2 & 0x100000L) != 0L) return jjStartNfaWithStates_0(10, 148, 6); else if ((active3 & 0x80000L) != 0L) return jjStartNfaWithStates_0(10, 211, 6); return jjMoveStringLiteralDfa11_0(active0, 0L, active1, 0x800000280000L, active2, 0x20050080L, active3, 0x8000L); case 116: if ((active1 & 0x40000000000L) != 0L) return jjStartNfaWithStates_0(10, 106, 6); return jjMoveStringLiteralDfa11_0(active0, 0x2000000000000L, active1, 0L, active2, 0x108000000L, active3, 0x800000L); case 117: return jjMoveStringLiteralDfa11_0(active0, 0x1000000000000000L, active1, 0L, active2, 0x80000000L, active3, 0L); case 118: return jjMoveStringLiteralDfa11_0(active0, 0L, active1, 0L, active2, 0L, active3, 0x40000L); case 119: if ((active3 & 0x10000000L) != 0L) return jjStartNfaWithStates_0(10, 220, 6); break; case 120: if ((active1 & 0x200000000000000L) != 0L) return jjStartNfaWithStates_0(10, 121, 6); break; default : break; } return jjStartNfa_0(9, active0, active1, active2, active3); } private final int jjMoveStringLiteralDfa11_0(long old0, long active0, long old1, long active1, long old2, long active2, long old3, long active3) { if (((active0 &= old0) | (active1 &= old1) | (active2 &= old2) | (active3 &= old3)) == 0L) return jjStartNfa_0(9, old0, old1, old2, old3); try { curChar = input_stream.readChar(); } 
catch(java.io.IOException e) { jjStopStringLiteralDfa_0(10, active0, active1, active2, active3); return 11; } switch(curChar) { case 73: return jjMoveStringLiteralDfa12_0(active0, 0L, active1, 0L, active2, 0x20000L, active3, 0L); case 76: return jjMoveStringLiteralDfa12_0(active0, 0L, active1, 0x4000L, active2, 0L, active3, 0L); case 83: return jjMoveStringLiteralDfa12_0(active0, 0L, active1, 0x804000000000000L, active2, 0L, active3, 0L); case 97: return jjMoveStringLiteralDfa12_0(active0, 0L, active1, 0x1000000000L, active2, 0L, active3, 0x840000L); case 100: return jjMoveStringLiteralDfa12_0(active0, 0L, active1, 0L, active2, 0L, active3, 0x8000L); case 101: if ((active1 & 0x800000000000L) != 0L) return jjStartNfaWithStates_0(11, 111, 6); else if ((active2 & 0x80L) != 0L) return jjStartNfaWithStates_0(11, 135, 6); else if ((active2 & 0x40000L) != 0L) return jjStartNfaWithStates_0(11, 146, 6); else if ((active2 & 0x100000000L) != 0L) return jjStartNfaWithStates_0(11, 160, 6); return jjMoveStringLiteralDfa12_0(active0, 0L, active1, 0x100000000000000L, active2, 0x8000000L, active3, 0x40000000L); case 105: return jjMoveStringLiteralDfa12_0(active0, 0x2000000000000L, active1, 0x80000L, active2, 0x2000L, active3, 0L); case 110: if ((active1 & 0x100000000L) != 0L) return jjStartNfaWithStates_0(11, 96, 6); return jjMoveStringLiteralDfa12_0(active0, 0L, active1, 0x20002L, active2, 0x10000200L, active3, 0L); case 111: return jjMoveStringLiteralDfa12_0(active0, 0x200000000000000L, active1, 0L, active2, 0x18L, active3, 0L); case 112: return jjMoveStringLiteralDfa12_0(active0, 0L, active1, 0L, active2, 0x20010000L, active3, 0L); case 114: if ((active2 & 0x4L) != 0L) return jjStartNfaWithStates_0(11, 130, 6); else if ((active3 & 0x1L) != 0L) return jjStartNfaWithStates_0(11, 192, 6); return jjMoveStringLiteralDfa12_0(active0, 0x200000L, active1, 0x800000L, active2, 0L, active3, 0L); case 115: if ((active0 & 0x1000000000000000L) != 0L) return jjStartNfaWithStates_0(11, 60, 6); 
return jjMoveStringLiteralDfa12_0(active0, 0x40000000L, active1, 0x80L, active2, 0L, active3, 0x10000L); case 116: if ((active2 & 0x80000000L) != 0L) return jjStartNfaWithStates_0(11, 159, 6); return jjMoveStringLiteralDfa12_0(active0, 0L, active1, 0x200000L, active2, 0L, active3, 0L); case 121: if ((active2 & 0x40L) != 0L) return jjStartNfaWithStates_0(11, 134, 6); break; default : break; } return jjStartNfa_0(10, active0, active1, active2, active3); } private final int jjMoveStringLiteralDfa12_0(long old0, long active0, long old1, long active1, long old2, long active2, long old3, long active3) { if (((active0 &= old0) | (active1 &= old1) | (active2 &= old2) | (active3 &= old3)) == 0L) return jjStartNfa_0(10, old0, old1, old2, old3); try { curChar = input_stream.readChar(); } catch(java.io.IOException e) { jjStopStringLiteralDfa_0(11, active0, active1, active2, active3); return 12; } switch(curChar) { case 100: if ((active1 & 0x80000L) != 0L) return jjStartNfaWithStates_0(12, 83, 6); break; case 101: return jjMoveStringLiteralDfa13_0(active0, 0x200000L, active1, 0x804000000200000L, active2, 0L, active3, 0L); case 102: return jjMoveStringLiteralDfa13_0(active0, 0x40000000L, active1, 0L, active2, 0x200L, active3, 0x10000L); case 105: return jjMoveStringLiteralDfa13_0(active0, 0L, active1, 0x4000L, active2, 0L, active3, 0x8000L); case 108: if ((active3 & 0x40000L) != 0L) return jjStartNfaWithStates_0(12, 210, 6); return jjMoveStringLiteralDfa13_0(active0, 0x200000000000000L, active1, 0L, active2, 0L, active3, 0L); case 109: return jjMoveStringLiteralDfa13_0(active0, 0L, active1, 0L, active2, 0x2000L, active3, 0L); case 110: return jjMoveStringLiteralDfa13_0(active0, 0L, active1, 0x1000000000L, active2, 0x20000L, active3, 0x40000000L); case 111: return jjMoveStringLiteralDfa13_0(active0, 0x2000000000000L, active1, 0x80L, active2, 0x20010000L, active3, 0L); case 114: if ((active2 & 0x8L) != 0L) return jjStartNfaWithStates_0(12, 131, 6); else if ((active2 & 0x10L) != 
0L) return jjStartNfaWithStates_0(12, 132, 6); return jjMoveStringLiteralDfa13_0(active0, 0L, active1, 0L, active2, 0x8000000L, active3, 0L); case 115: return jjMoveStringLiteralDfa13_0(active0, 0L, active1, 0x20000L, active2, 0x10000000L, active3, 0L); case 116: return jjMoveStringLiteralDfa13_0(active0, 0L, active1, 0x800002L, active2, 0L, active3, 0x800000L); case 120: if ((active1 & 0x100000000000000L) != 0L) return jjStartNfaWithStates_0(12, 120, 6); break; default : break; } return jjStartNfa_0(11, active0, active1, active2, active3); } private final int jjMoveStringLiteralDfa13_0(long old0, long active0, long old1, long active1, long old2, long active2, long old3, long active3) { if (((active0 &= old0) | (active1 &= old1) | (active2 &= old2) | (active3 &= old3)) == 0L) return jjStartNfa_0(11, old0, old1, old2, old3); try { curChar = input_stream.readChar(); } catch(java.io.IOException e) { jjStopStringLiteralDfa_0(12, active0, active1, active2, active3); return 13; } switch(curChar) { case 97: return jjMoveStringLiteralDfa14_0(active0, 0x200000000000000L, active1, 0L, active2, 0L, active3, 0L); case 101: return jjMoveStringLiteralDfa14_0(active0, 0L, active1, 0x800002L, active2, 0L, active3, 0L); case 103: return jjMoveStringLiteralDfa14_0(active0, 0L, active1, 0x1000004000L, active2, 0L, active3, 0L); case 105: return jjMoveStringLiteralDfa14_0(active0, 0L, active1, 0x20000L, active2, 0x2000L, active3, 0x800000L); case 108: return jjMoveStringLiteralDfa14_0(active0, 0L, active1, 0L, active2, 0x20010000L, active3, 0L); case 110: if ((active0 & 0x200000L) != 0L) return jjStartNfaWithStates_0(13, 21, 6); else if ((active0 & 0x2000000000000L) != 0L) return jjStartNfaWithStates_0(13, 49, 6); return jjMoveStringLiteralDfa14_0(active0, 0L, active1, 0L, active2, 0L, active3, 0x8000L); case 111: if ((active2 & 0x200L) != 0L) return jjStartNfaWithStates_0(13, 137, 6); return jjMoveStringLiteralDfa14_0(active0, 0x40000000L, active1, 0L, active2, 0x10000000L, active3, 
0x10000L); case 112: return jjMoveStringLiteralDfa14_0(active0, 0L, active1, 0L, active2, 0x8000000L, active3, 0L); case 114: if ((active1 & 0x80L) != 0L) return jjStartNfaWithStates_0(13, 71, 6); break; case 115: return jjMoveStringLiteralDfa14_0(active0, 0L, active1, 0L, active2, 0L, active3, 0x40000000L); case 116: if ((active1 & 0x4000000000000L) != 0L) return jjStartNfaWithStates_0(13, 114, 6); else if ((active1 & 0x800000000000000L) != 0L) return jjStartNfaWithStates_0(13, 123, 6); return jjMoveStringLiteralDfa14_0(active0, 0L, active1, 0L, active2, 0x20000L, active3, 0L); case 120: if ((active1 & 0x200000L) != 0L) return jjStartNfaWithStates_0(13, 85, 6); break; default : break; } return jjStartNfa_0(12, active0, active1, active2, active3); } private final int jjMoveStringLiteralDfa14_0(long old0, long active0, long old1, long active1, long old2, long active2, long old3, long active3) { if (((active0 &= old0) | (active1 &= old1) | (active2 &= old2) | (active3 &= old3)) == 0L) return jjStartNfa_0(12, old0, old1, old2, old3); try { curChar = input_stream.readChar(); } catch(java.io.IOException e) { jjStopStringLiteralDfa_0(13, active0, active1, active2, active3); return 14; } switch(curChar) { case 97: return jjMoveStringLiteralDfa15_0(active0, 0L, active1, 0L, active2, 0x20010000L, active3, 0x8000L); case 101: if ((active1 & 0x1000000000L) != 0L) return jjStartNfaWithStates_0(14, 100, 6); return jjMoveStringLiteralDfa15_0(active0, 0L, active1, 0L, active2, 0x20000L, active3, 0L); case 104: return jjMoveStringLiteralDfa15_0(active0, 0L, active1, 0x4000L, active2, 0L, active3, 0L); case 111: return jjMoveStringLiteralDfa15_0(active0, 0L, active1, 0L, active2, 0x8000000L, active3, 0x40800000L); case 114: if ((active2 & 0x10000000L) != 0L) return jjStartNfaWithStates_0(14, 156, 6); return jjMoveStringLiteralDfa15_0(active0, 0x40000000L, active1, 0x2L, active2, 0L, active3, 0x10000L); case 116: if ((active2 & 0x2000L) != 0L) return jjStartNfaWithStates_0(14, 141, 
6); return jjMoveStringLiteralDfa15_0(active0, 0x200000000000000L, active1, 0x20000L, active2, 0L, active3, 0L); case 120: if ((active1 & 0x800000L) != 0L) return jjStartNfaWithStates_0(14, 87, 6); break; default : break; } return jjStartNfa_0(13, active0, active1, active2, active3); } private final int jjMoveStringLiteralDfa15_0(long old0, long active0, long old1, long active1, long old2, long active2, long old3, long active3) { if (((active0 &= old0) | (active1 &= old1) | (active2 &= old2) | (active3 &= old3)) == 0L) return jjStartNfa_0(13, old0, old1, old2, old3); try { curChar = input_stream.readChar(); } catch(java.io.IOException e) { jjStopStringLiteralDfa_0(14, active0, active1, active2, active3); return 15; } switch(curChar) { case 108: return jjMoveStringLiteralDfa16_0(active0, 0L, active1, 0L, active2, 0x8000000L, active3, 0L); case 109: if ((active0 & 0x40000000L) != 0L) return jjStartNfaWithStates_0(15, 30, 6); else if ((active3 & 0x10000L) != 0L) return jjStartNfaWithStates_0(15, 208, 6); break; case 110: if ((active3 & 0x800000L) != 0L) return jjStartNfaWithStates_0(15, 215, 6); break; case 111: return jjMoveStringLiteralDfa16_0(active0, 0x200000000000000L, active1, 0L, active2, 0L, active3, 0L); case 112: return jjMoveStringLiteralDfa16_0(active0, 0L, active1, 0x2L, active2, 0L, active3, 0L); case 114: if ((active3 & 0x40000000L) != 0L) return jjStartNfaWithStates_0(15, 222, 6); return jjMoveStringLiteralDfa16_0(active0, 0L, active1, 0L, active2, 0x20000L, active3, 0L); case 116: if ((active1 & 0x4000L) != 0L) return jjStartNfaWithStates_0(15, 78, 6); return jjMoveStringLiteralDfa16_0(active0, 0L, active1, 0L, active2, 0x20010000L, active3, 0x8000L); case 121: if ((active1 & 0x20000L) != 0L) return jjStartNfaWithStates_0(15, 81, 6); break; default : break; } return jjStartNfa_0(14, active0, active1, active2, active3); } private final int jjMoveStringLiteralDfa16_0(long old0, long active0, long old1, long active1, long old2, long active2, long old3, 
long active3) { if (((active0 &= old0) | (active1 &= old1) | (active2 &= old2) | (active3 &= old3)) == 0L) return jjStartNfa_0(14, old0, old1, old2, old3); try { curChar = input_stream.readChar(); } catch(java.io.IOException e) { jjStopStringLiteralDfa_0(15, active0, active1, active2, active3); return 16; } switch(curChar) { case 97: return jjMoveStringLiteralDfa17_0(active0, 0L, active1, 0L, active2, 0x8000000L, active3, 0L); case 101: if ((active3 & 0x8000L) != 0L) return jjStartNfaWithStates_0(16, 207, 6); break; case 111: return jjMoveStringLiteralDfa17_0(active0, 0L, active1, 0x2L, active2, 0x20010000L, active3, 0L); case 112: return jjMoveStringLiteralDfa17_0(active0, 0L, active1, 0L, active2, 0x20000L, active3, 0L); case 114: if ((active0 & 0x200000000000000L) != 0L) return jjStartNfaWithStates_0(16, 57, 6); break; default : break; } return jjStartNfa_0(15, active0, active1, active2, active3); } private final int jjMoveStringLiteralDfa17_0(long old0, long active0, long old1, long active1, long old2, long active2, long old3, long active3) { if (((active0 &= old0) | (active1 &= old1) | (active2 &= old2) | (active3 &= old3)) == 0L) return jjStartNfa_0(15, old0, old1, old2, old3); try { curChar = input_stream.readChar(); } catch(java.io.IOException e) { jjStopStringLiteralDfa_0(16, 0L, active1, active2, 0L); return 17; } switch(curChar) { case 108: return jjMoveStringLiteralDfa18_0(active1, 0x2L, active2, 0L); case 111: return jjMoveStringLiteralDfa18_0(active1, 0L, active2, 0x20000L); case 114: if ((active2 & 0x10000L) != 0L) return jjStartNfaWithStates_0(17, 144, 6); else if ((active2 & 0x20000000L) != 0L) return jjStartNfaWithStates_0(17, 157, 6); break; case 116: return jjMoveStringLiteralDfa18_0(active1, 0L, active2, 0x8000000L); default : break; } return jjStartNfa_0(16, 0L, active1, active2, 0L); } private final int jjMoveStringLiteralDfa18_0(long old1, long active1, long old2, long active2) { if (((active1 &= old1) | (active2 &= old2)) == 0L) return 
jjStartNfa_0(16, 0L, old1, old2, 0L); try { curChar = input_stream.readChar(); } catch(java.io.IOException e) { jjStopStringLiteralDfa_0(17, 0L, active1, active2, 0L); return 18; } switch(curChar) { case 97: return jjMoveStringLiteralDfa19_0(active1, 0x2L, active2, 0L); case 108: return jjMoveStringLiteralDfa19_0(active1, 0L, active2, 0x20000L); case 111: return jjMoveStringLiteralDfa19_0(active1, 0L, active2, 0x8000000L); default : break; } return jjStartNfa_0(17, 0L, active1, active2, 0L); } private final int jjMoveStringLiteralDfa19_0(long old1, long active1, long old2, long active2) { if (((active1 &= old1) | (active2 &= old2)) == 0L) return jjStartNfa_0(17, 0L, old1, old2, 0L); try { curChar = input_stream.readChar(); } catch(java.io.IOException e) { jjStopStringLiteralDfa_0(18, 0L, active1, active2, 0L); return 19; } switch(curChar) { case 97: return jjMoveStringLiteralDfa20_0(active1, 0L, active2, 0x20000L); case 114: if ((active2 & 0x8000000L) != 0L) return jjStartNfaWithStates_0(19, 155, 6); break; case 116: return jjMoveStringLiteralDfa20_0(active1, 0x2L, active2, 0L); default : break; } return jjStartNfa_0(18, 0L, active1, active2, 0L); } private final int jjMoveStringLiteralDfa20_0(long old1, long active1, long old2, long active2) { if (((active1 &= old1) | (active2 &= old2)) == 0L) return jjStartNfa_0(18, 0L, old1, old2, 0L); try { curChar = input_stream.readChar(); } catch(java.io.IOException e) { jjStopStringLiteralDfa_0(19, 0L, active1, active2, 0L); return 20; } switch(curChar) { case 111: return jjMoveStringLiteralDfa21_0(active1, 0x2L, active2, 0L); case 116: return jjMoveStringLiteralDfa21_0(active1, 0L, active2, 0x20000L); default : break; } return jjStartNfa_0(19, 0L, active1, active2, 0L); } private final int jjMoveStringLiteralDfa21_0(long old1, long active1, long old2, long active2) { if (((active1 &= old1) | (active2 &= old2)) == 0L) return jjStartNfa_0(19, 0L, old1, old2, 0L); try { curChar = input_stream.readChar(); } 
catch(java.io.IOException e) { jjStopStringLiteralDfa_0(20, 0L, active1, active2, 0L); return 21; } switch(curChar) { case 111: return jjMoveStringLiteralDfa22_0(active1, 0L, active2, 0x20000L); case 114: if ((active1 & 0x2L) != 0L) return jjStartNfaWithStates_0(21, 65, 6); break; default : break; } return jjStartNfa_0(20, 0L, active1, active2, 0L); } private final int jjMoveStringLiteralDfa22_0(long old1, long active1, long old2, long active2) { if (((active1 &= old1) | (active2 &= old2)) == 0L) return jjStartNfa_0(20, 0L, old1, old2, 0L); try { curChar = input_stream.readChar(); } catch(java.io.IOException e) { jjStopStringLiteralDfa_0(21, 0L, 0L, active2, 0L); return 22; } switch(curChar) { case 114: if ((active2 & 0x20000L) != 0L) return jjStartNfaWithStates_0(22, 145, 6); break; default : break; } return jjStartNfa_0(21, 0L, 0L, active2, 0L); } private final void jjCheckNAdd(int state) { if (jjrounds[state] != jjround) { jjstateSet[jjnewStateCnt++] = state; jjrounds[state] = jjround; } } private final void jjAddStates(int start, int end) { do { jjstateSet[jjnewStateCnt++] = jjnextStates[start]; } while (start++ != end); } private final void jjCheckNAddTwoStates(int state1, int state2) { jjCheckNAdd(state1); jjCheckNAdd(state2); } private final void jjCheckNAddStates(int start, int end) { do { jjCheckNAdd(jjnextStates[start]); } while (start++ != end); } private final void jjCheckNAddStates(int start) { jjCheckNAdd(jjnextStates[start]); jjCheckNAdd(jjnextStates[start + 1]); } static final long[] jjbitVec0 = { 0x0L, 0x0L, 0xffffffffffffffffL, 0xffffffffffffffffL }; private final int jjMoveNfa_0(int startState, int curPos) { int[] nextStates; int startsAt = 0; jjnewStateCnt = 41; int i = 1; jjstateSet[0] = startState; int j, kind = 0x7fffffff; for (;;) { if (++jjround == 0x7fffffff) ReInitRounds(); if (curChar < 64) { long l = 1L << curChar; MatchLoop: do { switch(jjstateSet[--i]) { case 0: if ((0x3ff000000000000L & l) != 0L) { if (kind > 226) kind = 226; 
jjCheckNAddStates(0, 5); } else if ((0xa47200000000L & l) != 0L) { if (kind > 227) kind = 227; jjCheckNAdd(6); } else if (curChar == 46) jjCheckNAdd(17); else if (curChar == 34) jjCheckNAddStates(6, 8); else if (curChar == 35) jjCheckNAddStates(9, 11); if ((0x280000000000L & l) != 0L) jjCheckNAddStates(12, 15); else if (curChar == 48) jjstateSet[jjnewStateCnt++] = 23; break; case 26: if ((0x3ffa47200000000L & l) != 0L) { if (kind > 227) kind = 227; jjCheckNAdd(6); } if ((0x3ff000000000000L & l) != 0L) { if (kind > 231) kind = 231; jjCheckNAdd(27); } else if ((0x280000000000L & l) != 0L) jjCheckNAdd(27); break; case 1: if ((0xffffffffffffdbffL & l) != 0L) jjCheckNAddStates(9, 11); break; case 2: case 3: if (curChar == 10 && kind > 5) kind = 5; break; case 4: if (curChar == 13) jjstateSet[jjnewStateCnt++] = 3; break; case 5: if ((0xa47200000000L & l) == 0L) break; if (kind > 227) kind = 227; jjCheckNAdd(6); break; case 6: if ((0x3ffa47200000000L & l) == 0L) break; if (kind > 227) kind = 227; jjCheckNAdd(6); break; case 7: if (curChar == 34) jjCheckNAddStates(6, 8); break; case 8: if ((0xfffffffbffffdbffL & l) != 0L) jjCheckNAddStates(6, 8); break; case 10: if ((0x8400000000L & l) != 0L) jjCheckNAddStates(6, 8); break; case 11: if (curChar == 34 && kind > 228) kind = 228; break; case 12: if ((0xff000000000000L & l) != 0L) jjCheckNAddStates(16, 19); break; case 13: if ((0xff000000000000L & l) != 0L) jjCheckNAddStates(6, 8); break; case 14: if ((0xf000000000000L & l) != 0L) jjstateSet[jjnewStateCnt++] = 15; break; case 15: if ((0xff000000000000L & l) != 0L) jjCheckNAdd(13); break; case 16: if (curChar == 46) jjCheckNAdd(17); break; case 17: if ((0x3ff000000000000L & l) == 0L) break; if (kind > 229) kind = 229; jjCheckNAddStates(20, 22); break; case 19: if ((0x280000000000L & l) != 0L) jjCheckNAdd(20); break; case 20: if ((0x3ff000000000000L & l) == 0L) break; if (kind > 229) kind = 229; jjCheckNAddTwoStates(20, 21); break; case 22: if (curChar == 48) 
jjstateSet[jjnewStateCnt++] = 23; break; case 24: if ((0x3ff000000000000L & l) == 0L) break; if (kind > 230) kind = 230; jjstateSet[jjnewStateCnt++] = 24; break; case 27: if ((0x3ff000000000000L & l) == 0L) break; if (kind > 231) kind = 231; jjCheckNAdd(27); break; case 28: if ((0x280000000000L & l) != 0L) jjCheckNAddStates(12, 15); break; case 29: if ((0x3ff000000000000L & l) == 0L) break; if (kind > 226) kind = 226; jjCheckNAdd(29); break; case 30: if ((0x3ff000000000000L & l) != 0L) jjCheckNAddTwoStates(30, 31); break; case 31: if (curChar != 46) break; if (kind > 229) kind = 229; jjCheckNAddStates(23, 25); break; case 32: if ((0x3ff000000000000L & l) == 0L) break; if (kind > 229) kind = 229; jjCheckNAddStates(23, 25); break; case 34: if ((0x280000000000L & l) != 0L) jjCheckNAdd(35); break; case 35: if ((0x3ff000000000000L & l) == 0L) break; if (kind > 229) kind = 229; jjCheckNAddTwoStates(35, 21); break; case 36: if ((0x3ff000000000000L & l) == 0L) break; if (kind > 229) kind = 229; jjCheckNAddStates(26, 28); break; case 38: if ((0x280000000000L & l) != 0L) jjCheckNAdd(39); break; case 39: if ((0x3ff000000000000L & l) == 0L) break; if (kind > 229) kind = 229; jjCheckNAddTwoStates(39, 21); break; case 40: if ((0x3ff000000000000L & l) == 0L) break; if (kind > 226) kind = 226; jjCheckNAddStates(0, 5); break; default : break; } } while(i != startsAt); } else if (curChar < 128) { long l = 1L << (curChar & 077); MatchLoop: do { switch(jjstateSet[--i]) { case 0: if ((0x57fffffec7fffffeL & l) != 0L) { if (kind > 227) kind = 227; jjCheckNAdd(6); } if ((0x2000000020L & l) != 0L) jjAddStates(29, 30); break; case 26: case 6: if ((0x57fffffec7fffffeL & l) == 0L) break; if (kind > 227) kind = 227; jjCheckNAdd(6); break; case 1: jjAddStates(9, 11); break; case 5: if ((0x57fffffec7fffffeL & l) == 0L) break; if (kind > 227) kind = 227; jjCheckNAdd(6); break; case 8: if ((0xffffffffefffffffL & l) != 0L) jjCheckNAddStates(6, 8); break; case 9: if (curChar == 92) jjAddStates(31, 
33); break; case 10: if ((0x14404410000000L & l) != 0L) jjCheckNAddStates(6, 8); break; case 18: if ((0x2000000020L & l) != 0L) jjAddStates(34, 35); break; case 21: if ((0x5000000050L & l) != 0L && kind > 229) kind = 229; break; case 23: if ((0x100000001000000L & l) != 0L) jjCheckNAdd(24); break; case 24: if ((0x7e0000007eL & l) == 0L) break; if (kind > 230) kind = 230; jjCheckNAdd(24); break; case 25: if ((0x2000000020L & l) != 0L) jjAddStates(29, 30); break; case 33: if ((0x2000000020L & l) != 0L) jjAddStates(36, 37); break; case 37: if ((0x2000000020L & l) != 0L) jjAddStates(38, 39); break; default : break; } } while(i != startsAt); } else { int i2 = (curChar & 0xff) >> 6; long l2 = 1L << (curChar & 077); MatchLoop: do { switch(jjstateSet[--i]) { case 1: if ((jjbitVec0[i2] & l2) != 0L) jjAddStates(9, 11); break; case 8: if ((jjbitVec0[i2] & l2) != 0L) jjAddStates(6, 8); break; default : break; } } while(i != startsAt); } if (kind != 0x7fffffff) { jjmatchedKind = kind; jjmatchedPos = curPos; kind = 0x7fffffff; } ++curPos; if ((i = jjnewStateCnt) == (startsAt = 41 - (jjnewStateCnt = startsAt))) return curPos; try { curChar = input_stream.readChar(); } catch(java.io.IOException e) { return curPos; } } } static final int[] jjnextStates = { 29, 30, 31, 36, 37, 21, 8, 9, 11, 1, 2, 4, 29, 30, 16, 36, 8, 9, 13, 11, 17, 18, 21, 32, 33, 21, 36, 37, 21, 26, 27, 10, 12, 14, 19, 20, 34, 35, 38, 39, }; public static final String[] jjstrLiteralImages = { "", null, null, null, null, null, "\143\150\151\154\144\162\145\156", "\116\125\114\114", "\173", "\175", "\54", "\124\122\125\105", "\106\101\114\123\105", "\133", "\135", "\104\105\106", "\125\123\105", "\122\117\125\124\105", "\124\117", "\56", "\141\144\144\103\150\151\154\144\162\145\156", "\162\145\155\157\166\145\103\150\151\154\144\162\145\156", "\101\156\143\150\157\162", "\165\162\154", "\160\141\162\141\155\145\164\145\162", "\144\145\163\143\162\151\160\164\151\157\156", "\142\142\157\170\103\145\156\164\145\162", 
"\142\142\157\170\123\151\172\145", "\101\160\160\145\141\162\141\156\143\145", "\155\141\164\145\162\151\141\154", "\164\145\170\164\165\162\145\124\162\141\156\163\146\157\162\155", "\164\145\170\164\165\162\145", "\101\165\144\151\157\103\154\151\160", "\154\157\157\160", "\160\151\164\143\150", "\163\164\141\162\164\124\151\155\145", "\163\164\157\160\124\151\155\145", "\102\141\143\153\147\162\157\165\156\144", "\142\141\143\153\125\162\154", "\142\157\164\164\157\155\125\162\154", "\146\162\157\156\164\125\162\154", "\154\145\146\164\125\162\154", "\162\151\147\150\164\125\162\154", "\164\157\160\125\162\154", "\147\162\157\165\156\144\101\156\147\154\145", "\163\153\171\101\156\147\154\145", "\147\162\157\165\156\144\103\157\154\157\162", "\163\153\171\103\157\154\157\162", "\102\151\154\154\142\157\141\162\144", "\141\170\151\163\117\146\122\157\164\141\164\151\157\156", "\102\157\170", "\163\151\172\145", "\103\157\154\154\151\163\151\157\156", "\143\157\154\154\151\144\145", "\160\162\157\170\171", "\103\157\154\157\162", "\143\157\154\157\162", "\103\157\154\157\162\111\156\164\145\162\160\157\154\141\164\157\162", "\103\157\156\145", "\150\145\151\147\150\164", "\142\157\164\164\157\155\122\141\144\151\165\163", "\163\151\144\145", "\142\157\164\164\157\155", "\103\157\157\162\144\151\156\141\164\145", "\160\157\151\156\164", "\103\157\157\162\144\151\156\141\164\145\111\156\164\145\162\160\157\154\141\164\157\162", "\153\145\171", "\153\145\171\126\141\154\165\145", "\103\171\154\151\156\144\145\162", "\164\157\160", "\162\141\144\151\165\163", "\103\171\154\151\156\144\145\162\123\145\156\163\157\162", "\155\151\156\101\156\147\154\145", "\155\141\170\101\156\147\154\145", "\145\156\141\142\154\145\144", "\144\151\163\153\101\156\147\154\145", "\157\146\146\163\145\164", "\141\165\164\157\117\146\146\163\145\164", "\104\151\162\145\143\164\151\157\156\141\154\114\151\147\150\164", "\157\156", "\151\156\164\145\156\163\151\164\171", 
"\141\155\142\151\145\156\164\111\156\164\145\156\163\151\164\171", "\144\151\162\145\143\164\151\157\156", "\105\154\145\166\141\164\151\157\156\107\162\151\144", "\143\143\167", "\143\157\154\157\162\120\145\162\126\145\162\164\145\170", "\143\162\145\141\163\145\101\156\147\154\145", "\156\157\162\155\141\154\120\145\162\126\145\162\164\145\170", "\163\157\154\151\144", "\170\104\151\155\145\156\163\151\157\156", "\170\123\160\141\143\151\156\147", "\172\104\151\155\145\156\163\151\157\156", "\172\123\160\141\143\151\156\147", "\105\170\164\162\165\163\151\157\156", "\142\145\147\151\156\103\141\160", "\145\156\144\103\141\160", "\143\162\157\163\163\123\145\143\164\151\157\156", "\163\160\151\156\145", "\106\157\147", "\146\157\147\124\171\160\145", "\166\151\163\151\142\151\154\151\164\171\122\141\156\147\145", "\106\157\156\164\123\164\171\154\145", "\152\165\163\164\151\146\171", "\146\141\155\151\154\171", "\163\164\171\154\145", "\150\157\162\151\172\157\156\164\141\154", "\154\145\146\164\124\157\122\151\147\150\164", "\164\157\160\124\157\102\157\164\164\157\155", "\154\141\156\147\165\141\147\145", "\163\160\141\143\151\156\147", "\107\162\157\165\160", "\111\155\141\147\145\124\145\170\164\165\162\145", "\162\145\160\145\141\164\123", "\162\145\160\145\141\164\124", "\111\156\144\145\170\145\144\106\141\143\145\123\145\164", "\143\157\157\162\144", "\156\157\162\155\141\154", "\164\145\170\103\157\157\162\144", "\143\157\157\162\144\111\156\144\145\170", "\143\157\154\157\162\111\156\144\145\170", "\164\145\170\103\157\157\162\144\111\156\144\145\170", "\156\157\162\155\141\154\111\156\144\145\170", "\143\157\156\166\145\170", "\111\156\144\145\170\145\144\114\151\156\145\123\145\164", "\111\156\154\151\156\145", "\114\117\104", "\143\145\156\164\145\162", "\162\141\156\147\145", "\154\145\166\145\154", "\115\141\164\145\162\151\141\154", "\144\151\146\146\165\163\145\103\157\154\157\162", "\163\160\145\143\165\154\141\162\103\157\154\157\162", 
"\145\155\151\163\163\151\166\145\103\157\154\157\162", "\163\150\151\156\151\156\145\163\163", "\164\162\141\156\163\160\141\162\145\156\143\171", "\115\157\166\151\145\124\145\170\164\165\162\145", "\163\160\145\145\144", "\116\141\166\151\147\141\164\151\157\156\111\156\146\157", "\141\166\141\164\141\162\123\151\172\145", "\150\145\141\144\154\151\147\150\164", "\164\171\160\145", "\166\151\163\151\142\151\154\151\164\171\114\151\155\151\164", "\116\157\162\155\141\154", "\166\145\143\164\157\162", "\116\157\162\155\141\154\111\156\164\145\162\160\157\154\141\164\157\162", "\117\162\151\145\156\164\141\164\151\157\156\111\156\164\145\162\160\157\154\141\164\157\162", "\120\151\170\145\154\124\145\170\164\165\162\145", "\151\155\141\147\145", "\120\154\141\156\145\123\145\156\163\157\162", "\155\151\156\120\157\163\151\164\151\157\156", "\155\141\170\120\157\163\151\164\151\157\156", "\120\157\151\156\164\114\151\147\150\164", "\154\157\143\141\164\151\157\156", "\141\164\164\145\156\165\141\164\151\157\156", "\120\157\151\156\164\123\145\164", "\120\157\163\151\164\151\157\156\111\156\164\145\162\160\157\154\141\164\157\162", "\120\162\157\170\151\155\151\164\171\123\145\156\163\157\162", "\123\143\141\154\141\162\111\156\164\145\162\160\157\154\141\164\157\162", "\123\143\162\151\160\164", "\144\151\162\145\143\164\117\165\164\160\165\164", "\155\165\163\164\105\166\141\154\165\141\164\145", "\145\166\145\156\164\111\156", "\145\166\145\156\164\117\165\164", "\146\151\145\154\144", "\123\106\102\157\157\154", "\123\106\103\157\154\157\162", "\123\106\106\154\157\141\164", "\123\106\111\156\164\63\62", "\123\106\116\157\144\145", "\123\106\122\157\164\141\164\151\157\156", "\123\106\123\164\162\151\156\147", "\123\106\124\151\155\145", "\123\106\126\145\143\62\146", "\123\106\126\145\143\63\146", "\115\106\103\157\154\157\162", "\115\106\106\154\157\141\164", "\115\106\111\156\164\63\62", "\115\106\116\157\144\145", "\115\106\122\157\164\141\164\151\157\156", 
"\115\106\123\164\162\151\156\147", "\115\106\124\151\155\145", "\115\106\126\145\143\62\146", "\115\106\126\145\143\63\146", "\123\157\165\156\144", "\163\160\141\164\151\141\154\151\172\145", "\155\141\170\102\141\143\153", "\155\151\156\102\141\143\153", "\155\141\170\106\162\157\156\164", "\155\151\156\106\162\157\156\164", "\160\162\151\157\162\151\164\171", "\163\157\165\162\143\145", "\123\160\150\145\162\145", "\123\160\150\145\162\145\123\145\156\163\157\162", "\123\160\157\164\114\151\147\150\164", "\142\145\141\155\127\151\144\164\150", "\143\165\164\117\146\146\101\156\147\154\145", "\123\150\141\160\145", "\141\160\160\145\141\162\141\156\143\145", "\147\145\157\155\145\164\162\171", "\123\167\151\164\143\150", "\167\150\151\143\150\103\150\157\151\143\145", "\143\150\157\151\143\145", "\124\145\170\164", "\163\164\162\151\156\147", "\154\145\156\147\164\150", "\155\141\170\105\170\164\145\156\164", "\146\157\156\164\123\164\171\154\145", "\124\145\170\164\165\162\145\103\157\157\162\144\151\156\141\164\145", "\124\145\170\164\165\162\145\124\162\141\156\163\146\157\162\155", "\124\151\155\145\123\145\156\163\157\162", "\143\171\143\154\145\111\156\164\145\162\166\141\154", "\124\157\165\143\150\123\145\156\163\157\162", "\124\162\141\156\163\146\157\162\155", "\162\157\164\141\164\151\157\156", "\163\143\141\154\145", "\163\143\141\154\145\117\162\151\145\156\164\141\164\151\157\156", "\164\162\141\156\163\154\141\164\151\157\156", "\126\151\145\167\160\157\151\156\164", "\160\157\163\151\164\151\157\156", "\157\162\151\145\156\164\141\164\151\157\156", "\146\151\145\154\144\117\146\126\151\145\167", "\152\165\155\160", "\126\151\163\151\142\151\154\151\164\171\123\145\156\163\157\162", "\127\157\162\154\144\111\156\146\157", "\164\151\164\154\145", "\151\156\146\157", null, null, null, null, null, null, }; public static final String[] lexStateNames = { "DEFAULT", }; static final long[] jjtoToken = { 0xffffffffffffffc1L, 0xffffffffffffffffL, 
0xffffffffffffffffL, 0xffffffffffL, }; static final long[] jjtoSkip = { 0x3eL, 0x0L, 0x0L, 0x0L, }; private SimpleCharStream input_stream; private final int[] jjrounds = new int[41]; private final int[] jjstateSet = new int[82]; protected char curChar; public VRML97ParserTokenManager(SimpleCharStream stream) { if (SimpleCharStream.staticFlag) throw new Error("ERROR: Cannot use a static CharStream class with a non-static lexical analyzer."); input_stream = stream; } public VRML97ParserTokenManager(SimpleCharStream stream, int lexState) { this(stream); SwitchTo(lexState); } public void ReInit(SimpleCharStream stream) { jjmatchedPos = jjnewStateCnt = 0; curLexState = defaultLexState; input_stream = stream; ReInitRounds(); } private final void ReInitRounds() { int i; jjround = 0x80000001; for (i = 41; i-- > 0;) jjrounds[i] = 0x80000000; } public void ReInit(SimpleCharStream stream, int lexState) { ReInit(stream); SwitchTo(lexState); } public void SwitchTo(int lexState) { if (lexState >= 1 || lexState < 0) throw new TokenMgrError("Error: Ignoring invalid lexical state : " + lexState + ". State unchanged.", TokenMgrError.INVALID_LEXICAL_STATE); else curLexState = lexState; } private final Token jjFillToken() { Token t = Token.newToken(jjmatchedKind); t.kind = jjmatchedKind; String im = jjstrLiteralImages[jjmatchedKind]; t.image = (im == null) ? 
input_stream.GetImage() : im; t.beginLine = input_stream.getBeginLine(); t.beginColumn = input_stream.getBeginColumn(); t.endLine = input_stream.getEndLine(); t.endColumn = input_stream.getEndColumn(); return t; } int curLexState = 0; int defaultLexState = 0; int jjnewStateCnt; int jjround; int jjmatchedPos; int jjmatchedKind; public final Token getNextToken() { int kind; Token specialToken = null; Token matchedToken; int curPos = 0; EOFLoop : for (;;) { try { curChar = input_stream.BeginToken(); } catch(java.io.IOException e) { jjmatchedKind = 0; matchedToken = jjFillToken(); return matchedToken; } try { input_stream.backup(0); while (curChar <= 32 && (0x100002600L & (1L << curChar)) != 0L) curChar = input_stream.BeginToken(); } catch (java.io.IOException e1) { continue EOFLoop; } jjmatchedKind = 0x7fffffff; jjmatchedPos = 0; curPos = jjMoveStringLiteralDfa0_0(); if (jjmatchedKind != 0x7fffffff) { if (jjmatchedPos + 1 < curPos) input_stream.backup(curPos - jjmatchedPos - 1); if ((jjtoToken[jjmatchedKind >> 6] & (1L << (jjmatchedKind & 077))) != 0L) { matchedToken = jjFillToken(); return matchedToken; } else { continue EOFLoop; } } int error_line = input_stream.getEndLine(); int error_column = input_stream.getEndColumn(); String error_after = null; boolean EOFSeen = false; try { input_stream.readChar(); input_stream.backup(1); } catch (java.io.IOException e1) { EOFSeen = true; error_after = curPos <= 1 ? "" : input_stream.GetImage(); if (curChar == '\n' || curChar == '\r') { error_line++; error_column = 0; } else error_column++; } if (!EOFSeen) { input_stream.backup(1); error_after = curPos <= 1 ? "" : input_stream.GetImage(); } throw new TokenMgrError(EOFSeen, curLexState, error_line, error_column, error_after, curChar, TokenMgrError.LEXICAL_ERROR); } } }
bsd-3-clause
bhav0904/eclipse-collections
eclipse-collections/src/main/java/org/eclipse/collections/impl/block/procedure/CaseProcedure.java
2629
/*
 * Copyright (c) 2016 Goldman Sachs.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * and Eclipse Distribution License v. 1.0 which accompany this distribution.
 * The Eclipse Public License is available at http://www.eclipse.org/legal/epl-v10.html
 * and the Eclipse Distribution License is available at
 * http://www.eclipse.org/org/documents/edl-v10.php.
 */

package org.eclipse.collections.impl.block.procedure;

import java.util.List;

import org.eclipse.collections.api.block.predicate.Predicate;
import org.eclipse.collections.api.block.procedure.Procedure;
import org.eclipse.collections.api.tuple.Pair;
import org.eclipse.collections.impl.factory.Lists;
import org.eclipse.collections.impl.tuple.Tuples;

/**
 * CaseProcedure is an object form of a case statement driven by predicates rather
 * than a single switch value. Cases are tried in the order they were added; the
 * procedure paired with the first predicate that accepts the argument is executed.
 * When no case matches, the default procedure (if one was supplied) runs instead.
 */
public final class CaseProcedure<T> implements Procedure<T>
{
    private static final long serialVersionUID = 1L;

    // Ordered case list: each pair holds a guard predicate and the procedure
    // to run when that guard is the first one to accept the value.
    private final List<Pair<Predicate<? super T>, Procedure<? super T>>> predicateProcedures = Lists.mutable.empty();
    private Procedure<? super T> defaultProcedure;

    public CaseProcedure(Procedure<? super T> defaultProcedure)
    {
        this.defaultProcedure = defaultProcedure;
    }

    public CaseProcedure()
    {
    }

    /**
     * Appends a predicate/procedure case; returns {@code this} for fluent chaining.
     */
    public CaseProcedure<T> addCase(Predicate<? super T> predicate, Procedure<? super T> procedure)
    {
        this.predicateProcedures.add(Tuples.pair(predicate, procedure));
        return this;
    }

    /**
     * Sets the fallback procedure executed when no case matches; returns {@code this}.
     */
    public CaseProcedure<T> setDefault(Procedure<? super T> procedure)
    {
        this.defaultProcedure = procedure;
        return this;
    }

    @Override
    public void value(T argument)
    {
        // Indexed iteration over the backing list; stops at the first accepting case.
        for (int index = 0, count = this.predicateProcedures.size(); index < count; index++)
        {
            Pair<Predicate<? super T>, Procedure<? super T>> candidate = this.predicateProcedures.get(index);
            if (candidate.getOne().accept(argument))
            {
                candidate.getTwo().value(argument);
                return;
            }
        }
        if (this.defaultProcedure != null)
        {
            this.defaultProcedure.value(argument);
        }
    }

    @Override
    public String toString()
    {
        return "new CaseProcedure(" + this.predicateProcedures + ')';
    }
}
bsd-3-clause
firebears-2014/FB2012
src/org/firebears/subsystems/Collector.java
2307
/* * To change this template, choose Tools | Templates * and open the template in the editor. */ package org.firebears.subsystems; import edu.wpi.first.wpilibj.CANJaguar; import edu.wpi.first.wpilibj.DigitalInput; import edu.wpi.first.wpilibj.can.CANTimeoutException; import edu.wpi.first.wpilibj.command.Subsystem; import org.firebears.RobotMap; import org.firebears.commands.ConveyerStop; /** * * @author paul */ public class Collector extends Subsystem { // Put methods for controlling this subsystem // here. Call these from Commands. public static final double MAX_FWD = -1.0; public static final double MAX_REV = 1.0; DigitalInput m_CollectorBallSensor; CANJaguar m_collector; public void initDefaultCommand() { // setDefaultCommand(new CollectorFeed());//requires ball sensor stop setDefaultCommand(new ConveyerStop()); } public Collector () { try { m_collector = new CANJaguar(RobotMap.ELEVATOR_SUCKER_JAG); m_collector.changeControlMode(CANJaguar.ControlMode.kPercentVbus); m_CollectorBallSensor = new DigitalInput(RobotMap.BALL_IN_COLLECTOR_CH); } catch (CANTimeoutException ex) { ex.printStackTrace(); } } public void startCollector(){ try{ m_collector.setX(MAX_FWD); //TODO if ball in collector chamber, stop collector if (RobotMap.DEBUG) {System.out.println("Elevator is running In");} } catch (CANTimeoutException ex) { ex.printStackTrace(); } } public void startCollectorEject(){ try{ m_collector.setX(MAX_REV); //m_elevatorConv.setX(MAX_FWD); if (RobotMap.DEBUG) {System.out.println("Elevator is running Eject");} } catch (CANTimeoutException ex) { ex.printStackTrace(); } } public void stopCollector(){//May not need this try{ m_collector.setX(0.0); if (RobotMap.DEBUG) {System.out.println("Elevator is Stopped");} } catch (CANTimeoutException ex) { ex.printStackTrace(); } } }
bsd-3-clause
sahara-labs/scheduling-server
Reports/src/au/edu/uts/eng/remotelabs/schedserver/reports/ReportsActivator.java
3532
/** * SAHARA Scheduling Server * * Schedules and assigns local laboratory rigs. * * @license See LICENSE in the top level directory for complete license terms. * * Copyright (c) 2009, University of Technology, Sydney * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the University of Technology, Sydney nor the names * of its contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 *
 * @author Tania Machet (tmachet)
 * @date 29th November 2010
 */
package au.edu.uts.eng.remotelabs.schedserver.reports;

import org.apache.axis2.transport.http.AxisServlet;
import org.osgi.framework.BundleActivator;
import org.osgi.framework.BundleContext;
import org.osgi.framework.ServiceRegistration;

import au.edu.uts.eng.remotelabs.schedserver.logger.Logger;
import au.edu.uts.eng.remotelabs.schedserver.logger.LoggerActivator;
import au.edu.uts.eng.remotelabs.schedserver.server.ServletContainer;
import au.edu.uts.eng.remotelabs.schedserver.server.ServletContainerService;

/**
 * OSGi bundle activator for the Reports bundle. On start it registers a
 * servlet container service wrapping an Axis2 servlet that exposes the
 * Reports SOAP interface; on stop it withdraws that registration.
 */
public class ReportsActivator implements BundleActivator
{
    /** Service registration for the Reports SOAP interface. */
    private ServiceRegistration<ServletContainerService> soapReg;

    /** Logger. */
    private Logger logger;

    /**
     * Called by the OSGi framework when this bundle starts. Registers the
     * Reports SOAP servlet with the scheduling server's servlet container.
     *
     * @param context bundle context supplied by the framework
     * @throws Exception propagated to the framework, failing bundle start
     */
    @Override
    public void start(final BundleContext context) throws Exception
    {
        this.logger = LoggerActivator.getLogger();
        this.logger.info("Starting the Reports bundle...");

        /* Register the reports service. */
        this.logger.debug("Registering the Reports SOAP interface service.");
        final ServletContainerService soapService = new ServletContainerService();
        // 'true' presumably marks the servlet as an Axis/SOAP servlet -- TODO confirm
        // against the ServletContainer constructor contract.
        soapService.addServlet(new ServletContainer(new AxisServlet(), true));
        this.soapReg = context.registerService(ServletContainerService.class, soapService, null);
    }

    /**
     * Called by the OSGi framework when this bundle stops. Unregisters the
     * SOAP interface service registered in {@link #start}.
     *
     * @param context bundle context supplied by the framework
     * @throws Exception propagated to the framework
     */
    @Override
    public void stop(final BundleContext context) throws Exception
    {
        this.logger.info("Stopping the Reports bundle...");
        this.logger.info("Shutting down the Reports bundle.");
        this.soapReg.unregister();
    }
}
bsd-3-clause
motech-implementations/mim
testing/src/test/java/org/motechproject/nms/testing/it/mcts/util/MockWsHttpServletForASHAValidation.java
913
package org.motechproject.nms.testing.it.mcts.util;

import org.apache.commons.io.IOUtils;

import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

/* test servlet for data with non ASHA designation among them */
public class MockWsHttpServletForASHAValidation extends HttpServlet {

    /**
     * Ignores the POSTed payload and replies 200 OK with the canned
     * ANM/ASHA-validation response fixture.
     */
    @Override
    protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
        // Drain the request body so the client can complete the POST; the
        // payload itself is irrelevant to this mock.
        IOUtils.toString(req.getInputStream());

        String response = MctsImportTestHelper.getAnmAshaResponseDataASHAValidation();

        resp.setStatus(HttpServletResponse.SC_OK);
        // Encode once and use the byte length for Content-Length: the previous
        // String.length() was a char count, which is wrong for any non-ASCII
        // characters, and the write used the platform default charset.
        byte[] body = response.getBytes(StandardCharsets.UTF_8);
        resp.setContentLength(body.length);
        resp.getOutputStream().write(body);
    }
}
bsd-3-clause
nwjs/chromium.src
chrome/android/javatests/src/org/chromium/chrome/browser/contextmenu/ContextMenuTest.java
51635
// Copyright 2019 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package org.chromium.chrome.browser.contextmenu; import static org.mockito.Mockito.when; import static org.chromium.chrome.browser.contextmenu.ContextMenuCoordinator.ListItemType.CONTEXT_MENU_ITEM; import android.content.ClipData; import android.content.ClipboardManager; import android.content.Context; import android.os.Looper; import android.support.test.InstrumentationRegistry; import android.view.KeyEvent; import androidx.test.filters.LargeTest; import androidx.test.filters.MediumTest; import androidx.test.filters.SmallTest; import org.hamcrest.Matchers; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.MockitoAnnotations; import org.chromium.base.Callback; import org.chromium.base.metrics.RecordHistogram; import org.chromium.base.test.util.CallbackHelper; import org.chromium.base.test.util.CloseableOnMainThread; import org.chromium.base.test.util.CommandLineFlags; import org.chromium.base.test.util.Criteria; import org.chromium.base.test.util.CriteriaHelper; import org.chromium.base.test.util.Feature; import org.chromium.base.test.util.FlakyTest; import org.chromium.base.test.util.Restriction; import org.chromium.chrome.R; import org.chromium.chrome.browser.compositor.bottombar.ephemeraltab.EphemeralTabCoordinator; import org.chromium.chrome.browser.compositor.layouts.LayoutManagerImpl; import org.chromium.chrome.browser.download.DownloadTestRule; import org.chromium.chrome.browser.firstrun.FirstRunStatus; import org.chromium.chrome.browser.flags.ChromeFeatureList; import org.chromium.chrome.browser.flags.ChromeSwitches; import org.chromium.chrome.browser.share.LensUtils; import org.chromium.chrome.browser.share.ShareDelegate; import 
org.chromium.chrome.browser.share.ShareHelper; import org.chromium.chrome.browser.tab.Tab; import org.chromium.chrome.browser.tab.TabCreationState; import org.chromium.chrome.browser.tab.state.CriticalPersistedTabData; import org.chromium.chrome.browser.tabmodel.TabModel; import org.chromium.chrome.browser.tabmodel.TabModelSelectorObserver; import org.chromium.chrome.test.ChromeJUnit4ClassRunner; import org.chromium.chrome.test.util.ChromeTabUtils; import org.chromium.chrome.test.util.browser.Features; import org.chromium.chrome.test.util.browser.contextmenu.ContextMenuUtils; import org.chromium.components.embedder_support.contextmenu.ContextMenuParams; import org.chromium.components.externalauth.ExternalAuthUtils; import org.chromium.components.policy.test.annotations.Policies; import org.chromium.content_public.browser.test.util.DOMUtils; import org.chromium.content_public.browser.test.util.TestThreadUtils; import org.chromium.content_public.browser.test.util.TestTouchUtils; import org.chromium.net.test.EmbeddedTestServer; import org.chromium.ui.base.Clipboard; import org.chromium.ui.base.MenuSourceType; import org.chromium.ui.test.util.UiRestriction; import org.chromium.url.GURL; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicReference; /** * Instrumentation tests for the context menu. 
*/ @RunWith(ChromeJUnit4ClassRunner.class) // clang-format off @CommandLineFlags.Add({ChromeSwitches.DISABLE_FIRST_RUN_EXPERIENCE, ChromeSwitches.GOOGLE_BASE_URL + "=http://example.com/"}) public class ContextMenuTest implements DownloadTestRule.CustomMainActivityStart { @Mock private ContextMenuItemDelegate mItemDelegate; @Mock private ShareDelegate mShareDelegate; // clang-format on @Rule public DownloadTestRule mDownloadTestRule = new DownloadTestRule(this); private static final String TEST_PATH = "/chrome/test/data/android/contextmenu/context_menu_test.html"; private EmbeddedTestServer mTestServer; private String mTestUrl; private static final String FILENAME_GIF = "download.gif"; private static final String FILENAME_PNG = "test_image.png"; private static final String FILENAME_WEBM = "test.webm"; private static final String TEST_GIF_IMAGE_FILE_EXTENSION = ".gif"; private static final String TEST_JPG_IMAGE_FILE_EXTENSION = ".jpg"; // Test chip delegate that always returns valid chip render params. private static final ChipDelegate FAKE_CHIP_DELEGATE = new ChipDelegate() { @Override public boolean isChipSupported() { return true; } @Override public void getChipRenderParams(Callback<ChipRenderParams> callback) { // Do nothing. } @Override public void onMenuClosed() { // Do nothing. } @Override public boolean isValidChipRenderParams(ChipRenderParams chipRenderParams) { return true; } }; // Test Lens chip delegate that always returns valid chip render params. 
private void setupLensChipDelegate() { LensChipDelegate.setShouldSkipIsEnabledCheckForTesting(true); } private static final String[] TEST_FILES = new String[] {FILENAME_GIF, FILENAME_PNG, FILENAME_WEBM}; @Before public void setUp() { TestThreadUtils.runOnUiThreadBlocking(() -> FirstRunStatus.setFirstRunFlowComplete(true)); setupLensChipDelegate(); } @Override public void customMainActivityStart() { mTestServer = EmbeddedTestServer.createAndStartServer(InstrumentationRegistry.getContext()); mTestUrl = mTestServer.getURL(TEST_PATH); deleteTestFiles(); mDownloadTestRule.startMainActivityWithURL(mTestUrl); mDownloadTestRule.assertWaitForPageScaleFactorMatch(0.5f); } @After public void tearDown() { mTestServer.stopAndDestroyServer(); TestThreadUtils.runOnUiThreadBlocking(() -> FirstRunStatus.setFirstRunFlowComplete(false)); deleteTestFiles(); } @Test @MediumTest public void testCopyLinkURL() throws Throwable { Tab tab = mDownloadTestRule.getActivity().getActivityTab(); // Allow DiskWrites temporarily in main thread to avoid // violation during copying under emulator environment. try (CloseableOnMainThread ignored = CloseableOnMainThread.StrictMode.allowDiskWrites()) { ContextMenuUtils.selectContextMenuItem(InstrumentationRegistry.getInstrumentation(), mDownloadTestRule.getActivity(), tab, "testLink", R.id.contextmenu_copy_link_address); } assertStringContains("test_link.html", getClipboardText()); } @Test @MediumTest @Feature({"Browser"}) public void testCopyImageLinkCopiesLinkURL() throws Throwable { Tab tab = mDownloadTestRule.getActivity().getActivityTab(); // Allow DiskWrites temporarily in main thread to avoid // violation during copying under emulator environment. 
try (CloseableOnMainThread ignored = CloseableOnMainThread.StrictMode.allowDiskWrites()) { ContextMenuUtils.selectContextMenuItem(InstrumentationRegistry.getInstrumentation(), mDownloadTestRule.getActivity(), tab, "testImageLink", R.id.contextmenu_copy_link_address); } assertStringContains("test_link.html", getClipboardText()); } @Test @MediumTest @Feature({"Browser"}) @Features.EnableFeatures({ChromeFeatureList.CONTEXT_MENU_ENABLE_LENS_SHOPPING_ALLOWLIST}) @Features.DisableFeatures({ChromeFeatureList.CONTEXT_MENU_SEARCH_WITH_GOOGLE_LENS}) public void testLensShoppingAllowlistWithLensFeaturesDisabled() throws Throwable { Tab tab = mDownloadTestRule.getActivity().getActivityTab(); LensUtils.setFakePassableLensEnvironmentForTesting(true); LensUtils.setFakeImageUrlInShoppingAllowlistForTesting(true); ShareHelper.setIgnoreActivityNotFoundExceptionForTesting(true); hardcodeTestImageForSharing(TEST_JPG_IMAGE_FILE_EXTENSION); ContextMenuUtils.selectContextMenuItem(InstrumentationRegistry.getInstrumentation(), mDownloadTestRule.getActivity(), tab, "testImage", R.id.contextmenu_search_by_image); Assert.assertEquals(1, RecordHistogram.getHistogramTotalCountForTesting( "ContextMenu.SelectedOptionAndroid.Image")); Assert.assertEquals(1, RecordHistogram.getHistogramTotalCountForTesting( "ContextMenu.SelectedOptionAndroid.Image.ShoppingDomain")); } @Test @MediumTest @Feature({"Browser"}) public void testLongPressOnImage() throws TimeoutException { checkOpenImageInNewTab("testImage", "/chrome/test/data/android/contextmenu/test_image.png"); } /** * @MediumTest * @Feature({"Browser"}) * @CommandLineFlags.Add(ChromeSwitches.DISABLE_DOCUMENT_MODE) */ @Test @FlakyTest(message = "http://crbug.com/606939") public void testLongPressOnImageLink() throws TimeoutException { checkOpenImageInNewTab( "testImageLink", "/chrome/test/data/android/contextmenu/test_image.png"); } private void checkOpenImageInNewTab(String domId, final String expectedPath) throws TimeoutException { final Tab 
activityTab = mDownloadTestRule.getActivity().getActivityTab(); final CallbackHelper newTabCallback = new CallbackHelper(); final AtomicReference<Tab> newTab = new AtomicReference<>(); TestThreadUtils.runOnUiThreadBlocking(() -> { mDownloadTestRule.getActivity().getTabModelSelector().addObserver( new TabModelSelectorObserver() { @Override public void onNewTabCreated(Tab tab, @TabCreationState int creationState) { if (CriticalPersistedTabData.from(tab).getParentId() != activityTab.getId()) { return; } newTab.set(tab); newTabCallback.notifyCalled(); mDownloadTestRule.getActivity().getTabModelSelector().removeObserver( this); } }); }); int callbackCount = newTabCallback.getCallCount(); ContextMenuUtils.selectContextMenuItem(InstrumentationRegistry.getInstrumentation(), mDownloadTestRule.getActivity(), activityTab, domId, R.id.contextmenu_open_image_in_new_tab); try { newTabCallback.waitForCallback(callbackCount); } catch (TimeoutException ex) { Assert.fail("New tab never created from context menu press"); } // Only check for the URL matching as the tab will not be fully created in svelte mode. 
final String expectedUrl = mTestServer.getURL(expectedPath); CriteriaHelper.pollUiThread( () -> Criteria.checkThat(ChromeTabUtils.getUrlStringOnUiThread(newTab.get()), Matchers.is(expectedUrl))); } @Test @MediumTest public void testDismissContextMenuOnBack() throws TimeoutException { Tab tab = mDownloadTestRule.getActivity().getActivityTab(); ContextMenuCoordinator menuCoordinator = ContextMenuUtils.openContextMenu(tab, "testImage"); Assert.assertNotNull("Context menu was not properly created", menuCoordinator); CriteriaHelper.pollUiThread(() -> { return !mDownloadTestRule.getActivity().hasWindowFocus(); }, "Context menu did not have window focus"); InstrumentationRegistry.getInstrumentation().sendKeyDownUpSync(KeyEvent.KEYCODE_BACK); CriteriaHelper.pollUiThread(() -> { return mDownloadTestRule.getActivity().hasWindowFocus(); }, "Activity did not regain focus."); } @Test @MediumTest @Feature({"Browser"}) @Features.EnableFeatures({ChromeFeatureList.CONTEXT_MENU_TRANSLATE_WITH_GOOGLE_LENS}) public void testLensTranslateChipNotShowingIfNotEnabled() throws Throwable { // Required to avoid runtime error. Looper.prepare(); Tab tab = mDownloadTestRule.getActivity().getActivityTab(); hardcodeTestImageForSharing(TEST_JPG_IMAGE_FILE_EXTENSION); ContextMenuCoordinator menuCoordinator = ContextMenuUtils.openContextMenu(tab, "testImage"); // Needs to run on UI thread so creation happens on same thread as dismissal. TestThreadUtils.runOnUiThreadBlocking(() -> { Assert.assertNull("Chip popoup was initialized.", menuCoordinator.getCurrentPopupWindowForTesting()); }); } @Test @MediumTest @Feature({"Browser"}) public void testSelectLensTranslateChip() throws Throwable { // Required to avoid runtime error. 
Looper.prepare(); Tab tab = mDownloadTestRule.getActivity().getActivityTab(); ShareHelper.setIgnoreActivityNotFoundExceptionForTesting(true); hardcodeTestImageForSharing(TEST_JPG_IMAGE_FILE_EXTENSION); ContextMenuCoordinator menuCoordinator = ContextMenuUtils.openContextMenu(tab, "testImage"); // Needs to run on UI thread so creation happens on same thread as dismissal. TestThreadUtils.runOnUiThreadBlocking(() -> { menuCoordinator.simulateTranslateImageClassificationForTesting(); Assert.assertTrue("Chip popoup not showing.", menuCoordinator.getCurrentPopupWindowForTesting().isShowing()); menuCoordinator.clickChipForTesting(); }); Assert.assertEquals("Selection histogram pings not equal to one", 1, RecordHistogram.getHistogramValueCountForTesting( "ContextMenu.LensChip.Event", ContextMenuChipController.ChipEvent.CLICKED)); Assert.assertFalse("Chip popoup still showing.", menuCoordinator.getCurrentPopupWindowForTesting().isShowing()); } @Test @MediumTest @Feature({"Browser"}) @Features.EnableFeatures({ChromeFeatureList.CONTEXT_MENU_TRANSLATE_WITH_GOOGLE_LENS}) public void testLensChipNotShowingAfterMenuDismissed() throws Throwable { // Required to avoid runtime error. Looper.prepare(); Tab tab = mDownloadTestRule.getActivity().getActivityTab(); ShareHelper.setIgnoreActivityNotFoundExceptionForTesting(true); hardcodeTestImageForSharing(TEST_JPG_IMAGE_FILE_EXTENSION); ContextMenuCoordinator menuCoordinator = ContextMenuUtils.openContextMenu(tab, "testImage"); // Dismiss context menu. TestTouchUtils.singleClickView(InstrumentationRegistry.getInstrumentation(), tab.getView(), tab.getView().getWidth() - 5, tab.getView().getHeight() - 5); // Needs to run on UI thread so creation happens on same thread as dismissal. 
TestThreadUtils.runOnUiThreadBlocking(() -> { ChipRenderParams chipRenderParams = menuCoordinator.simulateImageClassificationForTesting(); menuCoordinator.getChipRenderParamsCallbackForTesting(FAKE_CHIP_DELEGATE) .bind(chipRenderParams) .run(); Assert.assertNull("Chip popoup was initialized.", menuCoordinator.getCurrentPopupWindowForTesting()); }); } // Assert that focus is unchanged and that the chip popup does not block the dismissal of the // context menu. @Test @MediumTest @Features.EnableFeatures({ChromeFeatureList.CONTEXT_MENU_TRANSLATE_WITH_GOOGLE_LENS}) public void testDismissContextMenuOnClickLensTranslateChipEnabled() throws TimeoutException { // Required to avoid runtime error. Looper.prepare(); Tab tab = mDownloadTestRule.getActivity().getActivityTab(); ContextMenuCoordinator menuCoordinator = ContextMenuUtils.openContextMenu(tab, "testImage"); // Needs to run on UI thread so creation happens on same thread as dismissal. TestThreadUtils.runOnUiThreadBlocking( () -> menuCoordinator.simulateTranslateImageClassificationForTesting()); Assert.assertNotNull("Context menu was not properly created", menuCoordinator); CriteriaHelper.pollUiThread(() -> { return !mDownloadTestRule.getActivity().hasWindowFocus(); }, "Context menu did not have window focus"); TestTouchUtils.singleClickView(InstrumentationRegistry.getInstrumentation(), tab.getView(), tab.getView().getWidth() - 5, tab.getView().getHeight() - 5); CriteriaHelper.pollUiThread(() -> { return mDownloadTestRule.getActivity().hasWindowFocus(); }, "Activity did not regain focus."); } @Test @MediumTest @Feature({"Browser"}) @Features.EnableFeatures({ChromeFeatureList.CONTEXT_MENU_GOOGLE_LENS_CHIP}) public void testLensShoppingChipNotShowingIfNotEnabled() throws Throwable { // Required to avoid runtime error. 
Looper.prepare(); Tab tab = mDownloadTestRule.getActivity().getActivityTab(); hardcodeTestImageForSharing(TEST_JPG_IMAGE_FILE_EXTENSION); ContextMenuCoordinator menuCoordinator = ContextMenuUtils.openContextMenu(tab, "testImage"); // Needs to run on UI thread so creation happens on same thread as dismissal. TestThreadUtils.runOnUiThreadBlocking(() -> { Assert.assertNull("Chip popoup was initialized.", menuCoordinator.getCurrentPopupWindowForTesting()); }); } @Test @MediumTest @Feature({"Browser"}) public void testSelectLensShoppingChip() throws Throwable { // Required to avoid runtime error. Looper.prepare(); Tab tab = mDownloadTestRule.getActivity().getActivityTab(); ShareHelper.setIgnoreActivityNotFoundExceptionForTesting(true); hardcodeTestImageForSharing(TEST_JPG_IMAGE_FILE_EXTENSION); ContextMenuCoordinator menuCoordinator = ContextMenuUtils.openContextMenu(tab, "testImage"); // Needs to run on UI thread so creation happens on same thread as dismissal. TestThreadUtils.runOnUiThreadBlocking(() -> { menuCoordinator.simulateShoppyImageClassificationForTesting(); Assert.assertTrue("Chip popoup not showing.", menuCoordinator.getCurrentPopupWindowForTesting().isShowing()); menuCoordinator.clickChipForTesting(); }); Assert.assertEquals("Selection histogram pings not equal to one", 1, RecordHistogram.getHistogramValueCountForTesting( "ContextMenu.LensChip.Event", ContextMenuChipController.ChipEvent.CLICKED)); Assert.assertFalse("Chip popoup still showing.", menuCoordinator.getCurrentPopupWindowForTesting().isShowing()); } // Assert that focus is unchanged and that the chip popup does not block the dismissal of the // context menu. @Test @MediumTest public void testDismissContextMenuOnClickShoppingLensChipEnabled() throws TimeoutException { // Required to avoid runtime error. 
Looper.prepare(); Tab tab = mDownloadTestRule.getActivity().getActivityTab(); ContextMenuCoordinator menuCoordinator = ContextMenuUtils.openContextMenu(tab, "testImage"); // Needs to run on UI thread so creation happens on same thread as dismissal. TestThreadUtils.runOnUiThreadBlocking( () -> menuCoordinator.simulateShoppyImageClassificationForTesting()); Assert.assertNotNull("Context menu was not properly created", menuCoordinator); CriteriaHelper.pollUiThread(() -> { return !mDownloadTestRule.getActivity().hasWindowFocus(); }, "Context menu did not have window focus"); TestTouchUtils.singleClickView(InstrumentationRegistry.getInstrumentation(), tab.getView(), tab.getView().getWidth() - 5, tab.getView().getHeight() - 5); CriteriaHelper.pollUiThread(() -> { return mDownloadTestRule.getActivity().hasWindowFocus(); }, "Activity did not regain focus."); } @Test @MediumTest public void testDismissContextMenuOnClick() throws TimeoutException { Tab tab = mDownloadTestRule.getActivity().getActivityTab(); ContextMenuCoordinator menuCoordinator = ContextMenuUtils.openContextMenu(tab, "testImage"); Assert.assertNotNull("Context menu was not properly created", menuCoordinator); CriteriaHelper.pollUiThread(() -> { return !mDownloadTestRule.getActivity().hasWindowFocus(); }, "Context menu did not have window focus"); TestTouchUtils.singleClickView(InstrumentationRegistry.getInstrumentation(), tab.getView(), tab.getView().getWidth() - 5, tab.getView().getHeight() - 5); CriteriaHelper.pollUiThread(() -> { return mDownloadTestRule.getActivity().hasWindowFocus(); }, "Activity did not regain focus."); } @Test @MediumTest public void testCopyEmailAddress() throws Throwable { Tab tab = mDownloadTestRule.getActivity().getActivityTab(); // Allow all thread policies temporarily in main thread to avoid // DiskWrite and UnBufferedIo violations during copying under // emulator environment. 
try (CloseableOnMainThread ignored = CloseableOnMainThread.StrictMode.allowAllThreadPolicies()) { ContextMenuUtils.selectContextMenuItem(InstrumentationRegistry.getInstrumentation(), mDownloadTestRule.getActivity(), tab, "testEmail", R.id.contextmenu_copy); } Assert.assertEquals("Copied email address is not correct", "someone1@example.com,someone2@example.com", getClipboardText()); } @Test @MediumTest public void testCopyTelNumber() throws Throwable { Tab tab = mDownloadTestRule.getActivity().getActivityTab(); // Allow DiskWrites temporarily in main thread to avoid // violation during copying under emulator environment. try (CloseableOnMainThread ignored = CloseableOnMainThread.StrictMode.allowDiskWrites()) { ContextMenuUtils.selectContextMenuItem(InstrumentationRegistry.getInstrumentation(), mDownloadTestRule.getActivity(), tab, "testTel", R.id.contextmenu_copy); } Assert.assertEquals("Copied tel number is not correct", "10000000000", getClipboardText()); } @Test @LargeTest public void testSaveDataUrl() throws TimeoutException, SecurityException, IOException { saveMediaFromContextMenu("dataUrlIcon", R.id.contextmenu_save_image, FILENAME_GIF); } @Test @LargeTest public void testSaveImage() throws TimeoutException, SecurityException, IOException { saveMediaFromContextMenu("testImage", R.id.contextmenu_save_image, FILENAME_PNG); } @Test @LargeTest public void testSaveVideo() throws TimeoutException, SecurityException, IOException { saveMediaFromContextMenu("videoDOMElement", R.id.contextmenu_save_video, FILENAME_WEBM); } /** * Opens a link and image in new tabs and verifies the order of the tabs. Also verifies that * the parent page remains in front after opening links in new tabs. * * This test only applies in tabbed mode. In document mode, Android handles the ordering of the * tabs. 
*/ @Test @LargeTest public void testOpenLinksInNewTabsAndVerifyTabIndexOrdering() throws TimeoutException { TabModel tabModel = mDownloadTestRule.getActivity().getCurrentTabModel(); int numOpenedTabs = tabModel.getCount(); Tab tab = mDownloadTestRule.getActivity().getActivityTab(); ContextMenuUtils.selectContextMenuItem(InstrumentationRegistry.getInstrumentation(), mDownloadTestRule.getActivity(), tab, "testLink", R.id.contextmenu_open_in_new_tab); int indexOfLinkPage = numOpenedTabs; final int expectedNumOpenedTabs = indexOfLinkPage + 1; CriteriaHelper.pollUiThread(() -> { Criteria.checkThat("Number of open tabs does not match", tabModel.getCount(), Matchers.is(expectedNumOpenedTabs)); }); numOpenedTabs = expectedNumOpenedTabs; // Wait for any new tab animation to finish if we're being driven by the compositor. final LayoutManagerImpl layoutDriver = mDownloadTestRule.getActivity() .getCompositorViewHolderForTesting() .getLayoutManager(); CriteriaHelper.pollUiThread(() -> { return layoutDriver.getActiveLayout().shouldDisplayContentOverlay(); }, "Background tab animation not finished."); ContextMenuUtils.selectContextMenuItem(InstrumentationRegistry.getInstrumentation(), mDownloadTestRule.getActivity(), tab, "testLink2", R.id.contextmenu_open_in_new_tab); int indexOfLinkPage2 = numOpenedTabs; final int expectedNumOpenedTabs2 = indexOfLinkPage2 + 1; CriteriaHelper.pollUiThread(() -> { Criteria.checkThat("Number of open tabs does not match", tabModel.getCount(), Matchers.is(expectedNumOpenedTabs2)); }); numOpenedTabs = expectedNumOpenedTabs2; // Verify the Url is still the same of Parent page. Assert.assertEquals(mTestUrl, ChromeTabUtils.getUrlStringOnUiThread( mDownloadTestRule.getActivity().getActivityTab())); // Verify that the background tabs were opened in the expected order. 
String newTabUrl = mTestServer.getURL("/chrome/test/data/android/contextmenu/test_link.html"); Assert.assertEquals(newTabUrl, ChromeTabUtils.getUrlStringOnUiThread(tabModel.getTabAt(indexOfLinkPage))); String imageUrl = mTestServer.getURL("/chrome/test/data/android/contextmenu/test_link2.html"); Assert.assertEquals(imageUrl, ChromeTabUtils.getUrlStringOnUiThread(tabModel.getTabAt(indexOfLinkPage2))); } @Test @SmallTest @Feature({"Browser", "ContextMenu"}) public void testContextMenuRetrievesLinkOptions() throws TimeoutException { Tab tab = mDownloadTestRule.getActivity().getActivityTab(); ContextMenuCoordinator menu = ContextMenuUtils.openContextMenu(tab, "testLink"); Integer[] expectedItems = {R.id.contextmenu_open_in_new_tab, R.id.contextmenu_open_in_incognito_tab, R.id.contextmenu_save_link_as, R.id.contextmenu_copy_link_text, R.id.contextmenu_copy_link_address, R.id.contextmenu_share_link}; expectedItems = addItemsIf(EphemeralTabCoordinator.isSupported(), expectedItems, new Integer[] {R.id.contextmenu_open_in_ephemeral_tab}); expectedItems = addItemsIf(ChromeFeatureList.isEnabled(ChromeFeatureList.READ_LATER), expectedItems, new Integer[] {R.id.contextmenu_read_later}); assertMenuItemsAreEqual(menu, expectedItems); } @Test @SmallTest @Feature({"Browser", "ContextMenu"}) @Features.DisableFeatures({ChromeFeatureList.CONTEXT_MENU_SEARCH_WITH_GOOGLE_LENS}) public void testContextMenuRetrievesImageOptions() throws TimeoutException { Tab tab = mDownloadTestRule.getActivity().getActivityTab(); ContextMenuCoordinator menu = ContextMenuUtils.openContextMenu(tab, "testImage"); Integer[] expectedItems = {R.id.contextmenu_save_image, R.id.contextmenu_open_image_in_new_tab, R.id.contextmenu_search_by_image, R.id.contextmenu_share_image, R.id.contextmenu_copy_image}; Integer[] featureItems = {R.id.contextmenu_open_image_in_ephemeral_tab}; expectedItems = addItemsIf(EphemeralTabCoordinator.isSupported(), expectedItems, featureItems); assertMenuItemsAreEqual(menu, 
expectedItems); } @Test @SmallTest @Feature({"Browser", "ContextMenu"}) @Features.DisableFeatures({ChromeFeatureList.CONTEXT_MENU_SEARCH_WITH_GOOGLE_LENS}) @Features.EnableFeatures({ChromeFeatureList.CONTEXT_MENU_ENABLE_LENS_SHOPPING_ALLOWLIST}) public void testContextMenuRetrievesImageOptionsWithLensShoppingAllowlist() throws TimeoutException { Tab tab = mDownloadTestRule.getActivity().getActivityTab(); ContextMenuCoordinator menu = ContextMenuUtils.openContextMenu(tab, "testImage"); Integer[] expectedItems = {R.id.contextmenu_save_image, R.id.contextmenu_open_image_in_new_tab, R.id.contextmenu_search_by_image, R.id.contextmenu_share_image, R.id.contextmenu_copy_image}; Integer[] featureItems = {R.id.contextmenu_open_image_in_ephemeral_tab}; expectedItems = addItemsIf(EphemeralTabCoordinator.isSupported(), expectedItems, featureItems); assertMenuItemsAreEqual(menu, expectedItems); } @Test @SmallTest @Feature({"Browser", "ContextMenu"}) @Restriction(UiRestriction.RESTRICTION_TYPE_PHONE) public void testContextMenuRetrievesImageOptionsLensEnabled() throws TimeoutException { LensUtils.setFakePassableLensEnvironmentForTesting(true); Tab tab = mDownloadTestRule.getActivity().getActivityTab(); ContextMenuCoordinator menu = ContextMenuUtils.openContextMenu(tab, "testImage"); Integer[] expectedItems = {R.id.contextmenu_save_image, R.id.contextmenu_open_image_in_new_tab, R.id.contextmenu_search_with_google_lens, R.id.contextmenu_share_image, R.id.contextmenu_copy_image}; Integer[] featureItems = {R.id.contextmenu_open_image_in_ephemeral_tab}; expectedItems = addItemsIf(EphemeralTabCoordinator.isSupported(), expectedItems, featureItems); assertMenuItemsAreEqual(menu, expectedItems); } @Test @SmallTest @Feature({"Browser", "ContextMenu"}) @Features.DisableFeatures({ChromeFeatureList.CONTEXT_MENU_GOOGLE_LENS_CHIP}) @Features.EnableFeatures({ChromeFeatureList.CONTEXT_MENU_ENABLE_LENS_SHOPPING_ALLOWLIST, ChromeFeatureList.CONTEXT_MENU_SHOP_WITH_GOOGLE_LENS}) 
@Restriction(UiRestriction.RESTRICTION_TYPE_PHONE) public void testContextMenuLensEnabledShopImageWithGoogleLens() throws TimeoutException { LensUtils.setFakePassableLensEnvironmentForTesting(true); LensUtils.setFakeImageUrlInShoppingAllowlistForTesting(true); Tab tab = mDownloadTestRule.getActivity().getActivityTab(); ContextMenuCoordinator menu = ContextMenuUtils.openContextMenu(tab, "testImage"); Integer[] expectedItems = {R.id.contextmenu_save_image, R.id.contextmenu_open_image_in_new_tab, R.id.contextmenu_share_image, R.id.contextmenu_shop_image_with_google_lens, R.id.contextmenu_copy_image}; Integer[] featureItems = {R.id.contextmenu_open_image_in_ephemeral_tab}; expectedItems = addItemsIf(EphemeralTabCoordinator.isSupported(), expectedItems, featureItems); assertMenuItemsAreEqual(menu, expectedItems); } @Test @SmallTest @Feature({"Browser", "ContextMenu"}) @Features.DisableFeatures({ChromeFeatureList.CONTEXT_MENU_SHOP_WITH_GOOGLE_LENS}) @CommandLineFlags. Add({"enable-features=" + ChromeFeatureList.CONTEXT_MENU_ENABLE_LENS_SHOPPING_ALLOWLIST + "<FakeStudyName", "force-fieldtrials=FakeStudyName/Enabled", "force-fieldtrial-params=FakeStudyName.Enabled:shoppingUrlPatterns/^shopping-site.*"}) @Restriction(UiRestriction.RESTRICTION_TYPE_PHONE) public void testContextMenuLensDisableShopWithGoogleLensForShoppingUrl() throws TimeoutException { LensUtils.setFakePassableLensEnvironmentForTesting(true); Tab tab = mDownloadTestRule.getActivity().getActivityTab(); ContextMenuCoordinator menu = ContextMenuUtils.openContextMenu(tab, "testImage"); Integer[] expectedItems = {R.id.contextmenu_save_image, R.id.contextmenu_open_image_in_new_tab, R.id.contextmenu_search_with_google_lens, R.id.contextmenu_share_image, R.id.contextmenu_copy_image}; Integer[] featureItems = {R.id.contextmenu_open_image_in_ephemeral_tab}; expectedItems = addItemsIf(EphemeralTabCoordinator.isSupported(), expectedItems, featureItems); assertMenuItemsAreEqual(menu, expectedItems); } @Test @SmallTest 
@Feature({"Browser", "ContextMenu"}) @Policies.Add({ @Policies.Item(key = "DefaultSearchProviderEnabled", string = "false") }) public void testContextMenuRetrievesImageOptions_NoDefaultSearchEngine() throws TimeoutException { Tab tab = mDownloadTestRule.getActivity().getActivityTab(); ContextMenuCoordinator menu = ContextMenuUtils.openContextMenu(tab, "testImage"); Integer[] expectedItems = {R.id.contextmenu_save_image, R.id.contextmenu_open_image_in_new_tab, R.id.contextmenu_share_image, R.id.contextmenu_copy_image}; Integer[] featureItems = {R.id.contextmenu_open_image_in_ephemeral_tab}; expectedItems = addItemsIf(EphemeralTabCoordinator.isSupported(), expectedItems, featureItems); assertMenuItemsAreEqual(menu, expectedItems); } @Test @SmallTest @Feature({"Browser", "ContextMenu"}) @Policies.Add({ @Policies.Item(key = "DefaultSearchProviderEnabled", string = "false") }) public void testContextMenuRetrievesImageOptions_NoDefaultSearchEngineLensEnabled() throws TimeoutException { LensUtils.setFakePassableLensEnvironmentForTesting(true); Tab tab = mDownloadTestRule.getActivity().getActivityTab(); ContextMenuCoordinator menu = ContextMenuUtils.openContextMenu(tab, "testImage"); // Search with Google Lens is only supported when Google is the default search provider. 
Integer[] expectedItems = {R.id.contextmenu_save_image, R.id.contextmenu_open_image_in_new_tab, R.id.contextmenu_share_image, R.id.contextmenu_copy_image}; Integer[] featureItems = {R.id.contextmenu_open_image_in_ephemeral_tab}; expectedItems = addItemsIf(EphemeralTabCoordinator.isSupported(), expectedItems, featureItems); assertMenuItemsAreEqual(menu, expectedItems); } @Test @SmallTest @Feature({"Browser", "ContextMenu"}) @Features.DisableFeatures({ChromeFeatureList.CONTEXT_MENU_SEARCH_WITH_GOOGLE_LENS}) public void testContextMenuRetrievesImageLinkOptions() throws TimeoutException { Tab tab = mDownloadTestRule.getActivity().getActivityTab(); ContextMenuCoordinator menu = ContextMenuUtils.openContextMenu(tab, "testImageLink"); Integer[] expectedItems = {R.id.contextmenu_open_in_new_tab, R.id.contextmenu_open_in_incognito_tab, R.id.contextmenu_copy_link_address, R.id.contextmenu_save_link_as, R.id.contextmenu_save_image, R.id.contextmenu_open_image_in_new_tab, R.id.contextmenu_search_by_image, R.id.contextmenu_share_image, R.id.contextmenu_share_link, R.id.contextmenu_copy_image}; Integer[] featureItems = {R.id.contextmenu_open_in_ephemeral_tab, R.id.contextmenu_open_image_in_ephemeral_tab}; expectedItems = addItemsIf(EphemeralTabCoordinator.isSupported(), expectedItems, featureItems); assertMenuItemsAreEqual(menu, expectedItems); } @Test @SmallTest @Feature({"Browser", "ContextMenu"}) @Restriction(UiRestriction.RESTRICTION_TYPE_PHONE) public void testContextMenuRetrievesImageLinkOptionsSearchLensEnabled() throws TimeoutException { LensUtils.setFakePassableLensEnvironmentForTesting(true); Tab tab = mDownloadTestRule.getActivity().getActivityTab(); ContextMenuCoordinator menu = ContextMenuUtils.openContextMenu(tab, "testImageLink"); Integer[] expectedItems = {R.id.contextmenu_open_in_new_tab, R.id.contextmenu_open_in_incognito_tab, R.id.contextmenu_copy_link_address, R.id.contextmenu_save_link_as, R.id.contextmenu_save_image, R.id.contextmenu_open_image_in_new_tab, 
R.id.contextmenu_search_with_google_lens, R.id.contextmenu_share_image, R.id.contextmenu_share_link, R.id.contextmenu_copy_image}; Integer[] featureItems = {R.id.contextmenu_open_in_ephemeral_tab, R.id.contextmenu_open_image_in_ephemeral_tab}; expectedItems = addItemsIf(EphemeralTabCoordinator.isSupported(), expectedItems, featureItems); assertMenuItemsAreEqual(menu, expectedItems); } @Test @SmallTest @Feature({"Browser", "ContextMenu"}) @CommandLineFlags. Add({"enable-features=" + ChromeFeatureList.CONTEXT_MENU_GOOGLE_LENS_CHIP + "<FakeStudyName", "force-fieldtrials=FakeStudyName/Enabled", "force-fieldtrial-params=FakeStudyName.Enabled:orderShareImageBeforeLens/true"}) @Restriction(UiRestriction.RESTRICTION_TYPE_PHONE) public void testContextMenuShareImageStillAddedWhenReordered() throws TimeoutException { LensUtils.setFakePassableLensEnvironmentForTesting(true); Tab tab = mDownloadTestRule.getActivity().getActivityTab(); ContextMenuCoordinator menu = ContextMenuUtils.openContextMenu(tab, "testImageLink"); Integer[] expectedItems = {R.id.contextmenu_open_in_new_tab, R.id.contextmenu_open_in_incognito_tab, R.id.contextmenu_copy_link_address, R.id.contextmenu_save_link_as, R.id.contextmenu_save_image, R.id.contextmenu_open_image_in_new_tab, R.id.contextmenu_share_image, R.id.contextmenu_search_with_google_lens, R.id.contextmenu_share_link, R.id.contextmenu_copy_image}; Integer[] featureItems = {R.id.contextmenu_open_in_ephemeral_tab, R.id.contextmenu_open_image_in_ephemeral_tab}; expectedItems = addItemsIf(EphemeralTabCoordinator.isSupported(), expectedItems, featureItems); assertMenuItemsAreEqual(menu, expectedItems); } @Test @SmallTest @Feature({"Browser", "ContextMenu"}) public void testContextMenuRetrievesVideoOptions() throws TimeoutException { Tab tab = mDownloadTestRule.getActivity().getActivityTab(); DOMUtils.clickNode( mDownloadTestRule.getActivity().getCurrentWebContents(), "videoDOMElement"); ContextMenuCoordinator menu = 
ContextMenuUtils.openContextMenu(tab, "videoDOMElement"); Integer[] expectedItems = {R.id.contextmenu_save_video}; assertMenuItemsAreEqual(menu, expectedItems); } @Test @SmallTest @Feature({"Browser", "ContextMenu"}) @CommandLineFlags.Add({"enable-features=" + ChromeFeatureList.CONTEXT_MENU_SEARCH_WITH_GOOGLE_LENS + "<FakeStudyName", "force-fieldtrials=FakeStudyName/Enabled"}) @Restriction(UiRestriction.RESTRICTION_TYPE_PHONE) public void testSearchWithGoogleLensMenuItemName() throws Throwable { Tab tab = mDownloadTestRule.getActivity().getActivityTab(); LensUtils.setFakePassableLensEnvironmentForTesting(true); ShareHelper.setIgnoreActivityNotFoundExceptionForTesting(true); hardcodeTestImageForSharing(TEST_JPG_IMAGE_FILE_EXTENSION); ContextMenuCoordinator menu = ContextMenuUtils.openContextMenu(tab, "testImage"); Integer[] expectedItems = {R.id.contextmenu_save_image, R.id.contextmenu_open_image_in_new_tab, R.id.contextmenu_share_image, R.id.contextmenu_copy_image, R.id.contextmenu_search_with_google_lens}; expectedItems = addItemsIf(EphemeralTabCoordinator.isSupported(), expectedItems, new Integer[] {R.id.contextmenu_open_image_in_ephemeral_tab}); String title = getMenuTitleFromItem(menu, R.id.contextmenu_search_with_google_lens); Assert.assertTrue("Context menu item name should be \'Search with Google Lens\'.", title.startsWith("Search with Google Lens")); assertMenuItemsAreEqual(menu, expectedItems); } @Test @SmallTest @Feature({"Browser", "ContextMenu"}) @CommandLineFlags.Add({"enable-features=" + ChromeFeatureList.CONTEXT_MENU_SEARCH_WITH_GOOGLE_LENS + "<FakeStudyName", "force-fieldtrials=FakeStudyName/Enabled", "force-fieldtrial-params=FakeStudyName.Enabled:useSearchImageWithGoogleLensItemName/true"}) @Restriction(UiRestriction.RESTRICTION_TYPE_PHONE) public void testSearchImageWithGoogleLensMenuItemName() throws Throwable { Tab tab = mDownloadTestRule.getActivity().getActivityTab(); LensUtils.setFakePassableLensEnvironmentForTesting(true); 
ShareHelper.setIgnoreActivityNotFoundExceptionForTesting(true); hardcodeTestImageForSharing(TEST_JPG_IMAGE_FILE_EXTENSION); ContextMenuCoordinator menu = ContextMenuUtils.openContextMenu(tab, "testImage"); Integer[] expectedItems = {R.id.contextmenu_save_image, R.id.contextmenu_open_image_in_new_tab, R.id.contextmenu_share_image, R.id.contextmenu_copy_image, R.id.contextmenu_search_with_google_lens}; expectedItems = addItemsIf(EphemeralTabCoordinator.isSupported(), expectedItems, new Integer[] {R.id.contextmenu_open_image_in_ephemeral_tab}); String title = getMenuTitleFromItem(menu, R.id.contextmenu_search_with_google_lens); Assert.assertTrue("Context menu item name should be \'Search image with Google Lens\'.", title.startsWith("Search image with Google Lens")); assertMenuItemsAreEqual(menu, expectedItems); } @Test @SmallTest @Feature({"Browser", "ContextMenu"}) public void testCopyImage() throws Throwable { // Clear the clipboard. Clipboard.getInstance().setText(""); hardcodeTestImageForSharing(TEST_GIF_IMAGE_FILE_EXTENSION); Tab tab = mDownloadTestRule.getActivity().getActivityTab(); // Allow all thread policies temporarily in main thread to avoid // DiskWrite and UnBufferedIo violations during copying under // emulator environment. 
try (CloseableOnMainThread ignored = CloseableOnMainThread.StrictMode.allowAllThreadPolicies()) { ContextMenuUtils.selectContextMenuItem(InstrumentationRegistry.getInstrumentation(), mDownloadTestRule.getActivity(), tab, "dataUrlIcon", R.id.contextmenu_copy_image); } CriteriaHelper.pollUiThread(() -> { Criteria.checkThat(Clipboard.getInstance().getImageUri(), Matchers.notNullValue()); }); String imageUriString = Clipboard.getInstance().getImageUri().toString(); Assert.assertTrue("Image content prefix is not correct", imageUriString.startsWith( "content://org.chromium.chrome.tests.FileProvider/images/screenshot/")); Assert.assertTrue("Image extension is not correct", imageUriString.endsWith(TEST_GIF_IMAGE_FILE_EXTENSION)); // Clean up the clipboard. Clipboard.getInstance().setText(""); } @Test @SmallTest @Feature({"Browser", "ContextMenu"}) public void testContextMenuOpenedFromHighlight() { MockitoAnnotations.initMocks(this); when(mItemDelegate.isIncognito()).thenReturn(false); when(mItemDelegate.getPageTitle()).thenReturn(""); Tab tab = mDownloadTestRule.getActivity().getActivityTab(); ContextMenuHelper contextMenuHelper = ContextMenuHelper.createForTesting(0, tab.getWebContents()); ContextMenuParams params = new ContextMenuParams(0, 0, new GURL("http://example.com/"), GURL.emptyGURL(), "", GURL.emptyGURL(), GURL.emptyGURL(), "", null, false, 0, 0, MenuSourceType.MENU_SOURCE_TOUCH, /*getOpenedFromHighlight*/ true); ContextMenuPopulatorFactory populatorFactory = new ChromeContextMenuPopulatorFactory( mItemDelegate, () -> mShareDelegate, ChromeContextMenuPopulator.ContextMenuMode.NORMAL, ExternalAuthUtils.getInstance()); Integer[] expectedItems = {R.id.contextmenu_share_highlight, R.id.contextmenu_remove_highlight, R.id.contextmenu_learn_more}; TestThreadUtils.runOnUiThreadBlocking(() -> { ContextMenuHelper.setMenuShownCallbackForTests((coordinator) -> { assertMenuItemsAreEqual(coordinator, expectedItems); Assert.assertEquals( 1, 
RecordHistogram.getHistogramTotalCountForTesting("ContextMenu.Shown")); Assert.assertEquals(1, RecordHistogram.getHistogramTotalCountForTesting( "ContextMenu.Shown.SharedHighlightingInteraction")); ContextMenuHelper.setMenuShownCallbackForTests(null); }); contextMenuHelper.showContextMenuForTesting( populatorFactory, params, null, tab.getView(), 0); }); } // TODO(benwgold): Add more test coverage for histogram recording of other context menu types. /** * Takes all the visible items on the menu and compares them to a the list of expected items. * @param menu A context menu that is displaying visible items. * @param expectedItems A list of items that is expected to appear within a context menu. The * list does not need to be ordered. */ private void assertMenuItemsAreEqual(ContextMenuCoordinator menu, Integer... expectedItems) { List<Integer> actualItems = new ArrayList<>(); for (int i = 0; i < menu.getCount(); i++) { if (menu.getItem(i).type >= CONTEXT_MENU_ITEM) { actualItems.add(menu.getItem(i).model.get(ContextMenuItemProperties.MENU_ID)); } } Assert.assertThat("Populated menu items were:" + getMenuTitles(menu), actualItems, Matchers.containsInAnyOrder(expectedItems)); } private String getMenuTitles(ContextMenuCoordinator menu) { StringBuilder items = new StringBuilder(); for (int i = 0; i < menu.getCount(); i++) { if (menu.getItem(i).type >= CONTEXT_MENU_ITEM) { items.append("\n").append( menu.getItem(i).model.get(ContextMenuItemProperties.TEXT)); } } return items.toString(); } private String getMenuTitleFromItem(ContextMenuCoordinator menu, int itemId) { StringBuilder itemName = new StringBuilder(); for (int i = 0; i < menu.getCount(); i++) { if (menu.getItem(i).type >= CONTEXT_MENU_ITEM) { if (menu.getItem(i).model.get(ContextMenuItemProperties.MENU_ID) == itemId) { itemName.append(menu.getItem(i).model.get(ContextMenuItemProperties.TEXT)); return itemName.toString(); } } } return null; } /** * Adds items to the baseItems if the given condition is true. 
 * @param condition The condition to check for whether to add items or not.
 * @param baseItems The base list of items to add to.
 * @param additionalItems The additional items to add.
 * @return An array of items that has the additional items added if the condition is true.
 */
    private Integer[] addItemsIf(
            boolean condition, Integer[] baseItems, Integer[] additionalItems) {
        List<Integer> variableItems = new ArrayList<>();
        variableItems.addAll(Arrays.asList(baseItems));
        if (condition) {
            for (int i = 0; i < additionalItems.length; i++) variableItems.add(additionalItems[i]);
        }
        // NOTE(review): toArray(baseItems) reuses the caller's array when no items were added
        // (the result then aliases baseItems); harmless here since elements are identical, but
        // toArray(new Integer[0]) would avoid the aliasing.
        return variableItems.toArray(baseItems);
    }

    // Selects the given save item from the context menu of a DOM element, then waits for the
    // download to finish and asserts the downloaded filename matches.
    private void saveMediaFromContextMenu(String mediaDOMElement, int saveMenuID,
            String expectedFilename) throws TimeoutException, SecurityException, IOException {
        // Select "save [image/video]" in that menu.
        Tab tab = mDownloadTestRule.getActivity().getActivityTab();
        int callCount = mDownloadTestRule.getChromeDownloadCallCount();
        ContextMenuUtils.selectContextMenuItem(InstrumentationRegistry.getInstrumentation(),
                mDownloadTestRule.getActivity(), tab, mediaDOMElement, saveMenuID);

        // Wait for the download to complete and see if we got the right file
        Assert.assertTrue(mDownloadTestRule.waitForChromeDownloadToFinish(callCount));
        mDownloadTestRule.checkLastDownload(expectedFilename);
    }

    // Reads the primary clip's first item as text. The clipboard must be accessed on the UI
    // thread, so an AtomicReference is used to hand the value back to the test thread.
    private String getClipboardText() throws Throwable {
        final AtomicReference<String> clipboardTextRef = new AtomicReference<>();
        mDownloadTestRule.runOnUiThread(() -> {
            ClipboardManager clipMgr =
                    (ClipboardManager) mDownloadTestRule.getActivity().getSystemService(
                            Context.CLIPBOARD_SERVICE);
            ClipData clipData = clipMgr.getPrimaryClip();
            Assert.assertNotNull("Primary clip is null", clipData);
            Assert.assertTrue("Primary clip contains no items.", clipData.getItemCount() > 0);
            clipboardTextRef.set(clipData.getItemAt(0).getText().toString());
        });
        return clipboardTextRef.get();
    }

    /**
     * Hardcode image bytes to non-null arbitrary data.
     * @param extension Image file extension.
     */
    private void hardcodeTestImageForSharing(String extension) {
        // This string just needs to be not empty in order for the code to accept it as valid
        // image data and generate the temp file for sharing. In the future we could explore
        // transcoding the actual test image from png to jpeg to make the test more realistic.
        String mockImageData = "randomdata";
        // NOTE(review): getBytes() uses the platform default charset; fine for this ASCII value.
        byte[] mockImageByteArray = mockImageData.getBytes();
        // See function javadoc for more context.
        ContextMenuNativeDelegateImpl.setHardcodedImageBytesForTesting(
                mockImageByteArray, extension);
    }

    /**
     * Asserts that {@code superString} contains {@code subString}, failing with a message that
     * shows both strings.
     */
    private void assertStringContains(String subString, String superString) {
        Assert.assertTrue("String '" + superString + "' does not contain '" + subString + "'",
                superString.contains(subString));
    }

    /**
     * Makes sure there are no files with names identical to the ones this test uses in the
     * downloads directory
     */
    private void deleteTestFiles() {
        mDownloadTestRule.deleteFilesInDownloadDirectory(TEST_FILES);
    }
}
bsd-3-clause
DataBiosphere/terra-workspace-manager
service/src/main/java/bio/terra/workspace/service/workspace/exceptions/BufferServiceDisabledException.java
496
package bio.terra.workspace.service.workspace.exceptions;

import bio.terra.common.exception.BadRequestException;

/**
 * Indicates that a caller tried to reach the Resource Buffer Service from a deployment where the
 * service is turned off or has no configuration. Maps to a 400 Bad Request via
 * {@link BadRequestException}.
 *
 * <p>TODO(PF-302): Remove this exception when buffer is enabled and used in all environments.
 */
public class BufferServiceDisabledException extends BadRequestException {

  /**
   * @param message human-readable explanation of why the Buffer Service call was rejected
   */
  public BufferServiceDisabledException(String message) {
    super(message);
  }
}
bsd-3-clause
delkyd/Oracle-Cloud
PaaS_SaaS_Accelerator_RESTFulFacade/XJC_Beans/src/com/oracle/xmlns/apps/crmcommon/salesparties/salespartiesservice/types/DeleteSalesAccountResource.java
2292
//
// This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, vJAXB 2.1.10 in JDK 6
// See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Any modifications to this file will be lost upon recompilation of the source schema.
// Generated on: 2013.10.24 at 02:08:50 PM BST
//
// NOTE(review): generated JAXB binding class — do not hand-edit logic; regenerate from the
// schema instead.

package com.oracle.xmlns.apps.crmcommon.salesparties.salespartiesservice.types;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlType;
import com.oracle.xmlns.apps.crmcommon.salesparties.salespartiesservice.SalesAccountResource;


/**
 * <p>Java class for anonymous complex type.
 *
 * <p>The following schema fragment specifies the expected content contained within this class.
 *
 * <pre>
 * &lt;complexType>
 *   &lt;complexContent>
 *     &lt;restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
 *       &lt;sequence>
 *         &lt;element name="salesAccountResource" type="{http://xmlns.oracle.com/apps/crmCommon/salesParties/salesPartiesService/}SalesAccountResource"/>
 *       &lt;/sequence>
 *     &lt;/restriction>
 *   &lt;/complexContent>
 * &lt;/complexType>
 * </pre>
 *
 *
 */
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "", propOrder = {
    "salesAccountResource"
})
@XmlRootElement(name = "deleteSalesAccountResource")
public class DeleteSalesAccountResource {

    // Required payload element per the schema (minOccurs defaults to 1 with required = true).
    @XmlElement(required = true)
    protected SalesAccountResource salesAccountResource;

    /**
     * Gets the value of the salesAccountResource property.
     *
     * @return
     *     possible object is
     *     {@link SalesAccountResource }
     *
     */
    public SalesAccountResource getSalesAccountResource() {
        return salesAccountResource;
    }

    /**
     * Sets the value of the salesAccountResource property.
     *
     * @param value
     *     allowed object is
     *     {@link SalesAccountResource }
     *
     */
    public void setSalesAccountResource(SalesAccountResource value) {
        this.salesAccountResource = value;
    }

}
bsd-3-clause
rpau/java-symbol-solver
java-symbol-solver-core/src/main/java/com/github/javaparser/symbolsolver/javaparsermodel/contexts/TryWithResourceContext.java
3839
/*
 * Copyright 2016 Federico Tomassetti
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.github.javaparser.symbolsolver.javaparsermodel.contexts;

import com.github.javaparser.ast.body.VariableDeclarator;
import com.github.javaparser.ast.expr.Expression;
import com.github.javaparser.ast.expr.VariableDeclarationExpr;
import com.github.javaparser.ast.stmt.BlockStmt;
import com.github.javaparser.ast.stmt.TryStmt;
import com.github.javaparser.resolution.declarations.ResolvedMethodDeclaration;
import com.github.javaparser.resolution.declarations.ResolvedValueDeclaration;
import com.github.javaparser.resolution.types.ResolvedType;
import com.github.javaparser.symbolsolver.javaparsermodel.declarations.JavaParserSymbolDeclaration;
import com.github.javaparser.symbolsolver.model.resolution.SymbolReference;
import com.github.javaparser.symbolsolver.model.resolution.TypeSolver;
import com.github.javaparser.symbolsolver.model.resolution.Value;

import java.util.List;
import java.util.Optional;

import static com.github.javaparser.symbolsolver.javaparser.Navigator.getParentNode;

/**
 * Resolution context for a {@code try}-with-resources statement. Names are first looked up among
 * the resource declarations ({@code try (Foo f = ...)}); unresolved names are delegated to the
 * enclosing block or parent context.
 */
public class TryWithResourceContext extends AbstractJavaParserContext<TryStmt> {

    public TryWithResourceContext(TryStmt wrappedNode, TypeSolver typeSolver) {
        super(wrappedNode, typeSolver);
    }

    /**
     * Resolves {@code name} as a value: first against the resource declarations of this
     * try-statement, then by delegating to the surrounding block or parent context.
     */
    @Override
    public Optional<Value> solveSymbolAsValue(String name, TypeSolver typeSolver) {
        Optional<VariableDeclarator> resource = resourceDeclarationByName(name);
        if (resource.isPresent()) {
            JavaParserSymbolDeclaration decl =
                    JavaParserSymbolDeclaration.localVar(resource.get(), typeSolver);
            return Optional.of(Value.from(decl));
        }
        if (getParentNode(wrappedNode) instanceof BlockStmt) {
            return StatementContext.solveInBlockAsValue(name, typeSolver, wrappedNode);
        } else {
            return getParent().solveSymbolAsValue(name, typeSolver);
        }
    }

    /**
     * Resolves {@code name} as a declaration: first against the resource declarations of this
     * try-statement, then by delegating to the surrounding block or parent context.
     */
    @Override
    public SymbolReference<? extends ResolvedValueDeclaration> solveSymbol(String name, TypeSolver typeSolver) {
        Optional<VariableDeclarator> resource = resourceDeclarationByName(name);
        if (resource.isPresent()) {
            return SymbolReference.solved(
                    JavaParserSymbolDeclaration.localVar(resource.get(), typeSolver));
        }
        if (getParentNode(wrappedNode) instanceof BlockStmt) {
            return StatementContext.solveInBlock(name, typeSolver, wrappedNode);
        } else {
            return getParent().solveSymbol(name, typeSolver);
        }
    }

    /** Method calls are never declared by a resource; delegate straight to the parent context. */
    @Override
    public SymbolReference<ResolvedMethodDeclaration> solveMethod(String name,
            List<ResolvedType> argumentsTypes, boolean staticOnly, TypeSolver typeSolver) {
        // NOTE(review): the original hard-codes staticOnly=false when delegating, ignoring the
        // parameter. Preserved for behavioral compatibility — confirm this is intentional.
        return getParent().solveMethod(name, argumentsTypes, false, typeSolver);
    }

    /**
     * Scans this try-statement's resource declarations for a variable declarator whose simple
     * name equals {@code name}. Shared by {@link #solveSymbol} and {@link #solveSymbolAsValue}
     * (previously duplicated in both).
     */
    private Optional<VariableDeclarator> resourceDeclarationByName(String name) {
        for (Expression expr : wrappedNode.getResources()) {
            if (expr instanceof VariableDeclarationExpr) {
                for (VariableDeclarator v : ((VariableDeclarationExpr) expr).getVariables()) {
                    if (v.getName().getIdentifier().equals(name)) {
                        return Optional.of(v);
                    }
                }
            }
        }
        return Optional.empty();
    }
}
mit
szpak/mockito
src/main/java/org/mockito/internal/matchers/Or.java
725
/*
 * Copyright (c) 2007 Mockito contributors
 * This program is made available under the terms of the MIT License.
 */
package org.mockito.internal.matchers;

import java.io.Serializable;

import org.mockito.ArgumentMatcher;

/**
 * Composite argument matcher that accepts an argument when either of its two
 * delegate matchers accepts it (logical OR, short-circuiting left to right).
 *
 * <p>Raw {@link ArgumentMatcher} types are kept deliberately: the two delegates
 * may match unrelated argument types, which is why the class suppresses
 * unchecked/rawtypes warnings.
 */
@SuppressWarnings({ "unchecked", "serial", "rawtypes" })
public class Or implements ArgumentMatcher<Object>, Serializable {

    /** Left-hand matcher, consulted first. */
    private final ArgumentMatcher m1;
    /** Right-hand matcher, consulted only when {@code m1} rejects. */
    private final ArgumentMatcher m2;

    /**
     * @param m1 first matcher to try (not validated; a null surfaces as an NPE in {@link #matches})
     * @param m2 fallback matcher (same caveat)
     */
    public Or(ArgumentMatcher<?> m1, ArgumentMatcher<?> m2) {
        this.m1 = m1;
        this.m2 = m2;
    }

    @Override
    public boolean matches(Object actual) {
        return m1.matches(actual) || m2.matches(actual);
    }

    @Override
    public String toString() {
        return "or(" + m1 + ", " + m2 + ")";
    }
}
mit
stengerh/flow-netbeans-markdown
src/flow/netbeans/markdown/csl/MarkdownAbbreviationsRootItem.java
2932
package flow.netbeans.markdown.csl;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import javax.swing.ImageIcon;
import org.netbeans.modules.csl.api.ElementHandle;
import org.netbeans.modules.csl.api.ElementKind;
import org.netbeans.modules.csl.api.HtmlFormatter;
import org.netbeans.modules.csl.api.Modifier;
import org.netbeans.modules.csl.api.StructureItem;
import org.openide.filesystems.FileObject;
import org.openide.util.NbBundle;
import org.pegdown.ast.AbbreviationNode;
import org.pegdown.ast.RootNode;

/**
 * Navigator structure item that groups every abbreviation definition of a
 * Markdown document under a single "Abbreviations" node. Identity is based on
 * the owning file only.
 *
 * @author Holger
 */
@NbBundle.Messages({
    "TXT_MarkdownAbbreviationsRootItem=Abbreviations"
})
public class MarkdownAbbreviationsRootItem implements StructureItem {
    private final FileObject file;
    private final RootNode node;
    /** One child entry per abbreviation in the document, in source order. */
    private final List<MarkdownAbbreviationsEntryItem> items;

    public MarkdownAbbreviationsRootItem(FileObject file, RootNode node) {
        this.file = file;
        this.node = node;
        this.items = new ArrayList<MarkdownAbbreviationsEntryItem>();
        for (AbbreviationNode abbreviation : node.getAbbreviations()) {
            this.items.add(new MarkdownAbbreviationsEntryItem(file, abbreviation));
        }
    }

    @Override
    public String getName() {
        return Bundle.TXT_MarkdownAbbreviationsRootItem();
    }

    @Override
    public String getSortText() {
        // Numeric prefix fixes the node's position among its siblings.
        return "3Abbreviations";
    }

    @Override
    public String getHtml(HtmlFormatter formatter) {
        formatter.appendText(getName());
        return formatter.getText();
    }

    @Override
    public ElementHandle getElementHandle() {
        return null;
    }

    @Override
    public ElementKind getKind() {
        return ElementKind.PACKAGE;
    }

    @Override
    public Set<Modifier> getModifiers() {
        return Collections.emptySet();
    }

    @Override
    public boolean isLeaf() {
        // Always a group node, even when there are no abbreviations.
        return false;
    }

    @Override
    public List<? extends StructureItem> getNestedItems() {
        return Collections.unmodifiableList(items);
    }

    @Override
    public long getPosition() {
        return 0;
    }

    @Override
    public long getEndPosition() {
        return 0;
    }

    @Override
    public ImageIcon getCustomIcon() {
        return null;
    }

    @Override
    public int hashCode() {
        // Same constants as always: 61 * 7 + file hash (0 for a null file).
        return 61 * 7 + (file == null ? 0 : file.hashCode());
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        final MarkdownAbbreviationsRootItem other = (MarkdownAbbreviationsRootItem) obj;
        return this.file == other.file
                || (this.file != null && this.file.equals(other.file));
    }
}
mit
jenkinsci/github-pullrequest-plugin
github-pullrequest-plugin/src/test/java/org/jenkinsci/plugins/github_integration/its/MatrixProjectITest.java
2100
package org.jenkinsci.plugins.github_integration.its; import hudson.matrix.AxisList; import hudson.matrix.MatrixBuild; import hudson.matrix.MatrixProject; import hudson.matrix.MatrixRun; import hudson.matrix.TextAxis; import hudson.tasks.Shell; import org.jenkinsci.plugins.github.pullrequest.GitHubPRMessage; import org.jenkinsci.plugins.github.pullrequest.builders.GitHubPRStatusBuilder; import org.jenkinsci.plugins.github.pullrequest.publishers.impl.GitHubPRBuildStatusPublisher; import org.jenkinsci.plugins.github.pullrequest.publishers.impl.GitHubPRCommentPublisher; import org.junit.Test; import static org.jenkinsci.plugins.github_integration.junit.GHRule.getPreconfiguredProperty; import static org.jenkinsci.plugins.github_integration.junit.GHRule.getPreconfiguredPRTrigger; /** * @author Kanstantsin Shautsou */ public class MatrixProjectITest extends AbstractPRTest { @Test public void testChildStatuses() throws Exception { final MatrixProject matrixProject = jRule.jenkins.createProject(MatrixProject.class, "matrix-project"); matrixProject.addProperty(getPreconfiguredProperty(ghRule.getGhRepo())); matrixProject.addTrigger(getPreconfiguredPRTrigger()); matrixProject.getBuildersList().add(new GitHubPRStatusBuilder()); matrixProject.getBuildersList().add(new Shell("sleep 10")); matrixProject.getPublishersList().add(new GitHubPRBuildStatusPublisher()); matrixProject.getPublishersList().add(new GitHubPRCommentPublisher(new GitHubPRMessage("Comment"), null, null)); matrixProject.setAxes( new AxisList( new TextAxis("first_axis", "first_value1", "first_value2"), new TextAxis("second_axis", "sec_value1", "sec_value2") ) ); matrixProject.save(); super.basicTest(matrixProject); for (MatrixBuild build : matrixProject.getBuilds()) { for (MatrixRun matrixRun : build.getRuns()) { jRule.assertLogNotContains("\tat", matrixRun); } } } }
mit
partheinstein/bc-java
pkix/src/main/java/org/bouncycastle/cert/crmf/jcajce/CRMFHelper.java
15753
package org.bouncycastle.cert.crmf.jcajce;

import java.io.IOException;
import java.security.AlgorithmParameterGenerator;
import java.security.AlgorithmParameters;
import java.security.GeneralSecurityException;
import java.security.InvalidAlgorithmParameterException;
import java.security.InvalidKeyException;
import java.security.Key;
import java.security.KeyFactory;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.security.NoSuchProviderException;
import java.security.PublicKey;
import java.security.SecureRandom;
import java.security.spec.InvalidParameterSpecException;
import java.security.spec.X509EncodedKeySpec;
import java.util.HashMap;
import java.util.Map;

import javax.crypto.Cipher;
import javax.crypto.KeyGenerator;
import javax.crypto.Mac;
import javax.crypto.NoSuchPaddingException;
import javax.crypto.SecretKey;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.RC2ParameterSpec;

import org.bouncycastle.asn1.ASN1Encodable;
import org.bouncycastle.asn1.ASN1Null;
import org.bouncycastle.asn1.ASN1ObjectIdentifier;
import org.bouncycastle.asn1.ASN1OctetString;
import org.bouncycastle.asn1.ASN1Primitive;
import org.bouncycastle.asn1.DERNull;
import org.bouncycastle.asn1.iana.IANAObjectIdentifiers;
import org.bouncycastle.asn1.nist.NISTObjectIdentifiers;
import org.bouncycastle.asn1.oiw.OIWObjectIdentifiers;
import org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers;
import org.bouncycastle.asn1.x509.AlgorithmIdentifier;
import org.bouncycastle.asn1.x509.SubjectPublicKeyInfo;
import org.bouncycastle.asn1.x9.X9ObjectIdentifiers;
import org.bouncycastle.cert.crmf.CRMFException;
import org.bouncycastle.cms.CMSAlgorithm;
import org.bouncycastle.jcajce.util.JcaJceHelper;
import org.bouncycastle.jcajce.util.JcaJceUtils;

/**
 * Package-private bridge between ASN.1 algorithm OIDs and JCA/JCE objects
 * (ciphers, key factories, digests, MACs, parameter generators), using a
 * {@link JcaJceHelper} to resolve the actual provider.
 *
 * <p>Common pattern throughout: try the friendly JCA algorithm name from one
 * of the lookup tables first, and fall back to the raw dotted OID string if
 * the provider does not know the name. Checked JCE exceptions are uniformly
 * wrapped in {@link CRMFException}.
 */
class CRMFHelper {
    // OID -> base JCA algorithm name (used for key generators / parameters).
    protected static final Map BASE_CIPHER_NAMES = new HashMap();
    // OID -> full JCA cipher transformation string.
    protected static final Map CIPHER_ALG_NAMES = new HashMap();
    // OID -> JCA digest name.
    protected static final Map DIGEST_ALG_NAMES = new HashMap();
    // OID -> JCA asymmetric key algorithm name.
    protected static final Map KEY_ALG_NAMES = new HashMap();
    // OID -> JCA MAC name.
    protected static final Map MAC_ALG_NAMES = new HashMap();

    static {
        BASE_CIPHER_NAMES.put(PKCSObjectIdentifiers.des_EDE3_CBC, "DESEDE");
        BASE_CIPHER_NAMES.put(NISTObjectIdentifiers.id_aes128_CBC, "AES");
        BASE_CIPHER_NAMES.put(NISTObjectIdentifiers.id_aes192_CBC, "AES");
        BASE_CIPHER_NAMES.put(NISTObjectIdentifiers.id_aes256_CBC, "AES");

        CIPHER_ALG_NAMES.put(CMSAlgorithm.DES_EDE3_CBC, "DESEDE/CBC/PKCS5Padding");
        CIPHER_ALG_NAMES.put(CMSAlgorithm.AES128_CBC, "AES/CBC/PKCS5Padding");
        CIPHER_ALG_NAMES.put(CMSAlgorithm.AES192_CBC, "AES/CBC/PKCS5Padding");
        CIPHER_ALG_NAMES.put(CMSAlgorithm.AES256_CBC, "AES/CBC/PKCS5Padding");
        CIPHER_ALG_NAMES.put(new ASN1ObjectIdentifier(PKCSObjectIdentifiers.rsaEncryption.getId()), "RSA/ECB/PKCS1Padding");

        DIGEST_ALG_NAMES.put(OIWObjectIdentifiers.idSHA1, "SHA1");
        DIGEST_ALG_NAMES.put(NISTObjectIdentifiers.id_sha224, "SHA224");
        DIGEST_ALG_NAMES.put(NISTObjectIdentifiers.id_sha256, "SHA256");
        DIGEST_ALG_NAMES.put(NISTObjectIdentifiers.id_sha384, "SHA384");
        DIGEST_ALG_NAMES.put(NISTObjectIdentifiers.id_sha512, "SHA512");

        MAC_ALG_NAMES.put(IANAObjectIdentifiers.hmacSHA1, "HMACSHA1");
        MAC_ALG_NAMES.put(PKCSObjectIdentifiers.id_hmacWithSHA1, "HMACSHA1");
        MAC_ALG_NAMES.put(PKCSObjectIdentifiers.id_hmacWithSHA224, "HMACSHA224");
        MAC_ALG_NAMES.put(PKCSObjectIdentifiers.id_hmacWithSHA256, "HMACSHA256");
        MAC_ALG_NAMES.put(PKCSObjectIdentifiers.id_hmacWithSHA384, "HMACSHA384");
        MAC_ALG_NAMES.put(PKCSObjectIdentifiers.id_hmacWithSHA512, "HMACSHA512");

        KEY_ALG_NAMES.put(PKCSObjectIdentifiers.rsaEncryption, "RSA");
        KEY_ALG_NAMES.put(X9ObjectIdentifiers.id_dsa, "DSA");
    }

    // Resolves JCA objects against a particular provider (or the defaults).
    private JcaJceHelper helper;

    CRMFHelper(JcaJceHelper helper) {
        this.helper = helper;
    }

    /**
     * Converts an ASN.1 SubjectPublicKeyInfo into a JCA PublicKey via an
     * X.509-encoded key spec and the matching KeyFactory.
     */
    PublicKey toPublicKey(SubjectPublicKeyInfo subjectPublicKeyInfo) throws CRMFException {
        try {
            X509EncodedKeySpec xspec = new X509EncodedKeySpec(subjectPublicKeyInfo.getEncoded());
            AlgorithmIdentifier keyAlg = subjectPublicKeyInfo.getAlgorithm();
            return createKeyFactory(keyAlg.getAlgorithm()).generatePublic(xspec);
        } catch (Exception e) {
            throw new CRMFException("invalid key: " + e.getMessage(), e);
        }
    }

    /** Creates a Cipher for the OID: known transformation name first, OID as fallback. */
    Cipher createCipher(ASN1ObjectIdentifier algorithm) throws CRMFException {
        try {
            String cipherName = (String)CIPHER_ALG_NAMES.get(algorithm);
            if (cipherName != null) {
                try {
                    // this is reversed as the Sun policy files now allow unlimited strength RSA
                    return helper.createCipher(cipherName);
                } catch (NoSuchAlgorithmException e) {
                    // Ignore — retry below with the plain OID string.
                }
            }
            return helper.createCipher(algorithm.getId());
        } catch (GeneralSecurityException e) {
            throw new CRMFException("cannot create cipher: " + e.getMessage(), e);
        }
    }

    /** Creates a symmetric KeyGenerator for the OID, name first then OID fallback. */
    public KeyGenerator createKeyGenerator(ASN1ObjectIdentifier algorithm) throws CRMFException {
        try {
            String cipherName = (String)BASE_CIPHER_NAMES.get(algorithm);
            if (cipherName != null) {
                try {
                    // this is reversed as the Sun policy files now allow unlimited strength RSA
                    return helper.createKeyGenerator(cipherName);
                } catch (NoSuchAlgorithmException e) {
                    // Ignore — retry below with the plain OID string.
                }
            }
            return helper.createKeyGenerator(algorithm.getId());
        } catch (GeneralSecurityException e) {
            throw new CRMFException("cannot create key generator: " + e.getMessage(), e);
        }
    }

    /**
     * Builds a fully initialized decryption Cipher for the given key and
     * algorithm identifier. Parameter handling, in order:
     * <ol>
     *   <li>non-null, non-NULL ASN.1 parameters: decode into JCA
     *       AlgorithmParameters and init with them;</li>
     *   <li>if the provider has no AlgorithmParameters for the algorithm,
     *       fall back to treating the parameters as a raw IV octet string for
     *       the known CBC block ciphers;</li>
     *   <li>absent parameters: a zero IV for DESede/IDEA/CAST5, otherwise a
     *       plain key-only init.</li>
     * </ol>
     */
    Cipher createContentCipher(final Key sKey, final AlgorithmIdentifier encryptionAlgID)
        throws CRMFException {
        return (Cipher)execute(new JCECallback() {
            public Object doInJCE()
                throws CRMFException, InvalidAlgorithmParameterException,
                InvalidKeyException, InvalidParameterSpecException, NoSuchAlgorithmException,
                NoSuchPaddingException, NoSuchProviderException {
                Cipher cipher = createCipher(encryptionAlgID.getAlgorithm());
                ASN1Primitive sParams = (ASN1Primitive)encryptionAlgID.getParameters();
                ASN1ObjectIdentifier encAlg = encryptionAlgID.getAlgorithm();

                if (sParams != null && !(sParams instanceof ASN1Null)) {
                    try {
                        AlgorithmParameters params = createAlgorithmParameters(encryptionAlgID.getAlgorithm());

                        try {
                            JcaJceUtils.loadParameters(params, sParams);
                        } catch (IOException e) {
                            throw new CRMFException("error decoding algorithm parameters.", e);
                        }

                        cipher.init(Cipher.DECRYPT_MODE, sKey, params);
                    } catch (NoSuchAlgorithmException e) {
                        // No AlgorithmParameters impl: for these CBC ciphers the
                        // encoded parameters are just the IV octet string.
                        if (encAlg.equals(CMSAlgorithm.DES_EDE3_CBC)
                            || encAlg.equals(CMSAlgorithm.IDEA_CBC)
                            || encAlg.equals(CMSAlgorithm.AES128_CBC)
                            || encAlg.equals(CMSAlgorithm.AES192_CBC)
                            || encAlg.equals(CMSAlgorithm.AES256_CBC)) {
                            cipher.init(Cipher.DECRYPT_MODE, sKey, new IvParameterSpec(
                                ASN1OctetString.getInstance(sParams).getOctets()));
                        } else {
                            throw e;
                        }
                    }
                } else {
                    // No parameters present: 64-bit block ciphers get a zero IV.
                    if (encAlg.equals(CMSAlgorithm.DES_EDE3_CBC)
                        || encAlg.equals(CMSAlgorithm.IDEA_CBC)
                        || encAlg.equals(CMSAlgorithm.CAST5_CBC)) {
                        cipher.init(Cipher.DECRYPT_MODE, sKey, new IvParameterSpec(new byte[8]));
                    } else {
                        cipher.init(Cipher.DECRYPT_MODE, sKey);
                    }
                }
                return cipher;
            }
        });
    }

    /** Creates AlgorithmParameters for the OID, base name first then OID fallback. */
    AlgorithmParameters createAlgorithmParameters(ASN1ObjectIdentifier algorithm)
        throws NoSuchAlgorithmException, NoSuchProviderException {
        String algorithmName = (String)BASE_CIPHER_NAMES.get(algorithm);
        if (algorithmName != null) {
            try {
                // this is reversed as the Sun policy files now allow unlimited strength RSA
                return helper.createAlgorithmParameters(algorithmName);
            } catch (NoSuchAlgorithmException e) {
                // Ignore — retry below with the plain OID string.
            }
        }
        return helper.createAlgorithmParameters(algorithm.getId());
    }

    /** Creates a KeyFactory for an asymmetric key OID, name first then OID fallback. */
    KeyFactory createKeyFactory(ASN1ObjectIdentifier algorithm) throws CRMFException {
        try {
            String algName = (String)KEY_ALG_NAMES.get(algorithm);
            if (algName != null) {
                try {
                    // this is reversed as the Sun policy files now allow unlimited strength RSA
                    return helper.createKeyFactory(algName);
                } catch (NoSuchAlgorithmException e) {
                    // Ignore — retry below with the plain OID string.
                }
            }
            return helper.createKeyFactory(algorithm.getId());
        } catch (GeneralSecurityException e) {
            throw new CRMFException("cannot create cipher: " + e.getMessage(), e);
        }
    }

    /** Creates a MessageDigest for the OID, name first then OID fallback. */
    MessageDigest createDigest(ASN1ObjectIdentifier algorithm) throws CRMFException {
        try {
            String digestName = (String)DIGEST_ALG_NAMES.get(algorithm);
            if (digestName != null) {
                try {
                    // this is reversed as the Sun policy files now allow unlimited strength RSA
                    return helper.createDigest(digestName);
                } catch (NoSuchAlgorithmException e) {
                    // Ignore — retry below with the plain OID string.
                }
            }
            return helper.createDigest(algorithm.getId());
        } catch (GeneralSecurityException e) {
            throw new CRMFException("cannot create cipher: " + e.getMessage(), e);
        }
    }

    /** Creates a Mac for the OID, name first then OID fallback. */
    Mac createMac(ASN1ObjectIdentifier algorithm) throws CRMFException {
        try {
            String macName = (String)MAC_ALG_NAMES.get(algorithm);
            if (macName != null) {
                try {
                    // this is reversed as the Sun policy files now allow unlimited strength RSA
                    return helper.createMac(macName);
                } catch (NoSuchAlgorithmException e) {
                    // Ignore — retry below with the plain OID string.
                }
            }
            return helper.createMac(algorithm.getId());
        } catch (GeneralSecurityException e) {
            throw new CRMFException("cannot create mac: " + e.getMessage(), e);
        }
    }

    /** Creates an AlgorithmParameterGenerator, base name first then OID fallback. */
    AlgorithmParameterGenerator createAlgorithmParameterGenerator(ASN1ObjectIdentifier algorithm)
        throws GeneralSecurityException {
        String algorithmName = (String)BASE_CIPHER_NAMES.get(algorithm);
        if (algorithmName != null) {
            try {
                // this is reversed as the Sun policy files now allow unlimited strength RSA
                return helper.createAlgorithmParameterGenerator(algorithmName);
            } catch (NoSuchAlgorithmException e) {
                // Ignore — retry below with the plain OID string.
            }
        }
        return helper.createAlgorithmParameterGenerator(algorithm.getId());
    }

    /**
     * Generates encryption parameters for the OID. RC2-CBC additionally seeds
     * the generator with a random 8-byte IV and the key's effective bit length.
     * Returns null when no parameter generator exists for the algorithm
     * (callers treat that as "no parameters needed").
     */
    AlgorithmParameters generateParameters(ASN1ObjectIdentifier encryptionOID, SecretKey encKey, SecureRandom rand)
        throws CRMFException {
        try {
            AlgorithmParameterGenerator pGen = createAlgorithmParameterGenerator(encryptionOID);

            if (encryptionOID.equals(CMSAlgorithm.RC2_CBC)) {
                byte[] iv = new byte[8];

                rand.nextBytes(iv);

                try {
                    pGen.init(new RC2ParameterSpec(encKey.getEncoded().length * 8, iv), rand);
                } catch (InvalidAlgorithmParameterException e) {
                    throw new CRMFException("parameters generation error: " + e, e);
                }
            }

            return pGen.generateParameters();
        } catch (NoSuchAlgorithmException e) {
            return null;
        } catch (GeneralSecurityException e) {
            throw new CRMFException("exception creating algorithm parameter generator: " + e, e);
        }
    }

    /**
     * Wraps an OID plus (possibly null) JCA parameters into an
     * AlgorithmIdentifier; null parameters become an explicit ASN.1 NULL.
     */
    AlgorithmIdentifier getAlgorithmIdentifier(ASN1ObjectIdentifier encryptionOID, AlgorithmParameters params)
        throws CRMFException {
        ASN1Encodable asn1Params;
        if (params != null) {
            try {
                asn1Params = JcaJceUtils.extractParameters(params);
            } catch (IOException e) {
                throw new CRMFException("cannot encode parameters: " + e.getMessage(), e);
            }
        } else {
            asn1Params = DERNull.INSTANCE;
        }

        return new AlgorithmIdentifier(
            encryptionOID,
            asn1Params);
    }

    /** Runs a JCE callback, translating each checked JCE exception into CRMFException. */
    static Object execute(JCECallback callback) throws CRMFException {
        try {
            return callback.doInJCE();
        } catch (NoSuchAlgorithmException e) {
            throw new CRMFException("can't find algorithm.", e);
        } catch (InvalidKeyException e) {
            throw new CRMFException("key invalid in message.", e);
        } catch (NoSuchProviderException e) {
            throw new CRMFException("can't find provider.", e);
        } catch (NoSuchPaddingException e) {
            throw new CRMFException("required padding not supported.", e);
        } catch (InvalidAlgorithmParameterException e) {
            throw new CRMFException("algorithm parameters invalid.", e);
        } catch (InvalidParameterSpecException e) {
            throw new CRMFException("MAC algorithm parameter spec invalid.", e);
        }
    }

    /** Callback whose body may throw the usual JCE checked exceptions; see {@link #execute}. */
    static interface JCECallback {
        Object doInJCE()
            throws CRMFException, InvalidAlgorithmParameterException,
            InvalidKeyException, InvalidParameterSpecException, NoSuchAlgorithmException,
            NoSuchPaddingException, NoSuchProviderException;
    }
}
mit
jayhorn/cav_experiments
jayhorn_cav16_ae/cbmc_benchmarks/if_acmp1/if_acmp1.java
569
// Reference-equality scenario (if_acmpeq / if_acmpne): aliased references,
// distinct allocations, null, and unrelated classes viewed through Object.

class A { }

class B { }

class if_acmp1 {

    /** Returns a freshly allocated B, so the reference comes through a call. */
    private static B get_B() {
        return new B();
    }

    public static void main(String[] args) {
        // Five distinct A instances.
        A a0 = new A();
        A a1 = new A();
        A a2 = new A();
        A a3 = new A();
        A a4 = new A();
        A[] all = { a0, a1, a2, a3, a4 };

        // Every reference is identical to itself.
        for (A a : all) {
            assert a == a;
        }

        // Neighbouring distinct allocations (a1..a4) are never identical.
        for (int i = 1; i < all.length - 1; i++) {
            assert all[i] != all[i + 1];
        }

        // Non-null versus null comparisons.
        assert a0 != null;
        A a5 = null;
        assert a5 == null;

        // References of unrelated classes stay distinct even as Object.
        B b = get_B();
        Object o0 = a0;
        Object o1 = b;
        assert o0 != o1;
    }
}
mit
Peter-Maximilian/settlers-remake
jsettlers.logic/src/main/java/jsettlers/main/replay/ReplayUtils.java
10500
/*******************************************************************************
 * Copyright (c) 2015 - 2018
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *******************************************************************************/
package jsettlers.main.replay;

import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Arrays;
import java.util.List;

import jsettlers.common.menu.IStartedGame;
import jsettlers.common.menu.IStartingGame;
import jsettlers.common.resources.ResourceManager;
import jsettlers.common.utils.mutables.MutableInt;
import jsettlers.input.tasks.EGuiAction;
import jsettlers.input.tasks.SimpleGuiTask;
import jsettlers.logic.constants.MatchConstants;
import jsettlers.logic.map.loading.MapLoadException;
import jsettlers.logic.map.loading.MapLoader;
import jsettlers.logic.map.loading.list.MapList;
import jsettlers.logic.player.PlayerSetting;
import jsettlers.main.JSettlersGame;
import jsettlers.main.JSettlersGame.GameRunner;
import jsettlers.main.ReplayStartInformation;
import jsettlers.network.NetworkConstants;
import jsettlers.network.client.OfflineNetworkConnector;
import jsettlers.network.client.interfaces.IGameClock;
import jsettlers.network.client.interfaces.INetworkConnector;

import static java8.util.J8Arrays.stream;

/**
 * Utilities for replaying recorded games offline: fast-forward a replay to one
 * or more target game times, trigger quick-saves at those times, and derive a
 * new replay file containing the tasks remaining after the savegame point.
 *
 * @author Andreas Eberle
 */
public class ReplayUtils {

	/**
	 * Replays {@code replayFile} up to {@code targetGameTimeMinutes}, quick-saves
	 * there, and writes a new replay (at {@code newReplayFile}) that starts from
	 * the created savegame and contains only the not-yet-executed tasks.
	 *
	 * @return the loader of the savegame created at the target time
	 */
	public static MapLoader replayAndCreateSavegame(IReplayStreamProvider replayFile, int targetGameTimeMinutes,
			String newReplayFile) throws MapLoadException, IOException {
		OfflineNetworkConnector networkConnector = createPausingOfflineNetworkConnector();
		ReplayStartInformation replayStartInformation = new ReplayStartInformation();
		JSettlersGame game = loadGameFromReplay(replayFile, networkConnector, replayStartInformation);

		IStartedGame startedGame = startGame(game); // before we can save the clock reference, the game must be started
		IGameClock gameClock = MatchConstants.clock(); // after the game, the clock cannot be accessed any more => save reference before the game

		MapLoader newSavegame = playGameToTargetTimeAndGetSavegames(startedGame, networkConnector, targetGameTimeMinutes)[0];

		// create a jsettlers.integration.replay basing on the savegame and containing the remaining tasks.
		createReplayOfRemainingTasks(newSavegame, replayStartInformation, newReplayFile, gameClock);

		System.out.println("Replayed: " + replayFile + " and created savegame: " + newSavegame);

		return newSavegame;
	}

	/**
	 * Replays {@code replayFile} and quick-saves at each of the given game times.
	 *
	 * @return one savegame loader per target time (ascending time order)
	 */
	public static MapLoader[] replayAndCreateSavegames(IReplayStreamProvider replayFile, int[] targetGameTimeMinutes)
			throws MapLoadException {
		OfflineNetworkConnector networkConnector = createPausingOfflineNetworkConnector();
		ReplayStartInformation replayStartInformation = new ReplayStartInformation();
		JSettlersGame game = loadGameFromReplay(replayFile, networkConnector, replayStartInformation);

		MapLoader[] newSavegame = playGameToTargetTimeAndGetSavegames(game, networkConnector, targetGameTimeMinutes);
		System.out.println("Replayed: " + replayFile + " and created savegames: " + Arrays.asList(newSavegame));
		return newSavegame;
	}

	/** Creates an offline connector whose game clock starts paused, so time only advances via fastForwardTo. */
	private static OfflineNetworkConnector createPausingOfflineNetworkConnector() {
		OfflineNetworkConnector networkConnector = new OfflineNetworkConnector();
		networkConnector.getGameClock().setPausing(true);
		return networkConnector;
	}

	private static MapLoader[] playGameToTargetTimeAndGetSavegames(JSettlersGame game,
			OfflineNetworkConnector networkConnector, final int... targetGameTimesMinutes) {
		IStartedGame startedGame = startGame(game);
		return playGameToTargetTimeAndGetSavegames(startedGame, networkConnector, targetGameTimesMinutes);
	}

	/**
	 * Fast-forwards the (paused) clock to each target time, scheduling a
	 * QUICK_SAVE gui task just before each, and collects the resulting
	 * savegames. Shuts the game down afterwards.
	 */
	private static MapLoader[] playGameToTargetTimeAndGetSavegames(IStartedGame startedGame,
			OfflineNetworkConnector networkConnector, final int... targetGameTimesMinutes) {
		final int[] targetGameTimesMs = getGameTimeMsFromMinutes(targetGameTimesMinutes);

		// schedule the save task and run the game to the target game time
		MapLoader[] savegames = new MapLoader[targetGameTimesMs.length];
		for (int i = 0; i < targetGameTimesMs.length; i++) {
			final int targetGameTimeMs = targetGameTimesMs[i];

			// lockstep index = time / period; the QUICK_SAVE executes at that step
			networkConnector.scheduleTaskAt(targetGameTimeMs / NetworkConstants.Client.LOCKSTEP_PERIOD,
					new SimpleGuiTask(EGuiAction.QUICK_SAVE, (byte) 0)
			);
			// +1000 ms of slack so the scheduled save definitely runs before we read it
			MatchConstants.clock().fastForwardTo(targetGameTimeMs + 1000);

			savegames[i] = getNewestSavegame();
		}

		awaitShutdown(startedGame);

		return savegames;
	}

	/** Converts minutes to milliseconds and sorts ascending (savegames are taken in time order). */
	private static int[] getGameTimeMsFromMinutes(final int... targetGameTimesMinutes) {
		return stream(targetGameTimesMinutes).map(minute -> minute * 60 * 1000).sorted().toArray();
	}

	/**
	 * Returns the most recently created savegame on disk.
	 *
	 * @throws RuntimeException if no savegames exist at all
	 */
	public static MapLoader getNewestSavegame() {
		List<? extends MapLoader> savedMaps = MapList.getDefaultList().getSavedMaps().getItems();
		if (savedMaps.isEmpty()) {
			throw new RuntimeException("No saved games found.");
		}

		MapLoader newest = savedMaps.get(0);
		for (MapLoader map : savedMaps) {
			if (newest.getCreationDate().before(map.getCreationDate())) {
				newest = map;
			}
		}
		return newest;
	}

	/**
	 * Requests the game to stop and blocks until the exit listener fires or
	 * the game reports its shutdown as finished.
	 */
	public static void awaitShutdown(IStartedGame startedGame) {
		// 0 = still running, 1 = exit listener fired
		final MutableInt gameStopped = new MutableInt(0);

		startedGame.setGameExitListener(game -> {
			// NOTE(review): the flag is written outside the synchronized block;
			// the subsequent notifyAll inside it appears to provide the handoff —
			// confirm the visibility assumption holds for MutableInt.value.
			gameStopped.value = 1;
			synchronized (gameStopped) {
				gameStopped.notifyAll();
			}
		});

		((GameRunner) startedGame).stopGame();

		synchronized (gameStopped) {
			while (gameStopped.value == 0 && !startedGame.isShutdownFinished()) {
				try {
					gameStopped.wait();
				} catch (InterruptedException e) {
					// deliberately ignored: keep waiting until shutdown completes
				}
			}
		}
	}

	/** Starts the game and blocks until startup completed. */
	private static IStartedGame startGame(JSettlersGame game) {
		IStartingGame startingGame = game.start();
		return waitForGameStartup(startingGame);
	}

	/** Attaches a blocking listener and waits for the starting game to finish loading. */
	public static IStartedGame waitForGameStartup(IStartingGame game) {
		DummyStartingGameListener startingGameListener = new DummyStartingGameListener();
		game.setListener(startingGameListener);
		return startingGameListener.waitForGameStartup();
	}

	private static JSettlersGame loadGameFromReplay(IReplayStreamProvider replayFile, INetworkConnector networkConnector,
			ReplayStartInformation replayStartInformation) throws MapLoadException {
		System.out.println("Found loadable jsettlers.integration.replay file. Started loading it: " + replayFile);
		return JSettlersGame.loadFromReplayFile(replayFile, networkConnector, replayStartInformation);
	}

	/**
	 * Serializes a new replay header (pointing at {@code newSavegame}) followed
	 * by all tasks of the clock that have not been executed yet.
	 */
	private static void createReplayOfRemainingTasks(MapLoader newSavegame, ReplayStartInformation replayStartInformation,
			String newReplayFile, IGameClock gameClock) throws IOException {
		System.out.println("Creating new jsettlers.integration.replay file (" + newReplayFile + ")...");

		ReplayStartInformation replayInfo = new ReplayStartInformation(0, newSavegame.getMapName(),
				newSavegame.getMapId(), replayStartInformation.getPlayerId(), replayStartInformation.getPlayerSettings()
		);

		DataOutputStream dos = new DataOutputStream(ResourceManager.writeUserFile(newReplayFile));
		replayInfo.serialize(dos);
		gameClock.saveRemainingTasks(dos);

		dos.close();

		System.out.println("New jsettlers.integration.replay file successfully created!");
	}

	/**
	 * Plays {@code map} from scratch (recording a replay into the configuration
	 * dir) and quick-saves at each target time.
	 */
	public static PlayMapResult playMapToTargetTimes(MapLoader map, byte playerId, final int... targetTimeMinutes) {
		OfflineNetworkConnector networkConnector = ReplayUtils.createPausingOfflineNetworkConnector();
		JSettlersGame game = new JSettlersGame(map, 0L, networkConnector, playerId,
				PlayerSetting.createDefaultSettings(playerId, (byte) map.getMaxPlayers())) {
			@Override
			protected OutputStream createReplayWriteStream() throws IOException {
				// Record into the well-known configuration file so PlayMapResult can read it back.
				return ResourceManager.writeConfigurationFile("jsettlers.integration.replay");
			}
		};

		final MapLoader[] savegames = ReplayUtils.playGameToTargetTimeAndGetSavegames(game, networkConnector, targetTimeMinutes);

		return new PlayMapResult(map, savegames);
	}

	/** Source of a replay stream plus the map it was recorded on. */
	public interface IReplayStreamProvider {
		InputStream openStream() throws IOException;

		MapLoader getMap(ReplayStartInformation replayStartInformation) throws MapLoadException;
	}

	/**
	 * A jsettlers.integration.replay file using the default list.
	 *
	 * @see MapList#defaultList
	 */
	public static class ReplayFile implements IReplayStreamProvider {
		private final File file;

		public ReplayFile(File file) {
			this.file = file;
		}

		@Override
		public InputStream openStream() throws IOException {
			return new FileInputStream(file);
		}

		@Override
		public MapLoader getMap(ReplayStartInformation replayStartInformation) {
			// Resolve the map by id through the default map list.
			return MapList.getDefaultList().getMapById(replayStartInformation.getMapId());
		}
	}

	/** Outcome of {@link #playMapToTargetTimes}: the played map, its savegames, and the recorded replay stream. */
	public static class PlayMapResult implements IReplayStreamProvider {
		private final MapLoader map;
		private final MapLoader[] savegames;

		PlayMapResult(MapLoader map, MapLoader[] savegames) {
			this.map = map;
			this.savegames = savegames;
		}

		@Override
		public InputStream openStream() throws IOException {
			return ResourceManager.getResourcesFileStream("jsettlers.integration.replay");
		}

		@Override
		public MapLoader getMap(ReplayStartInformation replayStartInformation) throws MapLoadException {
			// Only the map this result was produced from can be served.
			if (map.getMapId().equals(replayStartInformation.getMapId())) {
				return map;
			}
			throw new MapLoadException("No file found for " + replayStartInformation);
		}

		public MapLoader[] getSavegames() {
			return savegames;
		}
	}
}
mit
jvm-bloggers/jvm-bloggers
src/main/java/com/jvm_bloggers/frontend/public_area/common_layout/HeaderFrontend.java
1617
package com.jvm_bloggers.frontend.public_area.common_layout;

import com.googlecode.wicket.jquery.ui.markup.html.link.BookmarkablePageLink;
import com.jvm_bloggers.frontend.public_area.AboutPage;
import com.jvm_bloggers.frontend.public_area.FireInOvhPage;
import com.jvm_bloggers.frontend.public_area.all_issues.AllIssuesPage;
import com.jvm_bloggers.frontend.public_area.blogs.PersonalBlogsPage;
import com.jvm_bloggers.frontend.public_area.contributors.ContributorsPage;
import com.jvm_bloggers.frontend.public_area.jvm_poland_slack.JvmPolandSlackPage;
import com.jvm_bloggers.frontend.public_area.rss.RssInformationPage;
import com.jvm_bloggers.frontend.public_area.search_posts.SearchPostsPage;
import com.jvm_bloggers.frontend.public_area.top_posts.TopPostsPage;
import org.apache.wicket.markup.html.panel.Panel;

/**
 * Header navigation panel shared by the public pages: registers one
 * bookmarkable link per top-level destination.
 */
public class HeaderFrontend extends Panel {

    /**
     * @param id wicket id of the panel in the surrounding page markup
     */
    public HeaderFrontend(String id) {
        super(id);
        // Each string below is a wicket:id that must match a link element in
        // this panel's associated HTML markup.
        add(new BookmarkablePageLink<>("blogs", PersonalBlogsPage.class));
        add(new BookmarkablePageLink<>("topPosts", TopPostsPage.class));
        add(new BookmarkablePageLink<>("allIssues", AllIssuesPage.class));
        add(new BookmarkablePageLink<>("contributors", ContributorsPage.class));
        add(new BookmarkablePageLink<>("about", AboutPage.class));
        add(new BookmarkablePageLink<>("fireInOvh", FireInOvhPage.class));
        add(new BookmarkablePageLink<>("rss", RssInformationPage.class));
        add(new BookmarkablePageLink<>("jvm-poland-slack", JvmPolandSlackPage.class));
        add(new BookmarkablePageLink<>("searchPosts", SearchPostsPage.class));
    }
}
mit
Safewhere/kombit-web-java
kombit-opensaml-2.5.1/src/org/opensaml/xacml/policy/impl/AttributeSelectorTypeUnmarshaller.java
2638
/* * Licensed to the University Corporation for Advanced Internet Development, * Inc. (UCAID) under one or more contributor license agreements. See the * NOTICE file distributed with this work for additional information regarding * copyright ownership. The UCAID licenses this file to You under the Apache * License, Version 2.0 (the "License"); you may not use this file except in * compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.opensaml.xacml.policy.impl; import org.opensaml.xacml.impl.AbstractXACMLObjectUnmarshaller; import org.opensaml.xacml.policy.AttributeSelectorType; import org.opensaml.xml.XMLObject; import org.opensaml.xml.io.UnmarshallingException; import org.opensaml.xml.schema.XSBooleanValue; import org.opensaml.xml.util.DatatypeHelper; import org.w3c.dom.Attr; /** *Unmarshaller for {@link AttributeSelectorType}. */ public class AttributeSelectorTypeUnmarshaller extends AbstractXACMLObjectUnmarshaller { /** Constructor. 
*/ public AttributeSelectorTypeUnmarshaller() { super(); } /** {@inheritDoc} */ protected void processAttribute(XMLObject xmlObject, Attr attribute) throws UnmarshallingException { AttributeSelectorType attributeSelectorType = (AttributeSelectorType) xmlObject; if (attribute.getLocalName().equals(AttributeSelectorType.REQUEST_CONTEXT_PATH_ATTRIB_NAME)){ attributeSelectorType.setRequestContextPath(DatatypeHelper.safeTrimOrNullString(attribute.getValue())); } else if (attribute.getLocalName().equals(AttributeSelectorType.DATA_TYPE_ATTRIB_NAME)){ attributeSelectorType.setDataType(DatatypeHelper.safeTrimOrNullString(attribute.getValue())); } else if (attribute.getLocalName().equals(AttributeSelectorType.MUST_BE_PRESENT_ATTRIB_NAME)){ if (attribute.getValue().equals("True") || attribute.getValue().equals("true")) { attributeSelectorType.setMustBePresentXSBoolean(XSBooleanValue.valueOf("1")); } else { attributeSelectorType.setMustBePresentXSBoolean(XSBooleanValue.valueOf("0")); } } else { super.processAttribute(xmlObject, attribute); } } }
mit
Safewhere/kombit-web-java
kombit-opensaml-2.5.1/src/org/opensaml/saml2/core/impl/StatusMessageImpl.java
2106
/*
 * Licensed to the University Corporation for Advanced Internet Development,
 * Inc. (UCAID) under one or more contributor license agreements.  See the
 * NOTICE file distributed with this work for additional information regarding
 * copyright ownership. The UCAID licenses this file to You under the Apache
 * License, Version 2.0 (the "License"); you may not use this file except in
 * compliance with the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.opensaml.saml2.core.impl;

import java.util.List;

import org.opensaml.common.impl.AbstractSAMLObject;
import org.opensaml.saml2.core.StatusMessage;
import org.opensaml.xml.XMLObject;

/**
 * Concrete implementation of {@link org.opensaml.saml2.core.StatusMessage}.
 */
public class StatusMessageImpl extends AbstractSAMLObject implements StatusMessage {

    /** Human-readable status text carried by this element. */
    private String message;

    /**
     * Constructor.
     *
     * @param namespaceURI the namespace the element is in
     * @param elementLocalName the local name of the XML element this Object represents
     * @param namespacePrefix the prefix for the given namespace
     */
    protected StatusMessageImpl(String namespaceURI, String elementLocalName, String namespacePrefix) {
        super(namespaceURI, elementLocalName, namespacePrefix);
    }

    /** {@inheritDoc} */
    public String getMessage() {
        return message;
    }

    /** {@inheritDoc} */
    public void setMessage(String newMessage) {
        message = prepareForAssignment(message, newMessage);
    }

    /** {@inheritDoc} */
    public List<XMLObject> getOrderedChildren() {
        // A StatusMessage carries only text content, never child elements.
        return null;
    }
}
mit
theyelllowdart/jmockit
samples/TimingFramework/src/org/jdesktop/animation/timing/interpolation/KeyTimes.java
3949
/** * Copyright (c) 2005-2006, Sun Microsystems, Inc * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the TimingFramework project nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ package org.jdesktop.animation.timing.interpolation; import java.util.ArrayList; import java.util.List; /** * Stores a list of times from 0 to 1 (the elapsed fraction of an animation * cycle) that are used in calculating interpolated * values for PropertySetter given a matching set of KeyValues and * Interpolators for those time intervals. In the simplest case, a * KeyFrame will consist of just two times in KeyTimes: 0 and 1. 
* * @author Chet */ public final class KeyTimes { private final List<Float> times = new ArrayList<Float>(); /** * Creates a new instance of KeyTimes. Times should be in increasing * order and should all be in the range [0,1], with the first value * being zero and the last being 1 * @throws IllegalArgumentException Time values must be ordered in * increasing value, the first value must be 0 and the last value * must be 1 */ public KeyTimes(float... times) { if (times[0] != 0) { throw new IllegalArgumentException("First time value must be zero"); } //noinspection FloatingPointEquality if (times[times.length - 1] != 1.0f) { throw new IllegalArgumentException("Last time value must be one"); } float prevTime = 0; for (float time : times) { if (time < prevTime) { throw new IllegalArgumentException("Time values must be in increasing order"); } this.times.add(time); prevTime = time; } } int getSize() { return times.size(); } /** * Returns time interval that contains this time fraction. */ int getInterval(float fraction) { int prevIndex = 0; for (int i = 1; i < times.size(); ++i) { float time = times.get(i); if (time >= fraction) { // inclusive of start time at next interval. So fraction==1 // will return the final interval (times.size() - 1) return prevIndex; } prevIndex = i; } return prevIndex; } float getTime(int index) { return times.get(index); } }
mit
jrmunson/Android-Dev
outsystems-app-android/Outsytems/platforms/android/src/com/outsystems/android/model/DLOperationType.java
631
package com.outsystems.android.model; public enum DLOperationType { dlLoginOperation("login"), dlOpenUrlOperation("openurl"), dlInvalidOperation("invalid"); public final String name; DLOperationType(String name){ this.name = name; } public String toString(){ return this.getName(); } public String getName(){ return this.name; } public static DLOperationType getOperationType(String name){ DLOperationType result = dlInvalidOperation; for(DLOperationType t: DLOperationType.values()){ if(t.getName().equalsIgnoreCase(name)){ result = t; break; } } return result; } }
mit
marszczybrew/Diorite
DioriteAPI/src/main/java/org/diorite/material/blocks/wooden/wood/fencegate/OakFenceGateMat.java
5868
package org.diorite.material.blocks.wooden.wood.fencegate;

import java.util.Map;

import org.diorite.BlockFace;
import org.diorite.material.Material;
import org.diorite.material.WoodTypeMat;
import org.diorite.material.blocks.FenceGateMat;
import org.diorite.utils.collections.maps.CaseInsensitiveMap;

import gnu.trove.map.TByteObjectMap;
import gnu.trove.map.hash.TByteObjectHashMap;

/**
 * Class representing block "OakFenceGate" and all its subtypes.
 * Each subtype is one combination of facing direction (SOUTH/WEST/NORTH/EAST)
 * and open state, registered in the byID/byName lookup tables below.
 */
public class OakFenceGateMat extends WoodenFenceGateMat {
    /**
     * Sub-ids used by diorite/minecraft by default.
     */
    public static final int USED_DATA_VALUES = 8;

    // The 8 canonical subtypes: 4 facings x {closed, open}.
    public static final OakFenceGateMat OAK_FENCE_GATE_SOUTH      = new OakFenceGateMat();
    public static final OakFenceGateMat OAK_FENCE_GATE_WEST       = new OakFenceGateMat(BlockFace.WEST, false);
    public static final OakFenceGateMat OAK_FENCE_GATE_NORTH      = new OakFenceGateMat(BlockFace.NORTH, false);
    public static final OakFenceGateMat OAK_FENCE_GATE_EAST       = new OakFenceGateMat(BlockFace.EAST, false);
    public static final OakFenceGateMat OAK_FENCE_GATE_SOUTH_OPEN = new OakFenceGateMat(BlockFace.SOUTH, true);
    public static final OakFenceGateMat OAK_FENCE_GATE_WEST_OPEN  = new OakFenceGateMat(BlockFace.WEST, true);
    public static final OakFenceGateMat OAK_FENCE_GATE_NORTH_OPEN = new OakFenceGateMat(BlockFace.NORTH, true);
    public static final OakFenceGateMat OAK_FENCE_GATE_EAST_OPEN  = new OakFenceGateMat(BlockFace.EAST, true);

    // Lookup tables populated by register(); keyed by sub-type name and sub-id.
    private static final Map<String, OakFenceGateMat>    byName = new CaseInsensitiveMap<>(USED_DATA_VALUES, SMALL_LOAD_FACTOR);
    private static final TByteObjectMap<OakFenceGateMat> byID   = new TByteObjectHashMap<>(USED_DATA_VALUES, SMALL_LOAD_FACTOR, Byte.MIN_VALUE);

    // Base constructor for the default (SOUTH, closed) subtype.
    // 107 / "minecraft:fence_gate" are the vanilla block id; 2 and 15 are
    // hardness and blast resistance.
    @SuppressWarnings("MagicNumber")
    protected OakFenceGateMat() {
        super("OAK_FENCE_GATE", 107, "minecraft:fence_gate", WoodTypeMat.OAK, BlockFace.SOUTH, false, 2, 15);
    }

    // Subtype constructor: derives shared properties from the SOUTH base
    // constant, varying only facing and open state.
    protected OakFenceGateMat(final BlockFace face, final boolean open) {
        super(OAK_FENCE_GATE_SOUTH.name(), OAK_FENCE_GATE_SOUTH.ordinal(), OAK_FENCE_GATE_SOUTH.getMinecraftId(), WoodTypeMat.OAK, face, open, OAK_FENCE_GATE_SOUTH.getHardness(), OAK_FENCE_GATE_SOUTH.getBlastResistance());
    }

    // Full constructor used by subclasses / custom registrations.
    protected OakFenceGateMat(final String enumName, final int id, final String minecraftId, final int maxStack, final String typeName, final byte type, final WoodTypeMat woodType, final BlockFace face, final boolean open, final float hardness, final float blastResistance) {
        super(enumName, id, minecraftId, maxStack, typeName, type, woodType, face, open, hardness, blastResistance);
    }

    // Inventory items have no facing/open state; collapse to the base material.
    @Override
    public Material ensureValidInventoryItem() {
        return Material.OAK_FENCE_GATE;
    }

    // Facing changes keep the current open state.
    @Override
    public OakFenceGateMat getBlockFacing(final BlockFace face) {
        return getByID(FenceGateMat.combine(face, this.open));
    }

    // Open-state changes keep the current facing.
    @Override
    public OakFenceGateMat getOpen(final boolean open) {
        return getByID(FenceGateMat.combine(this.face, open));
    }

    @Override
    public OakFenceGateMat getType(final String name) {
        return getByEnumName(name);
    }

    @Override
    public OakFenceGateMat getType(final int id) {
        return getByID(id);
    }

    @Override
    public OakFenceGateMat getType(final BlockFace face, final boolean open) {
        return getByID(FenceGateMat.combine(face, open));
    }

    /**
     * Returns one of OakFenceGate sub-type based on sub-id, may return null.
     *
     * @param id sub-type id
     *
     * @return sub-type of OakFenceGate or null
     */
    public static OakFenceGateMat getByID(final int id) {
        return byID.get((byte) id);
    }

    /**
     * Returns one of OakFenceGate sub-type based on name (selected by diorite team), may return null.
     * If block contains only one type, sub-name of it will be this same as name of material.
     *
     * @param name name of sub-type
     *
     * @return sub-type of OakFenceGate or null
     */
    public static OakFenceGateMat getByEnumName(final String name) {
        return byName.get(name);
    }

    /**
     * Returns one of OakFenceGate sub-type based on facing direction and open state.
     * It will never return null.
     *
     * @param blockFace facing direction of gate.
     * @param open      if gate should be open.
     *
     * @return sub-type of OakFenceGate
     */
    public static OakFenceGateMat getOakFenceGate(final BlockFace blockFace, final boolean open) {
        return getByID(FenceGateMat.combine(blockFace, open));
    }

    /**
     * Register new sub-type, may replace existing sub-types.
     * Should be used only if you know what you are doing, it will not create fully usable material.
     *
     * @param element sub-type to register
     */
    public static void register(final OakFenceGateMat element) {
        byID.put((byte) element.getType(), element);
        byName.put(element.getTypeName(), element);
    }

    @Override
    public OakFenceGateMat[] types() {
        return OakFenceGateMat.oakFenceGateTypes();
    }

    /**
     * @return array that contains all sub-types of this block.
     */
    public static OakFenceGateMat[] oakFenceGateTypes() {
        return byID.values(new OakFenceGateMat[byID.size()]);
    }

    // Registration runs after the constants above are constructed; order of
    // the constant declarations therefore matters (SOUTH must be first).
    static {
        OakFenceGateMat.register(OAK_FENCE_GATE_SOUTH);
        OakFenceGateMat.register(OAK_FENCE_GATE_WEST);
        OakFenceGateMat.register(OAK_FENCE_GATE_NORTH);
        OakFenceGateMat.register(OAK_FENCE_GATE_EAST);
        OakFenceGateMat.register(OAK_FENCE_GATE_SOUTH_OPEN);
        OakFenceGateMat.register(OAK_FENCE_GATE_WEST_OPEN);
        OakFenceGateMat.register(OAK_FENCE_GATE_NORTH_OPEN);
        OakFenceGateMat.register(OAK_FENCE_GATE_EAST_OPEN);
    }
}
mit
Iroxsmyth/Brisca-AI-2017
src/main/java/aima/core/learning/inductive/DecisionTree.java
2953
package aima.core.learning.inductive; import java.util.ArrayList; import java.util.Hashtable; import java.util.List; import aima.core.learning.framework.DataSet; import aima.core.learning.framework.Example; import aima.core.util.Util; /** * @author Ravi Mohan * */ public class DecisionTree { private String attributeName; // each node modelled as a hash of attribute_value/decisiontree private Hashtable<String, DecisionTree> nodes; protected DecisionTree() { } public DecisionTree(String attributeName) { this.attributeName = attributeName; nodes = new Hashtable<String, DecisionTree>(); } public void addLeaf(String attributeValue, String decision) { nodes.put(attributeValue, new ConstantDecisonTree(decision)); } public void addNode(String attributeValue, DecisionTree tree) { nodes.put(attributeValue, tree); } public Object predict(Example e) { String attrValue = e.getAttributeValueAsString(attributeName); if (nodes.containsKey(attrValue)) { return nodes.get(attrValue).predict(e); } else { throw new RuntimeException("no node exists for attribute value " + attrValue); } } public static DecisionTree getStumpFor(DataSet ds, String attributeName, String attributeValue, String returnValueIfMatched, List<String> unmatchedValues, String returnValueIfUnmatched) { DecisionTree dt = new DecisionTree(attributeName); dt.addLeaf(attributeValue, returnValueIfMatched); for (String unmatchedValue : unmatchedValues) { dt.addLeaf(unmatchedValue, returnValueIfUnmatched); } return dt; } public static List<DecisionTree> getStumpsFor(DataSet ds, String returnValueIfMatched, String returnValueIfUnmatched) { List<String> attributes = ds.getNonTargetAttributes(); List<DecisionTree> trees = new ArrayList<DecisionTree>(); for (String attribute : attributes) { List<String> values = ds.getPossibleAttributeValues(attribute); for (String value : values) { List<String> unmatchedValues = Util.removeFrom( ds.getPossibleAttributeValues(attribute), value); DecisionTree tree = getStumpFor(ds, attribute, 
value, returnValueIfMatched, unmatchedValues, returnValueIfUnmatched); trees.add(tree); } } return trees; } /** * @return Returns the attributeName. */ public String getAttributeName() { return attributeName; } @Override public String toString() { return toString(1, new StringBuffer()); } public String toString(int depth, StringBuffer buf) { if (attributeName != null) { buf.append(Util.ntimes("\t", depth)); buf.append(Util.ntimes("***", 1)); buf.append(attributeName + " \n"); for (String attributeValue : nodes.keySet()) { buf.append(Util.ntimes("\t", depth + 1)); buf.append("+" + attributeValue); buf.append("\n"); DecisionTree child = nodes.get(attributeValue); buf.append(child.toString(depth + 1, new StringBuffer())); } } return buf.toString(); } }
mit
jon-hanson/funcj
core/src/main/java/org/typemeta/funcj/kleisli/WriterMK.java
2430
package org.typemeta.funcj.kleisli;

import org.typemeta.funcj.control.WriterM;
import org.typemeta.funcj.functions.Functions;

/**
 * {@code WriterMK} models composable operations that return a {@code WriterM}.
 * @param <W> the writer log type of the returned {@code WriterM} type
 * @param <A> the input type of the operation
 * @param <B> the value type of the returned {@code WriterM} type
 */
@FunctionalInterface
public interface WriterMK<W, A, B> {
    /**
     * Construct a {@code WriterMK} value from a function that returns a writer.
     * @param wB the function returning a writer
     * @param <W> the writer log (fixed) type
     * @param <A> the input type of the operation
     * @param <B> the value type of the returned {@code WriterM} type
     * @return the new {@code WriterMK}
     */
    static <W, A, B> WriterMK<W, A, B> of(Functions.F<A, WriterM<W, B>> wB) {
        return wB::apply;
    }

    /**
     * Apply this {@code WriterMK} operation.
     * @param a the input value
     * @return the result of the operation
     */
    WriterM<W, B> apply(A a);

    /**
     * Compose this {@code WriterMK} with another by applying this one first,
     * then the other.
     * @param kBC the {@code WriterMK} to be applied after this one
     * @param <C> the second {@code WriterMK}'s return type
     * @return the composed {@code WriterMK}
     */
    default <C> WriterMK<W, A, C> andThen(WriterMK<W, B, C> kBC) {
        return t -> apply(t).flatMap(kBC::apply);
    }

    /**
     * Compose this {@code WriterMK} with another by applying the other one first,
     * and then this one.
     * @param kCA the {@code WriterMK} to be applied before this one
     * @param <C> the first {@code WriterMK}'s input type
     * @return the composed {@code WriterMK}
     */
    default <C> WriterMK<W, C, B> compose(WriterMK<W, C, A> kCA) {
        return s -> kCA.apply(s).flatMap(this::apply);
    }

    /**
     * Compose this {@code WriterMK} with a function,
     * by applying this {@code WriterMK} first,
     * and then mapping the function over the result.
     * @param fC the function
     * @param <C> the function return type
     * @return the composed {@code WriterMK}
     */
    default <C> WriterMK<W, A, C> map(Functions.F<B, C> fC) {
        return t -> apply(t).map(fC);
    }
}
mit
diegocedrim/sururu
test/br/pucrio/opus/smells/tests/metrics/NOAMTest.java
1449
package br.pucrio.opus.smells.tests.metrics; import java.io.IOException; import org.eclipse.jdt.core.dom.CompilationUnit; import org.eclipse.jdt.core.dom.TypeDeclaration; import org.junit.Assert; import org.junit.Test; import br.pucrio.opus.smells.ast.visitors.TypeDeclarationCollector; import br.pucrio.opus.smells.metrics.calculators.NOAMCalculator; import br.pucrio.opus.smells.tests.util.CompilationUnitLoader; public class NOAMTest { private Double getNOAM(String dummyClassName) throws IOException { CompilationUnit unit = CompilationUnitLoader.getCompilationUnitDummyClass(dummyClassName); TypeDeclarationCollector visitor = new TypeDeclarationCollector(); unit.accept(visitor); TypeDeclaration type = visitor.getNodesCollected().get(0); NOAMCalculator calculator = new NOAMCalculator(); return calculator.getValue(type); } @Test public void anonymousClassTest() throws IOException { Double noam = getNOAM("AnonymousClass.java"); Assert.assertEquals(new Double(0), noam); } @Test public void emptyClassTest() throws IOException { Double noam = getNOAM("EmptyClass.java"); Assert.assertEquals(new Double(0), noam); } @Test public void ccTest() throws IOException { Double noam = getNOAM("CC.java"); Assert.assertEquals(new Double(0), noam); } @Test public void assessorsTest() throws IOException { Double noam = getNOAM("Assessors.java"); Assert.assertEquals(new Double(9), noam); } }
mit
ex3ndr/telegram
app/src/main/java/org/telegram/android/kernel/compat/state/CompatSessionKey.java
536
package org.telegram.android.kernel.compat.state;

import java.io.Serializable;

/**
 * Backwards-compatibility snapshot of a per-datacenter session key, used when
 * migrating state that was persisted with Java serialization by older app
 * versions.  Read-only: fields are presumably populated by deserialization of
 * legacy data (there are no setters in the visible code) — confirm against the
 * migration caller.
 *
 * NOTE(review): no explicit serialVersionUID is declared, so the JVM-computed
 * default is in effect; adding one now (or renaming any field) could break
 * deserialization of previously stored state — leave the shape as-is.
 *
 * Created by ex3ndr on 17.11.13.
 */
public class CompatSessionKey implements Serializable {
    // Datacenter this session belongs to.
    private int dcId;
    // Opaque session key bytes.
    private byte[] session;
    // Last sequence number used in this session.
    private int seqNo;
    // Id of the last message sent/received in this session.
    private long lastMessageId;

    public int getDcId() {
        return dcId;
    }

    public byte[] getSession() {
        return session;
    }

    public int getSeqNo() {
        return seqNo;
    }

    public long getLastMessageId() {
        return lastMessageId;
    }
}
mit
mandeepdhami/netvirt-ctrl
sdnplatform/src/main/java/org/sdnplatform/vendor/OFBigSwitchVendorExtensions.java
2917
/* * Copyright (c) 2013 Big Switch Networks, Inc. * * Licensed under the Eclipse Public License, Version 1.0 (the * "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at * * http://www.eclipse.org/legal/epl-v10.html * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. */ package org.sdnplatform.vendor; import org.openflow.protocol.vendor.OFBasicVendorDataType; import org.openflow.protocol.vendor.OFBasicVendorId; import org.openflow.protocol.vendor.OFVendorId; public class OFBigSwitchVendorExtensions { private static boolean initialized = false; public static synchronized void initialize() { if (initialized) return; OFBasicVendorId bsnVendorId = new OFBasicVendorId(OFBigSwitchVendorData.BSN_VENDOR_ID, 4); OFVendorId.registerVendorId(bsnVendorId); // register data types used for big tap OFBasicVendorDataType setEntryVendorData = new OFBasicVendorDataType( OFNetmaskSetVendorData.BSN_SET_IP_MASK_ENTRY, OFNetmaskSetVendorData.getInstantiable()); bsnVendorId.registerVendorDataType(setEntryVendorData); OFBasicVendorDataType getEntryVendorDataRequest = new OFBasicVendorDataType( OFNetmaskGetVendorDataRequest.BSN_GET_IP_MASK_ENTRY_REQUEST, OFNetmaskGetVendorDataRequest.getInstantiable()); bsnVendorId.registerVendorDataType(getEntryVendorDataRequest); OFBasicVendorDataType getEntryVendorDataReply = new OFBasicVendorDataType( OFNetmaskGetVendorDataReply.BSN_GET_IP_MASK_ENTRY_REPLY, OFNetmaskGetVendorDataReply.getInstantiable()); bsnVendorId.registerVendorDataType(getEntryVendorDataReply); // register data types used for tunneling OFBasicVendorDataType getIntfIPVendorDataRequest = new OFBasicVendorDataType( 
OFInterfaceIPRequestVendorData.BSN_GET_INTERFACE_IP_REQUEST, OFInterfaceIPRequestVendorData.getInstantiable()); bsnVendorId.registerVendorDataType(getIntfIPVendorDataRequest); OFBasicVendorDataType getIntfIPVendorDataReply = new OFBasicVendorDataType( OFInterfaceIPReplyVendorData.BSN_GET_INTERFACE_IP_REPLY, OFInterfaceIPReplyVendorData.getInstantiable()); bsnVendorId.registerVendorDataType(getIntfIPVendorDataReply); } }
epl-1.0
alastrina123/debrief
org.mwc.debrief.core/src/org/mwc/debrief/core/contenttype/AISContentDescriber.java
1163
package org.mwc.debrief.core.contenttype; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import org.eclipse.core.internal.content.TextContentDescriber; import org.eclipse.core.runtime.Status; import org.eclipse.core.runtime.content.IContentDescription; import org.mwc.cmap.core.CorePlugin; public class AISContentDescriber extends TextContentDescriber { @Override public int describe(InputStream contents, IContentDescription description) throws IOException { BufferedReader r = null; int res = INVALID; try { r = new BufferedReader(new InputStreamReader(contents)); String firstLine = r.readLine(); if ((firstLine != null) && (firstLine.contains("!AIVDM"))) { res = VALID; } else { // just double-check that it's invalid res = INVALID; } } catch (Exception e) { CorePlugin.logError(Status.ERROR, "AIS content type error", e); } finally { try { if (r != null) r.close(); } catch (IOException e) { CorePlugin.logError(Status.ERROR, "Couldn't close file file", e); } } return res; } }
epl-1.0
boniatillo-com/PhaserEditor
source/thirdparty/jsdt/org.eclipse.wst.jsdt.ui/src/org/eclipse/wst/jsdt/ui/actions/FindReadReferencesInWorkingSetAction.java
4023
/*******************************************************************************
 * Copyright (c) 2000, 2008 IBM Corporation and others.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *     IBM Corporation - initial API and implementation
 *******************************************************************************/
package org.eclipse.wst.jsdt.ui.actions;

import org.eclipse.ui.IWorkbenchSite;
import org.eclipse.ui.IWorkingSet;
import org.eclipse.ui.PlatformUI;
import org.eclipse.wst.jsdt.core.IField;
import org.eclipse.wst.jsdt.core.ILocalVariable;
import org.eclipse.wst.jsdt.core.search.IJavaScriptSearchConstants;
import org.eclipse.wst.jsdt.internal.ui.IJavaHelpContextIds;
import org.eclipse.wst.jsdt.internal.ui.JavaPluginImages;
import org.eclipse.wst.jsdt.internal.ui.javaeditor.JavaEditor;
import org.eclipse.wst.jsdt.internal.ui.search.SearchMessages;

/**
 * Finds field read accesses of the selected element in working sets.
 * The action is applicable to selections representing a JavaScript field.
 *
 * <p>
 * This class may be instantiated; it is not intended to be subclassed.
 * </p>
 *
 *
 * Provisional API: This class/interface is part of an interim API that is still under development and expected to
 * change significantly before reaching stability. It is being made available at this early stage to solicit feedback
 * from pioneering adopters on the understanding that any code that uses this API will almost certainly be broken
 * (repeatedly) as the API evolves.
 */
public class FindReadReferencesInWorkingSetAction extends FindReferencesInWorkingSetAction {

    /**
     * Creates a new <code>FindReadReferencesInWorkingSetAction</code>. The action
     * requires that the selection provided by the site's selection provider is of type
     * <code>org.eclipse.jface.viewers.IStructuredSelection</code>. The user will be
     * prompted to select the working sets.
     *
     * @param site the site providing context information for this action
     */
    public FindReadReferencesInWorkingSetAction(IWorkbenchSite site) {
        super(site);
    }

    /**
     * Creates a new <code>FindReadReferencesInWorkingSetAction</code>. The action
     * requires that the selection provided by the site's selection provider is of type
     * <code>org.eclipse.jface.viewers.IStructuredSelection</code>.
     *
     * @param site the site providing context information for this action
     * @param workingSets the working sets to be used in the search
     */
    public FindReadReferencesInWorkingSetAction(IWorkbenchSite site, IWorkingSet[] workingSets) {
        super(site, workingSets);
    }

    /**
     * Note: This constructor is for internal use only. Clients should not call this constructor.
     * @param editor the JavaScript editor
     */
    public FindReadReferencesInWorkingSetAction(JavaEditor editor) {
        super(editor);
    }

    /**
     * Note: This constructor is for internal use only. Clients should not call this constructor.
     * @param editor the JavaScript editor
     * @param workingSets the working sets to be used in the search
     */
    public FindReadReferencesInWorkingSetAction(JavaEditor editor, IWorkingSet[] workingSets) {
        super(editor, workingSets);
    }

    // This action only applies to fields and local variables.
    Class[] getValidTypes() {
        return new Class[] { IField.class, ILocalVariable.class };
    }

    // Configures label, tooltip, icon and context help for the action.
    void init() {
        setText(SearchMessages.Search_FindReadReferencesInWorkingSetAction_label);
        setToolTipText(SearchMessages.Search_FindReadReferencesInWorkingSetAction_tooltip);
        setImageDescriptor(JavaPluginImages.DESC_OBJS_SEARCH_REF);
        PlatformUI.getWorkbench().getHelpSystem().setHelp(this, IJavaHelpContextIds.FIND_READ_REFERENCES_IN_WORKING_SET_ACTION);
    }

    // Restricts the search engine to read accesses (vs. write/all references).
    int getLimitTo() {
        return IJavaScriptSearchConstants.READ_ACCESSES;
    }

    // Message shown when the selected element is not a field/local variable.
    String getOperationUnavailableMessage() {
        return SearchMessages.JavaElementAction_operationUnavailable_field;
    }
}
epl-1.0
gavinying/kura
kura/org.eclipse.kura.broker.artemis.xml/src/main/java/org/eclipse/kura/broker/artemis/xml/ServiceComponent.java
3995
/******************************************************************************* * Copyright (c) 2017 Red Hat Inc * * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * which accompanies this distribution, and is available at * http://www.eclipse.org/legal/epl-v10.html *******************************************************************************/ package org.eclipse.kura.broker.artemis.xml; import java.util.Arrays; import java.util.HashSet; import java.util.Map; import java.util.Set; import org.eclipse.kura.broker.artemis.core.ServerConfiguration; import org.eclipse.kura.broker.artemis.core.ServerManager; import org.eclipse.kura.broker.artemis.core.UserAuthentication; import org.eclipse.kura.configuration.ConfigurableComponent; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class ServiceComponent implements ConfigurableComponent { private static final Logger logger = LoggerFactory.getLogger(ServiceComponent.class); private ServerConfiguration configuration; private ServerManager server; public void activate(final Map<String, Object> properties) throws Exception { final ServerConfiguration cfg = parse(properties); if (cfg != null) { start(cfg); } } public void modified(final Map<String, Object> properties) throws Exception { final ServerConfiguration cfg = parse(properties); if (this.configuration == cfg) { logger.debug("Configuration identical .... skipping update"); return; } if (this.configuration != null && this.configuration.equals(cfg)) { logger.debug("Configuration equal .... 
skipping update"); return; } stop(); if (cfg != null) { start(cfg); } } public void deactivate() throws Exception { stop(); } private void start(final ServerConfiguration configuration) throws Exception { logger.info("Starting Artemis"); this.server = new ServerManager(configuration); this.server.start(); this.configuration = configuration; } private void stop() throws Exception { logger.info("Stopping Artemis"); if (this.server != null) { this.server.stop(); this.server = null; } this.configuration = null; } private ServerConfiguration parse(final Map<String, Object> properties) { // is enabled? if (!Boolean.TRUE.equals(properties.get("enabled"))) { return null; } // parse broker XML final String brokerXml = (String) properties.get("brokerXml"); if (brokerXml == null || brokerXml.isEmpty()) { return null; } // parse required protocols final Set<String> requiredProtocols = new HashSet<>(); { final Object v = properties.get("requiredProtocols"); if (v instanceof String[]) { requiredProtocols.addAll(Arrays.asList((String[]) v)); } else if (v instanceof String) { final String vs = (String) v; final String[] reqs = vs.split("\\s*,\\s*"); requiredProtocols.addAll(Arrays.asList(reqs)); } } // create security configuration final UserAuthentication.Builder auth = new UserAuthentication.Builder(); final String defaultUser = (String) properties.get("defaultUser"); if (defaultUser != null) { auth.defaultUser(defaultUser); } auth.parseUsers((String) properties.get("users")); // create result final ServerConfiguration cfg = new ServerConfiguration(); cfg.setBrokerXml(brokerXml); cfg.setRequiredProtocols(requiredProtocols); cfg.setUserAuthentication(auth.build()); return cfg; } }
epl-1.0
rfellows/mondrian
testsrc/main/mondrian/test/clearview/MetricFilterTest.java
1448
/*
// This software is subject to the terms of the Eclipse Public License v1.0
// Agreement, available at the following URL:
// http://www.eclipse.org/legal/epl-v10.html.
// You must accept the terms of that agreement to use this software.
//
// Copyright (C) 2007-2007 Pentaho and others
// All Rights Reserved.
*/
package mondrian.test.clearview;

import mondrian.test.DiffRepository;

import junit.framework.TestSuite;

/**
 * <code>MetricFilterTest</code> is a test suite which tests scenarios of
 * filtering out measures' values in the FoodMart database.
 * MDX queries and their expected results are maintained separately in
 * MetricFilterTest.ref.xml file.If you would prefer to see them as inlined
 * Java string literals, run ant target "generateDiffRepositoryJUnit" and
 * then use file MetricFilterTestJUnit.java which will be generated in
 * this directory.
 *
 * @author Khanh Vu
 */
public class MetricFilterTest extends ClearViewBase {

    public MetricFilterTest() {
        super();
    }

    public MetricFilterTest(String name) {
        super(name);
    }

    // Instance-side hook used by the ClearViewBase machinery.
    public DiffRepository getDiffRepos() {
        return getDiffReposStatic();
    }

    // Looks up the diff repository backed by MetricFilterTest.ref.xml.
    private static DiffRepository getDiffReposStatic() {
        return DiffRepository.lookup(MetricFilterTest.class);
    }

    // Builds one JUnit test per MDX case found in the diff repository.
    public static TestSuite suite() {
        return constructSuite(getDiffReposStatic(), MetricFilterTest.class);
    }

}

// End MetricFilterTest.java
epl-1.0
andiikaa/openhab2
addons/binding/org.openhab.binding.tankerkoenig/src/main/java/org/openhab/binding/tankerkoenig/internal/config/TankerkoenigListResult.java
1429
/**
 * Copyright (c) 2010-2018 by the respective copyright holders.
 *
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 */
package org.openhab.binding.tankerkoenig.internal.config;

import org.openhab.binding.tankerkoenig.TankerkoenigBindingConstants;

/***
 * The {@link TankerkoenigListResult} class is the representing java model for the json result of the tankerkoenig.de
 * api
 *
 * @author Dennis Dollinger
 */
public class TankerkoenigListResult {

    /** Whether the API call succeeded. */
    private boolean ok;
    /** Fuel prices returned by the API. */
    private Prices prices;
    /** Status/error message returned by the API. */
    private String message;

    /**
     * Builds a failed result carrying the binding's "no valid response" message.
     *
     * @return a result with {@code ok == false}
     */
    public static TankerkoenigListResult emptyResult() {
        final TankerkoenigListResult result = new TankerkoenigListResult();
        result.setOk(false);
        result.setMessage(TankerkoenigBindingConstants.NO_VALID_RESPONSE);
        return result;
    }

    public boolean isOk() {
        return ok;
    }

    public void setOk(boolean ok) {
        this.ok = ok;
    }

    public Prices getPrices() {
        return prices;
    }

    public void setPrices(Prices prices) {
        this.prices = prices;
    }

    public String getMessage() {
        return message;
    }

    public void setMessage(String message) {
        this.message = message;
    }
}
epl-1.0
boniatillo-com/PhaserEditor
source/thirdparty/jsdt/org.eclipse.wst.jsdt.ui/src/org/eclipse/wst/jsdt/ui/actions/GenerateActionGroup.java
21469
/*******************************************************************************
 * Copyright (c) 2000, 2008 IBM Corporation and others.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *     IBM Corporation - initial API and implementation
 *******************************************************************************/
package org.eclipse.wst.jsdt.ui.actions;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.eclipse.core.runtime.Assert;
import org.eclipse.jface.action.IAction;
import org.eclipse.jface.action.IMenuManager;
import org.eclipse.jface.action.MenuManager;
import org.eclipse.jface.action.Separator;
import org.eclipse.jface.commands.ActionHandler;
import org.eclipse.jface.viewers.ISelection;
import org.eclipse.jface.viewers.ISelectionChangedListener;
import org.eclipse.jface.viewers.ISelectionProvider;
import org.eclipse.jface.viewers.IStructuredSelection;
import org.eclipse.ui.IActionBars;
import org.eclipse.ui.IViewPart;
import org.eclipse.ui.IWorkbenchSite;
import org.eclipse.ui.actions.ActionGroup;
import org.eclipse.ui.actions.AddBookmarkAction;
import org.eclipse.ui.handlers.IHandlerActivation;
import org.eclipse.ui.handlers.IHandlerService;
import org.eclipse.ui.ide.IDEActionFactory;
import org.eclipse.ui.part.Page;
import org.eclipse.ui.texteditor.IUpdate;
import org.eclipse.ui.texteditor.IWorkbenchActionDefinitionIds;
import org.eclipse.wst.jsdt.internal.ui.IUIConstants;
import org.eclipse.wst.jsdt.internal.ui.actions.ActionMessages;
import org.eclipse.wst.jsdt.internal.ui.actions.AddTaskAction;
import org.eclipse.wst.jsdt.internal.ui.actions.AllCleanUpsAction;
import org.eclipse.wst.jsdt.internal.ui.actions.FindBrokenNLSKeysAction;
import org.eclipse.wst.jsdt.internal.ui.actions.JDTQuickMenuAction;
import org.eclipse.wst.jsdt.internal.ui.actions.MultiSortMembersAction;
import org.eclipse.wst.jsdt.internal.ui.javaeditor.CompilationUnitEditor;
import org.eclipse.wst.jsdt.ui.IContextMenuConstants;

/**
 * Action group that adds the source and generate actions to a part's context
 * menu and installs handlers for the corresponding global menu actions.
 *
 * <p>
 * This class may be instantiated; it is not intended to be subclassed.
 * </p>
 *
 *
 * Provisional API: This class/interface is part of an interim API that is still under development and expected to
 * change significantly before reaching stability. It is being made available at this early stage to solicit feedback
 * from pioneering adopters on the understanding that any code that uses this API will almost certainly be broken
 * (repeatedly) as the API evolves.
 */
public class GenerateActionGroup extends ActionGroup {

	/**
	 * Pop-up menu: id of the source sub menu (value <code>org.eclipse.wst.jsdt.ui.source.menu</code>).
	 *
	 *
	 */
	public static final String MENU_ID= "org.eclipse.wst.jsdt.ui.source.menu"; //$NON-NLS-1$

	/**
	 * Pop-up menu: id of the import group of the source sub menu (value
	 * <code>importGroup</code>).
	 *
	 *
	 */
	public static final String GROUP_IMPORT= "importGroup"; //$NON-NLS-1$

	/**
	 * Pop-up menu: id of the generate group of the source sub menu (value
	 * <code>generateGroup</code>).
	 *
	 *
	 */
	public static final String GROUP_GENERATE= "generateGroup"; //$NON-NLS-1$

	/**
	 * Pop-up menu: id of the code group of the source sub menu (value
	 * <code>codeGroup</code>).
	 *
	 *
	 */
	public static final String GROUP_CODE= "codeGroup"; //$NON-NLS-1$

	/**
	 * Pop-up menu: id of the externalize group of the source sub menu (value
	 * <code>externalizeGroup</code>).
	 *
	 * TODO: Make API
	 */
	private static final String GROUP_EXTERNALIZE= "externalizeGroup"; //$NON-NLS-1$

	/**
	 * Pop-up menu: id of the comment group of the source sub menu (value
	 * <code>commentGroup</code>).
	 *
	 * TODO: Make API
	 */
	private static final String GROUP_COMMENT= "commentGroup"; //$NON-NLS-1$

	/**
	 * Pop-up menu: id of the edit group of the source sub menu (value
	 * <code>editGroup</code>).
	 *
	 * TODO: Make API
	 */
	private static final String GROUP_EDIT= "editGroup"; //$NON-NLS-1$

	// Non-null only when this group is owned by an editor (see isEditorOwner()).
	private CompilationUnitEditor fEditor;
	private IWorkbenchSite fSite;
	// Context-menu group the "Source" sub-menu is appended to.
	private String fGroupName= IContextMenuConstants.GROUP_REORGANIZE;
	// Listeners registered via registerSelectionListener(); removed again in dispose().
	private List fRegisteredSelectionListeners;

//	private AddImportOnSelectionAction fAddImport;
	private OverrideMethodsAction fOverrideMethods;
//	private GenerateHashCodeEqualsAction fHashCodeEquals;
	private AddGetterSetterAction fAddGetterSetter;
	private AddDelegateMethodsAction fAddDelegateMethods;
//	private AddUnimplementedConstructorsAction fAddUnimplementedConstructors;
	private GenerateNewConstructorUsingFieldsAction fGenerateConstructorUsingFields;
	private AddJavaDocStubAction fAddJavaDocStub;
	private AddBookmarkAction fAddBookmark;
	private AddTaskAction fAddTaskAction;
	private ExternalizeStringsAction fExternalizeStrings;
	private AllCleanUpsAction fCleanUp;
	private FindBrokenNLSKeysAction fFindNLSProblems;
//	private OrganizeImportsAction fOrganizeImports;
	private MultiSortMembersAction fSortMembers;
	private FormatAllAction fFormatAll;

	private static final String QUICK_MENU_ID= "org.eclipse.wst.jsdt.ui.edit.text.java.source.quickMenu"; //$NON-NLS-1$

	// Quick-access variant of the Source menu, shown as a popup at the caret.
	private class SourceQuickAccessAction extends JDTQuickMenuAction {
		public SourceQuickAccessAction(CompilationUnitEditor editor) {
			super(editor, QUICK_MENU_ID);
		}
		protected void fillMenu(IMenuManager menu) {
			fillQuickMenu(menu);
		}
	}

	private JDTQuickMenuAction fQuickAccessAction;
	private IHandlerActivation fQuickAccessHandlerActivation;
	private IHandlerService fHandlerService;

	/**
	 * Note: This constructor is for internal use only. Clients should not call this constructor.
	 * @param editor the compilation unit editor
	 * @param groupName the group name to add the action to
	 */
	public GenerateActionGroup(CompilationUnitEditor editor, String groupName) {
		fSite= editor.getSite();
		fEditor= editor;
		fGroupName= groupName;

//		fAddImport= new AddImportOnSelectionAction(editor);
//		fAddImport.setActionDefinitionId(IJavaEditorActionDefinitionIds.ADD_IMPORT);
//		fAddImport.update();
//		editor.setAction("AddImport", fAddImport); //$NON-NLS-1$
//
//		fOrganizeImports= new OrganizeImportsAction(editor);
//		fOrganizeImports.setActionDefinitionId(IJavaEditorActionDefinitionIds.ORGANIZE_IMPORTS);
//		editor.setAction("OrganizeImports", fOrganizeImports); //$NON-NLS-1$

		fSortMembers= new MultiSortMembersAction(editor);
		fSortMembers.setActionDefinitionId(IJavaEditorActionDefinitionIds.SORT_MEMBERS);
		editor.setAction("SortMembers", fSortMembers); //$NON-NLS-1$

//		IAction pastAction= editor.getAction(ITextEditorActionConstants.PASTE);//IWorkbenchActionDefinitionIds.PASTE);
//		fCopyQualifiedNameAction= new CopyQualifiedNameAction(editor, null, pastAction);
//		fCopyQualifiedNameAction.setActionDefinitionId(CopyQualifiedNameAction.JAVA_EDITOR_ACTION_DEFINITIONS_ID);
//		editor.setAction("CopyQualifiedName", fCopyQualifiedNameAction); //$NON-NLS-1$

		// Refactoring-backed generators are only wired up when refactoring support is enabled.
		if (IUIConstants.SUPPORT_REFACTORING) {
			fOverrideMethods= new OverrideMethodsAction(editor);
			fOverrideMethods.setActionDefinitionId(IJavaEditorActionDefinitionIds.OVERRIDE_METHODS);
			editor.setAction("OverrideMethods", fOverrideMethods); //$NON-NLS-1$

			fAddGetterSetter= new AddGetterSetterAction(editor);
			fAddGetterSetter.setActionDefinitionId(IJavaEditorActionDefinitionIds.CREATE_GETTER_SETTER);
			editor.setAction("AddGetterSetter", fAddGetterSetter); //$NON-NLS-1$

			fAddDelegateMethods= new AddDelegateMethodsAction(editor);
			fAddDelegateMethods.setActionDefinitionId(IJavaEditorActionDefinitionIds.CREATE_DELEGATE_METHODS);
			editor.setAction("AddDelegateMethods", fAddDelegateMethods); //$NON-NLS-1$

//			fAddUnimplementedConstructors= new AddUnimplementedConstructorsAction(editor);
//			fAddUnimplementedConstructors.setActionDefinitionId(IJavaEditorActionDefinitionIds.ADD_UNIMPLEMENTED_CONTRUCTORS);
//			editor.setAction("AddUnimplementedConstructors", fAddUnimplementedConstructors); //$NON-NLS-1$

			fGenerateConstructorUsingFields= new GenerateNewConstructorUsingFieldsAction(editor);
			fGenerateConstructorUsingFields.setActionDefinitionId(IJavaEditorActionDefinitionIds.GENERATE_CONSTRUCTOR_USING_FIELDS);
			editor.setAction("GenerateConstructorUsingFields", fGenerateConstructorUsingFields); //$NON-NLS-1$
		}

//		fHashCodeEquals= new GenerateHashCodeEqualsAction(editor);
//		fHashCodeEquals.setActionDefinitionId(IJavaEditorActionDefinitionIds.GENERATE_HASHCODE_EQUALS);
//		editor.setAction("GenerateHashCodeEquals", fHashCodeEquals); //$NON-NLS-1$

		fAddJavaDocStub= new AddJavaDocStubAction(editor);
		fAddJavaDocStub.setActionDefinitionId(IJavaEditorActionDefinitionIds.ADD_JAVADOC_COMMENT);
		editor.setAction("AddJavadocComment", fAddJavaDocStub); //$NON-NLS-1$

		fCleanUp= new AllCleanUpsAction(editor);
		fCleanUp.setActionDefinitionId(IJavaEditorActionDefinitionIds.CLEAN_UP);
		editor.setAction("CleanUp", fCleanUp); //$NON-NLS-1$

		if (IUIConstants.SUPPORT_REFACTORING) {
			fExternalizeStrings= new ExternalizeStringsAction(editor);
			fExternalizeStrings.setActionDefinitionId(IJavaEditorActionDefinitionIds.EXTERNALIZE_STRINGS);
			editor.setAction("ExternalizeStrings", fExternalizeStrings); //$NON-NLS-1$
		}

		installQuickAccessAction();
	}

	/**
	 * Creates a new <code>GenerateActionGroup</code>. The group
	 * requires that the selection provided by the page's selection provider
	 * is of type <code>org.eclipse.jface.viewers.IStructuredSelection</code>.
	 *
	 * @param page the page that owns this action group
	 */
	public GenerateActionGroup(Page page) {
		this(page.getSite());
	}

	/**
	 * Creates a new <code>GenerateActionGroup</code>. The group
	 * requires that the selection provided by the part's selection provider
	 * is of type <code>org.eclipse.jface.viewers.IStructuredSelection</code>.
	 *
	 * @param part the view part that owns this action group
	 */
	public GenerateActionGroup(IViewPart part) {
		this(part.getSite());
	}

	// View/page variant: actions are created against the site, initialized from the
	// current selection, and registered as selection listeners so they stay up to date.
	private GenerateActionGroup(IWorkbenchSite site) {
		fSite= site;
		ISelectionProvider provider= fSite.getSelectionProvider();
		ISelection selection= provider.getSelection();

		if (IUIConstants.SUPPORT_REFACTORING) {
			fOverrideMethods = new OverrideMethodsAction(site);
			fOverrideMethods.setActionDefinitionId(IJavaEditorActionDefinitionIds.OVERRIDE_METHODS);
			fAddGetterSetter = new AddGetterSetterAction(site);
			fAddGetterSetter.setActionDefinitionId(IJavaEditorActionDefinitionIds.CREATE_GETTER_SETTER);
			fAddDelegateMethods = new AddDelegateMethodsAction(site);
			fAddDelegateMethods.setActionDefinitionId(IJavaEditorActionDefinitionIds.CREATE_DELEGATE_METHODS);
			fGenerateConstructorUsingFields = new GenerateNewConstructorUsingFieldsAction(site);
			fGenerateConstructorUsingFields.setActionDefinitionId(IJavaEditorActionDefinitionIds.GENERATE_CONSTRUCTOR_USING_FIELDS);
		}

		fAddJavaDocStub= new AddJavaDocStubAction(site);
		fAddJavaDocStub.setActionDefinitionId(IJavaEditorActionDefinitionIds.ADD_JAVADOC_COMMENT);

		fAddBookmark= new AddBookmarkAction(site.getShell());
		fAddBookmark.setActionDefinitionId(IWorkbenchActionDefinitionIds.ADD_BOOKMARK);

		// context-menu only -> no action definition ids
		fAddTaskAction= new AddTaskAction(site);
		fAddTaskAction.setActionDefinitionId(IWorkbenchActionDefinitionIds.ADD_TASK);

		if (IUIConstants.SUPPORT_REFACTORING) {
			fExternalizeStrings = new ExternalizeStringsAction(site);
			fExternalizeStrings.setActionDefinitionId(IJavaEditorActionDefinitionIds.EXTERNALIZE_STRINGS);
			fFindNLSProblems = new FindBrokenNLSKeysAction(site);
			fFindNLSProblems.setActionDefinitionId(FindBrokenNLSKeysAction.FIND_BROKEN_NLS_KEYS_ACTION_ID);
		}

		fSortMembers= new MultiSortMembersAction(site);
		fSortMembers.setActionDefinitionId(IJavaEditorActionDefinitionIds.SORT_MEMBERS);

		fFormatAll= new FormatAllAction(site);
		fFormatAll.setActionDefinitionId(IJavaEditorActionDefinitionIds.FORMAT);

		fCleanUp= new AllCleanUpsAction(site);
		fCleanUp.setActionDefinitionId(IJavaEditorActionDefinitionIds.CLEAN_UP);

		// Seed enablement from the selection that exists at construction time.
		fAddJavaDocStub.update(selection);
		if (IUIConstants.SUPPORT_REFACTORING) {
			fOverrideMethods.update(selection);
			fAddGetterSetter.update(selection);
			fAddDelegateMethods.update(selection);
//			fAddUnimplementedConstructors.update(selection);
			fGenerateConstructorUsingFields.update(selection);
//			fHashCodeEquals.update(selection);
			fExternalizeStrings.update(selection);
			fFindNLSProblems.update(selection);
		}
		fCleanUp.update(selection);
		fAddTaskAction.update(selection);
//		fOrganizeImports.update(selection);
		fSortMembers.update(selection);
		fFormatAll.update(selection);
		if (selection instanceof IStructuredSelection) {
			IStructuredSelection ss= (IStructuredSelection)selection;
			fAddBookmark.selectionChanged(ss);
		} else {
			fAddBookmark.setEnabled(false);
		}

		// Keep the actions in sync with future selection changes; dispose() unregisters them.
		if (IUIConstants.SUPPORT_REFACTORING) {
			registerSelectionListener(provider, fOverrideMethods);
			registerSelectionListener(provider, fAddGetterSetter);
			registerSelectionListener(provider, fAddDelegateMethods);
//			registerSelectionListener(provider, fAddUnimplementedConstructors);
			registerSelectionListener(provider, fGenerateConstructorUsingFields);
			registerSelectionListener(provider, fExternalizeStrings);
			registerSelectionListener(provider, fFindNLSProblems);
		}
//		registerSelectionListener(provider, fHashCodeEquals);
		registerSelectionListener(provider, fAddJavaDocStub);
		registerSelectionListener(provider, fAddBookmark);
//		registerSelectionListener(provider, fOrganizeImports);
		registerSelectionListener(provider, fFormatAll);
		registerSelectionListener(provider, fSortMembers);
		registerSelectionListener(provider, fAddTaskAction);
		registerSelectionListener(provider, fCleanUp);

		installQuickAccessAction();
	}

	// Binds the quick-access popup to its command id via the handler service.
	// NOTE(review): in the view/page case fEditor is null here — presumably
	// JDTQuickMenuAction tolerates a null editor; confirm before changing.
	private void installQuickAccessAction() {
		fHandlerService= (IHandlerService)fSite.getService(IHandlerService.class);
		if (fHandlerService != null) {
			fQuickAccessAction= new SourceQuickAccessAction(fEditor);
			fQuickAccessHandlerActivation= fHandlerService.activateHandler(fQuickAccessAction.getActionDefinitionId(), new ActionHandler(fQuickAccessAction));
		}
	}

	// Adds the listener to the provider and remembers it so dispose() can remove it again.
	private void registerSelectionListener(ISelectionProvider provider, ISelectionChangedListener listener) {
		if (fRegisteredSelectionListeners == null)
			fRegisteredSelectionListeners= new ArrayList(20);
		provider.addSelectionChangedListener(listener);
		fRegisteredSelectionListeners.add(listener);
	}

	/*
	 * The state of the editor owning this action group has changed.
	 * This method does nothing if the group's owner isn't an
	 * editor.
	 */
	/**
	 * Note: This method is for internal use only. Clients should not call this method.
	 */
	public void editorStateChanged() {
		Assert.isTrue(isEditorOwner());
	}

	/* (non-Javadoc)
	 * Method declared in ActionGroup
	 */
	public void fillActionBars(IActionBars actionBar) {
		super.fillActionBars(actionBar);
		setGlobalActionHandlers(actionBar);
	}

	/* (non-Javadoc)
	 * Method declared in ActionGroup
	 */
	public void fillContextMenu(IMenuManager menu) {
		super.fillContextMenu(menu);
		String menuText= ActionMessages.SourceMenu_label;
		if (fQuickAccessAction != null) {
			menuText= fQuickAccessAction.addShortcut(menuText);
		}
		IMenuManager subMenu= new MenuManager(menuText, MENU_ID);
		int added= 0;
		if (isEditorOwner()) {
			added= fillEditorSubMenu(subMenu);
		} else {
			added= fillViewSubMenu(subMenu);
		}
		// Only show the Source sub-menu when it actually contains enabled actions.
		if (added > 0)
			menu.appendToGroup(fGroupName, subMenu);
	}

	// Populates the quick-access popup with the same content as the context sub-menu.
	private void fillQuickMenu(IMenuManager menu) {
		if (isEditorOwner()) {
			fillEditorSubMenu(menu);
		} else {
			fillViewSubMenu(menu);
		}
	}

	// Fills the Source sub-menu for the editor case; returns the number of actions added.
	private int fillEditorSubMenu(IMenuManager source) {
		int added= 0;
		source.add(new Separator(GROUP_COMMENT));
		added+= addEditorAction(source, "ToggleComment"); //$NON-NLS-1$
		added+= addEditorAction(source, "AddBlockComment"); //$NON-NLS-1$
		added+= addEditorAction(source, "RemoveBlockComment"); //$NON-NLS-1$
		added+= addAction(source, fAddJavaDocStub);
		source.add(new Separator(GROUP_EDIT));
		added+= addEditorAction(source, "Indent"); //$NON-NLS-1$
		added+= addEditorAction(source, "Format"); //$NON-NLS-1$
		source.add(new Separator(GROUP_IMPORT));
//		added+= addAction(source, fAddImport);
//		added+= addAction(source, fOrganizeImports);
		added+= addAction(source, fSortMembers);
		added+= addAction(source, fCleanUp);
		if (IUIConstants.SUPPORT_REFACTORING) {
			source.add(new Separator(GROUP_GENERATE));
			added += addAction(source, fOverrideMethods);
			added += addAction(source, fAddGetterSetter);
			added += addAction(source, fAddDelegateMethods);
//			added+= addAction(source, fHashCodeEquals);
			added += addAction(source, fGenerateConstructorUsingFields);
		}
//		added+= addAction(source, fAddUnimplementedConstructors);
		source.add(new Separator(GROUP_CODE));
		source.add(new Separator(GROUP_EXTERNALIZE));
		if (IUIConstants.SUPPORT_REFACTORING)
			added+= addAction(source, fExternalizeStrings);
		return added;
	}

	// Fills the Source sub-menu for the view/page case; returns the number of actions added.
	private int fillViewSubMenu(IMenuManager source) {
		int added= 0;
		source.add(new Separator(GROUP_COMMENT));
		added+= addAction(source, fAddJavaDocStub);
		source.add(new Separator(GROUP_EDIT));
		added+= addAction(source, fFormatAll);
		source.add(new Separator(GROUP_IMPORT));
//		added+= addAction(source, fAddImport);
//		added+= addAction(source, fOrganizeImports);
		added+= addAction(source, fSortMembers);
		added+= addAction(source, fCleanUp);
		if (IUIConstants.SUPPORT_REFACTORING) {
			source.add(new Separator(GROUP_GENERATE));
			added+= addAction(source, fOverrideMethods);
			added+= addAction(source, fAddGetterSetter);
			added+= addAction(source, fAddDelegateMethods);
//			added+= addAction(source, fHashCodeEquals);
			added+= addAction(source, fGenerateConstructorUsingFields);
//			added+= addAction(source, fAddUnimplementedConstructors);
		}
		source.add(new Separator(GROUP_CODE));
		source.add(new Separator(GROUP_EXTERNALIZE));
		if (IUIConstants.SUPPORT_REFACTORING) {
			added+= addAction(source, fExternalizeStrings);
			added+= addAction(source, fFindNLSProblems);
		}
		return added;
	}

	/* (non-Javadoc)
	 * Method declared in ActionGroup
	 */
	public void dispose() {
		// Unregister every selection listener added via registerSelectionListener().
		if (fRegisteredSelectionListeners != null) {
			ISelectionProvider provider= fSite.getSelectionProvider();
			for (Iterator iter= fRegisteredSelectionListeners.iterator(); iter.hasNext();) {
				ISelectionChangedListener listener= (ISelectionChangedListener) iter.next();
				provider.removeSelectionChangedListener(listener);
			}
		}
		if (fQuickAccessHandlerActivation != null && fHandlerService != null) {
			fHandlerService.deactivateHandler(fQuickAccessHandlerActivation);
		}
		fEditor= null;
		fCleanUp.dispose();
		super.dispose();
	}

	// Registers the retargetable global action handlers on the part's action bars.
	private void setGlobalActionHandlers(IActionBars actionBar) {
//		actionBar.setGlobalActionHandler(JdtActionConstants.ADD_IMPORT, fAddImport);
		actionBar.setGlobalActionHandler(JdtActionConstants.OVERRIDE_METHODS, fOverrideMethods);
		actionBar.setGlobalActionHandler(JdtActionConstants.GENERATE_GETTER_SETTER, fAddGetterSetter);
		actionBar.setGlobalActionHandler(JdtActionConstants.GENERATE_DELEGATE_METHODS, fAddDelegateMethods);
//		actionBar.setGlobalActionHandler(JdtActionConstants.ADD_CONSTRUCTOR_FROM_SUPERCLASS, fAddUnimplementedConstructors);
		actionBar.setGlobalActionHandler(JdtActionConstants.GENERATE_CONSTRUCTOR_USING_FIELDS, fGenerateConstructorUsingFields);
//		actionBar.setGlobalActionHandler(JdtActionConstants.GENERATE_HASHCODE_EQUALS, fHashCodeEquals);
		actionBar.setGlobalActionHandler(JdtActionConstants.ADD_JAVA_DOC_COMMENT, fAddJavaDocStub);
		actionBar.setGlobalActionHandler(JdtActionConstants.EXTERNALIZE_STRINGS, fExternalizeStrings);
		actionBar.setGlobalActionHandler(JdtActionConstants.CLEAN_UP, fCleanUp);
		actionBar.setGlobalActionHandler(FindBrokenNLSKeysAction.ACTION_HANDLER_ID, fFindNLSProblems);
//		actionBar.setGlobalActionHandler(JdtActionConstants.ORGANIZE_IMPORTS, fOrganizeImports);
		actionBar.setGlobalActionHandler(JdtActionConstants.SORT_MEMBERS, fSortMembers);
		if (!isEditorOwner()) {
			// editor provides its own implementation of these actions.
			actionBar.setGlobalActionHandler(IDEActionFactory.BOOKMARK.getId(), fAddBookmark);
			actionBar.setGlobalActionHandler(IDEActionFactory.ADD_TASK.getId(), fAddTaskAction);
			actionBar.setGlobalActionHandler(JdtActionConstants.FORMAT, fFormatAll);
		}
	}

	// Adds the action to the menu if it is non-null and enabled; returns 1 if added, else 0.
	private int addAction(IMenuManager menu, IAction action) {
		if (action != null && action.isEnabled()) {
			menu.add(action);
			return 1;
		}
		return 0;
	}

	// Looks up an editor-contributed action by id, refreshes it if it is an IUpdate,
	// and adds it to the menu when enabled; returns 1 if added, else 0.
	private int addEditorAction(IMenuManager menu, String actionID) {
		if (fEditor == null)
			return 0;
		IAction action= fEditor.getAction(actionID);
		if (action == null)
			return 0;
		if (action instanceof IUpdate)
			((IUpdate)action).update();
		if (action.isEnabled()) {
			menu.add(action);
			return 1;
		}
		return 0;
	}

	// True when this group was created for an editor (vs. a view or page).
	private boolean isEditorOwner() {
		return fEditor != null;
	}
}
epl-1.0
openhab/openhab2
bundles/org.openhab.binding.bluetooth/src/main/java/org/openhab/binding/bluetooth/BluetoothDeviceListener.java
3487
/**
 * Copyright (c) 2010-2020 Contributors to the openHAB project
 *
 * See the NOTICE file(s) distributed with this work for additional
 * information.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0
 *
 * SPDX-License-Identifier: EPL-2.0
 */
package org.openhab.binding.bluetooth;

import org.eclipse.jdt.annotation.NonNullByDefault;
import org.openhab.binding.bluetooth.notification.BluetoothConnectionStatusNotification;
import org.openhab.binding.bluetooth.notification.BluetoothScanNotification;

/**
 * The {@link BluetoothDeviceListener} class defines the a callback interface where devices are notified of updates to a
 * BLE device
 *
 * @author Chris Jackson - Initial contribution
 * @author Kai Kreuzer - Added descriptor updates
 */
@NonNullByDefault
public interface BluetoothDeviceListener {

    /**
     * Called when a scan record is received for the device
     *
     * @param scanNotification the {@link BluetoothScanNotification} providing the scan packet information
     */
    void onScanRecordReceived(BluetoothScanNotification scanNotification);

    /**
     * Called when the connection status changes
     *
     * @param connectionNotification the {@link BluetoothConnectionStatusNotification} providing the updated connection
     *            information
     */
    void onConnectionStateChange(BluetoothConnectionStatusNotification connectionNotification);

    /**
     * Called when a devices services and characteristics have been completely read
     */
    void onServicesDiscovered();

    /**
     * Called when a read request completes
     *
     * @param characteristic the {@link BluetoothCharacteristic} that has completed the read request
     * @param status the {@link BluetoothCompletionStatus} of the read request
     */
    void onCharacteristicReadComplete(BluetoothCharacteristic characteristic, BluetoothCompletionStatus status);

    /**
     * Called when a write request completes
     *
     * @param characteristic the {@link BluetoothCharacteristic} that has completed the write request
     * @param status the {@link BluetoothCompletionStatus} of the write request
     */
    void onCharacteristicWriteComplete(BluetoothCharacteristic characteristic, BluetoothCompletionStatus status);

    /**
     * Called when a characteristic value is received. Implementations should call this whenever a value
     * is received from the BLE device even if there is no change to the value.
     *
     * @param characteristic the updated {@link BluetoothCharacteristic}
     */
    void onCharacteristicUpdate(BluetoothCharacteristic characteristic);

    /**
     * Called when a descriptor value is received. Implementations should call this whenever a value
     * is received from the BLE device even if there is no change to the value.
     *
     * @param bluetoothDescriptor the updated {@link BluetoothDescriptor}
     */
    void onDescriptorUpdate(BluetoothDescriptor bluetoothDescriptor);

    /**
     * Called when the BluetoothAdapter for this BluetoothDevice changes.
     * Implementations should call this whenever they change the adapter used by this device.
     * Note: In general this is only called by a RoamingBluetoothDevice
     *
     * @param adapter the new {@link BluetoothAdapter} used by this device
     */
    void onAdapterChanged(BluetoothAdapter adapter);
}
epl-1.0
openhab/openhab2
bundles/org.openhab.binding.yioremote/src/main/java/org/openhab/binding/yioremote/internal/utils/WebsocketInterface.java
858
/**
 * Copyright (c) 2010-2020 Contributors to the openHAB project
 *
 * See the NOTICE file(s) distributed with this work for additional
 * information.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0
 *
 * SPDX-License-Identifier: EPL-2.0
 */
package org.openhab.binding.yioremote.internal.utils;

import org.eclipse.jdt.annotation.NonNullByDefault;

/**
 * The {@link WebsocketInterface} is responsible for interfacing the Websocket.
 *
 * @author Michael Loercher - Initial contribution
 */
@NonNullByDefault
public interface WebsocketInterface {

    /**
     * Called when the websocket connection state is established or updated.
     *
     * @param connected presumably {@code true} when the connection succeeded — confirm against the caller
     */
    public void onConnect(boolean connected);

    /**
     * Called when the websocket connection is closed.
     */
    public void onClose();

    /**
     * Called when a message has been received and decoded.
     *
     * @param decodedmessage the decoded message payload
     */
    public void onMessage(String decodedmessage);

    /**
     * Called when a websocket error occurs.
     *
     * @param cause the error that occurred
     */
    public void onError(Throwable cause);
}
epl-1.0
mandeepdhami/netvirt-ctrl
sdnplatform/src/main/java/org/sdnplatform/perfmon/IPktInProcessingTimeService.java
1767
/*
 * Copyright (c) 2013 Big Switch Networks, Inc.
 *
 * Licensed under the Eclipse Public License, Version 1.0 (the
 * "License"); you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 *
 *      http://www.eclipse.org/legal/epl-v10.html
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

package org.sdnplatform.perfmon;

import java.util.List;

import org.openflow.protocol.OFMessage;

import org.sdnplatform.core.ListenerContext;
import org.sdnplatform.core.IOFMessageListener;
import org.sdnplatform.core.IOFSwitch;
import org.sdnplatform.core.module.IPlatformService;

/**
 * Service interface for measuring how long packet-in processing takes,
 * both per message listener (component) and for the whole pipeline.
 */
public interface IPktInProcessingTimeService extends IPlatformService {

    /**
     * Creates time buckets for a set of modules to measure their performance
     * @param listeners The message listeners to create time buckets for
     */
    public void bootstrap(List<IOFMessageListener> listeners);

    /**
     * Stores a timestamp in ns. Used right before a service handles an
     * OF message. Only stores if the service is enabled.
     *
     * @param listener the listener whose processing is about to start
     */
    public void recordStartTimeComp(IOFMessageListener listener);

    /**
     * Records the end time for the given listener's processing of the
     * current message.
     *
     * @param listener the listener whose processing just finished
     */
    public void recordEndTimeComp(IOFMessageListener listener);

    /**
     * Records the start time of overall packet-in processing.
     */
    public void recordStartTimePktIn();

    /**
     * Records the end time of overall packet-in processing for the given
     * switch, message and listener context.
     */
    public void recordEndTimePktIn(IOFSwitch sw, OFMessage m, ListenerContext cntx);

    /**
     * @return whether performance monitoring is currently enabled
     */
    public boolean isEnabled();

    /**
     * Enables or disables performance monitoring.
     *
     * @param enabled true to enable time recording
     */
    public void setEnabled(boolean enabled);

    /**
     * @return the cumulative time bucket holding the collected statistics
     */
    public CumulativeTimeBucket getCtb();
}
epl-1.0
maxeler/eclipse
eclipse.jdt.ui/org.eclipse.jdt.ui.tests.refactoring/resources/ExtractMethodWorkSpace/ExtractMethodTests/duplicates_in/A_test964.java
215
package duplicates_in;

// Extract Method refactoring test fixture ("duplicates" scenario).
// NOTE(review): the /*[*/ ... /*]*/ comments mark the selection the test
// extracts, and the test harness compares against an expected output file —
// do not reformat or alter the statements below.
public class A_test964 {
	void test() {
		new Object() {
			public void yes() {
				yes();
				System.out.println("hello world");
			}
		};
		/*[*/System.out.println("hello world");/*]*/
	}
}
epl-1.0
openhab/openhab2
bundles/org.openhab.binding.cm11a/src/main/java/org/openhab/binding/cm11a/internal/X10ReceivedData.java
4258
/**
 * Copyright (c) 2010-2020 Contributors to the openHAB project
 *
 * See the NOTICE file(s) distributed with this work for additional
 * information.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0
 *
 * SPDX-License-Identifier: EPL-2.0
 */
package org.openhab.binding.cm11a.internal;

import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

/**
 * Container for data received from the powerline by the cm11a interface. When data is received one of more of these
 * objects is created and then passed to interested objects.
 *
 * Instances are immutable: the address array is defensively copied on the way in
 * ({@link #X10ReceivedData(String[], X10COMMAND, int)}) and on the way out ({@link #getAddr()}) so that external
 * mutation cannot corrupt {@link #equals(Object)}/{@link #hashCode()}.
 *
 * @author Bob Raker - Initial contribution
 */
public class X10ReceivedData {

    /**
     * All of the possible X10 commands
     *
     */
    public enum X10COMMAND {
        ALL_UNITS_OFF,
        ALL_LIGHTS_ON,
        ON,
        OFF,
        DIM,
        BRIGHT,
        ALL_LIGHTS_OFF,
        EXTENDED_CODE,
        HAIL_REQ,
        HAIL_ACK,
        PRESET_DIM_1,
        PRESET_DIM_2,
        EXTD_DATA_XFER,
        STATUS_ON,
        STATUS_OFF,
        STATUS_REQ,
        UNDEF // If no match, which shouldn't happen
    }

    /**
     * Used to decode the function bits received from the cm11a into an X10 function code
     *
     */
    protected static final Map<Integer, X10COMMAND> COMMAND_MAP;

    static {
        Map<Integer, X10COMMAND> tempMap = new HashMap<>();
        tempMap.put(0, X10COMMAND.ALL_UNITS_OFF);
        tempMap.put(1, X10COMMAND.ALL_LIGHTS_ON);
        tempMap.put(2, X10COMMAND.ON);
        tempMap.put(3, X10COMMAND.OFF);
        tempMap.put(4, X10COMMAND.DIM);
        tempMap.put(5, X10COMMAND.BRIGHT);
        tempMap.put(6, X10COMMAND.ALL_LIGHTS_OFF);
        tempMap.put(7, X10COMMAND.EXTENDED_CODE);
        tempMap.put(8, X10COMMAND.HAIL_REQ);
        tempMap.put(9, X10COMMAND.HAIL_ACK);
        tempMap.put(10, X10COMMAND.PRESET_DIM_1);
        tempMap.put(11, X10COMMAND.PRESET_DIM_2);
        tempMap.put(12, X10COMMAND.EXTD_DATA_XFER);
        tempMap.put(13, X10COMMAND.STATUS_ON);
        tempMap.put(14, X10COMMAND.STATUS_OFF);
        tempMap.put(15, X10COMMAND.STATUS_REQ);
        COMMAND_MAP = Collections.unmodifiableMap(tempMap);
    }

    /**
     * Lookup table to convert House code received from the cm11a into an X10 house code.
     * Kept as a public array for API compatibility; callers must not modify it.
     */
    public static final char HOUSE_CODE[] = new char[] { 'M', 'E', 'C', 'K', 'O', 'G', 'A', 'I', 'N', 'F', 'D', 'L',
            'P', 'H', 'B', 'J' };

    /**
     * Lookup table to convert Unit code received from the cm11a into an X10 unit code.
     * Kept as a public array for API compatibility; callers must not modify it.
     */
    public static final byte UNIT_CODE[] = new byte[] { 13, 5, 3, 11, 15, 7, 1, 9, 14, 6, 4, 12, 16, 8, 2, 10 };

    // X10 addresses this data applies to; defensively copied so the instance stays effectively immutable.
    private final String[] addr;
    // Decoded X10 function.
    private final X10COMMAND cmd;
    // Number of dims received with the command.
    private final int dims;

    /**
     * Constructor
     *
     * @param addr the X10 addresses (copied defensively; {@code null} is preserved as-is for compatibility)
     * @param cmd the decoded X10 command
     * @param dims the dim count accompanying the command
     */
    public X10ReceivedData(String[] addr, X10COMMAND cmd, int dims) {
        // Copy so a caller mutating its array afterwards cannot change this object's state.
        this.addr = (addr == null) ? null : addr.clone();
        this.cmd = cmd;
        this.dims = dims;
    }

    /**
     * @return a copy of the addresses (never the internal array), or {@code null} if constructed with {@code null}
     */
    public String[] getAddr() {
        // Return a copy so callers cannot mutate internal state used by equals()/hashCode().
        return (addr == null) ? null : addr.clone();
    }

    /**
     * @return the decoded X10 command
     */
    public X10COMMAND getCmd() {
        return cmd;
    }

    /**
     * @return the dim count received with the command
     */
    public int getDims() {
        return dims;
    }

    @Override
    public String toString() {
        return "X10ReceivedData [addr=" + Arrays.toString(addr) + ", cmd=" + cmd + ", dims=" + dims + "]";
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + Arrays.hashCode(addr);
        result = prime * result + ((cmd == null) ? 0 : cmd.hashCode());
        result = prime * result + dims;
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null) {
            return false;
        }
        if (getClass() != obj.getClass()) {
            return false;
        }
        X10ReceivedData other = (X10ReceivedData) obj;
        if (!Arrays.equals(addr, other.addr)) {
            return false;
        }
        if (cmd != other.cmd) {
            return false;
        }
        if (dims != other.dims) {
            return false;
        }
        return true;
    }
}
epl-1.0
bmaggi/Papyrus-SysML11
plugins/org.eclipse.papyrus.sysml.service.types/src/org/eclipse/papyrus/sysml/service/types/helper/advice/AssociationNoneDirectedEditHelperAdvice.java
1461
/*****************************************************************************
 * Copyright (c) 2011 CEA LIST.
 *
 *
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 *****************************************************************************/
package org.eclipse.papyrus.sysml.service.types.helper.advice;

import org.eclipse.uml2.uml.Association;
import org.eclipse.uml2.uml.Classifier;
import org.eclipse.uml2.uml.Property;
import org.eclipse.uml2.uml.Type;

/**
 * Edit helper advice for {@link Association} with "aggregation = none" and navigable in one way (used for creation purpose only).
 */
public class AssociationNoneDirectedEditHelperAdvice extends AssociationNoneEditHelperAdvice {

	/**
	 * <pre>
	 * {@inheritDoc}
	 *
	 * The currently created {@link Association} in the default case (aggregation = none), and directed
	 * (meaning navigable in one direction only) which in SysML means the target end is owned by the association itself.
	 *
	 * Moreover this end name should not be set in that case, this latter rule is not followed here for now.
	 *
	 * </pre>
	 */
	@Override
	protected void addTargetInModel(Property targetEnd, Classifier owner, Type sourceType, Association association) {
		// Directed association: the target end becomes an owned end of the association
		// (not of the classifier), which is what makes it navigable in one way only.
		association.getOwnedEnds().add(targetEnd);
	}
}
epl-1.0
openhab/openhab2
bundles/org.openhab.binding.lcn/src/main/java/org/openhab/binding/lcn/internal/subhandler/LcnModuleBinarySensorSubHandler.java
2177
/**
 * Copyright (c) 2010-2020 Contributors to the openHAB project
 *
 * See the NOTICE file(s) distributed with this work for additional
 * information.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0
 *
 * SPDX-License-Identifier: EPL-2.0
 */
package org.openhab.binding.lcn.internal.subhandler;

import java.util.Collection;
import java.util.Collections;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.IntStream;

import org.eclipse.jdt.annotation.NonNullByDefault;
import org.openhab.binding.lcn.internal.LcnBindingConstants;
import org.openhab.binding.lcn.internal.LcnModuleHandler;
import org.openhab.binding.lcn.internal.common.LcnChannelGroup;
import org.openhab.binding.lcn.internal.common.LcnDefs;
import org.openhab.binding.lcn.internal.connection.ModInfo;
import org.openhab.core.library.types.OpenClosedType;

/**
 * Handles State changes of binary sensors of an LCN module.
 *
 * @author Fabian Wolter - Initial contribution
 */
@NonNullByDefault
public class LcnModuleBinarySensorSubHandler extends AbstractLcnModuleSubHandler {
    // Matches a binary sensor status message: module address followed by "Bx<byteValue>",
    // where <byteValue> packs all sensor states into one decimal byte.
    private static final Pattern PATTERN = Pattern.compile(LcnBindingConstants.ADDRESS_REGEX + "Bx(?<byteValue>\\d+)");

    public LcnModuleBinarySensorSubHandler(LcnModuleHandler handler, ModInfo info) {
        super(handler, info);
    }

    @Override
    public void handleRefresh(LcnChannelGroup channelGroup, int number) {
        // A refresh of any binary sensor channel re-requests the whole sensor byte.
        info.refreshBinarySensors();
    }

    @Override
    public void handleStatusMessage(Matcher matcher) {
        info.onBinarySensorsResponseReceived();

        // Unpack the received byte into one boolean per sensor and publish each as OPEN/CLOSED.
        // NOTE(review): assumes LcnDefs.getBooleanValue() yields at least
        // LcnChannelGroup.BINARYSENSOR.getCount() entries — confirm against LcnDefs.
        boolean[] states = LcnDefs.getBooleanValue(Integer.parseInt(matcher.group("byteValue")));

        IntStream.range(0, LcnChannelGroup.BINARYSENSOR.getCount())
                .forEach(i -> fireUpdate(LcnChannelGroup.BINARYSENSOR, i,
                        states[i] ? OpenClosedType.OPEN : OpenClosedType.CLOSED));
    }

    @Override
    public Collection<Pattern> getPckStatusMessagePatterns() {
        return Collections.singleton(PATTERN);
    }
}
epl-1.0
igool/mybatis-shard
shard/src/main/java/org/lysu/shard/parser/ast/stmt/extension/ExtDDLCreatePolicy.java
1823
/*
 * Copyright 1999-2012 Alibaba Group.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * (created at 2012-8-14)
 */
package org.lysu.shard.parser.ast.stmt.extension;

import java.util.ArrayList;
import java.util.List;

import org.lysu.shard.parser.ast.expression.Expression;
import org.lysu.shard.parser.ast.expression.primary.Identifier;
import org.lysu.shard.parser.ast.stmt.ddl.DDLStatement;
import org.lysu.shard.parser.util.Pair;
import org.lysu.shard.parser.visitor.SQLASTVisitor;

/**
 * AST node for the extension DDL statement that creates a policy: holds the
 * policy name and the ordered list of (id, expression) proportion pairs the
 * parser appends while reading the statement.
 *
 * @author <a href="mailto:shuo.qius@alibaba-inc.com">QIU Shuo</a>
 */
public class ExtDDLCreatePolicy implements DDLStatement {

    // Policy identifier as it appeared in the statement text.
    private final Identifier name;

    // Ordered (id, expression) pairs; grows via addProportion().
    private final List<Pair<Integer, Expression>> proportion;

    public ExtDDLCreatePolicy(Identifier name) {
        this.name = name;
        // Small initial capacity; entries are appended one by one by the parser.
        this.proportion = new ArrayList<Pair<Integer, Expression>>(1);
    }

    /**
     * Records one (id, value) proportion pair.
     *
     * @return this node, for fluent chaining by the parser
     */
    public ExtDDLCreatePolicy addProportion(Integer id, Expression val) {
        this.proportion.add(new Pair<Integer, Expression>(id, val));
        return this;
    }

    public Identifier getName() {
        return this.name;
    }

    /** Returns the live internal list (callers see subsequent additions). */
    public List<Pair<Integer, Expression>> getProportion() {
        return this.proportion;
    }

    @Override
    public void accept(SQLASTVisitor visitor) {
        visitor.visit(this);
    }
}
epl-1.0
devjunix/libjt400-java
src/com/ibm/as400/access/ConvTable833.java
3489
///////////////////////////////////////////////////////////////////////////////
//
// JTOpen (IBM Toolbox for Java - OSS version)
//
// Filename: ConvTable833.java
//
// The source code contained herein is licensed under the IBM Public License
// Version 1.0, which has been approved by the Open Source Initiative.
// Copyright (C) 1997-2004 International Business Machines Corporation and
// others. All rights reserved.
//
///////////////////////////////////////////////////////////////////////////////

package com.ibm.as400.access;

// Single-byte conversion table for CCSID 833 (an EBCDIC single-byte code page;
// the \uFFAx-\uFFDx targets look like halfwidth Hangul -- TODO confirm against
// IBM's official code page registry).
// All conversion logic lives in ConvTableSingleMap; this class only supplies
// the two lookup tables and the CCSID number.
class ConvTable833 extends ConvTableSingleMap
{
    private static final String copyright = "Copyright (C) 1997-2004 International Business Machines Corporation and others.";

    // EBCDIC -> Unicode table: exactly 256 chars, indexed directly by the
    // EBCDIC byte value (0x00-0xFF, one 16-char string segment per row of 16).
    // \u001A (SUB) appears wherever a byte has no round-trippable mapping.
    private static final String toUnicode_ =
        "\u0000\u0001\u0002\u0003\u009C\t\u0086\u007F\u0097\u008D\u008E\u000B\f\r\u000E\u000F" +
        "\u0010\u0011\u0012\u0013\u009D\u0085\b\u0087\u0018\u0019\u0092\u008F\u001C\u001D\u001E\u001F" +
        "\u0080\u0081\u0082\u0083\u0084\n\u0017\u001B\u0088\u0089\u008A\u008B\u008C\u0005\u0006\u0007" +
        "\u0090\u0091\u0016\u0093\u0094\u0095\u0096\u0004\u0098\u0099\u009A\u009B\u0014\u0015\u009E\u001A" +
        "\u0020\u001A\uFFA0\uFFA1\uFFA2\uFFA3\uFFA4\uFFA5\uFFA6\uFFA7\u00A2\u002E\u003C\u0028\u002B\u007C" +
        "\u0026\u001A\uFFA8\uFFA9\uFFAA\uFFAB\uFFAC\uFFAD\uFFAE\uFFAF\u0021\u0024\u002A\u0029\u003B\u00AC" +
        "\u002D\u002F\uFFB0\uFFB1\uFFB2\uFFB3\uFFB4\uFFB5\uFFB6\uFFB7\u00A6\u002C\u0025\u005F\u003E\u003F" +
        "\u005B\u001A\uFFB8\uFFB9\uFFBA\uFFBB\uFFBC\uFFBD\uFFBE\u0060\u003A\u0023\u0040\'\u003D\"" +
        "\u005D\u0061\u0062\u0063\u0064\u0065\u0066\u0067\u0068\u0069\uFFC2\uFFC3\uFFC4\uFFC5\uFFC6\uFFC7" +
        "\u001A\u006A\u006B\u006C\u006D\u006E\u006F\u0070\u0071\u0072\uFFCA\uFFCB\uFFCC\uFFCD\uFFCE\uFFCF" +
        "\u203E\u007E\u0073\u0074\u0075\u0076\u0077\u0078\u0079\u007A\uFFD2\uFFD3\uFFD4\uFFD5\uFFD6\uFFD7" +
        "\u005E\u001A\\\u001A\u001A\u001A\u001A\u001A\u001A\u001A\uFFDA\uFFDB\uFFDC\u001A\u001A\u001A" +
        "\u007B\u0041\u0042\u0043\u0044\u0045\u0046\u0047\u0048\u0049\u001A\u001A\u001A\u001A\u001A\u001A" +
        "\u007D\u004A\u004B\u004C\u004D\u004E\u004F\u0050\u0051\u0052\u001A\u001A\u001A\u001A\u001A\u001A" +
        "\u20A9\u001A\u0053\u0054\u0055\u0056\u0057\u0058\u0059\u005A\u001A\u001A\u001A\u001A\u001A\u001A" +
        "\u0030\u0031\u0032\u0033\u0034\u0035\u0036\u0037\u0038\u0039\u001A\u001A\u001A\u001A\u001A\u009F";

    // Unicode -> EBCDIC table in a compact encoding; presumably each char packs
    // two target bytes and \uFFFF-prefixed runs are skip/repeat directives
    // interpreted by ConvTableSingleMap -- format not visible here, so treat
    // this data as opaque and do not edit it by hand.
    private static final String fromUnicode_ =
        "\u0001\u0203\u372D\u2E2F\u1605\u250B\u0C0D\u0E0F\u1011\u1213\u3C3D\u3226\u1819\u3F27\u1C1D\u1E1F" +
        "\u405A\u7F7B\u5B6C\u507D\u4D5D\u5C4E\u6B60\u4B61\uF0F1\uF2F3\uF4F5\uF6F7\uF8F9\u7A5E\u4C7E\u6E6F" +
        "\u7CC1\uC2C3\uC4C5\uC6C7\uC8C9\uD1D2\uD3D4\uD5D6\uD7D8\uD9E2\uE3E4\uE5E6\uE7E8\uE970\uB280\uB06D" +
        "\u7981\u8283\u8485\u8687\u8889\u9192\u9394\u9596\u9798\u99A2\uA3A4\uA5A6\uA7A8\uA9C0\u4FD0\uA107" +
        "\u2021\u2223\u2415\u0617\u2829\u2A2B\u2C09\u0A1B\u3031\u1A33\u3435\u3608\u3839\u3A3B\u0414\u3EFF" +
        "\u3F3F\u4A3F\u3F3F\u6A3F\u3F3F\u3F3F\u5F3F\uFFFF\u0FC8\u3F3F\uA03F\uFFFF\u0034\u3F3F\u3FE0\uFFFF" +
        "\u6F7B\u3F3F\u4243\u4445\u4647\u4849\u5253\u5455\u5657\u5859\u6263\u6465\u6667\u6869\u7273\u7475" +
        "\u7677\u783F\u3F3F\u8A8B\u8C8D\u8E8F\u3F3F\u9A9B\u9C9D\u9E9F\u3F3F\uAAAB\uACAD\uAEAF\u3F3F\uBABB" +
        "\uBC3F\uFFFF\u0011\u3F3F";

    // Register both tables with the generic single-byte mapper under CCSID 833.
    ConvTable833()
    {
        super(833, toUnicode_.toCharArray(), fromUnicode_.toCharArray());
    }
}
epl-1.0
b-cuts/esper
esper/src/main/java/com/espertech/esper/epl/spec/ContextDetailCategory.java
2285
/*
 * *************************************************************************************
 *  Copyright (C) 2006-2015 EsperTech, Inc. All rights reserved.                       *
 *  http://www.espertech.com/esper                                                     *
 *  http://www.espertech.com                                                           *
 *  ---------------------------------------------------------------------------------- *
 *  The software in this package is published under the terms of the GPL license       *
 *  a copy of which has been included with this distribution in the license.txt file.  *
 * *************************************************************************************
 */
package com.espertech.esper.epl.spec;

import com.espertech.esper.filter.FilterSpecCompiled;
import com.espertech.esper.filter.FilterValueSetParam;

import java.util.ArrayList;
import java.util.List;

/**
 * Context detail for a category-segmented context: carries the category items
 * plus the raw filter spec, and caches the compiled filter spec and its
 * pre-computed parameters once compilation has run.
 */
public class ContextDetailCategory implements ContextDetail {
    private static final long serialVersionUID = 8141827106254268831L;

    private final List<ContextDetailCategoryItem> items;
    private final FilterSpecRaw filterSpecRaw;

    // Compile-time artifacts; transient since only the raw spec is serialized.
    private transient FilterSpecCompiled filterSpecCompiled;
    private transient FilterValueSetParam[][] filterParamsCompiled;

    public ContextDetailCategory(List<ContextDetailCategoryItem> items, FilterSpecRaw filterSpecRaw) {
        this.items = items;
        this.filterSpecRaw = filterSpecRaw;
    }

    public List<FilterSpecCompiled> getFilterSpecsIfAny() {
        // Mutable single-element list; the element is null until
        // setFilterSpecCompiled has been called -- callers must tolerate that.
        List<FilterSpecCompiled> result = new ArrayList<FilterSpecCompiled>(1);
        result.add(this.filterSpecCompiled);
        return result;
    }

    public FilterSpecRaw getFilterSpecRaw() {
        return this.filterSpecRaw;
    }

    public List<ContextDetailCategoryItem> getItems() {
        return this.items;
    }

    /** Stores the compiled spec and eagerly derives its filter parameters. */
    public void setFilterSpecCompiled(FilterSpecCompiled filterSpec) {
        this.filterSpecCompiled = filterSpec;
        this.filterParamsCompiled = this.filterSpecCompiled.getValueSet(null, null, null).getParameters();
    }

    public FilterSpecCompiled getFilterSpecCompiled() {
        return this.filterSpecCompiled;
    }

    public FilterValueSetParam[][] getFilterParamsCompiled() {
        return this.filterParamsCompiled;
    }
}
gpl-2.0
sanjithuom/Stanford-corenlp
src/edu/stanford/nlp/optimization/QNMinimizer.java
79140
package edu.stanford.nlp.optimization; import java.io.FileOutputStream; import java.io.IOException; import java.io.PrintWriter; import java.text.DecimalFormat; import java.text.NumberFormat; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Set; import edu.stanford.nlp.io.RuntimeIOException; import edu.stanford.nlp.math.ArrayMath; import edu.stanford.nlp.util.Generics; /** * * An implementation of L-BFGS for Quasi Newton unconstrained minimization. * * The general outline of the algorithm is taken from: * <blockquote> * <i>Numerical Optimization</i> (second edition) 2006 * Jorge Nocedal and Stephen J. Wright * </blockquote> * A variety of different options are available. * * <h3>LINESEARCHES</h3> * * BACKTRACKING: This routine * simply starts with a guess for step size of 1. If the step size doesn't * supply a sufficient decrease in the function value the step is updated * through step = 0.1*step. This method is certainly simpler, but doesn't allow * for an increase in step size, and isn't well suited for Quasi Newton methods. * * MINPACK: This routine is based off of the implementation used in MINPACK. * This routine finds a point satisfying the Wolfe conditions, which state that * a point must have a sufficiently smaller function value, and a gradient of * smaller magnitude. This provides enough to prove theoretically quadratic * convergence. In order to find such a point the linesearch first finds an * interval which must contain a satisfying point, and then progressively * reduces that interval all using cubic or quadratic interpolation. * * SCALING: L-BFGS allows the initial guess at the hessian to be updated at each * step. Standard BFGS does this by approximating the hessian as a scaled * identity matrix. To use this method set the scaleOpt to SCALAR. A better way * of approximate the hessian is by using a scaling diagonal matrix. The * diagonal can then be updated as more information comes in. 
This method can be * used by setting scaleOpt to DIAGONAL. * * * CONVERGENCE: Previously convergence was gauged by looking at the average * decrease per step dividing that by the current value and terminating when * that value because smaller than TOL. This method fails when the function * value approaches zero, so two other convergence criteria are used. The first * stores the initial gradient norm |g0|, then terminates when the new gradient * norm, |g| is sufficiently smaller: i.e., |g| &lt; eps*|g0| the second checks if * |g| &lt; eps*max( 1 , |x| ) which is essentially checking to see if the gradient * is numerically zero. * Another convergence criteria is added where termination is triggered if no * improvements are observed after X (set by terminateOnEvalImprovementNumOfEpoch) * iterations over some validation test set as evaluated by Evaluator * * Each of these convergence criteria can be turned on or off by setting the * flags: * <blockquote><code> * private boolean useAveImprovement = true; * private boolean useRelativeNorm = true; * private boolean useNumericalZero = true; * private boolean useEvalImprovement = false; * </code></blockquote> * * To use the QNMinimizer first construct it using * <blockquote><code> * QNMinimizer qn = new QNMinimizer(mem, true) * </code></blockquote> * mem - the number of previous estimate vector pairs to * store, generally 15 is plenty. true - this tells the QN to use the MINPACK * linesearch with DIAGONAL scaling. false would lead to the use of the criteria * used in the old QNMinimizer class. 
* * Then call: * <blockquote><code> * qn.minimize(dfunction,convergenceTolerance,initialGuess,maxFunctionEvaluations); * </code></blockquote> * * @author akleeman */ public class QNMinimizer implements Minimizer<DiffFunction>, HasEvaluators { private int fevals = 0; // the number of function evaluations private int maxFevals = -1; private int mem = 10; // the number of s,y pairs to retain for BFGS private int its = 0; // the number of iterations private final Function monitor; private boolean quiet; private static final NumberFormat nf = new DecimalFormat("0.000E0"); private static final NumberFormat nfsec = new DecimalFormat("0.00"); // for times private static final double ftol = 1e-4; // Linesearch parameters private double gtol = 0.9; private static final double aMin = 1e-12; // Min step size private static final double aMax = 1e12; // Max step size private static final double p66 = 0.66; // used to check getting more than 2/3 of width improvement private static final double p5 = 0.5; // Some other magic constant private static final int a = 0; // used as array index private static final int f = 1; // used as array index private static final int g = 2; // used as array index public boolean outputToFile = false; private boolean success = false; private boolean bracketed = false; // used for linesearch private QNInfo presetInfo = null; private boolean noHistory = true; // parameters for OWL-QN (L-BFGS with L1-regularization) private boolean useOWLQN = false; private double lambdaOWL = 0; private boolean useAveImprovement = true; private boolean useRelativeNorm = true; private boolean useNumericalZero = true; private boolean useEvalImprovement = false; private boolean useMaxItr = false; private int maxItr = 0; private boolean suppressTestPrompt = false; private int terminateOnEvalImprovementNumOfEpoch = 1; private int evaluateIters = 0; // Evaluate every x iterations (0 = no evaluation) private int startEvaluateIters = 0; // starting evaluation after x iterations 
private Evaluator[] evaluators; // separate set of evaluators to check how optimization is going public enum eState { TERMINATE_MAXEVALS, TERMINATE_RELATIVENORM, TERMINATE_GRADNORM, TERMINATE_AVERAGEIMPROVE, CONTINUE, TERMINATE_EVALIMPROVE, TERMINATE_MAXITR } public enum eLineSearch { BACKTRACK, MINPACK } public enum eScaling { DIAGONAL, SCALAR } eLineSearch lsOpt = eLineSearch.MINPACK;// eLineSearch.MINPACK; eScaling scaleOpt = eScaling.DIAGONAL;// eScaling.DIAGONAL; eState state = eState.CONTINUE; public QNMinimizer() { this((Function) null); } public QNMinimizer(int m) { this(null, m); } public QNMinimizer(int m, boolean useRobustOptions) { this(null, m, useRobustOptions); } public QNMinimizer(Function monitor) { this.monitor = monitor; } public QNMinimizer(Function monitor, int m) { this(monitor, m, false); } public QNMinimizer(Function monitor, int m, boolean useRobustOptions) { this.monitor = monitor; mem = m; if (useRobustOptions) { this.setRobustOptions(); } } public QNMinimizer(FloatFunction monitor) { throw new UnsupportedOperationException("Doesn't support floats yet"); } public void setOldOptions() { useAveImprovement = true; useRelativeNorm = false; useNumericalZero = false; lsOpt = eLineSearch.BACKTRACK; scaleOpt = eScaling.SCALAR; } public final void setRobustOptions() { useAveImprovement = true; useRelativeNorm = true; useNumericalZero = true; lsOpt = eLineSearch.MINPACK; scaleOpt = eScaling.DIAGONAL; } @Override public void setEvaluators(int iters, Evaluator[] evaluators) { this.evaluateIters = iters; this.evaluators = evaluators; } public void setEvaluators(int iters, int startEvaluateIters, Evaluator[] evaluators) { this.evaluateIters = iters; this.startEvaluateIters = startEvaluateIters; this.evaluators = evaluators; } public void terminateOnRelativeNorm(boolean toTerminate) { useRelativeNorm = toTerminate; } public void terminateOnNumericalZero(boolean toTerminate) { useNumericalZero = toTerminate; } public void 
terminateOnAverageImprovement(boolean toTerminate) { useAveImprovement = toTerminate; } public void terminateOnEvalImprovement(boolean toTerminate) { useEvalImprovement = toTerminate; } public void terminateOnMaxItr(int maxItr) { if (maxItr > 0) { useMaxItr = true; this.maxItr = maxItr; } } public void suppressTestPrompt(boolean suppressTestPrompt) { this.suppressTestPrompt = suppressTestPrompt; } public void setTerminateOnEvalImprovementNumOfEpoch(int terminateOnEvalImprovementNumOfEpoch) { this.terminateOnEvalImprovementNumOfEpoch = terminateOnEvalImprovementNumOfEpoch; } public void useMinPackSearch() { lsOpt = eLineSearch.MINPACK; } public void useBacktracking() { lsOpt = eLineSearch.BACKTRACK; } public void useDiagonalScaling() { scaleOpt = eScaling.DIAGONAL; } public void useScalarScaling() { scaleOpt = eScaling.SCALAR; } public boolean wasSuccessful() { return success; } public void shutUp() { this.quiet = true; } public void setM(int m) { mem = m; } public static class SurpriseConvergence extends Throwable { private static final long serialVersionUID = 4290178321643529559L; public SurpriseConvergence(String s) { super(s); } } private static class MaxEvaluationsExceeded extends Throwable { private static final long serialVersionUID = 8044806163343218660L; public MaxEvaluationsExceeded(String s) { super(s); } } /** * * The Record class is used to collect information about the function value * over a series of iterations. This information is used to determine * convergence, and to (attempt to) ensure numerical errors are not an issue. * It can also be used for plotting the results of the optimization routine. * * @author akleeman */ public class Record { // convergence options. // have average difference like before // zero gradient. 
// for convergence test private final List<Double> evals = new ArrayList<Double>(); private final List<Double> values = new ArrayList<Double>(); List<Double> gNorms = new ArrayList<Double>(); // List<Double> xNorms = new ArrayList<Double>(); private final List<Integer> funcEvals = new ArrayList<Integer>(); private final List<Double> time = new ArrayList<Double>(); // gNormInit: This makes it so that if for some reason // you try and divide by the initial norm before it's been // initialized you don't get a NAN but you will also never // get false convergence. private double gNormInit = Double.MIN_VALUE; private double relativeTOL = 1e-8; private double TOL = 1e-6; private double EPS = 1e-6; private long startTime; private double gNormLast; // This is used for convergence. private double[] xLast; private int maxSize = 100; // This will control the number of func values / // gradients to retain. private Function mon = null; private boolean quiet = false; private boolean memoryConscious = true; private PrintWriter outputFile = null; private int noImproveItrCount = 0; private double[] xBest; public Record(boolean beQuiet, Function monitor, double tolerance) { this.quiet = beQuiet; this.mon = monitor; this.TOL = tolerance; } public Record(boolean beQuiet, Function monitor, double tolerance, PrintWriter output) { this.quiet = beQuiet; this.mon = monitor; this.TOL = tolerance; this.outputFile = output; } public Record(boolean beQuiet, Function monitor, double tolerance, double eps) { this.quiet = beQuiet; this.mon = monitor; this.TOL = tolerance; this.EPS = eps; } public void setEPS(double eps) { EPS = eps; } public void setTOL(double tolerance) { TOL = tolerance; } public void start(double val, double[] grad) { start(val, grad, null); } /* * Stops output to stdout. */ public void shutUp() { this.quiet = true; } /* * Initialize the class, this starts the timer, and initiates the gradient * norm for use with convergence. 
*/ public void start(double val, double[] grad, double[] x) { startTime = System.currentTimeMillis(); gNormInit = ArrayMath.norm(grad); xLast = x; writeToFile(1, val, gNormInit, 0.0); if (x != null) { monitorX(x); } } private void writeToFile(double fevals, double val, double gNorm, double time) { if (outputFile != null) { outputFile.println(fevals + "," + val + "," + gNorm + "," + time); } } public void add(double val, double[] grad, double[] x, int fevals, double evalScore) { if (!memoryConscious) { if (gNorms.size() > maxSize) { gNorms.remove(0); } if (time.size() > maxSize) { time.remove(0); } if (funcEvals.size() > maxSize) { funcEvals.remove(0); } gNorms.add(gNormLast); time.add(howLong()); funcEvals.add(fevals); } else { maxSize = 10; } gNormLast = ArrayMath.norm(grad); if (values.size() > maxSize) { values.remove(0); } values.add(val); if (evalScore != Double.NEGATIVE_INFINITY) evals.add(evalScore); writeToFile(fevals, val, gNormLast, howLong()); say(nf.format(val) + " " + nfsec.format(howLong()) + "s"); xLast = x; monitorX(x); } public void monitorX(double[] x) { if (this.mon != null) { this.mon.valueAt(x); } } /** * This function checks for convergence through first * order optimality, numerical convergence (i.e., zero numerical * gradient), and also by checking the average improvement. * * @return A value of the enumeration type <p>eState</p> which tells the * state of the optimization routine indicating whether the routine should * terminate, and if so why. */ public eState toContinue() { double relNorm = gNormLast / gNormInit; int size = values.size(); double newestVal = values.get(size - 1); double previousVal = (size >= 10 ? values.get(size - 10) : values.get(0)); double averageImprovement = (previousVal - newestVal) / (size >= 10 ? 
10 : size); int evalsSize = evals.size(); if (useMaxItr && its >= maxItr) return eState.TERMINATE_MAXITR; if (useEvalImprovement) { int bestInd = -1; double bestScore = Double.NEGATIVE_INFINITY; for (int i = 0; i < evalsSize; i++) { if (evals.get(i) >= bestScore) { bestScore = evals.get(i); bestInd = i; } } if (bestInd == evalsSize-1) { // copy xBest if (xBest == null) xBest = Arrays.copyOf(xLast, xLast.length); else System.arraycopy( xLast, 0, xBest, 0, xLast.length ); } if ((evalsSize - bestInd) >= terminateOnEvalImprovementNumOfEpoch) return eState.TERMINATE_EVALIMPROVE; } // This is used to be able to reproduce results that were trained on the // QNMinimizer before // convergence criteria was updated. if (useAveImprovement && (size > 5 && Math.abs(averageImprovement / newestVal) < TOL)) { return eState.TERMINATE_AVERAGEIMPROVE; } // Check to see if the gradient is sufficiently small if (useRelativeNorm && relNorm <= relativeTOL) { return eState.TERMINATE_RELATIVENORM; } if (useNumericalZero) { // This checks if the gradient is sufficiently small compared to x that // it is treated as zero. if (gNormLast < EPS * Math.max(1.0, ArrayMath.norm_1(xLast))) { // |g| < |x|_1 // First we do the one norm, because that's easiest, and always bigger. if (gNormLast < EPS * Math.max(1.0, ArrayMath.norm(xLast))) { // |g| < max(1,|x|) // Now actually compare with the two norm if we have to. System.err .println("Gradient is numerically zero, stopped on machine epsilon."); return eState.TERMINATE_GRADNORM; } } // give user information about the norms. } say(" |" + nf.format(gNormLast) + "| {" + nf.format(relNorm) + "} " + nf.format(Math.abs(averageImprovement / newestVal)) + " " + (evalsSize > 0 ? evals.get(evalsSize-1).toString() : "-") + " "); return eState.CONTINUE; } /** * Return the time in seconds since this class was created. * @return The time in seconds since this class was created. 
*/ public double howLong() { return ((System.currentTimeMillis() - startTime)) / 1000.0; } public double[] getBest() { return xBest; } } // end class Record /** * The QNInfo class is used to store information about the Quasi Newton * update. it holds all the s,y pairs, updates the diagonal and scales * everything as needed. */ public class QNInfo { // Diagonal Options // Linesearch Options // Memory stuff private List<double[]> s = null; private List<double[]> y = null; private List<Double> rho = null; private double gamma; public double[] d = null; private int mem; private int maxMem = 20; public eScaling scaleOpt = eScaling.SCALAR; public QNInfo(int size) { s = new ArrayList<double[]>(); y = new ArrayList<double[]>(); rho = new ArrayList<Double>(); gamma = 1; mem = size; } public QNInfo() { s = new ArrayList<double[]>(); y = new ArrayList<double[]>(); rho = new ArrayList<Double>(); gamma = 1; mem = maxMem; } public QNInfo(List<double[]> sList, List<double[]> yList) { s = new ArrayList<double[]>(); y = new ArrayList<double[]>(); rho = new ArrayList<Double>(); gamma = 1; setHistory(sList, yList); } public int size() { return s.size(); } public double getRho(int ind) { return rho.get(ind); } public double[] getS(int ind) { return s.get(ind); } public double[] getY(int ind) { return y.get(ind); } public void useDiagonalScaling() { this.scaleOpt = eScaling.DIAGONAL; } public void useScalarScaling() { this.scaleOpt = eScaling.SCALAR; } /* * Free up that memory. */ public void free() { s = null; y = null; rho = null; d = null; } public void clear() { s.clear(); y.clear(); rho.clear(); d = null; } /* * applyInitialHessian(double[] x) * * This function takes the vector x, and applies the best guess at the * initial hessian to this vector, based off available information from * previous updates. 
*/ public void setHistory(List<double[]> sList, List<double[]> yList) { int size = sList.size(); for (int i = 0; i < size; i++) { update(sList.get(i), yList.get(i), ArrayMath.innerProduct(yList.get(i), yList.get(i)), ArrayMath.innerProduct(sList.get(i), yList.get(i)), 0, 1.0); } } public double[] applyInitialHessian(double[] x) { switch (scaleOpt) { case SCALAR: say("I"); ArrayMath.multiplyInPlace(x, gamma); break; case DIAGONAL: say("D"); if (d != null) { // Check sizes if (x.length != d.length) { throw new IllegalArgumentException("Vector of incorrect size passed to applyInitialHessian in QNInfo class"); } // Scale element-wise for (int i = 0; i < x.length; i++) { x[i] = x[i] / (d[i]); } } break; } return x; } /* * The update function is used to update the hessian approximation used by * the quasi newton optimization routine. * * If everything has behaved nicely, this involves deciding on a new initial * hessian through scaling or diagonal update, and then storing of the * secant pairs s = x - previousX and y = grad - previousGrad. * * Things can go wrong, if any non convex behavior is detected (s^T y < 0) * or numerical errors are likely the update is skipped. * */ public int update(double[] newX, double[] x, double[] newGrad, double[] grad, double step) throws SurpriseConvergence { // todo: add outofmemory error. double[] newS, newY; double sy, yy, sg; // allocate arrays for new s,y pairs (or replace if the list is already // full) if (mem > 0 && s.size() == mem || s.size() == maxMem) { newS = s.remove(0); newY = y.remove(0); rho.remove(0); } else { newS = new double[x.length]; newY = new double[x.length]; } // Here we construct the new pairs, and check for positive definiteness. sy = 0; yy = 0; sg = 0; for (int i = 0; i < x.length; i++) { newS[i] = newX[i] - x[i]; newY[i] = newGrad[i] - grad[i]; sy += newS[i] * newY[i]; yy += newY[i] * newY[i]; sg += newS[i] * newGrad[i]; } // Apply the updates used for the initial hessian. 
return update(newS, newY, yy, sy, sg, step); } private class NegativeCurvature extends Throwable { /** * */ private static final long serialVersionUID = 4676562552506850519L; public NegativeCurvature() { } } private class ZeroGradient extends Throwable { /** * */ private static final long serialVersionUID = -4001834044987928521L; public ZeroGradient() { } } public int update(double[] newS, double[] newY, double yy, double sy, double sg, double step) { // Initialize diagonal to the identity if (scaleOpt == eScaling.DIAGONAL && d == null) { d = new double[newS.length]; for (int i = 0; i < d.length; i++) { d[i] = 1.0; } } try { if (sy < 0) { throw new NegativeCurvature(); } if (yy == 0.0) { throw new ZeroGradient(); } switch (scaleOpt) { /* * SCALAR: The standard L-BFGS initial approximation which is just a * scaled identity. */ case SCALAR: gamma = sy / yy; break; /* * DIAGONAL: A diagonal scaling matrix is used as the initial * approximation. The updating method used is used thanks to Andrew * Bradley of the ICME dept. */ case DIAGONAL: double sDs; // Gamma is designed to scale such that a step length of one is // generally accepted. gamma = sy / (step * (sy - sg)); sDs = 0.0; for (int i = 0; i < d.length; i++) { d[i] = gamma * d[i]; sDs += newS[i] * d[i] * newS[i]; } // This diagonal update was introduced by Andrew Bradley for (int i = 0; i < d.length; i++) { d[i] = (1 - d[i] * newS[i] * newS[i] / sDs) * d[i] + newY[i] * newY[i] / sy; } // Here we make sure that the diagonal is alright double minD = ArrayMath.min(d); double maxD = ArrayMath.max(d); // If things have gone bad, just fill with the SCALAR approx. if (minD <= 0 || Double.isInfinite(maxD) || maxD / minD > 1e12) { System.err .println("QNInfo:update() : PROBLEM WITH DIAGONAL UPDATE"); double fill = yy / sy; for (int i = 0; i < d.length; i++) { d[i] = fill; } } } // If s is already of size mem, remove the oldest vector and free it up. 
if (mem > 0 && s.size() == mem || s.size() == maxMem) { s.remove(0); y.remove(0); rho.remove(0); } // Actually add the pair. s.add(newS); y.add(newY); rho.add(1 / sy); } catch (NegativeCurvature nc) { // NOTE: if applying QNMinimizer to a non convex problem, we would still // like to update the matrix // or we could get stuck in a series of skipped updates. say(" Negative curvature detected, update skipped "); } catch (ZeroGradient zg) { say(" Either convergence, or floating point errors combined with extremely linear region "); } return s.size(); } // end update } // end class QNInfo public void setHistory(List<double[]> s, List<double[]> y) { presetInfo = new QNInfo(s, y); } /* * computeDir() * * This function will calculate an approximation of the inverse hessian based * off the seen s,y vector pairs. This particular approximation uses the BFGS * update. * */ private void computeDir(double[] dir, double[] fg, double[] x, QNInfo qn, Function func) throws SurpriseConvergence { System.arraycopy(fg, 0, dir, 0, fg.length); int mmm = qn.size(); double[] as = new double[mmm]; for (int i = mmm - 1; i >= 0; i--) { as[i] = qn.getRho(i) * ArrayMath.innerProduct(qn.getS(i), dir); plusAndConstMult(dir, qn.getY(i), -as[i], dir); } // multiply by hessian approximation qn.applyInitialHessian(dir); for (int i = 0; i < mmm; i++) { double b = qn.getRho(i) * ArrayMath.innerProduct(qn.getY(i), dir); plusAndConstMult(dir, qn.getS(i), as[i] - b, dir); } ArrayMath.multiplyInPlace(dir, -1); if (useOWLQN) { // step (2) in Galen & Gao 2007 constrainSearchDir(dir, fg, x, func); } } // computes d = a + b * c private static double[] plusAndConstMult(double[] a, double[] b, double c, double[] d) { for (int i = 0; i < a.length; i++) { d[i] = a[i] + c * b[i]; } return d; } private double doEvaluation(double[] x) { // Evaluate solution if (evaluators == null) return Double.NEGATIVE_INFINITY; double score = 0; for (Evaluator eval:evaluators) { if (!suppressTestPrompt) say(" Evaluating: " + 
eval.toString()); score = eval.evaluate(x); } return score; } public float[] minimize(DiffFloatFunction function, float functionTolerance, float[] initial) { throw new UnsupportedOperationException("Float not yet supported for QN"); } @Override public double[] minimize(DiffFunction function, double functionTolerance, double[] initial) { return minimize(function, functionTolerance, initial, -1); } @Override public double[] minimize(DiffFunction dfunction, double functionTolerance, double[] initial, int maxFunctionEvaluations) { return minimize(dfunction, functionTolerance, initial, maxFunctionEvaluations, null); } public double[] minimize(DiffFunction dfunction, double functionTolerance, double[] initial, int maxFunctionEvaluations, QNInfo qn) { say("QNMinimizer called on double function of " + dfunction.domainDimension() + " variables,"); if (mem > 0) { sayln(" using M = " + mem + "."); } else { sayln(" using dynamic setting of M."); } if (qn == null && presetInfo == null) { qn = new QNInfo(mem); noHistory = true; } else if (presetInfo != null) { qn = presetInfo; noHistory = false; } else if (qn != null) { noHistory = false; } double[] x, newX, grad, newGrad, dir; double value; its = 0; fevals = 0; success = false; qn.scaleOpt = scaleOpt; // initialize weights x = initial; // initialize gradient grad = new double[x.length]; newGrad = new double[x.length]; newX = new double[x.length]; dir = new double[x.length]; // initialize function value and gradient (gradient is stored in grad inside // evaluateFunction) value = evaluateFunction(dfunction, x, grad); if (useOWLQN) { double norm = l1NormOWL(x, dfunction); value += norm * lambdaOWL; grad = pseudoGradientOWL(x, grad, dfunction); // step (1) in Galen & Gao except we are not computing v yet } PrintWriter outFile = null; PrintWriter infoFile = null; if (outputToFile) { try { String baseName = "QN_m" + mem + "_" + lsOpt.toString() + "_" + scaleOpt.toString(); outFile = new PrintWriter(new FileOutputStream(baseName + 
".output"), true); infoFile = new PrintWriter(new FileOutputStream(baseName + ".info"), true); infoFile.println(dfunction.domainDimension() + "; DomainDimension "); infoFile.println(mem + "; memory"); } catch (IOException e) { throw new RuntimeIOException("Caught IOException outputting QN data to file", e); } } Record rec = new Record(quiet, monitor, functionTolerance, outFile); // sets the original gradient and x. Also stores the monitor. rec.start(value, grad, x); // Check if max Evaluations and Iterations have been provided. maxFevals = (maxFunctionEvaluations > 0) ? maxFunctionEvaluations : Integer.MAX_VALUE; // maxIterations = (maxIterations > 0) ? maxIterations : Integer.MAX_VALUE; sayln(" An explanation of the output:"); sayln("Iter The number of iterations"); sayln("evals The number of function evaluations"); sayln("SCALING <D> Diagonal scaling was used; <I> Scaled Identity"); sayln("LINESEARCH [## M steplength] Minpack linesearch"); sayln(" 1-Function value was too high"); sayln(" 2-Value ok, gradient positive, positive curvature"); sayln(" 3-Value ok, gradient negative, positive curvature"); sayln(" 4-Value ok, gradient negative, negative curvature"); sayln(" [.. B] Backtracking"); sayln("VALUE The current function value"); sayln("TIME Total elapsed time"); sayln("|GNORM| The current norm of the gradient"); sayln("{RELNORM} The ratio of the current to initial gradient norms"); sayln("AVEIMPROVE The average improvement / current value"); sayln("EVALSCORE The last available eval score"); sayln(); sayln("Iter ## evals ## <SCALING> [LINESEARCH] VALUE TIME |GNORM| {RELNORM} AVEIMPROVE EVALSCORE"); // Beginning of the loop. 
    // Main optimization loop: one quasi-Newton iteration per pass.
    do {
      try {
        sayln();
        boolean doEval = (its >= 0 && its >= startEvaluateIters && evaluateIters > 0 && its % evaluateIters == 0);
        its += 1;
        double newValue;
        double[] newPoint = new double[3]; // initialized in loop
        say("Iter " + its + " evals " + fevals + " ");

        // Compute the search direction
        say("<");
        computeDir(dir, grad, x, qn, dfunction);
        say("> ");

        // sanity check dir (v != v is the NaN test)
        boolean hasNaNDir = false;
        boolean hasNaNGrad = false;
        for (int i = 0; i < dir.length; i++) {
          if (dir[i] != dir[i]) hasNaNDir = true;
          if (grad[i] != grad[i]) hasNaNGrad = true;
        }
        if (hasNaNDir && !hasNaNGrad) {
          // the gradient itself is fine, so blame the curvature history:
          // reset it and retry the direction computation once
          say("(NaN dir likely due to Hessian approx - resetting) ");
          qn.clear();
          // re-compute the search direction
          say("<");
          computeDir(dir, grad, x, qn, dfunction);
          say("> ");
        }

        // perform line search
        say("[");
        if (useOWLQN) {
          // only linear search is allowed for OWL-QN
          newPoint = lineSearchBacktrackOWL(dfunction, dir, x, newX, grad, value);
          say("B");
        } else {
          // switch between line search options.
          switch (lsOpt) {
          case BACKTRACK:
            newPoint = lineSearchBacktrack(dfunction, dir, x, newX, grad, value);
            say("B");
            break;
          case MINPACK:
            newPoint = lineSearchMinPack(dfunction, dir, x, newX, grad, value,
                functionTolerance);
            say("M");
            break;
          default:
            sayln("Invalid line search option for QNMinimizer. ");
            System.exit(1);
            break;
          }
        }

        // newPoint is indexed by the class-level constants f (function value),
        // a (step length) and g (directional derivative)
        newValue = newPoint[f];
        System.err.print(" " + nf.format(newPoint[a]));
        say("] ");

        // This shouldn't actually evaluate anything since that should have been
        // done in the lineSearch.
        System.arraycopy(dfunction.derivativeAt(newX), 0, newGrad, 0, newGrad.length);

        // This is where all the s, y updates are applied.
        qn.update(newX, x, newGrad, grad, newPoint[a]); // step (4) in Galen & Gao 2007

        if (useOWLQN) {
          // pseudo gradient
          newGrad = pseudoGradientOWL(newX, newGrad, dfunction);
        }

        double evalScore = Double.NEGATIVE_INFINITY;
        if (doEval) {
          evalScore = doEvaluation(newX);
        }

        // Add the current value and gradient to the records, this also monitors
        // X and writes to output
        rec.add(newValue, newGrad, newX, fevals, evalScore);

        // shift: the accepted point becomes the current point
        value = newValue;
        // double[] temp = x;
        // x = newX;
        // newX = temp;
        System.arraycopy(newX, 0, x, 0, x.length);
        System.arraycopy(newGrad, 0, grad, 0, newGrad.length);

        if (quiet) {
          System.err.print(".");
        }
        if (fevals > maxFevals) {
          throw new MaxEvaluationsExceeded(" Exceeded in minimize() loop ");
        }

      } catch (SurpriseConvergence s) {
        sayln();
        sayln("QNMinimizer aborted due to surprise convergence");
        break;
      } catch (MaxEvaluationsExceeded m) {
        sayln();
        sayln("QNMinimizer aborted due to maximum number of function evaluations");
        sayln(m.toString());
        sayln("** This is not an acceptable termination of QNMinimizer, consider");
        sayln("** increasing the max number of evaluations, or safeguarding your");
        sayln("** program by checking the QNMinimizer.wasSuccessful() method.");
        break;
      } catch (OutOfMemoryError oome) {
        // Recovery: drop the oldest history pair to free memory and keep
        // going; rethrow only when there is no history left to shrink.
        sayln();
        if ( ! qn.s.isEmpty()) {
          qn.s.remove(0);
          qn.y.remove(0);
          qn.rho.remove(0);
          qn.mem = qn.s.size();
          System.err.println("Caught OutOfMemoryError, changing m = " + qn.mem);
        } else {
          throw oome;
        }
      }

    } while ((state = rec.toContinue()) == eState.CONTINUE); // do

    if (evaluateIters > 0) {
      // do final evaluation
      double evalScore = (useEvalImprovement ? doEvaluation(rec.getBest()) : doEvaluation(x));
      sayln("final evalScore is: " + evalScore);
    }

    //
    // Announce the reason minimization has terminated.
// System.err.println(); switch (state) { case TERMINATE_GRADNORM: System.err .println("QNMinimizer terminated due to numerically zero gradient: |g| < EPS max(1,|x|) "); success = true; break; case TERMINATE_RELATIVENORM: System.err .println("QNMinimizer terminated due to sufficient decrease in gradient norms: |g|/|g0| < TOL "); success = true; break; case TERMINATE_AVERAGEIMPROVE: System.err .println("QNMinimizer terminated due to average improvement: | newest_val - previous_val | / |newestVal| < TOL "); success = true; break; case TERMINATE_MAXITR: System.err .println("QNMinimizer terminated due to reached max iteration " + maxItr ); success = true; break; case TERMINATE_EVALIMPROVE: System.err .println("QNMinimizer terminated due to no improvement on eval "); success = true; x = rec.getBest(); break; default: System.err.println("QNMinimizer terminated without converging"); success = false; break; } double completionTime = rec.howLong(); sayln("Total time spent in optimization: " + nfsec.format(completionTime) + "s"); if (outputToFile) { infoFile.println(completionTime + "; Total Time "); infoFile.println(fevals + "; Total evaluations"); infoFile.close(); outFile.close(); } qn.free(); return x; } // end minimize() private void sayln() { if (!quiet) { System.err.println(); } } private void sayln(String s) { if (!quiet) { System.err.println(s); } } private void say(String s) { if (!quiet) { System.err.print(s); } } // todo [cdm 2013]: Can this be sped up by returning a Pair rather than copying array? 
  // Evaluates dfunc at x, copying the gradient into the caller-supplied grad
  // array, counts the evaluation in fevals, and returns the function value.
  private double evaluateFunction(DiffFunction dfunc, double[] x, double[] grad) {
    System.arraycopy(dfunc.derivativeAt(x), 0, grad, 0, grad.length);
    fevals += 1;
    return dfunc.valueAt(x);
  }

  /**
   * Enables or disables OWL-QN (orthant-wise L-BFGS for L1 regularization)
   * and sets the L1 penalty weight lambda used by the pseudo-gradient and
   * the backtracking OWL line search.
   */
  public void useOWLQN(boolean use, double lambda) {
    this.useOWLQN = use;
    this.lambdaOWL = lambda;
  }

  // Returns the set of parameter indices the L1 regularizer applies to:
  // either the range the function itself declares (HasRegularizerParamRange),
  // or every index of x.
  private static Set<Integer> initializeParamRange(Function func, double[] x) {
    Set<Integer> paramRange;
    if (func instanceof HasRegularizerParamRange) {
      paramRange = ((HasRegularizerParamRange)func).getRegularizerParamRange(x);
    } else {
      paramRange = Generics.newHashSet(x.length);
      for (int i = 0; i < x.length; i++) {
        paramRange.add(i);
      }
    }
    return paramRange;
  }

  // Projects x onto the given orthant IN PLACE (regularized coordinates that
  // crossed the orthant boundary are clipped to zero) and returns x.
  private static double[] projectOWL(double[] x, double[] orthant, Function func) {
    Set<Integer> paramRange = initializeParamRange(func, x);
    for (int i : paramRange) {
      if (x[i] * orthant[i] <= 0)
        x[i] = 0;
    }
    return x;
  }

  // L1 norm of x restricted to the regularized parameter range.
  private static double l1NormOWL(double[] x, Function func) {
    Set<Integer> paramRange = initializeParamRange(func, x);
    double sum = 0.0;
    for (int i: paramRange) {
      sum += Math.abs(x[i]);
    }
    return sum;
  }

  // Zeroes (in place) every regularized component of the search direction
  // that does not agree in sign with the steepest-descent direction -fg
  // (step (2) of Galen & Gao 2007).
  private static void constrainSearchDir(double[] dir, double[] fg, double[] x, Function func) {
    Set<Integer> paramRange = initializeParamRange(func, x);
    for (int i: paramRange) {
      if (dir[i] * fg[i] >= 0.0) {
        dir[i] = 0.0;
      }
    }
  }

  // Computes the OWL-QN pseudo-gradient of the L1-regularized objective:
  // where |x_i| is differentiable the lambda term is added outright; at
  // x_i == 0 the one-sided derivative that points downhill is chosen, or 0
  // when the plain gradient lies within [-lambda, lambda].
  private double[] pseudoGradientOWL(double[] x, double[] grad, Function func) {
    Set<Integer> paramRange = initializeParamRange(func, x);
    double[] newGrad = new double[grad.length]; // initialized below

    // compute pseudo gradient
    for (int i = 0; i < x.length; i++) {
      if (paramRange.contains(i)) {
        if (x[i] < 0.0) {
          // Differentiable
          newGrad[i] = grad[i] - lambdaOWL;
        } else if (x[i] > 0.0) {
          // Differentiable
          newGrad[i] = grad[i] + lambdaOWL;
        } else {
          if (grad[i] < -lambdaOWL) {
            // Take the right partial derivative
            newGrad[i] = grad[i] + lambdaOWL;
          } else if (grad[i] > lambdaOWL) {
            // Take the left partial derivative
            newGrad[i] = grad[i] - lambdaOWL;
          } else {
            newGrad[i] = 0.0;
          }
        }
      } else {
        // unregularized coordinate: plain gradient
        newGrad[i] =
            grad[i];
      }
    }
    return newGrad;
  }

  /*
   * lineSearchBacktrackOWL is the linesearch used for L1 regularization.
   * it only satisfies sufficient descent not the Wolfe conditions.
   */
  private double[] lineSearchBacktrackOWL(Function func, double[] dir,
      double[] x, double[] newX, double[] grad, double lastValue)
      throws MaxEvaluationsExceeded {

    /* Choose the orthant for the new point. */
    // at x_i == 0 the orthant is given by the steepest-descent side (-grad)
    double[] orthant = new double[x.length];
    for (int i = 0; i < orthant.length; i++) {
      orthant[i] = (x[i] == 0.0) ? -grad[i] : x[i];
    }

    // c1 can be anything between 0 and 1, exclusive (usu. 1/10 - 1/2)
    double step, c1;

    // for first few steps, we have less confidence in our initial step-size a
    // so scale back quicker
    if (its <= 2) {
      step = 0.1;
      c1 = 0.1;
    } else {
      step = 1.0;
      c1 = 0.1;
    }

    // should be small e.g. 10^-5 ... 10^-1
    double c = 0.01;
    // c = c * normGradInDir;

    double[] newPoint = new double[3];

    while (true) {
      plusAndConstMult(x, dir, step, newX);
      // The current point is projected onto the orthant
      projectOWL(newX, orthant, func); // step (3) in Galen & Gao 2007

      // Evaluate the function and gradient values
      double value = func.valueAt(newX);

      // Compute the L1 norm of the variables and add it to the object value
      double norm = l1NormOWL(newX, func);
      value += norm * lambdaOWL;

      newPoint[f] = value;

      // sufficient-descent test against the actual (projected) displacement
      double dgtest = 0.0;
      for (int i = 0; i < x.length; i++) {
        dgtest += (newX[i] - x[i]) * grad[i];
      }

      if (newPoint[f] <= lastValue + c * dgtest)
        break;
      else {
        if (newPoint[f] < lastValue) {
          // an improvement, but not good enough... suspicious!
          say("!");
        } else {
          say(".");
        }
      }
      step = c1 * step;
    }

    newPoint[a] = step;
    fevals += 1;
    if (fevals > maxFevals) {
      throw new MaxEvaluationsExceeded(
          " Exceeded during linesearch() Function ");
    }

    return newPoint;
  }

  /*
   * lineSearchBacktrack is the original linesearch used for the first version
   * of QNMinimizer. it only satisfies sufficient descent not the Wolfe
   * conditions.
   */
  // Simple Armijo backtracking: shrink the step by c1 until the function
  // value satisfies the sufficient-decrease condition; returns
  // [f]=value, [a]=step (the [g] slot is left unset).
  private double[] lineSearchBacktrack(Function func, double[] dir, double[] x,
      double[] newX, double[] grad, double lastValue)
      throws MaxEvaluationsExceeded {

    double normGradInDir = ArrayMath.innerProduct(dir, grad);
    say("(" + nf.format(normGradInDir) + ")");
    if (normGradInDir > 0) {
      say("{WARNING--- direction of positive gradient chosen!}");
    }

    // c1 can be anything between 0 and 1, exclusive (usu. 1/10 - 1/2)
    double step, c1;

    // for first few steps, we have less confidence in our initial step-size a
    // so scale back quicker
    if (its <= 2) {
      step = 0.1;
      c1 = 0.1;
    } else {
      step = 1.0;
      c1 = 0.1;
    }

    // should be small e.g. 10^-5 ... 10^-1
    double c = 0.01;
    // double v = func.valueAt(x);
    // c = c * mult(grad, dir);
    c = c * normGradInDir;

    double[] newPoint = new double[3];

    while ((newPoint[f] = func.valueAt((plusAndConstMult(x, dir, step, newX)))) > lastValue
        + c * step) {
      fevals += 1;
      if (newPoint[f] < lastValue) {
        // an improvement, but not good enough... suspicious!
        say("!");
      } else {
        say(".");
      }
      step = c1 * step;
    }

    newPoint[a] = step;
    fevals += 1;
    if (fevals > maxFevals) {
      throw new MaxEvaluationsExceeded(
          " Exceeded during linesearch() Function ");
    }

    return newPoint;
  }

  // Moré-Thuente style line search (a translation of MINPACK's cvsrch):
  // brackets a step satisfying the strong Wolfe conditions, using getStep()
  // to produce each new trial step.  Returns [f]=value, [a]=step,
  // [g]=directional derivative at the accepted point.
  private double[] lineSearchMinPack(DiffFunction dfunc, double[] dir,
      double[] x, double[] newX, double[] grad, double f0, double tol)
      throws MaxEvaluationsExceeded {

    double xtrapf = 4.0;
    int info = 0;
    int infoc = 1;
    bracketed = false;
    boolean stage1 = true;
    double width = aMax - aMin;
    double width1 = 2 * width;
    // double[] wa = x;

    // Should check input parameters

    double g0 = ArrayMath.innerProduct(grad, dir);
    if (g0 >= 0) {
      // We're looking in a direction of positive gradient. This won't work.
// set dir = -grad for (int i = 0; i < x.length; i++) { dir[i] = -grad[i]; } g0 = ArrayMath.innerProduct(grad, dir); } double gTest = ftol * g0; double[] newPt = new double[3]; double[] bestPt = new double[3]; double[] endPt = new double[3]; newPt[a] = 1.0; // Always guess 1 first, this should be right if the // function is "nice" and BFGS is working. if (its == 1 && noHistory) { newPt[a] = 1e-1; } bestPt[a] = 0.0; bestPt[f] = f0; bestPt[g] = g0; endPt[a] = 0.0; endPt[f] = f0; endPt[g] = g0; // int cnt = 0; do { double stpMin; // = aMin; [cdm: this initialization was always overridden below] double stpMax; // = aMax; [cdm: this initialization was always overridden below] if (bracketed) { stpMin = Math.min(bestPt[a], endPt[a]); stpMax = Math.max(bestPt[a], endPt[a]); } else { stpMin = bestPt[a]; stpMax = newPt[a] + xtrapf * (newPt[a] - bestPt[a]); } newPt[a] = Math.max(newPt[a], aMin); newPt[a] = Math.min(newPt[a], aMax); // Use the best point if we have some sort of strange termination // conditions. if ((bracketed && (newPt[a] <= stpMin || newPt[a] >= stpMax)) || fevals >= maxFevals || infoc == 0 || (bracketed && stpMax - stpMin <= tol * stpMax)) { // todo: below.. plusAndConstMult(x, dir, bestPt[a], newX); newPt[f] = bestPt[f]; newPt[a] = bestPt[a]; } newPt[f] = dfunc.valueAt((plusAndConstMult(x, dir, newPt[a], newX))); newPt[g] = ArrayMath.innerProduct(dfunc.derivativeAt(newX), dir); double fTest = f0 + newPt[a] * gTest; fevals += 1; // Check and make sure everything is normal. 
      // Termination codes (info): 6,5,4 = assorted failures, 3 = out of
      // evaluations (thrown), 2 = interval too small, 1 = strong Wolfe
      // conditions satisfied.
      if ((bracketed && (newPt[a] <= stpMin || newPt[a] >= stpMax))
          || infoc == 0) {
        info = 6;
        say(" line search failure: bracketed but no feasible found ");
      }
      if (newPt[a] == aMax && newPt[f] <= fTest && newPt[g] <= gTest) {
        info = 5;
        say(" line search failure: sufficient decrease, but gradient is more negative ");
      }
      if (newPt[a] == aMin && (newPt[f] > fTest || newPt[g] >= gTest)) {
        info = 4;
        say(" line search failure: minimum step length reached ");
      }
      if (fevals >= maxFevals) {
        info = 3;
        throw new MaxEvaluationsExceeded(
            " Exceeded during lineSearchMinPack() Function ");
      }
      if (bracketed && stpMax - stpMin <= tol * stpMax) {
        info = 2;
        say(" line search failure: interval is too small ");
      }
      if (newPt[f] <= fTest && Math.abs(newPt[g]) <= -gtol * g0) {
        info = 1; // strong Wolfe conditions hold
      }

      if (info != 0) {
        return newPt;
      }

      // this is the first stage where we look for a point that is lower and
      // increasing
      if (stage1 && newPt[f] <= fTest && newPt[g] >= Math.min(ftol, gtol) * g0) {
        stage1 = false;
      }

      // A modified function is used to predict the step only if
      // we have not obtained a step for which the modified
      // function has a non-positive function value and non-negative
      // derivative, and if a lower function value has been
      // obtained but the decrease is not sufficient.
      if (stage1 && newPt[f] <= bestPt[f] && newPt[f] > fTest) {
        // shift to the "modified" function psi(a) = f(a) - a*gTest, call
        // getStep on it, then shift back
        newPt[f] = newPt[f] - newPt[a] * gTest;
        bestPt[f] = bestPt[f] - bestPt[a] * gTest;
        endPt[f] = endPt[f] - endPt[a] * gTest;

        newPt[g] = newPt[g] - gTest;
        bestPt[g] = bestPt[g] - gTest;
        endPt[g] = endPt[g] - gTest;

        infoc = getStep(/* x, dir, newX, f0, g0, */
            newPt, bestPt, endPt, stpMin, stpMax);

        bestPt[f] = bestPt[f] + bestPt[a] * gTest;
        endPt[f] = endPt[f] + endPt[a] * gTest;

        bestPt[g] = bestPt[g] + gTest;
        endPt[g] = endPt[g] + gTest;
      } else {
        infoc = getStep(/* x, dir, newX, f0, g0, */
            newPt, bestPt, endPt, stpMin, stpMax);
      }

      if (bracketed) {
        // force the interval of uncertainty to shrink: bisect when it did
        // not contract by at least a factor of p66
        if (Math.abs(endPt[a] - bestPt[a]) >= p66 * width1) {
          newPt[a] = bestPt[a] + p5 * (endPt[a] - bestPt[a]);
        }
        width1 = width;
        width = Math.abs(endPt[a] - bestPt[a]);
      }

    } while (true);
  }

  /**
   * getStep()
   *
   * THIS FUNCTION IS A TRANSLATION OF A TRANSLATION OF THE MINPACK SUBROUTINE
   * cstep(). Dianne O'Leary July 1991
   *
   * It was then interpreted from the implementation supplied by Andrew
   * Bradley. Modifications have been made for this particular application.
   *
   * This function is used to find a new safe guarded step to be used for
   * line search procedures.
   *
   * NOTE(review): declared to throw MaxEvaluationsExceeded, but no path in
   * this method actually throws it.
   */
  private int getStep(
      /* double[] x, double[] dir, double[] newX, double f0, double g0, // None of these were used */
      double[] newPt, double[] bestPt, double[] endPt, double stpMin,
      double stpMax) throws MaxEvaluationsExceeded {

    // Should check for input errors.
    int info; // = 0; always set in the if below
    boolean bound; // = false; always set in the if below
    double theta, gamma, p, q, r, s, stpc, stpq, stpf;
    // sign of the new directional derivative relative to the best point's
    double signG = newPt[g] * bestPt[g] / Math.abs(bestPt[g]);

    //
    // First case. A higher function value.
    // The minimum is bracketed. If the cubic step is closer
    // to stx than the quadratic step, the cubic step is taken,
    // else the average of the cubic and quadratic steps is taken.
    //
    if (newPt[f] > bestPt[f]) {
      info = 1;
      bound = true;
      theta = 3 * (bestPt[f] - newPt[f]) / (newPt[a] - bestPt[a]) + bestPt[g]
          + newPt[g];
      s = Math.max(Math.max(theta, newPt[g]), bestPt[g]);
      gamma = s
          * Math.sqrt((theta / s) * (theta / s) - (bestPt[g] / s)
              * (newPt[g] / s));
      if (newPt[a] < bestPt[a]) {
        gamma = -gamma;
      }
      p = (gamma - bestPt[g]) + theta;
      q = ((gamma - bestPt[g]) + gamma) + newPt[g];
      r = p / q;
      stpc = bestPt[a] + r * (newPt[a] - bestPt[a]);
      stpq = bestPt[a]
          + ((bestPt[g] / ((bestPt[f] - newPt[f]) / (newPt[a] - bestPt[a]) + bestPt[g])) / 2)
          * (newPt[a] - bestPt[a]);

      if (Math.abs(stpc - bestPt[a]) < Math.abs(stpq - bestPt[a])) {
        stpf = stpc;
      } else {
        stpf = stpq;
        // stpf = stpc + (stpq - stpc)/2;
      }
      bracketed = true;
      if (newPt[a] < 0.1) {
        stpf = 0.01 * stpf;
      }

    } else if (signG < 0.0) {
      //
      // Second case. A lower function value and derivatives of
      // opposite sign. The minimum is bracketed. If the cubic
      // step is closer to stx than the quadratic (secant) step,
      // the cubic step is taken, else the quadratic step is taken.
      //
      info = 2;
      bound = false;
      theta = 3 * (bestPt[f] - newPt[f]) / (newPt[a] - bestPt[a]) + bestPt[g]
          + newPt[g];
      s = Math.max(Math.max(theta, bestPt[g]), newPt[g]);
      gamma = s
          * Math.sqrt((theta / s) * (theta / s) - (bestPt[g] / s)
              * (newPt[g] / s));
      if (newPt[a] > bestPt[a]) {
        gamma = -gamma;
      }
      p = (gamma - newPt[g]) + theta;
      q = ((gamma - newPt[g]) + gamma) + bestPt[g];
      r = p / q;
      stpc = newPt[a] + r * (bestPt[a] - newPt[a]);
      stpq = newPt[a] + (newPt[g] / (newPt[g] - bestPt[g]))
          * (bestPt[a] - newPt[a]);
      if (Math.abs(stpc - newPt[a]) > Math.abs(stpq - newPt[a])) {
        stpf = stpc;
      } else {
        stpf = stpq;
      }
      bracketed = true;

    } else if (Math.abs(newPt[g]) < Math.abs(bestPt[g])) {
      //
      // Third case. A lower function value, derivatives of the
      // same sign, and the magnitude of the derivative decreases.
      // The cubic step is only used if the cubic tends to infinity
      // in the direction of the step or if the minimum of the cubic
      // is beyond stp. Otherwise the cubic step is defined to be
      // either stpmin or stpmax. The quadratic (secant) step is also
      // computed and if the minimum is bracketed then the the step
      // closest to stx is taken, else the step farthest away is taken.
      //
      info = 3;
      bound = true;
      theta = 3 * (bestPt[f] - newPt[f]) / (newPt[a] - bestPt[a]) + bestPt[g]
          + newPt[g];
      s = Math.max(Math.max(theta, bestPt[g]), newPt[g]);
      // Math.max guards the discriminant against tiny negative values from
      // floating-point round-off
      gamma = s
          * Math.sqrt(Math.max(0.0, (theta / s) * (theta / s) - (bestPt[g] / s)
              * (newPt[g] / s)));
      if (newPt[a] < bestPt[a]) {
        gamma = -gamma;
      }
      p = (gamma - bestPt[g]) + theta;
      q = ((gamma - bestPt[g]) + gamma) + newPt[g];
      r = p / q;
      if (r < 0.0 && gamma != 0.0) {
        stpc = newPt[a] + r * (bestPt[a] - newPt[a]);
      } else if (newPt[a] > bestPt[a]) {
        stpc = stpMax;
      } else {
        stpc = stpMin;
      }
      stpq = newPt[a] + (newPt[g] / (newPt[g] - bestPt[g]))
          * (bestPt[a] - newPt[a]);
      if (bracketed) {
        if (Math.abs(newPt[a] - stpc) < Math.abs(newPt[a] - stpq)) {
          stpf = stpc;
        } else {
          stpf = stpq;
        }
      } else {
        if (Math.abs(newPt[a] - stpc) > Math.abs(newPt[a] - stpq)) {
          stpf = stpc;
        } else {
          stpf = stpq;
        }
      }

    } else {
      //
      // Fourth case. A lower function value, derivatives of the
      // same sign, and the magnitude of the derivative does
      // not decrease. If the minimum is not bracketed, the step
      // is either stpmin or stpmax, else the cubic step is taken.
      //
      info = 4;
      bound = false;
      if (bracketed) {
        theta = 3 * (bestPt[f] - newPt[f]) / (newPt[a] - bestPt[a]) + bestPt[g]
            + newPt[g];
        s = Math.max(Math.max(theta, bestPt[g]), newPt[g]);
        gamma = s
            * Math.sqrt((theta / s) * (theta / s) - (bestPt[g] / s)
                * (newPt[g] / s));
        if (newPt[a] > bestPt[a]) {
          gamma = -gamma;
        }
        p = (gamma - newPt[g]) + theta;
        q = ((gamma - newPt[g]) + gamma) + bestPt[g];
        r = p / q;
        stpc = newPt[a] + r * (bestPt[a] - newPt[a]);
        stpf = stpc;
      } else if (newPt[a] > bestPt[a]) {
        stpf = stpMax;
      } else {
        stpf = stpMin;
      }
    }

    //
    // Update the interval of uncertainty. This update does not
    // depend on the new step or the case analysis above.
// if (newPt[f] > bestPt[f]) { copy(newPt, endPt); } else { if (signG < 0.0) { copy(bestPt, endPt); } copy(newPt, bestPt); } say(String.valueOf(info)); // // Compute the new step and safeguard it. // stpf = Math.min(stpMax, stpf); stpf = Math.max(stpMin, stpf); newPt[a] = stpf; if (bracketed && bound) { if (endPt[a] > bestPt[a]) { newPt[a] = Math.min(bestPt[a] + p66 * (endPt[a] - bestPt[a]), newPt[a]); } else { newPt[a] = Math.max(bestPt[a] + p66 * (endPt[a] - bestPt[a]), newPt[a]); } } return info; } private static void copy(double[] src, double[] dest) { System.arraycopy(src, 0, dest, 0, src.length); } // // // // private double[] lineSearchNocedal(DiffFunction dfunc, double[] dir, // double[] x, double[] newX, double[] grad, double f0) throws // MaxEvaluationsExceeded { // // // double g0 = ArrayMath.innerProduct(grad,dir); // if(g0 > 0){ // //We're looking in a direction of positive gradient. This wont' work. // //set dir = -grad // plusAndConstMult(new double[x.length],grad,-1,dir); // g0 = ArrayMath.innerProduct(grad,dir); // } // say("(" + nf.format(g0) + ")"); // // // double[] newPoint = new double[3]; // double[] prevPoint = new double[3]; // newPoint[a] = 1.0; //Always guess 1 first, this should be right if the // function is "nice" and BFGS is working. // // //Special guess for the first iteration. // if(its == 1){ // double aLin = - f0 / (ftol*g0); // //Keep aLin within aMin and 1 for the first guess. But make a more // intelligent guess based off the gradient // aLin = Math.min(1.0, aLin); // aLin = Math.max(aMin, aLin); // newPoint[a] = aLin; // Guess low at first since we have no idea of scale at // first. 
// } // // prevPoint[a] = 0.0; // prevPoint[f] = f0; // prevPoint[g] = g0; // // int cnt = 0; // // do{ // newPoint[f] = dfunc.valueAt((plusAndConstMult(x, dir, newPoint[a], newX))); // newPoint[g] = ArrayMath.innerProduct(dfunc.derivativeAt(newX),dir); // fevals += 1; // // //If fNew > f0 + small*aNew*g0 or fNew > fPrev // if( (newPoint[f] > f0 + ftol*newPoint[a]*g0) || newPoint[f] > prevPoint[f] // ){ // //We know there must be a point that satisfies the strong wolfe conditions // between // //the previous and new point, so search between these points. // say("->"); // return zoom(dfunc,x,dir,newX,f0,g0,prevPoint,newPoint); // } // // //Here we check if the magnitude of the gradient has decreased, if // //it is more negative we can expect to find a much better point // //by stepping a little farther. // // //If |gNew| < 0.9999 |g0| // if( Math.abs(newPoint[g]) <= -gtol*g0 ){ // //This is exactly what we wanted // return newPoint; // } // // if (newPoint[g] > 0){ // //Hmm, our step is too big to be a satisfying point, lets look backwards. // say("<-");//say("^"); // // return zoom(dfunc,x,dir,newX,f0,g0,newPoint,prevPoint); // } // // //if we made it here, our function value has decreased enough, but the // gradient is more negative. // //we should increase our step size, since we have potential to decrease the // function // //value a lot more. // newPoint[a] *= 10; // this is stupid, we should interpolate it. since we // already have info for quadratic at least. // newPoint[f] = Double.NaN; // newPoint[g] = Double.NaN; // cnt +=1; // say("*"); // // //if(cnt > 10 || fevals > maxFevals){ // if(fevals > maxFevals){ throw new MaxEvaluationsExceeded(" Exceeded during // zoom() Function ");} // // if(newPoint[a] > aMax){ // System.err.println(" max stepsize reached. This is unusual. 
"); // System.exit(1); // } // // }while(true); // // } // private double interpolate( double[] point0, double[] point1){ // double newAlpha; // double intvl = Math.abs(point0[a] -point1[a]); // //if(point2 == null){ // if( Double.isNaN(point0[g]) ){ // //We dont know the gradient at aLow so do bisection // newAlpha = 0.5*(point0[a] + point1[a]); // }else{ // //We know the gradient so do Quadratic 2pt // newAlpha = interpolateQuadratic2pt(point0,point1); // } // //If the newAlpha is outside of the bounds just do bisection. // if( ((newAlpha > point0[a]) && (newAlpha > point1[a])) || // ((newAlpha < point0[a]) && (newAlpha < point1[a])) ){ // //bisection. // return 0.5*(point0[a] + point1[a]); // } // //If we aren't moving fast enough, revert to bisection. // if( ((newAlpha/intvl) < 1e-6) || ((newAlpha/intvl) > (1- 1e-6)) ){ // //say("b"); // return 0.5*(point0[a] + point1[a]); // } // return newAlpha; // } /* * private double interpolate( List<double[]> pointList ,) { * * int n = pointList.size(); double newAlpha = 0.0; * * if( n > 2){ newAlpha = * interpolateCubic(pointList.get(0),pointList.get(n-2),pointList.get(n-1)); * }else if(n == 2){ * * //Only have two points * * if( Double.isNaN(pointList.get(0)[gInd]) ){ // We don't know the gradient at * aLow so do bisection newAlpha = 0.5*(pointList.get(0)[aInd] + * pointList.get(1)[aInd]); }else{ // We know the gradient so do Quadratic 2pt * newAlpha = interpolateQuadratic2pt(pointList.get(0),pointList.get(1)); } * * }else { //not enough info to interpolate with! * System.err.println("QNMinimizer:interpolate() attempt to interpolate with * only one point."); System.exit(1); } * * return newAlpha; * } */ // Returns the minimizer of a quadratic running through point (a0,f0) with // derivative g0 and passing through (a1,f1). 
// private double interpolateQuadratic2pt(double[] pt0, double[] pt1){ // if( Double.isNaN(pt0[g]) ){ // System.err.println("QNMinimizer:interpolateQuadratic - Gradient at point // zero doesn't exist, interpolation failed"); // System.exit(1); // } // double aDif = pt1[a]-pt0[a]; // double fDif = pt1[f]-pt0[f]; // return (- pt0[g]*aDif*aDif)/(2*(fDif-pt0[g]*aDif)) + pt0[a]; // } // private double interpolateCubic(double[] pt0, double[] pt1, double[] pt2){ // double a0 = pt1[a]-pt0[a]; // double a1 = pt2[a]-pt0[a]; // double f0 = pt1[f]-pt0[f]; // double f1 = pt2[f]-pt0[f]; // double g0 = pt0[g]; // double[][] mat = new double[2][2]; // double[] rhs = new double[2]; // double[] coefs = new double[2]; // double scale = 1/(a0*a0*a1*a1*(a1-a0)); // mat[0][0] = a0*a0; // mat[0][1] = -a1*a1; // mat[1][0] = -a0*a0*a0; // mat[1][1] = a1*a1*a1; // rhs[0] = f1 - g0*a1; // rhs[1] = f0 - g0*a0; // for(int i=0;i<2;i++){ // for(int j=0;j<2;j++){ // coefs[i] += mat[i][j]*rhs[j]; // } // coefs[i] *= scale; // } // double a = coefs[0]; // double b = coefs[1]; // double root = b*b-3*a*g0; // if( root < 0 ){ // System.err.println("QNminimizer:interpolateCubic - interpolate failed"); // System.exit(1); // } // return (-b+Math.sqrt(root))/(3*a); // } // private double[] zoom(DiffFunction dfunc, double[] x, double[] dir, // double[] newX, double f0, double g0, double[] bestPoint, double[] endPoint) // throws MaxEvaluationsExceeded { // return zoom(dfunc,x, dir, newX,f0,g0, bestPoint, endPoint,null); // } // private double[] zoom(DiffFunction dfunc, double[] x, double[] dir, // double[] newX, double f0, double g0, double[] bestPt, double[] endPt, // double[] newPt) throws MaxEvaluationsExceeded { // double width = Math.abs(bestPt[a] - endPt[a]); // double reduction = 1.0; // double p66 = 0.66; // int info = 0; // double stpf; // double theta,gamma,s,p,q,r,stpc,stpq; // boolean bound = false; // boolean bracketed = false; // int cnt = 1; // if(newPt == null){ newPt = new double[3]; 
newPt[a] = // interpolate(bestPt,endPt);}// quadratic interp // do{ // say("."); // newPt[f] = dfunc.valueAt((plusAndConstMult(x, dir, newPt[a] , newX))); // newPt[g] = ArrayMath.innerProduct(dfunc.derivativeAt(newX),dir); // fevals += 1; // //If we have satisfied Wolfe... // //fNew <= f0 + small*aNew*g0 // //|gNew| <= 0.9999*|g0| // //return the point. // if( (newPt[f] <= f0 + ftol*newPt[a]*g0) && Math.abs(newPt[g]) <= -gtol*g0 // ){ // //Sweet, we found a point that satisfies the strong wolfe conditions!!! // lets return it. // return newPt; // }else{ // double signG = newPt[g]*bestPt[g]/Math.abs(bestPt[g]); // //Our new point has a higher function value // if( newPt[f] > bestPt[f]){ // info = 1; // bound = true; // theta = 3*(bestPt[f] - newPt[f])/(newPt[a] - bestPt[a]) + bestPt[g] + // newPt[g]; // s = Math.max(Math.max(theta,newPt[g]), bestPt[g]); // gamma = s*Math.sqrt( (theta/s)*(theta/s) - (bestPt[g]/s)*(newPt[g]/s) ); // if (newPt[a] < bestPt[a]){ // gamma = -gamma; // } // p = (gamma - bestPt[g]) + theta; // q = ((gamma-bestPt[g]) + gamma) + newPt[g]; // r = p/q; // stpc = bestPt[a] + r*(newPt[a] - bestPt[a]); // stpq = bestPt[a] + // ((bestPt[g]/((bestPt[f]-newPt[f])/(newPt[a]-bestPt[a])+bestPt[g]))/2)*(newPt[a] // - bestPt[a]); // if ( Math.abs(stpc-bestPt[a]) < Math.abs(stpq - bestPt[a] )){ // stpf = stpc; // } else{ // stpf = stpq; // //stpf = stpc + (stpq - stpc)/2; // } // bracketed = true; // if (newPt[a] < 0.1){ // stpf = 0.01*stpf; // } // } else if (signG < 0.0){ // info = 2; // bound = false; // theta = 3*(bestPt[f] - newPt[f])/(newPt[a] - bestPt[a]) + bestPt[g] + // newPt[g]; // s = Math.max(Math.max(theta,bestPt[g]),newPt[g]); // gamma = s*Math.sqrt((theta/s)*(theta/s) - (bestPt[g]/s)*(newPt[g]/s)); // if (newPt[a] > bestPt[a]) { // gamma = -gamma; // } // p = (gamma - newPt[g]) + theta; // q = ((gamma - newPt[g]) + gamma) + bestPt[g]; // r = p/q; // stpc = newPt[a] + r*(bestPt[a] - newPt[a]); // stpq = newPt[a] + 
(newPt[g]/(newPt[g]-bestPt[g]))*(bestPt[a] - newPt[a]); // if (Math.abs(stpc-newPt[a]) > Math.abs(stpq-newPt[a])){ // stpf = stpc; // } else { // stpf = stpq; // } // bracketed = true; // } else if ( Math.abs(newPt[g]) < Math.abs(bestPt[g])){ // info = 3; // bound = true; // theta = 3*(bestPt[f] - newPt[f])/(newPt[a] - bestPt[a]) + bestPt[g] + // newPt[g]; // s = Math.max(Math.max(theta,bestPt[g]),newPt[g]); // gamma = s*Math.sqrt(Math.max(0.0,(theta/s)*(theta/s) - // (bestPt[g]/s)*(newPt[g]/s))); // if (newPt[a] < bestPt[a]){ // gamma = -gamma; // } // p = (gamma - bestPt[g]) + theta; // q = ((gamma-bestPt[g]) + gamma) + newPt[g]; // r = p/q; // if (r < 0.0 && gamma != 0.0){ // stpc = newPt[a] + r*(bestPt[a] - newPt[a]); // } else if (newPt[a] > bestPt[a]){ // stpc = aMax; // } else{ // stpc = aMin; // } // stpq = newPt[a] + (newPt[g]/(newPt[g]-bestPt[g]))*(bestPt[a] - newPt[a]); // if(bracketed){ // if (Math.abs(newPt[a]-stpc) < Math.abs(newPt[a]-stpq)){ // stpf = stpc; // } else { // stpf = stpq; // } // } else{ // if (Math.abs(newPt[a]-stpc) > Math.abs(newPt[a]-stpq)){ // stpf = stpc; // } else { // stpf = stpq; // } // } // }else{ // info = 4; // bound = false; // if (bracketed){ // theta = 3*(bestPt[f] - newPt[f])/(newPt[a] - bestPt[a]) + bestPt[g] + // newPt[g]; // s = Math.max(Math.max(theta,bestPt[g]),newPt[g]); // gamma = s*Math.sqrt((theta/s)*(theta/s) - (bestPt[g]/s)*(newPt[g]/s)); // if (newPt[a] > bestPt[a]) { // gamma = -gamma; // } // p = (gamma - newPt[g]) + theta; // q = ((gamma - newPt[g]) + gamma) + bestPt[g]; // r = p/q; // stpc = newPt[a] + r*(bestPt[a] - newPt[a]); // stpf = stpc; // }else if( newPt[a] > bestPt[a]){ // stpf = aMax; // } else { // stpf = aMin; // } // } // //Reduce the interval of uncertainty // if (newPt[f] > bestPt[f]) { // copy(newPt,endPt); // }else{ // if (signG < 0.0){ // copy(bestPt,endPt); // } // copy(newPt,bestPt); // } // say("" + info ); // newPt[a] = stpf; // if(bracketed && bound){ // if (endPt[a] > bestPt[a]){ 
// newPt[a] = Math.min(bestPt[a]+p66*(endPt[a]-bestPt[a]),newPt[a]); // }else{ // newPt[a] = Math.max(bestPt[a]+p66*(endPt[a]-bestPt[a]),newPt[a]); // } // } // } // //Check to see if the step has reached an extreme. // newPt[a] = Math.max(aMin, newPt[a]); // newPt[a] = Math.min(aMax,newPt[a]); // if( newPt[a] == aMin || newPt[a] == aMax){ // return newPt; // } // cnt +=1; // if(fevals > maxFevals){ // throw new MaxEvaluationsExceeded(" Exceeded during zoom() Function ");} // }while(true); // } // private double[] zoom2(DiffFunction dfunc, double[] x, double[] dir, // double[] newX, double f0, double g0, double[] bestPoint, double[] endPoint) // throws MaxEvaluationsExceeded { // // double[] newPoint = new double[3]; // double width = Math.abs(bestPoint[a] - endPoint[a]); // double reduction = 0.0; // // int cnt = 1; // // //make sure the interval reduces enough. // //if(reduction >= 0.66){ // //say(" |" + nf.format(reduction)+"| "); // //newPoint[a] = 0.5*(bestPoint[a]+endPoint[a]); // //} else{ // newPoint[a] = interpolate(bestPoint,endPoint);// quadratic interp // //} // // do{ // //Check to see if the step has reached an extreme. // newPoint[a] = Math.max(aMin, newPoint[a]); // newPoint[a] = Math.min(aMax,newPoint[a]); // // newPoint[f] = dfunc.valueAt((plusAndConstMult(x, dir, newPoint[a] , // newX))); // newPoint[g] = ArrayMath.innerProduct(dfunc.derivativeAt(newX),dir); // fevals += 1; // // //fNew > f0 + small*aNew*g0 or fNew > fLow // if( (newPoint[f] > f0 + ftol*newPoint[a]*g0) || newPoint[f] > bestPoint[f] // ){ // //Our new point didn't beat the best point, so just reduce the interval // copy(newPoint,endPoint); // say(".");//say("l"); // }else{ // // //if |gNew| <= 0.9999*|g0| If gNew is slightly smaller than g0 // if( Math.abs(newPoint[g]) <= -gtol*g0 ){ // //Sweet, we found a point that satisfies the strong wolfe conditions!!! // lets return it. 
// return newPoint; // } // // //If we made it this far, we've found a point that has satisfied descent, // but hasn't satsified // //the decrease in gradient. if the new gradient is telling us >0 we need to // look behind us // //if the new gradient is negative still we can increase the step. // if(newPoint[g]*(endPoint[a] - bestPoint[a] ) >= 0){ // //Get going the right way. // say(".");//say("f"); // copy(bestPoint,endPoint); // } // // if( (Math.abs(newPoint[a]-bestPoint[a]) < 1e-6) || // (Math.abs(newPoint[a]-endPoint[a]) < 1e-6) ){ // //Not moving fast enough. // sayln("had to improvise a bit"); // newPoint[a] = 0.5*(bestPoint[a] + endPoint[a]); // } // // say(".");//say("r"); // copy(newPoint,bestPoint); // } // // // if( newPoint[a] == aMin || newPoint[a] == aMax){ // return newPoint; // } // // reduction = Math.abs(bestPoint[a] - endPoint[a]) / width; // width = Math.abs(bestPoint[a] - endPoint[a]); // // cnt +=1; // // // //if(Math.abs(bestPoint[a] -endPoint[a]) < 1e-12 ){ // //sayln(); // //sayln("!!!!!!!!!!!!!!!!!!"); // //sayln("points are too close"); // //sayln("!!!!!!!!!!!!!!!!!!"); // //sayln("f0 " + nf.format(f0)); // //sayln("f0+crap " + nf.format(f0 + cVal*bestPoint[a]*g0)); // //sayln("g0 " + nf.format(g0)); // //sayln("ptLow"); // //printPt(bestPoint); // //sayln(); // //sayln("ptHigh"); // //printPt(endPoint); // //sayln(); // // //DiffFunctionTester.test(dfunc, x,1e-4); // //System.exit(1); // ////return dfunc.valueAt((plusAndConstMult(x, dir, aMin , newX))); // //} // // //if( (cnt > 20) ){ // // //sayln("!!!!!!!!!!!!!!!!!!"); // //sayln("! " + cnt + " iterations. 
I think we're out of luck"); // //sayln("!!!!!!!!!!!!!!!!!!"); // //sayln("f0" + nf.format(f0)); // //sayln("f0+crap" + nf.format(f0 + cVal*bestPoint[a]*g0)); // //sayln("g0 " + nf.format(g0)); // //sayln("bestPoint"); // //printPt(bestPoint); // //sayln(); // //sayln("ptHigh"); // //printPt(endPoint); // //sayln(); // // // // ////if( cnt > 25 || fevals > maxFevals){ // ////System.err.println("Max evaluations exceeded."); // ////System.exit(1); // ////return dfunc.valueAt((plusAndConstMult(x, dir, aMin , newX))); // ////} // //} // // if(fevals > maxFevals){ throw new MaxEvaluationsExceeded(" Exceeded during // zoom() Function ");} // // }while(true); // // } // // private double lineSearchNocedal(DiffFunction dfunc, double[] dir, double[] // x, double[] newX, double[] grad, double f0, int maxEvals){ // // boolean bracketed = false; // boolean stage1 = false; // double width = aMax - aMin; // double width1 = 2*width; // double stepMin = 0.0; // double stepMax = 0.0; // double xtrapf = 4.0; // int nFevals = 0; // double TOL = 1e-4; // double X_TOL = 1e-8; // int info = 0; // int infoc = 1; // // double g0 = ArrayMath.innerProduct(grad,dir); // if(g0 > 0){ // //We're looking in a direction of positive gradient. This wont' work. // //set dir = -grad // plusAndConstMult(new double[x.length],grad,-1,dir); // g0 = ArrayMath.innerProduct(grad,dir); // System.err.println("Searching in direction of positive gradient."); // } // say("(" + nf.format(g0) + ")"); // // // double[] newPt = new double[3]; // double[] bestPt = new double[3]; // double[] endPt = new double[3]; // // newPt[a] = 1.0; //Always guess 1 first, this should be right if the // function is "nice" and BFGS is working. // // if(its == 1){ // newPt[a] = 1e-6; // Guess low at first since we have no idea of scale. 
// } // // bestPt[a] = 0.0; // bestPt[f] = f0; // bestPt[g] = g0; // // endPt[a] = 0.0; // endPt[f] = f0; // endPt[g] = g0; // // int cnt = 0; // // do{ // //Determine the max and min step size given what we know already. // if(bracketed){ // stepMin = Math.min(bestPt[a], endPt[a]); // stepMax = Math.max(bestPt[a], endPt[a]); // } else{ // stepMin = bestPt[a]; // stepMax = newPt[a] + xtrapf*(newPt[a] - bestPt[a]); // } // // //Make sure our next guess is within the bounds // newPt[a] = Math.max(newPt[a], stepMin); // newPt[a] = Math.min(newPt[a], stepMax); // // if( (bracketed && (newPt[a] <= stepMin || newPt[a] >= stepMax) ) // || nFevals > maxEvals || (bracketed & (stepMax-stepMin) <= TOL*stepMax)){ // System.err.println("Linesearch for QN, Need to make srue that newX is set // before returning bestPt. -akleeman"); // System.exit(1); // return bestPt[f]; // } // // // newPt[f] = dfunc.valueAt((plusAndConstMult(x, dir, newPt[a], newX))); // newPt[g] = ArrayMath.innerProduct(dfunc.derivativeAt(newX),dir); // nFevals += 1; // // double fTest = f0 + newPt[a]*g0; // // System.err.println("fTest " + fTest + " new" + newPt[a] + " newf" + // newPt[f] + " newg" + newPt[g] ); // // if( ( bracketed && (newPt[a] <= stepMin | newPt[a] >= stepMax )) || infoc // == 0){ // info = 6; // } // // if( newPt[a] == stepMax && ( newPt[f] <= fTest || newPt[g] >= ftol*g0 )){ // info = 5; // } // // if( (newPt[a] == stepMin && ( newPt[f] > fTest || newPt[g] >= ftol*g0 ) )){ // info = 4; // } // // if( (nFevals >= maxEvals)){ // info = 3; // } // // if( bracketed && stepMax-stepMin <= X_TOL*stepMax){ // info = 2; // } // // if( (newPt[f] <= fTest) && (Math.abs(newPt[g]) <= - gtol*g0) ){ // info = 1; // } // // if(info != 0){ // return newPt[f]; // } // // if(stage1 && newPt[f]< fTest && newPt[g] >= ftol*g0){ // stage1 = false; // } // // // if( stage1 && f<= bestPt[f] && f > fTest){ // // double[] newPtMod = new double[3]; // double[] bestPtMod = new double[3]; // double[] endPtMod = new 
double[3]; // // newPtMod[f] = newPt[f] - newPt[a]*ftol*g0; // newPtMod[g] = newPt[g] - ftol*g0; // bestPtMod[f] = bestPt[f] - bestPt[a]*ftol*g0; // bestPtMod[g] = bestPt[g] - ftol*g0; // endPtMod[f] = endPt[f] - endPt[a]*ftol*g0; // endPtMod[g] = endPt[g] - ftol*g0; // // //this.cstep(newPtMod, bestPtMod, endPtMod, bracketed); // // bestPt[f] = bestPtMod[f] + bestPt[a]*ftol*g0; // bestPt[g] = bestPtMod[g] + ftol*g0; // endPt[f] = endPtMod[f] + endPt[a]*ftol*g0; // endPt[g] = endPtMod[g] + ftol*g0; // // }else{ // //this.cstep(newPt, bestPt, endPt, bracketed); // } // // double p66 = 0.66; // double p5 = 0.5; // // if(bracketed){ // if ( Math.abs(endPt[a] - bestPt[a]) >= p66*width1){ // newPt[a] = bestPt[a] + p5*(endPt[a]-bestPt[a]); // } // width1 = width; // width = Math.abs(endPt[a]-bestPt[a]); // } // // // // }while(true); // // } // // private double cstepBackup( double[] newPt, double[] bestPt, double[] // endPt, boolean bracketed ){ // // double p66 = 0.66; // int info = 0; // double stpf; // double theta,gamma,s,p,q,r,stpc,stpq; // boolean bound = false; // // double signG = newPt[g]*bestPt[g]/Math.abs(bestPt[g]); // // // //Our new point has a higher function value // if( newPt[f] > bestPt[f]){ // info = 1; // bound = true; // theta = 3*(bestPt[f] - newPt[f])/(newPt[a] - bestPt[a]) + bestPt[g] + // newPt[g]; // s = Math.max(Math.max(theta,newPt[g]), bestPt[g]); // gamma = s*Math.sqrt( (theta/s)*(theta/s) - (bestPt[g]/s)*(newPt[g]/s) ); // if (newPt[a] < bestPt[a]){ // gamma = -gamma; // } // p = (gamma - bestPt[g]) + theta; // q = ((gamma-bestPt[g]) + gamma) + newPt[g]; // r = p/q; // stpc = bestPt[a] + r*(newPt[a] - bestPt[a]); // stpq = bestPt[a] + // ((bestPt[g]/((bestPt[f]-newPt[f])/(newPt[a]-bestPt[a])+bestPt[g]))/2)*(newPt[a] // - bestPt[a]); // // if ( Math.abs(stpc-bestPt[a]) < Math.abs(stpq - bestPt[a] )){ // stpf = stpc; // } else{ // stpf = stpc + (stpq - stpc)/2; // } // bracketed = true; // // } else if (signG < 0.0){ // // info = 2; // bound 
= false; // theta = 3*(bestPt[f] - newPt[f])/(newPt[a] - bestPt[a]) + bestPt[g] + // newPt[g]; // s = Math.max(Math.max(theta,bestPt[g]),newPt[g]); // gamma = s*Math.sqrt((theta/s)*(theta/s) - (bestPt[g]/s)*(newPt[g]/s)); // if (newPt[a] > bestPt[a]) { // gamma = -gamma; // } // p = (gamma - newPt[g]) + theta; // q = ((gamma - newPt[g]) + gamma) + bestPt[g]; // r = p/q; // stpc = newPt[a] + r*(bestPt[a] - newPt[a]); // stpq = newPt[a] + (newPt[g]/(newPt[g]-bestPt[g]))*(bestPt[a] - newPt[a]); // if (Math.abs(stpc-newPt[a]) > Math.abs(stpq-newPt[a])){ // stpf = stpc; // } else { // stpf = stpq; // } // bracketed = true; // } else if ( Math.abs(newPt[g]) < Math.abs(bestPt[g])){ // info = 3; // bound = true; // theta = 3*(bestPt[f] - newPt[f])/(newPt[a] - bestPt[a]) + bestPt[g] + // newPt[g]; // s = Math.max(Math.max(theta,bestPt[g]),newPt[g]); // gamma = s*Math.sqrt(Math.max(0.0,(theta/s)*(theta/s) - // (bestPt[g]/s)*(newPt[g]/s))); // if (newPt[a] < bestPt[a]){ // gamma = -gamma; // } // p = (gamma - bestPt[g]) + theta; // q = ((gamma-bestPt[g]) + gamma) + newPt[g]; // r = p/q; // if (r < 0.0 && gamma != 0.0){ // stpc = newPt[a] + r*(bestPt[a] - newPt[a]); // } else if (newPt[a] > bestPt[a]){ // stpc = aMax; // } else{ // stpc = aMin; // } // stpq = newPt[a] + (newPt[g]/(newPt[g]-bestPt[g]))*(bestPt[a] - newPt[a]); // if (bracketed){ // if (Math.abs(newPt[a]-stpc) < Math.abs(newPt[a]-stpq)){ // stpf = stpc; // } else { // stpf = stpq; // } // } else { // if (Math.abs(newPt[a]-stpc) > Math.abs(newPt[a]-stpq)){ // System.err.println("modified to take only quad"); // stpf = stpq; // }else{ // stpf = stpq; // } // } // // // }else{ // info = 4; // bound = false; // // if(bracketed){ // theta = 3*(bestPt[f] - newPt[f])/(newPt[a] - bestPt[a]) + bestPt[g] + // newPt[g]; // s = Math.max(Math.max(theta,bestPt[g]),newPt[g]); // gamma = s*Math.sqrt((theta/s)*(theta/s) - (bestPt[g]/s)*(newPt[g]/s)); // if (newPt[a] > bestPt[a]) { // gamma = -gamma; // } // p = (gamma - newPt[g]) 
+ theta; // q = ((gamma - newPt[g]) + gamma) + bestPt[g]; // r = p/q; // stpc = newPt[a] + r*(bestPt[a] - newPt[a]); // stpf = stpc; // }else if (newPt[a] > bestPt[a]){ // stpf = aMax; // }else{ // stpf = aMin; // } // // } // // // if (newPt[f] > bestPt[f]) { // copy(newPt,endPt); // }else{ // if (signG < 0.0){ // copy(bestPt,endPt); // } // copy(newPt,bestPt); // } // // stpf = Math.min(aMax,stpf); // stpf = Math.max(aMin,stpf); // newPt[a] = stpf; // if (bracketed & bound){ // if (endPt[a] > bestPt[a]){ // newPt[a] = Math.min(bestPt[a]+p66*(endPt[a]-bestPt[a]),newPt[a]); // }else{ // newPt[a] = Math.max(bestPt[a]+p66*(endPt[a]-bestPt[a]),newPt[a]); // } // } // // //newPt[f] = // System.err.println("cstep " + nf.format(newPt[a]) + " info " + info); // return newPt[a]; // // } }
gpl-2.0