gt stringclasses 1
value | context stringlengths 2.05k 161k |
|---|---|
/*******************************************************************************
* Copyright 2011 See AUTHORS file.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.badlogic.gdx.backends.headless.mock.graphics;
import com.badlogic.gdx.Application;
import com.badlogic.gdx.Graphics;
import com.badlogic.gdx.graphics.Cursor;
import com.badlogic.gdx.graphics.GL20;
import com.badlogic.gdx.graphics.GL30;
import com.badlogic.gdx.graphics.Pixmap;
import com.badlogic.gdx.graphics.Cursor.SystemCursor;
import com.badlogic.gdx.graphics.glutils.GLVersion;
/** The headless backend does its best to mock elements. This is intended to make code-sharing between
* server and client as simple as possible.
*/
public class MockGraphics implements Graphics {
long frameId = -1;
float deltaTime = 0;
long frameStart = 0;
int frames = 0;
int fps;
long lastTime = System.nanoTime();
GLVersion glVersion = new GLVersion(Application.ApplicationType.HeadlessDesktop, "", "", "");
@Override
public boolean isGL30Available() {
return false;
}
@Override
public GL20 getGL20() {
return null;
}
@Override
public void setGL20 (GL20 gl20) {
}
@Override
public GL30 getGL30() {
return null;
}
@Override
public void setGL30 (GL30 gl30) {
}
@Override
public int getWidth() {
return 0;
}
@Override
public int getHeight() {
return 0;
}
@Override
public int getBackBufferWidth() {
return 0;
}
@Override
public int getBackBufferHeight() {
return 0;
}
@Override
public long getFrameId() {
return frameId;
}
@Override
public float getDeltaTime() {
return deltaTime;
}
@Override
public float getRawDeltaTime() {
return 0;
}
@Override
public int getFramesPerSecond() {
return fps;
}
@Override
public GraphicsType getType() {
return GraphicsType.Mock;
}
@Override
public GLVersion getGLVersion () {
return glVersion;
}
@Override
public float getPpiX() {
return 0;
}
@Override
public float getPpiY() {
return 0;
}
@Override
public float getPpcX() {
return 0;
}
@Override
public float getPpcY() {
return 0;
}
@Override
public float getDensity() {
return 0;
}
@Override
public boolean supportsDisplayModeChange() {
return false;
}
@Override
public DisplayMode[] getDisplayModes() {
return new DisplayMode[0];
}
@Override
public DisplayMode getDisplayMode() {
return null;
}
@Override
public int getSafeInsetLeft() {
return 0;
}
@Override
public int getSafeInsetTop() {
return 0;
}
@Override
public int getSafeInsetBottom() {
return 0;
}
@Override
public int getSafeInsetRight() {
return 0;
}
@Override
public boolean setFullscreenMode(DisplayMode displayMode) {
return false;
}
@Override
public boolean setWindowedMode(int width, int height) {
return false;
}
@Override
public void setTitle(String title) {
}
@Override
public void setVSync(boolean vsync) {
}
@Override
public BufferFormat getBufferFormat() {
return null;
}
@Override
public boolean supportsExtension(String extension) {
return false;
}
@Override
public void setContinuousRendering(boolean isContinuous) {
}
@Override
public boolean isContinuousRendering() {
return false;
}
@Override
public void requestRendering() {
}
@Override
public boolean isFullscreen() {
return false;
}
public void updateTime () {
long time = System.nanoTime();
deltaTime = (time - lastTime) / 1000000000.0f;
lastTime = time;
if (time - frameStart >= 1000000000) {
fps = frames;
frames = 0;
frameStart = time;
}
frames++;
}
public void incrementFrameId () {
frameId++;
}
@Override
public Cursor newCursor (Pixmap pixmap, int xHotspot, int yHotspot) {
return null;
}
@Override
public void setCursor (Cursor cursor) {
}
@Override
public void setSystemCursor (SystemCursor systemCursor) {
}
@Override
public Monitor getPrimaryMonitor() {
return null;
}
@Override
public Monitor getMonitor() {
return null;
}
@Override
public Monitor[] getMonitors() {
return null;
}
@Override
public DisplayMode[] getDisplayModes(Monitor monitor) {
return null;
}
@Override
public DisplayMode getDisplayMode(Monitor monitor) {
return null;
}
@Override
public void setUndecorated(boolean undecorated) {
}
@Override
public void setResizable(boolean resizable) {
}
}
| |
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.utils.nio;
import com.cloud.utils.concurrency.NamedThreadFactory;
import com.cloud.utils.exception.NioConnectionException;
import org.apache.cloudstack.utils.security.SSLUtils;
import org.apache.log4j.Logger;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;
import java.io.IOException;
import java.net.ConnectException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.nio.channels.CancelledKeyException;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.ClosedSelectorException;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import static com.cloud.utils.AutoCloseableUtil.closeAutoCloseable;
/**
* NioConnection abstracts the NIO socket operations. The Java implementation
* provides that.
*/
/**
 * NioConnection abstracts the NIO socket operations. The Java implementation
 * provides that.
 *
 * <p>Threading model: {@link #start()} submits {@link #call()} to a dedicated
 * single-thread executor; that selector thread multiplexes accept/connect/read/write
 * events and hands completed work to {@code _executor}. SSL handshakes for accepted
 * connections run on {@code _sslHandshakeExecutor} so a slow client cannot stall the
 * selector loop. Requests from other threads (register/change/close) are queued as
 * {@link ChangeRequest}s and drained by the selector thread in {@link #processTodos()}.
 */
public abstract class NioConnection implements Callable<Boolean> {
    private static final Logger s_logger = Logger.getLogger(NioConnection.class);

    protected Selector _selector;
    protected ExecutorService _threadExecutor;
    protected Future<Boolean> _futureTask;

    // Written by the threads calling start()/stop() and read by the selector thread's
    // call() loop: volatile is required for the loop to observe the change promptly.
    protected volatile boolean _isRunning;
    protected volatile boolean _isStartup;

    protected int _port;
    protected List<ChangeRequest> _todos;
    protected HandlerFactory _factory;
    protected String _name;
    protected ExecutorService _executor;
    protected ExecutorService _sslHandshakeExecutor;

    /**
     * @param name    used to label the worker/handshake/selector threads
     * @param port    remote or listen port, depending on the concrete subclass
     * @param workers core size of the task-handler pool (max is 5x this)
     * @param factory creates the {@link Task}s dispatched for connect/data/disconnect events
     */
    public NioConnection(final String name, final int port, final int workers, final HandlerFactory factory) {
        _name = name;
        _isRunning = false;
        _selector = null;
        _port = port;
        _factory = factory;
        _executor = new ThreadPoolExecutor(workers, 5 * workers, 1, TimeUnit.DAYS, new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory(name + "-Handler"));
        _sslHandshakeExecutor = Executors.newCachedThreadPool(new NamedThreadFactory(name + "-SSLHandshakeHandler"));
    }

    /**
     * Initializes the subclass (via {@link #init()}) and launches the selector loop.
     * A plain {@link ConnectException} is treated as "peer not up yet" and only logged;
     * any other failure is wrapped in a {@link NioConnectionException}.
     *
     * @throws NioConnectionException if initialization fails for a reason other than
     *         the remote side being unreachable
     */
    public void start() throws NioConnectionException {
        _todos = new ArrayList<ChangeRequest>();

        try {
            init();
        } catch (final ConnectException e) {
            s_logger.warn("Unable to connect to remote: is there a server running on port " + _port);
            return; // best-effort: caller may retry; note _futureTask stays null here
        } catch (final IOException e) {
            s_logger.error("Unable to initialize the threads.", e);
            throw new NioConnectionException(e.getMessage(), e);
        } catch (final Exception e) {
            s_logger.error("Unable to initialize the threads due to unknown exception.", e);
            throw new NioConnectionException(e.getMessage(), e);
        }
        _isStartup = true;

        _threadExecutor = Executors.newSingleThreadExecutor(new NamedThreadFactory(this._name + "-NioConnectionHandler"));
        _isRunning = true;
        _futureTask = _threadExecutor.submit(this);
    }

    /** Stops the selector loop and shuts down all owned executors. Idempotent. */
    public void stop() {
        _executor.shutdown();
        // Previously leaked: the SSL handshake pool was never shut down, keeping its
        // threads (and the JVM, if non-daemon) alive after stop().
        _sslHandshakeExecutor.shutdown();
        _isRunning = false;
        if (_threadExecutor != null) {
            _futureTask.cancel(false);
            _threadExecutor.shutdown();
        }
    }

    /** @return true while the selector loop task has been started and has not finished */
    public boolean isRunning() {
        // Guard against NPE when start() was never called, or bailed out early on
        // ConnectException (in which case _futureTask was never assigned).
        return _futureTask != null && !_futureTask.isDone();
    }

    public boolean isStartup() {
        return _isStartup;
    }

    /**
     * The selector loop: waits up to 100ms per iteration, dispatches ready keys to
     * accept/connect/read/write, then drains queued {@link ChangeRequest}s.
     *
     * @throws NioConnectionException on an unrecoverable I/O error in the loop
     */
    @Override
    public Boolean call() throws NioConnectionException {
        while (_isRunning) {
            try {
                _selector.select(100);

                // Someone is ready for I/O, get the ready keys
                final Set<SelectionKey> readyKeys = _selector.selectedKeys();
                final Iterator<SelectionKey> i = readyKeys.iterator();

                if (s_logger.isTraceEnabled()) {
                    s_logger.trace("Keys Processing: " + readyKeys.size());
                }
                // Walk through the ready keys collection.
                while (i.hasNext()) {
                    final SelectionKey sk = i.next();
                    i.remove();

                    if (!sk.isValid()) {
                        if (s_logger.isTraceEnabled()) {
                            s_logger.trace("Selection Key is invalid: " + sk.toString());
                        }
                        final Link link = (Link)sk.attachment();
                        if (link != null) {
                            link.terminated();
                        } else {
                            closeConnection(sk);
                        }
                    } else if (sk.isReadable()) {
                        read(sk);
                    } else if (sk.isWritable()) {
                        write(sk);
                    } else if (sk.isAcceptable()) {
                        accept(sk);
                    } else if (sk.isConnectable()) {
                        connect(sk);
                    }
                }

                s_logger.trace("Keys Done Processing.");

                processTodos();
            } catch (final ClosedSelectorException e) {
                /*
                 * Exception occurred when calling java.nio.channels.Selector.selectedKeys() method. It means the connection has not yet been established. Let's continue trying
                 * We do not log it here otherwise we will fill the disk with messages.
                 */
            } catch (final IOException e) {
                s_logger.error("Agent will die due to this IOException!", e);
                throw new NioConnectionException(e.getMessage(), e);
            }
        }
        _isStartup = false;
        return true;
    }

    abstract void init() throws IOException;

    abstract void registerLink(InetSocketAddress saddr, Link link);

    abstract void unregisterLink(InetSocketAddress saddr);

    /**
     * Accepts an inbound connection and offloads the (blocking) SSL handshake to
     * {@code _sslHandshakeExecutor}; on success the channel is registered for OP_READ
     * and a CONNECT task is dispatched. Failures close the socket quietly at trace level.
     */
    protected void accept(final SelectionKey key) throws IOException {
        final ServerSocketChannel serverSocketChannel = (ServerSocketChannel)key.channel();

        final SocketChannel socketChannel = serverSocketChannel.accept();
        socketChannel.configureBlocking(false);

        final Socket socket = socketChannel.socket();
        socket.setKeepAlive(true);

        if (s_logger.isTraceEnabled()) {
            s_logger.trace("Connection accepted for " + socket);
        }

        final SSLEngine sslEngine;
        try {
            final SSLContext sslContext = Link.initSSLContext(false);
            sslEngine = sslContext.createSSLEngine();
            sslEngine.setUseClientMode(false);
            sslEngine.setNeedClientAuth(false);
            sslEngine.setEnabledProtocols(SSLUtils.getSupportedProtocols(sslEngine.getEnabledProtocols()));

            final NioConnection nioConnection = this;
            _sslHandshakeExecutor.submit(new Runnable() {
                @Override
                public void run() {
                    // Wake the selector so it does not block registration below.
                    _selector.wakeup();
                    try {
                        sslEngine.beginHandshake();
                        if (!Link.doHandshake(socketChannel, sslEngine, false)) {
                            throw new IOException("SSL handshake timed out with " + socketChannel.getRemoteAddress());
                        }
                        if (s_logger.isTraceEnabled()) {
                            s_logger.trace("SSL: Handshake done");
                        }
                        final InetSocketAddress saddr = (InetSocketAddress)socket.getRemoteSocketAddress();
                        final Link link = new Link(saddr, nioConnection);
                        link.setSSLEngine(sslEngine);
                        link.setKey(socketChannel.register(key.selector(), SelectionKey.OP_READ, link));
                        final Task task = _factory.create(Task.Type.CONNECT, link, null);
                        registerLink(saddr, link);
                        _executor.submit(task);
                    } catch (IOException e) {
                        if (s_logger.isTraceEnabled()) {
                            s_logger.trace("Connection closed due to failure: " + e.getMessage());
                        }
                        closeAutoCloseable(socket, "accepting socket");
                        closeAutoCloseable(socketChannel, "accepting socketChannel");
                    } finally {
                        _selector.wakeup();
                    }
                }
            });
        } catch (final Exception e) {
            if (s_logger.isTraceEnabled()) {
                s_logger.trace("Connection closed due to failure: " + e.getMessage());
            }
            closeAutoCloseable(socket, "accepting socket");
            closeAutoCloseable(socketChannel, "accepting socketChannel");
        } finally {
            _selector.wakeup();
        }
    }

    /** Closes the key's channel, notifies the link, and dispatches a DISCONNECT task. */
    protected void terminate(final SelectionKey key) {
        final Link link = (Link)key.attachment();
        closeConnection(key);
        if (link != null) {
            link.terminated();
            final Task task = _factory.create(Task.Type.DISCONNECT, link, null);
            unregisterLink(link.getSocketAddress());
            try {
                _executor.submit(task);
            } catch (final Exception e) {
                s_logger.warn("Exception occurred when submitting the task", e);
            }
        }
    }

    /**
     * Reads from the channel via the link; a null return means the packet is not yet
     * complete. A completed packet is dispatched as a DATA task. Any failure
     * terminates the connection.
     */
    protected void read(final SelectionKey key) throws IOException {
        final Link link = (Link)key.attachment();
        try {
            final SocketChannel socketChannel = (SocketChannel)key.channel();
            if (s_logger.isTraceEnabled()) {
                s_logger.trace("Reading from: " + socketChannel.socket().toString());
            }
            final byte[] data = link.read(socketChannel);
            if (data == null) {
                if (s_logger.isTraceEnabled()) {
                    s_logger.trace("Packet is incomplete. Waiting for more.");
                }
                return;
            }
            final Task task = _factory.create(Task.Type.DATA, link, data);
            try {
                _executor.submit(task);
            } catch (final Exception e) {
                s_logger.warn("Exception occurred when submitting the task", e);
            }
        } catch (final Exception e) {
            logDebug(e, key, 1);
            terminate(key);
        }
    }

    /** Trace-level diagnostic for a socket closed during I/O; {@code loc} identifies the call site. */
    protected void logTrace(final Exception e, final SelectionKey key, final int loc) {
        if (s_logger.isTraceEnabled()) {
            Socket socket = null;
            if (key != null) {
                final SocketChannel ch = (SocketChannel)key.channel();
                if (ch != null) {
                    socket = ch.socket();
                }
            }

            s_logger.trace("Location " + loc + ": Socket " + socket + " closed on read. Probably -1 returned.");
        }
    }

    /** Debug-level diagnostic for a socket closed during I/O; {@code loc} identifies the call site. */
    protected void logDebug(final Exception e, final SelectionKey key, final int loc) {
        if (s_logger.isDebugEnabled()) {
            Socket socket = null;
            if (key != null) {
                final SocketChannel ch = (SocketChannel)key.channel();
                if (ch != null) {
                    socket = ch.socket();
                }
            }

            s_logger.debug("Location " + loc + ": Socket " + socket + " closed on read. Probably -1 returned: " + e.getMessage());
        }
    }

    /**
     * Drains the pending {@link ChangeRequest} queue (swap-under-lock, process outside
     * the lock) and applies each request: interest-op change, registration, or close.
     * Runs only on the selector thread.
     */
    protected void processTodos() {
        List<ChangeRequest> todos;
        if (_todos.size() == 0) {
            return; // Nothing to do.
        }

        synchronized (this) {
            todos = _todos;
            _todos = new ArrayList<ChangeRequest>();
        }

        if (s_logger.isTraceEnabled()) {
            s_logger.trace("Todos Processing: " + todos.size());
        }
        SelectionKey key;
        for (final ChangeRequest todo : todos) {
            switch (todo.type) {
            case ChangeRequest.CHANGEOPS:
                try {
                    key = (SelectionKey)todo.key;
                    if (key != null && key.isValid()) {
                        if (todo.att != null) {
                            key.attach(todo.att);
                            final Link link = (Link)todo.att;
                            link.setKey(key);
                        }
                        key.interestOps(todo.ops);
                    }
                } catch (final CancelledKeyException e) {
                    s_logger.debug("key has been cancelled");
                }
                break;
            case ChangeRequest.REGISTER:
                try {
                    key = ((SocketChannel)todo.key).register(_selector, todo.ops, todo.att);
                    if (todo.att != null) {
                        final Link link = (Link)todo.att;
                        link.setKey(key);
                    }
                } catch (final ClosedChannelException e) {
                    s_logger.warn("Couldn't register socket: " + todo.key);
                    try {
                        ((SocketChannel)todo.key).close();
                    } catch (final IOException ignore) {
                        s_logger.info("[ignored] socket channel");
                    } finally {
                        final Link link = (Link)todo.att;
                        link.terminated();
                    }
                }
                break;
            case ChangeRequest.CLOSE:
                if (s_logger.isTraceEnabled()) {
                    s_logger.trace("Trying to close " + todo.key);
                }
                key = (SelectionKey)todo.key;
                closeConnection(key);
                if (key != null) {
                    final Link link = (Link)key.attachment();
                    if (link != null) {
                        link.terminated();
                    }
                }
                break;
            default:
                s_logger.warn("Shouldn't be here");
                throw new RuntimeException("Shouldn't be here");
            }
        }
        s_logger.trace("Todos Done processing");
    }

    /**
     * Completes an outbound connect, switches the key to OP_READ, wraps the socket in a
     * {@link Link}, and dispatches a CONNECT task. I/O failure terminates the key.
     */
    protected void connect(final SelectionKey key) throws IOException {
        final SocketChannel socketChannel = (SocketChannel)key.channel();

        try {
            socketChannel.finishConnect();
            key.interestOps(SelectionKey.OP_READ);
            final Socket socket = socketChannel.socket();
            if (!socket.getKeepAlive()) {
                socket.setKeepAlive(true);
            }
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Connected to " + socket);
            }
            final Link link = new Link((InetSocketAddress)socket.getRemoteSocketAddress(), this);
            link.setKey(key);
            key.attach(link);
            final Task task = _factory.create(Task.Type.CONNECT, link, null);
            try {
                _executor.submit(task);
            } catch (final Exception e) {
                s_logger.warn("Exception occurred when submitting the task", e);
            }
        } catch (final IOException e) {
            logTrace(e, key, 2);
            terminate(key);
        }
    }

    /** Submits a task to the handler pool, logging (not propagating) submission failures. */
    protected void scheduleTask(final Task task) {
        try {
            _executor.submit(task);
        } catch (final Exception e) {
            s_logger.warn("Exception occurred when submitting the task", e);
        }
    }

    /**
     * Flushes the link's pending writes; if the link reports it should close, the
     * connection is torn down, otherwise interest reverts to OP_READ.
     */
    protected void write(final SelectionKey key) throws IOException {
        final Link link = (Link)key.attachment();
        try {
            if (s_logger.isTraceEnabled()) {
                s_logger.trace("Writing to " + link.getSocketAddress().toString());
            }
            final boolean close = link.write((SocketChannel)key.channel());
            if (close) {
                closeConnection(key);
                link.terminated();
            } else {
                key.interestOps(SelectionKey.OP_READ);
            }
        } catch (final Exception e) {
            logDebug(e, key, 3);
            terminate(key);
        }
    }

    /** Cancels the key and closes its channel; close failures are logged and ignored. */
    protected void closeConnection(final SelectionKey key) {
        if (key != null) {
            final SocketChannel channel = (SocketChannel)key.channel();
            key.cancel();
            try {
                if (channel != null) {
                    if (s_logger.isDebugEnabled()) {
                        s_logger.debug("Closing socket " + channel.socket());
                    }
                    channel.close();
                }
            } catch (final IOException ignore) {
                s_logger.info("[ignored] channel");
            }
        }
    }

    /** Queues a channel registration for the selector thread and wakes the selector. */
    public void register(final int ops, final SocketChannel key, final Object att) {
        final ChangeRequest todo = new ChangeRequest(key, ChangeRequest.REGISTER, ops, att);
        synchronized (this) {
            _todos.add(todo);
        }
        _selector.wakeup();
    }

    /** Queues an interest-ops change for the selector thread and wakes the selector. */
    public void change(final int ops, final SelectionKey key, final Object att) {
        final ChangeRequest todo = new ChangeRequest(key, ChangeRequest.CHANGEOPS, ops, att);
        synchronized (this) {
            _todos.add(todo);
        }
        _selector.wakeup();
    }

    /** Queues a connection close for the selector thread and wakes the selector. */
    public void close(final SelectionKey key) {
        final ChangeRequest todo = new ChangeRequest(key, ChangeRequest.CLOSE, 0, null);
        synchronized (this) {
            _todos.add(todo);
        }
        _selector.wakeup();
    }

    /* Release the resource used by the instance */
    public void cleanUp() throws IOException {
        if (_selector != null) {
            _selector.close();
        }
    }

    /** A deferred selector operation queued by another thread for the selector thread. */
    public class ChangeRequest {
        public static final int REGISTER = 1;
        public static final int CHANGEOPS = 2;
        public static final int CLOSE = 3;

        public Object key;  // SocketChannel (REGISTER) or SelectionKey (CHANGEOPS/CLOSE)
        public int type;    // one of REGISTER/CHANGEOPS/CLOSE
        public int ops;     // interest ops to apply (unused for CLOSE)
        public Object att;  // Link attachment, may be null

        public ChangeRequest(final Object key, final int type, final int ops, final Object att) {
            this.key = key;
            this.type = type;
            this.ops = ops;
            this.att = att;
        }
    }
}
| |
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: google/cloud/vision/v1p4beta1/product_search_service.proto
package com.google.cloud.vision.v1p4beta1;
/**
*
*
* <pre>
* Request message for the `UpdateProductSet` method.
* </pre>
*
* Protobuf type {@code google.cloud.vision.v1p4beta1.UpdateProductSetRequest}
*/
public final class UpdateProductSetRequest extends com.google.protobuf.GeneratedMessageV3
implements
// @@protoc_insertion_point(message_implements:google.cloud.vision.v1p4beta1.UpdateProductSetRequest)
UpdateProductSetRequestOrBuilder {
private static final long serialVersionUID = 0L;
// Use UpdateProductSetRequest.newBuilder() to construct.
private UpdateProductSetRequest(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private UpdateProductSetRequest() {}
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
return this.unknownFields;
}
private UpdateProductSetRequest(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10:
{
com.google.cloud.vision.v1p4beta1.ProductSet.Builder subBuilder = null;
if (productSet_ != null) {
subBuilder = productSet_.toBuilder();
}
productSet_ =
input.readMessage(
com.google.cloud.vision.v1p4beta1.ProductSet.parser(), extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(productSet_);
productSet_ = subBuilder.buildPartial();
}
break;
}
case 18:
{
com.google.protobuf.FieldMask.Builder subBuilder = null;
if (updateMask_ != null) {
subBuilder = updateMask_.toBuilder();
}
updateMask_ =
input.readMessage(com.google.protobuf.FieldMask.parser(), extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(updateMask_);
updateMask_ = subBuilder.buildPartial();
}
break;
}
default:
{
if (!parseUnknownFieldProto3(input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
return com.google.cloud.vision.v1p4beta1.ProductSearchServiceProto
.internal_static_google_cloud_vision_v1p4beta1_UpdateProductSetRequest_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return com.google.cloud.vision.v1p4beta1.ProductSearchServiceProto
.internal_static_google_cloud_vision_v1p4beta1_UpdateProductSetRequest_fieldAccessorTable
.ensureFieldAccessorsInitialized(
com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest.class,
com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest.Builder.class);
}
public static final int PRODUCT_SET_FIELD_NUMBER = 1;
private com.google.cloud.vision.v1p4beta1.ProductSet productSet_;
/**
*
*
* <pre>
* The ProductSet resource which replaces the one on the server.
* </pre>
*
* <code>.google.cloud.vision.v1p4beta1.ProductSet product_set = 1;</code>
*/
public boolean hasProductSet() {
return productSet_ != null;
}
/**
*
*
* <pre>
* The ProductSet resource which replaces the one on the server.
* </pre>
*
* <code>.google.cloud.vision.v1p4beta1.ProductSet product_set = 1;</code>
*/
public com.google.cloud.vision.v1p4beta1.ProductSet getProductSet() {
return productSet_ == null
? com.google.cloud.vision.v1p4beta1.ProductSet.getDefaultInstance()
: productSet_;
}
/**
*
*
* <pre>
* The ProductSet resource which replaces the one on the server.
* </pre>
*
* <code>.google.cloud.vision.v1p4beta1.ProductSet product_set = 1;</code>
*/
public com.google.cloud.vision.v1p4beta1.ProductSetOrBuilder getProductSetOrBuilder() {
return getProductSet();
}
public static final int UPDATE_MASK_FIELD_NUMBER = 2;
private com.google.protobuf.FieldMask updateMask_;
/**
*
*
* <pre>
* The [FieldMask][google.protobuf.FieldMask] that specifies which fields to
* update.
* If update_mask isn't specified, all mutable fields are to be updated.
* Valid mask path is `display_name`.
* </pre>
*
* <code>.google.protobuf.FieldMask update_mask = 2;</code>
*/
public boolean hasUpdateMask() {
return updateMask_ != null;
}
/**
*
*
* <pre>
* The [FieldMask][google.protobuf.FieldMask] that specifies which fields to
* update.
* If update_mask isn't specified, all mutable fields are to be updated.
* Valid mask path is `display_name`.
* </pre>
*
* <code>.google.protobuf.FieldMask update_mask = 2;</code>
*/
public com.google.protobuf.FieldMask getUpdateMask() {
return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_;
}
/**
*
*
* <pre>
* The [FieldMask][google.protobuf.FieldMask] that specifies which fields to
* update.
* If update_mask isn't specified, all mutable fields are to be updated.
* Valid mask path is `display_name`.
* </pre>
*
* <code>.google.protobuf.FieldMask update_mask = 2;</code>
*/
public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() {
return getUpdateMask();
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
if (productSet_ != null) {
output.writeMessage(1, getProductSet());
}
if (updateMask_ != null) {
output.writeMessage(2, getUpdateMask());
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (productSet_ != null) {
size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getProductSet());
}
if (updateMask_ != null) {
size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getUpdateMask());
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest)) {
return super.equals(obj);
}
com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest other =
(com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest) obj;
boolean result = true;
result = result && (hasProductSet() == other.hasProductSet());
if (hasProductSet()) {
result = result && getProductSet().equals(other.getProductSet());
}
result = result && (hasUpdateMask() == other.hasUpdateMask());
if (hasUpdateMask()) {
result = result && getUpdateMask().equals(other.getUpdateMask());
}
result = result && unknownFields.equals(other.unknownFields);
return result;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasProductSet()) {
hash = (37 * hash) + PRODUCT_SET_FIELD_NUMBER;
hash = (53 * hash) + getProductSet().hashCode();
}
if (hasUpdateMask()) {
hash = (37 * hash) + UPDATE_MASK_FIELD_NUMBER;
hash = (53 * hash) + getUpdateMask().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest parseFrom(
java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest parseFrom(
java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest parseFrom(
byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest parseFrom(
java.io.InputStream input) throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
}
public static com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest parseFrom(
java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
PARSER, input, extensionRegistry);
}
public static com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest parseDelimitedFrom(
java.io.InputStream input) throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
}
public static com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest parseDelimitedFrom(
java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
PARSER, input, extensionRegistry);
}
public static com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest parseFrom(
com.google.protobuf.CodedInputStream input) throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
}
public static com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() {
return newBuilder();
}
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(
com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
*
* <pre>
* Request message for the `UpdateProductSet` method.
* </pre>
*
* Protobuf type {@code google.cloud.vision.v1p4beta1.UpdateProductSetRequest}
*/
  public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
      implements
      // @@protoc_insertion_point(builder_implements:google.cloud.vision.v1p4beta1.UpdateProductSetRequest)
      com.google.cloud.vision.v1p4beta1.UpdateProductSetRequestOrBuilder {
    // NOTE: this class is generated by the protocol buffer compiler.
    // Change the .proto definition rather than editing it by hand.
    public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
      return com.google.cloud.vision.v1p4beta1.ProductSearchServiceProto
          .internal_static_google_cloud_vision_v1p4beta1_UpdateProductSetRequest_descriptor;
    }
    @java.lang.Override
    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return com.google.cloud.vision.v1p4beta1.ProductSearchServiceProto
          .internal_static_google_cloud_vision_v1p4beta1_UpdateProductSetRequest_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest.class,
              com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest.Builder.class);
    }
    // Construct using com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest.newBuilder()
    private Builder() {
      maybeForceBuilderInitialization();
    }
    private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
      super(parent);
      maybeForceBuilderInitialization();
    }
    private void maybeForceBuilderInitialization() {
      // Intentionally empty: nothing needs eager initialization for this
      // message even when the protobuf-internal flag is set.
      if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {}
    }
    @java.lang.Override
    public Builder clear() {
      super.clear();
      // Reset both singular message fields, dropping any nested builders.
      if (productSetBuilder_ == null) {
        productSet_ = null;
      } else {
        productSet_ = null;
        productSetBuilder_ = null;
      }
      if (updateMaskBuilder_ == null) {
        updateMask_ = null;
      } else {
        updateMask_ = null;
        updateMaskBuilder_ = null;
      }
      return this;
    }
    @java.lang.Override
    public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
      return com.google.cloud.vision.v1p4beta1.ProductSearchServiceProto
          .internal_static_google_cloud_vision_v1p4beta1_UpdateProductSetRequest_descriptor;
    }
    @java.lang.Override
    public com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest getDefaultInstanceForType() {
      return com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest.getDefaultInstance();
    }
    @java.lang.Override
    public com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest build() {
      com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest result = buildPartial();
      if (!result.isInitialized()) {
        throw newUninitializedMessageException(result);
      }
      return result;
    }
    @java.lang.Override
    public com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest buildPartial() {
      com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest result =
          new com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest(this);
      // For each field, read either the plain value or the nested builder,
      // whichever is currently authoritative.
      if (productSetBuilder_ == null) {
        result.productSet_ = productSet_;
      } else {
        result.productSet_ = productSetBuilder_.build();
      }
      if (updateMaskBuilder_ == null) {
        result.updateMask_ = updateMask_;
      } else {
        result.updateMask_ = updateMaskBuilder_.build();
      }
      onBuilt();
      return result;
    }
    @java.lang.Override
    public Builder clone() {
      return (Builder) super.clone();
    }
    @java.lang.Override
    public Builder setField(
        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
      return (Builder) super.setField(field, value);
    }
    @java.lang.Override
    public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
      return (Builder) super.clearField(field);
    }
    @java.lang.Override
    public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
      return (Builder) super.clearOneof(oneof);
    }
    @java.lang.Override
    public Builder setRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
      return (Builder) super.setRepeatedField(field, index, value);
    }
    @java.lang.Override
    public Builder addRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
      return (Builder) super.addRepeatedField(field, value);
    }
    @java.lang.Override
    public Builder mergeFrom(com.google.protobuf.Message other) {
      // Use the typed merge when possible; otherwise fall back to the
      // reflective field-by-field merge in the superclass.
      if (other instanceof com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest) {
        return mergeFrom((com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest) other);
      } else {
        super.mergeFrom(other);
        return this;
      }
    }
    public Builder mergeFrom(com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest other) {
      if (other == com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest.getDefaultInstance())
        return this;
      if (other.hasProductSet()) {
        mergeProductSet(other.getProductSet());
      }
      if (other.hasUpdateMask()) {
        mergeUpdateMask(other.getUpdateMask());
      }
      this.mergeUnknownFields(other.unknownFields);
      onChanged();
      return this;
    }
    @java.lang.Override
    public final boolean isInitialized() {
      return true;
    }
    @java.lang.Override
    public Builder mergeFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest parsedMessage = null;
      try {
        parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        parsedMessage =
            (com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest) e.getUnfinishedMessage();
        throw e.unwrapIOException();
      } finally {
        // Even on failure, merge whatever was successfully parsed before
        // rethrowing (standard protobuf partial-parse behavior).
        if (parsedMessage != null) {
          mergeFrom(parsedMessage);
        }
      }
      return this;
    }
    private com.google.cloud.vision.v1p4beta1.ProductSet productSet_ = null;
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.vision.v1p4beta1.ProductSet,
            com.google.cloud.vision.v1p4beta1.ProductSet.Builder,
            com.google.cloud.vision.v1p4beta1.ProductSetOrBuilder>
        productSetBuilder_;
    /**
     *
     *
     * <pre>
     * The ProductSet resource which replaces the one on the server.
     * </pre>
     *
     * <code>.google.cloud.vision.v1p4beta1.ProductSet product_set = 1;</code>
     */
    public boolean hasProductSet() {
      return productSetBuilder_ != null || productSet_ != null;
    }
    /**
     *
     *
     * <pre>
     * The ProductSet resource which replaces the one on the server.
     * </pre>
     *
     * <code>.google.cloud.vision.v1p4beta1.ProductSet product_set = 1;</code>
     */
    public com.google.cloud.vision.v1p4beta1.ProductSet getProductSet() {
      if (productSetBuilder_ == null) {
        return productSet_ == null
            ? com.google.cloud.vision.v1p4beta1.ProductSet.getDefaultInstance()
            : productSet_;
      } else {
        return productSetBuilder_.getMessage();
      }
    }
    /**
     *
     *
     * <pre>
     * The ProductSet resource which replaces the one on the server.
     * </pre>
     *
     * <code>.google.cloud.vision.v1p4beta1.ProductSet product_set = 1;</code>
     */
    public Builder setProductSet(com.google.cloud.vision.v1p4beta1.ProductSet value) {
      if (productSetBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        productSet_ = value;
        onChanged();
      } else {
        productSetBuilder_.setMessage(value);
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * The ProductSet resource which replaces the one on the server.
     * </pre>
     *
     * <code>.google.cloud.vision.v1p4beta1.ProductSet product_set = 1;</code>
     */
    public Builder setProductSet(
        com.google.cloud.vision.v1p4beta1.ProductSet.Builder builderForValue) {
      if (productSetBuilder_ == null) {
        productSet_ = builderForValue.build();
        onChanged();
      } else {
        productSetBuilder_.setMessage(builderForValue.build());
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * The ProductSet resource which replaces the one on the server.
     * </pre>
     *
     * <code>.google.cloud.vision.v1p4beta1.ProductSet product_set = 1;</code>
     */
    public Builder mergeProductSet(com.google.cloud.vision.v1p4beta1.ProductSet value) {
      if (productSetBuilder_ == null) {
        if (productSet_ != null) {
          // Field already set: merge the two messages field-by-field.
          productSet_ =
              com.google.cloud.vision.v1p4beta1.ProductSet.newBuilder(productSet_)
                  .mergeFrom(value)
                  .buildPartial();
        } else {
          productSet_ = value;
        }
        onChanged();
      } else {
        productSetBuilder_.mergeFrom(value);
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * The ProductSet resource which replaces the one on the server.
     * </pre>
     *
     * <code>.google.cloud.vision.v1p4beta1.ProductSet product_set = 1;</code>
     */
    public Builder clearProductSet() {
      if (productSetBuilder_ == null) {
        productSet_ = null;
        onChanged();
      } else {
        productSet_ = null;
        productSetBuilder_ = null;
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * The ProductSet resource which replaces the one on the server.
     * </pre>
     *
     * <code>.google.cloud.vision.v1p4beta1.ProductSet product_set = 1;</code>
     */
    public com.google.cloud.vision.v1p4beta1.ProductSet.Builder getProductSetBuilder() {
      onChanged();
      return getProductSetFieldBuilder().getBuilder();
    }
    /**
     *
     *
     * <pre>
     * The ProductSet resource which replaces the one on the server.
     * </pre>
     *
     * <code>.google.cloud.vision.v1p4beta1.ProductSet product_set = 1;</code>
     */
    public com.google.cloud.vision.v1p4beta1.ProductSetOrBuilder getProductSetOrBuilder() {
      if (productSetBuilder_ != null) {
        return productSetBuilder_.getMessageOrBuilder();
      } else {
        return productSet_ == null
            ? com.google.cloud.vision.v1p4beta1.ProductSet.getDefaultInstance()
            : productSet_;
      }
    }
    /**
     *
     *
     * <pre>
     * The ProductSet resource which replaces the one on the server.
     * </pre>
     *
     * <code>.google.cloud.vision.v1p4beta1.ProductSet product_set = 1;</code>
     */
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.vision.v1p4beta1.ProductSet,
            com.google.cloud.vision.v1p4beta1.ProductSet.Builder,
            com.google.cloud.vision.v1p4beta1.ProductSetOrBuilder>
        getProductSetFieldBuilder() {
      // Lazily creates the nested field builder; once it exists, the plain
      // productSet_ field is cleared and the builder is authoritative.
      if (productSetBuilder_ == null) {
        productSetBuilder_ =
            new com.google.protobuf.SingleFieldBuilderV3<
                com.google.cloud.vision.v1p4beta1.ProductSet,
                com.google.cloud.vision.v1p4beta1.ProductSet.Builder,
                com.google.cloud.vision.v1p4beta1.ProductSetOrBuilder>(
                getProductSet(), getParentForChildren(), isClean());
        productSet_ = null;
      }
      return productSetBuilder_;
    }
    private com.google.protobuf.FieldMask updateMask_ = null;
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.protobuf.FieldMask,
            com.google.protobuf.FieldMask.Builder,
            com.google.protobuf.FieldMaskOrBuilder>
        updateMaskBuilder_;
    /**
     *
     *
     * <pre>
     * The [FieldMask][google.protobuf.FieldMask] that specifies which fields to
     * update.
     * If update_mask isn't specified, all mutable fields are to be updated.
     * Valid mask path is `display_name`.
     * </pre>
     *
     * <code>.google.protobuf.FieldMask update_mask = 2;</code>
     */
    public boolean hasUpdateMask() {
      return updateMaskBuilder_ != null || updateMask_ != null;
    }
    /**
     *
     *
     * <pre>
     * The [FieldMask][google.protobuf.FieldMask] that specifies which fields to
     * update.
     * If update_mask isn't specified, all mutable fields are to be updated.
     * Valid mask path is `display_name`.
     * </pre>
     *
     * <code>.google.protobuf.FieldMask update_mask = 2;</code>
     */
    public com.google.protobuf.FieldMask getUpdateMask() {
      if (updateMaskBuilder_ == null) {
        return updateMask_ == null
            ? com.google.protobuf.FieldMask.getDefaultInstance()
            : updateMask_;
      } else {
        return updateMaskBuilder_.getMessage();
      }
    }
    /**
     *
     *
     * <pre>
     * The [FieldMask][google.protobuf.FieldMask] that specifies which fields to
     * update.
     * If update_mask isn't specified, all mutable fields are to be updated.
     * Valid mask path is `display_name`.
     * </pre>
     *
     * <code>.google.protobuf.FieldMask update_mask = 2;</code>
     */
    public Builder setUpdateMask(com.google.protobuf.FieldMask value) {
      if (updateMaskBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        updateMask_ = value;
        onChanged();
      } else {
        updateMaskBuilder_.setMessage(value);
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * The [FieldMask][google.protobuf.FieldMask] that specifies which fields to
     * update.
     * If update_mask isn't specified, all mutable fields are to be updated.
     * Valid mask path is `display_name`.
     * </pre>
     *
     * <code>.google.protobuf.FieldMask update_mask = 2;</code>
     */
    public Builder setUpdateMask(com.google.protobuf.FieldMask.Builder builderForValue) {
      if (updateMaskBuilder_ == null) {
        updateMask_ = builderForValue.build();
        onChanged();
      } else {
        updateMaskBuilder_.setMessage(builderForValue.build());
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * The [FieldMask][google.protobuf.FieldMask] that specifies which fields to
     * update.
     * If update_mask isn't specified, all mutable fields are to be updated.
     * Valid mask path is `display_name`.
     * </pre>
     *
     * <code>.google.protobuf.FieldMask update_mask = 2;</code>
     */
    public Builder mergeUpdateMask(com.google.protobuf.FieldMask value) {
      if (updateMaskBuilder_ == null) {
        if (updateMask_ != null) {
          updateMask_ =
              com.google.protobuf.FieldMask.newBuilder(updateMask_).mergeFrom(value).buildPartial();
        } else {
          updateMask_ = value;
        }
        onChanged();
      } else {
        updateMaskBuilder_.mergeFrom(value);
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * The [FieldMask][google.protobuf.FieldMask] that specifies which fields to
     * update.
     * If update_mask isn't specified, all mutable fields are to be updated.
     * Valid mask path is `display_name`.
     * </pre>
     *
     * <code>.google.protobuf.FieldMask update_mask = 2;</code>
     */
    public Builder clearUpdateMask() {
      if (updateMaskBuilder_ == null) {
        updateMask_ = null;
        onChanged();
      } else {
        updateMask_ = null;
        updateMaskBuilder_ = null;
      }
      return this;
    }
    /**
     *
     *
     * <pre>
     * The [FieldMask][google.protobuf.FieldMask] that specifies which fields to
     * update.
     * If update_mask isn't specified, all mutable fields are to be updated.
     * Valid mask path is `display_name`.
     * </pre>
     *
     * <code>.google.protobuf.FieldMask update_mask = 2;</code>
     */
    public com.google.protobuf.FieldMask.Builder getUpdateMaskBuilder() {
      onChanged();
      return getUpdateMaskFieldBuilder().getBuilder();
    }
    /**
     *
     *
     * <pre>
     * The [FieldMask][google.protobuf.FieldMask] that specifies which fields to
     * update.
     * If update_mask isn't specified, all mutable fields are to be updated.
     * Valid mask path is `display_name`.
     * </pre>
     *
     * <code>.google.protobuf.FieldMask update_mask = 2;</code>
     */
    public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() {
      if (updateMaskBuilder_ != null) {
        return updateMaskBuilder_.getMessageOrBuilder();
      } else {
        return updateMask_ == null
            ? com.google.protobuf.FieldMask.getDefaultInstance()
            : updateMask_;
      }
    }
    /**
     *
     *
     * <pre>
     * The [FieldMask][google.protobuf.FieldMask] that specifies which fields to
     * update.
     * If update_mask isn't specified, all mutable fields are to be updated.
     * Valid mask path is `display_name`.
     * </pre>
     *
     * <code>.google.protobuf.FieldMask update_mask = 2;</code>
     */
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.protobuf.FieldMask,
            com.google.protobuf.FieldMask.Builder,
            com.google.protobuf.FieldMaskOrBuilder>
        getUpdateMaskFieldBuilder() {
      // Same lazy-init pattern as getProductSetFieldBuilder().
      if (updateMaskBuilder_ == null) {
        updateMaskBuilder_ =
            new com.google.protobuf.SingleFieldBuilderV3<
                com.google.protobuf.FieldMask,
                com.google.protobuf.FieldMask.Builder,
                com.google.protobuf.FieldMaskOrBuilder>(
                getUpdateMask(), getParentForChildren(), isClean());
        updateMask_ = null;
      }
      return updateMaskBuilder_;
    }
    @java.lang.Override
    public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
      // Delegates to the proto3-specific implementation in the superclass.
      return super.setUnknownFieldsProto3(unknownFields);
    }
    @java.lang.Override
    public final Builder mergeUnknownFields(
        final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.mergeUnknownFields(unknownFields);
    }
    // @@protoc_insertion_point(builder_scope:google.cloud.vision.v1p4beta1.UpdateProductSetRequest)
  }
// @@protoc_insertion_point(class_scope:google.cloud.vision.v1p4beta1.UpdateProductSetRequest)
  // Singleton default (all-fields-unset) instance of this message.
  private static final com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest DEFAULT_INSTANCE;
  static {
    DEFAULT_INSTANCE = new com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest();
  }
  /** Returns the shared immutable default instance of this message type. */
  public static com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest getDefaultInstance() {
    return DEFAULT_INSTANCE;
  }
  // Wire-format parser for UpdateProductSetRequest messages.
  private static final com.google.protobuf.Parser<UpdateProductSetRequest> PARSER =
      new com.google.protobuf.AbstractParser<UpdateProductSetRequest>() {
        @java.lang.Override
        public UpdateProductSetRequest parsePartialFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws com.google.protobuf.InvalidProtocolBufferException {
          // The message constructor consumes the input stream directly.
          return new UpdateProductSetRequest(input, extensionRegistry);
        }
      };
  /** Returns the parser for this message type. */
  public static com.google.protobuf.Parser<UpdateProductSetRequest> parser() {
    return PARSER;
  }
  @java.lang.Override
  public com.google.protobuf.Parser<UpdateProductSetRequest> getParserForType() {
    // Instance-level accessor required by the Message interface; all
    // instances share the single static PARSER.
    return PARSER;
  }
  @java.lang.Override
  public com.google.cloud.vision.v1p4beta1.UpdateProductSetRequest getDefaultInstanceForType() {
    return DEFAULT_INSTANCE;
  }
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import java.util.SortedSet;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentSkipListSet;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionGroup;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.PosixParser;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
import org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter;
import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
import org.apache.hadoop.hbase.regionserver.StoreScanner.ScanType;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.LoadTestTool;
import org.apache.hadoop.hbase.util.MD5Hash;
import org.apache.hadoop.util.StringUtils;
/**
* Tests HFile read/write workloads, such as merging HFiles and random reads.
*/
public class HFileReadWriteTest {
private static final String TABLE_NAME = "MyTable";
  /** The supported benchmark workloads, each selectable via a command-line flag. */
  private static enum Workload {
    MERGE("merge", "Merge the specified HFiles", 1, Integer.MAX_VALUE),
    RANDOM_READS("read", "Perform a random read benchmark on the given HFile",
        1, 1);
    // Command-line flag (without the leading dash) that selects this workload.
    private String option;
    // Human-readable description used in the generated help text.
    private String description;
    // Bounds on how many input HFiles this workload accepts.
    public final int minNumInputFiles;
    public final int maxNumInputFiles;
    Workload(String option, String description, int minNumInputFiles,
        int maxNumInputFiles) {
      this.option = option;
      this.description = description;
      this.minNumInputFiles = minNumInputFiles;
      this.maxNumInputFiles = maxNumInputFiles;
    }
    /** @return a mutually exclusive option group containing one option per workload */
    static OptionGroup getOptionGroup() {
      OptionGroup optionGroup = new OptionGroup();
      for (Workload w : values())
        optionGroup.addOption(new Option(w.option, w.description));
      return optionGroup;
    }
    /** @return a comma-separated list of the workload flags, e.g. "-merge, -read" */
    private static String getOptionListStr() {
      StringBuilder sb = new StringBuilder();
      for (Workload w : values()) {
        if (sb.length() > 0)
          sb.append(", ");
        sb.append("-" + w.option);
      }
      return sb.toString();
    }
    /** @return the workload selected on the command line, or null (with an error logged) if none */
    static Workload fromCmdLine(CommandLine cmdLine) {
      for (Workload w : values()) {
        if (cmdLine.hasOption(w.option))
          return w;
      }
      LOG.error("No workload specified. Specify one of the options: " +
          getOptionListStr());
      return null;
    }
    /** @return a help-text suffix naming which workload an option applies to */
    public String onlyUsedFor() {
      return ". Only used for the " + this + " workload.";
    }
  }
  // Names of the command-line options.
  private static final String OUTPUT_DIR_OPTION = "output_dir";
  private static final String COMPRESSION_OPTION = "compression";
  private static final String BLOOM_FILTER_OPTION = "bloom";
  private static final String BLOCK_SIZE_OPTION = "block_size";
  private static final String DURATION_OPTION = "duration";
  private static final String NUM_THREADS_OPTION = "num_threads";
  private static final Log LOG = LogFactory.getLog(HFileReadWriteTest.class);
  // Configuration populated by parseOptions()/validateConfiguration().
  private Workload workload;
  private FileSystem fs;
  private Configuration conf;
  private CacheConfig cacheConf;
  private List<String> inputFileNames;
  private Path outputDir;
  private int numReadThreads;
  private int durationSec;
  private DataBlockEncoding dataBlockEncoding;
  private boolean encodeInCacheOnly;
  private HFileDataBlockEncoder dataBlockEncoder =
      NoOpDataBlockEncoder.INSTANCE;
  private StoreFile.BloomType bloomType = StoreFile.BloomType.NONE;
  private int blockSize;
  private Compression.Algorithm compression = Compression.Algorithm.NONE;
  // Key-range bounds used by createRandomRow() in the random-read workload.
  private byte[] firstRow, lastRow;
  // Counters shared across reader threads.
  private AtomicLong numSeeks = new AtomicLong();
  private AtomicLong numKV = new AtomicLong();
  private AtomicLong totalBytes = new AtomicLong();
  private byte[] family;
  // Wall-clock time (ms) at which the random-read workload stops.
  private long endTime = Long.MAX_VALUE;
  // MD5 hex hashes of the keys read by the random readers.
  private SortedSet<String> keysRead = new ConcurrentSkipListSet<String>();
  private List<StoreFile> inputStoreFiles;
  /** Creates the test harness with a fresh HBase configuration and cache config. */
  public HFileReadWriteTest() {
    conf = HBaseConfiguration.create();
    cacheConf = new CacheConfig(conf);
  }
@SuppressWarnings("unchecked")
public boolean parseOptions(String args[]) {
Options options = new Options();
options.addOption(OUTPUT_DIR_OPTION, true, "Output directory" +
Workload.MERGE.onlyUsedFor());
options.addOption(COMPRESSION_OPTION, true, " Compression type, one of "
+ Arrays.toString(Compression.Algorithm.values()) +
Workload.MERGE.onlyUsedFor());
options.addOption(BLOOM_FILTER_OPTION, true, "Bloom filter type, one of "
+ Arrays.toString(StoreFile.BloomType.values()) +
Workload.MERGE.onlyUsedFor());
options.addOption(BLOCK_SIZE_OPTION, true, "HFile block size" +
Workload.MERGE.onlyUsedFor());
options.addOption(DURATION_OPTION, true, "The amount of time to run the " +
"random read workload for" + Workload.RANDOM_READS.onlyUsedFor());
options.addOption(NUM_THREADS_OPTION, true, "The number of random " +
"reader threads" + Workload.RANDOM_READS.onlyUsedFor());
options.addOption(NUM_THREADS_OPTION, true, "The number of random " +
"reader threads" + Workload.RANDOM_READS.onlyUsedFor());
options.addOption(LoadTestTool.OPT_DATA_BLOCK_ENCODING, true,
LoadTestTool.OPT_DATA_BLOCK_ENCODING_USAGE);
options.addOption(LoadTestTool.OPT_ENCODE_IN_CACHE_ONLY, false,
LoadTestTool.OPT_ENCODE_IN_CACHE_ONLY_USAGE);
options.addOptionGroup(Workload.getOptionGroup());
if (args.length == 0) {
HelpFormatter formatter = new HelpFormatter();
formatter.printHelp(HFileReadWriteTest.class.getSimpleName(),
options, true);
return false;
}
CommandLineParser parser = new PosixParser();
CommandLine cmdLine;
try {
cmdLine = parser.parse(options, args);
} catch (ParseException ex) {
LOG.error(ex);
return false;
}
workload = Workload.fromCmdLine(cmdLine);
if (workload == null)
return false;
inputFileNames = (List<String>) cmdLine.getArgList();
if (inputFileNames.size() == 0) {
LOG.error("No input file names specified");
return false;
}
if (inputFileNames.size() < workload.minNumInputFiles) {
LOG.error("Too few input files: at least " + workload.minNumInputFiles +
" required");
return false;
}
if (inputFileNames.size() > workload.maxNumInputFiles) {
LOG.error("Too many input files: at most " + workload.minNumInputFiles +
" allowed");
return false;
}
if (cmdLine.hasOption(COMPRESSION_OPTION)) {
compression = Compression.Algorithm.valueOf(
cmdLine.getOptionValue(COMPRESSION_OPTION));
}
if (cmdLine.hasOption(BLOOM_FILTER_OPTION)) {
bloomType = StoreFile.BloomType.valueOf(cmdLine.getOptionValue(
BLOOM_FILTER_OPTION));
}
encodeInCacheOnly =
cmdLine.hasOption(LoadTestTool.OPT_ENCODE_IN_CACHE_ONLY);
if (cmdLine.hasOption(LoadTestTool.OPT_DATA_BLOCK_ENCODING)) {
dataBlockEncoding = DataBlockEncoding.valueOf(
cmdLine.getOptionValue(LoadTestTool.OPT_DATA_BLOCK_ENCODING));
// Optionally encode on disk, always encode in cache.
dataBlockEncoder = new HFileDataBlockEncoderImpl(
encodeInCacheOnly ? DataBlockEncoding.NONE : dataBlockEncoding,
dataBlockEncoding);
} else {
if (encodeInCacheOnly) {
LOG.error("The -" + LoadTestTool.OPT_ENCODE_IN_CACHE_ONLY +
" option does not make sense without -" +
LoadTestTool.OPT_DATA_BLOCK_ENCODING);
return false;
}
}
blockSize = conf.getInt("hfile.min.blocksize.size", 65536);
if (cmdLine.hasOption(BLOCK_SIZE_OPTION))
blockSize = Integer.valueOf(cmdLine.getOptionValue(BLOCK_SIZE_OPTION));
if (workload == Workload.MERGE) {
String outputDirStr = cmdLine.getOptionValue(OUTPUT_DIR_OPTION);
if (outputDirStr == null) {
LOG.error("Output directory is not specified");
return false;
}
outputDir = new Path(outputDirStr);
// Will be checked for existence in validateConfiguration.
}
if (workload == Workload.RANDOM_READS) {
if (!requireOptions(cmdLine, new String[] { DURATION_OPTION,
NUM_THREADS_OPTION })) {
return false;
}
durationSec = Integer.parseInt(cmdLine.getOptionValue(DURATION_OPTION));
numReadThreads = Integer.parseInt(
cmdLine.getOptionValue(NUM_THREADS_OPTION));
}
Collections.sort(inputFileNames);
return true;
}
/** @return true if all the given options are specified */
private boolean requireOptions(CommandLine cmdLine,
String[] requiredOptions) {
for (String option : requiredOptions)
if (!cmdLine.hasOption(option)) {
LOG.error("Required option -" + option + " not specified");
return false;
}
return true;
}
  /**
   * Verifies that every input path exists and is a regular file, and that the
   * output directory (when configured) exists and is a directory. Also
   * initializes the {@code fs} field as a side effect.
   *
   * @return true if the configuration is usable
   * @throws IOException if the filesystem cannot be queried
   */
  public boolean validateConfiguration() throws IOException {
    fs = FileSystem.get(conf);
    for (String inputFileName : inputFileNames) {
      Path path = new Path(inputFileName);
      if (!fs.exists(path)) {
        LOG.error("File " + inputFileName + " does not exist");
        return false;
      }
      if (fs.getFileStatus(path).isDir()) {
        LOG.error(inputFileName + " is a directory");
        return false;
      }
    }
    if (outputDir != null &&
        (!fs.exists(outputDir) || !fs.getFileStatus(outputDir).isDir())) {
      LOG.error(outputDir.toString() + " does not exist or is not a " +
          "directory");
      return false;
    }
    return true;
  }
  /**
   * Merges all input HFiles into a single output HFile in the configured
   * output directory, printing periodic statistics while the merge runs and
   * HFile metadata for the result afterwards.
   *
   * @throws IOException on any filesystem or HFile error
   */
  public void runMergeWorkload() throws IOException {
    long maxKeyCount = prepareForMerge();
    List<StoreFileScanner> scanners =
        StoreFileScanner.getScannersForStoreFiles(inputStoreFiles, false,
            false);
    // Build a synthetic column family / region / store so that a regular
    // StoreScanner can drive the merge.
    HColumnDescriptor columnDescriptor = new HColumnDescriptor(
        HFileReadWriteTest.class.getSimpleName());
    columnDescriptor.setBlocksize(blockSize);
    columnDescriptor.setBloomFilterType(bloomType);
    columnDescriptor.setCompressionType(compression);
    columnDescriptor.setDataBlockEncoding(dataBlockEncoding);
    HRegionInfo regionInfo = new HRegionInfo();
    HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
    HRegion region = new HRegion(outputDir, null, fs, conf, regionInfo, htd,
        null);
    Store store = new Store(outputDir, region, columnDescriptor, fs, conf);
    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf,
        new CacheConfig(conf), fs, blockSize)
            .withOutputDir(outputDir)
            .withCompression(compression)
            .withDataBlockEncoder(dataBlockEncoder)
            .withBloomType(bloomType)
            .withMaxKeyCount(maxKeyCount)
            .withChecksumType(HFile.DEFAULT_CHECKSUM_TYPE)
            .withBytesPerChecksum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
            .build();
    StatisticsPrinter statsPrinter = new StatisticsPrinter();
    statsPrinter.startThread();
    try {
      performMerge(scanners, store, writer);
      writer.close();
    } finally {
      // Always stop the statistics thread, even if the merge failed.
      statsPrinter.requestStop();
    }
    Path resultPath = writer.getPath();
    resultPath = tryUsingSimpleOutputPath(resultPath);
    long fileSize = fs.getFileStatus(resultPath).getLen();
    LOG.info("Created " + resultPath + ", size " + fileSize);
    System.out.println();
    System.out.println("HFile information for " + resultPath);
    System.out.println();
    HFilePrettyPrinter hfpp = new HFilePrettyPrinter();
    hfpp.run(new String[] { "-m", "-f", resultPath.toString() });
  }
private Path tryUsingSimpleOutputPath(Path resultPath) throws IOException {
if (inputFileNames.size() == 1) {
// In case of only one input set output to be consistent with the
// input name.
Path inputPath = new Path(inputFileNames.get(0));
Path betterOutputPath = new Path(outputDir,
inputPath.getName());
if (!fs.exists(betterOutputPath)) {
fs.rename(resultPath, betterOutputPath);
resultPath = betterOutputPath;
}
}
return resultPath;
}
  /**
   * Streams every key-value from the input scanners through a
   * major-compaction-style {@link StoreScanner} and appends them to the
   * output writer, updating the shared numKV/totalBytes counters.
   *
   * @param scanners scanners over the input store files
   * @param store the store the scan is associated with
   * @param writer destination for the merged key-values
   * @throws IOException on read or write failure
   */
  private void performMerge(List<StoreFileScanner> scanners, Store store,
      StoreFile.Writer writer) throws IOException {
    InternalScanner scanner = null;
    try {
      Scan scan = new Scan();
      // Include deletes
      scanner = new StoreScanner(store, scan, scanners,
          ScanType.MAJOR_COMPACT, Long.MIN_VALUE, Long.MIN_VALUE);
      ArrayList<KeyValue> kvs = new ArrayList<KeyValue>();
      // next() returns false on the last batch, which may still hold KVs.
      while (scanner.next(kvs) || kvs.size() != 0) {
        numKV.addAndGet(kvs.size());
        for (KeyValue kv : kvs) {
          totalBytes.addAndGet(kv.getLength());
          writer.append(kv);
        }
        kvs.clear();
      }
    } finally {
      if (scanner != null)
        scanner.close();
    }
  }
  /**
   * Opens readers for all input store files and logs per-file statistics.
   *
   * @return the sum of the input files' Bloom filter entry counts, used as an
   *         upper bound on the total key count for the output writer
   * @throws IOException if a store file cannot be opened
   */
  private long prepareForMerge() throws IOException {
    LOG.info("Merging " + inputFileNames);
    LOG.info("Using block size: " + blockSize);
    inputStoreFiles = new ArrayList<StoreFile>();
    long maxKeyCount = 0;
    for (String fileName : inputFileNames) {
      Path filePath = new Path(fileName);
      // Open without caching.
      StoreFile sf = openStoreFile(filePath, false);
      sf.createReader();
      inputStoreFiles.add(sf);
      StoreFile.Reader r = sf.getReader();
      if (r != null) {
        long keyCount = r.getFilterEntries();
        maxKeyCount += keyCount;
        LOG.info("Compacting: " + sf + "; keyCount = " + keyCount
            + "; Bloom Type = " + r.getBloomFilterType().toString()
            + "; Size = " + StringUtils.humanReadableInt(r.length()));
      }
    }
    return maxKeyCount;
  }
public HFile.Reader[] getHFileReaders() {
HFile.Reader readers[] = new HFile.Reader[inputStoreFiles.size()];
for (int i = 0; i < inputStoreFiles.size(); ++i)
readers[i] = inputStoreFiles.get(i).getReader().getHFileReader();
return readers;
}
  /**
   * Opens a store file at the given path using the instance's configuration.
   *
   * @param filePath the HFile to open
   * @param blockCache NOTE(review): this parameter is never read in the body;
   *          caching appears to be governed by {@code cacheConf} instead —
   *          confirm whether it was meant to be used
   * @throws IOException if the file cannot be opened
   */
  private StoreFile openStoreFile(Path filePath, boolean blockCache)
      throws IOException {
    // We are passing the ROWCOL Bloom filter type, but StoreFile will still
    // use the Bloom filter type specified in the HFile.
    return new StoreFile(fs, filePath, conf, cacheConf,
        StoreFile.BloomType.ROWCOL, dataBlockEncoder);
  }
public static int charToHex(int c) {
if ('0' <= c && c <= '9')
return c - '0';
if ('a' <= c && c <= 'f')
return 10 + c - 'a';
return -1;
}
public static int hexToChar(int h) {
h &= 0xff;
if (0 <= h && h <= 9)
return '0' + h;
if (10 <= h && h <= 15)
return 'a' + h - 10;
return -1;
}
public static byte[] createRandomRow(Random rand, byte[] first, byte[] last)
{
int resultLen = Math.max(first.length, last.length);
int minLen = Math.min(first.length, last.length);
byte[] result = new byte[resultLen];
boolean greaterThanFirst = false;
boolean lessThanLast = false;
for (int i = 0; i < resultLen; ++i) {
// Generate random hex characters if both first and last row are hex
// at this position.
boolean isHex = i < minLen && charToHex(first[i]) != -1
&& charToHex(last[i]) != -1;
// If our key is already greater than the first key, we can use
// arbitrarily low values.
int low = greaterThanFirst || i >= first.length ? 0 : first[i] & 0xff;
// If our key is already less than the last key, we can use arbitrarily
// high values.
int high = lessThanLast || i >= last.length ? 0xff : last[i] & 0xff;
// Randomly select the next byte between the lowest and the highest
// value allowed for this position. Restrict to hex characters if
// necessary. We are generally biased towards border cases, which is OK
// for test.
int r;
if (isHex) {
// Use hex chars.
if (low < '0')
low = '0';
if (high > 'f')
high = 'f';
int lowHex = charToHex(low);
int highHex = charToHex(high);
r = hexToChar(lowHex + rand.nextInt(highHex - lowHex + 1));
} else {
r = low + rand.nextInt(high - low + 1);
}
if (r > low)
greaterThanFirst = true;
if (r < high)
lessThanLast = true;
result[i] = (byte) r;
}
if (Bytes.compareTo(result, first) < 0) {
throw new IllegalStateException("Generated key " +
Bytes.toStringBinary(result) + " is less than the first key " +
Bytes.toStringBinary(first));
}
if (Bytes.compareTo(result, last) > 0) {
throw new IllegalStateException("Generated key " +
Bytes.toStringBinary(result) + " is greater than te last key " +
Bytes.toStringBinary(last));
}
return result;
}
private static byte[] createRandomQualifier(Random rand) {
byte[] q = new byte[10 + rand.nextInt(30)];
rand.nextBytes(q);
return q;
}
  /**
   * Worker that repeatedly seeks to random keys between firstRow and lastRow
   * and reads a few key-values after each seek, until the shared endTime
   * passes. Returns false on the first seek/read inconsistency it detects.
   */
  private class RandomReader implements Callable<Boolean> {
    private int readerId;  // used in log messages and the thread name
    private StoreFile.Reader reader;
    private boolean pread;  // passed through to getStoreFileScanner
    public RandomReader(int readerId, StoreFile.Reader reader,
        boolean pread)
    {
      this.readerId = readerId;
      this.reader = reader;
      this.pread = pread;
    }
    @Override
    public Boolean call() throws Exception {
      Thread.currentThread().setName("reader " + readerId);
      Random rand = new Random();
      StoreFileScanner scanner = reader.getStoreFileScanner(true, pread);
      while (System.currentTimeMillis() < endTime) {
        byte[] row = createRandomRow(rand, firstRow, lastRow);
        KeyValue kvToSeek = new KeyValue(row, family,
            createRandomQualifier(rand));
        // Occasionally log a sample of the keys being sought.
        if (rand.nextDouble() < 0.0001) {
          LOG.info("kvToSeek=" + kvToSeek);
        }
        boolean seekResult;
        try {
          seekResult = scanner.seek(kvToSeek);
        } catch (IOException ex) {
          // Wrap to record which key and read mode failed.
          throw new IOException("Seek failed for key " + kvToSeek + ", pread="
              + pread, ex);
        }
        numSeeks.incrementAndGet();
        if (!seekResult) {
          // Presumably every generated row should be seekable; treat a
          // false result as a test failure.
          error("Seek returned false for row " + Bytes.toStringBinary(row));
          return false;
        }
        // Read between 1 and 10 key-values following the seek position.
        for (int i = 0; i < rand.nextInt(10) + 1; ++i) {
          KeyValue kv = scanner.next();
          numKV.incrementAndGet();
          if (i == 0 && kv == null) {
            error("scanner.next() returned null at the first iteration for " +
                "row " + Bytes.toStringBinary(row));
            return false;
          }
          if (kv == null)
            break;
          // Track the keys seen via their MD5 hashes in the shared set.
          String keyHashStr = MD5Hash.getMD5AsHex(kv.getKey());
          keysRead.add(keyHashStr);
          totalBytes.addAndGet(kv.getLength());
        }
      }
      return true;
    }
    private void error(String msg) {
      LOG.error("error in reader " + readerId + " (pread=" + pread + "): "
          + msg);
    }
  }
/**
 * Periodically logs throughput statistics (seeks/sec, KVs/sec, bytes/sec,
 * block reads/sec) roughly once per second until the shared test
 * {@code endTime} is reached or a stop is requested.
 */
private class StatisticsPrinter implements Callable<Boolean> {
  private volatile boolean stopRequested;
  private volatile Thread thread; // set in call(); interrupted by requestStop()
  // Accumulated here because HFile's readOps/preadOps counters reset on read.
  private long totalSeekAndReads, totalPositionalReads;
  /**
   * Run the statistics collector in a separate thread without an executor.
   */
  public void startThread() {
    new Thread() {
      @Override
      public void run() {
        try {
          call();
        } catch (Exception e) {
          LOG.error(e);
        }
      }
    }.start();
  }
  @Override
  public Boolean call() throws Exception {
    LOG.info("Starting statistics printer");
    thread = Thread.currentThread();
    thread.setName(StatisticsPrinter.class.getSimpleName());
    long startTime = System.currentTimeMillis();
    long curTime;
    while ((curTime = System.currentTimeMillis()) < endTime &&
        !stopRequested) {
      long elapsedTime = curTime - startTime;
      printStats(elapsedTime);
      try {
        // Sleep up to the next whole-second boundary relative to startTime.
        Thread.sleep(1000 - elapsedTime % 1000);
      } catch (InterruptedException iex) {
        // Restore the interrupt flag; only exit if the interrupt was a
        // genuine stop request.
        Thread.currentThread().interrupt();
        if (stopRequested)
          break;
      }
    }
    // Final summary covering the whole run.
    printStats(curTime - startTime);
    LOG.info("Stopping statistics printer");
    return true;
  }
  // Logs one line of cumulative rates for the given elapsed wall time (ms).
  private void printStats(long elapsedTime) {
    long numSeeksL = numSeeks.get();
    // NOTE(review): on the very first call elapsedTime can be 0, making
    // timeSec 0.0 and the per-second rates Infinity; harmless for a log
    // line, but confirm it is intended.
    double timeSec = elapsedTime / 1000.0;
    double seekPerSec = numSeeksL / timeSec;
    long kvCount = numKV.get();
    double kvPerSec = kvCount / timeSec;
    long bytes = totalBytes.get();
    double bytesPerSec = bytes / timeSec;
    // readOps and preadOps counters get reset on access, so we have to
    // accumulate them here. HRegion metrics publishing thread should not
    // be running in this tool, so no one else should be resetting these
    // metrics.
    totalSeekAndReads += HFile.getReadOps();
    totalPositionalReads += HFile.getPreadOps();
    long totalBlocksRead = totalSeekAndReads + totalPositionalReads;
    double blkReadPerSec = totalBlocksRead / timeSec;
    double seekReadPerSec = totalSeekAndReads / timeSec;
    double preadPerSec = totalPositionalReads / timeSec;
    boolean isRead = workload == Workload.RANDOM_READS;
    StringBuilder sb = new StringBuilder();
    sb.append("Time: " + (long) timeSec + " sec");
    if (isRead)
      sb.append(", seek/sec: " + (long) seekPerSec);
    sb.append(", kv/sec: " + (long) kvPerSec);
    sb.append(", bytes/sec: " + (long) bytesPerSec);
    sb.append(", blk/sec: " + (long) blkReadPerSec);
    sb.append(", total KV: " + numKV);
    sb.append(", total bytes: " + totalBytes);
    sb.append(", total blk: " + totalBlocksRead);
    sb.append(", seekRead/sec: " + (long) seekReadPerSec);
    sb.append(", pread/sec: " + (long) preadPerSec);
    if (isRead)
      sb.append(", unique keys: " + (long) keysRead.size());
    LOG.info(sb.toString());
  }
  // Asks the printer to exit early; safe to call from any thread.
  public void requestStop() {
    stopRequested = true;
    if (thread != null)
      thread.interrupt();
  }
}
/**
 * Runs the random-read workload against the single input store file:
 * starts {@code numReadThreads} {@link RandomReader} workers plus one
 * {@link StatisticsPrinter}, waits for all of them to finish, and logs
 * how many completed versus failed.
 *
 * @return true if the workload ran to completion (individual worker
 *         failures are only counted and logged), false if the file's key
 *         range makes a read workload impossible
 * @throws IOException if exactly one input file was not supplied or the
 *         file cannot be opened
 */
public boolean runRandomReadWorkload() throws IOException {
  if (inputFileNames.size() != 1) {
    throw new IOException("Need exactly one input file for random reads: " +
        inputFileNames);
  }
  Path inputPath = new Path(inputFileNames.get(0));
  // Make sure we are using caching.
  StoreFile storeFile = openStoreFile(inputPath, true);
  StoreFile.Reader reader = storeFile.createReader();
  LOG.info("First key: " + Bytes.toStringBinary(reader.getFirstKey()));
  LOG.info("Last key: " + Bytes.toStringBinary(reader.getLastKey()));
  KeyValue firstKV = KeyValue.createKeyValueFromKey(reader.getFirstKey());
  firstRow = firstKV.getRow();
  KeyValue lastKV = KeyValue.createKeyValueFromKey(reader.getLastKey());
  lastRow = lastKV.getRow();
  // NOTE(review): this is a local variable; RandomReader reads an outer
  // "family" member when building seek keys -- confirm that member is
  // populated elsewhere, otherwise this assignment may be intended for it.
  byte[] family = firstKV.getFamily();
  if (!Bytes.equals(family, lastKV.getFamily())) {
    LOG.error("First and last key have different families: "
        + Bytes.toStringBinary(family) + " and "
        + Bytes.toStringBinary(lastKV.getFamily()));
    return false;
  }
  if (Bytes.equals(firstRow, lastRow)) {
    // A single-row file leaves no range to draw random rows from.
    LOG.error("First and last row are the same, cannot run read workload: " +
        "firstRow=" + Bytes.toStringBinary(firstRow) + ", " +
        "lastRow=" + Bytes.toStringBinary(lastRow));
    return false;
  }
  // One extra slot for the statistics printer.
  ExecutorService exec = Executors.newFixedThreadPool(numReadThreads + 1);
  int numCompleted = 0;
  int numFailed = 0;
  try {
    ExecutorCompletionService<Boolean> ecs =
        new ExecutorCompletionService<Boolean>(exec);
    endTime = System.currentTimeMillis() + 1000 * durationSec;
    boolean pread = true;
    for (int i = 0; i < numReadThreads; ++i)
      ecs.submit(new RandomReader(i, reader, pread));
    // The printer stops on its own when endTime passes; requestStop() is
    // never called here.
    ecs.submit(new StatisticsPrinter());
    Future<Boolean> result;
    while (true) {
      try {
        // Wait up to one second past endTime for the next finished task;
        // a null result means everything still running has timed out.
        result = ecs.poll(endTime + 1000 - System.currentTimeMillis(),
            TimeUnit.MILLISECONDS);
        if (result == null)
          break;
        try {
          if (result.get()) {
            ++numCompleted;
          } else {
            ++numFailed;
          }
        } catch (ExecutionException e) {
          LOG.error("Worker thread failure", e.getCause());
          ++numFailed;
        }
      } catch (InterruptedException ex) {
        LOG.error("Interrupted after " + numCompleted +
            " workers completed");
        // NOTE(review): the interrupt flag is restored and the loop
        // continues polling; with the flag set, poll may throw again
        // immediately -- confirm this retry behavior is intended.
        Thread.currentThread().interrupt();
        continue;
      }
    }
  } finally {
    storeFile.closeReader(true);
    exec.shutdown();
    BlockCache c = cacheConf.getBlockCache();
    if (c != null) {
      c.shutdown();
    }
  }
  LOG.info("Worker threads completed: " + numCompleted);
  LOG.info("Worker threads failed: " + numFailed);
  // NOTE(review): returns true even when numFailed > 0; presumably the
  // caller relies on the logs for failure counts -- confirm.
  return true;
}
/**
 * Dispatches to the workload selected on the command line.
 *
 * @return true if the selected workload completed, false for an unknown
 *         workload or a failed random-read run
 * @throws IOException if the underlying workload fails with an I/O error
 */
public boolean run() throws IOException {
  LOG.info("Workload: " + workload);
  if (workload == Workload.MERGE) {
    runMergeWorkload();
    return true;
  }
  if (workload == Workload.RANDOM_READS) {
    return runRandomReadWorkload();
  }
  LOG.error("Unknown workload: " + workload);
  return false;
}
/** Terminates the JVM with a non-zero exit code to signal test failure. */
private static void failure() {
  System.exit(1);
}
/**
 * Command-line entry point: parses options, validates the configuration,
 * and runs the selected workload, exiting with a non-zero status on any
 * failure.
 *
 * @param args command-line arguments forwarded to option parsing
 */
public static void main(String[] args) {
  final HFileReadWriteTest test = new HFileReadWriteTest();
  if (!test.parseOptions(args)) {
    failure();
  }
  try {
    final boolean ok = test.validateConfiguration() && test.run();
    if (!ok) {
      failure();
    }
  } catch (IOException ex) {
    LOG.error(ex);
    failure();
  }
}
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.codehaus.groovy.ast;
import org.codehaus.groovy.ast.expr.DeclarationExpression;
import org.codehaus.groovy.ast.expr.Expression;
import org.codehaus.groovy.ast.stmt.AssertStatement;
import org.codehaus.groovy.ast.stmt.BlockStatement;
import org.codehaus.groovy.ast.stmt.BreakStatement;
import org.codehaus.groovy.ast.stmt.CaseStatement;
import org.codehaus.groovy.ast.stmt.CatchStatement;
import org.codehaus.groovy.ast.stmt.ContinueStatement;
import org.codehaus.groovy.ast.stmt.DoWhileStatement;
import org.codehaus.groovy.ast.stmt.ExpressionStatement;
import org.codehaus.groovy.ast.stmt.ForStatement;
import org.codehaus.groovy.ast.stmt.IfStatement;
import org.codehaus.groovy.ast.stmt.ReturnStatement;
import org.codehaus.groovy.ast.stmt.Statement;
import org.codehaus.groovy.ast.stmt.SwitchStatement;
import org.codehaus.groovy.ast.stmt.SynchronizedStatement;
import org.codehaus.groovy.ast.stmt.ThrowStatement;
import org.codehaus.groovy.ast.stmt.TryCatchStatement;
import org.codehaus.groovy.ast.stmt.WhileStatement;
import org.codehaus.groovy.control.SourceUnit;
import org.codehaus.groovy.control.messages.SyntaxErrorMessage;
import org.codehaus.groovy.syntax.SyntaxException;
import org.codehaus.groovy.transform.ErrorCollecting;
/**
 * Base class for AST visitors that traverse class-level structure
 * (annotations, package, imports, fields, properties, constructors,
 * methods and object initializers) in addition to the statements and
 * expressions handled by {@code CodeVisitorSupport}, and that can report
 * errors against a {@link SourceUnit}.
 */
public abstract class ClassCodeVisitorSupport extends CodeVisitorSupport implements ErrorCollecting, GroovyClassVisitor {
    @Override
    public void visitClass(ClassNode node) {
        // Visit order: annotations, package, imports, then the class
        // contents (fields, properties, constructors, methods), and
        // finally any object initializer blocks.
        visitAnnotations(node);
        visitPackage(node.getPackage());
        visitImports(node.getModule());
        node.visitContents(this);
        visitObjectInitializerStatements(node);
    }
    // Visits the annotations attached to any annotated node.
    public void visitAnnotations(AnnotatedNode node) {
        visitAnnotations(node.getAnnotations());
    }
    protected final void visitAnnotations(Iterable<AnnotationNode> nodes) {
        for (AnnotationNode node : nodes) {
            // skip built-in properties
            if (!node.isBuiltIn()) {
                visitAnnotation(node);
            }
        }
    }
    // Visits the member value expressions of a single annotation.
    protected void visitAnnotation(AnnotationNode node) {
        for (Expression expr : node.getMembers().values()) {
            expr.visit(this);
        }
    }
    public void visitPackage(PackageNode node) {
        // The package node is null for files in the default package.
        if (node != null) {
            visitAnnotations(node);
            node.visit(this);
        }
    }
    public void visitImports(ModuleNode node) {
        // Covers all four import flavors: regular, star, static, and
        // static star imports.
        if (node != null) {
            for (ImportNode importNode : node.getImports()) {
                visitAnnotations(importNode);
                importNode.visit(this);
            }
            for (ImportNode importStarNode : node.getStarImports()) {
                visitAnnotations(importStarNode);
                importStarNode.visit(this);
            }
            for (ImportNode importStaticNode : node.getStaticImports().values()) {
                visitAnnotations(importStaticNode);
                importStaticNode.visit(this);
            }
            for (ImportNode importStaticStarNode : node.getStaticStarImports().values()) {
                visitAnnotations(importStaticStarNode);
                importStaticStarNode.visit(this);
            }
        }
    }
    @Override
    public void visitConstructor(ConstructorNode node) {
        visitConstructorOrMethod(node, true);
    }
    @Override
    public void visitMethod(MethodNode node) {
        visitConstructorOrMethod(node, false);
    }
    // Shared handling for constructors and methods: annotations, body,
    // then parameter annotations.
    protected void visitConstructorOrMethod(MethodNode node, boolean isConstructor) {
        visitAnnotations(node);
        visitClassCodeContainer(node.getCode());
        for (Parameter param : node.getParameters()) {
            visitAnnotations(param);
        }
    }
    @Override
    public void visitField(FieldNode node) {
        visitAnnotations(node);
        Expression init = node.getInitialExpression();
        if (init != null) init.visit(this);
    }
    @Override
    public void visitProperty(PropertyNode node) {
        // Visits the getter and setter blocks (if present) and the
        // property's initial value expression.
        visitAnnotations(node);
        Statement statement = node.getGetterBlock();
        visitClassCodeContainer(statement);
        statement = node.getSetterBlock();
        visitClassCodeContainer(statement);
        Expression init = node.getInitialExpression();
        if (init != null) init.visit(this);
    }
    // Visits a method/getter/setter body; tolerates abstract (null) bodies.
    protected void visitClassCodeContainer(Statement code) {
        if (code != null) code.visit(this);
    }
    protected void visitObjectInitializerStatements(ClassNode node) {
        for (Statement statement : node.getObjectInitializerStatements()) {
            statement.visit(this);
        }
    }
    @Override
    public void visitDeclarationExpression(DeclarationExpression expression) {
        visitAnnotations(expression);
        super.visitDeclarationExpression(expression);
    }
    //--------------------------------------------------------------------------
    // Each statement override below first calls the visitStatement hook,
    // then delegates to the superclass traversal.
    @Override
    public void visitAssertStatement(AssertStatement statement) {
        visitStatement(statement);
        super.visitAssertStatement(statement);
    }
    @Override
    public void visitBlockStatement(BlockStatement statement) {
        visitStatement(statement);
        super.visitBlockStatement(statement);
    }
    @Override
    public void visitBreakStatement(BreakStatement statement) {
        visitStatement(statement);
        super.visitBreakStatement(statement);
    }
    @Override
    public void visitCaseStatement(CaseStatement statement) {
        visitStatement(statement);
        super.visitCaseStatement(statement);
    }
    @Override
    public void visitCatchStatement(CatchStatement statement) {
        visitStatement(statement);
        super.visitCatchStatement(statement);
    }
    @Override
    public void visitContinueStatement(ContinueStatement statement) {
        visitStatement(statement);
        super.visitContinueStatement(statement);
    }
    @Override
    public void visitDoWhileLoop(DoWhileStatement statement) {
        visitStatement(statement);
        super.visitDoWhileLoop(statement);
    }
    @Override
    public void visitExpressionStatement(ExpressionStatement statement) {
        visitStatement(statement);
        super.visitExpressionStatement(statement);
    }
    @Override
    public void visitForLoop(ForStatement statement) {
        visitStatement(statement);
        super.visitForLoop(statement);
    }
    @Override
    public void visitIfElse(IfStatement statement) {
        visitStatement(statement);
        super.visitIfElse(statement);
    }
    @Override
    public void visitReturnStatement(ReturnStatement statement) {
        visitStatement(statement);
        super.visitReturnStatement(statement);
    }
    @Override
    public void visitSwitch(SwitchStatement statement) {
        visitStatement(statement);
        super.visitSwitch(statement);
    }
    @Override
    public void visitSynchronizedStatement(SynchronizedStatement statement) {
        visitStatement(statement);
        super.visitSynchronizedStatement(statement);
    }
    @Override
    public void visitThrowStatement(ThrowStatement statement) {
        visitStatement(statement);
        super.visitThrowStatement(statement);
    }
    @Override
    public void visitTryCatchFinally(TryCatchStatement statement) {
        visitStatement(statement);
        super.visitTryCatchFinally(statement);
    }
    @Override
    public void visitWhileLoop(WhileStatement statement) {
        visitStatement(statement);
        super.visitWhileLoop(statement);
    }
    //--------------------------------------------------------------------------
    // Hook for subclasses that want to observe every statement; the default
    // implementation deliberately does nothing.
    protected void visitStatement(Statement statement) {
    }
    // Subclasses supply the source unit used for error reporting.
    protected abstract SourceUnit getSourceUnit();
    @Override
    public void addError(String error, ASTNode node) {
        // Records the error with the node's position and continues
        // compilation rather than aborting at the first problem.
        SourceUnit source = getSourceUnit();
        source.getErrorCollector().addErrorAndContinue(
                new SyntaxErrorMessage(new SyntaxException(error + '\n', node.getLineNumber(), node.getColumnNumber(), node.getLastLineNumber(), node.getLastColumnNumber()), source)
        );
    }
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.sun.jini.test.spec.servicediscovery.lookup;
import java.util.logging.Level;
import com.sun.jini.test.spec.servicediscovery.AbstractBaseTest;
import com.sun.jini.test.share.DiscoveryServiceUtil;
import net.jini.lookup.ServiceItemFilter;
import net.jini.core.lookup.ServiceItem;
import java.rmi.RemoteException;
import java.util.ArrayList;
import com.sun.jini.qa.harness.QAConfig;
import com.sun.jini.qa.harness.TestException;
/**
* With respect to the <code>lookup</code> method defined by the
* <code>ServiceDiscoveryManager</code> utility class, this class verifies
* that the blocking version that returns an array of instances of
* <code>ServiceItem</code> operates as specified when invoked under
* the following condition:
* <p><ul>
* <li> template matching performed by the service discovery manager is
* based on service type only
* <li> the service discovery manager applies no filtering to the results
* of the template matching
* <li> the minimum number of desired services is equal to the maximum
* number of desired services
* </ul><p>
*
* <pre>
* ServiceItem[] lookup(ServiceTemplate tmpl,
* int minMatches,
* int maxMatches
* ServiceItemFilter filter,
* long waitDur);
* </pre>
*/
public class LookupMinEqualsMax extends AbstractBaseTest {
    protected long waitDur = 30*1000; // default blocking period for lookup(), in ms
    protected int minMatches = 0;     // minimum services lookup() must return
    protected int maxMatches = 0;     // maximum services lookup() may return
    /** Performs actions necessary to prepare for execution of the
     *  current test.
     *
     *  1. Starts N lookup services
     *  2. Registers M test services with the lookup services started above
     *  3. Creates a service discovery manager that discovers the lookup
     *     services started above
     *  4. Creates a template that will match the test services based on
     *     service type only
     */
    public void setup(QAConfig config) throws Exception {
        super.setup(config);
        testDesc = ": multiple service lookup employing -- template, "
                   +"blocking, minMatches = maxMatches";
        registerServices(nServices,nAttributes);
        // One less than the eventual total so the blocking path is exercised;
        // this test's defining condition is minMatches == maxMatches.
        maxMatches = nServices+nAddServices-1;
        minMatches = maxMatches;
    }//end setup
    /** Cleans up all state. */
    public void tearDown() {
        /* Because service registration occurs in a separate thread,
         * some tests can complete before all of the service(s) are
         * registered with all of the lookup service(s). In that case,
         * a lookup service may be destroyed in the middle of one of
         * registration requests, causing a RemoteException. To avoid
         * this, an arbitrary delay is executed to allow all previous
         * registrations to complete.
         */
        logger.log(Level.FINE, ""
                   +": waiting "+(regCompletionDelay/1000)+" seconds before "
                   +"tear down to allow all registrations to complete ... ");
        DiscoveryServiceUtil.delayMS(regCompletionDelay);
        super.tearDown();
    }//end tearDown
    /** Defines the actual steps of this particular test.
     *
     *  1. Invokes the desired version of the <code>lookup</code> method
     *     on the service discovery manager - applying NO filtering
     *     (<code>null</code> filter parameter) - to query the discovered
     *     lookup services for the desired service.
     *  2. Verifies that the services returned are the services expected,
     *     and the <code>lookup</code> method blocks for the expected
     *     amount of time
     */
    protected void applyTestDef() throws Exception {
        /* Verify blocking mechanism for less than min registered services */
        verifyBlocking(waitDur);
        /* Register enough services to exceed the maximum, verify that the
         * call to lookup actually blocks until the desired services are
         * registered, and verify that no more than the maximum number are
         * returned.
         */
        waitDur = 1*60*1000; //reset the amount of time to block
        verifyBlocking(nServices,nAddServices,waitDur);
    }//end applyTestDef
    /** Tests that the blocking mechanism of the lookup() method will block
     *  for the expected amount of time based on the given parameter
     *  values.
     *
     *  If no services are to be registered (nSrvcs == 0), or if
     *  the template and filter combination given to lookup() match
     *  none of the registered services, this method verifies that
     *  lookup() not only blocks the full amount of time, but also
     *  returns null.
     *
     *  If services are to be registered (nSrvcs > 0), this method
     *  verifies that lookup() blocks until the expected matching
     *  service is registered with at least one of the lookup
     *  services used in the test.
     *
     *  This method will return <code>null</code> if there are no problems.
     *  If the <code>String</code> returned by this method is
     *  non-<code>null</code>, then the test should declare failure and
     *  display the value returned by this method.
     */
    // NOTE(review): the waitDur parameter shadows the field of the same
    // name; confirm the shadowing is intentional.
    protected void verifyBlocking(int startVal,int nSrvcs,long waitDur)
                                                            throws Exception
    {
        String testServiceClassname
                 = "com.sun.jini.test.spec.servicediscovery.AbstractBaseTest$TestService";
        long waitDurSecs = waitDur/1000; //for debug output
        if(nSrvcs > 0) {
            logger.log(Level.FINE, ""+": look up at least "
                       +minMatches+" service(s), but no more than "
                       +maxMatches+" service(s) -- blocking "
                       +waitDurSecs+" second(s)");
            /* Register all services after waiting less than the block time */
            logger.log(Level.FINE, ""+": "+expectedServiceList.size()
                       +" service(s) "
                       +"registered, registering "+nSrvcs
                       +" more service(s) ...");
            (new RegisterThread(startVal,nSrvcs,0,waitDur)).start();
        } else {//(nSrvcs<=0)
            /* Will register no more services */
            logger.log(Level.FINE, ""+": "+expectedServiceList.size()
                       +" service(s) "
                       +"registered, look up at least "
                       +minMatches+" service(s), but no more than "
                       +maxMatches+" service(s) -- blocking "
                       +waitDurSecs+" second(s)");
        }//endif(nSrvcs>0)
        /* Try to lookup the services, block until the services appear */
        long startTime = System.currentTimeMillis();
        ServiceItem[] srvcItems = srvcDiscoveryMgr.lookup(template,
                                                          minMatches,
                                                          maxMatches,
                                                          firstStageFilter,
                                                          waitDur);
        long endTime = System.currentTimeMillis();
        long actualBlockTime = endTime-startTime;
        // How far (in seconds) the actual block deviated from the request;
        // negative means lookup() returned early.
        long waitError = (actualBlockTime-waitDur)/1000;
        /* Delay to allow all of the services to finish registering */
        DiscoveryServiceUtil.delayMS(regCompletionDelay);
        /* populate the expected info after lookup to prevent delay */
        ArrayList expectedSrvcs
                           = new ArrayList(expectedServiceList.size());
        for(int i=0;i<expectedServiceList.size();i++) {
            expectedSrvcs.add(expectedServiceList.get(i));
        }//end loop
        /* Modify the list based on whether or not a filter exists */
        if(    (firstStageFilter != null)
            && (firstStageFilter instanceof ServiceItemFilter) )
        {
            // The filter rejects odd-valued services, so drop them from the
            // expected set; indx only advances past services that are kept.
            for(int i=0,indx=0,len=expectedSrvcs.size();i<len;i++) {
                if(srvcValOdd((TestService)expectedSrvcs.get(indx))) {
                    expectedSrvcs.remove(indx);
                } else {
                    indx++;
                }//endif
            }//end loop
        }//endif
        /* According to section SD.4.1.3 of the spec, with respect to
         * ServiceItem[] lookup(tmpl,min,max,filter,waitDur):
         * 1. lookup() will query all lookups before blocking, and if
         *    at least the acceptable minimum number of services are
         *    found, will return without blocking
         * 2. if the number of services found after querying all lookups
         *    first is less than the acceptable minimum, then lookup()
         *    will wait for the desired services to be registered with
         *    the lookups
         * 3. while lookup() is blocking, if enough new services are
         *    registered so that the acceptable minimum is achieved,
         *    lookup() will return immediately; that is, even if there
         *    is more time left on the wait period, lookup() will not
         *    wait for more services beyond the minimum.
         *
         *    For example, if 3 services are initially registered and
         *    lookup is called with min = 4 and max = 7, then lookup()
         *    will find the 3 services and then wait for more services to
         *    be registered. Suppose that while lookup() is blocking
         *    another 5 services are registered, bringing the total number
         *    of services to 8. In this case, lookup() will stop
         *    waiting and return 4 services (the minimum), not the
         *    maximum 7.
         * 4. if the minimum number of services have not been registered
         *    during the wait period, lookup() will return what it has
         *    found.
         *
         * Below, determine the number of services to expect based on
         * the specified behavior described above.
         */
        int nPreReg  = countSrvcsByVal(nServices);
        int nPostReg = expectedSrvcs.size();
        int nExpectedSrvcs = nPreReg;
        if(nPreReg < minMatches) {//will block after first lookup
            logger.log(Level.FINE, ""+": lookup() will block");
            if(nPostReg > nPreReg) {//will register more services
                if(nPostReg >= minMatches) {
                    nExpectedSrvcs = minMatches;
                } else {
                    nExpectedSrvcs = nPostReg;
                }
            } else {//will not register more services
                nExpectedSrvcs = nPreReg;
            }
        } else {//(nPreReg >= minMatches) ==> won't block
            logger.log(Level.FINE, ""
                       +": lookup() will NOT block");
            if(nPreReg == minMatches) {//return min immediately
                nExpectedSrvcs = minMatches;
            } else {//(nPreReg > minMatches)
                if(nPreReg < maxMatches) {
                    nExpectedSrvcs = nPreReg;
                } else {//(nPreReg >= maxMatches)
                    nExpectedSrvcs = maxMatches;
                }//endif
            }//endif
        }//endif
        logger.log(Level.FINE, ""
                          +": minMatches       = "+minMatches);
        logger.log(Level.FINE, ""
                          +": maxMatches       = "+maxMatches);
        logger.log(Level.FINE, ""
                          +": nPreReg          = "+nPreReg);
        logger.log(Level.FINE, ""
                          +": nPostReg         = "+nPostReg);
        logger.log(Level.FINE, ""
                          +": nExpectedSrvcs   = "+nExpectedSrvcs);
        logger.log(Level.FINE, ""
                          +": srvcItems.length = "+srvcItems.length);
        if(nExpectedSrvcs < minMatches) {//block full amount
            /* Blocking time should be within epsilon of full amount */
            // Tolerances: up to 3 seconds early, up to 30 seconds late.
            if(waitError<-3) {
                throw new TestException(" -- failed to block requested "
                                   +"time -- requested block = "
                                   +waitDurSecs+" second(s), actual "
                                   +"block = "+(actualBlockTime/1000)
                                   +" second(s)");
            } else if(waitError>30) {
                throw new TestException(" -- exceeded requested block "
                                   +"time -- requested block = "
                                   +waitDurSecs+" second(s), actual "
                                   +"block = "+(actualBlockTime/1000)
                                   +" second(s)");
            }//endif
        } else { //(nExpectedSrvcs>=minMatches)
            /* Blocking time should be less than the full amount */
            if(waitError >= 0) {
                throw new TestException(" -- blocked longer than expected "
                                   +"-- requested block = "
                                   +waitDurSecs+" second(s), actual "
                                   +"block = "+(actualBlockTime/1000)
                                   +" second(s)");
            }
        }//endif(nExpectedSrvcs<maxMatches)
        verifyServiceItems(srvcItems,
                           expectedSrvcs,
                           nExpectedSrvcs,
                           waitDur,
                           actualBlockTime);
    }//end verifyBlocking
    /** Convenience overload: no services registered during the block. */
    protected void verifyBlocking(long waitDur) throws Exception {
        verifyBlocking(0,0,waitDur);
    }//end verifyBlocking
    /** Convenience overload: registers nSrvcs services starting at index 0. */
    protected void verifyBlocking(int nSrvcs,long waitDur) throws Exception {
        verifyBlocking(0,nSrvcs,waitDur);
    }//end verifyBlocking
    /** Verifies that the array returned by lookup() has exactly the
     *  expected number of items and that every returned service is a
     *  member of the expected services set; throws TestException on any
     *  mismatch.
     */
    private void verifyServiceItems(ServiceItem[] srvcItems,
                                    ArrayList expectedSrvcs,
                                    int nExpectedSrvcs,
                                    long waitDur,
                                    long actualBlockTime)
                                                       throws Exception
    {
        if(srvcItems == null) {
            throw new TestException(" -- unexpected null service item array "
                              +"returned");
        } else if(srvcItems.length != nExpectedSrvcs) {
            logger.log(Level.FINE, ""+": lookup failed -- "
                              +"returned unexpected number of "
                              +"service items (expected = "
                              +nExpectedSrvcs+", but "
                              +"returned = "+srvcItems.length+")");
            throw new TestException(" -- lookup returned unexpected "
                              +"number of service items "
                              +"(expected = "
                              +nExpectedSrvcs+", but "
                              +"returned = "+srvcItems.length+")");
        } else {/* Compare returned array to expected services set */
            label_i:
            for(int i=0;i<srvcItems.length;i++) {
                logger.log(Level.FINE, ""+": comparing sevice item "+i);
                if( srvcItems[i] == null ) {
                    throw new TestException(" -- returned service item "+i
                                      +" is null");
                } else if(srvcItems[i].service == null) {
                    throw new TestException(" -- service component of "
                                      +"returned service item "+i
                                      +" is null");
                } else {
                    // Accept the item as soon as it matches any expected
                    // service; otherwise it is an unexpected result.
                    for(int j=0;j<expectedSrvcs.size();j++) {
                        if( (srvcItems[i].service).equals
                                                   (expectedSrvcs.get(j)) )
                        {
                            continue label_i; // next srvcItems[i]
                        }//endif
                    }//end loop (j)
                    throw new TestException(" -- returned service item "+i
                                      +" is not contained in the "
                                      +" expected set of services");
                }//endif
            }//end loop (i)
            logger.log(Level.FINE, ""+": all expected "
                              +"services found -- requested "
                              +"block = "+(waitDur/1000)
                              +" second(s), actual block = "
                              +(actualBlockTime/1000)
                              +" second(s)");
            return;//passed
        }//endif(srvcItems==null)
    }//end verifyServiceItems
}//end class LookupMinEqualsMax
| |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.rm;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.JobCounter;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.NormalizedResourceEvent;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.MRAppMaster;
import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobDiagnosticsUpdateEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobUpdatedNodesEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptKillEvent;
import org.apache.hadoop.mapreduce.v2.app.rm.preemption.AMPreemptionPolicy;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringInterner;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NMToken;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.PreemptionMessage;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.client.ClientRMProxy;
import org.apache.hadoop.yarn.client.api.NMTokenCache;
import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException;
import org.apache.hadoop.yarn.exceptions.InvalidLabelResourceRequestException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
import org.apache.hadoop.yarn.util.Clock;
import org.apache.hadoop.yarn.util.RackResolver;
import org.apache.hadoop.yarn.util.resource.Resources;
import com.google.common.annotations.VisibleForTesting;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Allocates the container from the ResourceManager scheduler.
*/
public class RMContainerAllocator extends RMContainerRequestor
implements ContainerAllocator {
static final Logger LOG = LoggerFactory.getLogger(RMContainerAllocator.class);
public static final
float DEFAULT_COMPLETED_MAPS_PERCENT_FOR_REDUCE_SLOWSTART = 0.05f;
static final Priority PRIORITY_FAST_FAIL_MAP;
static final Priority PRIORITY_REDUCE;
static final Priority PRIORITY_MAP;
static final Priority PRIORITY_OPPORTUNISTIC_MAP;
@VisibleForTesting
public static final String RAMPDOWN_DIAGNOSTIC = "Reducer preempted "
+ "to make room for pending map attempts";
private Thread eventHandlingThread;
private final AtomicBoolean stopped;
static {
PRIORITY_FAST_FAIL_MAP = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(Priority.class);
PRIORITY_FAST_FAIL_MAP.setPriority(5);
PRIORITY_REDUCE = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(Priority.class);
PRIORITY_REDUCE.setPriority(10);
PRIORITY_MAP = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(Priority.class);
PRIORITY_MAP.setPriority(20);
PRIORITY_OPPORTUNISTIC_MAP =
RecordFactoryProvider.getRecordFactory(null).newRecordInstance(
Priority.class);
PRIORITY_OPPORTUNISTIC_MAP.setPriority(19);
}
/*
Vocabulary Used:
pending -> requests which are NOT yet sent to RM
scheduled -> requests which are sent to RM but not yet assigned
assigned -> requests which are assigned to a container
completed -> request corresponding to which container has completed
Lifecycle of map
scheduled->assigned->completed
Lifecycle of reduce
pending->scheduled->assigned->completed
Maps are scheduled as soon as their requests are received. Reduces are
added to the pending and are ramped up (added to scheduled) based
on completed maps and current availability in the cluster.
*/
//reduces which are not yet scheduled
private final LinkedList<ContainerRequest> pendingReduces =
new LinkedList<ContainerRequest>();
//holds information about the assigned containers to task attempts
private final AssignedRequests assignedRequests;
//holds scheduled requests to be fulfilled by RM
private final ScheduledRequests scheduledRequests = new ScheduledRequests();
private int containersAllocated = 0;
private int containersReleased = 0;
private int hostLocalAssigned = 0;
private int rackLocalAssigned = 0;
private int lastCompletedTasks = 0;
private boolean recalculateReduceSchedule = false;
private Resource mapResourceRequest = Resources.none();
private Resource reduceResourceRequest = Resources.none();
private boolean reduceStarted = false;
private float maxReduceRampupLimit = 0;
private float maxReducePreemptionLimit = 0;
// Mapper allocation timeout, after which a reducer is forcibly preempted
private long reducerUnconditionalPreemptionDelayMs;
// Duration to wait before preempting a reducer when there is NO room
private long reducerNoHeadroomPreemptionDelayMs = 0;
private float reduceSlowStart = 0;
private int maxRunningMaps = 0;
private int maxRunningReduces = 0;
private long retryInterval;
private long retrystartTime;
private Clock clock;
private final AMPreemptionPolicy preemptionPolicy;
@VisibleForTesting
protected BlockingQueue<ContainerAllocatorEvent> eventQueue
= new LinkedBlockingQueue<ContainerAllocatorEvent>();
private ScheduleStats scheduleStats = new ScheduleStats();
private String mapNodeLabelExpression;
private String reduceNodeLabelExpression;
/**
 * Creates the allocator.
 *
 * @param clientService the AM client service
 * @param context the application context (supplies the clock)
 * @param preemptionPolicy policy consulted when reducers must be preempted
 */
public RMContainerAllocator(ClientService clientService, AppContext context,
    AMPreemptionPolicy preemptionPolicy) {
  super(clientService, context);
  this.stopped = new AtomicBoolean(false);
  this.preemptionPolicy = preemptionPolicy;
  this.clock = context.getClock();
  this.assignedRequests = createAssignedRequests();
}
/**
 * Factory hook for the assigned-requests bookkeeping; subclasses and tests
 * may override it to supply a custom implementation.
 */
protected AssignedRequests createAssignedRequests() {
  return new AssignedRequests();
}
/**
 * Reads this allocator's tunables from the job configuration: the reduce
 * slow-start threshold, reduce ramp-up and preemption limits, reducer
 * preemption delays, concurrent running-task limits, the RM retry interval,
 * node-label expressions and the opportunistic-map percentage.
 *
 * @param conf the job configuration
 * @throws Exception if the parent service fails to initialize
 */
@Override
protected void serviceInit(Configuration conf) throws Exception {
  super.serviceInit(conf);
  reduceSlowStart = conf.getFloat(
      MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART,
      DEFAULT_COMPLETED_MAPS_PERCENT_FOR_REDUCE_SLOWSTART);
  maxReduceRampupLimit = conf.getFloat(
      MRJobConfig.MR_AM_JOB_REDUCE_RAMPUP_UP_LIMIT,
      MRJobConfig.DEFAULT_MR_AM_JOB_REDUCE_RAMP_UP_LIMIT);
  maxReducePreemptionLimit = conf.getFloat(
      MRJobConfig.MR_AM_JOB_REDUCE_PREEMPTION_LIMIT,
      MRJobConfig.DEFAULT_MR_AM_JOB_REDUCE_PREEMPTION_LIMIT);
  // Both preemption delays are configured in seconds; store milliseconds.
  reducerUnconditionalPreemptionDelayMs = 1000 * conf.getInt(
      MRJobConfig.MR_JOB_REDUCER_UNCONDITIONAL_PREEMPT_DELAY_SEC,
      MRJobConfig.DEFAULT_MR_JOB_REDUCER_UNCONDITIONAL_PREEMPT_DELAY_SEC);
  reducerNoHeadroomPreemptionDelayMs = conf.getInt(
      MRJobConfig.MR_JOB_REDUCER_PREEMPT_DELAY_SEC,
      MRJobConfig.DEFAULT_MR_JOB_REDUCER_PREEMPT_DELAY_SEC) * 1000;//sec -> ms
  maxRunningMaps = conf.getInt(MRJobConfig.JOB_RUNNING_MAP_LIMIT,
      MRJobConfig.DEFAULT_JOB_RUNNING_MAP_LIMIT);
  maxRunningReduces = conf.getInt(MRJobConfig.JOB_RUNNING_REDUCE_LIMIT,
      MRJobConfig.DEFAULT_JOB_RUNNING_REDUCE_LIMIT);
  RackResolver.init(conf);
  // Read from conf directly for consistency with the rest of this method;
  // super.serviceInit(conf) has already installed it as the service config.
  retryInterval = conf.getLong(MRJobConfig.MR_AM_TO_RM_WAIT_INTERVAL_MS,
      MRJobConfig.DEFAULT_MR_AM_TO_RM_WAIT_INTERVAL_MS);
  mapNodeLabelExpression = conf.get(MRJobConfig.MAP_NODE_LABEL_EXP);
  reduceNodeLabelExpression = conf.get(MRJobConfig.REDUCE_NODE_LABEL_EXP);
  // Init startTime to current time. If all goes well, it will be reset after
  // first attempt to contact RM.
  retrystartTime = System.currentTimeMillis();
  this.scheduledRequests.setNumOpportunisticMapsPercent(
      conf.getInt(MRJobConfig.MR_NUM_OPPORTUNISTIC_MAPS_PERCENT,
          MRJobConfig.DEFAULT_MR_NUM_OPPORTUNISTIC_MAPS_PERCENT));
  LOG.info(this.scheduledRequests.getNumOpportunisticMapsPercent() +
      "% of the mappers will be scheduled using OPPORTUNISTIC containers");
}
/**
 * Starts the background dispatcher thread that drains {@link #eventQueue}
 * and applies each event via {@link #handleEvent}. Any throwable escaping
 * event handling is treated as fatal: the job is sent an INTERNAL_ERROR
 * event and the dispatcher exits.
 */
@Override
protected void serviceStart() throws Exception {
  this.eventHandlingThread = new Thread() {
    @SuppressWarnings("unchecked")
    @Override
    public void run() {
      ContainerAllocatorEvent event;
      while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
        try {
          event = RMContainerAllocator.this.eventQueue.take();
        } catch (InterruptedException e) {
          // Interruption is the normal shutdown path; only log otherwise.
          if (!stopped.get()) {
            LOG.error("Returning, interrupted : " + e);
          }
          return;
        }
        try {
          handleEvent(event);
        } catch (Throwable t) {
          LOG.error("Error in handling event type " + event.getType()
              + " to the ContainerAllocator", t);
          // Kill the AM
          eventHandler.handle(new JobEvent(getJob().getID(),
              JobEventType.INTERNAL_ERROR));
          return;
        }
      }
    }
  };
  this.eventHandlingThread.start();
  super.serviceStart();
}
/**
 * One allocator heartbeat: exchanges requests/allocations with the RM via
 * {@link #getResources()}, hands any newly allocated containers to the
 * scheduled requests for assignment, and re-evaluates the reduce schedule
 * (preempting reducers first if needed).
 *
 * @throws Exception if the RM exchange fails fatally
 */
@Override
protected synchronized void heartbeat() throws Exception {
scheduleStats.updateAndLogIfChanged("Before Scheduling: ");
List<Container> allocatedContainers = getResources();
if (allocatedContainers != null && allocatedContainers.size() > 0) {
scheduledRequests.assign(allocatedContainers);
}
int completedMaps = getJob().getCompletedMaps();
int completedTasks = completedMaps + getJob().getCompletedReduces();
// Recompute the reduce schedule whenever task progress was made or map
// requests are still waiting to be satisfied.
if ((lastCompletedTasks != completedTasks) ||
(scheduledRequests.maps.size() > 0)) {
lastCompletedTasks = completedTasks;
recalculateReduceSchedule = true;
}
if (recalculateReduceSchedule) {
boolean reducerPreempted = preemptReducesIfNeeded();
if (!reducerPreempted) {
// Only schedule new reducers if no reducer preemption happens for
// this heartbeat
scheduleReduces(getJob().getTotalMaps(), completedMaps,
scheduledRequests.maps.size(), scheduledRequests.reduces.size(),
assignedRequests.maps.size(), assignedRequests.reduces.size(),
mapResourceRequest, reduceResourceRequest, pendingReduces.size(),
maxReduceRampupLimit, reduceSlowStart);
}
recalculateReduceSchedule = false;
}
scheduleStats.updateAndLogIfChanged("After Scheduling: ");
}
/**
 * Stops the dispatcher thread and the parent service, then logs the final
 * scheduling statistics. Safe to call more than once; only the first call
 * performs the shutdown.
 */
@Override
protected void serviceStop() throws Exception {
  if (!stopped.getAndSet(true)) {
    Thread dispatcher = eventHandlingThread;
    if (dispatcher != null) {
      dispatcher.interrupt();
    }
    super.serviceStop();
    scheduleStats.log("Final Stats: ");
  }
}
/** @return the assigned-requests bookkeeping (exposed for tests). */
@Private
@VisibleForTesting
AssignedRequests getAssignedRequests() {
  return this.assignedRequests;
}
/** @return the scheduled-requests bookkeeping (exposed for tests). */
@Private
@VisibleForTesting
ScheduledRequests getScheduledRequests() {
  return this.scheduledRequests;
}
/** @return the number of reduce requests not yet sent to the RM. */
@Private
@VisibleForTesting
int getNumOfPendingReduces() {
  return this.pendingReduces.size();
}
/** @return whether reducer scheduling has begun (slow-start passed). */
public boolean getIsReduceStarted() {
  return this.reduceStarted;
}
/** Marks whether reducer scheduling has begun. */
public void setIsReduceStarted(boolean reduceStarted) {
  this.reduceStarted = reduceStarted;
}
/**
 * Enqueues an allocator event for the background dispatcher thread,
 * logging when the queue grows large or its remaining capacity runs low.
 *
 * @param event the event to queue
 * @throws YarnRuntimeException if interrupted while enqueueing
 */
@Override
public void handle(ContainerAllocatorEvent event) {
  final int queued = eventQueue.size();
  if (queued != 0 && queued % 1000 == 0) {
    LOG.info("Size of event-queue in RMContainerAllocator is " + queued);
  }
  final int spaceLeft = eventQueue.remainingCapacity();
  if (spaceLeft < 1000) {
    LOG.warn("Very low remaining capacity in the event-queue "
        + "of RMContainerAllocator: " + spaceLeft);
  }
  try {
    eventQueue.put(event);
  } catch (InterruptedException e) {
    throw new YarnRuntimeException(e);
  }
}
/**
 * Dispatches a single allocator event (runs on the dispatcher thread).
 * CONTAINER_REQ events are routed to the map/reduce request handlers;
 * CONTAINER_DEALLOCATE removes the attempt's request or releases its
 * assigned container; CONTAINER_FAILED records the failure against the
 * host and informs the preemption policy. Every event marks the reduce
 * schedule for recalculation.
 *
 * @param event the event to process
 */
protected synchronized void handleEvent(ContainerAllocatorEvent event) {
recalculateReduceSchedule = true;
if (event.getType() == ContainerAllocator.EventType.CONTAINER_REQ) {
ContainerRequestEvent reqEvent = (ContainerRequestEvent) event;
boolean isMap = reqEvent.getAttemptID().getTaskId().getTaskType().
equals(TaskType.MAP);
if (isMap) {
handleMapContainerRequest(reqEvent);
} else {
handleReduceContainerRequest(reqEvent);
}
} else if (
event.getType() == ContainerAllocator.EventType.CONTAINER_DEALLOCATE) {
LOG.info("Processing the event " + event.toString());
TaskAttemptId aId = event.getAttemptID();
// First try the not-yet-assigned requests; if the attempt already has a
// container, release it back to the RM instead.
boolean removed = scheduledRequests.remove(aId);
if (!removed) {
ContainerId containerId = assignedRequests.get(aId);
if (containerId != null) {
removed = true;
assignedRequests.remove(aId);
containersReleased++;
pendingRelease.add(containerId);
release(containerId);
}
}
if (!removed) {
LOG.error("Could not deallocate container for task attemptId " +
aId);
}
preemptionPolicy.handleCompletedContainer(event.getAttemptID());
} else if (
event.getType() == ContainerAllocator.EventType.CONTAINER_FAILED) {
ContainerFailedEvent fEv = (ContainerFailedEvent) event;
String host = getHost(fEv.getContMgrAddress());
containerFailedOnHost(host);
// propagate failures to preemption policy to discard checkpoints for
// failed tasks
preemptionPolicy.handleFailedContainer(event.getAttemptID());
}
}
/**
 * Handles a container request for a reduce attempt. The first request
 * fixes the per-reducer resource size (and emits a normalized-resource
 * history event); subsequent requests are coerced to that size. Requests
 * that exceed the cluster's max container capability kill the job;
 * otherwise the request joins the pending-reduces list (at the front for
 * previously failed attempts, for fast fail).
 *
 * @param reqEvent the reduce container request event
 */
@SuppressWarnings({ "unchecked" })
private void handleReduceContainerRequest(ContainerRequestEvent reqEvent) {
assert(reqEvent.getAttemptID().getTaskId().getTaskType().equals(
TaskType.REDUCE));
Resource supportedMaxContainerCapability = getMaxContainerCapability();
JobId jobId = getJob().getID();
if (reduceResourceRequest.equals(Resources.none())) {
reduceResourceRequest = reqEvent.getCapability();
eventHandler.handle(new JobHistoryEvent(jobId,
new NormalizedResourceEvent(
org.apache.hadoop.mapreduce.TaskType.REDUCE,
reduceResourceRequest.getMemorySize())));
LOG.info("reduceResourceRequest:" + reduceResourceRequest);
}
boolean reduceContainerRequestAccepted = true;
if (reduceResourceRequest.getMemorySize() >
supportedMaxContainerCapability.getMemorySize()
||
reduceResourceRequest.getVirtualCores() >
supportedMaxContainerCapability.getVirtualCores()) {
reduceContainerRequestAccepted = false;
}
if (reduceContainerRequestAccepted) {
// set the resources
reqEvent.getCapability().setVirtualCores(
reduceResourceRequest.getVirtualCores());
reqEvent.getCapability().setMemorySize(
reduceResourceRequest.getMemorySize());
if (reqEvent.getEarlierAttemptFailed()) {
//previously failed reducers are added to the front for fail fast
pendingReduces.addFirst(new ContainerRequest(reqEvent,
PRIORITY_REDUCE, reduceNodeLabelExpression));
} else {
//reduces are added to pending queue and are slowly ramped up
pendingReduces.add(new ContainerRequest(reqEvent,
PRIORITY_REDUCE, reduceNodeLabelExpression));
}
} else {
String diagMsg = "REDUCE capability required is more than the " +
"supported max container capability in the cluster. Killing" +
" the Job. reduceResourceRequest: " + reduceResourceRequest +
" maxContainerCapability:" + supportedMaxContainerCapability;
LOG.info(diagMsg);
eventHandler.handle(new JobDiagnosticsUpdateEvent(jobId, diagMsg));
eventHandler.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
}
}
/**
 * Handles a container request for a map attempt. Mirrors
 * {@code handleReduceContainerRequest}: the first request fixes the
 * per-mapper resource size, oversized requests kill the job, and accepted
 * requests are scheduled immediately (maps are never held pending).
 *
 * @param reqEvent the map container request event
 */
@SuppressWarnings({ "unchecked" })
private void handleMapContainerRequest(ContainerRequestEvent reqEvent) {
assert(reqEvent.getAttemptID().getTaskId().getTaskType().equals(
TaskType.MAP));
Resource supportedMaxContainerCapability = getMaxContainerCapability();
JobId jobId = getJob().getID();
if (mapResourceRequest.equals(Resources.none())) {
mapResourceRequest = reqEvent.getCapability();
eventHandler.handle(new JobHistoryEvent(jobId,
new NormalizedResourceEvent(
org.apache.hadoop.mapreduce.TaskType.MAP,
mapResourceRequest.getMemorySize())));
LOG.info("mapResourceRequest:" + mapResourceRequest);
}
boolean mapContainerRequestAccepted = true;
if (mapResourceRequest.getMemorySize() >
supportedMaxContainerCapability.getMemorySize()
||
mapResourceRequest.getVirtualCores() >
supportedMaxContainerCapability.getVirtualCores()) {
mapContainerRequestAccepted = false;
}
if(mapContainerRequestAccepted) {
// set the resources
reqEvent.getCapability().setMemorySize(
mapResourceRequest.getMemorySize());
reqEvent.getCapability().setVirtualCores(
mapResourceRequest.getVirtualCores());
scheduledRequests.addMap(reqEvent); //maps are immediately scheduled
} else {
String diagMsg = "The required MAP capability is more than the " +
"supported max container capability in the cluster. Killing" +
" the Job. mapResourceRequest: " + mapResourceRequest +
" maxContainerCapability:" + supportedMaxContainerCapability;
LOG.info(diagMsg);
eventHandler.handle(new JobDiagnosticsUpdateEvent(jobId, diagMsg));
eventHandler.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
}
}
/**
 * Extracts the host portion from a "host:port" container-manager address.
 * Returns the input unchanged when it does not split into exactly two
 * colon-separated parts.
 */
private static String getHost(String contMgrAddress) {
  String[] parts = contMgrAddress.split(":");
  return parts.length == 2 ? parts[0] : contMgrAddress;
}
/** Test hook: overrides the per-reducer resource request. */
@Private
@VisibleForTesting
synchronized void setReduceResourceRequest(Resource res) {
  this.reduceResourceRequest = res;
}
/** Test hook: overrides the per-mapper resource request. */
@Private
@VisibleForTesting
synchronized void setMapResourceRequest(Resource res) {
  this.mapResourceRequest = res;
}
/**
 * Decides whether reducers must be preempted to unblock pending map
 * requests. No-op unless maps are pending and no mappers are currently
 * assigned. Preempts unconditionally once maps have waited past the
 * unconditional-delay threshold; otherwise preempts only when the
 * headroom (minus scheduled reducers) cannot fit a single mapper and the
 * no-headroom delay has elapsed.
 *
 * @return true if reducers were preempted in this invocation
 */
@Private
@VisibleForTesting
boolean preemptReducesIfNeeded() {
if (reduceResourceRequest.equals(Resources.none())) {
return false; // no reduces
}
if (assignedRequests.maps.size() > 0) {
// there are assigned mappers
return false;
}
if (scheduledRequests.maps.size() <= 0) {
// there are no pending requests for mappers
return false;
}
// At this point:
// we have pending mappers and all assigned resources are taken by reducers
if (reducerUnconditionalPreemptionDelayMs >= 0) {
// Unconditional preemption is enabled.
// If mappers are pending for longer than the configured threshold,
// preempt reducers irrespective of what the headroom is.
if (preemptReducersForHangingMapRequests(
reducerUnconditionalPreemptionDelayMs)) {
return true;
}
}
// The pending mappers haven't been waiting for too long. Let us see if
// there are enough resources for a mapper to run. This is calculated by
// excluding scheduled reducers from headroom and comparing it against
// resources required to run one mapper.
Resource scheduledReducesResource = Resources.multiply(
reduceResourceRequest, scheduledRequests.reduces.size());
Resource availableResourceForMap =
Resources.subtract(getAvailableResources(), scheduledReducesResource);
if (ResourceCalculatorUtils.computeAvailableContainers(availableResourceForMap,
mapResourceRequest, getSchedulerResourceTypes()) > 0) {
// Enough room to run a mapper
return false;
}
// Available resources are not enough to run mapper. See if we should hold
// off before preempting reducers and preempt if okay.
return preemptReducersForHangingMapRequests(reducerNoHeadroomPreemptionDelayMs);
}
/**
 * Preempts reducers if any scheduled map request has been outstanding for
 * longer than the given threshold.
 *
 * @param pendingThreshold hang threshold in milliseconds
 * @return true if a preemption was triggered
 */
private boolean preemptReducersForHangingMapRequests(long pendingThreshold) {
  final int hanging =
      getNumHangingRequests(pendingThreshold, scheduledRequests.maps);
  if (hanging <= 0) {
    return false;
  }
  preemptReducer(hanging);
  return true;
}
/** Moves every scheduled reduce request back onto the pending list. */
private void clearAllPendingReduceRequests() {
  rampDownReduces(Integer.MAX_VALUE);
}
/**
 * Rams down all scheduled reduces, then preempts enough assigned reducers
 * to free space for the hanging maps. The count preempted is at least
 * enough for one map, at most enough for all hanging maps, and further
 * clamped by the configured preemption-limit fraction of the job's total
 * resources.
 *
 * @param hangingMapRequests number of map requests considered hanging
 */
private void preemptReducer(int hangingMapRequests) {
clearAllPendingReduceRequests();
// preempt for making space for at least one map
int preemptionReduceNumForOneMap =
ResourceCalculatorUtils.divideAndCeilContainers(mapResourceRequest,
reduceResourceRequest, getSchedulerResourceTypes());
int preemptionReduceNumForPreemptionLimit =
ResourceCalculatorUtils.divideAndCeilContainers(
Resources.multiply(getResourceLimit(), maxReducePreemptionLimit),
reduceResourceRequest, getSchedulerResourceTypes());
int preemptionReduceNumForAllMaps =
ResourceCalculatorUtils.divideAndCeilContainers(
Resources.multiply(mapResourceRequest, hangingMapRequests),
reduceResourceRequest, getSchedulerResourceTypes());
// clamp: max(one-map, limit-fraction) but never more than all hanging maps
int toPreempt =
Math.min(Math.max(preemptionReduceNumForOneMap,
preemptionReduceNumForPreemptionLimit),
preemptionReduceNumForAllMaps);
LOG.info("Going to preempt " + toPreempt
+ " due to lack of space for maps");
assignedRequests.preemptReduce(toPreempt);
}
/**
 * Counts how many container requests in {@code requestMap} have been
 * outstanding for longer than {@code allocationDelayThresholdMs}.
 *
 * @param allocationDelayThresholdMs hang threshold in milliseconds; a
 *     non-positive value means every request counts as hanging
 * @param requestMap outstanding requests keyed by task attempt
 * @return the number of hanging requests
 */
private int getNumHangingRequests(long allocationDelayThresholdMs,
    Map<TaskAttemptId, ContainerRequest> requestMap) {
  if (allocationDelayThresholdMs <= 0) {
    return requestMap.size();
  }
  int hangingRequests = 0;
  final long currTime = clock.getTime();
  for (ContainerRequest request : requestMap.values()) {
    if (currTime - request.requestTimeMs > allocationDelayThresholdMs) {
      hangingRequests++;
    }
  }
  return hangingRequests;
}
/**
 * Recomputes how many reducers should be scheduled. Applies the slow-start
 * threshold, ramps up everything once all maps are assigned, and otherwise
 * splits the job's total resource limit between maps and reduces in
 * proportion to map completion (capped by {@code maxReduceRampupLimit}),
 * ramping reduces up or down to match the computed reduce share.
 *
 * @param totalMaps total number of maps in the job
 * @param completedMaps maps finished so far
 * @param scheduledMaps map requests sent to the RM but unassigned
 * @param scheduledReduces reduce requests sent to the RM but unassigned
 * @param assignedMaps maps currently holding containers
 * @param assignedReduces reduces currently holding containers
 * @param mapResourceReqt per-mapper resource request
 * @param reduceResourceReqt per-reducer resource request
 * @param numPendingReduces reduces not yet sent to the RM
 * @param maxReduceRampupLimit cap on the reduce share of resources
 * @param reduceSlowStart fraction of maps that must finish first
 */
@Private
public void scheduleReduces(
int totalMaps, int completedMaps,
int scheduledMaps, int scheduledReduces,
int assignedMaps, int assignedReduces,
Resource mapResourceReqt, Resource reduceResourceReqt,
int numPendingReduces,
float maxReduceRampupLimit, float reduceSlowStart) {
if (numPendingReduces == 0) {
return;
}
// get available resources for this job
Resource headRoom = getAvailableResources();
LOG.info("Recalculating schedule, headroom=" + headRoom);
//check for slow start
if (!getIsReduceStarted()) {//not set yet
int completedMapsForReduceSlowstart = (int)Math.ceil(reduceSlowStart *
totalMaps);
if(completedMaps < completedMapsForReduceSlowstart) {
LOG.info("Reduce slow start threshold not met. " +
"completedMapsForReduceSlowstart " +
completedMapsForReduceSlowstart);
return;
} else {
LOG.info("Reduce slow start threshold reached. Scheduling reduces.");
setIsReduceStarted(true);
}
}
//if all maps are assigned, then ramp up all reduces irrespective of the
//headroom
if (scheduledMaps == 0 && numPendingReduces > 0) {
LOG.info("All maps assigned. " +
"Ramping up all remaining reduces:" + numPendingReduces);
scheduleAllReduces();
return;
}
float completedMapPercent = 0f;
if (totalMaps != 0) {//support for 0 maps
completedMapPercent = (float)completedMaps/totalMaps;
} else {
completedMapPercent = 1;
}
// Resources already committed to maps and reduces (scheduled + assigned).
Resource netScheduledMapResource =
Resources.multiply(mapResourceReqt, (scheduledMaps + assignedMaps));
Resource netScheduledReduceResource =
Resources.multiply(reduceResourceReqt,
(scheduledReduces + assignedReduces));
Resource finalMapResourceLimit;
Resource finalReduceResourceLimit;
// ramp up the reduces based on completed map percentage
Resource totalResourceLimit = getResourceLimit();
Resource idealReduceResourceLimit =
Resources.multiply(totalResourceLimit,
Math.min(completedMapPercent, maxReduceRampupLimit));
Resource ideaMapResourceLimit =
Resources.subtract(totalResourceLimit, idealReduceResourceLimit);
// check if there aren't enough maps scheduled, give the free map capacity
// to reduce.
// Even when container number equals, there may be unused resources in one
// dimension
if (ResourceCalculatorUtils.computeAvailableContainers(ideaMapResourceLimit,
mapResourceReqt, getSchedulerResourceTypes()) >= (scheduledMaps + assignedMaps)) {
// enough resource given to maps, given the remaining to reduces
Resource unusedMapResourceLimit =
Resources.subtract(ideaMapResourceLimit, netScheduledMapResource);
finalReduceResourceLimit =
Resources.add(idealReduceResourceLimit, unusedMapResourceLimit);
finalMapResourceLimit =
Resources.subtract(totalResourceLimit, finalReduceResourceLimit);
} else {
finalMapResourceLimit = ideaMapResourceLimit;
finalReduceResourceLimit = idealReduceResourceLimit;
}
LOG.info("completedMapPercent " + completedMapPercent
+ " totalResourceLimit:" + totalResourceLimit
+ " finalMapResourceLimit:" + finalMapResourceLimit
+ " finalReduceResourceLimit:" + finalReduceResourceLimit
+ " netScheduledMapResource:" + netScheduledMapResource
+ " netScheduledReduceResource:" + netScheduledReduceResource);
// Positive rampUp: more reduce slots available than committed; negative:
// reduces currently over their share and must be ramped down.
int rampUp =
ResourceCalculatorUtils.computeAvailableContainers(Resources.subtract(
finalReduceResourceLimit, netScheduledReduceResource),
reduceResourceReqt, getSchedulerResourceTypes());
if (rampUp > 0) {
rampUp = Math.min(rampUp, numPendingReduces);
LOG.info("Ramping up " + rampUp);
rampUpReduces(rampUp);
} else if (rampUp < 0) {
int rampDown = -1 * rampUp;
rampDown = Math.min(rampDown, scheduledReduces);
LOG.info("Ramping down " + rampDown);
rampDownReduces(rampDown);
}
}
/** Drains every pending reduce request into the scheduled set, in order. */
@Private
public void scheduleAllReduces() {
  while (!pendingReduces.isEmpty()) {
    scheduledRequests.addReduce(pendingReduces.removeFirst());
  }
}
/**
 * Promotes the given number of reducers from the pending list to the
 * scheduled set, oldest first.
 *
 * @param rampUp number of reducers to promote
 */
@Private
public void rampUpReduces(int rampUp) {
  while (rampUp-- > 0) {
    scheduledRequests.addReduce(pendingReduces.removeFirst());
  }
}
/**
 * Moves up to {@code rampDown} reducers from the scheduled set back to the
 * pending list, stopping early once no scheduled reduce remains.
 *
 * @param rampDown maximum number of reducers to demote
 */
@Private
public void rampDownReduces(int rampDown) {
  for (int i = 0; i < rampDown; i++) {
    ContainerRequest demoted = scheduledRequests.removeReduce();
    if (demoted == null) {
      return; // nothing left to demote
    }
    pendingReduces.add(demoted);
  }
}
/**
 * Performs one allocate exchange with the RM and returns the newly
 * allocated containers. Handles RM restart (re-register and resend
 * outstanding requests), lost AM attempts (reboot the job), invalid label
 * requests (kill the job), and transient connection failures (retry until
 * the retry interval expires). Also absorbs NM/AMRM tokens, preemption
 * messages, completed-container statuses, updated node reports, job
 * priority changes and the timeline collector info from the response.
 *
 * @return newly allocated containers, or null when the AM had to resync
 * @throws Exception on fatal RM communication failures
 */
@SuppressWarnings("unchecked")
private List<Container> getResources() throws Exception {
applyConcurrentTaskLimits();
// will be null the first time
Resource headRoom = Resources.clone(getAvailableResources());
AllocateResponse response;
/*
* If contact with RM is lost, the AM will wait MR_AM_TO_RM_WAIT_INTERVAL_MS
* milliseconds before aborting. During this interval, AM will still try
* to contact the RM.
*/
try {
response = makeRemoteRequest();
// Reset retry count if no exception occurred.
retrystartTime = System.currentTimeMillis();
} catch (ApplicationAttemptNotFoundException e ) {
// This can happen if the RM has been restarted. If it is in that state,
// this application must clean itself up.
eventHandler.handle(new JobEvent(this.getJob().getID(),
JobEventType.JOB_AM_REBOOT));
throw new RMContainerAllocationException(
"Resource Manager doesn't recognize AttemptId: "
+ this.getContext().getApplicationAttemptId(), e);
} catch (ApplicationMasterNotRegisteredException e) {
LOG.info("ApplicationMaster is out of sync with ResourceManager,"
+ " hence resync and send outstanding requests.");
// RM may have restarted, re-register with RM.
lastResponseID = 0;
register();
addOutstandingRequestOnResync();
return null;
} catch (InvalidLabelResourceRequestException e) {
// If Invalid label exception is received means the requested label doesnt
// have access so killing job in this case.
String diagMsg = "Requested node-label-expression is invalid: "
+ StringUtils.stringifyException(e);
LOG.info(diagMsg);
JobId jobId = this.getJob().getID();
eventHandler.handle(new JobDiagnosticsUpdateEvent(jobId, diagMsg));
eventHandler.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
throw e;
} catch (Exception e) {
// This can happen when the connection to the RM has gone down. Keep
// re-trying until the retryInterval has expired.
if (System.currentTimeMillis() - retrystartTime >= retryInterval) {
LOG.error("Could not contact RM after " + retryInterval + " milliseconds.");
eventHandler.handle(new JobEvent(this.getJob().getID(),
JobEventType.JOB_AM_REBOOT));
throw new RMContainerAllocationException("Could not contact RM after " +
retryInterval + " milliseconds.");
}
// Throw this up to the caller, which may decide to ignore it and
// continue to attempt to contact the RM.
throw e;
}
Resource newHeadRoom = getAvailableResources();
List<Container> newContainers = response.getAllocatedContainers();
// Setting NMTokens
if (response.getNMTokens() != null) {
for (NMToken nmToken : response.getNMTokens()) {
NMTokenCache.setNMToken(nmToken.getNodeId().toString(),
nmToken.getToken());
}
}
// Setting AMRMToken
if (response.getAMRMToken() != null) {
updateAMRMToken(response.getAMRMToken());
}
List<ContainerStatus> finishedContainers =
response.getCompletedContainersStatuses();
// propagate preemption requests
final PreemptionMessage preemptReq = response.getPreemptionMessage();
if (preemptReq != null) {
preemptionPolicy.preempt(
new PreemptionContext(assignedRequests), preemptReq);
}
// A new container, a finished container, or a headroom change all mean
// the reduce schedule may need to be recomputed on this heartbeat.
if (newContainers.size() + finishedContainers.size() > 0
|| !headRoom.equals(newHeadRoom)) {
//something changed
recalculateReduceSchedule = true;
if (LOG.isDebugEnabled() && !headRoom.equals(newHeadRoom)) {
LOG.debug("headroom=" + newHeadRoom);
}
}
if (LOG.isDebugEnabled()) {
for (Container cont : newContainers) {
LOG.debug("Received new Container :" + cont);
}
}
//Called on each allocation. Will know about newly blacklisted/added hosts.
computeIgnoreBlacklisting();
handleUpdatedNodes(response);
handleJobPriorityChange(response);
// Handle receiving the timeline collector address and token for this app.
MRAppMaster.RunningAppContext appContext =
(MRAppMaster.RunningAppContext)this.getContext();
if (appContext.getTimelineV2Client() != null) {
appContext.getTimelineV2Client().
setTimelineCollectorInfo(response.getCollectorInfo());
}
for (ContainerStatus cont : finishedContainers) {
processFinishedContainer(cont);
}
return newContainers;
}
/**
 * Books out a completed container: drops it from the pending-release and
 * assigned sets, forwards the container diagnostics to the task attempt,
 * emits the attempt's completion (or kill) event, and informs the
 * preemption policy.
 *
 * @param container the completed container's status from the RM
 */
@SuppressWarnings("unchecked")
@VisibleForTesting
void processFinishedContainer(ContainerStatus container) {
LOG.info("Received completed container " + container.getContainerId());
TaskAttemptId attemptID = assignedRequests.get(container.getContainerId());
if (attemptID == null) {
LOG.error("Container complete event for unknown container "
+ container.getContainerId());
} else {
pendingRelease.remove(container.getContainerId());
assignedRequests.remove(attemptID);
// Send the diagnostics
String diagnostic = StringInterner.weakIntern(container.getDiagnostics());
eventHandler.handle(new TaskAttemptDiagnosticsUpdateEvent(attemptID,
diagnostic));
// send the container completed event to Task attempt
eventHandler.handle(createContainerFinishedEvent(container, attemptID));
preemptionPolicy.handleCompletedContainer(attemptID);
}
}
/**
 * Caps the number of outstanding container asks so that assigned plus
 * requested tasks stay within the configured concurrent running-task
 * limits. Fast-fail map requests are funded first out of the map budget;
 * the remainder goes to normal (and opportunistic) maps. Reduces have an
 * independent budget. Limits of zero or less mean "unlimited" and are
 * skipped.
 */
private void applyConcurrentTaskLimits() {
int numScheduledMaps = scheduledRequests.maps.size();
if (maxRunningMaps > 0 && numScheduledMaps > 0 &&
getJob().getTotalMaps() > maxRunningMaps) {
int maxRequestedMaps = Math.max(0,
maxRunningMaps - assignedRequests.maps.size());
int numScheduledFailMaps = scheduledRequests.earlierFailedMaps.size();
int failedMapRequestLimit = Math.min(maxRequestedMaps,
numScheduledFailMaps);
int normalMapRequestLimit = Math.min(
maxRequestedMaps - failedMapRequestLimit,
numScheduledMaps - numScheduledFailMaps);
setRequestLimit(PRIORITY_FAST_FAIL_MAP, mapResourceRequest,
failedMapRequestLimit);
setRequestLimit(PRIORITY_MAP, mapResourceRequest, normalMapRequestLimit);
setRequestLimit(PRIORITY_OPPORTUNISTIC_MAP, mapResourceRequest,
normalMapRequestLimit);
}
int numScheduledReduces = scheduledRequests.reduces.size();
if (maxRunningReduces > 0 && numScheduledReduces > 0 &&
getJob().getTotalReduces() > maxRunningReduces) {
int maxRequestedReduces = Math.max(0,
maxRunningReduces - assignedRequests.reduces.size());
int reduceRequestLimit = Math.min(maxRequestedReduces,
numScheduledReduces);
setRequestLimit(PRIORITY_REDUCE, reduceResourceRequest,
reduceRequestLimit);
}
}
/** @return true if another map may run under the running-map limit. */
private boolean canAssignMaps() {
  return maxRunningMaps <= 0
      || assignedRequests.maps.size() < maxRunningMaps;
}
/** @return true if another reduce may run under the running-reduce limit. */
private boolean canAssignReduces() {
  return maxRunningReduces <= 0
      || assignedRequests.reduces.size() < maxRunningReduces;
}
/**
 * Installs a fresh AMRM token (received from the RM on rollover) into the
 * current user's credentials.
 *
 * @param token the raw token record from the allocate response
 * @throws IOException if the current UGI cannot be obtained
 */
private void updateAMRMToken(Token token) throws IOException {
org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> amrmToken =
new org.apache.hadoop.security.token.Token<AMRMTokenIdentifier>(token
.getIdentifier().array(), token.getPassword().array(), new Text(
token.getKind()), new Text(token.getService()));
UserGroupInformation currentUGI = UserGroupInformation.getCurrentUser();
currentUGI.addToken(amrmToken);
// NOTE(review): the service is set after the token is added to the UGI;
// presumably the credentials retain this same mutable token instance so
// the update is visible — confirm against UGI/Credentials semantics.
amrmToken.setService(ClientRMProxy.getAMRMTokenService(getConfig()));
}
/**
 * Maps a finished container's exit status to the task-attempt event to
 * emit: ABORTED/PREEMPTED exits were initiated by the framework and are
 * reported as kills; everything else is an ordinary completion.
 *
 * @param cont the finished container's status
 * @param attemptID the attempt that was running in the container
 * @return the event to deliver to the task attempt
 */
@VisibleForTesting
public TaskAttemptEvent createContainerFinishedEvent(ContainerStatus cont,
    TaskAttemptId attemptID) {
  final int exitStatus = cont.getExitStatus();
  final boolean killedByFramework = exitStatus == ContainerExitStatus.ABORTED
      || exitStatus == ContainerExitStatus.PREEMPTED;
  return new TaskAttemptEvent(attemptID, killedByFramework
      ? TaskAttemptEventType.TA_KILL
      : TaskAttemptEventType.TA_CONTAINER_COMPLETED);
}
/**
 * Reacts to node-state changes reported by the RM: notifies the job of the
 * updated nodes and kills any task attempt currently assigned to a node
 * that became unusable. Map attempts killed this way are rescheduled;
 * reduce attempts are not.
 *
 * @param response the RM allocate response carrying node reports
 */
@SuppressWarnings("unchecked")
private void handleUpdatedNodes(AllocateResponse response) {
  List<NodeReport> updatedNodes = response.getUpdatedNodes();
  if (updatedNodes.isEmpty()) {
    return;
  }
  // send event to the job to act upon completed tasks
  eventHandler.handle(new JobUpdatedNodesEvent(getJob().getID(),
      updatedNodes));
  // Collect the nodes that can no longer run containers.
  HashSet<NodeId> unusableNodes = new HashSet<NodeId>();
  for (NodeReport nr : updatedNodes) {
    if (nr.getNodeState().isUnusable()) {
      unusableNodes.add(nr.getNodeId());
    }
  }
  // Pass 0 covers assigned maps, pass 1 covers assigned reduces.
  for (int i = 0; i < 2; ++i) {
    HashMap<TaskAttemptId, Container> taskSet = i == 0 ? assignedRequests.maps
        : assignedRequests.reduces;
    // kill running containers
    for (Map.Entry<TaskAttemptId, Container> entry : taskSet.entrySet()) {
      TaskAttemptId tid = entry.getKey();
      NodeId taskAttemptNodeId = entry.getValue().getNodeId();
      if (unusableNodes.contains(taskAttemptNodeId)) {
        LOG.info("Killing taskAttempt:" + tid
            + " because it is running on unusable node:"
            + taskAttemptNodeId);
        // If map, reschedule next task attempt.
        boolean rescheduleNextAttempt = (i == 0);
        eventHandler.handle(new TaskAttemptKillEvent(tid,
            "TaskAttempt killed because it ran on unusable node"
            + taskAttemptNodeId, rescheduleNextAttempt));
      }
    }
  }
}
/**
 * Propagates a job priority reported by the RM into the job object;
 * no-op when the response carries no priority.
 *
 * @param response the RM allocate response
 */
void handleJobPriorityChange(AllocateResponse response) {
  Priority applicationPriority = response.getApplicationPriority();
  if (applicationPriority == null) {
    return;
  }
  // Re-wrap the numeric value before handing it to the job.
  getJob().setJobPriority(
      Priority.newInstance(applicationPriority.getPriority()));
}
/**
 * Computes the total resources available to this job: the current headroom
 * plus everything already held by assigned map and reduce containers.
 *
 * @return the job's overall resource limit
 */
@Private
public Resource getResourceLimit() {
  Resource headRoom = getAvailableResources();
  Resource mapsInUse =
      Resources.multiply(mapResourceRequest, assignedRequests.maps.size());
  Resource reducesInUse =
      Resources.multiply(reduceResourceRequest,
          assignedRequests.reduces.size());
  return Resources.add(headRoom, Resources.add(mapsInUse, reducesInUse));
}
@Private
@VisibleForTesting
class ScheduledRequests {
private final LinkedList<TaskAttemptId> earlierFailedMaps =
new LinkedList<TaskAttemptId>();
/** Maps from a host to a list of Map tasks with data on the host */
private final Map<String, LinkedList<TaskAttemptId>> mapsHostMapping =
new HashMap<String, LinkedList<TaskAttemptId>>();
private final Map<String, LinkedList<TaskAttemptId>> mapsRackMapping =
new HashMap<String, LinkedList<TaskAttemptId>>();
@VisibleForTesting
final Map<TaskAttemptId, ContainerRequest> maps =
new LinkedHashMap<TaskAttemptId, ContainerRequest>();
int mapsMod100 = 0;
int numOpportunisticMapsPercent = 0;
/** Sets the percentage of mappers to request as OPPORTUNISTIC containers. */
void setNumOpportunisticMapsPercent(int numMaps) {
  this.numOpportunisticMapsPercent = numMaps;
}
/** @return the percentage of mappers requested as OPPORTUNISTIC containers. */
int getNumOpportunisticMapsPercent() {
  return this.numOpportunisticMapsPercent;
}
@VisibleForTesting
final LinkedHashMap<TaskAttemptId, ContainerRequest> reduces =
new LinkedHashMap<TaskAttemptId, ContainerRequest>();
/**
 * Withdraws the scheduled request for the given attempt, decrementing the
 * corresponding container ask at the RM.
 *
 * @param tId the attempt whose request should be withdrawn
 * @return true if a request was found and removed
 */
boolean remove(TaskAttemptId tId) {
  boolean isMap = tId.getTaskId().getTaskType().equals(TaskType.MAP);
  ContainerRequest withdrawn = isMap ? maps.remove(tId) : reduces.remove(tId);
  if (withdrawn == null) {
    return false;
  }
  decContainerReq(withdrawn);
  return true;
}
/**
 * Removes the oldest scheduled reduce request, decrementing the
 * corresponding container ask at the RM.
 *
 * @return the removed request, or {@code null} if none is scheduled
 */
ContainerRequest removeReduce() {
  Iterator<Entry<TaskAttemptId, ContainerRequest>> it =
      reduces.entrySet().iterator();
  if (!it.hasNext()) {
    return null;
  }
  Entry<TaskAttemptId, ContainerRequest> oldest = it.next();
  it.remove();
  decContainerReq(oldest.getValue());
  return oldest.getValue();
}
void addMap(ContainerRequestEvent event) {
ContainerRequest request = null;
if (event.getEarlierAttemptFailed()) {
earlierFailedMaps.add(event.getAttemptID());
request =
new ContainerRequest(event, PRIORITY_FAST_FAIL_MAP,
mapNodeLabelExpression);
LOG.info("Added "+event.getAttemptID()+" to list of failed maps");
// If its an earlier Failed attempt, do not retry as OPPORTUNISTIC
maps.put(event.getAttemptID(), request);
addContainerReq(request);
} else {
if (mapsMod100 < numOpportunisticMapsPercent) {
request =
new ContainerRequest(event, PRIORITY_OPPORTUNISTIC_MAP,
mapNodeLabelExpression);
maps.put(event.getAttemptID(), request);
addOpportunisticResourceRequest(request.priority, request.capability);
} else {
request =
new ContainerRequest(event, PRIORITY_MAP, mapNodeLabelExpression);
for (String host : event.getHosts()) {
LinkedList<TaskAttemptId> list = mapsHostMapping.get(host);
if (list == null) {
list = new LinkedList<TaskAttemptId>();
mapsHostMapping.put(host, list);
}
list.add(event.getAttemptID());
if (LOG.isDebugEnabled()) {
LOG.debug("Added attempt req to host " + host);
}
}
for (String rack : event.getRacks()) {
LinkedList<TaskAttemptId> list = mapsRackMapping.get(rack);
if (list == null) {
list = new LinkedList<TaskAttemptId>();
mapsRackMapping.put(rack, list);
}
list.add(event.getAttemptID());
if (LOG.isDebugEnabled()) {
LOG.debug("Added attempt req to rack " + rack);
}
}
maps.put(event.getAttemptID(), request);
addContainerReq(request);
}
mapsMod100++;
mapsMod100 %= 100;
}
}
void addReduce(ContainerRequest req) {
reduces.put(req.attemptID, req);
addContainerReq(req);
}
// this method will change the list of allocatedContainers.
private void assign(List<Container> allocatedContainers) {
Iterator<Container> it = allocatedContainers.iterator();
LOG.info("Got allocated containers " + allocatedContainers.size());
containersAllocated += allocatedContainers.size();
int reducePending = reduces.size();
while (it.hasNext()) {
Container allocated = it.next();
if (LOG.isDebugEnabled()) {
LOG.debug("Assigning container " + allocated.getId()
+ " with priority " + allocated.getPriority() + " to NM "
+ allocated.getNodeId());
}
// check if allocated container meets memory requirements
// and whether we have any scheduled tasks that need
// a container to be assigned
boolean isAssignable = true;
Priority priority = allocated.getPriority();
Resource allocatedResource = allocated.getResource();
if (PRIORITY_FAST_FAIL_MAP.equals(priority)
|| PRIORITY_MAP.equals(priority)
|| PRIORITY_OPPORTUNISTIC_MAP.equals(priority)) {
if (ResourceCalculatorUtils.computeAvailableContainers(allocatedResource,
mapResourceRequest, getSchedulerResourceTypes()) <= 0
|| maps.isEmpty()) {
LOG.info("Cannot assign container " + allocated
+ " for a map as either "
+ " container memory less than required " + mapResourceRequest
+ " or no pending map tasks - maps.isEmpty="
+ maps.isEmpty());
isAssignable = false;
}
}
else if (PRIORITY_REDUCE.equals(priority)) {
if (ResourceCalculatorUtils.computeAvailableContainers(allocatedResource,
reduceResourceRequest, getSchedulerResourceTypes()) <= 0
|| (reducePending <= 0)) {
LOG.info("Cannot assign container " + allocated
+ " for a reduce as either "
+ " container memory less than required " + reduceResourceRequest
+ " or no pending reduce tasks.");
isAssignable = false;
} else {
reducePending--;
}
} else {
LOG.warn("Container allocated at unwanted priority: " + priority +
". Returning to RM...");
isAssignable = false;
}
if(!isAssignable) {
// release container if we could not assign it
containerNotAssigned(allocated);
it.remove();
continue;
}
// do not assign if allocated container is on a
// blacklisted host
String allocatedHost = allocated.getNodeId().getHost();
if (isNodeBlacklisted(allocatedHost)) {
// we need to request for a new container
// and release the current one
LOG.info("Got allocated container on a blacklisted "
+ " host "+allocatedHost
+". Releasing container " + allocated);
// find the request matching this allocated container
// and replace it with a new one
ContainerRequest toBeReplacedReq =
getContainerReqToReplace(allocated);
if (toBeReplacedReq != null) {
LOG.info("Placing a new container request for task attempt "
+ toBeReplacedReq.attemptID);
ContainerRequest newReq =
getFilteredContainerRequest(toBeReplacedReq);
decContainerReq(toBeReplacedReq);
if (toBeReplacedReq.attemptID.getTaskId().getTaskType() ==
TaskType.MAP) {
maps.put(newReq.attemptID, newReq);
}
else {
reduces.put(newReq.attemptID, newReq);
}
addContainerReq(newReq);
}
else {
LOG.info("Could not map allocated container to a valid request."
+ " Releasing allocated container " + allocated);
}
// release container if we could not assign it
containerNotAssigned(allocated);
it.remove();
continue;
}
}
assignContainers(allocatedContainers);
// release container if we could not assign it
it = allocatedContainers.iterator();
while (it.hasNext()) {
Container allocated = it.next();
LOG.info("Releasing unassigned container " + allocated);
containerNotAssigned(allocated);
}
}
@SuppressWarnings("unchecked")
private void containerAssigned(Container allocated,
ContainerRequest assigned) {
// Update resource requests
decContainerReq(assigned);
// send the container-assigned event to task attempt
eventHandler.handle(new TaskAttemptContainerAssignedEvent(
assigned.attemptID, allocated, applicationACLs));
assignedRequests.add(allocated, assigned.attemptID);
if (LOG.isDebugEnabled()) {
LOG.debug("Assigned container (" + allocated + ") "
+ " to task " + assigned.attemptID + " on node "
+ allocated.getNodeId().toString());
}
}
private void containerNotAssigned(Container allocated) {
containersReleased++;
pendingRelease.add(allocated.getId());
release(allocated.getId());
}
private ContainerRequest assignWithoutLocality(Container allocated) {
ContainerRequest assigned = null;
Priority priority = allocated.getPriority();
if (PRIORITY_FAST_FAIL_MAP.equals(priority)) {
LOG.info("Assigning container " + allocated + " to fast fail map");
assigned = assignToFailedMap(allocated);
} else if (PRIORITY_REDUCE.equals(priority)) {
if (LOG.isDebugEnabled()) {
LOG.debug("Assigning container " + allocated + " to reduce");
}
assigned = assignToReduce(allocated);
}
return assigned;
}
private void assignContainers(List<Container> allocatedContainers) {
Iterator<Container> it = allocatedContainers.iterator();
while (it.hasNext()) {
Container allocated = it.next();
ContainerRequest assigned = assignWithoutLocality(allocated);
if (assigned != null) {
containerAssigned(allocated, assigned);
it.remove();
}
}
assignMapsWithLocality(allocatedContainers);
}
private ContainerRequest getContainerReqToReplace(Container allocated) {
LOG.info("Finding containerReq for allocated container: " + allocated);
Priority priority = allocated.getPriority();
ContainerRequest toBeReplaced = null;
if (PRIORITY_FAST_FAIL_MAP.equals(priority)) {
LOG.info("Replacing FAST_FAIL_MAP container " + allocated.getId());
Iterator<TaskAttemptId> iter = earlierFailedMaps.iterator();
while (toBeReplaced == null && iter.hasNext()) {
toBeReplaced = maps.get(iter.next());
}
LOG.info("Found replacement: " + toBeReplaced);
return toBeReplaced;
}
else if (PRIORITY_MAP.equals(priority)
|| PRIORITY_OPPORTUNISTIC_MAP.equals(priority)) {
LOG.info("Replacing MAP container " + allocated.getId());
// allocated container was for a map
String host = allocated.getNodeId().getHost();
LinkedList<TaskAttemptId> list = mapsHostMapping.get(host);
if (list != null && list.size() > 0) {
TaskAttemptId tId = list.removeLast();
if (maps.containsKey(tId)) {
toBeReplaced = maps.remove(tId);
}
}
else {
TaskAttemptId tId = maps.keySet().iterator().next();
toBeReplaced = maps.remove(tId);
}
}
else if (PRIORITY_REDUCE.equals(priority)) {
TaskAttemptId tId = reduces.keySet().iterator().next();
toBeReplaced = reduces.remove(tId);
}
LOG.info("Found replacement: " + toBeReplaced);
return toBeReplaced;
}
@SuppressWarnings("unchecked")
private ContainerRequest assignToFailedMap(Container allocated) {
//try to assign to earlierFailedMaps if present
ContainerRequest assigned = null;
while (assigned == null && earlierFailedMaps.size() > 0
&& canAssignMaps()) {
TaskAttemptId tId = earlierFailedMaps.removeFirst();
if (maps.containsKey(tId)) {
assigned = maps.remove(tId);
JobCounterUpdateEvent jce =
new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId());
jce.addCounterUpdate(JobCounter.OTHER_LOCAL_MAPS, 1);
eventHandler.handle(jce);
LOG.info("Assigned from earlierFailedMaps");
break;
}
}
return assigned;
}
private ContainerRequest assignToReduce(Container allocated) {
ContainerRequest assigned = null;
//try to assign to reduces if present
if (assigned == null && reduces.size() > 0 && canAssignReduces()) {
TaskAttemptId tId = reduces.keySet().iterator().next();
assigned = reduces.remove(tId);
LOG.info("Assigned to reduce");
}
return assigned;
}
@SuppressWarnings("unchecked")
private void assignMapsWithLocality(List<Container> allocatedContainers) {
// try to assign to all nodes first to match node local
Iterator<Container> it = allocatedContainers.iterator();
while(it.hasNext() && maps.size() > 0 && canAssignMaps()){
Container allocated = it.next();
Priority priority = allocated.getPriority();
assert (PRIORITY_MAP.equals(priority)
|| PRIORITY_OPPORTUNISTIC_MAP.equals(priority));
if (!PRIORITY_OPPORTUNISTIC_MAP.equals(priority)) {
// "if (maps.containsKey(tId))" below should be almost always true.
// hence this while loop would almost always have O(1) complexity
String host = allocated.getNodeId().getHost();
LinkedList<TaskAttemptId> list = mapsHostMapping.get(host);
while (list != null && list.size() > 0) {
if (LOG.isDebugEnabled()) {
LOG.debug("Host matched to the request list " + host);
}
TaskAttemptId tId = list.removeFirst();
if (maps.containsKey(tId)) {
ContainerRequest assigned = maps.remove(tId);
containerAssigned(allocated, assigned);
it.remove();
JobCounterUpdateEvent jce =
new JobCounterUpdateEvent(assigned.attemptID.getTaskId()
.getJobId());
jce.addCounterUpdate(JobCounter.DATA_LOCAL_MAPS, 1);
eventHandler.handle(jce);
hostLocalAssigned++;
if (LOG.isDebugEnabled()) {
LOG.debug("Assigned based on host match " + host);
}
break;
}
}
}
}
// try to match all rack local
it = allocatedContainers.iterator();
while(it.hasNext() && maps.size() > 0 && canAssignMaps()){
Container allocated = it.next();
Priority priority = allocated.getPriority();
assert (PRIORITY_MAP.equals(priority)
|| PRIORITY_OPPORTUNISTIC_MAP.equals(priority));
if (!PRIORITY_OPPORTUNISTIC_MAP.equals(priority)) {
// "if (maps.containsKey(tId))" below should be almost always true.
// hence this while loop would almost always have O(1) complexity
String host = allocated.getNodeId().getHost();
String rack = RackResolver.resolve(host).getNetworkLocation();
LinkedList<TaskAttemptId> list = mapsRackMapping.get(rack);
while (list != null && list.size() > 0) {
TaskAttemptId tId = list.removeFirst();
if (maps.containsKey(tId)) {
ContainerRequest assigned = maps.remove(tId);
containerAssigned(allocated, assigned);
it.remove();
JobCounterUpdateEvent jce =
new JobCounterUpdateEvent(assigned.attemptID.getTaskId()
.getJobId());
jce.addCounterUpdate(JobCounter.RACK_LOCAL_MAPS, 1);
eventHandler.handle(jce);
rackLocalAssigned++;
if (LOG.isDebugEnabled()) {
LOG.debug("Assigned based on rack match " + rack);
}
break;
}
}
}
}
// assign remaining
it = allocatedContainers.iterator();
while(it.hasNext() && maps.size() > 0 && canAssignMaps()){
Container allocated = it.next();
Priority priority = allocated.getPriority();
assert (PRIORITY_MAP.equals(priority)
|| PRIORITY_OPPORTUNISTIC_MAP.equals(priority));
TaskAttemptId tId = maps.keySet().iterator().next();
ContainerRequest assigned = maps.remove(tId);
containerAssigned(allocated, assigned);
it.remove();
JobCounterUpdateEvent jce =
new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId());
jce.addCounterUpdate(JobCounter.OTHER_LOCAL_MAPS, 1);
eventHandler.handle(jce);
if (LOG.isDebugEnabled()) {
LOG.debug("Assigned based on * match");
}
}
}
}
@Private
@VisibleForTesting
class AssignedRequests {
private final Map<ContainerId, TaskAttemptId> containerToAttemptMap =
new HashMap<ContainerId, TaskAttemptId>();
@VisibleForTesting
final LinkedHashMap<TaskAttemptId, Container> maps =
new LinkedHashMap<TaskAttemptId, Container>();
@VisibleForTesting
final LinkedHashMap<TaskAttemptId, Container> reduces =
new LinkedHashMap<TaskAttemptId, Container>();
@VisibleForTesting
final Set<TaskAttemptId> preemptionWaitingReduces =
new HashSet<TaskAttemptId>();
void add(Container container, TaskAttemptId tId) {
LOG.info("Assigned container " + container.getId().toString() + " to " + tId);
containerToAttemptMap.put(container.getId(), tId);
if (tId.getTaskId().getTaskType().equals(TaskType.MAP)) {
maps.put(tId, container);
} else {
reduces.put(tId, container);
}
}
@SuppressWarnings("unchecked")
void preemptReduce(int toPreempt) {
List<TaskAttemptId> reduceList = new ArrayList<TaskAttemptId>
(reduces.keySet());
//sort reduces on progress
Collections.sort(reduceList,
new Comparator<TaskAttemptId>() {
@Override
public int compare(TaskAttemptId o1, TaskAttemptId o2) {
return Float.compare(
getJob().getTask(o1.getTaskId()).getAttempt(o1).getProgress(),
getJob().getTask(o2.getTaskId()).getAttempt(o2).getProgress());
}
});
for (int i = 0; i < toPreempt && reduceList.size() > 0; i++) {
TaskAttemptId id = reduceList.remove(0);//remove the one on top
LOG.info("Preempting " + id);
preemptionWaitingReduces.add(id);
eventHandler.handle(new TaskAttemptKillEvent(id, RAMPDOWN_DIAGNOSTIC));
}
}
boolean remove(TaskAttemptId tId) {
ContainerId containerId = null;
if (tId.getTaskId().getTaskType().equals(TaskType.MAP)) {
containerId = maps.remove(tId).getId();
} else {
containerId = reduces.remove(tId).getId();
if (containerId != null) {
boolean preempted = preemptionWaitingReduces.remove(tId);
if (preempted) {
LOG.info("Reduce preemption successful " + tId);
}
}
}
if (containerId != null) {
containerToAttemptMap.remove(containerId);
return true;
}
return false;
}
TaskAttemptId get(ContainerId cId) {
return containerToAttemptMap.get(cId);
}
ContainerId get(TaskAttemptId tId) {
Container taskContainer;
if (tId.getTaskId().getTaskType().equals(TaskType.MAP)) {
taskContainer = maps.get(tId);
} else {
taskContainer = reduces.get(tId);
}
if (taskContainer == null) {
return null;
} else {
return taskContainer.getId();
}
}
}
private class ScheduleStats {
  // Snapshot of the allocator's counters, used to log only on change.
  int numPendingReduces;
  int numScheduledMaps;
  int numScheduledReduces;
  int numAssignedMaps;
  int numAssignedReduces;
  int numCompletedMaps;
  int numCompletedReduces;
  int numContainersAllocated;
  int numContainersReleased;

  /**
   * Refreshes the snapshot from the allocator's live state and emits a log
   * line only if any value changed since the last call.
   */
  public void updateAndLogIfChanged(String msgPrefix) {
    boolean dirty = false;

    // synchronized to fix findbug warnings
    synchronized (RMContainerAllocator.this) {
      if (numPendingReduces != pendingReduces.size()) {
        dirty = true;
      }
      numPendingReduces = pendingReduces.size();
      if (numScheduledMaps != scheduledRequests.maps.size()) {
        dirty = true;
      }
      numScheduledMaps = scheduledRequests.maps.size();
      if (numScheduledReduces != scheduledRequests.reduces.size()) {
        dirty = true;
      }
      numScheduledReduces = scheduledRequests.reduces.size();
      if (numAssignedMaps != assignedRequests.maps.size()) {
        dirty = true;
      }
      numAssignedMaps = assignedRequests.maps.size();
      if (numAssignedReduces != assignedRequests.reduces.size()) {
        dirty = true;
      }
      numAssignedReduces = assignedRequests.reduces.size();
      if (numCompletedMaps != getJob().getCompletedMaps()) {
        dirty = true;
      }
      numCompletedMaps = getJob().getCompletedMaps();
      if (numCompletedReduces != getJob().getCompletedReduces()) {
        dirty = true;
      }
      numCompletedReduces = getJob().getCompletedReduces();
      if (numContainersAllocated != containersAllocated) {
        dirty = true;
      }
      numContainersAllocated = containersAllocated;
      if (numContainersReleased != containersReleased) {
        dirty = true;
      }
      numContainersReleased = containersReleased;
    }

    if (dirty) {
      log(msgPrefix);
    }
  }

  /** Logs the current snapshot with the given prefix. */
  public void log(String msgPrefix) {
    StringBuilder msg = new StringBuilder(msgPrefix);
    msg.append("PendingReds:").append(numPendingReduces)
        .append(" ScheduledMaps:").append(numScheduledMaps)
        .append(" ScheduledReds:").append(numScheduledReduces)
        .append(" AssignedMaps:").append(numAssignedMaps)
        .append(" AssignedReds:").append(numAssignedReduces)
        .append(" CompletedMaps:").append(numCompletedMaps)
        .append(" CompletedReds:").append(numCompletedReduces)
        .append(" ContAlloc:").append(numContainersAllocated)
        .append(" ContRel:").append(numContainersReleased)
        .append(" HostLocal:").append(hostLocalAssigned)
        .append(" RackLocal:").append(rackLocalAssigned);
    LOG.info(msg.toString());
  }
}
/** Adapter exposing the allocator's assigned requests to the preemption policy. */
static class PreemptionContext extends AMPreemptionPolicy.Context {
  final AssignedRequests reqs;

  PreemptionContext(AssignedRequests reqs) {
    this.reqs = reqs;
  }

  /** Returns the attempt running in the given container, or null. */
  @Override
  public TaskAttemptId getTaskAttempt(ContainerId container) {
    return reqs.get(container);
  }

  /**
   * Returns a snapshot of the containers currently assigned to tasks of the
   * given type; null for any type other than MAP or REDUCE.
   */
  @Override
  public List<Container> getContainers(TaskType t) {
    if (TaskType.MAP.equals(t)) {
      return new ArrayList<Container>(reqs.maps.values());
    }
    if (TaskType.REDUCE.equals(t)) {
      return new ArrayList<Container>(reqs.reduces.values());
    }
    return null;
  }
}
}
| |
/*
* Copyright 2016 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.cloud.bigquery;
import com.google.api.services.bigquery.model.Streamingbuffer;
import com.google.api.services.bigquery.model.Table;
import com.google.auto.value.AutoValue;
import com.google.common.base.MoreObjects;
import java.io.Serializable;
import java.math.BigInteger;
import java.util.Objects;
import javax.annotation.Nullable;
/**
 * A Google BigQuery default table definition. This definition is used for standard, two-dimensional
 * tables with individual records organized in rows, and a data type assigned to each column (also
 * called a field). Individual fields within a record may contain nested and repeated children
 * fields. Every table is described by a schema that describes field names, types, and other
 * information.
 *
 * @see <a href="https://cloud.google.com/bigquery/docs/tables">Managing Tables</a>
 */
@AutoValue
public abstract class StandardTableDefinition extends TableDefinition {

  private static final long serialVersionUID = 2113445776046717900L;

  /**
   * Google BigQuery Table's Streaming Buffer information. This class contains information on a
   * table's streaming buffer as the estimated size in number of rows/bytes.
   */
  public static class StreamingBuffer implements Serializable {

    private static final long serialVersionUID = 822027055549277843L;
    private final Long estimatedRows;
    private final Long estimatedBytes;
    private final Long oldestEntryTime;

    // Package-private: instances are built from the API response via fromPb().
    // Any of the three values may be null when the service omits the field.
    StreamingBuffer(Long estimatedRows, Long estimatedBytes, Long oldestEntryTime) {
      this.estimatedRows = estimatedRows;
      this.estimatedBytes = estimatedBytes;
      this.oldestEntryTime = oldestEntryTime;
    }

    /** Returns a lower-bound estimate of the number of rows currently in the streaming buffer. */
    public Long getEstimatedRows() {
      return estimatedRows;
    }

    /** Returns a lower-bound estimate of the number of bytes currently in the streaming buffer. */
    public Long getEstimatedBytes() {
      return estimatedBytes;
    }

    /**
     * Returns the timestamp of the oldest entry in the streaming buffer, in milliseconds since
     * epoch. Returns {@code null} if the streaming buffer is empty.
     */
    public Long getOldestEntryTime() {
      return oldestEntryTime;
    }

    @Override
    public String toString() {
      return MoreObjects.toStringHelper(this)
          .add("estimatedRows", estimatedRows)
          .add("estimatedBytes", estimatedBytes)
          .add("oldestEntryTime", oldestEntryTime)
          .toString();
    }

    @Override
    public int hashCode() {
      return Objects.hash(estimatedRows, estimatedBytes, oldestEntryTime);
    }

    // Equality is delegated to the protocol-buffer form; equal fields produce
    // equal pb objects, so this is consistent with hashCode() above.
    @Override
    public boolean equals(Object obj) {
      return obj instanceof StreamingBuffer
          && Objects.equals(toPb(), ((StreamingBuffer) obj).toPb());
    }

    /**
     * Converts this buffer description to its API model object. Null fields
     * are left unset on the resulting {@link Streamingbuffer}.
     */
    Streamingbuffer toPb() {
      Streamingbuffer buffer = new Streamingbuffer();
      if (estimatedBytes != null) {
        buffer.setEstimatedBytes(BigInteger.valueOf(estimatedBytes));
      }
      if (estimatedRows != null) {
        buffer.setEstimatedRows(BigInteger.valueOf(estimatedRows));
      }
      if (oldestEntryTime != null) {
        buffer.setOldestEntryTime(BigInteger.valueOf(oldestEntryTime));
      }
      return buffer;
    }

    /**
     * Builds a {@code StreamingBuffer} from its API model object, narrowing
     * each {@link BigInteger} field to {@code Long} and preserving nulls.
     */
    static StreamingBuffer fromPb(Streamingbuffer streamingBufferPb) {
      Long oldestEntryTime = null;
      if (streamingBufferPb.getOldestEntryTime() != null) {
        oldestEntryTime = streamingBufferPb.getOldestEntryTime().longValue();
      }
      return new StreamingBuffer(
          streamingBufferPb.getEstimatedRows() != null
              ? streamingBufferPb.getEstimatedRows().longValue()
              : null,
          streamingBufferPb.getEstimatedBytes() != null
              ? streamingBufferPb.getEstimatedBytes().longValue()
              : null,
          oldestEntryTime);
    }
  }

  // AutoValue generates the concrete implementation of this builder.
  @AutoValue.Builder
  public abstract static class Builder
      extends TableDefinition.Builder<StandardTableDefinition, Builder> {

    public abstract Builder setNumBytes(Long numBytes);

    public abstract Builder setNumLongTermBytes(Long numLongTermBytes);

    public abstract Builder setNumRows(Long numRows);

    public abstract Builder setLocation(String location);

    public abstract Builder setStreamingBuffer(StreamingBuffer streamingBuffer);

    public abstract Builder setType(Type type);

    /** Sets the table schema. */
    public abstract Builder setSchema(Schema schema);

    /**
     * Sets the time partitioning configuration for the table. If not set, the table is not
     * time-partitioned.
     */
    public abstract Builder setTimePartitioning(TimePartitioning timePartitioning);

    /**
     * Set the clustering configuration for the table. If not set, the table is not clustered.
     * Clustering is only available for partitioned tables.
     */
    public abstract Builder setClustering(Clustering clustering);

    /** Creates a {@code StandardTableDefinition} object. */
    public abstract StandardTableDefinition build();
  }

  /** Returns the size of this table in bytes, excluding any data in the streaming buffer. */
  @Nullable
  public abstract Long getNumBytes();

  /**
   * Returns the number of bytes considered "long-term storage" for reduced billing purposes.
   *
   * @see <a href="https://cloud.google.com/bigquery/pricing#long-term-storage">Long Term Storage
   *     Pricing</a>
   */
  @Nullable
  public abstract Long getNumLongTermBytes();

  /** Returns the number of rows in this table, excluding any data in the streaming buffer. */
  @Nullable
  public abstract Long getNumRows();

  /**
   * Returns the geographic location where the table should reside. This value is inherited from the
   * dataset.
   *
   * @see <a
   *     href="https://cloud.google.com/bigquery/docs/managing_jobs_datasets_projects#dataset-location">
   *     Dataset Location</a>
   */
  @Nullable
  public abstract String getLocation();

  /**
   * Returns information on the table's streaming buffer if any exists. Returns {@code null} if no
   * streaming buffer exists.
   */
  @Nullable
  public abstract StreamingBuffer getStreamingBuffer();

  /**
   * Returns the time partitioning configuration for this table. If {@code null}, the table is not
   * time-partitioned.
   */
  @Nullable
  public abstract TimePartitioning getTimePartitioning();

  /**
   * Returns the clustering configuration for this table. If {@code null}, the table is not
   * clustered.
   */
  @Nullable
  public abstract Clustering getClustering();

  /** Returns a builder for a BigQuery standard table definition. */
  public static Builder newBuilder() {
    return new AutoValue_StandardTableDefinition.Builder().setType(Type.TABLE);
  }

  /**
   * Creates a BigQuery standard table definition given its schema.
   *
   * @param schema the schema of the table
   */
  public static StandardTableDefinition of(Schema schema) {
    return newBuilder().setSchema(schema).build();
  }

  /** Returns a builder for the {@code StandardTableDefinition} object. */
  public abstract Builder toBuilder();

  /**
   * Converts this definition to its API model object, layering the standard
   * table fields on top of the base {@code TableDefinition} conversion.
   * Optional sub-objects are attached only when present.
   */
  @Override
  Table toPb() {
    Table tablePb = super.toPb();
    if (getNumRows() != null) {
      tablePb.setNumRows(BigInteger.valueOf(getNumRows()));
    }
    tablePb.setNumBytes(getNumBytes());
    tablePb.setNumLongTermBytes(getNumLongTermBytes());
    tablePb.setLocation(getLocation());
    if (getStreamingBuffer() != null) {
      tablePb.setStreamingBuffer(getStreamingBuffer().toPb());
    }
    if (getTimePartitioning() != null) {
      tablePb.setTimePartitioning(getTimePartitioning().toPb());
    }
    if (getClustering() != null) {
      tablePb.setClustering(getClustering().toPb());
    }
    return tablePb;
  }

  /**
   * Builds a {@code StandardTableDefinition} from its API model object.
   * Each optional field is copied only when the service returned it.
   */
  @SuppressWarnings("unchecked")
  static StandardTableDefinition fromPb(Table tablePb) {
    Builder builder = newBuilder().table(tablePb);
    if (tablePb.getNumRows() != null) {
      builder.setNumRows(tablePb.getNumRows().longValue());
    }
    if (tablePb.getStreamingBuffer() != null) {
      builder.setStreamingBuffer(StreamingBuffer.fromPb(tablePb.getStreamingBuffer()));
    }
    if (tablePb.getTimePartitioning() != null) {
      builder.setTimePartitioning(TimePartitioning.fromPb(tablePb.getTimePartitioning()));
    }
    if (tablePb.getClustering() != null) {
      builder.setClustering(Clustering.fromPb(tablePb.getClustering()));
    }
    if (tablePb.getNumLongTermBytes() != null) {
      builder.setNumLongTermBytes(tablePb.getNumLongTermBytes());
    }
    return builder.setNumBytes(tablePb.getNumBytes()).setLocation(tablePb.getLocation()).build();
  }
}
| |
/*
* Licensed to Crate.io GmbH ("Crate") under one or more contributor
* license agreements. See the NOTICE file distributed with this work for
* additional information regarding copyright ownership. Crate licenses
* this file to you under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* However, if you have executed another commercial license agreement
* with Crate these terms will supersede the license and you may use the
* software solely pursuant to the terms of the relevant commercial agreement.
*/
package io.crate.execution.dsl.projection.builder;
import io.crate.common.collections.Lists2;
import io.crate.execution.dsl.projection.AggregationProjection;
import io.crate.execution.dsl.projection.EvalProjection;
import io.crate.execution.dsl.projection.FilterProjection;
import io.crate.execution.dsl.projection.GroupProjection;
import io.crate.execution.dsl.projection.Projection;
import io.crate.execution.dsl.projection.TopNProjection;
import io.crate.execution.dsl.projection.WriterProjection;
import io.crate.execution.engine.aggregation.AggregationFunction;
import io.crate.execution.engine.pipeline.TopN;
import io.crate.expression.symbol.AggregateMode;
import io.crate.expression.symbol.Aggregation;
import io.crate.expression.symbol.Function;
import io.crate.expression.symbol.InputColumn;
import io.crate.expression.symbol.Literal;
import io.crate.expression.symbol.Symbol;
import io.crate.metadata.ColumnIdent;
import io.crate.metadata.FunctionInfo;
import io.crate.metadata.FunctionType;
import io.crate.metadata.NodeContext;
import io.crate.metadata.RowGranularity;
import io.crate.metadata.SearchPath;
import io.crate.types.DataType;
import org.elasticsearch.common.settings.Settings;
import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
public class ProjectionBuilder {
private final NodeContext nodeCtx;
public ProjectionBuilder(NodeContext nodeCtx) {
this.nodeCtx = nodeCtx;
}
public AggregationProjection aggregationProjection(Collection<? extends Symbol> inputs,
Collection<Function> aggregates,
java.util.function.Function<Symbol, Symbol> subQueryAndParamBinder,
AggregateMode mode,
RowGranularity granularity,
SearchPath searchPath) {
InputColumns.SourceSymbols sourceSymbols = new InputColumns.SourceSymbols(inputs);
ArrayList<Aggregation> aggregations = getAggregations(
aggregates,
mode,
sourceSymbols,
searchPath,
subQueryAndParamBinder
);
return new AggregationProjection(aggregations, granularity, mode);
}
public GroupProjection groupProjection(
Collection<? extends Symbol> inputs,
Collection<? extends Symbol> keys,
Collection<Function> values,
java.util.function.Function<Symbol, Symbol> subQueryAndParamBinder,
AggregateMode mode,
RowGranularity requiredGranularity,
SearchPath searchPath) {
InputColumns.SourceSymbols sourceSymbols = new InputColumns.SourceSymbols(inputs);
ArrayList<Aggregation> aggregations = getAggregations(
values,
mode,
sourceSymbols,
searchPath,
subQueryAndParamBinder
);
return new GroupProjection(
Lists2.map(InputColumns.create(keys, sourceSymbols), subQueryAndParamBinder),
aggregations,
mode,
requiredGranularity
);
}
private ArrayList<Aggregation> getAggregations(Collection<Function> functions,
AggregateMode mode,
InputColumns.SourceSymbols sourceSymbols,
SearchPath searchPath,
java.util.function.Function<Symbol, Symbol> subQueryAndParamBinder) {
ArrayList<Aggregation> aggregations = new ArrayList<>(functions.size());
for (Function function : functions) {
assert function.type() == FunctionType.AGGREGATE :
"function type must be " + FunctionType.AGGREGATE;
List<Symbol> aggregationInputs;
Symbol filterInput;
switch (mode) {
case ITER_FINAL:
case ITER_PARTIAL:
// ITER means that there is no aggregation part upfront, therefore the input
// symbols need to be in arguments
aggregationInputs = InputColumns.create(function.arguments(), sourceSymbols);
Symbol filter = function.filter();
if (filter != null) {
filterInput = InputColumns.create(filter, sourceSymbols);
} else {
filterInput = Literal.BOOLEAN_TRUE;
}
break;
case PARTIAL_FINAL:
aggregationInputs = List.of(sourceSymbols.getICForSource(function));
filterInput = Literal.BOOLEAN_TRUE;
break;
default:
throw new AssertionError("Invalid mode: " + mode.name());
}
AggregationFunction<?, ?> aggregationFunction = (AggregationFunction<?, ?>) nodeCtx.functions().getQualified(
function,
searchPath
);
assert aggregationFunction != null :
"Aggregation function implementation not found using full qualified lookup: " + function;
var valueType = mode.returnType(aggregationFunction);
var functionInfo = FunctionInfo.of(
aggregationFunction.signature(),
aggregationFunction.boundSignature().getArgumentDataTypes(),
valueType
);
Aggregation aggregation = new Aggregation(
aggregationFunction.signature(),
functionInfo,
aggregationFunction.boundSignature().getReturnType().createType(),
valueType,
Lists2.map(aggregationInputs, subQueryAndParamBinder),
subQueryAndParamBinder.apply(filterInput)
);
aggregations.add(aggregation);
}
return aggregations;
}
public static FilterProjection filterProjection(Collection<? extends Symbol> inputs, Symbol query) {
// FilterProjection can only pass-through rows as is; create inputColumns which preserve the type:
return new FilterProjection(InputColumns.create(query, inputs), InputColumn.mapToInputColumns(inputs));
}
/**
* Create a {@link TopNProjection} or {@link EvalProjection} if required, otherwise null is returned.
* <p>
* The output symbols will consist of InputColumns.
* </p>
* @param numOutputs number of outputs this projection should have.
* If inputTypes is longer this projection will cut off superfluous columns
*/
@Nullable
public static Projection topNOrEvalIfNeeded(Integer limit,
int offset,
int numOutputs,
List<DataType<?>> inputTypes) {
if (limit == null) {
limit = TopN.NO_LIMIT;
}
int numInputTypes = inputTypes.size();
List<DataType<?>> strippedInputs = inputTypes;
if (numOutputs < numInputTypes) {
strippedInputs = inputTypes.subList(0, numOutputs);
}
if (limit == TopN.NO_LIMIT && offset == 0) {
if (numOutputs >= numInputTypes) {
return null;
}
return new EvalProjection(InputColumn.mapToInputColumns(strippedInputs));
}
return new TopNProjection(limit, offset, strippedInputs);
}
/**
 * Build a {@link WriterProjection} which writes the given inputs to {@code uri}.
 *
 * @param inputs            symbols to write, mapped to InputColumns
 * @param uri               target location for the output
 * @param compressionType   optional compression to apply
 * @param overwrites        column overwrites to apply while writing
 * @param outputNames       optional explicit output column names
 * @param outputFormat      the serialization format
 * @param withClauseOptions settings from the statement's WITH clause
 */
public static WriterProjection writerProjection(Collection<? extends Symbol> inputs,
                                                Symbol uri,
                                                @Nullable WriterProjection.CompressionType compressionType,
                                                Map<ColumnIdent, Symbol> overwrites,
                                                @Nullable List<String> outputNames,
                                                WriterProjection.OutputFormat outputFormat,
                                                Settings withClauseOptions) {
    var inputColumns = InputColumn.mapToInputColumns(inputs);
    return new WriterProjection(
        inputColumns,
        uri,
        compressionType,
        overwrites,
        outputNames,
        outputFormat,
        withClauseOptions);
}
}
| |
/*
* Copyright 2002-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.security.access.intercept.aspectj.aspect;
import static org.assertj.core.api.Assertions.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.springframework.security.access.AccessDecisionVoter;
import org.springframework.security.access.AccessDeniedException;
import org.springframework.security.access.annotation.Secured;
import org.springframework.security.access.annotation.SecuredAnnotationSecurityMetadataSource;
import org.springframework.security.access.expression.method.DefaultMethodSecurityExpressionHandler;
import org.springframework.security.access.expression.method.ExpressionBasedAnnotationAttributeFactory;
import org.springframework.security.access.expression.method.ExpressionBasedPostInvocationAdvice;
import org.springframework.security.access.expression.method.ExpressionBasedPreInvocationAdvice;
import org.springframework.security.access.intercept.AfterInvocationProviderManager;
import org.springframework.security.access.intercept.aspectj.AspectJMethodSecurityInterceptor;
import org.springframework.security.access.prepost.PostFilter;
import org.springframework.security.access.prepost.PostInvocationAdviceProvider;
import org.springframework.security.access.prepost.PreAuthorize;
import org.springframework.security.access.prepost.PreInvocationAuthorizationAdviceVoter;
import org.springframework.security.access.prepost.PrePostAnnotationSecurityMetadataSource;
import org.springframework.security.access.vote.AffirmativeBased;
import org.springframework.security.access.vote.RoleVoter;
import org.springframework.security.authentication.AuthenticationCredentialsNotFoundException;
import org.springframework.security.authentication.AuthenticationManager;
import org.springframework.security.authentication.TestingAuthenticationToken;
import org.springframework.security.core.context.SecurityContextHolder;
/**
 * Tests that {@link AnnotationSecurityAspect} applies security interception to
 * methods carrying {@code @Secured} and expression-based pre/post annotations.
 *
 * @author Luke Taylor
 * @since 3.0.3
 */
public class AnnotationSecurityAspectTests {
    private AffirmativeBased accessDecisionManager;
    private @Mock AuthenticationManager authman;
    private TestingAuthenticationToken anne = new TestingAuthenticationToken("anne", "",
            "ROLE_A");
    // private TestingAuthenticationToken bob = new TestingAuthenticationToken("bob", "",
    // "ROLE_B");
    private AspectJMethodSecurityInterceptor interceptor;
    private SecuredImpl secured = new SecuredImpl();
    private SecuredImplSubclass securedSub = new SecuredImplSubclass();
    private PrePostSecured prePostSecured = new PrePostSecured();

    @Before
    public final void setUp() throws Exception {
        MockitoAnnotations.initMocks(this);
        // The decision manager votes on both role attributes (@Secured) and
        // expression-based pre-invocation attributes.
        AccessDecisionVoter[] decisionVoters = new AccessDecisionVoter[] {
                new RoleVoter(),
                new PreInvocationAuthorizationAdviceVoter(
                        new ExpressionBasedPreInvocationAdvice()) };
        accessDecisionManager = new AffirmativeBased(
                Arrays.<AccessDecisionVoter<? extends Object>> asList(decisionVoters));
        interceptor = new AspectJMethodSecurityInterceptor();
        interceptor.setAccessDecisionManager(accessDecisionManager);
        interceptor.setAuthenticationManager(authman);
        interceptor
                .setSecurityMetadataSource(new SecuredAnnotationSecurityMetadataSource());
        // Wire the interceptor into the aspect singleton produced by AspectJ weaving.
        AnnotationSecurityAspect aspect = AnnotationSecurityAspect.aspectOf();
        aspect.setSecurityInterceptor(interceptor);
    }

    @After
    public void clearContext() {
        SecurityContextHolder.clearContext();
    }

    @Test
    public void securedInterfaceMethodAllowsAllAccess() throws Exception {
        // No exception expected: the @Secured annotation sits on the interface
        // and is not inherited by the implementation (see SecuredImpl).
        secured.securedMethod();
    }

    @Test(expected = AuthenticationCredentialsNotFoundException.class)
    public void securedClassMethodDeniesUnauthenticatedAccess() throws Exception {
        secured.securedClassMethod();
    }

    @Test
    public void securedClassMethodAllowsAccessToRoleA() throws Exception {
        SecurityContextHolder.getContext().setAuthentication(anne);
        secured.securedClassMethod();
    }

    @Test(expected = AccessDeniedException.class)
    public void internalPrivateCallIsIntercepted() {
        SecurityContextHolder.getContext().setAuthentication(anne);
        try {
            secured.publicCallsPrivate();
            fail("Expected AccessDeniedException");
        }
        catch (AccessDeniedException expected) {
        }
        // The subclass delegates to the secured superclass method; this call is
        // expected to raise the AccessDeniedException declared on the @Test.
        securedSub.publicCallsPrivate();
    }

    @Test(expected = AccessDeniedException.class)
    public void protectedMethodIsIntercepted() throws Exception {
        SecurityContextHolder.getContext().setAuthentication(anne);
        secured.protectedMethod();
    }

    @Test
    public void overriddenProtectedMethodIsNotIntercepted() throws Exception {
        // AspectJ doesn't inherit annotations
        securedSub.protectedMethod();
    }

    // SEC-1262
    @Test(expected = AccessDeniedException.class)
    public void denyAllPreAuthorizeDeniesAccess() throws Exception {
        configureForElAnnotations();
        SecurityContextHolder.getContext().setAuthentication(anne);
        prePostSecured.denyAllMethod();
    }

    @Test
    public void postFilterIsApplied() throws Exception {
        configureForElAnnotations();
        SecurityContextHolder.getContext().setAuthentication(anne);
        List<String> objects = prePostSecured.postFilterMethod();
        // Only the entries starting with 'a' survive the @PostFilter expression.
        assertThat(objects).hasSize(2);
        assertThat(objects.contains("apple")).isTrue();
        assertThat(objects.contains("aubergine")).isTrue();
    }

    /** Switch the interceptor to expression-based pre/post annotation metadata. */
    private void configureForElAnnotations() {
        DefaultMethodSecurityExpressionHandler expressionHandler = new DefaultMethodSecurityExpressionHandler();
        interceptor.setSecurityMetadataSource(new PrePostAnnotationSecurityMetadataSource(
                new ExpressionBasedAnnotationAttributeFactory(expressionHandler)));
        interceptor.setAccessDecisionManager(accessDecisionManager);
        AfterInvocationProviderManager afterInvocationManager = new AfterInvocationProviderManager();
        afterInvocationManager.setProviders(Arrays.asList(new PostInvocationAdviceProvider(
                new ExpressionBasedPostInvocationAdvice(expressionHandler))));
        interceptor.setAfterInvocationManager(afterInvocationManager);
    }
}
/**
 * Fixture interface carrying an interface-level {@code @Secured} annotation,
 * used to verify that AspectJ does NOT apply annotations declared on an
 * interface to the implementing class (see SecuredImpl's comment).
 */
interface SecuredInterface {
    @Secured("ROLE_X")
    void securedMethod();
}
/**
 * Fixture implementation exercising class-, private-, and protected-level
 * {@code @Secured} annotations.
 */
class SecuredImpl implements SecuredInterface {
    // Not really secured because AspectJ doesn't inherit annotations from interfaces
    public void securedMethod() {
    }

    // Secured directly on the class method; requires ROLE_A.
    @Secured("ROLE_A")
    public void securedClassMethod() {
    }

    // Secured private method; the tests check that even internal (self) calls
    // to it are intercepted.
    @Secured("ROLE_X")
    private void privateMethod() {
    }

    @Secured("ROLE_X")
    protected void protectedMethod() {
    }

    // Public entry point whose body makes an internal call to the secured
    // private method above.
    @Secured("ROLE_X")
    public void publicCallsPrivate() {
        privateMethod();
    }
}
/**
 * Fixture subclass: overrides without re-declaring {@code @Secured}. The tests
 * use it to show that AspectJ does not inherit the superclass annotations onto
 * the overriding methods.
 */
class SecuredImplSubclass extends SecuredImpl {
    // Unannotated override of a @Secured superclass method.
    protected void protectedMethod() {
    }

    // Unannotated override that delegates to the secured superclass body.
    public void publicCallsPrivate() {
        super.publicCallsPrivate();
    }
}
/**
 * Fixture for expression-based pre/post annotations ({@code @PreAuthorize},
 * {@code @PostFilter}).
 */
class PrePostSecured {
    @PreAuthorize("denyAll")
    public void denyAllMethod() {
    }

    @PostFilter("filterObject.startsWith('a')")
    public List<String> postFilterMethod() {
        // Unfiltered result; the @PostFilter advice is expected to strip the
        // entries that do not start with 'a'.
        return new ArrayList<>(Arrays.asList("apple", "banana", "aubergine", "orange"));
    }
}
| |
/*
* This file is generated by jOOQ.
*/
package com.rpkit.payments.bukkit.database.jooq.tables.records;
import com.rpkit.payments.bukkit.database.jooq.tables.RpkitPaymentNotification;
import java.time.LocalDateTime;
import org.jooq.Field;
import org.jooq.Record1;
import org.jooq.Record6;
import org.jooq.Row6;
import org.jooq.impl.UpdatableRecordImpl;
/**
 * This class is generated by jOOQ.
 * <p>
 * NOTE(review): generated source — hand edits will be lost the next time the
 * jOOQ code generator runs; change the generator configuration instead.
 * Record layout: (id, group_id, to_id, character_id, date, text).
 */
@SuppressWarnings({ "all", "unchecked", "rawtypes" })
public class RpkitPaymentNotificationRecord extends UpdatableRecordImpl<RpkitPaymentNotificationRecord> implements Record6<Integer, Integer, Integer, Integer, LocalDateTime, String> {

    private static final long serialVersionUID = 1L;

    /**
     * Setter for <code>rpkit_payments.rpkit_payment_notification.id</code>.
     */
    public void setId(Integer value) {
        set(0, value);
    }

    /**
     * Getter for <code>rpkit_payments.rpkit_payment_notification.id</code>.
     */
    public Integer getId() {
        return (Integer) get(0);
    }

    /**
     * Setter for <code>rpkit_payments.rpkit_payment_notification.group_id</code>.
     */
    public void setGroupId(Integer value) {
        set(1, value);
    }

    /**
     * Getter for <code>rpkit_payments.rpkit_payment_notification.group_id</code>.
     */
    public Integer getGroupId() {
        return (Integer) get(1);
    }

    /**
     * Setter for <code>rpkit_payments.rpkit_payment_notification.to_id</code>.
     */
    public void setToId(Integer value) {
        set(2, value);
    }

    /**
     * Getter for <code>rpkit_payments.rpkit_payment_notification.to_id</code>.
     */
    public Integer getToId() {
        return (Integer) get(2);
    }

    /**
     * Setter for <code>rpkit_payments.rpkit_payment_notification.character_id</code>.
     */
    public void setCharacterId(Integer value) {
        set(3, value);
    }

    /**
     * Getter for <code>rpkit_payments.rpkit_payment_notification.character_id</code>.
     */
    public Integer getCharacterId() {
        return (Integer) get(3);
    }

    /**
     * Setter for <code>rpkit_payments.rpkit_payment_notification.date</code>.
     */
    public void setDate(LocalDateTime value) {
        set(4, value);
    }

    /**
     * Getter for <code>rpkit_payments.rpkit_payment_notification.date</code>.
     */
    public LocalDateTime getDate() {
        return (LocalDateTime) get(4);
    }

    /**
     * Setter for <code>rpkit_payments.rpkit_payment_notification.text</code>.
     */
    public void setText(String value) {
        set(5, value);
    }

    /**
     * Getter for <code>rpkit_payments.rpkit_payment_notification.text</code>.
     */
    public String getText() {
        return (String) get(5);
    }

    // -------------------------------------------------------------------------
    // Primary key information
    // -------------------------------------------------------------------------

    @Override
    public Record1<Integer> key() {
        // Raw-typed cast is part of jOOQ's generated-record pattern (covered by
        // the class-level @SuppressWarnings).
        return (Record1) super.key();
    }

    // -------------------------------------------------------------------------
    // Record6 type implementation
    // -------------------------------------------------------------------------

    @Override
    public Row6<Integer, Integer, Integer, Integer, LocalDateTime, String> fieldsRow() {
        return (Row6) super.fieldsRow();
    }

    @Override
    public Row6<Integer, Integer, Integer, Integer, LocalDateTime, String> valuesRow() {
        return (Row6) super.valuesRow();
    }

    @Override
    public Field<Integer> field1() {
        return RpkitPaymentNotification.RPKIT_PAYMENT_NOTIFICATION.ID;
    }

    @Override
    public Field<Integer> field2() {
        return RpkitPaymentNotification.RPKIT_PAYMENT_NOTIFICATION.GROUP_ID;
    }

    @Override
    public Field<Integer> field3() {
        return RpkitPaymentNotification.RPKIT_PAYMENT_NOTIFICATION.TO_ID;
    }

    @Override
    public Field<Integer> field4() {
        return RpkitPaymentNotification.RPKIT_PAYMENT_NOTIFICATION.CHARACTER_ID;
    }

    @Override
    public Field<LocalDateTime> field5() {
        return RpkitPaymentNotification.RPKIT_PAYMENT_NOTIFICATION.DATE;
    }

    @Override
    public Field<String> field6() {
        return RpkitPaymentNotification.RPKIT_PAYMENT_NOTIFICATION.TEXT;
    }

    @Override
    public Integer component1() {
        return getId();
    }

    @Override
    public Integer component2() {
        return getGroupId();
    }

    @Override
    public Integer component3() {
        return getToId();
    }

    @Override
    public Integer component4() {
        return getCharacterId();
    }

    @Override
    public LocalDateTime component5() {
        return getDate();
    }

    @Override
    public String component6() {
        return getText();
    }

    @Override
    public Integer value1() {
        return getId();
    }

    @Override
    public Integer value2() {
        return getGroupId();
    }

    @Override
    public Integer value3() {
        return getToId();
    }

    @Override
    public Integer value4() {
        return getCharacterId();
    }

    @Override
    public LocalDateTime value5() {
        return getDate();
    }

    @Override
    public String value6() {
        return getText();
    }

    @Override
    public RpkitPaymentNotificationRecord value1(Integer value) {
        setId(value);
        return this;
    }

    @Override
    public RpkitPaymentNotificationRecord value2(Integer value) {
        setGroupId(value);
        return this;
    }

    @Override
    public RpkitPaymentNotificationRecord value3(Integer value) {
        setToId(value);
        return this;
    }

    @Override
    public RpkitPaymentNotificationRecord value4(Integer value) {
        setCharacterId(value);
        return this;
    }

    @Override
    public RpkitPaymentNotificationRecord value5(LocalDateTime value) {
        setDate(value);
        return this;
    }

    @Override
    public RpkitPaymentNotificationRecord value6(String value) {
        setText(value);
        return this;
    }

    @Override
    public RpkitPaymentNotificationRecord values(Integer value1, Integer value2, Integer value3, Integer value4, LocalDateTime value5, String value6) {
        value1(value1);
        value2(value2);
        value3(value3);
        value4(value4);
        value5(value5);
        value6(value6);
        return this;
    }

    // -------------------------------------------------------------------------
    // Constructors
    // -------------------------------------------------------------------------

    /**
     * Create a detached RpkitPaymentNotificationRecord
     */
    public RpkitPaymentNotificationRecord() {
        super(RpkitPaymentNotification.RPKIT_PAYMENT_NOTIFICATION);
    }

    /**
     * Create a detached, initialised RpkitPaymentNotificationRecord
     */
    public RpkitPaymentNotificationRecord(Integer id, Integer groupId, Integer toId, Integer characterId, LocalDateTime date, String text) {
        super(RpkitPaymentNotification.RPKIT_PAYMENT_NOTIFICATION);
        setId(id);
        setGroupId(groupId);
        setToId(toId);
        setCharacterId(characterId);
        setDate(date);
        setText(text);
    }
}
| |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.prestosql.sql.planner.optimizations;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import io.prestosql.Session;
import io.prestosql.metadata.Metadata;
import io.prestosql.spi.connector.ConstantProperty;
import io.prestosql.spi.connector.LocalProperty;
import io.prestosql.spi.predicate.NullableValue;
import io.prestosql.sql.planner.Partitioning;
import io.prestosql.sql.planner.PartitioningHandle;
import io.prestosql.sql.planner.Symbol;
import javax.annotation.concurrent.Immutable;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.function.Function;
import static com.google.common.base.MoreObjects.toStringHelper;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.collect.Iterables.transform;
import static io.prestosql.sql.planner.SystemPartitioningHandle.COORDINATOR_DISTRIBUTION;
import static io.prestosql.sql.planner.SystemPartitioningHandle.SINGLE_DISTRIBUTION;
import static io.prestosql.sql.planner.SystemPartitioningHandle.SOURCE_DISTRIBUTION;
import static io.prestosql.util.MoreLists.filteredCopy;
import static java.util.Objects.requireNonNull;
/**
 * Describes the physical properties a plan's output actually has: how data is
 * partitioned across nodes and streams ({@link Global}), local (per-stream)
 * ordering/grouping properties, and symbols known to hold constant values.
 * Immutable; use {@link #builder()} / {@link #builderFrom(ActualProperties)}.
 */
public class ActualProperties
{
    private final Global global;
    private final List<LocalProperty<Symbol>> localProperties;
    private final Map<Symbol, NullableValue> constants;

    private ActualProperties(
            Global global,
            List<? extends LocalProperty<Symbol>> localProperties,
            Map<Symbol, NullableValue> constants)
    {
        requireNonNull(global, "globalProperties is null");
        requireNonNull(localProperties, "localProperties is null");
        requireNonNull(constants, "constants is null");

        this.global = global;

        // The constants field implies a ConstantProperty in localProperties (but not vice versa).
        // Let's make sure to include the constants into the local constant properties.
        Set<Symbol> localConstants = LocalProperties.extractLeadingConstants(localProperties);
        localProperties = LocalProperties.stripLeadingConstants(localProperties);

        Set<Symbol> updatedLocalConstants = ImmutableSet.<Symbol>builder()
                .addAll(localConstants)
                .addAll(constants.keySet())
                .build();

        // Re-prepend the merged constants and normalize so redundant properties
        // are pruned before storing.
        List<LocalProperty<Symbol>> updatedLocalProperties = LocalProperties.normalizeAndPrune(ImmutableList.<LocalProperty<Symbol>>builder()
                .addAll(transform(updatedLocalConstants, ConstantProperty::new))
                .addAll(localProperties)
                .build());

        this.localProperties = ImmutableList.copyOf(updatedLocalProperties);
        this.constants = ImmutableMap.copyOf(constants);
    }

    public boolean isCoordinatorOnly()
    {
        return global.isCoordinatorOnly();
    }

    /**
     * @return true if the plan will only execute on a single node
     */
    public boolean isSingleNode()
    {
        return global.isSingleNode();
    }

    public boolean isNullsAndAnyReplicated()
    {
        return global.isNullsAndAnyReplicated();
    }

    public boolean isStreamPartitionedOn(Collection<Symbol> columns)
    {
        return isStreamPartitionedOn(columns, false);
    }

    public boolean isStreamPartitionedOn(Collection<Symbol> columns, boolean nullsAndAnyReplicated)
    {
        // Constant symbols can be ignored when checking partitioning columns.
        return global.isStreamPartitionedOn(columns, constants.keySet(), nullsAndAnyReplicated);
    }

    public boolean isNodePartitionedOn(Collection<Symbol> columns)
    {
        return isNodePartitionedOn(columns, false);
    }

    public boolean isNodePartitionedOn(Collection<Symbol> columns, boolean nullsAndAnyReplicated)
    {
        return global.isNodePartitionedOn(columns, constants.keySet(), nullsAndAnyReplicated);
    }

    public boolean isCompatibleTablePartitioningWith(Partitioning partitioning, boolean nullsAndAnyReplicated, Metadata metadata, Session session)
    {
        return global.isCompatibleTablePartitioningWith(partitioning, nullsAndAnyReplicated, metadata, session);
    }

    public boolean isCompatibleTablePartitioningWith(ActualProperties other, Function<Symbol, Set<Symbol>> symbolMappings, Metadata metadata, Session session)
    {
        return global.isCompatibleTablePartitioningWith(
                other.global,
                symbolMappings,
                symbol -> Optional.ofNullable(constants.get(symbol)),
                symbol -> Optional.ofNullable(other.constants.get(symbol)),
                metadata,
                session);
    }

    /**
     * @return true if all the data will effectively land in a single stream
     */
    public boolean isEffectivelySingleStream()
    {
        return global.isEffectivelySingleStream(constants.keySet());
    }

    /**
     * @return true if repartitioning on the keys will yield some difference
     */
    public boolean isStreamRepartitionEffective(Collection<Symbol> keys)
    {
        return global.isStreamRepartitionEffective(keys, constants.keySet());
    }

    /**
     * Rewrite all symbols through {@code translator}; constants whose symbol
     * cannot be translated are dropped from the result.
     */
    public ActualProperties translate(Function<Symbol, Optional<Symbol>> translator)
    {
        Map<Symbol, NullableValue> translatedConstants = new HashMap<>();
        for (Map.Entry<Symbol, NullableValue> entry : constants.entrySet()) {
            Optional<Symbol> translatedKey = translator.apply(entry.getKey());
            if (translatedKey.isPresent()) {
                translatedConstants.put(translatedKey.get(), entry.getValue());
            }
        }
        return builder()
                .global(global.translate(translator, symbol -> Optional.ofNullable(constants.get(symbol))))
                .local(LocalProperties.translate(localProperties, translator))
                .constants(translatedConstants)
                .build();
    }

    public Optional<Partitioning> getNodePartitioning()
    {
        return global.getNodePartitioning();
    }

    public Map<Symbol, NullableValue> getConstants()
    {
        return constants;
    }

    public List<LocalProperty<Symbol>> getLocalProperties()
    {
        return localProperties;
    }

    public ActualProperties withReplicatedNulls(boolean replicatedNulls)
    {
        return builderFrom(this)
                .global(global.withReplicatedNulls(replicatedNulls))
                .build();
    }

    public static Builder builder()
    {
        return new Builder();
    }

    public static Builder builderFrom(ActualProperties properties)
    {
        return new Builder(properties.global, properties.localProperties, properties.constants);
    }

    /** Mutable builder; {@link #build()} applies the unordered flag by dropping order-sensitive local properties. */
    public static class Builder
    {
        private Global global;
        private List<LocalProperty<Symbol>> localProperties;
        private Map<Symbol, NullableValue> constants;
        private boolean unordered;

        public Builder()
        {
            this(Global.arbitraryPartition(), ImmutableList.of(), ImmutableMap.of());
        }

        public Builder(Global global, List<LocalProperty<Symbol>> localProperties, Map<Symbol, NullableValue> constants)
        {
            this.global = requireNonNull(global, "global is null");
            this.localProperties = ImmutableList.copyOf(localProperties);
            this.constants = ImmutableMap.copyOf(constants);
        }

        public Builder global(Global global)
        {
            this.global = global;
            return this;
        }

        public Builder global(ActualProperties other)
        {
            this.global = other.global;
            return this;
        }

        public Builder local(List<? extends LocalProperty<Symbol>> localProperties)
        {
            this.localProperties = ImmutableList.copyOf(localProperties);
            return this;
        }

        public Builder constants(Map<Symbol, NullableValue> constants)
        {
            this.constants = ImmutableMap.copyOf(constants);
            return this;
        }

        public Builder unordered(boolean unordered)
        {
            this.unordered = unordered;
            return this;
        }

        public ActualProperties build()
        {
            List<LocalProperty<Symbol>> localProperties = this.localProperties;
            if (unordered) {
                // Order-sensitive properties (e.g. sorting) no longer hold.
                localProperties = filteredCopy(this.localProperties, property -> !property.isOrderSensitive());
            }
            return new ActualProperties(global, localProperties, constants);
        }
    }

    @Override
    public int hashCode()
    {
        // NOTE: only the constant *symbols* participate (constants.keySet()),
        // matching equals() below — presumably because the values themselves do
        // not affect the physical properties; TODO confirm against callers.
        return Objects.hash(global, localProperties, constants.keySet());
    }

    @Override
    public boolean equals(Object obj)
    {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        final ActualProperties other = (ActualProperties) obj;
        // Deliberately compares constants by key set only; see hashCode().
        return Objects.equals(this.global, other.global)
                && Objects.equals(this.localProperties, other.localProperties)
                && Objects.equals(this.constants.keySet(), other.constants.keySet());
    }

    @Override
    public String toString()
    {
        return toStringHelper(this)
                .add("globalProperties", global)
                .add("localProperties", localProperties)
                .add("constants", constants)
                .toString();
    }

    @Immutable
    public static final class Global
    {
        // Description of the partitioning of the data across nodes
        private final Optional<Partitioning> nodePartitioning; // if missing => partitioned with some unknown scheme

        // Description of the partitioning of the data across streams (splits)
        private final Optional<Partitioning> streamPartitioning; // if missing => partitioned with some unknown scheme

        // NOTE: Partitioning on zero columns (or effectively zero columns if the columns are constant) indicates that all
        // the rows will be partitioned into a single node or stream. However, this can still be a partitioned plan in that the plan
        // will be executed on multiple servers, but only one server will get all the data.

        // Description of whether rows with nulls in partitioning columns or some arbitrary rows have been replicated to all *nodes*
        private final boolean nullsAndAnyReplicated;

        private Global(Optional<Partitioning> nodePartitioning, Optional<Partitioning> streamPartitioning, boolean nullsAndAnyReplicated)
        {
            // Invariant: when both partitionings are known, one's column set
            // must contain the other's.
            checkArgument(!nodePartitioning.isPresent()
                            || !streamPartitioning.isPresent()
                            || nodePartitioning.get().getColumns().containsAll(streamPartitioning.get().getColumns())
                            || streamPartitioning.get().getColumns().containsAll(nodePartitioning.get().getColumns()),
                    "Global stream partitioning columns should match node partitioning columns");
            this.nodePartitioning = requireNonNull(nodePartitioning, "nodePartitioning is null");
            this.streamPartitioning = requireNonNull(streamPartitioning, "streamPartitioning is null");
            this.nullsAndAnyReplicated = nullsAndAnyReplicated;
        }

        public static Global coordinatorSingleStreamPartition()
        {
            return partitionedOn(
                    COORDINATOR_DISTRIBUTION,
                    ImmutableList.of(),
                    Optional.of(ImmutableList.of()));
        }

        public static Global singleStreamPartition()
        {
            return partitionedOn(
                    SINGLE_DISTRIBUTION,
                    ImmutableList.of(),
                    Optional.of(ImmutableList.of()));
        }

        public static Global arbitraryPartition()
        {
            return new Global(Optional.empty(), Optional.empty(), false);
        }

        public static Global partitionedOn(PartitioningHandle nodePartitioningHandle, List<Symbol> nodePartitioning, Optional<List<Symbol>> streamPartitioning)
        {
            return new Global(
                    Optional.of(Partitioning.create(nodePartitioningHandle, nodePartitioning)),
                    streamPartitioning.map(columns -> Partitioning.create(SOURCE_DISTRIBUTION, columns)),
                    false);
        }

        public static Global partitionedOn(Partitioning nodePartitioning, Optional<Partitioning> streamPartitioning)
        {
            return new Global(
                    Optional.of(nodePartitioning),
                    streamPartitioning,
                    false);
        }

        public static Global streamPartitionedOn(List<Symbol> streamPartitioning)
        {
            return new Global(
                    Optional.empty(),
                    Optional.of(Partitioning.create(SOURCE_DISTRIBUTION, streamPartitioning)),
                    false);
        }

        public Global withReplicatedNulls(boolean replicatedNulls)
        {
            return new Global(nodePartitioning, streamPartitioning, replicatedNulls);
        }

        private boolean isNullsAndAnyReplicated()
        {
            return nullsAndAnyReplicated;
        }

        /**
         * @return true if the plan will only execute on a single node
         */
        private boolean isSingleNode()
        {
            if (!nodePartitioning.isPresent()) {
                return false;
            }

            return nodePartitioning.get().getHandle().isSingleNode();
        }

        private boolean isCoordinatorOnly()
        {
            if (!nodePartitioning.isPresent()) {
                return false;
            }

            return nodePartitioning.get().getHandle().isCoordinatorOnly();
        }

        private boolean isNodePartitionedOn(Collection<Symbol> columns, Set<Symbol> constants, boolean nullsAndAnyReplicated)
        {
            return nodePartitioning.isPresent() && nodePartitioning.get().isPartitionedOn(columns, constants) && this.nullsAndAnyReplicated == nullsAndAnyReplicated;
        }

        private boolean isCompatibleTablePartitioningWith(Partitioning partitioning, boolean nullsAndAnyReplicated, Metadata metadata, Session session)
        {
            return nodePartitioning.isPresent() && nodePartitioning.get().isCompatibleWith(partitioning, metadata, session) && this.nullsAndAnyReplicated == nullsAndAnyReplicated;
        }

        private boolean isCompatibleTablePartitioningWith(
                Global other,
                Function<Symbol, Set<Symbol>> symbolMappings,
                Function<Symbol, Optional<NullableValue>> leftConstantMapping,
                Function<Symbol, Optional<NullableValue>> rightConstantMapping,
                Metadata metadata,
                Session session)
        {
            return nodePartitioning.isPresent() &&
                    other.nodePartitioning.isPresent() &&
                    nodePartitioning.get().isCompatibleWith(
                            other.nodePartitioning.get(),
                            symbolMappings,
                            leftConstantMapping,
                            rightConstantMapping,
                            metadata,
                            session) &&
                    nullsAndAnyReplicated == other.nullsAndAnyReplicated;
        }

        private Optional<Partitioning> getNodePartitioning()
        {
            return nodePartitioning;
        }

        private boolean isStreamPartitionedOn(Collection<Symbol> columns, Set<Symbol> constants, boolean nullsAndAnyReplicated)
        {
            return streamPartitioning.isPresent() && streamPartitioning.get().isPartitionedOn(columns, constants) && this.nullsAndAnyReplicated == nullsAndAnyReplicated;
        }

        /**
         * @return true if all the data will effectively land in a single stream
         */
        private boolean isEffectivelySingleStream(Set<Symbol> constants)
        {
            return streamPartitioning.isPresent() && streamPartitioning.get().isEffectivelySinglePartition(constants) && !nullsAndAnyReplicated;
        }

        /**
         * @return true if repartitioning on the keys will yield some difference
         */
        private boolean isStreamRepartitionEffective(Collection<Symbol> keys, Set<Symbol> constants)
        {
            return (!streamPartitioning.isPresent() || streamPartitioning.get().isRepartitionEffective(keys, constants)) && !nullsAndAnyReplicated;
        }

        private Global translate(Function<Symbol, Optional<Symbol>> translator, Function<Symbol, Optional<NullableValue>> constants)
        {
            return new Global(
                    nodePartitioning.flatMap(partitioning -> partitioning.translate(translator, constants)),
                    streamPartitioning.flatMap(partitioning -> partitioning.translate(translator, constants)),
                    nullsAndAnyReplicated);
        }

        @Override
        public int hashCode()
        {
            return Objects.hash(nodePartitioning, streamPartitioning, nullsAndAnyReplicated);
        }

        @Override
        public boolean equals(Object obj)
        {
            if (this == obj) {
                return true;
            }
            if (obj == null || getClass() != obj.getClass()) {
                return false;
            }
            final Global other = (Global) obj;
            return Objects.equals(this.nodePartitioning, other.nodePartitioning) &&
                    Objects.equals(this.streamPartitioning, other.streamPartitioning) &&
                    this.nullsAndAnyReplicated == other.nullsAndAnyReplicated;
        }

        @Override
        public String toString()
        {
            return toStringHelper(this)
                    .add("nodePartitioning", nodePartitioning)
                    .add("streamPartitioning", streamPartitioning)
                    .add("nullsAndAnyReplicated", nullsAndAnyReplicated)
                    .toString();
        }
    }
}
| |
/**
* <copyright>
* </copyright>
*
* $Id$
*/
package org.wso2.developerstudio.eclipse.gmf.esb.impl;
import java.util.List;
import java.util.Map;
import org.eclipse.emf.common.notify.Notification;
import org.eclipse.emf.common.notify.NotificationChain;
import org.eclipse.emf.ecore.EClass;
import org.eclipse.emf.ecore.InternalEObject;
import org.eclipse.emf.ecore.impl.ENotificationImpl;
import org.wso2.developerstudio.eclipse.gmf.esb.CacheAction;
import org.wso2.developerstudio.eclipse.gmf.esb.CacheImplementationType;
import org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator;
import org.wso2.developerstudio.eclipse.gmf.esb.CacheMediatorInputConnector;
import org.wso2.developerstudio.eclipse.gmf.esb.CacheMediatorOnHitOutputConnector;
import org.wso2.developerstudio.eclipse.gmf.esb.CacheMediatorOutputConnector;
import org.wso2.developerstudio.eclipse.gmf.esb.CacheOnHitBranch;
import org.wso2.developerstudio.eclipse.gmf.esb.CacheScope;
import org.wso2.developerstudio.eclipse.gmf.esb.CacheSequenceType;
import org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage;
import org.wso2.developerstudio.eclipse.gmf.esb.HashGenerator;
import org.wso2.developerstudio.eclipse.gmf.esb.MediatorFlow;
import org.wso2.developerstudio.eclipse.gmf.esb.RegistryKeyProperty;
import org.wso2.developerstudio.eclipse.platform.core.mediatype.PlatformMediaTypeConstants;
import org.wso2.developerstudio.eclipse.platform.core.utils.CSProviderConstants;
import org.wso2.developerstudio.eclipse.platform.core.utils.DeveloperStudioProviderUtils;
/**
* <!-- begin-user-doc -->
* An implementation of the model object '<em><b>Cache Mediator</b></em>'.
* <!-- end-user-doc -->
* <p>
* The following features are implemented:
* <ul>
* <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.impl.CacheMediatorImpl#getCacheId <em>Cache Id</em>}</li>
* <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.impl.CacheMediatorImpl#getCacheScope <em>Cache Scope</em>}</li>
* <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.impl.CacheMediatorImpl#getCacheAction <em>Cache Action</em>}</li>
* <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.impl.CacheMediatorImpl#getHashGenerator <em>Hash Generator</em>}</li>
* <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.impl.CacheMediatorImpl#getCacheTimeout <em>Cache Timeout</em>}</li>
* <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.impl.CacheMediatorImpl#getMaxMessageSize <em>Max Message Size</em>}</li>
* <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.impl.CacheMediatorImpl#getImplementationType <em>Implementation Type</em>}</li>
* <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.impl.CacheMediatorImpl#getMaxEntryCount <em>Max Entry Count</em>}</li>
* <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.impl.CacheMediatorImpl#getSequenceType <em>Sequence Type</em>}</li>
* <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.impl.CacheMediatorImpl#getSequenceKey <em>Sequence Key</em>}</li>
* <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.impl.CacheMediatorImpl#getInputConnector <em>Input Connector</em>}</li>
* <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.impl.CacheMediatorImpl#getOutputConnector <em>Output Connector</em>}</li>
* <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.impl.CacheMediatorImpl#getOnHitOutputConnector <em>On Hit Output Connector</em>}</li>
* <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.impl.CacheMediatorImpl#getMediatorFlow <em>Mediator Flow</em>}</li>
* </ul>
* </p>
*
* @generated
*/
public class CacheMediatorImpl extends MediatorImpl implements CacheMediator {
// EMF-generated model implementation: plain fields with notification-aware accessors,
// plus the reflective eGet/eSet/eUnset/eIsSet API used by the EMF runtime.
/**
 * The default value of the '{@link #getCacheId() <em>Cache Id</em>}' attribute.
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @see #getCacheId()
 * @generated NOT
 * @ordered
 */
protected static final String CACHE_ID_EDEFAULT = "";
/**
 * The cached value of the '{@link #getCacheId() <em>Cache Id</em>}' attribute.
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @see #getCacheId()
 * @generated
 * @ordered
 */
protected String cacheId = CACHE_ID_EDEFAULT;
/**
 * The default value of the '{@link #getCacheScope() <em>Cache Scope</em>}' attribute.
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @see #getCacheScope()
 * @generated
 * @ordered
 */
protected static final CacheScope CACHE_SCOPE_EDEFAULT = CacheScope.PER_MEDIATOR;
/**
 * The cached value of the '{@link #getCacheScope() <em>Cache Scope</em>}' attribute.
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @see #getCacheScope()
 * @generated
 * @ordered
 */
protected CacheScope cacheScope = CACHE_SCOPE_EDEFAULT;
/**
 * The default value of the '{@link #getCacheAction() <em>Cache Action</em>}' attribute.
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @see #getCacheAction()
 * @generated
 * @ordered
 */
protected static final CacheAction CACHE_ACTION_EDEFAULT = CacheAction.FINDER;
/**
 * The cached value of the '{@link #getCacheAction() <em>Cache Action</em>}' attribute.
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @see #getCacheAction()
 * @generated
 * @ordered
 */
protected CacheAction cacheAction = CACHE_ACTION_EDEFAULT;
/**
 * The default value of the '{@link #getHashGenerator() <em>Hash Generator</em>}' attribute.
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @see #getHashGenerator()
 * @generated
 * @ordered
 */
protected static final HashGenerator HASH_GENERATOR_EDEFAULT = HashGenerator.CARBON_MEDIATOR_CACHE_DIGEST_DOMHASH_GENERATOR;
/**
 * The cached value of the '{@link #getHashGenerator() <em>Hash Generator</em>}' attribute.
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @see #getHashGenerator()
 * @generated
 * @ordered
 */
protected HashGenerator hashGenerator = HASH_GENERATOR_EDEFAULT;
/**
 * The default value of the '{@link #getCacheTimeout() <em>Cache Timeout</em>}' attribute.
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @see #getCacheTimeout()
 * @generated
 * @ordered
 */
protected static final int CACHE_TIMEOUT_EDEFAULT = 120;
/**
 * The cached value of the '{@link #getCacheTimeout() <em>Cache Timeout</em>}' attribute.
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @see #getCacheTimeout()
 * @generated
 * @ordered
 */
protected int cacheTimeout = CACHE_TIMEOUT_EDEFAULT;
/**
 * The default value of the '{@link #getMaxMessageSize() <em>Max Message Size</em>}' attribute.
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @see #getMaxMessageSize()
 * @generated
 * @ordered
 */
protected static final int MAX_MESSAGE_SIZE_EDEFAULT = 2000;
/**
 * The cached value of the '{@link #getMaxMessageSize() <em>Max Message Size</em>}' attribute.
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @see #getMaxMessageSize()
 * @generated
 * @ordered
 */
protected int maxMessageSize = MAX_MESSAGE_SIZE_EDEFAULT;
/**
 * The default value of the '{@link #getImplementationType() <em>Implementation Type</em>}' attribute.
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @see #getImplementationType()
 * @generated
 * @ordered
 */
protected static final CacheImplementationType IMPLEMENTATION_TYPE_EDEFAULT = CacheImplementationType.IN_MEMORY;
/**
 * The cached value of the '{@link #getImplementationType() <em>Implementation Type</em>}' attribute.
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @see #getImplementationType()
 * @generated
 * @ordered
 */
protected CacheImplementationType implementationType = IMPLEMENTATION_TYPE_EDEFAULT;
/**
 * The default value of the '{@link #getMaxEntryCount() <em>Max Entry Count</em>}' attribute.
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @see #getMaxEntryCount()
 * @generated
 * @ordered
 */
protected static final int MAX_ENTRY_COUNT_EDEFAULT = 1000;
/**
 * The cached value of the '{@link #getMaxEntryCount() <em>Max Entry Count</em>}' attribute.
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @see #getMaxEntryCount()
 * @generated
 * @ordered
 */
protected int maxEntryCount = MAX_ENTRY_COUNT_EDEFAULT;
/**
 * The default value of the '{@link #getSequenceType() <em>Sequence Type</em>}' attribute.
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @see #getSequenceType()
 * @generated NOT
 * @ordered
 */
protected static final CacheSequenceType SEQUENCE_TYPE_EDEFAULT = CacheSequenceType.ANONYMOUS;
/**
 * The cached value of the '{@link #getSequenceType() <em>Sequence Type</em>}' attribute.
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @see #getSequenceType()
 * @generated
 * @ordered
 */
protected CacheSequenceType sequenceType = SEQUENCE_TYPE_EDEFAULT;
/**
 * The cached value of the '{@link #getSequenceKey() <em>Sequence Key</em>}' containment reference.
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @see #getSequenceKey()
 * @generated
 * @ordered
 */
protected RegistryKeyProperty sequenceKey;
/**
 * The cached value of the '{@link #getInputConnector() <em>Input Connector</em>}' containment reference.
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @see #getInputConnector()
 * @generated
 * @ordered
 */
protected CacheMediatorInputConnector inputConnector;
/**
 * The cached value of the '{@link #getOutputConnector() <em>Output Connector</em>}' containment reference.
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @see #getOutputConnector()
 * @generated
 * @ordered
 */
protected CacheMediatorOutputConnector outputConnector;
/**
 * The cached value of the '{@link #getOnHitOutputConnector() <em>On Hit Output Connector</em>}' containment reference.
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @see #getOnHitOutputConnector()
 * @generated
 * @ordered
 */
protected CacheMediatorOnHitOutputConnector onHitOutputConnector;
/**
 * The cached value of the '{@link #getMediatorFlow() <em>Mediator Flow</em>}' containment reference.
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @see #getMediatorFlow()
 * @generated
 * @ordered
 */
protected MediatorFlow mediatorFlow;
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated NOT
 */
protected CacheMediatorImpl() {
super();
// Hand-edited (@generated NOT): pre-populate the sequence key with placeholder
// values so a new CacheMediator always carries a registry key property.
RegistryKeyProperty keyproperty = EsbFactoryImpl.eINSTANCE.createRegistryKeyProperty();
keyproperty.setKeyName("keyName");
keyproperty.setPrettyName("prettyName");
keyproperty.setKeyValue("default/path");
setSequenceKey(keyproperty);
}
/**
 * Returns the static EMF metaclass for this model object.
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
@Override
protected EClass eStaticClass() {
return EsbPackage.Literals.CACHE_MEDIATOR;
}
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public String getCacheId() {
return cacheId;
}
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public void setCacheId(String newCacheId) {
String oldCacheId = cacheId;
cacheId = newCacheId;
if (eNotificationRequired())
eNotify(new ENotificationImpl(this, Notification.SET, EsbPackage.CACHE_MEDIATOR__CACHE_ID, oldCacheId, cacheId));
}
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public CacheScope getCacheScope() {
return cacheScope;
}
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public void setCacheScope(CacheScope newCacheScope) {
CacheScope oldCacheScope = cacheScope;
// null falls back to the default literal.
cacheScope = newCacheScope == null ? CACHE_SCOPE_EDEFAULT : newCacheScope;
if (eNotificationRequired())
eNotify(new ENotificationImpl(this, Notification.SET, EsbPackage.CACHE_MEDIATOR__CACHE_SCOPE, oldCacheScope, cacheScope));
}
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public CacheAction getCacheAction() {
return cacheAction;
}
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public void setCacheAction(CacheAction newCacheAction) {
CacheAction oldCacheAction = cacheAction;
// null falls back to the default literal.
cacheAction = newCacheAction == null ? CACHE_ACTION_EDEFAULT : newCacheAction;
if (eNotificationRequired())
eNotify(new ENotificationImpl(this, Notification.SET, EsbPackage.CACHE_MEDIATOR__CACHE_ACTION, oldCacheAction, cacheAction));
}
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public HashGenerator getHashGenerator() {
return hashGenerator;
}
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public void setHashGenerator(HashGenerator newHashGenerator) {
HashGenerator oldHashGenerator = hashGenerator;
// null falls back to the default literal.
hashGenerator = newHashGenerator == null ? HASH_GENERATOR_EDEFAULT : newHashGenerator;
if (eNotificationRequired())
eNotify(new ENotificationImpl(this, Notification.SET, EsbPackage.CACHE_MEDIATOR__HASH_GENERATOR, oldHashGenerator, hashGenerator));
}
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public int getCacheTimeout() {
return cacheTimeout;
}
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public void setCacheTimeout(int newCacheTimeout) {
int oldCacheTimeout = cacheTimeout;
cacheTimeout = newCacheTimeout;
if (eNotificationRequired())
eNotify(new ENotificationImpl(this, Notification.SET, EsbPackage.CACHE_MEDIATOR__CACHE_TIMEOUT, oldCacheTimeout, cacheTimeout));
}
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public int getMaxMessageSize() {
return maxMessageSize;
}
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public void setMaxMessageSize(int newMaxMessageSize) {
int oldMaxMessageSize = maxMessageSize;
maxMessageSize = newMaxMessageSize;
if (eNotificationRequired())
eNotify(new ENotificationImpl(this, Notification.SET, EsbPackage.CACHE_MEDIATOR__MAX_MESSAGE_SIZE, oldMaxMessageSize, maxMessageSize));
}
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public CacheImplementationType getImplementationType() {
return implementationType;
}
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public void setImplementationType(CacheImplementationType newImplementationType) {
CacheImplementationType oldImplementationType = implementationType;
// null falls back to the default literal.
implementationType = newImplementationType == null ? IMPLEMENTATION_TYPE_EDEFAULT : newImplementationType;
if (eNotificationRequired())
eNotify(new ENotificationImpl(this, Notification.SET, EsbPackage.CACHE_MEDIATOR__IMPLEMENTATION_TYPE, oldImplementationType, implementationType));
}
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public int getMaxEntryCount() {
return maxEntryCount;
}
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public void setMaxEntryCount(int newMaxEntryCount) {
int oldMaxEntryCount = maxEntryCount;
maxEntryCount = newMaxEntryCount;
if (eNotificationRequired())
eNotify(new ENotificationImpl(this, Notification.SET, EsbPackage.CACHE_MEDIATOR__MAX_ENTRY_COUNT, oldMaxEntryCount, maxEntryCount));
}
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public CacheSequenceType getSequenceType() {
return sequenceType;
}
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public void setSequenceType(CacheSequenceType newSequenceType) {
CacheSequenceType oldSequenceType = sequenceType;
// null falls back to the default literal.
sequenceType = newSequenceType == null ? SEQUENCE_TYPE_EDEFAULT : newSequenceType;
if (eNotificationRequired())
eNotify(new ENotificationImpl(this, Notification.SET, EsbPackage.CACHE_MEDIATOR__SEQUENCE_TYPE, oldSequenceType, sequenceType));
}
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public RegistryKeyProperty getSequenceKey() {
return sequenceKey;
}
/**
 * Raw reference swap used by the containment machinery; does not manage
 * inverse references itself (see {@link #setSequenceKey(RegistryKeyProperty)}).
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public NotificationChain basicSetSequenceKey(RegistryKeyProperty newSequenceKey, NotificationChain msgs) {
RegistryKeyProperty oldSequenceKey = sequenceKey;
sequenceKey = newSequenceKey;
if (eNotificationRequired()) {
ENotificationImpl notification = new ENotificationImpl(this, Notification.SET, EsbPackage.CACHE_MEDIATOR__SEQUENCE_KEY, oldSequenceKey, newSequenceKey);
if (msgs == null) msgs = notification; else msgs.add(notification);
}
return msgs;
}
/**
 * Standard EMF containment setter: detaches the old child, attaches the new
 * one, then dispatches the accumulated notifications in one chain.
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public void setSequenceKey(RegistryKeyProperty newSequenceKey) {
if (newSequenceKey != sequenceKey) {
NotificationChain msgs = null;
if (sequenceKey != null)
msgs = ((InternalEObject)sequenceKey).eInverseRemove(this, EOPPOSITE_FEATURE_BASE - EsbPackage.CACHE_MEDIATOR__SEQUENCE_KEY, null, msgs);
if (newSequenceKey != null)
msgs = ((InternalEObject)newSequenceKey).eInverseAdd(this, EOPPOSITE_FEATURE_BASE - EsbPackage.CACHE_MEDIATOR__SEQUENCE_KEY, null, msgs);
msgs = basicSetSequenceKey(newSequenceKey, msgs);
if (msgs != null) msgs.dispatch();
}
else if (eNotificationRequired())
eNotify(new ENotificationImpl(this, Notification.SET, EsbPackage.CACHE_MEDIATOR__SEQUENCE_KEY, newSequenceKey, newSequenceKey));
}
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public CacheMediatorInputConnector getInputConnector() {
return inputConnector;
}
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public NotificationChain basicSetInputConnector(CacheMediatorInputConnector newInputConnector, NotificationChain msgs) {
CacheMediatorInputConnector oldInputConnector = inputConnector;
inputConnector = newInputConnector;
if (eNotificationRequired()) {
ENotificationImpl notification = new ENotificationImpl(this, Notification.SET, EsbPackage.CACHE_MEDIATOR__INPUT_CONNECTOR, oldInputConnector, newInputConnector);
if (msgs == null) msgs = notification; else msgs.add(notification);
}
return msgs;
}
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public void setInputConnector(CacheMediatorInputConnector newInputConnector) {
if (newInputConnector != inputConnector) {
NotificationChain msgs = null;
if (inputConnector != null)
msgs = ((InternalEObject)inputConnector).eInverseRemove(this, EOPPOSITE_FEATURE_BASE - EsbPackage.CACHE_MEDIATOR__INPUT_CONNECTOR, null, msgs);
if (newInputConnector != null)
msgs = ((InternalEObject)newInputConnector).eInverseAdd(this, EOPPOSITE_FEATURE_BASE - EsbPackage.CACHE_MEDIATOR__INPUT_CONNECTOR, null, msgs);
msgs = basicSetInputConnector(newInputConnector, msgs);
if (msgs != null) msgs.dispatch();
}
else if (eNotificationRequired())
eNotify(new ENotificationImpl(this, Notification.SET, EsbPackage.CACHE_MEDIATOR__INPUT_CONNECTOR, newInputConnector, newInputConnector));
}
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public CacheMediatorOutputConnector getOutputConnector() {
return outputConnector;
}
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public NotificationChain basicSetOutputConnector(CacheMediatorOutputConnector newOutputConnector, NotificationChain msgs) {
CacheMediatorOutputConnector oldOutputConnector = outputConnector;
outputConnector = newOutputConnector;
if (eNotificationRequired()) {
ENotificationImpl notification = new ENotificationImpl(this, Notification.SET, EsbPackage.CACHE_MEDIATOR__OUTPUT_CONNECTOR, oldOutputConnector, newOutputConnector);
if (msgs == null) msgs = notification; else msgs.add(notification);
}
return msgs;
}
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public void setOutputConnector(CacheMediatorOutputConnector newOutputConnector) {
if (newOutputConnector != outputConnector) {
NotificationChain msgs = null;
if (outputConnector != null)
msgs = ((InternalEObject)outputConnector).eInverseRemove(this, EOPPOSITE_FEATURE_BASE - EsbPackage.CACHE_MEDIATOR__OUTPUT_CONNECTOR, null, msgs);
if (newOutputConnector != null)
msgs = ((InternalEObject)newOutputConnector).eInverseAdd(this, EOPPOSITE_FEATURE_BASE - EsbPackage.CACHE_MEDIATOR__OUTPUT_CONNECTOR, null, msgs);
msgs = basicSetOutputConnector(newOutputConnector, msgs);
if (msgs != null) msgs.dispatch();
}
else if (eNotificationRequired())
eNotify(new ENotificationImpl(this, Notification.SET, EsbPackage.CACHE_MEDIATOR__OUTPUT_CONNECTOR, newOutputConnector, newOutputConnector));
}
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public CacheMediatorOnHitOutputConnector getOnHitOutputConnector() {
return onHitOutputConnector;
}
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public NotificationChain basicSetOnHitOutputConnector(CacheMediatorOnHitOutputConnector newOnHitOutputConnector, NotificationChain msgs) {
CacheMediatorOnHitOutputConnector oldOnHitOutputConnector = onHitOutputConnector;
onHitOutputConnector = newOnHitOutputConnector;
if (eNotificationRequired()) {
ENotificationImpl notification = new ENotificationImpl(this, Notification.SET, EsbPackage.CACHE_MEDIATOR__ON_HIT_OUTPUT_CONNECTOR, oldOnHitOutputConnector, newOnHitOutputConnector);
if (msgs == null) msgs = notification; else msgs.add(notification);
}
return msgs;
}
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public void setOnHitOutputConnector(CacheMediatorOnHitOutputConnector newOnHitOutputConnector) {
if (newOnHitOutputConnector != onHitOutputConnector) {
NotificationChain msgs = null;
if (onHitOutputConnector != null)
msgs = ((InternalEObject)onHitOutputConnector).eInverseRemove(this, EOPPOSITE_FEATURE_BASE - EsbPackage.CACHE_MEDIATOR__ON_HIT_OUTPUT_CONNECTOR, null, msgs);
if (newOnHitOutputConnector != null)
msgs = ((InternalEObject)newOnHitOutputConnector).eInverseAdd(this, EOPPOSITE_FEATURE_BASE - EsbPackage.CACHE_MEDIATOR__ON_HIT_OUTPUT_CONNECTOR, null, msgs);
msgs = basicSetOnHitOutputConnector(newOnHitOutputConnector, msgs);
if (msgs != null) msgs.dispatch();
}
else if (eNotificationRequired())
eNotify(new ENotificationImpl(this, Notification.SET, EsbPackage.CACHE_MEDIATOR__ON_HIT_OUTPUT_CONNECTOR, newOnHitOutputConnector, newOnHitOutputConnector));
}
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public MediatorFlow getMediatorFlow() {
return mediatorFlow;
}
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public NotificationChain basicSetMediatorFlow(MediatorFlow newMediatorFlow, NotificationChain msgs) {
MediatorFlow oldMediatorFlow = mediatorFlow;
mediatorFlow = newMediatorFlow;
if (eNotificationRequired()) {
ENotificationImpl notification = new ENotificationImpl(this, Notification.SET, EsbPackage.CACHE_MEDIATOR__MEDIATOR_FLOW, oldMediatorFlow, newMediatorFlow);
if (msgs == null) msgs = notification; else msgs.add(notification);
}
return msgs;
}
/**
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
public void setMediatorFlow(MediatorFlow newMediatorFlow) {
if (newMediatorFlow != mediatorFlow) {
NotificationChain msgs = null;
if (mediatorFlow != null)
msgs = ((InternalEObject)mediatorFlow).eInverseRemove(this, EOPPOSITE_FEATURE_BASE - EsbPackage.CACHE_MEDIATOR__MEDIATOR_FLOW, null, msgs);
if (newMediatorFlow != null)
msgs = ((InternalEObject)newMediatorFlow).eInverseAdd(this, EOPPOSITE_FEATURE_BASE - EsbPackage.CACHE_MEDIATOR__MEDIATOR_FLOW, null, msgs);
msgs = basicSetMediatorFlow(newMediatorFlow, msgs);
if (msgs != null) msgs.dispatch();
}
else if (eNotificationRequired())
eNotify(new ENotificationImpl(this, Notification.SET, EsbPackage.CACHE_MEDIATOR__MEDIATOR_FLOW, newMediatorFlow, newMediatorFlow));
}
/**
 * Detaches contained children when they are removed from this container.
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
@Override
public NotificationChain eInverseRemove(InternalEObject otherEnd, int featureID, NotificationChain msgs) {
switch (featureID) {
case EsbPackage.CACHE_MEDIATOR__SEQUENCE_KEY:
return basicSetSequenceKey(null, msgs);
case EsbPackage.CACHE_MEDIATOR__INPUT_CONNECTOR:
return basicSetInputConnector(null, msgs);
case EsbPackage.CACHE_MEDIATOR__OUTPUT_CONNECTOR:
return basicSetOutputConnector(null, msgs);
case EsbPackage.CACHE_MEDIATOR__ON_HIT_OUTPUT_CONNECTOR:
return basicSetOnHitOutputConnector(null, msgs);
case EsbPackage.CACHE_MEDIATOR__MEDIATOR_FLOW:
return basicSetMediatorFlow(null, msgs);
}
return super.eInverseRemove(otherEnd, featureID, msgs);
}
/**
 * Reflective accessor used by the EMF runtime.
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
@Override
public Object eGet(int featureID, boolean resolve, boolean coreType) {
switch (featureID) {
case EsbPackage.CACHE_MEDIATOR__CACHE_ID:
return getCacheId();
case EsbPackage.CACHE_MEDIATOR__CACHE_SCOPE:
return getCacheScope();
case EsbPackage.CACHE_MEDIATOR__CACHE_ACTION:
return getCacheAction();
case EsbPackage.CACHE_MEDIATOR__HASH_GENERATOR:
return getHashGenerator();
case EsbPackage.CACHE_MEDIATOR__CACHE_TIMEOUT:
return getCacheTimeout();
case EsbPackage.CACHE_MEDIATOR__MAX_MESSAGE_SIZE:
return getMaxMessageSize();
case EsbPackage.CACHE_MEDIATOR__IMPLEMENTATION_TYPE:
return getImplementationType();
case EsbPackage.CACHE_MEDIATOR__MAX_ENTRY_COUNT:
return getMaxEntryCount();
case EsbPackage.CACHE_MEDIATOR__SEQUENCE_TYPE:
return getSequenceType();
case EsbPackage.CACHE_MEDIATOR__SEQUENCE_KEY:
return getSequenceKey();
case EsbPackage.CACHE_MEDIATOR__INPUT_CONNECTOR:
return getInputConnector();
case EsbPackage.CACHE_MEDIATOR__OUTPUT_CONNECTOR:
return getOutputConnector();
case EsbPackage.CACHE_MEDIATOR__ON_HIT_OUTPUT_CONNECTOR:
return getOnHitOutputConnector();
case EsbPackage.CACHE_MEDIATOR__MEDIATOR_FLOW:
return getMediatorFlow();
}
return super.eGet(featureID, resolve, coreType);
}
/**
 * Reflective mutator used by the EMF runtime.
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
@Override
public void eSet(int featureID, Object newValue) {
switch (featureID) {
case EsbPackage.CACHE_MEDIATOR__CACHE_ID:
setCacheId((String)newValue);
return;
case EsbPackage.CACHE_MEDIATOR__CACHE_SCOPE:
setCacheScope((CacheScope)newValue);
return;
case EsbPackage.CACHE_MEDIATOR__CACHE_ACTION:
setCacheAction((CacheAction)newValue);
return;
case EsbPackage.CACHE_MEDIATOR__HASH_GENERATOR:
setHashGenerator((HashGenerator)newValue);
return;
case EsbPackage.CACHE_MEDIATOR__CACHE_TIMEOUT:
setCacheTimeout((Integer)newValue);
return;
case EsbPackage.CACHE_MEDIATOR__MAX_MESSAGE_SIZE:
setMaxMessageSize((Integer)newValue);
return;
case EsbPackage.CACHE_MEDIATOR__IMPLEMENTATION_TYPE:
setImplementationType((CacheImplementationType)newValue);
return;
case EsbPackage.CACHE_MEDIATOR__MAX_ENTRY_COUNT:
setMaxEntryCount((Integer)newValue);
return;
case EsbPackage.CACHE_MEDIATOR__SEQUENCE_TYPE:
setSequenceType((CacheSequenceType)newValue);
return;
case EsbPackage.CACHE_MEDIATOR__SEQUENCE_KEY:
setSequenceKey((RegistryKeyProperty)newValue);
return;
case EsbPackage.CACHE_MEDIATOR__INPUT_CONNECTOR:
setInputConnector((CacheMediatorInputConnector)newValue);
return;
case EsbPackage.CACHE_MEDIATOR__OUTPUT_CONNECTOR:
setOutputConnector((CacheMediatorOutputConnector)newValue);
return;
case EsbPackage.CACHE_MEDIATOR__ON_HIT_OUTPUT_CONNECTOR:
setOnHitOutputConnector((CacheMediatorOnHitOutputConnector)newValue);
return;
case EsbPackage.CACHE_MEDIATOR__MEDIATOR_FLOW:
setMediatorFlow((MediatorFlow)newValue);
return;
}
super.eSet(featureID, newValue);
}
/**
 * Resets a feature to its default (containment references to null).
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
@Override
public void eUnset(int featureID) {
switch (featureID) {
case EsbPackage.CACHE_MEDIATOR__CACHE_ID:
setCacheId(CACHE_ID_EDEFAULT);
return;
case EsbPackage.CACHE_MEDIATOR__CACHE_SCOPE:
setCacheScope(CACHE_SCOPE_EDEFAULT);
return;
case EsbPackage.CACHE_MEDIATOR__CACHE_ACTION:
setCacheAction(CACHE_ACTION_EDEFAULT);
return;
case EsbPackage.CACHE_MEDIATOR__HASH_GENERATOR:
setHashGenerator(HASH_GENERATOR_EDEFAULT);
return;
case EsbPackage.CACHE_MEDIATOR__CACHE_TIMEOUT:
setCacheTimeout(CACHE_TIMEOUT_EDEFAULT);
return;
case EsbPackage.CACHE_MEDIATOR__MAX_MESSAGE_SIZE:
setMaxMessageSize(MAX_MESSAGE_SIZE_EDEFAULT);
return;
case EsbPackage.CACHE_MEDIATOR__IMPLEMENTATION_TYPE:
setImplementationType(IMPLEMENTATION_TYPE_EDEFAULT);
return;
case EsbPackage.CACHE_MEDIATOR__MAX_ENTRY_COUNT:
setMaxEntryCount(MAX_ENTRY_COUNT_EDEFAULT);
return;
case EsbPackage.CACHE_MEDIATOR__SEQUENCE_TYPE:
setSequenceType(SEQUENCE_TYPE_EDEFAULT);
return;
case EsbPackage.CACHE_MEDIATOR__SEQUENCE_KEY:
setSequenceKey((RegistryKeyProperty)null);
return;
case EsbPackage.CACHE_MEDIATOR__INPUT_CONNECTOR:
setInputConnector((CacheMediatorInputConnector)null);
return;
case EsbPackage.CACHE_MEDIATOR__OUTPUT_CONNECTOR:
setOutputConnector((CacheMediatorOutputConnector)null);
return;
case EsbPackage.CACHE_MEDIATOR__ON_HIT_OUTPUT_CONNECTOR:
setOnHitOutputConnector((CacheMediatorOnHitOutputConnector)null);
return;
case EsbPackage.CACHE_MEDIATOR__MEDIATOR_FLOW:
setMediatorFlow((MediatorFlow)null);
return;
}
super.eUnset(featureID);
}
/**
 * Reports whether a feature currently deviates from its default value.
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
@Override
public boolean eIsSet(int featureID) {
switch (featureID) {
case EsbPackage.CACHE_MEDIATOR__CACHE_ID:
return CACHE_ID_EDEFAULT == null ? cacheId != null : !CACHE_ID_EDEFAULT.equals(cacheId);
case EsbPackage.CACHE_MEDIATOR__CACHE_SCOPE:
return cacheScope != CACHE_SCOPE_EDEFAULT;
case EsbPackage.CACHE_MEDIATOR__CACHE_ACTION:
return cacheAction != CACHE_ACTION_EDEFAULT;
case EsbPackage.CACHE_MEDIATOR__HASH_GENERATOR:
return hashGenerator != HASH_GENERATOR_EDEFAULT;
case EsbPackage.CACHE_MEDIATOR__CACHE_TIMEOUT:
return cacheTimeout != CACHE_TIMEOUT_EDEFAULT;
case EsbPackage.CACHE_MEDIATOR__MAX_MESSAGE_SIZE:
return maxMessageSize != MAX_MESSAGE_SIZE_EDEFAULT;
case EsbPackage.CACHE_MEDIATOR__IMPLEMENTATION_TYPE:
return implementationType != IMPLEMENTATION_TYPE_EDEFAULT;
case EsbPackage.CACHE_MEDIATOR__MAX_ENTRY_COUNT:
return maxEntryCount != MAX_ENTRY_COUNT_EDEFAULT;
case EsbPackage.CACHE_MEDIATOR__SEQUENCE_TYPE:
return sequenceType != SEQUENCE_TYPE_EDEFAULT;
case EsbPackage.CACHE_MEDIATOR__SEQUENCE_KEY:
return sequenceKey != null;
case EsbPackage.CACHE_MEDIATOR__INPUT_CONNECTOR:
return inputConnector != null;
case EsbPackage.CACHE_MEDIATOR__OUTPUT_CONNECTOR:
return outputConnector != null;
case EsbPackage.CACHE_MEDIATOR__ON_HIT_OUTPUT_CONNECTOR:
return onHitOutputConnector != null;
case EsbPackage.CACHE_MEDIATOR__MEDIATOR_FLOW:
return mediatorFlow != null;
}
return super.eIsSet(featureID);
}
/**
 * Renders only the attribute-typed features; containment references are omitted.
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
@Override
public String toString() {
if (eIsProxy()) return super.toString();
StringBuffer result = new StringBuffer(super.toString());
result.append(" (cacheId: ");
result.append(cacheId);
result.append(", cacheScope: ");
result.append(cacheScope);
result.append(", cacheAction: ");
result.append(cacheAction);
result.append(", hashGenerator: ");
result.append(hashGenerator);
result.append(", cacheTimeout: ");
result.append(cacheTimeout);
result.append(", maxMessageSize: ");
result.append(maxMessageSize);
result.append(", implementationType: ");
result.append(implementationType);
result.append(", maxEntryCount: ");
result.append(maxEntryCount);
result.append(", sequenceType: ");
result.append(sequenceType);
result.append(')');
return result.toString();
}
} //CacheMediatorImpl
| |
/*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.greengrass.model;
import java.io.Serializable;
import javax.annotation.Generated;
/**
*
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/GetLoggerDefinition" target="_top">AWS API
* Documentation</a>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class GetLoggerDefinitionResult extends com.amazonaws.AmazonWebServiceResult<com.amazonaws.ResponseMetadata> implements Serializable, Cloneable {
/** The ARN of the definition. */
private String arn;
// NOTE(review): timestamps are carried as String values (per the Javadoc,
// milliseconds since the epoch) rather than numeric/date types.
/** The time, in milliseconds since the epoch, when the definition was created. */
private String creationTimestamp;
/** The ID of the definition. */
private String id;
/** The time, in milliseconds since the epoch, when the definition was last updated. */
private String lastUpdatedTimestamp;
/** The ID of the latest version associated with the definition. */
private String latestVersion;
/** The ARN of the latest version associated with the definition. */
private String latestVersionArn;
/** The name of the definition. */
private String name;
/** Tag(s) attached to the resource arn. */
private java.util.Map<String, String> tags;
/**
* The ARN of the definition.
*
* @param arn
* The ARN of the definition.
*/
public void setArn(String arn) {
// Stores the value as-is; no validation or copying.
this.arn = arn;
}
/**
* The ARN of the definition.
*
* @return The ARN of the definition.
*/
public String getArn() {
// May be null if never set.
return this.arn;
}
/**
* The ARN of the definition.
*
* @param arn
* The ARN of the definition.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public GetLoggerDefinitionResult withArn(String arn) {
// Fluent variant: delegates to the setter and returns this for chaining.
setArn(arn);
return this;
}
/**
* The time, in milliseconds since the epoch, when the definition was created.
*
* @param creationTimestamp
* The time, in milliseconds since the epoch, when the definition was created.
*/
public void setCreationTimestamp(String creationTimestamp) {
// Stored as a String (epoch milliseconds per the field Javadoc); not parsed here.
this.creationTimestamp = creationTimestamp;
}
/**
* The time, in milliseconds since the epoch, when the definition was created.
*
* @return The time, in milliseconds since the epoch, when the definition was created.
*/
public String getCreationTimestamp() {
// May be null if never set.
return this.creationTimestamp;
}
/**
* The time, in milliseconds since the epoch, when the definition was created.
*
* @param creationTimestamp
* The time, in milliseconds since the epoch, when the definition was created.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public GetLoggerDefinitionResult withCreationTimestamp(String creationTimestamp) {
// Fluent variant: delegates to the setter and returns this for chaining.
setCreationTimestamp(creationTimestamp);
return this;
}
/**
* The ID of the definition.
*
* @param id
* The ID of the definition.
*/
public void setId(String id) {
// Stores the value as-is; no validation.
this.id = id;
}
/**
* The ID of the definition.
*
* @return The ID of the definition.
*/
public String getId() {
// May be null if never set.
return this.id;
}
/**
* The ID of the definition.
*
* @param id
* The ID of the definition.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public GetLoggerDefinitionResult withId(String id) {
// Fluent variant: delegates to the setter and returns this for chaining.
setId(id);
return this;
}
/**
* The time, in milliseconds since the epoch, when the definition was last updated.
*
* @param lastUpdatedTimestamp
* The time, in milliseconds since the epoch, when the definition was last updated.
*/
public void setLastUpdatedTimestamp(String lastUpdatedTimestamp) {
// Stored as a String (epoch milliseconds per the field Javadoc); not parsed here.
this.lastUpdatedTimestamp = lastUpdatedTimestamp;
}
/**
* The time, in milliseconds since the epoch, when the definition was last updated.
*
* @return The time, in milliseconds since the epoch, when the definition was last updated.
*/
public String getLastUpdatedTimestamp() {
// May be null if never set.
return this.lastUpdatedTimestamp;
}
/**
* The time, in milliseconds since the epoch, when the definition was last updated.
*
* @param lastUpdatedTimestamp
* The time, in milliseconds since the epoch, when the definition was last updated.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public GetLoggerDefinitionResult withLastUpdatedTimestamp(String lastUpdatedTimestamp) {
// Fluent variant: delegates to the setter and returns this for chaining.
setLastUpdatedTimestamp(lastUpdatedTimestamp);
return this;
}
/**
* The ID of the latest version associated with the definition.
*
* @param latestVersion
* The ID of the latest version associated with the definition.
*/
public void setLatestVersion(String latestVersion) {
// Stores the value as-is; no validation.
this.latestVersion = latestVersion;
}
/**
 * Returns the ID of the latest version associated with the definition.
 *
 * @return The ID of the latest version associated with the definition.
 */
public String getLatestVersion() {
    return this.latestVersion;
}
/**
 * Sets the ID of the latest version associated with the definition
 * (fluent variant of {@link #setLatestVersion(String)}).
 *
 * @param latestVersion
 *        The ID of the latest version associated with the definition.
 * @return Returns a reference to this object so that method calls can be chained together.
 */
public GetLoggerDefinitionResult withLatestVersion(String latestVersion) {
    setLatestVersion(latestVersion);
    return this;
}
/**
 * Sets the ARN of the latest version associated with the definition.
 *
 * @param latestVersionArn
 *        The ARN of the latest version associated with the definition.
 */
public void setLatestVersionArn(String latestVersionArn) {
    this.latestVersionArn = latestVersionArn;
}
/**
 * Returns the ARN of the latest version associated with the definition.
 *
 * @return The ARN of the latest version associated with the definition.
 */
public String getLatestVersionArn() {
    return this.latestVersionArn;
}
/**
 * Sets the ARN of the latest version associated with the definition
 * (fluent variant of {@link #setLatestVersionArn(String)}).
 *
 * @param latestVersionArn
 *        The ARN of the latest version associated with the definition.
 * @return Returns a reference to this object so that method calls can be chained together.
 */
public GetLoggerDefinitionResult withLatestVersionArn(String latestVersionArn) {
    setLatestVersionArn(latestVersionArn);
    return this;
}
/**
 * Sets the name of the definition.
 *
 * @param name
 *        The name of the definition.
 */
public void setName(String name) {
    this.name = name;
}
/**
 * Returns the name of the definition.
 *
 * @return The name of the definition.
 */
public String getName() {
    return this.name;
}
/**
 * Sets the name of the definition (fluent variant of {@link #setName(String)}).
 *
 * @param name
 *        The name of the definition.
 * @return Returns a reference to this object so that method calls can be chained together.
 */
public GetLoggerDefinitionResult withName(String name) {
    setName(name);
    return this;
}
/**
 * Tag(s) attached to the resource arn.
 *
 * @return Tag(s) attached to the resource arn. Note: this returns the internal
 *         map reference (no defensive copy), so mutations by the caller are
 *         visible to this object.
 */
public java.util.Map<String, String> getTags() {
    return tags;
}
/**
 * Sets the tag(s) attached to the resource arn. The supplied map is stored by
 * reference (no defensive copy).
 *
 * @param tags
 *        Tag(s) attached to the resource arn.
 */
public void setTags(java.util.Map<String, String> tags) {
    this.tags = tags;
}
/**
 * Sets the tag(s) attached to the resource arn (fluent variant of
 * {@link #setTags(java.util.Map)}).
 *
 * @param tags
 *        Tag(s) attached to the resource arn.
 * @return Returns a reference to this object so that method calls can be chained together.
 */
public GetLoggerDefinitionResult withTags(java.util.Map<String, String> tags) {
    setTags(tags);
    return this;
}
/**
 * Add a single Tags entry.
 *
 * @param key
 *        the key of the entry to add.
 * @param value
 *        the value of the entry to add.
 * @throws IllegalArgumentException
 *         if an entry with the same key was already added.
 * @see GetLoggerDefinitionResult#withTags
 * @return Returns a reference to this object so that method calls can be chained together.
 */
public GetLoggerDefinitionResult addTagsEntry(String key, String value) {
    // Lazily create the map so a result with no tags never allocates it.
    if (null == this.tags) {
        this.tags = new java.util.HashMap<String, String>();
    }
    // Duplicate keys are rejected rather than silently overwritten.
    if (this.tags.containsKey(key))
        throw new IllegalArgumentException("Duplicated keys (" + key.toString() + ") are provided.");
    this.tags.put(key, value);
    return this;
}
/**
 * Removes all the entries added into Tags (resets the map reference to null,
 * so {@code getTags()} returns null afterwards).
 *
 * @return Returns a reference to this object so that method calls can be chained together.
 */
public GetLoggerDefinitionResult clearTagsEntries() {
    this.tags = null;
    return this;
}
/**
 * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
 * redacted from this string using a placeholder value.
 *
 * @return A string representation of this object.
 *
 * @see java.lang.Object#toString()
 */
@Override
public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append("{");
    // Only non-null properties are emitted. Every property except Tags is
    // followed by a comma, so a trailing comma can precede "}" when Tags is
    // null — this matches the generated-SDK output format.
    if (getArn() != null)
        sb.append("Arn: ").append(getArn()).append(",");
    if (getCreationTimestamp() != null)
        sb.append("CreationTimestamp: ").append(getCreationTimestamp()).append(",");
    if (getId() != null)
        sb.append("Id: ").append(getId()).append(",");
    if (getLastUpdatedTimestamp() != null)
        sb.append("LastUpdatedTimestamp: ").append(getLastUpdatedTimestamp()).append(",");
    if (getLatestVersion() != null)
        sb.append("LatestVersion: ").append(getLatestVersion()).append(",");
    if (getLatestVersionArn() != null)
        sb.append("LatestVersionArn: ").append(getLatestVersionArn()).append(",");
    if (getName() != null)
        sb.append("Name: ").append(getName()).append(",");
    if (getTags() != null)
        sb.append("Tags: ").append(getTags());
    sb.append("}");
    return sb.toString();
}
/**
 * Indicates whether some other object is equal to this one. Two results are
 * equal when the other object is also a {@code GetLoggerDefinitionResult} and
 * every modeled property (arn, creationTimestamp, id, lastUpdatedTimestamp,
 * latestVersion, latestVersionArn, name, tags) is null-safely equal.
 *
 * @param obj
 *        the reference object with which to compare.
 * @return true if this object equals {@code obj}; false otherwise.
 */
@Override
public boolean equals(Object obj) {
    if (this == obj)
        return true;
    // instanceof is false for null, so this also rejects null references.
    if (!(obj instanceof GetLoggerDefinitionResult))
        return false;
    GetLoggerDefinitionResult other = (GetLoggerDefinitionResult) obj;
    // java.util.Objects.equals is null-safe and exactly equivalent to the
    // generated "a == null ^ b == null" / "a.equals(b) == false" pattern.
    // Fully qualified to match this file's existing java.util.* usage.
    return java.util.Objects.equals(getArn(), other.getArn())
            && java.util.Objects.equals(getCreationTimestamp(), other.getCreationTimestamp())
            && java.util.Objects.equals(getId(), other.getId())
            && java.util.Objects.equals(getLastUpdatedTimestamp(), other.getLastUpdatedTimestamp())
            && java.util.Objects.equals(getLatestVersion(), other.getLatestVersion())
            && java.util.Objects.equals(getLatestVersionArn(), other.getLatestVersionArn())
            && java.util.Objects.equals(getName(), other.getName())
            && java.util.Objects.equals(getTags(), other.getTags());
}
/**
 * Returns a hash code consistent with {@link #equals(Object)}: computed over
 * the same modeled properties, in the same order.
 *
 * @return a hash code value for this object.
 */
@Override
public int hashCode() {
    // java.util.Objects.hash produces exactly the same value as the manual
    // "prime * hashCode + (x == null ? 0 : x.hashCode())" accumulation
    // (seed 1, multiplier 31), so this is a behavior-identical simplification.
    return java.util.Objects.hash(getArn(), getCreationTimestamp(), getId(), getLastUpdatedTimestamp(),
            getLatestVersion(), getLatestVersionArn(), getName(), getTags());
}
/**
 * Creates a shallow copy of this result object via {@link Object#clone()}.
 *
 * @return a clone of this instance.
 */
@Override
public GetLoggerDefinitionResult clone() {
    try {
        return (GetLoggerDefinitionResult) super.clone();
    } catch (CloneNotSupportedException e) {
        // Result classes are Cloneable, so this path should be unreachable.
        throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
    }
}
}
| |
package synergynet3.activitypack2.table.flickstudy;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.UUID;
import java.util.logging.Level;
import multiplicity3.appsystem.IQueueOwner;
import multiplicity3.appsystem.MultiplicityClient;
import multiplicity3.config.identity.IdentityConfigPrefsItem;
import multiplicity3.csys.behaviours.RotateTranslateScaleBehaviour;
import multiplicity3.csys.factory.ContentTypeNotBoundException;
import multiplicity3.csys.items.item.IItem;
import multiplicity3.csys.items.mutablelabel.IMutableLabel;
import multiplicity3.input.IMultiTouchEventListener;
import multiplicity3.input.MultiTouchInputComponent;
import multiplicity3.input.events.MultiTouchCursorEvent;
import multiplicity3.input.events.MultiTouchObjectEvent;
import synergynet3.SynergyNetApp;
import synergynet3.additionalUtils.AdditionalSynergyNetUtilities;
import synergynet3.additionalitems.interfaces.ICachableImage;
import synergynet3.behaviours.BehaviourUtilities;
import synergynet3.behaviours.networkflick.NetworkFlickBehaviour;
import synergynet3.behaviours.networkflick.NetworkFlickLogging;
import synergynet3.behaviours.networkflick.NetworkFlickLogging.FLICKTYPE;
import synergynet3.feedbacksystem.FeedbackSystem;
import synergynet3.fonts.FontColour;
import synergynet3.fonts.FontUtil;
import com.jme3.math.Vector2f;
/**
 * The Class FlickStudyApp.
 * <p>
 * SynergyNet table application for a network-flick study: each participating
 * table shows numbered items which users flick to the matching table. Touch
 * counts and flick statistics are appended to a CSV log when the app stops.
 */
public class FlickStudyApp extends SynergyNetApp implements IMultiTouchEventListener
{

    /** Maximum bounce count applied to flicks; 0 means no limit. Set via -Dlimit. */
    private static int bounceLimit = 0;

    /** Date format used for the "Date" line of the log. */
    private static final SimpleDateFormat DATE_FORMAT = new SimpleDateFormat("dd/MM/yyyy");

    /** Deceleration applied to flicked items. Overridden via -Ddeceleration. */
    private static float DECELERATION = 7.5f;

    /** Network flick mode; switched to INSTANT via -Dinstant=true. */
    private static FLICKTYPE flickMode = FLICKTYPE.PROPORTIONAL;

    /** Timestamp format used to build the log file name. */
    private static final SimpleDateFormat LOG_NAME_FORMAT = new SimpleDateFormat("yyyy-MM-dd_HH-mm-ss");

    /** Directory the log is written into; supplied via -Dlog. Empty/null disables logging. */
    private static String logAddress = "";

    /** Horizontal spacing between generated flick items. */
    private static final float OFFSET = 200f;

    /** Classpath folder containing the numbered item images. */
    private static final String RESOURCES_DIR = "synergynet3/activitypack2/table/flickstudy/";

    /** Identities of the participating tables; index matches the item image number. */
    private static final String[] tables =
    { "", "green", "blue", "yellow", "red" };

    /** Time-of-day format used for log time stamps. */
    private static final SimpleDateFormat TIME_FORMAT = new SimpleDateFormat("HH:mm ss.SS");

    /** Time of the first touch press, or null if the table was never touched. */
    private Date firstTouch = null;

    /** This table's identity (expected to be one of {@link #tables}). */
    private String id = "";

    /** Time of the most recent touch release, or null if never released. */
    private Date lastTouch = null;

    /** Application start time; used in the log file name and header. */
    private Date startTime = new Date();

    /** Total number of touch presses observed. */
    private int touchCount = 0;

    /** X coordinate at which the next generated item is placed. */
    private float xPos = -OFFSET;

    /**
     * The main method. Reads optional configuration from the system properties
     * "log", "instant", "deceleration" and "limit", an optional table identity
     * from args[0], then starts the multiplicity client with this app.
     *
     * @param args
     *            the arguments; args[0], when present, is the table identity
     */
    public static void main(String[] args)
    {
        if (args.length > 0)
        {
            IdentityConfigPrefsItem idprefs = new IdentityConfigPrefsItem();
            idprefs.setID(args[0]);
        }
        try
        {
            // A missing property yields null here (no exception); writeToLog()
            // checks for null before attempting to write.
            logAddress = ManagementFactory.getRuntimeMXBean().getSystemProperties().get("log");
            AdditionalSynergyNetUtilities.logInfo("Using log at: " + logAddress);
        }
        catch (Exception e)
        {
            AdditionalSynergyNetUtilities.logInfo("No valid log address in arguments, no logging will take place.");
        }
        try
        {
            if (Boolean.parseBoolean(ManagementFactory.getRuntimeMXBean().getSystemProperties().get("instant")))
            {
                flickMode = FLICKTYPE.INSTANT;
            }
            AdditionalSynergyNetUtilities.logInfo("Flick mode set to " + flickMode);
        }
        catch (Exception e)
        {
            AdditionalSynergyNetUtilities.logInfo("No flick mode setting given, defaulting to PROPORTIONAL.");
        }
        try
        {
            // Float.parseFloat(null) throws, falling through to the default.
            DECELERATION = Float.parseFloat(ManagementFactory.getRuntimeMXBean().getSystemProperties().get("deceleration"));
        }
        catch (Exception e)
        {
            AdditionalSynergyNetUtilities.logInfo("No valid deceleration value given, using default.");
        }
        AdditionalSynergyNetUtilities.logInfo("Using deceleration of: " + DECELERATION);
        try
        {
            bounceLimit = Integer.parseInt(ManagementFactory.getRuntimeMXBean().getSystemProperties().get("limit"));
            AdditionalSynergyNetUtilities.logInfo("Limit of bounces set to: " + bounceLimit);
        }
        catch (Exception e)
        {
            AdditionalSynergyNetUtilities.logInfo("No limit for bounces set.");
        }
        MultiplicityClient client = MultiplicityClient.get();
        client.start();
        FlickStudyApp app = new FlickStudyApp();
        client.setCurrentApp(app);
    }

    /*
     * (non-Javadoc)
     * @see
     * multiplicity3.input.IMultiTouchEventListener#cursorChanged(multiplicity3
     * .input.events.MultiTouchCursorEvent)
     */
    @Override
    public void cursorChanged(MultiTouchCursorEvent event)
    {
        // Not used by this study.
    }

    /*
     * (non-Javadoc)
     * @see
     * multiplicity3.input.IMultiTouchEventListener#cursorClicked(multiplicity3
     * .input.events.MultiTouchCursorEvent)
     */
    @Override
    public void cursorClicked(MultiTouchCursorEvent event)
    {
        // Not used by this study.
    }

    /*
     * (non-Javadoc)
     * @see
     * multiplicity3.input.IMultiTouchEventListener#cursorPressed(multiplicity3
     * .input.events.MultiTouchCursorEvent)
     */
    @Override
    public void cursorPressed(MultiTouchCursorEvent event)
    {
        // Record the first touch time and count every press for the study log.
        if (firstTouch == null)
        {
            firstTouch = new Date();
        }
        touchCount++;
    }

    /*
     * (non-Javadoc)
     * @see
     * multiplicity3.input.IMultiTouchEventListener#cursorReleased(multiplicity3
     * .input.events.MultiTouchCursorEvent)
     */
    @Override
    public void cursorReleased(MultiTouchCursorEvent event)
    {
        lastTouch = new Date();
    }

    /*
     * (non-Javadoc)
     * @see synergynet3.SynergyNetApp#getSpecificFriendlyAppName()
     */
    @Override
    public String getSpecificFriendlyAppName()
    {
        return "FlickTest";
    }

    /*
     * (non-Javadoc)
     * @see
     * multiplicity3.input.IMultiTouchEventListener#objectAdded(multiplicity3
     * .input.events.MultiTouchObjectEvent)
     */
    @Override
    public void objectAdded(MultiTouchObjectEvent event)
    {
        // Not used by this study.
    }

    /*
     * (non-Javadoc)
     * @see
     * multiplicity3.input.IMultiTouchEventListener#objectChanged(multiplicity3
     * .input.events.MultiTouchObjectEvent)
     */
    @Override
    public void objectChanged(MultiTouchObjectEvent event)
    {
        // Not used by this study.
    }

    /*
     * (non-Javadoc)
     * @see
     * multiplicity3.input.IMultiTouchEventListener#objectRemoved(multiplicity3
     * .input.events.MultiTouchObjectEvent)
     */
    @Override
    public void objectRemoved(MultiTouchObjectEvent event)
    {
        // Not used by this study.
    }

    /*
     * (non-Javadoc)
     * @see
     * synergynet3.SynergyNetApp#onFlickArrival(synergynet3.behaviours.networkflick
     * .messages.FlickMessage)
     */
    @Override
    public void onFlickArrival(synergynet3.behaviours.networkflick.messages.FlickMessage message)
    {
        IItem item = BehaviourUtilities.onFlickArrival(message, stage, tableIdentity, deceleration);
        // Re-apply the same scale limits used when items are generated locally.
        for (RotateTranslateScaleBehaviour rts : item.getBehaviours(RotateTranslateScaleBehaviour.class))
        {
            rts.setScaleLimits(0.5f, 1f);
        }
    }

    /*
     * (non-Javadoc)
     * @see synergynet3.SynergyNetApp#shouldStart(multiplicity3.input.
     * MultiTouchInputComponent, multiplicity3.appsystem.IQueueOwner)
     */
    @Override
    public void shouldStart(MultiTouchInputComponent input, IQueueOwner iqo)
    {
        // Register for raw touch events so touches can be counted for the log.
        input.registerMultiTouchEventListener(this);
        super.shouldStart(input, iqo);
    }

    /*
     * (non-Javadoc)
     * @see synergynet3.SynergyNetApp#shouldStop()
     */
    @Override
    public void shouldStop()
    {
        // Persist the study statistics before shutting down.
        writeToLog();
        super.shouldStop();
    }

    /**
     * Generate flick item for the given target table, unless this table IS the
     * target. The item shows the target table's number and can be flicked over
     * the network.
     *
     * @param targetTable
     *            the target table index into {@link #tables}
     */
    private void generateFlickItem(int targetTable)
    {
        if (!id.equals(tables[targetTable]))
        {
            try
            {
                String fileName = targetTable + ".png";
                ICachableImage flickItem = contentFactory.create(ICachableImage.class, fileName, UUID.randomUUID());
                flickItem.setImage(RESOURCES_DIR + fileName);
                flickItem.setSize(200f, 200f);
                flickItem.setRelativeScale(0.5f);
                RotateTranslateScaleBehaviour rt = behaviourMaker.addBehaviour(flickItem, RotateTranslateScaleBehaviour.class);
                rt.setScaleLimits(0.5f, 1f);
                NetworkFlickBehaviour nf = behaviourMaker.addBehaviour(flickItem, NetworkFlickBehaviour.class);
                nf.setDeceleration(DECELERATION);
                nf.setMaxDimension(200f);
                // Items are laid out left to right, OFFSET pixels apart.
                flickItem.setRelativeLocation(new Vector2f(getXPos(), 0));
                stage.addItem(flickItem);
                FeedbackSystem.registerAsFeedbackEligible(flickItem, 200, 200, stage);
            }
            catch (ContentTypeNotBoundException e)
            {
                AdditionalSynergyNetUtilities.log(Level.SEVERE, "Content Type Not Bound", e);
            }
        }
    }

    /**
     * Generate a non-interactive prompt label along the bottom edge of the
     * display.
     *
     * @param message
     *            the message to show
     */
    private void generatePrompt(String message)
    {
        try
        {
            IMutableLabel promptLabel = this.stage.getContentFactory().create(IMutableLabel.class, "positionLabel", UUID.randomUUID());
            promptLabel.setFont(FontUtil.getFont(FontColour.White));
            promptLabel.setText(message);
            promptLabel.setBoxSize((stage.getDisplayWidth()), 50);
            promptLabel.setFontScale(0.75f);
            // Centered horizontally, 25px above the bottom of the display.
            promptLabel.setRelativeLocation(new Vector2f(0, (-stage.getDisplayHeight() / 2) + 25));
            promptLabel.setInteractionEnabled(false);
            stage.addItem(promptLabel);
            promptLabel.getZOrderManager().setAutoBringToTop(true);
            promptLabel.getZOrderManager().setBringToTopPropagatesUp(false);
        }
        catch (ContentTypeNotBoundException e)
        {
            AdditionalSynergyNetUtilities.log(Level.SEVERE, "Content Type Not Bound", e);
        }
    }

    /**
     * Gets the x position for the next generated item, then advances it by
     * {@link #OFFSET}.
     *
     * @return the x position to use for the current item
     */
    private float getXPos()
    {
        float toReturn = xPos;
        xPos += OFFSET;
        return toReturn;
    }

    /**
     * Write to log. Appends session and flick statistics as CSV lines to a
     * per-session file in the configured log directory. Does nothing when the
     * table was never touched or when no log address was configured.
     */
    private void writeToLog()
    {
        if ((firstTouch == null) || (lastTouch == null))
        {
            return;
        }
        if ((logAddress == null) || logAddress.equals(""))
        {
            return;
        }
        String address = logAddress + File.separator + LOG_NAME_FORMAT.format(startTime) + "_" + id + "_FlickingLog.csv";
        File logFile = new File(address);
        if (!logFile.isFile())
        {
            AdditionalSynergyNetUtilities.logInfo("No log file found, creating new one.");
            try
            {
                if (!logFile.createNewFile())
                {
                    AdditionalSynergyNetUtilities.logInfo("Unable to create new log file.");
                    return;
                }
            }
            catch (IOException e1)
            {
                AdditionalSynergyNetUtilities.logInfo("Unable to create new log file.");
                return;
            }
        }
        AdditionalSynergyNetUtilities.logInfo("Saving data to " + address);
        BufferedWriter writer = null;
        try
        {
            // Open in append mode so an existing session file is extended.
            writer = new BufferedWriter(new FileWriter(address, true));
            writer.write("Table," + id);
            writer.newLine();
            writer.write("Date," + DATE_FORMAT.format(startTime));
            writer.newLine();
            writer.write("Start," + TIME_FORMAT.format(startTime));
            writer.newLine();
            writer.write("End," + TIME_FORMAT.format(new Date()));
            writer.newLine();
            writer.newLine();
            if (flickMode == FLICKTYPE.INSTANT)
            {
                writer.write("Mode,Instant");
            }
            else
            {
                writer.write("Mode,Proportional");
            }
            writer.newLine();
            writer.newLine();
            writer.write("Deceleration," + DECELERATION);
            writer.newLine();
            writer.newLine();
            writer.write("First Touch," + TIME_FORMAT.format(firstTouch));
            writer.newLine();
            writer.write("Last Touch," + TIME_FORMAT.format(lastTouch));
            writer.newLine();
            writer.newLine();
            writer.write("Total Number of Touches," + touchCount);
            writer.newLine();
            writer.newLine();
            if (bounceLimit > 0)
            {
                writer.write("Bounce Limit," + bounceLimit);
            }
            else
            {
                writer.write("No Bounce Limit Set.");
            }
            writer.newLine();
            writer.newLine();
            // "innacuracy" (sic) is kept as-is: it is part of the emitted CSV
            // format and changing it would break existing log consumers.
            writer.write("Bounces due to innacuracy," + NetworkFlickLogging.INACCURATE_BOUNCE_COUNT);
            writer.newLine();
            writer.write("Bounces due to lack of momentum," + NetworkFlickLogging.LACK_OF_MOMENTUM_BOUNCE_COUNT);
            writer.newLine();
            writer.newLine();
            writer.write("Flick Departures," + NetworkFlickLogging.DEPARTURE_COUNT);
            writer.newLine();
            writer.write("Flick Arrivals," + NetworkFlickLogging.ARRIVAL_COUNT);
            writer.newLine();
            writer.newLine();
            String[] bounceLogMessages = NetworkFlickLogging.BOUNCE_LOG.split(NetworkFlickLogging.LOG_PARSE_TOKEN);
            for (String message : bounceLogMessages)
            {
                writer.write(message);
                writer.newLine();
            }
            writer.newLine();
            String[] flickLogMessages = NetworkFlickLogging.FLICK_LOG.split(NetworkFlickLogging.LOG_PARSE_TOKEN);
            for (String message : flickLogMessages)
            {
                writer.write(message);
                writer.newLine();
            }
        }
        catch (IOException e1)
        {
            AdditionalSynergyNetUtilities.logInfo("Unable to write to log file.");
        }
        finally
        {
            // Fix: the original leaked the FileWriter/BufferedWriter when an
            // IOException occurred mid-write (close() was only reached on the
            // success path).
            if (writer != null)
            {
                try
                {
                    writer.close();
                }
                catch (IOException ignored)
                {
                    // Nothing further can be done if close itself fails.
                }
            }
        }
    }

    /*
     * (non-Javadoc)
     * @see synergynet3.SynergyNetApp#loadDefaultContent()
     */
    @Override
    protected void loadDefaultContent()
    {
        id = getTableIdentity();
        // Only tables taking part in the study get items and network flick.
        if (id.equals("green") || id.equals("blue") || id.equals("yellow") || id.equals("red"))
        {
            deceleration = DECELERATION;
            enableNetworkFlick();
            BehaviourUtilities.FLICK_TYPE = flickMode;
            NetworkFlickLogging.LOGGING_ENABLED = true;
            NetworkFlickLogging.BOUNCE_LIMIT = bounceLimit;
            for (int i = 1; i <= 4; i++)
            {
                generateFlickItem(i);
            }
            generatePrompt("Flick each item to the table displaying the corresponding number.");
        }
    }
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.gemstone.gemfire.cache.query.dunit;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import com.gemstone.gemfire.LogWriter;
import com.gemstone.gemfire.cache.Operation;
import com.gemstone.gemfire.cache.query.CqEvent;
import com.gemstone.gemfire.cache.query.CqListener;
import com.gemstone.gemfire.cache.query.data.Portfolio;
import dunit.DistributedTestCase;
import dunit.DistributedTestCase.WaitCriterion;
/**
 * CqListener used by dunit tests to record, per CQ, the number and kind of
 * delivered events, the affected keys, and the accumulated latency between a
 * Portfolio's creation time and CQ delivery.
 *
 * @author anil.
 */
public class CqTimeTestListener implements CqListener {
  protected final LogWriter logger;

  // NOTE(review): these counters are written from CQ callback threads.
  // "volatile" gives visibility but "++" is not atomic, so counts could be
  // slightly low under concurrent delivery — unchanged from the original;
  // confirm whether callbacks for one CQ are single-threaded.
  protected volatile int eventCreateCount = 0;
  protected volatile int eventUpdateCount = 0;
  protected volatile int eventDeleteCount = 0;
  protected volatile int eventInvalidateCount = 0;
  protected volatile int eventErrorCount = 0;

  protected volatile int totalEventCount = 0;
  protected volatile int eventQueryInsertCount = 0;
  protected volatile int eventQueryUpdateCount = 0;
  protected volatile int eventQueryDeleteCount = 0;
  protected volatile int eventQueryInvalidateCount = 0;
  /** Sum over insert events of (delivery time - Portfolio create time), millis. */
  protected volatile long eventQueryInsertTime = 0;
  /** Sum over update events of (delivery time - Portfolio create time), millis. */
  protected volatile long eventQueryUpdateTime = 0;

  protected volatile boolean eventClose = false;

  /** Keys seen per base operation kind (synchronized sets; raw types kept for the legacy test framework). */
  final public Set destroys = Collections.synchronizedSet(new HashSet());
  final public Set creates = Collections.synchronizedSet(new HashSet());
  final public Set invalidates = Collections.synchronizedSet(new HashSet());
  final public Set updates = Collections.synchronizedSet(new HashSet());

  static private final String WAIT_PROPERTY = "CQueryTestListener.maxWaitTime";
  static private final int WAIT_DEFAULT = (20 * 1000);
  /** Maximum wait for the waitFor* methods, overridable via the system property. */
  public static final long MAX_TIME = Integer.getInteger(WAIT_PROPERTY,
      WAIT_DEFAULT).intValue();

  public String cqName;

  public CqTimeTestListener(LogWriter logger) {
    this.logger = logger;
  }

  /**
   * Records counts, keys and latency for a delivered CQ event. Both the base
   * operation and the query operation are classified independently.
   */
  public void onEvent(CqEvent cqEvent) {
    this.totalEventCount++;
    long currentTime = System.currentTimeMillis();
    Operation baseOperation = cqEvent.getBaseOperation();
    Operation queryOperation = cqEvent.getQueryOperation();
    Object key = cqEvent.getKey();

    if (baseOperation.isUpdate()) {
      this.eventUpdateCount++;
      this.updates.add(key);
    }
    else if (baseOperation.isCreate()) {
      this.eventCreateCount++;
      this.creates.add(key);
    }
    else if (baseOperation.isDestroy()) {
      this.eventDeleteCount++;
      this.destroys.add(key);
    }
    else if (baseOperation.isInvalidate()) {
      // Fix: this branch previously incremented eventDeleteCount (copy-paste
      // error), leaving eventInvalidateCount permanently zero even though
      // invalidated keys were tracked and getInvalidateEventCount() exists.
      this.eventInvalidateCount++;
      this.invalidates.add(key);
    }

    if (queryOperation.isUpdate()) {
      this.eventQueryUpdateCount++;
      long createTime = ((Portfolio)cqEvent.getNewValue()).getCreateTime();
      this.eventQueryUpdateTime += (currentTime - createTime);
    }
    else if (queryOperation.isCreate()) {
      this.eventQueryInsertCount++;
      long createTime = ((Portfolio)cqEvent.getNewValue()).getCreateTime();
      this.eventQueryInsertTime += (currentTime - createTime);
    }
    else if (queryOperation.isDestroy()) {
      this.eventQueryDeleteCount++;
    }
    else if (queryOperation.isInvalidate()) {
      this.eventQueryInvalidateCount++;
    }
  }

  public void onError(CqEvent cqEvent) {
    this.eventErrorCount++;
  }

  public int getErrorEventCount() {
    return this.eventErrorCount;
  }

  public int getTotalEventCount() {
    return this.totalEventCount;
  }

  public int getCreateEventCount() {
    return this.eventCreateCount;
  }

  public int getUpdateEventCount() {
    return this.eventUpdateCount;
  }

  public int getDeleteEventCount() {
    return this.eventDeleteCount;
  }

  public int getInvalidateEventCount() {
    return this.eventInvalidateCount;
  }

  public int getQueryInsertEventCount() {
    return this.eventQueryInsertCount;
  }

  public int getQueryUpdateEventCount() {
    return this.eventQueryUpdateCount;
  }

  public int getQueryDeleteEventCount() {
    return this.eventQueryDeleteCount;
  }

  public int getQueryInvalidateEventCount() {
    return this.eventQueryInvalidateCount;
  }

  /** @return accumulated update latency in milliseconds. */
  public long getTotalQueryUpdateTime() {
    return this.eventQueryUpdateTime;
  }

  /** @return accumulated insert latency in milliseconds. */
  public long getTotalQueryCreateTime() {
    return this.eventQueryInsertTime;
  }

  public void close() {
    this.eventClose = true;
  }

  /** Logs a one-line summary of all recorded counts. */
  public void printInfo() {
    logger.info("####" + this.cqName + ": " +
        " Events Total :" + this.getTotalEventCount() +
        " Events Created :" + this.eventCreateCount +
        " Events Updated :" + this.eventUpdateCount +
        " Events Deleted :" + this.eventDeleteCount +
        " Events Invalidated :" + this.eventInvalidateCount +
        " Query Inserts :" + this.eventQueryInsertCount +
        " Query Updates :" + this.eventQueryUpdateCount +
        " Query Deletes :" + this.eventQueryDeleteCount +
        " Query Invalidates :" + this.eventQueryInvalidateCount +
        " Total Events :" + this.totalEventCount);
  }

  /** Blocks until a create event for the given key has been seen (fails the test after MAX_TIME). */
  public boolean waitForCreated(final Object key) {
    WaitCriterion ev = new WaitCriterion() {
      public boolean done() {
        return CqTimeTestListener.this.creates.contains(key);
      }
      public String description() {
        return "never got create event for CQ " + CqTimeTestListener.this.cqName;
      }
    };
    DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
    return true;
  }

  /** Blocks until a destroy event for the given key has been seen (fails the test after MAX_TIME). */
  public boolean waitForDestroyed(final Object key) {
    WaitCriterion ev = new WaitCriterion() {
      public boolean done() {
        return CqTimeTestListener.this.destroys.contains(key);
      }
      public String description() {
        return "never got destroy event for CQ " + CqTimeTestListener.this.cqName;
      }
    };
    DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
    return true;
  }

  /** Blocks until an invalidate event for the given key has been seen (fails the test after MAX_TIME). */
  public boolean waitForInvalidated(final Object key) {
    WaitCriterion ev = new WaitCriterion() {
      public boolean done() {
        return CqTimeTestListener.this.invalidates.contains(key);
      }
      public String description() {
        return "never got invalidate event for CQ " + CqTimeTestListener.this.cqName;
      }
    };
    DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
    return true;
  }

  /** Blocks until an update event for the given key has been seen (fails the test after MAX_TIME). */
  public boolean waitForUpdated(final Object key) {
    WaitCriterion ev = new WaitCriterion() {
      public boolean done() {
        return CqTimeTestListener.this.updates.contains(key);
      }
      public String description() {
        return "never got update event for CQ " + CqTimeTestListener.this.cqName;
      }
    };
    DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
    return true;
  }

  /** Blocks until close() has been called on this listener (fails the test after MAX_TIME). */
  public boolean waitForClose() {
    WaitCriterion ev = new WaitCriterion() {
      public boolean done() {
        return CqTimeTestListener.this.eventClose;
      }
      public String description() {
        return "never got close event for CQ " + CqTimeTestListener.this.cqName;
      }
    };
    DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
    return true;
  }

  /**
   * NOTE: despite the "get" name, this CLEARS the recorded key sets and the
   * close flag (it does not clear the counters). Name kept for compatibility
   * with existing tests.
   */
  public void getEventHistory() {
    destroys.clear();
    creates.clear();
    invalidates.clear();
    updates.clear();
    this.eventClose = false;
  }
}
| |
/*
* Copyright (c) 2017, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.wso2.carbon.identity.oauth.dcr.service;
import org.apache.commons.lang.StringUtils;
import org.mockito.internal.util.reflection.Whitebox;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.testng.PowerMockTestCase;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import org.wso2.carbon.base.CarbonBaseConstants;
import org.wso2.carbon.context.PrivilegedCarbonContext;
import org.wso2.carbon.identity.application.common.IdentityApplicationManagementException;
import org.wso2.carbon.identity.application.common.model.ServiceProvider;
import org.wso2.carbon.identity.application.mgt.ApplicationManagementService;
import org.wso2.carbon.identity.base.IdentityException;
import org.wso2.carbon.identity.oauth.IdentityOAuthAdminException;
import org.wso2.carbon.identity.oauth.OAuthAdminService;
import org.wso2.carbon.identity.oauth.common.exception.InvalidOAuthClientException;
import org.wso2.carbon.identity.oauth.dcr.DCRMConstants;
import org.wso2.carbon.identity.oauth.dcr.bean.Application;
import org.wso2.carbon.identity.oauth.dcr.bean.ApplicationRegistrationRequest;
import org.wso2.carbon.identity.oauth.dcr.bean.ApplicationUpdateRequest;
import org.wso2.carbon.identity.oauth.dcr.exception.DCRMException;
import org.wso2.carbon.identity.oauth.dcr.internal.DCRDataHolder;
import org.wso2.carbon.identity.oauth.dto.OAuthConsumerAppDTO;
import org.wso2.carbon.idp.mgt.IdentityProviderManager;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import static org.mockito.Matchers.any;
import static org.powermock.api.mockito.PowerMockito.doNothing;
import static org.powermock.api.mockito.PowerMockito.doThrow;
import static org.powermock.api.mockito.PowerMockito.mock;
import static org.powermock.api.mockito.PowerMockito.mockStatic;
import static org.powermock.api.mockito.PowerMockito.when;
import static org.powermock.api.mockito.PowerMockito.whenNew;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.fail;
import static org.wso2.carbon.identity.oauth.common.OAuthConstants.OAuth10AParams.OAUTH_VERSION;
/**
* Unit test covering DCRMService
*/
@PrepareForTest({DCRMService.class, ServiceProvider.class, IdentityProviderManager.class})
public class DCRMServiceTest extends PowerMockTestCase {
private DCRMService dcrmService;
private OAuthAdminService mockOAuthAdminService;
private String dummyConsumerKey = "dummyConsumerKey";
private ApplicationRegistrationRequest applicationRegistrationRequest;
private String dummyClientName = "dummyClientName";
private String dummyInvalidClientName = "dummy@ClientName";
private List<String> dummyGrantTypes = new ArrayList<>();
private String dummyUserName = "dummyUserName";
private String dummyTenantDomain = "dummyTenantDomain";
private ApplicationManagementService mockApplicationManagementService;
@BeforeMethod
public void setUp() {
mockOAuthAdminService = mock(OAuthAdminService.class);
applicationRegistrationRequest = new ApplicationRegistrationRequest();
applicationRegistrationRequest.setClientName(dummyClientName);
dcrmService = new DCRMService();
}
@DataProvider(name = "DTOProvider")
public Object[][] getDTOStatus() {
return new String[][]{
{null},
{""}
};
}
@Test
public void getApplicationEmptyClientIdTest() throws DCRMException {
try {
dcrmService.getApplication("");
} catch (IdentityException ex) {
assertEquals(ex.getMessage(), "Invalid client_id");
return;
}
fail("Expected exception IdentityException not thrown by getApplication method");
}
@Test(dataProvider = "DTOProvider")
public void getApplicationNullDTOTest(String dtoStatus) throws Exception {
if (dtoStatus == null) {
when(mockOAuthAdminService.getOAuthApplicationData(dummyConsumerKey)).thenReturn(null);
when(mockOAuthAdminService.getAllOAuthApplicationData()).thenReturn(new OAuthConsumerAppDTO[0]);
} else {
OAuthConsumerAppDTO dto = new OAuthConsumerAppDTO();
dto.setApplicationName("");
when(mockOAuthAdminService.getOAuthApplicationData(dummyConsumerKey)).thenReturn(dto);
when(mockOAuthAdminService.getAllOAuthApplicationData()).thenReturn(new OAuthConsumerAppDTO[]{dto});
}
Whitebox.setInternalState(dcrmService, "oAuthAdminService", mockOAuthAdminService);
try {
dcrmService.getApplication(dummyConsumerKey);
} catch (IdentityException ex) {
assertEquals(ex.getErrorCode(), DCRMConstants.ErrorMessages.NOT_FOUND_APPLICATION_WITH_ID.toString());
return;
}
fail("Expected exception IdentityException not thrown by getApplication method");
}
@Test
public void getApplicationDTOTestWithIOAException() throws Exception {
    // A plain IdentityOAuthAdminException from the app-data lookup maps to
    // FAILED_TO_GET_APPLICATION_BY_ID.
    when(mockOAuthAdminService.getAllOAuthApplicationData()).thenReturn(new OAuthConsumerAppDTO[0]);
    doThrow(new IdentityOAuthAdminException("")).when(mockOAuthAdminService)
            .getOAuthApplicationData(dummyConsumerKey);
    Whitebox.setInternalState(dcrmService, "oAuthAdminService", mockOAuthAdminService);
    try {
        dcrmService.getApplication(dummyConsumerKey);
        fail("Expected exception IdentityException not thrown by getApplication method");
    } catch (IdentityException e) {
        assertEquals(e.getErrorCode(), DCRMConstants.ErrorMessages.FAILED_TO_GET_APPLICATION_BY_ID.toString());
    }
}
@Test
public void getApplicationDTOTestWithIOCException() throws Exception {
    // When the cause is InvalidOAuthClientException, the client simply does not
    // exist, so the error maps to NOT_FOUND_APPLICATION_WITH_ID instead.
    when(mockOAuthAdminService.getAllOAuthApplicationData()).thenReturn(new OAuthConsumerAppDTO[0]);
    doThrow(new IdentityOAuthAdminException("", new InvalidOAuthClientException(""))).when(mockOAuthAdminService)
            .getOAuthApplicationData(dummyConsumerKey);
    Whitebox.setInternalState(dcrmService, "oAuthAdminService", mockOAuthAdminService);
    try {
        dcrmService.getApplication(dummyConsumerKey);
        fail("Expected exception IdentityException not thrown by getApplication method");
    } catch (IdentityException e) {
        assertEquals(e.getErrorCode(), DCRMConstants.ErrorMessages.NOT_FOUND_APPLICATION_WITH_ID.toString());
    }
}
@Test
public void getApplicationDTOTest() throws Exception {
    startTenantFlow();
    String dummyConsumerSecret = "dummyConsumerSecret";
    String dummyCallbackUrl = "dummyCallbackUrl";
    // Build a fully populated consumer app so every mapped field can be checked.
    OAuthConsumerAppDTO appDTO = new OAuthConsumerAppDTO();
    appDTO.setApplicationName(dummyClientName);
    appDTO.setOauthConsumerKey(dummyConsumerKey);
    appDTO.setOauthConsumerSecret(dummyConsumerSecret);
    appDTO.setCallbackUrl(dummyCallbackUrl);
    appDTO.setUsername(dummyUserName.concat("@").concat(dummyTenantDomain));
    when(mockOAuthAdminService.getOAuthApplicationData(dummyConsumerKey)).thenReturn(appDTO);
    when(mockOAuthAdminService.getAllOAuthApplicationData())
            .thenReturn(new OAuthConsumerAppDTO[]{appDTO});
    Whitebox.setInternalState(dcrmService, "oAuthAdminService", mockOAuthAdminService);
    Application application = dcrmService.getApplication(dummyConsumerKey);
    // The returned Application must mirror the DTO contents field by field.
    assertEquals(application.getClientId(), dummyConsumerKey);
    assertEquals(application.getClientName(), dummyClientName);
    assertEquals(application.getClientSecret(), dummyConsumerSecret);
    assertEquals(application.getRedirectUris().get(0), dummyCallbackUrl);
}
@Test
public void registerApplicationTestWithExistSP() throws DCRMException, IdentityApplicationManagementException {
    startTenantFlow();
    dummyGrantTypes.add("dummy1");
    dummyGrantTypes.add("dummy2");
    applicationRegistrationRequest.setGrantTypes(dummyGrantTypes);
    // A service provider already registered under the same name is a conflict.
    mockApplicationManagementService = mock(ApplicationManagementService.class);
    DCRDataHolder.getInstance().setApplicationManagementService(mockApplicationManagementService);
    when(mockApplicationManagementService.getServiceProvider(dummyClientName, dummyTenantDomain))
            .thenReturn(new ServiceProvider());
    try {
        dcrmService.registerApplication(applicationRegistrationRequest);
        fail("Expected exception IdentityException not thrown by registerApplication method");
    } catch (IdentityException e) {
        assertEquals(e.getErrorCode(), DCRMConstants.ErrorMessages.CONFLICT_EXISTING_APPLICATION.toString());
    }
}
@Test
public void registerApplicationTestWithFailedToGetSP() throws DCRMException,
        IdentityApplicationManagementException {
    startTenantFlow();
    dummyGrantTypes.add("dummy1");
    dummyGrantTypes.add("dummy2");
    applicationRegistrationRequest.setGrantTypes(dummyGrantTypes);
    // The existence check itself blows up, so registration must report
    // FAILED_TO_GET_SP rather than proceeding.
    mockApplicationManagementService = mock(ApplicationManagementService.class);
    DCRDataHolder.getInstance().setApplicationManagementService(mockApplicationManagementService);
    doThrow(new IdentityApplicationManagementException("")).when(mockApplicationManagementService)
            .getServiceProvider(dummyClientName, dummyTenantDomain);
    try {
        dcrmService.registerApplication(applicationRegistrationRequest);
        fail("Expected exception IdentityException not thrown by registerApplication method");
    } catch (IdentityException e) {
        assertEquals(e.getErrorCode(), DCRMConstants.ErrorMessages.FAILED_TO_GET_SP.toString());
    }
}
@Test
public void registerApplicationTestWithFailedToRegisterSP() throws Exception {
    startTenantFlow();
    dummyGrantTypes.add("dummy1");
    dummyGrantTypes.add("dummy2");
    applicationRegistrationRequest.setGrantTypes(dummyGrantTypes);
    // No stubbing beyond the bare mock: creating the service provider is left
    // to fail, so the service must report FAILED_TO_REGISTER_SP.
    mockApplicationManagementService = mock(ApplicationManagementService.class);
    DCRDataHolder.getInstance().setApplicationManagementService(mockApplicationManagementService);
    try {
        dcrmService.registerApplication(applicationRegistrationRequest);
        fail("Expected exception IdentityException not thrown by registerApplication method");
    } catch (IdentityException e) {
        assertEquals(e.getErrorCode(), DCRMConstants.ErrorMessages.FAILED_TO_REGISTER_SP.toString());
    }
}
@DataProvider(name = "RedirectAndGrantTypeProvider")
public Object[][] getListSizeAndGrantType() {
    // Both grant types that mandate a redirect URI are paired with an empty list.
    List<String> emptyRedirectUris = new ArrayList<>();
    return new Object[][]{
            {"implicit", emptyRedirectUris},
            {"authorization_code", emptyRedirectUris},
    };
}
@Test(dataProvider = "RedirectAndGrantTypeProvider")
public void registerApplicationTestWithSPWithFailCallback(String grantTypeVal, List<String> redirectUri)
        throws Exception {
    // "implicit" and "authorization_code" both require at least one redirect
    // URI; the provider hands in an empty list, so the request must be rejected
    // as invalid input.
    mockApplicationManagementService = mock(ApplicationManagementService.class);
    Whitebox.setInternalState(dcrmService, "oAuthAdminService", mockOAuthAdminService);
    startTenantFlow();
    dummyGrantTypes.add(grantTypeVal);
    applicationRegistrationRequest.setGrantTypes(dummyGrantTypes);
    String grantType = StringUtils.join(applicationRegistrationRequest.getGrantTypes(), " ");
    ServiceProvider serviceProvider = new ServiceProvider();
    DCRDataHolder dcrDataHolder = DCRDataHolder.getInstance();
    dcrDataHolder.setApplicationManagementService(mockApplicationManagementService);
    // Consecutive-call stubbing: SP absent on the first lookup (null), present
    // on the second, i.e. after the service has created it.
    when(mockApplicationManagementService.getServiceProvider(dummyClientName, dummyTenantDomain)).thenReturn
            (null, serviceProvider);
    applicationRegistrationRequest.setRedirectUris(redirectUri);
    OAuthConsumerAppDTO oAuthConsumerApp = new OAuthConsumerAppDTO();
    oAuthConsumerApp.setApplicationName(dummyClientName);
    oAuthConsumerApp.setGrantTypes(grantType);
    oAuthConsumerApp.setOAuthVersion(OAUTH_VERSION);
    when(mockOAuthAdminService
            .getOAuthApplicationDataByAppName(dummyClientName)).thenReturn(oAuthConsumerApp);
    try {
        dcrmService.registerApplication(applicationRegistrationRequest);
    } catch (IdentityException ex) {
        assertEquals(ex.getErrorCode(), DCRMConstants.ErrorMessages.BAD_REQUEST_INVALID_INPUT.toString());
        return;
    }
    fail("Expected exception IdentityException not thrown by registerApplication method");
}
@DataProvider(name = "redirectUriProvider")
public Object[][] getReDirecturi() {
    // One row with a single redirect URI, one with the same URI listed twice.
    List<String> singleUri = new ArrayList<>();
    singleUri.add("redirectUri1");
    List<String> duplicatedUris = new ArrayList<>();
    duplicatedUris.add("redirectUri1");
    duplicatedUris.add("redirectUri1");
    return new Object[][]{
            {singleUri},
            {duplicatedUris}
    };
}
@Test(dataProvider = "redirectUriProvider")
public void registerApplicationTestWithSP(List<String> redirectUri) throws Exception {
    // Happy path: SP is created, OAuth app is registered, and the resulting
    // Application carries the requested client name.
    mockApplicationManagementService = mock(ApplicationManagementService.class);
    Whitebox.setInternalState(dcrmService, "oAuthAdminService", mockOAuthAdminService);
    startTenantFlow();
    dummyGrantTypes.add("implicit");
    applicationRegistrationRequest.setGrantTypes(dummyGrantTypes);
    String grantType = StringUtils.join(applicationRegistrationRequest.getGrantTypes(), " ");
    ServiceProvider serviceProvider = new ServiceProvider();
    DCRDataHolder dcrDataHolder = DCRDataHolder.getInstance();
    dcrDataHolder.setApplicationManagementService(mockApplicationManagementService);
    // Consecutive-call stubbing: no SP on the first lookup, the created SP after.
    when(mockApplicationManagementService.getServiceProvider(dummyClientName, dummyTenantDomain)).thenReturn
            (null, serviceProvider);
    applicationRegistrationRequest.setRedirectUris(redirectUri);
    OAuthConsumerAppDTO oAuthConsumerApp = new OAuthConsumerAppDTO();
    oAuthConsumerApp.setApplicationName(dummyClientName);
    oAuthConsumerApp.setGrantTypes(grantType);
    oAuthConsumerApp.setOAuthVersion(OAUTH_VERSION);
    when(mockOAuthAdminService
            .getOAuthApplicationDataByAppName(dummyClientName)).thenReturn(oAuthConsumerApp);
    when(mockOAuthAdminService.registerAndRetrieveOAuthApplicationData(any(OAuthConsumerAppDTO.class)))
            .thenReturn(oAuthConsumerApp);
    Application application = dcrmService.registerApplication(applicationRegistrationRequest);
    assertEquals(application.getClientName(), dummyClientName);
}
@Test
public void testRegisterApplicationWithInvalidSPName() throws Exception {
    mockApplicationManagementService = mock(ApplicationManagementService.class);
    Whitebox.setInternalState(dcrmService, "oAuthAdminService", mockOAuthAdminService);
    startTenantFlow();
    dummyGrantTypes.add("implicit");
    applicationRegistrationRequest.setGrantTypes(dummyGrantTypes);
    // A client name containing '@' is not an acceptable service-provider name.
    applicationRegistrationRequest.setClientName(dummyInvalidClientName);
    try {
        dcrmService.registerApplication(applicationRegistrationRequest);
        fail("Expected exception IdentityException not thrown by registerApplication method");
    } catch (IdentityException e) {
        assertEquals(e.getErrorCode(), DCRMConstants.ErrorMessages.BAD_REQUEST_INVALID_SP_NAME.toString());
    }
}
@Test(dataProvider = "redirectUriProvider")
public void registerApplicationTestWithDeleteCreatedSP(List<String> redirectUri) throws Exception {
    // OAuth app creation fails after the SP was created; the overall call must
    // report FAILED_TO_REGISTER_APPLICATION (the created SP is rolled back).
    // NOTE: mockStatic/whenNew are PowerMock features and rely on the class
    // being listed in this test class's @PrepareForTest (declared above).
    mockStatic(IdentityProviderManager.class);
    mockApplicationManagementService = mock(ApplicationManagementService.class);
    Whitebox.setInternalState(dcrmService, "oAuthAdminService", mockOAuthAdminService);
    startTenantFlow();
    dummyGrantTypes.add("implicit");
    applicationRegistrationRequest.setGrantTypes(dummyGrantTypes);
    String grantType = StringUtils.join(applicationRegistrationRequest.getGrantTypes(), " ");
    ServiceProvider serviceProvider = new ServiceProvider();
    DCRDataHolder dcrDataHolder = DCRDataHolder.getInstance();
    dcrDataHolder.setApplicationManagementService(mockApplicationManagementService);
    // Consecutive-call stubbing: SP absent first, present after creation.
    when(mockApplicationManagementService.getServiceProvider(dummyClientName, dummyTenantDomain)).thenReturn
            (null, serviceProvider);
    applicationRegistrationRequest.setRedirectUris(redirectUri);
    OAuthConsumerAppDTO oAuthConsumerApp = new OAuthConsumerAppDTO();
    oAuthConsumerApp.setApplicationName(dummyClientName);
    oAuthConsumerApp.setGrantTypes(grantType);
    oAuthConsumerApp.setOAuthVersion(OAUTH_VERSION);
    // Force the service's internally constructed DTO to be this instance so the
    // doThrow stubbing below matches it.
    whenNew(OAuthConsumerAppDTO.class).withNoArguments().thenReturn(oAuthConsumerApp);
    doThrow(new IdentityOAuthAdminException("")).when(mockOAuthAdminService)
            .registerOAuthApplicationData(oAuthConsumerApp);
    try {
        dcrmService.registerApplication(applicationRegistrationRequest);
    } catch (IdentityException ex) {
        assertEquals(ex.getErrorCode(), DCRMConstants.ErrorMessages.FAILED_TO_REGISTER_APPLICATION.toString());
        return;
    }
    fail("Expected exception IdentityException not thrown by registerApplication method");
}
@Test(dataProvider = "redirectUriProvider")
public void registerApplicationTestWithFailedToDeleteCreatedSP(List<String> redirectUri) throws Exception {
    // Same failure as the rollback test above, but here deleting the created SP
    // also fails, so the surfaced error is FAILED_TO_DELETE_SP.
    mockStatic(IdentityProviderManager.class);
    mockApplicationManagementService = mock(ApplicationManagementService.class);
    Whitebox.setInternalState(dcrmService, "oAuthAdminService", mockOAuthAdminService);
    startTenantFlow();
    dummyGrantTypes.add("implicit");
    applicationRegistrationRequest.setGrantTypes(dummyGrantTypes);
    String grantType = StringUtils.join(applicationRegistrationRequest.getGrantTypes(), " ");
    ServiceProvider serviceProvider = new ServiceProvider();
    DCRDataHolder dcrDataHolder = DCRDataHolder.getInstance();
    dcrDataHolder.setApplicationManagementService(mockApplicationManagementService);
    // Consecutive-call stubbing: SP absent first, present after creation.
    when(mockApplicationManagementService.getServiceProvider(dummyClientName, dummyTenantDomain)).thenReturn
            (null, serviceProvider);
    applicationRegistrationRequest.setRedirectUris(redirectUri);
    OAuthConsumerAppDTO oAuthConsumerApp = new OAuthConsumerAppDTO();
    oAuthConsumerApp.setApplicationName(dummyClientName);
    oAuthConsumerApp.setGrantTypes(grantType);
    oAuthConsumerApp.setOAuthVersion(OAUTH_VERSION);
    // PowerMock: make the service's internally constructed DTO be this instance.
    whenNew(OAuthConsumerAppDTO.class).withNoArguments().thenReturn(oAuthConsumerApp);
    doThrow(new IdentityOAuthAdminException("")).when(mockOAuthAdminService)
            .registerOAuthApplicationData(oAuthConsumerApp);
    // The compensating SP deletion fails as well.
    doThrow(new IdentityApplicationManagementException("")).when(mockApplicationManagementService)
            .deleteApplication(dummyClientName, dummyTenantDomain, dummyUserName);
    try {
        dcrmService.registerApplication(applicationRegistrationRequest);
    } catch (IdentityException ex) {
        assertEquals(ex.getErrorCode(), DCRMConstants.ErrorMessages.FAILED_TO_DELETE_SP.toString());
        return;
    }
    fail("Expected exception IdentityException not thrown by registerApplication method");
}
@Test(dataProvider = "redirectUriProvider")
public void registerApplicationTestWithFailedToUpdateSP(List<String> redirectUri) throws Exception {
    // The OAuth app is created successfully, but writing the updated service
    // provider back fails; the service must surface FAILED_TO_UPDATE_SP.
    mockApplicationManagementService = mock(ApplicationManagementService.class);
    Whitebox.setInternalState(dcrmService, "oAuthAdminService", mockOAuthAdminService);
    startTenantFlow();
    dummyGrantTypes.add("implicit");
    applicationRegistrationRequest.setGrantTypes(dummyGrantTypes);
    String grantType = StringUtils.join(applicationRegistrationRequest.getGrantTypes(), " ");
    ServiceProvider serviceProvider = new ServiceProvider();
    serviceProvider.setApplicationName(dummyClientName);
    DCRDataHolder dcrDataHolder = DCRDataHolder.getInstance();
    dcrDataHolder.setApplicationManagementService(mockApplicationManagementService);
    // Consecutive-call stubbing: SP absent first, present after creation.
    when(mockApplicationManagementService.getServiceProvider(dummyClientName, dummyTenantDomain)).thenReturn
            (null, serviceProvider);
    applicationRegistrationRequest.setRedirectUris(redirectUri);
    OAuthConsumerAppDTO oAuthConsumerApp = new OAuthConsumerAppDTO();
    oAuthConsumerApp.setApplicationName(dummyClientName);
    oAuthConsumerApp.setGrantTypes(grantType);
    oAuthConsumerApp.setOAuthVersion(OAUTH_VERSION);
    // NOTE(review): the literal "dummyConsumerKey" presumably matches the
    // dummyConsumerKey field used elsewhere in this class -- confirm, and prefer
    // the field for consistency.
    oAuthConsumerApp.setOauthConsumerKey("dummyConsumerKey");
    oAuthConsumerApp.setUsername(dummyUserName.concat("@").concat(dummyTenantDomain));
    when(mockOAuthAdminService
            .getOAuthApplicationDataByAppName(dummyClientName)).thenReturn(oAuthConsumerApp);
    when(mockOAuthAdminService
            .getOAuthApplicationData("dummyConsumerKey")).thenReturn(oAuthConsumerApp);
    when(mockOAuthAdminService.getAllOAuthApplicationData())
            .thenReturn(new OAuthConsumerAppDTO[]{oAuthConsumerApp});
    when(mockOAuthAdminService.registerAndRetrieveOAuthApplicationData(any(OAuthConsumerAppDTO.class))).
            thenReturn(oAuthConsumerApp);
    doThrow(new IdentityApplicationManagementException("ehweh")).when(mockApplicationManagementService)
            .updateApplication(serviceProvider, dummyTenantDomain, dummyUserName);
    try {
        dcrmService.registerApplication(applicationRegistrationRequest);
    } catch (IdentityException ex) {
        assertEquals(ex.getErrorCode(), DCRMConstants.ErrorMessages.FAILED_TO_UPDATE_SP.toString());
        return;
    }
    fail("Expected exception IdentityException not thrown by registerApplication method");
}
@Test
public void updateApplicationTestWithException()
        throws DCRMException, IdentityOAuthAdminException, IdentityApplicationManagementException {
    List<String> redirectUri = new ArrayList<>();
    dummyGrantTypes.add("dummyVal");
    redirectUri.add("dummyUri");
    // NOTE(review): applicationUpdateRequest is a Mockito mock, so these setter
    // calls are no-ops and its getters return stubbed defaults -- confirm this
    // is intentional (a real ApplicationUpdateRequest may have been meant).
    ApplicationUpdateRequest applicationUpdateRequest = mock(ApplicationUpdateRequest.class);
    applicationUpdateRequest.setClientName(dummyClientName);
    applicationUpdateRequest.setGrantTypes(dummyGrantTypes);
    applicationUpdateRequest.setRedirectUris(redirectUri);
    mockApplicationManagementService = mock(ApplicationManagementService.class);
    Whitebox.setInternalState(dcrmService, "oAuthAdminService", mockOAuthAdminService);
    OAuthConsumerAppDTO dto = new OAuthConsumerAppDTO();
    dto.setApplicationName(dummyClientName);
    String dummyClientId = "dummyClientId";
    when(mockOAuthAdminService.getOAuthApplicationData(dummyClientId)).thenReturn(dto);
    ServiceProvider serviceProvider = new ServiceProvider();
    DCRDataHolder dcrDataHolder = DCRDataHolder.getInstance();
    dcrDataHolder.setApplicationManagementService(mockApplicationManagementService);
    when(mockApplicationManagementService.getServiceProvider(dummyClientName, dummyTenantDomain)).thenReturn
            (serviceProvider);
    when(mockOAuthAdminService.getAllOAuthApplicationData()).thenReturn(new OAuthConsumerAppDTO[0]);
    doNothing().when(mockApplicationManagementService).updateApplication(serviceProvider, dummyTenantDomain,
            dummyUserName);
    // The consumer-app update fails inside the admin service.
    doThrow(new IdentityOAuthAdminException("")).when(mockOAuthAdminService).updateConsumerApplication(any
            (OAuthConsumerAppDTO.class));
    try {
        startTenantFlow();
        dcrmService.updateApplication(applicationUpdateRequest, dummyClientId);
    } catch (IdentityException ex) {
        assertEquals(ex.getErrorCode(), DCRMConstants.ErrorMessages.FORBIDDEN_UNAUTHORIZED_USER.toString());
        return;
    } finally {
        // Always unwind the tenant flow started inside the try block.
        PrivilegedCarbonContext.endTenantFlow();
    }
    fail("Expected exception IdentityException not thrown by updateApplication method");
}
private void startTenantFlow() {
    // Point CARBON_HOME at the test resources folder, then bind the dummy
    // tenant and user to the current thread's Carbon context.
    String testResourcesHome =
            Paths.get(System.getProperty("user.dir"), "src", "test", "resources").toString();
    System.setProperty(CarbonBaseConstants.CARBON_HOME, testResourcesHome);
    PrivilegedCarbonContext carbonContext = PrivilegedCarbonContext.getThreadLocalCarbonContext();
    carbonContext.setTenantDomain(dummyTenantDomain);
    carbonContext.setUsername(dummyUserName);
}
}
| |
/**
*/
package datacenter.core.util;
import datacenter.core.*;
import java.util.Map;
import org.eclipse.emf.common.util.Diagnostic;
import org.eclipse.emf.common.util.DiagnosticChain;
import org.eclipse.emf.common.util.ResourceLocator;
import org.eclipse.emf.ecore.EPackage;
import org.eclipse.emf.ecore.util.EObjectValidator;
/**
 * <!-- begin-user-doc -->
 * The <b>Validator</b> for the model.
 * <!-- end-user-doc -->
 * @see datacenter.core.CorePackage
 * @generated
 */
public class CoreValidator extends EObjectValidator {
	/**
	 * The cached model package
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public static final CoreValidator INSTANCE = new CoreValidator();

	/**
	 * A constant for the {@link org.eclipse.emf.common.util.Diagnostic#getSource() source} of diagnostic {@link org.eclipse.emf.common.util.Diagnostic#getCode() codes} from this package.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @see org.eclipse.emf.common.util.Diagnostic#getSource()
	 * @see org.eclipse.emf.common.util.Diagnostic#getCode()
	 * @generated
	 */
	public static final String DIAGNOSTIC_SOURCE = "datacenter.core";

	/**
	 * A constant with a fixed name that can be used as the base value for additional hand written constants.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	private static final int GENERATED_DIAGNOSTIC_CODE_COUNT = 0;

	/**
	 * A constant with a fixed name that can be used as the base value for additional hand written constants in a derived class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	protected static final int DIAGNOSTIC_CODE_COUNT = GENERATED_DIAGNOSTIC_CODE_COUNT;

	/**
	 * Creates an instance of the switch.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public CoreValidator() {
		super();
	}

	/**
	 * Returns the package of this validator switch.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	protected EPackage getEPackage() {
		return CorePackage.eINSTANCE;
	}

	/**
	 * Calls <code>validateXXX</code> for the corresponding classifier of the model.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	protected boolean validate(int classifierID, Object value, DiagnosticChain diagnostics, Map<Object, Object> context) {
		switch (classifierID) {
			case CorePackage.DYNAMIC_ELEMENT:
				return validateDynamicElement((DynamicElement)value, diagnostics, context);
			case CorePackage.CLOUD_NETWORK:
				return validateCloudNetwork((CloudNetwork)value, diagnostics, context);
			case CorePackage.SERVER:
				return validateServer((Server)value, diagnostics, context);
			case CorePackage.MACHINE:
				return validateMachine((Machine)value, diagnostics, context);
			case CorePackage.JOB:
				return validateJob((Job)value, diagnostics, context);
			case CorePackage.TASK:
				return validateTask((Task)value, diagnostics, context);
			case CorePackage.GEO_LOCATION:
				return validateGeoLocation((GeoLocation)value, diagnostics, context);
			default:
				return true;
		}
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public boolean validateDynamicElement(DynamicElement dynamicElement, DiagnosticChain diagnostics, Map<Object, Object> context) {
		return validate_EveryDefaultConstraint(dynamicElement, diagnostics, context);
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public boolean validateCloudNetwork(CloudNetwork cloudNetwork, DiagnosticChain diagnostics, Map<Object, Object> context) {
		return validate_EveryDefaultConstraint(cloudNetwork, diagnostics, context);
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public boolean validateServer(Server server, DiagnosticChain diagnostics, Map<Object, Object> context) {
		if (!validate_NoCircularContainment(server, diagnostics, context)) return false;
		boolean result = validate_EveryMultiplicityConforms(server, diagnostics, context);
		if (result || diagnostics != null) result &= validate_EveryDataValueConforms(server, diagnostics, context);
		if (result || diagnostics != null) result &= validate_EveryReferenceIsContained(server, diagnostics, context);
		if (result || diagnostics != null) result &= validate_EveryBidirectionalReferenceIsPaired(server, diagnostics, context);
		if (result || diagnostics != null) result &= validate_EveryProxyResolves(server, diagnostics, context);
		if (result || diagnostics != null) result &= validate_UniqueID(server, diagnostics, context);
		if (result || diagnostics != null) result &= validate_EveryKeyUnique(server, diagnostics, context);
		if (result || diagnostics != null) result &= validate_EveryMapEntryUnique(server, diagnostics, context);
		if (result || diagnostics != null) result &= validateServer_CPUavailabilityCheck(server, diagnostics, context);
		if (result || diagnostics != null) result &= validateServer_memoryAvailabilityCheck(server, diagnostics, context);
		return result;
	}

	/**
	 * The cached validation expression for the CPUavailabilityCheck constraint of '<em>Server</em>'.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	protected static final String SERVER__CP_UAVAILABILITY_CHECK__EEXPRESSION = "Tuple {\n" +
			"\tmessage : String = 'Cannot assign more than available resources',\n" +
			"\tstatus : Boolean = \n" +
			"\t\t\tremainingCores >= 0\n" +
			"}.status";

	/**
	 * Validates the CPUavailabilityCheck constraint of '<em>Server</em>'.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public boolean validateServer_CPUavailabilityCheck(Server server, DiagnosticChain diagnostics, Map<Object, Object> context) {
		return
			validate
				(CorePackage.Literals.SERVER,
				 server,
				 diagnostics,
				 context,
				 "http://www.eclipse.org/emf/2002/Ecore/OCL/Pivot",
				 "CPUavailabilityCheck",
				 SERVER__CP_UAVAILABILITY_CHECK__EEXPRESSION,
				 Diagnostic.ERROR,
				 DIAGNOSTIC_SOURCE,
				 0);
	}

	/**
	 * The cached validation expression for the memoryAvailabilityCheck constraint of '<em>Server</em>'.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	protected static final String SERVER__MEMORY_AVAILABILITY_CHECK__EEXPRESSION = "Tuple {\n" +
			"\tmessage : String = 'Cannot assign more than available resources',\n" +
			"\tstatus : Boolean = \n" +
			"\t\t\tremainingMemory >=0\n" +
			"}.status";

	/**
	 * Validates the memoryAvailabilityCheck constraint of '<em>Server</em>'.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public boolean validateServer_memoryAvailabilityCheck(Server server, DiagnosticChain diagnostics, Map<Object, Object> context) {
		return
			validate
				(CorePackage.Literals.SERVER,
				 server,
				 diagnostics,
				 context,
				 "http://www.eclipse.org/emf/2002/Ecore/OCL/Pivot",
				 "memoryAvailabilityCheck",
				 SERVER__MEMORY_AVAILABILITY_CHECK__EEXPRESSION,
				 Diagnostic.ERROR,
				 DIAGNOSTIC_SOURCE,
				 0);
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public boolean validateMachine(Machine machine, DiagnosticChain diagnostics, Map<Object, Object> context) {
		return validate_EveryDefaultConstraint(machine, diagnostics, context);
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public boolean validateJob(Job job, DiagnosticChain diagnostics, Map<Object, Object> context) {
		return validate_EveryDefaultConstraint(job, diagnostics, context);
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public boolean validateTask(Task task, DiagnosticChain diagnostics, Map<Object, Object> context) {
		return validate_EveryDefaultConstraint(task, diagnostics, context);
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public boolean validateGeoLocation(GeoLocation geoLocation, DiagnosticChain diagnostics, Map<Object, Object> context) {
		if (!validate_NoCircularContainment(geoLocation, diagnostics, context)) return false;
		boolean result = validate_EveryMultiplicityConforms(geoLocation, diagnostics, context);
		if (result || diagnostics != null) result &= validate_EveryDataValueConforms(geoLocation, diagnostics, context);
		if (result || diagnostics != null) result &= validate_EveryReferenceIsContained(geoLocation, diagnostics, context);
		if (result || diagnostics != null) result &= validate_EveryBidirectionalReferenceIsPaired(geoLocation, diagnostics, context);
		if (result || diagnostics != null) result &= validate_EveryProxyResolves(geoLocation, diagnostics, context);
		if (result || diagnostics != null) result &= validate_UniqueID(geoLocation, diagnostics, context);
		if (result || diagnostics != null) result &= validate_EveryKeyUnique(geoLocation, diagnostics, context);
		if (result || diagnostics != null) result &= validate_EveryMapEntryUnique(geoLocation, diagnostics, context);
		if (result || diagnostics != null) result &= validateGeoLocation_latitudeCheck(geoLocation, diagnostics, context);
		if (result || diagnostics != null) result &= validateGeoLocation_longitudeCheck(geoLocation, diagnostics, context);
		return result;
	}

	/**
	 * The cached validation expression for the latitudeCheck constraint of '<em>Geo Location</em>'.
	 * <p>
	 * Fixed: the generated expression was {@code latitude > 90 and latitude > -90},
	 * which reduces to {@code latitude > 90} and rejects every real latitude,
	 * contradicting the constraint message. The check is now the inclusive range
	 * [-90, 90]. Keep this in sync with the OCL annotation in the .ecore model.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated NOT
	 */
	protected static final String GEO_LOCATION__LATITUDE_CHECK__EEXPRESSION = "Tuple {\n" +
			"\tmessage : String = 'Latitude should range from -90 to 90 degrees',\n" +
			"\tstatus : Boolean = \n" +
			"\t\t\tlatitude <= 90 and latitude >= -90\n" +
			"}.status";

	/**
	 * Validates the latitudeCheck constraint of '<em>Geo Location</em>'.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public boolean validateGeoLocation_latitudeCheck(GeoLocation geoLocation, DiagnosticChain diagnostics, Map<Object, Object> context) {
		return
			validate
				(CorePackage.Literals.GEO_LOCATION,
				 geoLocation,
				 diagnostics,
				 context,
				 "http://www.eclipse.org/emf/2002/Ecore/OCL/Pivot",
				 "latitudeCheck",
				 GEO_LOCATION__LATITUDE_CHECK__EEXPRESSION,
				 Diagnostic.ERROR,
				 DIAGNOSTIC_SOURCE,
				 0);
	}

	/**
	 * The cached validation expression for the longitudeCheck constraint of '<em>Geo Location</em>'.
	 * <p>
	 * Fixed: the generated expression used strict comparisons and excluded the
	 * endpoints 0 and 180 that the constraint message declares valid; the range
	 * is now inclusive. Keep this in sync with the OCL annotation in the .ecore model.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated NOT
	 */
	protected static final String GEO_LOCATION__LONGITUDE_CHECK__EEXPRESSION = "Tuple {\n" +
			"\tmessage : String = 'Longitude should range from 0 to 180 degrees',\n" +
			"\tstatus : Boolean = \n" +
			"\t\t\tlongitude >= 0 and longitude <= 180\n" +
			"}.status";

	/**
	 * Validates the longitudeCheck constraint of '<em>Geo Location</em>'.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public boolean validateGeoLocation_longitudeCheck(GeoLocation geoLocation, DiagnosticChain diagnostics, Map<Object, Object> context) {
		return
			validate
				(CorePackage.Literals.GEO_LOCATION,
				 geoLocation,
				 diagnostics,
				 context,
				 "http://www.eclipse.org/emf/2002/Ecore/OCL/Pivot",
				 "longitudeCheck",
				 GEO_LOCATION__LONGITUDE_CHECK__EEXPRESSION,
				 Diagnostic.ERROR,
				 DIAGNOSTIC_SOURCE,
				 0);
	}

	/**
	 * Returns the resource locator that will be used to fetch messages for this validator's diagnostics.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public ResourceLocator getResourceLocator() {
		// TODO
		// Specialize this to return a resource locator for messages specific to this validator.
		// Ensure that you remove @generated or mark it @generated NOT
		return super.getResourceLocator();
	}

} //CoreValidator
| |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.node.internal;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.env.Environment;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import java.util.function.Predicate;
import static org.elasticsearch.common.Strings.cleanPath;
/**
*
*/
public class InternalSettingsPreparer {
// Settings-file extensions recognized in the config dir; at most one
// elasticsearch.<suffix> file may be present (enforced in prepareEnvironment).
private static final String[] ALLOWED_SUFFIXES = {".yml", ".yaml", ".json", ".properties"};
// Prefix marking a property as a default value rather than an override.
static final String PROPERTY_DEFAULTS_PREFIX = "default.";
// Matches property keys that carry the defaults prefix above.
static final Predicate<String> PROPERTY_DEFAULTS_PREDICATE = key -> key.startsWith(PROPERTY_DEFAULTS_PREFIX);
// Placeholder setting values that trigger an interactive terminal prompt.
public static final String SECRET_PROMPT_VALUE = "${prompt.secret}";
public static final String TEXT_PROMPT_VALUE = "${prompt.text}";
/**
 * Prepares the settings by gathering all elasticsearch system properties and setting defaults.
 */
public static Settings prepareSettings(Settings input) {
    final Settings.Builder builder = Settings.builder();
    initializeSettings(builder, input, true, Collections.emptyMap());
    finalizeSettings(builder, null, null);
    return builder.build();
}
/**
 * Prepares the settings by gathering all elasticsearch system properties, optionally loading the configuration settings,
 * and then replacing all property placeholders. If a {@link Terminal} is provided and configuration settings are loaded,
 * settings with a value of <code>${prompt.text}</code> or <code>${prompt.secret}</code> will result in a prompt for
 * the setting to the user.
 * @param input The custom settings to use. These are not overwritten by settings in the configuration file.
 * @param terminal the Terminal to use for input/output
 * @return the prepared {@link Environment}, whose settings include the loaded configuration
 */
public static Environment prepareEnvironment(Settings input, Terminal terminal) {
    return prepareEnvironment(input, terminal, Collections.emptyMap());
}
/**
* Prepares the settings by gathering all elasticsearch system properties, optionally loading the configuration settings,
* and then replacing all property placeholders. If a {@link Terminal} is provided and configuration settings are loaded,
* settings with a value of <code>${prompt.text}</code> or <code>${prompt.secret}</code> will result in a prompt for
* the setting to the user.
* @param input The custom settings to use. These are not overwritten by settings in the configuration file.
* @param terminal the Terminal to use for input/output
* @param properties Map of properties key/value pairs (usually from the command-line)
* @return the {@link Settings} and {@link Environment} as a {@link Tuple}
*/
public static Environment prepareEnvironment(Settings input, Terminal terminal, Map<String, String> properties) {
// just create enough settings to build the environment, to get the config dir
Settings.Builder output = Settings.builder();
initializeSettings(output, input, true, properties);
Environment environment = new Environment(output.build());
boolean settingsFileFound = false;
Set<String> foundSuffixes = new HashSet<>();
for (String allowedSuffix : ALLOWED_SUFFIXES) {
Path path = environment.configFile().resolve("elasticsearch" + allowedSuffix);
if (Files.exists(path)) {
if (!settingsFileFound) {
try {
output.loadFromPath(path);
} catch (IOException e) {
throw new SettingsException("Failed to settings from " + path.toString(), e);
}
}
settingsFileFound = true;
foundSuffixes.add(allowedSuffix);
}
}
if (foundSuffixes.size() > 1) {
throw new SettingsException("multiple settings files found with suffixes: " + Strings.collectionToDelimitedString(foundSuffixes, ","));
}
// re-initialize settings now that the config file has been loaded
// TODO: only re-initialize if a config file was actually loaded
initializeSettings(output, input, false, properties);
finalizeSettings(output, terminal, environment.configFile());
environment = new Environment(output.build());
// we put back the path.logs so we can use it in the logging configuration file
output.put(Environment.PATH_LOGS_SETTING.getKey(), cleanPath(environment.logsFile().toAbsolutePath().toString()));
return new Environment(output.build());
}
/**
* Initializes the builder with the given input settings, and loads system properties settings if allowed.
* If loadDefaults is true, system property default settings are loaded.
*/
private static void initializeSettings(Settings.Builder output, Settings input, boolean loadDefaults, Map<String, String> esSettings) {
output.put(input);
if (loadDefaults) {
output.putProperties(esSettings, PROPERTY_DEFAULTS_PREDICATE, key -> key.substring(PROPERTY_DEFAULTS_PREFIX.length()));
}
output.putProperties(esSettings, PROPERTY_DEFAULTS_PREDICATE.negate(), Function.identity());
output.replacePropertyPlaceholders();
}
/**
* Finish preparing settings by replacing forced settings, prompts, and any defaults that need to be added.
* The provided terminal is used to prompt for settings needing to be replaced.
* The provided configDir is optional and will be used to lookup names.txt if the node name is not set, if provided.
*/
private static void finalizeSettings(Settings.Builder output, Terminal terminal, Path configDir) {
// allow to force set properties based on configuration of the settings provided
List<String> forcedSettings = new ArrayList<>();
for (String setting : output.internalMap().keySet()) {
if (setting.startsWith("force.")) {
forcedSettings.add(setting);
}
}
for (String forcedSetting : forcedSettings) {
String value = output.remove(forcedSetting);
output.put(forcedSetting.substring("force.".length()), value);
}
output.replacePropertyPlaceholders();
// put the cluster name
if (output.get(ClusterName.CLUSTER_NAME_SETTING.getKey()) == null) {
output.put(ClusterName.CLUSTER_NAME_SETTING.getKey(), ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY).value());
}
replacePromptPlaceholders(output, terminal);
// all settings placeholders have been resolved. resolve the value for the name setting by checking for name,
// then looking for node.name, and finally generate one if needed
String name = output.get("node.name");
if (name == null || name.isEmpty()) {
name = randomNodeName(configDir);
output.put("node.name", name);
}
}
private static String randomNodeName(Path configDir) {
InputStream input;
if (configDir != null && Files.exists(configDir.resolve("names.txt"))) {
Path namesPath = configDir.resolve("names.txt");
try {
input = Files.newInputStream(namesPath);
} catch (IOException e) {
throw new RuntimeException("Failed to load custom names.txt from " + namesPath, e);
}
} else {
input = InternalSettingsPreparer.class.getResourceAsStream("/config/names.txt");
}
try {
List<String> names = new ArrayList<>();
try (BufferedReader reader = new BufferedReader(new InputStreamReader(input, StandardCharsets.UTF_8))) {
String name = reader.readLine();
while (name != null) {
names.add(name);
name = reader.readLine();
}
}
int index = Randomness.get().nextInt(names.size());
return names.get(index);
} catch (IOException e) {
throw new RuntimeException("Could not read node names list", e);
}
}
private static void replacePromptPlaceholders(Settings.Builder settings, Terminal terminal) {
List<String> secretToPrompt = new ArrayList<>();
List<String> textToPrompt = new ArrayList<>();
for (Map.Entry<String, String> entry : settings.internalMap().entrySet()) {
switch (entry.getValue()) {
case SECRET_PROMPT_VALUE:
secretToPrompt.add(entry.getKey());
break;
case TEXT_PROMPT_VALUE:
textToPrompt.add(entry.getKey());
break;
}
}
for (String setting : secretToPrompt) {
String secretValue = promptForValue(setting, terminal, true);
if (Strings.hasLength(secretValue)) {
settings.put(setting, secretValue);
} else {
// TODO: why do we remove settings if prompt returns empty??
settings.remove(setting);
}
}
for (String setting : textToPrompt) {
String textValue = promptForValue(setting, terminal, false);
if (Strings.hasLength(textValue)) {
settings.put(setting, textValue);
} else {
// TODO: why do we remove settings if prompt returns empty??
settings.remove(setting);
}
}
}
private static String promptForValue(String key, Terminal terminal, boolean secret) {
if (terminal == null) {
throw new UnsupportedOperationException("found property [" + key + "] with value [" + (secret ? SECRET_PROMPT_VALUE : TEXT_PROMPT_VALUE) +"]. prompting for property values is only supported when running elasticsearch in the foreground");
}
if (secret) {
return new String(terminal.readSecret("Enter value for [" + key + "]: "));
}
return terminal.readText("Enter value for [" + key + "]: ");
}
}
| |
// Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.openapi.fileEditor.impl;
import com.intellij.ide.IdeBundle;
import com.intellij.internal.statistic.eventLog.FeatureUsageData;
import com.intellij.internal.statistic.service.fus.collectors.FUCounterUsageLogger;
import com.intellij.openapi.Disposable;
import com.intellij.openapi.actionSystem.CommonDataKeys;
import com.intellij.openapi.actionSystem.DataProvider;
import com.intellij.openapi.actionSystem.IdeActions;
import com.intellij.openapi.actionSystem.PlatformDataKeys;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.command.CommandProcessor;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.editor.colors.EditorColors;
import com.intellij.openapi.editor.colors.EditorColorsManager;
import com.intellij.openapi.editor.colors.EditorColorsScheme;
import com.intellij.openapi.fileEditor.*;
import com.intellij.openapi.fileEditor.ex.FileEditorManagerEx;
import com.intellij.openapi.fileEditor.ex.FileEditorProviderManager;
import com.intellij.openapi.fileEditor.ex.FileEditorWithProvider;
import com.intellij.openapi.fileEditor.ex.IdeDocumentHistory;
import com.intellij.openapi.project.DumbService;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.*;
import com.intellij.openapi.util.registry.ExperimentalUI;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.openapi.wm.FocusWatcher;
import com.intellij.openapi.wm.IdeFocusManager;
import com.intellij.ui.*;
import com.intellij.ui.components.JBPanelWithEmptyText;
import com.intellij.ui.components.panels.NonOpaquePanel;
import com.intellij.ui.components.panels.Wrapper;
import com.intellij.ui.tabs.JBTabs;
import com.intellij.ui.tabs.impl.JBTabsImpl;
import com.intellij.util.ArrayUtil;
import com.intellij.util.ObjectUtils;
import com.intellij.util.SmartList;
import com.intellij.util.containers.ContainerUtil;
import org.jetbrains.annotations.ApiStatus;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import javax.swing.*;
import javax.swing.event.ChangeEvent;
import javax.swing.event.ChangeListener;
import java.awt.*;
import java.util.List;
import java.util.*;
import java.util.function.Supplier;
/**
 * This class hides the internal structure of the UI component which represents
 * a set of opened editors. For example, one myEditor is represented by its
 * component, more than one myEditor is wrapped into a tabbed pane.
 *
 * @author Vladimir Kondratyev
 */
public class EditorComposite implements Disposable {
  private static final Logger LOG = Logger.getInstance(EditorComposite.class);
  /**
   * File for which composite is created
   */
  @NotNull private final VirtualFile myFile;
  /**
   * Whether the composite is pinned or not
   */
  private boolean myPinned;
  /**
   * Whether the composite is opened as preview tab or not
   */
  private boolean myPreview;
  /**
   * Editors which are opened in the composite. Replaced wholesale (only on the EDT)
   * when an editor is added; see {@link #addEditor(FileEditor, FileEditorProvider)}.
   */
  volatile FileEditor[] myEditors;
  /**
   * This is initial timestamp of the file. It is used to implement
   * "close non modified editors first" feature.
   */
  private final long myInitialFileTimeStamp;
  // non-null only when more than one editor is open (editors are hosted in tabs)
  private TabbedPaneWrapper myTabbedPaneWrapper;
  @NotNull
  private final MyComponent myComponent;
  private final FocusWatcher myFocusWatcher;
  /**
   * Currently selected myEditor
   */
  private FileEditor mySelectedEditor;
  private final FileEditorManagerEx myFileEditorManager;
  // per-editor panels shown above/below the editor component
  private final Map<FileEditor, JComponent> myTopComponents = new HashMap<>();
  private final Map<FileEditor, JComponent> myBottomComponents = new HashMap<>();
  private final Map<FileEditor, @NlsContexts.TabTitle String> myDisplayNames = new HashMap<>();
  private FileEditorProvider[] myProviders;
  /**
   * @param file {@code file} for which composite is being constructed
   *
   * @param editors {@code editors} that should be placed into the composite
   *
   * @exception IllegalArgumentException if {@code editors}
   * is {@code null} or {@code providers} is {@code null} or {@code myEditor} arrays is empty
   */
  EditorComposite(@NotNull final VirtualFile file,
                  @NotNull FileEditor @NotNull [] editors,
                  @NotNull FileEditorProvider @NotNull [] providers,
                  @NotNull final FileEditorManagerEx fileEditorManager) {
    ApplicationManager.getApplication().assertIsDispatchThread();
    myFile = file;
    myEditors = editors;
    myProviders = providers;
    for (FileEditor editor : editors) {
      FileEditor.FILE_KEY.set(editor, myFile);
    }
    if (ArrayUtil.contains(null, editors)) throw new IllegalArgumentException("Must not pass null editors in " + Arrays.asList(editors));
    myFileEditorManager = fileEditorManager;
    myInitialFileTimeStamp = myFile.getTimeStamp();
    Project project = fileEditorManager.getProject();
    Disposer.register(project, this);
    // more than one editor: host all of them in a tabbed pane
    if (editors.length > 1) {
      myTabbedPaneWrapper = createTabbedPaneWrapper(editors, null);
      JComponent component = myTabbedPaneWrapper.getComponent();
      myComponent = new MyComponent(component, () -> component);
    }
    else if (editors.length == 1) {
      myTabbedPaneWrapper = null;
      FileEditor editor = editors[0];
      myComponent = new MyComponent(createEditorComponent(editor), editor::getPreferredFocusedComponent);
    }
    else {
      throw new IllegalArgumentException("editors array cannot be empty");
    }
    mySelectedEditor = editors[0];
    myFocusWatcher = new FocusWatcher();
    myFocusWatcher.install(myComponent);
    // propagate select/deselect notifications (and usage statistics) when the
    // selected editor for this file changes elsewhere in the manager
    project.getMessageBus().connect(this).subscribe(
      FileEditorManagerListener.FILE_EDITOR_MANAGER, new FileEditorManagerListener() {
        @Override
        public void selectionChanged(@NotNull final FileEditorManagerEvent event) {
          final VirtualFile oldFile = event.getOldFile();
          final VirtualFile newFile = event.getNewFile();
          if (Comparing.equal(oldFile, newFile) && Comparing.equal(getFile(), newFile)) {
            Runnable runnable = () -> {
              final FileEditor oldEditor = event.getOldEditor();
              if (oldEditor != null) oldEditor.deselectNotify();
              final FileEditor newEditor = event.getNewEditor();
              if (newEditor != null) {
                newEditor.selectNotify();
                FUCounterUsageLogger.getInstance().logEvent(
                  project,
                  "file.editor",
                  "alternative.file.editor.selected",
                  new FeatureUsageData()
                    .addData("fileEditor", newEditor.getClass().getName())
                    .addAnonymizedPath(newFile.getPath()));
              }
              ((FileEditorProviderManagerImpl)FileEditorProviderManager.getInstance()).providerSelected(EditorComposite.this);
              ((IdeDocumentHistoryImpl)IdeDocumentHistory.getInstance(myFileEditorManager.getProject())).onSelectionChanged();
            };
            if (ApplicationManager.getApplication().isDispatchThread()) {
              CommandProcessor.getInstance().executeCommand(myFileEditorManager.getProject(), runnable,
                IdeBundle.message("command.switch.active.editor"), null);
            }
            else {
              runnable.run(); // not invoked by user
            }
          }
        }
      });
  }
  public FileEditorProvider @NotNull [] getProviders() {
    return myProviders;
  }
  /**
   * Creates the tabbed pane hosting one tab per editor. When {@code myComponent}
   * is non-null, its current child is reused as the first tab's component
   * (the composite previously hosted a single editor).
   */
  @NotNull
  private TabbedPaneWrapper.AsJBTabs createTabbedPaneWrapper(FileEditor @NotNull [] editors, MyComponent myComponent) {
    PrevNextActionsDescriptor descriptor = new PrevNextActionsDescriptor(IdeActions.ACTION_NEXT_EDITOR_TAB, IdeActions.ACTION_PREVIOUS_EDITOR_TAB);
    final TabbedPaneWrapper.AsJBTabs wrapper = new TabbedPaneWrapper.AsJBTabs(myFileEditorManager.getProject(), SwingConstants.BOTTOM, descriptor, this);
    boolean firstEditor = true;
    for (FileEditor editor : editors) {
      JComponent component = firstEditor && myComponent != null ? (JComponent)myComponent.getComponent(0) : createEditorComponent(editor);
      wrapper.addTab(getDisplayName(editor), component);
      firstEditor = false;
    }
    wrapper.addChangeListener(new MyChangeListener());
    return wrapper;
  }
  /**
   * Wraps an editor's component into a panel with (initially empty) top and bottom
   * side panels, adding a "dumb mode" aware wrapper when the editor is not dumb-aware.
   */
  @NotNull
  private JComponent createEditorComponent(@NotNull FileEditor editor) {
    JPanel component = new JPanel(new BorderLayout());
    JComponent comp = editor.getComponent();
    if (!FileEditorManagerImpl.isDumbAware(editor)) {
      comp = DumbService.getInstance(myFileEditorManager.getProject()).wrapGently(comp, editor);
    }
    component.add(comp, BorderLayout.CENTER);
    JPanel topPanel = new TopBottomPanel();
    myTopComponents.put(editor, topPanel);
    component.add(topPanel, BorderLayout.NORTH);
    final JPanel bottomPanel = new TopBottomPanel();
    myBottomComponents.put(editor, bottomPanel);
    component.add(bottomPanel, BorderLayout.SOUTH);
    return component;
  }
  /**
   * @return whether myEditor composite is pinned
   */
  public boolean isPinned(){
    return myPinned;
  }
  /**
   * Sets new "pinned" state; also mirrors the state into the parent tab component's
   * client property so the tab UI can render the pin.
   */
  void setPinned(final boolean pinned) {
    myPinned = pinned;
    Container parent = getComponent().getParent();
    if (parent instanceof JComponent) {
      ((JComponent)parent).putClientProperty(JBTabsImpl.PINNED, myPinned ? Boolean.TRUE : null);
    }
  }
  public boolean isPreview() {
    return myPreview;
  }
  void setPreview(final boolean preview) {
    myPreview = preview;
  }
  // Publishes a selection-changed event (asynchronously via the manager) and records
  // the selection for the window holding the newly selected editor's component.
  private void fireSelectedEditorChanged(@NotNull FileEditor oldSelectedEditor, @NotNull FileEditor newSelectedEditor) {
    if ((!EventQueue.isDispatchThread() || !myFileEditorManager.isInsideChange()) && !Comparing.equal(oldSelectedEditor, newSelectedEditor)) {
      myFileEditorManager.notifyPublisher(() -> {
        final FileEditorManagerEvent event = new FileEditorManagerEvent(myFileEditorManager, myFile, oldSelectedEditor, myFile, newSelectedEditor);
        final FileEditorManagerListener publisher = myFileEditorManager.getProject().getMessageBus().syncPublisher(FileEditorManagerListener.FILE_EDITOR_MANAGER);
        publisher.selectionChanged(event);
      });
      final JComponent component = newSelectedEditor.getComponent();
      final EditorWindowHolder holder =
        ComponentUtil.getParentOfType((Class<? extends EditorWindowHolder>)EditorWindowHolder.class, (Component)component);
      if (holder != null) {
        ((FileEditorManagerImpl)myFileEditorManager).addSelectionRecord(myFile, holder.getEditorWindow());
      }
    }
  }
  /**
   * @return preferred focused component inside myEditor composite. Composite uses FocusWatcher to
   * track focus movement inside the myEditor.
   */
  @Nullable
  public JComponent getPreferredFocusedComponent(){
    if (mySelectedEditor == null) return null;
    // fall back to the editor's own preference when the last focused component
    // is gone, hidden, disabled, or not focusable
    final Component component = myFocusWatcher.getFocusedComponent();
    if (!(component instanceof JComponent) || !component.isShowing() || !component.isEnabled() || !component.isFocusable()) {
      return getSelectedEditor().getPreferredFocusedComponent();
    }
    return (JComponent)component;
  }
  /**
   * @return file for which composite was created.
   */
  @NotNull
  public VirtualFile getFile() {
    return myFile;
  }
  @NotNull
  public FileEditorManager getFileEditorManager() {
    return myFileEditorManager;
  }
  /**
   * @return initial time stamp of the file (on moment of creation of
   * the composite)
   */
  public long getInitialFileTimeStamp() {
    return myInitialFileTimeStamp;
  }
  /**
   * @return editors which are opened in the composite. <b>Do not modify
   * this array</b>.
   */
  public FileEditor @NotNull [] getEditors() {
    return myEditors;
  }
  /**
   * @return the client components previously added above the given editor,
   * unwrapped from their {@link NonOpaquePanel} wrappers.
   */
  @NotNull
  List<JComponent> getTopComponents(@NotNull FileEditor editor) {
    List<JComponent> result = new SmartList<>();
    JComponent container = myTopComponents.get(editor);
    for (Component each : container.getComponents()) {
      if (each instanceof NonOpaquePanel) {
        result.add(((NonOpaquePanel)each).getTargetComponent());
      }
    }
    return Collections.unmodifiableList(result);
  }
  /**
   * @return the tabs hosting multiple editors, or {@code null} when the composite
   * holds a single editor (no tabbed pane is created in that case)
   */
  @Nullable
  public JBTabs getTabs() {
    return myTabbedPaneWrapper == null ? null : ((TabbedPaneWrapper.AsJBTabs)myTabbedPaneWrapper).getTabs();
  }
  public void addTopComponent(@NotNull FileEditor editor, @NotNull JComponent component) {
    manageTopOrBottomComponent(editor, component, true, false);
  }
  public void removeTopComponent(@NotNull FileEditor editor, @NotNull JComponent component) {
    manageTopOrBottomComponent(editor, component, true, true);
  }
  void addBottomComponent(@NotNull FileEditor editor, @NotNull JComponent component) {
    manageTopOrBottomComponent(editor, component, false, false);
  }
  void removeBottomComponent(@NotNull FileEditor editor, @NotNull JComponent component) {
    manageTopOrBottomComponent(editor, component, false, true);
  }
  // Adds or removes a client component in the editor's top/bottom side panel.
  // On add the component is wrapped into a NonOpaquePanel (optionally with a
  // separator border) and inserted in Weighted order; on remove its wrapper is removed.
  private void manageTopOrBottomComponent(@NotNull FileEditor editor, @NotNull JComponent component, boolean top, boolean remove) {
    final JComponent container = top ? myTopComponents.get(editor) : myBottomComponents.get(editor);
    assert container != null;
    if (remove) {
      container.remove(component.getParent());
    }
    else {
      NonOpaquePanel wrapper = new NonOpaquePanel(component);
      if (!Boolean.TRUE.equals(component.getClientProperty(FileEditorManager.SEPARATOR_DISABLED))) {
        wrapper.setBorder(createTopBottomSideBorder(top));
      }
      container.add(wrapper, calcComponentInsertionIndex(component, container));
    }
    container.revalidate();
  }
  // Returns the index at which a new component keeps Weighted children ordered
  // by ascending weight; -1 appends to the end of the container.
  private static int calcComponentInsertionIndex(@NotNull JComponent newComponent, @NotNull JComponent container) {
    for (int i = 0, max = container.getComponentCount(); i < max; i++) {
      Component childWrapper = container.getComponent(i);
      Component childComponent = childWrapper instanceof Wrapper ? ((Wrapper)childWrapper).getTargetComponent() : childWrapper;
      boolean weighted1 = newComponent instanceof Weighted;
      boolean weighted2 = childComponent instanceof Weighted;
      if (!weighted2) continue;
      if (!weighted1) return i;
      double w1 = ((Weighted)newComponent).getWeight();
      double w2 = ((Weighted)childComponent).getWeight();
      if (w1 < w2) return i;
    }
    return -1;
  }
  /**
   * Overrides the tab title used for the given editor; the tab is updated
   * immediately when a tabbed pane is present.
   */
  public void setDisplayName(@NotNull FileEditor editor, @NlsContexts.TabTitle @NotNull String name) {
    int index = ContainerUtil.indexOfIdentity(ContainerUtil.immutableList(myEditors), editor);
    assert index != -1;
    myDisplayNames.put(editor, name);
    if (myTabbedPaneWrapper != null) {
      myTabbedPaneWrapper.setTitleAt(index, name);
    }
  }
  /**
   * @return the display name previously set via {@link #setDisplayName}, or the
   * editor's own name when none was set
   */
  @NotNull
  protected @NlsContexts.TabTitle String getDisplayName(@NotNull FileEditor editor) {
    return ObjectUtils.notNull(myDisplayNames.get(editor), editor.getName());
  }
  /**
   * @return currently selected myEditor.
   */
  @NotNull
  FileEditor getSelectedEditor() {
    return getSelectedWithProvider().getFileEditor();
  }
  /**
   * @return currently selected myEditor with its provider.
   */
  @NotNull
  public FileEditorWithProvider getSelectedWithProvider() {
    LOG.assertTrue(myEditors.length > 0, myEditors.length);
    if (myEditors.length == 1) {
      LOG.assertTrue(myTabbedPaneWrapper == null);
      return new FileEditorWithProvider(myEditors[0], myProviders[0]);
    }
    else {
      // we have to get myEditor from tabbed pane
      LOG.assertTrue(myTabbedPaneWrapper != null);
      int index = myTabbedPaneWrapper.getSelectedIndex();
      if (index == -1) {
        index = 0;
      }
      LOG.assertTrue(index >= 0, index);
      LOG.assertTrue(index < myEditors.length, index);
      return new FileEditorWithProvider(myEditors[index], myProviders[index]);
    }
  }
  /**
   * @deprecated use {@link #getSelectedWithProvider()}
   */
  @Deprecated
  @ApiStatus.ScheduledForRemoval(inVersion = "2021.3")
  @NotNull
  public Pair<FileEditor, FileEditorProvider> getSelectedEditorWithProvider() {
    FileEditorWithProvider info = getSelectedWithProvider();
    return Pair.create(info.getFileEditor(), info.getProvider());
  }
  // Selects the editor tab at the given index; no-op for a single-editor composite.
  void setSelectedEditor(final int index) {
    if (myEditors.length == 1) {
      // nothing to do
      LOG.assertTrue(myTabbedPaneWrapper == null);
    }
    else {
      LOG.assertTrue(myTabbedPaneWrapper != null);
      myTabbedPaneWrapper.setSelectedIndex(index);
    }
  }
  /**
   * @return component which represents set of file editors in the UI
   */
  @NotNull
  public JComponent getComponent() {
    return myComponent;
  }
  /**
   * @return component which represents the component that is supposed to be focusable
   */
  @Nullable
  public JComponent getFocusComponent() {
    return myComponent.myFocusComponent.get();
  }
  /**
   * @return {@code true} if the composite contains at least one modified myEditor
   */
  public boolean isModified() {
    return ContainerUtil.exists(getEditors(), editor -> editor.isModified());
  }
  /**
   * Handles changes of selected myEditor
   */
  private final class MyChangeListener implements ChangeListener{
    @Override
    public void stateChanged(ChangeEvent e) {
      FileEditor oldSelectedEditor = mySelectedEditor;
      LOG.assertTrue(oldSelectedEditor != null);
      int selectedIndex = myTabbedPaneWrapper.getSelectedIndex();
      LOG.assertTrue(selectedIndex != -1);
      mySelectedEditor = myEditors[selectedIndex];
      fireSelectedEditorChanged(oldSelectedEditor, mySelectedEditor);
    }
  }
  public static boolean isEditorComposite(@NotNull Component component) {
    return component instanceof MyComponent;
  }
  // Root Swing component of the composite. Delegates focus requests to the current
  // focus component and exposes editor/file context through DataProvider.
  // (Deliberately a non-static inner class: it reads myFile and getSelectedEditor().)
  private class MyComponent extends JPanel implements DataProvider{
    private @NotNull Supplier<? extends JComponent> myFocusComponent;
    MyComponent(@NotNull JComponent realComponent, @NotNull Supplier<? extends JComponent> focusComponent) {
      super(new BorderLayout());
      setFocusable(false);
      myFocusComponent = focusComponent;
      add(realComponent, BorderLayout.CENTER);
    }
    // Swaps in a new center component (used when the composite switches to tabs).
    void setComponent(JComponent newComponent) {
      add(newComponent, BorderLayout.CENTER);
      myFocusComponent = () -> newComponent;
    }
    @Override
    public boolean requestFocusInWindow() {
      JComponent focusComponent = myFocusComponent.get();
      return focusComponent != null && focusComponent.requestFocusInWindow();
    }
    @Override
    public void requestFocus() {
      JComponent focusComponent = myFocusComponent.get();
      if (focusComponent != null) {
        IdeFocusManager.getGlobalInstance()
          .doWhenFocusSettlesDown(() -> IdeFocusManager.getGlobalInstance().requestFocus(focusComponent, true));
      }
    }
    @Override
    public boolean requestDefaultFocus() {
      JComponent focusComponent = myFocusComponent.get();
      return focusComponent != null && focusComponent.requestDefaultFocus();
    }
    @Override
    public final Object getData(@NotNull String dataId) {
      if (PlatformDataKeys.FILE_EDITOR.is(dataId)) {
        return getSelectedEditor();
      }
      if (CommonDataKeys.VIRTUAL_FILE.is(dataId)) {
        return myFile.isValid() ? myFile : null;
      }
      if (CommonDataKeys.VIRTUAL_FILE_ARRAY.is(dataId)) {
        return myFile.isValid() ? new VirtualFile[]{myFile} : null;
      }
      // otherwise let the focused component provide data, if it can
      JComponent component = getPreferredFocusedComponent();
      if (component instanceof DataProvider && component != this) {
        return ((DataProvider)component).getData(dataId);
      }
      return null;
    }
  }
  @Override
  public void dispose() {
    for (FileEditor editor : myEditors) {
      if (!Disposer.isDisposed(editor)) {
        Disposer.dispose(editor);
      }
    }
    myFocusWatcher.deinstall(myFocusWatcher.getTopComponent());
  }
  // Appends an editor; converts the composite to a tabbed pane when it previously
  // hosted a single editor. Must be called on the EDT.
  private void addEditor(@NotNull FileEditor editor) {
    ApplicationManager.getApplication().assertIsDispatchThread();
    //noinspection NonAtomicOperationOnVolatileField : field is modified only in EDT
    myEditors = ArrayUtil.append(myEditors, editor);
    FileEditor.FILE_KEY.set(editor, myFile);
    if (myTabbedPaneWrapper == null) {
      myTabbedPaneWrapper = createTabbedPaneWrapper(myEditors, myComponent);
      myComponent.setComponent(myTabbedPaneWrapper.getComponent());
    }
    else {
      JComponent component = createEditorComponent(editor);
      myTabbedPaneWrapper.addTab(getDisplayName(editor), component);
    }
    myFocusWatcher.deinstall(myFocusWatcher.getTopComponent());
    myFocusWatcher.install(myComponent);
  }
  // Container for the per-editor top/bottom side panels; its background matches the
  // gutter (or the default editor background when the new editor tabs UI is on).
  private static final class TopBottomPanel extends JBPanelWithEmptyText {
    private TopBottomPanel() {
      setLayout(new BoxLayout(this, BoxLayout.Y_AXIS));
    }
    @Override
    public Color getBackground() {
      EditorColorsScheme globalScheme = EditorColorsManager.getInstance().getGlobalScheme();
      if (ExperimentalUI.isNewEditorTabs()) {
        return globalScheme.getDefaultBackground();
      }
      Color color = globalScheme.getColor(EditorColors.GUTTER_BACKGROUND);
      return color == null ? EditorColors.GUTTER_BACKGROUND.getDefaultColor() : color;
    }
  }
  // Separator border drawn between an editor and its top/bottom side components;
  // the line color is taken from the current editor color scheme.
  @NotNull
  private static SideBorder createTopBottomSideBorder(boolean top) {
    return new SideBorder(null, top ? SideBorder.BOTTOM : SideBorder.TOP) {
      @Override
      public Color getLineColor() {
        EditorColorsScheme scheme = EditorColorsManager.getInstance().getGlobalScheme();
        if (ExperimentalUI.isNewEditorTabs()) {
          return scheme.getDefaultBackground();
        }
        Color result = scheme.getColor(top ? EditorColors.SEPARATOR_ABOVE_COLOR : EditorColors.SEPARATOR_BELOW_COLOR);
        if (result == null) result = scheme.getColor(EditorColors.TEARLINE_COLOR);
        return result == null ? JBColor.BLACK : result;
      }
    };
  }
  // Snapshots the full state of every editor into a light HistoryEntry.
  @NotNull HistoryEntry currentStateAsHistoryEntry() {
    final FileEditor[] editors = getEditors();
    final FileEditorState[] states = new FileEditorState[editors.length];
    for (int j = 0; j < states.length; j++) {
      states[j] = editors[j].getState(FileEditorStateLevel.FULL);
      LOG.assertTrue(states[j] != null);
    }
    final int selectedProviderIndex = ArrayUtil.find(editors, getSelectedEditor());
    LOG.assertTrue(selectedProviderIndex != -1);
    final FileEditorProvider[] providers = getProviders();
    return HistoryEntry.createLight(getFile(), providers, states, providers[selectedProviderIndex]);
  }
  public void addEditor(@NotNull FileEditor editor, @NotNull FileEditorProvider provider) {
    addEditor(editor);
    myProviders = ArrayUtil.append(myProviders, provider);
  }
}
| |
package edu.rosehulman.android.directory.auth;
import java.io.IOException;
import java.util.Locale;

import org.json.JSONException;

import android.accounts.Account;
import android.accounts.AccountAuthenticatorResponse;
import android.accounts.AccountManager;
import android.app.Activity;
import android.app.ProgressDialog;
import android.content.Context;
import android.content.DialogInterface;
import android.content.Intent;
import android.os.AsyncTask;
import android.os.Bundle;
import android.util.Log;
import android.view.View;
import android.view.View.OnClickListener;
import android.widget.TextView;
import android.widget.Toast;

import com.actionbarsherlock.app.SherlockActivity;
import com.actionbarsherlock.view.MenuItem;

import edu.rosehulman.android.directory.C;
import edu.rosehulman.android.directory.R;
import edu.rosehulman.android.directory.model.BannerAuthResponse;
import edu.rosehulman.android.directory.service.ClientException;
import edu.rosehulman.android.directory.service.MobileDirectoryService;
import edu.rosehulman.android.directory.service.ServerException;
import edu.rosehulman.android.directory.tasks.TaskManager;
/**
* Activity used to register users for the beta program
*/
public class LoginActivity extends SherlockActivity {
public static final String ACTION_NEW_ACCOUNT = "NewAccount";
public static final String ACTION_UPDATE_ACCOUNT = "UpdateAccount";
public static final String KEY_ACCOUNT = "Account";
/**
 * Builds a launch intent for creating a brand-new account; the authenticator
 * response is attached so the account framework can receive the result.
 */
public static Intent createIntentForNewAccount(Context context, AccountAuthenticatorResponse response) {
    return new Intent(context, LoginActivity.class)
            .setAction(ACTION_NEW_ACCOUNT)
            .putExtra(AccountManager.KEY_ACCOUNT_AUTHENTICATOR_RESPONSE, response);
}
/**
 * Builds a launch intent for re-authenticating an existing account; both the
 * authenticator response and the account being updated ride along as extras.
 */
public static Intent createIntentForUpdateAccount(Context context, AccountAuthenticatorResponse response, Account account) {
    return new Intent(context, LoginActivity.class)
            .setAction(ACTION_UPDATE_ACCOUNT)
            .putExtra(AccountManager.KEY_ACCOUNT_AUTHENTICATOR_RESPONSE, response)
            .putExtra(KEY_ACCOUNT, account);
}
private TaskManager taskManager = new TaskManager();
private TextView txtUsername;
private TextView txtPassword;
private AccountAuthenticatorResponse mResponse;
private Account mAccount;
/**
 * Initializes the login UI and validates the launch intent. Finishes immediately
 * with RESULT_CANCELED when no authenticator response was supplied or when the
 * action is not one of the supported account actions.
 */
@Override
public void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.login);
    getSupportActionBar().setHomeButtonEnabled(true);
    txtUsername = (TextView)findViewById(R.id.username);
    txtPassword = (TextView)findViewById(R.id.password);
    Intent intent = getIntent();
    // an authenticator response is mandatory; without it we cannot report a result
    if (!intent.hasExtra(AccountManager.KEY_ACCOUNT_AUTHENTICATOR_RESPONSE)) {
        setResult(RESULT_CANCELED);
        finish();
        return;
    }
    Bundle extras = intent.getExtras();
    mResponse = extras.getParcelable(AccountManager.KEY_ACCOUNT_AUTHENTICATOR_RESPONSE);
    if (ACTION_NEW_ACCOUNT.equals(intent.getAction())) {
        // new account: defaults are fine; the user types both username and password
    } else if (ACTION_UPDATE_ACCOUNT.equals(intent.getAction())) {
        // updating credentials: lock the username field to the existing account
        mAccount = extras.getParcelable(KEY_ACCOUNT);
        txtUsername.setText(mAccount.name);
        txtUsername.setEnabled(false);
        ((TextView)findViewById(R.id.title)).setText(R.string.update_login_message);
    } else {
        mResponse.onError(AccountManager.ERROR_CODE_BAD_ARGUMENTS, "Invalid action");
        setResult(RESULT_CANCELED);
        finish();
        return;
    }
    findViewById(R.id.back).setOnClickListener(new OnClickListener() {
        public void onClick(View v) {
            btnBack_onClick();
        }
    });
    findViewById(R.id.login).setOnClickListener(new OnClickListener() {
        public void onClick(View v) {
            btnLogin_onClick();
        }
    });
}
/** Aborts any in-flight login tasks so they do not outlive the visible activity. */
@Override
public void onPause() {
    super.onPause();
    taskManager.abortTasks();
}
/**
 * Handles action-bar items: the home/up affordance cancels the login flow and
 * reports the cancellation back to the account framework; anything else is
 * delegated to the superclass.
 */
@Override
public boolean onOptionsItemSelected(MenuItem item) {
    if (item.getItemId() != android.R.id.home) {
        return super.onOptionsItemSelected(item);
    }
    setResult(Activity.RESULT_CANCELED);
    mResponse.onError(AccountManager.ERROR_CODE_CANCELED, "Leaving login task");
    finish();
    return true;
}
/** Back press cancels the login flow and notifies the account framework before leaving. */
@Override
public void onBackPressed() {
    setResult(Activity.RESULT_CANCELED);
    mResponse.onError(AccountManager.ERROR_CODE_CANCELED, "Leaving login task");
    super.onBackPressed();
}
/** Cancels the login flow: reports the cancellation and finishes this activity. */
private void btnBack_onClick() {
    //remove ourselves from the app stack
    setResult(Activity.RESULT_CANCELED);
    mResponse.onError(AccountManager.ERROR_CODE_CANCELED, "Leaving login task");
    finish();
}
/**
 * Validates the entered credentials and kicks off an asynchronous {@link LoginTask}.
 * Shows a toast and aborts when either field is empty.
 */
private void btnLogin_onClick() {
    //get our registration data
    // Locale.ROOT keeps the lowercasing locale-independent; the default locale
    // (e.g. Turkish) would map 'I' to a dotless 'ı' and corrupt the username.
    String username = txtUsername.getText().toString().toLowerCase(Locale.ROOT);
    String password = txtPassword.getText().toString();
    //make sure required fields are populated
    if (username.isEmpty()) {
        Toast.makeText(this, "A username is required", Toast.LENGTH_SHORT).show();
        return;
    }
    if (password.isEmpty()) {
        Toast.makeText(this, "A password is required", Toast.LENGTH_SHORT).show();
        return;
    }
    //start the registration process
    LoginTask task = new LoginTask(username, password);
    taskManager.addTask(task);
    task.execute();
}
/**
 * Persists a successful authentication into the {@link AccountManager} and
 * reports the outcome to the pending authenticator response.
 *
 * For {@code ACTION_NEW_ACCOUNT} a new account is created (failing the
 * response and finishing with CANCELED if creation is rejected); for
 * {@code ACTION_UPDATE_ACCOUNT} the existing account's password/token are
 * refreshed. In both success paths the auth token plus the locally generated
 * key part and IV (needed later to decrypt the stored password) are saved as
 * account user data, then the activity finishes with RESULT_OK.
 */
private void processAuthentication(String username, String encryptedPassword, String keyPart, String iv, BannerAuthResponse auth) {
    AccountManager manager = AccountManager.get(LoginActivity.this);
    String action = getIntent().getAction();
    if (ACTION_NEW_ACCOUNT.equals(action)) {
        Account account = new Account(username, AccountAuthenticator.ACCOUNT_TYPE);
        boolean accountCreated = manager.addAccountExplicitly(account, encryptedPassword, null);
        if (!accountCreated) {
            mResponse.onError(AccountManager.ERROR_CODE_BAD_REQUEST, "Failed to create account");
            setResult(RESULT_CANCELED);
            finish();
            return;
        }
        // Account created: hand the result back to the authenticator, then
        // stash the token and the key material alongside the account.
        // (Previously this was wrapped in a redundant `if (accountCreated)`
        // that could never be false after the early return above.)
        Bundle result = new Bundle();
        result.putString(AccountManager.KEY_ACCOUNT_NAME, username);
        result.putString(AccountManager.KEY_ACCOUNT_TYPE, AccountAuthenticator.ACCOUNT_TYPE);
        mResponse.onResult(result);
        manager.setAuthToken(account, AccountAuthenticator.TOKEN_TYPE, auth.token);
        manager.setUserData(account, AccountAuthenticator.USER_KEY_PART, keyPart);
        manager.setUserData(account, AccountAuthenticator.USER_IV, iv);
    } else if (ACTION_UPDATE_ACCOUNT.equals(action)) {
        Bundle result = new Bundle();
        result.putString(AccountManager.KEY_ACCOUNT_NAME, mAccount.name);
        result.putString(AccountManager.KEY_ACCOUNT_TYPE, AccountAuthenticator.ACCOUNT_TYPE);
        result.putString(AccountManager.KEY_AUTHTOKEN, auth.token);
        result.putLong(AccountAuthenticator.KEY_EXPIRATION_TIME, auth.expiration.getTime());
        mResponse.onResult(result);
        manager.setPassword(mAccount, encryptedPassword);
        manager.setAuthToken(mAccount, AccountAuthenticator.TOKEN_TYPE, auth.token);
        manager.setUserData(mAccount, AccountAuthenticator.USER_KEY_PART, keyPart);
        manager.setUserData(mAccount, AccountAuthenticator.USER_IV, iv);
    }
    //remove ourselves from the app stack
    setResult(Activity.RESULT_OK);
    finish();
}
private class LoginTask extends AsyncTask<Void, Integer, BannerAuthResponse> {
private ProgressDialog dialog;
private String username;
private String password;
private String encryptedPassword;
private String keyPart;
private String iv;
private boolean serverError;
public LoginTask(String username, String password) {
this.username = username;
this.password = password;
}
@Override
protected void onPreExecute() {
dialog = new ProgressDialog(LoginActivity.this);
dialog.setTitle(null);
dialog.setMessage("Logging in...");
dialog.setIndeterminate(true);
dialog.setCancelable(true);
dialog.setOnCancelListener(new DialogInterface.OnCancelListener() {
@Override
public void onCancel(DialogInterface dialog) {
cancel(true);
}
});
dialog.show();
}
@Override
protected BannerAuthResponse doInBackground(Void... args) {
MobileDirectoryService service = new MobileDirectoryService();
BannerAuthResponse response = null;
serverError = false;
for (int attempt = 2; ; attempt++) {
try {
response = service.login(username, password);
if (response == null)
return null;
keyPart = Security.generateKeyPart();
iv = Security.generateIV();
encryptedPassword = Security.encrypt(LoginActivity.this, keyPart, iv, password);
return response;
} catch (ClientException e) {
//invalid username or password
return null;
} catch (ServerException e) {
Log.e(C.TAG, "Server is not accepting authentication requests", e);
serverError = true;
return null;
} catch (JSONException e) {
Log.e(C.TAG, "An error occured while parsing the JSON response", e);
serverError = true;
return null;
} catch (IOException e) {
Log.e(C.TAG, "Failed to authenticate user, retrying...", e);
}
if (isCancelled())
return null;
try {
Thread.sleep(2000);
} catch (InterruptedException ex) {}
publishProgress(attempt);
}
}
@Override
protected void onProgressUpdate(Integer... status) {
int attempt = status[0];
dialog.setMessage(String.format("Logging in (attempt %d)...", attempt));
}
@Override
protected void onCancelled(BannerAuthResponse result) {
dialog.dismiss();
}
@Override
protected void onPostExecute(BannerAuthResponse auth) {
dialog.dismiss();
if (auth == null) {
if (serverError) {
Toast.makeText(LoginActivity.this, "Authentication service is rejecting requests. Please try again later.", Toast.LENGTH_SHORT).show();
} else {
Toast.makeText(LoginActivity.this, "Invalid username or password", Toast.LENGTH_SHORT).show();
}
return;
}
processAuthentication(username, encryptedPassword, keyPart, iv, auth);
}
}
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.distributed.shared;
import java.io.File;
import java.lang.reflect.Field;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Predicate;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import com.google.common.util.concurrent.Futures;
import org.junit.Assert;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.distributed.api.ICluster;
import org.apache.cassandra.distributed.api.IInstance;
import org.apache.cassandra.distributed.api.IInstanceConfig;
import org.apache.cassandra.distributed.api.IInvokableInstance;
import org.apache.cassandra.distributed.api.IMessageFilters;
import org.apache.cassandra.distributed.api.NodeToolResult;
import org.apache.cassandra.distributed.impl.AbstractCluster;
import org.apache.cassandra.distributed.impl.InstanceConfig;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.FBUtilities;
import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
import static org.apache.cassandra.config.CassandraRelevantProperties.BOOTSTRAP_SCHEMA_DELAY_MS;
import static org.assertj.core.api.Assertions.assertThat;
/**
* Utilities for working with jvm-dtest clusters.
*
* This class is marked as Isolated as it relies on lambdas, which are in a package that is marked as shared, so need to
* tell jvm-dtest to not share this class.
*
* This class should never be called from within the cluster, always in the App ClassLoader.
*/
@Isolated
public class ClusterUtils
{
    /**
     * Start the instance with the given System Properties, after the instance has started, the properties will be cleared.
     */
    public static <I extends IInstance> I start(I inst, Consumer<WithProperties> fn)
    {
        return start(inst, (ignore, prop) -> fn.accept(prop));
    }

    /**
     * Start the instance with the given System Properties, after the instance has started, the properties will be cleared.
     */
    public static <I extends IInstance> I start(I inst, BiConsumer<I, WithProperties> fn)
    {
        try (WithProperties properties = new WithProperties())
        {
            fn.accept(inst, properties);
            inst.startup();
            return inst;
        }
    }

    /**
     * Stop an instance in a blocking manner.
     *
     * The main difference between this and {@link IInstance#shutdown()} is that the wait on the future will catch
     * the exceptions and throw as runtime.
     */
    public static void stopUnchecked(IInstance i)
    {
        Futures.getUnchecked(i.shutdown());
    }

    /**
     * Stops an instance abruptly. This is done by blocking all messages to/from so all other instances are unable
     * to communicate, then stopping the instance gracefully.
     *
     * The assumption is that hard stopping inbound and outbound messages will appear to the cluster as if the instance
     * was stopped via kill -9; this does not hold true if the instance is restarted as it knows it was properly shutdown.
     *
     * @param cluster to filter messages to
     * @param inst to shut down
     */
    public static <I extends IInstance> void stopAbrupt(ICluster<I> cluster, I inst)
    {
        // block all messages to/from the node going down to make sure a clean shutdown doesn't happen
        IMessageFilters.Filter to = cluster.filters().allVerbs().to(inst.config().num()).drop();
        IMessageFilters.Filter from = cluster.filters().allVerbs().from(inst.config().num()).drop();
        try
        {
            stopUnchecked(inst);
        }
        finally
        {
            from.off();
            to.off();
        }
    }

    /**
     * Stop all the instances in the cluster. This function is different from {@link ICluster#close()} as it doesn't
     * clean up the cluster state, it only stops all the instances.
     */
    public static <I extends IInstance> void stopAll(ICluster<I> cluster)
    {
        cluster.stream().forEach(ClusterUtils::stopUnchecked);
    }

    /**
     * Create a new instance and add it to the cluster, without starting it.
     *
     * @param cluster to add to
     * @param other config to copy from
     * @param fn function to add to the config before starting
     * @param <I> instance type
     * @return the instance added
     */
    public static <I extends IInstance> I addInstance(AbstractCluster<I> cluster,
                                                      IInstanceConfig other,
                                                      Consumer<IInstanceConfig> fn)
    {
        return addInstance(cluster, other.localDatacenter(), other.localRack(), fn);
    }

    /**
     * Create a new instance and add it to the cluster, without starting it.
     *
     * @param cluster to add to
     * @param dc the instance should be in
     * @param rack the instance should be in
     * @param <I> instance type
     * @return the instance added
     */
    public static <I extends IInstance> I addInstance(AbstractCluster<I> cluster,
                                                      String dc, String rack)
    {
        return addInstance(cluster, dc, rack, ignore -> {});
    }

    /**
     * Create a new instance and add it to the cluster, without starting it.
     *
     * @param cluster to add to
     * @param dc the instance should be in
     * @param rack the instance should be in
     * @param fn function to add to the config before starting
     * @param <I> instance type
     * @return the instance added
     */
    public static <I extends IInstance> I addInstance(AbstractCluster<I> cluster,
                                                      String dc, String rack,
                                                      Consumer<IInstanceConfig> fn)
    {
        Objects.requireNonNull(dc, "dc");
        Objects.requireNonNull(rack, "rack");
        InstanceConfig config = cluster.newInstanceConfig();
        //TODO adding new instances should be cleaner, currently requires you create the cluster with all
        // instances known about (at least to NetworkTopology and TokenStrategy)
        // this is very hidden, so should be more explicit
        config.networkTopology().put(config.broadcastAddress(), NetworkTopology.dcAndRack(dc, rack));
        fn.accept(config);
        return cluster.bootstrap(config);
    }

    /**
     * Create and start a new instance that replaces an existing instance.
     *
     * The instance will be in the same datacenter and rack as the existing instance.
     *
     * @param cluster to add to
     * @param toReplace instance to replace
     * @param <I> instance type
     * @return the instance added
     */
    public static <I extends IInstance> I replaceHostAndStart(AbstractCluster<I> cluster, IInstance toReplace)
    {
        return replaceHostAndStart(cluster, toReplace, ignore -> {});
    }

    /**
     * Create and start a new instance that replaces an existing instance.
     *
     * The instance will be in the same datacenter and rack as the existing instance.
     *
     * @param cluster to add to
     * @param toReplace instance to replace
     * @param fn lambda to add additional properties
     * @param <I> instance type
     * @return the instance added
     */
    public static <I extends IInstance> I replaceHostAndStart(AbstractCluster<I> cluster,
                                                              IInstance toReplace,
                                                              Consumer<WithProperties> fn)
    {
        return replaceHostAndStart(cluster, toReplace, (ignore, prop) -> fn.accept(prop));
    }

    /**
     * Create and start a new instance that replaces an existing instance.
     *
     * The instance will be in the same datacenter and rack as the existing instance.
     *
     * @param cluster to add to
     * @param toReplace instance to replace
     * @param fn lambda to add additional properties or modify instance
     * @param <I> instance type
     * @return the instance added
     */
    public static <I extends IInstance> I replaceHostAndStart(AbstractCluster<I> cluster,
                                                              IInstance toReplace,
                                                              BiConsumer<I, WithProperties> fn)
    {
        IInstanceConfig toReplaceConf = toReplace.config();
        I inst = addInstance(cluster, toReplaceConf, c -> c.set("auto_bootstrap", true));
        return start(inst, properties -> {
            // lower this so the replacement waits less time
            properties.setProperty("cassandra.broadcast_interval_ms", Long.toString(TimeUnit.SECONDS.toMillis(30)));
            // default is 30s, lowering as it should be faster
            properties.setProperty("cassandra.ring_delay_ms", Long.toString(TimeUnit.SECONDS.toMillis(10)));
            properties.set(BOOTSTRAP_SCHEMA_DELAY_MS, TimeUnit.SECONDS.toMillis(10));
            // state which node to replace
            properties.setProperty("cassandra.replace_address_first_boot", toReplace.config().broadcastAddress().getAddress().getHostAddress());
            fn.accept(inst, properties);
        });
    }

    /**
     * Calls {@link org.apache.cassandra.locator.TokenMetadata#sortedTokens()}, returning as a list of strings.
     */
    public static List<String> getTokenMetadataTokens(IInvokableInstance inst)
    {
        return inst.callOnInstance(() ->
                                   StorageService.instance.getTokenMetadata()
                                                          .sortedTokens().stream()
                                                          .map(Object::toString)
                                                          .collect(Collectors.toList()));
    }

    /**
     * Get the single token owned by the instance, as seen by its own token metadata.
     *
     * @param inst to query
     * @return the instance's token as a string; asserts the instance owns exactly one token
     */
    public static String getLocalToken(IInvokableInstance inst)
    {
        return inst.callOnInstance(() -> {
            List<String> tokens = new ArrayList<>();
            for (Token t : StorageService.instance.getTokenMetadata().getTokens(FBUtilities.getBroadcastAddressAndPort()))
                tokens.add(t.getTokenValue().toString());
            assert tokens.size() == 1 : "getLocalToken assumes a single token, but multiple tokens found";
            return tokens.get(0);
        });
    }

    /**
     * Run the given action and block until {@code waitString} appears in the logs of every instance in the cluster.
     *
     * @throws TimeoutException if the string does not show up in some instance's log
     */
    public static <I extends IInstance> void runAndWaitForLogs(Runnable r, String waitString, AbstractCluster<I> cluster) throws TimeoutException
    {
        runAndWaitForLogs(r, waitString, cluster.stream().toArray(IInstance[]::new));
    }

    /**
     * Run the given action and block until {@code waitString} appears in the logs of each given instance.
     *
     * Log positions are marked before running so only output produced by the action is considered.
     *
     * @throws TimeoutException if the string does not show up in some instance's log
     */
    public static void runAndWaitForLogs(Runnable r, String waitString, IInstance... instances) throws TimeoutException
    {
        long[] marks = new long[instances.length];
        for (int i = 0; i < instances.length; i++)
            marks[i] = instances[i].logs().mark();
        r.run();
        for (int i = 0; i < instances.length; i++)
            instances[i].logs().watchFor(marks[i], waitString);
    }

    /**
     * Get the ring from the perspective of the instance.
     */
    public static List<RingInstanceDetails> ring(IInstance inst)
    {
        NodeToolResult results = inst.nodetoolResult("ring");
        results.asserts().success();
        return parseRing(results.getStdout());
    }

    /**
     * Make sure the target instance is in the ring.
     *
     * @param instance instance to check on
     * @param expectedInRing instance expected in the ring
     * @return the ring (if target is present)
     */
    public static List<RingInstanceDetails> assertInRing(IInstance instance, IInstance expectedInRing)
    {
        String targetAddress = getBroadcastAddressHostString(expectedInRing);
        List<RingInstanceDetails> ring = ring(instance);
        Optional<RingInstanceDetails> match = ring.stream().filter(d -> d.address.equals(targetAddress)).findFirst();
        // fixed: the previous message was copied from assertNotInRing and stated the opposite of what is asserted
        assertThat(match).as("Expected to find %s in the ring but did not", targetAddress).isPresent();
        return ring;
    }

    /**
     * Make sure the target instance's gossip state matches on the source instance
     *
     * @param instance instance to check on
     * @param expectedInRing instance expected in the ring
     * @param state expected gossip state
     * @return the ring (if target is present and has expected state)
     */
    public static List<RingInstanceDetails> assertRingState(IInstance instance, IInstance expectedInRing, String state)
    {
        String targetAddress = getBroadcastAddressHostString(expectedInRing);
        List<RingInstanceDetails> ring = ring(instance);
        List<RingInstanceDetails> match = ring.stream()
                                              .filter(d -> d.address.equals(targetAddress))
                                              .collect(Collectors.toList());
        assertThat(match)
        .isNotEmpty()
        .as("State was expected to be %s but was not", state)
        .anyMatch(r -> r.state.equals(state));
        return ring;
    }

    /**
     * Make sure the target instance is NOT in the ring.
     *
     * @param instance instance to check on
     * @param expectedInRing instance not expected in the ring
     * @return the ring (if target is not present)
     */
    public static List<RingInstanceDetails> assertNotInRing(IInstance instance, IInstance expectedInRing)
    {
        String targetAddress = getBroadcastAddressHostString(expectedInRing);
        List<RingInstanceDetails> ring = ring(instance);
        Optional<RingInstanceDetails> match = ring.stream().filter(d -> d.address.equals(targetAddress)).findFirst();
        Assert.assertEquals("Not expected to find " + targetAddress + " but was found", Optional.empty(), match);
        return ring;
    }

    /**
     * Poll the ring (up to 100 times, 1s apart) until the predicate matches; throws AssertionError with
     * {@code errorMessage} and the last observed ring on timeout.
     */
    private static List<RingInstanceDetails> awaitRing(IInstance src, String errorMessage, Predicate<List<RingInstanceDetails>> fn)
    {
        List<RingInstanceDetails> ring = null;
        for (int i = 0; i < 100; i++)
        {
            ring = ring(src);
            if (fn.test(ring))
            {
                return ring;
            }
            sleepUninterruptibly(1, TimeUnit.SECONDS);
        }
        throw new AssertionError(errorMessage + "\n" + ring);
    }

    /**
     * Wait for the target to be in the ring as seen by the source instance.
     *
     * @param instance instance to check on
     * @param expectedInRing instance to wait for
     * @return the ring
     */
    public static List<RingInstanceDetails> awaitRingJoin(IInstance instance, IInstance expectedInRing)
    {
        return awaitRingJoin(instance, expectedInRing.broadcastAddress().getAddress().getHostAddress());
    }

    /**
     * Wait for the target to be in the ring as seen by the source instance.
     *
     * @param instance instance to check on
     * @param expectedInRing instance address to wait for
     * @return the ring
     */
    public static List<RingInstanceDetails> awaitRingJoin(IInstance instance, String expectedInRing)
    {
        return awaitRing(instance, "Node " + expectedInRing + " did not join the ring...", ring -> {
            Optional<RingInstanceDetails> match = ring.stream().filter(d -> d.address.equals(expectedInRing)).findFirst();
            if (match.isPresent())
            {
                RingInstanceDetails details = match.get();
                return details.status.equals("Up") && details.state.equals("Normal");
            }
            return false;
        });
    }

    /**
     * Wait for the ring to only have instances that are Up and Normal.
     *
     * @param src instance to check on
     * @return the ring
     */
    public static List<RingInstanceDetails> awaitRingHealthy(IInstance src)
    {
        return awaitRing(src, "Timeout waiting for ring to become healthy",
                         ring ->
                         ring.stream().allMatch(ClusterUtils::isRingInstanceDetailsHealthy));
    }

    /**
     * Wait for the ring to have the target instance with the provided state.
     *
     * @param instance instance to check on
     * @param expectedInRing to look for
     * @param state expected
     * @return the ring
     */
    public static List<RingInstanceDetails> awaitRingState(IInstance instance, IInstance expectedInRing, String state)
    {
        // hoisted out of the predicate: the address does not change between polls
        String targetAddress = getBroadcastAddressHostString(expectedInRing);
        return awaitRing(instance, "Timeout waiting for " + expectedInRing + " to have state " + state,
                         ring ->
                         ring.stream()
                             .anyMatch(d -> d.address.equals(targetAddress) && d.state.equals(state)));
    }

    /**
     * Make sure the ring is only the expected instances. The source instance may not be in the ring, so this function
     * only relies on the expectedInsts param.
     *
     * @param instance instance to check on
     * @param expectedInRing expected instances in the ring
     * @return the ring (if condition is true)
     */
    public static List<RingInstanceDetails> assertRingIs(IInstance instance, IInstance... expectedInRing)
    {
        return assertRingIs(instance, Arrays.asList(expectedInRing));
    }

    /**
     * Make sure the ring is only the expected instances. The source instance may not be in the ring, so this function
     * only relies on the expectedInsts param.
     *
     * @param instance instance to check on
     * @param expectedInRing expected instances in the ring
     * @return the ring (if condition is true)
     */
    public static List<RingInstanceDetails> assertRingIs(IInstance instance, Collection<? extends IInstance> expectedInRing)
    {
        Set<String> expectedRingAddresses = expectedInRing.stream()
                                                          .map(i -> i.config().broadcastAddress().getAddress().getHostAddress())
                                                          .collect(Collectors.toSet());
        return assertRingIs(instance, expectedRingAddresses);
    }

    /**
     * Make sure the ring is only the expected instances. The source instance may not be in the ring, so this function
     * only relies on the expectedInsts param.
     *
     * @param instance instance to check on
     * @param expectedRingAddresses expected instances addresses in the ring
     * @return the ring (if condition is true)
     */
    public static List<RingInstanceDetails> assertRingIs(IInstance instance, Set<String> expectedRingAddresses)
    {
        List<RingInstanceDetails> ring = ring(instance);
        Set<String> ringAddresses = ring.stream().map(d -> d.address).collect(Collectors.toSet());
        assertThat(ringAddresses)
        .as("Ring addresses did not match for instance %s", instance)
        .isEqualTo(expectedRingAddresses);
        return ring;
    }

    // An instance is considered healthy when nodetool ring reports it as Up/Normal.
    private static boolean isRingInstanceDetailsHealthy(RingInstanceDetails details)
    {
        return details.status.equals("Up") && details.state.equals("Normal");
    }

    /**
     * Parse `nodetool ring` stdout into per-instance details; lines that do not match the expected
     * layout (headers, blank lines) are skipped.
     */
    private static List<RingInstanceDetails> parseRing(String str)
    {
        // 127.0.0.3  rack0       Up     Normal  46.21 KB        100.00%             -1
        // /127.0.0.1:7012  Unknown     ?      Normal  ?               100.00%             -3074457345618258603
        // groups: 1=address, 2=rack, 3=status, 4=state, 5=token
        Pattern pattern = Pattern.compile("^(/?[0-9.:]+)\\s+(\\w+|\\?)\\s+(\\w+|\\?)\\s+(\\w+|\\?).*?(-?\\d+)\\s*$");
        List<RingInstanceDetails> details = new ArrayList<>();
        String[] lines = str.split("\n");
        for (String line : lines)
        {
            Matcher matcher = pattern.matcher(line);
            if (!matcher.find())
            {
                continue;
            }
            details.add(new RingInstanceDetails(matcher.group(1), matcher.group(2), matcher.group(3), matcher.group(4), matcher.group(5)));
        }
        return details;
    }

    /**
     * Poll gossip info (up to 100 times, 1s apart) until the predicate matches; throws AssertionError with
     * {@code errorMessage} and the last observed gossip state on timeout.
     */
    private static Map<String, Map<String, String>> awaitGossip(IInstance src, String errorMessage, Predicate<Map<String, Map<String, String>>> fn)
    {
        Map<String, Map<String, String>> gossip = null;
        for (int i = 0; i < 100; i++)
        {
            gossip = gossipInfo(src);
            if (fn.test(gossip))
            {
                return gossip;
            }
            sleepUninterruptibly(1, TimeUnit.SECONDS);
        }
        throw new AssertionError(errorMessage + "\n" + gossip);
    }

    /**
     * Wait for the target instance to have the desired status. Target status is checked via string contains so works
     * with 'NORMAL' but also can check tokens or full state.
     *
     * @param instance instance to check on
     * @param expectedInGossip instance to wait for
     * @param targetStatus for the instance
     * @return gossip info
     */
    public static Map<String, Map<String, String>> awaitGossipStatus(IInstance instance, IInstance expectedInGossip, String targetStatus)
    {
        return awaitGossip(instance, "Node " + expectedInGossip + " did not match state " + targetStatus, gossip -> {
            Map<String, String> state = gossip.get(getBroadcastAddressString(expectedInGossip));
            if (state == null)
                return false;
            // newer versions publish STATUS_WITH_PORT; fall back to the legacy STATUS key
            String status = state.get("STATUS_WITH_PORT");
            if (status == null)
                status = state.get("STATUS");
            if (status == null)
                return targetStatus == null;
            return status.contains(targetStatus);
        });
    }

    /**
     * Get the gossip information from the node. Currently only address, generation, and heartbeat are returned
     *
     * @param inst to check on
     * @return gossip info
     */
    public static Map<String, Map<String, String>> gossipInfo(IInstance inst)
    {
        NodeToolResult results = inst.nodetoolResult("gossipinfo");
        results.asserts().success();
        return parseGossipInfo(results.getStdout());
    }

    /**
     * Make sure the gossip info for the specific target has the expected generation and heartbeat
     *
     * @param instance to check on
     * @param expectedInGossip instance to check for
     * @param expectedGeneration expected generation
     * @param expectedHeartbeat expected heartbeat
     */
    public static void assertGossipInfo(IInstance instance,
                                        InetSocketAddress expectedInGossip, int expectedGeneration, int expectedHeartbeat)
    {
        String targetAddress = expectedInGossip.getAddress().toString();
        Map<String, Map<String, String>> gossipInfo = gossipInfo(instance);
        Map<String, String> gossipState = gossipInfo.get(targetAddress);
        if (gossipState == null)
            throw new NullPointerException("Unable to find gossip info for " + targetAddress + "; gossip info = " + gossipInfo);
        Assert.assertEquals(Long.toString(expectedGeneration), gossipState.get("generation"));
        Assert.assertEquals(Long.toString(expectedHeartbeat), gossipState.get("heartbeat")); //TODO do we really mix these two?
    }

    /**
     * Parse `nodetool gossipinfo` stdout into a map of instance address -> key/value gossip state.
     * A line starting with '/' introduces a new instance; subsequent "key:value" lines belong to it.
     */
    private static Map<String, Map<String, String>> parseGossipInfo(String str)
    {
        Map<String, Map<String, String>> map = new HashMap<>();
        String[] lines = str.split("\n");
        String currentInstance = null;
        for (String line : lines)
        {
            if (line.startsWith("/"))
            {
                // start of new instance
                currentInstance = line;
                continue;
            }
            Objects.requireNonNull(currentInstance);
            String[] kv = line.trim().split(":", 2);
            assert kv.length == 2 : "When splitting line '" + line + "' expected 2 parts but not true";
            Map<String, String> state = map.computeIfAbsent(currentInstance, ignore -> new HashMap<>());
            state.put(kv[0], kv[1]);
        }
        return map;
    }

    /**
     * Get the tokens assigned to the instance via config. This method does not work if the instance has learned
     * or generated its tokens.
     *
     * @param instance to get tokens from
     * @return non-empty list of tokens
     */
    public static List<String> getTokens(IInstance instance)
    {
        IInstanceConfig conf = instance.config();
        int numTokens = conf.getInt("num_tokens");
        Assert.assertEquals("Only single token is supported", 1, numTokens);
        String token = conf.getString("initial_token");
        Assert.assertNotNull("initial_token was not found", token);
        return Arrays.asList(token);
    }

    /**
     * Get all data directories for the given instance.
     *
     * @param instance to get data directories for
     * @return data directories
     */
    public static List<File> getDataDirectories(IInstance instance)
    {
        IInstanceConfig conf = instance.config();
        // this isn't safe as it assumes the implementation of InstanceConfig
        // might need to get smarter... some day...
        String[] ds = (String[]) conf.get("data_file_directories");
        List<File> files = new ArrayList<>(ds.length);
        for (int i = 0; i < ds.length; i++)
            files.add(new File(ds[i]));
        return files;
    }

    /**
     * Get the commit log directory for the given instance.
     *
     * @param instance to get the commit log directory for
     * @return commit log directory
     */
    public static File getCommitLogDirectory(IInstance instance)
    {
        IInstanceConfig conf = instance.config();
        // this isn't safe as it assumes the implementation of InstanceConfig
        // might need to get smarter... some day...
        String d = (String) conf.get("commitlog_directory");
        return new File(d);
    }

    /**
     * Get the hints directory for the given instance.
     *
     * @param instance to get the hints directory for
     * @return hints directory
     */
    public static File getHintsDirectory(IInstance instance)
    {
        IInstanceConfig conf = instance.config();
        // this isn't safe as it assumes the implementation of InstanceConfig
        // might need to get smarter... some day...
        String d = (String) conf.get("hints_directory");
        return new File(d);
    }

    /**
     * Get the saved caches directory for the given instance.
     *
     * @param instance to get the saved caches directory for
     * @return saved caches directory
     */
    public static File getSavedCachesDirectory(IInstance instance)
    {
        IInstanceConfig conf = instance.config();
        // this isn't safe as it assumes the implementation of InstanceConfig
        // might need to get smarter... some day...
        String d = (String) conf.get("saved_caches_directory");
        return new File(d);
    }

    /**
     * Get all writable directories for the given instance.
     *
     * @param instance to get directories for
     * @return all writable directories
     */
    public static List<File> getDirectories(IInstance instance)
    {
        List<File> out = new ArrayList<>();
        out.addAll(getDataDirectories(instance));
        out.add(getCommitLogDirectory(instance));
        out.add(getHintsDirectory(instance));
        out.add(getSavedCachesDirectory(instance));
        return out;
    }

    /**
     * Gets the name of the Partitioner for the given instance.
     *
     * @param instance to get partitioner from
     * @return partitioner name
     */
    public static String getPartitionerName(IInstance instance)
    {
        return (String) instance.config().get("partitioner");
    }

    /**
     * Changes the instance's address to the new address. This method should only be called while the instance is
     * down, else has undefined behavior.
     *
     * @param instance to update address for
     * @param address to set
     */
    public static void updateAddress(IInstance instance, String address)
    {
        updateAddress(instance.config(), address);
    }

    /**
     * Changes the instance's address to the new address. This method should only be called while the instance is
     * down, else has undefined behavior.
     *
     * @param conf to update address for
     * @param address to set
     */
    private static void updateAddress(IInstanceConfig conf, String address)
    {
        InetSocketAddress previous = conf.broadcastAddress();
        for (String key : Arrays.asList("broadcast_address", "listen_address", "broadcast_rpc_address", "rpc_address"))
            conf.set(key, address);

        // InstanceConfig caches InetSocketAddress -> InetAddressAndPort
        // this causes issues as startup now ignores config, so force reset it to pull from conf.
        ((InstanceConfig) conf).unsetBroadcastAddressAndPort(); //TODO remove the need to null out the cache...

        //TODO NetworkTopology class isn't flexible and doesn't handle adding/removing nodes well...
        // it also uses a HashMap which makes the class not thread safe... so mutating AFTER starting nodes
        // are a risk
        if (!conf.broadcastAddress().equals(previous))
        {
            conf.networkTopology().put(conf.broadcastAddress(), NetworkTopology.dcAndRack(conf.localDatacenter(), conf.localRack()));
            try
            {
                Field field = NetworkTopology.class.getDeclaredField("map");
                field.setAccessible(true);
                @SuppressWarnings("unchecked") // NetworkTopology.map is known to have this shape; reflection loses the generics
                Map<InetSocketAddress, NetworkTopology.DcAndRack> map = (Map<InetSocketAddress, NetworkTopology.DcAndRack>) field.get(conf.networkTopology());
                map.remove(previous);
            }
            catch (NoSuchFieldException | IllegalAccessException e)
            {
                throw new AssertionError(e);
            }
        }
    }

    /**
     * Get the broadcast address host address only (ex. 127.0.0.1)
     */
    private static String getBroadcastAddressHostString(IInstance target)
    {
        return target.config().broadcastAddress().getAddress().getHostAddress();
    }

    /**
     * Get the broadcast address in host:port format (ex. 127.0.0.1:7190)
     */
    public static String getBroadcastAddressHostWithPortString(IInstance target)
    {
        InetSocketAddress address = target.config().broadcastAddress();
        return address.getAddress().getHostAddress() + ":" + address.getPort();
    }

    /**
     * Get the broadcast address InetAddress string (ex. localhost/127.0.0.1 or /127.0.0.1)
     */
    private static String getBroadcastAddressString(IInstance target)
    {
        return target.config().broadcastAddress().getAddress().toString();
    }

    /**
     * Immutable value object describing one line of `nodetool ring` output.
     */
    public static final class RingInstanceDetails
    {
        private final String address;
        private final String rack;
        private final String status;
        private final String state;
        private final String token;

        private RingInstanceDetails(String address, String rack, String status, String state, String token)
        {
            this.address = address;
            this.rack = rack;
            this.status = status;
            this.state = state;
            this.token = token;
        }

        public String getAddress()
        {
            return address;
        }

        public String getRack()
        {
            return rack;
        }

        public String getStatus()
        {
            return status;
        }

        public String getState()
        {
            return state;
        }

        public String getToken()
        {
            return token;
        }

        @Override
        public boolean equals(Object o)
        {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            RingInstanceDetails that = (RingInstanceDetails) o;
            return Objects.equals(address, that.address) &&
                   Objects.equals(rack, that.rack) &&
                   Objects.equals(status, that.status) &&
                   Objects.equals(state, that.state) &&
                   Objects.equals(token, that.token);
        }

        @Override
        public int hashCode()
        {
            return Objects.hash(address, rack, status, state, token);
        }

        public String toString()
        {
            return Arrays.asList(address, rack, status, state, token).toString();
        }
    }
}
| |
/*
* The MIT License
*
* Copyright (c) 2004-2009, Sun Microsystems, Inc., Kohsuke Kawaguchi, Red Hat, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package hudson.model;
import hudson.console.AnnotatedLargeText;
import hudson.util.StreamTaskListener;
import java.io.File;
import java.io.IOException;
import java.io.Reader;
import java.lang.ref.WeakReference;
import java.nio.charset.Charset;
import org.kohsuke.stapler.framework.io.LargeText;
import org.kohsuke.stapler.framework.io.ByteBuffer;
/**
* {@link Thread} for performing one-off task.
*
* <p>
* Designed to be used inside {@link TaskAction}.
*
*
*
* @author Kohsuke Kawaguchi
* @since 1.191
* @see TaskAction
*/
public abstract class TaskThread extends Thread {
    /**
     * @deprecated as of Hudson 1.350
     *      Use {@link #log}. It's the same object, in a better type.
     */
    @Deprecated
    private final LargeText text;
    /**
     * Represents the output from this task thread.
     */
    private final AnnotatedLargeText<TaskAction> log;
    /**
     * Represents the interface to produce output. Nulled out once the task
     * finishes so the listener can be garbage-collected.
     */
    private TaskListener listener;
    /**
     * The {@link TaskAction} this thread performs work for.
     */
    private final TaskAction owner;
    /**
     * True while {@link #perform(TaskListener)} is executing; exposed via {@link #isRunning()}.
     */
    private volatile boolean isRunning;

    /**
     *
     * @param output
     *      Determines where the output from this task thread goes.
     */
    protected TaskThread(TaskAction owner, ListenerAndText output) {
        //FIXME this failed to compile super(owner.getBuild().toString()+' '+owner.getDisplayName());
        //Please implement more general way how to get information about action owner,
        //if you want it in the thread's name.
        super(owner.getDisplayName());
        this.owner = owner;
        this.text = this.log = output.text;
        this.listener = output.listener;
        this.isRunning = true;
    }

    /**
     * Returns a {@link Reader} over everything written to the task output so far.
     */
    public Reader readAll() throws IOException {
        // this method can be invoked from another thread.
        return text.readAll();
    }

    /**
     * Registers that this {@link TaskThread} is run for the specified
     * {@link TaskAction}. This can be explicitly called from subtypes
     * to associate a single {@link TaskThread} across multiple tag actions.
     */
    protected final void associateWith(TaskAction action) {
        action.workerThread = this;
        action.log = new WeakReference<AnnotatedLargeText>(log);
    }

    /**
     * Starts the task execution asynchronously.
     */
    @Override
    public void start() {
        associateWith(owner);
        super.start();
    }

    /**
     * Whether {@link #perform(TaskListener)} is still executing.
     */
    public boolean isRunning() {
        return isRunning;
    }

    /**
     * Determines where the output of this {@link TaskThread} goes.
     * <p>
     * Subclass can override this to send the output to a file, for example.
     */
    protected ListenerAndText createListener() throws IOException {
        return ListenerAndText.forMemory();
    }

    @Override
    public final void run() {
        isRunning = true;
        try {
            perform(listener);
            listener.getLogger().println("Completed");
            // Only a successful run detaches the worker thread from the owner action.
            owner.workerThread = null;
        } catch (InterruptedException e) {
            // Restore the interrupt flag for any code observing this thread's status.
            Thread.currentThread().interrupt();
            listener.getLogger().println("Aborted");
        } catch (Exception e) {
            e.printStackTrace(listener.getLogger());
        } finally {
            // Drop the listener reference so it (and its underlying stream) can be collected.
            listener = null;
            isRunning = false;
        }
        log.markAsComplete();
    }

    /**
     * Do the actual work.
     *
     * @throws Exception
     *      The exception is recorded and reported as a failure.
     */
    protected abstract void perform(TaskListener listener) throws Exception;

    /**
     * Tuple of {@link TaskListener} and {@link AnnotatedLargeText}, representing
     * the interface for producing output and how to retrieve it later.
     */
    public static final class ListenerAndText {
        final TaskListener listener;
        final AnnotatedLargeText<TaskAction> text;

        public ListenerAndText(TaskListener listener, AnnotatedLargeText<TaskAction> text) {
            this.listener = listener;
            this.text = text;
        }

        /**
         * @deprecated as of Hudson 1.350
         *      Use {@link #forMemory(TaskAction)} and pass in the calling {@link TaskAction}
         */
        @Deprecated
        public static ListenerAndText forMemory() {
            return forMemory(null);
        }

        /**
         * @deprecated as of Hudson 1.350
         *      Use {@link #forFile(File, TaskAction)} and pass in the calling {@link TaskAction}
         */
        @Deprecated
        public static ListenerAndText forFile(File f) throws IOException {
            return forFile(f,null);
        }

        /**
         * Creates one that's backed by memory.
         */
        public static ListenerAndText forMemory(TaskAction context) {
            // StringWriter is synchronized
            ByteBuffer log = new ByteBuffer();
            return new ListenerAndText(
                new StreamTaskListener(log),
                new AnnotatedLargeText<TaskAction>(log,Charset.defaultCharset(),false,context)
            );
        }

        /**
         * Creates one that's backed by a file.
         */
        public static ListenerAndText forFile(File f, TaskAction context) throws IOException {
            return new ListenerAndText(
                new StreamTaskListener(f),
                new AnnotatedLargeText<TaskAction>(f,Charset.defaultCharset(),false,context)
            );
        }
    }
}
| |
/*
* Copyright (c) 2016, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.wso2.carbon.device.mgt.core.operation.mgt;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.wso2.carbon.context.CarbonContext;
import org.wso2.carbon.context.PrivilegedCarbonContext;
import org.wso2.carbon.device.mgt.common.Device;
import org.wso2.carbon.device.mgt.common.DeviceIdentifier;
import org.wso2.carbon.device.mgt.common.DeviceManagementException;
import org.wso2.carbon.device.mgt.common.EnrolmentInfo;
import org.wso2.carbon.device.mgt.common.InvalidDeviceException;
import org.wso2.carbon.device.mgt.common.MonitoringOperation;
import org.wso2.carbon.device.mgt.common.PaginationRequest;
import org.wso2.carbon.device.mgt.common.PaginationResult;
import org.wso2.carbon.device.mgt.common.TransactionManagementException;
import org.wso2.carbon.device.mgt.common.authorization.DeviceAccessAuthorizationException;
import org.wso2.carbon.device.mgt.common.group.mgt.DeviceGroupConstants;
import org.wso2.carbon.device.mgt.common.operation.mgt.Activity;
import org.wso2.carbon.device.mgt.common.operation.mgt.ActivityStatus;
import org.wso2.carbon.device.mgt.common.operation.mgt.Operation;
import org.wso2.carbon.device.mgt.common.operation.mgt.OperationManagementException;
import org.wso2.carbon.device.mgt.common.operation.mgt.OperationManager;
import org.wso2.carbon.device.mgt.common.policy.mgt.Policy;
import org.wso2.carbon.device.mgt.common.policy.mgt.ProfileFeature;
import org.wso2.carbon.device.mgt.common.push.notification.NotificationContext;
import org.wso2.carbon.device.mgt.common.push.notification.NotificationStrategy;
import org.wso2.carbon.device.mgt.common.push.notification.PushNotificationConfig;
import org.wso2.carbon.device.mgt.common.push.notification.PushNotificationExecutionFailedException;
import org.wso2.carbon.device.mgt.common.push.notification.PushNotificationProvider;
import org.wso2.carbon.device.mgt.common.spi.DeviceManagementService;
import org.wso2.carbon.device.mgt.core.DeviceManagementConstants;
import org.wso2.carbon.device.mgt.core.config.DeviceConfigurationManager;
import org.wso2.carbon.device.mgt.core.dao.DeviceDAO;
import org.wso2.carbon.device.mgt.core.dao.DeviceManagementDAOException;
import org.wso2.carbon.device.mgt.core.dao.DeviceManagementDAOFactory;
import org.wso2.carbon.device.mgt.core.dao.EnrollmentDAO;
import org.wso2.carbon.device.mgt.core.internal.DeviceManagementDataHolder;
import org.wso2.carbon.device.mgt.core.operation.mgt.dao.OperationDAO;
import org.wso2.carbon.device.mgt.core.operation.mgt.dao.OperationManagementDAOException;
import org.wso2.carbon.device.mgt.core.operation.mgt.dao.OperationManagementDAOFactory;
import org.wso2.carbon.device.mgt.core.operation.mgt.dao.OperationMappingDAO;
import org.wso2.carbon.device.mgt.core.operation.mgt.dao.util.OperationDAOUtil;
import org.wso2.carbon.device.mgt.core.operation.mgt.util.DeviceIDHolder;
import org.wso2.carbon.device.mgt.core.operation.mgt.util.OperationCreateTimeComparator;
import org.wso2.carbon.device.mgt.core.service.DeviceManagementProviderService;
import org.wso2.carbon.device.mgt.core.task.DeviceTaskManager;
import org.wso2.carbon.device.mgt.core.task.impl.DeviceTaskManagerImpl;
import org.wso2.carbon.device.mgt.core.util.DeviceManagerUtil;
import java.sql.SQLException;
import java.util.*;
/**
* This class implements all the functionality exposed as part of the OperationManager. Any transaction initiated
* upon persisting information related to operation state, etc has to be managed, demarcated and terminated via the
* methods available in OperationManagementDAOFactory.
*/
public class OperationManagerImpl implements OperationManager {
    private static final Log log = LogFactory.getLog(OperationManagerImpl.class);
    // Lifetime (ms) of a cached per-tenant notification strategy before it is re-resolved.
    private static final int CACHE_VALIDITY_PERIOD = 5 * 60 * 1000;
    // Notifier type for which no push-notification strategy is cached (see getNotificationStrategy).
    private static final String NOTIFIER_TYPE_LOCAL = "LOCAL";
    // Recorded as the initiator of operations created by scheduled tasks when no user is logged in.
    private static final String SYSTEM = "system";
    // Type-specific operation DAOs; the appropriate one is chosen per operation (see lookupOperationDAO).
    private OperationDAO commandOperationDAO;
    private OperationDAO configOperationDAO;
    private OperationDAO profileOperationDAO;
    private OperationDAO policyOperationDAO;
    // Maps operations to device enrolments.
    private OperationMappingDAO operationMappingDAO;
    // Generic operation DAO used for existence checks and per-device operation queries.
    private OperationDAO operationDAO;
    private DeviceDAO deviceDAO;
    private EnrollmentDAO enrollmentDAO;
    // Device type this manager serves; set only by the two-argument constructor.
    private String deviceType;
    // Service used to resolve the push-notification configuration; set only by the
    // two-argument constructor — NOTE(review): null when the no-arg constructor is used.
    private DeviceManagementService deviceManagementService;
    // Per-tenant cache: tenant ID -> resolved push-notification strategy.
    private Map<Integer, NotificationStrategy> notificationStrategies;
    // Per-tenant cache: tenant ID -> epoch millis when the strategy was last refreshed.
    private Map<Integer, Long> lastUpdatedTimeStamps;
public OperationManagerImpl() {
commandOperationDAO = OperationManagementDAOFactory.getCommandOperationDAO();
configOperationDAO = OperationManagementDAOFactory.getConfigOperationDAO();
profileOperationDAO = OperationManagementDAOFactory.getProfileOperationDAO();
policyOperationDAO = OperationManagementDAOFactory.getPolicyOperationDAO();
operationMappingDAO = OperationManagementDAOFactory.getOperationMappingDAO();
operationDAO = OperationManagementDAOFactory.getOperationDAO();
deviceDAO = DeviceManagementDAOFactory.getDeviceDAO();
enrollmentDAO = DeviceManagementDAOFactory.getEnrollmentDAO();
notificationStrategies = new HashMap<>();
lastUpdatedTimeStamps = new HashMap<>();
}
public OperationManagerImpl(String deviceType, DeviceManagementService deviceManagementService) {
this();
this.deviceType = deviceType;
this.deviceManagementService = deviceManagementService;
}
    /**
     * Resolves the push-notification strategy for the current tenant, using a local
     * per-tenant cache that is refreshed at most every {@link #CACHE_VALIDITY_PERIOD} ms.
     * Returns {@code null} when the notifier is LOCAL, unconfigured, or its provider is
     * not registered.
     * <p>
     * NOTE(review): {@code notificationStrategies}/{@code lastUpdatedTimeStamps} are plain
     * HashMaps; confirm whether concurrent tenant requests can reach this method — if so,
     * a concurrent map may be needed.
     */
    public NotificationStrategy getNotificationStrategy() {
        // Notification strategy can be set by the platform configurations. Therefore it is needed to
        // get tenant specific notification strategy dynamically in the runtime. However since this is
        // a resource intensive retrieval, we are maintaining tenant aware local cache here to keep device
        // type specific notification strategy.
        int tenantId = PrivilegedCarbonContext.getThreadLocalCarbonContext().getTenantId(false);
        long lastUpdatedTimeStamp = 0;
        if (lastUpdatedTimeStamps.containsKey(tenantId)){
            lastUpdatedTimeStamp = lastUpdatedTimeStamps.get(tenantId);
        }
        // Refresh the cached strategy once the validity window has elapsed (or on first use).
        if (Calendar.getInstance().getTimeInMillis() - lastUpdatedTimeStamp > CACHE_VALIDITY_PERIOD) {
            PushNotificationConfig pushNoteConfig = deviceManagementService.getPushNotificationConfig();
            if (pushNoteConfig != null && !NOTIFIER_TYPE_LOCAL.equals(pushNoteConfig.getType())) {
                PushNotificationProvider provider = DeviceManagementDataHolder.getInstance()
                        .getPushNotificationProviderRepository().getProvider(pushNoteConfig.getType());
                if (provider == null) {
                    log.error("No registered push notification provider found for the type '" +
                            pushNoteConfig.getType() + "' under tenant ID '" + tenantId + "'.");
                    return null;
                }
                notificationStrategies.put(tenantId, provider.getNotificationStrategy(pushNoteConfig));
            } else if (notificationStrategies.containsKey(tenantId)){
                // LOCAL or missing config: drop any previously cached strategy for this tenant.
                notificationStrategies.remove(tenantId);
            }
            lastUpdatedTimeStamps.put(tenantId, Calendar.getInstance().getTimeInMillis());
        }
        return notificationStrategies.get(tenantId);
    }
    /**
     * Creates the given operation for every valid and authorized device in {@code deviceIds}
     * inside a single transaction, and returns an {@link Activity} describing the outcome.
     * For NO_REPEAT operations, devices that already have a matching operation are only
     * re-notified, not re-mapped. Per-device activity statuses are attached only for
     * non-scheduled (admin-triggered) operations.
     *
     * @throws InvalidDeviceException      if none of the identifiers are valid.
     * @throws OperationManagementException on DAO or transaction failure.
     */
    @Override
    public Activity addOperation(Operation operation,
                                 List<DeviceIdentifier> deviceIds)
            throws OperationManagementException, InvalidDeviceException {
        if (log.isDebugEnabled()) {
            log.debug("operation:[" + operation.toString() + "]");
            for (DeviceIdentifier deviceIdentifier : deviceIds) {
                log.debug("device identifier id:[" + deviceIdentifier.getId() + "] type:[" +
                        deviceIdentifier.getType() + "]");
            }
        }
        try {
            // Split identifiers into valid and invalid; only valid ones go forward.
            DeviceIDHolder deviceValidationResult = DeviceManagerUtil.validateDeviceIdentifiers(deviceIds);
            List<DeviceIdentifier> validDeviceIds = deviceValidationResult.getValidDeviceIDList();
            if (validDeviceIds.size() > 0) {
                DeviceIDHolder deviceAuthorizationResult = this.authorizeDevices(operation, validDeviceIds);
                List<DeviceIdentifier> authorizedDeviceIds = deviceAuthorizationResult.getValidDeviceIDList();
                if (authorizedDeviceIds.size() <= 0) {
                    // Nothing to do: report INVALID/UNAUTHORIZED statuses back to the caller.
                    log.warn("User : " + getUser() + " is not authorized to perform operations on given device-list.");
                    Activity activity = new Activity();
                    //Send the operation statuses only for admin triggered operations
                    String deviceType = validDeviceIds.get(0).getType();
                    activity.setActivityStatus(this.getActivityStatus(deviceValidationResult, deviceAuthorizationResult,
                            deviceType));
                    return activity;
                }
                boolean isScheduledOperation = this.isTaskScheduledOperation(operation);
                // Scheduled-task operations with no logged-in user are attributed to SYSTEM.
                String initiatedBy = PrivilegedCarbonContext.getThreadLocalCarbonContext().getUsername();
                if (initiatedBy == null && isScheduledOperation) {
                    if(log.isDebugEnabled()) {
                        log.debug("initiatedBy : " + SYSTEM);
                    }
                    operation.setInitiatedBy(SYSTEM);
                } else {
                    if(log.isDebugEnabled()) {
                        log.debug("initiatedBy : " + initiatedBy);
                    }
                    operation.setInitiatedBy(initiatedBy);
                }
                OperationManagementDAOFactory.beginTransaction();
                org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation operationDto =
                        OperationDAOUtil.convertOperation(operation);
                int enrolmentId;
                String operationCode = operationDto.getCode();
                List<Device> authorizedDevices = new ArrayList<>();
                List<Device> ignoredDevices = new ArrayList<>();
                for (DeviceIdentifier deviceId : authorizedDeviceIds) {
                    Device device = getDevice(deviceId);
                    authorizedDevices.add(device);
                }
                // NO_REPEAT: devices that already have this operation code are skipped
                // (only re-notified with the existing operation id).
                if (operationDto.getControl() ==
                    org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation.Control.NO_REPEAT) {
                    int existingOperationID;
                    for (Device device : authorizedDevices) {
                        enrolmentId = device.getEnrolmentInfo().getId();
                        existingOperationID = operationDAO.getExistingOperationID(enrolmentId, operationCode);
                        if (existingOperationID > 0) {
                            ignoredDevices.add(device);
                            operation.setId(existingOperationID);
                            this.sendNotification(operation, device);
                        }
                    }
                }
                if (ignoredDevices.size() > 0) {
                    if (authorizedDevices.size() == ignoredDevices.size()) {
                        // Every device already has this operation pending; nothing to persist.
                        if (log.isDebugEnabled()) {
                            log.debug("All the devices contain a pending operation for the Operation Code: "
                                    + operationCode);
                        }
                        Activity activity = new Activity();
                        //Send the operation statuses only for admin triggered operations
                        String deviceType = validDeviceIds.get(0).getType();
                        activity.setActivityStatus(this.getActivityStatus(deviceValidationResult, deviceAuthorizationResult,
                                deviceType));
                        return activity;
                    } else {
                        authorizedDevices.removeAll(ignoredDevices);
                    }
                }
                int operationId = this.lookupOperationDAO(operation).addOperation(operationDto);
                boolean isScheduled = false;
                NotificationStrategy notificationStrategy = getNotificationStrategy();
                // If the device list reaches the configured scheduler batch size and a strategy
                // exists, defer push notifications to the scheduler task instead of sending inline.
                if (DeviceConfigurationManager.getInstance().getDeviceManagementConfig().
                        getPushNotificationConfiguration().getSchedulerBatchSize() <= authorizedDeviceIds.size() &&
                        notificationStrategy != null) {
                    isScheduled = notificationStrategy.getConfig().isScheduled();
                }
                //TODO have to create a sql to load device details from deviceDAO using single query.
                for (Device device : authorizedDevices) {
                    enrolmentId = device.getEnrolmentInfo().getId();
                    //Do not repeat the task operations
                    operationMappingDAO.addOperationMapping(operationId, enrolmentId, isScheduled);
                }
                OperationManagementDAOFactory.commitTransaction();
                // Notifications are sent only after the commit so devices never see uncommitted state.
                if (isScheduled) {
                    for (Device device : authorizedDevices) {
                        this.sendNotification(operation, device);
                    }
                }
                Activity activity = new Activity();
                activity.setActivityId(DeviceManagementConstants.OperationAttributes.ACTIVITY + operationId);
                activity.setCode(operationCode);
                activity.setCreatedTimeStamp(new Date().toString());
                activity.setType(Activity.Type.valueOf(operationDto.getType().toString()));
                //For now set the operation statuses only for admin triggered operations
                if (!isScheduledOperation) {
                    //Get the device-type from 1st valid DeviceIdentifier. We know the 1st element is definitely there.
                    String deviceType = validDeviceIds.get(0).getType();
                    activity.setActivityStatus(this.getActivityStatus(deviceValidationResult, deviceAuthorizationResult,
                            deviceType));
                }
                return activity;
            } else {
                throw new InvalidDeviceException("Invalid device Identifiers found.");
            }
        } catch (OperationManagementDAOException e) {
            OperationManagementDAOFactory.rollbackTransaction();
            throw new OperationManagementException("Error occurred while adding operation", e);
        } catch (TransactionManagementException e) {
            throw new OperationManagementException("Error occurred while initiating the transaction", e);
        } finally {
            OperationManagementDAOFactory.closeConnection();
        }
    }
private Operation getPolicyRevokeOperation() {
CommandOperation policyRevokeOperation = new CommandOperation();
policyRevokeOperation.setEnabled(true);
policyRevokeOperation.setCode(OperationMgtConstants.OperationCodes.POLICY_REVOKE);
policyRevokeOperation.setType(Operation.Type.COMMAND);
return policyRevokeOperation;
}
private Operation transformPolicy(Policy policy) {
List<ProfileFeature> effectiveFeatures = policy.getProfile().getProfileFeaturesList();
List<ProfileOperation> profileOperationList = new ArrayList<ProfileOperation>();
PolicyOperation policyOperation = new PolicyOperation();
policyOperation.setEnabled(true);
policyOperation.setType(org.wso2.carbon.device.mgt.common.operation.mgt.Operation.Type.POLICY);
policyOperation.setCode(PolicyOperation.POLICY_OPERATION_CODE);
for (ProfileFeature feature : effectiveFeatures) {
ProfileOperation profileOperation = new ProfileOperation();
profileOperation.setCode(feature.getFeatureCode());
profileOperation.setEnabled(true);
profileOperation.setStatus(org.wso2.carbon.device.mgt.common.operation.mgt.Operation.Status.PENDING);
profileOperation.setType(org.wso2.carbon.device.mgt.common.operation.mgt.Operation.Type.PROFILE);
profileOperation.setPayLoad(feature.getContent());
profileOperationList.add(profileOperation);
}
policyOperation.setProfileOperations(profileOperationList);
policyOperation.setPayLoad(policyOperation.getProfileOperations());
return policyOperation;
}
    /**
     * Persists, in one transaction, a policy-revoke operation followed by the new policy
     * operation (derived from {@code policy}) for every valid and authorized device, and
     * pushes notifications for both unless the strategy defers them to the scheduler.
     *
     * @throws InvalidDeviceException      if none of the identifiers are valid.
     * @throws OperationManagementException on DAO or transaction failure.
     */
    @Override
    public void addOperationsForPolicyRevoke(Policy policy, List<DeviceIdentifier> deviceIds)
            throws OperationManagementException, InvalidDeviceException {
        Operation revokeOperation = getPolicyRevokeOperation();
        Operation operation = transformPolicy(policy);
        if (log.isDebugEnabled()) {
            log.debug("operation:[" + operation.toString() + "]");
            for (DeviceIdentifier deviceIdentifier : deviceIds) {
                log.debug("device identifier id:[" + deviceIdentifier.getId() + "] type:[" +
                        deviceIdentifier.getType() + "]");
            }
        }
        try {
            DeviceIDHolder deviceValidationResult = DeviceManagerUtil.validateDeviceIdentifiers(deviceIds);
            List<DeviceIdentifier> validDeviceIds = deviceValidationResult.getValidDeviceIDList();
            if (validDeviceIds.size() > 0) {
                DeviceIDHolder deviceAuthorizationResult = this.authorizeDevices(operation, validDeviceIds);
                List<DeviceIdentifier> authorizedDeviceList = deviceAuthorizationResult.getValidDeviceIDList();
                // NOTE(review): unlike addOperation, an empty authorized list is not
                // short-circuited here — confirm whether that is intentional.
                OperationManagementDAOFactory.beginTransaction();
                org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation policyOperationDto =
                        OperationDAOUtil.convertOperation(operation);
                org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation revokeOperationDto =
                        OperationDAOUtil.convertOperation(revokeOperation);
                boolean isScheduledOperation = this.isTaskScheduledOperation(operation);
                boolean isNotRepeated = false;
                boolean isScheduled = false;
                NotificationStrategy notificationStrategy = getNotificationStrategy();
                // If the device list reaches the configured scheduler batch size and a strategy
                // exists, defer push notifications to the scheduler task instead of sending inline.
                if (DeviceConfigurationManager.getInstance().getDeviceManagementConfig().
                        getPushNotificationConfiguration().getSchedulerBatchSize() <= authorizedDeviceList.size() &&
                        notificationStrategy != null) {
                    isScheduled = notificationStrategy.getConfig().isScheduled();
                }
                // Persist both operations together: revoke first, then the new policy.
                List<org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation> operationList = new LinkedList<org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation>();
                operationList.add(revokeOperationDto);
                operationList.add(policyOperationDto);
                List<Integer> operationIds = this.lookupOperationDAO(operation).addOperations(operationList);
                List<Device> devices = new ArrayList<>();
                if (org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation.Control.NO_REPEAT == policyOperationDto.
                        getControl()) {
                    isNotRepeated = true;
                }
                //Need to happen for both revoke and new policy operation
                addOperationMappings(authorizedDeviceList, revokeOperationDto, operationIds.get(0), isScheduledOperation,
                        isNotRepeated, isScheduled, devices);
                sendPushNotifications(revokeOperation, operationIds.get(0), isScheduled, notificationStrategy, devices);
                //Need to happen for both revoke and new policy operation
                addOperationMappings(authorizedDeviceList, policyOperationDto, operationIds.get(1), isScheduledOperation,
                        isNotRepeated, isScheduled, devices);
                sendPushNotifications(operation, operationIds.get(1), isScheduled, notificationStrategy, devices);
                OperationManagementDAOFactory.commitTransaction();
            } else {
                throw new InvalidDeviceException("Invalid device Identifiers found.");
            }
        } catch (OperationManagementDAOException e) {
            OperationManagementDAOFactory.rollbackTransaction();
            throw new OperationManagementException("Error occurred while adding operation", e);
        } catch (TransactionManagementException e) {
            throw new OperationManagementException("Error occurred while initiating the transaction", e);
        } finally {
            OperationManagementDAOFactory.closeConnection();
        }
    }
    /**
     * Maps the given operation to every authorized device's enrolment, appending each
     * resolved {@link Device} to {@code devices} (an output parameter reused by the caller
     * for notification delivery). Returns the operation code.
     * <p>
     * NOTE(review): the return value is ignored by both call sites; and for scheduled
     * operations the mapping is added only when an existing operation id is found
     * ({@code != -1}) — confirm that skipping first-time scheduled mappings is intended.
     */
    private String addOperationMappings(List<DeviceIdentifier> authorizedDeviceList, org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation operationDto, int operationId, boolean isScheduledOperation, boolean isNotRepeated, boolean isScheduled, List<Device> devices) throws OperationManagementException, OperationManagementDAOException {
        int enrolmentId;
        int existingTaskOperationId;//TODO have to create a sql to load device details from deviceDAO using single query.
        String operationCode = operationDto.getCode();
        for (DeviceIdentifier deviceId : authorizedDeviceList) {
            Device device = getDevice(deviceId);
            devices.add(device);
            enrolmentId = device.getEnrolmentInfo().getId();
            //Do not repeat the task operations
            if (isScheduledOperation) {
                existingTaskOperationId = operationDAO.getExistingOperationID(enrolmentId, operationCode);
                if (existingTaskOperationId != -1) {
                    operationMappingDAO.addOperationMapping(operationId, enrolmentId, isScheduled);
                }
            } else if (isNotRepeated) {
                // NO_REPEAT: mark any pending copies of this code as REPEATED before mapping anew.
                operationDAO.updateEnrollmentOperationsStatus(enrolmentId, operationCode,
                        org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation.Status.PENDING,
                        org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation.Status.REPEATED);
                operationMappingDAO.addOperationMapping(operationId, enrolmentId, isScheduled);
            } else {
                operationMappingDAO.addOperationMapping(operationId, enrolmentId, isScheduled);
            }
        }
        return operationCode;
    }
    /*
     * If the notification strategy is not configured to deliver push notifications via the
     * scheduler task, the notification is sent immediately. This is done in a separate loop
     * in order to prevent the DB inserts from overlapping with the DB updates that may be
     * triggered by a subsequent pending-operation call. Otherwise a device could request its
     * pending operations while the DB is locked for writing, and a deadlock could occur.
     */
    /**
     * Sends an immediate push notification for {@code operation} to each device, unless
     * delivery was deferred to the scheduler ({@code isScheduled}) or no strategy exists.
     * A failed send marks that device's mapping as SCHEDULED so the scheduler retries it;
     * mapping-update failures are logged and swallowed so remaining devices still get notified.
     */
    private void sendPushNotifications(Operation operation, int operationId, boolean isScheduled, NotificationStrategy notificationStrategy, List<Device> devices) {
        int enrolmentId;
        if (notificationStrategy != null && !isScheduled) {
            for (Device device : devices) {
                DeviceIdentifier deviceId = new DeviceIdentifier(device.getDeviceIdentifier(), device.getType());
                if (log.isDebugEnabled()) {
                    log.debug("Sending push notification to " + deviceId + " from add operation method.");
                }
                operation.setId(operationId);
                operation.setActivityId(DeviceManagementConstants.OperationAttributes.ACTIVITY + operationId);
                try {
                    notificationStrategy.execute(new NotificationContext(deviceId, operation));
                } catch (PushNotificationExecutionFailedException e) {
                    log.error("Error occurred while sending push notifications to " + deviceId.getType() +
                            " device carrying id '" + deviceId + "'", e);
                    /*
                        Reschedule if push notification failed. Doing db transactions in atomic way to prevent
                        deadlocks.
                     */
                    enrolmentId = device.getEnrolmentInfo().getId();
                    try {
                        operationMappingDAO.updateOperationMapping(operationId, enrolmentId, org.wso2.carbon
                                .device.mgt.core.dto.operation.mgt.Operation.PushNotificationStatus.SCHEDULED);
                    } catch (OperationManagementDAOException ex) {
                        // Not throwing this exception in order to keep sending remaining notifications if any.
                        log.error("Error occurred while setting push notification status to SCHEDULED.", ex);
                    }
                }
            }
        }
    }
    /**
     * Sends a push notification for {@code operation} to a single device using the current
     * tenant's strategy (no-op when none is configured). On delivery failure the device's
     * operation mapping is set to SCHEDULED (committed on success, rolled back on DAO error)
     * so the scheduler can retry.
     */
    private void sendNotification(Operation operation, Device device) {
        NotificationStrategy notificationStrategy = getNotificationStrategy();
        /*
         * If the notification strategy is not configured to deliver push notifications via the
         * scheduler task, the notification is sent immediately. This is done in a separate loop
         * in order to prevent the DB inserts from overlapping with the DB updates that may be
         * triggered by a subsequent pending-operation call. Otherwise a device could request its
         * pending operations while the DB is locked for writing, and a deadlock could occur.
         */
        if (notificationStrategy != null) {
            if (log.isDebugEnabled()) {
                log.debug("Sending push notification to " + device.getDeviceIdentifier() + " from add operation method.");
            }
            DeviceIdentifier deviceIdentifier = new DeviceIdentifier(device.getDeviceIdentifier(), device.getType());
            try {
                notificationStrategy.execute(new NotificationContext(deviceIdentifier, operation));
            } catch (PushNotificationExecutionFailedException e) {
                log.error("Error occurred while sending push notifications to " + device.getType() +
                        " device carrying id '" + device.getDeviceIdentifier() + "'", e);
                /*
                 * Reschedule if push notification failed. Doing db transactions in atomic way to prevent
                 * deadlocks.
                 */
                try {
                    operationMappingDAO.updateOperationMapping(operation.getId(), device.getEnrolmentInfo().getId(), org.wso2.carbon
                            .device.mgt.core.dto.operation.mgt.Operation.PushNotificationStatus.SCHEDULED);
                    OperationManagementDAOFactory.commitTransaction();
                } catch (OperationManagementDAOException ex) {
                    // Not throwing this exception in order to keep sending remaining notifications if any.
                    log.error("Error occurred while setting push notification status to SCHEDULED.", ex);
                    OperationManagementDAOFactory.rollbackTransaction();
                }
            }
        }
    }
private List<ActivityStatus> getActivityStatus(DeviceIDHolder deviceIdValidationResult, DeviceIDHolder deviceAuthResult,
String deviceType) {
List<ActivityStatus> activityStatuses = new ArrayList<>();
ActivityStatus activityStatus;
//Add the invalid DeviceIds
for (String id : deviceIdValidationResult.getErrorDeviceIdList()) {
activityStatus = new ActivityStatus();
activityStatus.setDeviceIdentifier(new DeviceIdentifier(id, deviceType));
activityStatus.setStatus(ActivityStatus.Status.INVALID);
activityStatuses.add(activityStatus);
}
//Add the unauthorized DeviceIds
for (String id : deviceAuthResult.getErrorDeviceIdList()) {
activityStatus = new ActivityStatus();
activityStatus.setDeviceIdentifier(new DeviceIdentifier(id, deviceType));
activityStatus.setStatus(ActivityStatus.Status.UNAUTHORIZED);
activityStatuses.add(activityStatus);
}
//Add the authorized DeviceIds
for (DeviceIdentifier id : deviceAuthResult.getValidDeviceIDList()) {
activityStatus = new ActivityStatus();
activityStatus.setDeviceIdentifier(id);
activityStatus.setStatus(ActivityStatus.Status.PENDING);
activityStatuses.add(activityStatus);
}
return activityStatuses;
}
private DeviceIDHolder authorizeDevices(
Operation operation, List<DeviceIdentifier> deviceIds) throws OperationManagementException {
List<DeviceIdentifier> authorizedDeviceList;
List<String> unAuthorizedDeviceList = new ArrayList<>();
DeviceIDHolder deviceIDHolder = new DeviceIDHolder();
try {
if (operation != null && isAuthenticationSkippedOperation(operation)) {
authorizedDeviceList = deviceIds;
} else {
boolean isAuthorized;
authorizedDeviceList = new ArrayList<>();
for (DeviceIdentifier devId : deviceIds) {
isAuthorized = DeviceManagementDataHolder.getInstance().getDeviceAccessAuthorizationService().
isUserAuthorized(devId);
if (isAuthorized) {
authorizedDeviceList.add(devId);
} else {
unAuthorizedDeviceList.add(devId.getId());
}
}
}
} catch (DeviceAccessAuthorizationException e) {
throw new OperationManagementException("Error occurred while authorizing access to the devices for user :" +
this.getUser(), e);
}
deviceIDHolder.setValidDeviceIDList(authorizedDeviceList);
deviceIDHolder.setErrorDeviceIdList(unAuthorizedDeviceList);
return deviceIDHolder;
}
private Device getDevice(DeviceIdentifier deviceId) throws OperationManagementException {
try {
return DeviceManagementDataHolder.getInstance().getDeviceManagementProvider().getDevice(deviceId, false);
} catch (DeviceManagementException e) {
throw new OperationManagementException(
"Error occurred while retrieving device info.", e);
}
}
    /**
     * Returns every operation assigned to the device's active enrolment, or {@code null}
     * when the device has no active enrolment.
     *
     * @throws OperationManagementException if the caller is not authorized for the device,
     *                                      or on DAO/connection failure.
     */
    @Override
    public List<? extends Operation> getOperations(DeviceIdentifier deviceId) throws OperationManagementException {
        List<Operation> operations = null;
        if (!isActionAuthorized(deviceId)) {
            throw new OperationManagementException("User '" + getUser() + "' is not authorized to access the '" +
                                                   deviceId.getType() + "' device, which carries the identifier '" +
                                                   deviceId.getId() + "'");
        }
        EnrolmentInfo enrolmentInfo = this.getActiveEnrolmentInfo(deviceId);
        if (enrolmentInfo == null) {
            // No active enrolment: nothing to list.
            return null;
        }
        try {
            OperationManagementDAOFactory.openConnection();
            List<? extends org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation> operationList =
                    operationDAO.getOperationsForDevice(enrolmentInfo.getId());
            // Convert each DTO into the public Operation type before returning.
            operations = new ArrayList<>();
            for (org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation dtoOperation : operationList) {
                Operation operation = OperationDAOUtil.convertOperation(dtoOperation);
                operations.add(operation);
            }
        } catch (OperationManagementDAOException e) {
            throw new OperationManagementException("Error occurred while retrieving the list of " +
                                                   "operations assigned for '" + deviceId.getType() +
                                                   "' device '" + deviceId.getId() + "'", e);
        } catch (SQLException e) {
            throw new OperationManagementException(
                    "Error occurred while opening a connection to the data source", e);
        } finally {
            OperationManagementDAOFactory.closeConnection();
        }
        return operations;
    }
@Override
public PaginationResult getOperations(DeviceIdentifier deviceId, PaginationRequest request)
throws OperationManagementException {
PaginationResult paginationResult = null;
List<Operation> operations = new ArrayList<>();
String owner = request.getOwner();
try {
if (!DeviceManagerUtil.isDeviceExists(deviceId)) {
throw new OperationManagementException("Device not found for given device " +
"Identifier:" + deviceId.getId() + " and given type : " +
deviceId.getType());
}
} catch (DeviceManagementException e) {
throw new OperationManagementException("Error while checking the existence of the device identifier - "
+ deviceId.getId() + " of the device type - " + deviceId.getType(), e);
}
if (!isActionAuthorized(deviceId)) {
throw new OperationManagementException("User '" + getUser() + "' is not authorized to access the '" +
deviceId.getType() + "' device, which carries the identifier '" +
deviceId.getId() + "' of owner '" + owner + "'");
}
EnrolmentInfo enrolmentInfo = this.getEnrolmentInfo(deviceId, owner);
int enrolmentId = enrolmentInfo.getId();
try {
OperationManagementDAOFactory.openConnection();
List<? extends org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation> operationList =
operationDAO.getOperationsForDevice(enrolmentId, request);
for (org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation dtoOperation : operationList) {
Operation operation = OperationDAOUtil.convertOperation(dtoOperation);
operations.add(operation);
}
paginationResult = new PaginationResult();
int count = operationDAO.getOperationCountForDevice(enrolmentId);
paginationResult.setData(operations);
paginationResult.setRecordsTotal(count);
paginationResult.setRecordsFiltered(count);
} catch (OperationManagementDAOException e) {
throw new OperationManagementException("Error occurred while retrieving the list of " +
"operations assigned for '" + deviceId.getType() +
"' device '" + deviceId.getId() + "'", e);
} catch (SQLException e) {
throw new OperationManagementException(
"Error occurred while opening a connection to the data source", e);
} finally {
OperationManagementDAOFactory.closeConnection();
}
return paginationResult;
}
    /**
     * Returns all PENDING operations of every concrete type (command, config,
     * profile, policy) for the active enrolment of the given device, sorted by
     * creation time.
     *
     * Side effect: an INACTIVE or UNREACHABLE enrolment is flipped back to
     * ACTIVE — the device contacting the server shows it is reachable again.
     *
     * @throws OperationManagementException on authorization failure, unknown
     *         device, or persistence errors
     */
    @Override
    public List<? extends Operation> getPendingOperations(DeviceIdentifier deviceId) throws
            OperationManagementException {
        if (log.isDebugEnabled()) {
            log.debug("Device identifier id:[" + deviceId.getId() + "] type:[" + deviceId.getType() + "]");
        }
        List<Operation> operations = new ArrayList<>();
        List<org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation> dtoOperationList = new ArrayList<>();
        if (!isActionAuthorized(deviceId)) {
            throw new OperationManagementException("User '" + getUser() + "' is not authorized to access the '" +
                                                   deviceId.getType() + "' device, which carries the identifier '" +
                                                   deviceId.getId() + "'");
        }
        EnrolmentInfo enrolmentInfo = this.getActiveEnrolmentInfo(deviceId);
        if (enrolmentInfo == null) {
            throw new OperationManagementException("Device not found for the given device Identifier:" +
                                                   deviceId.getId() + " and given type:" +
                                                   deviceId.getType());
        }
        int enrolmentId = enrolmentInfo.getId();
        //Changing the enrollment status & attempt count if the device is marked as inactive or unreachable
        switch (enrolmentInfo.getStatus()) {
            case INACTIVE:
            case UNREACHABLE:
                this.setEnrolmentStatus(enrolmentId, EnrolmentInfo.Status.ACTIVE);
                break;
        }
        try {
            OperationManagementDAOFactory.openConnection();
            // Pending operations live in type-specific tables; query each DAO and merge.
            dtoOperationList.addAll(commandOperationDAO.getOperationsByDeviceAndStatus(
                    enrolmentId, org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation.Status.PENDING));
            dtoOperationList.addAll(configOperationDAO.getOperationsByDeviceAndStatus(
                    enrolmentId, org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation.Status.PENDING));
            dtoOperationList.addAll(profileOperationDAO.getOperationsByDeviceAndStatus(
                    enrolmentId, org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation.Status.PENDING));
            dtoOperationList.addAll(policyOperationDAO.getOperationsByDeviceAndStatus(
                    enrolmentId, org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation.Status.PENDING));
            Operation operation;
            for (org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation dtoOperation : dtoOperationList) {
                operation = OperationDAOUtil.convertOperation(dtoOperation);
                operations.add(operation);
            }
            // Restore chronological order lost by merging the per-type result lists.
            Collections.sort(operations, new OperationCreateTimeComparator());
        } catch (OperationManagementDAOException e) {
            throw new OperationManagementException("Error occurred while retrieving the list of " +
                                                   "pending operations assigned for '" + deviceId.getType() +
                                                   "' device '" + deviceId.getId() + "'", e);
        } catch (SQLException e) {
            throw new OperationManagementException(
                    "Error occurred while opening a connection to the data source", e);
        } finally {
            OperationManagementDAOFactory.closeConnection();
        }
        return operations;
    }
    /**
     * Returns the single next PENDING operation for the device, enriched with
     * its type-specific payload, or {@code null} when nothing is queued.
     *
     * Side effect: an INACTIVE or UNREACHABLE enrolment is reset to ACTIVE.
     *
     * @throws OperationManagementException on authorization failure, unknown
     *         device, or persistence errors
     */
    @Override
    public Operation getNextPendingOperation(DeviceIdentifier deviceId) throws OperationManagementException {
        if (log.isDebugEnabled()) {
            log.debug("device identifier id:[" + deviceId.getId() + "] type:[" + deviceId.getType() + "]");
        }
        Operation operation = null;
        if (!isActionAuthorized(deviceId)) {
            throw new OperationManagementException("User '" + getUser() + "' is not authorized to access the '" +
                                                   deviceId.getType() + "' device, which carries the identifier '" +
                                                   deviceId.getId() + "'");
        }
        EnrolmentInfo enrolmentInfo = this.getActiveEnrolmentInfo(deviceId);
        if (enrolmentInfo == null) {
            throw new OperationManagementException("Device not found for given device " +
                                                   "Identifier:" + deviceId.getId() + " and given type" +
                                                   deviceId.getType());
        }
        int enrolmentId = enrolmentInfo.getId();
        //Changing the enrollment status & attempt count if the device is marked as inactive or unreachable
        switch (enrolmentInfo.getStatus()) {
            case INACTIVE:
            case UNREACHABLE:
                this.setEnrolmentStatus(enrolmentId, EnrolmentInfo.Status.ACTIVE);
                break;
        }
        try {
            OperationManagementDAOFactory.openConnection();
            org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation dtoOperation = operationDAO.getNextOperation(
                    enrolmentInfo.getId());
            if (dtoOperation != null) {
                // Re-fetch from the type-specific DAO so the returned operation carries
                // its concrete payload (or, for commands, the enabled flag).
                if (org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation.Type.COMMAND.equals(dtoOperation.getType()
                )) {
                    org.wso2.carbon.device.mgt.core.dto.operation.mgt.CommandOperation commandOperation;
                    commandOperation =
                            (org.wso2.carbon.device.mgt.core.dto.operation.mgt.CommandOperation) commandOperationDAO.
                                    getOperation(dtoOperation.getId());
                    dtoOperation.setEnabled(commandOperation.isEnabled());
                } else if (org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation.Type.CONFIG.equals(dtoOperation.
                        getType())) {
                    dtoOperation = configOperationDAO.getOperation(dtoOperation.getId());
                } else if (org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation.Type.PROFILE.equals(dtoOperation.
                        getType())) {
                    dtoOperation = profileOperationDAO.getOperation(dtoOperation.getId());
                } else if (org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation.Type.POLICY.equals(dtoOperation.
                        getType())) {
                    dtoOperation = policyOperationDAO.getOperation(dtoOperation.getId());
                }
                operation = OperationDAOUtil.convertOperation(dtoOperation);
            }
        } catch (OperationManagementDAOException e) {
            throw new OperationManagementException("Error occurred while retrieving next pending operation", e);
        } catch (SQLException e) {
            throw new OperationManagementException(
                    "Error occurred while opening a connection to the data source", e);
        } finally {
            OperationManagementDAOFactory.closeConnection();
        }
        return operation;
    }
    /**
     * Persists a device's feedback for an operation: updates the per-enrolment
     * operation status (when set) and appends any response payload, within a
     * single transaction.
     *
     * @param deviceId  device the operation belongs to
     * @param operation carries the operation id plus the new status and/or
     *                  response to store
     * @throws OperationManagementException on authorization failure, unknown
     *         device, or persistence errors (the transaction is rolled back)
     */
    @Override
    public void updateOperation(DeviceIdentifier deviceId, Operation operation) throws OperationManagementException {
        int operationId = operation.getId();
        if (log.isDebugEnabled()) {
            log.debug("operation Id:" + operationId + " status:" + operation.getStatus());
        }
        if (!isActionAuthorized(deviceId)) {
            throw new OperationManagementException("User '" + getUser() + "' is not authorized to access the '" +
                                                   deviceId.getType() + "' device, which carries the identifier '" +
                                                   deviceId.getId() + "'");
        }
        EnrolmentInfo enrolmentInfo = this.getActiveEnrolmentInfo(deviceId);
        if (enrolmentInfo == null) {
            throw new OperationManagementException(
                    "Device not found for device id:" + deviceId.getId() + " " + "type:" +
                    deviceId.getType());
        }
        try {
            int enrolmentId = enrolmentInfo.getId();
            OperationManagementDAOFactory.beginTransaction();
            // Status and response are both optional; only persist what the caller supplied.
            if (operation.getStatus() != null) {
                operationDAO.updateOperationStatus(enrolmentId, operationId,
                                                   org.wso2.carbon.device.mgt.core.dto.operation.mgt.
                                                           Operation.Status.valueOf(operation.getStatus().
                                                           toString()));
            }
            if (operation.getOperationResponse() != null) {
                operationDAO.addOperationResponse(enrolmentId, operationId, operation.getOperationResponse());
            }
            OperationManagementDAOFactory.commitTransaction();
        } catch (OperationManagementDAOException e) {
            OperationManagementDAOFactory.rollbackTransaction();
            throw new OperationManagementException(
                    "Error occurred while updating the operation: " + operationId + " status:" +
                    operation.getStatus(), e);
        } catch (TransactionManagementException e) {
            // beginTransaction itself failed; nothing to roll back.
            throw new OperationManagementException("Error occurred while initiating a transaction", e);
        } finally {
            OperationManagementDAOFactory.closeConnection();
        }
    }
@Override
public Operation getOperationByDeviceAndOperationId(DeviceIdentifier deviceId, int operationId)
throws OperationManagementException {
Operation operation = null;
if (log.isDebugEnabled()) {
log.debug("Operation Id: " + operationId + " Device Type: " + deviceId.getType() + " Device Identifier: " +
deviceId.getId());
}
if (!isActionAuthorized(deviceId)) {
throw new OperationManagementException("User '" + getUser() + "' is not authorized to access the '" +
deviceId.getType() + "' device, which carries the identifier '" +
deviceId.getId() + "'");
}
EnrolmentInfo enrolmentInfo = this.getActiveEnrolmentInfo(deviceId);
if (enrolmentInfo == null) {
throw new OperationManagementException("Device not found for given device identifier: " +
deviceId.getId() + " type: " + deviceId.getType());
}
try {
OperationManagementDAOFactory.openConnection();
org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation deviceSpecificOperation = operationDAO.
getOperationByDeviceAndId(enrolmentInfo.getId(),
operationId);
org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation dtoOperation = deviceSpecificOperation;
if (deviceSpecificOperation.getType().
equals(org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation.Type.COMMAND)) {
org.wso2.carbon.device.mgt.core.dto.operation.mgt.CommandOperation commandOperation;
commandOperation =
(org.wso2.carbon.device.mgt.core.dto.operation.mgt.CommandOperation) commandOperationDAO.
getOperation(deviceSpecificOperation.getId());
dtoOperation.setEnabled(commandOperation.isEnabled());
} else if (deviceSpecificOperation.getType().
equals(org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation.Type.CONFIG)) {
dtoOperation = configOperationDAO.getOperation(deviceSpecificOperation.getId());
} else if (deviceSpecificOperation.getType().equals(
org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation.Type.PROFILE)) {
dtoOperation = profileOperationDAO.getOperation(deviceSpecificOperation.getId());
} else if (deviceSpecificOperation.getType().equals(
org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation.Type.POLICY)) {
dtoOperation = policyOperationDAO.getOperation(deviceSpecificOperation.getId());
}
if (dtoOperation == null) {
throw new OperationManagementException("Operation not found for operation Id:" + operationId +
" device id:" + deviceId.getId());
}
dtoOperation.setStatus(deviceSpecificOperation.getStatus());
operation = OperationDAOUtil.convertOperation(deviceSpecificOperation);
} catch (OperationManagementDAOException e) {
throw new OperationManagementException("Error occurred while retrieving the list of " +
"operations assigned for '" + deviceId.getType() +
"' device '" + deviceId.getId() + "'", e);
} catch (SQLException e) {
throw new OperationManagementException("Error occurred while opening connection to the data source",
e);
} finally {
OperationManagementDAOFactory.closeConnection();
}
return operation;
}
@Override
public List<? extends Operation> getOperationsByDeviceAndStatus(
DeviceIdentifier deviceId, Operation.Status status) throws OperationManagementException {
List<Operation> operations = new ArrayList<>();
List<org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation> dtoOperationList = new ArrayList<>();
if (!isActionAuthorized(deviceId)) {
throw new OperationManagementException("User '" + getUser() + "' is not authorized to access the '" +
deviceId.getType() + "' device, which carries the identifier '" +
deviceId.getId() + "'");
}
EnrolmentInfo enrolmentInfo = this.getActiveEnrolmentInfo(deviceId);
if (enrolmentInfo == null) {
throw new OperationManagementException(
"Device not found for device id:" + deviceId.getId() + " " + "type:" +
deviceId.getType());
}
try {
int enrolmentId = enrolmentInfo.getId();
OperationManagementDAOFactory.openConnection();
org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation.Status dtoOpStatus =
org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation.Status.valueOf(status.toString());
dtoOperationList.addAll(commandOperationDAO.getOperationsByDeviceAndStatus(enrolmentId, dtoOpStatus));
dtoOperationList.addAll(configOperationDAO.getOperationsByDeviceAndStatus(enrolmentId,
org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation.
Status.PENDING));
dtoOperationList.addAll(profileOperationDAO.getOperationsByDeviceAndStatus(enrolmentId,
org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation.
Status.PENDING));
dtoOperationList.addAll(policyOperationDAO.getOperationsByDeviceAndStatus(enrolmentId,
org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation.
Status.PENDING));
Operation operation;
for (org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation dtoOperation : dtoOperationList) {
operation = OperationDAOUtil.convertOperation(dtoOperation);
operations.add(operation);
}
} catch (OperationManagementDAOException e) {
throw new OperationManagementException("Error occurred while retrieving the list of " +
"operations assigned for '" + deviceId.getType() +
"' device '" +
deviceId.getId() + "' and status:" + status.toString(), e);
} catch (SQLException e) {
throw new OperationManagementException(
"Error occurred while opening a connection to the data source", e);
} finally {
OperationManagementDAOFactory.closeConnection();
}
return operations;
}
    /**
     * Loads an operation by id, re-fetching from the type-specific DAO so the
     * result includes its concrete payload (command enabled-flag, config,
     * profile, or policy body).
     *
     * @throws OperationManagementException when no operation exists for the id
     *         or a persistence error occurs
     */
    @Override
    public Operation getOperation(int operationId) throws OperationManagementException {
        Operation operation;
        try {
            OperationManagementDAOFactory.openConnection();
            org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation dtoOperation = operationDAO.getOperation(
                    operationId);
            if (dtoOperation == null) {
                throw new OperationManagementException("Operation not found for given Id:" + operationId);
            }
            // Dispatch on the stored type to enrich the bare row with its payload.
            if (dtoOperation.getType()
                    .equals(org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation.Type.COMMAND)) {
                org.wso2.carbon.device.mgt.core.dto.operation.mgt.CommandOperation commandOperation;
                commandOperation =
                        (org.wso2.carbon.device.mgt.core.dto.operation.mgt.CommandOperation) commandOperationDAO.
                                getOperation(dtoOperation.getId());
                dtoOperation.setEnabled(commandOperation.isEnabled());
            } else if (dtoOperation.getType().
                    equals(org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation.Type.CONFIG)) {
                dtoOperation = configOperationDAO.getOperation(dtoOperation.getId());
            } else if (dtoOperation.getType().equals(org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation.Type.
                    PROFILE)) {
                dtoOperation = profileOperationDAO.getOperation(dtoOperation.getId());
            } else if (dtoOperation.getType().equals(org.wso2.carbon.device.mgt.core.dto.operation.mgt.Operation.Type.
                    POLICY)) {
                dtoOperation = policyOperationDAO.getOperation(dtoOperation.getId());
            }
            operation = OperationDAOUtil.convertOperation(dtoOperation);
        } catch (OperationManagementDAOException e) {
            throw new OperationManagementException("Error occurred while retrieving the operation with operation Id '" +
                                                   operationId, e);
        } catch (SQLException e) {
            throw new OperationManagementException("Error occurred while opening a connection to the data source", e);
        } finally {
            OperationManagementDAOFactory.closeConnection();
        }
        return operation;
    }
@Override
public Activity getOperationByActivityId(String activity) throws OperationManagementException {
// This parses the operation id from activity id (ex : ACTIVITY_23) and converts to the integer.
int operationId = Integer.parseInt(
activity.replace(DeviceManagementConstants.OperationAttributes.ACTIVITY, ""));
if (operationId == 0) {
throw new IllegalArgumentException("Operation ID cannot be null or zero (0).");
}
try {
OperationManagementDAOFactory.openConnection();
return operationDAO.getActivity(operationId);
} catch (SQLException e) {
throw new OperationManagementException("Error occurred while opening a connection to the data source.", e);
} catch (OperationManagementDAOException e) {
throw new OperationManagementException("Error occurred while retrieving the operation with activity Id '" +
activity, e);
} finally {
OperationManagementDAOFactory.closeConnection();
}
}
@Override
public List<Activity> getOperationByActivityIds(List<String> activities)
throws OperationManagementException {
List<Integer> operationIds = new ArrayList<>();
for (String id : activities) {
int operationId = Integer.parseInt(
id.replace(DeviceManagementConstants.OperationAttributes.ACTIVITY, ""));
if (operationId == 0) {
throw new IllegalArgumentException("Operation ID cannot be null or zero (0).");
} else {
operationIds.add(operationId);
}
}
try {
OperationManagementDAOFactory.openConnection();
return operationDAO.getActivityList(operationIds);
} catch (SQLException e) {
throw new OperationManagementException(
"Error occurred while opening a connection to the data source.", e);
} catch (OperationManagementDAOException e) {
throw new OperationManagementException(
"Error occurred while retrieving the operation with activity Id '" + activities
.toString(), e);
} finally {
OperationManagementDAOFactory.closeConnection();
}
}
public Activity getOperationByActivityIdAndDevice(String activity, DeviceIdentifier deviceId) throws OperationManagementException {
// This parses the operation id from activity id (ex : ACTIVITY_23) and converts to the integer.
int operationId = Integer.parseInt(
activity.replace(DeviceManagementConstants.OperationAttributes.ACTIVITY, ""));
if (operationId == 0) {
throw new IllegalArgumentException("Operation ID cannot be null or zero (0).");
}
if (!isActionAuthorized(deviceId)) {
throw new OperationManagementException("User '" + getUser() + "' is not authorized to access the '" +
deviceId.getType() + "' device, which carries the identifier '" +
deviceId.getId() + "'");
}
Device device = this.getDevice(deviceId);
try {
OperationManagementDAOFactory.openConnection();
return operationDAO.getActivityByDevice(operationId, device.getId());
} catch (SQLException e) {
throw new OperationManagementException("Error occurred while opening a connection to the data source.", e);
} catch (OperationManagementDAOException e) {
throw new OperationManagementException("Error occurred while retrieving the operation with activity Id '" +
activity + " and device Id: " + deviceId.getId(), e);
} finally {
OperationManagementDAOFactory.closeConnection();
}
}
@Override
public List<Activity> getActivitiesUpdatedAfter(long timestamp, int limit,
int offset) throws OperationManagementException {
try {
OperationManagementDAOFactory.openConnection();
return operationDAO.getActivitiesUpdatedAfter(timestamp, limit, offset);
} catch (SQLException e) {
throw new OperationManagementException("Error occurred while opening a connection to the data source.", e);
} catch (OperationManagementDAOException e) {
throw new OperationManagementException("Error occurred while getting the activity list changed after a " +
"given time.", e);
} finally {
OperationManagementDAOFactory.closeConnection();
}
}
@Override
public List<Activity> getFilteredActivities(String operationCode, int limit, int offset) throws OperationManagementException{
try {
OperationManagementDAOFactory.openConnection();
return operationDAO.getFilteredActivities(operationCode, limit, offset);
} catch (SQLException e) {
throw new OperationManagementException("Error occurred while opening a connection to the data source.", e);
} catch (OperationManagementDAOException e) {
throw new OperationManagementException("Error occurred while getting the activity list for the given "
+ "given operationCode: " + operationCode, e);
} finally {
OperationManagementDAOFactory.closeConnection();
}
}
@Override
public int getTotalCountOfFilteredActivities(String operationCode) throws OperationManagementException{
try {
OperationManagementDAOFactory.openConnection();
return operationDAO.getTotalCountOfFilteredActivities(operationCode);
} catch (SQLException e) {
throw new OperationManagementException("Error occurred while opening a connection to the data source.", e);
} catch (OperationManagementDAOException e) {
throw new OperationManagementException("Error occurred while getting the activity count for the given "
+ "operation code:" + operationCode, e);
} finally {
OperationManagementDAOFactory.closeConnection();
}
}
@Override
public List<Activity> getActivitiesUpdatedAfterByUser(long timestamp, String user, int limit, int offset)
throws OperationManagementException {
try {
OperationManagementDAOFactory.openConnection();
return operationDAO.getActivitiesUpdatedAfterByUser(timestamp, user, limit, offset);
} catch (SQLException e) {
throw new OperationManagementException("Error occurred while opening a connection to the data source.", e);
} catch (OperationManagementDAOException e) {
throw new OperationManagementException("Error occurred while getting the activity list changed after a " +
"given time which are added by user : " + user, e);
} finally {
OperationManagementDAOFactory.closeConnection();
}
}
@Override
public int getActivityCountUpdatedAfter(long timestamp) throws OperationManagementException {
try {
OperationManagementDAOFactory.openConnection();
return operationDAO.getActivityCountUpdatedAfter(timestamp);
} catch (SQLException e) {
throw new OperationManagementException("Error occurred while opening a connection to the data source.", e);
} catch (OperationManagementDAOException e) {
throw new OperationManagementException("Error occurred while getting the activity count changed after a " +
"given time.", e);
} finally {
OperationManagementDAOFactory.closeConnection();
}
}
@Override
public int getActivityCountUpdatedAfterByUser(long timestamp, String user) throws OperationManagementException {
try {
OperationManagementDAOFactory.openConnection();
return operationDAO.getActivityCountUpdatedAfterByUser(timestamp, user);
} catch (SQLException e) {
throw new OperationManagementException("Error occurred while opening a connection to the data source.", e);
} catch (OperationManagementDAOException e) {
throw new OperationManagementException("Error occurred while getting the activity count changed after a " +
"given time which are added by user :" + user, e);
} finally {
OperationManagementDAOFactory.closeConnection();
}
}
private OperationDAO lookupOperationDAO(Operation operation) {
if (operation instanceof CommandOperation) {
return commandOperationDAO;
} else if (operation instanceof ProfileOperation) {
return profileOperationDAO;
} else if (operation instanceof ConfigOperation) {
return configOperationDAO;
} else if (operation instanceof PolicyOperation) {
return policyOperationDAO;
} else {
return operationDAO;
}
}
private String getUser() {
return CarbonContext.getThreadLocalCarbonContext().getUsername();
}
private boolean isAuthenticationSkippedOperation(Operation operation) {
//This is to check weather operations are coming from the task related to retrieving device information.
DeviceTaskManager taskManager = new DeviceTaskManagerImpl(deviceType);
if (taskManager.isTaskOperation(operation.getCode())) {
return true;
}
boolean status;
switch (operation.getCode()) {
case DeviceManagementConstants.AuthorizationSkippedOperationCodes.POLICY_OPERATION_CODE:
status = true;
break;
case DeviceManagementConstants.AuthorizationSkippedOperationCodes.MONITOR_OPERATION_CODE:
status = true;
break;
case DeviceManagementConstants.AuthorizationSkippedOperationCodes.POLICY_REVOKE_OPERATION_CODE:
status = true;
break;
default:
status = false;
}
return status;
}
private boolean isActionAuthorized(DeviceIdentifier deviceId) {
boolean isUserAuthorized;
try {
isUserAuthorized = DeviceManagementDataHolder.getInstance().getDeviceAccessAuthorizationService().
isUserAuthorized(deviceId, DeviceGroupConstants.Permissions.DEFAULT_OPERATOR_PERMISSIONS);
} catch (DeviceAccessAuthorizationException e) {
log.error("Error occurred while trying to authorize current user upon the invoked operation", e);
return false;
}
return isUserAuthorized;
}
    /**
     * Fetches the enrolment of the device for the given owner within the
     * current tenant. Non-owners only get a result when they are device admins.
     *
     * @return the enrolment, or {@code null} when none matches or the caller is
     *         neither the owner nor an admin
     * @throws OperationManagementException on persistence or authorization
     *         service errors
     */
    private EnrolmentInfo getEnrolmentInfo(DeviceIdentifier deviceId, String owner) throws OperationManagementException {
        EnrolmentInfo enrolmentInfo = null;
        try {
            int tenantId = CarbonContext.getThreadLocalCarbonContext().getTenantId();
            String user = this.getUser();
            DeviceManagementDAOFactory.openConnection();
            if (this.isSameUser(user, owner)) {
                enrolmentInfo = deviceDAO.getEnrolment(deviceId, owner, tenantId);
            } else {
                // Caller is not the owner; allow the lookup only for device admins.
                boolean isAdminUser = DeviceManagementDataHolder.getInstance().getDeviceAccessAuthorizationService().
                        isDeviceAdminUser();
                if (isAdminUser) {
                    enrolmentInfo = deviceDAO.getEnrolment(deviceId, owner, tenantId);
                }
                //TODO : Add a check for group admin if this fails
            }
        } catch (DeviceManagementDAOException e) {
            throw new OperationManagementException("Error occurred while retrieving enrollment data of '" +
                                                   deviceId.getType() + "' device carrying the identifier '" +
                                                   deviceId.getId() + "' of owner '" + owner + "'", e);
        } catch (SQLException e) {
            throw new OperationManagementException(
                    "Error occurred while opening a connection to the data source", e);
        } catch (DeviceAccessAuthorizationException e) {
            throw new OperationManagementException("Error occurred while checking the device access permissions for '" +
                                                   deviceId.getType() + "' device carrying the identifier '" +
                                                   deviceId.getId() + "' of owner '" + owner + "'", e);
        } finally {
            DeviceManagementDAOFactory.closeConnection();
        }
        return enrolmentInfo;
    }
private EnrolmentInfo getActiveEnrolmentInfo(DeviceIdentifier deviceId) throws OperationManagementException {
EnrolmentInfo enrolmentInfo;
try {
DeviceManagementDAOFactory.openConnection();
int tenantId = CarbonContext.getThreadLocalCarbonContext().getTenantId();
enrolmentInfo = deviceDAO.getActiveEnrolment(deviceId, tenantId);
} catch (DeviceManagementDAOException e) {
throw new OperationManagementException("Error occurred while retrieving enrollment data of '" +
deviceId.getType() + "' device carrying the identifier '" +
deviceId.getId() + "'", e);
} catch (SQLException e) {
throw new OperationManagementException(
"Error occurred while opening a connection to the data source", e);
} finally {
DeviceManagementDAOFactory.closeConnection();
}
return enrolmentInfo;
}
    /**
     * Transactionally updates the status of an enrolment on behalf of the
     * current user.
     *
     * @return whether the DAO reported the status as updated
     * @throws OperationManagementException on persistence errors (the
     *         transaction is rolled back) or transaction-setup failures
     */
    private boolean setEnrolmentStatus(int enrolmentId, EnrolmentInfo.Status status) throws OperationManagementException {
        boolean updateStatus;
        try {
            DeviceManagementDAOFactory.beginTransaction();
            int tenantId = CarbonContext.getThreadLocalCarbonContext().getTenantId();
            String user = this.getUser();
            updateStatus = enrollmentDAO.setStatus(enrolmentId, user, status, tenantId);
            DeviceManagementDAOFactory.commitTransaction();
        } catch (DeviceManagementDAOException e) {
            DeviceManagementDAOFactory.rollbackTransaction();
            throw new OperationManagementException("Error occurred while updating enrollment status of device of " +
                                                   "enrolment-id '" + enrolmentId + "'", e);
        } catch (TransactionManagementException e) {
            // beginTransaction itself failed; nothing to roll back.
            throw new OperationManagementException("Error occurred while initiating a transaction", e);
        } finally {
            DeviceManagementDAOFactory.closeConnection();
        }
        return updateStatus;
    }
private boolean isTaskScheduledOperation(Operation operation) {
DeviceManagementProviderService deviceManagementProviderService = DeviceManagementDataHolder.getInstance().
getDeviceManagementProvider();
List<MonitoringOperation> monitoringOperations = deviceManagementProviderService.getMonitoringOperationList(deviceType);//Get task list from each device type
for (MonitoringOperation op : monitoringOperations) {
if (operation.getCode().equals(op.getTaskName())) {
return true;
}
}
return false;
}
    // Case-insensitive comparison of the current user against the enrolment owner.
    // NOTE(review): assumes 'user' is non-null (a null 'owner' simply yields false) — confirm callers.
    private boolean isSameUser(String user, String owner) {
        return user.equalsIgnoreCase(owner);
    }
}
| |
/*
* IzPack - Copyright 2001-2009 Julien Ponge, All Rights Reserved.
*
* Copyright 2009 Dennis Reil
*
* http://izpack.org/
* http://izpack.codehaus.org/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.izforge.izpack.panels;
import java.awt.Dimension;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.awt.event.FocusEvent;
import java.awt.event.FocusListener;
import java.io.File;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import javax.swing.Box;
import javax.swing.BoxLayout;
import javax.swing.DefaultListModel;
import javax.swing.JButton;
import javax.swing.JFileChooser;
import javax.swing.JLabel;
import javax.swing.JList;
import javax.swing.JOptionPane;
import javax.swing.JPanel;
import javax.swing.JScrollPane;
import javax.swing.ListSelectionModel;
import com.izforge.izpack.gui.ButtonFactory;
import com.izforge.izpack.installer.InstallData;
import com.izforge.izpack.installer.InstallerFrame;
import com.izforge.izpack.util.Debug;
public class MultipleFileInputField extends JPanel implements ActionListener, FocusListener
{
    private static final long serialVersionUID = 4673684743657328492L;

    // true → chooser selects directories; false → it selects files
    boolean isDirectory;

    // Owning installer frame (kept for dialogs/validation context)
    InstallerFrame parentFrame;

    // Validators applied to the selected entries
    List<ValidatorContainer> validators;

    // Backing model for the file list
    DefaultListModel model;

    JList fileList;

    JButton browseBtn;

    JButton deleteBtn;

    // Preset value and field size handed in by the panel definition
    String set;

    int size;

    InstallData data;

    // Optional file-extension filter and its chooser description
    String fileExtension;

    String fileExtensionDescription;

    // Whether an empty selection is acceptable
    boolean allowEmpty;

    // Whether each selected file is stored in its own variable
    boolean createMultipleVariables;

    // List-geometry defaults; overridden by the constructor arguments
    int visibleRows = 10;

    int preferredX = 200;

    int preferredY = 200;

    // Caption shown above the list
    String labeltext;
    /**
     * Builds a multiple-file input field and immediately constructs its UI.
     *
     * @param parent                  owning installer frame
     * @param data                    installation data (langpack, button colors)
     * @param directory               true to choose directories instead of files
     * @param set                     preset value from the panel definition
     * @param size                    declared field size
     * @param validatorConfig         validators for the selected entries
     * @param fileExt                 optional file-extension filter
     * @param fileExtDesc             description shown for the extension filter
     * @param createMultipleVariables whether each file is stored in its own variable
     * @param visibleRows             visible rows in the list
     * @param preferredXSize          preferred scroll-pane width in pixels
     * @param preferredYSize          preferred scroll-pane height in pixels
     * @param labelText               caption shown above the list
     */
    public MultipleFileInputField(InstallerFrame parent, InstallData data, boolean directory, String set, int size,List<ValidatorContainer> validatorConfig,String fileExt, String fileExtDesc,boolean createMultipleVariables,int visibleRows, int preferredXSize, int preferredYSize, String labelText){
        this.parentFrame = parent;
        this.data = data;
        this.validators = validatorConfig;
        this.set = set;
        this.size = size;
        this.fileExtension = fileExt;
        this.fileExtensionDescription = fileExtDesc;
        this.isDirectory = directory;
        this.createMultipleVariables = createMultipleVariables;
        this.visibleRows = visibleRows;
        this.preferredX = preferredXSize;
        this.preferredY = preferredYSize;
        this.labeltext = labelText;
        this.initialize();
    }
public void clearFiles(){
this.model.clear();
}
public void addFile(String file){
this.model.addElement(file);
}
public void initialize(){
JPanel main = new JPanel();
main.setLayout(new BoxLayout(main,BoxLayout.Y_AXIS));
JPanel labelPanel = new JPanel();
labelPanel.setLayout(new BoxLayout(labelPanel,BoxLayout.X_AXIS));
JLabel label = new JLabel(this.labeltext);
labelPanel.add(label);
labelPanel.add(Box.createHorizontalGlue());
main.add(labelPanel);
model = new DefaultListModel();
fileList = new JList(model);
fileList.setSelectionMode(ListSelectionModel.SINGLE_SELECTION);
fileList.setVisibleRowCount(visibleRows);
JPanel panel = new JPanel();
panel.setLayout(new BoxLayout(panel,BoxLayout.X_AXIS));
JPanel buttonPanel = new JPanel();
buttonPanel.setLayout(new BoxLayout(buttonPanel,BoxLayout.Y_AXIS));
browseBtn = ButtonFactory.createButton(data.langpack.getString("UserInputPanel.button.browse"), data.buttonsHColor);
browseBtn.addActionListener(this);
deleteBtn = ButtonFactory.createButton(data.langpack.getString("UserInputPanel.button.delete"), data.buttonsHColor);
deleteBtn.addActionListener(this);
JScrollPane scroller = new JScrollPane(fileList);
scroller.setPreferredSize(new Dimension(preferredX,preferredY));
panel.add(scroller);
buttonPanel.add(browseBtn);
buttonPanel.add(deleteBtn);
buttonPanel.add(Box.createVerticalGlue());
panel.add(buttonPanel);
main.add(panel);
main.add(Box.createVerticalGlue());
add(main);
}
public void actionPerformed(ActionEvent arg0)
{
if (arg0.getSource() == browseBtn){
Debug.trace("Show dirchooser");
String initialPath = ".";
if (fileList.getSelectedValue() != null){
initialPath = (String) fileList.getSelectedValue();
}
JFileChooser filechooser = new JFileChooser(initialPath);
if (isDirectory){
filechooser.setFileSelectionMode(JFileChooser.DIRECTORIES_ONLY);
}
else {
filechooser.setFileSelectionMode(JFileChooser.FILES_ONLY);
if ((fileExtension != null) && (fileExtensionDescription != null)){
UserInputFileFilter fileFilter = new UserInputFileFilter();
fileFilter.setFileExt(fileExtension);
fileFilter.setFileExtDesc(fileExtensionDescription);
filechooser.setFileFilter(fileFilter);
}
}
if (filechooser.showOpenDialog(parentFrame) == JFileChooser.APPROVE_OPTION) {
String selectedFile = filechooser.getSelectedFile().getAbsolutePath();
model.addElement(selectedFile);
Debug.trace("Setting current file chooser directory to: " + selectedFile);
}
}
if (arg0.getSource() == deleteBtn){
Debug.trace("Delete selected file from list");
if (fileList.getSelectedValue() != null){
model.removeElement(fileList.getSelectedValue());
}
}
}
public List<String> getSelectedFiles(){
List<String> result = null;
if (model.size() > 0){
result = new ArrayList<String>();
Enumeration<?> elements = model.elements();
for (;elements.hasMoreElements();)
{
String element = (String) elements.nextElement();
result.add(element);
}
}
return result;
}
private void showMessage(String messageType) {
JOptionPane.showMessageDialog(parentFrame, parentFrame.langpack.getString("UserInputPanel." + messageType + ".message"),
parentFrame.langpack.getString("UserInputPanel." + messageType + ".caption"),
JOptionPane.WARNING_MESSAGE);
}
private boolean validateFile(String input){
boolean result = false;
if (allowEmpty && ((input == null) || (input.length() == 0))){
result = true;
}
else if (input != null){
File file = new File(input);
if (isDirectory && !file.isDirectory()){
result = false;
showMessage("dir.notdirectory");
}
else if (!isDirectory && !file.isFile()){
result = false;
showMessage("file.notfile");
}
else {
StringInputProcessingClient processingClient = new StringInputProcessingClient(input,validators);
boolean success = processingClient.validate();
if (!success){
JOptionPane.showMessageDialog(parentFrame, processingClient.getValidationMessage(),
parentFrame.langpack.getString("UserInputPanel.error.caption"),
JOptionPane.WARNING_MESSAGE);
}
result = success;
}
}
else {
if (isDirectory){
showMessage("dir.nodirectory");
}
else {
showMessage("file.nofile");
}
}
return result;
}
public boolean validateField(){
boolean result = false;
int fileCount = model.getSize();
for (int i=0; i < fileCount; i++){
result = validateFile((String) model.getElementAt(i));
if (!result){
break;
}
}
return result;
}
public boolean isAllowEmptyInput()
{
return allowEmpty;
}
public void setAllowEmptyInput(boolean allowEmpty)
{
this.allowEmpty = allowEmpty;
}
public void focusGained(FocusEvent e)
{
// TODO Auto-generated method stub
}
public void focusLost(FocusEvent e)
{
}
public boolean isCreateMultipleVariables()
{
return createMultipleVariables;
}
public void setCreateMultipleVariables(boolean createMultipleVariables)
{
this.createMultipleVariables = createMultipleVariables;
}
}
| |
/*
* Copyright (c) Andrey Kuznetsov. All Rights Reserved.
*
* http://www.imagero.com/layout/
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* o Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* o Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* o Neither the name of imagero Andrey Kuznetsov nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.smartg.swing;
import java.awt.Component;
import java.awt.Container;
import java.awt.Dimension;
import java.awt.Insets;
import java.awt.LayoutManager2;
import java.util.HashMap;
/**
* BCLayout - like BorderLayout but components may be also placed in corners.
* Obsolete since JNodeLayout is there!
*
* @author Andrey Kuznetsov
*/
public class BCLayout implements LayoutManager2 {
int hgap;
int vgap;
HashMap<String, Component> compTable = new HashMap<String, Component>(9);
HashMap<Component, String> revTable = new HashMap<Component, String>(9);
public static final String NORTH = "North";
public static final String SOUTH = "South";
public static final String EAST = "East";
public static final String WEST = "West";
public static final String CENTER = "Center";
public static final String NORTH_EAST = "NorthEast";
public static final String NORTH_WEST = "NorthWest";
public static final String SOUTH_EAST = "SouthEast";
public static final String SOUTH_WEST = "SouthWest";
//used only internally.
private static final Dimension nd = new Dimension();
boolean fillEmptyCorners;
public boolean isFillEmptyCorners() {
return fillEmptyCorners;
}
public void setFillEmptyCorners(boolean b) {
this.fillEmptyCorners = b;
}
public void addLayoutComponent(Component comp, Object constraints) {
addLayoutComponent((String) constraints, comp);
}
public Dimension maximumLayoutSize(Container target) {
return new Dimension(Integer.MAX_VALUE, Integer.MAX_VALUE);
}
public float getLayoutAlignmentX(Container target) {
return 0.5f;
}
public float getLayoutAlignmentY(Container target) {
return 0.5f;
}
public void invalidateLayout(Container target) {
}
public void addLayoutComponent(String name, Component comp) {
if (name == null) {
name = CENTER;
}
Object key = revTable.remove(comp);
compTable.remove(name);
if(key != null) {
compTable.remove(key);
}
compTable.put(name, comp);
revTable.put(comp, name);
}
public void removeLayoutComponent(Component comp) {
Object key = revTable.remove(comp);
if (key != null) {
compTable.remove(key);
}
}
Dimension computePreferredRow(Component left, Component center, Component right) {
Dimension dl = left != null ? left.getPreferredSize() : nd;
Dimension dc = center != null ? center.getPreferredSize() : nd;
Dimension dr = right != null ? right.getPreferredSize() : nd;
return new Dimension(dl.width + dc.width + dr.width + hgap + hgap, Math.max(dl.height, Math.max(dr.height, dc.height)));
}
Dimension computeMinimumRow(Component left, Component center, Component right) {
Dimension dl = left != null ? left.getMinimumSize() : nd;
Dimension dc = center != null ? center.getMinimumSize() : nd;
Dimension dr = right != null ? right.getMinimumSize() : nd;
return new Dimension(dl.width + dc.width + dr.width + hgap + hgap, Math.max(dl.height, Math.max(dr.height, dc.height)));
}
Dimension computePreferredColumn(Component top, Component center, Component bottom) {
Dimension dt = top != null ? top.getPreferredSize() : nd;
Dimension dc = center != null ? center.getPreferredSize() : nd;
Dimension db = bottom != null ? bottom.getPreferredSize() : nd;
return new Dimension(Math.max(dt.width, Math.max(db.width, dc.width)), dt.height + dc.height + db.height + vgap + vgap);
}
Dimension computeMinimumColumn(Component top, Component center, Component bottom) {
Dimension dt = top != null ? top.getMinimumSize() : nd;
Dimension dc = center != null ? center.getMinimumSize() : nd;
Dimension db = bottom != null ? bottom.getMinimumSize() : nd;
return new Dimension(Math.max(dt.width, Math.max(db.width, dc.width)), dt.height + dc.height + db.height + vgap + vgap);
}
private Dimension computePreferredEastColumn() {
return computePreferredColumn(get(NORTH_EAST), get(EAST), get(SOUTH_EAST));
}
private Dimension computePreferredWestColumn() {
return computePreferredColumn(get(NORTH_WEST), get(WEST), get(SOUTH_WEST));
}
public Dimension preferredLayoutSize(Container target) {
synchronized (target.getTreeLock()) {
return computePreferredSize(target);
}
}
private Dimension computePreferredSize(Container target) {
Dimension topRow = computePreferredTopRow();
Dimension middleRow = computePreferredMiddleRow();
Dimension bottomRow = computePreferredBottomRow();
Insets insets = target.getInsets();
Dimension d = new Dimension(
Math.max(topRow.width, Math.max(middleRow.width, bottomRow.width)),
topRow.height + middleRow.height + bottomRow.height + vgap + vgap);
d.width += insets.left + insets.right;
d.height += insets.top + insets.bottom;
return d;
}
private Dimension computePreferredBottomRow() {
return computePreferredRow(get(SOUTH_EAST), get(SOUTH), get(SOUTH_WEST));
}
private Dimension computePreferredMiddleRow() {
return computePreferredRow(get(EAST), get(CENTER), get(WEST));
}
private Dimension computePreferredTopRow() {
return computePreferredRow(get(NORTH_EAST), get(NORTH), get(NORTH_WEST));
}
public Dimension minimumLayoutSize(Container target) {
synchronized (target.getTreeLock()) {
return computeMinimumSize(target);
}
}
private Dimension computeMinimumSize(Container target) {
Dimension topRow = computeMinimumTopRow();
Dimension middleRow = computeMinimumMiddleRow();
Dimension bottomRow = computeMinimumBottomRow();
Insets insets = target.getInsets();
Dimension d = new Dimension(
Math.max(topRow.width, Math.max(middleRow.width, bottomRow.width)),
topRow.height + middleRow.height + bottomRow.height + vgap + vgap);
d.width += insets.left + insets.right;
d.height += insets.top + insets.bottom;
return d;
}
private Dimension computeMinimumBottomRow() {
return computeMinimumRow(get(SOUTH_EAST), get(SOUTH), get(SOUTH_WEST));
}
private Dimension computeMinimumMiddleRow() {
return computeMinimumRow(get(EAST), get(CENTER), get(WEST));
}
private Dimension computeMinimumTopRow() {
return computeMinimumRow(get(NORTH_EAST), get(NORTH), get(NORTH_WEST));
}
Component get(String key) {
Component c = compTable.get(key);
if(c == null || !c.isVisible()) {
return null;
}
return c;
}
public void layoutContainer(Container target) {
synchronized (target.getTreeLock()) {
Insets insets = target.getInsets();
Dimension size = target.getSize();
size.width -= insets.left + insets.right;
size.height -= insets.top + insets.bottom;
final int top = insets.top;
final int bottom = target.getHeight() - insets.bottom;
final int left = insets.left;
final int right = target.getWidth() - insets.right;
Dimension topRow = computePreferredTopRow();
Dimension bottomRow = computePreferredBottomRow();
Dimension rightColumn = computePreferredEastColumn();
Dimension leftColumn = computePreferredWestColumn();
Component nw = get(NORTH_WEST);
if(nw != null) {
nw.setBounds(left, top, leftColumn.width, topRow.height);
}
Component sw = get(SOUTH_WEST);
if(sw != null) {
sw.setBounds(left, bottom, leftColumn.width, bottomRow.height);
}
Component ne = get(NORTH_EAST);
if(ne != null) {
ne.setBounds(right - rightColumn.width, top, rightColumn.width, topRow.height);
}
Component se = get(SOUTH_EAST);
if(se != null) {
se.setBounds(right - rightColumn.width, bottom - bottomRow.height, rightColumn.width, bottomRow.height);
}
Component n = get(NORTH);
if(n != null) {
int _left = left + ((nw != null || !fillEmptyCorners) ? leftColumn.width : 0);
n.setBounds(_left, top, right - ((ne != null || !fillEmptyCorners) ? rightColumn.width : 0) - _left, topRow.height);
}
Component s = get(SOUTH);
if(s != null) {
int _left = left + ((sw != null || !fillEmptyCorners) ? leftColumn.width : 0);
s.setBounds(_left, bottom - bottomRow.height, right - ((se != null || !fillEmptyCorners) ? rightColumn.width : 0) - _left, bottomRow.height);
}
Component w = get(WEST);
if(w != null) {
int _top = top + ((nw != null || !fillEmptyCorners || n != null) ? topRow.height : 0);
w.setBounds(left, _top, leftColumn.width, bottom - ((sw != null || !fillEmptyCorners || s != null) ? bottomRow.height: 0) - _top);
}
Component e = get(EAST);
if(e != null) {
int _top = top + ((nw != null || !fillEmptyCorners || n != null) ? topRow.height : 0);
e.setBounds(right - rightColumn.width, _top, rightColumn.width, bottom - ((sw != null || !fillEmptyCorners || s != null) ? bottomRow.height: 0) - _top);
}
Component c = get(CENTER);
if(c != null) {
int _left = left + leftColumn.width;
int _top = top + topRow.height;
c.setBounds(_left, _top, right - rightColumn.width - _left, bottom - bottomRow.height - _top);
}
}
}
}
| |
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.civicinfo.v2.model;
/**
* The result of a voter info lookup query.
*
* <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is
* transmitted over HTTP when working with the Google Civic Information API. For a detailed
* explanation see:
* <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a>
* </p>
*
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public final class VoterInfoResponse extends com.google.api.client.json.GenericJson {

  // NOTE: generated model class (see header) - fields are bound to JSON keys
  // via @com.google.api.client.util.Key, and all setters return `this` to
  // allow call chaining. Prefer regenerating over hand-editing.

  /**
   * Contests that will appear on the voter's ballot.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.util.List<Contest> contests;

  static {
    // hack to force ProGuard to consider Contest used, since otherwise it would be stripped out
    // see https://github.com/google/google-api-java-client/issues/543
    com.google.api.client.util.Data.nullOf(Contest.class);
  }

  /**
   * Locations where a voter is eligible to drop off a completed ballot. The voter must have
   * received and completed a ballot prior to arriving at the location. The location may not have
   * ballots available on the premises. These locations could be open on or before election day as
   * indicated in the pollingHours field.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.util.List<PollingLocation> dropOffLocations;

  static {
    // hack to force ProGuard to consider PollingLocation used, since otherwise it would be stripped out
    // see https://github.com/google/google-api-java-client/issues/543
    com.google.api.client.util.Data.nullOf(PollingLocation.class);
  }

  /**
   * Locations where the voter is eligible to vote early, prior to election day.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.util.List<PollingLocation> earlyVoteSites;

  static {
    // hack to force ProGuard to consider PollingLocation used, since otherwise it would be stripped out
    // see https://github.com/google/google-api-java-client/issues/543
    com.google.api.client.util.Data.nullOf(PollingLocation.class);
  }

  /**
   * The election that was queried.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private Election election;

  /**
   * Identifies what kind of resource this is. Value: the fixed string
   * "civicinfo#voterInfoResponse".
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.lang.String kind;

  /**
   * Specifies whether voters in the precinct vote only by mailing their ballots (with the possible
   * option of dropping off their ballots as well).
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.lang.Boolean mailOnly;

  /**
   * The normalized version of the requested address
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private SimpleAddressType normalizedInput;

  /**
   * When there are multiple elections for a voter address, the otherElections field is populated in
   * the API response and there are two possibilities: 1. If the earliest election is not the
   * intended election, specify the election ID of the desired election in a second API request
   * using the electionId field. 2. If these elections occur on the same day, the API doesn't return
   * any polling location, contest, or election official information to ensure that an additional
   * query is made. For user-facing applications, we recommend displaying these elections to the
   * user to disambiguate. A second API request using the electionId field should be made for the
   * election that is relevant to the user.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.util.List<Election> otherElections;

  static {
    // hack to force ProGuard to consider Election used, since otherwise it would be stripped out
    // see https://github.com/google/google-api-java-client/issues/543
    com.google.api.client.util.Data.nullOf(Election.class);
  }

  /**
   * Locations where the voter is eligible to vote on election day.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.util.List<PollingLocation> pollingLocations;

  static {
    // hack to force ProGuard to consider PollingLocation used, since otherwise it would be stripped out
    // see https://github.com/google/google-api-java-client/issues/543
    com.google.api.client.util.Data.nullOf(PollingLocation.class);
  }

  /**
   * The value may be {@code null}.
   */
  // NOTE(review): no description in the discovery document; presumably the
  // identifier of the voter's precinct - verify against the Civic Info API docs.
  @com.google.api.client.util.Key
  private java.lang.String precinctId;

  /**
   * Local Election Information for the state that the voter votes in. For the US, there will only
   * be one element in this array.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.util.List<AdministrationRegion> state;

  static {
    // hack to force ProGuard to consider AdministrationRegion used, since otherwise it would be stripped out
    // see https://github.com/google/google-api-java-client/issues/543
    com.google.api.client.util.Data.nullOf(AdministrationRegion.class);
  }

  /**
   * Contests that will appear on the voter's ballot.
   * @return value or {@code null} for none
   */
  public java.util.List<Contest> getContests() {
    return contests;
  }

  /**
   * Contests that will appear on the voter's ballot.
   * @param contests contests or {@code null} for none
   */
  public VoterInfoResponse setContests(java.util.List<Contest> contests) {
    this.contests = contests;
    return this;
  }

  /**
   * Locations where a voter is eligible to drop off a completed ballot. The voter must have
   * received and completed a ballot prior to arriving at the location. The location may not have
   * ballots available on the premises. These locations could be open on or before election day as
   * indicated in the pollingHours field.
   * @return value or {@code null} for none
   */
  public java.util.List<PollingLocation> getDropOffLocations() {
    return dropOffLocations;
  }

  /**
   * Locations where a voter is eligible to drop off a completed ballot. The voter must have
   * received and completed a ballot prior to arriving at the location. The location may not have
   * ballots available on the premises. These locations could be open on or before election day as
   * indicated in the pollingHours field.
   * @param dropOffLocations dropOffLocations or {@code null} for none
   */
  public VoterInfoResponse setDropOffLocations(java.util.List<PollingLocation> dropOffLocations) {
    this.dropOffLocations = dropOffLocations;
    return this;
  }

  /**
   * Locations where the voter is eligible to vote early, prior to election day.
   * @return value or {@code null} for none
   */
  public java.util.List<PollingLocation> getEarlyVoteSites() {
    return earlyVoteSites;
  }

  /**
   * Locations where the voter is eligible to vote early, prior to election day.
   * @param earlyVoteSites earlyVoteSites or {@code null} for none
   */
  public VoterInfoResponse setEarlyVoteSites(java.util.List<PollingLocation> earlyVoteSites) {
    this.earlyVoteSites = earlyVoteSites;
    return this;
  }

  /**
   * The election that was queried.
   * @return value or {@code null} for none
   */
  public Election getElection() {
    return election;
  }

  /**
   * The election that was queried.
   * @param election election or {@code null} for none
   */
  public VoterInfoResponse setElection(Election election) {
    this.election = election;
    return this;
  }

  /**
   * Identifies what kind of resource this is. Value: the fixed string
   * "civicinfo#voterInfoResponse".
   * @return value or {@code null} for none
   */
  public java.lang.String getKind() {
    return kind;
  }

  /**
   * Identifies what kind of resource this is. Value: the fixed string
   * "civicinfo#voterInfoResponse".
   * @param kind kind or {@code null} for none
   */
  public VoterInfoResponse setKind(java.lang.String kind) {
    this.kind = kind;
    return this;
  }

  /**
   * Specifies whether voters in the precinct vote only by mailing their ballots (with the possible
   * option of dropping off their ballots as well).
   * @return value or {@code null} for none
   */
  public java.lang.Boolean getMailOnly() {
    return mailOnly;
  }

  /**
   * Specifies whether voters in the precinct vote only by mailing their ballots (with the possible
   * option of dropping off their ballots as well).
   * @param mailOnly mailOnly or {@code null} for none
   */
  public VoterInfoResponse setMailOnly(java.lang.Boolean mailOnly) {
    this.mailOnly = mailOnly;
    return this;
  }

  /**
   * The normalized version of the requested address
   * @return value or {@code null} for none
   */
  public SimpleAddressType getNormalizedInput() {
    return normalizedInput;
  }

  /**
   * The normalized version of the requested address
   * @param normalizedInput normalizedInput or {@code null} for none
   */
  public VoterInfoResponse setNormalizedInput(SimpleAddressType normalizedInput) {
    this.normalizedInput = normalizedInput;
    return this;
  }

  /**
   * When there are multiple elections for a voter address, the otherElections field is populated in
   * the API response and there are two possibilities: 1. If the earliest election is not the
   * intended election, specify the election ID of the desired election in a second API request
   * using the electionId field. 2. If these elections occur on the same day, the API doesn't return
   * any polling location, contest, or election official information to ensure that an additional
   * query is made. For user-facing applications, we recommend displaying these elections to the
   * user to disambiguate. A second API request using the electionId field should be made for the
   * election that is relevant to the user.
   * @return value or {@code null} for none
   */
  public java.util.List<Election> getOtherElections() {
    return otherElections;
  }

  /**
   * When there are multiple elections for a voter address, the otherElections field is populated in
   * the API response and there are two possibilities: 1. If the earliest election is not the
   * intended election, specify the election ID of the desired election in a second API request
   * using the electionId field. 2. If these elections occur on the same day, the API doesn't return
   * any polling location, contest, or election official information to ensure that an additional
   * query is made. For user-facing applications, we recommend displaying these elections to the
   * user to disambiguate. A second API request using the electionId field should be made for the
   * election that is relevant to the user.
   * @param otherElections otherElections or {@code null} for none
   */
  public VoterInfoResponse setOtherElections(java.util.List<Election> otherElections) {
    this.otherElections = otherElections;
    return this;
  }

  /**
   * Locations where the voter is eligible to vote on election day.
   * @return value or {@code null} for none
   */
  public java.util.List<PollingLocation> getPollingLocations() {
    return pollingLocations;
  }

  /**
   * Locations where the voter is eligible to vote on election day.
   * @param pollingLocations pollingLocations or {@code null} for none
   */
  public VoterInfoResponse setPollingLocations(java.util.List<PollingLocation> pollingLocations) {
    this.pollingLocations = pollingLocations;
    return this;
  }

  /**
   * @return value or {@code null} for none
   */
  public java.lang.String getPrecinctId() {
    return precinctId;
  }

  /**
   * @param precinctId precinctId or {@code null} for none
   */
  public VoterInfoResponse setPrecinctId(java.lang.String precinctId) {
    this.precinctId = precinctId;
    return this;
  }

  /**
   * Local Election Information for the state that the voter votes in. For the US, there will only
   * be one element in this array.
   * @return value or {@code null} for none
   */
  public java.util.List<AdministrationRegion> getState() {
    return state;
  }

  /**
   * Local Election Information for the state that the voter votes in. For the US, there will only
   * be one element in this array.
   * @param state state or {@code null} for none
   */
  public VoterInfoResponse setState(java.util.List<AdministrationRegion> state) {
    this.state = state;
    return this;
  }

  // Covariant overrides narrowing the GenericJson return types for chaining.
  @Override
  public VoterInfoResponse set(String fieldName, Object value) {
    return (VoterInfoResponse) super.set(fieldName, value);
  }

  @Override
  public VoterInfoResponse clone() {
    return (VoterInfoResponse) super.clone();
  }
}
| |
package net.minidev.ovh.api.nichandle;
/**
 * Countries a nichandle can choose.
 *
 * Each constant wraps the exact value used on the wire (two-letter codes plus
 * the sentinel {@code UNKNOWN}); {@link #toString()} returns that value, so
 * serialization round-trips through {@code valueOf(toString())}.
 */
public enum OvhCountryEnum {
	AC("AC"), AD("AD"), AE("AE"), AF("AF"), AG("AG"), AI("AI"), AL("AL"), AM("AM"), AO("AO"), AQ("AQ"),
	AR("AR"), AS("AS"), AT("AT"), AU("AU"), AW("AW"), AX("AX"), AZ("AZ"), BA("BA"), BB("BB"), BD("BD"),
	BE("BE"), BF("BF"), BG("BG"), BH("BH"), BI("BI"), BJ("BJ"), BL("BL"), BM("BM"), BN("BN"), BO("BO"),
	BQ("BQ"), BR("BR"), BS("BS"), BT("BT"), BW("BW"), BY("BY"), BZ("BZ"), CA("CA"), CC("CC"), CD("CD"),
	CF("CF"), CG("CG"), CH("CH"), CI("CI"), CK("CK"), CL("CL"), CM("CM"), CN("CN"), CO("CO"), CR("CR"),
	CU("CU"), CV("CV"), CW("CW"), CX("CX"), CY("CY"), CZ("CZ"), DE("DE"), DG("DG"), DJ("DJ"), DK("DK"),
	DM("DM"), DO("DO"), DZ("DZ"), EA("EA"), EC("EC"), EE("EE"), EG("EG"), EH("EH"), ER("ER"), ES("ES"),
	ET("ET"), FI("FI"), FJ("FJ"), FK("FK"), FM("FM"), FO("FO"), FR("FR"), GA("GA"), GB("GB"), GD("GD"),
	GE("GE"), GF("GF"), GG("GG"), GH("GH"), GI("GI"), GL("GL"), GM("GM"), GN("GN"), GP("GP"), GQ("GQ"),
	GR("GR"), GS("GS"), GT("GT"), GU("GU"), GW("GW"), GY("GY"), HK("HK"), HN("HN"), HR("HR"), HT("HT"),
	HU("HU"), IC("IC"), ID("ID"), IE("IE"), IL("IL"), IM("IM"), IN("IN"), IO("IO"), IQ("IQ"), IR("IR"),
	IS("IS"), IT("IT"), JE("JE"), JM("JM"), JO("JO"), JP("JP"), KE("KE"), KG("KG"), KH("KH"), KI("KI"),
	KM("KM"), KN("KN"), KP("KP"), KR("KR"), KW("KW"), KY("KY"), KZ("KZ"), LA("LA"), LB("LB"), LC("LC"),
	LI("LI"), LK("LK"), LR("LR"), LS("LS"), LT("LT"), LU("LU"), LV("LV"), LY("LY"), MA("MA"), MC("MC"),
	MD("MD"), ME("ME"), MF("MF"), MG("MG"), MH("MH"), MK("MK"), ML("ML"), MM("MM"), MN("MN"), MO("MO"),
	MP("MP"), MQ("MQ"), MR("MR"), MS("MS"), MT("MT"), MU("MU"), MV("MV"), MW("MW"), MX("MX"), MY("MY"),
	MZ("MZ"), NA("NA"), NC("NC"), NE("NE"), NF("NF"), NG("NG"), NI("NI"), NL("NL"), NO("NO"), NP("NP"),
	NR("NR"), NU("NU"), NZ("NZ"), OM("OM"), PA("PA"), PE("PE"), PF("PF"), PG("PG"), PH("PH"), PK("PK"),
	PL("PL"), PM("PM"), PN("PN"), PR("PR"), PS("PS"), PT("PT"), PW("PW"), PY("PY"), QA("QA"), RE("RE"),
	RO("RO"), RS("RS"), RU("RU"), RW("RW"), SA("SA"), SB("SB"), SC("SC"), SD("SD"), SE("SE"), SG("SG"),
	SH("SH"), SI("SI"), SJ("SJ"), SK("SK"), SL("SL"), SM("SM"), SN("SN"), SO("SO"), SR("SR"), SS("SS"),
	ST("ST"), SV("SV"), SX("SX"), SY("SY"), SZ("SZ"), TA("TA"), TC("TC"), TD("TD"), TF("TF"), TG("TG"),
	TH("TH"), TJ("TJ"), TK("TK"), TL("TL"), TM("TM"), TN("TN"), TO("TO"), TR("TR"), TT("TT"), TV("TV"),
	TW("TW"), TZ("TZ"), UA("UA"), UG("UG"), UM("UM"), UNKNOWN("UNKNOWN"), US("US"), UY("UY"), UZ("UZ"),
	VA("VA"), VC("VC"), VE("VE"), VG("VG"), VI("VI"), VN("VN"), VU("VU"), WF("WF"), WS("WS"), XK("XK"),
	YE("YE"), YT("YT"), ZA("ZA"), ZM("ZM"), ZW("ZW");

	// The serialized value sent to / received from the OVH API.
	final String value;

	OvhCountryEnum(String s) {
		this.value = s;
	}

	/** Returns the serialized API value of this country code. */
	@Override
	public String toString() {
		return this.value;
	}
}
| |
/*
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*/
package java.beans;
import com.sun.beans.finder.PersistenceDelegateFinder;
import java.util.HashMap;
import java.util.IdentityHashMap;
import java.util.Map;
/**
* An <code>Encoder</code> is a class which can be used to create
* files or streams that encode the state of a collection of
* JavaBeans in terms of their public APIs. The <code>Encoder</code>,
* in conjunction with its persistence delegates, is responsible for
* breaking the object graph down into a series of <code>Statements</code>s
* and <code>Expression</code>s which can be used to create it.
* A subclass typically provides a syntax for these expressions
* using some human readable form - like Java source code or XML.
*
* @since 1.4
*
* @author Philip Milne
*/
public class Encoder {
private final PersistenceDelegateFinder finder = new PersistenceDelegateFinder();
private Map bindings = new IdentityHashMap();
private ExceptionListener exceptionListener;
boolean executeStatements = true;
private Map attributes;
/**
 * Write the specified object to the output stream.
 * The serialized form will denote a series of
 * expressions, the combined effect of which will create
 * an equivalent object when the input stream is read.
 * By default, the object is assumed to be a <em>JavaBean</em>
 * with a nullary constructor, whose state is defined by
 * the matching pairs of "setter" and "getter" methods
 * returned by the Introspector.
 *
 * @param o The object to be written to the stream.
 *
 * @see XMLDecoder#readObject
 */
protected void writeObject(Object o) {
    // The encoder never encodes itself.
    if (o == this) {
        return;
    }
    Class<?> type = (o == null) ? null : o.getClass();
    PersistenceDelegate delegate = getPersistenceDelegate(type);
    delegate.writeObject(o, this);
}
/**
 * Sets the exception handler for this stream to <code>exceptionListener</code>.
 * The exception handler is notified when this stream catches recoverable
 * exceptions.
 *
 * @param exceptionListener The exception handler for this stream;
 * if <code>null</code> the default exception listener will be used.
 *
 * @see #getExceptionListener
 */
public void setExceptionListener(ExceptionListener exceptionListener) {
    // A null value is stored as-is; getExceptionListener() falls back to the
    // shared default listener on demand.
    this.exceptionListener = exceptionListener;
}
/**
* Gets the exception handler for this stream.
*
* @return The exception handler for this stream;
* Will return the default exception listener if this has not explicitly been set.
*
* @see #setExceptionListener
*/
public ExceptionListener getExceptionListener() {
return (exceptionListener != null) ? exceptionListener : Statement.defaultExceptionListener;
}
Object getValue(Expression exp) {
try {
return (exp == null) ? null : exp.getValue();
}
catch (Exception e) {
getExceptionListener().exceptionThrown(e);
throw new RuntimeException("failed to evaluate: " + exp.toString());
}
}
/**
* Returns the persistence delegate for the given type.
* The persistence delegate is calculated by applying
* the following rules in order:
* <ol>
* <li>
* If a persistence delegate is associated with the given type
* by using the {@link #setPersistenceDelegate} method
* it is returned.
* <li>
* A persistence delegate is then looked up by the name
* composed of the the fully qualified name of the given type
* and the "PersistenceDelegate" postfix.
* For example, a persistence delegate for the {@code Bean} class
* should be named {@code BeanPersistenceDelegate}
* and located in the same package.
* <pre>
* public class Bean { ... }
* public class BeanPersistenceDelegate { ... }</pre>
* The instance of the {@code BeanPersistenceDelegate} class
* is returned for the {@code Bean} class.
* <li>
* If the type is {@code null},
* a shared internal persistence delegate is returned
* that encodes {@code null} value.
* <li>
* If the type is a {@code enum} declaration,
* a shared internal persistence delegate is returned
* that encodes constants of this enumeration
* by their names.
* <li>
* If the type is a primitive type or the corresponding wrapper,
* a shared internal persistence delegate is returned
* that encodes values of the given type.
* <li>
* If the type is an array,
* a shared internal persistence delegate is returned
* that encodes an array of the appropriate type and length,
* and each of its elements as if they are properties.
* <li>
* If the type is a proxy,
* a shared internal persistence delegate is returned
* that encodes a proxy instance by using
* the {@link java.lang.reflect.Proxy#newProxyInstance} method.
* <li>
* If the {@link BeanInfo} for this type has a {@link BeanDescriptor}
* which defined a "persistenceDelegate" attribute,
* the value of this named attribute is returned.
* <li>
* In all other cases the default persistence delegate is returned.
* The default persistence delegate assumes the type is a <em>JavaBean</em>,
* implying that it has a default constructor and that its state
* may be characterized by the matching pairs of "setter" and "getter"
* methods returned by the {@link Introspector} class.
* The default constructor is the constructor with the greatest number
* of parameters that has the {@link ConstructorProperties} annotation.
* If none of the constructors has the {@code ConstructorProperties} annotation,
* then the nullary constructor (constructor with no parameters) will be used.
* For example, in the following code fragment, the nullary constructor
* for the {@code Foo} class will be used,
* while the two-parameter constructor
* for the {@code Bar} class will be used.
* <pre>
* public class Foo {
* public Foo() { ... }
* public Foo(int x) { ... }
* }
* public class Bar {
* public Bar() { ... }
* @ConstructorProperties({"x"})
* public Bar(int x) { ... }
* @ConstructorProperties({"x", "y"})
* public Bar(int x, int y) { ... }
* }</pre>
* </ol>
*
* @param type the class of the objects
* @return the persistence delegate for the given type
*
* @see #setPersistenceDelegate
* @see java.beans.Introspector#getBeanInfo
* @see java.beans.BeanInfo#getBeanDescriptor
*/
public PersistenceDelegate getPersistenceDelegate(Class<?> type) {
PersistenceDelegate pd = this.finder.find(type);
if (pd == null) {
pd = MetaData.getPersistenceDelegate(type);
if (pd != null) {
this.finder.register(type, pd);
}
}
return pd;
}
/**
* Associates the specified persistence delegate with the given type.
*
* @param type the class of objects that the specified persistence delegate applies to
* @param delegate the persistence delegate for instances of the given type
*
* @see #getPersistenceDelegate
* @see java.beans.Introspector#getBeanInfo
* @see java.beans.BeanInfo#getBeanDescriptor
*/
public void setPersistenceDelegate(Class<?> type, PersistenceDelegate delegate) {
this.finder.register(type, delegate);
}
/**
* Removes the entry for this instance, returning the old entry.
*
* @param oldInstance The entry that should be removed.
* @return The entry that was removed.
*
* @see #get
*/
public Object remove(Object oldInstance) {
Expression exp = (Expression)bindings.remove(oldInstance);
return getValue(exp);
}
/**
* Returns a tentative value for <code>oldInstance</code> in
* the environment created by this stream. A persistence
* delegate can use its <code>mutatesTo</code> method to
* determine whether this value may be initialized to
* form the equivalent object at the output or whether
* a new object must be instantiated afresh. If the
* stream has not yet seen this value, null is returned.
*
* @param oldInstance The instance to be looked up.
* @return The object, null if the object has not been seen before.
*/
public Object get(Object oldInstance) {
if (oldInstance == null || oldInstance == this ||
oldInstance.getClass() == String.class) {
return oldInstance;
}
Expression exp = (Expression)bindings.get(oldInstance);
return getValue(exp);
}
private Object writeObject1(Object oldInstance) {
Object o = get(oldInstance);
if (o == null) {
writeObject(oldInstance);
o = get(oldInstance);
}
return o;
}
private Statement cloneStatement(Statement oldExp) {
Object oldTarget = oldExp.getTarget();
Object newTarget = writeObject1(oldTarget);
Object[] oldArgs = oldExp.getArguments();
Object[] newArgs = new Object[oldArgs.length];
for (int i = 0; i < oldArgs.length; i++) {
newArgs[i] = writeObject1(oldArgs[i]);
}
Statement newExp = Statement.class.equals(oldExp.getClass())
? new Statement(newTarget, oldExp.getMethodName(), newArgs)
: new Expression(newTarget, oldExp.getMethodName(), newArgs);
newExp.loader = oldExp.loader;
return newExp;
}
/**
* Writes statement <code>oldStm</code> to the stream.
* The <code>oldStm</code> should be written entirely
* in terms of the callers environment, i.e. the
* target and all arguments should be part of the
* object graph being written. These expressions
* represent a series of "what happened" expressions
* which tell the output stream how to produce an
* object graph like the original.
* <p>
* The implementation of this method will produce
* a second expression to represent the same expression in
* an environment that will exist when the stream is read.
* This is achieved simply by calling <code>writeObject</code>
* on the target and all the arguments and building a new
* expression with the results.
*
* @param oldStm The expression to be written to the stream.
*/
public void writeStatement(Statement oldStm) {
// System.out.println("writeStatement: " + oldExp);
Statement newStm = cloneStatement(oldStm);
if (oldStm.getTarget() != this && executeStatements) {
try {
newStm.execute();
} catch (Exception e) {
getExceptionListener().exceptionThrown(new Exception("Encoder: discarding statement "
+ newStm, e));
}
}
}
/**
* The implementation first checks to see if an
* expression with this value has already been written.
* If not, the expression is cloned, using
* the same procedure as <code>writeStatement</code>,
* and the value of this expression is reconciled
* with the value of the cloned expression
* by calling <code>writeObject</code>.
*
* @param oldExp The expression to be written to the stream.
*/
public void writeExpression(Expression oldExp) {
// System.out.println("Encoder::writeExpression: " + oldExp);
Object oldValue = getValue(oldExp);
if (get(oldValue) != null) {
return;
}
bindings.put(oldValue, (Expression)cloneStatement(oldExp));
writeObject(oldValue);
}
void clear() {
bindings.clear();
}
// Package private method for setting an attributes table for the encoder
void setAttribute(Object key, Object value) {
if (attributes == null) {
attributes = new HashMap();
}
attributes.put(key, value);
}
Object getAttribute(Object key) {
if (attributes == null) {
return null;
}
return attributes.get(key);
}
}
| |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.matrix.stats;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
/**
* Descriptive stats gathered per shard. Coordinating node computes final correlation and covariance stats
* based on these descriptive stats. This single pass, parallel approach is based on:
*
* http://prod.sandia.gov/techlib/access-control.cgi/2008/086212.pdf
*/
public class RunningStats implements Writeable, Cloneable {
/** count of observations (same number of observations per field) */
protected long docCount = 0;
/** per field sum of observations */
protected HashMap<String, Double> fieldSum;
/** counts */
protected HashMap<String, Long> counts;
/** mean values (first moment) */
protected HashMap<String, Double> means;
/** variance values (second moment) */
protected HashMap<String, Double> variances;
/** skewness values (third moment) */
protected HashMap<String, Double> skewness;
/** kurtosis values (fourth moment) */
protected HashMap<String, Double> kurtosis;
/** covariance values */
protected HashMap<String, HashMap<String, Double>> covariances;
public RunningStats() {
init();
}
public RunningStats(final String[] fieldNames, final double[] fieldVals) {
if (fieldVals != null && fieldVals.length > 0) {
init();
this.add(fieldNames, fieldVals);
}
}
private void init() {
counts = new HashMap<>();
fieldSum = new HashMap<>();
means = new HashMap<>();
skewness = new HashMap<>();
kurtosis = new HashMap<>();
covariances = new HashMap<>();
variances = new HashMap<>();
}
/** Ctor to create an instance of running statistics */
@SuppressWarnings("unchecked")
public RunningStats(StreamInput in) throws IOException {
this();
// read doc count
docCount = (Long)in.readGenericValue();
// read fieldSum
fieldSum = (HashMap<String, Double>)in.readGenericValue();
// counts
counts = (HashMap<String, Long>)in.readGenericValue();
// means
means = (HashMap<String, Double>)in.readGenericValue();
// variances
variances = (HashMap<String, Double>)in.readGenericValue();
// skewness
skewness = (HashMap<String, Double>)in.readGenericValue();
// kurtosis
kurtosis = (HashMap<String, Double>)in.readGenericValue();
// read covariances
covariances = (HashMap<String, HashMap<String, Double>>)in.readGenericValue();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
// marshall doc count
out.writeGenericValue(docCount);
// marshall fieldSum
out.writeGenericValue(fieldSum);
// counts
out.writeGenericValue(counts);
// mean
out.writeGenericValue(means);
// variances
out.writeGenericValue(variances);
// skewness
out.writeGenericValue(skewness);
// kurtosis
out.writeGenericValue(kurtosis);
// covariances
out.writeGenericValue(covariances);
}
/** updates running statistics with a documents field values **/
public void add(final String[] fieldNames, final double[] fieldVals) {
if (fieldNames == null) {
throw new IllegalArgumentException("Cannot add statistics without field names.");
} else if (fieldVals == null) {
throw new IllegalArgumentException("Cannot add statistics without field values.");
} else if (fieldNames.length != fieldVals.length) {
throw new IllegalArgumentException("Number of field values do not match number of field names.");
}
// update total, mean, and variance
++docCount;
String fieldName;
double fieldValue;
double m1, m2, m3, m4; // moments
double d, dn, dn2, t1;
final HashMap<String, Double> deltas = new HashMap<>();
for (int i = 0; i < fieldNames.length; ++i) {
fieldName = fieldNames[i];
fieldValue = fieldVals[i];
// update counts
counts.put(fieldName, 1 + (counts.containsKey(fieldName) ? counts.get(fieldName) : 0));
// update running sum
fieldSum.put(fieldName, fieldValue + (fieldSum.containsKey(fieldName) ? fieldSum.get(fieldName) : 0));
// update running deltas
deltas.put(fieldName, fieldValue * docCount - fieldSum.get(fieldName));
// update running mean, variance, skewness, kurtosis
if (means.containsKey(fieldName) == true) {
// update running means
m1 = means.get(fieldName);
d = fieldValue - m1;
means.put(fieldName, m1 + d / docCount);
// update running variances
dn = d / docCount;
t1 = d * dn * (docCount - 1);
m2 = variances.get(fieldName);
variances.put(fieldName, m2 + t1);
m3 = skewness.get(fieldName);
skewness.put(fieldName, m3 + (t1 * dn * (docCount - 2D) - 3D * dn * m2));
dn2 = dn * dn;
m4 = t1 * dn2 * (docCount * docCount - 3D * docCount + 3D) + 6D * dn2 * m2 - 4D * dn * m3;
kurtosis.put(fieldName, kurtosis.get(fieldName) + m4);
} else {
means.put(fieldName, fieldValue);
variances.put(fieldName, 0.0);
skewness.put(fieldName, 0.0);
kurtosis.put(fieldName, 0.0);
}
}
this.updateCovariance(fieldNames, deltas);
}
/** Update covariance matrix */
private void updateCovariance(final String[] fieldNames, final Map<String, Double> deltas) {
// deep copy of hash keys (field names)
ArrayList<String> cFieldNames = new ArrayList<>(Arrays.asList(fieldNames));
String fieldName;
double dR, newVal;
for (int i = 0; i < fieldNames.length; ++i) {
fieldName = fieldNames[i];
cFieldNames.remove(fieldName);
// update running covariances
dR = deltas.get(fieldName);
HashMap<String, Double> cFieldVals = (covariances.get(fieldName) != null) ? covariances.get(fieldName) : new HashMap<>();
for (String cFieldName : cFieldNames) {
if (cFieldVals.containsKey(cFieldName) == true) {
newVal = cFieldVals.get(cFieldName) + 1.0 / (docCount * (docCount - 1.0)) * dR * deltas.get(cFieldName);
cFieldVals.put(cFieldName, newVal);
} else {
cFieldVals.put(cFieldName, 0.0);
}
}
if (cFieldVals.size() > 0) {
covariances.put(fieldName, cFieldVals);
}
}
}
/**
* Merges the descriptive statistics of a second data set (e.g., per shard)
*
* running computations taken from: http://prod.sandia.gov/techlib/access-control.cgi/2008/086212.pdf
**/
public void merge(final RunningStats other) {
if (other == null) {
return;
} else if (this.docCount == 0) {
for (Map.Entry<String, Double> fs : other.means.entrySet()) {
final String fieldName = fs.getKey();
this.means.put(fieldName, fs.getValue().doubleValue());
this.counts.put(fieldName, other.counts.get(fieldName).longValue());
this.fieldSum.put(fieldName, other.fieldSum.get(fieldName).doubleValue());
this.variances.put(fieldName, other.variances.get(fieldName).doubleValue());
this.skewness.put(fieldName , other.skewness.get(fieldName).doubleValue());
this.kurtosis.put(fieldName, other.kurtosis.get(fieldName).doubleValue());
if (other.covariances.containsKey(fieldName) == true) {
this.covariances.put(fieldName, other.covariances.get(fieldName));
}
this.docCount = other.docCount;
}
return;
}
final double nA = docCount;
final double nB = other.docCount;
// merge count
docCount += other.docCount;
final HashMap<String, Double> deltas = new HashMap<>();
double meanA, varA, skewA, kurtA, meanB, varB, skewB, kurtB;
double d, d2, d3, d4, n2, nA2, nB2;
double newSkew, nk;
// across fields
for (Map.Entry<String, Double> fs : other.means.entrySet()) {
final String fieldName = fs.getKey();
meanA = means.get(fieldName);
varA = variances.get(fieldName);
skewA = skewness.get(fieldName);
kurtA = kurtosis.get(fieldName);
meanB = other.means.get(fieldName);
varB = other.variances.get(fieldName);
skewB = other.skewness.get(fieldName);
kurtB = other.kurtosis.get(fieldName);
// merge counts of two sets
counts.put(fieldName, counts.get(fieldName) + other.counts.get(fieldName));
// merge means of two sets
means.put(fieldName, (nA * means.get(fieldName) + nB * other.means.get(fieldName)) / (nA + nB));
// merge deltas
deltas.put(fieldName, other.fieldSum.get(fieldName) / nB - fieldSum.get(fieldName) / nA);
// merge totals
fieldSum.put(fieldName, fieldSum.get(fieldName) + other.fieldSum.get(fieldName));
// merge variances, skewness, and kurtosis of two sets
d = meanB - meanA; // delta mean
d2 = d * d; // delta mean squared
d3 = d * d2; // delta mean cubed
d4 = d2 * d2; // delta mean 4th power
n2 = docCount * docCount; // num samples squared
nA2 = nA * nA; // doc A num samples squared
nB2 = nB * nB; // doc B num samples squared
// variance
variances.put(fieldName, varA + varB + d2 * nA * other.docCount / docCount);
// skeewness
newSkew = skewA + skewB + d3 * nA * nB * (nA - nB) / n2;
skewness.put(fieldName, newSkew + 3D * d * (nA * varB - nB * varA) / docCount);
// kurtosis
nk = kurtA + kurtB + d4 * nA * nB * (nA2 - nA * nB + nB2) / (n2 * docCount);
kurtosis.put(fieldName, nk + 6D * d2 * (nA2 * varB + nB2 * varA) / n2 + 4D * d * (nA * skewB - nB * skewA) / docCount);
}
this.mergeCovariance(other, deltas);
}
/** Merges two covariance matrices */
private void mergeCovariance(final RunningStats other, final Map<String, Double> deltas) {
final double countA = docCount - other.docCount;
double f, dR, newVal;
for (Map.Entry<String, Double> fs : other.means.entrySet()) {
final String fieldName = fs.getKey();
// merge covariances of two sets
f = countA * other.docCount / this.docCount;
dR = deltas.get(fieldName);
// merge covariances
if (covariances.containsKey(fieldName)) {
HashMap<String, Double> cFieldVals = covariances.get(fieldName);
for (String cFieldName : cFieldVals.keySet()) {
newVal = cFieldVals.get(cFieldName);
if (other.covariances.containsKey(fieldName) && other.covariances.get(fieldName).containsKey(cFieldName)) {
newVal += other.covariances.get(fieldName).get(cFieldName) + f * dR * deltas.get(cFieldName);
} else {
newVal += other.covariances.get(cFieldName).get(fieldName) + f * dR * deltas.get(cFieldName);
}
cFieldVals.put(cFieldName, newVal);
}
covariances.put(fieldName, cFieldVals);
}
}
}
@Override
public RunningStats clone() {
try {
return (RunningStats) super.clone();
} catch (CloneNotSupportedException e) {
throw new ElasticsearchException("Error trying to create a copy of RunningStats");
}
}
}
| |
/*
* Copyright 2015 JBoss, by Red Hat, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.uberfire.ext.plugin.client.editor;
import java.util.Collection;
import javax.enterprise.event.Event;
import javax.enterprise.event.Observes;
import javax.inject.Inject;
import com.google.gwt.core.client.ScriptInjector;
import com.google.gwt.dom.client.StyleInjector;
import org.jboss.errai.common.client.api.Caller;
import org.jboss.errai.common.client.api.RemoteCallback;
import org.uberfire.backend.vfs.ObservablePath;
import org.uberfire.client.workbench.events.ChangeTitleWidgetEvent;
import org.uberfire.client.workbench.type.ClientResourceType;
import org.uberfire.ext.editor.commons.client.BaseEditor;
import org.uberfire.ext.editor.commons.client.BaseEditorView;
import org.uberfire.ext.editor.commons.client.file.popups.SavePopUpPresenter;
import org.uberfire.ext.editor.commons.client.validation.Validator;
import org.uberfire.ext.editor.commons.service.support.SupportsCopy;
import org.uberfire.ext.editor.commons.service.support.SupportsDelete;
import org.uberfire.ext.editor.commons.service.support.SupportsRename;
import org.uberfire.ext.plugin.client.validation.PluginNameValidator;
import org.uberfire.ext.plugin.event.NewPluginRegistered;
import org.uberfire.ext.plugin.event.PluginAdded;
import org.uberfire.ext.plugin.event.PluginDeleted;
import org.uberfire.ext.plugin.event.PluginRenamed;
import org.uberfire.ext.plugin.event.PluginSaved;
import org.uberfire.ext.plugin.event.PluginUnregistered;
import org.uberfire.ext.plugin.model.Media;
import org.uberfire.ext.plugin.model.Plugin;
import org.uberfire.ext.plugin.model.PluginContent;
import org.uberfire.ext.plugin.model.PluginSimpleContent;
import org.uberfire.ext.plugin.model.PluginType;
import org.uberfire.ext.plugin.model.RuntimePlugin;
import org.uberfire.ext.plugin.service.PluginServices;
import org.uberfire.lifecycle.OnStartup;
import org.uberfire.mvp.ParameterizedCommand;
import org.uberfire.mvp.PlaceRequest;
import static com.google.gwt.core.client.ScriptInjector.TOP_WINDOW;
import static org.uberfire.ext.editor.commons.client.menu.MenuItems.COPY;
import static org.uberfire.ext.editor.commons.client.menu.MenuItems.DELETE;
import static org.uberfire.ext.editor.commons.client.menu.MenuItems.RENAME;
import static org.uberfire.ext.editor.commons.client.menu.MenuItems.SAVE;
public abstract class RuntimePluginBaseEditor extends BaseEditor {

    /**
     * Plugin descriptor backing this editor. A placeholder is created in
     * {@link #onStartup} (used only for the part title); it is replaced when the
     * real plugin content arrives or the plugin is renamed.
     */
    protected Plugin plugin;

    @Inject
    private Caller<PluginServices> pluginServices;

    @Inject
    private PluginNameValidator pluginNameValidator;

    @Inject
    private Event<NewPluginRegistered> newPluginRegisteredEvent;

    @Inject
    private Event<PluginUnregistered> pluginUnregisteredEvent;

    @Inject
    private SavePopUpPresenter savePopUpPresenter;

    protected RuntimePluginBaseEditor(final BaseEditorView baseView) {
        super(baseView);
    }

    /** The concrete plugin type this editor edits. */
    protected abstract PluginType getPluginType();

    /** The resource type used to associate this editor with files. */
    protected abstract ClientResourceType getResourceType();

    @OnStartup
    public void onStartup(final ObservablePath path,
                          final PlaceRequest place) {
        init(path,
             place,
             getResourceType(),
             true,
             false,
             SAVE,
             COPY,
             RENAME,
             DELETE);

        // This is only used to define the "name" used by @WorkbenchPartTitle which is called by Uberfire after @OnStartup
        // but before the async call in "loadContent()" has returned. When the *real* plugin is loaded this is overwritten
        this.plugin = new Plugin(place.getParameter("name",
                                                    ""),
                                 getPluginType(),
                                 path);
        this.place = place;
    }

    /** Keeps this editor's descriptor and title in sync when its plugin is renamed. */
    protected void onPlugInRenamed(@Observes final PluginRenamed pluginRenamed) {
        if (pluginRenamed.getOldPluginName().equals(plugin.getName()) &&
                pluginRenamed.getPlugin().getType().equals(plugin.getType())) {
            this.plugin = new Plugin(pluginRenamed.getPlugin().getName(),
                                     getPluginType(),
                                     pluginRenamed.getPlugin().getPath());
            changeTitleNotification.fire(new ChangeTitleWidgetEvent(place,
                                                                    getTitleText(),
                                                                    getTitle()));
        }
    }

    protected Caller<? extends SupportsDelete> getDeleteServiceCaller() {
        return pluginServices;
    }

    protected Caller<? extends SupportsRename> getRenameServiceCaller() {
        return pluginServices;
    }

    protected Caller<? extends SupportsCopy> getCopyServiceCaller() {
        return pluginServices;
    }

    /** Asynchronously loads the plugin content into the view. */
    @Override
    protected void loadContent() {
        getPluginServices().call(new RemoteCallback<PluginContent>() {
            @Override
            public void callback(final PluginContent response) {
                view().setFramework(response.getFrameworks());
                view().setupContent(response,
                                    new ParameterizedCommand<Media>() {
                                        @Override
                                        public void execute(final Media media) {
                                            getPluginServices().call().deleteMedia(media);
                                        }
                                    });
                view().hideBusyIndicator();
                // Remember the loaded state so mayClose() can detect unsaved changes.
                setOriginalHash(getContent().hashCode());
            }
        }).getPluginContent(getCurrentPath());
    }

    ObservablePath getCurrentPath() {
        return versionRecordManager.getCurrentPath();
    }

    /** Snapshot of the view's current state as a plugin content model. */
    public PluginSimpleContent getContent() {
        return new PluginSimpleContent(view().getContent(),
                                       view().getTemplate(),
                                       view().getCss(),
                                       view().getCodeMap(),
                                       view().getFrameworks(),
                                       view().getContent().getLanguage());
    }

    protected void save() {
        savePopUpPresenter.show(getCurrentPath(),
                                getSaveCommand());
        concurrentUpdateSessionInfo = null;
    }

    ParameterizedCommand<String> getSaveCommand() {
        return new ParameterizedCommand<String>() {
            @Override
            public void execute(final String commitMessage) {
                getPluginServices().call(getSaveSuccessCallback(getContent().hashCode())).save(
                        getContent(),
                        commitMessage);
                view().onSave();
            }
        };
    }

    public boolean mayClose() {
        view().onClose();
        return super.mayClose(getContent().hashCode());
    }

    abstract RuntimePluginBaseView view();

    Caller<PluginServices> getPluginServices() {
        return pluginServices;
    }

    Integer getOriginalHash() {
        return originalHash;
    }

    @Override
    public Validator getRenameValidator() {
        return pluginNameValidator;
    }

    @Override
    public Validator getCopyValidator() {
        return pluginNameValidator;
    }

    public void onPluginSaved(@Observes PluginSaved pluginSaved) {
        registerPlugin(pluginSaved.getPlugin());
    }

    public void onPluginAdded(@Observes PluginAdded pluginAdded) {
        registerPlugin(pluginAdded.getPlugin());
    }

    public void onPluginDeleted(@Observes PluginDeleted pluginDeleted) {
        unregisterPlugin(pluginDeleted.getPluginName(),
                         pluginDeleted.getPluginType());
    }

    public void onPluginRenamed(@Observes PluginRenamed pluginRenamed) {
        unregisterPlugin(pluginRenamed.getOldPluginName(),
                         pluginRenamed.getOldPluginType());
        registerPlugin(pluginRenamed.getPlugin());
    }

    void unregisterPlugin(String name,
                          PluginType type) {
        pluginUnregisteredEvent.fire(new PluginUnregistered(name,
                                                            type));
    }

    /**
     * Injects the runtime artifacts (script and style) of every runtime plugin
     * derived from {@code plugin}, then announces the registration.
     */
    void registerPlugin(Plugin plugin) {
        pluginServices.call(new RemoteCallback<Collection<RuntimePlugin>>() {
            @Override
            public void callback(final Collection<RuntimePlugin> runtimePlugins) {
                // Fixed: the loop variable used to be named `plugin`, shadowing both
                // the method parameter and the class field of the same name.
                for (final RuntimePlugin runtimePlugin : runtimePlugins) {
                    ScriptInjector.fromString(runtimePlugin.getScript()).setWindow(TOP_WINDOW).inject();
                    StyleInjector.inject(runtimePlugin.getStyle(),
                                         true);
                }
                newPluginRegisteredEvent.fire(new NewPluginRegistered(plugin.getName(),
                                                                      plugin.getType()));
            }
        }).listPluginRuntimePlugins(plugin.getPath());
    }
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.druid.segment.serde;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.base.Preconditions;
import com.google.common.primitives.Ints;
import org.apache.druid.collections.bitmap.ImmutableBitmap;
import org.apache.druid.collections.spatial.ImmutableRTree;
import org.apache.druid.io.Channels;
import org.apache.druid.java.util.common.IAE;
import org.apache.druid.java.util.common.io.smoosh.FileSmoosher;
import org.apache.druid.segment.column.ColumnBuilder;
import org.apache.druid.segment.column.ColumnConfig;
import org.apache.druid.segment.column.ValueType;
import org.apache.druid.segment.data.BitmapSerde;
import org.apache.druid.segment.data.BitmapSerdeFactory;
import org.apache.druid.segment.data.ByteBufferWriter;
import org.apache.druid.segment.data.ColumnarInts;
import org.apache.druid.segment.data.ColumnarIntsSerializer;
import org.apache.druid.segment.data.ColumnarMultiInts;
import org.apache.druid.segment.data.CompressedVSizeColumnarIntsSupplier;
import org.apache.druid.segment.data.CompressedVSizeColumnarMultiIntsSupplier;
import org.apache.druid.segment.data.GenericIndexed;
import org.apache.druid.segment.data.GenericIndexedWriter;
import org.apache.druid.segment.data.ImmutableRTreeObjectStrategy;
import org.apache.druid.segment.data.V3CompressedVSizeColumnarMultiIntsSupplier;
import org.apache.druid.segment.data.VSizeColumnarInts;
import org.apache.druid.segment.data.VSizeColumnarMultiInts;
import org.apache.druid.segment.data.WritableSupplier;
import javax.annotation.Nullable;
import javax.validation.constraints.NotNull;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.WritableByteChannel;
public class DictionaryEncodedColumnPartSerde implements ColumnPartSerde
{
  private static final int NO_FLAGS = 0;
  // New serializers start with NO_BITMAP_INDEX set; SerializerBuilder.withBitmapIndex()
  // clears it when a bitmap index writer is actually supplied.
  private static final int STARTING_FLAGS = Feature.NO_BITMAP_INDEX.getMask();
enum Feature
{
MULTI_VALUE,
MULTI_VALUE_V3,
NO_BITMAP_INDEX;
public boolean isSet(int flags)
{
return (getMask() & flags) != 0;
}
public int getMask()
{
return (1 << ordinal());
}
}
enum VERSION
{
UNCOMPRESSED_SINGLE_VALUE, // 0x0
UNCOMPRESSED_MULTI_VALUE, // 0x1
COMPRESSED, // 0x2
UNCOMPRESSED_WITH_FLAGS; // 0x3
public static VERSION fromByte(byte b)
{
final VERSION[] values = VERSION.values();
Preconditions.checkArgument(b < values.length, "Unsupported dictionary column version[%s]", b);
return values[b];
}
public byte asByte()
{
return (byte) this.ordinal();
}
}
@JsonCreator
public static DictionaryEncodedColumnPartSerde createDeserializer(
@Nullable @JsonProperty("bitmapSerdeFactory") BitmapSerdeFactory bitmapSerdeFactory,
@NotNull @JsonProperty("byteOrder") ByteOrder byteOrder
)
{
return new DictionaryEncodedColumnPartSerde(
byteOrder,
bitmapSerdeFactory != null ? bitmapSerdeFactory : new BitmapSerde.LegacyBitmapSerdeFactory(),
null
);
}
  private final ByteOrder byteOrder;
  private final BitmapSerdeFactory bitmapSerdeFactory;
  // Write-side only; null when this instance was created for deserialization
  // (see createDeserializer).
  private final Serializer serializer;

  // Instances are created only through createDeserializer (read side) or
  // SerializerBuilder.build() (write side).
  private DictionaryEncodedColumnPartSerde(
      ByteOrder byteOrder,
      BitmapSerdeFactory bitmapSerdeFactory,
      Serializer serializer
  )
  {
    this.byteOrder = byteOrder;
    this.bitmapSerdeFactory = bitmapSerdeFactory;
    this.serializer = serializer;
  }
  // Exposed for JSON serialization of the column metadata.
  @JsonProperty
  public BitmapSerdeFactory getBitmapSerdeFactory()
  {
    return bitmapSerdeFactory;
  }

  // Exposed for JSON serialization of the column metadata.
  @JsonProperty
  public ByteOrder getByteOrder()
  {
    return byteOrder;
  }

  // Entry point for the write side: configure writers, then call build().
  public static SerializerBuilder serializerBuilder()
  {
    return new SerializerBuilder();
  }
public static class SerializerBuilder
{
    // Chosen by withValue() from the multi-value/compression combination.
    private VERSION version = null;
    // Starts with NO_BITMAP_INDEX set; withBitmapIndex(non-null) clears it.
    private int flags = STARTING_FLAGS;
    private GenericIndexedWriter<String> dictionaryWriter = null;
    private ColumnarIntsSerializer valueWriter = null;
    private BitmapSerdeFactory bitmapSerdeFactory = null;
    private GenericIndexedWriter<ImmutableBitmap> bitmapIndexWriter = null;
    private ByteBufferWriter<ImmutableRTree> spatialIndexWriter = null;
    private ByteOrder byteOrder = null;
    /** Sets the writer for the value dictionary. */
    public SerializerBuilder withDictionary(GenericIndexedWriter<String> dictionaryWriter)
    {
      this.dictionaryWriter = dictionaryWriter;
      return this;
    }

    /** Sets the factory used to serialize bitmap indexes. */
    public SerializerBuilder withBitmapSerdeFactory(BitmapSerdeFactory bitmapSerdeFactory)
    {
      this.bitmapSerdeFactory = bitmapSerdeFactory;
      return this;
    }
public SerializerBuilder withBitmapIndex(@Nullable GenericIndexedWriter<ImmutableBitmap> bitmapIndexWriter)
{
if (bitmapIndexWriter == null) {
flags |= Feature.NO_BITMAP_INDEX.getMask();
} else {
flags &= ~Feature.NO_BITMAP_INDEX.getMask();
}
this.bitmapIndexWriter = bitmapIndexWriter;
return this;
}
public SerializerBuilder withSpatialIndex(ByteBufferWriter<ImmutableRTree> spatialIndexWriter)
{
this.spatialIndexWriter = spatialIndexWriter;
return this;
}
public SerializerBuilder withByteOrder(ByteOrder byteOrder)
{
this.byteOrder = byteOrder;
return this;
}
public SerializerBuilder withValue(ColumnarIntsSerializer valueWriter, boolean hasMultiValue, boolean compressed)
{
this.valueWriter = valueWriter;
if (hasMultiValue) {
if (compressed) {
this.version = VERSION.COMPRESSED;
this.flags |= Feature.MULTI_VALUE_V3.getMask();
} else {
this.version = VERSION.UNCOMPRESSED_MULTI_VALUE;
this.flags |= Feature.MULTI_VALUE.getMask();
}
} else {
if (compressed) {
this.version = VERSION.COMPRESSED;
} else {
this.version = VERSION.UNCOMPRESSED_SINGLE_VALUE;
}
}
return this;
}
public DictionaryEncodedColumnPartSerde build()
{
if (mustWriteFlags(flags) && version.compareTo(VERSION.COMPRESSED) < 0) {
// Must upgrade version so we can write out flags.
this.version = VERSION.UNCOMPRESSED_WITH_FLAGS;
}
return new DictionaryEncodedColumnPartSerde(
byteOrder,
bitmapSerdeFactory,
new Serializer()
{
@Override
public long getSerializedSize() throws IOException
{
long size = 1 + // version
(version.compareTo(VERSION.COMPRESSED) >= 0
? Integer.BYTES
: 0); // flag if version >= compressed
if (dictionaryWriter != null) {
size += dictionaryWriter.getSerializedSize();
}
if (valueWriter != null) {
size += valueWriter.getSerializedSize();
}
if (bitmapIndexWriter != null) {
size += bitmapIndexWriter.getSerializedSize();
}
if (spatialIndexWriter != null) {
size += spatialIndexWriter.getSerializedSize();
}
return size;
}
@Override
public void writeTo(WritableByteChannel channel, FileSmoosher smoosher) throws IOException
{
Channels.writeFully(channel, ByteBuffer.wrap(new byte[]{version.asByte()}));
if (version.compareTo(VERSION.COMPRESSED) >= 0) {
channel.write(ByteBuffer.wrap(Ints.toByteArray(flags)));
}
if (dictionaryWriter != null) {
dictionaryWriter.writeTo(channel, smoosher);
}
if (valueWriter != null) {
valueWriter.writeTo(channel, smoosher);
}
if (bitmapIndexWriter != null) {
bitmapIndexWriter.writeTo(channel, smoosher);
}
if (spatialIndexWriter != null) {
spatialIndexWriter.writeTo(channel, smoosher);
}
}
}
);
}
}
/** Write-side serializer; null when this serde was built for deserialization only. */
@Override
public Serializer getSerializer()
{
  return serializer;
}
/**
 * Returns a deserializer that reads the sections written by the serializer in
 * {@code SerializerBuilder.build()}: version byte, optional flags int,
 * dictionary, value column, optional bitmap index, optional spatial index.
 */
@Override
public Deserializer getDeserializer()
{
  return new Deserializer()
  {
    @Override
    public void read(ByteBuffer buffer, ColumnBuilder builder, ColumnConfig columnConfig)
    {
      final VERSION rVersion = VERSION.fromByte(buffer.get());
      final int rFlags;
      if (rVersion.compareTo(VERSION.COMPRESSED) >= 0) {
        // COMPRESSED and later carry an explicit flags int.
        rFlags = buffer.getInt();
      } else {
        // Legacy versions encode multi-valued-ness in the version byte itself.
        rFlags = rVersion.equals(VERSION.UNCOMPRESSED_MULTI_VALUE)
                 ? Feature.MULTI_VALUE.getMask()
                 : NO_FLAGS;
      }
      final boolean hasMultipleValues = Feature.MULTI_VALUE.isSet(rFlags) || Feature.MULTI_VALUE_V3.isSet(rFlags);
      final GenericIndexed<String> rDictionary = GenericIndexed.read(
          buffer,
          GenericIndexed.STRING_STRATEGY,
          builder.getFileMapper()
      );
      builder.setType(ValueType.STRING);
      // Exactly one of these is populated, depending on multi-valued-ness.
      final WritableSupplier<ColumnarInts> rSingleValuedColumn;
      final WritableSupplier<ColumnarMultiInts> rMultiValuedColumn;
      if (hasMultipleValues) {
        rMultiValuedColumn = readMultiValuedColumn(rVersion, buffer, rFlags);
        rSingleValuedColumn = null;
      } else {
        rSingleValuedColumn = readSingleValuedColumn(rVersion, buffer);
        rMultiValuedColumn = null;
      }
      DictionaryEncodedColumnSupplier dictionaryEncodedColumnSupplier = new DictionaryEncodedColumnSupplier(
          rDictionary,
          rSingleValuedColumn,
          rMultiValuedColumn,
          columnConfig.columnCacheSizeBytes()
      );
      builder
          .setHasMultipleValues(hasMultipleValues)
          .setDictionaryEncodedColumnSupplier(dictionaryEncodedColumnSupplier);
      // The bitmap index section is present unless it was flagged out at write time.
      if (!Feature.NO_BITMAP_INDEX.isSet(rFlags)) {
        GenericIndexed<ImmutableBitmap> rBitmaps = GenericIndexed.read(
            buffer,
            bitmapSerdeFactory.getObjectStrategy(),
            builder.getFileMapper()
        );
        builder.setBitmapIndex(
            new BitmapIndexColumnPartSupplier(
                bitmapSerdeFactory.getBitmapFactory(),
                rBitmaps,
                rDictionary
            )
        );
      }
      // Any trailing bytes are the optional spatial (R-tree) index.
      if (buffer.hasRemaining()) {
        ImmutableRTree rSpatialIndex =
            new ImmutableRTreeObjectStrategy(bitmapSerdeFactory.getBitmapFactory()).fromByteBufferWithSize(buffer);
        builder.setSpatialIndex(new SpatialIndexColumnPartSupplier(rSpatialIndex));
      }
    }

    /** Reads a single-valued int column in the format implied by the version byte. */
    private WritableSupplier<ColumnarInts> readSingleValuedColumn(VERSION version, ByteBuffer buffer)
    {
      switch (version) {
        case UNCOMPRESSED_SINGLE_VALUE:
        case UNCOMPRESSED_WITH_FLAGS:
          return VSizeColumnarInts.readFromByteBuffer(buffer);
        case COMPRESSED:
          return CompressedVSizeColumnarIntsSupplier.fromByteBuffer(buffer, byteOrder);
        default:
          throw new IAE("Unsupported single-value version[%s]", version);
      }
    }

    /** Reads a multi-valued int column in the format implied by version + flags. */
    private WritableSupplier<ColumnarMultiInts> readMultiValuedColumn(VERSION version, ByteBuffer buffer, int flags)
    {
      switch (version) {
        case UNCOMPRESSED_MULTI_VALUE: {
          return VSizeColumnarMultiInts.readFromByteBuffer(buffer);
        }
        case UNCOMPRESSED_WITH_FLAGS: {
          if (Feature.MULTI_VALUE.isSet(flags)) {
            return VSizeColumnarMultiInts.readFromByteBuffer(buffer);
          } else {
            throw new IAE("Unrecognized multi-value flag[%d] for version[%s]", flags, version);
          }
        }
        case COMPRESSED: {
          // Two compressed layouts exist: legacy MULTI_VALUE and the newer V3.
          if (Feature.MULTI_VALUE.isSet(flags)) {
            return CompressedVSizeColumnarMultiIntsSupplier.fromByteBuffer(buffer, byteOrder);
          } else if (Feature.MULTI_VALUE_V3.isSet(flags)) {
            return V3CompressedVSizeColumnarMultiIntsSupplier.fromByteBuffer(buffer, byteOrder);
          } else {
            throw new IAE("Unrecognized multi-value flag[%d] for version[%s]", flags, version);
          }
        }
        default:
          throw new IAE("Unsupported multi-value version[%s]", version);
      }
    }
  };
}
/**
 * Returns true when the flag set cannot be inferred from a legacy
 * (pre-COMPRESSED) version byte and therefore must be written explicitly.
 * Legacy version bytes imply either no flags at all or the plain MULTI_VALUE
 * flag; anything else (notably MULTI_VALUE_V3) requires a flags section.
 */
private static boolean mustWriteFlags(final int flags)
{
  final boolean impliedByVersionByte =
      flags == NO_FLAGS || flags == Feature.MULTI_VALUE.getMask();
  return !impliedByVersionByte;
}
}
| |
/*
* Copyright (c) 2018 VMware Inc. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.hillview.test.dataset;
import org.hillview.dataset.LocalDataSet;
import org.hillview.dataset.ParallelDataSet;
import org.hillview.dataset.api.IDataSet;
import org.hillview.utils.Pair;
import org.hillview.sketches.results.MinKSet;
import org.hillview.sketches.SampleDistinctElementsSketch;
import org.hillview.sketches.SampleDistinctRowsSketch;
import org.hillview.table.Table;
import org.hillview.table.api.ITable;
import org.hillview.table.rows.RowSnapshot;
import org.hillview.utils.TestTables;
import org.junit.Assert;
import org.junit.Test;
import java.util.ArrayList;
import java.util.List;
/**
 * Tests for the MinK sampling sketches: verifies that bucket boundaries chosen
 * from sampled distinct elements/rows are close to equi-depth positions within
 * a 5/sqrt(numSamples) error bound.
 */
public class MinKTest {
    // Diagnostic printing toggle. Primitive boolean: no reason to box it.
    private final boolean printOn = false;

    /**
     * Returns the maximum absolute deviation between each boundary's empirical
     * rank (rank/suppSize) and its ideal equi-depth position (i/numBuckets).
     * The first boundary must always have rank 0.
     */
    private double getMaxErr(int suppSize, List<Integer> ranks) {
        int numBuckets = ranks.size();
        double maxErr = 0;
        Assert.assertEquals((int) ranks.get(0), 0);
        if (printOn) {
            System.out.printf("Min rank: %d\n", ranks.get(0));
        }
        for (int i = 1; i < numBuckets; i++) {
            double err = Math.abs((ranks.get(i) / ((double) suppSize) - i / ((double) numBuckets)));
            if (printOn)
                System.out.printf("%d has Rank %f, Error %f\n", i,
                        ranks.get(i) / ((double) suppSize), err);
            if (err > maxErr)
                maxErr = err;
        }
        return maxErr;
    }

    /** Prints boundaries and their ranks when diagnostics are enabled. */
    private void printBoundaries(List<String> boundaries, List<Integer> ranks) {
        if (!printOn)
            return;
        int trueBuckets = boundaries.size() - 1;
        System.out.printf("Total of %d buckets\n", trueBuckets);
        for (int i = 0; i <= trueBuckets; i++) {
            System.out.printf("%s, %d \n", boundaries.get(i), ranks.get(i));
        }
    }

    /** Theoretical error bound 5/sqrt(numSamples) for the sampling sketches. */
    private double getErrBound(int numSamples) {
        double bound = 5.0 / Math.sqrt(numSamples);
        if (printOn)
            System.out.printf("Error bound: %f\n", bound);
        return bound;
    }

    /**
     * Builds a random string table over a support of suppSize distinct strings
     * of length 6, returning both the table and the support list it was drawn from.
     */
    private Pair<Table, List<String>> getStringTable(int suppSize) {
        List<String> randomString = TestTables.randStringList(suppSize, 6);
        int num = Math.max(10 * suppSize, suppSize * ((int) Math.ceil(Math.log(suppSize))));
        Table table = TestTables.randStringTable(num, randomString);
        return new Pair<>(table, randomString);
    }

    private void TestStringSamplingSketch(int suppSize) {
        // Fetch the table and its support in ONE call. The original code called
        // getStringTable() twice, so the support list used for rank checking was
        // drawn from a different random string set than the table itself.
        Pair<Table, List<String>> tableAndSupport = this.getStringTable(suppSize);
        Table table = tableAndSupport.first;
        List<String> randomString = tableAndSupport.second;
        Assert.assertNotNull(table);
        Assert.assertNotNull(randomString);
        int numSamples = 10000;
        SampleDistinctElementsSketch bks = new SampleDistinctElementsSketch("Name", 176864, numSamples);
        MinKSet<String> mks = bks.create(table);
        Assert.assertNotNull(mks);
        if (printOn)
            System.out.printf("Table size: %d, non-null %d\n", table.getNumOfRows(), mks.presentCount);
        int maxBuckets = 100;
        List<String> boundaries = mks.getLeftBoundaries(maxBuckets);
        List<Integer> ranks = TestTables.getRanks(boundaries, randomString);
        this.printBoundaries(boundaries, ranks);
        double maxErr = this.getMaxErr(suppSize, ranks);
        double bound = this.getErrBound(numSamples);
        Assert.assertTrue(maxErr < bound);
    }

    private void StringTableRowSampling(int suppSize) {
        // Same fix as above: a single getStringTable() call keeps the table and
        // its support list consistent with each other.
        Pair<Table, List<String>> tableAndSupport = this.getStringTable(suppSize);
        Table table = tableAndSupport.first;
        List<String> randomString = tableAndSupport.second;
        Assert.assertNotNull(table);
        Assert.assertNotNull(randomString);
        int numSamples = 1000;
        long seed = 57609102;
        SampleDistinctRowsSketch sdrs = new SampleDistinctRowsSketch(
                table.getRecordOrder(true), numSamples, seed);
        MinKSet<RowSnapshot> mks = sdrs.create(table);
        int maxBuckets = 10;
        Assert.assertNotNull(mks);
        List<RowSnapshot> rsBoundaries = mks.getLeftBoundaries(maxBuckets);
        List<String> boundaries = new ArrayList<>();
        for (RowSnapshot rss : rsBoundaries)
            boundaries.add(rss.getString("Name"));
        List<Integer> ranks = TestTables.getRanks(boundaries, randomString);
        this.printBoundaries(boundaries, ranks);
        double maxErr = this.getMaxErr(suppSize, ranks);
        double bound = this.getErrBound(numSamples);
        Assert.assertTrue(maxErr < bound);
    }

    @Test
    public void testSupportValues() {
        TestStringSamplingSketch(1);
        TestStringSamplingSketch(2);
        TestStringSamplingSketch(10);
        TestStringSamplingSketch(100);
        TestStringSamplingSketch(10000);
    }

    @Test
    public void testRowSampling() {
        StringTableRowSampling(10000);
        StringTableRowSampling(100000);
    }

    /** Smoke-tests row sampling over an int table; only prints when diagnostics are on. */
    private void IntTableRowSampling(ITable table, boolean isAscending, int numSamples, int maxBuckets) {
        long seed = 57609102;
        SampleDistinctRowsSketch sdrs = new SampleDistinctRowsSketch(
                table.getRecordOrder(isAscending), numSamples, seed);
        MinKSet<RowSnapshot> mks = sdrs.create(table);
        Assert.assertNotNull(mks);
        List<RowSnapshot> rsBoundaries = mks.getLeftBoundaries(maxBuckets);
        if (printOn) {
            System.out.printf("\n Boundaries for %d buckets: \n", maxBuckets);
            for (RowSnapshot rss : rsBoundaries)
                System.out.println(rss.toString());
        }
    }

    @Test
    public void testIntRowSampling() {
        ITable table = TestTables.getIntTable(10000, 3);
        IntTableRowSampling(table, true, 1000, 10);
        IntTableRowSampling(table, true, 10000, 10);
        IntTableRowSampling(table, false, 1000, 10);
        IntTableRowSampling(table, false, 10000, 10);
    }

    /**
     * Splits one support across two tables and checks that merged sketches
     * still produce boundaries within the error bound over the full support.
     */
    @Test
    public void testStringTwoTables() {
        int suppSize = 100000;
        int length = 6;
        List<String> randomString = TestTables.randStringList(suppSize, length);
        List<String> part1 = new ArrayList<>();
        List<String> part2 = new ArrayList<>();
        for (int i = 0; i < suppSize; i++) {
            if (i % 2 == 0)
                part2.add(randomString.get(i));
            else
                part1.add(randomString.get(i));
        }
        int num = suppSize * ((int) Math.ceil(Math.log(suppSize)));
        Table table1 = TestTables.randStringTable(num, part1);
        Table table2 = TestTables.randStringTable(num, part2);
        int numSamples = 5000;
        SampleDistinctElementsSketch bks = new SampleDistinctElementsSketch("Name", 17864, numSamples);
        MinKSet<String> mks1 = bks.create(table1);
        MinKSet<String> mks2 = bks.create(table2);
        MinKSet<String> mks3 = bks.add(mks1, mks2);
        assert mks3 != null;
        int maxBuckets = 50;
        List<String> boundaries = mks3.getLeftBoundaries(maxBuckets);
        List<Integer> ranks = TestTables.getRanks(boundaries, randomString);
        double maxErr = this.getMaxErr(suppSize, ranks);
        double bound = this.getErrBound(numSamples);
        Assert.assertTrue(maxErr < bound);
    }

    /**
     * Runs the distinct-elements sketch over a ParallelDataSet of identical
     * fragments and checks the merged result against the shared support.
     */
    @Test
    public void testParallelTable() {
        int suppSize = 2;
        int length = 6;
        List<String> randomString = TestTables.randStringList(suppSize, length);
        int num = Math.max(10 * suppSize, suppSize * ((int) Math.ceil(Math.log(suppSize))));
        Table t = TestTables.randStringTable(num, randomString);
        final int parts = 4;
        List<IDataSet<ITable>> fragments = new ArrayList<>();
        for (int i = 0; i < parts; i++) {
            LocalDataSet<ITable> data = new LocalDataSet<>(t);
            fragments.add(data);
        }
        IDataSet<ITable> big = new ParallelDataSet<>(fragments);
        int numSamples = 10000;
        SampleDistinctElementsSketch bks = new SampleDistinctElementsSketch("Name", 1754, numSamples);
        MinKSet<String> mks = big.blockingSketch(bks);
        int maxBuckets = 100;
        Assert.assertNotNull(mks);
        List<String> boundaries = mks.getLeftBoundaries(maxBuckets);
        List<Integer> ranks = TestTables.getRanks(boundaries, randomString);
        this.printBoundaries(boundaries, ranks);
        double maxErr = this.getMaxErr(suppSize, ranks);
        double bound = this.getErrBound(numSamples);
        Assert.assertTrue(maxErr < bound);
    }
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.hadoop.hive.metastore.messaging.json.gzip;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.hive.metastore.messaging.AbortTxnMessage;
import org.apache.hadoop.hive.metastore.messaging.AcidWriteMessage;
import org.apache.hadoop.hive.metastore.messaging.AddForeignKeyMessage;
import org.apache.hadoop.hive.metastore.messaging.AddNotNullConstraintMessage;
import org.apache.hadoop.hive.metastore.messaging.AddPartitionMessage;
import org.apache.hadoop.hive.metastore.messaging.AddPrimaryKeyMessage;
import org.apache.hadoop.hive.metastore.messaging.AddUniqueConstraintMessage;
import org.apache.hadoop.hive.metastore.messaging.AllocWriteIdMessage;
import org.apache.hadoop.hive.metastore.messaging.AlterDatabaseMessage;
import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage;
import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage;
import org.apache.hadoop.hive.metastore.messaging.CommitTxnMessage;
import org.apache.hadoop.hive.metastore.messaging.CreateDatabaseMessage;
import org.apache.hadoop.hive.metastore.messaging.CreateFunctionMessage;
import org.apache.hadoop.hive.metastore.messaging.CreateTableMessage;
import org.apache.hadoop.hive.metastore.messaging.DropConstraintMessage;
import org.apache.hadoop.hive.metastore.messaging.DropDatabaseMessage;
import org.apache.hadoop.hive.metastore.messaging.DropFunctionMessage;
import org.apache.hadoop.hive.metastore.messaging.DropPartitionMessage;
import org.apache.hadoop.hive.metastore.messaging.DropTableMessage;
import org.apache.hadoop.hive.metastore.messaging.InsertMessage;
import org.apache.hadoop.hive.metastore.messaging.OpenTxnMessage;
import org.apache.hadoop.hive.metastore.messaging.UpdateTableColumnStatMessage;
import org.apache.hadoop.hive.metastore.messaging.UpdatePartitionColumnStatMessage;
import org.apache.hadoop.hive.metastore.messaging.DeleteTableColumnStatMessage;
import org.apache.hadoop.hive.metastore.messaging.DeletePartitionColumnStatMessage;
import org.apache.hadoop.hive.metastore.messaging.json.JSONMessageDeserializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Base64;
import java.util.zip.GZIPInputStream;
public class DeSerializer extends JSONMessageDeserializer {
private static final Logger LOG = LoggerFactory.getLogger(Serializer.class.getName());
private static String deCompress(String messageBody) {
byte[] decodedBytes = Base64.getDecoder().decode(messageBody.getBytes(StandardCharsets.UTF_8));
try (
ByteArrayInputStream in = new ByteArrayInputStream(decodedBytes);
GZIPInputStream is = new GZIPInputStream(in)
) {
byte[] bytes = IOUtils.toByteArray(is);
return new String(bytes, StandardCharsets.UTF_8);
} catch (IOException e) {
LOG.error("cannot decode the stream", e);
LOG.debug("base64 encoded String", messageBody);
throw new RuntimeException("cannot decode the stream ", e);
}
}
/**
* this is mainly as a utility to allow debugging of messages for developers by providing the
* message in a file and getting an actual message out.
* This class on a deployed hive instance will also be bundled in hive-exec jar.
*
*/
public static void main(String[] args) throws IOException {
if(args.length != 1) {
System.out.println("Usage:");
System.out.println("java -cp [classpath] "+DeSerializer.class.getCanonicalName() +" [file_location]");
}
System.out.print(
deCompress(FileUtils.readFileToString(new File(args[0]), StandardCharsets.UTF_8)));
}
@Override
public CreateDatabaseMessage getCreateDatabaseMessage(String messageBody) {
return super.getCreateDatabaseMessage(deCompress(messageBody));
}
@Override
public AlterDatabaseMessage getAlterDatabaseMessage(String messageBody) {
return super.getAlterDatabaseMessage(deCompress(messageBody));
}
@Override
public DropDatabaseMessage getDropDatabaseMessage(String messageBody) {
return super.getDropDatabaseMessage(deCompress(messageBody));
}
@Override
public CreateTableMessage getCreateTableMessage(String messageBody) {
return super.getCreateTableMessage(deCompress(messageBody));
}
@Override
public AlterTableMessage getAlterTableMessage(String messageBody) {
return super.getAlterTableMessage(deCompress(messageBody));
}
@Override
public DropTableMessage getDropTableMessage(String messageBody) {
return super.getDropTableMessage(deCompress(messageBody));
}
@Override
public AddPartitionMessage getAddPartitionMessage(String messageBody) {
return super.getAddPartitionMessage(deCompress(messageBody));
}
@Override
public AlterPartitionMessage getAlterPartitionMessage(String messageBody) {
return super.getAlterPartitionMessage(deCompress(messageBody));
}
@Override
public DropPartitionMessage getDropPartitionMessage(String messageBody) {
return super.getDropPartitionMessage(deCompress(messageBody));
}
@Override
public CreateFunctionMessage getCreateFunctionMessage(String messageBody) {
return super.getCreateFunctionMessage(deCompress(messageBody));
}
@Override
public DropFunctionMessage getDropFunctionMessage(String messageBody) {
return super.getDropFunctionMessage(deCompress(messageBody));
}
@Override
public InsertMessage getInsertMessage(String messageBody) {
return super.getInsertMessage(deCompress(messageBody));
}
@Override
public AddPrimaryKeyMessage getAddPrimaryKeyMessage(String messageBody) {
return super.getAddPrimaryKeyMessage(deCompress(messageBody));
}
@Override
public AddForeignKeyMessage getAddForeignKeyMessage(String messageBody) {
return super.getAddForeignKeyMessage(deCompress(messageBody));
}
@Override
public AddUniqueConstraintMessage getAddUniqueConstraintMessage(String messageBody) {
return super.getAddUniqueConstraintMessage(deCompress(messageBody));
}
@Override
public AddNotNullConstraintMessage getAddNotNullConstraintMessage(String messageBody) {
return super.getAddNotNullConstraintMessage(deCompress(messageBody));
}
@Override
public DropConstraintMessage getDropConstraintMessage(String messageBody) {
return super.getDropConstraintMessage(deCompress(messageBody));
}
@Override
public OpenTxnMessage getOpenTxnMessage(String messageBody) {
return super.getOpenTxnMessage(deCompress(messageBody));
}
@Override
public CommitTxnMessage getCommitTxnMessage(String messageBody) {
return super.getCommitTxnMessage(deCompress(messageBody));
}
@Override
public AbortTxnMessage getAbortTxnMessage(String messageBody) {
return super.getAbortTxnMessage(deCompress(messageBody));
}
@Override
public AllocWriteIdMessage getAllocWriteIdMessage(String messageBody) {
return super.getAllocWriteIdMessage(deCompress(messageBody));
}
@Override
public AcidWriteMessage getAcidWriteMessage(String messageBody) {
return super.getAcidWriteMessage(deCompress(messageBody));
}
@Override
public UpdateTableColumnStatMessage getUpdateTableColumnStatMessage(String messageBody) {
return super.getUpdateTableColumnStatMessage(deCompress(messageBody));
}
@Override
public UpdatePartitionColumnStatMessage getUpdatePartitionColumnStatMessage(String messageBody) {
return super.getUpdatePartitionColumnStatMessage(deCompress(messageBody));
}
@Override
public DeleteTableColumnStatMessage getDeleteTableColumnStatMessage(String messageBody) {
return super.getDeleteTableColumnStatMessage(deCompress(messageBody));
}
@Override
public DeletePartitionColumnStatMessage getDeletePartitionColumnStatMessage(String messageBody) {
return super.getDeletePartitionColumnStatMessage(deCompress(messageBody));
}
}
| |
/*
* This file is part of Sponge, licensed under the MIT License (MIT).
*
* Copyright (c) SpongePowered <https://www.spongepowered.org>
* Copyright (c) contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package org.spongepowered.common.mixin.api.minecraft.network;
import static com.google.common.base.Preconditions.checkNotNull;
import com.google.common.base.Charsets;
import io.netty.buffer.ByteBuf;
import io.netty.handler.codec.DecoderException;
import io.netty.handler.codec.EncoderException;
import org.spongepowered.api.data.persistence.DataView;
import org.spongepowered.api.network.channel.ChannelBuf;
import org.spongepowered.asm.mixin.Implements;
import org.spongepowered.asm.mixin.Interface;
import org.spongepowered.asm.mixin.Interface.Remap;
import org.spongepowered.asm.mixin.Intrinsic;
import org.spongepowered.asm.mixin.Mixin;
import org.spongepowered.asm.mixin.Shadow;
import org.spongepowered.common.data.persistence.NBTTranslator;
import org.spongepowered.common.network.channel.ChannelBuffers;
import org.spongepowered.common.util.Constants;
import java.io.IOException;
import java.util.UUID;
import javax.annotation.Nullable;
import net.minecraft.nbt.CompoundTag;
import net.minecraft.network.FriendlyByteBuf;
@Mixin(FriendlyByteBuf.class)
@Implements(@Interface(iface = ChannelBuf.class, prefix = "cbuf$", remap = Remap.NONE))
public abstract class FriendlyByteBufMixin_API extends ByteBuf {
// @formatter:off
// mojang methods, fluent in target
// @formatter:off
// mojang methods, fluent in target
// Shadowed FriendlyByteBuf methods that return the buffer itself; the cbuf$
// wrappers below cast their result to ChannelBuf to stay fluent.
@Shadow public abstract FriendlyByteBuf shadow$writeByteArray(byte[] array);
@Shadow public abstract FriendlyByteBuf shadow$writeVarInt(int input);
@Shadow public abstract FriendlyByteBuf shadow$writeVarLong(long input);
@Shadow public abstract FriendlyByteBuf shadow$writeUtf(String string);
@Shadow public abstract FriendlyByteBuf shadow$writeNbt(@Nullable CompoundTag nbt);
@Shadow public abstract FriendlyByteBuf shadow$writeUUID(UUID uniqueId);
// mojang methods, non-fluent
// Shadowed FriendlyByteBuf read methods, used directly by the cbuf$ readers.
@Shadow public abstract byte[] shadow$readByteArray();
@Shadow public abstract byte[] shadow$readByteArray(int limit);
@Shadow public abstract int shadow$readVarInt();
@Shadow public abstract long shadow$readVarLong();
@Shadow public abstract String shadow$readUtf(int maxLength);
@Shadow public abstract CompoundTag shadow$readNbt() throws IOException;
@Shadow public abstract UUID shadow$readUUID();
// @formatter:on
// --- ChannelBuf index/slice/array operations, delegating to the ByteBuf base. ---

@Intrinsic
public int cbuf$capacity() {
    return this.capacity();
}

// Readable byte count: bytes between the reader and writer indices.
public int cbuf$available() {
    return this.writerIndex() - this.readerIndex();
}

@Intrinsic
public int cbuf$readerIndex() {
    return this.readerIndex();
}

public ChannelBuf cbuf$readerIndex(final int index) {
    this.readerIndex(index);
    return (ChannelBuf) this;
}

@Intrinsic
public int cbuf$writerIndex() {
    return this.writerIndex();
}

public ChannelBuf cbuf$writerIndex(final int index) {
    this.writerIndex(index);
    return (ChannelBuf) this;
}

public ChannelBuf cbuf$setIndex(final int readIndex, final int writeIndex) {
    this.setIndex(readIndex, writeIndex);
    return (ChannelBuf) this;
}

public ChannelBuf cbuf$clear() {
    this.clear();
    return (ChannelBuf) this;
}

// Slices share the underlying storage; ChannelBuffers.wrap adapts them to ChannelBuf.
public ChannelBuf cbuf$slice() {
    return ChannelBuffers.wrap(this.slice());
}

public ChannelBuf cbuf$slice(final int index, final int length) {
    return ChannelBuffers.wrap(this.slice(index, length));
}

public ChannelBuf cbuf$readSlice(final int length) {
    return ChannelBuffers.wrap(this.readSlice(length));
}

@Intrinsic
public boolean cbuf$hasArray() {
    return this.hasArray();
}

@Intrinsic
public byte[] cbuf$array() {
    return this.array();
}
// --- boolean / byte / length-prefixed byte-array accessors. ---

public ChannelBuf cbuf$writeBoolean(final boolean data) {
    this.writeBoolean(data);
    return (ChannelBuf) this;
}

public ChannelBuf cbuf$setBoolean(final int index, final boolean data) {
    this.setBoolean(index, data);
    return (ChannelBuf) this;
}

@Intrinsic
public boolean cbuf$readBoolean() {
    return this.readBoolean();
}

@Intrinsic
public boolean cbuf$getBoolean(final int index) {
    return this.getBoolean(index);
}

public ChannelBuf cbuf$writeByte(final byte data) {
    this.writeByte(data);
    return (ChannelBuf) this;
}

public ChannelBuf cbuf$setByte(final int index, final byte data) {
    this.setByte(index, data);
    return (ChannelBuf) this;
}

@Intrinsic
public byte cbuf$readByte() {
    return this.readByte();
}

@Intrinsic
public byte cbuf$getByte(final int index) {
    return this.getByte(index);
}

// Length-prefixed array write (FriendlyByteBuf's var-int-prefixed format).
public ChannelBuf cbuf$writeByteArray(final byte[] data) {
    return (ChannelBuf) this.shadow$writeByteArray(data); // fluent in target
}

// NOTE(review): unlike the overload above, this writes raw bytes with no
// length prefix — asymmetric with cbuf$readByteArray; verify against the
// ChannelBuf contract.
public ChannelBuf cbuf$writeByteArray(final byte[] data, final int start, final int length) {
    this.writeBytes(data, start, length);
    return (ChannelBuf) this;
}

public ChannelBuf cbuf$setByteArray(final int index, final byte[] data) {
    this.setBytes(index, data);
    return (ChannelBuf) this;
}

public ChannelBuf cbuf$setByteArray(final int index, final byte[] data, final int start, final int length) {
    this.setBytes(index, data, start, length);
    return (ChannelBuf) this;
}

@Intrinsic
public byte[] cbuf$readByteArray() {
    return this.shadow$readByteArray();
}

@Intrinsic
public byte[] cbuf$readByteArray(final int limit) {
    return this.shadow$readByteArray(limit);
}

// Reads a length-prefixed array at an arbitrary position by temporarily
// moving the reader index, restoring it afterwards.
public byte[] cbuf$getByteArray(final int index) {
    final int readerIndex = this.readerIndex();
    try {
        this.readerIndex(index);
        return this.shadow$readByteArray();
    } finally {
        this.readerIndex(readerIndex);
    }
}

public byte[] cbuf$getByteArray(final int index, final int limit) {
    final int readerIndex = this.readerIndex();
    try {
        this.readerIndex(index);
        return this.shadow$readByteArray(limit);
    } finally {
        this.readerIndex(readerIndex);
    }
}
// --- raw (unprefixed) byte accessors. ---

public ChannelBuf cbuf$writeBytes(final byte[] data) {
    this.writeBytes(data);
    return (ChannelBuf) this;
}

public ChannelBuf cbuf$writeBytes(final byte[] data, final int start, final int length) {
    this.writeBytes(data, start, length);
    return (ChannelBuf) this;
}

public ChannelBuf cbuf$setBytes(final int index, final byte[] data) {
    this.setBytes(index, data);
    return (ChannelBuf) this;
}

public ChannelBuf cbuf$setBytes(final int index, final byte[] data, final int start, final int length) {
    this.setBytes(index, data, start, length);
    return (ChannelBuf) this;
}

@Intrinsic
public byte[] cbuf$readBytes(final int length) {
    final byte[] bytes = new byte[length];
    this.readBytes(bytes);
    return bytes;
}

// NOTE(review): ByteBuf.readBytes(dst, dstIndex, length) treats the second
// argument as an offset into dest — so dest (sized exactly length) overflows
// for any index > 0, and the read still starts at the reader index, not at
// `index`. If the intent was "read `length` bytes at buffer position `index`",
// this should likely be getBytes(index, dest) — verify against the
// ChannelBuf contract before changing.
public byte[] cbuf$readBytes(final int index, final int length) {
    final byte[] dest = new byte[length];
    this.readBytes(dest, index, length);
    return dest;
}
// --- short (big- and little-endian) and char accessors. ---

public ChannelBuf cbuf$writeShort(final short data) {
    this.writeShort(data);
    return (ChannelBuf) this;
}

public ChannelBuf cbuf$writeShortLE(final short data) {
    this.writeShortLE(data);
    return (ChannelBuf) this;
}

public ChannelBuf cbuf$setShort(final int index, final short data) {
    this.setShort(index, data);
    return (ChannelBuf) this;
}

public ChannelBuf cbuf$setShortLE(final int index, final short data) {
    this.setShortLE(index, data);
    return (ChannelBuf) this;
}

@Intrinsic
public short cbuf$readShort() {
    return this.readShort();
}

@Intrinsic
public short cbuf$readShortLE() {
    return this.readShortLE();
}

@Intrinsic
public short cbuf$getShort(final int index) {
    return this.getShort(index);
}

@Intrinsic
public short cbuf$getShortLE(final int index) {
    return this.getShortLE(index);
}

public ChannelBuf cbuf$writeChar(final char data) {
    this.writeChar(data);
    return (ChannelBuf) this;
}

public ChannelBuf cbuf$setChar(final int index, final char data) {
    this.setChar(index, data);
    return (ChannelBuf) this;
}

@Intrinsic
public char cbuf$readChar() {
    return this.readChar();
}

@Intrinsic
public char cbuf$getChar(final int index) {
    return this.getChar(index);
}
// --- int and long (big- and little-endian) accessors. ---

public ChannelBuf cbuf$writeInt(final int data) {
    this.writeInt(data);
    return (ChannelBuf) this;
}

public ChannelBuf cbuf$writeIntLE(final int data) {
    this.writeIntLE(data);
    return (ChannelBuf) this;
}

public ChannelBuf cbuf$setInt(final int index, final int data) {
    this.setInt(index, data);
    return (ChannelBuf) this;
}

public ChannelBuf cbuf$setIntLE(final int index, final int data) {
    this.setIntLE(index, data);
    return (ChannelBuf) this;
}

@Intrinsic
public int cbuf$readInt() {
    return this.readInt();
}

@Intrinsic
public int cbuf$readIntLE() {
    return this.readIntLE();
}

@Intrinsic
public int cbuf$getInt(final int index) {
    return this.getInt(index);
}

@Intrinsic
public int cbuf$getIntLE(final int index) {
    return this.getIntLE(index);
}

public ChannelBuf cbuf$writeLong(final long data) {
    this.writeLong(data);
    return (ChannelBuf) this;
}

public ChannelBuf cbuf$writeLongLE(final long data) {
    this.writeLongLE(data);
    return (ChannelBuf) this;
}

public ChannelBuf cbuf$setLong(final int index, final long data) {
    this.setLong(index, data);
    return (ChannelBuf) this;
}

public ChannelBuf cbuf$setLongLE(final int index, final long data) {
    this.setLongLE(index, data);
    return (ChannelBuf) this;
}

@Intrinsic
public long cbuf$readLong() {
    return this.readLong();
}

@Intrinsic
public long cbuf$readLongLE() {
    return this.readLongLE();
}

@Intrinsic
public long cbuf$getLong(final int index) {
    return this.getLong(index);
}

@Intrinsic
public long cbuf$getLongLE(final int index) {
    return this.getLongLE(index);
}
// --- float accessors: fluent delegates onto the underlying buffer, with *LE
// variants delegating to the *LE buffer methods. ---

/** Writes a float via {@code writeFloat(data)}; fluent. */
public ChannelBuf cbuf$writeFloat(final float data) {
    this.writeFloat(data);
    return (ChannelBuf) this;
}

/** Writes a float via {@code writeFloatLE(data)}; fluent. */
public ChannelBuf cbuf$writeFloatLE(final float data) {
    this.writeFloatLE(data);
    return (ChannelBuf) this;
}

/** Sets a float at the absolute {@code index} via {@code setFloat}; fluent. */
public ChannelBuf cbuf$setFloat(final int index, final float data) {
    this.setFloat(index, data);
    return (ChannelBuf) this;
}

/** Sets a float at the absolute {@code index} via {@code setFloatLE}; fluent. */
public ChannelBuf cbuf$setFloatLE(final int index, final float data) {
    this.setFloatLE(index, data);
    return (ChannelBuf) this;
}

/** Reads a float via {@code readFloat()}. */
@Intrinsic
public float cbuf$readFloat() {
    return this.readFloat();
}

/** Reads a float via {@code readFloatLE()}. */
@Intrinsic
public float cbuf$readFloatLE() {
    return this.readFloatLE();
}

/** Gets a float at the absolute {@code index} via {@code getFloat(index)}. */
@Intrinsic
public float cbuf$getFloat(final int index) {
    return this.getFloat(index);
}

/** Gets a float at the absolute {@code index} via {@code getFloatLE(index)}. */
@Intrinsic
public float cbuf$getFloatLE(final int index) {
    return this.getFloatLE(index);
}
// --- double accessors: fluent delegates onto the underlying buffer, with *LE
// variants delegating to the *LE buffer methods. ---

/** Writes a double via {@code writeDouble(data)}; fluent. */
public ChannelBuf cbuf$writeDouble(final double data) {
    this.writeDouble(data);
    return (ChannelBuf) this;
}

/** Writes a double via {@code writeDoubleLE(data)}; fluent. */
public ChannelBuf cbuf$writeDoubleLE(final double data) {
    this.writeDoubleLE(data);
    return (ChannelBuf) this;
}

/** Sets a double at the absolute {@code index} via {@code setDouble}; fluent. */
public ChannelBuf cbuf$setDouble(final int index, final double data) {
    this.setDouble(index, data);
    return (ChannelBuf) this;
}

/** Sets a double at the absolute {@code index} via {@code setDoubleLE}; fluent. */
public ChannelBuf cbuf$setDoubleLE(final int index, final double data) {
    this.setDoubleLE(index, data);
    return (ChannelBuf) this;
}

/** Reads a double via {@code readDouble()}. */
@Intrinsic
public double cbuf$readDouble() {
    return this.readDouble();
}

/** Reads a double via {@code readDoubleLE()}. */
@Intrinsic
public double cbuf$readDoubleLE() {
    return this.readDoubleLE();
}

/** Gets a double at the absolute {@code index} via {@code getDouble(index)}. */
@Intrinsic
public double cbuf$getDouble(final int index) {
    return this.getDouble(index);
}

/** Gets a double at the absolute {@code index} via {@code getDoubleLE(index)}. */
@Intrinsic
public double cbuf$getDoubleLE(final int index) {
    return this.getDoubleLE(index);
}
// --- variable-length int/long accessors. The set/get variants emulate
// absolute access by temporarily repositioning the writer/reader index around
// the shadow$ call, then restoring it.
// NOTE(review): the index restore is not wrapped in try/finally, so a throwing
// shadow$ call leaves the index moved — confirm whether callers rely on this.

/** Writes a variable-length int via {@code shadow$writeVarInt}; fluent. */
public ChannelBuf cbuf$writeVarInt(final int value) {
    return (ChannelBuf) this.shadow$writeVarInt(value); // fluent in target
}

/** Writes a variable-length int at the absolute {@code index}; fluent. */
public ChannelBuf cbuf$setVarInt(final int index, final int value) {
    final int oldIndex = this.writerIndex();
    this.writerIndex(index);
    this.shadow$writeVarInt(value);
    this.writerIndex(oldIndex);
    return (ChannelBuf) this;
}

/** Reads a variable-length int via {@code shadow$readVarInt}. */
@Intrinsic
public int cbuf$readVarInt() {
    return this.shadow$readVarInt();
}

/** Reads a variable-length int at the absolute {@code index}; reader index is restored. */
public int cbuf$getVarInt(final int index) {
    final int oldIndex = this.readerIndex();
    this.readerIndex(index);
    final int value = this.shadow$readVarInt();
    this.readerIndex(oldIndex);
    return value;
}

/** Writes a variable-length long via {@code shadow$writeVarLong}; fluent. */
public ChannelBuf cbuf$writeVarLong(final long value) {
    return (ChannelBuf) this.shadow$writeVarLong(value); // fluent in target
}

/** Writes a variable-length long at the absolute {@code index}; fluent. */
public ChannelBuf cbuf$setVarLong(final int index, final long value) {
    final int oldIndex = this.writerIndex();
    this.writerIndex(index);
    this.shadow$writeVarLong(value);
    this.writerIndex(oldIndex);
    return (ChannelBuf) this;
}

/** Reads a variable-length long via {@code shadow$readVarLong}. */
@Intrinsic
public long cbuf$readVarLong() {
    return this.shadow$readVarLong();
}

/** Reads a variable-length long at the absolute {@code index}; reader index is restored. */
public long cbuf$getVarLong(final int index) {
    final int oldIndex = this.readerIndex();
    this.readerIndex(index);
    final long value = this.shadow$readVarLong();
    this.readerIndex(oldIndex);
    return value;
}
// --- string accessors backed by the target's writeUtf/readUtf methods. Reads
// are capped at Constants.Networking.MAX_STRING_LENGTH. ---

/** Writes a non-null string via {@code shadow$writeUtf}; fluent. */
public ChannelBuf cbuf$writeString(final String data) {
    return (ChannelBuf) this.shadow$writeUtf(checkNotNull(data)); // fluent in target
}

/** Writes a non-null string at the absolute {@code index}; writer index is restored. */
public ChannelBuf cbuf$setString(final int index, final String data) {
    checkNotNull(data);
    final int oldIndex = this.writerIndex();
    this.writerIndex(index);
    this.shadow$writeUtf(data);
    this.writerIndex(oldIndex);
    return (ChannelBuf) this;
}

/** Reads a string via {@code shadow$readUtf}, limited to MAX_STRING_LENGTH. */
public String cbuf$readString() {
    return this.shadow$readUtf(Constants.Networking.MAX_STRING_LENGTH);
}

/** Reads a string at the absolute {@code index}; reader index is restored. */
public String cbuf$getString(final int index) {
    final int oldIndex = this.readerIndex();
    this.readerIndex(index);
    final String value = this.shadow$readUtf(Constants.Networking.MAX_STRING_LENGTH);
    this.readerIndex(oldIndex);
    return value;
}
/**
 * Writes a UTF-8 string as a 16-bit byte-length prefix followed by the raw
 * encoded bytes (legacy DataOutput-style framing, decoded by cbuf$readUTF).
 *
 * @param data the string to encode; must not be null
 * @return this buffer, for call chaining
 * @throws EncoderException if the UTF-8 encoding exceeds
 *         Constants.Networking.MAX_STRING_LENGTH_BYTES
 */
public ChannelBuf cbuf$writeUTF(final String data) {
    // Null check for parity with cbuf$setUTF, which already validates.
    checkNotNull(data, "data");
    final byte[] bytes = data.getBytes(Charsets.UTF_8);
    if (bytes.length > Constants.Networking.MAX_STRING_LENGTH_BYTES) {
        // Bug fix: the message claimed "bytes encoded" but reported the char
        // count (data.length()); report the actual encoded byte length.
        throw new EncoderException("String too big (was " + bytes.length + " bytes encoded, max "
                + Constants.Networking.MAX_STRING_LENGTH_BYTES + ")");
    }
    this.writeShort(bytes.length);
    this.writeBytes(bytes);
    return (ChannelBuf) this;
}
/**
 * Writes a length-prefixed UTF-8 string at the absolute {@code index} by
 * temporarily repositioning the writer index around cbuf$writeUTF.
 * NOTE(review): if cbuf$writeUTF throws, the writer index is not restored.
 *
 * @param index absolute position to write at
 * @param data the string to encode; must not be null
 * @return this buffer, for call chaining
 */
public ChannelBuf cbuf$setUTF(final int index, final String data) {
    checkNotNull(data, "data");
    final int oldIndex = this.writerIndex();
    this.writerIndex(index);
    this.cbuf$writeUTF(data);
    this.writerIndex(oldIndex);
    return (ChannelBuf) this;
}
/**
 * Reads a string written by cbuf$writeUTF: a 16-bit byte-length prefix
 * followed by that many UTF-8 bytes.
 * NOTE(review): readShort() is signed; a prefix above 32767 would yield a
 * negative length and a NegativeArraySizeException — confirm whether the
 * write-side MAX_STRING_LENGTH_BYTES cap rules this out.
 *
 * @return the decoded string
 */
public String cbuf$readUTF() {
    final short length = this.readShort();
    final byte[] bytes = new byte[length];
    this.readBytes(bytes);
    return new String(bytes, Charsets.UTF_8);
}
/**
 * Reads a length-prefixed UTF-8 string at the absolute {@code index} without
 * permanently moving the reader index.
 *
 * @param index absolute position to read from
 * @return the decoded string
 */
public String cbuf$getUTF(final int index) {
    final int oldIndex = this.readerIndex();
    this.readerIndex(index);
    try {
        final short length = this.readShort();
        final byte[] bytes = new byte[length];
        this.readBytes(bytes);
        return new String(bytes, Charsets.UTF_8);
    } finally {
        // Robustness fix: restore the reader index even when a read throws,
        // so a failed absolute get cannot corrupt sequential reads.
        this.readerIndex(oldIndex);
    }
}
// --- UUID accessors backed by the target's writeUUID/readUUID methods. ---

/** Writes a non-null UUID via {@code shadow$writeUUID}; fluent. */
public ChannelBuf cbuf$writeUniqueId(final UUID data) {
    checkNotNull(data, "data");
    return (ChannelBuf) this.shadow$writeUUID(data); // fluent in target
}

/** Writes a non-null UUID at the absolute {@code index}; writer index is restored. */
public ChannelBuf cbuf$setUniqueId(final int index, final UUID data) {
    checkNotNull(data, "data");
    final int oldIndex = this.writerIndex();
    this.writerIndex(index);
    this.shadow$writeUUID(data);
    this.writerIndex(oldIndex);
    return (ChannelBuf) this;
}

/** Reads a UUID via {@code shadow$readUUID}. */
@Intrinsic
public UUID cbuf$readUniqueId() {
    return this.shadow$readUUID();
}
/**
 * Reads a UUID at the absolute {@code index}; the reader index is restored
 * afterwards.
 *
 * Consistency fix: every other absolute accessor in this mixin carries the
 * {@code cbuf$} prefix; this one was plain {@code getUniqueId}, so it did not
 * follow the prefix convention the rest of the ChannelBuf bridge uses.
 *
 * @param index absolute position to read from
 * @return the decoded UUID
 */
public UUID cbuf$getUniqueId(final int index) {
    final int oldIndex = this.readerIndex();
    this.readerIndex(index);
    final UUID data = this.shadow$readUUID();
    this.readerIndex(oldIndex);
    return data;
}

/** Kept for backward compatibility; delegates to {@link #cbuf$getUniqueId(int)}. */
public UUID getUniqueId(final int index) {
    return this.cbuf$getUniqueId(index);
}
// --- DataView accessors: translate between the API DataView and NBT compound
// tags via NBTTranslator, then delegate to the target's NBT read/write. ---

/** Translates a non-null DataView to NBT and writes it; fluent. */
public ChannelBuf cbuf$writeDataView(final DataView data) {
    final CompoundTag compound = NBTTranslator.INSTANCE.translate(checkNotNull(data, "data"));
    this.shadow$writeNbt(compound);
    return (ChannelBuf) this;
}

/** Writes a non-null DataView at the absolute {@code index}; writer index is restored. */
public ChannelBuf cbuf$setDataView(final int index, final DataView data) {
    checkNotNull(data, "data");
    final int oldIndex = this.writerIndex();
    this.writerIndex(index);
    this.cbuf$writeDataView(data);
    this.writerIndex(oldIndex);
    return (ChannelBuf) this;
}

/**
 * Reads NBT from the buffer and translates it into a DataView.
 * NOTE(review): behavior when shadow$readNbt() yields null (e.g. an empty
 * tag) depends on NBTTranslator.translateFrom — confirm it tolerates null.
 *
 * @throws DecoderException wrapping any IOException from the NBT read
 */
public DataView cbuf$readDataView() {
    try {
        return NBTTranslator.INSTANCE.translateFrom(this.shadow$readNbt());
    } catch (final IOException e) {
        throw new DecoderException(e);
    }
}

/** Reads a DataView at the absolute {@code index}; reader index is restored. */
public DataView cbuf$getDataView(final int index) {
    final int oldIndex = this.readerIndex();
    this.readerIndex(index);
    final DataView data = this.cbuf$readDataView();
    this.readerIndex(oldIndex);
    return data;
}
}
| |
// Copyright (C) 2012 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.gerrit.server.git;
import com.google.gerrit.common.data.LabelType;
import com.google.gerrit.common.data.LabelTypes;
import com.google.gerrit.reviewdb.client.Account;
import com.google.gerrit.reviewdb.client.ApprovalCategory;
import com.google.gerrit.reviewdb.client.Branch;
import com.google.gerrit.reviewdb.client.PatchSet;
import com.google.gerrit.reviewdb.client.PatchSetApproval;
import com.google.gerrit.reviewdb.server.ReviewDb;
import com.google.gerrit.server.IdentifiedUser;
import com.google.gwtorm.server.OrmException;
import com.google.inject.Provider;
import org.eclipse.jgit.errors.IncorrectObjectTypeException;
import org.eclipse.jgit.errors.MissingObjectException;
import org.eclipse.jgit.lib.AnyObjectId;
import org.eclipse.jgit.lib.CommitBuilder;
import org.eclipse.jgit.lib.Constants;
import org.eclipse.jgit.lib.MutableObjectId;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.ObjectInserter;
import org.eclipse.jgit.lib.PersonIdent;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.merge.MergeStrategy;
import org.eclipse.jgit.merge.ThreeWayMerger;
import org.eclipse.jgit.revwalk.FooterKey;
import org.eclipse.jgit.revwalk.FooterLine;
import org.eclipse.jgit.revwalk.RevCommit;
import org.eclipse.jgit.revwalk.RevFlag;
import org.eclipse.jgit.revwalk.RevSort;
import org.eclipse.jgit.revwalk.RevWalk;
import org.eclipse.jgit.transport.PackParser;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.TimeZone;
public class MergeUtil {
/** Logger for merge diagnostics (approval read failures, commit parse warnings). */
private static final Logger log = LoggerFactory.getLogger(MergeUtil.class);

/** Fully-qualified name of the default branch ref, refs/heads/master. */
private static final String R_HEADS_MASTER =
    Constants.R_HEADS + Constants.MASTER;

/** Legacy approval category id rendered as a "Reviewed-by" footer below. */
private static final ApprovalCategory.Id CRVW = //
    new ApprovalCategory.Id("CRVW");

/** Legacy approval category id rendered as a "Tested-by" footer below. */
private static final ApprovalCategory.Id VRIF = //
    new ApprovalCategory.Id("VRIF");

/** Footer keys checked/emitted when building cherry-pick commit messages. */
private static final FooterKey REVIEWED_ON = new FooterKey("Reviewed-on");
private static final FooterKey CHANGE_ID = new FooterKey("Change-Id");
/**
 * Finds and removes the first commit in {@code toMerge} that can be reached
 * by fast-forwarding {@code mergeTip} (or any commit when the branch tip is
 * still unborn).
 *
 * @param mergeTip current branch tip, or null for an unborn branch
 * @param rw walk used for the merged-into test
 * @param toMerge candidates; the returned commit is removed from this list
 * @return the removed fast-forward candidate, or {@code mergeTip} unchanged
 *         when none qualifies
 * @throws MergeException if the reachability test fails with an I/O error
 */
public static CodeReviewCommit getFirstFastForward(
    final CodeReviewCommit mergeTip, final RevWalk rw,
    final List<CodeReviewCommit> toMerge) throws MergeException {
  final Iterator<CodeReviewCommit> it = toMerge.iterator();
  while (it.hasNext()) {
    final CodeReviewCommit candidate = it.next();
    try {
      final boolean fastForwardable =
          mergeTip == null || rw.isMergedInto(mergeTip, candidate);
      if (fastForwardable) {
        it.remove();
        return candidate;
      }
    } catch (IOException e) {
      throw new MergeException("Cannot fast-forward test during merge", e);
    }
  }
  return mergeTip;
}
/**
 * Replaces {@code toSort} in place with the minimal set of branch heads
 * produced by the sorter, re-ordered by each commit's original submission
 * order.
 *
 * @param mergeSorter sorter that collapses the list to independent heads
 * @param toSort list to reduce; mutated in place
 * @throws MergeException if head sorting fails with an I/O error
 */
public static void reduceToMinimalMerge(final MergeSorter mergeSorter,
    final List<CodeReviewCommit> toSort) throws MergeException {
  final Collection<CodeReviewCommit> heads;
  try {
    heads = mergeSorter.sort(toSort);
  } catch (IOException e) {
    throw new MergeException("Branch head sorting failed", e);
  }
  toSort.clear();
  toSort.addAll(heads);
  Collections.sort(toSort, new Comparator<CodeReviewCommit>() {
    @Override
    public int compare(final CodeReviewCommit a, final CodeReviewCommit b) {
      // Explicit comparison instead of "a - b" subtraction, which can
      // overflow for extreme int values and invert the ordering.
      if (a.originalOrder < b.originalOrder) {
        return -1;
      }
      if (a.originalOrder > b.originalOrder) {
        return 1;
      }
      return 0;
    }
  });
}
/**
 * Returns the most recent positive SUBMIT approval on the given patch set,
 * or null when the id is null, no such approval exists, or the database
 * read fails (best-effort lookup).
 *
 * @param reviewDb database to read approvals from
 * @param c patch set id; may be null
 * @return the latest-granted submit approval, or null
 */
public static PatchSetApproval getSubmitter(final ReviewDb reviewDb,
    final PatchSet.Id c) {
  if (c == null) {
    return null;
  }
  PatchSetApproval submitter = null;
  try {
    final List<PatchSetApproval> approvals =
        reviewDb.patchSetApprovals().byPatchSet(c).toList();
    for (PatchSetApproval a : approvals) {
      if (a.getValue() > 0
          && ApprovalCategory.SUBMIT.equals(a.getCategoryId())) {
        // Keep whichever submit approval was granted last.
        if (submitter == null
            || a.getGranted().compareTo(submitter.getGranted()) > 0) {
          submitter = a;
        }
      }
    }
  } catch (OrmException e) {
    // Previously swallowed silently; remain best-effort but record the
    // failure so a mysteriously missing submitter can be diagnosed.
    log.warn("Cannot read approvals for patch set " + c, e);
  }
  return submitter;
}
/**
 * Cherry-picks {@code originalCommit} onto {@code mergeTip} by three-way
 * merging against the original's first parent, then writing a new commit
 * with the original author and the given committer/message.
 *
 * NOTE(review): {@code useContentMerge} is a boxed Boolean that is unboxed
 * inside newThreeWayMerger; a null value would throw NPE — confirm callers
 * never pass null.
 *
 * @return the parsed cherry-pick commit, or null when the merge conflicts
 */
public static CodeReviewCommit createCherryPickFromCommit(Repository repo,
    ObjectInserter inserter, CodeReviewCommit mergeTip, CodeReviewCommit originalCommit,
    PersonIdent cherryPickCommitterIdent, String commitMsg, RevWalk rw,
    Boolean useContentMerge) throws MissingObjectException, IncorrectObjectTypeException, IOException {
  final ThreeWayMerger m =
      newThreeWayMerger(repo, inserter, useContentMerge);
  // Base on the first parent so the merge applies only this commit's delta.
  m.setBase(originalCommit.getParent(0));
  if (m.merge(mergeTip, originalCommit)) {
    final CommitBuilder mergeCommit = new CommitBuilder();
    mergeCommit.setTreeId(m.getResultTreeId());
    // Single parent: the current tip — this is a pick, not a merge commit.
    mergeCommit.setParentId(mergeTip);
    mergeCommit.setAuthor(originalCommit.getAuthorIdent());
    mergeCommit.setCommitter(cherryPickCommitterIdent);
    mergeCommit.setMessage(commitMsg);
    final ObjectId id = commit(inserter, mergeCommit);
    final CodeReviewCommit newCommit =
        (CodeReviewCommit) rw.parseCommit(id);
    return newCommit;
  } else {
    return null;
  }
}
/**
 * Builds the commit message for a cherry-picked change: the original message,
 * normalized to end in a newline, followed by Change-Id, Reviewed-on, and one
 * footer line per positive approval (Reviewed-by / Tested-by / label name).
 * Footers already present in the original message are not duplicated.
 *
 * @param n commit being cherry-picked
 * @param labelTypes label configuration used to name non-legacy approvals
 * @param urlProvider supplies the site URL for the Reviewed-on footer; may
 *        yield null, in which case the footer is omitted
 * @param db database used to load the patch set's approvals
 * @param identifiedUserFactory resolves approver accounts for footer identities
 * @return the assembled commit message
 */
public static String createCherryPickCommitMessage(final CodeReviewCommit n,
    final LabelTypes labelTypes, final Provider<String> urlProvider,
    final ReviewDb db, final IdentifiedUser.GenericFactory identifiedUserFactory) {
  final List<FooterLine> footers = n.getFooterLines();
  final StringBuilder msgbuf = new StringBuilder();
  msgbuf.append(n.getFullMessage());
  if (msgbuf.length() == 0) {
    // WTF, an empty commit message?
    msgbuf.append("<no commit message provided>");
  }
  if (msgbuf.charAt(msgbuf.length() - 1) != '\n') {
    // Missing a trailing LF? Correct it (perhaps the editor was broken).
    msgbuf.append('\n');
  }
  if (footers.isEmpty()) {
    // Doesn't end in a "Signed-off-by: ..." style line? Add another line
    // break to start a new paragraph for the reviewed-by tag lines.
    //
    msgbuf.append('\n');
  }
  if (!contains(footers, CHANGE_ID, n.change.getKey().get())) {
    msgbuf.append(CHANGE_ID.getName());
    msgbuf.append(": ");
    msgbuf.append(n.change.getKey().get());
    msgbuf.append('\n');
  }
  final String siteUrl = urlProvider.get();
  if (siteUrl != null) {
    final String url = siteUrl + n.patchsetId.getParentKey().get();
    if (!contains(footers, REVIEWED_ON, url)) {
      msgbuf.append(REVIEWED_ON.getName());
      msgbuf.append(": ");
      msgbuf.append(url);
      msgbuf.append('\n');
    }
  }
  PatchSetApproval submitAudit = null;
  for (final PatchSetApproval a : getApprovalsForCommit(db, n)) {
    if (a.getValue() <= 0) {
      // Negative votes aren't counted.
      continue;
    }
    if (ApprovalCategory.SUBMIT.equals(a.getCategoryId())) {
      // Submit is treated specially, below (becomes committer)
      //
      if (submitAudit == null
          || a.getGranted().compareTo(submitAudit.getGranted()) > 0) {
        submitAudit = a;
      }
      continue;
    }
    final Account acc =
        identifiedUserFactory.create(a.getAccountId()).getAccount();
    // Build "Full Name <email>" for the approver, skipping whichever parts
    // are unavailable.
    final StringBuilder identbuf = new StringBuilder();
    if (acc.getFullName() != null && acc.getFullName().length() > 0) {
      if (identbuf.length() > 0) {
        identbuf.append(' ');
      }
      identbuf.append(acc.getFullName());
    }
    if (acc.getPreferredEmail() != null
        && acc.getPreferredEmail().length() > 0) {
      if (isSignedOffBy(footers, acc.getPreferredEmail())) {
        // Already credited via a Signed-off-by footer; skip entirely.
        continue;
      }
      if (identbuf.length() > 0) {
        identbuf.append(' ');
      }
      identbuf.append('<');
      identbuf.append(acc.getPreferredEmail());
      identbuf.append('>');
    }
    if (identbuf.length() == 0) {
      // Nothing reasonable to describe them by? Ignore them.
      continue;
    }
    // Legacy categories map to fixed tags; everything else uses the label name.
    final String tag;
    if (CRVW.equals(a.getCategoryId())) {
      tag = "Reviewed-by";
    } else if (VRIF.equals(a.getCategoryId())) {
      tag = "Tested-by";
    } else {
      final LabelType lt = labelTypes.byId(a.getCategoryId().get());
      if (lt == null) {
        // TODO: Support arbitrary labels.
        continue;
      }
      tag = lt.getName();
    }
    if (!contains(footers, new FooterKey(tag), identbuf.toString())) {
      msgbuf.append(tag);
      msgbuf.append(": ");
      msgbuf.append(identbuf);
      msgbuf.append('\n');
    }
  }
  return msgbuf.toString();
}
/**
 * Loads all approvals on the commit's patch set, sorted by grant timestamp
 * (oldest first). Returns an empty list and logs when the read fails.
 *
 * @param db database to read from
 * @param n commit whose patch set's approvals are wanted
 * @return grant-date-ordered approvals, or an empty list on error
 */
public static List<PatchSetApproval> getApprovalsForCommit(final ReviewDb db, final CodeReviewCommit n) {
  try {
    final List<PatchSetApproval> approvals =
        db.patchSetApprovals().byPatchSet(n.patchsetId).toList();
    final Comparator<PatchSetApproval> byGrantDate =
        new Comparator<PatchSetApproval>() {
          @Override
          public int compare(final PatchSetApproval a, final PatchSetApproval b) {
            return a.getGranted().compareTo(b.getGranted());
          }
        };
    Collections.sort(approvals, byGrantDate);
    return approvals;
  } catch (OrmException e) {
    log.error("Can't read approval records for " + n.patchsetId, e);
    return Collections.emptyList();
  }
}
/**
 * Returns true when any footer line has the given key and exactly the given
 * value. Footer lists are tiny, so a linear scan is fine.
 */
private static boolean contains(List<FooterLine> footers, FooterKey key, String val) {
  for (int i = 0; i < footers.size(); i++) {
    final FooterLine line = footers.get(i);
    if (line.matches(key) && val.equals(line.getValue())) {
      return true;
    }
  }
  return false;
}
/**
 * Returns true when a Signed-off-by footer already credits the given email
 * address, so the approver need not be listed again.
 */
private static boolean isSignedOffBy(List<FooterLine> footers, String email) {
  for (int i = 0; i < footers.size(); i++) {
    final FooterLine line = footers.get(i);
    if (line.matches(FooterKey.SIGNED_OFF_BY)
        && email.equals(line.getEmailAddress())) {
      return true;
    }
  }
  return false;
}
/**
 * Chooses the author identity for a merge commit: the submitter with the
 * most recent submit approval across the merged commits, falling back to
 * {@code myIdent} when no submitter can be determined.
 *
 * @param reviewDb database used to look up submit approvals
 * @param identifiedUserFactory resolves the submitter's account
 * @param myIdent server identity used as fallback author
 * @param rw walk used to parse commit bodies for author emails
 * @param codeReviewCommits commits being merged
 * @return the identity to record as merge-commit author
 */
public static PersonIdent computeMergeCommitAuthor(final ReviewDb reviewDb,
    final IdentifiedUser.GenericFactory identifiedUserFactory,
    final PersonIdent myIdent, final RevWalk rw,
    final List<CodeReviewCommit> codeReviewCommits) {
  // Pick the latest-granted submitter across all commits being merged.
  PatchSetApproval submitter = null;
  for (final CodeReviewCommit c : codeReviewCommits) {
    PatchSetApproval s = getSubmitter(reviewDb, c.patchsetId);
    if (submitter == null
        || (s != null && s.getGranted().compareTo(submitter.getGranted()) > 0)) {
      submitter = s;
    }
  }
  // Try to use the submitter's identity for the merge commit author.
  // If all of the commits being merged are created by the submitter,
  // prefer the identity line they used in the commits rather than the
  // preferred identity stored in the user account. This way the Git
  // commit records are more consistent internally.
  //
  PersonIdent authorIdent;
  if (submitter != null) {
    IdentifiedUser who =
        identifiedUserFactory.create(submitter.getAccountId());
    // Collect the distinct author emails used across the merged commits.
    Set<String> emails = new HashSet<String>();
    for (RevCommit c : codeReviewCommits) {
      try {
        rw.parseBody(c);
      } catch (IOException e) {
        // Best effort: an unreadable commit just contributes no email.
        log.warn("Cannot parse commit " + c.name(), e);
        continue;
      }
      emails.add(c.getAuthorIdent().getEmailAddress());
    }
    final Timestamp dt = submitter.getGranted();
    final TimeZone tz = myIdent.getTimeZone();
    if (emails.size() == 1
        && who.getEmailAddresses().contains(emails.iterator().next())) {
      // Single author email, owned by the submitter: reuse their ident line.
      authorIdent =
          new PersonIdent(codeReviewCommits.get(0).getAuthorIdent(), dt, tz);
    } else {
      authorIdent = who.newCommitterIdent(dt, tz);
    }
  } else {
    authorIdent = myIdent;
  }
  return authorIdent;
}
/**
 * Tests whether {@code toMerge} can be merged onto {@code mergeTip} without
 * conflicts, performing a dry-run merge that writes no real objects.
 *
 * @return false when dependencies are missing, the dry-run merge conflicts,
 *         or JGit reports multiple merge bases (criss-cross history)
 * @throws MergeException on any other I/O failure during the merge attempt
 */
public static boolean canMerge(final MergeSorter mergeSorter,
    final Repository repo, final boolean useContentMerge,
    final CodeReviewCommit mergeTip, final CodeReviewCommit toMerge)
    throws MergeException {
  if (hasMissingDependencies(mergeSorter, toMerge)) {
    return false;
  }
  final ThreeWayMerger m =
      newThreeWayMerger(repo, createDryRunInserter(), useContentMerge);
  try {
    return m.merge(new AnyObjectId[] {mergeTip, toMerge});
  } catch (IOException e) {
    // JGit signals criss-cross histories via this message; guard against a
    // null message before matching (IOException messages may be null).
    final String msg = e.getMessage();
    if (msg != null && msg.startsWith("Multiple merge bases for")) {
      return false;
    }
    throw new MergeException("Cannot merge " + toMerge.name(), e);
  }
}
/**
 * Tests whether {@code toMerge} could be applied by fast-forwarding the
 * branch: dependencies must be satisfied and the tip (when it exists) must
 * already be an ancestor of the candidate.
 *
 * @throws MergeException if the reachability test fails with an I/O error
 */
public static boolean canFastForward(final MergeSorter mergeSorter,
    final CodeReviewCommit mergeTip, final RevWalk rw,
    final CodeReviewCommit toMerge) throws MergeException {
  if (hasMissingDependencies(mergeSorter, toMerge)) {
    return false;
  }
  // An unborn branch (null tip) is trivially fast-forwardable.
  if (mergeTip == null) {
    return true;
  }
  try {
    return rw.isMergedInto(mergeTip, toMerge);
  } catch (IOException e) {
    throw new MergeException("Cannot fast-forward test during merge", e);
  }
}
/**
 * Tests whether {@code toMerge} can land on the branch via cherry-pick:
 * unborn branches always succeed, root commits never do, single-parent
 * commits are dry-run merged against their parent, and merge commits fall
 * back to the fast-forward/merge tests.
 *
 * @throws MergeException if the dry-run merge fails with an I/O error
 */
public static boolean canCherryPick(final MergeSorter mergeSorter,
    final Repository repo, final boolean useContentMerge,
    final CodeReviewCommit mergeTip, final RevWalk rw,
    final CodeReviewCommit toMerge) throws MergeException {
  if (mergeTip == null) {
    // The branch is unborn. Fast-forward is possible.
    //
    return true;
  }
  if (toMerge.getParentCount() == 0) {
    // Refuse to merge a root commit into an existing branch,
    // we cannot obtain a delta for the cherry-pick to apply.
    //
    return false;
  }
  if (toMerge.getParentCount() == 1) {
    // If there is only one parent, a cherry-pick can be done by
    // taking the delta relative to that one parent and redoing
    // that on the current merge tip.
    //
    try {
      final ThreeWayMerger m =
          newThreeWayMerger(repo, createDryRunInserter(), useContentMerge);
      m.setBase(toMerge.getParent(0));
      return m.merge(mergeTip, toMerge);
    } catch (IOException e) {
      throw new MergeException("Cannot merge " + toMerge.name(), e);
    }
  }
  // There are multiple parents, so this is a merge commit. We
  // don't want to cherry-pick the merge as clients can't easily
  // rebase their history with that merge present and replaced
  // by an equivalent merge with a different first parent. So
  // instead behave as though MERGE_IF_NECESSARY was configured.
  //
  return canFastForward(mergeSorter, mergeTip, rw, toMerge)
      || canMerge(mergeSorter, repo, useContentMerge, mergeTip, toMerge);
}
/**
 * Returns true when the commit depends on changes that are not yet
 * submittable: the sorter drops a commit from its result when one of its
 * ancestors is missing.
 *
 * @throws MergeException if head sorting fails with an I/O error
 */
public static boolean hasMissingDependencies(final MergeSorter mergeSorter,
    final CodeReviewCommit toMerge) throws MergeException {
  final Collection<CodeReviewCommit> sorted;
  try {
    sorted = mergeSorter.sort(Collections.singleton(toMerge));
  } catch (IOException e) {
    throw new MergeException("Branch head sorting failed", e);
  }
  return !sorted.contains(toMerge);
}
/**
 * Returns an inserter that discards all object data, for dry-run merges:
 * each insert yields a fresh dummy id and nothing is ever written.
 */
public static ObjectInserter createDryRunInserter() {
  return new ObjectInserter() {
    // Rolling dummy id; incremented per insert so ids stay distinct.
    private final MutableObjectId buf = new MutableObjectId();
    private final static int LAST_BYTE = Constants.OBJECT_ID_LENGTH - 1;

    @Override
    public ObjectId insert(int objectType, long length, InputStream in)
        throws IOException {
      // create non-existing dummy ID
      buf.setByte(LAST_BYTE, buf.getByte(LAST_BYTE) + 1);
      return buf.copy();
    }

    @Override
    public PackParser newPackParser(InputStream in) throws IOException {
      // Pack import makes no sense for a throwaway inserter.
      throw new UnsupportedOperationException();
    }

    @Override
    public void flush() throws IOException {
      // Do nothing.
    }

    @Override
    public void release() {
      // Do nothing.
    }
  };
}
/**
 * Attempts to merge {@code n} onto {@code mergeTip}. On success a merge
 * commit is written and returned; on a path conflict or criss-cross history
 * the affected commits are marked with the corresponding failure status and
 * the original tip is returned unchanged.
 *
 * @return the new merge commit, or {@code mergeTip} when the merge failed
 * @throws MergeException on unexpected I/O failures
 */
public static CodeReviewCommit mergeOneCommit(final ReviewDb reviewDb,
    final IdentifiedUser.GenericFactory identifiedUserFactory,
    final PersonIdent myIdent, final Repository repo, final RevWalk rw,
    final ObjectInserter inserter, final RevFlag canMergeFlag,
    final boolean useContentMerge, final Branch.NameKey destBranch,
    final CodeReviewCommit mergeTip, final CodeReviewCommit n)
    throws MergeException {
  final ThreeWayMerger m = newThreeWayMerger(repo, inserter, useContentMerge);
  try {
    if (m.merge(new AnyObjectId[] {mergeTip, n})) {
      return writeMergeCommit(reviewDb, identifiedUserFactory, myIdent, rw,
          inserter, canMergeFlag, destBranch, mergeTip, m.getResultTreeId(), n);
    } else {
      failed(rw, canMergeFlag, mergeTip, n, CommitMergeStatus.PATH_CONFLICT);
    }
  } catch (IOException e) {
    // JGit signals criss-cross histories via this message; guard against a
    // null message before matching (IOException messages may be null).
    final String msg = e.getMessage();
    if (msg != null && msg.startsWith("Multiple merge bases for")) {
      try {
        failed(rw, canMergeFlag, mergeTip, n,
            CommitMergeStatus.CRISS_CROSS_MERGE);
      } catch (IOException e2) {
        // Report the original merge failure; marking failed was secondary.
        throw new MergeException("Cannot merge " + n.name(), e);
      }
    } else {
      throw new MergeException("Cannot merge " + n.name(), e);
    }
  }
  return mergeTip;
}
/**
 * Marks every commit reachable from {@code n} but not from {@code mergeTip}
 * with the given failure status.
 *
 * @return the last commit marked, or null when the walk yields nothing
 */
private static CodeReviewCommit failed(final RevWalk rw,
    final RevFlag canMergeFlag, final CodeReviewCommit mergeTip,
    final CodeReviewCommit n, final CommitMergeStatus failure)
    throws MissingObjectException, IncorrectObjectTypeException, IOException {
  // Walk only the commits this merge attempt would have introduced.
  rw.resetRetain(canMergeFlag);
  rw.markStart(n);
  rw.markUninteresting(mergeTip);
  CodeReviewCommit failed;
  while ((failed = (CodeReviewCommit) rw.next()) != null) {
    failed.statusCode = failure;
  }
  return failed;
}
/**
 * Writes the merge commit for tree {@code treeId} with parents
 * {@code mergeTip} and {@code n}, synthesizing a message that lists the
 * changes being merged and choosing the author via
 * {@link #computeMergeCommitAuthor}.
 *
 * @return the newly written merge commit, parsed through {@code rw}
 */
public static CodeReviewCommit writeMergeCommit(final ReviewDb reviewDb,
    final IdentifiedUser.GenericFactory identifiedUserFactory,
    final PersonIdent myIdent, final RevWalk rw,
    final ObjectInserter inserter, final RevFlag canMergeFlag,
    final Branch.NameKey destBranch, final CodeReviewCommit mergeTip,
    final ObjectId treeId, final CodeReviewCommit n) throws IOException,
    MissingObjectException, IncorrectObjectTypeException {
  // Collect the reviewed commits this merge introduces (those that carry a
  // patch set id), i.e. reachable from n but not from the current tip.
  final List<CodeReviewCommit> merged = new ArrayList<CodeReviewCommit>();
  rw.resetRetain(canMergeFlag);
  rw.markStart(n);
  rw.markUninteresting(mergeTip);
  for (final RevCommit c : rw) {
    final CodeReviewCommit crc = (CodeReviewCommit) c;
    if (crc.patchsetId != null) {
      merged.add(crc);
    }
  }

  // Single change: quote its subject. Multiple: list abbreviated change keys.
  final StringBuilder msgbuf = new StringBuilder();
  if (merged.size() == 1) {
    final CodeReviewCommit c = merged.get(0);
    rw.parseBody(c);
    msgbuf.append("Merge \"");
    msgbuf.append(c.getShortMessage());
    msgbuf.append("\"");
  } else {
    msgbuf.append("Merge changes ");
    for (final Iterator<CodeReviewCommit> i = merged.iterator(); i.hasNext();) {
      msgbuf.append(i.next().change.getKey().abbreviate());
      if (i.hasNext()) {
        msgbuf.append(',');
      }
    }
  }
  // Only name the destination when it is not the default master branch.
  if (!R_HEADS_MASTER.equals(destBranch.get())) {
    msgbuf.append(" into ");
    msgbuf.append(destBranch.getShortName());
  }

  // For multi-change merges, append a bullet list of subjects.
  if (merged.size() > 1) {
    msgbuf.append("\n\n* changes:\n");
    for (final CodeReviewCommit c : merged) {
      rw.parseBody(c);
      msgbuf.append("  ");
      msgbuf.append(c.getShortMessage());
      msgbuf.append("\n");
    }
  }

  PersonIdent authorIdent =
      computeMergeCommitAuthor(reviewDb, identifiedUserFactory, myIdent, rw,
          merged);

  final CommitBuilder mergeCommit = new CommitBuilder();
  mergeCommit.setTreeId(treeId);
  mergeCommit.setParentIds(mergeTip, n);
  mergeCommit.setAuthor(authorIdent);
  mergeCommit.setCommitter(myIdent);
  mergeCommit.setMessage(msgbuf.toString());

  return (CodeReviewCommit) rw.parseCommit(commit(inserter, mergeCommit));
}
/**
 * Creates a three-way merger for the repository: content-resolving when
 * {@code useContentMerge} is set, otherwise a strict tree merge that fails
 * on any file touched on both sides. The given inserter is wrapped so the
 * merger can neither flush nor release it — the caller keeps ownership.
 */
public static ThreeWayMerger newThreeWayMerger(final Repository repo,
    final ObjectInserter inserter, final boolean useContentMerge) {
  ThreeWayMerger m;
  if (useContentMerge) {
    // Settings for this project allow us to try and
    // automatically resolve conflicts within files if needed.
    // Use ResolveMerge and instruct to operate in core.
    m = MergeStrategy.RESOLVE.newMerger(repo, true);
  } else {
    // No auto conflict resolving allowed. If any of the
    // affected files was modified, merge will fail.
    m = MergeStrategy.SIMPLE_TWO_WAY_IN_CORE.newMerger(repo);
  }
  m.setObjectInserter(new ObjectInserter.Filter() {
    @Override
    protected ObjectInserter delegate() {
      return inserter;
    }

    @Override
    public void flush() {
      // Intentionally no-op: the caller decides when to flush.
    }

    @Override
    public void release() {
      // Intentionally no-op: the caller owns the inserter's lifetime.
    }
  });
  return m;
}
/**
 * Inserts the built commit and flushes the inserter so the object is
 * durably written before the id is returned.
 *
 * @return the id of the newly inserted commit object
 */
public static ObjectId commit(final ObjectInserter inserter,
    final CommitBuilder mergeCommit) throws IOException,
    UnsupportedEncodingException {
  ObjectId id = inserter.insert(mergeCommit);
  inserter.flush();
  return id;
}
/**
 * Marks every newly merged reviewed commit (reachable from the tip but not
 * from any already-accepted commit) as CLEAN_MERGE, and returns the submit
 * approval of the first such commit encountered (oldest first, due to the
 * reverse-topo walk).
 *
 * @return the first submitter approval found, or null for an unborn branch
 *         or when nothing carried one
 * @throws MergeException if the walk fails with an I/O error
 */
public static PatchSetApproval markCleanMerges(final ReviewDb reviewDb,
    final RevWalk rw, final RevFlag canMergeFlag,
    final CodeReviewCommit mergeTip, final Set<RevCommit> alreadyAccepted)
    throws MergeException {
  if (mergeTip == null) {
    // If mergeTip is null here, branchTip was null, indicating a new branch
    // at the start of the merge process. We also elected to merge nothing,
    // probably due to missing dependencies. Nothing was cleanly merged.
    //
    return null;
  }

  try {
    PatchSetApproval submitApproval = null;

    // Reverse topological order: oldest commits come out of the walk first.
    rw.resetRetain(canMergeFlag);
    rw.sort(RevSort.TOPO);
    rw.sort(RevSort.REVERSE, true);
    rw.markStart(mergeTip);
    for (RevCommit c : alreadyAccepted) {
      rw.markUninteresting(c);
    }

    CodeReviewCommit c;
    while ((c = (CodeReviewCommit) rw.next()) != null) {
      if (c.patchsetId != null) {
        c.statusCode = CommitMergeStatus.CLEAN_MERGE;
        if (submitApproval == null) {
          submitApproval = getSubmitter(reviewDb, c.patchsetId);
        }
      }
    }

    return submitApproval;
  } catch (IOException e) {
    throw new MergeException("Cannot mark clean merges", e);
  }
}
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.tools;
import java.io.File;
import java.io.IOException;
import java.net.InetAddress;
import java.net.MalformedURLException;
import java.net.UnknownHostException;
import java.util.*;
import com.google.common.base.Optional;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import org.apache.commons.cli.*;
import com.datastax.driver.core.SSLOptions;
import javax.net.ssl.SSLContext;
import org.apache.cassandra.config.*;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.io.sstable.SSTableLoader;
import org.apache.cassandra.security.SSLFactory;
import org.apache.cassandra.streaming.*;
import org.apache.cassandra.utils.JVMStabilityInspector;
import org.apache.cassandra.utils.NativeSSTableLoaderClient;
import org.apache.cassandra.utils.OutputHandler;
public class BulkLoader
{
// Command-line tool name and option keys parsed by LoaderOptions below.
private static final String TOOL_NAME = "sstableloader";
private static final String VERBOSE_OPTION = "verbose";
private static final String HELP_OPTION = "help";
private static final String NOPROGRESS_OPTION = "no-progress";
// Hosts to skip when streaming.
private static final String IGNORE_NODES_OPTION = "ignore";
// Initial contact points for discovering the cluster.
private static final String INITIAL_HOST_ADDRESS_OPTION = "nodes";
private static final String NATIVE_PORT_OPTION = "port";
private static final String USER_OPTION = "username";
private static final String PASSWD_OPTION = "password";
// Outbound stream throughput cap, in megabits/s.
private static final String THROTTLE_MBITS = "throttle";
private static final String TRANSPORT_FACTORY = "transport-factory";
/* client encryption options */
private static final String SSL_TRUSTSTORE = "truststore";
private static final String SSL_TRUSTSTORE_PW = "truststore-password";
private static final String SSL_KEYSTORE = "keystore";
private static final String SSL_KEYSTORE_PW = "keystore-password";
private static final String SSL_PROTOCOL = "ssl-protocol";
private static final String SSL_ALGORITHM = "ssl-alg";
private static final String SSL_STORE_TYPE = "store-type";
private static final String SSL_CIPHER_SUITES = "ssl-ciphers";
private static final String CONNECTIONS_PER_HOST = "connections-per-host";
private static final String CONFIG_PATH = "conf-path";
/**
 * Entry point for sstableloader: parses options, streams the SSTables in
 * the given directory to the cluster (optionally printing progress), then
 * exits 0 on success or 1 on any failure.
 *
 * NOTE(review): the broad catch blocks also swallow InterruptedException
 * without re-interrupting; acceptable here only because the process exits
 * immediately afterwards.
 */
public static void main(String args[])
{
    Config.setClientMode(true);
    LoaderOptions options = LoaderOptions.parseArgs(args);
    OutputHandler handler = new OutputHandler.SystemOutput(options.verbose, options.debug);
    SSTableLoader loader = new SSTableLoader(
            options.directory,
            new ExternalClient(
                    options.hosts,
                    options.nativePort,
                    options.user,
                    options.passwd,
                    options.storagePort,
                    options.sslStoragePort,
                    options.serverEncOptions,
                    buildSSLOptions((EncryptionOptions.ClientEncryptionOptions)options.encOptions)),
            handler,
            options.connectionsPerHost);
    DatabaseDescriptor.setStreamThroughputOutboundMegabitsPerSec(options.throttle);
    StreamResultFuture future = null;

    // The indicator doubles as the stream-event handler when progress
    // output is enabled.
    ProgressIndicator indicator = new ProgressIndicator();
    try
    {
        if (options.noProgress)
        {
            future = loader.stream(options.ignores);
        }
        else
        {
            future = loader.stream(options.ignores, indicator);
        }
    }
    catch (Exception e)
    {
        JVMStabilityInspector.inspectThrowable(e);
        System.err.println(e.getMessage());
        if (e.getCause() != null)
            System.err.println(e.getCause());
        e.printStackTrace(System.err);
        System.exit(1);
    }

    try
    {
        // Block until all streaming sessions finish.
        future.get();

        if (!options.noProgress)
            indicator.printSummary(options.connectionsPerHost);

        // Give sockets time to gracefully close
        Thread.sleep(1000);
        System.exit(0); // We need that to stop non daemonized threads
    }
    catch (Exception e)
    {
        System.err.println("Streaming to the following hosts failed:");
        System.err.println(loader.getFailedHosts());
        e.printStackTrace(System.err);
        System.exit(1);
    }
}
// Return true when everything is at 100%
static class ProgressIndicator implements StreamEventHandler
{
// Nanosecond timestamp when streaming began (set in the constructor).
private long start;
// Total bytes reported at the last progress update, for rate deltas.
private long lastProgress;
// Nanosecond timestamp of the last progress update.
private long lastTime;
// Highest average transfer rate observed so far, in MB/s.
private int peak = 0;
// Total number of files across all sessions, accumulated on first pass.
private int totalFiles = 0;

// All known streaming sessions, grouped by destination host.
private final Multimap<InetAddress, SessionInfo> sessionsByHost = HashMultimap.create();

public ProgressIndicator()
{
    start = lastTime = System.nanoTime();
}
// Intentionally empty: completion is observed via future.get() in main(),
// and progress is rendered from handleStreamEvent instead.
public void onSuccess(StreamState finalState)
{
}

// Intentionally empty: failures surface through the future in main().
public void onFailure(Throwable t)
{
}
/**
 * Records prepared sessions and, on file-progress or stream-complete
 * events, recomputes per-session and total progress across all hosts and
 * prints a single carriage-return-refreshed status line. Synchronized so
 * concurrent stream callbacks cannot interleave updates to the shared
 * counters.
 */
public synchronized void handleStreamEvent(StreamEvent event)
{
    if (event.eventType == StreamEvent.Type.STREAM_PREPARED)
    {
        SessionInfo session = ((StreamEvent.SessionPreparedEvent) event).session;
        sessionsByHost.put(session.peer, session);
    }
    else if (event.eventType == StreamEvent.Type.FILE_PROGRESS || event.eventType == StreamEvent.Type.STREAM_COMPLETE)
    {
        // Only FILE_PROGRESS carries per-file progress to fold in.
        ProgressInfo progressInfo = null;
        if (event.eventType == StreamEvent.Type.FILE_PROGRESS)
        {
            progressInfo = ((StreamEvent.ProgressEvent) event).progress;
        }

        long time = System.nanoTime();
        long deltaTime = time - lastTime;

        StringBuilder sb = new StringBuilder();
        sb.append("\rprogress: ");

        long totalProgress = 0;
        long totalSize = 0;

        // totalFiles is only accumulated on the first full pass over sessions.
        boolean updateTotalFiles = totalFiles == 0;
        // recalculate progress across all sessions in all hosts and display
        for (InetAddress peer : sessionsByHost.keySet())
        {
            sb.append("[").append(peer).append("]");

            for (SessionInfo session : sessionsByHost.get(peer))
            {
                long size = session.getTotalSizeToSend();
                long current = 0;
                int completed = 0;

                // Apply the incoming update to its owning session only.
                if (progressInfo != null && session.peer.equals(progressInfo.peer) && (session.sessionIndex == progressInfo.sessionIndex))
                {
                    session.updateProgress(progressInfo);
                }
                for (ProgressInfo progress : session.getSendingFiles())
                {
                    if (progress.isCompleted())
                        completed++;
                    current += progress.currentBytes;
                }
                totalProgress += current;

                totalSize += size;

                sb.append(session.sessionIndex).append(":");
                sb.append(completed).append("/").append(session.getTotalFilesToSend());
                sb.append(" ").append(String.format("%-3d", size == 0 ? 100L : current * 100L / size)).append("% ");

                if (updateTotalFiles)
                    totalFiles += session.getTotalFilesToSend();
            }
        }

        lastTime = time;
        long deltaProgress = totalProgress - lastProgress;
        lastProgress = totalProgress;

        // Instantaneous rate since the last event, then the running average.
        sb.append("total: ").append(totalSize == 0 ? 100L : totalProgress * 100L / totalSize).append("% ");
        sb.append(String.format("%-3d", mbPerSec(deltaProgress, deltaTime))).append("MB/s");
        int average = mbPerSec(totalProgress, (time - start));
        if (average > peak)
            peak = average;
        sb.append("(avg: ").append(average).append(" MB/s)");

        System.out.print(sb.toString());
    }
}
private int mbPerSec(long bytes, long timeInNano)
{
double bytesPerNano = ((double)bytes) / timeInNano;
return (int)((bytesPerNano * 1000 * 1000 * 1000) / (1024 * 1024));
}
private void printSummary(int connectionsPerHost)
{
long end = System.nanoTime();
long durationMS = ((end - start) / (1000000));
int average = mbPerSec(lastProgress, (end - start));
StringBuilder sb = new StringBuilder();
sb.append("\nSummary statistics: \n");
sb.append(String.format(" %-30s: %-10d%n", "Connections per host: ", connectionsPerHost));
sb.append(String.format(" %-30s: %-10d%n", "Total files transferred: ", totalFiles));
sb.append(String.format(" %-30s: %-10d%n", "Total bytes transferred: ", lastProgress));
sb.append(String.format(" %-30s: %-10d%n", "Total duration (ms): ", durationMS));
sb.append(String.format(" %-30s: %-10d%n", "Average transfer rate (MB/s): ", + average));
sb.append(String.format(" %-30s: %-10d%n", "Peak transfer rate (MB/s): ", + peak));
System.out.println(sb.toString());
}
}
private static SSLOptions buildSSLOptions(EncryptionOptions.ClientEncryptionOptions clientEncryptionOptions)
{
if (!clientEncryptionOptions.enabled)
return null;
SSLContext sslContext;
try
{
sslContext = SSLFactory.createSSLContext(clientEncryptionOptions, true);
}
catch (IOException e)
{
throw new RuntimeException("Could not create SSL Context.", e);
}
return new SSLOptions(sslContext, clientEncryptionOptions.cipher_suites);
}
    /**
     * Loader client that discovers the ring over the native protocol (via
     * NativeSSTableLoaderClient) but streams sstable data over the storage
     * port, optionally using server-side encryption.
     */
    static class ExternalClient extends NativeSSTableLoaderClient
    {
        private final int storagePort;
        private final int sslStoragePort;
        private final EncryptionOptions.ServerEncryptionOptions serverEncOptions;
        public ExternalClient(Set<InetAddress> hosts,
                              int port,
                              String user,
                              String passwd,
                              int storagePort,
                              int sslStoragePort,
                              EncryptionOptions.ServerEncryptionOptions serverEncryptionOptions,
                              SSLOptions sslOptions)
        {
            super(hosts, port, user, passwd, sslOptions);
            this.storagePort = storagePort;
            this.sslStoragePort = sslStoragePort;
            this.serverEncOptions = serverEncryptionOptions;
        }
        @Override
        public StreamConnectionFactory getConnectionFactory()
        {
            return new BulkLoadConnectionFactory(storagePort, sslStoragePort, serverEncOptions, false);
        }
    }
    /**
     * Command-line options for the bulk loader, populated by {@link #parseArgs}.
     * Defaults are first overridden by the optional cassandra.yaml (-f) and then
     * by the corresponding command-line flags.
     */
    static class LoaderOptions
    {
        public final File directory;
        // NOTE(review): 'debug' is never assigned in parseArgs; it always stays false.
        public boolean debug;
        public boolean verbose;
        public boolean noProgress;
        public int nativePort = 9042;
        public String user;
        public String passwd;
        // Stream throughput limit in Mbits/s; 0 means unlimited.
        public int throttle = 0;
        public int storagePort;
        public int sslStoragePort;
        public EncryptionOptions encOptions = new EncryptionOptions.ClientEncryptionOptions();
        public int connectionsPerHost = 1;
        public EncryptionOptions.ServerEncryptionOptions serverEncOptions = new EncryptionOptions.ServerEncryptionOptions();
        public final Set<InetAddress> hosts = new HashSet<>();
        public final Set<InetAddress> ignores = new HashSet<>();
        LoaderOptions(File directory)
        {
            this.directory = directory;
        }
        /**
         * Parses command-line arguments into a LoaderOptions instance.
         * On any usage error this prints usage and terminates the JVM
         * (System.exit), so callers only see a fully-populated result.
         * @param cmdArgs raw command-line arguments; the single positional
         *        argument is the sstable directory
         * @return the populated options (only null-unreachable: the catch
         *         branch exits before returning)
         */
        public static LoaderOptions parseArgs(String cmdArgs[])
        {
            CommandLineParser parser = new GnuParser();
            CmdLineOptions options = getCmdLineOptions();
            try
            {
                CommandLine cmd = parser.parse(options, cmdArgs, false);
                if (cmd.hasOption(HELP_OPTION))
                {
                    printUsage(options);
                    System.exit(0);
                }
                String[] args = cmd.getArgs();
                if (args.length == 0)
                {
                    System.err.println("Missing sstable directory argument");
                    printUsage(options);
                    System.exit(1);
                }
                if (args.length > 1)
                {
                    System.err.println("Too many arguments");
                    printUsage(options);
                    System.exit(1);
                }
                String dirname = args[0];
                File dir = new File(dirname);
                if (!dir.exists())
                    errorMsg("Unknown directory: " + dirname, options);
                if (!dir.isDirectory())
                    errorMsg(dirname + " is not a directory", options);
                LoaderOptions opts = new LoaderOptions(dir);
                opts.verbose = cmd.hasOption(VERBOSE_OPTION);
                opts.noProgress = cmd.hasOption(NOPROGRESS_OPTION);
                if (cmd.hasOption(NATIVE_PORT_OPTION))
                    opts.nativePort = Integer.parseInt(cmd.getOptionValue(NATIVE_PORT_OPTION));
                if (cmd.hasOption(USER_OPTION))
                    opts.user = cmd.getOptionValue(USER_OPTION);
                if (cmd.hasOption(PASSWD_OPTION))
                    opts.passwd = cmd.getOptionValue(PASSWD_OPTION);
                if (cmd.hasOption(INITIAL_HOST_ADDRESS_OPTION))
                {
                    String[] nodes = cmd.getOptionValue(INITIAL_HOST_ADDRESS_OPTION).split(",");
                    try
                    {
                        for (String node : nodes)
                        {
                            opts.hosts.add(InetAddress.getByName(node.trim()));
                        }
                    }
                    catch (UnknownHostException e)
                    {
                        errorMsg("Unknown host: " + e.getMessage(), options);
                    }
                }
                else
                {
                    // Initial contact points are mandatory.
                    System.err.println("Initial hosts must be specified (-d)");
                    printUsage(options);
                    System.exit(1);
                }
                if (cmd.hasOption(IGNORE_NODES_OPTION))
                {
                    String[] nodes = cmd.getOptionValue(IGNORE_NODES_OPTION).split(",");
                    try
                    {
                        for (String node : nodes)
                        {
                            opts.ignores.add(InetAddress.getByName(node.trim()));
                        }
                    }
                    catch (UnknownHostException e)
                    {
                        errorMsg("Unknown host: " + e.getMessage(), options);
                    }
                }
                if (cmd.hasOption(CONNECTIONS_PER_HOST))
                    opts.connectionsPerHost = Integer.parseInt(cmd.getOptionValue(CONNECTIONS_PER_HOST));
                // try to load config file first, so that values can be rewritten with other option values.
                // otherwise use default config.
                Config config;
                if (cmd.hasOption(CONFIG_PATH))
                {
                    File configFile = new File(cmd.getOptionValue(CONFIG_PATH));
                    if (!configFile.exists())
                    {
                        errorMsg("Config file not found", options);
                    }
                    config = new YamlConfigurationLoader().loadConfig(configFile.toURI().toURL());
                }
                else
                {
                    config = new Config();
                }
                // Yaml-derived defaults; the flags handled below may override them.
                opts.storagePort = config.storage_port;
                opts.sslStoragePort = config.ssl_storage_port;
                opts.throttle = config.stream_throughput_outbound_megabits_per_sec;
                opts.encOptions = config.client_encryption_options;
                opts.serverEncOptions = config.server_encryption_options;
                if (cmd.hasOption(THROTTLE_MBITS))
                {
                    opts.throttle = Integer.parseInt(cmd.getOptionValue(THROTTLE_MBITS));
                }
                if (cmd.hasOption(SSL_TRUSTSTORE))
                {
                    opts.encOptions.truststore = cmd.getOptionValue(SSL_TRUSTSTORE);
                }
                if (cmd.hasOption(SSL_TRUSTSTORE_PW))
                {
                    opts.encOptions.truststore_password = cmd.getOptionValue(SSL_TRUSTSTORE_PW);
                }
                if (cmd.hasOption(SSL_KEYSTORE))
                {
                    opts.encOptions.keystore = cmd.getOptionValue(SSL_KEYSTORE);
                    // if a keystore was provided, lets assume we'll need to use it
                    opts.encOptions.require_client_auth = true;
                }
                if (cmd.hasOption(SSL_KEYSTORE_PW))
                {
                    opts.encOptions.keystore_password = cmd.getOptionValue(SSL_KEYSTORE_PW);
                }
                if (cmd.hasOption(SSL_PROTOCOL))
                {
                    opts.encOptions.protocol = cmd.getOptionValue(SSL_PROTOCOL);
                }
                if (cmd.hasOption(SSL_ALGORITHM))
                {
                    opts.encOptions.algorithm = cmd.getOptionValue(SSL_ALGORITHM);
                }
                if (cmd.hasOption(SSL_STORE_TYPE))
                {
                    opts.encOptions.store_type = cmd.getOptionValue(SSL_STORE_TYPE);
                }
                if (cmd.hasOption(SSL_CIPHER_SUITES))
                {
                    opts.encOptions.cipher_suites = cmd.getOptionValue(SSL_CIPHER_SUITES).split(",");
                }
                return opts;
            }
            catch (ParseException | ConfigurationException | MalformedURLException e)
            {
                // errorMsg exits the JVM; the return is only to satisfy the compiler.
                errorMsg(e.getMessage(), options);
                return null;
            }
        }
        // Prints the message and usage, then terminates the JVM with exit code 1.
        private static void errorMsg(String msg, CmdLineOptions options)
        {
            System.err.println(msg);
            printUsage(options);
            System.exit(1);
        }
        // Declares every supported command-line option.
        private static CmdLineOptions getCmdLineOptions()
        {
            CmdLineOptions options = new CmdLineOptions();
            options.addOption("v", VERBOSE_OPTION, "verbose output");
            options.addOption("h", HELP_OPTION, "display this help message");
            options.addOption(null, NOPROGRESS_OPTION, "don't display progress");
            options.addOption("i", IGNORE_NODES_OPTION, "NODES", "don't stream to this (comma separated) list of nodes");
            options.addOption("d", INITIAL_HOST_ADDRESS_OPTION, "initial hosts", "Required. try to connect to these hosts (comma separated) initially for ring information");
            options.addOption("p", NATIVE_PORT_OPTION, "rpc port", "port used for native connection (default 9042)");
            options.addOption("t", THROTTLE_MBITS, "throttle", "throttle speed in Mbits (default unlimited)");
            options.addOption("u", USER_OPTION, "username", "username for cassandra authentication");
            options.addOption("pw", PASSWD_OPTION, "password", "password for cassandra authentication");
            options.addOption("tf", TRANSPORT_FACTORY, "transport factory", "Fully-qualified ITransportFactory class name for creating a connection to cassandra");
            options.addOption("cph", CONNECTIONS_PER_HOST, "connectionsPerHost", "number of concurrent connections-per-host.");
            // ssl connection-related options
            options.addOption("ts", SSL_TRUSTSTORE, "TRUSTSTORE", "Client SSL: full path to truststore");
            options.addOption("tspw", SSL_TRUSTSTORE_PW, "TRUSTSTORE-PASSWORD", "Client SSL: password of the truststore");
            options.addOption("ks", SSL_KEYSTORE, "KEYSTORE", "Client SSL: full path to keystore");
            options.addOption("kspw", SSL_KEYSTORE_PW, "KEYSTORE-PASSWORD", "Client SSL: password of the keystore");
            options.addOption("prtcl", SSL_PROTOCOL, "PROTOCOL", "Client SSL: connections protocol to use (default: TLS)");
            options.addOption("alg", SSL_ALGORITHM, "ALGORITHM", "Client SSL: algorithm (default: SunX509)");
            options.addOption("st", SSL_STORE_TYPE, "STORE-TYPE", "Client SSL: type of store");
            options.addOption("ciphers", SSL_CIPHER_SUITES, "CIPHER-SUITES", "Client SSL: comma-separated list of encryption suites to use");
            options.addOption("f", CONFIG_PATH, "path to config file", "cassandra.yaml file path for streaming throughput and client/server SSL.");
            return options;
        }
        /** Prints the full tool usage/help text to stdout. */
        public static void printUsage(Options options)
        {
            String usage = String.format("%s [options] <dir_path>", TOOL_NAME);
            String header = System.lineSeparator() +
                    "Bulk load the sstables found in the directory <dir_path> to the configured cluster." +
                    "The parent directories of <dir_path> are used as the target keyspace/table name. " +
                    "So for instance, to load an sstable named Standard1-g-1-Data.db into Keyspace1/Standard1, " +
                    "you will need to have the files Standard1-g-1-Data.db and Standard1-g-1-Index.db into a directory /path/to/Keyspace1/Standard1/.";
            String footer = System.lineSeparator() +
                    "You can provide cassandra.yaml file with -f command line option to set up streaming throughput, client and server encryption options. " +
                    "Only stream_throughput_outbound_megabits_per_sec, server_encryption_options and client_encryption_options are read from yaml. " +
                    "You can override options read from cassandra.yaml with corresponding command line options.";
            new HelpFormatter().printHelp(usage, header, options, footer);
        }
    }
public static class CmdLineOptions extends Options
{
/**
* Add option with argument and argument name
* @param opt shortcut for option name
* @param longOpt complete option name
* @param argName argument name
* @param description description of the option
* @return updated Options object
*/
public Options addOption(String opt, String longOpt, String argName, String description)
{
Option option = new Option(opt, longOpt, true, description);
option.setArgName(argName);
return addOption(option);
}
/**
* Add option without argument
* @param opt shortcut for option name
* @param longOpt complete option name
* @param description description of the option
* @return updated Options object
*/
public Options addOption(String opt, String longOpt, String description)
{
return addOption(new Option(opt, longOpt, false, description));
}
}
}
| |
package org.docksidestage.hangar.dbflute.cbean.cq.bs;
import java.util.Map;
import org.dbflute.cbean.*;
import org.dbflute.cbean.chelper.*;
import org.dbflute.cbean.coption.*;
import org.dbflute.cbean.cvalue.ConditionValue;
import org.dbflute.cbean.sqlclause.SqlClause;
import org.dbflute.exception.IllegalConditionBeanOperationException;
import org.docksidestage.hangar.dbflute.cbean.cq.ciq.*;
import org.docksidestage.hangar.dbflute.cbean.*;
import org.docksidestage.hangar.dbflute.cbean.cq.*;
/**
 * The base condition-query of WHITE_BASE_ONE09_PALM.
 * <p>
 * NOTE(review): this class is generated by DBFlute's AutoGenerator; hand edits
 * will be lost on regeneration — put customizations in the extended class.
 * </p>
 * @author DBFlute(AutoGenerator)
 */
public class BsWhiteBaseOne09PalmCQ extends AbstractBsWhiteBaseOne09PalmCQ {
    // ===================================================================================
    // Attribute
    // =========
    // Lazily created inline-view query; shared by inline() and on().
    protected WhiteBaseOne09PalmCIQ _inlineQuery;
    // ===================================================================================
    // Constructor
    // ===========
    public BsWhiteBaseOne09PalmCQ(ConditionQuery referrerQuery, SqlClause sqlClause, String aliasName, int nestLevel) {
        super(referrerQuery, sqlClause, aliasName, nestLevel);
    }
    // ===================================================================================
    // InlineView/OrClause
    // ===================
    /**
     * Prepare InlineView query. <br>
     * {select ... from ... left outer join (select * from WHITE_BASE_ONE09_PALM) where FOO = [value] ...}
     * <pre>
     * cb.query().queryMemberStatus().<span style="color: #CC4747">inline()</span>.setFoo...;
     * </pre>
     * @return The condition-query for InlineView query. (NotNull)
     */
    public WhiteBaseOne09PalmCIQ inline() {
        if (_inlineQuery == null) { _inlineQuery = xcreateCIQ(); }
        _inlineQuery.xsetOnClause(false); return _inlineQuery;
    }
    protected WhiteBaseOne09PalmCIQ xcreateCIQ() {
        WhiteBaseOne09PalmCIQ ciq = xnewCIQ();
        ciq.xsetBaseCB(_baseCB);
        return ciq;
    }
    protected WhiteBaseOne09PalmCIQ xnewCIQ() {
        return new WhiteBaseOne09PalmCIQ(xgetReferrerQuery(), xgetSqlClause(), xgetAliasName(), xgetNestLevel(), this);
    }
    /**
     * Prepare OnClause query. <br>
     * {select ... from ... left outer join WHITE_BASE_ONE09_PALM on ... and FOO = [value] ...}
     * <pre>
     * cb.query().queryMemberStatus().<span style="color: #CC4747">on()</span>.setFoo...;
     * </pre>
     * @return The condition-query for OnClause query. (NotNull)
     * @throws IllegalConditionBeanOperationException When this condition-query is base query.
     */
    public WhiteBaseOne09PalmCIQ on() {
        if (isBaseQuery()) { throw new IllegalConditionBeanOperationException("OnClause for local table is unavailable!"); }
        WhiteBaseOne09PalmCIQ inlineQuery = inline(); inlineQuery.xsetOnClause(true); return inlineQuery;
    }
    // ===================================================================================
    // Query
    // =====
    protected ConditionValue _palmId;
    public ConditionValue xdfgetPalmId()
    { if (_palmId == null) { _palmId = nCV(); }
      return _palmId; }
    protected ConditionValue xgetCValuePalmId() { return xdfgetPalmId(); }
    /**
     * Add order-by as ascend. <br>
     * PALM_ID: {PK, NotNull, INTEGER(10)}
     * @return this. (NotNull)
     */
    public BsWhiteBaseOne09PalmCQ addOrderBy_PalmId_Asc() { regOBA("PALM_ID"); return this; }
    /**
     * Add order-by as descend. <br>
     * PALM_ID: {PK, NotNull, INTEGER(10)}
     * @return this. (NotNull)
     */
    public BsWhiteBaseOne09PalmCQ addOrderBy_PalmId_Desc() { regOBD("PALM_ID"); return this; }
    protected ConditionValue _palmName;
    public ConditionValue xdfgetPalmName()
    { if (_palmName == null) { _palmName = nCV(); }
      return _palmName; }
    protected ConditionValue xgetCValuePalmName() { return xdfgetPalmName(); }
    /**
     * Add order-by as ascend. <br>
     * PALM_NAME: {NotNull, VARCHAR(200)}
     * @return this. (NotNull)
     */
    public BsWhiteBaseOne09PalmCQ addOrderBy_PalmName_Asc() { regOBA("PALM_NAME"); return this; }
    /**
     * Add order-by as descend. <br>
     * PALM_NAME: {NotNull, VARCHAR(200)}
     * @return this. (NotNull)
     */
    public BsWhiteBaseOne09PalmCQ addOrderBy_PalmName_Desc() { regOBD("PALM_NAME"); return this; }
    protected ConditionValue _baseId;
    public ConditionValue xdfgetBaseId()
    { if (_baseId == null) { _baseId = nCV(); }
      return _baseId; }
    protected ConditionValue xgetCValueBaseId() { return xdfgetBaseId(); }
    /**
     * Add order-by as ascend. <br>
     * BASE_ID: {UQ, NotNull, INTEGER(10), FK to WHITE_BASE}
     * @return this. (NotNull)
     */
    public BsWhiteBaseOne09PalmCQ addOrderBy_BaseId_Asc() { regOBA("BASE_ID"); return this; }
    /**
     * Add order-by as descend. <br>
     * BASE_ID: {UQ, NotNull, INTEGER(10), FK to WHITE_BASE}
     * @return this. (NotNull)
     */
    public BsWhiteBaseOne09PalmCQ addOrderBy_BaseId_Desc() { regOBD("BASE_ID"); return this; }
    // ===================================================================================
    // SpecifiedDerivedOrderBy
    // =======================
    /**
     * Add order-by for specified derived column as ascend.
     * <pre>
     * cb.specify().derivedPurchaseList().max(new SubQuery&lt;PurchaseCB&gt;() {
     *     public void query(PurchaseCB subCB) {
     *         subCB.specify().columnPurchaseDatetime();
     *     }
     * }, <span style="color: #CC4747">aliasName</span>);
     * <span style="color: #3F7E5E">// order by [alias-name] asc</span>
     * cb.<span style="color: #CC4747">addSpecifiedDerivedOrderBy_Asc</span>(<span style="color: #CC4747">aliasName</span>);
     * </pre>
     * @param aliasName The alias name specified at (Specify)DerivedReferrer. (NotNull)
     * @return this. (NotNull)
     */
    public BsWhiteBaseOne09PalmCQ addSpecifiedDerivedOrderBy_Asc(String aliasName) { registerSpecifiedDerivedOrderBy_Asc(aliasName); return this; }
    /**
     * Add order-by for specified derived column as descend.
     * <pre>
     * cb.specify().derivedPurchaseList().max(new SubQuery&lt;PurchaseCB&gt;() {
     *     public void query(PurchaseCB subCB) {
     *         subCB.specify().columnPurchaseDatetime();
     *     }
     * }, <span style="color: #CC4747">aliasName</span>);
     * <span style="color: #3F7E5E">// order by [alias-name] desc</span>
     * cb.<span style="color: #CC4747">addSpecifiedDerivedOrderBy_Desc</span>(<span style="color: #CC4747">aliasName</span>);
     * </pre>
     * @param aliasName The alias name specified at (Specify)DerivedReferrer. (NotNull)
     * @return this. (NotNull)
     */
    public BsWhiteBaseOne09PalmCQ addSpecifiedDerivedOrderBy_Desc(String aliasName) { registerSpecifiedDerivedOrderBy_Desc(aliasName); return this; }
    // ===================================================================================
    // Union Query
    // ===========
    public void reflectRelationOnUnionQuery(ConditionQuery bqs, ConditionQuery uqs) {
        WhiteBaseOne09PalmCQ bq = (WhiteBaseOne09PalmCQ)bqs;
        WhiteBaseOne09PalmCQ uq = (WhiteBaseOne09PalmCQ)uqs;
        if (bq.hasConditionQueryWhiteBase()) {
            uq.queryWhiteBase().reflectRelationOnUnionQuery(bq.queryWhiteBase(), uq.queryWhiteBase());
        }
    }
    // ===================================================================================
    // Foreign Query
    // =============
    /**
     * Get the condition-query for relation table. <br>
     * WHITE_BASE by my BASE_ID, named 'whiteBase'.
     * @return The instance of condition-query. (NotNull)
     */
    public WhiteBaseCQ queryWhiteBase() {
        return xdfgetConditionQueryWhiteBase();
    }
    public WhiteBaseCQ xdfgetConditionQueryWhiteBase() {
        String prop = "whiteBase";
        if (!xhasQueRlMap(prop)) { xregQueRl(prop, xcreateQueryWhiteBase()); xsetupOuterJoinWhiteBase(); }
        return xgetQueRlMap(prop);
    }
    protected WhiteBaseCQ xcreateQueryWhiteBase() {
        String nrp = xresolveNRP("WHITE_BASE_ONE09_PALM", "whiteBase"); String jan = xresolveJAN(nrp, xgetNNLvl());
        return xinitRelCQ(new WhiteBaseCQ(this, xgetSqlClause(), jan, xgetNNLvl()), _baseCB, "whiteBase", nrp);
    }
    protected void xsetupOuterJoinWhiteBase() { xregOutJo("whiteBase"); }
    public boolean hasConditionQueryWhiteBase() { return xhasQueRlMap("whiteBase"); }
    protected Map<String, Object> xfindFixedConditionDynamicParameterMap(String property) {
        return null;
    }
    // ===================================================================================
    // ScalarCondition
    // ===============
    public Map<String, WhiteBaseOne09PalmCQ> xdfgetScalarCondition() { return xgetSQueMap("scalarCondition"); }
    public String keepScalarCondition(WhiteBaseOne09PalmCQ sq) { return xkeepSQue("scalarCondition", sq); }
    // ===================================================================================
    // MyselfDerived
    // =============
    public Map<String, WhiteBaseOne09PalmCQ> xdfgetSpecifyMyselfDerived() { return xgetSQueMap("specifyMyselfDerived"); }
    public String keepSpecifyMyselfDerived(WhiteBaseOne09PalmCQ sq) { return xkeepSQue("specifyMyselfDerived", sq); }
    public Map<String, WhiteBaseOne09PalmCQ> xdfgetQueryMyselfDerived() { return xgetSQueMap("queryMyselfDerived"); }
    public String keepQueryMyselfDerived(WhiteBaseOne09PalmCQ sq) { return xkeepSQue("queryMyselfDerived", sq); }
    public Map<String, Object> xdfgetQueryMyselfDerivedParameter() { return xgetSQuePmMap("queryMyselfDerived"); }
    public String keepQueryMyselfDerivedParameter(Object pm) { return xkeepSQuePm("queryMyselfDerived", pm); }
    // ===================================================================================
    // MyselfExists
    // ============
    // NOTE(review): this field is not read by the accessors below (they use
    // xgetSQueMap); presumably leftover generator output — confirm on regeneration.
    protected Map<String, WhiteBaseOne09PalmCQ> _myselfExistsMap;
    public Map<String, WhiteBaseOne09PalmCQ> xdfgetMyselfExists() { return xgetSQueMap("myselfExists"); }
    public String keepMyselfExists(WhiteBaseOne09PalmCQ sq) { return xkeepSQue("myselfExists", sq); }
    // ===================================================================================
    // MyselfInScope
    // =============
    public Map<String, WhiteBaseOne09PalmCQ> xdfgetMyselfInScope() { return xgetSQueMap("myselfInScope"); }
    public String keepMyselfInScope(WhiteBaseOne09PalmCQ sq) { return xkeepSQue("myselfInScope", sq); }
    // ===================================================================================
    // Very Internal
    // =============
    // very internal (for suppressing warn about 'Not Use Import')
    protected String xCB() { return WhiteBaseOne09PalmCB.class.getName(); }
    protected String xCQ() { return WhiteBaseOne09PalmCQ.class.getName(); }
    protected String xCHp() { return HpQDRFunction.class.getName(); }
    protected String xCOp() { return ConditionOption.class.getName(); }
    protected String xMap() { return Map.class.getName(); }
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.gemstone.gemfire.management.internal.beans;
import java.io.IOException;
import java.io.Serializable;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.StringTokenizer;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.logging.log4j.Logger;
import com.gemstone.gemfire.SystemFailure;
import com.gemstone.gemfire.cache.Cache;
import com.gemstone.gemfire.cache.CacheFactory;
import com.gemstone.gemfire.cache.DataPolicy;
import com.gemstone.gemfire.cache.Region;
import com.gemstone.gemfire.cache.execute.Function;
import com.gemstone.gemfire.cache.execute.FunctionAdapter;
import com.gemstone.gemfire.cache.execute.FunctionContext;
import com.gemstone.gemfire.cache.execute.FunctionException;
import com.gemstone.gemfire.cache.execute.FunctionService;
import com.gemstone.gemfire.cache.execute.RegionFunctionContext;
import com.gemstone.gemfire.cache.execute.ResultCollector;
import com.gemstone.gemfire.cache.query.Query;
import com.gemstone.gemfire.cache.query.QueryInvalidException;
import com.gemstone.gemfire.cache.query.QueryService;
import com.gemstone.gemfire.cache.query.SelectResults;
import com.gemstone.gemfire.cache.query.internal.CompiledValue;
import com.gemstone.gemfire.cache.query.internal.DefaultQuery;
import com.gemstone.gemfire.cache.query.internal.QCompiler;
import com.gemstone.gemfire.distributed.DistributedMember;
import com.gemstone.gemfire.internal.InternalEntity;
import com.gemstone.gemfire.internal.cache.BucketRegion;
import com.gemstone.gemfire.internal.cache.LocalDataSet;
import com.gemstone.gemfire.internal.cache.PartitionedRegion;
import com.gemstone.gemfire.internal.cache.PartitionedRegionHelper;
import com.gemstone.gemfire.internal.logging.LogService;
import com.gemstone.gemfire.management.DistributedRegionMXBean;
import com.gemstone.gemfire.management.ManagementService;
import com.gemstone.gemfire.management.internal.ManagementConstants;
import com.gemstone.gemfire.management.internal.ManagementStrings;
import com.gemstone.gemfire.management.internal.SystemManagementService;
import com.gemstone.gemfire.management.internal.cli.commands.DataCommands;
import com.gemstone.gemfire.management.internal.cli.json.GfJsonException;
import com.gemstone.gemfire.management.internal.cli.json.GfJsonObject;
import com.gemstone.gemfire.management.internal.cli.json.TypedJson;
/**
* This function is executed on one or multiple members based on the member
* input to DistributedSystemMXBean.queryData()
*
* @author rishim
*
*/
public class QueryDataFunction extends FunctionAdapter implements InternalEntity {
  private static final Logger logger = LogService.getLogger();
  /** Always {@code true}: this function sends a result back to the caller. */
  @Override
  public boolean hasResult() {
    return true;
  }
  private static final long serialVersionUID = 1L;
@Override
public void execute(FunctionContext context) {
Object[] functionArgs = (Object[]) context.getArguments();
boolean showMember = (Boolean) functionArgs[DISPLAY_MEMBERWISE];
String queryString = (String) functionArgs[QUERY];
String regionName = (String) functionArgs[REGION];
int limit = (Integer) functionArgs[LIMIT];
int queryResultSetLimit = (Integer) functionArgs[QUERY_RESULTSET_LIMIT];
int queryCollectionsDepth = (Integer) functionArgs[QUERY_COLLECTIONS_DEPTH];
try {
context.getResultSender().lastResult(
selectWithType(context, queryString, showMember, regionName, limit, queryResultSetLimit,
queryCollectionsDepth));
} catch (Exception e) {
context.getResultSender().sendException(e);
}
}
  /** Returns the well-known registration id under which this function is invoked. */
  @Override
  public String getId() {
    return ManagementConstants.QUERY_DATA_FUNCTION;
  }
@SuppressWarnings( { "unchecked" })
public QueryDataFunctionResult selectWithType(FunctionContext context, String queryString, boolean showMember,
String regionName, int limit, int queryResultSetLimit, int queryCollectionsDepth) throws Exception {
Cache cache = CacheFactory.getAnyInstance();
Function loclQueryFunc = new LocalQueryFunction("LocalQueryFunction", regionName, showMember)
.setOptimizeForWrite(true);
queryString = applyLimitClause(queryString, limit, queryResultSetLimit);
try {
TypedJson result = new TypedJson(queryCollectionsDepth);
Region region = cache.getRegion(regionName);
if (region == null) {
throw new Exception(ManagementStrings.QUERY__MSG__REGIONS_NOT_FOUND_ON_MEMBER.toLocalizedString(regionName,
cache.getDistributedSystem().getDistributedMember().getId()));
}
Object results = null;
boolean noDataFound = true;
if (region.getAttributes().getDataPolicy() == DataPolicy.NORMAL) {
QueryService queryService = cache.getQueryService();
Query query = queryService.newQuery(queryString);
results = query.execute();
} else {
ResultCollector rcollector = null;
PartitionedRegion parRegion = PartitionedRegionHelper.getPartitionedRegion(regionName, cache);
if(parRegion != null && showMember){
if(parRegion.isDataStore()){
Set<BucketRegion> localPrimaryBucketRegions = parRegion.getDataStore().getAllLocalPrimaryBucketRegions();
Set<Integer> localPrimaryBucketSet = new HashSet<Integer>();
for (BucketRegion bRegion : localPrimaryBucketRegions) {
localPrimaryBucketSet.add(bRegion.getId());
}
LocalDataSet lds = new LocalDataSet(parRegion, localPrimaryBucketSet);
DefaultQuery query = (DefaultQuery)cache.getQueryService().newQuery(
queryString);
SelectResults selectResults = (SelectResults)lds.executeQuery(query, null, localPrimaryBucketSet);
results = selectResults;
}
}else{
rcollector = FunctionService.onRegion(cache.getRegion(regionName)).withArgs(queryString)
.execute(loclQueryFunc);
results = (ArrayList) rcollector.getResult();
}
}
if (results != null && results instanceof SelectResults) {
SelectResults selectResults = (SelectResults) results;
for (Iterator iter = selectResults.iterator(); iter.hasNext();) {
Object object = iter.next();
result.add(RESULT_KEY,object);
noDataFound = false;
}
} else if (results != null && results instanceof ArrayList) {
ArrayList listResults = (ArrayList) results;
ArrayList actualResult = (ArrayList)listResults.get(0);
for (Object object : actualResult) {
result.add(RESULT_KEY, object);
noDataFound = false;
}
}
if (!noDataFound && showMember) {
result.add(MEMBER_KEY,cache.getDistributedSystem().getDistributedMember().getId());
}
if (noDataFound) {
return new QueryDataFunctionResult(QUERY_EXEC_SUCCESS, BeanUtilFuncs.compress(new JsonisedErroMessage(NO_DATA_FOUND).toString()));
}
return new QueryDataFunctionResult(QUERY_EXEC_SUCCESS, BeanUtilFuncs.compress(result.toString()));
} catch (Exception e) {
logger.warn(e.getMessage(), e);
throw e;
} finally {
}
}
/**
* Matches the input query with query with limit pattern. If limit is found in
* input query this function ignores. Else it will append a default limit ..
* 1000 If input limit is 0 then also it will append default limit of 1000
*
* @param query
* input query
* @param limit
* limit on the result set
* @return a string having limit clause
*/
static String applyLimitClause(String query, int limit, int queryResultSetLimit) {
Matcher matcher = SELECT_EXPR_PATTERN.matcher(query);
if (matcher.matches()) {
Matcher limit_matcher = SELECT_WITH_LIMIT_EXPR_PATTERN.matcher(query);
boolean matchResult = limit_matcher.matches();
if (!matchResult) {
if (limit == 0) {
limit = queryResultSetLimit;
}
String result = new String(query);
result += " LIMIT " + limit;
return result;
}
}
return query;
}
  /**
   * Invokes the query-data function on the given members and aggregates the
   * per-member (compressed) results.
   *
   * @param functionArgs positional argument array expected by {@link #execute}
   * @param members target members; a single member produces a slightly
   *        different result shape than multiple members
   * @param zipResult when true the aggregated JSON is returned compressed
   * @return compressed bytes or a JSON string, depending on zipResult and member count
   * @throws Exception wrapping any remote or local failure
   */
  @SuppressWarnings( { "unchecked" })
  static Object callFunction(Object functionArgs, Set<DistributedMember> members, boolean zipResult) throws Exception {
    try {
      if (members.size() == 1) {
        DistributedMember member = members.iterator().next();
        ResultCollector collector = FunctionService.onMember(member).withArgs(functionArgs).execute(
            ManagementConstants.QUERY_DATA_FUNCTION);
        List list = (List) collector.getResult();
        Object object = null;
        if (list.size() > 0) {
          object = list.get(0);
        }
        // A Throwable in the result list means the remote execution failed; rethrow it.
        if (object instanceof Throwable) {
          Throwable error = (Throwable) object;
          throw error;
        }
        QueryDataFunctionResult result = (QueryDataFunctionResult) object;
        if (zipResult) { // The result is already compressed
          return result.compressedBytes;
        } else {
          Object[] functionArgsList = (Object[]) functionArgs;
          boolean showMember = (Boolean) functionArgsList[DISPLAY_MEMBERWISE];
          if (showMember) {// Added to show a single member similar to multiple
            // member.
            // Note , if no member is selected this is the code path executed. A
            // random associated member is chosen.
            List<String> decompressedList = new ArrayList<String>();
            decompressedList.add(BeanUtilFuncs.decompress(result.compressedBytes));
            return wrapResult(decompressedList.toString());
          }
          return BeanUtilFuncs.decompress(result.compressedBytes);
        }
      } else { // More than 1 Member
        ResultCollector coll = FunctionService.onMembers(members).withArgs(functionArgs).execute(
            ManagementConstants.QUERY_DATA_FUNCTION);
        List list = (List) coll.getResult();
        Object object = list.get(0);
        if (object instanceof Throwable) {
          Throwable error = (Throwable) object;
          throw error;
        }
        // Decompress each member's result and wrap them all in a single JSON envelope.
        Iterator<QueryDataFunctionResult> it = list.iterator();
        List<String> decompressedList = new ArrayList<String>();
        while (it.hasNext()) {
          String decompressedStr = null;
          decompressedStr = BeanUtilFuncs.decompress(it.next().compressedBytes);
          decompressedList.add(decompressedStr);
        }
        if (zipResult) {
          return BeanUtilFuncs.compress(wrapResult(decompressedList.toString()));
        } else {
          return wrapResult(decompressedList.toString());
        }
      }
    } catch (FunctionException fe) {
      throw new Exception(ManagementStrings.QUERY__MSG__QUERY_EXEC.toLocalizedString(fe.getMessage()));
    } catch (IOException e) {
      throw new Exception(ManagementStrings.QUERY__MSG__QUERY_EXEC.toLocalizedString(e.getMessage()));
    } catch (Exception e) {
      throw new Exception(ManagementStrings.QUERY__MSG__QUERY_EXEC.toLocalizedString(e.getMessage()));
    } catch (VirtualMachineError e) {
      // VM errors must initiate SystemFailure handling and always propagate unchanged.
      SystemFailure.initiateFailure(e);
      throw e;
    } catch (Throwable e) {
      // Reachable only for non-Exception, non-VM-error Throwables (e.g. the
      // rethrown remote errors above); this catch is also what lets the
      // 'throw error' statements compile against 'throws Exception'.
      SystemFailure.checkFailure();
      throw new Exception(ManagementStrings.QUERY__MSG__QUERY_EXEC.toLocalizedString(e.getMessage()));
    }
  }
/**
 * Wraps a raw result string as a JSON object: {@code {"result":<str>}}.
 * The caller is responsible for {@code str} itself being valid JSON.
 *
 * @param str the result payload to embed
 * @return the wrapped JSON string
 */
static String wrapResult(String str) {
    // The previous implementation synchronized on the buffer of a
    // method-local StringWriter, which protects nothing: the writer never
    // escapes this frame. Plain concatenation is equivalent and simpler.
    return "{\"result\":" + str + "}";
}
/**
 * Validates and executes a data query against the distributed system.
 *
 * <p>Validation steps: non-empty query; every named member resolves; every
 * region in the query has a DistributedRegionMXBean; if explicit members are
 * given, all regions must exist on all of them; a join over more than one
 * partitioned region without explicit members is rejected.
 *
 * @param query the OQL query to run; must not be null/empty.
 * @param members comma-separated member names/ids, or null/blank for "any member".
 * @param limit result-set limit; 0 means use {@code queryResultSetLimit}.
 * @param zipResult if true, return compressed bytes.
 * @param queryResultSetLimit default LIMIT applied when none is given.
 * @param queryCollectionsDepth depth to which collections are serialized.
 * @return the query result, or a JSON error message string on validation failure.
 * @throws Exception if query execution fails.
 */
public static Object queryData(String query, String members, int limit, boolean zipResult, int queryResultSetLimit, int queryCollectionsDepth) throws Exception {
    if (query == null || query.isEmpty()) {
        return new JsonisedErroMessage(ManagementStrings.QUERY__MSG__QUERY_EMPTY.toLocalizedString()).toString();
    }
    Set<DistributedMember> inputMembers = null;
    if (members != null && !members.trim().isEmpty()) {
        inputMembers = new HashSet<DistributedMember>();
        StringTokenizer st = new StringTokenizer(members, ",");
        while (st.hasMoreTokens()) {
            String member = st.nextToken();
            DistributedMember distributedMember = BeanUtilFuncs.getDistributedMemberByNameOrId(member);
            if (distributedMember == null) {
                // Fail fast; previously a null entry was added to the set
                // before this check ran.
                return new JsonisedErroMessage(ManagementStrings.QUERY__MSG__INVALID_MEMBER.toLocalizedString(member)).toString();
            }
            inputMembers.add(distributedMember);
        }
    }
    Cache cache = CacheFactory.getAnyInstance();
    try {
        SystemManagementService service = (SystemManagementService) ManagementService.getExistingManagementService(cache);
        Set<String> regionsInQuery = compileQuery(cache, query);
        // Validate region existence
        if (regionsInQuery.size() > 0) {
            for (String regionPath : regionsInQuery) {
                DistributedRegionMXBean regionMBean = service.getDistributedRegionMXBean(regionPath);
                if (regionMBean == null) {
                    return new JsonisedErroMessage(ManagementStrings.QUERY__MSG__REGIONS_NOT_FOUND.toLocalizedString(regionPath)).toString();
                } else {
                    Set<DistributedMember> associatedMembers = DataCommands
                        .getRegionAssociatedMembers(regionPath, cache, true);
                    if (inputMembers != null && inputMembers.size() > 0) {
                        if (!associatedMembers.containsAll(inputMembers)) {
                            return new JsonisedErroMessage(ManagementStrings.QUERY__MSG__REGIONS_NOT_FOUND_ON_MEMBERS
                                .toLocalizedString(regionPath)).toString();
                        }
                    }
                }
            }
        } else {
            return new JsonisedErroMessage(ManagementStrings.QUERY__MSG__INVALID_QUERY
                .toLocalizedString("Region mentioned in query probably missing /")).toString();
        }
        // Reject joins over partitioned regions unless members were pinned.
        if (regionsInQuery.size() > 1 && inputMembers == null) {
            for (String regionPath : regionsInQuery) {
                DistributedRegionMXBean regionMBean = service.getDistributedRegionMXBean(regionPath);
                if (regionMBean.getRegionType().equals(DataPolicy.PARTITION.toString())
                    || regionMBean.getRegionType().equals(DataPolicy.PERSISTENT_PARTITION.toString())) {
                    return new JsonisedErroMessage(ManagementStrings.QUERY__MSG__JOIN_OP_EX.toLocalizedString()).toString();
                }
            }
        }
        String randomRegion = regionsInQuery.iterator().next();
        Set<DistributedMember> associatedMembers = DataCommands.getQueryRegionsAssociatedMembers(regionsInQuery, cache,
            false);// First available member
    if (associatedMembers != null && associatedMembers.size() > 0) {
            boolean queryOnInputMembers = inputMembers != null && inputMembers.size() > 0;
            // DISPLAY_MEMBERWISE and the target member set are the only things
            // that differ between the two execution paths; build the argument
            // array once instead of duplicating it per branch.
            Object[] functionArgs = new Object[6];
            functionArgs[DISPLAY_MEMBERWISE] = queryOnInputMembers;
            functionArgs[QUERY] = query;
            functionArgs[REGION] = randomRegion;
            functionArgs[LIMIT] = limit;
            functionArgs[QUERY_RESULTSET_LIMIT] = queryResultSetLimit;
            functionArgs[QUERY_COLLECTIONS_DEPTH] = queryCollectionsDepth;
            Set<DistributedMember> targetMembers = queryOnInputMembers ? inputMembers : associatedMembers;
            return QueryDataFunction.callFunction(functionArgs, targetMembers, zipResult);
        } else {
            return new JsonisedErroMessage(ManagementStrings.QUERY__MSG__REGIONS_NOT_FOUND.toLocalizedString(regionsInQuery
                .toString())).toString();
        }
    } catch (QueryInvalidException qe) {
        return new JsonisedErroMessage(ManagementStrings.QUERY__MSG__INVALID_QUERY.toLocalizedString(qe.getMessage())).toString();
    }
}
/**
 * Small helper that renders an error message as a JSON object of the form
 * {@code {"message":"<errorMessage>"}}.
 * (Class name typo preserved for compatibility with existing references.)
 */
private static class JsonisedErroMessage {
    // JSON key under which the error text is stored.
    private static final String MESSAGE_KEY = "message";
    private final GfJsonObject gFJsonObject = new GfJsonObject();

    /**
     * @param errorMessage the error text to embed.
     * @throws Exception wrapping any JSON construction failure (cause preserved).
     */
    public JsonisedErroMessage(String errorMessage) throws Exception {
        try {
            gFJsonObject.put(MESSAGE_KEY, errorMessage);
        } catch (GfJsonException e) {
            throw new Exception(e);
        }
    }

    @Override
    public String toString() {
        return gFJsonObject.toString();
    }
}
/**
 * Compiles the query and returns the set of region paths it references.
 * Throws a QueryInvalidException if the query is not well formed.
 *
 * @param cache current cache (unused here — TODO confirm it is kept only for
 *        API compatibility)
 * @param query input query
 * @return an unmodifiable set of regions involved in the query
 * @throws QueryInvalidException if the query cannot be compiled
 */
@SuppressWarnings("deprecation")
public static Set<String> compileQuery(Cache cache, String query) throws QueryInvalidException {
    QCompiler compiler = new QCompiler();
    try {
        CompiledValue compiledQuery = compiler.compileQuery(query);
        Set<String> regions = new HashSet<String>();
        compiledQuery.getRegionsInQuery(regions, null);
        // Callers must not mutate the compiled region set.
        return Collections.unmodifiableSet(regions);
    } catch (QueryInvalidException qe) {
        // Log with the failing query for diagnosability, then rethrow.
        logger.error("{} Failed, Error {}", query, qe.getMessage(), qe);
        throw qe;
    }
}
/**
* Function to gather data locally. This function is required to execute query
* with region context
*
* @author rishim
*
*/
private class LocalQueryFunction extends FunctionAdapter {
private static final long serialVersionUID = 1L;
private boolean optimizeForWrite = false;
private boolean showMembers = false;
private String regionName;
@Override
public boolean hasResult() {
return true;
}
@Override
public boolean isHA() {
return false;
}
private final String id;
@Override
public boolean optimizeForWrite() {
return optimizeForWrite;
}
public LocalQueryFunction setOptimizeForWrite(boolean optimizeForWrite) {
this.optimizeForWrite = optimizeForWrite;
return this;
}
public LocalQueryFunction(String id, String regionName, boolean showMembers) {
super();
this.id = id;
this.regionName = regionName;
this.showMembers = showMembers;
}
@SuppressWarnings("unchecked")
@Override
public void execute(FunctionContext context) {
Cache cache = CacheFactory.getAnyInstance();
QueryService queryService = cache.getQueryService();
String qstr = (String) context.getArguments();
Region r = cache.getRegion(regionName);
try {
Query query = queryService.newQuery(qstr);
SelectResults sr;
if (r.getAttributes().getPartitionAttributes() != null && showMembers) {
sr = (SelectResults) query.execute((RegionFunctionContext) context);
context.getResultSender().lastResult(sr.asList());
} else {
sr = (SelectResults) query.execute();
context.getResultSender().lastResult(sr.asList());
}
} catch (Exception e) {
throw new FunctionException(e);
}
}
@Override
public String getId() {
return this.id;
}
}
// JSON keys and status strings. All of these are constants, so they are
// declared final to prevent accidental reassignment.
private static final String MEMBER_KEY = "member";
private static final String RESULT_KEY = "result";
private static final String NO_DATA_FOUND = "No Data Found";
// NOTE(review): "Successfuly" is misspelled, but the literal is part of the
// wire/status contract — confirm no consumer compares against it before fixing.
private static final String QUERY_EXEC_SUCCESS = "Query Executed Successfuly";

// Indexes into the functionArgs array passed to the query function.
private static final int DISPLAY_MEMBERWISE = 0;
private static final int QUERY = 1;
private static final int REGION = 2;
private static final int LIMIT = 3;
private static final int QUERY_RESULTSET_LIMIT = 4;
private static final int QUERY_COLLECTIONS_DEPTH = 5;

// Patterns are compiled once and cached; compilation is relatively expensive.
static final String SELECT_EXPR = "\\s*SELECT\\s+.+\\s+FROM\\s+.+";
static final Pattern SELECT_EXPR_PATTERN = Pattern.compile(SELECT_EXPR, Pattern.CASE_INSENSITIVE);
static final String SELECT_WITH_LIMIT_EXPR = "\\s*SELECT\\s+.+\\s+FROM(\\s+|(.*\\s+))LIMIT\\s+[0-9]+.*";
static final Pattern SELECT_WITH_LIMIT_EXPR_PATTERN = Pattern.compile(SELECT_WITH_LIMIT_EXPR, Pattern.CASE_INSENSITIVE);
/**
 * Serializable value object carrying a status message plus the compressed
 * query payload back from the function execution.
 */
public static class QueryDataFunctionResult implements Serializable {

    private static final long serialVersionUID = 1L;

    // Status text describing the outcome of the query execution.
    private final String message;
    // The query result, already compressed for transport.
    private final byte[] compressedBytes;

    public QueryDataFunctionResult(String message, byte[] compressedBytes) {
        this.message = message;
        this.compressedBytes = compressedBytes;
    }

    /** @return the status message. */
    public String getMessage() {
        return message;
    }

    /** @return the compressed result bytes. */
    public byte[] getCompressedBytes() {
        return compressedBytes;
    }
}
}
| |
package us.kbase.typedobj.idref;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
/** A set of handlers for typed object IDs, keyed by ID type. IDs are added
 * to the appropriate handler, batch-processed, and then remapped IDs can be
 * queried back out.
 */
public class IdReferenceHandlerSet<T> {

	//TODO TEST unit tests
	//TODO JAVADOC

	// Hard cap on the number of unique IDs held in memory across all handlers.
	private final int maxUniqueIdCount;
	private int currentUniqueIdCount = 0;
	// Once true, the set is locked and no further IDs may be added.
	private boolean processed = false;
	// Object currently associated with incoming IDs; set via associateObject().
	private T associated = null;
	private final Map<IdReferenceType, IdReferenceHandler<T>> handlers;

	/** A handler for typed object IDs. Responsible for checking the
	 * syntax of the id and its attributes, and remapping IDs if necessary.
	 *
	 * ID handlers allow associating IDs with a particular object. This is useful
	 * for batch processing of typed object, as the IDs can be associated with
	 * a particular object but the entire ID set can be processed as a batch.
	 * @author gaprice@lbl.gov
	 *
	 * @param <T> the type of the object to be associated with IDs.
	 */
	public static abstract class IdReferenceHandler<T> {

		// true once processIds() has run; the handler is then locked.
		private boolean processed = false;

		/** Add an id to the handler
		 * @param associatedObject an object associated with the ID.
		 * @param id the id.
		 * @param attributes the attributes of the ID.
		 * @return boolean if this is a unique ID (on a per associated object
		 * basis) stored in memory and thus should count towards the maximum ID
		 * limit.
		 * @throws IdReferenceHandlerException if the ID could not be added.
		 * @throws HandlerLockedException if the handler is already locked.
		 */
		public boolean addId(
				final T associatedObject,
				final String id,
				final List<String> attributes)
				throws IdReferenceHandlerException, HandlerLockedException {
			if (processed) {
				throw new HandlerLockedException(
						"This handler's ids have been processed and no more may be added");
			}
			if (associatedObject == null) {
				throw new NullPointerException(
						"associatedObject cannot be null");
			}
			if (id == null || id.trim().isEmpty()) {
				throw new IdParseException(
						"IDs may not be null or the empty string",
						getIdType(), associatedObject, id, attributes, null);
			}
			return addIdImpl(associatedObject, id, attributes);
		}

		/** Implementation of the addId method */
		protected abstract boolean addIdImpl(T associatedObject, String id,
				List<String> attributes)
				throws IdReferenceHandlerException;

		/** Perform any necessary batch processing of the IDs before
		 * remapping and locks the handler. Calling the method twice has no
		 * effect.
		 */
		public void processIds() throws IdReferenceHandlerException {
			if (processed) {
				return;
			}
			processed = true;
			processIdsImpl();
		}

		/** Implementation of the processIds method */
		protected abstract void processIdsImpl()
				throws IdReferenceHandlerException;

		/** Translate an ID to the remapped ID.
		 * @param oldId the original ID.
		 * @return the new, remapped ID.
		 */
		public RemappedId getRemappedId(String oldId)
				throws NoSuchIdException {
			if (!processed) {
				throw new IllegalStateException(
						"IDs haven't been processed yet");
			}
			return getRemappedIdImpl(oldId);
		}

		/** Implementation of the getRemappedId method */
		protected abstract RemappedId getRemappedIdImpl(String oldId)
				throws NoSuchIdException;

		/** Get the set of remapped IDs associated with a particular object.
		 * @param associatedObject the object to which the desired set of IDs
		 * are associated.
		 * @return the set of remapped IDs associated with an object.
		 */
		public Set<RemappedId> getRemappedIds(T associatedObject) {
			if (!processed) {
				throw new IllegalStateException(
						"IDs haven't been processed yet");
			}
			return getRemappedIdsImpl(associatedObject);
		}

		/** Implementation of the getRemappedIds method */
		protected abstract Set<RemappedId> getRemappedIdsImpl(
				T associatedObject);

		public abstract IdReferenceType getIdType();
	}

	protected IdReferenceHandlerSet(
			final int maxUniqueIdCount,
			final Map<IdReferenceType, IdReferenceHandler<T>> handlers) {
		this.maxUniqueIdCount = maxUniqueIdCount;
		// Defensive copy: later mutation of the caller's map must not affect us.
		this.handlers = new HashMap<IdReferenceType, IdReferenceHandler<T>>(
				handlers);
	}

	/** Returns true if this handler set contains a handler for the ID type
	 * specified.
	 * @param idType the type of ID to check.
	 * @return true if this handler set contains a handler for the ID type
	 * specified.
	 */
	public boolean hasHandler(final IdReferenceType idType) {
		return handlers.containsKey(idType);
	}

	/** Associate an object with any further IDs processed. For example,
	 * if serially processing IDs from a set of typed objects the object in
	 * question could be associated with the IDs.
	 * @param object the object to associate with any IDs processed after this
	 * point.
	 * @return this.
	 */
	public IdReferenceHandlerSet<T> associateObject(T object) {
		if (object == null) {
			throw new NullPointerException("object may not be null");
		}
		associated = object;
		return this;
	}

	/** Add a string ID to the appropriate ID handler.
	 * @param id the new ID.
	 * @throws TooManyIdsException if too many IDs are currently in memory.
	 * @throws IdReferenceHandlerException if the id could not be handled
	 */
	public void addStringId(final IdReference<String> id)
			throws TooManyIdsException, IdReferenceHandlerException {
		checkIdRefValidity(id);
		updateIdCount(handlers.get(id.getType()).addId(associated,
				id.getId(), id.getAttributes()));
	}

	// Counts a newly stored unique ID and enforces the global limit.
	private void updateIdCount(final boolean newId)
			throws TooManyIdsException {
		currentUniqueIdCount += newId ? 1 : 0;
		if (currentUniqueIdCount > maxUniqueIdCount) {
			throw new TooManyIdsException("Maximum ID count of " +
					maxUniqueIdCount + " exceeded");
		}
	}

	// Shared precondition checks for adding an ID of any type.
	private void checkIdRefValidity(final IdReference<?> id) {
		if (processed) {
			throw new IllegalStateException(
					"This ID handler set instance's IDs have been processed and no more can be added");
		}
		if (associated == null) {
			throw new IllegalStateException(
					"Must add an object to associate IDs with prior to adding IDs");
		}
		if (id == null) {
			throw new NullPointerException("id cannot be null");
		}
		if (!handlers.containsKey(id.getType())) {
			throw new NoSuchIdReferenceHandlerException(
					"There is no handler for the ID type " +
							id.getType().getType());
		}
	}

	/** Process all the IDs saved in all the registered handlers and locks
	 * the handlers. Calling this method twice will have no effect.
	 * @return this.
	 * @throws IdReferenceHandlerException if there was an error processing
	 * the IDs.
	 */
	public IdReferenceHandlerSet<T> processIDs() throws IdReferenceHandlerException {
		if (processed) {
			return this;
		}
		processed = true;
		for (final Entry<IdReferenceType, IdReferenceHandler<T>> es:
				handlers.entrySet()) {
			es.getValue().processIds();
		}
		return this;
	}

	/** Check if processIds() has been called on this handler. Implies
	 * that the handler is locked.
	 * @return true if processIds() has been called.
	 */
	public boolean wereIdsProcessed() {
		return processed;
	}

	/** Get the id types with registered handlers.
	 * @return the id types with handlers that have been added to this handler
	 * set.
	 */
	public Set<IdReferenceType> getIDTypes() {
		// Wrap the view: the raw keySet() would let callers remove handlers
		// from the internal map.
		return Collections.unmodifiableSet(handlers.keySet());
	}

	/** Translate an ID to the remapped ID.
	 * @param idType the ID type.
	 * @param oldId the original ID.
	 * @return the new, remapped ID.
	 */
	public RemappedId getRemappedId(
			final IdReferenceType idType,
			final String oldId) {
		if (idType == null || oldId == null) {
			throw new NullPointerException("idType and oldId can't be null");
		}
		if (!handlers.containsKey(idType)) {
			throw new NoSuchIdReferenceHandlerException(
					"There is no handler registered for the ID type " +
							idType.getType());
		}
		return handlers.get(idType).getRemappedId(oldId);
	}

	/** Gets the set of remapped IDs corresponding to an object.
	 * @param idType the type of IDs to get.
	 * @param associatedObject the object for which IDs will be returned.
	 * @return the set of remapped IDs of the given type associated with the
	 * object.
	 */
	public Set<RemappedId> getRemappedIds(
			final IdReferenceType idType,
			final T associatedObject) {
		if (idType == null || associatedObject == null) {
			throw new NullPointerException(
					"idType and associatedObject can't be null");
		}
		if (!handlers.containsKey(idType)) {
			throw new NoSuchIdReferenceHandlerException(
					"There is no handler registered for the ID type " +
							idType.getType());
		}
		return handlers.get(idType).getRemappedIds(associatedObject);
	}

	/** Get the number of unique IDs in the handler set.
	 * @return the number of unique IDs.
	 */
	public int size() {
		return currentUniqueIdCount;
	}

	/** Returns true if this handler set contains no IDs.
	 * @return true if this handler set contains no IDs.
	 */
	public boolean isEmpty() {
		return currentUniqueIdCount == 0;
	}

	/** Returns the maximum ID count allowed for this handler.
	 * @return the maximum ID count allowed for this handler.
	 */
	public int getMaximumIdCount() {
		return maxUniqueIdCount;
	}

	@SuppressWarnings("serial")
	public static class TooManyIdsException extends Exception {

		public TooManyIdsException(final String message) {
			super(message);
		}
	}

	@SuppressWarnings("serial")
	public static class NoSuchIdException extends RuntimeException {

		public NoSuchIdException(String message) {
			super(message);
		}
	}

	@SuppressWarnings("serial")
	public static class HandlerLockedException extends RuntimeException {

		public HandlerLockedException(String message) {
			super(message);
		}
	}

	@SuppressWarnings("serial")
	public static class IdReferenceHandlerException extends Exception {

		private final IdReferenceType idType;

		public IdReferenceHandlerException(
				final String message,
				final IdReferenceType idType,
				final Throwable cause) {
			super(message, cause);
			if (message == null || idType == null) {
				throw new NullPointerException(
						"message and idType cannot be null");
			}
			this.idType = idType;
		}

		public IdReferenceType getIdType() {
			return idType;
		}
	}

	@SuppressWarnings("serial")
	public static class IdReferenceException
			extends IdReferenceHandlerException {

		private final String id;
		private final List<String> idAttributes;
		private final Object associatedObject;

		public IdReferenceException(
				final String message,
				final IdReferenceType idType,
				final Object associatedObject,
				final String id,
				final List<String> idAttributes,
				final Throwable cause) {
			super(message, idType, cause);
			if (associatedObject == null || id == null) {
				throw new NullPointerException(
						"associatedObject and id cannot be null");
			}
			this.id = id;
			// Defensive, unmodifiable copy; null means "no attributes supplied".
			this.idAttributes = idAttributes == null ? null :
				Collections.unmodifiableList(new LinkedList<String>(idAttributes));
			this.associatedObject = associatedObject;
		}

		public String getId() {
			return id;
		}

		public List<String> getIdAttributes() {
			return idAttributes;
		}

		public Object getAssociatedObject() {
			return associatedObject;
		}

		public IdReference<String> getIdReference() {
			return new IdReference<String>(getIdType(), id, idAttributes);
		}
	}

	@SuppressWarnings("serial")
	public static class IdParseException extends IdReferenceException {

		public IdParseException(
				final String message,
				final IdReferenceType idType,
				final Object associatedObject,
				final String id,
				final List<String> idAttributes,
				final Throwable cause) {
			super(message, idType, associatedObject, id, idAttributes, cause);
		}
	}
}
| |
package org.jgroups.tests;
import org.jgroups.*;
import org.jgroups.protocols.*;
import org.jgroups.protocols.pbcast.GMS;
import org.jgroups.protocols.pbcast.NAKACK2;
import org.jgroups.protocols.pbcast.STABLE;
import org.jgroups.util.Util;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
/**
* Tests message ordering
* @author Bela Ban
*/
@Test(groups=Global.FUNCTIONAL,singleThreaded=true)
public class OrderingTest {
    protected static final int NUM_MSGS=200000;
    protected static final int NUM_SENDERS=2;
    protected static final int TOTAL_NUM_MSGS=NUM_MSGS * NUM_SENDERS;
    protected JChannel[]       channels=new JChannel[NUM_SENDERS];
    protected MySender[]       senders=new MySender[NUM_SENDERS];

    @BeforeMethod
    void init() throws Exception {
        System.out.println("creating " + NUM_SENDERS + " channels");
        for(int i=0; i < channels.length; i++) {
            channels[i]=createChannel();
            channels[i].setReceiver(new MyReceiver());
            senders[i]=new MySender(channels[i]);
            channels[i].connect("OrderingTest.testFIFOOrder");
        }
        System.out.println("done");
        System.out.println("\nwaiting for a cluster of " + NUM_SENDERS + " to form:");
        for(int i=0; i < 20; i++) {
            // BUGFIX: 'done' must be reset at the top of each attempt.
            // Previously it was initialized once before the loop, so a single
            // failed check made every remaining iteration sleep even after
            // the view had formed.
            boolean done=true;
            for(JChannel ch: channels) {
                if(ch.getView().size() != NUM_SENDERS) {
                    done=false;
                    break;
                }
            }
            if(done)
                break;
            Util.sleep(1000);
        }
    }

    @AfterMethod
    void destroy() {
        // Close in reverse order so the coordinator goes down last.
        for(int i=channels.length-1; i >= 0; i--) {
            Util.close(channels[i]);
        }
    }

    /** Builds an in-memory (SHARED_LOOPBACK) stack suitable for ordering tests. */
    protected static JChannel createChannel() throws Exception {
        return new JChannel(new SHARED_LOOPBACK().setValue("oob_thread_pool_rejection_policy", "run")
                              .setValue("thread_pool_rejection_policy", "run")
                              .setValue("thread_pool_queue_max_size", 100000),
                            new SHARED_LOOPBACK_PING(),
                            new MERGE3(),
                            new FD_SOCK(),
                            new VERIFY_SUSPECT(),
                            new BARRIER(),
                            new NAKACK2().setValue("use_mcast_xmit", false).setValue("discard_delivered_msgs", true),
                            new UNICAST3(),
                            new STABLE().setValue("max_bytes", 50000),
                            new GMS().setValue("print_local_addr", false),
                            new UFC().setValue("max_credits", 2000000),
                            new MFC().setValue("max_credits", 2000000),
                            new FRAG2());
    }

    /** Sends NUM_MSGS per sender concurrently and verifies per-sender FIFO order. */
    public void testFIFOOrdering() throws Exception {
        assert channels[0].getView().size() == NUM_SENDERS : "view[0] is " + channels[0].getView().size();
        System.out.println("done, view is " + channels[0].getView());
        System.out.println("\nstarting to send " + NUM_MSGS + " messages");
        for(int i=0; i < senders.length; i++)
            senders[i].start();
        for(int i=0; i < senders.length; i++) {
            MySender sender=senders[i];
            sender.join();
        }
        System.out.println("senders done");

        System.out.println("\nwaiting for message reception by all receivers:");
        boolean done;
        for(int i=0; i < 50; i++) {
            done=true;
            for(JChannel ch: channels) {
                MyReceiver receiver=(MyReceiver)ch.getReceiver();
                int received=receiver.getReceived();
                System.out.println(ch.getAddress() + ": " + received);
                // Force garbage collection of stable messages so retransmission
                // state doesn't grow without bound during the wait.
                STABLE stable=(STABLE)ch.getProtocolStack().findProtocol(STABLE.class);
                stable.gc();
                if(received != TOTAL_NUM_MSGS) {
                    done=false;
                    break;
                }
            }
            if(!done)
                Util.sleep(1000);
            else
                break;
        }

        for(JChannel ch: channels) {
            MyReceiver receiver=(MyReceiver)ch.getReceiver();
            System.out.println(ch.getAddress() + ": " + receiver.getReceived());
        }

        for(JChannel ch: channels) {
            MyReceiver receiver=(MyReceiver)ch.getReceiver();
            assert receiver.getReceived() == TOTAL_NUM_MSGS : "receiver had " + receiver.getReceived() +
              " messages (expected=" + TOTAL_NUM_MSGS + ")";
        }
        System.out.println("done");

        System.out.println("\nchecking message order");
        for(JChannel ch: channels) {
            MyReceiver receiver=(MyReceiver)ch.getReceiver();
            System.out.print(ch.getAddress() + ": ");
            boolean ok=receiver.getNumberOfErrors() == 0;
            System.out.println(ok? "OK" : "FAIL (" + receiver.getNumberOfErrors() + " errors)");
            assert ok : receiver.getNumberOfErrors() + " errors";
        }
        System.out.println("done");
    }

    /** Sends messages 1..NUM_MSGS on its channel. */
    protected static class MySender extends Thread {
        protected final JChannel ch;

        public MySender(JChannel ch) {
            this.ch=ch;
        }

        public void run() {
            for(int i=1; i <= NUM_MSGS; i++) {
                try {
                    Message msg=new Message(null, i);
                    ch.send(msg);
                    if(i % 100000 == 0)
                        System.out.println(Thread.currentThread().getId() + ": " + i + " sent");
                }
                catch(Exception e) {
                    e.printStackTrace();
                }
            }
        }
    }

    /** Tracks, per sender, the next expected sequence number and counts gaps. */
    protected static class MyReceiver extends ReceiverAdapter {
        protected final ConcurrentMap<Address,Integer> map=new ConcurrentHashMap<>();
        final AtomicInteger received=new AtomicInteger(0);
        protected int num_errors=0;

        public int getNumberOfErrors() {
            return num_errors;
        }

        public int getReceived() {
            return received.intValue();
        }

        public void receive(Message msg) {
            Integer num=(Integer)msg.getObject();
            Address sender=msg.getSrc();
            Integer current_seqno=map.get(sender);
            if(current_seqno == null) {
                // valueOf() instead of the deprecated Integer constructor.
                current_seqno=Integer.valueOf(1);
                Integer tmp=map.putIfAbsent(sender, current_seqno);
                if(tmp != null)
                    current_seqno=tmp;
            }
            if(current_seqno.intValue() == num)
                map.put(sender, current_seqno + 1);
            else
                num_errors++;
            if(received.incrementAndGet() % 100000 == 0)
                System.out.println("received " + received);
        }
    }
}
| |
package it.jaschke.alexandria.services;
import android.app.IntentService;
import android.content.ContentValues;
import android.content.Intent;
import android.database.Cursor;
import android.net.Uri;
import android.support.v4.content.LocalBroadcastManager;
import android.util.Log;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import it.jaschke.alexandria.MainActivity;
import it.jaschke.alexandria.R;
import it.jaschke.alexandria.data.AlexandriaContract;
import it.jaschke.alexandria.data.model.BookDetailModel;
/**
* An {@link IntentService} subclass for handling asynchronous task requests in
* a service on a separate handler thread.
* <p/>
*/
public class BookService extends IntentService {
// Per-class, not per-instance: declared static final.
private static final String LOG_TAG = BookService.class.getSimpleName();

/** Intent actions handled by onHandleIntent. */
public static final String FETCH_BOOK = "it.jaschke.alexandria.services.action.FETCH_BOOK";
public static final String DELETE_BOOK = "it.jaschke.alexandria.services.action.DELETE_BOOK";
public static final String UNDO_DELETE_BOOK = "it.jaschke.alexandria.services.action.UNDO_DELETE_BOOK";

/** Intent extra keys. (Note: "MODELE" spelling is part of the extra's key string.) */
public static final String EAN = "it.jaschke.alexandria.services.extra.EAN";
public static final String BOOK_DETAIL_MODELE = "it.jaschke.alexandria.services.extra.BOOK_DETAIL_MODELE";
// Names the service's background worker thread "Alexandria"
// (visible in debuggers / thread dumps).
public BookService() {
    super("Alexandria");
}
/** Dispatches the incoming intent to the matching action handler. */
@Override
protected void onHandleIntent(Intent intent) {
    // Guard clauses: nothing to do without an intent or an action.
    if (intent == null) {
        return;
    }
    final String action = intent.getAction();
    if (action == null) {
        return;
    }
    switch (action) {
        case FETCH_BOOK:
            fetchBook(intent.getStringExtra(EAN));
            break;
        case DELETE_BOOK:
            deleteBook(intent.getStringExtra(EAN));
            break;
        case UNDO_DELETE_BOOK:
            BookDetailModel bookDetailModel = intent.getParcelableExtra(BOOK_DETAIL_MODELE);
            insertBook(bookDetailModel);
            break;
        default:
            // Unknown action: ignore, matching the original if/else chain.
            break;
    }
}
/**
 * Deletes the book row identified by the given EAN from the content
 * provider. Runs on the service's background handler thread.
 *
 * @param ean the EAN/ISBN of the book to delete; a null value is a no-op.
 *            NOTE(review): assumes ean parses as a long — TODO confirm
 *            callers never pass a non-numeric value.
 */
private void deleteBook(String ean) {
    if(ean!=null) {
        getContentResolver().delete(AlexandriaContract.BookEntry.buildBookUri(Long.parseLong(ean)), null, null);
    }
}
/**
 * Re-inserts a previously deleted book (undo of a delete). If the book is
 * already present in the provider, this is a no-op.
 *
 * @param bookDetailModel the cached book details to write back.
 */
private void insertBook(BookDetailModel bookDetailModel) {
    Cursor bookEntry = getContentResolver().query(
            AlexandriaContract.BookEntry.buildBookUri(Long.parseLong(bookDetailModel.getIsbn())),
            null, // leaving "columns" null just returns all the columns.
            null, // cols for "where" clause
            null, // values for "where" clause
            null  // sort order
    );
    // ContentResolver.query may return null; also make sure the cursor is
    // closed even if getCount() throws (the original leaked it on exception).
    if (bookEntry != null) {
        try {
            if (bookEntry.getCount() > 0) {
                return; // already present; nothing to undo
            }
        } finally {
            bookEntry.close();
        }
    }
    writeBackBook(bookDetailModel.getIsbn(),
            bookDetailModel.getTitleBook(),
            bookDetailModel.getSubTitle(),
            bookDetailModel.getDesc(),
            bookDetailModel.getImgUrl(),
            bookDetailModel.getPublisher(),
            bookDetailModel.getPublishedOn(),
            bookDetailModel.getPages());
}
/**
 * Handles the {@link #FETCH_BOOK} action: queries the Google Books API for
 * the book identified by {@code ean} and writes the result (book row,
 * authors, categories) into the content provider. Broadcasts a "not found"
 * message when the API returns no items. Does nothing when the EAN is
 * invalid, the book is already stored, or the download fails.
 */
private void fetchBook(String ean) {
    // Only EAN-13 barcodes are supported; the null check also fixes an NPE
    // the original hit when the extra was missing from the intent.
    if (ean == null || ean.length() != 13) {
        return;
    }
    // Skip the network round-trip if the book is already in the provider.
    Cursor bookEntry = getContentResolver().query(
            AlexandriaContract.BookEntry.buildBookUri(Long.parseLong(ean)),
            null, // leaving "columns" null just returns all the columns.
            null, // cols for "where" clause
            null, // values for "where" clause
            null  // sort order
    );
    // Bug fix: query() may return null; original dereferenced unconditionally.
    if (bookEntry != null) {
        try {
            if (bookEntry.getCount() > 0) {
                return; // already cached locally
            }
        } finally {
            bookEntry.close();
        }
    }
    HttpURLConnection urlConnection = null;
    BufferedReader reader = null;
    String bookJsonString = null;
    try {
        final String FORECAST_BASE_URL = "https://www.googleapis.com/books/v1/volumes?";
        final String QUERY_PARAM = "q";
        final String ISBN_PARAM = "isbn:" + ean;
        Uri builtUri = Uri.parse(FORECAST_BASE_URL).buildUpon()
                .appendQueryParameter(QUERY_PARAM, ISBN_PARAM)
                .build();
        URL url = new URL(builtUri.toString());
        urlConnection = (HttpURLConnection) url.openConnection();
        urlConnection.setRequestMethod("GET");
        urlConnection.connect();
        InputStream inputStream = urlConnection.getInputStream();
        StringBuffer buffer = new StringBuffer();
        if (inputStream == null) {
            return;
        }
        reader = new BufferedReader(new InputStreamReader(inputStream));
        String line;
        while ((line = reader.readLine()) != null) {
            // Newlines are irrelevant to JSON parsing; kept for debuggability.
            buffer.append(line);
            buffer.append("\n");
        }
        if (buffer.length() == 0) {
            return;
        }
        bookJsonString = buffer.toString();
    } catch (Exception e) {
        Log.e(LOG_TAG, "Error ", e);
    } finally {
        if (urlConnection != null) {
            urlConnection.disconnect();
        }
        if (reader != null) {
            try {
                reader.close();
            } catch (final IOException e) {
                Log.e(LOG_TAG, "Error closing stream", e);
            }
        }
    }
    // Bug fix: a failed download left bookJsonString null, which made the
    // JSONObject constructor below throw instead of failing gracefully.
    if (bookJsonString == null) {
        return;
    }
    // Keys of the Google Books "volumes" response payload.
    final String ITEMS = "items";
    final String VOLUME_INFO = "volumeInfo";
    final String TITLE = "title";
    final String SUBTITLE = "subtitle";
    final String AUTHORS = "authors";
    final String DESC = "description";
    final String CATEGORIES = "categories";
    final String PUBLISHER = "publisher";
    final String PAGE_COUNT = "pageCount";
    final String PUBLISHED_DATE = "publishedDate";
    final String IMG_URL_PATH = "imageLinks";
    final String IMG_URL = "thumbnail";
    try {
        JSONObject bookJson = new JSONObject(bookJsonString);
        JSONArray bookArray;
        if (bookJson.has(ITEMS)) {
            bookArray = bookJson.getJSONArray(ITEMS);
        } else {
            // No match for this ISBN — tell the UI via a local broadcast.
            Intent messageIntent = new Intent(MainActivity.MESSAGE_EVENT);
            messageIntent.putExtra(MainActivity.MESSAGE_KEY, getResources().getString(R.string.not_found));
            LocalBroadcastManager.getInstance(getApplicationContext()).sendBroadcast(messageIntent);
            return;
        }
        JSONObject bookInfo = ((JSONObject) bookArray.get(0)).getJSONObject(VOLUME_INFO);
        String title = bookInfo.getString(TITLE);
        // All fields below are optional in the API response; default to
        // empty/zero so the provider row is always complete.
        String subtitle = "";
        if (bookInfo.has(SUBTITLE)) {
            subtitle = bookInfo.getString(SUBTITLE);
        }
        String desc = "";
        if (bookInfo.has(DESC)) {
            desc = bookInfo.getString(DESC);
        }
        String imgUrl = "";
        if (bookInfo.has(IMG_URL_PATH) && bookInfo.getJSONObject(IMG_URL_PATH).has(IMG_URL)) {
            imgUrl = bookInfo.getJSONObject(IMG_URL_PATH).getString(IMG_URL);
        }
        String publisher = "";
        if (bookInfo.has(PUBLISHER)) {
            publisher = bookInfo.getString(PUBLISHER);
        }
        String publishedOn = "";
        if (bookInfo.has(PUBLISHED_DATE)) {
            publishedOn = bookInfo.getString(PUBLISHED_DATE);
        }
        int page = 0;
        if (bookInfo.has(PAGE_COUNT)) {
            page = bookInfo.getInt(PAGE_COUNT);
        }
        writeBackBook(ean, title, subtitle, desc, imgUrl, publisher, publishedOn, page);
        if (bookInfo.has(AUTHORS)) {
            writeBackAuthors(ean, bookInfo.getJSONArray(AUTHORS));
        }
        if (bookInfo.has(CATEGORIES)) {
            writeBackCategories(ean, bookInfo.getJSONArray(CATEGORIES));
        }
    } catch (JSONException e) {
        Log.e(LOG_TAG, "Error ", e);
    }
}
/**
 * Persists a single book row in the provider, keyed by its EAN.
 * Empty strings are stored for fields the API did not supply.
 */
private void writeBackBook(String ean, String title, String subtitle, String desc, String imgUrl, String publisher, String publishedOn, int page) {
    final ContentValues bookValues = new ContentValues();
    bookValues.put(AlexandriaContract.BookEntry._ID, ean);
    bookValues.put(AlexandriaContract.BookEntry.TITLE, title);
    bookValues.put(AlexandriaContract.BookEntry.SUBTITLE, subtitle);
    bookValues.put(AlexandriaContract.BookEntry.DESC, desc);
    bookValues.put(AlexandriaContract.BookEntry.IMAGE_URL, imgUrl);
    bookValues.put(AlexandriaContract.BookEntry.PUBLISHER, publisher);
    bookValues.put(AlexandriaContract.BookEntry.PUBLISHED_DATE, publishedOn);
    bookValues.put(AlexandriaContract.BookEntry.PAGE_COUNT, page);
    getContentResolver().insert(AlexandriaContract.BookEntry.CONTENT_URI, bookValues);
}
/** Persists one author row per entry of the JSON authors array. */
private void writeBackAuthors(String ean, JSONArray jsonArray) throws JSONException {
    for (int i = 0; i < jsonArray.length(); i++) {
        // Fresh ContentValues per row; insert() may retain the instance.
        final ContentValues authorValues = new ContentValues();
        authorValues.put(AlexandriaContract.AuthorEntry._ID, ean);
        authorValues.put(AlexandriaContract.AuthorEntry.AUTHOR, jsonArray.getString(i));
        getContentResolver().insert(AlexandriaContract.AuthorEntry.CONTENT_URI, authorValues);
    }
}
/** Persists one category row per entry of the JSON categories array. */
private void writeBackCategories(String ean, JSONArray jsonArray) throws JSONException {
    for (int i = 0; i < jsonArray.length(); i++) {
        // Fresh ContentValues per row; insert() may retain the instance.
        final ContentValues categoryValues = new ContentValues();
        categoryValues.put(AlexandriaContract.CategoryEntry._ID, ean);
        categoryValues.put(AlexandriaContract.CategoryEntry.CATEGORY, jsonArray.getString(i));
        getContentResolver().insert(AlexandriaContract.CategoryEntry.CONTENT_URI, categoryValues);
    }
}
}
| |
/*
* Copyright 2015 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.uberfire.java.nio.file;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import org.junit.Test;
import org.uberfire.java.nio.base.BasicFileAttributesImpl;
import org.uberfire.java.nio.base.NotImplementedException;
import org.uberfire.java.nio.channels.SeekableByteChannel;
import org.uberfire.java.nio.file.attribute.BasicFileAttributeView;
import org.uberfire.java.nio.file.attribute.BasicFileAttributes;
import org.uberfire.java.nio.fs.file.BaseSimpleFileStore;
import org.uberfire.java.nio.fs.jgit.JGitFileStore;
import static org.fest.assertions.api.Assertions.assertThat;
import static org.fest.assertions.api.Assertions.fail;
public class FilesTest extends AbstractBaseTest {
@Test
public void newIOStreams() throws IOException {
    final Path dir = newTempDir();
    // Round-trip: write a file through newOutputStream, read it back through
    // newInputStream. Bug fix: the original never closed the InputStream and
    // leaked both streams if an assertion or write failed; use
    // try-with-resources so both are always released.
    try (OutputStream out = Files.newOutputStream(dir.resolve("file.txt"))) {
        assertThat(out).isNotNull();
        out.write("content".getBytes());
    }
    final StringBuilder sb = new StringBuilder();
    try (InputStream in = Files.newInputStream(dir.resolve("file.txt"))) {
        assertThat(in).isNotNull();
        int i;
        while ((i = in.read()) != -1) {
            sb.append((char) i);
        }
    }
    assertThat(sb.toString()).isEqualTo("content");
}
// Opening a stream on a path that does not exist must fail fast.
@Test(expected = NoSuchFileException.class)
public void newInputStreamNonExistent() {
Files.newInputStream(Paths.get("/path/to/some/file.txt"));
}
// A directory cannot be opened for reading as a file.
@Test(expected = NoSuchFileException.class)
public void newInputStreamOnDir() {
final Path dir = newTempDir();
Files.newInputStream(dir);
}
// Null arguments are rejected with IllegalArgumentException, not NPE.
@Test(expected = IllegalArgumentException.class)
public void newInputStreamNull() {
Files.newInputStream(null);
}
// Writing over an existing directory is an I/O error (uberfire's IOException).
@Test(expected = org.uberfire.java.nio.IOException.class)
public void newOutputStreamOnExistent() {
final Path dir = newTempDir();
Files.newOutputStream(dir);
}
// NOTE(review): method name has a typo ("Outpur"); kept as-is since renaming
// a public test method changes the class's interface.
@Test(expected = IllegalArgumentException.class)
public void newOutpurStreamNull() {
Files.newOutputStream(null);
}
@Test
public void newByteChannel() throws IOException {
    // Channel created with an explicit (empty) option set.
    final SeekableByteChannel sbc = Files.newByteChannel(newTempDir().resolve("file.temp.txt"),
                                                         new HashSet<OpenOption>());
    assertThat(sbc).isNotNull();
    sbc.close();
    // Channel created with the no-options overload.
    // Bug fix: the original asserted and closed `sbc` (already closed) a
    // second time here, leaving `sbc2` unchecked and leaked.
    final SeekableByteChannel sbc2 = Files.newByteChannel(newTempDir().resolve("file.temp2.txt"));
    assertThat(sbc2).isNotNull();
    sbc2.close();
}
// Opening a byte channel in create mode on an existing file must fail.
@Test(expected = FileAlreadyExistsException.class)
public void newByteChannelFileAlreadyExists() {
Files.newByteChannel(Files.createTempFile("foo",
"bar"));
}
@Test(expected = IllegalArgumentException.class)
public void newByteChannelNull() {
Files.newByteChannel(null);
}
// createFile returns the path of a newly created, existing regular file.
@Test
public void createFile() throws IOException {
final Path path = Files.createFile(newTempDir().resolve("file.temp.txt"));
assertThat(path).isNotNull();
assertThat(path.toFile().exists()).isTrue();
}
@Test(expected = FileAlreadyExistsException.class)
public void createFileAlreadyExists() {
Files.createFile(Files.createTempFile("foo",
"bar"));
}
@Test(expected = IllegalArgumentException.class)
public void createFileNull() {
Files.createFile(null);
}
// createDirectory makes a single new directory that can then hold files.
@Test
public void createDirectory() {
final Path path = newTempDir();
final Path dir = Files.createDirectory(path.resolve("myNewDir"));
assertThat(dir).isNotNull();
assertThat(dir.toFile().exists()).isTrue();
assertThat(dir.toFile().isDirectory()).isTrue();
final Path file = Files.createFile(dir.resolve("new.file.txt"));
assertThat(file).isNotNull();
assertThat(file.toFile().exists()).isTrue();
assertThat(file.toFile().isDirectory()).isFalse();
assertThat(file.toFile().isFile()).isTrue();
}
@Test(expected = FileAlreadyExistsException.class)
public void createDirectoryFileAlreadyExists() {
Files.createDirectory(newTempDir());
}
@Test(expected = IllegalArgumentException.class)
public void createDirectoryNull() {
Files.createDirectory(null);
}
// createDirectories also creates all missing intermediate directories.
@Test
public void createDirectories() {
final Path path = newTempDir();
final Path dir = Files.createDirectories(path.resolve("myNewDir/mysubDir1/mysubDir2"));
assertThat(dir).isNotNull();
assertThat(dir.toFile().exists()).isTrue();
assertThat(dir.toFile().isDirectory()).isTrue();
final Path file = Files.createFile(dir.resolve("new.file.txt"));
assertThat(file).isNotNull();
assertThat(file.toFile().exists()).isTrue();
assertThat(file.toFile().isDirectory()).isFalse();
assertThat(file.toFile().isFile()).isTrue();
}
@Test(expected = FileAlreadyExistsException.class)
public void createDirectoriesFileAlreadyExists() {
Files.createDirectories(newTempDir());
}
@Test(expected = IllegalArgumentException.class)
public void createDirectoriesNull() {
Files.createDirectories(null);
}
// delete removes both regular files and (empty) directories.
@Test
public void delete() {
final Path path = Files.createFile(newTempDir().resolve("file.temp.txt"));
assertThat(path).isNotNull();
assertThat(path.toFile().exists()).isTrue();
Files.delete(path);
assertThat(path).isNotNull();
assertThat(path.toFile().exists()).isFalse();
final Path dir = newTempDir();
assertThat(dir).isNotNull();
assertThat(dir.toFile().exists()).isTrue();
Files.delete(dir);
assertThat(dir).isNotNull();
assertThat(dir.toFile().exists()).isFalse();
}
// A non-empty directory cannot be deleted in one call.
@Test(expected = DirectoryNotEmptyException.class)
public void deleteDirectoryNotEmpty() {
final Path dir = newTempDir();
Files.createFile(dir.resolve("file.temp.txt"));
Files.delete(dir);
}
@Test(expected = NoSuchFileException.class)
public void deleteNoSuchFileException() {
Files.delete(newTempDir().resolve("file.temp.txt"));
}
@Test(expected = IllegalArgumentException.class)
public void deleteNull() {
Files.delete(null);
}
// deleteIfExists reports whether anything was actually removed.
@Test
public void deleteIfExists() {
final Path path = Files.createFile(newTempDir().resolve("file.temp.txt"));
assertThat(path).isNotNull();
assertThat(path.toFile().exists()).isTrue();
assertThat(Files.deleteIfExists(path)).isTrue();
assertThat(path).isNotNull();
assertThat(path.toFile().exists()).isFalse();
final Path dir = newTempDir();
assertThat(dir).isNotNull();
assertThat(dir.toFile().exists()).isTrue();
assertThat(Files.deleteIfExists(dir)).isTrue();
assertThat(dir).isNotNull();
assertThat(dir.toFile().exists()).isFalse();
assertThat(Files.deleteIfExists(newTempDir().resolve("file.temp.txt"))).isFalse();
}
@Test(expected = DirectoryNotEmptyException.class)
public void deleteIfExistsDirectoryNotEmpty() {
final Path dir = newTempDir();
Files.createFile(dir.resolve("file.temp.txt"));
Files.deleteIfExists(dir);
}
@Test(expected = IllegalArgumentException.class)
public void deleteIfExistsNull() {
Files.deleteIfExists(null);
}
// Exercises prefix/suffix handling of createTempFile: null/empty arguments
// default to a ".tmp"-style name; a suffix gains a leading dot only when it
// does not already start with one.
@Test
public void createTempFile() {
final Path tempFile = Files.createTempFile(null,
null);
assertThat(tempFile).isNotNull();
assertThat(tempFile.getFileName().toString()).endsWith("tmp");
assertThat(tempFile.toFile().exists()).isTrue();
final Path tempFile2 = Files.createTempFile("",
"");
assertThat(tempFile2).isNotNull();
assertThat(tempFile2.getFileName().toString()).endsWith("tmp");
assertThat(tempFile2.toFile().exists()).isTrue();
final Path tempFile3 = Files.createTempFile("foo",
"bar");
assertThat(tempFile3).isNotNull();
assertThat(tempFile3.toFile().exists()).isTrue();
assertThat(tempFile3.getFileName().toString()).startsWith("foo").endsWith(".bar");
final Path tempFile4 = Files.createTempFile("",
"bar");
assertThat(tempFile4).isNotNull();
assertThat(tempFile4.toFile().exists()).isTrue();
assertThat(tempFile4.getFileName().toString()).endsWith(".bar");
final Path tempFile5 = Files.createTempFile("",
".bar");
assertThat(tempFile5).isNotNull();
assertThat(tempFile5.toFile().exists()).isTrue();
assertThat(tempFile5.getFileName().toString()).endsWith(".bar");
final Path tempFile6 = Files.createTempFile("",
"bar.temp");
assertThat(tempFile6).isNotNull();
assertThat(tempFile6.toFile().exists()).isTrue();
assertThat(tempFile6.getFileName().toString()).endsWith(".bar.temp");
final Path tempFile7 = Files.createTempFile("",
".bar.temp");
assertThat(tempFile7).isNotNull();
assertThat(tempFile7.toFile().exists()).isTrue();
assertThat(tempFile7.getFileName().toString()).endsWith(".bar.temp");
}
// A temp file created with an explicit parent lands inside that directory.
@Test
public void createTempFileInsideDir() {
final Path dir = newTempDir();
assertThat(dir.toFile().list()).isNotNull().isEmpty();
final Path tempFile = Files.createTempFile(dir,
null,
null);
assertThat(tempFile).isNotNull();
assertThat(tempFile.getFileName().toString()).endsWith("tmp");
assertThat(tempFile.toFile().exists()).isTrue();
assertThat(dir.toFile().list()).isNotNull().isNotEmpty();
}
// The parent directory must already exist.
@Test(expected = NoSuchFileException.class)
public void createTempFileNoSuchFile() {
Files.createTempFile(Paths.get("/path/to/"),
null,
null);
}
@Test(expected = IllegalArgumentException.class)
public void createTempFileNull() {
Files.createTempFile((Path) null,
null,
null);
}
// createTempDirectory mirrors createTempFile but yields a directory.
@Test
public void createTempDirectory() {
final Path tempFile = Files.createTempDirectory(null);
assertThat(tempFile).isNotNull();
assertThat(tempFile.toFile().exists()).isTrue();
assertThat(tempFile.toFile().isDirectory()).isTrue();
final Path tempFile2 = Files.createTempDirectory("");
assertThat(tempFile2).isNotNull();
assertThat(tempFile2.toFile().exists()).isTrue();
assertThat(tempFile2.toFile().isDirectory()).isTrue();
final Path tempFile3 = Files.createTempDirectory("foo");
assertThat(tempFile3).isNotNull();
assertThat(tempFile3.toFile().exists()).isTrue();
assertThat(tempFile3.getFileName().toString()).startsWith("foo");
assertThat(tempFile3.toFile().isDirectory()).isTrue();
}
@Test
public void createTempDirectoryInsideDir() {
final Path dir = newTempDir();
assertThat(dir.toFile().list()).isNotNull().isEmpty();
final Path tempFile = Files.createTempDirectory(dir,
null);
assertThat(tempFile).isNotNull();
assertThat(tempFile.toFile().exists()).isTrue();
assertThat(tempFile.toFile().isDirectory()).isTrue();
assertThat(dir.toFile().list()).isNotNull().isNotEmpty();
}
@Test(expected = NoSuchFileException.class)
public void createTempDirectoryNoSuchFile() {
Files.createTempDirectory(Paths.get("/path/to/"),
null);
}
@Test(expected = IllegalArgumentException.class)
public void createTempDirectoryNull() {
Files.createTempDirectory((Path) null,
null);
}
// Copying an empty directory creates the target, leaving the source intact.
@Test
public void copyDir() {
final Path source = newTempDir();
final Path dest = newDirToClean();
assertThat(source.toFile().exists()).isTrue();
assertThat(dest.toFile().exists()).isFalse();
Files.copy(source,
dest);
assertThat(dest.toFile().exists()).isTrue();
assertThat(source.toFile().exists()).isTrue();
}
// A non-empty source directory cannot be copied (no recursive copy).
@Test(expected = DirectoryNotEmptyException.class)
public void copyDirDirectoryNotEmptyException() {
final Path source = newTempDir();
final Path dest = newDirToClean();
Files.createTempFile(source,
"foo",
"bar");
Files.copy(source,
dest);
}
@Test
public void copyFile() throws IOException {
    final Path dir = newTempDir();
    final Path source = dir.resolve("temp.txt");
    final Path dest = dir.resolve("result.txt");
    // Write one byte; try-with-resources closes the stream even if write()
    // throws (the original leaked it on failure).
    try (OutputStream stream = Files.newOutputStream(source)) {
        stream.write('a');
    }
    Files.copy(source,
               dest);
    assertThat(dest.toFile().exists()).isTrue();
    assertThat(source.toFile().exists()).isTrue();
    assertThat(dest.toFile().length()).isEqualTo(source.toFile().length());
}
// Walks copy() through three invalid states in a fixed order: non-empty
// source dir, existing destination, then missing source. The statement order
// matters — each step sets up the next failure. The empty catch blocks are
// deliberate: fail() throws AssertionError, which is not an Exception, so a
// missing exception still fails the test.
@Test
public void copyFileInvalidSourceAndTarget() throws IOException {
final Path source = newTempDir();
final Path dest = newTempDir().resolve("other");
final Path sourceFile = source.resolve("file.txt");
final OutputStream stream = Files.newOutputStream(sourceFile);
stream.write('a');
stream.close();
try {
Files.copy(source,
dest);
fail("source isn't empty");
} catch (Exception ex) {
}
sourceFile.toFile().delete();
Files.copy(source,
dest);
try {
Files.copy(source,
dest);
fail("dest already exists");
} catch (Exception ex) {
}
dest.toFile().delete();
source.toFile().delete();
try {
Files.copy(source,
dest);
fail("source doesn't exists");
} catch (Exception ex) {
} finally {
}
}
// Copying across filesystem providers (git -> local) is unsupported.
@Test(expected = UnsupportedOperationException.class)
public void copyDifferentProviders() {
final Map<String, Object> env = new HashMap<String, Object>(2);
env.put("userName",
"user");
env.put("password",
"pass");
final URI uri = URI.create("git://test" + System.currentTimeMillis());
FileSystems.newFileSystem(uri,
env);
Files.copy(Paths.get(uri),
newTempDir());
}
// Null source/target combinations are all rejected.
@Test(expected = IllegalArgumentException.class)
public void copyNull1() throws IOException {
Files.copy(newTempDir(),
(Path) null);
}
@Test(expected = IllegalArgumentException.class)
public void copyNull2() throws IOException {
Files.copy((Path) null,
Paths.get("/temp"));
}
@Test(expected = IllegalArgumentException.class)
public void copyNull3() throws IOException {
Files.copy((Path) null,
(Path) null);
}
// Moving an empty directory removes the source and creates the target.
@Test
public void moveDir() {
final Path source = newTempDir();
final Path dest = newTempDir().resolve("other");
Files.move(source,
dest);
assertThat(source.toFile().exists()).isFalse();
assertThat(dest.toFile().exists()).isTrue();
}
@Test
public void moveFile() throws IOException {
    final Path dir = newTempDir();
    final Path source = dir.resolve("fileSource.txt");
    final Path dest = dir.resolve("fileDest.txt");
    // Write one byte; try-with-resources closes the stream even on failure
    // (original leaked it if write() threw). Also fixes the "lenght" typo.
    try (OutputStream stream = Files.newOutputStream(source)) {
        stream.write('a');
    }
    final long length = source.toFile().length();
    Files.move(source,
               dest);
    assertThat(dest.toFile().exists()).isTrue();
    assertThat(source.toFile().exists()).isFalse();
    assertThat(dest.toFile().length()).isEqualTo(length);
}
// Mirror of copyFileInvalidSourceAndTarget for move(): non-empty source,
// existing destination, then missing source — order-sensitive; each step
// prepares the next failure. Empty catches are intentional (fail() throws
// AssertionError, which is not caught by `catch (Exception)`).
@Test
public void moveFileInvalidSourceAndTarget() throws IOException {
final Path source = newTempDir();
final Path dest = newTempDir().resolve("other");
final Path sourceFile = source.resolve("file.txt");
final OutputStream stream = Files.newOutputStream(sourceFile);
stream.write('a');
stream.close();
try {
Files.move(source,
dest);
fail("source isn't empty");
} catch (Exception ex) {
}
sourceFile.toFile().delete();
Files.copy(source,
dest);
try {
Files.move(source,
dest);
fail("dest already exists");
} catch (Exception ex) {
}
dest.toFile().delete();
source.toFile().delete();
try {
Files.move(source,
dest);
fail("source doesn't exists");
} catch (Exception ex) {
} finally {
}
}
// Null source/target combinations for move() are all rejected.
@Test(expected = IllegalArgumentException.class)
public void moveNull1() throws IOException {
Files.move(newTempDir(),
null);
}
@Test(expected = IllegalArgumentException.class)
public void moveNull2() throws IOException {
Files.move(null,
newTempDir());
}
@Test(expected = IllegalArgumentException.class)
public void moveNull3() throws IOException {
Files.move(null,
null);
}
// Moving across filesystem providers (git -> local) is unsupported.
@Test(expected = UnsupportedOperationException.class)
public void moveDifferentProviders() {
final Map<String, Object> env = new HashMap<String, Object>(2);
env.put("userName",
"user");
env.put("password",
"pass");
FileSystems.newFileSystem(URI.create("git://testXXXXXXX"),
env);
Files.move(Paths.get(URI.create("git://testXXXXXXX")),
newTempDir());
}
// Each provider reports its own FileStore type: the local backend a
// BaseSimpleFileStore, a freshly created git filesystem a JGitFileStore.
@Test
public void getFileStore() {
assertThat(Files.getFileStore(Paths.get("/some/file"))).isNotNull().isInstanceOf(BaseSimpleFileStore.class);
final Map<String, Object> env = new HashMap<String, Object>(2);
env.put("userName",
"user");
env.put("password",
"pass");
final String repoName = "git://testXXXXXXX" + System.currentTimeMillis();
final URI uri = URI.create(repoName);
FileSystems.newFileSystem(uri,
env);
assertThat(Files.getFileStore(Paths.get(uri))).isNotNull().isInstanceOf(JGitFileStore.class);
final URI fetch = URI.create(repoName + "?fetch");
FileSystems.getFileSystem(fetch);
}
@Test(expected = IllegalArgumentException.class)
public void getFileStoreNull() {
Files.getFileStore(null);
}
// An unregistered scheme has no filesystem to take a store from.
@Test(expected = FileSystemNotFoundException.class)
public void getFileStoreN() {
final URI uri = URI.create("nothing:///testXXXXXXX");
Files.getFileStore(Paths.get(uri));
}
// NOTE(review): this test and getFileAttributeViewBasic below are currently
// byte-identical; presumably one was meant to request a different view type.
@Test
public void getFileAttributeViewGeneral() throws IOException {
    final Path path = Files.createTempFile(null,
                                           null);
    final BasicFileAttributeView view = Files.getFileAttributeView(path,
                                                                   BasicFileAttributeView.class);
    assertThat(view).isNotNull();
    // Read the attributes once instead of re-reading them for every assertion.
    final BasicFileAttributes attrs = view.readAttributes();
    assertThat((Object) attrs).isNotNull();
    assertThat(attrs.isRegularFile()).isTrue();
    assertThat(attrs.isDirectory()).isFalse();
    assertThat(attrs.isSymbolicLink()).isFalse();
    assertThat(attrs.isOther()).isFalse();
    assertThat(attrs.size()).isEqualTo(0L);
}
@Test
public void getFileAttributeViewBasic() throws IOException {
    final Path path = Files.createTempFile(null,
                                           null);
    final BasicFileAttributeView view = Files.getFileAttributeView(path,
                                                                   BasicFileAttributeView.class);
    assertThat(view).isNotNull();
    // Read the attributes once instead of re-reading them for every assertion.
    final BasicFileAttributes attrs = view.readAttributes();
    assertThat((Object) attrs).isNotNull();
    assertThat(attrs.isRegularFile()).isTrue();
    assertThat(attrs.isDirectory()).isFalse();
    assertThat(attrs.isSymbolicLink()).isFalse();
    assertThat(attrs.isOther()).isFalse();
    assertThat(attrs.size()).isEqualTo(0L);
}
// An unknown view type yields null rather than throwing.
@Test
public void getFileAttributeViewInvalidView() throws IOException {
final Path path = Files.createTempFile("foo",
"bar");
assertThat(Files.getFileAttributeView(path,
MyAttrsView.class)).isNull();
}
@Test(expected = NoSuchFileException.class)
public void getFileAttributeViewNoSuchFileException() throws IOException {
final Path path = Paths.get("/path/to/file.txt");
Files.getFileAttributeView(path,
BasicFileAttributeView.class);
}
// Null path/type combinations are all rejected.
@Test(expected = IllegalArgumentException.class)
public void getFileAttributeViewNull1() throws IOException {
Files.getFileAttributeView(null,
MyAttrsView.class);
}
@Test(expected = IllegalArgumentException.class)
public void getFileAttributeViewNull2() throws IOException {
final Path path = Paths.get("/path/to/file.txt");
Files.getFileAttributeView(path,
null);
}
@Test(expected = IllegalArgumentException.class)
public void getFileAttributeViewNull3() throws IOException {
Files.getFileAttributeView(null,
null);
}
// readAttributes with the concrete impl class returns a populated record
// for a fresh, empty, regular temp file.
@Test
public void readAttributesGeneral() throws IOException {
final Path path = Files.createTempFile("foo",
"bar");
final BasicFileAttributesImpl attrs = Files.readAttributes(path,
BasicFileAttributesImpl.class);
assertThat(attrs).isNotNull();
assertThat(attrs.isRegularFile()).isTrue();
assertThat(attrs.isDirectory()).isFalse();
assertThat(attrs.isSymbolicLink()).isFalse();
assertThat(attrs.isOther()).isFalse();
assertThat(attrs.size()).isEqualTo(0L);
}
// Same contract when asking for the interface type.
@Test
public void readAttributesBasic() throws IOException {
final Path path = Files.createTempFile("foo",
"bar");
final BasicFileAttributes attrs = Files.readAttributes(path,
BasicFileAttributes.class);
assertThat(attrs).isNotNull();
assertThat(attrs.isRegularFile()).isTrue();
assertThat(attrs.isDirectory()).isFalse();
assertThat(attrs.isSymbolicLink()).isFalse();
assertThat(attrs.isOther()).isFalse();
assertThat(attrs.size()).isEqualTo(0L);
}
@Test(expected = NoSuchFileException.class)
public void readAttributesNonExistentFile() throws IOException {
final Path path = Paths.get("/path/to/file.txt");
Files.readAttributes(path,
BasicFileAttributes.class);
}
// An unknown attributes type yields null rather than throwing.
@Test
public void readAttributesInvalid() throws IOException {
final Path path = Files.createTempFile("foo",
"bar");
assertThat(Files.readAttributes(path,
MyAttrs.class)).isNull();
}
// Null path/type combinations are all rejected.
@Test(expected = IllegalArgumentException.class)
public void readAttributesNull1() throws IOException {
Files.readAttributes(null,
MyAttrs.class);
}
@Test(expected = IllegalArgumentException.class)
public void readAttributesNull2() throws IOException {
final Path path = Paths.get("/path/to/file.txt");
Files.readAttributes(path,
(Class<MyAttrs>) null);
}
@Test(expected = IllegalArgumentException.class)
public void readAttributesNull3() throws IOException {
Files.readAttributes(null,
(Class<MyAttrs>) null);
}
// String-form attribute queries: "*" expands to all 9 basic attributes,
// "view:name,name" selects individually, unknown names are silently dropped,
// an empty view prefix is illegal, and an unknown view is unsupported.
@Test
public void readAttributesMap() throws IOException {
final Path path = Files.createTempFile("foo",
"bar");
assertThat(Files.readAttributes(path,
"*")).isNotNull().hasSize(9);
assertThat(Files.readAttributes(path,
"basic:*")).isNotNull().hasSize(9);
assertThat(Files.readAttributes(path,
"basic:isRegularFile")).isNotNull().hasSize(1);
assertThat(Files.readAttributes(path,
"basic:isRegularFile,isDirectory")).isNotNull().hasSize(2);
assertThat(Files.readAttributes(path,
"basic:isRegularFile,isDirectory,someThing")).isNotNull().hasSize(2);
assertThat(Files.readAttributes(path,
"basic:someThing")).isNotNull().hasSize(0);
assertThat(Files.readAttributes(path,
"isRegularFile")).isNotNull().hasSize(1);
assertThat(Files.readAttributes(path,
"isRegularFile,isDirectory")).isNotNull().hasSize(2);
assertThat(Files.readAttributes(path,
"isRegularFile,isDirectory,someThing")).isNotNull().hasSize(2);
assertThat(Files.readAttributes(path,
"someThing")).isNotNull().hasSize(0);
try {
Files.readAttributes(path,
":someThing");
fail("undefined view");
} catch (IllegalArgumentException ex) {
}
try {
Files.readAttributes(path,
"advanced:isRegularFile");
fail("undefined view");
} catch (UnsupportedOperationException ex) {
}
}
// Null/empty attribute strings and missing files are rejected.
@Test(expected = IllegalArgumentException.class)
public void readAttributesMapNull1() throws IOException {
Files.readAttributes(null,
"*");
}
@Test(expected = IllegalArgumentException.class)
public void readAttributesMapNull2() throws IOException {
final Path path = Files.createTempFile("foo",
"bar");
Files.readAttributes(path,
(String) null);
}
@Test(expected = IllegalArgumentException.class)
public void readAttributesMapNull3() throws IOException {
Files.readAttributes(null,
(String) null);
}
@Test(expected = IllegalArgumentException.class)
public void readAttributesMapEmpty() throws IOException {
final Path path = Files.createTempFile("foo",
"bar");
Files.readAttributes(path,
"");
}
@Test(expected = NoSuchFileException.class)
public void readAttributesMapNoSuchFileException() throws IOException {
final Path path = Paths.get("/path/to/file.txt");
Files.readAttributes(path,
"*");
}
// setAttribute argument validation: nulls and empty names are illegal,
// unknown attributes are an illegal state, unknown views unsupported, and
// the basic view's attributes are read-only here (NotImplementedException).
@Test(expected = IllegalArgumentException.class)
public void setAttributeNull1() throws IOException {
final Path path = Files.createTempFile("foo",
"bar");
Files.setAttribute(path,
null,
null);
}
@Test(expected = IllegalArgumentException.class)
public void setAttributeNull2() throws IOException {
Files.setAttribute(null,
"some",
null);
}
@Test(expected = IllegalArgumentException.class)
public void setAttributeNull3() throws IOException {
Files.setAttribute(null,
null,
null);
}
@Test(expected = IllegalArgumentException.class)
public void setAttributeEmpty() throws IOException {
final Path path = Files.createTempFile("foo",
"bar");
Files.setAttribute(path,
"",
null);
}
@Test(expected = IllegalStateException.class)
public void setAttributeInvalidAttr() throws IOException {
final Path path = Files.createTempFile("foo",
"bar");
Files.setAttribute(path,
"myattr",
null);
}
@Test(expected = UnsupportedOperationException.class)
public void setAttributeInvalidView() throws IOException {
final Path path = Files.createTempFile("foo",
"bar");
Files.setAttribute(path,
"advanced:isRegularFile",
null);
}
@Test(expected = IllegalArgumentException.class)
public void setAttributeInvalidView2() throws IOException {
final Path path = Files.createTempFile("foo",
"bar");
Files.setAttribute(path,
":isRegularFile",
null);
}
@Test(expected = NotImplementedException.class)
public void setAttributeNotImpl() throws IOException {
final Path path = Files.createTempFile("foo",
"bar");
Files.setAttribute(path,
"isRegularFile",
null);
}
// getAttribute resolves a single known attribute (with or without the
// "basic:" prefix); unknown names yield null.
@Test
public void readAttribute() throws IOException {
final Path path = Files.createTempFile("foo",
"bar");
assertThat(Files.getAttribute(path,
"basic:isRegularFile")).isNotNull();
assertThat(Files.getAttribute(path,
"basic:someThing")).isNull();
assertThat(Files.getAttribute(path,
"isRegularFile")).isNotNull();
assertThat(Files.getAttribute(path,
"someThing")).isNull();
}
// Wildcards and multi-attribute lists are only valid for readAttributes,
// not the single-value getAttribute.
@Test(expected = IllegalArgumentException.class)
public void readAttributeInvalid() throws IOException {
final Path path = Files.createTempFile("foo",
"bar");
assertThat(Files.getAttribute(path,
"*")).isNotNull();
}
@Test(expected = IllegalArgumentException.class)
public void readAttributeInvalid2() throws IOException {
final Path path = Files.createTempFile("foo",
"bar");
assertThat(Files.getAttribute(path,
"isRegularFile,isDirectory")).isNull();
}
@Test(expected = NoSuchFileException.class)
public void readAttributeInvalid3() throws IOException {
final Path path = Paths.get("/path/to/file.txt");
Files.getAttribute(path,
"isRegularFile");
}
// Reading the last-modified time works for existing files only.
@Test
public void getLastModifiedTime() throws IOException {
final Path path = Files.createTempFile("foo",
"bar");
assertThat(Files.getLastModifiedTime(path)).isNotNull();
}
@Test(expected = NoSuchFileException.class)
public void getLastModifiedTimeNoSuchFileException() throws IOException {
final Path path = Paths.get("/path/to/file");
Files.getLastModifiedTime(path);
}
@Test(expected = IllegalArgumentException.class)
public void getLastModifiedTimeNull() throws IOException {
Files.getLastModifiedTime(null);
}
// Writing the last-modified time is not implemented by this backend.
@Test(expected = NotImplementedException.class)
public void setLastModifiedTime() throws IOException {
final Path path = Files.createTempFile("foo",
"bar");
Files.setLastModifiedTime(path,
null);
}
@Test(expected = NoSuchFileException.class)
public void setLastModifiedTimeNoSuchFileException() throws IOException {
final Path path = Paths.get("/path/to/file");
Files.setLastModifiedTime(path,
null);
}
@Test(expected = IllegalArgumentException.class)
public void setLastModifiedTimeNull() throws IOException {
Files.setLastModifiedTime(null,
null);
}
// NOTE(review): this duplicates setLastModifiedTime() above verbatim —
// presumably one of the two was meant to cover a different case.
@Test(expected = NotImplementedException.class)
public void setLastModifiedTimeNull2() throws IOException {
final Path path = Files.createTempFile("foo",
"bar");
Files.setLastModifiedTime(path,
null);
}
// size() reports 0 for a fresh temp file and 1 after writing one byte.
@Test
public void size() throws IOException {
final Path path = Files.createTempFile("foo",
"bar");
assertThat(Files.size(path)).isNotNull().isEqualTo(0L);
final Path sourceFile = newTempDir().resolve("file.txt");
final OutputStream stream = Files.newOutputStream(sourceFile);
stream.write('a');
stream.close();
assertThat(Files.size(sourceFile)).isNotNull().isEqualTo(1L);
}
@Test(expected = NoSuchFileException.class)
public void sizeNoSuchFileException() throws IOException {
final Path path = Paths.get("/path/to/file");
Files.size(path);
}
@Test(expected = IllegalArgumentException.class)
public void sizeNull() throws IOException {
Files.size(null);
}
@Test
public void exists() throws IOException {
final Path path = Files.createTempFile("foo",
"bar");
assertThat(Files.exists(path)).isTrue();
assertThat(Files.exists(newTempDir())).isTrue();
assertThat(Files.exists(Paths.get("/some/path/here"))).isFalse();
}
@Test(expected = IllegalArgumentException.class)
public void existsNull() throws IOException {
    final Path nullPath = null;
    Files.exists(nullPath);
}
@Test
public void notExists() throws IOException {
    // notExists is the inverse of exists for created files/dirs...
    assertThat(Files.notExists(Files.createTempFile("foo", "bar"))).isFalse();
    assertThat(Files.notExists(newTempDir())).isFalse();
    // ...and true for paths that were never created.
    assertThat(Files.notExists(Paths.get("/some/path/here"))).isTrue();
    assertThat(Files.notExists(newTempDir().resolve("some.text"))).isTrue();
}
@Test(expected = IllegalArgumentException.class)
public void notExistsNull() throws IOException {
    final Path nullPath = null;
    Files.notExists(nullPath);
}
@Test
public void isSameFile() throws IOException {
    final Path file = Files.createTempFile("foo", "bar");

    // A path is the same file as an identical path rebuilt from its string form.
    assertThat(Files.isSameFile(file, Paths.get(file.toString()))).isTrue();
    // Two distinct temp files are never the same file.
    assertThat(Files.isSameFile(file, Files.createTempFile("foo", "bar"))).isFalse();
    assertThat(Files.isSameFile(newTempDir(), newTempDir())).isFalse();

    final Path dir = newTempDir();
    assertThat(Files.isSameFile(dir, Paths.get(dir.toString()))).isTrue();

    assertThat(Files.isSameFile(Paths.get("/path/to/some/place"), Paths.get("/path/to/some/place"))).isTrue();
    assertThat(Files.isSameFile(Paths.get("/path/to/some/place"), Paths.get("/path/to/some/place/a"))).isFalse();
}
@Test(expected = IllegalArgumentException.class)
public void isSameFileNull1() throws IOException {
    // Null second argument is rejected even when the first path exists.
    final Path existing = Files.createTempFile("foo", "bar");
    Files.isSameFile(existing, null);
}
@Test(expected = IllegalArgumentException.class)
public void isSameFileNull2() throws IOException {
    // Null first argument is rejected even when the second path exists.
    final Path existing = Files.createTempFile("foo", "bar");
    Files.isSameFile(null, existing);
}
@Test(expected = IllegalArgumentException.class)
public void isSameFileNull3() throws IOException {
    // Both arguments null is rejected as well.
    final Path nothing = null;
    Files.isSameFile(nothing, nothing);
}
@Test
public void isHidden() throws IOException {
    // Neither created files/dirs nor nonexistent paths are hidden here.
    assertThat(Files.isHidden(Files.createTempFile("foo", "bar"))).isFalse();
    assertThat(Files.isHidden(newTempDir())).isFalse();
    assertThat(Files.isHidden(Paths.get("/some/file"))).isFalse();
}
@Test(expected = IllegalArgumentException.class)
public void isHiddenNull() throws IOException {
    final Path nullPath = null;
    Files.isHidden(nullPath);
}
@Test
public void isReadable() throws IOException {
    // Existing files and directories are readable; nonexistent paths are not.
    assertThat(Files.isReadable(Files.createTempFile("foo", "bar"))).isTrue();
    assertThat(Files.isReadable(newTempDir())).isTrue();
    assertThat(Files.isReadable(Paths.get("/some/file"))).isFalse();
}
@Test(expected = IllegalArgumentException.class)
public void isReadableNull() throws IOException {
    final Path nullPath = null;
    Files.isReadable(nullPath);
}
@Test
public void isWritable() throws IOException {
    // Existing files and directories are writable; nonexistent paths are not.
    assertThat(Files.isWritable(Files.createTempFile("foo", "bar"))).isTrue();
    assertThat(Files.isWritable(newTempDir())).isTrue();
    assertThat(Files.isWritable(Paths.get("/some/file"))).isFalse();
}
@Test(expected = IllegalArgumentException.class)
public void isWritableNull() throws IOException {
    final Path nullPath = null;
    Files.isWritable(nullPath);
}
@Test
public void isExecutable() throws IOException {
    // Only directories are executable; plain files and missing paths are not.
    assertThat(Files.isExecutable(Files.createTempFile("foo", "bar"))).isFalse();
    assertThat(Files.isExecutable(newTempDir())).isTrue();
    assertThat(Files.isExecutable(Paths.get("/some/file"))).isFalse();
}
@Test(expected = IllegalArgumentException.class)
public void isExecutableNull() throws IOException {
    final Path nullPath = null;
    Files.isExecutable(nullPath);
}
@Test
public void isSymbolicLink() throws IOException {
    // Regular files, directories and missing paths are never symlinks.
    assertThat(Files.isSymbolicLink(Files.createTempFile("foo", "bar"))).isFalse();
    assertThat(Files.isSymbolicLink(newTempDir())).isFalse();
    assertThat(Files.isSymbolicLink(Paths.get("/some/file"))).isFalse();
}
@Test(expected = IllegalArgumentException.class)
public void isSymbolicLinkNull() throws IOException {
    final Path nullPath = null;
    Files.isSymbolicLink(nullPath);
}
@Test
public void isDirectory() throws IOException {
    // Only the created directory qualifies; files and missing paths do not.
    assertThat(Files.isDirectory(Files.createTempFile("foo", "bar"))).isFalse();
    assertThat(Files.isDirectory(newTempDir())).isTrue();
    assertThat(Files.isDirectory(Paths.get("/some/file"))).isFalse();
}
@Test(expected = IllegalArgumentException.class)
public void isDirectoryNull() throws IOException {
    // BUGFIX: this test previously called Files.isSymbolicLink(null)
    // (copy-paste from isSymbolicLinkNull), leaving isDirectory's null
    // handling untested. It now exercises the method it is named after.
    Files.isDirectory(null);
}
@Test
public void isRegularFile() throws IOException {
    // Only the created file qualifies; directories and missing paths do not.
    assertThat(Files.isRegularFile(Files.createTempFile("foo", "bar"))).isTrue();
    assertThat(Files.isRegularFile(newTempDir())).isFalse();
    assertThat(Files.isRegularFile(Paths.get("/some/file"))).isFalse();
}
@Test(expected = IllegalArgumentException.class)
public void isRegularFileNull() throws IOException {
    final Path nullPath = null;
    Files.isRegularFile(nullPath);
}
// Marker attribute-view subtype — presumably used by attribute-lookup tests
// elsewhere in this class to request a custom view type; confirm usage.
private static interface MyAttrsView extends BasicFileAttributeView {
}
// Marker attributes subtype — presumably used by readAttributes-style tests
// elsewhere in this class; confirm usage.
private static interface MyAttrs extends BasicFileAttributes {
}
}
| |
/*
* Copyright 2017 David Karnok
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package hu.akarnokd.reactive4javaflow.impl.operators;
import hu.akarnokd.reactive4javaflow.*;
import hu.akarnokd.reactive4javaflow.fused.*;
import hu.akarnokd.reactive4javaflow.impl.EmptySubscription;
import java.util.concurrent.Flow;
/**
 * Relays at most {@code n} items from the source Folyam, then cancels the
 * upstream and signals onComplete. Chooses a conditional or plain subscriber
 * wrapper based on the downstream's capabilities and participates in
 * operator fusion when the upstream supports it.
 */
public final class FolyamTake<T> extends Folyam<T> {
// Upstream whose item count is being limited.
final Folyam<T> source;
// Maximum number of items to let through.
final long n;
public FolyamTake(Folyam<T> source, long n) {
this.source = source;
this.n = n;
}
@Override
protected void subscribeActual(FolyamSubscriber<? super T> s) {
// Pick the wrapper matching the downstream's capabilities.
if (s instanceof ConditionalSubscriber) {
source.subscribe(new TakeConditionalSubscriber<>((ConditionalSubscriber<? super T>) s, n));
} else {
source.subscribe(new TakeSubscriber<>(s, n));
}
}
/**
 * Shared counting state and fusion plumbing for both take-subscriber
 * variants. {@code remaining} counts down to 0; -1 marks the terminal
 * state reached via {@link #poll()}.
 */
static abstract class AbstractTakeSubscriber<T> implements FolyamSubscriber<T>, FusedSubscription<T> {
// Items still allowed through; -1 once the fused terminal signal was emitted.
long remaining;
Flow.Subscription upstream;
// Non-null when the upstream itself supports fusion.
FusedSubscription<T> qs;
boolean done;
// Fusion mode granted by the upstream via requestFusion().
int fusionMode;
AbstractTakeSubscriber(long remaining) {
this.remaining = remaining;
}
// Invoked once the upstream subscription is captured; subclasses use it
// to short-circuit the take(0) case before exposing the subscription.
abstract void onStart();
@Override
public final void onSubscribe(Flow.Subscription subscription) {
upstream = subscription;
if (subscription instanceof FusedSubscription) {
qs = (FusedSubscription<T>)subscription;
}
onStart();
}
@Override
public final T poll() throws Throwable {
long r = remaining;
if (r < 0L) {
// Terminal state was already reached on an earlier poll.
return null;
}
if (r == 0L) {
// Quota exhausted: in ASYNC mode the completion signal is delivered
// from poll(), then -1 marks this subscriber as terminated.
if (fusionMode == ASYNC) {
onComplete();
}
remaining = -1L;
return null;
}
T v = qs.poll();
if (v == null) {
return null;
}
remaining = --r;
if (r == 0L && fusionMode == ASYNC) {
// Last item taken: stop the upstream eagerly.
cancel();
}
return v;
}
@Override
public final boolean isEmpty() {
long r = remaining;
if (r == 0 && fusionMode == ASYNC) {
// Report non-empty so one final poll() runs and emits onComplete.
return false;
}
return qs.isEmpty() || r <= 0L;
}
@Override
public final void clear() {
qs.clear();
}
@Override
public final int requestFusion(int mode) {
// Delegate to the upstream queue when fused; remember the granted mode.
FusedSubscription<T> fs = qs;
if (fs != null) {
int m = fs.requestFusion(mode);
fusionMode = m;
return m;
}
return NONE;
}
@Override
public final void request(long n) {
upstream.request(n);
}
@Override
public final void cancel() {
upstream.cancel();
}
}
/** Take subscriber for plain (non-conditional) downstream consumers. */
static final class TakeSubscriber<T> extends AbstractTakeSubscriber<T> {
final FolyamSubscriber<? super T> actual;
TakeSubscriber(FolyamSubscriber<? super T> actual, long remaining) {
super(remaining);
this.actual = actual;
}
@Override
void onStart() {
if (remaining == 0) {
// take(0): never expose this subscription downstream, complete at once.
upstream.cancel();
done = true;
EmptySubscription.complete(actual);
} else {
actual.onSubscribe(this);
}
}
@Override
public void onNext(T item) {
if (done) {
return;
}
if (item == null) {
// Presumably the fused "item available" notification — forwarded
// without consuming the quota; confirm against the fusion protocol.
actual.onNext(null);
return;
}
long r = remaining - 1;
actual.onNext(item);
if (r == 0L) {
// Quota reached: stop the upstream and complete the downstream.
upstream.cancel();
onComplete();
}
remaining = r;
}
@Override
public void onError(Throwable throwable) {
if (done) {
// Late errors cannot be delivered; route to the global handler.
FolyamPlugins.onError(throwable);
return;
}
done = true;
actual.onError(throwable);
}
@Override
public void onComplete() {
if (done) {
return;
}
done = true;
actual.onComplete();
}
}
/** Take subscriber for downstreams that support tryOnNext (conditional). */
static final class TakeConditionalSubscriber<T> extends AbstractTakeSubscriber<T> implements ConditionalSubscriber<T> {
final ConditionalSubscriber<? super T> actual;
TakeConditionalSubscriber(ConditionalSubscriber<? super T> actual, long remaining) {
super(remaining);
this.actual = actual;
}
@Override
void onStart() {
if (remaining == 0) {
// take(0): never expose this subscription downstream, complete at once.
upstream.cancel();
done = true;
EmptySubscription.complete(actual);
} else {
actual.onSubscribe(this);
}
}
@Override
public void onNext(T item) {
// If the downstream dropped the item, replenish the request by one.
if (!tryOnNext(item) && !done) {
upstream.request(1);
}
}
@Override
public boolean tryOnNext(T item) {
if (done) {
return false;
}
if (item == null) {
// Presumably the fused notification pass-through — confirm against
// the fusion protocol.
return actual.tryOnNext(null);
}
long r = remaining - 1;
boolean b = actual.tryOnNext(item);
remaining = r;
if (r == 0L) {
// Quota reached: stop the upstream and complete the downstream.
upstream.cancel();
onComplete();
}
return b;
}
@Override
public void onError(Throwable throwable) {
if (done) {
// Late errors cannot be delivered; route to the global handler.
FolyamPlugins.onError(throwable);
return;
}
done = true;
actual.onError(throwable);
}
@Override
public void onComplete() {
if (done) {
return;
}
done = true;
actual.onComplete();
}
}
}
| |
package org.jbake.app;
import org.apache.commons.configuration.CompositeConfiguration;
import org.jbake.app.ConfigUtil.Keys;
import org.jbake.template.DelegatingTemplateEngine;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
 * Render output to a file.
 *
 * @author Jonathan Bullock <jonbullock@gmail.com>
 */
public class Renderer {

    /** Bundles everything needed to render a single output file. */
    private interface RenderingConfig {
        File getPath();
        String getName();
        String getTemplate();
        Map<String, Object> getModel();
    }

    /** Shared path/name/template plumbing for rendering configs. */
    private abstract static class AbstractRenderingConfig implements RenderingConfig {

        protected final File path;
        protected final String name;
        protected final String template;

        public AbstractRenderingConfig(File path, String name, String template) {
            super();
            this.path = path;
            this.name = name;
            this.template = template;
        }

        @Override
        public File getPath() {
            return path;
        }

        @Override
        public String getName() {
            return name;
        }

        @Override
        public String getTemplate() {
            return template;
        }
    }

    /** Rendering config backed by an explicit, caller-supplied model. */
    public static class ModelRenderingConfig extends AbstractRenderingConfig {

        private final Map<String, Object> model;

        public ModelRenderingConfig(File path, String name, Map<String, Object> model, String template) {
            super(path, name, template);
            this.model = model;
        }

        @Override
        public Map<String, Object> getModel() {
            return model;
        }
    }

    /**
     * Rendering config for the common case where a single "all in one" name
     * is used for the output name, the template lookup key and the content type.
     */
    class DefaultRenderingConfig extends AbstractRenderingConfig {

        private final Object content;

        private DefaultRenderingConfig(File path, String allInOneName) {
            super(path, allInOneName, findTemplateName(allInOneName));
            this.content = Collections.singletonMap("type", allInOneName);
        }

        public DefaultRenderingConfig(String filename, String allInOneName) {
            super(new File(destination.getPath() + File.separator + filename), allInOneName, findTemplateName(allInOneName));
            this.content = Collections.singletonMap("type", allInOneName);
        }

        /**
         * Constructor added due to known use of a allInOneName which is used for name, template and content
         *
         * @param allInOneName name used for the output file, template lookup and content type
         */
        public DefaultRenderingConfig(String allInOneName) {
            this(new File(destination.getPath() + File.separator + allInOneName + config.getString(Keys.OUTPUT_EXTENSION)),
                    allInOneName);
        }

        @Override
        public Map<String, Object> getModel() {
            Map<String, Object> model = new HashMap<String, Object>();
            model.put("renderer", renderingEngine);
            model.put("content", content);
            return model;
        }
    }

    private static final Logger LOGGER = LoggerFactory.getLogger(Renderer.class);

    // TODO: should all content be made available to all templates via this class??
    private final File destination;
    private final CompositeConfiguration config;
    private final DelegatingTemplateEngine renderingEngine;

    /**
     * Creates a new instance of Renderer with supplied references to folders.
     *
     * @param db The database holding the content
     * @param destination The destination folder
     * @param templatesPath The templates folder
     * @param config Project configuration
     */
    public Renderer(ContentStore db, File destination, File templatesPath, CompositeConfiguration config) {
        this.destination = destination;
        this.config = config;
        this.renderingEngine = new DelegatingTemplateEngine(config, db, destination, templatesPath);
    }

    /** Looks up the configured template file for a document type (key "template.<type>.file"). */
    private String findTemplateName(String docType) {
        return config.getString("template." + docType + ".file");
    }

    /**
     * Render the supplied content to a file.
     *
     * @param content The content to renderDocument
     * @throws Exception if rendering or writing the output file fails; the
     *         underlying failure is preserved as the cause
     */
    public void render(Map<String, Object> content) throws Exception {
        String docType = (String) content.get(Crawler.Attributes.TYPE);
        String outputFilename = destination.getPath() + File.separatorChar + (String) content.get(Crawler.Attributes.URI);
        if (outputFilename.lastIndexOf(".") > 0) {
            outputFilename = outputFilename.substring(0, outputFilename.lastIndexOf("."));
        }

        // delete existing versions if they exist in case status has changed either way
        File draftFile = new File(outputFilename + config.getString(Keys.DRAFT_SUFFIX) + FileUtil.findExtension(config, docType));
        if (draftFile.exists()) {
            draftFile.delete();
        }
        File publishedFile = new File(outputFilename + FileUtil.findExtension(config, docType));
        if (publishedFile.exists()) {
            publishedFile.delete();
        }

        if (content.get(Crawler.Attributes.STATUS).equals(Crawler.Attributes.Status.DRAFT)) {
            outputFilename = outputFilename + config.getString(Keys.DRAFT_SUFFIX);
        }

        File outputFile = new File(outputFilename + FileUtil.findExtension(config, docType));
        StringBuilder sb = new StringBuilder();
        sb.append("Rendering [").append(outputFile).append("]... ");

        Map<String, Object> model = new HashMap<String, Object>();
        model.put("content", content);
        model.put("renderer", renderingEngine);

        // try-with-resources: the writer is closed even when rendering throws
        // (previously it leaked on failure).
        try (Writer out = createWriter(outputFile)) {
            renderingEngine.renderDocument(model, findTemplateName(docType), out);
            sb.append("done!");
            LOGGER.info(sb.toString());
        } catch (Exception e) {
            sb.append("failed!");
            LOGGER.error(sb.toString(), e);
            // preserve the original exception as the cause instead of discarding it
            throw new Exception("Failed to render file. Cause: " + e.getMessage(), e);
        }
    }

    /** Creates the output file (and any missing parent directories) and opens a writer on it. */
    private Writer createWriter(File file) throws IOException {
        if (!file.exists()) {
            file.getParentFile().mkdirs();
            file.createNewFile();
        }

        return new OutputStreamWriter(new FileOutputStream(file), config.getString(ConfigUtil.Keys.RENDER_ENCODING));
    }

    /** Renders a single output file described by the given config, wrapping failures with context. */
    private void render(RenderingConfig renderConfig) throws Exception {
        File outputFile = renderConfig.getPath();
        StringBuilder sb = new StringBuilder();
        sb.append("Rendering ").append(renderConfig.getName()).append(" [").append(outputFile).append("]...");

        // try-with-resources so the writer does not leak when rendering throws.
        try (Writer out = createWriter(outputFile)) {
            renderingEngine.renderDocument(renderConfig.getModel(), renderConfig.getTemplate(), out);
            sb.append("done!");
            LOGGER.info(sb.toString());
        } catch (Exception e) {
            sb.append("failed!");
            LOGGER.error(sb.toString(), e);
            throw new Exception("Failed to render " + renderConfig.getName(), e);
        }
    }

    /**
     * Render an index file using the supplied content.
     *
     * @param indexFile The name of the output file
     * @param db The content store used to page through posts
     * @throws Exception if rendering fails; the underlying failure is preserved as the cause
     */
    public void renderIndex(String indexFile, ContentStore db) throws Exception {
        long totalPosts = db.countClass("post");
        boolean paginate = config.getBoolean(Keys.PAGINATE_INDEX, false);
        int postsPerPage = config.getInt(Keys.POSTS_PER_PAGE, -1);
        int start = 0;

        // NOTE(review): previous/nextFileName are put into this local model, but
        // render() below uses DefaultRenderingConfig's own model — so these keys
        // apparently never reach the template. Confirm intended behaviour.
        Map<String, Object> model = new HashMap<String, Object>();
        model.put("renderer", renderingEngine);
        model.put("content", buildSimpleModel("masterindex"));

        if (paginate) {
            db.setLimit(postsPerPage);
        }

        try {
            int page = 1;
            while (start < totalPosts) {
                String fileName = indexFile;

                if (paginate) {
                    db.setStart(start);
                    int index = fileName.lastIndexOf(".");

                    if (page != 1) {
                        String previous = fileName.substring(0, index) + (page > 2 ? page - 1 : "")
                                + fileName.substring(index);
                        model.put("previousFileName", previous);
                    } else {
                        model.remove("previousFileName");
                    }

                    // If this iteration won't consume the remaining posts, calculate
                    // the next file name
                    if ((start + postsPerPage) < totalPosts) {
                        model.put("nextFileName", fileName.substring(0, index) + (page + 1)
                                + fileName.substring(index));
                    } else {
                        model.remove("nextFileName");
                    }

                    // Add page number to file name
                    fileName = fileName.substring(0, index) + (page > 1 ? page : "")
                            + fileName.substring(index);
                }

                render(new DefaultRenderingConfig(fileName, "masterindex"));

                if (paginate) {
                    start += postsPerPage;
                    page++;
                } else {
                    break; // TODO: eww
                }
            }
        } catch (Exception e) {
            // preserve the original exception as the cause instead of discarding it
            throw new Exception("Failed to render index. Cause: " + e.getMessage(), e);
        } finally {
            // always restore pagination state, even when rendering fails midway
            db.resetPagination();
        }
    }

    /**
     * Render an XML sitemap file using the supplied content.
     *
     * @param sitemapFile The name of the output file
     * @throws Exception if rendering fails
     *
     * @see <a href="https://support.google.com/webmasters/answer/156184?hl=en&ref_topic=8476">About Sitemaps</a>
     * @see <a href="http://www.sitemaps.org/">Sitemap protocol</a>
     */
    public void renderSitemap(String sitemapFile) throws Exception {
        render(new DefaultRenderingConfig(sitemapFile, "sitemap"));
    }

    /**
     * Render an XML feed file using the supplied content.
     *
     * @param feedFile The name of the output file
     * @throws Exception if rendering fails
     */
    public void renderFeed(String feedFile) throws Exception {
        render(new DefaultRenderingConfig(feedFile, "feed"));
    }

    /**
     * Render an archive file using the supplied content.
     *
     * @param archiveFile The name of the output file
     * @throws Exception if rendering fails
     */
    public void renderArchive(String archiveFile) throws Exception {
        render(new DefaultRenderingConfig(archiveFile, "archive"));
    }

    /**
     * Render tag files using the supplied content.
     *
     * @param tags The content to renderDocument
     * @param tagPath The output path
     * @return the number of tag files successfully rendered
     * @throws Exception if any tag failed to render (all failures are collected first)
     */
    public int renderTags(Set<String> tags, String tagPath) throws Exception {
        int renderedCount = 0;
        final List<String> errors = new LinkedList<String>();
        for (String tag : tags) {
            try {
                Map<String, Object> model = new HashMap<String, Object>();
                model.put("renderer", renderingEngine);
                model.put("tag", tag);
                model.put("content", Collections.singletonMap("type", "tag"));

                tag = tag.trim().replace(" ", "-");
                File path = new File(destination.getPath() + File.separator + tagPath + File.separator + tag + config.getString(Keys.OUTPUT_EXTENSION));
                render(new ModelRenderingConfig(path, "tag", model, findTemplateName("tag")));
                renderedCount++;
            } catch (Exception e) {
                // render(RenderingConfig) wraps failures with a cause, but guard
                // against a missing cause to avoid an NPE masking the real error.
                Throwable cause = (e.getCause() != null) ? e.getCause() : e;
                errors.add(cause.getMessage());
            }
        }
        if (!errors.isEmpty()) {
            StringBuilder sb = new StringBuilder();
            sb.append("Failed to render tags. Cause(s):");
            for (String error : errors) {
                sb.append("\n" + error);
            }
            throw new Exception(sb.toString());
        } else {
            return renderedCount;
        }
    }

    /**
     * Builds simple map of values, which are exposed when rendering index/archive/sitemap/feed/tags.
     *
     * @param type the content type to expose to the template
     * @return a mutable model pre-populated with defaults
     */
    private Map<String, Object> buildSimpleModel(String type) {
        Map<String, Object> content = new HashMap<String, Object>();
        content.put("type", type);
        content.put("rootpath", "");
        // add any more keys here that need to have a default value to prevent need to perform null check in templates
        return content;
    }
}
| |
/*
* Copyright (c) 2017, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*
*/
package org.wso2.carbon.apimgt.keymgt.listeners;
import org.apache.axis2.AxisFault;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mockito;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import org.wso2.carbon.apimgt.api.APIManagementException;
import org.wso2.carbon.apimgt.impl.APIConstants;
import org.wso2.carbon.apimgt.impl.APIManagerConfiguration;
import org.wso2.carbon.apimgt.impl.APIManagerConfigurationService;
import org.wso2.carbon.apimgt.impl.dao.ApiMgtDAO;
import org.wso2.carbon.apimgt.impl.dto.Environment;
import org.wso2.carbon.apimgt.impl.internal.ServiceReferenceHolder;
import org.wso2.carbon.apimgt.impl.utils.APIAuthenticationAdminClient;
import org.wso2.carbon.apimgt.impl.utils.APIUtil;
import org.wso2.carbon.apimgt.impl.workflow.WorkflowException;
import org.wso2.carbon.apimgt.impl.workflow.WorkflowExecutor;
import org.wso2.carbon.base.MultitenantConstants;
import org.wso2.carbon.user.api.Tenant;
import org.wso2.carbon.user.core.UserCoreConstants;
import org.wso2.carbon.user.core.UserStoreException;
import org.wso2.carbon.user.core.UserStoreManager;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
/**
 * KeyManagerUserOperationListener Test Case.
 *
 * Exercises the pre-delete and pre-update hooks of the listener with mocked
 * DAO, workflow-executor and configuration collaborators. Static access to
 * {@link ServiceReferenceHolder} is stubbed via PowerMock in {@link #init()}.
 */
@RunWith(PowerMockRunner.class)
@PrepareForTest({ServiceReferenceHolder.class})
public class KeyManagerUserOperationListenerTestCase {
// Mocked collaborators shared by the individual test cases (rebuilt per test).
private UserStoreManager userStoreManager;
private ApiMgtDAO apiMgtDAO;
private Tenant tenant;
private String username;
private WorkflowExecutor workflowExecutor;
private APIManagerConfiguration config;
private APIAuthenticationAdminClient apiAuthenticationAdminClient;
private Map<String, Environment> environmentMap;
@Before
public void init() {
userStoreManager = Mockito.mock(UserStoreManager.class);
apiMgtDAO = Mockito.mock(ApiMgtDAO.class);
workflowExecutor = Mockito.mock(WorkflowExecutor.class);
config = Mockito.mock(APIManagerConfiguration.class);
apiAuthenticationAdminClient = Mockito.mock(APIAuthenticationAdminClient.class);
tenant = new Tenant();
username = "testuser";
environmentMap = new HashMap<String, Environment>();
// Stub the static ServiceReferenceHolder singleton so configuration can be
// read without a running server; must happen before APIUtil.init() below.
PowerMockito.mockStatic(ServiceReferenceHolder.class);
ServiceReferenceHolder serviceReferenceHolder = Mockito.mock(ServiceReferenceHolder.class);
PowerMockito.when(ServiceReferenceHolder.getInstance()).thenReturn(serviceReferenceHolder);
APIManagerConfigurationService apiManagerConfigurationService = Mockito.mock(APIManagerConfigurationService
.class);
Mockito.doReturn(apiManagerConfigurationService).when(serviceReferenceHolder)
.getAPIManagerConfigurationService();
APIManagerConfiguration apiManagerConfiguration = Mockito.mock(APIManagerConfiguration.class);
// Publisher role cache disabled for these tests.
Mockito.doReturn("false").when(apiManagerConfiguration).getFirstProperty(APIConstants
.PUBLISHER_ROLE_CACHE_ENABLED);
Mockito.doReturn(apiManagerConfiguration).when(apiManagerConfigurationService).getAPIManagerConfiguration();
APIUtil.init();
}
// Deleting a user with no tenant (super tenant path) must not block deletion.
@Test
public void testDoPreDeleteSuperTenantUser() throws APIManagementException {
KeyManagerUserOperationListener keyManagerUserOperationListener = new KeyManagerUserOperationListenerWrapper
(apiMgtDAO, workflowExecutor, null, config);
Assert.assertTrue(keyManagerUserOperationListener.doPreDeleteUser(username, userStoreManager));
}
// Deleting a user that belongs to a non-super tenant must also proceed.
@Test
public void testDoPreDeleteTenantUser() throws APIManagementException {
tenant.setId(1);
tenant.setDomain("wso2.com");
WorkflowExecutor workflowExecutor = Mockito.mock(WorkflowExecutor.class);
APIManagerConfiguration apiManagerConfiguration = Mockito.mock(APIManagerConfiguration.class);
KeyManagerUserOperationListener keyManagerUserOperationListener = new KeyManagerUserOperationListenerWrapper
(apiMgtDAO, workflowExecutor, tenant, apiManagerConfiguration);
Assert.assertTrue(keyManagerUserOperationListener.doPreDeleteUser(username, userStoreManager));
}
// Deletion of a super-tenant user stored in the PRIMARY user store must proceed.
@Test
public void testDoPreDeleteUserInPrimaryUserStore() throws APIManagementException, org.wso2.carbon.user.api
.UserStoreException {
tenant.setId(MultitenantConstants.SUPER_TENANT_ID);
tenant.setDomain(MultitenantConstants.SUPER_TENANT_DOMAIN_NAME);
Map<String, String> userStoreProperties = new HashMap<String, String>();
userStoreProperties.put(UserCoreConstants.RealmConfig.PROPERTY_DOMAIN_NAME, UserCoreConstants
.PRIMARY_DEFAULT_DOMAIN_NAME);
Mockito.when(userStoreManager.getProperties(tenant)).thenReturn(userStoreProperties);
KeyManagerUserOperationListener keyManagerUserOperationListener = new KeyManagerUserOperationListenerWrapper
(apiMgtDAO, workflowExecutor, tenant, config);
Assert.assertTrue(keyManagerUserOperationListener.doPreDeleteUser(username, userStoreManager));
}
// When the handler is disabled, deletion continues even if token retrieval fails.
@Test
public void testDoPreDeleteUserWhenHandlerIsDisabled() throws APIManagementException, org
.wso2.carbon.user.api.UserStoreException {
apiAuthenticationAdminClient = Mockito.mock(APIAuthenticationAdminClient.class);
KeyManagerUserOperationListener keyManagerUserOperationListener = new KeyManagerUserOperationListenerWrapper
(apiMgtDAO, workflowExecutor, null, config, apiAuthenticationAdminClient, false);
Environment environment = new Environment();
environmentMap.put("hybrid", environment);
Mockito.when(config.getApiGatewayEnvironments()).thenReturn(environmentMap);
//Throws APIMgtException while retrieving access tokens
Mockito.doThrow(APIManagementException.class).when(apiMgtDAO).getActiveAccessTokensOfUser(username);
//Should always continue when the handler is disabled, even though Gateway cache update fails
Assert.assertTrue(keyManagerUserOperationListener.doPreDeleteUser(username, userStoreManager));
}
// With no gateway environments configured, the token-retrieval failure is never reached.
@Test
public void testDoPreDeleteUserWhenGatewayEnvironmentsAreNotAvailable() throws APIManagementException {
apiAuthenticationAdminClient = Mockito.mock(APIAuthenticationAdminClient.class);
KeyManagerUserOperationListener keyManagerUserOperationListener = new KeyManagerUserOperationListenerWrapper
(apiMgtDAO, workflowExecutor, null, config, apiAuthenticationAdminClient, true);
Mockito.when(config.getApiGatewayEnvironments()).thenReturn(environmentMap);
//Throws APIMgtException while retrieving access tokens
Mockito.doThrow(APIManagementException.class).when(apiMgtDAO).getActiveAccessTokensOfUser(username);
//Should always return true when the gateway environments are not available,
//before throwing APIManagementException while retrieving accessTokens
Assert.assertTrue(keyManagerUserOperationListener.doPreDeleteUser(username, userStoreManager));
}
// An empty active-token set must not block deletion.
@Test
public void testDoPreDeleteUserWhenActiveAccessTokenAreNotAvailable() throws APIManagementException {
apiAuthenticationAdminClient = Mockito.mock(APIAuthenticationAdminClient.class);
KeyManagerUserOperationListener keyManagerUserOperationListener = new KeyManagerUserOperationListenerWrapper
(apiMgtDAO, workflowExecutor, null, config, apiAuthenticationAdminClient, true);
Environment environment = new Environment();
environmentMap.put("hybrid", environment);
Mockito.when(config.getApiGatewayEnvironments()).thenReturn(environmentMap);
Mockito.when(apiMgtDAO.getActiveAccessTokensOfUser(username)).thenReturn(new HashSet<String>());
Assert.assertTrue(keyManagerUserOperationListener.doPreDeleteUser(username, userStoreManager));
}
// With the handler enabled and gateways present, a token-retrieval failure blocks deletion.
@Test
public void testDoPreDeleteUserWithAccessTokenRetrievalFailure() throws APIManagementException {
KeyManagerUserOperationListener keyManagerUserOperationListener = new KeyManagerUserOperationListenerWrapper
(apiMgtDAO, workflowExecutor, null, config, apiAuthenticationAdminClient, true);
Environment environment = new Environment();
environmentMap.put("hybrid", environment);
Mockito.when(config.getApiGatewayEnvironments()).thenReturn(environmentMap);
//Throws APIMgtException while retrieving access tokens
Mockito.doThrow(APIManagementException.class).when(apiMgtDAO).getActiveAccessTokensOfUser(username);
Assert.assertFalse(keyManagerUserOperationListener.doPreDeleteUser(username, userStoreManager));
}
// Active tokens exist: deletion still proceeds (cache invalidation is best-effort).
@Test
public void testDoPreDeleteUserWithActiveAccessTokens() throws APIManagementException {
KeyManagerUserOperationListener keyManagerUserOperationListener = new KeyManagerUserOperationListenerWrapper
(apiMgtDAO, workflowExecutor, null, config, apiAuthenticationAdminClient, true);
Environment environment = new Environment();
environmentMap.put("hybrid", environment);
Mockito.when(config.getApiGatewayEnvironments()).thenReturn(environmentMap);
Set<String> activeTokens = new HashSet<String>();
activeTokens.add(UUID.randomUUID().toString());
Mockito.when(apiMgtDAO.getActiveAccessTokensOfUser(username)).thenReturn(activeTokens);
//Test AxisFault while invalidating Cached Tokens via Admin Client
Assert.assertTrue(keyManagerUserOperationListener.doPreDeleteUser(username, userStoreManager));
}
// NOTE(review): no assertion — this only verifies the DAO failure does not escape.
@Test
public void testDoPreDeleteUserWithAPIManagementExceptionWhileRetrievingWFReferences() throws
APIManagementException {
KeyManagerUserOperationListener keyManagerUserOperationListener = new KeyManagerUserOperationListenerWrapper
(apiMgtDAO, workflowExecutor, null, config);
Mockito.doThrow(APIManagementException.class).when(apiMgtDAO).getExternalWorkflowReferenceForUserSignup
(username);
keyManagerUserOperationListener.doPreDeleteUser(username, userStoreManager);
}
// NOTE(review): no assertion — only verifies the user-store failure does not escape.
@Test
public void testDoPreDeleteUserWithUserStoreExceptionWhileRetrievingUserStoreProperties() throws org.wso2.carbon
.user.api.UserStoreException {
Tenant tenant = new Tenant();
KeyManagerUserOperationListener keyManagerUserOperationListener = new KeyManagerUserOperationListenerWrapper
(apiMgtDAO, workflowExecutor, tenant, config);
Mockito.doThrow(UserStoreException.class).when(userStoreManager).getProperties(tenant);
keyManagerUserOperationListener.doPreDeleteUser(username, userStoreManager);
}
// NOTE(review): no assertion — only verifies the workflow cleanup failure does not escape.
@Test
public void testDoPreDeleteWithUserWorkflowExceptionWhileCleaningUpPendingWFTasks() throws APIManagementException,
WorkflowException {
String workflowExtRef = "";
KeyManagerUserOperationListener keyManagerUserOperationListener = new KeyManagerUserOperationListenerWrapper
(apiMgtDAO, workflowExecutor, null, config);
Mockito.when(apiMgtDAO.getExternalWorkflowReferenceForUserSignup(username)).thenReturn(workflowExtRef);
Mockito.doThrow(WorkflowException.class).when(workflowExecutor).cleanUpPendingTask(workflowExtRef);
keyManagerUserOperationListener.doPreDeleteUser(username, userStoreManager);
}
// Role-list updates for a user must be allowed to proceed.
@Test
public void testDoPreUpdateRoleListOfUser() {
KeyManagerUserOperationListener keyManagerUserOperationListener = new KeyManagerUserOperationListenerWrapper
(apiMgtDAO, workflowExecutor, null, config);
String[] deletedRoles = {"testRole1"};
String[] newRoles = {"testRole2"};
Assert.assertTrue(keyManagerUserOperationListener.doPreUpdateRoleListOfUser(username, deletedRoles, newRoles,
userStoreManager));
}
// User-list updates for a role must be allowed to proceed.
@Test
public void testDoPreUpdateUserListOfRole() {
KeyManagerUserOperationListener keyManagerUserOperationListener = new KeyManagerUserOperationListenerWrapper
(apiMgtDAO, workflowExecutor, null, config);
String[] deletedRoles = {"testRole1"};
String[] newRoles = {"testRole2"};
Assert.assertTrue(keyManagerUserOperationListener.doPreUpdateUserListOfRole(username, deletedRoles, newRoles,
userStoreManager));
}
}
| |
package ruke.vrj.phase;
import java.util.ArrayList;
import org.antlr.v4.runtime.ParserRuleContext;
import ruke.vrj.Symbol;
import ruke.vrj.SymbolFlag;
import ruke.vrj.SymbolTable;
import ruke.vrj.TypeChecker;
import ruke.vrj.antlr.vrjBaseVisitor;
import ruke.vrj.antlr.vrjParser;
import ruke.vrj.antlr.vrjParser.ExpressionContext;
import ruke.vrj.antlr.vrjParser.NameContext;
import ruke.vrj.compiler.Result;
/**
* MIT License
*
* Copyright (c) 2017 Franco Montenegro
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software
* and associated documentation files (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge, publish, distribute,
* sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or
* substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
* BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/**
 * Type-checking phase: walks the parse tree and records a {@link Result} for every type error
 * found, instead of throwing, so one pass can report many problems.
 *
 * <p>Each visit method returns the {@link Symbol} describing the type of the visited construct
 * (or {@code Symbol.NOTHING} when it cannot be resolved). Scope is tracked by temporarily
 * swapping {@link #symbols} to a declaration's child table while its body is visited.
 */
public class TypeCheck extends vrjBaseVisitor<Symbol> {

  // Accumulated type errors; exposed via getResults().
  private final ArrayList<Result> results = new ArrayList<>();

  // Current scope; saved and restored around library/struct/function/method bodies.
  private SymbolTable symbols;

  // Built-in type symbols, resolved once from the root table passed to the constructor.
  private Symbol integer;
  private Symbol real;
  private Symbol string;
  private Symbol code;
  private Symbol booleanType;
  private Symbol nullType;

  /**
   * Create a type check visitor.
   *
   * @param symbols Symbols
   */
  public TypeCheck(final SymbolTable symbols) {
    this.symbols = symbols;
    this.integer = symbols.resolve("integer");
    this.real = symbols.resolve("real");
    this.string = symbols.resolve("string");
    this.code = symbols.resolve("code");
    this.booleanType = symbols.resolve("boolean");
    this.nullType = symbols.resolve("null");
  }

  /** @return all type errors collected so far (live list, not a copy). */
  public ArrayList<Result> getResults() {
    return this.results;
  }

  /** Records an "Expected number" error spanning {@code ctx} when {@code expression} is not numeric. */
  private final void checkForNumeric(final ParserRuleContext ctx, final Symbol expression) {
    if (!TypeChecker.isValidNumber(expression)) {
      this.results.add(
          new Result(
              ctx.getStart().getInputStream().getSourceName(),
              ctx.getStart().getLine(),
              ctx.getStart().getCharPositionInLine(),
              ctx.getStart().getCharPositionInLine() + ctx.getText().length(),
              "Expected number"));
    }
  }

  /** Records an error spanning {@code ctx} when {@code expression} is not boolean-compatible. */
  private final void checkForBoolean(final ParserRuleContext ctx, final Symbol expression) {
    if (!TypeChecker.compatible(this.booleanType, expression)) {
      this.results.add(
          new Result(
              ctx.getStart().getInputStream().getSourceName(),
              ctx.getStart().getLine(),
              ctx.getStart().getCharPositionInLine(),
              ctx.getStart().getCharPositionInLine() + ctx.getText().length(),
              "Expected boolean expression"));
    }
  }

  /**
   * Checks a library declaration: its initializer (if any) must be a parameterless function,
   * its requirements (if any) must themselves be libraries; then visits the body in the
   * library's own scope.
   */
  @Override
  public Symbol visitLibraryDeclaration(vrjParser.LibraryDeclarationContext ctx) {
    final String name = ctx.name(0).getText();
    final Symbol library = this.symbols.resolve(name);
    if (!library.initializer.isEmpty()) {
      final Symbol initializer = library.children.resolve(library.initializer);
      if (!TypeChecker.isValidInitializer(initializer)) {
        this.results.add(
            new Result(
                ctx.getStart().getInputStream().getSourceName(),
                ctx.getStart().getLine(),
                ctx.initializer.getStart().getCharPositionInLine(),
                ctx.initializer.getStart().getCharPositionInLine() + library.initializer.length(),
                "Invalid initializer. It must be a function with no parameters"));
      }
    }
    if (!library.extendsFrom.isEmpty()) {
      Symbol requirement;
      for (final NameContext requirementName : ctx.libraryRequirementsExpression().name()) {
        // NOTE(review): requirements are resolved through the library's child table —
        // presumably that delegates up to enclosing scopes; confirm against SymbolTable.resolve.
        requirement = library.children.resolve(requirementName.getText());
        if (!TypeChecker.isLibrary(requirement)) {
          this.results.add(
              new Result(
                  ctx.getStart().getInputStream().getSourceName(),
                  ctx.getStart().getLine(),
                  requirementName.getStart().getCharPositionInLine(),
                  requirementName.getStart().getCharPositionInLine()
                      + requirementName.getText().length(),
                  "Invalid library requirement. Only libraries are valid"));
        }
      }
    }
    // Visit the body in the library's scope, then restore the previous scope.
    final SymbolTable prevSymbols = this.symbols;
    this.symbols = library.children;
    this.visit(ctx.libraryBody());
    this.symbols = prevSymbols;
    return library;
  }

  /**
   * Checks a struct declaration: a struct may only extend another struct or "array";
   * then visits the body in the struct's own scope.
   */
  @Override
  public Symbol visitStructDeclaration(vrjParser.StructDeclarationContext ctx) {
    final String name = ctx.name().getText();
    final Symbol struct = this.symbols.resolve(name);
    if (!struct.extendsFrom.isEmpty() && !struct.extendsFrom.get(0).equals("array")) {
      final Symbol parent = struct.children.resolve(struct.extendsFrom.get(0));
      if (!TypeChecker.isStruct(parent)) {
        this.results.add(
            new Result(
                ctx.getStart().getInputStream().getSourceName(),
                ctx.getStart().getLine(),
                ctx.extendsFromExpression().getStart().getCharPositionInLine(),
                ctx.extendsFromExpression().getStart().getCharPositionInLine()
                    + struct.extendsFrom.get(0).length(),
                "Structs can only extend from struct or array"));
      }
    }
    final SymbolTable prevSymbols = this.symbols;
    this.symbols = struct.children;
    this.visit(ctx.structBody());
    this.symbols = prevSymbols;
    return struct;
  }

  /** Visits a function's signature and statements inside the function's own scope. */
  @Override
  public Symbol visitFunctionDeclaration(vrjParser.FunctionDeclarationContext ctx) {
    final String name = ctx.functionSignature().name().getText();
    final Symbol function = this.symbols.resolve(name);
    final SymbolTable prevSymbols = this.symbols;
    this.symbols = function.children;
    this.visit(ctx.functionSignature());
    this.visit(ctx.statements());
    this.symbols = prevSymbols;
    return function;
  }

  /**
   * Checks a struct method. An "onInit" method is the struct initializer and must be static
   * with no parameters; the body is visited in the method's own scope.
   */
  @Override
  public Symbol visitMethodDeclaration(vrjParser.MethodDeclarationContext ctx) {
    final String name = ctx.functionSignature().name().getText();
    final Symbol method = this.symbols.resolve(name);
    method.registerImplementationIfNecessary();
    if ("onInit".equals(name) && !TypeChecker.isValidInitializer(method)) {
      this.results.add(
          new Result(
              ctx.getStart().getInputStream().getSourceName(),
              ctx.getStart().getLine(),
              ctx.functionSignature().name().getStart().getCharPositionInLine(),
              ctx.functionSignature().name().getStart().getCharPositionInLine() + name.length(),
              "Struct initializer must be static and must take no parameters"));
    }
    final SymbolTable prevSymbols = this.symbols;
    this.symbols = method.children;
    this.visit(ctx.functionSignature());
    this.visit(ctx.statements());
    this.symbols = prevSymbols;
    return method;
  }

  /** Parenthesis carries the type of its inner expression. */
  @Override
  public Symbol visitParenthesisExpression(vrjParser.ParenthesisExpressionContext ctx) {
    return this.visit(ctx.expression());
  }

  /** Unary minus: operand must be numeric. */
  @Override
  public Symbol visitNegativeExpression(vrjParser.NegativeExpressionContext ctx) {
    final Symbol a = this.visit(ctx.expression());
    this.checkForNumeric(ctx.expression(), a);
    // NOTE(review): the result type is always integer, even for real operands —
    // confirm this is intended by the language rather than an oversight.
    return this.integer;
  }

  /** Logical not always yields boolean; the operand itself is not type-checked here. */
  @Override
  public Symbol visitNotExpression(vrjParser.NotExpressionContext ctx) {
    this.visit(ctx.expression());
    return this.booleanType;
  }

  /** Modulo: both operands must be numeric; result reported as integer. */
  @Override
  public Symbol visitModuloExpression(vrjParser.ModuloExpressionContext ctx) {
    final Symbol a = this.visit(ctx.left);
    final Symbol b = this.visit(ctx.right);
    this.checkForNumeric(ctx.left, a);
    this.checkForNumeric(ctx.right, b);
    return this.integer;
  }

  /** Division/multiplication: both operands must be numeric; result reported as integer. */
  @Override
  public Symbol visitDivMultExpression(vrjParser.DivMultExpressionContext ctx) {
    final Symbol a = this.visit(ctx.left);
    final Symbol b = this.visit(ctx.right);
    this.checkForNumeric(ctx.left, a);
    this.checkForNumeric(ctx.right, b);
    return this.integer;
  }

  /** Addition/subtraction: both operands must be numeric; result reported as integer. */
  @Override
  public Symbol visitSumSubExpression(vrjParser.SumSubExpressionContext ctx) {
    final Symbol a = this.visit(ctx.left);
    final Symbol b = this.visit(ctx.right);
    this.checkForNumeric(ctx.left, a);
    this.checkForNumeric(ctx.right, b);
    return this.integer;
  }

  /**
   * Member access chain (a.b.c): each link is resolved in the scope of the previous link's
   * type; resolution stops early if a link cannot be resolved. Returns the last link's symbol.
   */
  @Override
  public Symbol visitChainExpression(vrjParser.ChainExpressionContext ctx) {
    final SymbolTable prevSymbols = this.symbols;
    Symbol symbol = null;
    for (final ExpressionContext member : ctx.expression()) {
      symbol = this.visit(member);
      if (symbol.equals(Symbol.NOTHING)) {
        break;
      }
      // Make a copy to avoid searching in parents (we are only interested in children)
      this.symbols = SymbolTable.copyOf(symbol.children.resolve(symbol.type).children);
    }
    this.symbols = prevSymbols;
    return symbol;
  }

  /**
   * Variable reference, with optional array indexing. Reports undefined variables,
   * indexing of non-array variables, and non-integer indices.
   */
  @Override
  public Symbol visitVariableExpression(vrjParser.VariableExpressionContext ctx) {
    final String name = ctx.name().getText();
    final Symbol variable = this.symbols.resolve(name);
    if (variable.equals(Symbol.NOTHING)) {
      this.results.add(
          new Result(
              ctx.getStart().getInputStream().getSourceName(),
              ctx.getStart().getLine(),
              ctx.name().getStart().getCharPositionInLine(),
              ctx.name().getStart().getCharPositionInLine() + name.length(),
              "Variable " + name + " is not defined"));
      return variable;
    }
    if (ctx.index != null) {
      if (!variable.flags.contains(SymbolFlag.ARRAY)) {
        this.results.add(
            new Result(
                ctx.getStart().getInputStream().getSourceName(),
                ctx.getStart().getLine(),
                ctx.getStart().getCharPositionInLine(),
                ctx.getStart().getCharPositionInLine() + ctx.getText().length(),
                "Variable " + name + " is not an array"));
        return variable;
      }
      final Symbol index = this.visit(ctx.index);
      if (!TypeChecker.isValidArrayIndex(index)) {
        this.results.add(
            new Result(
                ctx.getStart().getInputStream().getSourceName(),
                ctx.getStart().getLine(),
                ctx.index.getStart().getCharPositionInLine(),
                ctx.index.getStart().getCharPositionInLine() + ctx.index.getText().length(),
                "Array index must be integer"));
      } else {
        // NOTE(review): this resolves the type of the INDEX expression, not the element type
        // of the array variable — looks like it may have been meant to be variable.type; confirm.
        return this.symbols.resolve(index.type);
      }
    }
    return variable;
  }

  /** Function call expression: reports undefined functions; arguments are only visited when defined. */
  @Override
  public Symbol visitFunctionExpression(vrjParser.FunctionExpressionContext ctx) {
    final String name = ctx.name().getText();
    final Symbol function = this.symbols.resolve(name);
    if (function.equals(Symbol.NOTHING)) {
      this.results.add(
          new Result(
              ctx.getStart().getInputStream().getSourceName(),
              ctx.getStart().getLine(),
              ctx.name().getStart().getCharPositionInLine(),
              ctx.name().getStart().getCharPositionInLine() + name.length(),
              "Function " + name + " is not defined"));
    } else {
      this.visit(ctx.arguments());
    }
    return function;
  }

  /**
   * Comparison: equality operators require compatible operand types; relational operators
   * require numeric operands. Always yields boolean.
   */
  @Override
  public Symbol visitComparisonExpression(vrjParser.ComparisonExpressionContext ctx) {
    final Symbol a = this.visit(ctx.left);
    final Symbol b = this.visit(ctx.right);
    switch (ctx.operator.getText()) {
      case "==":
      case "!=":
        if (!TypeChecker.compatible(a, b)) {
          this.results.add(
              new Result(
                  ctx.getStart().getInputStream().getSourceName(),
                  ctx.getStart().getLine(),
                  ctx.getStart().getCharPositionInLine(),
                  ctx.getStart().getCharPositionInLine() + ctx.getText().length(),
                  "Comparing incompatible types"));
        }
        break;
      case "<=":
      case ">=":
      case "<":
      case ">":
        this.checkForNumeric(ctx.left, a);
        this.checkForNumeric(ctx.right, b);
        break;
      default:
        break;
    }
    return this.booleanType;
  }

  /** and/or: both operands must be boolean; result is boolean. */
  @Override
  public Symbol visitLogicalExpression(vrjParser.LogicalExpressionContext ctx) {
    final Symbol a = this.visit(ctx.left);
    final Symbol b = this.visit(ctx.right);
    this.checkForBoolean(ctx.left, a);
    this.checkForBoolean(ctx.right, b);
    return this.booleanType;
  }

  /** "function X" expression: X must resolve to a function; the result type is code. */
  @Override
  public Symbol visitCodeExpression(vrjParser.CodeExpressionContext ctx) {
    final Symbol function = this.visit(ctx.code);
    if (!function.equals(Symbol.NOTHING) && !function.flags.contains(SymbolFlag.FUNCTION)) {
      this.results.add(
          new Result(
              ctx.getStart().getInputStream().getSourceName(),
              ctx.getStart().getLine(),
              ctx.code.getStart().getCharPositionInLine(),
              ctx.code.getStart().getCharPositionInLine() + ctx.code.getText().length(),
              "Expected function"));
    }
    return this.code;
  }

  /** Boolean literal. */
  @Override
  public Symbol visitBooleanExpression(vrjParser.BooleanExpressionContext ctx) {
    return this.booleanType;
  }

  /** null literal. */
  @Override
  public Symbol visitNullExpression(vrjParser.NullExpressionContext ctx) {
    return this.nullType;
  }

  /** String literal. */
  @Override
  public Symbol visitStringExpression(vrjParser.StringExpressionContext ctx) {
    return this.string;
  }

  /** Real literal. */
  @Override
  public Symbol visitRealExpression(vrjParser.RealExpressionContext ctx) {
    return this.real;
  }

  /** Integer literal. */
  @Override
  public Symbol visitIntegerExpression(vrjParser.IntegerExpressionContext ctx) {
    return this.integer;
  }

  /** Scalar variable declaration: the initial value (if present) must match the declared type. */
  @Override
  public Symbol visitNonArrayVariableDeclaration(vrjParser.NonArrayVariableDeclarationContext ctx) {
    final String name = ctx.name().getText();
    final Symbol variable = this.symbols.resolve(name);
    if (ctx.value != null) {
      final Symbol value = this.visit(ctx.value);
      if (!TypeChecker.compatible(variable, value)) {
        this.results.add(
            new Result(
                ctx.getStart().getInputStream().getSourceName(),
                ctx.getStart().getLine(),
                ctx.value.getStart().getCharPositionInLine(),
                ctx.value.getStart().getCharPositionInLine() + ctx.value.getText().length(),
                "Incompatible type"));
      }
    }
    return variable;
  }

  /**
   * set statement: the assigned value's type must be compatible with the variable.
   * The check is skipped entirely when the value expression could not be resolved.
   */
  @Override
  public Symbol visitSetVariableStatement(vrjParser.SetVariableStatementContext ctx) {
    final Symbol variable = this.visit(ctx.variable);
    final Symbol value = this.visit(ctx.value);
    final Symbol valueType = this.symbols.resolve(value.type);
    if (!value.equals(Symbol.NOTHING) && !TypeChecker.compatible(variable, valueType)) {
      this.results.add(
          new Result(
              ctx.getStart().getInputStream().getSourceName(),
              ctx.getStart().getLine(),
              ctx.value.getStart().getCharPositionInLine(),
              ctx.value.getStart().getCharPositionInLine() + ctx.value.getText().length(),
              "Incompatible type"));
    }
    return variable;
  }

  /** call statement: the callee must actually be a function (when it resolved at all). */
  @Override
  public Symbol visitFunctionCallStatement(vrjParser.FunctionCallStatementContext ctx) {
    final Symbol function = this.visit(ctx.function);
    if (!function.equals(Symbol.NOTHING) && !function.flags.contains(SymbolFlag.FUNCTION)) {
      this.results.add(
          new Result(
              ctx.getStart().getInputStream().getSourceName(),
              ctx.getStart().getLine(),
              ctx.function.getStart().getCharPositionInLine(),
              ctx.function.getStart().getCharPositionInLine() + ctx.function.getText().length(),
              "Not a function"));
    }
    return function;
  }

  /** exitwhen statement: the condition must be a boolean expression. */
  @Override
  public Symbol visitExitWhenStatement(vrjParser.ExitWhenStatementContext ctx) {
    final Symbol condition = visit(ctx.condition);
    if (!TypeChecker.compatible(this.booleanType, condition)) {
      this.results.add(
          new Result(
              ctx.getStart().getInputStream().getSourceName(),
              ctx.getStart().getLine(),
              ctx.condition.getStart().getCharPositionInLine(),
              ctx.condition.getStart().getCharPositionInLine() + ctx.condition.getText().length(),
              "Exitwhen condition must be a boolean expression"));
    }
    return Symbol.NOTHING;
  }

  /**
   * return statement: the returned value (NOTHING for a bare return) must be compatible with
   * the enclosing function's declared type, taken from the current scope's owner.
   */
  @Override
  public Symbol visitReturnStatement(vrjParser.ReturnStatementContext ctx) {
    final Symbol expectedValue = this.symbols.resolve(this.symbols.owner.type);
    final Symbol value = ctx.expression() == null ? Symbol.NOTHING : this.visit(ctx.expression());
    if (!TypeChecker.compatible(expectedValue, value)) {
      this.results.add(
          new Result(
              ctx.getStart().getInputStream().getSourceName(),
              ctx.getStart().getLine(),
              ctx.getStart().getCharPositionInLine(),
              ctx.getStart().getCharPositionInLine() + ctx.getText().length(),
              "Incompatible type"));
    }
    return expectedValue;
  }

  /** if statement: checks the condition, then visits the body and any elseif/else branches. */
  @Override
  public Symbol visitIfStatement(vrjParser.IfStatementContext ctx) {
    final Symbol condition = this.visit(ctx.condition);
    this.checkForBoolean(ctx.condition, condition);
    this.visit(ctx.statements());
    if (ctx.elseIfStatement() != null) {
      for (final vrjParser.ElseIfStatementContext elseif : ctx.elseIfStatement()) {
        this.visit(elseif);
      }
    }
    if (ctx.elseStatement() != null) {
      this.visit(ctx.elseStatement());
    }
    return Symbol.NOTHING;
  }

  /** elseif branch: checks the condition and visits the branch body. */
  @Override
  public Symbol visitElseIfStatement(vrjParser.ElseIfStatementContext ctx) {
    final Symbol condition = this.visit(ctx.condition);
    this.checkForBoolean(ctx.condition, condition);
    this.visit(ctx.statements());
    return Symbol.NOTHING;
  }
}
| |
/*******************************************************************************
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*******************************************************************************/
package org.ofbiz.entity.util;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.transaction.Transaction;
import org.ofbiz.base.util.Debug;
import org.ofbiz.entity.GenericEntityException;
import org.ofbiz.entity.datasource.GenericHelperInfo;
import org.ofbiz.entity.model.ModelEntity;
import org.ofbiz.entity.model.ModelField;
import org.ofbiz.entity.transaction.GenericTransactionException;
import org.ofbiz.entity.transaction.TransactionFactoryLoader;
import org.ofbiz.entity.transaction.TransactionUtil;
/**
* Sequence Utility to get unique sequences from named sequence banks
*/
/**
 * Sequence Utility to get unique sequences from named sequence banks
 */
public class SequenceUtil {

    private static final Debug.OfbizLogger module = Debug.getOfbizLogger(java.lang.invoke.MethodHandles.lookup().lookupClass());

    // One in-memory bank per sequence name; ConcurrentHashMap + putIfAbsent keeps creation race-safe.
    private final ConcurrentMap<String, SequenceBank> sequences = new ConcurrentHashMap<String, SequenceBank>();

    // Datasource info and the resolved table/column names of the sequence entity.
    private final GenericHelperInfo helperInfo;
    private final String tableName;
    private final String nameColName;
    private final String idColName;

    /**
     * Builds a sequence utility backed by the given sequence entity.
     *
     * @param helperInfo datasource helper used to obtain JDBC connections
     * @param seqEntity model of the sequence table (required)
     * @param nameFieldName entity field holding the sequence name
     * @param idFieldName entity field holding the current sequence id
     * @throws IllegalArgumentException if the entity or either field is missing
     */
    public SequenceUtil(GenericHelperInfo helperInfo, ModelEntity seqEntity, String nameFieldName, String idFieldName) {
        this.helperInfo = helperInfo;
        if (seqEntity == null) {
            throw new IllegalArgumentException("The sequence model entity was null but is required.");
        }
        this.tableName = seqEntity.getTableName(helperInfo.getHelperBaseName());
        ModelField nameField = seqEntity.getField(nameFieldName);
        if (nameField == null) {
            throw new IllegalArgumentException("Could not find the field definition for the sequence name field " + nameFieldName);
        }
        this.nameColName = nameField.getColName();
        ModelField idField = seqEntity.getField(idFieldName);
        if (idField == null) {
            throw new IllegalArgumentException("Could not find the field definition for the sequence id field " + idFieldName);
        }
        this.idColName = idField.getColName();
    }

    /**
     * Returns the next id for the named sequence, or null if the bank could not be (re)filled
     * from the database.
     *
     * @param seqName sequence name
     * @param staggerMax when &gt; 1, ids advance by a random stagger in [1, staggerMax]
     * @param seqModelEntity optional entity model used to size the bank on first use
     */
    public Long getNextSeqId(String seqName, long staggerMax, ModelEntity seqModelEntity) {
        SequenceBank bank = this.getBank(seqName, seqModelEntity);
        return bank.getNextSeqId(staggerMax);
    }

    /** Discards the cached id range of an existing bank and refills it from the database. */
    public void forceBankRefresh(String seqName, long staggerMax) {
        // don't use the get method because we don't want to create if it fails
        SequenceBank bank = sequences.get(seqName);
        if (bank == null) {
            return;
        }
        bank.refresh(staggerMax);
    }

    /** Gets (or lazily creates) the bank for a sequence name; bank size comes from the entity model, capped at maxBankSize. */
    private SequenceBank getBank(String seqName, ModelEntity seqModelEntity) {
        SequenceBank bank = sequences.get(seqName);
        if (bank == null) {
            long bankSize = SequenceBank.defaultBankSize;
            if (seqModelEntity != null && seqModelEntity.getSequenceBankSize() != null) {
                bankSize = seqModelEntity.getSequenceBankSize().longValue();
                if (bankSize > SequenceBank.maxBankSize) bankSize = SequenceBank.maxBankSize;
            }
            bank = new SequenceBank(seqName, bankSize);
            // Another thread may have created the bank concurrently; keep whichever won the race.
            SequenceBank bankFromCache = sequences.putIfAbsent(seqName, bank);
            bank = bankFromCache != null ? bankFromCache : bank;
        }
        return bank;
    }

    /**
     * A cached range [curSeqId, maxSeqId) of ids for one sequence name. Ids are handed out from
     * memory under {@code synchronized(this)}; when the range is exhausted the bank reserves a new
     * range in the database via {@link #fillBank(long)}.
     */
    private class SequenceBank {
        public static final long defaultBankSize = 10;
        public static final long maxBankSize = 5000;
        // First id used when the sequence row does not exist yet.
        public static final long startSeqId = 10000;

        private final String seqName;
        private final long bankSize;
        // Pre-built SQL for this sequence name.
        // NOTE(review): SQL is built by string concatenation of seqName; this assumes sequence
        // names come from trusted entity definitions — a PreparedStatement would be safer. Confirm
        // callers never pass user-supplied names.
        private final String updateForLockStatement;
        private final String selectSequenceStatement;
        // Next id to hand out / exclusive upper bound of the reserved range; guarded by synchronized(this).
        private long curSeqId;
        private long maxSeqId;

        private SequenceBank(String seqName, long bankSize) {
            this.seqName = seqName;
            curSeqId = 0;
            maxSeqId = 0;
            this.bankSize = bankSize;
            updateForLockStatement = "UPDATE " + SequenceUtil.this.tableName + " SET " + SequenceUtil.this.idColName + "=" + SequenceUtil.this.idColName + " WHERE " + SequenceUtil.this.nameColName + "='" + this.seqName + "'";
            selectSequenceStatement = "SELECT " + SequenceUtil.this.idColName + " FROM " + SequenceUtil.this.tableName + " WHERE " + SequenceUtil.this.nameColName + "='" + this.seqName + "'";
        }

        /**
         * Hands out the next id, advancing by a random stagger when staggerMax &gt; 1.
         * Returns null if the bank is exhausted and could not be refilled.
         */
        private Long getNextSeqId(long staggerMax) {
            long stagger = 1;
            if (staggerMax > 1) {
                stagger = (long)Math.ceil(Math.random() * staggerMax);
                if (stagger == 0) stagger = 1;
            }
            synchronized (this) {
                if ((curSeqId + stagger) <= maxSeqId) {
                    long retSeqId = curSeqId;
                    curSeqId += stagger;
                    return retSeqId;
                } else {
                    fillBank(stagger);
                    // Re-check: fillBank may have failed and reset the range to 0.
                    if ((curSeqId + stagger) <= maxSeqId) {
                        long retSeqId = curSeqId;
                        curSeqId += stagger;
                        return retSeqId;
                    } else {
                        Debug.logError("Fill bank failed, returning null", module);
                        return null;
                    }
                }
            }
        }

        /** Drops the remaining cached range and reserves a fresh one from the database. */
        private synchronized void refresh(long staggerMax) {
            this.curSeqId = this.maxSeqId;
            this.fillBank(staggerMax);
        }

        /*
         The algorithm to get the new sequence id in a thread safe way is the following:
         1 - run an update with no changes to get a lock on the record
         1bis - if no record is found, try to create and update it to get the lock
         2 - select the record (now locked) to get the curSeqId
         3 - increment the sequence
         The three steps are executed in one dedicated database transaction.
        */
        private void fillBank(long stagger) {
            // no need to get a new bank, SeqIds available
            if ((curSeqId + stagger) <= maxSeqId) {
                return;
            }
            long bankSize = this.bankSize;
            if (stagger > 1) {
                // NOTE: could use staggerMax for this, but if that is done it would be easier to guess a valid next id without a brute force attack
                bankSize = stagger * defaultBankSize;
            }
            if (bankSize > maxBankSize) {
                bankSize = maxBankSize;
            }
            // Run the reservation in its own transaction, suspending any transaction in place.
            Transaction suspendedTransaction = null;
            try {
                if (TransactionUtil.isTransactionInPlace()) { // SCIPIO: 2018-09-04: added check to eliminate useless warnings
                    suspendedTransaction = TransactionUtil.suspend();
                }
                boolean beganTransaction = false;
                try {
                    beganTransaction = TransactionUtil.begin();
                    Connection connection = null;
                    Statement stmt = null;
                    ResultSet rs = null;
                    try {
                        connection = TransactionFactoryLoader.getInstance().getConnection(SequenceUtil.this.helperInfo);
                    } catch (SQLException sqle) {
                        Debug.logWarning("Unable to establish a connection with the database. Error was:" + sqle.toString(), module);
                        throw sqle;
                    } catch (GenericEntityException e) {
                        Debug.logWarning("Unable to establish a connection with the database. Error was: " + e.toString(), module);
                        throw e;
                    }
                    if (connection == null) {
                        throw new GenericEntityException("Unable to establish a connection with the database, connection was null...");
                    }
                    try {
                        stmt = connection.createStatement();
                        String sql = null;
                        // 1 - run an update with no changes to get a lock on the record
                        if (stmt.executeUpdate(updateForLockStatement) <= 0) {
                            // SCIPIO: This occurs normally and frequently; counterproductive to log as warning since almost never an error
                            //Debug.logWarning("Lock failed; no sequence row was found, will try to add a new one for sequence: " + seqName, module);
                            Debug.logInfo("Lock failed; no sequence row was found, will try to add a new one for sequence: " + seqName, module);
                            sql = "INSERT INTO " + SequenceUtil.this.tableName + " (" + SequenceUtil.this.nameColName + ", " + SequenceUtil.this.idColName + ") VALUES ('" + this.seqName + "', " + startSeqId + ")";
                            try {
                                stmt.executeUpdate(sql);
                            } catch (SQLException sqle) {
                                // insert failed: this means that another thread inserted the record; then retry to run an update with no changes to get a lock on the record
                                if (stmt.executeUpdate(updateForLockStatement) <= 0) {
                                    // This should never happen
                                    throw new GenericEntityException("No rows changed when trying insert new sequence: " + seqName);
                                }
                            }
                        }
                        // 2 - select the record (now locked) to get the curSeqId
                        rs = stmt.executeQuery(selectSequenceStatement);
                        boolean sequenceFound = rs.next();
                        if (sequenceFound) {
                            curSeqId = rs.getLong(SequenceUtil.this.idColName);
                        }
                        rs.close();
                        if (!sequenceFound) {
                            throw new GenericEntityException("Failed to find the sequence record for sequence: " + seqName);
                        }
                        // 3 - increment the sequence
                        sql = "UPDATE " + SequenceUtil.this.tableName + " SET " + SequenceUtil.this.idColName + "=" + SequenceUtil.this.idColName + "+" + bankSize + " WHERE " + SequenceUtil.this.nameColName + "='" + this.seqName + "'";
                        if (stmt.executeUpdate(sql) <= 0) {
                            throw new GenericEntityException("Update failed, no rows changes for seqName: " + seqName);
                        }
                        TransactionUtil.commit(beganTransaction);
                    } catch (SQLException sqle) {
                        Debug.logWarning(sqle, "SQL Exception:" + sqle.getMessage(), module);
                        throw sqle;
                    } finally {
                        // Close statement and connection independently so one failure doesn't leak the other.
                        try {
                            if (stmt != null) stmt.close();
                        } catch (SQLException sqle) {
                            Debug.logWarning(sqle, "Error closing statement in sequence util", module);
                        }
                        try {
                            connection.close();
                        } catch (SQLException sqle) {
                            Debug.logWarning(sqle, "Error closing connection in sequence util", module);
                        }
                    }
                } catch (SQLException | GenericEntityException e) {
                    // reset the sequence fields and return (note: it would be better to throw an exception)
                    curSeqId = 0;
                    maxSeqId = 0;
                    String errMsg = "General error in getting a sequenced ID";
                    Debug.logError(e, errMsg, module);
                    try {
                        TransactionUtil.rollback(beganTransaction, errMsg, e);
                    } catch (GenericTransactionException gte2) {
                        Debug.logError(gte2, "Unable to rollback transaction", module);
                    }
                    return;
                }
            } catch (GenericTransactionException e) {
                Debug.logError(e, "System Error suspending transaction in sequence util", module);
                // reset the sequence fields and return (note: it would be better to throw an exception)
                curSeqId = 0;
                maxSeqId = 0;
                return;
            } finally {
                // Always resume the caller's transaction if one was suspended.
                if (suspendedTransaction != null) {
                    try {
                        TransactionUtil.resume(suspendedTransaction);
                    } catch (GenericTransactionException e) {
                        Debug.logError(e, "Error resuming suspended transaction in sequence util", module);
                        // reset the sequence fields and return (note: it would be better to throw an exception)
                        curSeqId = 0;
                        maxSeqId = 0;
                        return;
                    }
                }
            }
            // Reservation succeeded: the in-memory range is [curSeqId, curSeqId + bankSize).
            maxSeqId = curSeqId + bankSize;
            if (Debug.infoOn()) Debug.logInfo("Got bank of sequenced IDs for [" + this.seqName + "]; curSeqId=" + curSeqId + ", maxSeqId=" + maxSeqId + ", bankSize=" + bankSize, module);
        }
    }
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to you under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.jmeter.protocol.http.parser;
import java.io.ByteArrayInputStream;
import java.net.MalformedURLException;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.Iterator;
import org.apache.commons.lang3.StringUtils;
import org.apache.jmeter.protocol.http.util.ConversionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
import org.w3c.dom.NamedNodeMap;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.w3c.tidy.Tidy;
import org.xml.sax.SAXException;
/**
* HtmlParser implementation using JTidy.
*/
class JTidyHTMLParser extends HTMLParser {
    private static final Logger log = LoggerFactory.getLogger(JTidyHTMLParser.class);

    protected JTidyHTMLParser() {
        super();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Iterator<URL> getEmbeddedResourceURLs(String userAgent, byte[] html, URL baseUrl, URLCollection urls, String encoding) throws HTMLParseException {
        // Build a DOM from the (possibly malformed) HTML via JTidy, then walk it for resources.
        Document dom;
        try {
            dom = (Document) getDOM(html, encoding);
        } catch (SAXException se) {
            throw new HTMLParseException(se);
        }
        // Now parse the DOM tree
        scanNodes(dom, urls, baseUrl);
        return urls.iterator();
    }

    /**
     * Scan nodes recursively, looking for embedded resources
     *
     * @param node -
     *            initial node
     * @param urls -
     *            container for URLs
     * @param baseUrl -
     *            used to create absolute URLs
     *
     * @return new base URL
     */
    private URL scanNodes(Node node, URLCollection urls, URL baseUrl) throws HTMLParseException {
        if (node == null) {
            return baseUrl;
        }

        String name = node.getNodeName();

        int type = node.getNodeType();

        switch (type) {

        case Node.DOCUMENT_NODE:
            scanNodes(((Document) node).getDocumentElement(), urls, baseUrl);
            break;

        case Node.ELEMENT_NODE:
            // Note: the order of these checks matters — each matched tag "break"s without
            // recursing into children; only unmatched elements fall through to the child scan.
            NamedNodeMap attrs = node.getAttributes();
            if (name.equalsIgnoreCase(TAG_BASE)) {
                // <base href> rebases all URLs seen after this point.
                String tmp = getValue(attrs, ATT_HREF);
                if (tmp != null) {
                    try {
                        baseUrl = ConversionUtils.makeRelativeURL(baseUrl, tmp);
                    } catch (MalformedURLException e) {
                        throw new HTMLParseException(e);
                    }
                }
                break;
            }

            if (name.equalsIgnoreCase(TAG_IMAGE) || name.equalsIgnoreCase(TAG_EMBED)) {
                urls.addURL(getValue(attrs, ATT_SRC), baseUrl);
                break;
            }

            if (name.equalsIgnoreCase(TAG_APPLET)) {
                urls.addURL(getValue(attrs, "code"), baseUrl);
                break;
            }

            if (name.equalsIgnoreCase(TAG_OBJECT)) {
                // Both codebase and data may reference resources.
                String data = getValue(attrs, "codebase");
                if(!StringUtils.isEmpty(data)) {
                    urls.addURL(data, baseUrl);
                }

                data = getValue(attrs, "data");
                if(!StringUtils.isEmpty(data)) {
                    urls.addURL(data, baseUrl);
                }
                break;
            }

            if (name.equalsIgnoreCase(TAG_INPUT)) {
                // Only <input type="image" src=...> loads a resource.
                String src = getValue(attrs, ATT_SRC);
                String typ = getValue(attrs, ATT_TYPE);
                if ((src != null) && ATT_IS_IMAGE.equalsIgnoreCase(typ)) {
                    urls.addURL(src, baseUrl);
                }
                break;
            }
            if (TAG_LINK.equalsIgnoreCase(name) &&
                    (STYLESHEET.equalsIgnoreCase(getValue(attrs, ATT_REL))
                    || SHORTCUT_ICON.equalsIgnoreCase(getValue(attrs, ATT_REL))
                    || ICON.equalsIgnoreCase(getValue(attrs, ATT_REL)))) {
                // Only stylesheet/icon links are fetched resources; other rel values are ignored.
                urls.addURL(getValue(attrs, ATT_HREF), baseUrl);
                break;
            }
            if (name.equalsIgnoreCase(TAG_SCRIPT)) {
                urls.addURL(getValue(attrs, ATT_SRC), baseUrl);
                break;
            }
            if (name.equalsIgnoreCase(TAG_FRAME)) {
                urls.addURL(getValue(attrs, ATT_SRC), baseUrl);
                break;
            }
            if (name.equalsIgnoreCase(TAG_IFRAME)) {
                urls.addURL(getValue(attrs, ATT_SRC), baseUrl);
                break;
            }
            // Any remaining element may still carry a background image attribute...
            String back = getValue(attrs, ATT_BACKGROUND);
            if (back != null) {
                urls.addURL(back, baseUrl);
            }
            if (name.equalsIgnoreCase(TAG_BGSOUND)) {
                urls.addURL(getValue(attrs, ATT_SRC), baseUrl);
                break;
            }

            // ...and inline style URLs (e.g. url(...) references).
            String style = getValue(attrs, ATT_STYLE);
            if (style != null) {
                HtmlParsingUtils.extractStyleURLs(baseUrl, urls, style);
            }

            NodeList children = node.getChildNodes();
            if (children != null) {
                int len = children.getLength();
                for (int i = 0; i < len; i++) {
                    baseUrl = scanNodes(children.item(i), urls, baseUrl);
                }
            }
            break;

        default:
            // ignored
            break;
        }

        return baseUrl;
    }

    /*
     * Helper method to get an attribute value, if it exists @param attrs list
     * of attributes @param attname attribute name @return
     */
    private String getValue(NamedNodeMap attrs, String attname) {
        // Returns null when the attribute is absent; attrs must be non-null
        // (only called for element nodes, which always have an attribute map).
        String v = null;
        Node n = attrs.getNamedItem(attname);

        if (n != null) {
            v = n.getNodeValue();
        }
        return v;
    }

    /**
     * Returns <code>tidy</code> as HTML parser.
     *
     * @return a <code>tidy</code> HTML parser
     */
    private static Tidy getTidyParser(String encoding) {
        log.debug("Start : getParser");
        Tidy tidy = new Tidy();
        // Quiet, warning-free parsing; output (unused here) normalized to UTF-8.
        tidy.setInputEncoding(encoding);
        tidy.setOutputEncoding(StandardCharsets.UTF_8.name());
        tidy.setQuiet(true);
        tidy.setShowWarnings(false);
        if (log.isDebugEnabled()) {
            log.debug("getParser : tidy parser created - " + tidy);
        }
        log.debug("End : getParser");
        return tidy;
    }

    /**
     * Returns a node representing a whole xml given an xml document.
     *
     * @param text
     *            an xml document (as a byte array)
     * @return a node representing a whole xml
     *
     * @throws SAXException
     *             indicates an error parsing the xml document
     */
    private static Node getDOM(byte[] text, String encoding) throws SAXException {
        log.debug("Start : getDOM");
        // null output stream: we only want the DOM, not pretty-printed output.
        Node node = getTidyParser(encoding).parseDOM(new ByteArrayInputStream(text), null);
        if (log.isDebugEnabled()) {
            log.debug("node : " + node);
        }
        log.debug("End : getDOM");
        return node;
    }
}
| |
package org.github.dtsopensource.local.test.dao.dataobject;
import java.util.ArrayList;
import java.util.List;
/**
 * MyBatis-Generator style "Example" (criteria) object for the DtsTestDO table.
 *
 * <p>Callers build a dynamic WHERE clause by obtaining {@link Criteria}
 * groups via {@link #createCriteria()} or {@link #or()} and chaining the
 * {@code andXxx...} condition methods. Conditions inside one {@code Criteria}
 * are joined with AND by the mapper; the groups in {@code oredCriteria} are
 * joined with OR.
 */
public class DtsTestDOExample {
    /* Raw SQL fragment placed after ORDER BY (e.g. "id desc"); null means no ordering. */
    protected String orderByClause;

    /* When true, the generated SELECT uses DISTINCT. */
    protected boolean distinct;

    /* Criteria groups combined with OR in the generated statement. */
    protected List<Criteria> oredCriteria;

    public DtsTestDOExample() {
        oredCriteria = new ArrayList<Criteria>();
    }

    public void setOrderByClause(String orderByClause) {
        this.orderByClause = orderByClause;
    }

    public String getOrderByClause() {
        return orderByClause;
    }

    public void setDistinct(boolean distinct) {
        this.distinct = distinct;
    }

    public boolean isDistinct() {
        return distinct;
    }

    public List<Criteria> getOredCriteria() {
        return oredCriteria;
    }

    /** Registers an already-built criteria group, ORed with any existing groups. */
    public void or(Criteria criteria) {
        oredCriteria.add(criteria);
    }

    /** Creates a new criteria group, registers it (OR semantics) and returns it. */
    public Criteria or() {
        Criteria criteria = createCriteriaInternal();
        oredCriteria.add(criteria);
        return criteria;
    }

    /**
     * Creates a criteria group. It is auto-registered only when it is the
     * first one; subsequent groups must be attached via {@link #or(Criteria)}.
     */
    public Criteria createCriteria() {
        Criteria criteria = createCriteriaInternal();
        if (oredCriteria.size() == 0) {
            oredCriteria.add(criteria);
        }
        return criteria;
    }

    protected Criteria createCriteriaInternal() {
        Criteria criteria = new Criteria();
        return criteria;
    }

    /** Resets the example to its freshly-constructed state. */
    public void clear() {
        oredCriteria.clear();
        orderByClause = null;
        distinct = false;
    }

    /**
     * Holds the AND-joined conditions of one criteria group. Each
     * {@code andXxx...} method appends a condition and returns {@code this}
     * (cast to {@link Criteria}) so calls can be chained fluently.
     */
    protected abstract static class GeneratedCriteria {
        protected List<Criterion> criteria;

        protected GeneratedCriteria() {
            super();
            criteria = new ArrayList<Criterion>();
        }

        /** A group only contributes to the WHERE clause when it holds at least one condition. */
        public boolean isValid() {
            return criteria.size() > 0;
        }

        public List<Criterion> getAllCriteria() {
            return criteria;
        }

        public List<Criterion> getCriteria() {
            return criteria;
        }

        /** Adds a condition with no bind value (e.g. "name is null"). */
        protected void addCriterion(String condition) {
            if (condition == null) {
                throw new RuntimeException("Value for condition cannot be null");
            }
            criteria.add(new Criterion(condition));
        }

        /** Adds a single-value condition; {@code property} is only used in the error message. */
        protected void addCriterion(String condition, Object value, String property) {
            if (value == null) {
                throw new RuntimeException("Value for " + property + " cannot be null");
            }
            criteria.add(new Criterion(condition, value));
        }

        /** Adds a two-value (BETWEEN-style) condition; {@code property} is only used in the error message. */
        protected void addCriterion(String condition, Object value1, Object value2, String property) {
            if (value1 == null || value2 == null) {
                throw new RuntimeException("Between values for " + property + " cannot be null");
            }
            criteria.add(new Criterion(condition, value1, value2));
        }

        // ---- Conditions on the "name" column ----

        public Criteria andNameIsNull() {
            addCriterion("name is null");
            return (Criteria) this;
        }

        public Criteria andNameIsNotNull() {
            addCriterion("name is not null");
            return (Criteria) this;
        }

        public Criteria andNameEqualTo(String value) {
            addCriterion("name =", value, "name");
            return (Criteria) this;
        }

        public Criteria andNameNotEqualTo(String value) {
            addCriterion("name <>", value, "name");
            return (Criteria) this;
        }

        public Criteria andNameGreaterThan(String value) {
            addCriterion("name >", value, "name");
            return (Criteria) this;
        }

        public Criteria andNameGreaterThanOrEqualTo(String value) {
            addCriterion("name >=", value, "name");
            return (Criteria) this;
        }

        public Criteria andNameLessThan(String value) {
            addCriterion("name <", value, "name");
            return (Criteria) this;
        }

        public Criteria andNameLessThanOrEqualTo(String value) {
            addCriterion("name <=", value, "name");
            return (Criteria) this;
        }

        public Criteria andNameLike(String value) {
            addCriterion("name like", value, "name");
            return (Criteria) this;
        }

        public Criteria andNameNotLike(String value) {
            addCriterion("name not like", value, "name");
            return (Criteria) this;
        }

        public Criteria andNameIn(List<String> values) {
            addCriterion("name in", values, "name");
            return (Criteria) this;
        }

        public Criteria andNameNotIn(List<String> values) {
            addCriterion("name not in", values, "name");
            return (Criteria) this;
        }

        public Criteria andNameBetween(String value1, String value2) {
            addCriterion("name between", value1, value2, "name");
            return (Criteria) this;
        }

        public Criteria andNameNotBetween(String value1, String value2) {
            addCriterion("name not between", value1, value2, "name");
            return (Criteria) this;
        }

        // ---- Conditions on the "value" column ----

        public Criteria andValueIsNull() {
            addCriterion("value is null");
            return (Criteria) this;
        }

        public Criteria andValueIsNotNull() {
            addCriterion("value is not null");
            return (Criteria) this;
        }

        public Criteria andValueEqualTo(String value) {
            addCriterion("value =", value, "value");
            return (Criteria) this;
        }

        public Criteria andValueNotEqualTo(String value) {
            addCriterion("value <>", value, "value");
            return (Criteria) this;
        }

        public Criteria andValueGreaterThan(String value) {
            addCriterion("value >", value, "value");
            return (Criteria) this;
        }

        public Criteria andValueGreaterThanOrEqualTo(String value) {
            addCriterion("value >=", value, "value");
            return (Criteria) this;
        }

        public Criteria andValueLessThan(String value) {
            addCriterion("value <", value, "value");
            return (Criteria) this;
        }

        public Criteria andValueLessThanOrEqualTo(String value) {
            addCriterion("value <=", value, "value");
            return (Criteria) this;
        }

        public Criteria andValueLike(String value) {
            addCriterion("value like", value, "value");
            return (Criteria) this;
        }

        public Criteria andValueNotLike(String value) {
            addCriterion("value not like", value, "value");
            return (Criteria) this;
        }

        public Criteria andValueIn(List<String> values) {
            addCriterion("value in", values, "value");
            return (Criteria) this;
        }

        public Criteria andValueNotIn(List<String> values) {
            addCriterion("value not in", values, "value");
            return (Criteria) this;
        }

        public Criteria andValueBetween(String value1, String value2) {
            addCriterion("value between", value1, value2, "value");
            return (Criteria) this;
        }

        public Criteria andValueNotBetween(String value1, String value2) {
            addCriterion("value not between", value1, value2, "value");
            return (Criteria) this;
        }
    }

    /** Concrete criteria group; the generator keeps it empty for user customisation. */
    public static class Criteria extends GeneratedCriteria {
        protected Criteria() {
            super();
        }
    }

    /**
     * One rendered condition. Exactly one of the {@code noValue},
     * {@code singleValue}, {@code betweenValue}, {@code listValue} flags is set,
     * telling the mapper XML how many placeholders to emit.
     */
    public static class Criterion {
        /* SQL fragment such as "name =" or "name is null". */
        private String condition;

        private Object value;

        /* Upper bound for BETWEEN-style conditions. */
        private Object secondValue;

        private boolean noValue;

        private boolean singleValue;

        private boolean betweenValue;

        private boolean listValue;

        /* MyBatis type handler name; always null through the paths in this class. */
        private String typeHandler;

        public String getCondition() {
            return condition;
        }

        public Object getValue() {
            return value;
        }

        public Object getSecondValue() {
            return secondValue;
        }

        public boolean isNoValue() {
            return noValue;
        }

        public boolean isSingleValue() {
            return singleValue;
        }

        public boolean isBetweenValue() {
            return betweenValue;
        }

        public boolean isListValue() {
            return listValue;
        }

        public String getTypeHandler() {
            return typeHandler;
        }

        /* Condition with no bind value ("... is null"). */
        protected Criterion(String condition) {
            super();
            this.condition = condition;
            this.typeHandler = null;
            this.noValue = true;
        }

        /* Single-value condition; a List value flips it to IN-list semantics. */
        protected Criterion(String condition, Object value, String typeHandler) {
            super();
            this.condition = condition;
            this.value = value;
            this.typeHandler = typeHandler;
            if (value instanceof List<?>) {
                this.listValue = true;
            } else {
                this.singleValue = true;
            }
        }

        protected Criterion(String condition, Object value) {
            this(condition, value, null);
        }

        /* Two-value (BETWEEN) condition. */
        protected Criterion(String condition, Object value1, Object value2, String typeHandler) {
            super();
            this.condition = condition;
            this.value = value1;
            this.secondValue = value2;
            this.typeHandler = typeHandler;
            this.betweenValue = true;
        }

        protected Criterion(String condition, Object value1, Object value2) {
            this(condition, value1, value2, null);
        }
    }
}
| |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.routing.allocation.decider;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.RestoreInProgress;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodeRole;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.repositories.IndexId;
import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.snapshots.SnapshotId;
import java.io.IOException;
import java.util.Collections;
import static java.util.Collections.singletonList;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.startsWith;
/**
* Test {@link RestoreInProgressAllocationDecider}
*/
public class RestoreInProgressAllocationDeciderTests extends ESAllocationTestCase {

    /**
     * Shards whose recovery source is not a snapshot must be ignored by the
     * decider: expect a YES decision for both primaries and replicas.
     */
    public void testCanAllocatePrimary() {
        ClusterState clusterState = createInitialClusterState();
        ShardRouting shard;
        if (randomBoolean()) {
            // A brand-new primary recovers from an empty store.
            shard = clusterState.getRoutingTable().shardRoutingTable("test", 0).primaryShard();
            assertEquals(RecoverySource.Type.EMPTY_STORE, shard.recoverySource().getType());
        } else {
            // Replicas always recover from their primary (peer recovery).
            shard = clusterState.getRoutingTable().shardRoutingTable("test", 0).replicaShards().get(0);
            assertEquals(RecoverySource.Type.PEER, shard.recoverySource().getType());
        }
        final Decision decision = executeAllocation(clusterState, shard);
        assertEquals(Decision.Type.YES, decision.type());
        assertEquals("ignored as shard is not being recovered from a snapshot", decision.getExplanation());
    }

    /**
     * A primary restoring from a snapshot that has no matching entry in the
     * RestoreInProgress custom must be rejected with a NO decision.
     */
    public void testCannotAllocatePrimaryMissingInRestoreInProgress() {
        ClusterState clusterState = createInitialClusterState();
        // Re-add the index as restoring from a snapshot ("_missing") for which
        // no RestoreInProgress entry is ever registered.
        RoutingTable routingTable = RoutingTable.builder(clusterState.getRoutingTable())
            .addAsRestore(clusterState.getMetadata().index("test"), createSnapshotRecoverySource("_missing"))
            .build();
        clusterState = ClusterState.builder(clusterState)
            .routingTable(routingTable)
            .build();
        ShardRouting primary = clusterState.getRoutingTable().shardRoutingTable("test", 0).primaryShard();
        assertEquals(ShardRoutingState.UNASSIGNED, primary.state());
        assertEquals(RecoverySource.Type.SNAPSHOT, primary.recoverySource().getType());
        final Decision decision = executeAllocation(clusterState, primary);
        assertEquals(Decision.Type.NO, decision.type());
        assertThat(decision.getExplanation(), equalTo("shard has failed to be restored from the snapshot " +
            "[_repository:_missing/_uuid] - manually close or delete the index [test] in order to retry to restore the snapshot again " +
            "or use the reroute API to force the allocation of an empty primary shard. Details: [restore_source[_repository/_missing]]"));
    }

    /**
     * A primary with a registered restore entry is allocatable (YES) while the
     * shard restore is INIT/STARTED, but rejected (NO) once it has FAILED.
     */
    public void testCanAllocatePrimaryExistingInRestoreInProgress() {
        RecoverySource.SnapshotRecoverySource recoverySource = createSnapshotRecoverySource("_existing");
        ClusterState clusterState = createInitialClusterState();
        RoutingTable routingTable = RoutingTable.builder(clusterState.getRoutingTable())
            .addAsRestore(clusterState.getMetadata().index("test"), recoverySource)
            .build();
        clusterState = ClusterState.builder(clusterState)
            .routingTable(routingTable)
            .build();
        ShardRouting primary = clusterState.getRoutingTable().shardRoutingTable("test", 0).primaryShard();
        assertEquals(ShardRoutingState.UNASSIGNED, primary.state());
        assertEquals(RecoverySource.Type.SNAPSHOT, primary.recoverySource().getType());
        routingTable = clusterState.routingTable();
        final RestoreInProgress.State shardState;
        if (randomBoolean()) {
            shardState = randomFrom(RestoreInProgress.State.STARTED, RestoreInProgress.State.INIT);
        } else {
            shardState = RestoreInProgress.State.FAILURE;
            // Attach a failure to the primary's unassigned info so the decider's
            // NO explanation can report the restore failure details.
            UnassignedInfo currentInfo = primary.unassignedInfo();
            UnassignedInfo newInfo = new UnassignedInfo(currentInfo.getReason(), currentInfo.getMessage(), new IOException("i/o failure"),
                currentInfo.getNumFailedAllocations(), currentInfo.getUnassignedTimeInNanos(), currentInfo.getUnassignedTimeInMillis(),
                currentInfo.isDelayed(), currentInfo.getLastAllocationStatus(), currentInfo.getFailedNodeIds());
            primary = primary.updateUnassigned(newInfo, primary.recoverySource());
            // Rebuild the index routing table with the updated primary; all
            // other shard routings are carried over unchanged.
            IndexRoutingTable indexRoutingTable = routingTable.index("test");
            IndexRoutingTable.Builder newIndexRoutingTable = IndexRoutingTable.builder(indexRoutingTable.getIndex());
            for (final ObjectCursor<IndexShardRoutingTable> shardEntry : indexRoutingTable.getShards().values()) {
                final IndexShardRoutingTable shardRoutingTable = shardEntry.value;
                for (ShardRouting shardRouting : shardRoutingTable.getShards()) {
                    if (shardRouting.primary()) {
                        newIndexRoutingTable.addShard(primary);
                    } else {
                        newIndexRoutingTable.addShard(shardRouting);
                    }
                }
            }
            routingTable = RoutingTable.builder(routingTable).add(newIndexRoutingTable).build();
        }
        // Register the restore entry whose per-shard status carries shardState.
        ImmutableOpenMap.Builder<ShardId, RestoreInProgress.ShardRestoreStatus> shards = ImmutableOpenMap.builder();
        shards.put(primary.shardId(), new RestoreInProgress.ShardRestoreStatus(clusterState.getNodes().getLocalNodeId(), shardState));
        Snapshot snapshot = recoverySource.snapshot();
        RestoreInProgress.State restoreState = RestoreInProgress.State.STARTED;
        RestoreInProgress.Entry restore =
            new RestoreInProgress.Entry(recoverySource.restoreUUID(), snapshot, restoreState, singletonList("test"), shards.build());
        clusterState = ClusterState.builder(clusterState)
            .putCustom(RestoreInProgress.TYPE, new RestoreInProgress.Builder().add(restore).build())
            .routingTable(routingTable)
            .build();
        Decision decision = executeAllocation(clusterState, primary);
        if (shardState == RestoreInProgress.State.FAILURE) {
            assertEquals(Decision.Type.NO, decision.type());
            assertThat(decision.getExplanation(), startsWith("shard has failed to be restored from the snapshot " +
                "[_repository:_existing/_uuid] - manually close or delete the index " +
                "[test] in order to retry to restore the snapshot again or use the reroute API to force the allocation of " +
                "an empty primary shard. Details: [restore_source[_repository/_existing], failure java.io.IOException: i/o failure"));
        } else {
            assertEquals(Decision.Type.YES, decision.type());
            assertEquals("shard is currently being restored", decision.getExplanation());
        }
    }

    /**
     * Builds a one-node cluster with a single index "test"
     * (1 primary, 1 replica); both shards start out unassigned.
     */
    private ClusterState createInitialClusterState() {
        Metadata metadata = Metadata.builder()
            .put(IndexMetadata.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
            .build();
        RoutingTable routingTable = RoutingTable.builder()
            .addAsNew(metadata.index("test"))
            .build();
        DiscoveryNodes discoveryNodes = DiscoveryNodes.builder()
            .add(newNode("master", Collections.singleton(DiscoveryNodeRole.MASTER_ROLE)))
            .localNodeId("master")
            .masterNodeId("master")
            .build();
        ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
            .metadata(metadata)
            .routingTable(routingTable)
            .nodes(discoveryNodes)
            .build();
        assertEquals(2, clusterState.getRoutingTable().shardsWithState(ShardRoutingState.UNASSIGNED).size());
        return clusterState;
    }

    /**
     * Runs the decider under test on the given shard, randomly exercising the
     * node-less and node-targeted canAllocate overloads (they must agree).
     */
    private Decision executeAllocation(final ClusterState clusterState, final ShardRouting shardRouting) {
        final AllocationDecider decider = new RestoreInProgressAllocationDecider();
        final RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Collections.singleton(decider)),
            clusterState.getRoutingNodes(), clusterState, null, null, 0L);
        // Debug mode so decisions carry human-readable explanations.
        allocation.debugDecision(true);
        final Decision decision;
        if (randomBoolean()) {
            decision = decider.canAllocate(shardRouting, allocation);
        } else {
            DiscoveryNode node = clusterState.getNodes().getMasterNode();
            decision = decider.canAllocate(shardRouting, new RoutingNode(node.getId(), node), allocation);
        }
        return decision;
    }

    /** Builds a snapshot recovery source for repository "_repository" / snapshot uuid "_uuid". */
    private RecoverySource.SnapshotRecoverySource createSnapshotRecoverySource(final String snapshotName) {
        Snapshot snapshot = new Snapshot("_repository", new SnapshotId(snapshotName, "_uuid"));
        return new RecoverySource.SnapshotRecoverySource(UUIDs.randomBase64UUID(), snapshot, Version.CURRENT,
            new IndexId("test", UUIDs.randomBase64UUID(random())));
    }
}
| |
package org.giterlab;
import android.graphics.BitmapFactory;
import android.media.MediaCodec;
import android.widget.Toast;
import org.apache.cordova.CallbackContext;
import org.apache.cordova.CordovaPlugin;
import org.apache.cordova.PluginResult;
import org.json.JSONArray;
import org.json.JSONException;
import android.app.Application;
import android.content.Context;
import android.util.Log;
//import com.alibaba.sdk.android.callback.InitResultCallback;
import com.alibaba.sdk.android.push.CloudPushService;
import com.alibaba.sdk.android.push.CommonCallback;
import com.alibaba.sdk.android.push.common.util.SysInfoUtil;
import com.alibaba.sdk.android.push.noonesdk.PushServiceFactory;
import com.alibaba.fastjson.*;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static com.alibaba.sdk.android.push.AgooMessageReceiver.TAG;
/**
 * Cordova plugin bridging the Alibaba Cloud Push SDK (CloudPushService) to JavaScript.
 *
 * <p>{@link #execute} dispatches each JS action to the private method of the same
 * name via reflection, so every private method below is effectively a JS-facing
 * endpoint and its name (including the misspelled ones) is part of the interface.
 *
 * <p>NOTE(review): several endpoints invoke {@code callbackContext.success("success")}
 * synchronously right after registering an asynchronous {@link CommonCallback};
 * the async callback then fires the same CallbackContext a second time — confirm
 * the JS side tolerates this double invocation.
 */
public class Push extends CordovaPlugin{
    /* Last CallbackContext seen by execute(); used to report dispatch errors. */
    private CallbackContext mCallbackContext;

    /**
     * Reflection dispatcher: looks up a private method named {@code action}
     * with the (JSONArray, CallbackContext) signature and invokes it.
     * Returns false (after reporting an error) when dispatch fails.
     */
    @Override
    public boolean execute(String action, JSONArray args, CallbackContext callbackContext) throws JSONException {
        this.mCallbackContext = callbackContext;
        Class thisclass=Push.class;
        try {
            Method method=thisclass.getDeclaredMethod(action,JSONArray.class,CallbackContext.class);
            return (Boolean)method.invoke(this,args,callbackContext);
        } catch (NoSuchMethodException e) {
            // NOTE(review): only the exception class name is reported; the
            // underlying cause of an InvocationTargetException is lost.
            mCallbackContext.error("NoSuchMethodException");
        } catch (InvocationTargetException e) {
            mCallbackContext.error("InvocationTargetException");
        } catch (IllegalAccessException e) {
            mCallbackContext.error("IllegalAccessException");
        }
        return false;
    }

    /** Binds the push channel to the account name passed in args[0]. */
    private boolean bindAccount(JSONArray args, final CallbackContext callbackContext) throws JSONException {
        if(args.length()<1){
            callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR,"invaild arguements"));
            return false;
        }
        CloudPushService cloudPushService=PushServiceFactory.getCloudPushService();
        if (args.get(0).toString().length()!=0){
            cloudPushService.bindAccount(args.get(0).toString(), new CommonCallback() {
                @Override
                public void onSuccess(String s) {
                    callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.OK,"ok"));
                    Log.d(TAG, "bindAccount success"+s);
                }
                @Override
                public void onFailed(String s, String s1) {
                    callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR,"error"));
                    Log.d(TAG, "bindAccount failed"+s+s1);
                }
            });
        }
        // NOTE(review): fires the context again before the async result arrives.
        callbackContext.success("success");
        return true;
    }

    /** Binds a tag (and optional alias) described by a JSON payload. */
    private boolean bindTagsandAlias(JSONArray args, final CallbackContext callbackContext)throws JSONException{
        if(args.length()<1){
            callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR,"invaild arguements"));
            return false;
        }
        CloudPushService cloudPushService=PushServiceFactory.getCloudPushService();
        if (args.get(0).toString().length()!=0){
            try{
                // NOTE(review): the guard above checks args[0] but the payload is
                // parsed from args[1]; a one-element args array lands in the catch
                // below. Confirm which index the JS layer actually sends.
                TagsAlias tagsAlias=JSONObject.parseObject(args.get(1).toString(),TagsAlias.class);
                cloudPushService.bindTag(tagsAlias.getTag_key(), tagsAlias.getTag_value(), tagsAlias.getAlias(), new CommonCallback() {
                    @Override
                    public void onSuccess(String s) {
                        callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.OK,"ok"));
                        Log.d(TAG, "bindTag success"+s);
                    }
                    @Override
                    public void onFailed(String s, String s1) {
                        callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR,"error"));
                        Log.d(TAG, "bindTag failed"+s+s1);
                    }
                });
            }catch (Exception ex){
                callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR,"invaild JSON arguements"));
                return false;
            }
        }
        callbackContext.success("success");
        return true;
    }

    /** Unbinds the currently bound account from the push channel. */
    private boolean unBindAccount(JSONArray args, final CallbackContext callbackContext)throws JSONException{
        CloudPushService cloudPushService=PushServiceFactory.getCloudPushService();
        cloudPushService.unbindAccount(new CommonCallback() {
            @Override
            public void onSuccess(String s) {
                callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.OK,"ok"));
                Log.d(TAG,"unBindAccount success");
            }
            @Override
            public void onFailed(String s, String s1) {
                callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR,"error"));
                Log.d(TAG,"unBindAccount failed");
            }
        });
        callbackContext.success("success");
        return true;
    }

    /** Unbinds a tag (and optional alias) described by a JSON payload. */
    private boolean unBindTagsandAlias(JSONArray args, final CallbackContext callbackContext)throws JSONException{
        if (args.length()<1){
            callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR,"invaild arguements"));
            return false;
        }
        CloudPushService cloudPushService=PushServiceFactory.getCloudPushService();
        if (args.get(0).toString().length()!=0){
            try{
                // NOTE(review): same args[0]-guard / args[1]-payload mismatch as bindTagsandAlias.
                TagsAlias tagsAlias=JSONObject.parseObject(args.get(1).toString(),TagsAlias.class);
                cloudPushService.unbindTag(tagsAlias.getTag_key(), tagsAlias.getTag_value(), tagsAlias.getAlias(), new CommonCallback() {
                    @Override
                    public void onSuccess(String s) {
                        callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.OK,"ok"));
                        Log.d(TAG, "bindTag success"+s);
                    }
                    @Override
                    public void onFailed(String s, String s1) {
                        callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR,"error"));
                        Log.d(TAG, "bindTag failed"+s+s1);
                    }
                });
            }catch (Exception ex){
                callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR,"invaild JSON arguements"));
                return false;
            }
        }
        callbackContext.success("success");
        return true;
    }

    /** Returns the SDK-assigned device id to JS. */
    private boolean getDeviceId(JSONArray args, final CallbackContext callbackContext)throws JSONException{
        CloudPushService cloudPushService=PushServiceFactory.getCloudPushService();
        callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.OK,cloudPushService.getDeviceId()));
        // NOTE(review): log message copied from unBindAccount; misleading label.
        Log.d(TAG,"unBindAccount success");
        callbackContext.success("success");
        return true;
    }

    /** Lists tags of the target described by the JSON payload. */
    private boolean listTags(JSONArray args, final CallbackContext callbackContext)throws JSONException{
        if (args.length()<1){
            callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR,"invaild arguements"));
            return false;
        }
        CloudPushService cloudPushService=PushServiceFactory.getCloudPushService();
        if (args.get(0).toString().length()!=0){
            try{
                // NOTE(review): same args[0]-guard / args[1]-payload mismatch as bindTagsandAlias.
                TagsAlias tagsAlias=JSONObject.parseObject(args.get(1).toString(),TagsAlias.class);
                cloudPushService.listTags(tagsAlias.getTag_key(), new CommonCallback() {
                    @Override
                    public void onSuccess(String s) {
                        callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.OK,"ok"));
                        Log.d(TAG,"listTags success");
                    }
                    @Override
                    public void onFailed(String s, String s1) {
                        callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR,"error"));
                        Log.d(TAG,"listTags failed");
                    }
                });
            }catch (Exception ex){
                callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR,"invaild JSON arguements"));
                return false;
            }
        }
        callbackContext.success("success");
        return true;
    }

    /** Lists all aliases bound to this device. */
    private boolean listAlias(JSONArray args, final CallbackContext callbackContext)throws JSONException{
        CloudPushService cloudPushService=PushServiceFactory.getCloudPushService();
        cloudPushService.listAliases(new CommonCallback() {
            @Override
            public void onSuccess(String s) {
                callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.OK,"ok"));
                Log.d(TAG,"listAlias success");
            }
            @Override
            public void onFailed(String s, String s1) {
                callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR,"error"));
                Log.d(TAG,"listAlias failed");
            }
        });
        callbackContext.success("success");
        return true;
    }

    /** Removes the alias passed in args[0]. */
    private boolean removeAlias(JSONArray args, final CallbackContext callbackContext)throws JSONException{
        if (args.length()<1){
            callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR,"invaild arguements"));
            return false;
        }
        CloudPushService cloudPushService=PushServiceFactory.getCloudPushService();
        if(args.get(0).toString().length()!=0){
            cloudPushService.removeAlias(args.get(0).toString(), new CommonCallback() {
                @Override
                public void onSuccess(String s) {
                    callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.OK,s));
                    Log.d(TAG,"removeAlias success");
                }
                @Override
                public void onFailed(String s, String s1) {
                    callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR,s));
                    Log.d(TAG,"removeAlias failed");
                }
            });
        }
        callbackContext.success("success");
        return true;
    }

    /** Sets a custom notification sound from the file path in args[0]. */
    private boolean setNotificationSoundFilePath(JSONArray args, final CallbackContext callbackContext)throws JSONException{
        if (args.length()<1){
            callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR,"invaild arguements"));
            return false;
        }
        CloudPushService cloudPushService=PushServiceFactory.getCloudPushService();
        if (args.getString(0).length()!=0){
            try {
                cloudPushService.setNotificationSoundFilePath(args.getString(0));
                callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.OK,"ok"));
                Log.d(TAG,"setNotificationSoundFilePath success");
            }catch (Exception ex){
                callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR,"error"));
                Log.d(TAG,"setNotificationSoundFilePath error");
            }
        }
        return true;
    }

    /** Sets the large notification icon from an image file path in args[0]. */
    private boolean setNotificationLargeIcon(JSONArray args, final CallbackContext callbackContext)throws JSONException{
        if (args.length()<1){
            callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR,"invaild arguements"));
            return false;
        }
        CloudPushService cloudPushService=PushServiceFactory.getCloudPushService();
        if (args.getString(0).length()!=0){
            try {
                cloudPushService.setNotificationLargeIcon(BitmapFactory.decodeFile(args.getString(0)));
                callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.OK,"ok"));
                Log.d(TAG,"setNotificationLargeIcon success");
            }catch (Exception ex){
                callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR,"error"));
                Log.d(TAG,"setNotificationLargeIcon error");
            }
        }
        return true;
    }

    /** Sets the small notification icon from a drawable resource id in args[0]. */
    private boolean setNotificationSmallIcon(JSONArray args, final CallbackContext callbackContext)throws JSONException{
        if (args.length()<1){
            callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR,"invaild arguements"));
            return false;
        }
        CloudPushService cloudPushService=PushServiceFactory.getCloudPushService();
        try {
            cloudPushService.setNotificationSmallIcon(args.getInt(0));
            callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.OK,"ok"));
            // NOTE(review): log label says LargeIcon but this is the small icon path.
            Log.d(TAG,"setNotificationLargeIcon success");
        }catch (Exception ex){
            callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR,"error"));
            Log.d(TAG,"setNotificationLargeIcon error");
        }
        return true;
    }

    /**
     * Configures the do-not-disturb window from args = [startHour, startMinute,
     * endHour, endMinute]; the start must be strictly before the end and both
     * must be valid clock times.
     */
    private boolean setDoNotDisturb(JSONArray args, final CallbackContext callbackContext)throws JSONException{
        if (args.length()<4){
            callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR,"invaild arguements"));
            return false;
        }
        int startHour=args.getInt(0);
        int startMinute=args.getInt(1);
        int endHour=args.getInt(2);
        int endMinute=args.getInt(3);
        // Reject out-of-range clock values (note: negative values are not checked).
        if (startHour>23 || startMinute>59 || endHour>23 || endMinute>59){
            callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR,"invaild arguements"));
            return false;
        }
        // The window must start strictly before it ends (same-day comparison in minutes).
        if (!((startHour*60+startMinute)<(endHour*60+endMinute))){
            callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR,"invaild arguements"));
            return false;
        }
        CloudPushService cloudPushService=PushServiceFactory.getCloudPushService();
        cloudPushService.setDoNotDisturb(startHour, startMinute, endHour, endMinute, new CommonCallback() {
            @Override
            public void onSuccess(String s) {
                callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.OK,"ok"));
                Log.d(TAG,"setDoNotDisturb success");
            }
            @Override
            public void onFailed(String s, String s1) {
                callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR,"error"));
                Log.d(TAG,"setDoNotDisturb error");
            }
        });
        return true;
    }

    /* Disables do-not-disturb mode. Method name misspelling ("Notturb") is
     * load-bearing: the JS action string must match it for reflection dispatch.
     * No result is sent back to the CallbackContext. */
    private boolean setCloseDoNotturbMode(JSONArray args, final CallbackContext callbackContext)throws JSONException{
        CloudPushService cloudPushService=PushServiceFactory.getCloudPushService();
        cloudPushService.closeDoNotDisturbMode();
        return true;
    }

    /* Clears all notifications. Misspelled name ("Clera") is likewise part of
     * the JS-facing action vocabulary. No result is sent back. */
    private boolean setCleraNotifications(JSONArray args, final CallbackContext callbackContext)throws JSONException{
        CloudPushService cloudPushService=PushServiceFactory.getCloudPushService();
        cloudPushService.clearNotifications();
        return true;
    }

    /**
     * Binds a phone number (args[0]) to the push channel after a format check.
     * NOTE(review): the regex only accepts a fixed set of Chinese mobile
     * prefixes and will reject newer number ranges — confirm this is intended.
     */
    private boolean bindPhoneNumber(JSONArray args, final CallbackContext callbackContext)throws JSONException{
        if (args.length()<1){
            callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR,"invaild arguements"));
            return false;
        }
        Pattern pattern=Pattern.compile("^((13[0-9])|(15[^4,\\D])|(18[0,5-9])|(17[0-9]))\\d{8}$");
        Matcher matcher=pattern.matcher(args.getString(0));
        if (!matcher.matches()){
            callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR,"invaild arguements"));
            return false;
        }
        CloudPushService cloudPushService=PushServiceFactory.getCloudPushService();
        cloudPushService.bindPhoneNumber(args.getString(0), new CommonCallback() {
            @Override
            public void onSuccess(String s) {
                callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.OK,"ok"));
                Log.d(TAG,"bindPhoneNumber success");
            }
            @Override
            public void onFailed(String s, String s1) {
                callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR,"error"));
                Log.d(TAG,"bindPhoneNumber error");
            }
        });
        return true;
    }

    /** Unbinds the previously bound phone number. */
    private boolean unBindPhoneNum(JSONArray args, final CallbackContext callbackContext)throws JSONException{
        CloudPushService cloudPushService=PushServiceFactory.getCloudPushService();
        cloudPushService.unbindPhoneNumber(new CommonCallback() {
            @Override
            public void onSuccess(String s) {
                callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.OK,"ok"));
                Log.d(TAG,"unBindPhoneNum success");
            }
            @Override
            public void onFailed(String s, String s1) {
                callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR,"error"));
                Log.d(TAG,"unBindPhoneNum error");
            }
        });
        return true;
    }

    /**
     * Registers a long-lived message listener; keepCallback lets the same
     * CallbackContext deliver every subsequent push message to JS.
     */
    private boolean onMessageRes(JSONArray args, final CallbackContext callbackContext)throws JSONException{
        new DXReceiver().setCallBack(new MsgCallBack() {
            @Override
            public void onMsgResvice(String jsonmsg) {
                PluginResult pluginResult=new PluginResult(PluginResult.Status.OK,jsonmsg);
                pluginResult.setKeepCallback(true);
                callbackContext.sendPluginResult(pluginResult);
                Log.d(TAG,"onMessageRes success");
            }
        });
        return true;
    }

    /** Registers a long-lived listener for notification-click events. */
    private boolean onNotifyClick(JSONArray args, final CallbackContext callbackContext)throws JSONException{
        new DXReceiver().setNotifyCallBack(new NotifyCallBack() {
            @Override
            public void onNotifyClick(String msg) {
                PluginResult pluginResult=new PluginResult(PluginResult.Status.OK,msg);
                pluginResult.setKeepCallback(true);
                callbackContext.sendPluginResult(pluginResult);
                Log.d(TAG,"onMessageRes success");
            }
        });
        return true;
    }
}
| |
package com.podio.task;
import java.util.List;
import javax.ws.rs.core.MediaType;
import org.joda.time.LocalDate;
import com.podio.BaseAPI;
import com.podio.ResourceFactory;
import com.podio.common.Empty;
import com.podio.common.Reference;
import com.sun.jersey.api.client.GenericType;
/**
* Tasks are used to track what work has to be done. Tasks have the following
* properties:
*
* <ul>
* <li>Tasks can be stand-alone or can be attached to other objects.
 * <li>Tasks can be delegated to other users of Podio.
* <li>Tasks can be private or public. When private, only the creator, the
* assignee and assignor can see the task.
* <li>Tasks can have a due date, which is the target date for completion. When
* tasks miss their due date, they become over-due.
* <li>Tasks can be started on or not started on. This is used to indicate to
* other users if the task is in progress.
* </ul>
*
* A task can be in one of the following states:
*
* <ul>
* <li>active: The task is active and not yet completed
* <li>completed: The task is completed
* </ul>
*
* The following actions can be performed on a task:
* <ul>
* <li>assign: Reassign the task to another user to make that user responsible
* for the task
* <li>update due date: Update the due date of the task
* <li>update text: Update the text of the task
* <li>update private: Make the task private or public
* <li>start: Indicate that worked have started on the task
* <li>stop: Indicate that work have been stopped
* <li>complete: Mark the task as completed
* <li>incomplete: Mark the task as being incomplete
* </ul>
*/
public class TaskAPI extends BaseAPI {

    public TaskAPI(ResourceFactory resourceFactory) {
        super(resourceFactory);
    }

    /**
     * Retrieves a single task.
     *
     * @param taskId The id of the task to retrieve
     * @return The retrieved task
     */
    public Task getTask(int taskId) {
        String path = "/task/" + taskId;
        return getResourceFactory().getApiResource(path).get(Task.class);
    }

    /**
     * Reassigns the task to another user, making that user responsible for
     * the task and its completion.
     *
     * @param taskId      The id of the task to assign
     * @param responsible The id of the user the task should be assigned to
     */
    public void assignTask(int taskId, int responsible) {
        AssignValue value = new AssignValue(responsible);
        getResourceFactory().getApiResource("/task/" + taskId + "/assign")
                .entity(value, MediaType.APPLICATION_JSON_TYPE)
                .post();
    }

    /**
     * Marks the given task as completed.
     *
     * @param taskId The id of the task to mark as complete
     */
    public void completeTask(int taskId) {
        postEmpty("/task/" + taskId + "/complete");
    }

    /**
     * Marks the completed task as no longer being completed.
     *
     * @param taskId The id of the task to mark as incomplete
     */
    public void incompleteTask(int taskId) {
        postEmpty("/task/" + taskId + "/incomplete");
    }

    /** Posts an empty JSON body to the given API path. */
    private void postEmpty(String path) {
        getResourceFactory().getApiResource(path)
                .entity(new Empty(), MediaType.APPLICATION_JSON_TYPE)
                .post();
    }

    /**
     * Updates the due date of the task to the given value.
     *
     * @param taskId  The id of the task
     * @param dueDate The new due date of the task
     */
    public void updateDueDate(int taskId, LocalDate dueDate) {
        TaskDueDate body = new TaskDueDate(dueDate);
        getResourceFactory().getApiResource("/task/" + taskId + "/due_date")
                .entity(body, MediaType.APPLICATION_JSON_TYPE)
                .put();
    }

    /**
     * Updates the private flag on the given task.
     *
     * @param taskId The id of the task
     * @param priv   <code>true</code> if the task should be private,
     *               <code>false</code> otherwise
     */
    public void updatePrivate(int taskId, boolean priv) {
        TaskPrivate body = new TaskPrivate(priv);
        getResourceFactory().getApiResource("/task/" + taskId + "/private")
                .entity(body, MediaType.APPLICATION_JSON_TYPE)
                .put();
    }

    /**
     * Updates the text of the task.
     *
     * @param taskId The id of the task
     * @param text   The new text of the task
     */
    public void updateText(int taskId, String text) {
        TaskText body = new TaskText(text);
        getResourceFactory().getApiResource("/task/" + taskId + "/text")
                .entity(body, MediaType.APPLICATION_JSON_TYPE)
                .put();
    }

    /**
     * Creates a new task with no reference to other objects. Hooks are
     * executed for the change.
     *
     * @param task   The data of the task to be created
     * @param silent Disable notifications
     * @return The id of the newly created task
     */
    public int createTask(TaskCreate task, boolean silent) {
        return createTask(task, silent, true);
    }

    /**
     * Creates a new task with no reference to other objects.
     *
     * @param task   The data of the task to be created
     * @param silent Disable notifications
     * @param hook   Execute hooks for the change
     * @return The id of the newly created task
     */
    public int createTask(TaskCreate task, boolean silent, boolean hook) {
        TaskCreateResponse response = getResourceFactory()
                .getApiResource("/task/")
                .queryParam("silent", booleanFlag(silent))
                .queryParam("hook", booleanFlag(hook))
                .entity(task, MediaType.APPLICATION_JSON_TYPE)
                .post(TaskCreateResponse.class);
        return response.getId();
    }

    /** Renders a boolean the way the API expects it: "1" or "0". */
    private static String booleanFlag(boolean value) {
        return value ? "1" : "0";
    }

    /** Builds the "/task/{type}/{id}/" path for a referenced object. */
    private static String referencePath(Reference reference) {
        return "/task/" + reference.getType().name().toLowerCase() + "/"
                + reference.getId() + "/";
    }

    /**
     * Creates a new task attached to the given object. Hooks are executed for
     * the change.
     *
     * @param task      The data of the task to be created
     * @param reference The reference to the object the task should be attached to
     * @param silent    Disable notifications
     * @return The id of the newly created task
     */
    public int createTaskWithReference(TaskCreate task, Reference reference,
            boolean silent) {
        return createTaskWithReference(task, reference, silent, true);
    }

    /**
     * Creates a new task attached to the given object.
     *
     * @param task      The data of the task to be created
     * @param reference The reference to the object the task should be attached to
     * @param silent    Disable notifications
     * @param hook      Execute hooks for the change
     * @return The id of the newly created task
     */
    public int createTaskWithReference(TaskCreate task, Reference reference,
            boolean silent, boolean hook) {
        TaskCreateResponse response = getResourceFactory()
                .getApiResource(referencePath(reference))
                .queryParam("silent", booleanFlag(silent))
                .queryParam("hook", booleanFlag(hook))
                .entity(task, MediaType.APPLICATION_JSON_TYPE)
                .post(TaskCreateResponse.class);
        return response.getId();
    }

    /**
     * Gets the tasks attached to the given object, both active and completed.
     * The reference will not be set on the individual tasks.
     *
     * @param reference The object on which to return tasks
     * @return The list of tasks
     */
    public List<Task> getTasksWithReference(Reference reference) {
        return getResourceFactory().getApiResource(referencePath(reference))
                .get(new GenericType<List<Task>>() {
                });
    }

    /**
     * Returns the active tasks where the user is responsible, sorted by due
     * date and creation time and grouped by their due date status.
     *
     * @return The tasks grouped by due date
     */
    public TasksByDue getActiveTasks() {
        return getResourceFactory().getApiResource("/task/active/")
                .get(TasksByDue.class);
    }

    /**
     * Returns the tasks that the user has assigned to another user.
     *
     * @return The tasks grouped by due date
     */
    public TasksByDue getAssignedActiveTasks() {
        return getResourceFactory().getApiResource("/task/assigned/active/")
                .get(TasksByDue.class);
    }

    /**
     * Returns the completed tasks where the active user is responsible.
     *
     * @return The list of tasks ordered by date of completion
     */
    public List<Task> getCompletedTasks() {
        return getResourceFactory().getApiResource("/task/completed/")
                .get(new GenericType<List<Task>>() {
                });
    }
}
| |
package com.painless.clock.setting;
import java.util.Collection;
import android.annotation.SuppressLint;
import android.app.Activity;
import android.app.AlertDialog;
import android.app.SearchManager;
import android.content.DialogInterface;
import android.content.DialogInterface.OnClickListener;
import android.content.Intent;
import android.content.SharedPreferences;
import android.graphics.Bitmap;
import android.graphics.ColorMatrixColorFilter;
import android.graphics.drawable.BitmapDrawable;
import android.os.Build;
import android.os.Bundle;
import android.text.TextUtils;
import android.view.Menu;
import android.view.MenuItem;
import android.view.MenuItem.OnMenuItemClickListener;
import android.view.View;
import android.widget.ImageView;
import android.widget.SeekBar;
import android.widget.SeekBar.OnSeekBarChangeListener;
import android.widget.TextView;
import android.widget.Toast;
import com.painless.clock.Constants;
import com.painless.clock.R;
import com.painless.clock.util.WeatherUtil;
public class LocationViewActivity extends Activity implements View.OnClickListener, OnSeekBarChangeListener {

    // Color matrix: R, G and B are forced to 255 (white) and the output alpha is
    // taken from the source red channel (last row "1,0,0,0,0" => A = 1*R).
    // NOTE(review): assumes the weather bitmap encodes its mask in the red
    // channel — confirm against WeatherUtil.createWeatherIcon.
    private final ColorMatrixColorFilter mAlphaFilter = new ColorMatrixColorFilter(new float[] {
            0,0,0,0,255, 0,0,0,0,255, 0,0,0,0,255, 1,0,0,0,0
    });

    private boolean useFahrenheit = false;
    // Raw weather value string as persisted in preferences.
    private String mCurrentWeather;
    private SharedPreferences pref;
    // Location currently shown on the detail pane; persisted by saveCityInfo().
    private CityInfo mCurrentCityInfo;
    // Detail pane (current location) vs. map pane (search results): exactly one is visible.
    private View mLocationView;
    private View mMapContent;
    private MapView mMapView;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.location_view);
        pref = getSharedPreferences(Constants.SHARED_PREFS_NAME, 0);
        useFahrenheit = pref.getBoolean(Constants.USE_FARENHIET, false);

        // Initialize the current location from stored preferences.
        mCurrentCityInfo = new CityInfo();
        mCurrentCityInfo.name = pref.getString(CityInfo.USER_LOCATION, CityInfo.LOCATION_DEFAULT);
        mCurrentCityInfo.type = pref.getString(CityInfo.USER_LOCATION_TYPE, "");
        mCurrentCityInfo.address = pref.getString(CityInfo.USER_LOCATION_ADDR, "");
        mCurrentCityInfo.woeid = pref.getString(CityInfo.USER_LOCATION_ID, CityInfo.LOCATION_DEFAULT_ID);
        mCurrentCityInfo.lat = pref.getFloat(CityInfo.USER_LOCATION_LAT, 0);
        mCurrentCityInfo.lng = pref.getFloat(CityInfo.USER_LOCATION_LNG, 0);
        mCurrentWeather = pref.getString(Constants.WEATHER_VALUE, Constants.WEATHER_DEFAULT);

        mLocationView = findViewById(R.id.location_view);
        mMapContent = findViewById(R.id.map_content);
        mMapView = (MapView) findViewById(R.id.map_view);
        ((SeekBar) findViewById(R.id.zoom_seek)).setOnSeekBarChangeListener(this);
        showCityInfo(mCurrentCityInfo);
    }

    /** Fills the detail pane with the given city and makes it the visible pane. */
    private void showCityInfo(CityInfo info) {
        mCurrentCityInfo = info;
        getTV(R.id.txtName).setText(mCurrentCityInfo.name);
        getTV(R.id.txtType).setText(mCurrentCityInfo.type);
        getTV(R.id.txtAddress).setText(mCurrentCityInfo.address);
        updateTemp();
        mLocationView.setVisibility(View.VISIBLE);
        mMapContent.setVisibility(View.GONE);
        setTitle(R.string.app_name);
    }

    @Override
    public void onBackPressed() {
        if (mLocationView.getVisibility() == View.GONE) {
            // Map pane is showing: back returns to the detail pane instead of exiting.
            showCityInfo(mCurrentCityInfo);
        } else {
            super.onBackPressed();
        }
    }

    /** Re-renders the weather preview icon using the current unit setting. */
    private void updateTemp() {
        getTV(R.id.butUnit).setText(useFahrenheit ? R.string.lp_degee_f : R.string.lp_degee_c);
        Bitmap preview = WeatherUtil.createWeatherIcon(this, mCurrentWeather, useFahrenheit, WeatherUtil.isNightMode(mCurrentWeather));
        BitmapDrawable drawable = new BitmapDrawable(getResources(), preview);
        drawable.setColorFilter(mAlphaFilter);
        ((ImageView) findViewById(R.id.imgPreview)).setImageDrawable(drawable);
    }

    /** onClick handler (wired in XML) toggling Celsius/Fahrenheit. */
    public void toggleUnitClicked(View v) {
        useFahrenheit = !useFahrenheit;
        updateTemp();
    }

    @SuppressLint("NewApi")
    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        MenuItem item = menu.add(android.R.string.search_go)
                .setIcon(android.R.drawable.ic_menu_search);
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.HONEYCOMB) {
            item.setShowAsAction(MenuItem.SHOW_AS_ACTION_ALWAYS);
        }
        item.setOnMenuItemClickListener(new OnMenuItemClickListener() {
            @Override
            public boolean onMenuItemClick(MenuItem item) {
                return onSearchRequested();
            }
        });
        return true;
    }

    @Override
    public void onNewIntent(Intent intent) {
        // Fixed: the Activity contract requires calling through to super.
        super.onNewIntent(intent);
        setIntent(intent);
        handleIntent(intent);
    }

    /**
     * Process the search intent: runs a location search for the query and
     * renders the resulting cities on the map pane.
     */
    private void handleIntent(Intent intent) {
        if (Intent.ACTION_SEARCH.equals(intent.getAction())) {
            String query = intent.getStringExtra(SearchManager.QUERY);
            setTitle(getString(R.string.lb_search_title, query));
            if (!TextUtils.isEmpty(query)) {
                new LocationSearchTask(this) {
                    @Override
                    protected void onPostExecute(Collection<CityInfo> result) {
                        super.onPostExecute(result);
                        if (result != null) {
                            renderCities(result);
                        } else {
                            Toast.makeText(LocationViewActivity.this, R.string.lp_error_no_result, Toast.LENGTH_LONG).show();
                        }
                    }
                }.execute(query);
            }
        }
    }

    /** Shows the map pane with one clickable marker per search result. */
    private void renderCities(Collection<CityInfo> cities) {
        mLocationView.setVisibility(View.GONE);
        mMapContent.setVisibility(View.VISIBLE);
        mMapView.removeAllViews();
        for (CityInfo info : cities) {
            View v = mMapView.addMarker(info);
            if (v != null) {
                v.setOnClickListener(this);
            }
        }
    }

    @Override
    public void onStartTrackingTouch(SeekBar seekBar) { }

    @Override
    public void onStopTrackingTouch(SeekBar seekBar) { }

    @Override
    public void onProgressChanged(SeekBar seekBar, int progress, boolean fromUser) {
        // Map zoom ranges from 1.0 (progress 0) upwards in 0.1 steps.
        float scale = 1 + progress / 10f;
        mMapView.setZoom(scale);
    }

    /** Marker click: confirm the selection, then load its weather and persist it. */
    @Override
    public void onClick(View v) {
        final CityInfo info = (CityInfo) v.getTag();
        new AlertDialog.Builder(this)
                .setTitle(info.name)
                .setMessage(info.address)
                .setPositiveButton(R.string.ip_set_back, new OnClickListener() {
                    @Override
                    public void onClick(DialogInterface dialog, int which) {
                        new WeatherDataLoader(LocationViewActivity.this) {
                            @Override
                            public void onWeatherLoaded(String weather) {
                                mCurrentWeather = weather;
                                showCityInfo(info);
                                saveCityInfo();
                            }
                        }.execute(info.woeid);
                    }
                }).show();
    }

    /** Persists the currently shown city, weather value and unit preference. */
    private void saveCityInfo() {
        pref.edit()
                .putString(CityInfo.USER_LOCATION, mCurrentCityInfo.name)
                .putString(CityInfo.USER_LOCATION_TYPE, mCurrentCityInfo.type)
                .putString(CityInfo.USER_LOCATION_ADDR, mCurrentCityInfo.address)
                .putString(CityInfo.USER_LOCATION_ID, mCurrentCityInfo.woeid)
                .putFloat(CityInfo.USER_LOCATION_LAT, (float) mCurrentCityInfo.lat)
                .putFloat(CityInfo.USER_LOCATION_LNG, (float) mCurrentCityInfo.lng)
                .putString(Constants.WEATHER_VALUE, mCurrentWeather)
                .putBoolean(Constants.USE_FARENHIET, useFahrenheit)
                .commit();  // synchronous commit kept deliberately: callers rely on immediate persistence
    }

    private TextView getTV(int id) {
        return (TextView) findViewById(id);
    }
}
| |
/*
* Copyright 2000-2012 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package git4idea.branch;
import com.google.common.collect.Maps;
import com.intellij.notification.Notification;
import com.intellij.notification.NotificationAction;
import com.intellij.notification.NotificationType;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.progress.ProgressIndicator;
import com.intellij.openapi.progress.Task;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.Key;
import com.intellij.openapi.util.Pair;
import com.intellij.openapi.vcs.VcsException;
import com.intellij.openapi.vcs.VcsNotifier;
import com.intellij.util.containers.ContainerUtil;
import com.intellij.util.containers.MultiMap;
import com.intellij.vcs.log.Hash;
import git4idea.GitCommit;
import git4idea.GitLocalBranch;
import git4idea.GitRemoteBranch;
import git4idea.commands.*;
import git4idea.config.GitSharedSettings;
import git4idea.config.GitVersionSpecialty;
import git4idea.history.GitHistoryUtils;
import git4idea.repo.GitBranchTrackInfo;
import git4idea.repo.GitRepository;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static com.intellij.dvcs.DvcsUtil.getShortRepositoryName;
import static com.intellij.openapi.vcs.VcsNotifier.STANDARD_NOTIFICATION;
import static com.intellij.util.containers.ContainerUtil.exists;
import static com.intellij.util.containers.ContainerUtil.newHashMap;
/**
* Deletes a branch.
 * If the branch is not fully merged to the current branch, shows a dialog with the list of unmerged commits and with a list of
 * branches the branch is merged to, and performs a force delete, if the user confirms.
*/
class GitDeleteBranchOperation extends GitBranchOperation {

  private static final Logger LOG = Logger.getInstance(GitDeleteBranchOperation.class);

  // Texts of the actions offered in the success notification.
  static final String RESTORE = "Restore";
  static final String VIEW_COMMITS = "View Commits";
  static final String DELETE_TRACKED_BRANCH = "Delete Tracked Branch";

  // Name of the local branch being deleted; the same name is used in every repository.
  @NotNull private final String myBranchName;
  @NotNull private final VcsNotifier myNotifier;
  // Remote branch tracked by the deleted local branch, per repository (repositories without tracking are absent).
  @NotNull private final Map<GitRepository, GitRemoteBranch> myTrackedBranches;
  // Repositories in which the branch was not fully merged, with the data needed to show unmerged commits later.
  @NotNull private final Map<GitRepository, UnmergedBranchInfo> myUnmergedToBranches;
  // Hash of each repository's branch tip, captured before deletion so the branch can be restored on rollback.
  @NotNull private final Map<GitRepository, String> myDeletedBranchTips;

  GitDeleteBranchOperation(@NotNull Project project, @NotNull Git git, @NotNull GitBranchUiHandler uiHandler,
                           @NotNull Collection<GitRepository> repositories, @NotNull String branchName) {
    super(project, git, uiHandler, repositories);
    myBranchName = branchName;
    myNotifier = VcsNotifier.getInstance(myProject);
    myTrackedBranches = findTrackedBranches(repositories, branchName);
    myUnmergedToBranches = newHashMap();
    // Capture the branch tip in every repository up front; entries are dropped (with an error logged)
    // for repositories where the branch or its hash can't be resolved.
    myDeletedBranchTips = ContainerUtil.map2MapNotNull(repositories, (GitRepository repo) -> {
      GitBranchesCollection branches = repo.getBranches();
      GitLocalBranch branch = branches.findLocalBranch(myBranchName);
      if (branch == null) {
        LOG.error("Couldn't find branch by name " + myBranchName + " in " + repo);
        return null;
      }
      Hash hash = branches.getHash(branch);
      if (hash == null) {
        LOG.error("Couldn't find hash for branch " + branch + " in " + repo);
        return null;
      }
      return Pair.create(repo, hash.asString());
    });
  }

  @Override
  public void execute() {
    // Deletes the branch repository by repository; stops at the first fatal error.
    boolean fatalErrorHappened = false;
    while (hasMoreRepositories() && !fatalErrorHappened) {
      final GitRepository repository = next();
      GitSimpleEventDetector notFullyMergedDetector = new GitSimpleEventDetector(GitSimpleEventDetector.Event.BRANCH_NOT_FULLY_MERGED);
      GitBranchNotMergedToUpstreamDetector notMergedToUpstreamDetector = new GitBranchNotMergedToUpstreamDetector();
      // Try a safe delete first ("git branch -d"); only force-delete if git refuses because of unmerged commits.
      GitCommandResult result = myGit.branchDelete(repository, myBranchName, false, notFullyMergedDetector, notMergedToUpstreamDetector);
      if (result.success()) {
        refresh(repository);
        markSuccessful(repository);
      }
      else if (notFullyMergedDetector.hasHappened()) {
        String baseBranch = notMergedToUpstreamDetector.getBaseBranch();
        if (baseBranch == null) { // GitBranchNotMergedToUpstreamDetector didn't happen
          baseBranch = myCurrentHeads.get(repository);
        }
        // Remember where the branch pointed and what it wasn't merged to, so that
        // "View Commits" can later show the commits that were discarded.
        myUnmergedToBranches.put(repository, new UnmergedBranchInfo(myDeletedBranchTips.get(repository),
                                                                    GitBranchUtil.stripRefsPrefix(baseBranch)));
        GitCommandResult forceDeleteResult = myGit.branchDelete(repository, myBranchName, true);
        if (forceDeleteResult.success()) {
          refresh(repository);
          markSuccessful(repository);
        }
        else {
          fatalError(getErrorTitle(), forceDeleteResult.getErrorOutputAsHtmlString());
          fatalErrorHappened = true;
        }
      }
      else {
        fatalError(getErrorTitle(), result.getErrorOutputAsJoinedString());
        fatalErrorHappened = true;
      }
    }
    if (!fatalErrorHappened) {
      notifySuccess();
    }
  }

  /**
   * Shows the success notification, offering to restore the branch, to view the
   * discarded unmerged commits (if any), and to delete the tracked remote branch
   * (when no other local branch tracks it and it is not protected).
   */
  @Override
  protected void notifySuccess() {
    boolean unmergedCommits = !myUnmergedToBranches.isEmpty();
    String message = "<b>Deleted Branch:</b> " + myBranchName;
    if (unmergedCommits) message += "<br/>Unmerged commits were discarded";
    Notification notification = STANDARD_NOTIFICATION.createNotification("", message, NotificationType.INFORMATION, null);
    notification.addAction(NotificationAction.createSimple(RESTORE, () -> restoreInBackground(notification)));
    if (unmergedCommits) {
      notification.addAction(NotificationAction.createSimple(VIEW_COMMITS, () -> viewUnmergedCommitsInBackground(notification)));
    }
    if (!myTrackedBranches.isEmpty() &&
        hasNoOtherTrackingBranch(myTrackedBranches, myBranchName) &&
        trackedBranchIsNotProtected()) {
      notification.addAction(NotificationAction.createSimple(DELETE_TRACKED_BRANCH, () -> deleteTrackedBranchInBackground()));
    }
    myNotifier.notify(notification);
  }

  // True if none of the tracked remote branches is listed as protected in the shared settings.
  private boolean trackedBranchIsNotProtected() {
    return myTrackedBranches.values().stream()
      .noneMatch(branch -> GitSharedSettings.getInstance(myProject).isBranchProtected(branch.getNameForRemoteOperations()));
  }

  // True if, in every repository, no OTHER local branch tracks the same remote branch as the deleted one did.
  private static boolean hasNoOtherTrackingBranch(@NotNull Map<GitRepository, GitRemoteBranch> trackedBranches,
                                                  @NotNull String localBranch) {
    for (GitRepository repository : trackedBranches.keySet()) {
      if (exists(repository.getBranchTrackInfos(), info -> !info.getLocalBranch().getName().equals(localBranch) &&
                                                           info.getRemoteBranch().equals(trackedBranches.get(repository)))) {
        return false;
      }
    }
    return true;
  }

  // Refreshes the cached repository state after the branch list has changed.
  private static void refresh(@NotNull GitRepository... repositories) {
    for (GitRepository repository : repositories) {
      repository.update();
    }
  }

  @Override
  protected void rollback() {
    GitCompoundResult result = doRollback();
    if (!result.totalSuccess()) {
      myNotifier.notifyError("Error during rollback of branch deletion", result.getErrorOutputWithReposIndication());
    }
  }

  /**
   * Recreates the branch at its remembered tip in every repository where the deletion
   * succeeded, and re-establishes tracking where it existed before.
   */
  @NotNull
  private GitCompoundResult doRollback() {
    GitCompoundResult result = new GitCompoundResult(myProject);
    for (GitRepository repository : getSuccessfulRepositories()) {
      GitCommandResult res = myGit.branchCreate(repository, myBranchName, myDeletedBranchTips.get(repository));
      result.append(repository, res);
      // restore tracking
      GitRemoteBranch trackedBranch = myTrackedBranches.get(repository);
      if (trackedBranch != null) {
        GitCommandResult setTrackResult = setUpTracking(repository, myBranchName, trackedBranch.getNameForLocalOperations());
        if (!setTrackResult.success()) {
          // Tracking failure is not fatal for the rollback: the branch itself was restored.
          LOG.warn("Couldn't set " + myBranchName + " to track " + trackedBranch + " in " + repository.getRoot().getName() + ": " +
                   setTrackResult.getErrorOutputAsJoinedString());
        }
      }
      refresh(repository);
    }
    return result;
  }

  // Runs "git branch --set-upstream-to" (or the pre-1.8 "--set-upstream" form) to restore tracking.
  @NotNull
  private GitCommandResult setUpTracking(@NotNull GitRepository repository, @NotNull String branchName, @NotNull String trackedBranch) {
    GitLineHandler handler = new GitLineHandler(myProject, repository.getRoot(), GitCommand.BRANCH);
    if (GitVersionSpecialty.KNOWS_SET_UPSTREAM_TO.existsIn(repository)) {
      handler.addParameters("--set-upstream-to", trackedBranch, branchName);
    }
    else {
      // Older gits use the reversed-argument "--set-upstream" form.
      handler.addParameters("--set-upstream", branchName, trackedBranch);
    }
    return myGit.runCommand(handler);
  }

  @NotNull
  private String getErrorTitle() {
    return String.format("Branch %s wasn't deleted", myBranchName);
  }

  @NotNull
  public String getSuccessMessage() {
    return String.format("Deleted branch %s", formatBranchName(myBranchName));
  }

  @NotNull
  @Override
  protected String getRollbackProposal() {
    return "However branch deletion has succeeded for the following " + repositories() + ":<br/>" +
           successfulRepositoriesJoined() +
           "<br/>You may rollback (recreate " + myBranchName + " in these roots) not to let branches diverge.";
  }

  @NotNull
  @Override
  protected String getOperationName() {
    return "branch deletion";
  }

  // HTML markup used when the branch name is embedded into notification text.
  @NotNull
  private static String formatBranchName(@NotNull String name) {
    return "<b><code>" + name + "</code></b>";
  }

  /**
   * Shows a dialog "the branch is not fully merged" with the list of unmerged commits.
   * User may still want to force delete the branch.
   * In multi-repository setup collects unmerged commits for all given repositories.
   * @return true if the branch should be restored.
   */
  private boolean showNotFullyMergedDialog(@NotNull Map<GitRepository, UnmergedBranchInfo> unmergedBranches) {
    Map<GitRepository, List<GitCommit>> history = new HashMap<>();
    // we don't confuse user with the absence of repositories which branch was deleted w/o force,
    // we display no commits for them
    for (GitRepository repository : getRepositories()) {
      if (unmergedBranches.containsKey(repository)) {
        UnmergedBranchInfo unmergedInfo = unmergedBranches.get(repository);
        history.put(repository, getUnmergedCommits(repository, unmergedInfo.myTipOfDeletedUnmergedBranch, unmergedInfo.myBaseBranch));
      }
      else {
        history.put(repository, Collections.emptyList());
      }
    }
    Map<GitRepository, String> baseBranches = Maps.asMap(unmergedBranches.keySet(), it -> unmergedBranches.get(it).myBaseBranch);
    return myUiHandler.showBranchIsNotFullyMergedDialog(myProject, history, baseBranches, myBranchName);
  }

  // Returns the commits reachable from branchName but not from baseBranch ("base..branch"),
  // or an empty list if the log command fails.
  @NotNull
  private static List<GitCommit> getUnmergedCommits(@NotNull GitRepository repository,
                                                    @NotNull String branchName,
                                                    @NotNull String baseBranch) {
    String range = baseBranch + ".." + branchName;
    try {
      return GitHistoryUtils.history(repository.getProject(), repository.getRoot(), range);
    }
    catch (VcsException e) {
      LOG.warn("Couldn't get `git log " + range + "` in " + getShortRepositoryName(repository), e);
    }
    return Collections.emptyList();
  }

  // Collects, per repository, the remote branch tracked by the given local branch (if any).
  @NotNull
  private static Map<GitRepository, GitRemoteBranch> findTrackedBranches(@NotNull Collection<GitRepository> repositories,
                                                                         @NotNull String localBranchName) {
    Map<GitRepository, GitRemoteBranch> trackedBranches = newHashMap();
    for (GitRepository repository : repositories) {
      GitBranchTrackInfo trackInfo = GitBranchUtil.getTrackInfo(repository, localBranchName);
      if (trackInfo != null) trackedBranches.put(repository, trackInfo.getRemoteBranch());
    }
    return trackedBranches;
  }

  // warning: not deleting branch 'feature' that is not yet merged to
  //          'refs/remotes/origin/feature', even though it is merged to HEAD.
  // error: The branch 'feature' is not fully merged.
  // If you are sure you want to delete it, run 'git branch -D feature'.
  private static class GitBranchNotMergedToUpstreamDetector implements GitLineHandlerListener {

    // Extracts the upstream ref name from git's "not yet merged to '<ref>'" warning.
    private static final Pattern PATTERN = Pattern.compile(".*'(.*)', even though it is merged to.*");
    @Nullable private String myBaseBranch;

    @Override
    public void onLineAvailable(String line, Key outputType) {
      Matcher matcher = PATTERN.matcher(line);
      if (matcher.matches()) {
        myBaseBranch = matcher.group(1);
      }
    }

    @Override
    public void processTerminated(int exitCode) {
    }

    @Override
    public void startFailed(Throwable exception) {
    }

    @Nullable
    public String getBaseBranch() {
      return myBaseBranch;
    }
  }

  // Value object: the tip of the force-deleted branch and the branch it was not merged to.
  static class UnmergedBranchInfo {
    @NotNull private final String myTipOfDeletedUnmergedBranch;
    @NotNull private final String myBaseBranch;

    public UnmergedBranchInfo(@NotNull String tipOfDeletedUnmergedBranch, @NotNull String baseBranch) {
      myTipOfDeletedUnmergedBranch = tipOfDeletedUnmergedBranch;
      myBaseBranch = baseBranch;
    }
  }

  // Deletes the tracked remote branch(es) in a background task, grouping repositories by remote branch name.
  private void deleteTrackedBranchInBackground() {
    new Task.Backgroundable(myProject, "Deleting Remote Branch " + myBranchName + "...") {
      @Override
      public void run(@NotNull ProgressIndicator indicator) {
        GitBrancher brancher = GitBrancher.getInstance(getProject());
        MultiMap<String, GitRepository> grouped = groupTrackedBranchesByName();
        for (String remoteBranch : grouped.keySet()) {
          brancher.deleteRemoteBranch(remoteBranch, new ArrayList<>(grouped.get(remoteBranch)));
        }
      }
    }.queue();
  }

  @NotNull
  private MultiMap<String, GitRepository> groupTrackedBranchesByName() {
    MultiMap<String, GitRepository> trackedBranchNames = MultiMap.create();
    for (GitRepository repository : myTrackedBranches.keySet()) {
      GitRemoteBranch trackedBranch = myTrackedBranches.get(repository);
      if (trackedBranch != null) {
        trackedBranchNames.putValue(trackedBranch.getNameForLocalOperations(), repository);
      }
    }
    return trackedBranchNames;
  }

  // "Restore" notification action: recreates the branch in a background task.
  private void restoreInBackground(@NotNull Notification notification) {
    new Task.Backgroundable(myProject, "Restoring Branch " + myBranchName + "...") {
      @Override
      public void run(@NotNull ProgressIndicator indicator) {
        rollbackBranchDeletion(notification);
      }
    }.queue();
  }

  // Restores the branch; expires the notification on success, reports an error otherwise.
  private void rollbackBranchDeletion(@NotNull Notification notification) {
    GitCompoundResult result = doRollback();
    if (result.totalSuccess()) {
      notification.expire();
    }
    else {
      myNotifier.notifyError("Couldn't Restore " + formatBranchName(myBranchName), result.getErrorOutputWithReposIndication());
    }
  }

  // "View Commits" notification action: shows the unmerged commits and optionally restores the branch.
  private void viewUnmergedCommitsInBackground(@NotNull Notification notification) {
    new Task.Backgroundable(myProject, "Collecting Unmerged Commits...") {
      @Override
      public void run(@NotNull ProgressIndicator indicator) {
        boolean restore = showNotFullyMergedDialog(myUnmergedToBranches);
        if (restore) {
          rollbackBranchDeletion(notification);
        }
      }
    }.queue();
  }
}
| |
package com.kns.adapter;
import java.util.ArrayList;
import java.util.List;
import android.app.Activity;
import android.content.Context;
import android.graphics.Bitmap;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.BaseAdapter;
import android.widget.Button;
import android.widget.ImageView;
import com.kns.model.Custom_Image_Model;
import com.kns.util.ImageUtil;
import com.squareup.picasso.Picasso;
import com.squareup.picasso.Transformation;
import com.sunil.selectmutiple.R;
public class Custom_Image_Adapter extends BaseAdapter{
private static final String TAG="Custom_Image_Adapter";
private LayoutInflater mInflater=null;
private List<Custom_Image_Model> videolist=null;
private Context context=null;
private Bitmap bitmap=null;
private Button btn_update;
private boolean isActionMultiplePick;
public Custom_Image_Adapter(Activity context, List<Custom_Image_Model> list){
mInflater = context.getLayoutInflater();
this.videolist=list;
this.context=context;
}
@Override
public int getCount() {
return videolist.size();
}
@Override
public Object getItem(int position) {
return null;
}
@Override
public long getItemId(int position) {
return 0;
}
public void setMultiplePick(boolean isMultiplePick) {
this.isActionMultiplePick = isMultiplePick;
}
public void selectAll(boolean selection) {
for (int i = 0; i < videolist.size(); i++) {
videolist.get(i).isSeleted = selection;
}
notifyDataSetChanged();
}
public boolean isAllSelected() {
boolean isAllSelected = true;
for (int i = 0; i < videolist.size(); i++) {
if (!videolist.get(i).isSeleted) {
isAllSelected = false;
break;
}
}
return isAllSelected;
}
public boolean isAnySelected() {
boolean isAnySelected = false;
for (int i = 0; i < videolist.size(); i++) {
if (videolist.get(i).isSeleted) {
isAnySelected = true;
break;
}
}
return isAnySelected;
}
public ArrayList<Custom_Image_Model> getSelected() {
ArrayList<Custom_Image_Model> dataT = new ArrayList<Custom_Image_Model>();
for (int i = 0; i < videolist.size(); i++) {
if (videolist.get(i).isSeleted) {
dataT.add(videolist.get(i));
}
}
return dataT;
}
public void changeSelection(View v, int position) {
if (videolist.get(position).isSeleted) {
videolist.get(position).isSeleted = false;
} else {
videolist.get(position).isSeleted = true;
}
((ViewHolder) v.getTag()).imgQueueMultiSelected.setSelected(videolist.get(position).isSeleted);
}
/**
 * Binds the gallery item at {@code position} to a (possibly recycled) row view:
 * loads the item's thumbnail via Picasso, scaled to the ImageView's width, and
 * reflects the multi-pick selection state when multi-pick mode is active.
 */
@Override
public View getView(int position, View convertView, ViewGroup parent) {
    final ViewHolder holder;
    if (convertView == null) {
        // First use of this row: inflate it and cache the child views in a
        // ViewHolder so subsequent bindings skip findViewById lookups.
        holder = new ViewHolder();
        convertView = mInflater.inflate(R.layout.gallery_item, null);
        holder.imgQueue = (ImageView) convertView.findViewById(R.id.imgQueue);
        holder.imgQueueMultiSelected = (ImageView) convertView.findViewById(R.id.imgQueueMultiSelected);
        if (isActionMultiplePick) {
            holder.imgQueueMultiSelected.setVisibility(View.VISIBLE);
        } else {
            holder.imgQueueMultiSelected.setVisibility(View.GONE);
        }
        convertView.setTag(holder);
    } else {
        holder = (ViewHolder) convertView.getTag();
    }
    holder.imgQueue.setTag(position);
    Custom_Image_Model imagemodel = videolist.get(position);
    String updatedvideothumb = imagemodel.getImagethumburl();
    ImageUtil.galleryLog(TAG, "Updated thumb is: " + updatedvideothumb);
    // Scales the loaded bitmap to the ImageView's current width, preserving the
    // source aspect ratio. NOTE(review): if the view has not been measured yet,
    // getWidth() may be 0 at load time — confirm against the layout pass.
    Transformation transformation = new Transformation() {
        @Override
        public Bitmap transform(Bitmap source) {
            int targetWidth = holder.imgQueue.getWidth();
            double aspectRatio = (double) source.getHeight() / (double) source.getWidth();
            int targetHeight = (int) (targetWidth * aspectRatio);
            Bitmap result = Bitmap.createScaledBitmap(source, targetWidth, targetHeight, false);
            if (result != source) {
                // Same bitmap is returned if sizes are the same
                source.recycle();
            }
            return result;
        }

        @Override
        public String key() {
            // NOTE(review): this key is constant although transform() depends on
            // the ImageView's width, so Picasso's cache may serve a bitmap scaled
            // for a different width — consider including targetWidth in the key.
            return "transformation" + " desiredWidth";
        }
    };
    if (updatedvideothumb != null && !updatedvideothumb.isEmpty()) {
        Picasso.with(context)
                .load(updatedvideothumb)
                .transform(transformation)
                .placeholder(R.drawable.ic_dwnloadthumb)
                .error(R.drawable.ic_nothumb)
                .into(holder.imgQueue);
    } else {
        // No thumbnail URL available: show the static placeholder instead.
        holder.imgQueue.setImageResource(R.drawable.no_image);
    }
    if (isActionMultiplePick) {
        holder.imgQueueMultiSelected.setSelected(videolist.get(position).isSeleted);
    }
    return convertView;
}
/** Per-row view cache (ViewHolder pattern) used by getView to avoid repeated findViewById lookups. */
private static class ViewHolder {
    ImageView imgQueue;              // thumbnail displayed for the gallery item
    ImageView imgQueueMultiSelected; // selection indicator, visible only in multi-pick mode
}
}
| |
package io.vavr.collection;
import io.vavr.Function1;
import io.vavr.Tuple2;
import io.vavr.Tuple3;
import io.vavr.Value;
import org.assertj.core.api.Assertions;
import org.assertj.core.api.IterableAssert;
import org.assertj.core.api.ObjectAssert;
import org.junit.Test;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Comparator;
import java.util.Random;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collector;
import static java.util.Comparator.naturalOrder;
import static java.util.Comparator.reverseOrder;
import static java.util.stream.Collectors.toList;
import static io.vavr.Serializables.deserialize;
import static io.vavr.Serializables.serialize;
public class BitSetTest extends AbstractSortedSetTest {
private final static int MAX_BIT = 1_000_000;
private enum E {
V1, V2, V3
}
@Override
protected <T> IterableAssert<T> assertThat(Iterable<T> actual) {
return new IterableAssert<T>(actual) {
@Override
@SuppressWarnings("unchecked")
public IterableAssert<T> isEqualTo(Object obj) {
if (obj instanceof BitSet || actual instanceof BitSet) {
Assertions.assertThat(HashSet.ofAll(actual)).isEqualTo(HashSet.ofAll((Iterable<T>) obj));
} else {
super.isEqualTo(obj);
}
return this;
}
};
}
@Override
protected <T> ObjectAssert<T> assertThat(T actual) {
return new ObjectAssert<T>(actual) {
@Override
public ObjectAssert<T> isEqualTo(Object expected) {
if (actual instanceof Tuple2) {
final Tuple2<?, ?> t1 = (Tuple2<?, ?>) actual;
final Tuple2<?, ?> t2 = (Tuple2<?, ?>) expected;
assertThat((Iterable<?>) t1._1).isEqualTo(t2._1);
assertThat((Iterable<?>) t1._2).isEqualTo(t2._2);
return this;
} else if (actual instanceof Tuple3) {
final Tuple3<?, ?, ?> t1 = (Tuple3<?, ?, ?>) actual;
final Tuple3<?, ?, ?> t2 = (Tuple3<?, ?, ?>) expected;
assertThat((Iterable<?>) t1._1).isEqualTo(t2._1);
assertThat((Iterable<?>) t1._2).isEqualTo(t2._2);
assertThat((Iterable<?>) t1._3).isEqualTo(t2._3);
return this;
} else {
return super.isEqualTo(expected);
}
}
};
}
private <T> BitSet.Builder<T> bsBuilder() {
final Mapper<T> mapper = new Mapper<>();
return BitSet.withRelations(
(Function1<Integer, T> & Serializable) mapper::fromInt,
(Function1<T, Integer> & Serializable) mapper::toInt);
}
@Override
protected <T> Collector<T, ArrayList<T>, ? extends Traversable<T>> collector() {
return this.<T> bsBuilder().collector();
}
@Override
protected <T> BitSet<T> empty() {
return this.<T> bsBuilder().empty();
}
@Override
protected <T> BitSet<T> emptyWithNull() {
return empty();
}
@Override
protected boolean emptyShouldBeSingleton() {
return false;
}
@Override
protected <T> BitSet<T> of(T element) {
return this.<T> bsBuilder().of(element);
}
@Override
protected <T> BitSet<T> of(Comparator<? super T> comparator, T element) {
// comparator is not used
return this.<T> bsBuilder().of(element);
}
@SuppressWarnings("varargs")
@SafeVarargs
@Override
protected final <T> BitSet<T> of(Comparator<? super T> comparator, T... elements) {
// comparator is not used
return this.<T> bsBuilder().of(elements);
}
@SuppressWarnings("varargs")
@SafeVarargs
@Override
protected final <T> BitSet<T> of(T... elements) {
return this.<T> bsBuilder().of(elements);
}
@Override
protected boolean useIsEqualToInsteadOfIsSameAs() {
return true;
}
@Override
protected int getPeekNonNilPerformingAnAction() {
return 1;
}
@Override
protected <T> BitSet<T> ofAll(Iterable<? extends T> elements) {
return this.<T> bsBuilder().ofAll(elements);
}
@Override
protected <T extends Comparable<? super T>> BitSet<T> ofJavaStream(java.util.stream.Stream<? extends T> javaStream) {
return this.<T> bsBuilder().ofAll(javaStream);
}
@Override
protected BitSet<Boolean> ofAll(boolean... elements) {
return BitSet.ofAll(elements);
}
@Override
protected BitSet<Byte> ofAll(byte... elements) {
return BitSet.ofAll(elements);
}
@Override
protected BitSet<Character> ofAll(char... elements) {
return BitSet.ofAll(elements);
}
@Override
protected BitSet<Double> ofAll(double... elements) {
return this.<Double> bsBuilder().ofAll(Iterator.ofAll(elements));
}
@Override
protected BitSet<Float> ofAll(float... elements) {
return this.<Float> bsBuilder().ofAll(Iterator.ofAll(elements));
}
@Override
protected BitSet<Integer> ofAll(int... elements) {
return BitSet.ofAll(elements);
}
@Override
protected BitSet<Long> ofAll(long... elements) {
return BitSet.ofAll(elements);
}
@Override
protected BitSet<Short> ofAll(short... elements) {
return BitSet.ofAll(elements);
}
@Override
protected <T> BitSet<T> tabulate(int n, Function<? super Integer, ? extends T> f) {
return this.<T> bsBuilder().tabulate(n, f);
}
@Override
protected <T> BitSet<T> fill(int n, Supplier<? extends T> s) {
return this.<T> bsBuilder().fill(n, s);
}
@Override
protected BitSet<Character> range(char from, char toExclusive) {
return BitSet.range(from, toExclusive);
}
@Override
protected BitSet<Character> rangeBy(char from, char toExclusive, int step) {
return BitSet.rangeBy(from, toExclusive, step);
}
@Override
protected BitSet<Double> rangeBy(double from, double toExclusive, double step) {
return this.<Double> bsBuilder().ofAll(Iterator.rangeBy(from, toExclusive, step));
}
private static boolean isBadRange(int a, int b) {
return a < 0 || b < 0 || a > MAX_BIT || b > MAX_BIT;
}
private static boolean isBadRange(long a, long b) {
return a < 0 || b < 0 || a > MAX_BIT || b > MAX_BIT;
}
@Override
protected BitSet<Integer> range(int from, int toExclusive) {
if (isBadRange(from, toExclusive)) {
return this.<Integer> bsBuilder().ofAll(Iterator.range(from, toExclusive));
} else {
return BitSet.range(from, toExclusive);
}
}
@Override
protected BitSet<Integer> rangeBy(int from, int toExclusive, int step) {
if (isBadRange(from, toExclusive)) {
return this.<Integer> bsBuilder().ofAll(Iterator.rangeBy(from, toExclusive, step));
} else {
return BitSet.rangeBy(from, toExclusive, step);
}
}
@Override
protected BitSet<Long> range(long from, long toExclusive) {
if (isBadRange(from, toExclusive)) {
return this.<Long> bsBuilder().ofAll(Iterator.range(from, toExclusive));
} else {
return BitSet.range(from, toExclusive);
}
}
@Override
protected BitSet<Long> rangeBy(long from, long toExclusive, long step) {
if (isBadRange(from, toExclusive)) {
return this.<Long> bsBuilder().ofAll(Iterator.rangeBy(from, toExclusive, step));
} else {
return BitSet.rangeBy(from, toExclusive, step);
}
}
@Override
protected BitSet<Character> rangeClosed(char from, char toInclusive) {
return BitSet.rangeClosed(from, toInclusive);
}
@Override
protected BitSet<Character> rangeClosedBy(char from, char toInclusive, int step) {
return BitSet.rangeClosedBy(from, toInclusive, step);
}
@Override
protected BitSet<Double> rangeClosedBy(double from, double toInclusive, double step) {
return this.<Double> bsBuilder().ofAll(Iterator.rangeClosedBy(from, toInclusive, step));
}
@Override
protected BitSet<Integer> rangeClosed(int from, int toInclusive) {
if (isBadRange(from, toInclusive)) {
return this.<Integer> bsBuilder().ofAll(Iterator.rangeClosed(from, toInclusive));
} else {
return BitSet.rangeClosed(from, toInclusive);
}
}
@Override
protected BitSet<Integer> rangeClosedBy(int from, int toInclusive, int step) {
if (isBadRange(from, toInclusive)) {
return this.<Integer> bsBuilder().ofAll(Iterator.rangeClosedBy(from, toInclusive, step));
} else {
return BitSet.rangeClosedBy(from, toInclusive, step);
}
}
@Override
protected BitSet<Long> rangeClosed(long from, long toInclusive) {
if (isBadRange(from, toInclusive)) {
return this.<Long> bsBuilder().ofAll(Iterator.rangeClosed(from, toInclusive));
} else {
return BitSet.rangeClosed(from, toInclusive);
}
}
@Override
protected BitSet<Long> rangeClosedBy(long from, long toInclusive, long step) {
if (isBadRange(from, toInclusive)) {
return this.<Long> bsBuilder().ofAll(Iterator.rangeClosedBy(from, toInclusive, step));
} else {
return BitSet.rangeClosedBy(from, toInclusive, step);
}
}
// BitSet specific
@Test
public void testBitSet1() {
BitSet<Integer> bs = BitSet.empty();
bs = bs.add(2);
assertThat(bs.head()).isEqualTo(2);
assertThat(bs.length()).isEqualTo(1);
bs = bs.add(4);
assertThat(bs.head()).isEqualTo(2);
assertThat(bs.length()).isEqualTo(2);
bs = bs.remove(2);
assertThat(bs.head()).isEqualTo(4);
assertThat(bs.contains(2)).isFalse();
assertThat(bs.length()).isEqualTo(1);
bs = bs.remove(4);
assertThat(bs.isEmpty()).isTrue();
assertThat(bs.length()).isEqualTo(0);
}
@Test
public void testBitSet2() {
BitSet<Integer> bs = BitSet.empty();
bs = bs.add(2);
assertThat(bs.head()).isEqualTo(2);
assertThat(bs.add(2)).isSameAs(bs);
assertThat(bs.length()).isEqualTo(1);
bs = bs.add(70);
assertThat(bs.head()).isEqualTo(2);
assertThat(bs.add(2)).isSameAs(bs);
assertThat(bs.add(70)).isSameAs(bs);
assertThat(bs.length()).isEqualTo(2);
bs = bs.remove(2);
assertThat(bs.head()).isEqualTo(70);
assertThat(bs.contains(2)).isFalse();
assertThat(bs.length()).isEqualTo(1);
bs = bs.remove(70);
assertThat(bs.isEmpty()).isTrue();
assertThat(bs.length()).isEqualTo(0);
bs = bs.add(2);
bs = bs.add(70);
bs = bs.add(3);
assertThat(bs.length()).isEqualTo(3);
bs = bs.add(71);
assertThat(bs.length()).isEqualTo(4);
bs = bs.add(701);
assertThat(bs.length()).isEqualTo(5);
}
@Test
public void testBitSetN() {
BitSet<Integer> bs = BitSet.empty();
bs = bs.add(2);
assertThat(bs.head()).isEqualTo(2);
bs = bs.add(700);
assertThat(bs.head()).isEqualTo(2);
assertThat(bs.add(2)).isSameAs(bs);
assertThat(bs.add(700)).isSameAs(bs);
bs = bs.remove(2);
assertThat(bs.head()).isEqualTo(700);
assertThat(bs.contains(2)).isFalse();
bs = bs.remove(700);
assertThat(bs.isEmpty()).isTrue();
}
@Test
public void testFactories() {
assertThat(BitSet.of(7).contains(7)).isTrue(); // BitSet1, < 64
assertThat(BitSet.of(77).contains(77)).isTrue(); // BitSet2, < 2*64
assertThat(BitSet.of(777).contains(777)).isTrue(); // BitSetN, >= 2*64
assertThat(BitSet.ofAll(List.of(1).toJavaStream())).isEqualTo(BitSet.of(1));
assertThat(BitSet.fill(1, () -> 1)).isEqualTo(BitSet.of(1));
assertThat(BitSet.tabulate(1, i -> 1)).isEqualTo(BitSet.of(1));
}
@Test
public void shouldAllAll() {
assertThat(BitSet.empty().add(7).addAll(List.of(1, 2))).isEqualTo(BitSet.of(1, 2, 7));
assertThat(BitSet.empty().add(77).addAll(List.of(1, 2))).isEqualTo(BitSet.of(1, 2, 77));
assertThat(BitSet.empty().add(777).addAll(List.of(1, 2))).isEqualTo(BitSet.of(1, 2, 777));
}
@Test
public void shouldCollectInts() {
final Traversable<Integer> actual = java.util.stream.Stream.of(1, 2, 3).collect(BitSet.collector());
assertThat(actual).isEqualTo(of(1, 2, 3));
}
@Test
public void testEnums() {
BitSet<E> bs = BitSet.withEnum(E.class).empty();
bs = bs.add(E.V2);
assert bs.head() == E.V2;
bs = bs.add(E.V3);
assert bs.head() == E.V2;
bs = bs.remove(E.V2);
assert bs.head() == E.V3;
assert !bs.contains(E.V2);
assert bs.contains(E.V3);
}
@Test(expected = IllegalArgumentException.class)
public void shouldThrowAddNegativeElementToEmpty() {
BitSet.empty().add(-1);
}
@Test(expected = IllegalArgumentException.class)
public void shouldThrowAddNegativeElementToBitSet2() {
BitSet.empty().add(77).add(-1);
}
@Test(expected = IllegalArgumentException.class)
public void shouldThrowAddNegativeElementToBitSetN() {
BitSet.empty().add(777).add(-1);
}
@Test(expected = IllegalArgumentException.class)
public void shouldThrowAddNegativeElements() {
BitSet.empty().addAll(List.of(-1));
}
@Test(expected = IllegalArgumentException.class)
public void shouldThrowContainsNegativeElements() {
BitSet.empty().contains(-1);
}
@Test
public void shouldSerializeDeserializeNativeBitSet() {
final Object actual = deserialize(serialize(BitSet.of(1, 2, 3)));
final Object expected = BitSet.of(1, 2, 3);
assertThat(actual).isEqualTo(expected);
}
@Test
public void shouldSerializeDeserializeEnumBitSet() {
final Object actual = deserialize(serialize(BitSet.withEnum(E.class).of(E.V1, E.V2)));
final Object expected = BitSet.withEnum(E.class).of(E.V1, E.V2);
assertThat(actual).isEqualTo(expected);
}
@Test
public void shouldBehaveExactlyLikeAnotherBitSet() {
for (int i = 0; i < 10; i++) {
final Random random = getRandom(-1);
final java.util.BitSet mutableBitSet = new java.util.BitSet();
BitSet<Integer> functionalBitSet = BitSet.empty();
final int size = 5_000;
for (int j = 0; j < size; j++) {
/* Insert */
if (random.nextInt() % 3 == 0) {
assertMinimumsAreEqual(mutableBitSet, functionalBitSet);
final int value = random.nextInt(size);
mutableBitSet.set(value);
functionalBitSet = functionalBitSet.add(value);
}
assertMinimumsAreEqual(mutableBitSet, functionalBitSet);
/* Delete */
if (random.nextInt() % 5 == 0) {
if (!mutableBitSet.isEmpty()) { mutableBitSet.clear(mutableBitSet.nextSetBit(0)); }
if (!functionalBitSet.isEmpty()) { functionalBitSet = functionalBitSet.tail(); }
assertMinimumsAreEqual(mutableBitSet, functionalBitSet);
}
}
final Collection<Integer> oldValues = mutableBitSet.stream().sorted().boxed().collect(toList());
final Collection<Integer> newValues = functionalBitSet.toJavaList();
assertThat(oldValues).isEqualTo(newValues);
}
}
private void assertMinimumsAreEqual(java.util.BitSet oldSet, BitSet<Integer> newSet) {
assertThat(oldSet.isEmpty()).isEqualTo(newSet.isEmpty());
if (!newSet.isEmpty()) {
assertThat(oldSet.nextSetBit(0)).isEqualTo(newSet.head());
}
}
// -- toSortedSet
@Override
@Test
public void shouldConvertToSortedSetWithoutComparatorOnComparable() {
final Value<Integer> value = BitSet.of(3, 7, 1, 15, 0);
final Set<Integer> set = value.toSortedSet();
if (value.isSingleValued()) {
assertThat(set).isEqualTo(TreeSet.of(3));
} else {
assertThat(set).isEqualTo(TreeSet.of(0, 1, 3, 7, 15));
}
}
// -- toPriorityQueue
@Test
@Override
public void shouldConvertToPriorityQueueUsingImplicitComparator() {
final Value<Integer> value = BitSet.of(1, 3, 2);
final PriorityQueue<Integer> queue = value.toPriorityQueue();
if (value.isSingleValued()) {
assertThat(queue).isEqualTo(PriorityQueue.of(1));
} else {
assertThat(queue).isEqualTo(PriorityQueue.of(1, 2, 3));
}
}
@Test
@Override
public void shouldConvertToPriorityQueueUsingExplicitComparator() {
final Comparator<Integer> comparator = Comparator.naturalOrder();
final Value<Integer> value = BitSet.of(1, 3, 2);
final PriorityQueue<Integer> queue = value.toPriorityQueue(comparator);
if (value.isSingleValued()) {
assertThat(queue).isEqualTo(PriorityQueue.of(comparator, 1));
} else {
assertThat(queue).isEqualTo(PriorityQueue.of(comparator, 1, 2, 3));
}
}
// -- head, init, last, tail
@Test
public void shouldReturnHeadOfNonEmptyHavingReversedOrder() {
// BitSet can't have reverse order
}
@Test
public void shouldReturnInitOfNonEmptyHavingReversedOrder() {
// BitSet can't have reverse order
}
@Test
public void shouldReturnLastOfNonEmptyHavingReversedOrder() {
// BitSet can't have reverse order
}
@Test
public void shouldReturnTailOfNonEmptyHavingReversedOrder() {
// BitSet can't have reverse order
}
// -- classes
private static class Mapper<T> implements Serializable {
private static final long serialVersionUID = 1L;
private final java.util.Map<Integer, T> fromIntMap = new java.util.HashMap<>();
private final java.util.Map<T, Integer> toIntMap = new java.util.HashMap<>();
private int nextValue = 0;
synchronized T fromInt(Integer i) {
if (i < nextValue) {
return fromIntMap.get(i);
} else {
throw new RuntimeException();
}
}
synchronized Integer toInt(T value) {
Integer i = toIntMap.get(value);
if (i == null) {
i = nextValue++;
toIntMap.put(value, i);
fromIntMap.put(i, value);
}
return i;
}
}
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.felix.dm.impl;
import java.util.ArrayList;
import java.util.Dictionary;
import java.util.Iterator;
import java.util.List;
import org.apache.felix.dm.Component;
import org.apache.felix.dm.ComponentDeclaration;
import org.apache.felix.dm.ComponentDependencyDeclaration;
import org.apache.felix.dm.ComponentStateListener;
import org.apache.felix.dm.Dependency;
import org.apache.felix.dm.DependencyManager;
import org.osgi.framework.BundleContext;
import org.osgi.framework.ServiceRegistration;
/**
* This class allows to filter a Component interface. All Aspect/Adapters extend this class
* in order to add functionality to the default Component implementation.
*
* @author <a href="mailto:dev@felix.apache.org">Felix Project Team</a>
*/
public class FilterService implements Component, ComponentDeclaration {
    // Raw collection/dictionary types are kept: these fields are protected and
    // part of the extension surface used by Aspect/Adapter subclasses.
    protected ComponentImpl m_component;
    protected List m_stateListeners = new ArrayList(); // guarded by "this"
    protected String m_init = "init";
    protected String m_start = "start";
    protected String m_stop = "stop";
    protected String m_destroy = "destroy";
    protected Object m_callbackObject;
    protected Object m_compositionInstance;
    protected String m_compositionMethod;
    protected String[] m_serviceInterfaces;
    protected Object m_serviceImpl;
    protected Object m_factory;
    protected String m_factoryCreateMethod;
    protected Dictionary m_serviceProperties;

    /** Wraps the given component implementation so subclasses can decorate it. */
    public FilterService(Component service) {
        m_component = (ComponentImpl) service;
    }

    public Component add(Dependency dependency) {
        m_component.add(dependency);
        // Add the dependency (if optional) to all already instantiated services.
        // If the dependency is required, our internal service will be stopped/restarted, so in this case
        // we have nothing to do.
        if (! dependency.isRequired()) {
            AbstractDecorator ad = (AbstractDecorator) m_component.getService();
            if (ad != null) {
                ad.addDependency(dependency);
            }
        }
        return this;
    }

    public Component add(List dependencies) {
        m_component.add(dependencies);
        // Add the dependencies to all already instantiated services.
        // If one dependency from the list is required, we have nothing to do, since our internal
        // service will be stopped/restarted.
        Iterator it = dependencies.iterator();
        while (it.hasNext()) {
            if (((Dependency) it.next()).isRequired()) {
                return this;
            }
        }
        // Ok, the list contains no required dependencies: add optionals dependencies in already instantiated
        // services.
        AbstractDecorator ad = (AbstractDecorator) m_component.getService();
        if (ad != null) {
            ad.addDependencies(dependencies);
        }
        return this;
    }

    public void addStateListener(ComponentStateListener listener) {
        synchronized (this) {
            m_stateListeners.add(listener);
        }
        // Add the listener to all already instantiated services.
        AbstractDecorator ad = (AbstractDecorator) m_component.getService();
        if (ad != null) {
            ad.addStateListener(listener);
        }
    }

    public List getDependencies() {
        return m_component.getDependencies();
    }

    public Object getService() {
        return m_component.getService();
    }

    public synchronized Dictionary getServiceProperties() {
        return m_serviceProperties;
    }

    public ServiceRegistration getServiceRegistration() {
        return m_component.getServiceRegistration();
    }

    public Component remove(Dependency dependency) {
        m_component.remove(dependency);
        // Remove the dependency (if optional) from all already instantiated services.
        // If the dependency is required, our internal service will be stopped, so in this case
        // we have nothing to do.
        if (!dependency.isRequired()) {
            AbstractDecorator ad = (AbstractDecorator) m_component.getService();
            if (ad != null) {
                ad.removeDependency(dependency);
            }
        }
        return this;
    }

    public void removeStateListener(ComponentStateListener listener) {
        synchronized (this) {
            m_stateListeners.remove(listener);
        }
        // Remove the listener from all already instantiated services.
        AbstractDecorator ad = (AbstractDecorator) m_component.getService();
        if (ad != null) {
            ad.removeStateListener(listener);
        }
    }

    // The setters below only record the configuration locally; ensureNotActive()
    // guarantees they are called before the underlying component is started.

    public synchronized Component setCallbacks(Object instance, String init, String start, String stop, String destroy) {
        m_component.ensureNotActive();
        m_callbackObject = instance;
        m_init = init;
        m_start = start;
        m_stop = stop;
        m_destroy = destroy;
        return this;
    }

    public Component setCallbacks(String init, String start, String stop, String destroy) {
        setCallbacks(null, init, start, stop, destroy);
        return this;
    }

    public synchronized Component setComposition(Object instance, String getMethod) {
        m_component.ensureNotActive();
        m_compositionInstance = instance;
        m_compositionMethod = getMethod;
        return this;
    }

    public synchronized Component setComposition(String getMethod) {
        m_component.ensureNotActive();
        m_compositionMethod = getMethod;
        return this;
    }

    public synchronized Component setFactory(Object factory, String createMethod) {
        m_component.ensureNotActive();
        m_factory = factory;
        m_factoryCreateMethod = createMethod;
        return this;
    }

    public Component setFactory(String createMethod) {
        return setFactory(null, createMethod);
    }

    public synchronized Component setImplementation(Object implementation) {
        m_component.ensureNotActive();
        m_serviceImpl = implementation;
        return this;
    }

    public Component setInterface(String serviceName, Dictionary properties) {
        return setInterface(new String[] { serviceName }, properties);
    }

    public synchronized Component setInterface(String[] serviceInterfaces, Dictionary properties) {
        m_component.ensureNotActive();
        if (serviceInterfaces != null) {
            // Defensive copy: the caller may reuse/mutate its array afterwards.
            m_serviceInterfaces = new String[serviceInterfaces.length];
            System.arraycopy(serviceInterfaces, 0, m_serviceInterfaces, 0, serviceInterfaces.length);
            m_serviceProperties = properties;
        }
        return this;
    }

    public Component setServiceProperties(Dictionary serviceProperties) {
        synchronized (this) {
            m_serviceProperties = serviceProperties;
        }
        // Set the properties to all already instantiated services.
        if (serviceProperties != null) {
            AbstractDecorator ad = (AbstractDecorator) m_component.getService();
            if (ad != null) {
                ad.setServiceProperties(serviceProperties);
            }
        }
        return this;
    }

    // Remaining operations simply delegate to the wrapped component.

    public void start() {
        m_component.start();
    }

    public void stop() {
        m_component.stop();
    }

    public void invokeCallbackMethod(Object[] instances, String methodName, Class[][] signatures, Object[][] parameters) {
        m_component.invokeCallbackMethod(instances, methodName, signatures, parameters);
    }

    public Object[] getCompositionInstances() {
        return m_component.getCompositionInstances();
    }

    public DependencyManager getDependencyManager() {
        return m_component.getDependencyManager();
    }

    public Component setAutoConfig(Class clazz, boolean autoConfig) {
        m_component.setAutoConfig(clazz, autoConfig);
        return this;
    }

    public Component setAutoConfig(Class clazz, String instanceName) {
        m_component.setAutoConfig(clazz, instanceName);
        return this;
    }

    public boolean getAutoConfig(Class clazz) {
        return m_component.getAutoConfig(clazz);
    }

    public String getAutoConfigInstance(Class clazz) {
        return m_component.getAutoConfigInstance(clazz);
    }

    public ComponentDependencyDeclaration[] getComponentDependencies() {
        return m_component.getComponentDependencies();
    }

    public String getName() {
        return m_component.getName();
    }

    public int getState() {
        return m_component.getState();
    }

    public BundleContext getBundleContext() {
        return m_component.getBundleContext();
    }
}
| |
/**
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rx.internal.operators;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import java.util.concurrent.TimeUnit;
import org.junit.Before;
import org.junit.Test;
import org.mockito.InOrder;
import rx.Observable;
import rx.Observer;
import rx.Scheduler;
import rx.Subscriber;
import rx.exceptions.TestException;
import rx.functions.Action0;
import rx.functions.Func1;
import rx.schedulers.TestScheduler;
import rx.subjects.PublishSubject;
/**
 * Tests for the debounce operator, covering both the time-based overload
 * (driven by a TestScheduler) and the selector-based overload (driven by a
 * PublishSubject acting as the debounce signal).
 */
public class OperatorDebounceTest {

    private TestScheduler scheduler;       // virtual-time scheduler the timed tests advance manually
    private Observer<String> observer;
    private Scheduler.Worker innerScheduler; // worker used by the publish* helpers to schedule emissions

    @Before
    @SuppressWarnings("unchecked")
    public void before() {
        scheduler = new TestScheduler();
        observer = mock(Observer.class);
        innerScheduler = scheduler.createWorker();
    }

    @Test
    public void testDebounceWithCompleted() {
        Observable<String> source = Observable.create(new Observable.OnSubscribe<String>() {
            @Override
            public void call(Subscriber<? super String> observer) {
                publishNext(observer, 100, "one"); // Should be skipped since "two" will arrive before the timeout expires.
                publishNext(observer, 400, "two"); // Should be published since "three" will arrive after the timeout expires.
                publishNext(observer, 900, "three"); // Should be skipped since onCompleted will arrive before the timeout expires.
                publishCompleted(observer, 1000); // Should be published as soon as the timeout expires.
            }
        });
        Observable<String> sampled = source.debounce(400, TimeUnit.MILLISECONDS, scheduler);
        sampled.subscribe(observer);
        scheduler.advanceTimeTo(0, TimeUnit.MILLISECONDS);
        InOrder inOrder = inOrder(observer);
        // must go to 800 since it must be 400 after when two is sent, which is at 400
        scheduler.advanceTimeTo(800, TimeUnit.MILLISECONDS);
        inOrder.verify(observer, times(1)).onNext("two");
        scheduler.advanceTimeTo(1000, TimeUnit.MILLISECONDS);
        inOrder.verify(observer, times(1)).onCompleted();
        inOrder.verifyNoMoreInteractions();
    }

    @Test
    public void testDebounceNeverEmits() {
        Observable<String> source = Observable.create(new Observable.OnSubscribe<String>() {
            @Override
            public void call(Subscriber<? super String> observer) {
                // all should be skipped since they are happening faster than the 200ms timeout
                publishNext(observer, 100, "a"); // Should be skipped
                publishNext(observer, 200, "b"); // Should be skipped
                publishNext(observer, 300, "c"); // Should be skipped
                publishNext(observer, 400, "d"); // Should be skipped
                publishNext(observer, 500, "e"); // Should be skipped
                publishNext(observer, 600, "f"); // Should be skipped
                publishNext(observer, 700, "g"); // Should be skipped
                publishNext(observer, 800, "h"); // Should be skipped
                publishCompleted(observer, 900); // Should be published as soon as the timeout expires.
            }
        });
        Observable<String> sampled = source.debounce(200, TimeUnit.MILLISECONDS, scheduler);
        sampled.subscribe(observer);
        scheduler.advanceTimeTo(0, TimeUnit.MILLISECONDS);
        InOrder inOrder = inOrder(observer);
        inOrder.verify(observer, times(0)).onNext(anyString());
        scheduler.advanceTimeTo(1000, TimeUnit.MILLISECONDS);
        inOrder.verify(observer, times(1)).onCompleted();
        inOrder.verifyNoMoreInteractions();
    }

    @Test
    public void testDebounceWithError() {
        Observable<String> source = Observable.create(new Observable.OnSubscribe<String>() {
            @Override
            public void call(Subscriber<? super String> observer) {
                Exception error = new TestException();
                publishNext(observer, 100, "one"); // Should be published since "two" will arrive after the timeout expires.
                publishNext(observer, 600, "two"); // Should be skipped since onError will arrive before the timeout expires.
                publishError(observer, 700, error); // Should be published as soon as the timeout expires.
            }
        });
        Observable<String> sampled = source.debounce(400, TimeUnit.MILLISECONDS, scheduler);
        sampled.subscribe(observer);
        scheduler.advanceTimeTo(0, TimeUnit.MILLISECONDS);
        InOrder inOrder = inOrder(observer);
        // 100 + 400 means it triggers at 500
        scheduler.advanceTimeTo(500, TimeUnit.MILLISECONDS);
        inOrder.verify(observer).onNext("one");
        scheduler.advanceTimeTo(701, TimeUnit.MILLISECONDS);
        inOrder.verify(observer).onError(any(TestException.class));
        inOrder.verifyNoMoreInteractions();
    }

    /** Schedules onCompleted on the test worker after {@code delay} ms of virtual time. */
    private <T> void publishCompleted(final Observer<T> observer, long delay) {
        innerScheduler.schedule(new Action0() {
            @Override
            public void call() {
                observer.onCompleted();
            }
        }, delay, TimeUnit.MILLISECONDS);
    }

    /** Schedules onError on the test worker after {@code delay} ms of virtual time. */
    private <T> void publishError(final Observer<T> observer, long delay, final Exception error) {
        innerScheduler.schedule(new Action0() {
            @Override
            public void call() {
                observer.onError(error);
            }
        }, delay, TimeUnit.MILLISECONDS);
    }

    /** Schedules onNext(value) on the test worker after {@code delay} ms of virtual time. */
    private <T> void publishNext(final Observer<T> observer, final long delay, final T value) {
        innerScheduler.schedule(new Action0() {
            @Override
            public void call() {
                observer.onNext(value);
            }
        }, delay, TimeUnit.MILLISECONDS);
    }

    @Test
    public void debounceSelectorNormal1() {
        // Selector overload: an item is emitted when the debouncer subject fires
        // (or on completion), so only the latest item per debouncer tick survives.
        PublishSubject<Integer> source = PublishSubject.create();
        final PublishSubject<Integer> debouncer = PublishSubject.create();
        Func1<Integer, Observable<Integer>> debounceSel = new Func1<Integer, Observable<Integer>>() {
            @Override
            public Observable<Integer> call(Integer t1) {
                return debouncer;
            }
        };
        @SuppressWarnings("unchecked")
        Observer<Object> o = mock(Observer.class);
        InOrder inOrder = inOrder(o);
        source.debounce(debounceSel).subscribe(o);
        source.onNext(1);
        debouncer.onNext(1);
        source.onNext(2);
        source.onNext(3);
        source.onNext(4);
        debouncer.onNext(2);
        source.onNext(5);
        source.onCompleted();
        inOrder.verify(o).onNext(1);
        inOrder.verify(o).onNext(4);
        inOrder.verify(o).onNext(5);
        inOrder.verify(o).onCompleted();
        verify(o, never()).onError(any(Throwable.class));
    }

    @Test
    public void debounceSelectorFuncThrows() {
        // A throwing selector function must surface as onError, not crash.
        PublishSubject<Integer> source = PublishSubject.create();
        Func1<Integer, Observable<Integer>> debounceSel = new Func1<Integer, Observable<Integer>>() {
            @Override
            public Observable<Integer> call(Integer t1) {
                throw new TestException();
            }
        };
        @SuppressWarnings("unchecked")
        Observer<Object> o = mock(Observer.class);
        source.debounce(debounceSel).subscribe(o);
        source.onNext(1);
        verify(o, never()).onNext(any());
        verify(o, never()).onCompleted();
        verify(o).onError(any(TestException.class));
    }

    @Test
    public void debounceSelectorObservableThrows() {
        // An erroring debounce-signal observable must surface as onError.
        PublishSubject<Integer> source = PublishSubject.create();
        Func1<Integer, Observable<Integer>> debounceSel = new Func1<Integer, Observable<Integer>>() {
            @Override
            public Observable<Integer> call(Integer t1) {
                return Observable.error(new TestException());
            }
        };
        @SuppressWarnings("unchecked")
        Observer<Object> o = mock(Observer.class);
        source.debounce(debounceSel).subscribe(o);
        source.onNext(1);
        verify(o, never()).onNext(any());
        verify(o, never()).onCompleted();
        verify(o).onError(any(TestException.class));
    }

    @Test
    public void debounceTimedLastIsNotLost() {
        // The in-flight item must still be delivered when the source completes
        // before the debounce window elapses.
        PublishSubject<Integer> source = PublishSubject.create();
        @SuppressWarnings("unchecked")
        Observer<Object> o = mock(Observer.class);
        source.debounce(100, TimeUnit.MILLISECONDS, scheduler).subscribe(o);
        source.onNext(1);
        source.onCompleted();
        scheduler.advanceTimeBy(1, TimeUnit.SECONDS);
        verify(o).onNext(1);
        verify(o).onCompleted();
        verify(o, never()).onError(any(Throwable.class));
    }

    @Test
    public void debounceSelectorLastIsNotLost() {
        // Same guarantee for the selector overload.
        PublishSubject<Integer> source = PublishSubject.create();
        final PublishSubject<Integer> debouncer = PublishSubject.create();
        Func1<Integer, Observable<Integer>> debounceSel = new Func1<Integer, Observable<Integer>>() {
            @Override
            public Observable<Integer> call(Integer t1) {
                return debouncer;
            }
        };
        @SuppressWarnings("unchecked")
        Observer<Object> o = mock(Observer.class);
        source.debounce(debounceSel).subscribe(o);
        source.onNext(1);
        source.onCompleted();
        debouncer.onCompleted();
        verify(o).onNext(1);
        verify(o).onCompleted();
        verify(o, never()).onError(any(Throwable.class));
    }
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.file.remote;
import java.util.HashMap;
import java.util.Map;
import org.apache.camel.Category;
import org.apache.camel.FailedToCreateConsumerException;
import org.apache.camel.FailedToCreateProducerException;
import org.apache.camel.LoggingLevel;
import org.apache.camel.Processor;
import org.apache.camel.api.management.ManagedAttribute;
import org.apache.camel.api.management.ManagedResource;
import org.apache.camel.component.file.GenericFileConfiguration;
import org.apache.camel.component.file.GenericFileProcessStrategy;
import org.apache.camel.component.file.GenericFileProducer;
import org.apache.camel.component.file.remote.RemoteFileConfiguration.PathSeparator;
import org.apache.camel.component.file.remote.strategy.FtpProcessStrategyFactory;
import org.apache.camel.component.file.strategy.FileMoveExistingStrategy;
import org.apache.camel.spi.ClassResolver;
import org.apache.camel.spi.Metadata;
import org.apache.camel.spi.UriEndpoint;
import org.apache.camel.spi.UriParam;
import org.apache.camel.util.ObjectHelper;
import org.apache.commons.net.ftp.FTPClient;
import org.apache.commons.net.ftp.FTPClientConfig;
import org.apache.commons.net.ftp.FTPFile;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Upload and download files to/from FTP servers.
*/
@UriEndpoint(firstVersion = "1.1.0", scheme = "ftp", extendsScheme = "file", title = "FTP",
syntax = "ftp:host:port/directoryName", alternativeSyntax = "ftp:username:password@host:port/directoryName",
category = { Category.FILE })
@Metadata(excludeProperties = "appendChars,readLockIdempotentReleaseAsync,readLockIdempotentReleaseAsyncPoolSize,"
+ "readLockIdempotentReleaseDelay,readLockIdempotentReleaseExecutorService,"
+ "directoryMustExist,extendedAttributes,probeContentType,startingDirectoryMustExist,"
+ "startingDirectoryMustHaveAccess,chmodDirectory,forceWrites,copyAndDeleteOnRenameFail,"
+ "renameUsingCopy")
@ManagedResource(description = "Managed FtpEndpoint")
public class FtpEndpoint<T extends FTPFile> extends RemoteFileEndpoint<FTPFile> {
    private static final Logger LOG = LoggerFactory.getLogger(FtpEndpoint.class);
    // Socket timeout (millis) to apply on the FTPClient; it can only be set after the client has
    // connected, so it is captured here and applied later (0 = not configured).
    protected int soTimeout;
    // Data-connection timeout (millis) for the FTPClient (0 = not configured).
    protected int dataTimeout;
    @UriParam
    protected FtpConfiguration configuration;
    @UriParam(label = "advanced")
    protected FTPClientConfig ftpClientConfig;
    // Extra "ftpClientConfig.xxx" URI options applied onto the FTPClientConfig via setProperties.
    @UriParam(label = "advanced", prefix = "ftpClientConfig.", multiValue = true)
    protected Map<String, Object> ftpClientConfigParameters;
    // Extra "ftpClient.xxx" URI options applied onto the FTPClient via setProperties.
    @UriParam(label = "advanced", prefix = "ftpClient.", multiValue = true)
    protected Map<String, Object> ftpClientParameters;
    @UriParam(label = "advanced")
    protected FTPClient ftpClient;
    @UriParam(label = "common", defaultValue = "DEBUG")
    protected LoggingLevel transferLoggingLevel = LoggingLevel.DEBUG;
    @UriParam(label = "common", defaultValue = "5")
    protected int transferLoggingIntervalSeconds = 5;
    @UriParam(label = "common")
    protected boolean transferLoggingVerbose;
    @UriParam(label = "consumer")
    protected boolean resumeDownload;

    public FtpEndpoint() {
    }

    public FtpEndpoint(String uri, RemoteFileComponent<FTPFile> component, FtpConfiguration configuration) {
        super(uri, component, configuration);
        this.configuration = configuration;
    }

    @Override
    public String getScheme() {
        return "ftp";
    }

    /**
     * Creates the FTP consumer, first validating the prerequisites of resumeDownload:
     * a localWorkDirectory must be configured and binary transfer mode must be enabled.
     *
     * @throws IllegalArgumentException if resumeDownload=true without the required options
     */
    @Override
    public RemoteFileConsumer<FTPFile> createConsumer(Processor processor) throws Exception {
        if (isResumeDownload() && ObjectHelper.isEmpty(getLocalWorkDirectory())) {
            throw new IllegalArgumentException("The option localWorkDirectory must be configured when resumeDownload=true");
        }
        if (isResumeDownload() && !getConfiguration().isBinary()) {
            throw new IllegalArgumentException("The option binary must be enabled when resumeDownload=true");
        }
        return super.createConsumer(processor);
    }

    /**
     * Builds the {@link FtpConsumer}, wiring in fresh remote file operations and either the
     * explicitly configured process strategy or a newly created default one.
     */
    @Override
    protected RemoteFileConsumer<FTPFile> buildConsumer(Processor processor) {
        try {
            return new FtpConsumer(
                    this, processor, createRemoteFileOperations(),
                    processStrategy != null ? processStrategy : createGenericFileStrategy());
        } catch (Exception e) {
            throw new FailedToCreateConsumerException(this, e);
        }
    }

    /**
     * Builds the producer, installing the FTP-specific move-existing-file strategy when
     * none has been configured.
     */
    @Override
    protected GenericFileProducer<FTPFile> buildProducer() {
        try {
            if (this.getMoveExistingFileStrategy() == null) {
                this.setMoveExistingFileStrategy(createDefaultFtpMoveExistingFileStrategy());
            }
            return new RemoteFileProducer<>(this, createRemoteFileOperations());
        } catch (Exception e) {
            throw new FailedToCreateProducerException(this, e);
        }
    }

    /**
     * Default Existing File Move Strategy
     *
     * @return the default implementation for ftp components
     */
    private FileMoveExistingStrategy createDefaultFtpMoveExistingFileStrategy() {
        return new FtpDefaultMoveExistingFileStrategy();
    }

    @Override
    protected GenericFileProcessStrategy<FTPFile> createGenericFileStrategy() {
        return new FtpProcessStrategyFactory().createGenericFileProcessStrategy(getCamelContext(), getParamsAsMap());
    }

    /**
     * Creates and configures the {@link FtpOperations} used to talk to the FTP server.
     * Applies buffer size, timeouts, active port range, and any "ftpClient."/"ftpClientConfig."
     * URI options onto the underlying commons-net {@link FTPClient}.
     */
    @Override
    public RemoteFileOperations<FTPFile> createRemoteFileOperations() throws Exception {
        // configure ftp client
        FTPClient client = ftpClient;
        if (client == null) {
            // must use a new client if not explicit configured to use a custom
            // client
            client = createFtpClient();
        }
        // use configured buffer size which is larger and therefore faster (as
        // the default is no buffer)
        if (getBufferSize() > 0) {
            client.setBufferSize(getBufferSize());
        }
        // set any endpoint configured timeouts
        if (getConfiguration().getConnectTimeout() > -1) {
            client.setConnectTimeout(getConfiguration().getConnectTimeout());
        }
        // soTimeout is only remembered here; it must be set on the client after connecting
        if (getConfiguration().getSoTimeout() > -1) {
            soTimeout = getConfiguration().getSoTimeout();
        }
        dataTimeout = getConfiguration().getTimeout();
        if (getConfiguration().getActivePortRange() != null) {
            // parse it as min-max
            String[] parts = getConfiguration().getActivePortRange().split("-");
            if (parts.length != 2) {
                throw new IllegalArgumentException("The option activePortRange should have syntax: min-max");
            }
            int min = getCamelContext().getTypeConverter().mandatoryConvertTo(int.class, parts[0]);
            int max = getCamelContext().getTypeConverter().mandatoryConvertTo(int.class, parts[1]);
            LOG.debug("Using active port range: {}-{}", min, max);
            client.setActivePortRange(min, max);
        }
        // then lookup ftp client parameters and set those
        if (ftpClientParameters != null) {
            // work on a copy so removing the timeout keys does not mutate the configured map
            Map<String, Object> localParameters = new HashMap<>(ftpClientParameters);
            // setting soTimeout has to be done later on FTPClient (after it has
            // connected)
            Object timeout = localParameters.remove("soTimeout");
            if (timeout != null) {
                soTimeout = getCamelContext().getTypeConverter().convertTo(int.class, timeout);
            }
            // and we want to keep data timeout so we can log it later
            timeout = localParameters.remove("dataTimeout");
            if (timeout != null) {
                dataTimeout = getCamelContext().getTypeConverter().convertTo(int.class, timeout);
            }
            setProperties(client, localParameters);
        }
        if (ftpClientConfigParameters != null) {
            // client config is optional so create a new one if we have
            // parameter for it
            if (ftpClientConfig == null) {
                ftpClientConfig = new FTPClientConfig();
            }
            Map<String, Object> localConfigParameters = new HashMap<>(ftpClientConfigParameters);
            setProperties(ftpClientConfig, localConfigParameters);
        }
        if (dataTimeout > 0) {
            client.setDataTimeout(dataTimeout);
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("Created FTPClient[connectTimeout: {}, soTimeout: {}, dataTimeout: {}, bufferSize: {}"
                      + ", receiveDataSocketBufferSize: {}, sendDataSocketBufferSize: {}]: {}",
                    client.getConnectTimeout(), getSoTimeout(), dataTimeout, client.getBufferSize(),
                    client.getReceiveDataSocketBufferSize(), client.getSendDataSocketBufferSize(), client);
        }
        FtpOperations operations = new FtpOperations(client, getFtpClientConfig());
        operations.setEndpoint(this);
        return operations;
    }

    /**
     * Creates a new {@link FTPClient} with a parser factory that resolves classes through
     * the CamelContext ClassResolver, so it works in all runtimes.
     */
    protected FTPClient createFtpClient() throws Exception {
        FTPClient client = new FTPClient();
        // use parser factory that can load classes via Camel to work in all runtimes
        ClassResolver cr = getCamelContext().getClassResolver();
        client.setParserFactory(new CamelFTPParserFactory(cr));
        return client;
    }

    @Override
    public FtpConfiguration getConfiguration() {
        // lazily create a default configuration so this never returns null
        if (configuration == null) {
            configuration = new FtpConfiguration();
        }
        return configuration;
    }

    @Override
    public void setConfiguration(GenericFileConfiguration configuration) {
        if (configuration == null) {
            throw new IllegalArgumentException("FtpConfiguration expected");
        }
        // need to set on both
        this.configuration = (FtpConfiguration) configuration;
        super.setConfiguration(configuration);
    }

    public FTPClient getFtpClient() {
        return ftpClient;
    }

    /**
     * To use a custom instance of FTPClient
     */
    public void setFtpClient(FTPClient ftpClient) {
        this.ftpClient = ftpClient;
    }

    public FTPClientConfig getFtpClientConfig() {
        return ftpClientConfig;
    }

    /**
     * To use a custom instance of FTPClientConfig to configure the FTP client the endpoint should use.
     */
    public void setFtpClientConfig(FTPClientConfig ftpClientConfig) {
        this.ftpClientConfig = ftpClientConfig;
    }

    public Map<String, Object> getFtpClientParameters() {
        return ftpClientParameters;
    }

    /**
     * Used by FtpComponent to provide additional parameters for the FTPClient
     */
    // NOTE(review): stored by reference, unlike setFtpClientConfigParameters which takes a
    // defensive copy — confirm the component does not mutate the map after handing it over.
    void setFtpClientParameters(Map<String, Object> ftpClientParameters) {
        this.ftpClientParameters = ftpClientParameters;
    }

    public Map<String, Object> getFtpClientConfigParameters() {
        return ftpClientConfigParameters;
    }

    /**
     * Used by FtpComponent to provide additional parameters for the FTPClientConfig
     */
    void setFtpClientConfigParameters(Map<String, Object> ftpClientConfigParameters) {
        this.ftpClientConfigParameters = new HashMap<>(ftpClientConfigParameters);
    }

    public int getSoTimeout() {
        return soTimeout;
    }

    /**
     * Sets the soTimeout on the FTP client.
     */
    public void setSoTimeout(int soTimeout) {
        this.soTimeout = soTimeout;
    }

    public int getDataTimeout() {
        return dataTimeout;
    }

    /**
     * Sets the data timeout on the FTP client.
     */
    public void setDataTimeout(int dataTimeout) {
        this.dataTimeout = dataTimeout;
    }

    public LoggingLevel getTransferLoggingLevel() {
        return transferLoggingLevel;
    }

    /**
     * Configure the logging level to use when logging the progress of upload and download operations.
     */
    public void setTransferLoggingLevel(LoggingLevel transferLoggingLevel) {
        this.transferLoggingLevel = transferLoggingLevel;
    }

    @ManagedAttribute(description = "Logging level to use when logging the progress of upload and download operations")
    public void setTransferLoggingLevelName(String transferLoggingLevel) {
        this.transferLoggingLevel = getCamelContext().getTypeConverter().convertTo(LoggingLevel.class, transferLoggingLevel);
    }

    @ManagedAttribute
    public String getTransferLoggingLevelName() {
        return transferLoggingLevel.name();
    }

    @ManagedAttribute
    public int getTransferLoggingIntervalSeconds() {
        return transferLoggingIntervalSeconds;
    }

    /**
     * Configures the interval in seconds to use when logging the progress of upload and download operations that are
     * in-flight. This is used for logging progress when operations takes longer time.
     */
    @ManagedAttribute(description = "Interval in seconds to use when logging the progress of upload and download operations that are in-flight")
    public void setTransferLoggingIntervalSeconds(int transferLoggingIntervalSeconds) {
        this.transferLoggingIntervalSeconds = transferLoggingIntervalSeconds;
    }

    @ManagedAttribute
    public boolean isTransferLoggingVerbose() {
        return transferLoggingVerbose;
    }

    /**
     * Configures whether the perform verbose (fine grained) logging of the progress of upload and download operations.
     */
    @ManagedAttribute(description = "Whether the perform verbose (fine grained) logging of the progress of upload and download operations")
    public void setTransferLoggingVerbose(boolean transferLoggingVerbose) {
        this.transferLoggingVerbose = transferLoggingVerbose;
    }

    public boolean isResumeDownload() {
        return resumeDownload;
    }

    /**
     * Configures whether resume download is enabled. This must be supported by the FTP server (almost all FTP servers
     * support it). In addition the options <tt>localWorkDirectory</tt> must be configured so downloaded files are
     * stored in a local directory, and the option <tt>binary</tt> must be enabled, which is required to support
     * resuming of downloads.
     */
    public void setResumeDownload(boolean resumeDownload) {
        this.resumeDownload = resumeDownload;
    }

    @Override
    public char getFileSeparator() {
        // the regular ftp component should use the configured separator
        // as FTP servers may require you to use windows or unix style
        // and therefore you need to be able to control that
        PathSeparator pathSeparator = getConfiguration().getSeparator();
        switch (pathSeparator) {
            case Windows:
                return '\\';
            case UNIX:
                return '/';
            default:
                return super.getFileSeparator();
        }
    }
}
| |
/*
* PBrtJ -- Port of pbrt v3 to Java.
* Copyright (c) 2017 Rick Weyrauch.
*
* pbrt source code is Copyright(c) 1998-2016
* Matt Pharr, Greg Humphreys, and Wenzel Jakob.
*
*/
package org.pbrt.core;
import org.pbrt.accelerators.BVHAccel;
import org.pbrt.accelerators.KdTreeAccel;
import org.pbrt.accelerators.NoAccel;
import org.pbrt.cameras.EnvironmentCamera;
import org.pbrt.cameras.OrthographicCamera;
import org.pbrt.cameras.PerspectiveCamera;
import org.pbrt.cameras.RealisticCamera;
import org.pbrt.filters.*;
import org.pbrt.integrators.*;
import org.pbrt.lights.*;
import org.pbrt.materials.*;
import org.pbrt.media.GridDensityMedium;
import org.pbrt.media.HomogeneousMedium;
import org.pbrt.samplers.*;
import org.pbrt.shapes.*;
import org.pbrt.textures.*;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Objects;
import java.util.Stack;
public class Api {
private static final int MaxTransforms = 2;
private static final int StartTransformBits = 1 << 0;
private static final int EndTransformBits = 1 << 1;
private static final int AllTransformsBits = (1 << MaxTransforms) - 1;
private static class TransformSet {
// TransformSet Public Methods
public TransformSet() {
trans[0] = new Transform();
trans[1] = new Transform();
}
public TransformSet(TransformSet ts) {
this.trans[0] = new Transform(ts.trans[0]);
this.trans[1] = new Transform(ts.trans[1]);
}
public static TransformSet Inverse(TransformSet ts) {
TransformSet tInv = new TransformSet();
for (int i = 0; i < MaxTransforms; ++i) tInv.trans[i] = Transform.Inverse(ts.trans[i]);
return tInv;
}
public boolean IsAnimated() {
for (int i = 0; i < MaxTransforms - 1; ++i) {
if (trans[i].notEqual(trans[i + 1])) return true;
}
return false;
}
public Transform trans[] = new Transform[MaxTransforms];
}
private static class RenderOptions {
// RenderOptions Public Methods
public Integrator MakeIntegrator() {
Camera camera = MakeCamera();
if (camera == null) {
PBrtTLogger.Error("Unable to create camera");
return null;
}
Sampler sampler = MakeSampler(SamplerName, SamplerParams, camera.film);
if (sampler == null) {
PBrtTLogger.Error("Unable to create sampler.");
return null;
}
Integrator integrator = null;
if (Objects.equals(IntegratorName, "whitted")) {
integrator = WhittedIntegrator.Create(IntegratorParams, sampler, camera);
} else if (Objects.equals(IntegratorName, "directlighting")) {
integrator = DirectLightingIntegrator.Create(IntegratorParams, sampler, camera);
} else if (Objects.equals(IntegratorName, "path")) {
integrator = PathIntegrator.Create(IntegratorParams, sampler, camera);
} else if (Objects.equals(IntegratorName, "volpath")) {
integrator = VolPathIntegrator.Create(IntegratorParams, sampler, camera);
} else if (Objects.equals(IntegratorName, "bdpt")) {
integrator = BDPTIntegrator.Create(IntegratorParams, sampler, camera);
} else if (Objects.equals(IntegratorName, "mlt")) {
integrator = MLTIntegrator.Create(IntegratorParams, camera);
} else if (Objects.equals(IntegratorName, "sppm")) {
integrator = SPPMIntegrator.Create(IntegratorParams, camera);
} else {
PBrtTLogger.Error("Integrator \"%s\" unknown.", IntegratorName);
return null;
}
if (renderOptions.haveScatteringMedia && !Objects.equals(IntegratorName, "volpath") &&
!Objects.equals(IntegratorName, "bdpt") && !Objects.equals(IntegratorName, "mlt")) {
PBrtTLogger.Warning("Scene has scattering media but \"%s\" integrator doesn't support "+
"volume scattering. Consider using \"volpath\", \"bdpt\", or "+
"\"mlt\".", IntegratorName);
}
IntegratorParams.ReportUnused();
// Warn if no light sources are defined
if (lights.isEmpty()) {
PBrtTLogger.Warning("No light sources defined in scene; rendering a black image.");
}
return integrator;
}
public Scene MakeScene() {
Primitive[] primArray = new Primitive[1];
Primitive[] prims = primitives.toArray(primArray);
Primitive accelerator = MakeAccelerator(AcceleratorName, prims, AcceleratorParams);
if (accelerator == null) {
accelerator = new BVHAccel(prims);
}
Scene scene = new Scene(accelerator, lights);
// Erase primitives and lights from _RenderOptions_
primitives.clear();
lights.clear();
return scene;
}
public Camera MakeCamera() {
Filter filter = MakeFilter(FilterName, FilterParams);
Film film = MakeFilm(FilmName, FilmParams, filter);
if (film == null) {
PBrtTLogger.Error("Unable to create film.");
return null;
}
Camera camera = Api.MakeCamera(CameraName, CameraParams, CameraToWorld,
renderOptions.transformStartTime, renderOptions.transformEndTime, film);
return camera;
}
// RenderOptions Public Data
public float transformStartTime = 0, transformEndTime = 1;
public String FilterName = "box";
public ParamSet FilterParams = new ParamSet();
public String FilmName = "image";
public ParamSet FilmParams = new ParamSet();
public String SamplerName = "halton";
public ParamSet SamplerParams = new ParamSet();
public String AcceleratorName = "bvh";
public ParamSet AcceleratorParams = new ParamSet();
public String IntegratorName = "path";
public ParamSet IntegratorParams = new ParamSet();
public String CameraName = "perspective";
public ParamSet CameraParams = new ParamSet();
public TransformSet CameraToWorld = new TransformSet();
public HashMap<String, Medium> namedMedia = new HashMap<>();
public ArrayList<Light> lights = new ArrayList<>();
public ArrayList<Primitive> primitives = new ArrayList<>();
public HashMap<String, ArrayList<Primitive>> instances = new HashMap<>();
public ArrayList<Primitive> currentInstance = null;
boolean haveScatteringMedia = false;
}
private static class GraphicsState {
public GraphicsState() {}
public GraphicsState(GraphicsState gs) {
this.currentInsideMedium = gs.currentInsideMedium;
this.currentOutsideMedium = gs.currentOutsideMedium;
this.floatTextures = new HashMap<>(gs.floatTextures);
this.spectrumTextures = new HashMap<>(gs.spectrumTextures);
this.materialParams = new ParamSet(gs.materialParams);
this.material = gs.material;
this.namedMaterials = new HashMap<>(gs.namedMaterials);
this.currentNamedMaterial = gs.currentNamedMaterial;
this.areaLightParams = new ParamSet(gs.areaLightParams);
this.areaLight = gs.areaLight;
this.reverseOrientation = gs.reverseOrientation;
}
// Graphics State Methods
public Material CreateMaterial(ParamSet params) {
TextureParams mp = new TextureParams(params, materialParams, floatTextures, spectrumTextures);
Material mtl;
if (!Objects.equals(currentNamedMaterial, "")) {
mtl = namedMaterials.get(currentNamedMaterial);
if (mtl == null) {
PBrtTLogger.Error("Named material \"%s\" not defined. Using \"matte\".", currentNamedMaterial);
mtl = MakeMaterial("matte", mp);
}
} else {
mtl = MakeMaterial(material, mp);
if (mtl == null && !Objects.equals(material, "") && !Objects.equals(material, "none"))
mtl = MakeMaterial("matte", mp);
}
return mtl;
}
public MediumInterface CreateMediumInterface() {
MediumInterface m = new MediumInterface();
if (!currentInsideMedium.isEmpty()) {
m.inside = renderOptions.namedMedia.get(currentInsideMedium);
if (m.inside == null) {
PBrtTLogger.Error("Named medium \"%s\" undefined.", currentInsideMedium);
}
}
if (!currentOutsideMedium.isEmpty()) {
m.outside = renderOptions.namedMedia.get(currentOutsideMedium);
if (m.outside == null) {
PBrtTLogger.Error("Named medium \"%s\" undefined.", currentOutsideMedium);
}
}
return m;
}
// Graphics State
public String currentInsideMedium = "", currentOutsideMedium = "";
public HashMap<String, TextureFloat> floatTextures = new HashMap<>();
public HashMap<String, TextureSpectrum> spectrumTextures = new HashMap<>();
public ParamSet materialParams = new ParamSet();
public String material = "matte";
public HashMap<String, Material> namedMaterials = new HashMap<>();
public String currentNamedMaterial = "";
public ParamSet areaLightParams = new ParamSet();
public String areaLight = "";
public boolean reverseOrientation = false;
}
private static class TransformCache {
public class TransformPair {
Transform t;
Transform tInv;
}
// TransformCache Public Methods
public TransformPair Lookup(Transform t) {
TransformPair entry = cache.get(t);
if (entry == null) {
entry = new TransformPair();
entry.t = t;
entry.tInv = Transform.Inverse(t);
cache.put(t, entry);
}
return entry;
}
public void Clear() {
cache.clear();
}
// TransformCache Private Data
private HashMap<Transform, TransformPair> cache = new HashMap<>();
}
    // Tracks which phase of the API the caller is in: before Init, inside the options
    // block, or inside the world block. Many calls are only legal in one phase.
    private enum APIState {Uninitialized, OptionsBlock, WorldBlock}
    private static APIState currentApiState = APIState.Uninitialized;
    // The current transformation set (CTM), one transform per time slot.
    private static TransformSet curTransform = new TransformSet();
    // Bitmask selecting which transform slots subsequent transform calls affect.
    private static int activeTransformBits = AllTransformsBits;
    // Coordinate systems saved by name via CoordinateSystem/CoordSysTransform.
    private static HashMap<String, TransformSet> namedCoordinateSystems = new HashMap<>();
    private static RenderOptions renderOptions = new RenderOptions();
    private static GraphicsState graphicsState = new GraphicsState();
    // Stacks backing AttributeBegin/End and TransformBegin/End nesting.
    private static Stack<GraphicsState> pushedGraphicsStates = new Stack<>();
    private static Stack<TransformSet> pushedTransforms = new Stack<>();
    private static Stack<Integer> pushedActiveTransformBits = new Stack<>();
    private static TransformCache transformCache = new TransformCache();
    // Indentation depth used when echoing the scene description (cat/toply modes).
    private static int catIndentCount = 0;
    /**
     * Creates the shape(s) named by {@code name} from the given parameter set. Quadrics
     * (sphere, cylinder, disk, cone, paraboloid, hyperboloid) produce a single Shape;
     * the remaining types (curve, meshes, subdivision surfaces, NURBS) may produce many.
     * Unknown names log a warning and yield an empty list.
     */
    private static ArrayList<Shape> MakeShapes(String name, Transform object2world, Transform world2object, boolean reverseOrientation, ParamSet paramSet) {
        ArrayList<Shape> shapes = new ArrayList<>();
        Shape s = null;
        // First chain: single-shape quadric types set s.
        if (Objects.equals(name, "sphere")) {
            s = Sphere.Create(object2world, world2object, reverseOrientation, paramSet);
        }
        else if (Objects.equals(name, "cylinder")) {
            s = Cylinder.Create(object2world, world2object, reverseOrientation, paramSet);
        } else if (Objects.equals(name, "disk")) {
            s = Disk.Create(object2world, world2object, reverseOrientation, paramSet);
        } else if (Objects.equals(name, "cone")) {
            s = Cone.Create(object2world, world2object, reverseOrientation, paramSet);
        } else if (Objects.equals(name, "paraboloid")) {
            s = Paraboloid.Create(object2world, world2object, reverseOrientation, paramSet);
        } else if (Objects.equals(name, "hyperboloid")) {
            s = Hyperboloid.Create(object2world, world2object, reverseOrientation, paramSet);
        }
        // Second chain: add the single shape, otherwise dispatch the multi-shape types.
        // NOTE(review): if a quadric name matched above but Create returned null, control
        // falls into this else-if chain and ends at the "unknown" warning — confirm that
        // is the intended behaviour for a failed quadric Create.
        if (s != null) {
            shapes.add(s);
        }
        else if (Objects.equals(name, "curve")) {
            shapes.addAll(Curve.Create(object2world, world2object, reverseOrientation, paramSet));
        } else if (Objects.equals(name, "trianglemesh")) {
            if (Pbrt.options.ToPly) {
                // PLY export path not ported yet; original C++ logic kept below for reference.
                /*
                int count = 1;
                String plyPrefix = new String(); // getenv("PLY_PREFIX") ? getenv("PLY_PREFIX") : "mesh";
                String fn = StringPrintf("%s_%05d.ply", plyPrefix, count++);
                Integer[] vi = paramSet.FindInt("indices");
                Point3f[] P = paramSet.FindPoint3f("P");
                Point2f[] uvs = paramSet.FindPoint2f("uv");
                if (uvs == null) uvs = paramSet.FindPoint2f("st");
                if (uvs == null) {
                    Float[] fuv = paramSet.FindFloat("uv");
                    if (fuv == null) fuv = paramSet.FindFloat("st");
                    if (fuv != null) {
                        Point2f[] tempUVs = new Point2f[uvs.length/2];
                        for (int i = 0; i < tempUVs.length; ++i)
                            tempUVs[i] = new Point2f(fuv[2 * i], fuv[2 * i + 1]);
                        uvs = tempUVs;
                    }
                }
                Normal3f[] N = paramSet.FindNormal3f("N");
                Vector3f[] S = paramSet.FindVector3f("S");
                if (!WritePlyFile(fn, nvi / 3, vi, npi, P, S, N, uvs))
                    PBrtTLogger.Error("Unable to write PLY file \"%s\"", fn);
                System.out.format("%*sShape \"plymesh\" \"string filename\" \"%s\" ",
                        catIndentCount, "", fn);
                String alphaTex = paramSet.FindTexture("alpha");
                if (alphaTex != "")
                    System.out.format("\n%*s\"texture alpha\" \"%s\" ", catIndentCount + 8, "", alphaTex);
                else {
                    int count;
                    float[] alpha = paramSet.FindFloat("alpha");
                    if (alpha != null)
                        System.out.format("\n%*s\"float alpha\" %f ", catIndentCount + 8, "", alpha[0]);
                }
                String shadowAlphaTex = paramSet.FindTexture("shadowalpha");
                if (shadowAlphaTex != "")
                    System.out.format("\n%*s\"texture shadowalpha\" \"%s\" ",
                            catIndentCount + 8, "", shadowAlphaTex);
                else {
                    int count;
                    float[] alpha = paramSet.FindFloat("shadowalpha");
                    if (alpha != null)
                        System.out.format("\n%*s\"float shadowalpha\" %f ", catIndentCount + 8, "", alpha[0]);
                }
                System.out.format("\n");
                */
            } else {
                shapes.addAll(Triangle.Create(object2world, world2object, reverseOrientation, paramSet, graphicsState.floatTextures));
            }
        } else if (Objects.equals(name, "plymesh")) {
            shapes.addAll(PlyMesh.Create(object2world, world2object, reverseOrientation, paramSet, graphicsState.floatTextures));
        } else if (Objects.equals(name, "heightfield")) {
            shapes.addAll(HeightField.Create(object2world, world2object, reverseOrientation, paramSet));
        } else if (Objects.equals(name, "loopsubdiv")) {
            shapes.addAll(LoopSubdiv.Create(object2world, world2object, reverseOrientation, paramSet));
        } else if (Objects.equals(name, "nurbs")) {
            shapes.addAll(NURBS.Create(object2world, world2object, reverseOrientation, paramSet));
        } else {
            PBrtTLogger.Warning("Shape \"%s\" unknown.", name);
        }
        paramSet.ReportUnused();
        return shapes;
    }
    // Counts how many Material instances MakeMaterial has created; reported in render statistics.
    private static Stats.Counter nMaterialsCreated = new Stats.Counter("Scene/Materials created");
private static Material MakeMaterial(String name, TextureParams mp) {
Material material = null;
if (Objects.equals(name, "") || Objects.equals(name, "none")) {
return null;
} else if (Objects.equals(name, "matte")) {
material = MatteMaterial.Create(mp);
} else if (Objects.equals(name, "plastic")) {
material = PlasticMaterial.Create(mp);
} else if (Objects.equals(name, "translucent")) {
material = TranslucentMaterial.Create(mp);
} else if (Objects.equals(name, "glass")) {
material = GlassMaterial.Create(mp);
} else if (Objects.equals(name, "mirror")) {
material = MirrorMaterial.Create(mp);
} else if (Objects.equals(name, "hair")) {
material = HairMaterial.Create(mp);
} else if (Objects.equals(name, "mix")) {
String m1 = mp.FindString("namedmaterial1", "");
String m2 = mp.FindString("namedmaterial2", "");
Material mat1 = graphicsState.namedMaterials.get(m1);
Material mat2 = graphicsState.namedMaterials.get(m2);
if (mat1 == null) {
PBrtTLogger.Error("Named material \"%s\" undefined. Using \"matte\"", m1);
mat1 = MakeMaterial("matte", mp);
}
if (mat2 == null) {
PBrtTLogger.Error("Named material \"%s\" undefined. Using \"matte\"", m2);
mat2 = MakeMaterial("matte", mp);
}
material = MixMaterial.Create(mp, mat1, mat2);
} else if (Objects.equals(name, "metal")) {
material = MetalMaterial.Create(mp);
} else if (Objects.equals(name, "substrate")) {
material = SubstrateMaterial.Create(mp);
} else if (Objects.equals(name, "uber")) {
material = UberMaterial.Create(mp);
} else if (Objects.equals(name, "subsurface")) {
material = SubsurfaceMaterial.Create(mp);
} else if (Objects.equals(name, "kdsubsurface")) {
material = KdSubsurfaceMaterial.Create(mp);
} else if (Objects.equals(name, "fourier")) {
material = FourierMaterial.Create(mp);
} else {
PBrtTLogger.Warning("Material \"%s\" unknown. Using \"matte\".", name);
material = MatteMaterial.Create(mp);
}
if ((Objects.equals(name, "subsurface") || Objects.equals(name, "kdsubsurface")) && (!Objects.equals(renderOptions.IntegratorName, "path") && (!Objects.equals(renderOptions.IntegratorName, "volpath")))) {
PBrtTLogger.Warning("Subsurface scattering material \"%s\" used, but \"%s\" integrator doesn't support subsurface scattering. Use \"path\" or \"volpath\".",
name, renderOptions.IntegratorName);
}
mp.ReportUnused();
if (material == null) {
PBrtTLogger.Error("Unable to create material \"%s\"", name);
} else {
nMaterialsCreated.increment();
}
return material;
}
private static TextureFloat MakeFloatTexture(String name, Transform tex2world, TextureParams tp) {
TextureFloat tex = null;
if (Objects.equals(name, "constant"))
tex = ConstantTextureFloat.CreateFloat(tex2world, tp);
else if (Objects.equals(name, "scale"))
tex = ScaleTextureFloat.CreateFloat(tex2world, tp);
else if (Objects.equals(name, "mix"))
tex = MixTextureFloat.CreateFloat(tex2world, tp);
else if (Objects.equals(name, "bilerp"))
tex = BilerpTextureFloat.CreateFloat(tex2world, tp);
else if (Objects.equals(name, "imagemap"))
tex = ImageTextureFloat.CreateFloat(tex2world, tp);
else if (Objects.equals(name, "checkerboard"))
tex = CheckerBoardTextureFloat.CreateFloat(tex2world, tp);
else if (Objects.equals(name, "dots"))
tex = DotsTextureFloat.CreateFloat(tex2world, tp);
else if (Objects.equals(name, "fbm"))
tex = FBmTextureFloat.CreateFloat(tex2world, tp);
else if (Objects.equals(name, "wrinkled"))
tex = WrinkledTextureFloat.CreateFloat(tex2world, tp);
else if (Objects.equals(name, "windy"))
tex = WindyTextureFloat.CreateFloat(tex2world, tp);
else
PBrtTLogger.Warning("Float texture \"%s\" unknown.", name);
tp.ReportUnused();
return tex;
}
private static TextureSpectrum MakeSpectrumTexture(String name, Transform tex2world, TextureParams tp) {
TextureSpectrum tex = null;
if (Objects.equals(name, "constant"))
tex = ConstantTextureSpectrum.CreateSpectrum(tex2world, tp);
else if (Objects.equals(name, "scale"))
tex = ScaleTextureSpectrum.CreateSpectrum(tex2world, tp);
else if (Objects.equals(name, "mix"))
tex = MixTextureSpectrum.CreateSpectrum(tex2world, tp);
else if (Objects.equals(name, "bilerp"))
tex = BilerpTextureSpectrum.CreateSpectrum(tex2world, tp);
else if (Objects.equals(name, "imagemap"))
tex = ImageTextureSpectrum.CreateSpectrum(tex2world, tp);
else if (Objects.equals(name, "uv"))
tex = UVTextureSpectrum.CreateSpectrum(tex2world, tp);
else if (Objects.equals(name, "checkerboard"))
tex = CheckerBoardTextureSpectrum.CreateSpectrum(tex2world, tp);
else if (Objects.equals(name, "dots"))
tex = DotsTextureSpectrum.CreateSpectrum(tex2world, tp);
else if (Objects.equals(name, "fbm"))
tex = FBmTextureSpectrum.CreateSpectrum(tex2world, tp);
else if (Objects.equals(name, "wrinkled"))
tex = WrinkledTextureSpectrum.CreateSpectrum(tex2world, tp);
else if (Objects.equals(name, "marble"))
tex = MarbleTextureSpectrum.CreateSpectrum(tex2world, tp);
else if (Objects.equals(name, "windy"))
tex = WindyTextureSpectrum.CreateSpectrum(tex2world, tp);
else
PBrtTLogger.Warning("Spectrum texture \"%s\" unknown.", name);
tp.ReportUnused();
return tex;
}
    /**
     * Creates the participating medium named {@code name} ("homogeneous" or "heterogeneous").
     * Absorption/scattering coefficients come from, in increasing priority: built-in defaults,
     * a named "preset", then explicit "sigma_a"/"sigma_s" parameters; both are multiplied by "scale".
     * Unknown names log a warning and yield null.
     */
    private static Medium MakeMedium(String name, ParamSet paramSet, Transform medium2world) {
        // Default coefficients used when neither a preset nor explicit sigmas are given.
        float sig_a_rgb[] = {.0011f, .0024f, .014f}, sig_s_rgb[] = {2.55f, 3.21f, 3.77f};
        Spectrum sig_a = Spectrum.FromRGB(sig_a_rgb),
                sig_s = Spectrum.FromRGB(sig_s_rgb);
        String preset = paramSet.FindOneString("preset", "");
        Medium.ScatteringProps props = Medium.GetMediumScatteringProperties(preset);
        if (props == null) {
            PBrtTLogger.Warning("Material preset \"%s\" not found. Using defaults.", preset);
        } else {
            sig_a = props.sigma_a;
            sig_s = props.sigma_s;
        }
        float scale = paramSet.FindOneFloat("scale", 1.f);
        float g = paramSet.FindOneFloat("g", 0.0f);
        // NOTE(review): scale() mutates the Spectrum returned by FindOneSpectrum in place —
        // confirm FindOneSpectrum returns a copy, otherwise the value stored in the ParamSet
        // (or the preset's sigma) gets modified as a side effect.
        sig_a = paramSet.FindOneSpectrum("sigma_a", sig_a);
        sig_a.scale(scale);
        sig_s = paramSet.FindOneSpectrum("sigma_s", sig_s);
        sig_s.scale(scale);
        Medium m = null;
        if (Objects.equals(name, "homogeneous")) {
            m = new HomogeneousMedium(sig_a, sig_s, g);
        } else if (Objects.equals(name, "heterogeneous")) {
            // Heterogeneous media require an nx*ny*nz grid of density samples.
            Float[] data = paramSet.FindFloat("density");
            if (data == null) {
                PBrtTLogger.Error("No \"density\" values provided for heterogeneous medium?");
                return null;
            }
            int nx = paramSet.FindOneInt("nx", 1);
            int ny = paramSet.FindOneInt("ny", 1);
            int nz = paramSet.FindOneInt("nz", 1);
            Point3f p0 = paramSet.FindOnePoint3f("p0", new Point3f(0.f, 0.f, 0.f));
            Point3f p1 = paramSet.FindOnePoint3f("p1", new Point3f(1.f, 1.f, 1.f));
            if (data.length != nx * ny * nz) {
                PBrtTLogger.Error("GridDensityMedium has %d density values; expected nx*ny*nz = %d", data.length, nx * ny * nz);
                return null;
            }
            // Map the unit grid onto the user-specified bounding box [p0, p1], then into world space.
            Transform data2Medium = Transform.Translate(new Vector3f(p0)).concatenate(Transform.Scale(p1.x - p0.x, p1.y - p0.y, p1.z - p0.z));
            m = new GridDensityMedium(sig_a, sig_s, g, nx, ny, nz, medium2world.concatenate(data2Medium), data);
        } else {
            PBrtTLogger.Warning("Medium \"%s\" unknown.", name);
        }
        paramSet.ReportUnused();
        return m;
    }
private static Light MakeLight(String name, ParamSet paramSet, Transform light2world, MediumInterface mediumInterface) {
Light light = null;
if (Objects.equals(name, "point"))
light = PointLight.Create(light2world, mediumInterface.outside, paramSet);
else if (Objects.equals(name, "spot"))
light = SpotLight.Create(light2world, mediumInterface.outside, paramSet);
else if (Objects.equals(name, "goniometric"))
light = GonioPhotometricLight.Create(light2world, mediumInterface.outside, paramSet);
else if (Objects.equals(name, "projection"))
light = ProjectionLight.Create(light2world, mediumInterface.outside, paramSet);
else if (Objects.equals(name, "distant"))
light = DistantLight.Create(light2world, paramSet);
else if (Objects.equals(name, "infinite") || Objects.equals(name, "exinfinite"))
light = InfiniteAreaLight.Create(light2world, paramSet);
else
PBrtTLogger.Warning("Light \"%s\" unknown.", name);
paramSet.ReportUnused();
return light;
}
private static AreaLight MakeAreaLight(String name, Transform light2world, MediumInterface mediumInterface, ParamSet paramSet, Shape shape) {
AreaLight area = null;
if (Objects.equals(name, "area") || Objects.equals(name, "diffuse"))
area = DiffuseAreaLight.Create(light2world, mediumInterface.outside, paramSet, shape);
else
PBrtTLogger.Warning("Area light \"%s\" unknown.", name);
paramSet.ReportUnused();
return area;
}
private static Primitive MakeAccelerator(String name, Primitive[] prims, ParamSet paramSet) {
Primitive accel = null;
if (Objects.equals(name, "bvh"))
accel = BVHAccel.Create(prims, paramSet);
else if (Objects.equals(name, "kdtree"))
accel = KdTreeAccel.Create(prims, paramSet);
else if (Objects.equals(name, "none"))
accel = NoAccel.Create(prims, paramSet);
else
PBrtTLogger.Warning("Accelerator \"%s\" unknown.", name);
paramSet.ReportUnused();
return accel;
}
private static Camera MakeCamera(String name, ParamSet paramSet, TransformSet cam2worldSet, float transformStart, float transformEnd, Film film) {
Camera camera = null;
MediumInterface mediumInterface = graphicsState.CreateMediumInterface();
TransformCache.TransformPair c2w0 = transformCache.Lookup(cam2worldSet.trans[0]);
TransformCache.TransformPair c2w1 = transformCache.Lookup(cam2worldSet.trans[1]);
AnimatedTransform animatedCam2World = new AnimatedTransform(c2w0.t, transformStart, c2w1.t, transformEnd);
if (Objects.equals(name, "perspective"))
camera = PerspectiveCamera.Create(paramSet, animatedCam2World, film, mediumInterface.outside);
else if (Objects.equals(name, "orthographic"))
camera = OrthographicCamera.Create(paramSet, animatedCam2World, film, mediumInterface.outside);
else if (Objects.equals(name, "realistic"))
camera = RealisticCamera.Create(paramSet, animatedCam2World, film, mediumInterface.outside);
else if (Objects.equals(name, "environment"))
camera = EnvironmentCamera.Create(paramSet, animatedCam2World, film, mediumInterface.outside);
else
PBrtTLogger.Warning("Camera \"%s\" unknown.", name);
paramSet.ReportUnused();
return camera;
}
private static Sampler MakeSampler(String name, ParamSet paramSet, Film film) {
Sampler sampler = null;
if (Objects.equals(name, "lowdiscrepancy") || Objects.equals(name, "02sequence"))
sampler = ZeroTwoSequence.Create(paramSet);
else if (Objects.equals(name, "maxmindist"))
sampler = MaxMinDistSampler.Create(paramSet);
else if (Objects.equals(name, "halton"))
sampler = HaltonSampler.Create(paramSet, film.GetSampleBounds());
else if (Objects.equals(name, "sobol"))
sampler = SobolSampler.Create(paramSet, film.GetSampleBounds());
else if (Objects.equals(name, "random"))
sampler = RandomSampler.Create(paramSet);
else if (Objects.equals(name, "stratified"))
sampler = StratifiedSampler.Create(paramSet);
else
PBrtTLogger.Warning("Sampler \"%s\" unknown.", name);
paramSet.ReportUnused();
return sampler;
}
private static Filter MakeFilter(String name, ParamSet paramSet) {
Filter filter = null;
if (Objects.equals(name, "box"))
filter = BoxFilter.Create(paramSet);
else if (Objects.equals(name, "gaussian"))
filter = GaussianFilter.Create(paramSet);
else if (Objects.equals(name, "mitchell"))
filter = MitchellFilter.Create(paramSet);
else if (Objects.equals(name, "sinc"))
filter = LanczosSincFilter.Create(paramSet);
else if (Objects.equals(name, "triangle"))
filter = TriangleFilter.Create(paramSet);
else {
PBrtTLogger.Error("Filter \"%s\" unknown.", name);
}
paramSet.ReportUnused();
return filter;
}
private static Film MakeFilm(String name, ParamSet paramSet, Filter filter) {
Film film = null;
if (Objects.equals(name, "image"))
film = Film.Create(paramSet, filter);
else
PBrtTLogger.Warning("Film \"%s\" unknown.", name);
paramSet.ReportUnused();
return film;
}
// API Function Declarations
    /**
     * Initializes the pbrt API state machine; must be called once before any
     * other pbrt* call, and paired with {@link #pbrtCleanup()}. Calling it twice
     * is reported as an error but state is reset regardless.
     *
     * @param opt global options (quiet/cat/toply flags) stored into Pbrt.options
     */
    public static void pbrtInit(Options opt) {
        Pbrt.options = opt;
        // API Initialization
        if (currentApiState != APIState.Uninitialized)
            PBrtTLogger.Error("pbrtInit() has already been called.");
        currentApiState = APIState.OptionsBlock;
        renderOptions = new RenderOptions();
        graphicsState = new GraphicsState();
        catIndentCount = 0;
        // General \pbrt Initialization
        //SampledSpectrum.Init();
        //ParallelInit(); // Threads must be launched before the profiler is
        // initialized.
    }
    /**
     * Tears down the pbrt API state machine. Reports an error if called without
     * a prior pbrtInit() or from inside a world block, but resets state either way.
     */
    public static void pbrtCleanup() {
        // API Cleanup
        if (currentApiState == APIState.Uninitialized)
            PBrtTLogger.Error("pbrtCleanup() called without pbrtInit().");
        else if (currentApiState == APIState.WorldBlock)
            PBrtTLogger.Error("pbrtCleanup() called while inside world block.");
        currentApiState = APIState.Uninitialized;
        //ParallelCleanup();
        renderOptions = null;
    }
private static char[] spaces = new char[]{' '};
public static void pbrtIdentity() {
VERIFY_INITIALIZED("Identity");
for (int i = 0; i < MaxTransforms; ++i) {
if ((activeTransformBits & (1 << i)) != 0) {
curTransform.trans[i] = new Transform();
}
}
if (Pbrt.options.Cat || Pbrt.options.ToPly)
System.out.format("%sIdentity\n", new String(spaces, 0, catIndentCount));
}
public static void pbrtTranslate(float dx, float dy, float dz) {
VERIFY_INITIALIZED("Translate");
for (int i = 0; i < MaxTransforms; ++i) {
if ((activeTransformBits & (1 << i)) != 0) {
curTransform.trans[i] = curTransform.trans[i].concatenate(Transform.Translate(new Vector3f(dx, dy, dz)));
}
}
if (Pbrt.options.Cat || Pbrt.options.ToPly)
System.out.format("%sTranslate %.9g %.9g %.9g\n", new String(spaces, 0, catIndentCount), dx, dy, dz);
}
public static void pbrtRotate(float angle, float ax, float ay, float az) {
VERIFY_INITIALIZED("Rotate");
for (int i = 0; i < MaxTransforms; ++i) {
if ((activeTransformBits & (1 << i)) != 0) {
curTransform.trans[i] = curTransform.trans[i].concatenate(Transform.Rotate(angle, new Vector3f(ax, ay, az)));
}
}
if (Pbrt.options.Cat || Pbrt.options.ToPly)
System.out.format("%sRotate %.9g %.9g %.9g %.9g\n", new String(spaces, 0, catIndentCount), angle, ax, ay, az);
}
public static void pbrtScale(float sx, float sy, float sz) {
VERIFY_INITIALIZED("Scale");
for (int i = 0; i < MaxTransforms; ++i) {
if ((activeTransformBits & (1 << i)) != 0) {
curTransform.trans[i] = curTransform.trans[i].concatenate(Transform.Scale(sx, sy, sz));
}
}
if (Pbrt.options.Cat || Pbrt.options.ToPly) {
System.out.format("%sScale %.9g %.9g %.9g\n", new String(spaces, 0, catIndentCount), sx, sy, sz);
}
}
public static void pbrtLookAt(float ex, float ey, float ez, float lx, float ly, float lz, float ux, float uy, float uz) {
VERIFY_INITIALIZED("LookAt");
Transform lookAt = Transform.LookAt(new Point3f(ex, ey, ez), new Point3f(lx, ly, lz), new Vector3f(ux, uy, uz));
for (int i = 0; i < MaxTransforms; ++i) {
if ((activeTransformBits & (1 << i)) != 0) {
curTransform.trans[i] = curTransform.trans[i].concatenate(lookAt);
}
}
if (Pbrt.options.Cat || Pbrt.options.ToPly) {
System.out.format("%sLookAt %.9g %.9g %.9g\n%s%.9g %.9g %.9g\n%s%.9g %.9g %.9g\n",
new String(spaces, 0, catIndentCount), ex, ey, ez, new String(spaces, 0, catIndentCount+8), lx, ly, lz,
new String(spaces, 0, catIndentCount+8), ux, uy, uz);
}
}
public static void pbrtConcatTransform(float[] tr) {
VERIFY_INITIALIZED("ConcatTransform");
for (int i = 0; i < MaxTransforms; ++i) {
if ((activeTransformBits & (1 << i)) != 0) {
curTransform.trans[i] = curTransform.trans[i].concatenate(new Transform(new Matrix4x4(tr[0], tr[4], tr[8], tr[12], tr[1], tr[5],
tr[9], tr[13], tr[2], tr[6], tr[10], tr[14],
tr[3], tr[7], tr[11], tr[15])));
}
}
if (Pbrt.options.Cat || Pbrt.options.ToPly) {
System.out.format("%sConcatTransform [ ", new String(spaces, 0, catIndentCount));
for (int i = 0; i < 16; ++i) System.out.format("%.9g ", tr[i]);
System.out.format(" ]\n");
}
}
public static void pbrtTransform(float[] tr) {
VERIFY_INITIALIZED("Transform");
for (int i = 0; i < MaxTransforms; ++i) {
if ((activeTransformBits & (1 << i)) != 0) {
curTransform.trans[i] = new Transform(new Matrix4x4(
tr[0], tr[4], tr[8], tr[12], tr[1], tr[5], tr[9], tr[13], tr[2],
tr[6], tr[10], tr[14], tr[3], tr[7], tr[11], tr[15]));
}
}
if (Pbrt.options.Cat || Pbrt.options.ToPly) {
System.out.format("%sTransform [ ", new String(spaces, 0, catIndentCount));
for (int i = 0; i < 16; ++i) System.out.format("%.9g ", tr[i]);
System.out.format(" ]\n");
}
}
public static void pbrtCoordinateSystem(String name) {
VERIFY_INITIALIZED("CoordinateSystem");
namedCoordinateSystems.put(name, new TransformSet(curTransform));
if (Pbrt.options.Cat || Pbrt.options.ToPly)
System.out.format("%sCoordinateSystem \"%s\"\n", new String(spaces, 0, catIndentCount), name);
}
public static void pbrtCoordSysTransform(String name) {
VERIFY_INITIALIZED("CoordSysTransform");
if (namedCoordinateSystems.containsKey(name))
curTransform = namedCoordinateSystems.get(name);
else
PBrtTLogger.Warning("Couldn't find named coordinate system \"%s\"", name);
if (Pbrt.options.Cat || Pbrt.options.ToPly)
System.out.format("%sCoordSysTransform \"%s\"\n", new String(spaces, 0, catIndentCount), name);
}
public static void pbrtActiveTransformAll() {
activeTransformBits = AllTransformsBits;
if (Pbrt.options.Cat || Pbrt.options.ToPly)
System.out.format("%sActiveTransform All\n", new String(spaces, 0, catIndentCount));
}
public static void pbrtActiveTransformEndTime() {
activeTransformBits = EndTransformBits;
if (Pbrt.options.Cat || Pbrt.options.ToPly)
System.out.format("%sActiveTransform EndTime\n", new String(spaces, 0, catIndentCount));
}
public static void pbrtActiveTransformStartTime() {
activeTransformBits = StartTransformBits;
if (Pbrt.options.Cat || Pbrt.options.ToPly)
System.out.format("%sActiveTransform StartTime\n", new String(spaces, 0, catIndentCount));
}
public static void pbrtTransformTimes(float start, float end) {
VERIFY_OPTIONS("TransformTimes");
renderOptions.transformStartTime = start;
renderOptions.transformEndTime = end;
if (Pbrt.options.Cat || Pbrt.options.ToPly)
System.out.format("%sTransformTimes %.9g %.9g\n", new String(spaces, 0, catIndentCount), start, end);
}
public static void pbrtPixelFilter(String name, ParamSet params) {
VERIFY_OPTIONS("PixelFilter");
renderOptions.FilterName = name;
renderOptions.FilterParams = params;
if (Pbrt.options.Cat || Pbrt.options.ToPly) {
System.out.format("%sPixelFilter \"%s\" ", new String(spaces, 0, catIndentCount), name);
params.Print(catIndentCount);
System.out.format("\n");
}
}
public static void pbrtFilm(String type, ParamSet params) {
VERIFY_OPTIONS("Film");
renderOptions.FilmParams = params;
renderOptions.FilmName = type;
if (Pbrt.options.Cat || Pbrt.options.ToPly) {
System.out.format("%sFilm \"%s\" ", new String(spaces, 0, catIndentCount), type);
params.Print(catIndentCount);
System.out.format("\n");
}
}
public static void pbrtSampler(String name, ParamSet params) {
VERIFY_OPTIONS("Sampler");
renderOptions.SamplerName = name;
renderOptions.SamplerParams = params;
if (Pbrt.options.Cat || Pbrt.options.ToPly) {
System.out.format("%sSampler \"%s\" ", new String(spaces, 0, catIndentCount), name);
params.Print(catIndentCount);
System.out.format("\n");
}
}
public static void pbrtAccelerator(String name, ParamSet params) {
VERIFY_OPTIONS("Accelerator");
renderOptions.AcceleratorName = name;
renderOptions.AcceleratorParams = params;
if (Pbrt.options.Cat || Pbrt.options.ToPly) {
System.out.format("%sAccelerator \"%s\" ", new String(spaces, 0, catIndentCount), name);
params.Print(catIndentCount);
System.out.format("\n");
}
}
public static void pbrtIntegrator(String name, ParamSet params) {
VERIFY_OPTIONS("Integrator");
renderOptions.IntegratorName = name;
renderOptions.IntegratorParams = params;
if (Pbrt.options.Cat || Pbrt.options.ToPly) {
System.out.format("%sIntegrator \"%s\" ", new String(spaces, 0, catIndentCount), name);
params.Print(catIndentCount);
System.out.format("\n");
}
}
public static void pbrtCamera(String name, ParamSet params) {
VERIFY_OPTIONS("Camera");
renderOptions.CameraName = name;
renderOptions.CameraParams = params;
renderOptions.CameraToWorld = TransformSet.Inverse(curTransform);
namedCoordinateSystems.put("camera", new TransformSet(renderOptions.CameraToWorld));
if (Pbrt.options.Cat || Pbrt.options.ToPly) {
System.out.format("%sCamera \"%s\" ", new String(spaces, 0, catIndentCount), name);
params.Print(catIndentCount);
System.out.format("\n");
}
}
public static void pbrtMakeNamedMedium(String name, ParamSet params) {
VERIFY_INITIALIZED("MakeNamedMedium");
WARN_IF_ANIMATED_TRANSFORM("MakeNamedMedium");
String type = params.FindOneString("type", "");
if (type.isEmpty())
PBrtTLogger.Error("No parameter string \"type\" found in MakeNamedMedium");
else {
Medium medium = MakeMedium(type, params, curTransform.trans[0]);
if (medium != null) renderOptions.namedMedia.put(name, medium);
}
if (Pbrt.options.Cat || Pbrt.options.ToPly) {
System.out.format("%sMakeNamedMedium \"%s\" ", new String(spaces, 0, catIndentCount), name);
params.Print(catIndentCount);
System.out.format("\n");
}
}
public static void pbrtMediumInterface(String insideName, String outsideName) {
VERIFY_INITIALIZED("MediumInterface");
graphicsState.currentInsideMedium = insideName;
graphicsState.currentOutsideMedium = outsideName;
renderOptions.haveScatteringMedia = true;
if (Pbrt.options.Cat || Pbrt.options.ToPly)
System.out.format("%sMediumInterface \"%s\" \"%s\"\n", new String(spaces, 0, catIndentCount),
insideName, outsideName);
}
public static void pbrtWorldBegin() {
VERIFY_OPTIONS("WorldBegin");
currentApiState = APIState.WorldBlock;
for (int i = 0; i < MaxTransforms; ++i) curTransform.trans[i] = new Transform();
activeTransformBits = AllTransformsBits;
namedCoordinateSystems.put("world", new TransformSet(curTransform));
if (Pbrt.options.Cat || Pbrt.options.ToPly)
System.out.format("\n\nWorldBegin\n\n");
}
public static void pbrtAttributeBegin() {
VERIFY_WORLD("AttributeBegin");
pushedGraphicsStates.push(new GraphicsState(graphicsState));
pushedTransforms.push(new TransformSet(curTransform));
pushedActiveTransformBits.push(activeTransformBits);
if (Pbrt.options.Cat || Pbrt.options.ToPly) {
System.out.format("\n%sAttributeBegin\n", new String(spaces, 0, catIndentCount));
catIndentCount += 4;
}
}
public static void pbrtAttributeEnd() {
VERIFY_WORLD("AttributeEnd");
if (pushedGraphicsStates.empty()) {
PBrtTLogger.Error("Unmatched pbrtAttributeEnd() encountered. Ignoring it.");
return;
}
graphicsState = pushedGraphicsStates.pop();
curTransform = pushedTransforms.pop();
activeTransformBits = pushedActiveTransformBits.pop();
if (Pbrt.options.Cat || Pbrt.options.ToPly) {
catIndentCount -= 4;
System.out.format("%sAttributeEnd\n", new String(spaces, 0, catIndentCount));
}
}
public static void pbrtTransformBegin() {
VERIFY_WORLD("TransformBegin");
pushedTransforms.push(new TransformSet(curTransform));
pushedActiveTransformBits.push(activeTransformBits);
if (Pbrt.options.Cat || Pbrt.options.ToPly) {
System.out.format("%sTransformBegin\n", new String(spaces, 0, catIndentCount));
catIndentCount += 4;
}
}
public static void pbrtTransformEnd() {
VERIFY_WORLD("TransformEnd");
if (pushedTransforms.empty()) {
PBrtTLogger.Error("Unmatched pbrtTransformEnd() encountered. Ignoring it.");
return;
}
curTransform = pushedTransforms.pop();
activeTransformBits = pushedActiveTransformBits.pop();
if (Pbrt.options.Cat || Pbrt.options.ToPly) {
catIndentCount -= 4;
System.out.format("%sTransformEnd\n", new String(spaces, 0, catIndentCount));
}
}
public static void pbrtTexture(String name, String type, String texname, ParamSet params) {
VERIFY_WORLD("Texture");
TextureParams tp = new TextureParams(params, params, graphicsState.floatTextures, graphicsState.spectrumTextures);
if (Objects.equals(type, "float")) {
// Create _Float_ texture and store in _floatTextures_
if (graphicsState.floatTextures.containsKey(name)) {
PBrtTLogger.Warning("Texture \"%s\" being redefined", name);
}
WARN_IF_ANIMATED_TRANSFORM("Texture");
TextureFloat ft = MakeFloatTexture(texname, curTransform.trans[0], tp);
if (ft != null) graphicsState.floatTextures.put(name, ft);
} else if (Objects.equals(type, "color") || Objects.equals(type, "spectrum")) {
// Create _color_ texture and store in _spectrumTextures_
if (graphicsState.spectrumTextures.containsKey(name))
PBrtTLogger.Warning("Texture \"%s\" being redefined", name);
WARN_IF_ANIMATED_TRANSFORM("Texture");
TextureSpectrum st = MakeSpectrumTexture(texname, curTransform.trans[0], tp);
if (st != null) graphicsState.spectrumTextures.put(name, st);
} else {
PBrtTLogger.Error("Texture type \"%s\" unknown.", type);
}
if (Pbrt.options.Cat || Pbrt.options.ToPly) {
System.out.format("%sTexture \"%s\" \"%s\" \"%s\" ", new String(spaces, 0, catIndentCount), name, type, texname);
params.Print(catIndentCount);
System.out.format("\n");
}
}
public static void pbrtMaterial(String name, ParamSet params) {
VERIFY_WORLD("Material");
graphicsState.material = name;
graphicsState.materialParams = params;
graphicsState.currentNamedMaterial = "";
if (Pbrt.options.Cat || Pbrt.options.ToPly) {
System.out.format("%sMaterial \"%s\" ", new String(spaces, 0, catIndentCount), name);
params.Print(catIndentCount);
System.out.format("\n");
}
}
    /**
     * Defines a reusable named material from a "type" parameter. In cat/ply
     * mode the call is only echoed and the material is NOT actually created;
     * otherwise it is built and stored (redefinitions warn).
     */
    public static void pbrtMakeNamedMaterial(String name, ParamSet params) {
        VERIFY_WORLD("MakeNamedMaterial");
        // error checking, warning if replace, what to use for transform?
        ParamSet emptyParams = new ParamSet();
        TextureParams mp = new TextureParams(params, emptyParams, graphicsState.floatTextures, graphicsState.spectrumTextures);
        String matName = mp.FindString("type","");
        WARN_IF_ANIMATED_TRANSFORM("MakeNamedMaterial");
        if (matName.isEmpty()) {
            PBrtTLogger.Error("No parameter string \"type\" found in MakeNamedMaterial");
        }
        if (Pbrt.options.Cat || Pbrt.options.ToPly) {
            System.out.format("%sMakeNamedMaterial \"%s\" ", new String(spaces, 0, catIndentCount), name);
            params.Print(catIndentCount);
            System.out.format("\n");
        } else {
            // NOTE(review): even when "type" was missing, creation proceeds with an
            // empty name — presumably MakeMaterial tolerates that; TODO confirm.
            Material mtl = MakeMaterial(matName, mp);
            if (graphicsState.namedMaterials.containsKey(name))
                PBrtTLogger.Warning("Named material \"%s\" redefined.", name);
            graphicsState.namedMaterials.put(name, mtl);
        }
    }
public static void pbrtNamedMaterial(String name) {
VERIFY_WORLD("NamedMaterial");
graphicsState.currentNamedMaterial = name;
if (Pbrt.options.Cat || Pbrt.options.ToPly)
System.out.format("%sNamedMaterial \"%s\"\n", new String(spaces, 0, catIndentCount), name);
}
public static void pbrtLightSource(String name, ParamSet params) {
VERIFY_WORLD("LightSource");
WARN_IF_ANIMATED_TRANSFORM("LightSource");
MediumInterface mi = graphicsState.CreateMediumInterface();
Light lt = MakeLight(name, params, curTransform.trans[0], mi);
if (lt == null) {
PBrtTLogger.Error("LightSource: light type \"%s\" unknown.", name);
} else {
renderOptions.lights.add(lt);
}
if (Pbrt.options.Cat || Pbrt.options.ToPly) {
System.out.format("%sLightSource \"%s\" ", new String(spaces, 0, catIndentCount), name);
params.Print(catIndentCount);
System.out.format("\n");
}
}
public static void pbrtAreaLightSource(String name, ParamSet params) {
VERIFY_WORLD("AreaLightSource");
graphicsState.areaLight = name;
graphicsState.areaLightParams = params;
if (Pbrt.options.Cat || Pbrt.options.ToPly) {
System.out.format("%sAreaLightSource \"%s\" ", new String(spaces, 0, catIndentCount), name);
params.Print(catIndentCount);
System.out.format("\n");
}
}
    /**
     * Creates one or more shapes (plus any attached area lights) and adds them
     * either to the scene or to the instance currently being defined. Static
     * CTMs wrap shapes directly in GeometricPrimitives; animated CTMs build the
     * shapes in object space and wrap them in a single TransformedPrimitive.
     */
    public static void pbrtShape(String name, ParamSet params) {
        VERIFY_WORLD("Shape");
        ArrayList<Primitive> prims = new ArrayList<>();
        ArrayList<AreaLight> areaLights = new ArrayList<>();
        // In ToPly mode triangle meshes are intentionally not echoed here.
        if (Pbrt.options.Cat || (Pbrt.options.ToPly && !Objects.equals(name, "trianglemesh"))) {
            System.out.format("%sShape \"%s\" ", new String(spaces, 0, catIndentCount), name);
            params.Print(catIndentCount);
            System.out.format("\n");
        }
        if (!curTransform.IsAnimated()) {
            // Initialize _prims_ and _areaLights_ for static shape
            // Create shapes for shape _name_
            TransformCache.TransformPair tp = transformCache.Lookup(curTransform.trans[0]);
            Transform ObjToWorld = tp.t;
            Transform WorldToObj = tp.tInv;
            ArrayList<Shape> shapes = MakeShapes(name, ObjToWorld, WorldToObj, graphicsState.reverseOrientation, params);
            if (shapes.isEmpty()) return;
            Material mtl = graphicsState.CreateMaterial(params);
            params.ReportUnused();
            MediumInterface mi = graphicsState.CreateMediumInterface();
            for (Shape s : shapes) {
                assert s != null;
                // Possibly create area light for shape
                AreaLight area = null;
                if (!graphicsState.areaLight.isEmpty()) {
                    area = MakeAreaLight(graphicsState.areaLight, curTransform.trans[0], mi, graphicsState.areaLightParams, s);
                    if (area != null) areaLights.add(area);
                }
                prims.add(new GeometricPrimitive(s, mtl, area, mi));
            }
        } else {
            // Initialize _prims_ and _areaLights_ for animated shape
            // Create initial shape or shapes for animated shape
            if (!graphicsState.areaLight.isEmpty()) {
                PBrtTLogger.Warning("Ignoring currently set area light when creating animated shape");
            }
            // Shapes are built with identity object-to-world; the animation is
            // applied afterwards via a TransformedPrimitive wrapper.
            TransformCache.TransformPair tp = transformCache.Lookup(new Transform());
            ArrayList<Shape> shapes = MakeShapes(name, tp.t, tp.t, graphicsState.reverseOrientation, params);
            if (shapes.isEmpty()) return;
            // Create _GeometricPrimitive_(s) for animated shape
            Material mtl = graphicsState.CreateMaterial(params);
            params.ReportUnused();
            MediumInterface mi = graphicsState.CreateMediumInterface();
            for (Shape s : shapes) {
                prims.add(new GeometricPrimitive(s, mtl, null, mi));
            }
            // Create single _TransformedPrimitive_ for _prims_
            // Get _animatedObjectToWorld_ transform for shape
            TransformCache.TransformPair tp0 = transformCache.Lookup(curTransform.trans[0]);
            TransformCache.TransformPair tp1 = transformCache.Lookup(curTransform.trans[1]);
            AnimatedTransform animatedObjectToWorld = new AnimatedTransform(tp0.t, renderOptions.transformStartTime, tp1.t,
                renderOptions.transformEndTime);
            // Multiple shapes are first collapsed into one BVH so a single
            // TransformedPrimitive can wrap them all.
            if (prims.size() > 1) {
                Primitive[] primArray = new Primitive[1];
                Primitive bvh = new BVHAccel(prims.toArray(primArray));
                prims.clear();
                prims.add(bvh);
            }
            prims.set(0, new TransformedPrimitive(prims.get(0), animatedObjectToWorld));
        }
        // Add _prims_ and _areaLights_ to scene or current instance
        if (renderOptions.currentInstance != null) {
            if (!areaLights.isEmpty()) {
                PBrtTLogger.Warning("Area lights not supported with object instancing");
            }
            renderOptions.currentInstance.addAll(prims);
        } else {
            renderOptions.primitives.addAll(prims);
            if (!areaLights.isEmpty()) {
                renderOptions.lights.addAll(areaLights);
            }
        }
    }
public static void pbrtReverseOrientation() {
VERIFY_WORLD("ReverseOrientation");
graphicsState.reverseOrientation = !graphicsState.reverseOrientation;
if (Pbrt.options.Cat || Pbrt.options.ToPly) {
System.out.format("%sReverseOrientation\n", new String(spaces, 0, catIndentCount));
}
}
public static void pbrtObjectBegin(String name) {
VERIFY_WORLD("ObjectBegin");
pbrtAttributeBegin();
if (renderOptions.currentInstance != null)
PBrtTLogger.Error("ObjectBegin called inside of instance definition");
renderOptions.instances.put(name, new ArrayList<>());
renderOptions.currentInstance = renderOptions.instances.get(name);
if (Pbrt.options.Cat || Pbrt.options.ToPly) {
System.out.format("%sObjectBegin \"%s\"\n", new String(spaces, 0, catIndentCount), name);
}
}
    // Stats: number of instance definitions completed via ObjectBegin/ObjectEnd.
    private static Stats.Counter nObjectInstancesCreated = new Stats.Counter("Scene/Object instances created");
public static void pbrtObjectEnd() {
VERIFY_WORLD("ObjectEnd");
if (renderOptions.currentInstance == null)
PBrtTLogger.Error("ObjectEnd called outside of instance definition");
renderOptions.currentInstance = null;
pbrtAttributeEnd();
nObjectInstancesCreated.increment();
if (Pbrt.options.Cat || Pbrt.options.ToPly) {
System.out.format("%sObjectEnd\n", new String(spaces, 0, catIndentCount));
}
}
    // Stats: number of times a defined instance was placed via ObjectInstance.
    private static Stats.Counter nObjectInstancesUsed = new Stats.Counter("Scene/Object instances used");
    /**
     * Places a copy of a previously defined instance into the scene, wrapped in
     * a TransformedPrimitive carrying the (possibly animated) current transform.
     */
    public static void pbrtObjectInstance(String name) {
        VERIFY_WORLD("ObjectInstance");
        // Perform object instance error checking
        if (Pbrt.options.Cat || Pbrt.options.ToPly)
            System.out.format("%sObjectInstance \"%s\"\n", new String(spaces, 0, catIndentCount), name);
        if (renderOptions.currentInstance != null) {
            PBrtTLogger.Error("ObjectInstance can't be called inside instance definition");
            return;
        }
        if (!renderOptions.instances.containsKey(name)) {
            PBrtTLogger.Error("Unable to find instance named \"%s\"", name);
            return;
        }
        ArrayList<Primitive> in = renderOptions.instances.get(name);
        if (in.isEmpty()) return;
        nObjectInstancesUsed.increment();
        // Multi-primitive instances are collapsed into one aggregate the first
        // time they are used; the shared list is replaced in place so every
        // later use of the same instance reuses the aggregate.
        if (in.size() > 1) {
            // Create aggregate for instance _Primitive_s
            Primitive[] primArray = new Primitive[1];
            Primitive[] inPrims = in.toArray(primArray);
            Primitive accel = MakeAccelerator(renderOptions.AcceleratorName, inPrims, renderOptions.AcceleratorParams);
            if (accel == null) accel = new BVHAccel(inPrims);
            in.clear();
            in.add(accel);
        }
        // Create _animatedInstanceToWorld_ transform for instance
        TransformCache.TransformPair tp0 = transformCache.Lookup(curTransform.trans[0]);
        TransformCache.TransformPair tp1 = transformCache.Lookup(curTransform.trans[1]);
        AnimatedTransform animatedInstanceToWorld = new AnimatedTransform(
                tp0.t, renderOptions.transformStartTime,
                tp1.t, renderOptions.transformEndTime);
        Primitive prim = new TransformedPrimitive(in.get(0), animatedInstanceToWorld);
        renderOptions.primitives.add(prim);
    }
public static void pbrtWorldEnd() {
VERIFY_WORLD("WorldEnd");
// Ensure there are no pushed graphics states
while (!pushedGraphicsStates.empty()) {
PBrtTLogger.Warning("Missing end to pbrtAttributeBegin()");
pushedGraphicsStates.pop();
pushedTransforms.pop();
}
while (!pushedTransforms.empty()) {
PBrtTLogger.Warning("Missing end to pbrtTransformBegin()");
pushedTransforms.pop();
}
// Create scene and render
if (Pbrt.options.Cat || Pbrt.options.ToPly) {
System.out.format("%sWorldEnd\n", new String(spaces, 0, catIndentCount));
} else {
Integrator integrator = renderOptions.MakeIntegrator();
Scene scene = renderOptions.MakeScene();
if ((scene != null) && (integrator != null)) {
integrator.Render(scene);
}
//Parallel.MergeWorkerThreadStats();
Stats.ReportThreadStats();
if (!Pbrt.options.Quiet) {
try {
PrintWriter pw = new PrintWriter("renderStats.txt");
Stats.PrintStats(pw);
pw.close();
} catch (IOException e) {
e.printStackTrace();
}
Stats.ClearStats();
}
}
// Clean up after rendering
graphicsState = new GraphicsState();
transformCache.Clear();
currentApiState = APIState.OptionsBlock;
for (int i = 0; i < MaxTransforms; ++i) curTransform.trans[i] = new Transform();
activeTransformBits = AllTransformsBits;
namedCoordinateSystems.clear();
ImageTextureFloat.ClearCacheFloat();
ImageTextureSpectrum.ClearCacheSpectrum();
}
    /** Parses the scene file at {@code filename}, driving the pbrt* API calls above. */
    public static void pbrtParseFile(String filename) {
        Parser.ParseFile(filename);
    }
    /** Parses an in-memory scene description, driving the pbrt* API calls above. */
    public static void pbrtParseString(String str) {
        Parser.ParseString(str);
    }
private static void VERIFY_INITIALIZED(String func) {
if (!(Pbrt.options.Cat || Pbrt.options.ToPly) && currentApiState == APIState.Uninitialized) {
PBrtTLogger.Error("pbrtInit() must be before calling \"%s()\". Ignoring.", func);
}
}
private static void VERIFY_OPTIONS(String func) {
VERIFY_INITIALIZED(func);
if (!(Pbrt.options.Cat || Pbrt.options.ToPly) && currentApiState == APIState.WorldBlock) {
PBrtTLogger.Error("Options cannot be set inside world block; \"%s\" not allowed. Ignoring.", func);
}
}
private static void VERIFY_WORLD(String func) {
VERIFY_INITIALIZED(func);
if(!(Pbrt.options.Cat || Pbrt.options.ToPly) && currentApiState ==APIState.OptionsBlock) {
PBrtTLogger.Error("Scene description must be inside world block; \"%s\" not allowed. Ignoring.", func);
}
}
private static void WARN_IF_ANIMATED_TRANSFORM(String func) {
if (curTransform.IsAnimated()) {
PBrtTLogger.Warning("Animated transformations set; ignoring for \"%s\" " +
"and using the start transform only", func);
}
}
}
| |
/*
* @notice
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Modifications copyright (C) 2020 Elasticsearch B.V.
*/
package org.elasticsearch.core.internal.io;
import org.elasticsearch.core.Nullable;
import java.io.Closeable;
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.FileVisitResult;
import java.nio.file.FileVisitor;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.Arrays;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Map;
/**
* Utilities for common I/O methods. Borrowed heavily from Lucene (org.apache.lucene.util.IOUtils).
*/
public final class IOUtils {
    /**
     * UTF-8 charset string.
     * <p>Where possible, use {@link StandardCharsets#UTF_8} instead,
     * as using the String constant may slow things down.
     * @see StandardCharsets#UTF_8
     */
    public static final String UTF_8 = StandardCharsets.UTF_8.name();
    private IOUtils() {
        // Static utils methods only; the private constructor prevents instantiation.
    }
/**
* Closes all given {@link Closeable}s. Some of the {@linkplain Closeable}s may be null; they are
* ignored. After everything is closed, the method either throws the first exception it hit
* while closing with other exceptions added as suppressed, or completes normally if there were
* no exceptions.
*
* @param objects objects to close
*/
public static void close(final Closeable... objects) throws IOException {
close(null, Arrays.asList(objects));
}
/**
* @see #close(Closeable...)
*/
public static void close(@Nullable Closeable closeable) throws IOException {
if (closeable != null) {
closeable.close();
}
}
/**
* Closes all given {@link Closeable}s. Some of the {@linkplain Closeable}s may be null; they are
* ignored. After everything is closed, the method adds any exceptions as suppressed to the
* original exception, or throws the first exception it hit if {@code Exception} is null. If
* no exceptions are encountered and the passed in exception is null, it completes normally.
*
* @param objects objects to close
*/
public static void close(final Exception e, final Closeable... objects) throws IOException {
close(e, Arrays.asList(objects));
}
/**
* Closes all given {@link Closeable}s. Some of the {@linkplain Closeable}s may be null; they are
* ignored. After everything is closed, the method either throws the first exception it hit
* while closing with other exceptions added as suppressed, or completes normally if there were
* no exceptions.
*
* @param objects objects to close
*/
public static void close(final Iterable<? extends Closeable> objects) throws IOException {
close(null, objects);
}
/**
* Closes all given {@link Closeable}s. If a non-null exception is passed in, or closing a
* stream causes an exception, throws the exception with other {@link RuntimeException} or
* {@link IOException} exceptions added as suppressed.
*
* @param ex existing Exception to add exceptions occurring during close to
* @param objects objects to close
*
* @see #close(Closeable...)
*/
public static void close(final Exception ex, final Iterable<? extends Closeable> objects) throws IOException {
Exception firstException = ex;
for (final Closeable object : objects) {
try {
close(object);
} catch (final IOException | RuntimeException e) {
if (firstException == null) {
firstException = e;
} else {
firstException.addSuppressed(e);
}
}
}
if (firstException != null) {
if (firstException instanceof IOException) {
throw (IOException) firstException;
} else {
// since we only assigned an IOException or a RuntimeException to ex above, in this case ex must be a RuntimeException
throw (RuntimeException) firstException;
}
}
}
/**
* Closes all given {@link Closeable}s, suppressing all thrown exceptions. Some of the {@link Closeable}s may be null, they are ignored.
*
* @param objects objects to close
*/
public static void closeWhileHandlingException(final Closeable... objects) {
closeWhileHandlingException(Arrays.asList(objects));
}
/**
* Closes all given {@link Closeable}s, suppressing all thrown exceptions.
*
* @param objects objects to close
*
* @see #closeWhileHandlingException(Closeable...)
*/
public static void closeWhileHandlingException(final Iterable<? extends Closeable> objects) {
for (final Closeable object : objects) {
closeWhileHandlingException(object);
}
}
/**
* @see #closeWhileHandlingException(Closeable...)
*/
public static void closeWhileHandlingException(final Closeable closeable) {
// noinspection EmptyCatchBlock
try {
close(closeable);
} catch (final IOException | RuntimeException e) {}
}
/**
* Deletes all given files, suppressing all thrown {@link IOException}s. Some of the files may be null, if so they are ignored.
*
* @param files the paths of files to delete
*/
public static void deleteFilesIgnoringExceptions(final Path... files) {
deleteFilesIgnoringExceptions(Arrays.asList(files));
}
/**
* Deletes all given files, suppressing all thrown {@link IOException}s. Some of the files may be null, if so they are ignored.
*
* @param files the paths of files to delete
*/
public static void deleteFilesIgnoringExceptions(final Collection<? extends Path> files) {
for (final Path name : files) {
if (name != null) {
// noinspection EmptyCatchBlock
try {
Files.delete(name);
} catch (final IOException ignored) {
}
}
}
}
/**
* Deletes one or more files or directories (and everything underneath it).
*
* @throws IOException if any of the given files (or their sub-hierarchy files in case of directories) cannot be removed.
*/
public static void rm(final Path... locations) throws IOException {
final LinkedHashMap<Path, Throwable> unremoved = rm(new LinkedHashMap<>(), locations);
if (unremoved.isEmpty() == false) {
final StringBuilder b = new StringBuilder("could not remove the following files (in the order of attempts):\n");
for (final Map.Entry<Path, Throwable> kv : unremoved.entrySet()) {
b.append(" ").append(kv.getKey().toAbsolutePath()).append(": ").append(kv.getValue()).append("\n");
}
throw new IOException(b.toString());
}
}
private static LinkedHashMap<Path, Throwable> rm(final LinkedHashMap<Path, Throwable> unremoved, final Path... locations) {
if (locations != null) {
for (final Path location : locations) {
// TODO: remove this leniency
if (location != null && Files.exists(location)) {
try {
Files.walkFileTree(location, new FileVisitor<Path>() {
@Override
public FileVisitResult preVisitDirectory(final Path dir, final BasicFileAttributes attrs) throws IOException {
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult postVisitDirectory(final Path dir, final IOException impossible) throws IOException {
assert impossible == null;
try {
Files.delete(dir);
} catch (final IOException e) {
unremoved.put(dir, e);
}
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFile(final Path file, final BasicFileAttributes attrs) throws IOException {
try {
Files.delete(file);
} catch (final IOException exc) {
unremoved.put(file, exc);
}
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFileFailed(final Path file, final IOException exc) throws IOException {
if (exc != null) {
unremoved.put(file, exc);
}
return FileVisitResult.CONTINUE;
}
});
} catch (final IOException impossible) {
throw new AssertionError("visitor threw exception", impossible);
}
}
}
}
return unremoved;
}
// TODO: replace with constants class if needed (cf. org.apache.lucene.util.Constants)
public static final boolean WINDOWS = System.getProperty("os.name").startsWith("Windows");
public static final boolean LINUX = System.getProperty("os.name").startsWith("Linux");
public static final boolean MAC_OS_X = System.getProperty("os.name").startsWith("Mac OS X");
/**
* Ensure that any writes to the given file is written to the storage device that contains it. The {@code isDir} parameter specifies
* whether or not the path to sync is a directory. This is needed because we open for read and ignore an {@link IOException} since not
* all filesystems and operating systems support fsyncing on a directory. For regular files we must open for write for the fsync to have
* an effect.
*
* @param fileToSync the file to fsync
* @param isDir if true, the given file is a directory (we open for read and ignore {@link IOException}s, because not all file
* systems and operating systems allow to fsync on a directory)
*/
public static void fsync(final Path fileToSync, final boolean isDir) throws IOException {
fsync(fileToSync, isDir, true);
}
/**
* Ensure that any writes to the given file is written to the storage device that contains it. The {@code isDir} parameter specifies
* whether or not the path to sync is a directory. This is needed because we open for read and ignore an {@link IOException} since not
* all filesystems and operating systems support fsyncing on a directory. For regular files we must open for write for the fsync to have
* an effect.
*
* @param fileToSync the file to fsync
* @param isDir if true, the given file is a directory (we open for read and ignore {@link IOException}s, because not all file
* systems and operating systems allow to fsync on a directory)
* @param metaData if {@code true} both the file's content and metadata will be sync, otherwise only the file's content will be sync
*/
public static void fsync(final Path fileToSync, final boolean isDir, final boolean metaData) throws IOException {
if (isDir && WINDOWS) {
// opening a directory on Windows fails, directories can not be fsynced there
if (Files.exists(fileToSync) == false) {
// yet do not suppress trying to fsync directories that do not exist
throw new NoSuchFileException(fileToSync.toString());
}
return;
}
try (FileChannel file = FileChannel.open(fileToSync, isDir ? StandardOpenOption.READ : StandardOpenOption.WRITE)) {
try {
file.force(metaData);
} catch (final IOException e) {
if (isDir) {
assert (LINUX || MAC_OS_X) == false
: "on Linux and MacOSX fsyncing a directory should not throw IOException, "
+ "we just don't want to rely on that in production (undocumented); got: "
+ e;
// ignore exception if it is a directory
return;
}
// throw original exception
throw e;
}
}
}
}
| |
/*
* Copyright (c) 2016 VMware, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed
* under the License is distributed on an "AS IS" BASIS, without warranties or
* conditions of any kind, EITHER EXPRESS OR IMPLIED. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.vmware.xenon.services.samples;
import java.net.URI;
import java.util.function.Consumer;
import com.vmware.xenon.common.Operation;
import com.vmware.xenon.common.ServiceDocument;
import com.vmware.xenon.common.ServiceSubscriptionState.ServiceSubscriber;
import com.vmware.xenon.common.StatefulService;
import com.vmware.xenon.common.UriUtils;
import com.vmware.xenon.services.common.QueryTask;
import com.vmware.xenon.services.common.QueryTask.QuerySpecification;
import com.vmware.xenon.services.common.ServiceUriPaths;
/**
* Load balanced continuous query sample
*
* A StatefulService that is responsible for creating the continuous query and a subscription to
* that continuous query. The Continuous Query service will be created on the local index.
* The StatefulService overrides handleNodeGroupMaintenance() to stop the Continuous Query & subscription
* when the node is no longer the owner.
*/
public class SampleContinuousQueryWatchService extends StatefulService {
public static final String FACTORY_LINK = ServiceUriPaths.SAMPLES + "/watches";
/**
* A known self-link for query task service, so that we can refer it easily for start and stop.
*/
public static final String QUERY_SELF_LINK = ServiceUriPaths.CORE_LOCAL_QUERY_TASKS + "/sample-continuous-query";
/**
* A known self-link for subscription service, so that we can refer it easily for start and stop.
*/
public static final String SUBSCRIPTION_SELF_LINK = ServiceUriPaths.CORE_CALLBACKS + "/sample-cq-subscription";
public SampleContinuousQueryWatchService() {
super(State.class);
toggleOption(ServiceOption.REPLICATION, true);
toggleOption(ServiceOption.OWNER_SELECTION, true);
toggleOption(ServiceOption.IDEMPOTENT_POST, true);
}
/**
* Create the service and validate the state.
*/
@Override
public void handleStart(Operation start) {
try {
validateState(start);
start.complete();
} catch (Exception e) {
start.fail(e);
}
}
/**
* Updates the service state with patched state. Service keeps track of notification counter in its state which
* is incremented by the number of new notifications.
*/
@Override
public void handlePatch(Operation patch) {
State state = getState(patch);
State patchBody = getBody(patch);
state.notificationsCounter += patchBody.notificationsCounter;
patch.setBody(state);
patch.complete();
}
/**
* This is overridable method in stateful services that is invoked by the Xenon runtime when there is change
* in the ownership of the state of the service. We override this method to check if we are owner or not
* and based on that create continuous query task if we are owner, and delete running continuous query task
* service and subscription if we are not the owner anymore.
*/
@Override
public void handleNodeGroupMaintenance(Operation op) {
// Create continuous queries and subscriptions in case of change in node group topology.
if (hasOption(ServiceOption.DOCUMENT_OWNER)) {
createAndSubscribeToContinuousQuery(op);
} else {
deleteSubscriptionAndContinuousQuery(op);
}
}
/**
* On each notification, this method is called for processing the updates from the notification.
*/
private void processResults(Operation op) {
QueryTask body = op.getBody(QueryTask.class);
if (body.results == null || body.results.documentLinks.isEmpty()) {
return;
}
State newState = new State();
newState.notificationsCounter = body.results.documents.size();
// patch the state with the number of new notifications received
Operation.createPatch(this, getSelfLink())
.setBody(newState)
.sendWith(this);
}
/**
* Called by handleNodeGroupMaintenance() to create the continuous query and subscription when
* current node is owner of the service state.
*/
private void createAndSubscribeToContinuousQuery(Operation op) {
getStateAndApply((s) -> {
QueryTask queryTask = QueryTask.create(s.querySpec);
queryTask.documentExpirationTimeMicros = Long.MAX_VALUE;
// Creating continuous query task service with known self-link. We will use this same
// self-link when deleting this query service when current node is not the ownership anymore.
queryTask.documentSelfLink = QUERY_SELF_LINK;
Operation post = Operation.createPost(getHost(), ServiceUriPaths.CORE_LOCAL_QUERY_TASKS)
.setBody(queryTask)
.setReferer(getHost().getUri());
// On successful creation of continuous query task service, create subscription to that query service.
getHost().sendWithDeferredResult(post)
.thenAccept((state) -> subscribeToContinuousQuery())
.whenCompleteNotify(op);
});
}
/**
* Create subscription service.
*
* It has two parts, subscription service on local host to listen for the notifications,
* and subscribing to query task service with our subscription service link which will be used
* by query task service to call our subscription service. Both of these tasks are done by
* startSubscriptionService() method of ServiceHost.
*/
private void subscribeToContinuousQuery() {
Operation post = Operation
.createPost(getHost(), QUERY_SELF_LINK)
.setReferer(getHost().getUri());
URI subscriptionUri = UriUtils.buildPublicUri(getHost(), SUBSCRIPTION_SELF_LINK);
ServiceSubscriber sr = ServiceSubscriber
.create(true)
.setUsePublicUri(true)
.setSubscriberReference(subscriptionUri);
// Create subscription service with processResults as callback to process the results.
getHost().startSubscriptionService(post, this::processResults, sr);
}
/**
* Called by handleNodeGroupMaintenance() to delete the continuous query and subscription when
* current node is not the owner of the service state.
*/
private void deleteSubscriptionAndContinuousQuery(Operation op) {
Operation unsubscribeOperation = Operation.createPost(getHost(), QUERY_SELF_LINK)
.setReferer(getHost().getUri())
.setCompletion((o, e) -> deleteContinuousQuery());
URI notificationTarget = UriUtils.buildPublicUri(getHost(), SUBSCRIPTION_SELF_LINK);
getHost().stopSubscriptionService(unsubscribeOperation, notificationTarget);
}
/**
* Stop the continuous query task service using its known self-link.
*/
private void deleteContinuousQuery() {
Operation.createDelete(getHost(), QUERY_SELF_LINK).sendWith(this);
}
/**
* Helper method to get current state of the service and apply action on that state.
*/
private void getStateAndApply(Consumer<? super State> action) {
Operation get = Operation.createGet(this, getSelfLink());
getHost().sendWithDeferredResult(get, State.class)
.thenAccept(action)
.whenCompleteNotify(get);
}
private void validateState(Operation op) {
if (!op.hasBody()) {
throw new IllegalArgumentException("attempt to initialize service with an empty state");
}
State state = op.getBody(State.class);
if (!state.querySpec.options.contains(QuerySpecification.QueryOption.CONTINUOUS)) {
throw new IllegalArgumentException("QueryTask should have QueryOption.CONTINUOUS option");
}
}
public static class State extends ServiceDocument {
public QuerySpecification querySpec;
public int notificationsCounter;
}
}
| |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.support.replication;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.DummyTransportAddress;
import org.elasticsearch.index.shard.ShardId;
import java.util.HashSet;
import java.util.Set;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_CREATION_DATE;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED;
import static org.elasticsearch.test.ESTestCase.randomFrom;
import static org.elasticsearch.test.ESTestCase.randomIntBetween;
/**
* Helper methods for generating cluster states
*/
public class ClusterStateCreationUtils {

    /**
     * Creates cluster state with an index that has one shard and #(replicaStates) replicas
     *
     * @param index              name of the index
     * @param activePrimaryLocal if active primary should coincide with the local node in the cluster state
     * @param primaryState       state of primary
     * @param replicaStates      states of the replicas. length of this array determines also the number of replicas
     */
    public static ClusterState state(String index, boolean activePrimaryLocal, ShardRoutingState primaryState, ShardRoutingState... replicaStates) {
        final int numberOfReplicas = replicaStates.length;
        // one node per shard copy, plus one extra node per relocation target
        int numberOfNodes = numberOfReplicas + 1;
        if (primaryState == ShardRoutingState.RELOCATING) {
            numberOfNodes++;
        }
        for (ShardRoutingState state : replicaStates) {
            if (state == ShardRoutingState.RELOCATING) {
                numberOfNodes++;
            }
        }
        numberOfNodes = Math.max(2, numberOfNodes); // we need a non-local master to test shard failures
        final ShardId shardId = new ShardId(index, "_na_", 0);
        DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
        Set<String> unassignedNodes = new HashSet<>();
        for (int i = 0; i < numberOfNodes + 1; i++) {
            final DiscoveryNode node = newNode(i);
            discoBuilder = discoBuilder.put(node);
            unassignedNodes.add(node.id());
        }
        discoBuilder.localNodeId(newNode(0).id());
        discoBuilder.masterNodeId(newNode(1).id()); // we need a non-local master to test shard failures
        IndexMetaData indexMetaData = IndexMetaData.builder(index).settings(Settings.builder()
                .put(SETTING_VERSION_CREATED, Version.CURRENT)
                .put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas)
                .put(SETTING_CREATION_DATE, System.currentTimeMillis())).build();
        IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId);
        String primaryNode = null;
        String relocatingNode = null;
        UnassignedInfo unassignedInfo = null;
        if (primaryState != ShardRoutingState.UNASSIGNED) {
            if (activePrimaryLocal) {
                primaryNode = newNode(0).id();
                unassignedNodes.remove(primaryNode);
            } else {
                Set<String> unassignedNodesExcludingPrimary = new HashSet<>(unassignedNodes);
                unassignedNodesExcludingPrimary.remove(newNode(0).id());
                primaryNode = selectAndRemove(unassignedNodesExcludingPrimary);
                // also remove the chosen primary from the shared pool, otherwise a replica
                // could be assigned to the same node as the primary (invalid routing)
                unassignedNodes.remove(primaryNode);
            }
            if (primaryState == ShardRoutingState.RELOCATING) {
                relocatingNode = selectAndRemove(unassignedNodes);
            }
        } else {
            unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null);
        }
        indexShardRoutingBuilder.addShard(
                TestShardRouting.newShardRouting(index, shardId.id(), primaryNode, relocatingNode, null, true, primaryState, unassignedInfo));

        for (ShardRoutingState replicaState : replicaStates) {
            String replicaNode = null;
            relocatingNode = null;
            unassignedInfo = null;
            if (replicaState != ShardRoutingState.UNASSIGNED) {
                assert primaryNode != null : "a replica is assigned but the primary isn't";
                replicaNode = selectAndRemove(unassignedNodes);
                if (replicaState == ShardRoutingState.RELOCATING) {
                    relocatingNode = selectAndRemove(unassignedNodes);
                }
            } else {
                unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null);
            }
            indexShardRoutingBuilder.addShard(
                    TestShardRouting.newShardRouting(index, shardId.id(), replicaNode, relocatingNode, null, false, replicaState, unassignedInfo));
        }

        ClusterState.Builder state = ClusterState.builder(new ClusterName("test"));
        state.nodes(discoBuilder);
        state.metaData(MetaData.builder().put(indexMetaData, false).generateClusterUuidIfNeeded());
        state.routingTable(RoutingTable.builder().add(IndexRoutingTable.builder(indexMetaData.getIndex()).addIndexShard(indexShardRoutingBuilder.build())).build());
        return state.build();
    }

    /**
     * Creates cluster state with several shards and one replica and all shards STARTED.
     */
    public static ClusterState stateWithAssignedPrimariesAndOneReplica(String index, int numberOfShards) {
        int numberOfNodes = 2; // we need a non-local master to test shard failures
        DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
        for (int i = 0; i < numberOfNodes + 1; i++) {
            final DiscoveryNode node = newNode(i);
            discoBuilder = discoBuilder.put(node);
        }
        discoBuilder.localNodeId(newNode(0).id());
        discoBuilder.masterNodeId(newNode(1).id()); // we need a non-local master to test shard failures
        IndexMetaData indexMetaData = IndexMetaData.builder(index).settings(Settings.builder()
                .put(SETTING_VERSION_CREATED, Version.CURRENT)
                // keep metadata consistent with the routing table built below
                .put(SETTING_NUMBER_OF_SHARDS, numberOfShards).put(SETTING_NUMBER_OF_REPLICAS, 1)
                .put(SETTING_CREATION_DATE, System.currentTimeMillis())).build();
        ClusterState.Builder state = ClusterState.builder(new ClusterName("test"));
        state.nodes(discoBuilder);
        state.metaData(MetaData.builder().put(indexMetaData, false).generateClusterUuidIfNeeded());
        IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(indexMetaData.getIndex());
        for (int i = 0; i < numberOfShards; i++) {
            final ShardId shardId = new ShardId(index, "_na_", i);
            IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId);
            indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, i, newNode(0).id(), null, null, true, ShardRoutingState.STARTED, null));
            indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, i, newNode(1).id(), null, null, false, ShardRoutingState.STARTED, null));
            indexRoutingTableBuilder.addIndexShard(indexShardRoutingBuilder.build());
        }
        state.routingTable(RoutingTable.builder().add(indexRoutingTableBuilder.build()).build());
        return state.build();
    }

    /**
     * Creates cluster state with an index that has one shard and as many replicas as numberOfReplicas.
     * Primary will be STARTED in cluster state but replicas will be one of UNASSIGNED, INITIALIZING, STARTED or RELOCATING.
     *
     * @param index              name of the index
     * @param activePrimaryLocal if active primary should coincide with the local node in the cluster state
     * @param numberOfReplicas   number of replicas
     */
    public static ClusterState stateWithActivePrimary(String index, boolean activePrimaryLocal, int numberOfReplicas) {
        int assignedReplicas = randomIntBetween(0, numberOfReplicas);
        return stateWithActivePrimary(index, activePrimaryLocal, assignedReplicas, numberOfReplicas - assignedReplicas);
    }

    /**
     * Creates cluster state with an index that has one shard and as many replicas as numberOfReplicas.
     * Primary will be STARTED in cluster state. Some (unassignedReplicas) will be UNASSIGNED and
     * some (assignedReplicas) will be one of INITIALIZING, STARTED or RELOCATING.
     *
     * @param index              name of the index
     * @param activePrimaryLocal if active primary should coincide with the local node in the cluster state
     * @param assignedReplicas   number of replicas that should have INITIALIZING, STARTED or RELOCATING state
     * @param unassignedReplicas number of replicas that should be unassigned
     */
    public static ClusterState stateWithActivePrimary(String index, boolean activePrimaryLocal, int assignedReplicas, int unassignedReplicas) {
        ShardRoutingState[] replicaStates = new ShardRoutingState[assignedReplicas + unassignedReplicas];
        // no point in randomizing - node assignment later on does it too.
        for (int i = 0; i < assignedReplicas; i++) {
            replicaStates[i] = randomFrom(ShardRoutingState.INITIALIZING, ShardRoutingState.STARTED, ShardRoutingState.RELOCATING);
        }
        for (int i = assignedReplicas; i < replicaStates.length; i++) {
            replicaStates[i] = ShardRoutingState.UNASSIGNED;
        }
        return state(index, activePrimaryLocal, randomFrom(ShardRoutingState.STARTED, ShardRoutingState.RELOCATING), replicaStates);
    }

    /**
     * Creates a cluster state with no index
     */
    public static ClusterState stateWithNoShard() {
        int numberOfNodes = 2;
        DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
        for (int i = 0; i < numberOfNodes + 1; i++) {
            discoBuilder = discoBuilder.put(newNode(i));
        }
        discoBuilder.localNodeId(newNode(0).id());
        discoBuilder.masterNodeId(newNode(1).id());
        ClusterState.Builder state = ClusterState.builder(new ClusterName("test"));
        state.nodes(discoBuilder);
        state.metaData(MetaData.builder().generateClusterUuidIfNeeded());
        state.routingTable(RoutingTable.builder().build());
        return state.build();
    }

    /**
     * Creates a cluster state where local node and master node can be specified
     *
     * @param localNode  node in allNodes that is the local node
     * @param masterNode node in allNodes that is the master node. Can be null if no master exists
     * @param allNodes   all nodes in the cluster
     * @return cluster state
     */
    public static ClusterState state(DiscoveryNode localNode, DiscoveryNode masterNode, DiscoveryNode... allNodes) {
        DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
        for (DiscoveryNode node : allNodes) {
            discoBuilder.put(node);
        }
        if (masterNode != null) {
            discoBuilder.masterNodeId(masterNode.id());
        }
        discoBuilder.localNodeId(localNode.id());
        ClusterState.Builder state = ClusterState.builder(new ClusterName("test"));
        state.nodes(discoBuilder);
        state.metaData(MetaData.builder().generateClusterUuidIfNeeded());
        return state.build();
    }

    /** Creates a discovery node with a deterministic id derived from {@code nodeId}. */
    private static DiscoveryNode newNode(int nodeId) {
        return new DiscoveryNode("node_" + nodeId, DummyTransportAddress.INSTANCE, Version.CURRENT);
    }

    /** Picks a random element from {@code strings}, removes it from the set and returns it. */
    private static String selectAndRemove(Set<String> strings) {
        String selection = randomFrom(strings.toArray(new String[strings.size()]));
        strings.remove(selection);
        return selection;
    }
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode.internal.cache.xmlcache;
import java.io.File;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import org.apache.geode.annotations.Immutable;
import org.apache.geode.cache.AttributesFactory;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheListener;
import org.apache.geode.cache.CacheLoader;
import org.apache.geode.cache.CacheWriter;
import org.apache.geode.cache.CustomExpiry;
import org.apache.geode.cache.DataPolicy;
import org.apache.geode.cache.DiskStoreFactory;
import org.apache.geode.cache.DiskWriteAttributes;
import org.apache.geode.cache.EvictionAttributes;
import org.apache.geode.cache.ExpirationAttributes;
import org.apache.geode.cache.InterestPolicy;
import org.apache.geode.cache.MembershipAttributes;
import org.apache.geode.cache.MirrorType;
import org.apache.geode.cache.PartitionAttributes;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionAttributes;
import org.apache.geode.cache.Scope;
import org.apache.geode.cache.SubscriptionAttributes;
import org.apache.geode.cache.client.internal.InternalClientCache;
import org.apache.geode.compression.Compressor;
import org.apache.geode.internal.cache.EvictionAttributesImpl;
import org.apache.geode.internal.cache.GemFireCacheImpl;
import org.apache.geode.internal.cache.PartitionAttributesImpl;
import org.apache.geode.internal.cache.PartitionedRegionHelper;
import org.apache.geode.internal.cache.UserSpecifiedRegionAttributes;
/**
* Represents {@link RegionAttributes} that are created declaratively. Notice that it implements the
* {@link RegionAttributes} interface so that this class must be updated when
* {@link RegionAttributes} is modified. This class is public for testing purposes.
*
*
* @since GemFire 3.0
*/
public class RegionAttributesCreation extends UserSpecifiedRegionAttributes
implements Serializable {
private static final long serialVersionUID = 2241078661206355376L;
@Immutable
private static final RegionAttributes defaultAttributes = new AttributesFactory().create();
/** The attributes' cache listener */
private ArrayList cacheListeners;
/** The attributes' gateway senders */
private Set<String> gatewaySenderIds;
/** The attributes' AsyncEventQueues */
private Set<String> asyncEventQueueIds;
/** The attributes' cache loader */
private CacheLoader cacheLoader;
/** The attributes' cache writer */
private CacheWriter cacheWriter;
/** The attributes' entry idle timeout */
private ExpirationAttributes entryIdleTimeout;
/** The attributes' custom entry idle timeout */
private CustomExpiry customEntryIdleTimeout;
/** The attributes' entry time to live */
private ExpirationAttributes entryTimeToLive;
/** The attributes' custom entry time to live */
private CustomExpiry customEntryTimeToLive;
/** The attributes' initial capacity */
private int initialCapacity;
/** The attributes' key constraint */
private Class keyConstraint;
  /** The attributes' value constraint */
private Class valueConstraint;
/** The attributes' load factor */
private float loadFactor;
/** The attributes' region idle timeout */
private ExpirationAttributes regionIdleTimeout;
/** The attributes' region time to live */
private ExpirationAttributes regionTimeToLive;
/** The attributes' scope */
private Scope scope;
/** The attributes' statistics enabled */
private boolean statisticsEnabled;
/** The attributes ignore-jta flag */
private boolean ignoreJTA;
/** The attributes' become lock grantor setting */
private boolean isLockGrantor;
/** The attributes' concurrency level */
private int concurrencyLevel;
/** whether versioning is enabled */
private boolean concurrencyChecksEnabled = true;
/** The attributes' EarlyAck */
private boolean earlyAck;
/** The attributes' MulticastEnabled */
private boolean multicastEnabled;
/** The attributes' disk write attributes */
private DiskWriteAttributes diskWriteAttributes;
/** The attributes' disk directories */
private File[] diskDirs;
private int[] diskSizes;
/**
* disk store name of the region
*
* @since GemFire prPersistPrint2
*/
private String diskStoreName;
private boolean isDiskSynchronous = AttributesFactory.DEFAULT_DISK_SYNCHRONOUS;
private boolean cloningEnabled = false;
/** The DataPolicy attribute */
private DataPolicy dataPolicy;
private boolean indexMaintenanceSynchronous;
/**
* The attributes's id
*
* @since GemFire 4.1
*/
private String id;
/**
* The id of the attributes that this attributes "inherits"
*
* @since GemFire 4.1
*/
private String refid;
/** The partitioning attributes */
private PartitionAttributes partitionAttributes;
/** The membership attributes */
private MembershipAttributes membershipAttributes;
/** The subscription attributes */
private SubscriptionAttributes subscriptionAttributes;
private EvictionAttributesImpl evictionAttributes;
/**
* Whether to mark this region as a publisher
*
* @since GemFire 4.2.3
*/
private boolean publisher;
/**
* Whether to enable subscription conflation for this region
*
* @since GemFire 4.2
*/
private boolean enableSubscriptionConflation;
/**
* Whether to enable a async conflation for this region
*
* @since GemFire 4.2.3
*/
private boolean enableAsyncConflation;
/**
* The client to server Connection Pool
*
* @since GemFire 5.7
*/
private String poolName;
/**
* The region compressor.
*
* @since GemFire 8.0
*/
private Compressor compressor;
/**
* True if usage of off-heap memory is enabled for this region.
*
* @since Geode 1.0
*/
private boolean offHeap;
private static RegionAttributes getDefaultAttributes(CacheCreation cc) {
if (cc != null) {
return cc.getDefaultAttributes();
} else {
return defaultAttributes;
}
}
  /**
   * Creates a new <code>RegionAttributesCreation</code> with the default region attributes.
   *
   * @param cc the cache being created; used to look up its default attributes (null is tolerated
   *        by {@code getDefaultAttributes})
   */
  public RegionAttributesCreation(CacheCreation cc) {
    this(cc, getDefaultAttributes(cc), true);
  }
  /**
   * Creates a new <code>RegionAttributesCreation</code> from the statically-created default
   * attributes, treating them as defaults.
   */
  public RegionAttributesCreation() {
    this(defaultAttributes, true);
  }
  /**
   * Creates a new <code>RegionAttributesCreation</code> with the given region attributes and no
   * associated cache creation.
   *
   * @param attrs the attributes with which to initialize this region
   * @param defaults true if <code>attrs</code> are defaults; false if they are not
   */
  public RegionAttributesCreation(RegionAttributes attrs, boolean defaults) {
    this(null, attrs, defaults);
  }
  /**
   * Creates a new <code>RegionAttributesCreation</code> with the given region attributes. NOTE:
   * Currently attrs will not be an instance of RegionAttributesCreation. If it could be then this
   * code should be changed to use attrs' hasXXX methods to initialize the has booleans when
   * defaults is false.
   *
   * @param cc the cache being created; not referenced by this constructor body
   * @param attrs the attributes with which to initialize this region.
   * @param defaults true if <code>attrs</code> are defaults; false if they are not
   */
  public RegionAttributesCreation(CacheCreation cc, RegionAttributes attrs, boolean defaults) {
    // Copy every attribute from the source; mutable collections are copied defensively.
    this.cacheListeners = new ArrayList(Arrays.asList(attrs.getCacheListeners()));
    this.gatewaySenderIds = new HashSet<String>(attrs.getGatewaySenderIds());
    this.asyncEventQueueIds = new HashSet<String>(attrs.getAsyncEventQueueIds());
    this.cacheLoader = attrs.getCacheLoader();
    this.cacheWriter = attrs.getCacheWriter();
    this.entryIdleTimeout = attrs.getEntryIdleTimeout();
    this.customEntryIdleTimeout = attrs.getCustomEntryIdleTimeout();
    this.entryTimeToLive = attrs.getEntryTimeToLive();
    this.customEntryTimeToLive = attrs.getCustomEntryTimeToLive();
    this.initialCapacity = attrs.getInitialCapacity();
    this.keyConstraint = attrs.getKeyConstraint();
    this.valueConstraint = attrs.getValueConstraint();
    this.loadFactor = attrs.getLoadFactor();
    this.regionIdleTimeout = attrs.getRegionIdleTimeout();
    this.regionTimeToLive = attrs.getRegionTimeToLive();
    this.scope = attrs.getScope();
    this.statisticsEnabled = attrs.getStatisticsEnabled();
    this.ignoreJTA = attrs.getIgnoreJTA();
    this.concurrencyLevel = attrs.getConcurrencyLevel();
    this.concurrencyChecksEnabled = attrs.getConcurrencyChecksEnabled();
    this.earlyAck = attrs.getEarlyAck();
    this.diskStoreName = attrs.getDiskStoreName();
    // The deprecated disk attributes are only meaningful when no named disk store is configured.
    if (this.diskStoreName == null) {
      this.diskWriteAttributes = attrs.getDiskWriteAttributes();
      this.diskDirs = attrs.getDiskDirs();
      this.diskSizes = attrs.getDiskDirSizes();
    } else {
      this.diskWriteAttributes = null;
      this.diskDirs = null;
      this.diskSizes = null;
    }
    this.isDiskSynchronous = attrs.isDiskSynchronous();
    this.indexMaintenanceSynchronous = attrs.getIndexMaintenanceSynchronous();
    this.partitionAttributes = attrs.getPartitionAttributes();
    this.membershipAttributes = attrs.getMembershipAttributes();
    this.subscriptionAttributes = attrs.getSubscriptionAttributes();
    this.dataPolicy = attrs.getDataPolicy();
    this.evictionAttributes = (EvictionAttributesImpl) attrs.getEvictionAttributes();
    this.id = null;
    this.refid = null;
    this.enableSubscriptionConflation = attrs.getEnableSubscriptionConflation();
    this.publisher = attrs.getPublisher();
    this.enableAsyncConflation = attrs.getEnableAsyncConflation();
    this.poolName = attrs.getPoolName();
    this.multicastEnabled = attrs.getMulticastEnabled();
    this.cloningEnabled = attrs.getCloningEnabled();
    this.compressor = attrs.getCompressor();
    this.offHeap = attrs.getOffHeap();
    if (attrs instanceof UserSpecifiedRegionAttributes) {
      UserSpecifiedRegionAttributes nonDefault = (UserSpecifiedRegionAttributes) attrs;
      this.requiresPoolName = nonDefault.requiresPoolName;
      if (!defaults) {
        // Selectively set has* fields to true, propagating those non-default
        // (aka user specified) fields as such
        initHasFields(nonDefault);
      }
    } else if (!defaults) {
      // Set all fields to true
      setAllHasFields(true);
    }
  }
  /**
   * Returns whether or not two objects are {@linkplain Object#equals equals} taking
   * <code>null</code> into account: two nulls are equal, a single null is unequal to anything.
   */
  static boolean equal(Object o1, Object o2) {
    if (o1 == null) {
      if (o2 != null) {
        return false;
      } else {
        return true;
      }
    } else {
      return o1.equals(o2);
    }
  }
/**
* returns true if two long[] are equal
*
* @return true if equal
*/
private boolean equal(long[] array1, long[] array2) {
if (array1.length != array2.length) {
return false;
}
for (int i = 0; i < array1.length; i++) {
if (array1[i] != array2[i]) {
return false;
}
}
return true;
}
/**
* returns true if two int[] are equal
*
* @return true if equal
*/
private boolean equal(int[] array1, int[] array2) {
if (array1.length != array2.length) {
return false;
}
for (int i = 0; i < array1.length; i++) {
if (array1[i] != array2[i]) {
return false;
}
}
return true;
}
/**
* Returns whether or not two <code>File</code> arrays specify the same files.
*/
private boolean equal(File[] array1, File[] array2) {
if (array1.length != array2.length) {
return false;
}
for (int i = 0; i < array1.length; i++) {
boolean found = false;
for (int j = 0; j < array2.length; j++) {
if (equal(array1[i].getAbsoluteFile(), array2[j].getAbsoluteFile())) {
found = true;
break;
}
}
if (!found) {
StringBuffer sb = new StringBuffer();
sb.append("Didn't find ");
sb.append(array1[i]);
sb.append(" in ");
for (int k = 0; k < array2.length; k++) {
sb.append(array2[k]);
sb.append(" ");
}
System.out.println(sb);
return false;
}
}
return true;
}
/**
* Returns whether or not this <code>RegionAttributesCreation</code> is equivalent to another
* <code>RegionAttributes</code>.
*/
public boolean sameAs(RegionAttributes other) {
if (!equal(this.cacheListeners, Arrays.asList(other.getCacheListeners()))) {
throw new RuntimeException(
"CacheListeners are not the same");
}
if (!equal(this.entryIdleTimeout, other.getEntryIdleTimeout())) {
throw new RuntimeException(
"EntryIdleTimeout is not the same");
}
if (!equal(this.customEntryIdleTimeout, other.getCustomEntryIdleTimeout())) {
throw new RuntimeException(String.format(
"CustomEntryIdleTimeout is not the same. this %s, other: %s", this.customEntryIdleTimeout,
other.getCustomEntryIdleTimeout()));
}
if (!equal(this.entryTimeToLive, other.getEntryTimeToLive())) {
throw new RuntimeException(
"EntryTimeToLive is not the same");
}
if (!equal(this.customEntryTimeToLive, other.getCustomEntryTimeToLive())) {
throw new RuntimeException(
"CustomEntryTimeToLive is not the same");
}
if (!equal(this.partitionAttributes, other.getPartitionAttributes())) {
throw new RuntimeException(
String.format("PartitionAttributes are not the same. this: %s, other: %s",
this, other.getPartitionAttributes()));
}
if (!equal(this.membershipAttributes, other.getMembershipAttributes())) {
throw new RuntimeException(
"Membership Attributes are not the same");
}
if (!equal(this.subscriptionAttributes, other.getSubscriptionAttributes())) {
throw new RuntimeException(
"Subscription Attributes are not the same");
}
if (!equal(this.evictionAttributes, other.getEvictionAttributes())) {
throw new RuntimeException(
String.format("Eviction Attributes are not the same: this: %s other: %s",
this.evictionAttributes, other.getEvictionAttributes()));
}
if (this.diskStoreName == null) {
// only compare the DWA, diskDirs and diskSizes when disk store is not configured
if (!equal(this.diskWriteAttributes, other.getDiskWriteAttributes())) {
throw new RuntimeException(
"DistWriteAttributes are not the same");
}
if (!equal(this.diskDirs, other.getDiskDirs())) {
throw new RuntimeException(
"Disk Dirs are not the same");
}
if (!equal(this.diskSizes, other.getDiskDirSizes())) {
throw new RuntimeException(
"Disk Dir Sizes are not the same");
}
}
if (!equal(this.diskStoreName, other.getDiskStoreName())) {
throw new RuntimeException(
String.format("DiskStore is not the same: this: %s other: %s",
new Object[] {this.diskStoreName, other.getDiskStoreName()}));
}
if (this.isDiskSynchronous != other.isDiskSynchronous()) {
throw new RuntimeException(
"Disk Synchronous write is not the same.");
}
if (this.dataPolicy != other.getDataPolicy()) {
throw new RuntimeException(
String.format("Data Policies are not the same: this: %s other: %s",
new Object[] {this.getDataPolicy(), other.getDataPolicy()}));
}
if (this.earlyAck != other.getEarlyAck()) {
throw new RuntimeException(
"Early Ack is not the same");
}
if (this.enableSubscriptionConflation != other.getEnableSubscriptionConflation()) {
throw new RuntimeException(
"Enable Subscription Conflation is not the same");
}
if (this.enableAsyncConflation != other.getEnableAsyncConflation()) {
throw new RuntimeException(
"Enable Async Conflation is not the same");
}
if (this.initialCapacity != other.getInitialCapacity()) {
throw new RuntimeException(
"initial Capacity is not the same");
}
if (!equal(this.keyConstraint, other.getKeyConstraint())) {
throw new RuntimeException(
"Key Constraints are not the same");
}
if (!equal(this.valueConstraint, other.getValueConstraint())) {
throw new RuntimeException(
"Value Constraints are not the same");
}
if (this.loadFactor != other.getLoadFactor()) {
throw new RuntimeException(
"Load Factors are not the same");
}
if (!equal(this.regionIdleTimeout, other.getRegionIdleTimeout())) {
throw new RuntimeException(
"Region Idle Timeout is not the same");
}
if (!equal(this.scope, this.getScope())) {
throw new RuntimeException(
"Scope is not the same");
}
if (this.statisticsEnabled != other.getStatisticsEnabled()) {
throw new RuntimeException(
String.format("Statistics enabled is not the same: this: %s other: %s",
new Object[] {Boolean.valueOf(this.statisticsEnabled),
Boolean.valueOf(other.getStatisticsEnabled())}));
}
if (this.ignoreJTA != other.getIgnoreJTA()) {
throw new RuntimeException(
"Ignore JTA is not the same");
}
if (this.concurrencyLevel != other.getConcurrencyLevel()) {
throw new RuntimeException(
"ConcurrencyLevel is not the same");
}
if (this.concurrencyChecksEnabled != other.getConcurrencyChecksEnabled()) {
throw new RuntimeException(
"ConcurrencyChecksEnabled is not the same");
}
if (this.indexMaintenanceSynchronous != other.getIndexMaintenanceSynchronous()) {
throw new RuntimeException(
"Index Maintenance Synchronous is not the same");
}
if (!equal(this.poolName, other.getPoolName())) {
throw new RuntimeException(
"poolName is not the same: " + this.poolName + " != " + other.getPoolName());
}
if (!equal(this.cacheLoader, other.getCacheLoader())) {
throw new RuntimeException("CacheLoader are not the same");
}
if (!equal(this.cacheWriter, other.getCacheWriter())) {
throw new RuntimeException("CacheWriter is not the same");
}
if (this.multicastEnabled != other.getMulticastEnabled()) {
String s = "MulticastEnabled is not the same: " + this.multicastEnabled + "!="
+ other.getMulticastEnabled();
throw new RuntimeException(s);
}
if (this.cloningEnabled != other.getCloningEnabled()) {
throw new RuntimeException(
String.format("Cloning enabled is not the same: this: %s other: %s",
new Object[] {Boolean.valueOf(this.cloningEnabled),
Boolean.valueOf(other.getCloningEnabled())}));
}
if (!equal(this.compressor, other.getCompressor())) {
throw new RuntimeException("Compressors are not the same.");
}
if (this.offHeap != other.getOffHeap()) {
throw new RuntimeException(
"EnableOffHeapMemory is not the same");
}
return true;
}
  /** Returns the configured cache loader. */
  @Override
  public CacheLoader getCacheLoader() {
    return this.cacheLoader;
  }
  /**
   * Sets the cache loader and records that it was explicitly specified.
   *
   * @return the previously configured cache loader
   */
  public CacheLoader setCacheLoader(CacheLoader cacheLoader) {
    CacheLoader old = this.cacheLoader;
    this.cacheLoader = cacheLoader;
    setHasCacheLoader(true);
    return old;
  }
  /** Returns the configured cache writer. */
  @Override
  public CacheWriter getCacheWriter() {
    return this.cacheWriter;
  }
  /**
   * Sets the cache writer and records that it was explicitly specified.
   *
   * @return the previously configured cache writer
   */
  public CacheWriter setCacheWriter(CacheWriter cacheWriter) {
    CacheWriter old = this.cacheWriter;
    this.cacheWriter = cacheWriter;
    setHasCacheWriter(true);
    return old;
  }
  /** Returns the constraint class for region keys. */
  @Override
  public Class getKeyConstraint() {
    return this.keyConstraint;
  }
  /** Sets the key constraint and records that it was explicitly specified. */
  public void setKeyConstraint(Class keyConstraint) {
    this.keyConstraint = keyConstraint;
    setHasKeyConstraint(true);
  }
  /** Returns the constraint class for region values. */
  @Override
  public Class getValueConstraint() {
    return this.valueConstraint;
  }
  /** Sets the value constraint and records that it was explicitly specified. */
  public void setValueConstraint(Class valueConstraint) {
    this.valueConstraint = valueConstraint;
    setHasValueConstraint(true);
  }
  /** Returns the region time-to-live expiration attributes. */
  @Override
  public ExpirationAttributes getRegionTimeToLive() {
    return this.regionTimeToLive;
  }
  /**
   * Sets the region time-to-live and records that it was explicitly specified.
   *
   * @return the previous region time-to-live
   */
  public ExpirationAttributes setRegionTimeToLive(ExpirationAttributes timeToLive) {
    ExpirationAttributes old = this.regionTimeToLive;
    this.regionTimeToLive = timeToLive;
    setHasRegionTimeToLive(true);
    return old;
  }
  /** Returns the region idle-timeout expiration attributes. */
  @Override
  public ExpirationAttributes getRegionIdleTimeout() {
    return this.regionIdleTimeout;
  }
  /**
   * Sets the region idle timeout and records that it was explicitly specified.
   *
   * @return the previous region idle timeout
   */
  public ExpirationAttributes setRegionIdleTimeout(ExpirationAttributes idleTimeout) {
    ExpirationAttributes old = this.regionIdleTimeout;
    this.regionIdleTimeout = idleTimeout;
    setHasRegionIdleTimeout(true);
    return old;
  }
  /** Returns the entry time-to-live expiration attributes. */
  @Override
  public ExpirationAttributes getEntryTimeToLive() {
    return this.entryTimeToLive;
  }
  /** Returns the custom entry time-to-live, or null if none configured. */
  @Override
  public CustomExpiry getCustomEntryTimeToLive() {
    return this.customEntryTimeToLive;
  }
  /**
   * Sets the entry time-to-live and records that it was explicitly specified.
   *
   * @return the previous entry time-to-live
   */
  public ExpirationAttributes setEntryTimeToLive(ExpirationAttributes timeToLive) {
    ExpirationAttributes old = this.entryTimeToLive;
    this.entryTimeToLive = timeToLive;
    setHasEntryTimeToLive(true);
    return old;
  }
  /**
   * Sets the custom entry time-to-live and records that it was explicitly specified.
   *
   * @return the previous custom entry time-to-live
   */
  public CustomExpiry setCustomEntryTimeToLive(CustomExpiry custom) {
    CustomExpiry old = this.customEntryTimeToLive;
    this.customEntryTimeToLive = custom;
    setHasCustomEntryTimeToLive(true);
    return old;
  }
  /** Returns the entry idle-timeout expiration attributes. */
  @Override
  public ExpirationAttributes getEntryIdleTimeout() {
    return this.entryIdleTimeout;
  }
  /** Returns the custom entry idle timeout, or null if none configured. */
  @Override
  public CustomExpiry getCustomEntryIdleTimeout() {
    return this.customEntryIdleTimeout;
  }
  /**
   * Sets the entry idle timeout and records that it was explicitly specified.
   *
   * @return the previous entry idle timeout
   */
  public ExpirationAttributes setEntryIdleTimeout(ExpirationAttributes idleTimeout) {
    ExpirationAttributes old = this.entryIdleTimeout;
    this.entryIdleTimeout = idleTimeout;
    setHasEntryIdleTimeout(true);
    return old;
  }
  /**
   * Sets the custom entry idle timeout and records that it was explicitly specified.
   *
   * @return the previous custom entry idle timeout
   */
  public CustomExpiry setCustomEntryIdleTimeout(CustomExpiry custom) {
    CustomExpiry old = this.customEntryIdleTimeout;
    this.customEntryIdleTimeout = custom;
    setHasCustomEntryIdleTimeout(true);
    return old;
  }
  /**
   * Returns the deprecated mirror-type equivalent of the current data policy: NONE for
   * normal/preloaded/empty/partitioned policies, KEYS_VALUES for replicated ones.
   *
   * @throws IllegalStateException if the data policy maps to no mirror type
   */
  @Override
  public MirrorType getMirrorType() {
    if (this.dataPolicy.isNormal() || this.dataPolicy.isPreloaded() || this.dataPolicy.isEmpty()
        || this.dataPolicy.withPartitioning()) {
      return MirrorType.NONE;
    } else if (this.dataPolicy.withReplication()) {
      return MirrorType.KEYS_VALUES;
    } else {
      throw new IllegalStateException(
          String.format("No mirror type corresponds to data policy %s",
              this.dataPolicy));
    }
  }
  /**
   * Translates the deprecated mirror type into a data-policy change, only switching the policy
   * when its replication-ness disagrees with the requested mirror type.
   */
  public void setMirrorType(MirrorType mirrorType) {
    DataPolicy dp = mirrorType.getDataPolicy();
    if (dp.withReplication()) {
      // requested a mirror type that has replication
      // if current data policy is not replicated change it
      if (!getDataPolicy().withReplication()) {
        setDataPolicy(dp);
      }
    } else {
      // requested a mirror type none;
      // if current data policy is replicated change it
      if (getDataPolicy().withReplication()) {
        setDataPolicy(dp);
      }
    }
  }
  /** Returns the configured data policy. */
  @Override
  public DataPolicy getDataPolicy() {
    return this.dataPolicy;
  }
  /**
   * Sets the data policy and records that it was explicitly specified. A partitioned policy
   * additionally gets default partition attributes when none were specified (without marking
   * them as user-specified).
   */
  public void setDataPolicy(DataPolicy dataPolicy) {
    this.dataPolicy = dataPolicy;
    setHasDataPolicy(true);
    if (this.dataPolicy.withPartitioning() && !this.hasPartitionAttributes()) {
      setPartitionAttributes((new PartitionAttributesFactory()).create());
      setHasPartitionAttributes(false);
    }
  }
  /** Sets the data policy WITHOUT recording it as user-specified or touching partitioning. */
  public void secretlySetDataPolicy(DataPolicy dataPolicy) {
    this.dataPolicy = dataPolicy;
  }
  /** Returns the configured scope. */
  @Override
  public Scope getScope() {
    return this.scope;
  }
  /** Sets the scope and records that it was explicitly specified. */
  public void setScope(Scope scope) {
    this.scope = scope;
    setHasScope(true);
  }
  /** Returns a new array containing the configured cache listeners. */
  @Override
  public CacheListener[] getCacheListeners() {
    CacheListener[] result = new CacheListener[this.cacheListeners.size()];
    this.cacheListeners.toArray(result);
    return result;
  }
  /**
   * Returns the single configured cache listener, or null if there is none.
   *
   * @throws IllegalStateException if more than one listener is configured
   */
  @Override
  public CacheListener getCacheListener() {
    if (this.cacheListeners.isEmpty()) {
      return null;
    } else if (this.cacheListeners.size() == 1) {
      return (CacheListener) this.cacheListeners.get(0);
    } else {
      throw new IllegalStateException(
          "more than one cache listener exists");
    }
  }
  /** Replaces all cache listeners and records that they were explicitly specified. */
  public void initCacheListeners(CacheListener[] listeners) {
    this.cacheListeners = new ArrayList(Arrays.asList(listeners));
    setHasCacheListeners(true);
  }
  /** Appends a cache listener and records that listeners were explicitly specified. */
  public void addCacheListener(CacheListener listener) {
    this.cacheListeners.add(listener);
    setHasCacheListeners(true);
  }
  /** Replaces any existing listeners with the single given one. */
  public void setCacheListener(CacheListener listener) {
    this.cacheListeners = new ArrayList(1);
    this.cacheListeners.add(listener);
    setHasCacheListeners(true);
  }
  /** Replaces all gateway sender ids (copied defensively) and records them as user-specified. */
  public void initGatewaySenders(Set<String> gatewaySenderIds) {
    this.gatewaySenderIds = new HashSet<String>(gatewaySenderIds);
    setHasGatewaySenderIds(true);
  }
  /** Replaces all async event queue ids (copied defensively) and records them as user-specified. */
  public void initAsyncEventQueues(Set<String> asyncEventQueues) {
    this.asyncEventQueueIds = new HashSet<String>(asyncEventQueues);
    setHasAsyncEventListeners(true);
  }
  /** Adds one gateway sender id and records the set as user-specified. */
  public void addGatewaySenderId(String gatewaySenderId) {
    this.gatewaySenderIds.add(gatewaySenderId);
    setHasGatewaySenderIds(true);
  }
  /** Adds one async event queue id and records the set as user-specified. */
  public void addAsyncEventQueueId(String asyncEventQueueId) {
    this.asyncEventQueueIds.add(asyncEventQueueId);
    setHasAsyncEventListeners(true);
  }
  /** Returns the initial capacity of the entry map. */
  @Override
  public int getInitialCapacity() {
    return this.initialCapacity;
  }
  /** Sets the initial capacity and records that it was explicitly specified. */
  public void setInitialCapacity(int initialCapacity) {
    this.initialCapacity = initialCapacity;
    setHasInitialCapacity(true);
  }
  /** Returns the load factor of the entry map. */
  @Override
  public float getLoadFactor() {
    return this.loadFactor;
  }
  /** Sets the load factor and records that it was explicitly specified. */
  public void setLoadFactor(float loadFactor) {
    this.loadFactor = loadFactor;
    setHasLoadFactor(true);
  }
  /** Returns the concurrency level of the entry map. */
  @Override
  public int getConcurrencyLevel() {
    return this.concurrencyLevel;
  }
  /** Returns whether entry-version concurrency checks are enabled. */
  @Override
  public boolean getConcurrencyChecksEnabled() {
    return this.concurrencyChecksEnabled;
  }
  /** Sets the concurrency level and records that it was explicitly specified. */
  public void setConcurrencyLevel(int concurrencyLevel) {
    this.concurrencyLevel = concurrencyLevel;
    setHasConcurrencyLevel(true);
  }
  /** Enables/disables concurrency checks and records the choice as user-specified. */
  public void setConcurrencyChecksEnabled(boolean enabled) {
    this.concurrencyChecksEnabled = enabled;
    setHasConcurrencyChecksEnabled(true);
  }
  /** Returns whether statistics gathering is enabled. */
  @Override
  public boolean getStatisticsEnabled() {
    return this.statisticsEnabled;
  }
  /** Enables/disables statistics and records the choice as user-specified. */
  public void setStatisticsEnabled(boolean statisticsEnabled) {
    this.statisticsEnabled = statisticsEnabled;
    setHasStatisticsEnabled(true);
  }
  /** Returns whether JTA transactions are ignored for this region. */
  @Override
  public boolean getIgnoreJTA() {
    return this.ignoreJTA;
  }
  /** Sets the ignore-JTA flag and records that it was explicitly specified. */
  public void setIgnoreJTA(boolean flag) {
    this.ignoreJTA = flag;
    setHasIgnoreJTA(true);
  }
  /** Returns whether this member should try to become lock grantor. */
  @Override
  public boolean isLockGrantor() {
    return this.isLockGrantor;
  }
  /** Sets the lock-grantor flag and records that it was explicitly specified. */
  public void setLockGrantor(boolean isLockGrantor) {
    this.isLockGrantor = isLockGrantor;
    setHasIsLockGrantor(true);
  }
}
@Override
public boolean getPersistBackup() {
return getDataPolicy().withPersistence();
}
public void setPersistBackup(boolean persistBackup) {
if (persistBackup) {
if (!getDataPolicy().withPersistence()) {
if (getDataPolicy().withPartitioning()) {
setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
} else {
setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
}
}
} else {
// It is less clear what we should do here for backwards compat.
// If the current data policy is persist then we need to change it
// otherwise just leave it alone
if (getDataPolicy().withReplication()) {
setDataPolicy(DataPolicy.REPLICATE);
} else if (getDataPolicy().withPartitioning()) {
setDataPolicy(DataPolicy.PARTITION);
}
}
}
  /** Returns the early-ack flag. */
  @Override
  public boolean getEarlyAck() {
    return this.earlyAck;
  }
  /** Sets the early-ack flag and records that it was explicitly specified. */
  public void setEarlyAck(boolean earlyAck) {
    this.earlyAck = earlyAck;
    setHasEarlyAck(true);
  }
  /** Returns whether multicast distribution is enabled. */
  @Override
  public boolean getMulticastEnabled() {
    return this.multicastEnabled;
  }
  /** Sets the multicast flag and records that it was explicitly specified. */
  public void setMulticastEnabled(boolean multicastEnabled) {
    this.multicastEnabled = multicastEnabled;
    setHasMulticastEnabled(true);
  }
  /**
   * Returns the publisher flag, which is retained only for backward compatibility.
   *
   * @deprecated as of prPersistSprint1
   */
  @Override
  @Deprecated
  public boolean getPublisher() {
    return this.publisher;
  }
  /**
   * Intentionally a no-op; the publisher attribute is obsolete.
   *
   * @deprecated as of prPersistSprint1
   */
  @Deprecated
  public void setPublisher(boolean v) {
    // nothing
  }
  /** Deprecated alias for {@link #getEnableSubscriptionConflation}. */
  @Override
  public boolean getEnableConflation() { // deprecated in 5.0
    return getEnableSubscriptionConflation();
  }
  /** Deprecated alias for {@link #getEnableSubscriptionConflation}. */
  @Override
  public boolean getEnableBridgeConflation() { // deprecated in 5.7
    return getEnableSubscriptionConflation();
  }
  /** Returns whether subscription conflation is enabled. */
  @Override
  public boolean getEnableSubscriptionConflation() {
    return this.enableSubscriptionConflation;
  }
  /** Deprecated alias for {@link #setEnableSubscriptionConflation}. */
  public void setEnableBridgeConflation(boolean v) {// deprecated in 5.7
    setEnableSubscriptionConflation(v);
  }
  /** Enables/disables subscription conflation and records the choice as user-specified. */
  public void setEnableSubscriptionConflation(boolean v) {
    this.enableSubscriptionConflation = v;
    setHasEnableSubscriptionConflation(true);
  }
  /** Returns whether async conflation is enabled. */
  @Override
  public boolean getEnableAsyncConflation() {
    return this.enableAsyncConflation;
  }
  /** Enables/disables async conflation and records the choice as user-specified. */
  public void setEnableAsyncConflation(boolean enableAsyncConflation) {
    this.enableAsyncConflation = enableAsyncConflation;
    setHasEnableAsyncConflation(true);
  }
  /** Sets synchronous index maintenance and records the choice as user-specified. */
  public void setIndexMaintenanceSynchronous(boolean isSynchronous) {
    this.indexMaintenanceSynchronous = isSynchronous;
    setHasIndexMaintenanceSynchronous(true);
  }
  /**
   * Returns the deprecated disk write attributes (null when a named disk store is configured;
   * see the constructor).
   *
   * @deprecated as of prPersistSprint2
   */
  @Override
  public DiskWriteAttributes getDiskWriteAttributes() {
    // not throw exception for mixed API, since it's internal
    return this.diskWriteAttributes;
  }
  /**
   * Sets the deprecated disk write attributes, mirroring their synchronous flag into
   * {@code isDiskSynchronous}.
   *
   * @deprecated as of prPersistSprint2
   */
  @Deprecated
  public void setDiskWriteAttributes(DiskWriteAttributes attrs) {
    // not throw exception for mixed API, since it's internal
    this.diskWriteAttributes = attrs;
    this.isDiskSynchronous = attrs.isSynchronous();
    setHasDiskWriteAttributes(true);
  }
  /**
   * Returns the deprecated disk directories (null when a named disk store is configured).
   *
   * @deprecated as of prPersistSprint2
   */
  @Override
  @Deprecated
  public File[] getDiskDirs() {
    // not throw exception for mixed API, since it's internal
    return this.diskDirs;
  }
  /**
   * Returns the deprecated disk directory sizes (null when a named disk store is configured).
   *
   * @deprecated as of prPersistSprint2
   */
  @Override
  @Deprecated
  public int[] getDiskDirSizes() {
    // not throw exception for mixed API, since it's internal
    return this.diskSizes;
  }
  /**
   * Sets the deprecated disk directories; each gets the default disk-dir size.
   *
   * @deprecated as of prPersistSprint2
   */
  @Deprecated
  public void setDiskDirs(File[] diskDirs) {
    // not throw exception for mixed API, since it's internal
    checkIfDirectoriesExist(diskDirs);
    this.diskDirs = diskDirs;
    this.diskSizes = new int[diskDirs.length];
    for (int i = 0; i < diskDirs.length; i++) {
      this.diskSizes[i] = DiskStoreFactory.DEFAULT_DISK_DIR_SIZE;
    }
    setHasDiskDirs(true);
  }
  /** Returns the named disk store, or null if none configured. */
  @Override
  public String getDiskStoreName() {
    return this.diskStoreName;
  }
  /** Sets the disk store name and records that it was explicitly specified. */
  public void setDiskStoreName(String diskStoreName) {
    this.diskStoreName = diskStoreName;
    setHasDiskStoreName(true);
  }
  /** Returns whether disk writes are synchronous. */
  @Override
  public boolean isDiskSynchronous() {
    return this.isDiskSynchronous;
    // If DiskWriteAttributes is set, the flag needs to be checked from DiskWriteAttribs
    // TODO: Should we set the correct value in the flag isDiskSynchronous
  }
  /** Sets the disk-synchronous flag and records that it was explicitly specified. */
  public void setDiskSynchronous(boolean isDiskSynchronous) {
    this.isDiskSynchronous = isDiskSynchronous;
    setHasDiskSynchronous(true);
  }
/**
* Checks if directories exist
*
*/
private void checkIfDirectoriesExist(File[] diskDirs) {
for (int i = 0; i < diskDirs.length; i++) {
if (!diskDirs[i].isDirectory()) {
throw new IllegalArgumentException(
String.format("%s was not an existing directory.",
diskDirs[i]));
}
}
}
  /**
   * Sets the deprecated disk directories together with their per-directory sizes.
   *
   * @deprecated as of prPersistSprint2
   */
  @Deprecated
  public void setDiskDirsAndSize(File[] diskDirs, int[] sizes) {
    // not throw exception for mixed API, since it's internal
    checkIfDirectoriesExist(diskDirs);
    this.diskDirs = diskDirs;
    // Each directory needs a matching, non-negative size entry.
    if (sizes.length != this.diskDirs.length) {
      throw new IllegalArgumentException(
          String.format(
              "Number of diskSizes is %s which is not equal to number of disk Dirs which is %s",
              new Object[] {Integer.valueOf(sizes.length), Integer.valueOf(diskDirs.length)}));
    }
    verifyNonNegativeDirSize(sizes);
    this.diskSizes = sizes;
    this.setHasDiskDirs(true);
  }
private void verifyNonNegativeDirSize(int[] sizes) {
for (int i = 0; i < sizes.length; i++) {
if (sizes[i] < 0) {
throw new IllegalArgumentException(
String.format("Dir size cannot be negative : %s",
Integer.valueOf(sizes[i])));
}
}
}
  /** Returns whether index maintenance is synchronous. */
  @Override
  public boolean getIndexMaintenanceSynchronous() {
    return this.indexMaintenanceSynchronous;
  }
  /**
   * Sets the id of the region attributes being created
   *
   * @since GemFire 4.1
   */
  public void setId(String id) {
    this.id = id;
  }
  /**
   * Returns the id of the region attributes being created
   *
   * @since GemFire 4.1
   */
  public String getId() {
    return this.id;
  }
  /**
   * Sets the refid of the region attributes being created
   *
   * @since GemFire 4.1
   */
  public void setRefid(String refid) {
    this.refid = refid;
  }
  /**
   * Returns the refid of the region attributes being created
   *
   * @since GemFire 4.1
   */
  public String getRefid() {
    return this.refid;
  }
  /**
   * Causes this region attributes to inherit all of the attributes of its "parent" attributes
   * specified by its <code>refid</code>. Delegates to the two-argument overload with default-pool
   * resolution enabled.
   *
   * @param cache Used to look up named region attributes
   *
   * @throws IllegalStateException If no region attributes named <code>refid</code> exist.
   *
   * @since GemFire 4.1
   */
  void inheritAttributes(Cache cache) {
    inheritAttributes(cache, true);
  }
void inheritAttributes(Cache cache, boolean setDefaultPool) {
if (this.refid == null) {
// No attributes to inherit
if (setDefaultPool && this.requiresPoolName && !hasPoolName()) {
String defaultPoolName = null;
if (cache instanceof GemFireCacheImpl) {
InternalClientCache gfc = (InternalClientCache) cache;
if (gfc.getDefaultPool() != null) {
defaultPoolName = gfc.getDefaultPool().getName();
}
} else if (cache instanceof ClientCacheCreation) {
ClientCacheCreation ccc = (ClientCacheCreation) cache;
defaultPoolName = ccc.getDefaultPoolName();
}
if (defaultPoolName != null) {
setPoolName(defaultPoolName);
}
}
return;
}
RegionAttributes parent = cache.getRegionAttributes(this.refid);
if (parent == null) {
throw new IllegalStateException(
String.format("Cannot reference non-existing region attributes named %s",
this.refid));
}
final boolean parentIsUserSpecified = parent instanceof UserSpecifiedRegionAttributes;
final UserSpecifiedRegionAttributes parentWithHas;
if (parentIsUserSpecified) {
parentWithHas = (UserSpecifiedRegionAttributes) parent;
} else {
parentWithHas = null;
}
if (parentWithHas != null) {
if (setDefaultPool && parentWithHas.requiresPoolName) {
this.requiresPoolName = true;
if (!hasPoolName()) {
String defaultPoolName = null;
if (cache instanceof GemFireCacheImpl) {
InternalClientCache gfc = (InternalClientCache) cache;
if (gfc.getDefaultPool() != null) {
defaultPoolName = gfc.getDefaultPool().getName();
}
} else if (cache instanceof ClientCacheCreation) {
ClientCacheCreation ccc = (ClientCacheCreation) cache;
defaultPoolName = ccc.getDefaultPoolName();
}
if (defaultPoolName != null) {
setPoolName(defaultPoolName);
}
}
}
}
// Inherit attributes that are not overridden
if (!hasCacheListeners()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasCacheListeners()) {
initCacheListeners(parent.getCacheListeners());
}
} else {
initCacheListeners(parent.getCacheListeners());
}
}
if (!hasGatewaySenderId()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasGatewaySenderId()) {
initGatewaySenders(parent.getGatewaySenderIds());
}
} else {
initGatewaySenders(parent.getGatewaySenderIds());
}
}
if (!hasAsyncEventListeners()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasAsyncEventListeners()) {
initAsyncEventQueues(parent.getAsyncEventQueueIds());
}
} else {
initAsyncEventQueues(parent.getAsyncEventQueueIds());
}
}
if (!hasCacheLoader()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasCacheLoader()) {
setCacheLoader(parent.getCacheLoader());
}
} else {
setCacheLoader(parent.getCacheLoader());
}
}
if (!hasCacheWriter()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasCacheWriter()) {
setCacheWriter(parent.getCacheWriter());
}
} else {
setCacheWriter(parent.getCacheWriter());
}
}
if (!hasEntryIdleTimeout()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasEntryIdleTimeout()) {
setEntryIdleTimeout(parent.getEntryIdleTimeout());
}
} else {
setEntryIdleTimeout(parent.getEntryIdleTimeout());
}
}
if (!hasCustomEntryIdleTimeout()) {
setCustomEntryIdleTimeout(parent.getCustomEntryIdleTimeout());
}
if (!hasEntryTimeToLive()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasEntryTimeToLive()) {
setEntryTimeToLive(parent.getEntryTimeToLive());
}
} else {
setEntryTimeToLive(parent.getEntryTimeToLive());
}
}
if (!hasCustomEntryTimeToLive()) {
setCustomEntryTimeToLive(parent.getCustomEntryTimeToLive());
}
if (!hasInitialCapacity()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasInitialCapacity()) {
setInitialCapacity(parent.getInitialCapacity());
}
} else {
setInitialCapacity(parent.getInitialCapacity());
}
}
if (!hasKeyConstraint()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasKeyConstraint()) {
setKeyConstraint(parent.getKeyConstraint());
}
} else {
setKeyConstraint(parent.getKeyConstraint());
}
}
if (!hasValueConstraint()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasValueConstraint()) {
setValueConstraint(parent.getValueConstraint());
}
} else {
setValueConstraint(parent.getValueConstraint());
}
}
if (!hasLoadFactor()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasLoadFactor()) {
setLoadFactor(parent.getLoadFactor());
}
} else {
setLoadFactor(parent.getLoadFactor());
}
}
if (!hasRegionIdleTimeout()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasRegionIdleTimeout()) {
setRegionIdleTimeout(parent.getRegionIdleTimeout());
}
} else {
setRegionIdleTimeout(parent.getRegionIdleTimeout());
}
}
if (!hasRegionTimeToLive()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasRegionTimeToLive()) {
setRegionTimeToLive(parent.getRegionTimeToLive());
}
} else {
setRegionTimeToLive(parent.getRegionTimeToLive());
}
}
if (!hasScope()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasScope()) {
setScope(parent.getScope());
}
} else {
setScope(parent.getScope());
}
}
if (!hasStatisticsEnabled()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasStatisticsEnabled()) {
setStatisticsEnabled(parent.getStatisticsEnabled());
}
} else {
setStatisticsEnabled(parent.getStatisticsEnabled());
}
}
if (!hasIgnoreJTA()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasIgnoreJTA()) {
setIgnoreJTA(parent.getIgnoreJTA());
}
} else {
setIgnoreJTA(parent.getIgnoreJTA());
}
}
if (!hasIsLockGrantor()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasIsLockGrantor()) {
setLockGrantor(parent.isLockGrantor());
}
} else {
setLockGrantor(parent.isLockGrantor());
}
}
if (!hasConcurrencyLevel()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasConcurrencyLevel()) {
setConcurrencyLevel(parent.getConcurrencyLevel());
}
} else {
setConcurrencyLevel(parent.getConcurrencyLevel());
}
}
if (!hasConcurrencyChecksEnabled()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasConcurrencyChecksEnabled()) {
setConcurrencyChecksEnabled(parent.getConcurrencyChecksEnabled());
}
} else {
setConcurrencyChecksEnabled(parent.getConcurrencyChecksEnabled());
}
}
// no need to do persistBackup since it is done by dataPolicy
if (!hasEarlyAck()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasEarlyAck()) {
setEarlyAck(parent.getEarlyAck());
}
} else {
setEarlyAck(parent.getEarlyAck());
}
}
if (!this.hasEnableSubscriptionConflation()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasEnableSubscriptionConflation()) {
setEnableSubscriptionConflation(parent.getEnableSubscriptionConflation());
}
} else {
setEnableSubscriptionConflation(parent.getEnableSubscriptionConflation());
}
}
if (!hasPublisher()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasPublisher()) {
setPublisher(parent.getPublisher());
}
} else {
setPublisher(parent.getPublisher());
}
}
if (!hasEnableAsyncConflation()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasEnableAsyncConflation()) {
setEnableAsyncConflation(parent.getEnableAsyncConflation());
}
} else {
setEnableAsyncConflation(parent.getEnableAsyncConflation());
}
}
if (!hasMulticastEnabled()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasMulticastEnabled()) {
setMulticastEnabled(parent.getMulticastEnabled());
}
} else {
setMulticastEnabled(parent.getMulticastEnabled());
}
}
if (!hasDiskWriteAttributes()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasDiskWriteAttributes()) {
setDiskWriteAttributes(parent.getDiskWriteAttributes());
}
} else {
setDiskWriteAttributes(parent.getDiskWriteAttributes());
}
}
if (!hasDiskDirs()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasDiskDirs()) {
setDiskDirs(parent.getDiskDirs());
}
} else {
setDiskDirs(parent.getDiskDirs());
}
}
if (!hasIndexMaintenanceSynchronous()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasIndexMaintenanceSynchronous()) {
setIndexMaintenanceSynchronous(parent.getIndexMaintenanceSynchronous());
}
} else {
setIndexMaintenanceSynchronous(parent.getIndexMaintenanceSynchronous());
}
}
if (!hasPartitionAttributes()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasPartitionAttributes()) {
setPartitionAttributes(parent.getPartitionAttributes());
}
} else {
setPartitionAttributes(parent.getPartitionAttributes());
}
}
if (!hasSubscriptionAttributes()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasSubscriptionAttributes()) {
setSubscriptionAttributes(parent.getSubscriptionAttributes());
}
} else {
setSubscriptionAttributes(parent.getSubscriptionAttributes());
}
}
if (!hasDataPolicy()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasDataPolicy()) {
setDataPolicy(parent.getDataPolicy());
}
} else {
setDataPolicy(parent.getDataPolicy());
}
}
if (!hasEvictionAttributes()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasEvictionAttributes()) {
setEvictionAttributes(parent.getEvictionAttributes());
}
} else {
setEvictionAttributes(parent.getEvictionAttributes());
}
}
if (!hasPoolName()) {
setPoolName(parent.getPoolName());
}
if (!hasDiskStoreName()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasDiskStoreName()) {
setDiskStoreName(parent.getDiskStoreName());
}
} else {
setDiskStoreName(parent.getDiskStoreName());
}
}
if (!hasDiskSynchronous()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasDiskSynchronous()) {
setDiskSynchronous(parent.isDiskSynchronous());
}
} else {
setDiskSynchronous(parent.isDiskSynchronous());
}
}
if (!hasCompressor()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasCompressor()) {
setCompressor(parent.getCompressor());
}
} else {
setCompressor(parent.getCompressor());
}
}
if (!hasConcurrencyChecksEnabled()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasConcurrencyChecksEnabled()) {
setConcurrencyChecksEnabled(parent.getConcurrencyChecksEnabled());
}
} else {
setConcurrencyChecksEnabled(parent.getConcurrencyChecksEnabled());
}
}
if (!hasMulticastEnabled()) { // bug #38836 - inherit multicast setting
if (parentIsUserSpecified) {
if (parentWithHas.hasMulticastEnabled()) {
setMulticastEnabled(parent.getMulticastEnabled());
}
} else {
setMulticastEnabled(parent.getMulticastEnabled());
}
}
if (!hasOffHeap()) {
if (parentIsUserSpecified) {
if (parentWithHas.hasOffHeap()) {
setOffHeap(parent.getOffHeap());
}
} else {
setOffHeap(parent.getOffHeap());
}
}
}
/**
 * Returns the partition attributes configured for this region, or {@code null}
 * when none have been set.
 */
@Override
public PartitionAttributes getPartitionAttributes() {
  return partitionAttributes;
}
/**
 * Sets (or clears) the partition attributes for this region.
 *
 * <p>A non-null value makes this a partitioned-region configuration: if no data
 * policy was explicitly set, the partitioned-region default is applied (without
 * marking it user-specified so inheritance still works); an explicitly set but
 * disallowed data policy is rejected. When partition attributes were already
 * present, the new ones are merged into a copy of the existing ones rather than
 * replacing them outright (see bug 51616). Passing {@code null} clears the
 * attributes and the "explicitly set" flag.
 *
 * @param partitionAttr the attributes to apply, or {@code null} to clear them
 * @throws IllegalStateException if a previously set data policy is not allowed
 *         for partitioned regions
 */
public void setPartitionAttributes(PartitionAttributes partitionAttr) {
  if (partitionAttr != null) {
    if (!hasDataPolicy()) {
      // Default the data policy but keep hasDataPolicy false so a parent's
      // policy can still be inherited later.
      setDataPolicy(PartitionedRegionHelper.DEFAULT_DATA_POLICY);
      setHasDataPolicy(false);
    } else if (!PartitionedRegionHelper.ALLOWED_DATA_POLICIES.contains(getDataPolicy())) {
      throw new IllegalStateException(
          String.format(
              "Data policy %s is not allowed for a partitioned region. DataPolicies other than %s are not allowed.",
              this.getDataPolicy(), PartitionedRegionHelper.ALLOWED_DATA_POLICIES));
    }
    if (hasPartitionAttributes()
        && partitionAttributes instanceof PartitionAttributesImpl
        && partitionAttr instanceof PartitionAttributesImpl) {
      // Make a copy and call merge on it to prevent bug 51616
      PartitionAttributesImpl copy = ((PartitionAttributesImpl) partitionAttributes).copy();
      copy.merge((PartitionAttributesImpl) partitionAttr);
      this.partitionAttributes = copy;
    } else {
      this.partitionAttributes = partitionAttr;
    }
    setHasPartitionAttributes(true);
  } else {
    partitionAttributes = null;
    setHasPartitionAttributes(false);
  }
}
/**
 * @deprecated this API is scheduled to be removed
 */
@Override
@Deprecated
public MembershipAttributes getMembershipAttributes() {
  return membershipAttributes;
}

/**
 * @deprecated this API is scheduled to be removed
 */
@Deprecated
public void setMembershipAttributes(MembershipAttributes pa) {
  membershipAttributes = pa;
  setHasMembershipAttributes(true);
}

/**
 * Returns the subscription attributes for this region.
 *
 * @since GemFire 5.0
 */
@Override
public SubscriptionAttributes getSubscriptionAttributes() {
  return subscriptionAttributes;
}

/**
 * Sets the subscription attributes and records that they were explicitly set.
 *
 * @since GemFire 5.0
 */
public void setSubscriptionAttributes(SubscriptionAttributes pa) {
  subscriptionAttributes = pa;
  setHasSubscriptionAttributes(true);
}
/** Not supported on this configuration object. */
public Region getRegion() {
  throw new UnsupportedOperationException("Should not be invoked");
}

/** Installs the eviction attributes and records that they were explicitly set. */
public void setEvictionAttributes(EvictionAttributes ea) {
  // NOTE(review): assumes every caller passes an EvictionAttributesImpl — confirm.
  evictionAttributes = (EvictionAttributesImpl) ea;
  setHasEvictionAttributes(true);
}

/** Returns the eviction attributes for this region. */
@Override
public EvictionAttributes getEvictionAttributes() {
  return evictionAttributes;
}
/**
 * Sets the name of the client pool for this region; an empty string is
 * normalized to {@code null}, meaning "no pool".
 */
public void setPoolName(String poolName) {
  this.poolName = "".equals(poolName) ? null : poolName;
  setHasPoolName(true);
}

/** Returns the configured pool name, or {@code null} when none is set. */
@Override
public String getPoolName() {
  return poolName;
}

/** Enables or disables value cloning and records the setting as explicit. */
public void setCloningEnable(boolean val) {
  cloningEnabled = val;
  setHasCloningEnabled(true);
}

/** Returns whether value cloning is enabled. */
@Override
public boolean getCloningEnabled() {
  return cloningEnabled;
}

/**
 * Sets the compressor for this region. Cloning must be enabled when a
 * compressor is set, so a non-null compressor also turns cloning on.
 */
public void setCompressor(Compressor compressor) {
  this.compressor = compressor;
  setHasCompressor(true);
  if (compressor != null) {
    setCloningEnable(true);
  }
}

/** Returns the compressor for this region, or {@code null} when none is set. */
@Override
public Compressor getCompressor() {
  return compressor;
}

/** Enables or disables off-heap storage and records the setting as explicit. */
public void setOffHeap(boolean offHeap) {
  this.offHeap = offHeap;
  setHasOffHeap(true);
}

/** Returns whether off-heap storage is enabled. */
@Override
public boolean getOffHeap() {
  return offHeap;
}
/**
 * Normalizes this configuration before validation: enables statistics when any
 * expiration is configured, downgrades an incompatible distributed replicate to
 * PRELOADED with interest ALL, and forces concurrency checks on for persistent
 * regions. Each adjustment is applied only when the user did not explicitly set
 * the corresponding attribute.
 */
public void prepareForValidation() {
  // As of 6.5 we automatically enable stats if expiration is used.
  {
    if (!hasStatisticsEnabled() && !getStatisticsEnabled()
        && (getRegionTimeToLive().getTimeout() != 0 || getRegionIdleTimeout().getTimeout() != 0
            || getEntryTimeToLive().getTimeout() != 0 || getEntryIdleTimeout().getTimeout() != 0
            || getCustomEntryIdleTimeout() != null || getCustomEntryTimeToLive() != null)) {
      // TODO: we could do some more implementation work so that we would
      // not need to enable stats unless entryIdleTimeout is enabled.
      // We need the stats in that case because we need a new type of RegionEntry
      // so we know the last time it was accessed. But for all the others the
      // stat-less region keeps track of everything we need.
      // The only problem is that some places in the code are conditionalized
      // on statisticsEnabled.
      setStatisticsEnabled(true);
    }
    if (getDataPolicy().withReplication() && !getDataPolicy().withPersistence()
        && getScope().isDistributed()) {
      if (getEvictionAttributes().getAction().isLocalDestroy()
          || getEntryIdleTimeout().getAction().isLocal()
          || getEntryTimeToLive().getAction().isLocal()
          || getRegionIdleTimeout().getAction().isLocalInvalidate()
          || getRegionTimeToLive().getAction().isLocalInvalidate()) {
        // new to 6.5; switch to PRELOADED and interest ALL
        setDataPolicy(DataPolicy.PRELOADED);
        setSubscriptionAttributes(new SubscriptionAttributes(InterestPolicy.ALL));
      }
    }
    // enable concurrency checks for persistent regions
    if (!hasConcurrencyChecksEnabled() && !getConcurrencyChecksEnabled()
        && getDataPolicy().withPersistence()) {
      setConcurrencyChecksEnabled(true);
    }
  }
}
/** Returns the configured set of async event queue ids. */
@Override
public Set<String> getAsyncEventQueueIds() {
  return asyncEventQueueIds;
}

/** Returns the configured set of gateway sender ids. */
@Override
public Set<String> getGatewaySenderIds() {
  return gatewaySenderIds;
}
}
| |
/*
* Copyright 2017 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.workbench.common.stunner.cm.client.canvas.controls.containment;
import java.util.Optional;
import java.util.function.Function;
import javax.enterprise.context.Dependent;
import javax.inject.Inject;
import com.ait.lienzo.client.core.shape.wires.IContainmentAcceptor;
import com.ait.lienzo.client.core.shape.wires.WiresContainer;
import com.ait.lienzo.client.core.shape.wires.WiresShape;
import org.kie.workbench.common.stunner.client.lienzo.canvas.controls.AbstractAcceptorControl;
import org.kie.workbench.common.stunner.client.lienzo.canvas.wires.WiresCanvas;
import org.kie.workbench.common.stunner.client.lienzo.canvas.wires.WiresUtils;
import org.kie.workbench.common.stunner.cm.client.command.CaseManagementCanvasCommandFactory;
import org.kie.workbench.common.stunner.cm.client.wires.AbstractCaseManagementShape;
import org.kie.workbench.common.stunner.cm.client.wires.CaseManagementContainmentStateHolder;
import org.kie.workbench.common.stunner.cm.qualifiers.CaseManagementEditor;
import org.kie.workbench.common.stunner.core.client.canvas.AbstractCanvasHandler;
import org.kie.workbench.common.stunner.core.client.canvas.controls.containment.ContainmentAcceptorControl;
import org.kie.workbench.common.stunner.core.client.command.CanvasViolation;
import org.kie.workbench.common.stunner.core.command.Command;
import org.kie.workbench.common.stunner.core.command.CommandResult;
import org.kie.workbench.common.stunner.core.command.impl.CompositeCommand;
import org.kie.workbench.common.stunner.core.command.util.CommandUtils;
import org.kie.workbench.common.stunner.core.graph.Edge;
import org.kie.workbench.common.stunner.core.graph.Element;
import org.kie.workbench.common.stunner.core.graph.Node;
import org.kie.workbench.common.stunner.core.graph.content.relationship.Child;
@Dependent
@CaseManagementEditor
public class CaseManagementContainmentAcceptorControlImpl extends AbstractAcceptorControl
        implements ContainmentAcceptorControl<AbstractCanvasHandler> {

    /** Lienzo acceptor that routes wires containment callbacks into this control. */
    final IContainmentAcceptor CONTAINMENT_ACCEPTOR = new CanvasManagementContainmentAcceptor();

    private final CaseManagementCanvasCommandFactory canvasCommandFactory;
    private final CaseManagementContainmentStateHolder state;

    @Inject
    public CaseManagementContainmentAcceptorControlImpl(final @CaseManagementEditor CaseManagementCanvasCommandFactory canvasCommandFactory,
                                                        final CaseManagementContainmentStateHolder state) {
        this.canvasCommandFactory = canvasCommandFactory;
        this.state = state;
    }

    @Override
    protected void onEnable(final WiresCanvas.View view) {
        view.setContainmentAcceptor(CONTAINMENT_ACCEPTOR);
    }

    @Override
    protected void onDisable(final WiresCanvas.View view) {
        view.setContainmentAcceptor(IContainmentAcceptor.NONE);
    }

    /**
     * Checks whether the given children may be contained by {@code parent} by
     * evaluating (not executing) the corresponding canvas commands.
     */
    @Override
    public boolean allow(final Element parent,
                         final Node[] children) {
        return evaluate(parent,
                        children,
                        command -> getCommandManager().allow(getCanvasHandler(),
                                                             command));
    }

    /** Acceptance is driven by {@link CanvasManagementContainmentAcceptor} instead. */
    @Override
    public boolean accept(final Element parent,
                          final Node[] children) {
        throw new UnsupportedOperationException();
    }

    /**
     * Builds the containment command for the first child and runs it through
     * {@code executor}. Returns {@code true} when the child has no incoming
     * Child edge (nothing to re-parent) or the command evaluation succeeds.
     */
    private boolean evaluate(final Element parent,
                             final Node[] children,
                             final Function<Command<AbstractCanvasHandler, CanvasViolation>, CommandResult<CanvasViolation>> executor) {
        // Fix: guard children on its own. The previous condition
        // (parent == null && children empty) still dereferenced children[0]
        // when parent was non-null but children was null or empty.
        if (children == null || children.length == 0) {
            return false;
        }
        // Only the first candidate child participates in the evaluation.
        final Node child = children[0];
        final Optional<Edge<?, Node>> edge = getFirstIncomingEdge(child,
                                                                  e -> e.getContent() instanceof Child);
        if (edge.isPresent()) {
            final Command<AbstractCanvasHandler, CanvasViolation> command = buildCommands(parent,
                                                                                          child,
                                                                                          edge.get());
            final CommandResult<CanvasViolation> result = executor.apply(command);
            return isCommandSuccess(result);
        }
        return true;
    }

    /**
     * Composes (in reverse order) the removal of the child from its current
     * parent, if any, followed by its addition to the new parent, if any.
     */
    private Command<AbstractCanvasHandler, CanvasViolation> buildCommands(final Element parent,
                                                                          final Node child,
                                                                          final Edge edge) {
        final CompositeCommand.Builder<AbstractCanvasHandler, CanvasViolation> builder =
                new CompositeCommand.Builder<AbstractCanvasHandler, CanvasViolation>()
                        .reverse();
        if (null != edge.getSourceNode()) {
            builder.addCommand(
                    canvasCommandFactory.removeChild(edge.getSourceNode(),
                                                     child)
            );
        }
        if (null != parent) {
            builder.addCommand(
                    canvasCommandFactory.setChildNode((Node) parent,
                                                      child)
            );
        }
        return builder.build();
    }

    /** Command that sets {@code child} as a child node of {@code parent}. */
    Command<AbstractCanvasHandler, CanvasViolation> getAddEdgeCommand(final Node parent,
                                                                      final Node child) {
        return canvasCommandFactory.setChildNode(parent,
                                                 child);
    }

    /** Command that re-parents {@code child}, tracking its original location. */
    Command<AbstractCanvasHandler, CanvasViolation> getSetEdgeCommand(final Node parent,
                                                                      final Node child,
                                                                      final Optional<Integer> index,
                                                                      final Optional<Node> originalParent,
                                                                      final Optional<Integer> originalIndex) {
        return canvasCommandFactory.setChildNode(parent,
                                                 child,
                                                 index,
                                                 originalParent,
                                                 originalIndex);
    }

    /** Command that removes the parent/child relationship. */
    Command<AbstractCanvasHandler, CanvasViolation> getDeleteEdgeCommand(final Node parent,
                                                                         final Node child) {
        return canvasCommandFactory.removeChild(parent,
                                                child);
    }

    /** Bridges Lienzo wires containment callbacks to stunner canvas commands. */
    class CanvasManagementContainmentAcceptor implements IContainmentAcceptor {

        @Override
        public boolean containmentAllowed(final WiresContainer wiresContainer,
                                          final WiresShape[] wiresShapes) {
            final WiresShape wiresShape = wiresShapes[0];
            if (!isWiresViewAccept(wiresContainer,
                                   wiresShape)) {
                return false;
            }
            final Node childNode = WiresUtils.getNode(getCanvasHandler(),
                                                      wiresShape);
            final Node parentNode = WiresUtils.getNode(getCanvasHandler(),
                                                       wiresContainer);
            return allow(parentNode,
                         new Node[]{childNode});
        }

        @Override
        public boolean acceptContainment(final WiresContainer wiresContainer,
                                         final WiresShape[] wiresShapes) {
            // Only accept when a drag ghost exists and the drop is allowed.
            if (state.getGhost().isPresent() &&
                    containmentAllowed(wiresContainer,
                                       wiresShapes)) {
                final AbstractCaseManagementShape container = (AbstractCaseManagementShape) wiresContainer;
                final AbstractCaseManagementShape ghost = state.getGhost().get();
                final int index = container.getIndex(ghost);
                if (index >= 0) {
                    final Optional<Integer> newIndex = Optional.of(index);
                    final Optional<WiresContainer> originalContainer = state.getOriginalParent();
                    final Optional<Integer> originalIndex = state.getOriginalIndex();
                    final CommandResult<CanvasViolation> result =
                            getCommandManager().execute(getCanvasHandler(),
                                                        makeAddMutationCommand(wiresShapes[0],
                                                                               wiresContainer,
                                                                               newIndex,
                                                                               originalContainer,
                                                                               originalIndex));
                    return !CommandUtils.isError(result);
                }
            }
            return false;
        }

        /** Resolves graph nodes for the wires shapes and builds the re-parent command. */
        protected Command<AbstractCanvasHandler, CanvasViolation> makeAddMutationCommand(final WiresShape shape,
                                                                                         final WiresContainer container,
                                                                                         final Optional<Integer> index,
                                                                                         final Optional<WiresContainer> originalContainer,
                                                                                         final Optional<Integer> originalIndex) {
            final Node parent = WiresUtils.getNode(getCanvasHandler(),
                                                   container);
            final Node child = WiresUtils.getNode(getCanvasHandler(),
                                                  shape);
            final Optional<Node> originalParent = originalContainer.flatMap((c) -> Optional.ofNullable(WiresUtils.getNode(getCanvasHandler(),
                                                                                                                          c)));
            // Set relationship.
            return getSetEdgeCommand(parent,
                                     child,
                                     index,
                                     originalParent,
                                     originalIndex);
        }
    }
}
| |
package com.github.codechapin.postcss.tokenizer;
import org.testng.Assert;
import org.testng.annotations.Test;
import java.io.StringReader;
import static com.github.codechapin.postcss.tokenizer.Token.token;
import static com.github.codechapin.postcss.tokenizer.TokenType.*;
/**
 * Tests for {@code Tokenizer}: token boundaries, source positions (start and
 * end line/column), escape handling, strings, comments, brackets, at-words
 * and error reporting for unclosed constructs.
 */
public class TokenizerTest {

    // NOTE(review): a single shared Tokenizer is safe only because test()
    // always reset()s it in a finally block; these tests must not run in
    // parallel against the same instance.
    private static final Tokenizer TOKENIZER = new Tokenizer();

    @Test
    public void tokenizesEmptyFile() {
        test("");
    }

    @Test
    public void tokenizesSpace() {
        test("\r\n \f\t",
             token(SPACE, "\r\n \f\t", 1, 1, 2, 3)
        );
    }

    @Test
    public void tokenizesWord() {
        test("ab",
             token(WORD, "ab", 1, 1, 1, 2)
        );
    }

    @Test
    public void splitsWordByExclamationMark() {
        test("aa!bb",
             token(WORD, "aa", 1, 1, 1, 2),
             token(WORD, "!bb", 1, 3, 1, 5)
        );
    }

    @Test
    public void changesLinesInSpaces() {
        test("a \n b",
             token(WORD, "a"),
             token(SPACE, " \n ", 1, 2, 2, 1),
             token(WORD, "b", 2, 2)
        );
    }

    @Test
    public void tokenizesControlChars() {
        test("{:;}",
             token(OPEN_CURLY, "{"),
             token(COLON, ":", 1, 2, 1, 2),
             token(SEMICOLON, ";", 1, 3),
             token(CLOSE_CURLY, "}", 1, 4)
        );
    }

    @Test
    public void escapesControlSymbols() {
        test("\\(\\{\\\"\\@",
             token(WORD, "\\(", 1, 1, 1, 2),
             token(WORD, "\\{", 1, 3, 1, 4),
             token(WORD, "\\\"", 1, 5, 1, 6),
             token(WORD, "\\@", 1, 7, 1, 8)
        );
    }

    @Test
    public void escapesBackslash() {
        test("\\\\\\\\{",
             token(WORD, "\\\\\\\\", 1, 1, 1, 4),
             token(OPEN_CURLY, "{", 1, 5, 1, 5)
        );
    }

    @Test
    public void tokenizesString() {
        test("'\"'\"\\\"\"",
             token(STRING, "'\"'", 1, 1, 1, 3),
             token(STRING, "\"\\\"\"", 1, 4, 1, 7)
        );
    }

    // Made public for consistency with every other @Test method in this class.
    @Test
    public void tokenizesEscapedString() {
        test("\"\\\\\"",
             token(STRING, "\"\\\\\"", 1, 1, 1, 4)
        );
    }

    // Made public for consistency with every other @Test method in this class.
    @Test
    public void tokenizesEscapedBackslashInEscapedString() {
        test("'\\'\"\\\\'",
             token(STRING, "'\\'\"\\\\'", 1, 1, 1, 7)
        );
    }

    @Test
    public void tokenizesComment() {
        test("/* a\nb c*/",
             token(COMMENT, "/* a\nb c*/", 1, 1, 2, 5)
        );
    }

    @Test
    public void changesLinesInComments() {
        test("a/* \n */b",
             token(WORD, "a"),
             token(COMMENT, "/* \n */", 1, 2, 2, 3),
             token(WORD, "b", 2, 4)
        );
    }

    @Test
    public void tokenizesSimpleBrackets() {
        test("(ab)",
             token(BRACKETS, "(ab)", 1, 1, 1, 4)
        );
    }

    @Test
    public void tokenizesComplicatedBrackets() {
        test("(())(\"\")(/**/)(\\\\)(\n)(",
             token(OPEN_PARENTHESES, "(", 1, 1),
             token(BRACKETS, "()", 1, 2, 1, 3),
             token(CLOSE_PARENTHESES, ")", 1, 4),
             token(OPEN_PARENTHESES, "(", 1, 5),
             token(STRING, "\"\"", 1, 6, 1, 7),
             token(CLOSE_PARENTHESES, ")", 1, 8),
             token(OPEN_PARENTHESES, "(", 1, 9),
             token(COMMENT, "/**/", 1, 10, 1, 13),
             token(CLOSE_PARENTHESES, ")", 1, 14),
             token(OPEN_PARENTHESES, "(", 1, 15),
             token(WORD, "\\\\", 1, 16, 1, 17),
             token(CLOSE_PARENTHESES, ")", 1, 18),
             token(OPEN_PARENTHESES, "(", 1, 19),
             token(SPACE, "\n", 2, 0),
             token(CLOSE_PARENTHESES, ")", 2, 1),
             token(OPEN_PARENTHESES, "(", 2, 2)
        );
    }

    @Test
    public void tokenizesAtSymbol() {
        test("@",
             token(AT_WORD, "@")
        );
    }

    @Test
    public void tokenizesAtWord() {
        test("@word",
             token(AT_WORD, "@word", 1, 1, 1, 5)
        );
    }

    @Test
    public void tokenizesAtWordEnd() {
        test("@one{@two()@three\"\"@four;",
             token(AT_WORD, "@one", 1, 1, 1, 4),
             token(OPEN_CURLY, "{", 1, 5),
             token(AT_WORD, "@two", 1, 6, 1, 9),
             token(BRACKETS, "()", 1, 10, 1, 11),
             token(AT_WORD, "@three", 1, 12, 1, 17),
             token(STRING, "\"\"", 1, 18, 1, 19),
             token(AT_WORD, "@four", 1, 20, 1, 24),
             token(SEMICOLON, ";", 1, 25)
        );
    }

    @Test
    public void tokenizesUrls() {
        test("url(/*\\))",
             token(WORD, "url", 1, 1, 1, 3),
             token(BRACKETS, "(/*\\))", 1, 4, 1, 9)
        );
    }

    @Test
    public void tokenizesQuotedUrls() {
        test("url(\")\")",
             token(WORD, "url", 1, 1, 1, 3),
             token(OPEN_PARENTHESES, "(", 1, 4),
             token(STRING, "\")\"", 1, 5, 1, 7),
             token(CLOSE_PARENTHESES, ")", 1, 8)
        );
    }

    @Test
    public void tokenizesCSS() {
        final String css = "a {\n" +
                           "  content: \"a\";\n" +
                           "  width: calc(1px;)\n" +
                           "  }\n" +
                           "/* small screen */\n" +
                           "@media screen {}";
        test(css,
             token(WORD, "a"),
             token(SPACE, " ", 1, 2, 1, 2),
             token(OPEN_CURLY, "{", 1, 3),
             token(SPACE, "\n  ", 2, 0, 2, 2),
             token(WORD, "content", 2, 3, 2, 9),
             token(COLON, ":", 2, 10),
             token(SPACE, " ", 2, 11),
             token(STRING, "\"a\"", 2, 12, 2, 14),
             token(SEMICOLON, ";", 2, 15),
             token(SPACE, "\n  ", 3, 0, 3, 2),
             token(WORD, "width", 3, 3, 3, 7),
             token(COLON, ":", 3, 8),
             token(SPACE, " ", 3, 9),
             token(WORD, "calc", 3, 10, 3, 13),
             token(BRACKETS, "(1px;)", 3, 14, 3, 19),
             token(SPACE, "\n  ", 4, 0, 4, 2),
             token(CLOSE_CURLY, "}", 4, 3),
             token(SPACE, "\n", 5, 0),
             token(COMMENT, "/* small screen */", 5, 1, 5, 18),
             token(SPACE, "\n", 6, 0),
             token(AT_WORD, "@media", 6, 1, 6, 6),
             token(SPACE, " ", 6, 7),
             token(WORD, "screen", 6, 8, 6, 13),
             token(SPACE, " ", 6, 14),
             token(OPEN_CURLY, "{", 6, 15),
             token(CLOSE_CURLY, "}", 6, 16)
        );
    }

    @Test(expectedExceptions = TokenizerException.class, expectedExceptionsMessageRegExp = ":1:2: Unclosed quote")
    public void throwsErrorOnUnclosedString() {
        test(" \"");
    }

    @Test(expectedExceptions = TokenizerException.class, expectedExceptionsMessageRegExp = ":1:2: Unclosed comment")
    public void throwsErrorOnUnclosedComment() {
        test(" /*");
    }

    @Test(expectedExceptions = TokenizerException.class, expectedExceptionsMessageRegExp = ":1:4: Unclosed bracket")
    public void throwsErrorOnUnclosedUrl() {
        test("url(");
    }

    /**
     * Tokenizes {@code css} and asserts the emitted tokens equal
     * {@code expected}, position by position and in count. With no expected
     * tokens it just drains the tokenizer, asserting each token is non-null.
     * The shared tokenizer is always reset afterwards.
     */
    private void test(final String css, final Token... expected) {
        try {
            TOKENIZER.tokenize(new StringReader(css));
            if (expected.length > 0) {
                int i = 0;
                while (TOKENIZER.hasNext()) {
                    final Token token = TOKENIZER.next();
                    if (i < expected.length) {
                        Assert.assertEquals(token, expected[i], "Not equals token at position: " + (i + 1));
                    }
                    i++;
                }
                Assert.assertEquals(i, expected.length, "The number of tokens is not the same.");
            } else {
                while (TOKENIZER.hasNext()) {
                    Assert.assertNotNull(TOKENIZER.next());
                }
            }
        } finally {
            TOKENIZER.reset();
        }
    }
}
| |
/*
* Copyright 2000-2012 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jetbrains.idea.svn16;
import com.intellij.execution.process.ProcessOutput;
import com.intellij.openapi.command.WriteCommandAction;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.openapi.vcs.FileStatus;
import com.intellij.openapi.vcs.VcsConfiguration;
import com.intellij.openapi.vcs.VcsException;
import com.intellij.openapi.vcs.changes.Change;
import com.intellij.openapi.vcs.changes.ChangeListManager;
import com.intellij.openapi.vcs.rollback.RollbackProgressListener;
import com.intellij.openapi.vfs.VirtualFile;
import org.jetbrains.annotations.NonNls;
import org.jetbrains.idea.svn.SvnVcs;
import org.junit.Assert;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
/**
* @author yole
*/
public class SvnRenameTest extends Svn16TestCase {
@NonNls private static final String LOG_SEPARATOR = "------------------------------------------------------------------------\n";
@NonNls private static final String LOG_SEPARATOR_START = "-------------";
@Test
public void testSimpleRename() throws Exception {
  // Renaming a committed file must show as add-with-history plus delete.
  enableSilentOperation(VcsConfiguration.StandardConfirmation.ADD);
  final VirtualFile a = createFileInCommand("a.txt", "test");
  checkin();
  renameFileInCommand(a, "b.txt");
  verifySorted(runSvn("status"), "A + b.txt", "D a.txt");
}

// IDEADEV-18844
@Test
public void testRenameReplace() throws Exception {
  // Renaming one file onto the name of another committed file must be
  // reported by svn as a replacement ("R +"), not a plain add.
  enableSilentOperation(VcsConfiguration.StandardConfirmation.ADD);
  final VirtualFile a = createFileInCommand("a.txt", "old");
  final VirtualFile aNew = createFileInCommand("aNew.txt", "new");
  checkin();
  renameFileInCommand(a, "aOld.txt");
  renameFileInCommand(aNew, "a.txt");
  final ProcessOutput result = runSvn("status");
  verifySorted(result, "A + aOld.txt", "D aNew.txt", "R + a.txt");
}

// IDEADEV-16251
@Test
public void testRenameAddedPackage() throws Exception {
  // A directory added but never committed has no history to preserve, so a
  // rename shows as a plain add of the new name and its contents.
  enableSilentOperation(VcsConfiguration.StandardConfirmation.ADD);
  final VirtualFile dir = createDirInCommand(myWorkingCopyDir, "child");
  createFileInCommand(dir, "a.txt", "content");
  renameFileInCommand(dir, "newchild");
  verifySorted(runSvn("status"), "A newchild", "A newchild" + File.separatorChar + "a.txt");
}

// IDEADEV-8091
@Test
public void testDoubleRename() throws Exception {
  // Two successive renames of the same file (a.txt -> b.txt -> c.txt) must
  // collapse into a single add-with-history from the original name.
  enableSilentOperation(VcsConfiguration.StandardConfirmation.ADD);
  final VirtualFile a = createFileInCommand("a.txt", "test");
  checkin();
  renameFileInCommand(a, "b.txt");
  renameFileInCommand(a, "c.txt");
  verifySorted(runSvn("status"), "A + c.txt", "D a.txt");
}
// IDEADEV-15876
@Test
public void testRenamePackageWithChildren() throws Exception {
  // Renaming a committed directory tree must produce one add-with-history for
  // the new root plus deletes for the whole old tree, and four corresponding
  // move changes in the change list.
  final VirtualFile child = prepareDirectoriesForRename();
  renameFileInCommand(child, "childnew");
  final ProcessOutput result = runSvn("status");
  verifySorted(result, "A + childnew",
               "D child",
               "D child" + File.separatorChar + "a.txt",
               "D child" + File.separatorChar + "grandChild",
               "D child" + File.separatorChar + "grandChild" + File.separatorChar + "b.txt");
  refreshVfs(); // wait for end of refresh operations initiated from SvnFileSystemListener
  final ChangeListManager changeListManager = ChangeListManager.getInstance(myProject);
  changeListManager.ensureUpToDate(false);
  List<Change> changes = new ArrayList<Change>(changeListManager.getDefaultChangeList().getChanges());
  Assert.assertEquals(4, changes.size());
  sortChanges(changes);
  verifyChange(changes.get(0), "child", "childnew");
  verifyChange(changes.get(1), "child" + File.separatorChar + "a.txt", "childnew" + File.separatorChar + "a.txt");
  verifyChange(changes.get(2), "child" + File.separatorChar + "grandChild", "childnew" + File.separatorChar + "grandChild");
  verifyChange(changes.get(3), "child" + File.separatorChar + "grandChild" + File.separatorChar + "b.txt", "childnew" + File.separatorChar + "grandChild" + File.separatorChar + "b.txt");
  VirtualFile oldChild = myWorkingCopyDir.findChild("child");
  Assert.assertEquals(FileStatus.DELETED, changeListManager.getStatus(oldChild));
}

/**
 * Creates and commits the layout child/a.txt and child/grandChild/b.txt,
 * returning the child directory.
 */
private VirtualFile prepareDirectoriesForRename() throws IOException {
  enableSilentOperation(VcsConfiguration.StandardConfirmation.ADD);
  final VirtualFile child = createDirInCommand(myWorkingCopyDir, "child");
  final VirtualFile grandChild = createDirInCommand(child, "grandChild");
  createFileInCommand(child, "a.txt", "a");
  createFileInCommand(grandChild, "b.txt", "b");
  checkin();
  return child;
}
// IDEADEV-19065
@Test
public void testCommitAfterRenameDir() throws Exception {
  // After committing a directory rename, the log of a file under the new name
  // must keep the history from before the rename (two revisions).
  final VirtualFile child = prepareDirectoriesForRename();
  renameFileInCommand(child, "newchild");
  checkin();
  final ProcessOutput runResult = runSvn("log", "-q", "newchild/a.txt");
  verify(runResult);
  final List<String> lines = StringUtil.split(runResult.getStdout(), "\n");
  // Drop the "----" separator lines so only the revision lines remain.
  for (Iterator<String> iterator = lines.iterator(); iterator.hasNext();) {
    final String next = iterator.next();
    if (next.startsWith(LOG_SEPARATOR_START)) {
      iterator.remove();
    }
  }
  Assert.assertEquals(2, lines.size());
  Assert.assertTrue(lines.get(0).startsWith("r2 |"));
  Assert.assertTrue(lines.get(1).startsWith("r1 |"));
}

// IDEADEV-9755
@Test
public void testRollbackRenameDir() throws Exception {
  // Rolling back a directory rename must restore the old directory name on
  // disk and remove the renamed one, without reporting exceptions.
  final VirtualFile child = prepareDirectoriesForRename();
  renameFileInCommand(child, "newchild");
  final ChangeListManager changeListManager = ChangeListManager.getInstance(myProject);
  changeListManager.ensureUpToDate(false);
  final Change change = changeListManager.getChange(myWorkingCopyDir.findChild("newchild"));
  Assert.assertNotNull(change);
  final List<VcsException> exceptions = new ArrayList<VcsException>();
  SvnVcs.getInstance(myProject).getRollbackEnvironment().rollbackChanges(Collections.singletonList(change), exceptions,
                                                                         RollbackProgressListener.EMPTY);
  Assert.assertTrue(exceptions.isEmpty());
  Assert.assertFalse(new File(myWorkingCopyDir.getPath(), "newchild").exists());
  Assert.assertTrue(new File(myWorkingCopyDir.getPath(), "child").exists());
}
// IDEADEV-7697
@Test
public void testMovePackageToParent() throws Exception {
  // Moving a committed sub-package to its parent must surface exactly the
  // two move changes (the directory and its file).
  enableSilentOperation(VcsConfiguration.StandardConfirmation.ADD);
  final VirtualFile child = createDirInCommand(myWorkingCopyDir, "child");
  final VirtualFile grandChild = createDirInCommand(child, "grandChild");
  createFileInCommand(grandChild, "a.txt", "a");
  checkin();
  final ChangeListManager changeListManager = ChangeListManager.getInstance(myProject);
  moveFileInCommand(grandChild, myWorkingCopyDir);
  refreshVfs(); // wait for end of refresh operations initiated from SvnFileSystemListener
  changeListManager.ensureUpToDate(false);
  final List<Change> changes = new ArrayList<Change>(changeListManager.getDefaultChangeList().getChanges());
  Assert.assertEquals(listToString(changes), 2, changes.size());
  sortChanges(changes);
  verifyChange(changes.get(0), "child" + File.separatorChar + "grandChild", "grandChild");
  verifyChange(changes.get(1), "child" + File.separatorChar + "grandChild" + File.separatorChar + "a.txt", "grandChild" + File.separatorChar + "a.txt");
}

/** Renders the change list as "{c1,c2,...}" for assertion failure messages. */
private String listToString(final List<Change> changes) {
  return "{" + StringUtil.join(changes, StringUtil.createToStringFunction(Change.class), ",") + "}";
}
// IDEADEV-19223
@Test
public void testRollbackRenameWithUnversioned() throws Exception {
  // Rolling back a directory rename must also move unversioned files and
  // directories back to the restored location instead of losing them.
  enableSilentOperation(VcsConfiguration.StandardConfirmation.ADD);
  final VirtualFile child = createDirInCommand(myWorkingCopyDir, "child");
  createFileInCommand(child, "a.txt", "a");
  checkin();
  // Create content svn does not track inside the versioned directory.
  disableSilentOperation(VcsConfiguration.StandardConfirmation.ADD);
  final VirtualFile unversioned = createFileInCommand(child, "u.txt", "u");
  final VirtualFile unversionedDir = createDirInCommand(child, "uc");
  createFileInCommand(unversionedDir, "c.txt", "c");
  final ChangeListManager changeListManager = ChangeListManager.getInstance(myProject);
  changeListManager.ensureUpToDate(false);
  Assert.assertEquals(FileStatus.UNKNOWN, changeListManager.getStatus(unversioned));
  renameFileInCommand(child, "newchild");
  File childPath = new File(myWorkingCopyDir.getPath(), "child");
  File newChildPath = new File(myWorkingCopyDir.getPath(), "newchild");
  Assert.assertTrue(new File(newChildPath, "a.txt").exists());
  Assert.assertTrue(new File(newChildPath, "u.txt").exists());
  Assert.assertFalse(new File(childPath, "u.txt").exists());
  refreshVfs();
  changeListManager.ensureUpToDate(false);
  final List<Change> changes = new ArrayList<Change>();
  changes.add(ChangeListManager.getInstance(myProject).getChange(myWorkingCopyDir.findChild("newchild").findChild("a.txt")));
  changes.add(ChangeListManager.getInstance(myProject).getChange(myWorkingCopyDir.findChild("newchild")));
  final List<VcsException> exceptions = new ArrayList<VcsException>();
  SvnVcs.getInstance(myProject).getRollbackEnvironment().rollbackChanges(changes, exceptions, RollbackProgressListener.EMPTY);
  try {
    // Give the rollback's asynchronous file operations a moment to finish.
    Thread.sleep(300);
  }
  catch (InterruptedException e) {
    // Fix: restore the interrupt flag instead of silently swallowing it.
    Thread.currentThread().interrupt();
  }
  Assert.assertTrue(exceptions.isEmpty());
  final File fileA = new File(childPath, "a.txt");
  Assert.assertTrue(fileA.getAbsolutePath(), fileA.exists());
  final File fileU = new File(childPath, "u.txt");
  Assert.assertTrue(fileU.getAbsolutePath(), fileU.exists());
  final File unversionedDirFile = new File(childPath, "uc");
  Assert.assertTrue(unversionedDirFile.exists());
  Assert.assertTrue(new File(unversionedDirFile, "c.txt").exists());
}
// IDEA-13824
// Regression test for IDEA-13824: rename a file, then rename its parent directory,
// and verify the combined change set commits in a single pass without errors.
@Test
public void testRenameFileRenameDir() throws Exception {
    final VirtualFile child = prepareDirectoriesForRename();
    final VirtualFile f = child.findChild("a.txt");
    renameFileInCommand(f, "anew.txt");
    renameFileInCommand(child, "newchild");
    // After both renames svn should report the moved copies as additions-with-history
    // ("A +") and the originals (directory, file, grandchild tree) as deletions.
    verifySorted(runSvn("status"), "A + newchild", "A + newchild" + File.separatorChar + "anew.txt",
        "D child", "D child" + File.separatorChar + "a.txt", "D child" + File.separatorChar + "grandChild", "D child" + File.separatorChar + "grandChild" + File.separatorChar + "b.txt", "D + newchild" + File.separatorChar + "a.txt");
    final ChangeListManager changeListManager = ChangeListManager.getInstance(myProject);
    refreshVfs(); // wait for end of refresh operations initiated from SvnFileSystemListener
    changeListManager.ensureUpToDate(false);
    // Committing every pending change from the default list must produce no VcsExceptions.
    final List<Change> changes = new ArrayList<Change>(changeListManager.getDefaultChangeList().getChanges());
    final List<VcsException> list = SvnVcs.getInstance(myProject).getCheckinEnvironment().commit(changes, "test");
    Assert.assertEquals(0, list.size());
}
// IDEADEV-19364
/** IDEADEV-19364: undoing a package move must restore the directory and its contents. */
@Test
public void testUndoMovePackage() throws Exception {
    enableSilentOperation(VcsConfiguration.StandardConfirmation.ADD);
    final VirtualFile sourceParent = createDirInCommand(myWorkingCopyDir, "parent1");
    final VirtualFile targetParent = createDirInCommand(myWorkingCopyDir, "parent2");
    final VirtualFile packageDir = createDirInCommand(sourceParent, "child");
    createFileInCommand(packageDir, "a.txt", "a");
    checkin();
    moveFileInCommand(packageDir, targetParent);
    undo();
    // Both the directory and the file inside it must reappear under the original parent.
    final File restored = new File(sourceParent.getPath(), "child");
    Assert.assertTrue(restored.exists());
    Assert.assertTrue(new File(restored, "a.txt").exists());
}
// IDEADEV-19552
/** IDEADEV-19552: undoing a rename must bring the old name back and remove the new one. */
@Test
public void testUndoRename() throws Exception {
    enableSilentOperation(VcsConfiguration.StandardConfirmation.ADD);
    final VirtualFile committed = createFileInCommand(myWorkingCopyDir, "a.txt", "A");
    checkin();
    renameFileInCommand(committed, "b.txt");
    undo();
    final String root = myWorkingCopyDir.getPath();
    Assert.assertTrue(new File(root, "a.txt").exists());
    Assert.assertFalse(new File(root, "b.txt").exists());
}
// IDEADEV-19336
/*@Bombed(user = "Ira", month = 6, day = 15)
@Test
public void testUndoMoveCommittedPackage() throws Exception {
enableSilentOperation(VcsConfiguration.StandardConfirmation.ADD);
enableSilentOperation(VcsConfiguration.StandardConfirmation.REMOVE);
final VirtualFile parent1 = createDirInCommand(myWorkingCopyDir, "parent1");
final VirtualFile parent2 = createDirInCommand(myWorkingCopyDir, "parent2");
final VirtualFile child = createDirInCommand(parent1, "child");
createFileInCommand(child, "a.txt", "a");
checkin();
moveFileInCommand(child, parent2);
checkin();
undo();
verifySorted(runSvn("status"), "A + parent1" + File.separatorChar + "child", "D parent2" + File.separatorChar + "child", "D parent2" + File.separatorChar + "child" + File.separatorChar + "a.txt");
}*/
/** Moving a not-yet-committed file into a freshly created package schedules both for addition. */
@Test
public void testMoveToNewPackage() throws Throwable {
    enableSilentOperation(VcsConfiguration.StandardConfirmation.ADD);
    final VirtualFile newFile = createFileInCommand(myWorkingCopyDir, "a.txt", "A");
    moveToNewPackage(newFile, "child");
    final String expectedChildEntry = "A child" + File.separatorChar + "a.txt";
    verifySorted(runSvn("status"), "A child", expectedChildEntry);
}
/** Moving a committed file into a new package yields an add-with-history plus a deletion of the original. */
@Test
public void testMoveToNewPackageCommitted() throws Throwable {
    enableSilentOperation(VcsConfiguration.StandardConfirmation.ADD);
    final VirtualFile committed = createFileInCommand(myWorkingCopyDir, "a.txt", "A");
    checkin();
    moveToNewPackage(committed, "child");
    verifySorted(runSvn("status"),
                 "A child",
                 "A + child" + File.separatorChar + "a.txt",
                 "D a.txt");
}
/**
 * Moving a versioned directory into an unversioned one: SVN cannot track the move as a
 * copy, so the versioned source is reported as deleted while the target stays unversioned.
 */
@Test
public void testMoveToUnversioned() throws Exception {
    enableSilentOperation(VcsConfiguration.StandardConfirmation.ADD);
    final VirtualFile file = createFileInCommand(myWorkingCopyDir, "a.txt", "A");
    final VirtualFile child = moveToNewPackage(file, "child");
    verifySorted(runSvn("status"), "A child", "A child" + File.separatorChar + "a.txt");
    checkin();
    // Create the target directory with auto-add temporarily disabled so it stays unversioned.
    disableSilentOperation(VcsConfiguration.StandardConfirmation.ADD);
    final VirtualFile unversioned = createDirInCommand(myWorkingCopyDir, "unversioned");
    enableSilentOperation(VcsConfiguration.StandardConfirmation.ADD);
    verifySorted(runSvn("status"), "? unversioned");
    moveFileInCommand(child, unversioned);
    // Consistency fix: use File.separatorChar like every other expectation in this class
    // (the previous File.separator produced the identical string, just inconsistently).
    verifySorted(runSvn("status"), "? unversioned", "D child", "D child" + File.separatorChar + "a.txt");
}
@Test
public void testUndoMoveUnversionedToUnversioned() throws Exception {
    // NOTE(review): ADD confirmation is enabled and then immediately disabled; the net
    // effect is "disabled", so the enable call looks redundant — confirm it is not
    // needed to reset state left behind by a previous test before removing it.
    enableSilentOperation(VcsConfiguration.StandardConfirmation.ADD);
    disableSilentOperation(VcsConfiguration.StandardConfirmation.ADD);
    // With auto-add off, both the new file and the new directory stay unversioned ("?").
    final VirtualFile file = createFileInCommand(myWorkingCopyDir, "a.txt", "A");
    verifySorted(runSvn("status"), "? a.txt");
    final VirtualFile unversioned = createDirInCommand(myWorkingCopyDir, "unversioned");
    moveFileInCommand(file, unversioned);
    verifySorted(runSvn("status"), "? unversioned");
    undo();
    // Undo must restore the unversioned file at the working-copy root.
    verifySorted(runSvn("status"), "? a.txt", "? unversioned");
}
/** Undoing a move of a scheduled-for-addition file into an unversioned directory leaves it unversioned at the root. */
@Test
public void testUndoMoveAddedToUnversioned() throws Exception {
    enableSilentOperation(VcsConfiguration.StandardConfirmation.ADD);
    final VirtualFile added = createFileInCommand(myWorkingCopyDir, "a.txt", "A");
    verifySorted(runSvn("status"), "A a.txt");
    disableSilentOperation(VcsConfiguration.StandardConfirmation.ADD);
    final VirtualFile targetDir = createDirInCommand(myWorkingCopyDir, "unversioned");
    moveFileInCommand(added, targetDir);
    verifySorted(runSvn("status"), "? unversioned");
    undo();
    verifySorted(runSvn("status"), "? a.txt", "? unversioned");
}
/**
 * Creates a directory named {@code packageName} under the working-copy root and moves
 * {@code file} into it, all within a single write command.
 *
 * @param file        the file to relocate
 * @param packageName name of the directory to create
 * @return the newly created directory
 */
private VirtualFile moveToNewPackage(final VirtualFile file, final String packageName) throws Exception {
    // One-element array so the anonymous command can publish the created directory.
    final VirtualFile[] created = new VirtualFile[1];
    new WriteCommandAction.Simple(myProject) {
        @Override
        public void run() {
            try {
                final VirtualFile dir = myWorkingCopyDir.createChildDirectory(this, packageName);
                created[0] = dir;
                file.move(this, dir);
            }
            catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
    }.execute().throwException();
    return created[0];
}
}
| |
package org.cagrid.dorian.model.idp;
import java.io.Serializable;
import java.util.Calendar;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlSchemaType;
import javax.xml.bind.annotation.XmlType;
import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
import org.jvnet.jaxb2_commons.lang.Equals;
import org.jvnet.jaxb2_commons.lang.EqualsStrategy;
import org.jvnet.jaxb2_commons.lang.HashCode;
import org.jvnet.jaxb2_commons.lang.HashCodeStrategy;
import org.jvnet.jaxb2_commons.lang.JAXBEqualsStrategy;
import org.jvnet.jaxb2_commons.lang.JAXBHashCodeStrategy;
import org.jvnet.jaxb2_commons.lang.JAXBToStringStrategy;
import org.jvnet.jaxb2_commons.lang.ToString;
import org.jvnet.jaxb2_commons.lang.ToStringStrategy;
import org.jvnet.jaxb2_commons.locator.ObjectLocator;
import org.jvnet.jaxb2_commons.locator.util.LocatorUtils;
import org.w3._2001.xmlschema.Adapter1;
/**
* <p>Java class for IdentityProviderAuditRecord complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType name="IdentityProviderAuditRecord">
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <sequence>
* <element name="TargetId" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/>
* <element name="ReportingPartyId" type="{http://www.w3.org/2001/XMLSchema}string"/>
* <element name="AuditType" type="{http://cagrid.nci.nih.gov/1/dorian-idp}IdentityProviderAudit"/>
* <element name="OccurredAt" type="{http://www.w3.org/2001/XMLSchema}dateTime"/>
* <element name="AuditMessage" type="{http://www.w3.org/2001/XMLSchema}string"/>
* </sequence>
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
/**
 * JAXB binding for the {@code IdentityProviderAuditRecord} complex type.
 *
 * <p>Carries one identity-provider audit entry: the optional target id, the reporting
 * party, the audit type, the time the event occurred, and the audit message text.
 * Equality, hashing and string rendering are delegated to the jaxb2-commons strategy
 * interfaces, exactly as in generated code.
 */
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "IdentityProviderAuditRecord", propOrder = {
    "targetId",
    "reportingPartyId",
    "auditType",
    "occurredAt",
    "auditMessage"
})
public class IdentityProviderAuditRecord
    implements Serializable, Equals, HashCode, ToString
{

    @XmlElement(name = "TargetId")
    protected String targetId;
    @XmlElement(name = "ReportingPartyId", required = true)
    protected String reportingPartyId;
    @XmlElement(name = "AuditType", required = true)
    protected IdentityProviderAudit auditType;
    @XmlElement(name = "OccurredAt", required = true, type = String.class)
    @XmlJavaTypeAdapter(Adapter1.class)
    @XmlSchemaType(name = "dateTime")
    protected Calendar occurredAt;
    @XmlElement(name = "AuditMessage", required = true)
    protected String auditMessage;

    /** @return the id of the entity the audit event concerns, or {@code null} if none */
    public String getTargetId() {
        return targetId;
    }

    /** @param value the id of the entity the audit event concerns; may be {@code null} */
    public void setTargetId(String value) {
        targetId = value;
    }

    /** @return the id of the party that reported the event */
    public String getReportingPartyId() {
        return reportingPartyId;
    }

    /** @param value the id of the party that reported the event */
    public void setReportingPartyId(String value) {
        reportingPartyId = value;
    }

    /** @return the {@link IdentityProviderAudit} category of this record */
    public IdentityProviderAudit getAuditType() {
        return auditType;
    }

    /** @param value the {@link IdentityProviderAudit} category of this record */
    public void setAuditType(IdentityProviderAudit value) {
        auditType = value;
    }

    /** @return the moment the audited event occurred */
    public Calendar getOccurredAt() {
        return occurredAt;
    }

    /** @param value the moment the audited event occurred */
    public void setOccurredAt(Calendar value) {
        occurredAt = value;
    }

    /** @return the free-text audit message */
    public String getAuditMessage() {
        return auditMessage;
    }

    /** @param value the free-text audit message */
    public void setAuditMessage(String value) {
        auditMessage = value;
    }

    /** Renders all fields via the default jaxb2-commons to-string strategy. */
    public String toString() {
        final StringBuilder sb = new StringBuilder();
        append(null, sb, JAXBToStringStrategy.INSTANCE);
        return sb.toString();
    }

    public StringBuilder append(ObjectLocator locator, StringBuilder buffer, ToStringStrategy strategy) {
        strategy.appendStart(locator, this, buffer);
        appendFields(locator, buffer, strategy);
        strategy.appendEnd(locator, this, buffer);
        return buffer;
    }

    public StringBuilder appendFields(ObjectLocator locator, StringBuilder buffer, ToStringStrategy strategy) {
        // Fields are appended in schema propOrder.
        strategy.appendField(locator, this, "targetId", buffer, getTargetId());
        strategy.appendField(locator, this, "reportingPartyId", buffer, getReportingPartyId());
        strategy.appendField(locator, this, "auditType", buffer, getAuditType());
        strategy.appendField(locator, this, "occurredAt", buffer, getOccurredAt());
        strategy.appendField(locator, this, "auditMessage", buffer, getAuditMessage());
        return buffer;
    }

    public int hashCode(ObjectLocator locator, HashCodeStrategy strategy) {
        int hash = 1;
        hash = strategy.hashCode(LocatorUtils.property(locator, "targetId", getTargetId()), hash, getTargetId());
        hash = strategy.hashCode(LocatorUtils.property(locator, "reportingPartyId", getReportingPartyId()), hash, getReportingPartyId());
        hash = strategy.hashCode(LocatorUtils.property(locator, "auditType", getAuditType()), hash, getAuditType());
        hash = strategy.hashCode(LocatorUtils.property(locator, "occurredAt", getOccurredAt()), hash, getOccurredAt());
        hash = strategy.hashCode(LocatorUtils.property(locator, "auditMessage", getAuditMessage()), hash, getAuditMessage());
        return hash;
    }

    public int hashCode() {
        return hashCode(null, JAXBHashCodeStrategy.INSTANCE);
    }

    public boolean equals(ObjectLocator thisLocator, ObjectLocator thatLocator, Object object, EqualsStrategy strategy) {
        if (!(object instanceof IdentityProviderAuditRecord)) {
            return false;
        }
        if (this == object) {
            return true;
        }
        final IdentityProviderAuditRecord that = (IdentityProviderAuditRecord) object;
        // Compare field by field in propOrder, short-circuiting on the first difference.
        if (!strategy.equals(LocatorUtils.property(thisLocator, "targetId", getTargetId()),
                             LocatorUtils.property(thatLocator, "targetId", that.getTargetId()),
                             getTargetId(), that.getTargetId())) {
            return false;
        }
        if (!strategy.equals(LocatorUtils.property(thisLocator, "reportingPartyId", getReportingPartyId()),
                             LocatorUtils.property(thatLocator, "reportingPartyId", that.getReportingPartyId()),
                             getReportingPartyId(), that.getReportingPartyId())) {
            return false;
        }
        if (!strategy.equals(LocatorUtils.property(thisLocator, "auditType", getAuditType()),
                             LocatorUtils.property(thatLocator, "auditType", that.getAuditType()),
                             getAuditType(), that.getAuditType())) {
            return false;
        }
        if (!strategy.equals(LocatorUtils.property(thisLocator, "occurredAt", getOccurredAt()),
                             LocatorUtils.property(thatLocator, "occurredAt", that.getOccurredAt()),
                             getOccurredAt(), that.getOccurredAt())) {
            return false;
        }
        if (!strategy.equals(LocatorUtils.property(thisLocator, "auditMessage", getAuditMessage()),
                             LocatorUtils.property(thatLocator, "auditMessage", that.getAuditMessage()),
                             getAuditMessage(), that.getAuditMessage())) {
            return false;
        }
        return true;
    }

    public boolean equals(Object object) {
        return equals(null, null, object, JAXBEqualsStrategy.INSTANCE);
    }

}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.beam.runners.direct;
import static com.google.common.base.Preconditions.checkState;
import static org.apache.beam.runners.direct.DirectGraphs.getProducer;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.nullValue;
import static org.junit.Assert.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import com.google.common.collect.ContiguousSet;
import com.google.common.collect.DiscreteDomain;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.Range;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Set;
import javax.annotation.Nullable;
import org.apache.beam.runners.direct.DirectRunner.CommittedBundle;
import org.apache.beam.runners.direct.DirectRunner.UncommittedBundle;
import org.apache.beam.runners.direct.UnboundedReadDeduplicator.NeverDeduplicator;
import org.apache.beam.runners.direct.UnboundedReadEvaluatorFactory.UnboundedSourceShard;
import org.apache.beam.sdk.coders.BigEndianLongCoder;
import org.apache.beam.sdk.coders.Coder;
import org.apache.beam.sdk.coders.CoderException;
import org.apache.beam.sdk.coders.CustomCoder;
import org.apache.beam.sdk.coders.VarLongCoder;
import org.apache.beam.sdk.io.CountingSource;
import org.apache.beam.sdk.io.Read;
import org.apache.beam.sdk.io.UnboundedSource;
import org.apache.beam.sdk.io.UnboundedSource.CheckpointMark;
import org.apache.beam.sdk.options.PipelineOptions;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.testing.SourceTestUtils;
import org.apache.beam.sdk.testing.TestPipeline;
import org.apache.beam.sdk.transforms.AppliedPTransform;
import org.apache.beam.sdk.transforms.SerializableFunction;
import org.apache.beam.sdk.transforms.windowing.BoundedWindow;
import org.apache.beam.sdk.transforms.windowing.GlobalWindow;
import org.apache.beam.sdk.util.CoderUtils;
import org.apache.beam.sdk.util.VarInt;
import org.apache.beam.sdk.util.WindowedValue;
import org.apache.beam.sdk.values.PCollection;
import org.hamcrest.Matchers;
import org.joda.time.DateTime;
import org.joda.time.Instant;
import org.joda.time.ReadableInstant;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
/**
* Tests for {@link UnboundedReadEvaluatorFactory}.
*/
@RunWith(JUnit4.class)
public class UnboundedReadEvaluatorFactoryTest {
private PCollection<Long> longs;
private UnboundedReadEvaluatorFactory factory;
private EvaluationContext context;
private UncommittedBundle<Long> output;
private BundleFactory bundleFactory = ImmutableListBundleFactory.create();
private UnboundedSource<Long, ?> source;
private DirectGraph graph;
@Rule public ExpectedException thrown = ExpectedException.none();
@Rule public TestPipeline p = TestPipeline.create().enableAbandonedNodeEnforcement(false);
@Before
public void setup() {
    // Deterministic unbounded source whose element timestamps equal the element values.
    source = CountingSource.unboundedWithTimestampFn(new LongToInstantFn());
    longs = p.apply(Read.from(source));
    // The evaluation context is mocked; tests stub the bundle-creation calls they need.
    context = mock(EvaluationContext.class);
    factory = new UnboundedReadEvaluatorFactory(context);
    output = bundleFactory.createBundle(longs);
    graph = DirectGraphs.getGraph(p);
    when(context.createBundle(longs)).thenReturn(output);
}
@Test
public void generatesInitialSplits() throws Exception {
    // Hand out a fresh root bundle per request so each split lands in its own bundle.
    when(context.createRootBundle()).thenAnswer(new Answer<UncommittedBundle<?>>() {
        @Override
        public UncommittedBundle<?> answer(InvocationOnMock invocation) throws Throwable {
            return bundleFactory.createRootBundle();
        }
    });
    int numSplits = 5;
    Collection<CommittedBundle<?>> initialInputs =
        new UnboundedReadEvaluatorFactory.InputProvider(context)
            .getInitialInputs(graph.getProducer(longs), numSplits);
    // CountingSource.unbounded has very good splitting behavior
    assertThat(initialInputs, hasSize(numSplits));
    int readPerSplit = 100;
    int totalSize = numSplits * readPerSplit;
    Set<Long> expectedOutputs =
        ContiguousSet.create(Range.closedOpen(0L, (long) totalSize), DiscreteDomain.longs());
    Collection<Long> readItems = new ArrayList<>(totalSize);
    for (CommittedBundle<?> initialInput : initialInputs) {
        CommittedBundle<UnboundedSourceShard<Long, ?>> shardBundle =
            (CommittedBundle<UnboundedSourceShard<Long, ?>>) initialInput;
        // Each bundle must hold exactly one shard, stamped at the minimum timestamp
        // and assigned to the global window.
        WindowedValue<UnboundedSourceShard<Long, ?>> shard =
            Iterables.getOnlyElement(shardBundle.getElements());
        assertThat(shard.getTimestamp(), equalTo(BoundedWindow.TIMESTAMP_MIN_VALUE));
        assertThat(shard.getWindows(), Matchers.<BoundedWindow>contains(GlobalWindow.INSTANCE));
        UnboundedSource<Long, ?> shardSource = shard.getValue().getSource();
        readItems.addAll(
            SourceTestUtils.readNItemsFromUnstartedReader(
                shardSource.createReader(
                    PipelineOptionsFactory.create(), null /* No starting checkpoint */),
                readPerSplit));
    }
    // Reading readPerSplit elements from every split must cover [0, totalSize) exactly once.
    assertThat(readItems, containsInAnyOrder(expectedOutputs.toArray(new Long[0])));
}
@Test
public void unboundedSourceInMemoryTransformEvaluatorProducesElements() throws Exception {
    when(context.createRootBundle()).thenReturn(bundleFactory.createRootBundle());
    Collection<CommittedBundle<?>> initialInputs =
        new UnboundedReadEvaluatorFactory.InputProvider(context)
            .getInitialInputs(graph.getProducer(longs), 1);
    CommittedBundle<?> inputShards = Iterables.getOnlyElement(initialInputs);
    UnboundedSourceShard<Long, ?> inputShard =
        (UnboundedSourceShard<Long, ?>)
            Iterables.getOnlyElement(inputShards.getElements()).getValue();
    TransformEvaluator<? super UnboundedSourceShard<Long, ?>> evaluator =
        factory.forApplication(graph.getProducer(longs), inputShards);
    evaluator.processElement((WindowedValue) Iterables.getOnlyElement(inputShards.getElements()));
    TransformResult<? super UnboundedSourceShard<Long, ?>> result = evaluator.finishBundle();
    // The unprocessed residual must be stamped no later than "now" so it can be
    // rescheduled immediately, and must carry the same source plus a non-null checkpoint.
    WindowedValue<? super UnboundedSourceShard<Long, ?>> residual =
        Iterables.getOnlyElement(result.getUnprocessedElements());
    assertThat(
        residual.getTimestamp(), Matchers.<ReadableInstant>lessThan(DateTime.now().toInstant()));
    UnboundedSourceShard<Long, ?> residualShard =
        (UnboundedSourceShard<Long, ?>) residual.getValue();
    assertThat(
        residualShard.getSource(),
        Matchers.<UnboundedSource<Long, ?>>equalTo(inputShard.getSource()));
    assertThat(residualShard.getCheckpoint(), not(nullValue()));
    // The first ten counting elements (0..9) arrive, order unspecified.
    assertThat(
        output.commit(Instant.now()).getElements(),
        containsInAnyOrder(
            tgw(1L), tgw(2L), tgw(4L), tgw(8L), tgw(9L), tgw(7L), tgw(6L), tgw(5L), tgw(3L),
            tgw(0L)));
}
/**
 * When the source deduplicates, elements already emitted by a previous invocation must not
 * be emitted again by a later evaluator reading the residual shard.
 */
@Test
public void unboundedSourceWithDuplicatesMultipleCalls() throws Exception {
    Long[] outputs = new Long[20];
    for (long i = 0L; i < 20L; i++) {
        outputs[(int) i] = i % 5L; // only five distinct values, each repeated four times
    }
    TestUnboundedSource<Long> source =
        new TestUnboundedSource<>(BigEndianLongCoder.of(), outputs);
    source.dedupes = true;
    PCollection<Long> pcollection = p.apply(Read.from(source));
    AppliedPTransform<?, ?, ?> sourceTransform = getProducer(pcollection);
    when(context.createRootBundle()).thenReturn(bundleFactory.createRootBundle());
    Collection<CommittedBundle<?>> initialInputs =
        new UnboundedReadEvaluatorFactory.InputProvider(context)
            .getInitialInputs(sourceTransform, 1);
    UncommittedBundle<Long> output = bundleFactory.createBundle(pcollection);
    when(context.createBundle(pcollection)).thenReturn(output);
    CommittedBundle<?> inputBundle = Iterables.getOnlyElement(initialInputs);
    TransformEvaluator<UnboundedSourceShard<Long, TestCheckpointMark>> evaluator =
        factory.forApplication(sourceTransform, inputBundle);
    for (WindowedValue<?> value : inputBundle.getElements()) {
        evaluator.processElement(
            (WindowedValue<UnboundedSourceShard<Long, TestCheckpointMark>>) value);
    }
    TransformResult<UnboundedSourceShard<Long, TestCheckpointMark>> result =
        evaluator.finishBundle();
    // First pass: each of the five distinct values is emitted exactly once.
    assertThat(
        output.commit(Instant.now()).getElements(),
        containsInAnyOrder(tgw(1L), tgw(2L), tgw(4L), tgw(3L), tgw(0L)));

    // Second pass over the residual shard: everything is a duplicate, so nothing is output.
    // BUG FIX: this bundle was previously created for (and stubbed against) the unrelated
    // `longs` PCollection from setup(), so the evaluator's output still went to the first
    // bundle and the emptiness assertion was vacuous. Stub `pcollection` instead; the
    // later when(...) overrides the earlier stubbing for the same call.
    UncommittedBundle<Long> secondOutput = bundleFactory.createBundle(pcollection);
    when(context.createBundle(pcollection)).thenReturn(secondOutput);
    TransformEvaluator<UnboundedSourceShard<Long, TestCheckpointMark>> secondEvaluator =
        factory.forApplication(sourceTransform, inputBundle);
    WindowedValue<UnboundedSourceShard<Long, TestCheckpointMark>> residual =
        (WindowedValue<UnboundedSourceShard<Long, TestCheckpointMark>>)
            Iterables.getOnlyElement(result.getUnprocessedElements());
    secondEvaluator.processElement(residual);
    secondEvaluator.finishBundle();
    assertThat(
        secondOutput.commit(Instant.now()).getElements(),
        Matchers.<WindowedValue<Long>>emptyIterable());
}
/**
 * A reader with no elements available must still yield a residual shard — with an advanced
 * timestamp and a retained reader — even though it produced no output.
 */
@Test
public void noElementsAvailableReaderIncludedInResidual() throws Exception {
    // Read with a very slow rate so by the second read there are no more elements
    PCollection<Long> pcollection =
        p.apply(Read.from(new TestUnboundedSource<>(VarLongCoder.of(), 1L)));
    AppliedPTransform<?, ?, ?> sourceTransform = DirectGraphs.getProducer(pcollection);
    when(context.createRootBundle()).thenReturn(bundleFactory.createRootBundle());
    Collection<CommittedBundle<?>> initialInputs =
        new UnboundedReadEvaluatorFactory.InputProvider(context)
            .getInitialInputs(sourceTransform, 1);
    // Process the initial shard. This might produce some output, and will produce a residual shard
    // which should produce no output when read from within the following day.
    when(context.createBundle(pcollection)).thenReturn(bundleFactory.createBundle(pcollection));
    CommittedBundle<?> inputBundle = Iterables.getOnlyElement(initialInputs);
    TransformEvaluator<UnboundedSourceShard<Long, TestCheckpointMark>> evaluator =
        factory.forApplication(sourceTransform, inputBundle);
    for (WindowedValue<?> value : inputBundle.getElements()) {
        evaluator.processElement(
            (WindowedValue<UnboundedSourceShard<Long, TestCheckpointMark>>) value);
    }
    TransformResult<UnboundedSourceShard<Long, TestCheckpointMark>> result =
        evaluator.finishBundle();
    // Read from the residual of the first read. This should not produce any output, but should
    // include a residual shard in the result.
    // BUG FIX: this bundle was previously created for (and stubbed against) the unrelated
    // `longs` PCollection from setup(), so the emptiness assertion below could never
    // observe this evaluator's output. Stub `pcollection` instead; the later when(...)
    // overrides the earlier stubbing for the same call.
    UncommittedBundle<Long> secondOutput = bundleFactory.createBundle(pcollection);
    when(context.createBundle(pcollection)).thenReturn(secondOutput);
    TransformEvaluator<UnboundedSourceShard<Long, TestCheckpointMark>> secondEvaluator =
        factory.forApplication(sourceTransform, inputBundle);
    WindowedValue<UnboundedSourceShard<Long, TestCheckpointMark>> residual =
        (WindowedValue<UnboundedSourceShard<Long, TestCheckpointMark>>)
            Iterables.getOnlyElement(result.getUnprocessedElements());
    secondEvaluator.processElement(residual);
    TransformResult<UnboundedSourceShard<Long, TestCheckpointMark>> secondResult =
        secondEvaluator.finishBundle();
    // Sanity check that nothing was output (The test would have to run for more than a day to do
    // so correctly.)
    assertThat(
        secondOutput.commit(Instant.now()).getElements(),
        Matchers.<WindowedValue<Long>>emptyIterable());
    // Test that even though the reader produced no outputs, there is still a residual shard with
    // the updated watermark.
    WindowedValue<UnboundedSourceShard<Long, TestCheckpointMark>> unprocessed =
        (WindowedValue<UnboundedSourceShard<Long, TestCheckpointMark>>)
            Iterables.getOnlyElement(secondResult.getUnprocessedElements());
    assertThat(
        unprocessed.getTimestamp(), Matchers.<ReadableInstant>greaterThan(residual.getTimestamp()));
    assertThat(unprocessed.getValue().getExistingReader(), not(nullValue()));
}
/** With reader reuse always enabled, no reader may be closed across two evaluations of the same shard. */
@Test
public void evaluatorReusesReader() throws Exception {
    ContiguousSet<Long> elems = ContiguousSet.create(Range.closed(0L, 20L), DiscreteDomain.longs());
    TestUnboundedSource<Long> source =
        new TestUnboundedSource<>(BigEndianLongCoder.of(), elems.toArray(new Long[0]));
    PCollection<Long> pcollection = p.apply(Read.from(source));
    DirectGraph graph = DirectGraphs.getGraph(p);
    AppliedPTransform<?, ?, ?> sourceTransform =
        graph.getProducer(pcollection);
    when(context.createRootBundle()).thenReturn(bundleFactory.createRootBundle());
    UncommittedBundle<Long> output = bundleFactory.createBundle(pcollection);
    when(context.createBundle(pcollection)).thenReturn(output);
    WindowedValue<UnboundedSourceShard<Long, TestCheckpointMark>> shard =
        WindowedValue.valueInGlobalWindow(
            UnboundedSourceShard.unstarted(source, NeverDeduplicator.create()));
    CommittedBundle<UnboundedSourceShard<Long, TestCheckpointMark>> inputBundle =
        bundleFactory
            .<UnboundedSourceShard<Long, TestCheckpointMark>>createRootBundle()
            .add(shard)
            .commit(Instant.now());
    // Reuse probability 1.0 makes the evaluator hand its open reader to the residual shard.
    UnboundedReadEvaluatorFactory factory =
        new UnboundedReadEvaluatorFactory(context, 1.0 /* Always reuse */);
    new UnboundedReadEvaluatorFactory.InputProvider(context).getInitialInputs(sourceTransform, 1);
    TransformEvaluator<UnboundedSourceShard<Long, TestCheckpointMark>> evaluator =
        factory.forApplication(sourceTransform, inputBundle);
    evaluator.processElement(shard);
    TransformResult<UnboundedSourceShard<Long, TestCheckpointMark>> result =
        evaluator.finishBundle();
    CommittedBundle<UnboundedSourceShard<Long, TestCheckpointMark>> residual =
        inputBundle.withElements(
            (Iterable<WindowedValue<UnboundedSourceShard<Long, TestCheckpointMark>>>)
                result.getUnprocessedElements());
    TransformEvaluator<UnboundedSourceShard<Long, TestCheckpointMark>> secondEvaluator =
        factory.forApplication(sourceTransform, residual);
    secondEvaluator.processElement(Iterables.getOnlyElement(residual.getElements()));
    secondEvaluator.finishBundle();
    // NOTE(review): asserts on the static readerClosedCount counter; assumes no earlier
    // test in the same JVM has closed a reader — confirm the counter is reset per test.
    assertThat(TestUnboundedSource.readerClosedCount, equalTo(0));
}
/**
 * With reader reuse disabled, each evaluation must close its reader, and the residual's
 * checkpoint must be finalized so the next evaluation resumes from it.
 */
@Test
public void evaluatorClosesReaderAndResumesFromCheckpoint() throws Exception {
    ContiguousSet<Long> elems = ContiguousSet.create(Range.closed(0L, 20L), DiscreteDomain.longs());
    TestUnboundedSource<Long> source =
        new TestUnboundedSource<>(BigEndianLongCoder.of(), elems.toArray(new Long[0]));
    PCollection<Long> pcollection = p.apply(Read.from(source));
    AppliedPTransform<?, ?, ?> sourceTransform =
        DirectGraphs.getGraph(p).getProducer(pcollection);
    when(context.createRootBundle()).thenReturn(bundleFactory.createRootBundle());
    UncommittedBundle<Long> output = bundleFactory.createBundle(pcollection);
    when(context.createBundle(pcollection)).thenReturn(output);
    WindowedValue<UnboundedSourceShard<Long, TestCheckpointMark>> shard =
        WindowedValue.valueInGlobalWindow(
            UnboundedSourceShard.unstarted(source, NeverDeduplicator.create()));
    CommittedBundle<UnboundedSourceShard<Long, TestCheckpointMark>> inputBundle =
        bundleFactory
            .<UnboundedSourceShard<Long, TestCheckpointMark>>createRootBundle()
            .add(shard)
            .commit(Instant.now());
    // Reuse probability 0.0 forces a fresh reader (and a close) on every evaluation.
    UnboundedReadEvaluatorFactory factory =
        new UnboundedReadEvaluatorFactory(context, 0.0 /* never reuse */);
    TransformEvaluator<UnboundedSourceShard<Long, TestCheckpointMark>> evaluator =
        factory.forApplication(sourceTransform, inputBundle);
    evaluator.processElement(shard);
    TransformResult<UnboundedSourceShard<Long, TestCheckpointMark>> result =
        evaluator.finishBundle();
    CommittedBundle<UnboundedSourceShard<Long, TestCheckpointMark>> residual =
        inputBundle.withElements(
            (Iterable<WindowedValue<UnboundedSourceShard<Long, TestCheckpointMark>>>)
                result.getUnprocessedElements());
    TransformEvaluator<UnboundedSourceShard<Long, TestCheckpointMark>> secondEvaluator =
        factory.forApplication(sourceTransform, residual);
    secondEvaluator.processElement(Iterables.getOnlyElement(residual.getElements()));
    secondEvaluator.finishBundle();
    // NOTE(review): relies on the static readerClosedCount counter being 0 at test start —
    // confirm the counter is reset between tests for reliable isolation.
    assertThat(TestUnboundedSource.readerClosedCount, equalTo(2));
    assertThat(
        Iterables.getOnlyElement(residual.getElements()).getValue().getCheckpoint().isFinalized(),
        is(true));
}
/**
 * An exception thrown while closing the reader must propagate out of processElement
 * rather than being swallowed.
 */
@Test
public void evaluatorThrowsInCloseRethrows() throws Exception {
    ContiguousSet<Long> elems = ContiguousSet.create(Range.closed(0L, 20L), DiscreteDomain.longs());
    TestUnboundedSource<Long> source =
        new TestUnboundedSource<>(BigEndianLongCoder.of(), elems.toArray(new Long[0]))
            .throwsOnClose();
    PCollection<Long> pcollection = p.apply(Read.from(source));
    AppliedPTransform<?, ?, ?> sourceTransform =
        DirectGraphs.getGraph(p).getProducer(pcollection);
    when(context.createRootBundle()).thenReturn(bundleFactory.createRootBundle());
    UncommittedBundle<Long> output = bundleFactory.createBundle(pcollection);
    when(context.createBundle(pcollection)).thenReturn(output);
    WindowedValue<UnboundedSourceShard<Long, TestCheckpointMark>> shard =
        WindowedValue.valueInGlobalWindow(
            UnboundedSourceShard.unstarted(source, NeverDeduplicator.create()));
    CommittedBundle<UnboundedSourceShard<Long, TestCheckpointMark>> inputBundle =
        bundleFactory
            .<UnboundedSourceShard<Long, TestCheckpointMark>>createRootBundle()
            .add(shard)
            .commit(Instant.now());
    // Never reusing the reader guarantees the throwing close() path is exercised.
    UnboundedReadEvaluatorFactory factory =
        new UnboundedReadEvaluatorFactory(context, 0.0 /* never reuse */);
    TransformEvaluator<UnboundedSourceShard<Long, TestCheckpointMark>> evaluator =
        factory.forApplication(sourceTransform, inputBundle);
    thrown.expect(IOException.class);
    thrown.expectMessage("throws on close");
    evaluator.processElement(shard);
}
/**
 * A terse alias for producing timestamped longs in the {@link GlobalWindow}, where
 * the timestamp is the epoch offset by the value of the element.
 */
private static WindowedValue<Long> tgw(Long elem) {
Instant timestamp = new Instant(elem);
return WindowedValue.timestampedValueInGlobalWindow(elem, timestamp);
}
/** Maps each long element to an {@link Instant} at that many milliseconds since the epoch. */
private static class LongToInstantFn implements SerializableFunction<Long, Instant> {
@Override
public Instant apply(Long value) {
return new Instant(value.longValue());
}
}
/**
 * An {@link UnboundedSource} over a fixed element list, instrumented with static counters
 * (reader closes / advances) so tests can assert on reader lifecycle.
 * NOTE: the counters are static and reset in the constructor, so tests must not build
 * sources concurrently.
 */
private static class TestUnboundedSource<T> extends UnboundedSource<T, TestCheckpointMark> {
private static int getWatermarkCalls = 0;
static int readerClosedCount;
static int readerAdvancedCount;
private final Coder<T> coder;
private final List<T> elems;
private boolean dedupes = false;
private boolean throwOnClose;
public TestUnboundedSource(Coder<T> coder, T... elems) {
this(coder, false, Arrays.asList(elems));
}
// Resets the static lifecycle counters so each newly-built source starts a fresh count.
private TestUnboundedSource(Coder<T> coder, boolean throwOnClose, List<T> elems) {
readerAdvancedCount = 0;
readerClosedCount = 0;
this.coder = coder;
this.elems = elems;
this.throwOnClose = throwOnClose;
}
// Does not actually split: always returns itself as the sole "split".
@Override
public List<? extends UnboundedSource<T, TestCheckpointMark>> split(
int desiredNumSplits, PipelineOptions options) throws Exception {
return ImmutableList.of(this);
}
// Resuming from a checkpoint requires it to have gone through encode/decode first.
@Override
public UnboundedSource.UnboundedReader<T> createReader(
PipelineOptions options, @Nullable TestCheckpointMark checkpointMark) {
checkState(
checkpointMark == null || checkpointMark.decoded,
"Cannot resume from a checkpoint that has not been decoded");
return new TestUnboundedReader(elems, checkpointMark == null ? -1 : checkpointMark.index);
}
@Override
@Nullable
public Coder<TestCheckpointMark> getCheckpointMarkCoder() {
return new TestCheckpointMark.Coder();
}
@Override
public boolean requiresDeduping() {
return dedupes;
}
@Override
public void validate() {}
@Override
public Coder<T> getDefaultOutputCoder() {
return coder;
}
// Returns a copy of this source whose readers throw IOException from close().
public TestUnboundedSource<T> throwsOnClose() {
return new TestUnboundedSource<>(coder, true, elems);
}
// Reader over the element list. 'index' points at the current element; -1 means unstarted.
private class TestUnboundedReader extends UnboundedReader<T> {
private final List<T> elems;
private int index;
private boolean closed = false;
public TestUnboundedReader(List<T> elems, int startIndex) {
this.elems = elems;
this.index = startIndex;
}
@Override
public boolean start() throws IOException {
return advance();
}
// Advances to the next element if any remain; bumps the advance counter either way.
@Override
public boolean advance() throws IOException {
readerAdvancedCount++;
if (index + 1 < elems.size()) {
index++;
return true;
}
return false;
}
// Watermark strictly increases across calls even when index does not move.
@Override
public Instant getWatermark() {
getWatermarkCalls++;
return new Instant(index + getWatermarkCalls);
}
@Override
public CheckpointMark getCheckpointMark() {
return new TestCheckpointMark(index);
}
@Override
public UnboundedSource<T, ?> getCurrentSource() {
TestUnboundedSource<T> source = TestUnboundedSource.this;
return source;
}
@Override
public T getCurrent() throws NoSuchElementException {
return elems.get(index);
}
// Timestamp is the epoch offset by the element's position in the list.
@Override
public Instant getCurrentTimestamp() throws NoSuchElementException {
return new Instant(index);
}
// Record id is the encoded element, enabling deduplication when requiresDeduping() is set.
@Override
public byte[] getCurrentRecordId() {
try {
return CoderUtils.encodeToByteArray(coder, getCurrent());
} catch (CoderException e) {
throw new RuntimeException(e);
}
}
// Counts closes, rejects double-close, and optionally fails to let tests exercise
// the evaluator's error propagation.
@Override
public void close() throws IOException {
try {
readerClosedCount++;
// Enforce the AutoCloseable contract. Close is not idempotent.
assertThat(closed, is(false));
if (throwOnClose) {
throw new IOException(String.format("%s throws on close", TestUnboundedSource.this));
}
} finally {
closed = true;
}
}
}
}
/**
 * Checkpoint mark recording the reader's position. Tracks whether it has been finalized
 * (legal at most once, and only before decoding) and whether it was produced by decoding,
 * which {@code createReader} requires before resuming.
 */
private static class TestCheckpointMark implements CheckpointMark {
final int index;
private boolean finalized = false;
private boolean decoded = false;
private TestCheckpointMark(int index) {
this.index = index;
}
// Finalization is only legal once, and only on an instance that was never decoded.
@Override
public void finalizeCheckpoint() throws IOException {
checkState(
!finalized, "%s was finalized more than once", TestCheckpointMark.class.getSimpleName());
checkState(
!decoded,
"%s was finalized after being decoded",
TestCheckpointMark.class.getSimpleName());
finalized = true;
}
boolean isFinalized() {
return finalized;
}
// Round-trips only the index; decoded marks are flagged so createReader can tell them apart.
public static class Coder extends CustomCoder<TestCheckpointMark> {
@Override
public void encode(
TestCheckpointMark value,
OutputStream outStream,
org.apache.beam.sdk.coders.Coder.Context context)
throws IOException {
VarInt.encode(value.index, outStream);
}
@Override
public TestCheckpointMark decode(
InputStream inStream, org.apache.beam.sdk.coders.Coder.Context context)
throws IOException {
TestCheckpointMark decoded = new TestCheckpointMark(VarInt.decodeInt(inStream));
decoded.decoded = true;
return decoded;
}
}
}
}
| |
/**
* Licensed to the Rhiot under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.rhiot.component.pi4j.i2c.driver;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import org.apache.camel.Exchange;
import org.apache.camel.Processor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.pi4j.io.i2c.I2CDevice;
import io.rhiot.component.pi4j.i2c.I2CConstants;
import io.rhiot.component.pi4j.i2c.I2CConsumer;
import io.rhiot.component.pi4j.i2c.I2CEndpoint;
/**
* inspired by
* https://github.com/richards-tech/RTIMULibCS/blob/master/RTIMULibCS/RTIMULibCS
* /Devices/HTS221/HTS221HumiditySensor.cs and
* https://github.com/richards-tech/RTIMULib/blob/master/RTIMULib/IMUDrivers/
* RTHumidityHTS221.cpp
*/
public final class HTS221Consumer extends I2CConsumer {

    // 'transient' was dropped from the logger: it is meaningless on a static field.
    private static final Logger LOG = LoggerFactory.getLogger(HTS221Consumer.class);

    // NOTE(review): these register addresses are effectively constants and would ideally be
    // declared 'final'; left mutable to avoid breaking any external code that assigns them.
    public static byte HTS221_ADDRESS = 0x5F;
    public static byte AV_CONF = 0x10;
    public static byte CTRL_REG1 = 0x20;
    public static byte CTRL_REG2 = 0x21;
    public static byte CTRL_REG3 = 0x22;
    public static byte STATUS_REG = 0x27;

    /**
     * or H_OUT
     */
    public static byte HUMIDITY_OUT_L = 0x28;
    public static byte HUMIDITY_OUT_H = 0x29;

    /**
     * or T_OUT
     */
    public static byte TEMP_OUT_L = 0x2A;
    public static byte TEMP_OUT_H = 0x2B;

    // Factory calibration registers (written by ST at production time).
    public static byte H0_rH_x2 = 0x30;
    public static byte H1_rH_x2 = 0x31;
    public static byte T0_degC_x8 = 0x32;
    public static byte T1_degC_x8 = 0x33;
    public static byte T1_T0_msb = 0x35;
    public static byte T0_OUT = 0x3C; // 0x3D
    public static byte T1_OUT = 0x3E; // 0x3F
    public static byte H0_T0_OUT = 0x36; // 0x37
    public static byte H1_T0_OUT = 0x3A; // 0x3B

    // Bit masks for STATUS_REG data-available flags.
    public static byte TEMP_DATA_AVAILABLE_MASK = 0x01;
    public static byte HUMI_DATA_AVAILABLE_MASK = 0x02;

    // Scratch buffer reused for all 2-byte register reads; the sensor is little-endian.
    // Not thread-safe: all reads are expected to happen on the consumer's own thread.
    private ByteBuffer buffer = ByteBuffer.allocate(2);

    // Linear-interpolation coefficients derived from the factory calibration registers.
    private double internalHumiditySlope;
    private double internalHumidityYIntercept;
    private double internalTemperatureSlope;
    private double internalTemperatureYIntercept;

    private HTS221TemperatureResolutionMode temperatureMode = HTS221TemperatureResolutionMode.AVGT_16;
    private HTS221HumidityResolutionMode humidityMode = HTS221HumidityResolutionMode.AVGH_32;
    private HTS221ControlRegistry1 bdu = HTS221ControlRegistry1.BDU_UPDATE_AFTER_READING;
    private HTS221ControlRegistry1 odr = HTS221ControlRegistry1.ODR_12DOT5_HZ;
    private HTS221ControlRegistry1 pd = HTS221ControlRegistry1.PD_ACTIVE;

    /**
     * We use the same registry
     */
    public static byte WHO_AM_I = 0x0F;

    public HTS221Consumer(I2CEndpoint endpoint, Processor processor, I2CDevice device) {
        super(endpoint, processor, device);
        buffer.order(ByteOrder.LITTLE_ENDIAN);
    }

    /**
     * Reads humidity and temperature from the sensor and sets an {@link HTS221Value}
     * as the exchange's message body.
     */
    @Override
    protected void createBody(Exchange exchange) throws IOException {
        HTS221Value body = new HTS221Value();
        body.setHumidity(getHumidity());
        body.setTemperature(readTemperature());
        LOG.debug("" + body);
        exchange.getIn().setBody(body);
    }

    /**
     * Returns true when every bit of {@code expected} is set in {@code status},
     * e.g. when testing the data-available masks against STATUS_REG.
     * (Parameter renamed from the original typo "exected".)
     */
    public static boolean available(byte status, byte expected) {
        return ((status & expected) == expected);
    }

    /**
     * Powers the sensor up: writes CTRL_REG1 (power-down, block-data-update, output data
     * rate) and AV_CONF (averaging/resolution), then reads the factory calibration.
     */
    @Override
    public void doStart() throws Exception {
        super.doStart();
        byte result = (byte) (0xff & read(WHO_AM_I));
        LOG.debug("WHO_AM_I : " + toHexToString(result));
        // Bit layout implied by the shifts: PD at bit 7, BDU at bit 2, ODR at bits 1:0
        // — confirm against the HTS221 datasheet.
        byte crtl1 = (byte) (odr.value | bdu.value << 2 | pd.value << 7);
        byte avconf = (byte) (humidityMode.average | temperatureMode.average << 3);
        LOG.debug("crtl1 : " + toHexToString(crtl1));
        LOG.debug("avconf : " + toHexToString(avconf));
        write(CTRL_REG1, crtl1);
        write(AV_CONF, avconf);
        temperatureCalibration();
        humidityCalibration();
    }

    /**
     * Puts the sensor into power-down mode by clearing the PD bit (bit 7) of CTRL_REG1
     * while preserving the other bits. Assumes PD_POWER_DOWN.value encodes the
     * powered-down bit value — confirm against HTS221ControlRegistry1.
     */
    @Override
    public void doStop() throws Exception {
        super.doStop();
        byte crtl1 = (byte) read(CTRL_REG1);
        byte maskToPowerDown = (byte) (0xff ^ (~HTS221ControlRegistry1.PD_POWER_DOWN.value << 7));
        crtl1 &= maskToPowerDown;
        write(CTRL_REG1, crtl1);
    }

    /**
     * Reads the raw 16-bit humidity sample and converts it to a calibrated value using
     * the line computed in {@link #humidityCalibration()}.
     */
    public double getHumidity() throws IOException {
        read(HUMIDITY_OUT_L | I2CConstants.MULTI_BYTE_READ_MASK, buffer.array(), 0, 2);
        short rawHumidity = buffer.getShort(0);
        return (rawHumidity * internalHumiditySlope + internalHumidityYIntercept);
    }

    /**
     * Reads the raw 16-bit temperature sample and converts it to a calibrated value
     * using the line computed in {@link #temperatureCalibration()}.
     */
    public double readTemperature() throws IOException {
        read(TEMP_OUT_L | I2CConstants.MULTI_BYTE_READ_MASK, buffer.array(), 0, 2);
        // Local renamed from TEMP_OUT_L: the original shadowed the register constant.
        short rawTemperature = buffer.getShort(0);
        LOG.debug("TEMP_OUT_L " + rawTemperature);
        return (rawTemperature * internalTemperatureSlope + internalTemperatureYIntercept);
    }

    /**
     * Computes the humidity calibration slope and y-intercept from the factory
     * calibration registers (two reference humidities and their raw ADC readings).
     */
    public void humidityCalibration() throws Exception {
        // H0_rH_x2: reference humidity 0, stored as value * 2 (single byte, zero-extended).
        read(H0_rH_x2, buffer.array(), 0, 1);
        buffer.put(1, (byte) 0);
        short rawH0x2 = buffer.getShort(0);
        LOG.debug("H0_H_2 " + rawH0x2);
        double h0 = rawH0x2 / 2.0;
        LOG.debug("H0 " + h0);
        // H1_rH_x2: reference humidity 1, stored as value * 2.
        read(H1_rH_x2, buffer.array(), 0, 1);
        buffer.put(1, (byte) 0);
        short rawH1x2 = buffer.getShort(0);
        LOG.debug("H1_H_2 " + rawH1x2);
        double h1 = rawH1x2 / 2.0;
        LOG.debug("H1 " + h1);
        // Raw ADC outputs corresponding to H0 and H1. Locals renamed: the originals
        // shadowed the H0_T0_OUT / H1_T0_OUT register constants used in the reads above.
        read(H0_T0_OUT | I2CConstants.MULTI_BYTE_READ_MASK, buffer.array(), 0, 2);
        short h0T0Out = buffer.getShort(0);
        LOG.debug("H0_T0_OUT: " + toHexToString(h0T0Out));
        read(H1_T0_OUT | I2CConstants.MULTI_BYTE_READ_MASK, buffer.array(), 0, 2);
        short h1T0Out = buffer.getShort(0);
        LOG.debug("H1_T0_OUT: " + toHexToString(h1T0Out));
        // Two-point linear interpolation: humidity = raw * slope + intercept.
        internalHumiditySlope = (h1 - h0) / (h1T0Out - h0T0Out);
        internalHumidityYIntercept = h0 - (internalHumiditySlope * h0T0Out);
    }

    /**
     * Computes the temperature calibration slope and y-intercept from the factory
     * calibration registers (two reference temperatures and their raw ADC readings).
     */
    public void temperatureCalibration() throws Exception {
        // T1_T0_msb carries the extra MSBs of both T0_degC_x8 and T1_degC_x8.
        byte tempMSB = (byte) (0x0f & read(T1_T0_msb));
        LOG.debug("T1/T0 msb:" + toHexToString(tempMSB));
        // retrieve T0 / (Msb T0_degC U T0_degC)_x8
        byte temp0LSB = (byte) (0xff & read(T0_degC_x8));
        buffer.put(0, temp0LSB);
        buffer.put(1, (byte) (tempMSB & 0x03));
        short t0C8 = buffer.getShort(0);
        LOG.debug("T0_C_8:" + toHexToString(t0C8));
        double t0 = t0C8 / 8.0;
        LOG.debug("T0 " + t0);
        // retrieve T1 / (Msb T1_degC U T1_degC)_x8
        byte temp1LSB = (byte) (read(T1_degC_x8));
        buffer.put(0, temp1LSB);
        buffer.put(1, (byte) ((byte) (tempMSB & 0x0C) >> 2));
        short t1C8 = buffer.getShort(0);
        LOG.debug("T1_C_8:" + toHexToString(t1C8));
        double t1 = t1C8 / 8.0;
        LOG.debug("T1 " + t1);
        // Raw ADC output at T0. Local renamed: the original shadowed the T0_OUT constant.
        read(T0_OUT | I2CConstants.MULTI_BYTE_READ_MASK, buffer.array(), 0, 2);
        short t0Out = buffer.getShort(0);
        LOG.debug("T0_OUT " + toHexToString(t0Out));
        // Raw ADC output at T1. Local renamed: the original shadowed the T1_OUT constant.
        read(T1_OUT | I2CConstants.MULTI_BYTE_READ_MASK, buffer.array(), 0, 2);
        short t1Out = buffer.getShort(0);
        LOG.debug("T1_OUT " + toHexToString(t1Out));
        // Temperature calibration slope
        internalTemperatureSlope = ((t1 - t0) / (t1Out - t0Out));
        // Temperature calibration y intercept
        internalTemperatureYIntercept = (t0 - (internalTemperatureSlope * t0Out));
    }

    /**
     * Renders a byte as a zero-padded binary string, e.g. {@code 0b00000001}.
     * NOTE(review): despite the name the output is binary, not hex; the name is kept
     * because it is part of the public API.
     */
    public static String toHexToString(byte i) {
        return String.format("0b%8s", Integer.toBinaryString(0x000000ff & i)).replace(' ', '0');
    }

    /** Renders a short as a zero-padded binary string, e.g. {@code 0b0000000000000001}. */
    public static String toHexToString(short i) {
        return String.format("0b%16s", Integer.toBinaryString(0x0000ffff & i)).replace(' ', '0');
    }

    /** @return the configured temperature averaging/resolution mode. */
    public HTS221TemperatureResolutionMode getTemperatureMode() {
        return temperatureMode;
    }

    public void setTemperatureMode(HTS221TemperatureResolutionMode temperatureMode) {
        this.temperatureMode = temperatureMode;
    }

    /** @return the configured humidity averaging/resolution mode. */
    public HTS221HumidityResolutionMode getHumidityMode() {
        return humidityMode;
    }

    public void setHumidityMode(HTS221HumidityResolutionMode humidityMode) {
        this.humidityMode = humidityMode;
    }

    /** @return the block-data-update setting written to CTRL_REG1 on start. */
    public HTS221ControlRegistry1 getBdu() {
        return bdu;
    }

    public void setBdu(HTS221ControlRegistry1 bdu) {
        this.bdu = bdu;
    }

    /** @return the output-data-rate setting written to CTRL_REG1 on start. */
    public HTS221ControlRegistry1 getOdr() {
        return odr;
    }

    public void setOdr(HTS221ControlRegistry1 odr) {
        this.odr = odr;
    }

    /** @return the power-down setting written to CTRL_REG1 on start. */
    public HTS221ControlRegistry1 getPd() {
        return pd;
    }

    public void setPd(HTS221ControlRegistry1 pd) {
        this.pd = pd;
    }
}
| |
/*
* Copyright (c) 2015, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.wso2.mdm.integration.device.operation;
import com.google.gson.JsonArray;
import com.google.gson.JsonObject;
import com.google.gson.JsonPrimitive;
import junit.framework.Assert;
import org.apache.commons.httpclient.HttpStatus;
import org.testng.annotations.BeforeTest;
import org.testng.annotations.Test;
import org.wso2.carbon.automation.engine.context.TestUserMode;
import org.wso2.carbon.automation.test.utils.http.client.HttpResponse;
import org.wso2.mdm.integration.common.*;
/**
* This contain tests to check operations supported by Android. Test are executed against a previously enrolled device
*/
public class AndroidOperation extends TestBase {

    private RestClient client;

    /**
     * Obtains an OAuth token, builds the REST client and enrolls an Android device so
     * the operation tests below target a known device id.
     */
    @BeforeTest(alwaysRun = true, groups = {Constants.AndroidEnrollment.ENROLLMENT_GROUP})
    public void initTest() throws Exception {
        super.init(TestUserMode.SUPER_TENANT_ADMIN);
        String accessTokenString = "Bearer " + OAuthUtil.getOAuthToken(backendHTTPURL, backendHTTPSURL);
        this.client = new RestClient(backendHTTPURL, Constants.APPLICATION_JSON, accessTokenString);
        //Enroll a device
        JsonObject enrollmentData = PayloadGenerator.getJsonPayload(
                Constants.AndroidEnrollment.ENROLLMENT_PAYLOAD_FILE_NAME,
                Constants.HTTP_METHOD_POST);
        enrollmentData.addProperty(Constants.DEVICE_IDENTIFIER_KEY, Constants.DEVICE_ID);
        client.post(Constants.AndroidEnrollment.ENROLLMENT_ENDPOINT, enrollmentData.toString());
    }

    /**
     * Posts the generic command-operation payload to the given endpoint and asserts
     * that the operation was created (HTTP 201).
     */
    private void assertCommandOperation(String endpoint) throws Exception {
        HttpResponse response = client.post(endpoint,
                Constants.AndroidOperations.COMMAND_OPERATION_PAYLOAD);
        Assert.assertEquals(HttpStatus.SC_CREATED, response.getResponseCode());
    }

    /**
     * Loads the named operation payload, attaches the enrolled device id, posts it to
     * the given endpoint and asserts that the operation was created (HTTP 201).
     */
    private void assertPayloadOperation(String operationName, String endpoint) throws Exception {
        JsonObject operationData = PayloadGenerator.getJsonPayload(
                Constants.AndroidOperations.OPERATION_PAYLOAD_FILE_NAME, operationName);
        JsonArray deviceIds = new JsonArray();
        deviceIds.add(new JsonPrimitive(Constants.DEVICE_ID));
        operationData.add(Constants.DEVICE_IDENTIFIERS_KEY, deviceIds);
        HttpResponse response = client.post(endpoint, operationData.toString());
        Assert.assertEquals(HttpStatus.SC_CREATED, response.getResponseCode());
    }

    @Test(groups = Constants.AndroidOperations.OPERATIONS_GROUP, description = "Test Android device lock operation.")
    public void testLock() throws Exception {
        assertCommandOperation(Constants.AndroidOperations.LOCK_ENDPOINT);
    }

    /*
    @Test(groups = Constants.AndroidOperations.OPERATIONS_GROUP, description = "Test Android getPendingOperations.")
    public void testGetPendingOperations() throws Exception {
        HttpResponse response = client.put(Constants.AndroidOperations.OPERATION_ENDPOINT + Constants.DEVICE_ID,
                "[]");
        Assert.assertEquals(response.getResponseCode(), HttpStatus.SC_CREATED);
    }*/

    @Test(groups = Constants.AndroidOperations.OPERATIONS_GROUP, description = "Test Android device location operation.")
    public void testLocation() throws Exception {
        assertCommandOperation(Constants.AndroidOperations.LOCATION_ENDPOINT);
    }

    @Test(groups = Constants.AndroidOperations.OPERATIONS_GROUP, description = "Test Android device clear password " +
            "operation.")
    public void testClearPassword() throws Exception {
        assertCommandOperation(Constants.AndroidOperations.CLEAR_PASSWORD_ENDPOINT);
    }

    @Test(groups = Constants.AndroidOperations.OPERATIONS_GROUP, description = "Test Android device camera operation.")
    public void testCamera() throws Exception {
        assertPayloadOperation(Constants.AndroidOperations.CAMERA_OPERATION,
                Constants.AndroidOperations.CAMERA_ENDPOINT);
    }

    @Test(groups = Constants.AndroidOperations.OPERATIONS_GROUP, description = "Test Android device information operation.")
    public void testDeviceInfo() throws Exception {
        assertCommandOperation(Constants.AndroidOperations.DEVICE_INFO_ENDPOINT);
    }

    @Test(groups = Constants.AndroidOperations.OPERATIONS_GROUP, description = "Test Android enterprise-wipe operation.")
    public void testEnterpriseWipe() throws Exception {
        assertCommandOperation(Constants.AndroidOperations.ENTERPRISE_WIPE_ENDPOINT);
    }

    @Test(groups = Constants.AndroidOperations.OPERATIONS_GROUP, description = "Test Android wipe data operation.")
    public void testWipeData() throws Exception {
        assertPayloadOperation(Constants.AndroidOperations.WIPE_DATA_OPERATION,
                Constants.AndroidOperations.WIPE_DATA_ENDPOINT);
    }

    @Test(groups = Constants.AndroidOperations.OPERATIONS_GROUP, description = "Test Android application list operation.")
    public void testApplicationList() throws Exception {
        assertCommandOperation(Constants.AndroidOperations.APPLICATION_LIST_ENDPOINT);
    }

    @Test(groups = Constants.AndroidOperations.OPERATIONS_GROUP, description = "Test Android ring operation.")
    public void testRing() throws Exception {
        assertCommandOperation(Constants.AndroidOperations.RING_ENDPOINT);
    }

    @Test(groups = Constants.AndroidOperations.OPERATIONS_GROUP, description = "Test Android mute operation.")
    public void testMute() throws Exception {
        assertCommandOperation(Constants.AndroidOperations.MUTE_ENDPOINT);
    }

    @Test(groups = Constants.AndroidOperations.OPERATIONS_GROUP, description = "Test Android install apps operation.")
    public void testInstallApps() throws Exception {
        assertPayloadOperation(Constants.AndroidOperations.INSTALL_APPS_OPERATION,
                Constants.AndroidOperations.INSTALL_APPS_ENDPOINT);
    }

    @Test(groups = Constants.AndroidOperations.OPERATIONS_GROUP, description = "Test Android uninstall apps operation.")
    public void testUninstallApps() throws Exception {
        // NOTE(review): reuses INSTALL_APPS_OPERATION as the original code did; presumably
        // an UNINSTALL_APPS_OPERATION payload was intended — confirm against Constants.
        assertPayloadOperation(Constants.AndroidOperations.INSTALL_APPS_OPERATION,
                Constants.AndroidOperations.UNINSTALL_APPS_ENDPOINT);
    }

    @Test(groups = Constants.AndroidOperations.OPERATIONS_GROUP, description = "Test Android blacklist apps operation.")
    public void testBlacklistApps() throws Exception {
        // NOTE(review): reuses INSTALL_APPS_OPERATION as the original code did; presumably
        // a BLACKLIST_APPS_OPERATION payload was intended — confirm against Constants.
        assertPayloadOperation(Constants.AndroidOperations.INSTALL_APPS_OPERATION,
                Constants.AndroidOperations.BLACKLIST_APPS_ENDPOINT);
    }

    @Test(groups = Constants.AndroidOperations.OPERATIONS_GROUP, description = "Test Android notification operation.")
    public void testNotification() throws Exception {
        assertPayloadOperation(Constants.AndroidOperations.NOTIFICATION_OPERATION,
                Constants.AndroidOperations.NOTIFICATION_ENDPOINT);
    }

    @Test(groups = Constants.AndroidOperations.OPERATIONS_GROUP, description = "Test Android WiFi operation.")
    public void testWiFi() throws Exception {
        assertPayloadOperation(Constants.AndroidOperations.WIFI_OPERATION,
                Constants.AndroidOperations.WIFI_ENDPOINT);
    }

    @Test(groups = Constants.AndroidOperations.OPERATIONS_GROUP, description = "Test Android encrypt operation.")
    public void testEncrypt() throws Exception {
        assertPayloadOperation(Constants.AndroidOperations.ENCRYPT_OPERATION,
                Constants.AndroidOperations.ENCRYPT_ENDPOINT);
    }

    @Test(groups = Constants.AndroidOperations.OPERATIONS_GROUP, description = "Test Android change lock operation.")
    public void testChangeLock() throws Exception {
        assertPayloadOperation(Constants.AndroidOperations.CHANGE_LOCK_OPERATION,
                Constants.AndroidOperations.CHANGE_LOCK_ENDPOINT);
    }

    @Test(groups = Constants.AndroidOperations.OPERATIONS_GROUP, description = "Test Android password policy operation.")
    public void testPasswordPolicy() throws Exception {
        assertPayloadOperation(Constants.AndroidOperations.PASSWORD_POLICY_OPERATION,
                Constants.AndroidOperations.PASSWORD_POLICY_ENDPOINT);
    }

    @Test(groups = Constants.AndroidOperations.OPERATIONS_GROUP, description = "Test Android web clip operation.")
    public void testWebClip() throws Exception {
        assertPayloadOperation(Constants.AndroidOperations.WEB_CLIP_OPERATION,
                Constants.AndroidOperations.WEB_CLIP_ENDPOINT);
    }
}
| |
/*
* DBeaver - Universal Database Manager
* Copyright (C) 2010-2022 DBeaver Corp and others
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jkiss.dbeaver.ext.hana.ui.views;
import org.eclipse.jface.dialogs.IDialogPage;
import org.eclipse.swt.SWT;
import org.eclipse.swt.events.SelectionAdapter;
import org.eclipse.swt.events.SelectionEvent;
import org.eclipse.swt.graphics.Image;
import org.eclipse.swt.layout.GridData;
import org.eclipse.swt.layout.GridLayout;
import org.eclipse.swt.widgets.*;
import org.jkiss.dbeaver.ext.hana.ui.internal.HANAEdition;
import org.jkiss.dbeaver.ext.hana.ui.internal.HANAMessages;
import org.jkiss.dbeaver.model.DBPDataSourceContainer;
import org.jkiss.dbeaver.model.connection.DBPConnectionConfiguration;
import org.jkiss.dbeaver.ui.IDialogPageProvider;
import org.jkiss.dbeaver.ui.UIUtils;
import org.jkiss.dbeaver.ui.dialogs.connection.ConnectionPageWithAuth;
import org.jkiss.dbeaver.ui.dialogs.connection.DriverPropertiesDialogPage;
import org.jkiss.utils.CommonUtils;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import java.util.Map.Entry;
/*
* when edition==GENERIC, don't show/touch any driver properties for
* - compatibility with previous configuration
* - full control over all driver properties
*
* since JDBC 2.6 client the 'encrypt' property is automatically set for connections to port 443 (e.g. HANA Cloud)
* https://help.sap.com/viewer/79ae9d3916b84356a89744c65793b924/2.6/en-US/22485d2937c4427fbbedefe3cc158571.html
* so we do not have to add checkboxes here.
*
*/
public class HANAConnectionPage extends ConnectionPageWithAuth implements IDialogPageProvider {
// JDBC driver property carrying the tenant database name.
final static String PROP_DATABASE_NAME = "databaseName";
// Provider-level properties persisted alongside the connection configuration.
final static String PROV_PROP_INSTANCE_NUMBER = "instanceNumber";
final static String PROV_PROP_EDITION = "edition";
// Connection-settings widgets built in createControl().
private Combo editionCombo;
private Text hostText;
private Text portText;
private Label instanceLabel;
private Text instanceText;
private Label databaseLabel;
private Text databaseText;
// True once createControl() has finished building the widgets.
private boolean created;
private HANAEdition edition;
// saved custom value while Text is read-only
private String portValue;
private String instanceValue;
private String databaseValue;
private final Image logoImage;
// Loads the SAP HANA logo displayed on the connection page; released in dispose().
public HANAConnectionPage() {
logoImage = createImage("icons/sap_hana_logo.png"); //$NON-NLS-1$
}
@Override
public void dispose() {
super.dispose();
// SWT images are OS resources and must be disposed explicitly.
UIUtils.dispose(logoImage);
}
@Override
public Image getImage() {
return logoImage;
}
@Override
// Builds the connection settings UI: edition combo, host/port, instance number,
// database name, plus the shared auth and driver panels.
public void createControl(Composite composite) {
Composite settingsGroup = new Composite(composite, SWT.NONE);
settingsGroup.setLayout(new GridLayout(1, false));
settingsGroup.setLayoutData(new GridData(GridData.FILL_BOTH));
Composite addrGroup = UIUtils.createControlGroup(settingsGroup, HANAMessages.label_connection, 2, 0, 0);
addrGroup.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
UIUtils.createControlLabel(addrGroup, HANAMessages.label_edition);
editionCombo = new Combo(addrGroup, SWT.DROP_DOWN | SWT.READ_ONLY);
for (HANAEdition edition : HANAEdition.values()) {
editionCombo.add(edition.getTitle());
}
hostText = UIUtils.createLabelText(addrGroup, HANAMessages.label_host, "");
// Port and instance fields accept digits only.
portText = UIUtils.createLabelText(addrGroup, HANAMessages.label_port, "");
portText.setLayoutData(new GridData(GridData.HORIZONTAL_ALIGN_BEGINNING));
((GridData)portText.getLayoutData()).widthHint = UIUtils.getFontHeight(portText) * 5;
portText.addVerifyListener(UIUtils.getIntegerVerifyListener(Locale.getDefault()));
instanceLabel = UIUtils.createControlLabel(addrGroup, HANAMessages.label_instance);
instanceText = new Text(addrGroup, SWT.BORDER);
instanceText.setLayoutData(new GridData(GridData.VERTICAL_ALIGN_BEGINNING));
((GridData)instanceText.getLayoutData()).widthHint = UIUtils.getFontHeight(instanceText) * 2;
instanceText.addVerifyListener(UIUtils.getIntegerVerifyListener(Locale.getDefault()));
instanceText.setToolTipText(HANAMessages.tooltip_instance);
databaseLabel = UIUtils.createControlLabel(addrGroup, HANAMessages.label_database);
databaseText = new Text(addrGroup, SWT.BORDER);
databaseText.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
// Every edit re-validates the page (isComplete) via site.updateButtons().
editionCombo.addSelectionListener(new SelectionAdapter() {
@Override public void widgetSelected(SelectionEvent e) { editionUpdated(); site.updateButtons(); }
});
hostText.addModifyListener(e -> site.updateButtons());
portText.addModifyListener(e -> site.updateButtons());
instanceText.addModifyListener(e -> { instanceUpdated(); site.updateButtons(); });
databaseText.addModifyListener(e -> site.updateButtons());
createAuthPanel(settingsGroup, 1);
createDriverPanel(settingsGroup);
setControl(settingsGroup);
created = true;
}
@Override
public boolean isComplete() {
// Host and port are mandatory for every edition.
if (CommonUtils.isEmpty(hostText.getText().trim()) || CommonUtils.isEmpty(portText.getText().trim())) {
return false;
}
if (edition != HANAEdition.GENERIC) {
// An editable instance number must be a value in the range 00..99.
if (instanceText.getEditable()) {
int instanceNumber = CommonUtils.toInt(instanceText.getText().trim(), -1);
if (instanceNumber < 0 || instanceNumber > 99) {
return false;
}
}
// A tenant database name is required whenever the field is enabled.
if (databaseText.getEditable() && CommonUtils.isEmpty(databaseText.getText().trim())) {
return false;
}
}
return super.isComplete();
}
/*
 * HANA driver properties are case insensitive. Reuse and cleanup properties set previously w/o HANA specific connection page
 */
private String getProperty(DBPConnectionConfiguration connectionInfo, String name) {
// Scan all stored properties and match the key ignoring case, mirroring the
// driver's case-insensitive treatment of property names.
for (Entry<String, String> property : connectionInfo.getProperties().entrySet()) {
if (property.getKey().equalsIgnoreCase(name)) {
return property.getValue();
}
}
// No variant of the property (in any casing) is present.
return null;
}
/**
 * Sets a connection property, first removing any previously stored variant of the
 * same property whose key differs only in case (HANA treats property names
 * case-insensitively, so a stale differently-cased key would duplicate/shadow ours).
 */
private void setProperty(DBPConnectionConfiguration connectionInfo, String name, String value) {
for (Entry<String, String> entry : connectionInfo.getProperties().entrySet()) {
if(entry.getKey().equalsIgnoreCase(name) && !entry.getKey().equals(name)) {
// BUGFIX: remove the actual stale key, not 'name' — the old code removed the
// exact-case key 'name' (which the condition guarantees this entry is NOT),
// leaving the differently-cased duplicate in place.
// NOTE(review): assumes getProperties() returns a snapshot map, otherwise
// removal during iteration could throw ConcurrentModificationException — confirm.
connectionInfo.removeProperty(entry.getKey());
}
}
connectionInfo.setProperty(name, value);
}
/**
 * Removes every stored variant of the given property regardless of key case
 * (HANA property names are case-insensitive).
 */
private void removeProperty(DBPConnectionConfiguration connectionInfo, String name) {
for (Entry<String, String> entry : connectionInfo.getProperties().entrySet()) {
if(entry.getKey().equalsIgnoreCase(name)) {
// BUGFIX: remove by the entry's actual key — removing by 'name' only ever
// deleted the exact-case match, so differently-cased duplicates survived.
// NOTE(review): assumes getProperties() returns a snapshot map, otherwise
// removal during iteration could throw ConcurrentModificationException — confirm.
connectionInfo.removeProperty(entry.getKey());
}
}
}
@Override
public void loadSettings() {
super.loadSettings();
// Pull stored configuration into fields first; the widgets may not exist yet
// when this page is loaded headlessly (created == false).
DBPConnectionConfiguration connectionInfo = site.getActiveDataSource().getConnectionConfiguration();
edition = HANAEdition.fromName(connectionInfo.getProviderProperty(PROV_PROP_EDITION));
portValue = CommonUtils.notEmpty(connectionInfo.getHostPort());
instanceValue = CommonUtils.notEmpty(connectionInfo.getProviderProperty(PROV_PROP_INSTANCE_NUMBER));
databaseValue = CommonUtils.notEmpty(getProperty(connectionInfo, PROP_DATABASE_NAME));
if(created) {
// Combo items were added in enum declaration order, so ordinal() is a valid index.
editionCombo.select(edition.ordinal());
hostText.setText(CommonUtils.notEmpty(connectionInfo.getHostName()));
portText.setText(portValue);
instanceText.setText(instanceValue);
databaseText.setText(databaseValue);
// Re-apply edition-specific enablement/visibility of the dependent fields.
editionUpdated();
}
}
@Override
public void saveSettings(DBPDataSourceContainer dataSource) {
DBPConnectionConfiguration connectionInfo = dataSource.getConnectionConfiguration();
connectionInfo.setProviderProperty(PROV_PROP_EDITION, edition.name());
// Widget values are only trustworthy if the UI was actually created.
if (created) {
connectionInfo.setHostName(hostText.getText().trim());
connectionInfo.setHostPort(portText.getText().trim());
if (edition != HANAEdition.GENERIC) {
// Persist instance number and tenant database; empty values are removed
// rather than stored as blanks.
instanceValue = instanceText.getText().trim();
if (instanceValue.isEmpty()) {
connectionInfo.removeProviderProperty(PROV_PROP_INSTANCE_NUMBER);
} else {
connectionInfo.setProviderProperty(PROV_PROP_INSTANCE_NUMBER, instanceValue);
}
databaseValue = databaseText.getText().trim();
if (databaseValue.isEmpty()) {
removeProperty(connectionInfo, PROP_DATABASE_NAME);
} else {
setProperty(connectionInfo, PROP_DATABASE_NAME, databaseValue);
}
}
}
super.saveSettings(dataSource);
}
@Override
public IDialogPage[] getDialogPages(boolean extrasOnly, boolean forceCreate) {
// Only an extra driver-properties page is offered; the flags are intentionally ignored.
return new IDialogPage[] { new DriverPropertiesDialogPage(this) };
}
/**
 * Reacts to an edition change: caches the user-entered values of the currently
 * editable fields, then re-derives port/instance/database editability, defaults
 * and visibility for the newly selected edition, and relayouts the page.
 */
private void editionUpdated() {
// save old values
if (portText.getEditable()) {
portValue = portText.getText().trim();
}
if (instanceText.getEditable()) {
instanceValue = instanceText.getText().trim();
}
if (databaseText.getEditable()) {
databaseValue = databaseText.getText().trim();
}
edition = HANAEdition.fromTitle(editionCombo.getText());
// Port is user-editable only for GENERIC and EXPRESS; otherwise it is derived.
portText.setEditable(edition == HANAEdition.GENERIC || edition == HANAEdition.EXPRESS);
UIUtils.fixReadonlyTextBackground(portText);
switch (edition) {
case GENERIC:
portText.setText(portValue);
break;
case PLATFORM_SINGLE_DB:
case PLATFORM_SYSTEM_DB:
case PLATFORM_TENANT_DB:
// Port is computed from the instance number for platform editions.
instanceUpdated();
break;
case EXPRESS:
// Default HANA Express port (instance 90, single-db scheme).
if(portValue.isEmpty())
portText.setText("39015");
else
portText.setText(portValue);
break;
case CLOUD:
// HANA Cloud is always reached over HTTPS port 443.
portText.setText("443");
break;
default:
break;
}
if (edition == HANAEdition.PLATFORM_SINGLE_DB || edition == HANAEdition.PLATFORM_SYSTEM_DB || edition == HANAEdition.PLATFORM_TENANT_DB) {
if(instanceValue.isEmpty()) {
// Reverse-derive the instance number from a port in the 3<NN>xx range.
int port = CommonUtils.toInt(portValue);
if(port >= 30000 && port <= 39999)
instanceText.setText(String.valueOf((port-30000)/100));
} else {
instanceText.setText(instanceValue);
}
instanceText.setEditable(true);
} else if (edition == HANAEdition.EXPRESS) {
// HANA Express always uses instance 90; not user-changeable.
instanceText.setText("90");
instanceText.setEditable(false);
} else {
instanceText.setText("");
instanceText.setEditable(false);
}
UIUtils.fixReadonlyTextBackground(instanceText);
// Tenant database name is only relevant for the multi-tenant platform edition.
if (edition == HANAEdition.PLATFORM_TENANT_DB) {
databaseText.setText(databaseValue);
databaseText.setEditable(true);
} else {
databaseText.setText("");
databaseText.setEditable(false);
}
UIUtils.fixReadonlyTextBackground(databaseText);
// Hide (and exclude from layout) the edition-specific widgets for GENERIC.
toggleControlVisibility(instanceLabel);
toggleControlVisibility(instanceText);
toggleControlVisibility(databaseLabel);
toggleControlVisibility(databaseText);
((Composite)getControl()).layout(true, true);
}
/**
 * Shows the control for non-generic editions and hides it for GENERIC,
 * additionally excluding it from grid layout so it occupies no space.
 */
private void toggleControlVisibility(Control control) {
boolean generic = edition == HANAEdition.GENERIC;
control.setVisible(!generic);
Object layoutData = control.getLayoutData();
if (layoutData instanceof GridData) {
((GridData) layoutData).exclude = generic;
}
}
/**
 * Derives the SQL port from the entered instance number for platform editions:
 * 3<NN>15 for a single-database system, 3<NN>13 (SYSTEMDB) for multi-database
 * systems. Other editions are left untouched.
 */
private void instanceUpdated() {
int instanceNumber = CommonUtils.toInt(instanceText.getText().trim(), 0);
if (edition == HANAEdition.PLATFORM_SINGLE_DB) {
portText.setText(String.format("3%02d15", instanceNumber));
} else if (edition == HANAEdition.PLATFORM_SYSTEM_DB || edition == HANAEdition.PLATFORM_TENANT_DB) {
portText.setText(String.format("3%02d13", instanceNumber));
}
}
}
| |
/*
* Copyright 2016 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.storage.ldap.mappers.membership.role;
import org.jboss.logging.Logger;
import org.keycloak.component.ComponentModel;
import org.keycloak.models.ClientModel;
import org.keycloak.models.ModelException;
import org.keycloak.models.RealmModel;
import org.keycloak.models.RoleContainerModel;
import org.keycloak.models.RoleModel;
import org.keycloak.models.UserModel;
import org.keycloak.models.utils.KeycloakModelUtils;
import org.keycloak.models.utils.RoleUtils;
import org.keycloak.models.utils.UserModelDelegate;
import org.keycloak.storage.ldap.LDAPStorageProvider;
import org.keycloak.storage.ldap.LDAPUtils;
import org.keycloak.storage.ldap.idm.model.LDAPObject;
import org.keycloak.storage.ldap.idm.query.Condition;
import org.keycloak.storage.ldap.idm.query.internal.LDAPQuery;
import org.keycloak.storage.ldap.idm.query.internal.LDAPQueryConditionsBuilder;
import org.keycloak.storage.ldap.mappers.AbstractLDAPStorageMapper;
import org.keycloak.storage.ldap.mappers.membership.CommonLDAPGroupMapper;
import org.keycloak.storage.ldap.mappers.membership.CommonLDAPGroupMapperConfig;
import org.keycloak.storage.ldap.mappers.membership.LDAPGroupMapperMode;
import org.keycloak.storage.ldap.mappers.membership.UserRolesRetrieveStrategy;
import org.keycloak.storage.user.SynchronizationResult;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
/**
* Map realm roles or roles of particular client to LDAP groups
*
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
*/
public class RoleLDAPStorageMapper extends AbstractLDAPStorageMapper implements CommonLDAPGroupMapper {
private static final Logger logger = Logger.getLogger(RoleLDAPStorageMapper.class);
// Typed view over the mapper's component configuration (mode, roles DN, attribute names, strategy).
private final RoleMapperConfig config;
// Factory used to resolve the configured user-roles retrieval strategy by key.
private final RoleLDAPStorageMapperFactory factory;
public RoleLDAPStorageMapper(ComponentModel mapperModel, LDAPStorageProvider ldapProvider, RealmModel realm, RoleLDAPStorageMapperFactory factory) {
super(mapperModel, ldapProvider, realm);
this.config = new RoleMapperConfig(mapperModel);
this.factory = factory;
}
@Override
public LDAPQuery createLDAPGroupQuery() {
// For this mapper "groups" are LDAP role entries, so the role query is reused.
return createRoleQuery();
}
@Override
public CommonLDAPGroupMapperConfig getConfig() {
return config;
}
@Override
public void onImportUserFromLDAP(LDAPObject ldapUser, UserModel user, boolean isCreate) {
LDAPGroupMapperMode mode = config.getMode();
// For now, import LDAP role mappings just during create
if (mode == LDAPGroupMapperMode.IMPORT && isCreate) {
List<LDAPObject> ldapRoles = getLDAPRoleMappings(ldapUser);
// Import role mappings from LDAP into Keycloak DB
String roleNameAttr = config.getRoleNameLdapAttribute();
for (LDAPObject ldapRole : ldapRoles) {
String roleName = ldapRole.getAttributeAsString(roleNameAttr);
RoleContainerModel roleContainer = getTargetRoleContainer();
RoleModel role = roleContainer.getRole(roleName);
// Roles present in LDAP but missing in Keycloak are created on the fly.
if (role == null) {
role = roleContainer.addRole(roleName);
}
logger.debugf("Granting role [%s] to user [%s] during import from LDAP", roleName, user.getUsername());
user.grantRole(role);
}
}
}
@Override
public void onRegisterUserToLDAP(LDAPObject ldapUser, UserModel localUser) {
// No role data is pushed to LDAP on user registration.
}
// Sync roles from LDAP to Keycloak DB
@Override
public SynchronizationResult syncDataFromFederationProviderToKeycloak() {
SynchronizationResult syncResult = new SynchronizationResult() {
@Override
public String getStatus() {
return String.format("%d imported roles, %d roles already exists in Keycloak", getAdded(), getUpdated());
}
};
logger.debugf("Syncing roles from LDAP into Keycloak DB. Mapper is [%s], LDAP provider is [%s]", mapperModel.getName(), ldapProvider.getModel().getName());
// Send LDAP query to load all roles
LDAPQuery ldapRoleQuery = createRoleQuery();
List<LDAPObject> ldapRoles = LDAPUtils.loadAllLDAPObjects(ldapRoleQuery, ldapProvider);
RoleContainerModel roleContainer = getTargetRoleContainer();
String rolesRdnAttr = config.getRoleNameLdapAttribute();
for (LDAPObject ldapRole : ldapRoles) {
String roleName = ldapRole.getAttributeAsString(rolesRdnAttr);
// Only missing roles are created; existing ones are counted as "updated".
if (roleContainer.getRole(roleName) == null) {
logger.debugf("Syncing role [%s] from LDAP to keycloak DB", roleName);
roleContainer.addRole(roleName);
syncResult.increaseAdded();
} else {
syncResult.increaseUpdated();
}
}
return syncResult;
}
// Sync roles from Keycloak back to LDAP
@Override
public SynchronizationResult syncDataFromKeycloakToFederationProvider() {
SynchronizationResult syncResult = new SynchronizationResult() {
@Override
public String getStatus() {
return String.format("%d roles imported to LDAP, %d roles already existed in LDAP", getAdded(), getUpdated());
}
};
// Writing to LDAP is only allowed in LDAP_ONLY mode.
if (config.getMode() != LDAPGroupMapperMode.LDAP_ONLY) {
logger.warnf("Ignored sync for federation mapper '%s' as it's mode is '%s'", mapperModel.getName(), config.getMode().toString());
return syncResult;
}
logger.debugf("Syncing roles from Keycloak into LDAP. Mapper is [%s], LDAP provider is [%s]", mapperModel.getName(), ldapProvider.getModel().getName());
// Send LDAP query to see which roles exists there
LDAPQuery ldapQuery = createRoleQuery();
List<LDAPObject> ldapRoles = ldapQuery.getResultList();
Set<String> ldapRoleNames = new HashSet<>();
String rolesRdnAttr = config.getRoleNameLdapAttribute();
for (LDAPObject ldapRole : ldapRoles) {
String roleName = ldapRole.getAttributeAsString(rolesRdnAttr);
ldapRoleNames.add(roleName);
}
RoleContainerModel roleContainer = getTargetRoleContainer();
Set<RoleModel> keycloakRoles = roleContainer.getRoles();
for (RoleModel keycloakRole : keycloakRoles) {
String roleName = keycloakRole.getName();
// Roles already present in LDAP are counted as updated; others are created there.
if (ldapRoleNames.contains(roleName)) {
syncResult.increaseUpdated();
} else {
logger.debugf("Syncing role [%s] from Keycloak to LDAP", roleName);
createLDAPRole(roleName);
syncResult.increaseAdded();
}
}
return syncResult;
}
// TODO: Possible to merge with GroupMapper and move to common class
public LDAPQuery createRoleQuery() {
LDAPQuery ldapQuery = new LDAPQuery(ldapProvider);
// For now, use same search scope, which is configured "globally" and used for user's search.
ldapQuery.setSearchScope(ldapProvider.getLdapIdentityStore().getConfig().getSearchScope());
String rolesDn = config.getRolesDn();
ldapQuery.setSearchDn(rolesDn);
Collection<String> roleObjectClasses = config.getRoleObjectClasses(ldapProvider);
ldapQuery.addObjectClasses(roleObjectClasses);
String rolesRdnAttr = config.getRoleNameLdapAttribute();
// Optional admin-configured extra LDAP filter is ANDed onto the query.
String customFilter = config.getCustomLdapFilter();
if (customFilter != null && customFilter.trim().length() > 0) {
Condition customFilterCondition = new LDAPQueryConditionsBuilder().addCustomLDAPFilter(customFilter);
ldapQuery.addWhereCondition(customFilterCondition);
}
String membershipAttr = config.getMembershipLdapAttribute();
// Only the role name and membership attributes are needed from the entries.
ldapQuery.addReturningLdapAttribute(rolesRdnAttr);
ldapQuery.addReturningLdapAttribute(membershipAttr);
return ldapQuery;
}
/** Resolves the container the roles map to: the realm, or a configured client. */
protected RoleContainerModel getTargetRoleContainer() {
boolean realmRolesMapping = config.isRealmRolesMapping();
if (realmRolesMapping) {
return realm;
} else {
String clientId = config.getClientId();
if (clientId == null) {
throw new ModelException("Using client roles mapping is requested, but parameter client.id not found!");
}
ClientModel client = realm.getClientByClientId(clientId);
if (client == null) {
throw new ModelException("Can't found requested client with clientId: " + clientId);
}
return client;
}
}
/** Creates the LDAP entry for the given role under the configured roles DN. */
public LDAPObject createLDAPRole(String roleName) {
LDAPObject ldapRole = LDAPUtils.createLDAPGroup(ldapProvider, roleName, config.getRoleNameLdapAttribute(), config.getRoleObjectClasses(ldapProvider),
config.getRolesDn(), Collections.<String, Set<String>>emptyMap());
logger.debugf("Creating role [%s] to LDAP with DN [%s]", roleName, ldapRole.getDn().toString());
return ldapRole;
}
/** Adds the user as a member of the named LDAP role, creating the role entry if missing. */
public void addRoleMappingInLDAP(String roleName, LDAPObject ldapUser) {
LDAPObject ldapRole = loadLDAPRoleByName(roleName);
if (ldapRole == null) {
ldapRole = createLDAPRole(roleName);
}
LDAPUtils.addMember(ldapProvider, config.getMembershipTypeLdapAttribute(), config.getMembershipLdapAttribute(), ldapRole, ldapUser, true);
}
public void deleteRoleMappingInLDAP(LDAPObject ldapUser, LDAPObject ldapRole) {
LDAPUtils.deleteMember(ldapProvider, config.getMembershipTypeLdapAttribute(), config.getMembershipLdapAttribute(), ldapRole, ldapUser, true);
}
/** Returns the LDAP role entry with the given name, or null if it does not exist. */
public LDAPObject loadLDAPRoleByName(String roleName) {
LDAPQuery ldapQuery = createRoleQuery();
Condition roleNameCondition = new LDAPQueryConditionsBuilder().equal(config.getRoleNameLdapAttribute(), roleName);
ldapQuery.addWhereCondition(roleNameCondition);
return ldapQuery.getFirstResult();
}
protected List<LDAPObject> getLDAPRoleMappings(LDAPObject ldapUser) {
// Retrieval is delegated to the strategy configured on the mapper
// (e.g. load by member attribute vs. by user attribute).
String strategyKey = config.getUserRolesRetrieveStrategy();
UserRolesRetrieveStrategy strategy = factory.getUserRolesRetrieveStrategy(strategyKey);
return strategy.getLDAPRoleMappings(this, ldapUser);
}
@Override
public UserModel proxy(LDAPObject ldapUser, UserModel delegate) {
final LDAPGroupMapperMode mode = config.getMode();
// For IMPORT mode, all operations are performed against local DB
if (mode == LDAPGroupMapperMode.IMPORT) {
return delegate;
} else {
// READ_ONLY / LDAP_ONLY: role-mapping calls are intercepted by the delegate below.
return new LDAPRoleMappingsUserDelegate(delegate, ldapUser);
}
}
@Override
public void beforeLDAPQuery(LDAPQuery query) {
// Give the configured retrieval strategy a chance to adjust the user query
// (e.g. request extra returning attributes).
String strategyKey = config.getUserRolesRetrieveStrategy();
UserRolesRetrieveStrategy strategy = factory.getUserRolesRetrieveStrategy(strategyKey);
strategy.beforeUserLDAPQuery(query);
}
/**
 * UserModel wrapper that merges/overrides role-mapping operations with the
 * mappings stored in LDAP, according to the mapper mode (READ_ONLY or LDAP_ONLY).
 */
public class LDAPRoleMappingsUserDelegate extends UserModelDelegate {
private final LDAPObject ldapUser;
// Container (realm or client) this mapper's roles belong to; resolved once.
private final RoleContainerModel roleContainer;
// Avoid loading role mappings from LDAP more times per-request
private Set<RoleModel> cachedLDAPRoleMappings;
public LDAPRoleMappingsUserDelegate(UserModel user, LDAPObject ldapUser) {
super(user);
this.ldapUser = ldapUser;
this.roleContainer = getTargetRoleContainer();
}
@Override
public Set<RoleModel> getRealmRoleMappings() {
// Only intercept when this mapper targets realm roles.
if (roleContainer.equals(realm)) {
Set<RoleModel> ldapRoleMappings = getLDAPRoleMappingsConverted();
if (config.getMode() == LDAPGroupMapperMode.LDAP_ONLY) {
// Use just role mappings from LDAP
return ldapRoleMappings;
} else {
// Merge mappings from both DB and LDAP
Set<RoleModel> modelRoleMappings = super.getRealmRoleMappings();
ldapRoleMappings.addAll(modelRoleMappings);
return ldapRoleMappings;
}
} else {
return super.getRealmRoleMappings();
}
}
@Override
public Set<RoleModel> getClientRoleMappings(ClientModel client) {
// Only intercept when this mapper targets the requested client's roles.
if (roleContainer.equals(client)) {
Set<RoleModel> ldapRoleMappings = getLDAPRoleMappingsConverted();
if (config.getMode() == LDAPGroupMapperMode.LDAP_ONLY) {
// Use just role mappings from LDAP
return ldapRoleMappings;
} else {
// Merge mappings from both DB and LDAP
Set<RoleModel> modelRoleMappings = super.getClientRoleMappings(client);
ldapRoleMappings.addAll(modelRoleMappings);
return ldapRoleMappings;
}
} else {
return super.getClientRoleMappings(client);
}
}
@Override
public boolean hasRole(RoleModel role) {
// Consider direct mappings (incl. LDAP ones via getRoleMappings) and group-derived roles.
Set<RoleModel> roles = getRoleMappings();
return RoleUtils.hasRole(roles, role)
|| RoleUtils.hasRoleFromGroup(getGroups(), role, true);
}
@Override
public void grantRole(RoleModel role) {
if (config.getMode() == LDAPGroupMapperMode.LDAP_ONLY) {
if (role.getContainer().equals(roleContainer)) {
// We need to create new role mappings in LDAP
cachedLDAPRoleMappings = null;
addRoleMappingInLDAP(role.getName(), ldapUser);
} else {
super.grantRole(role);
}
} else {
super.grantRole(role);
}
}
@Override
public Set<RoleModel> getRoleMappings() {
Set<RoleModel> modelRoleMappings = super.getRoleMappings();
Set<RoleModel> ldapRoleMappings = getLDAPRoleMappingsConverted();
if (config.getMode() == LDAPGroupMapperMode.LDAP_ONLY) {
// For LDAP-only we want to retrieve role mappings of target container just from LDAP
Set<RoleModel> modelRolesCopy = new HashSet<>(modelRoleMappings);
for (RoleModel role : modelRolesCopy) {
if (role.getContainer().equals(roleContainer)) {
modelRoleMappings.remove(role);
}
}
}
modelRoleMappings.addAll(ldapRoleMappings);
return modelRoleMappings;
}
/** Loads the user's LDAP role mappings, converting them to (and creating missing) Keycloak roles. */
protected Set<RoleModel> getLDAPRoleMappingsConverted() {
// Per-request cache; return a defensive copy so callers can't mutate it.
if (cachedLDAPRoleMappings != null) {
return new HashSet<>(cachedLDAPRoleMappings);
}
List<LDAPObject> ldapRoles = getLDAPRoleMappings(ldapUser);
Set<RoleModel> roles = new HashSet<>();
String roleNameLdapAttr = config.getRoleNameLdapAttribute();
for (LDAPObject role : ldapRoles) {
String roleName = role.getAttributeAsString(roleNameLdapAttr);
RoleModel modelRole = roleContainer.getRole(roleName);
if (modelRole == null) {
// Add role to local DB
modelRole = roleContainer.addRole(roleName);
}
roles.add(modelRole);
}
cachedLDAPRoleMappings = new HashSet<>(roles);
return roles;
}
@Override
public void deleteRoleMapping(RoleModel role) {
if (role.getContainer().equals(roleContainer)) {
// Check whether the membership actually exists in LDAP for this user+role.
LDAPQuery ldapQuery = createRoleQuery();
LDAPQueryConditionsBuilder conditionsBuilder = new LDAPQueryConditionsBuilder();
Condition roleNameCondition = conditionsBuilder.equal(config.getRoleNameLdapAttribute(), role.getName());
String membershipUserAttr = LDAPUtils.getMemberValueOfChildObject(ldapUser, config.getMembershipTypeLdapAttribute());
Condition membershipCondition = conditionsBuilder.equal(config.getMembershipLdapAttribute(), membershipUserAttr);
ldapQuery.addWhereCondition(roleNameCondition).addWhereCondition(membershipCondition);
LDAPObject ldapRole = ldapQuery.getFirstResult();
if (ldapRole == null) {
// Role mapping doesn't exist in LDAP. For LDAP_ONLY mode, we don't need to do anything. For READ_ONLY, delete it in local DB.
if (config.getMode() == LDAPGroupMapperMode.READ_ONLY) {
super.deleteRoleMapping(role);
}
} else {
// Role mappings exists in LDAP. For LDAP_ONLY mode, we can just delete it in LDAP. For READ_ONLY we can't delete it -> throw error
if (config.getMode() == LDAPGroupMapperMode.READ_ONLY) {
throw new ModelException("Not possible to delete LDAP role mappings as mapper mode is READ_ONLY");
} else {
// Delete ldap role mappings
cachedLDAPRoleMappings = null;
deleteRoleMappingInLDAP(ldapUser, ldapRole);
}
}
} else {
super.deleteRoleMapping(role);
}
}
}
}
| |
/*
* Copyright 2000-2017 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.compiler.inspection;
import com.intellij.codeInsight.FileModificationService;
import com.intellij.codeInsight.daemon.GroupNames;
import com.intellij.codeInsight.intention.HighPriorityAction;
import com.intellij.codeInspection.LocalQuickFix;
import com.intellij.codeInspection.ProblemDescriptor;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.application.WriteAction;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.ui.DialogWrapper;
import com.intellij.openapi.util.Pair;
import com.intellij.psi.*;
import com.intellij.psi.codeStyle.JavaCodeStyleManager;
import com.intellij.refactoring.ui.MemberSelectionPanel;
import com.intellij.refactoring.util.classMembers.MemberInfo;
import com.intellij.util.ArrayUtil;
import com.intellij.util.ObjectUtils;
import com.intellij.util.containers.ContainerUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.TestOnly;
import javax.swing.*;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
 * Quick fix that rewires a class to extend/implement a different super class,
 * optionally letting the user delete overridden methods that are already
 * defined in the new super class.
 */
public class ChangeSuperClassFix implements LocalQuickFix, HighPriorityAction {
@NotNull
private final SmartPsiElementPointer<PsiClass> myNewSuperClass;
@NotNull
private final SmartPsiElementPointer<PsiClass> myOldSuperClass;
@NotNull
private final SmartPsiElementPointer<PsiClass> myTargetClass;
// NOTE(review): despite the name, the constructor stores the 'percent' argument here.
private final int myInheritorCount;
@NotNull
private final String myNewSuperName;
// true -> "implements" wording in the fix name; false -> "extends".
private final boolean myImplements;
public ChangeSuperClassFix(@NotNull PsiClass targetClass,
@NotNull PsiClass newSuperClass,
@NotNull PsiClass oldSuperClass,
final int percent,
final boolean isImplements) {
// Smart pointers survive PSI reparses between inspection time and fix application.
final SmartPointerManager smartPointerManager = SmartPointerManager.getInstance(newSuperClass.getProject());
myNewSuperName = ObjectUtils.notNull(newSuperClass.getQualifiedName());
myTargetClass = smartPointerManager.createSmartPsiElementPointer(targetClass);
myNewSuperClass = smartPointerManager.createSmartPsiElementPointer(newSuperClass);
myOldSuperClass = smartPointerManager.createSmartPsiElementPointer(oldSuperClass);
myInheritorCount = percent;
myImplements = isImplements;
}
@NotNull
@TestOnly
public PsiClass getNewSuperClass() {
return ObjectUtils.notNull(myNewSuperClass.getElement());
}
@TestOnly
public int getInheritorCount() {
return myInheritorCount;
}
@NotNull
@Override
public String getName() {
return String.format("Make " + (myImplements ? "implements" : "extends") + " '%s'", myNewSuperName);
}
@NotNull
@Override
public String getFamilyName() {
return GroupNames.INHERITANCE_GROUP_NAME;
}
@Override
public boolean startInWriteAction() {
// The fix shows a dialog, so it manages its own write actions internally.
return false;
}
@Override
public void applyFix(@NotNull final Project project, @NotNull final ProblemDescriptor problemDescriptor) {
final PsiClass oldSuperClass = myOldSuperClass.getElement();
final PsiClass newSuperClass = myNewSuperClass.getElement();
// Pointers may have become invalid since the inspection ran.
if (oldSuperClass == null || newSuperClass == null) return;
PsiClass aClass = myTargetClass.getElement();
if (aClass == null || !FileModificationService.getInstance().preparePsiElementsForWrite(aClass)) return;
changeSuperClass(aClass, oldSuperClass, newSuperClass);
}
/**
 * oldSuperClass and newSuperClass can be interfaces or classes in any combination
 * <p/>
 * 1. not checks that oldSuperClass is really super of aClass
 * 2. not checks that newSuperClass not exists in currently existed supers
 */
private static void changeSuperClass(@NotNull final PsiClass aClass,
@NotNull final PsiClass oldSuperClass,
@NotNull final PsiClass newSuperClass) {
PsiMethod[] ownMethods = aClass.getMethods();
// first is own method, second is parent
// Snapshot which own methods override the OLD super before the hierarchy changes.
List<Pair<PsiMethod, Set<PsiMethod>>> oldOverridenMethods =
Stream.of(ownMethods).map(m -> {
if (m.isConstructor()) return null;
PsiMethod[] supers = m.findSuperMethods(oldSuperClass);
if (supers.length == 0) return null;
return Pair.create(m, ContainerUtil.set(supers));
}).filter(Objects::nonNull).collect(Collectors.toList());
JavaPsiFacade psiFacade = JavaPsiFacade.getInstance(aClass.getProject());
PsiElementFactory factory = psiFacade.getElementFactory();
WriteAction.run(() -> {
PsiElement ref;
if (aClass instanceof PsiAnonymousClass) {
// Anonymous classes have a single base-class reference instead of extends/implements lists.
ref = ((PsiAnonymousClass)aClass).getBaseClassReference().replace(factory.createClassReferenceElement(newSuperClass));
} else {
PsiReferenceList extendsList = ObjectUtils.notNull(aClass.getExtendsList());
// Remove every existing reference to the old super, wherever it appears.
PsiJavaCodeReferenceElement[] refElements =
ArrayUtil.mergeArrays(getReferences(extendsList), getReferences(aClass.getImplementsList()));
for (PsiJavaCodeReferenceElement refElement : refElements) {
if (refElement.isReferenceTo(oldSuperClass)) {
refElement.delete();
}
}
PsiReferenceList list;
if (newSuperClass.isInterface() && !aClass.isInterface()) {
list = aClass.getImplementsList();
}
else {
list = extendsList;
// Drop an explicit 'extends Object' before adding the new superclass.
PsiJavaCodeReferenceElement[] elements = list.getReferenceElements();
if (elements.length == 1) {
PsiClass objectClass = psiFacade.findClass(CommonClassNames.JAVA_LANG_OBJECT, aClass.getResolveScope());
if (objectClass != null && elements[0].isReferenceTo(objectClass)) {
elements[0].delete();
}
}
}
assert list != null;
ref = list.add(factory.createClassReferenceElement(newSuperClass));
}
JavaCodeStyleManager.getInstance(aClass.getProject()).shortenClassReferences(ref);
});
// Offer deletion only for overrides whose super set actually changed with the new parent.
List<MemberInfo> memberInfos = oldOverridenMethods.stream().filter(m -> {
Set<PsiMethod> newSupers = ContainerUtil.set(m.getFirst().findSuperMethods(newSuperClass));
return !newSupers.equals(m.getSecond());
}).map(m -> m.getFirst())
.map(m -> {
MemberInfo info = new MemberInfo(m);
info.setChecked(true);
return info;
}).collect(Collectors.toList());
if (memberInfos.isEmpty()) {
return;
}
// Ask the user (or take all in tests) which now-redundant overrides to delete.
List<PsiMethod> toDelete = getOverridenMethodsToDelete(memberInfos, newSuperClass.getName(), aClass.getProject());
if (!toDelete.isEmpty()) {
WriteAction.run(() -> {
for (PsiMethod method : toDelete) {
method.delete();
}
});
}
}
@NotNull
private static PsiJavaCodeReferenceElement[] getReferences(PsiReferenceList list) {
// Null-safe accessor: an absent list yields no references.
return list == null ? PsiJavaCodeReferenceElement.EMPTY_ARRAY : list.getReferenceElements();
}
@NotNull
private static List<PsiMethod> getOverridenMethodsToDelete(List<MemberInfo> candidates,
String newClassName,
Project project) {
// Headless tests delete all candidates without showing UI.
if (ApplicationManager.getApplication().isUnitTestMode()) {
return ContainerUtil.map(candidates, c -> (PsiMethod)c.getMember());
}
MemberSelectionPanel panel =
new MemberSelectionPanel("<html>Choose members to delete since they are already defined in <b>" + newClassName + "</b>",
candidates,
null);
DialogWrapper dlg = new DialogWrapper(project, false) {
{
setOKButtonText("Remove");
setTitle("Choose Members");
init();
}
@NotNull
@Override
protected JComponent createCenterPanel() {
return panel;
}
};
// Cancelling the dialog deletes nothing.
return dlg.showAndGet()
? ContainerUtil.map(panel.getTable().getSelectedMemberInfos(), info -> (PsiMethod)info.getMember())
: Collections.emptyList();
}
}
| |
/*
* Copyright 2017 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.drools.workbench.screens.guided.rule.client.editor;
import java.util.Collections;
import java.util.List;
import com.google.gwtmockito.GwtMock;
import com.google.gwtmockito.GwtMockito;
import com.google.gwtmockito.GwtMockitoTestRunner;
import com.google.gwtmockito.WithClassesToStub;
import org.assertj.core.api.Assertions;
import org.drools.workbench.models.datamodel.rule.DSLSentence;
import org.drools.workbench.models.datamodel.rule.IAction;
import org.drools.workbench.models.datamodel.rule.RuleModel;
import org.drools.workbench.screens.guided.rule.client.editor.plugin.RuleModellerActionPlugin;
import org.gwtbootstrap3.client.ui.Heading;
import org.gwtbootstrap3.client.ui.ListBox;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.kie.workbench.common.widgets.client.datamodel.AsyncPackageDataModelOracle;
import org.mockito.ArgumentCaptor;
import org.mockito.Captor;
import org.mockito.Mock;
import org.uberfire.mvp.Command;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Matchers.anyInt;
import static org.mockito.Matchers.anyString;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.atLeastOnce;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@WithClassesToStub({Heading.class})
@RunWith(GwtMockitoTestRunner.class)
public class RuleModellerActionSelectorPopupTest {
// Identifier and label used by the stubbed custom action plugin under test.
private static final String ACTION_ID = "modify score id";
private static final String ACTION_DESCRIPTION = "modify score";
@Mock
private RuleModeller ruleModeller;
@Mock
private AsyncPackageDataModelOracle oracle;
@Mock
private RuleModellerActionPlugin actionPlugin;
@Mock
private IAction iAction;
// Shared ListBox mock returned for every ListBox the popup creates (see setUp).
@Mock
private ListBox listBox;
@GwtMock
private RuleModellerSelectorFilter filterWidget;
@Captor
private ArgumentCaptor<String> keyCaptor;
@Captor
private ArgumentCaptor<Command> commandArgumentCaptor;
// Spied so interactions triggered by the popup can be verified.
private RuleModel model;
private RuleModellerActionSelectorPopup popup;
@Before
public void setUp() {
// Every ListBox the popup instantiates resolves to the single shared mock.
GwtMockito.useProviderForType(ListBox.class, aClass -> listBox);
this.model = spy(new RuleModel());
// Minimal oracle content so the popup has DSL actions, fact types and globals to list.
when(oracle.getDSLActions()).thenReturn(Collections.singletonList(new DSLSentence() {{
setDefinition("dslSentence");
}}));
when(oracle.getFactTypes()).thenReturn(new String[]{"Applicant"});
when(oracle.getGlobalVariables()).thenReturn(new String[]{"$global"});
when(ruleModeller.isDSLEnabled()).thenReturn(true);
when(actionPlugin.getId()).thenReturn(ACTION_ID);
when(actionPlugin.getActionAddDescription()).thenReturn(ACTION_DESCRIPTION);
this.popup = spy(new RuleModellerActionSelectorPopup(model,
ruleModeller,
Collections.singletonList(actionPlugin),
0,
oracle));
// Discard interactions recorded during popup construction so tests verify only their own.
reset(model);
}
@Test
public void checkAddUpdateNotModifyGetsPatternBindings() {
// "update" (not "modify") actions should list only LHS pattern bindings.
popup.addUpdateNotModify(false);
verify(model).getLHSPatternVariables();
}
@Test
public void checkAddRetractionsGetsPatternBindings() {
popup.addRetractions(false);
verify(model).getLHSPatternVariables();
}
@Test
public void checkAddModifiesGetsLhsBindings() {
popup.addModifies(false);
verify(model).getAllLHSVariables();
}
@Test
public void checkAddCallMethodOnGetsAllBindings() {
popup.addCallMethodOn(false);
verify(model).getAllLHSVariables();
verify(model).getRHSBoundFacts();
}
@Test
public void testActionPlugins() throws Exception {
// reset due to calls in constructor
reset(actionPlugin);
doReturn(ACTION_DESCRIPTION).when(actionPlugin).getActionAddDescription();
doReturn(ACTION_ID).when(actionPlugin).getId();
doReturn(iAction).when(actionPlugin).createIAction(eq(ruleModeller));
popup.getContent();
verify(actionPlugin).createIAction(ruleModeller);
verify(actionPlugin).addPluginToActionList(eq(ruleModeller), commandArgumentCaptor.capture());
// reset due to adding a lot of different items before custom action plugins
// listbox is used as popup.choices
reset(listBox);
commandArgumentCaptor.getValue().execute();
verify(listBox).addItem(eq(ACTION_DESCRIPTION), eq(ACTION_ID));
Assertions.assertThat(popup.cmds).containsKeys(ACTION_ID);
// reset
// now we need listbox as popup.positionCbo
reset(listBox);
doReturn("123").when(listBox).getValue(anyInt());
popup.cmds.get(ACTION_ID).execute();
verify(model).addRhsItem(iAction, 123);
verify(popup).hide();
}
@Test
public void testLoadContentFiltered() throws Exception {
reset(listBox, actionPlugin);
when(actionPlugin.getId()).thenReturn(ACTION_ID);
when(actionPlugin.getActionAddDescription()).thenReturn(ACTION_DESCRIPTION);
when(filterWidget.getFilterText()).thenReturn("cheese");
popup = new RuleModellerActionSelectorPopup(model,
ruleModeller,
Collections.singletonList(actionPlugin),
0,
oracle);
verify(actionPlugin).addPluginToActionList(eq(ruleModeller), commandArgumentCaptor.capture());
commandArgumentCaptor.getValue().execute();
verify(listBox, atLeastOnce()).addItem(keyCaptor.capture(), anyString());
final List<String> keys = keyCaptor.getAllValues();
assertThat(keys).containsExactly("ChangeFieldValuesOf0($global)",
AbstractRuleModellerSelectorPopup.SECTION_SEPARATOR,
"AddFreeFormDrl",
AbstractRuleModellerSelectorPopup.SECTION_SEPARATOR,
"CallMethodOn0($global)",
AbstractRuleModellerSelectorPopup.SECTION_SEPARATOR,
ACTION_DESCRIPTION);
}
@Test
public void testLoadContentFilteredFactMatched() throws Exception {
reset(listBox, actionPlugin);
when(actionPlugin.getId()).thenReturn(ACTION_ID);
when(actionPlugin.getActionAddDescription()).thenReturn(ACTION_DESCRIPTION);
when(filterWidget.getFilterText()).thenReturn("applicant");
popup = new RuleModellerActionSelectorPopup(model,
ruleModeller,
Collections.singletonList(actionPlugin),
0,
oracle);
verify(actionPlugin).addPluginToActionList(eq(ruleModeller), commandArgumentCaptor.capture());
commandArgumentCaptor.getValue().execute();
verify(listBox, atLeastOnce()).addItem(keyCaptor.capture(), anyString());
final List<String> keys = keyCaptor.getAllValues();
assertThat(keys).containsExactly("ChangeFieldValuesOf0($global)",
AbstractRuleModellerSelectorPopup.SECTION_SEPARATOR,
"InsertFact0(Applicant)",
AbstractRuleModellerSelectorPopup.SECTION_SEPARATOR,
"LogicallyInsertFact0(Applicant)",
AbstractRuleModellerSelectorPopup.SECTION_SEPARATOR,
"AddFreeFormDrl",
AbstractRuleModellerSelectorPopup.SECTION_SEPARATOR,
"CallMethodOn0($global)",
AbstractRuleModellerSelectorPopup.SECTION_SEPARATOR,
ACTION_DESCRIPTION);
}
@Test
public void testLoadContentDslSentenceMatched() throws Exception {
reset(listBox, actionPlugin);
when(actionPlugin.getId()).thenReturn(ACTION_ID);
when(actionPlugin.getActionAddDescription()).thenReturn(ACTION_DESCRIPTION);
when(filterWidget.getFilterText()).thenReturn("dsl");
popup = new RuleModellerActionSelectorPopup(model,
ruleModeller,
Collections.singletonList(actionPlugin),
0,
oracle);
verify(actionPlugin).addPluginToActionList(eq(ruleModeller), commandArgumentCaptor.capture());
commandArgumentCaptor.getValue().execute();
verify(listBox, atLeastOnce()).addItem(keyCaptor.capture(), anyString());
final List<String> keys = keyCaptor.getAllValues();
assertThat(keys).containsExactly("dslSentence",
AbstractRuleModellerSelectorPopup.SECTION_SEPARATOR,
"ChangeFieldValuesOf0($global)",
AbstractRuleModellerSelectorPopup.SECTION_SEPARATOR,
"AddFreeFormDrl",
AbstractRuleModellerSelectorPopup.SECTION_SEPARATOR,
"CallMethodOn0($global)",
AbstractRuleModellerSelectorPopup.SECTION_SEPARATOR,
ACTION_DESCRIPTION);
}
@Test
public void testLoadContentBothDslSentenceAndFactMatched() throws Exception {
reset(listBox, actionPlugin);
when(actionPlugin.getId()).thenReturn(ACTION_ID);
when(actionPlugin.getActionAddDescription()).thenReturn(ACTION_DESCRIPTION);
// ds[l], app[l]icant
when(filterWidget.getFilterText()).thenReturn("l");
popup = new RuleModellerActionSelectorPopup(model,
ruleModeller,
Collections.singletonList(actionPlugin),
0,
oracle);
verify(actionPlugin).addPluginToActionList(eq(ruleModeller), commandArgumentCaptor.capture());
commandArgumentCaptor.getValue().execute();
verify(listBox, atLeastOnce()).addItem(keyCaptor.capture(), anyString());
final List<String> keys = keyCaptor.getAllValues();
assertThat(keys).containsExactly("dslSentence",
AbstractRuleModellerSelectorPopup.SECTION_SEPARATOR,
"ChangeFieldValuesOf0($global)",
AbstractRuleModellerSelectorPopup.SECTION_SEPARATOR,
"InsertFact0(Applicant)",
AbstractRuleModellerSelectorPopup.SECTION_SEPARATOR,
"LogicallyInsertFact0(Applicant)",
AbstractRuleModellerSelectorPopup.SECTION_SEPARATOR,
"AddFreeFormDrl",
AbstractRuleModellerSelectorPopup.SECTION_SEPARATOR,
"CallMethodOn0($global)",
AbstractRuleModellerSelectorPopup.SECTION_SEPARATOR,
ACTION_DESCRIPTION);
}
@Test
public void testLoadContentUnfiltered() throws Exception {
verify(actionPlugin).addPluginToActionList(eq(ruleModeller), commandArgumentCaptor.capture());
commandArgumentCaptor.getValue().execute();
verify(listBox, atLeastOnce()).addItem(keyCaptor.capture(), anyString());
final List<String> keys = keyCaptor.getAllValues();
assertThat(keys).containsExactly("dslSentence",
AbstractRuleModellerSelectorPopup.SECTION_SEPARATOR,
"ChangeFieldValuesOf0($global)",
AbstractRuleModellerSelectorPopup.SECTION_SEPARATOR,
"InsertFact0(Applicant)",
AbstractRuleModellerSelectorPopup.SECTION_SEPARATOR,
"LogicallyInsertFact0(Applicant)",
AbstractRuleModellerSelectorPopup.SECTION_SEPARATOR,
"AddFreeFormDrl",
AbstractRuleModellerSelectorPopup.SECTION_SEPARATOR,
"CallMethodOn0($global)",
AbstractRuleModellerSelectorPopup.SECTION_SEPARATOR,
ACTION_DESCRIPTION);
}
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.commons.jci.compilers;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Reader;
import java.io.Writer;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.ServiceLoader;
import java.util.Set;
import javax.tools.Diagnostic;
import javax.tools.DiagnosticCollector;
import javax.tools.FileObject;
import javax.tools.JavaCompiler;
import javax.tools.JavaFileManager;
import javax.tools.JavaFileObject;
import javax.tools.SimpleJavaFileObject;
import javax.tools.ToolProvider;
import javax.tools.JavaCompiler.CompilationTask;
import org.apache.commons.jci.problems.CompilationProblem;
import org.apache.commons.jci.readers.ResourceReader;
import org.apache.commons.jci.stores.ResourceStore;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
 * JSR-199 ({@code javax.tools}) based implementation of the JCI compiler API.
 * Sources are supplied through a {@link ResourceReader} and presented to the
 * compiler as in-memory {@link SimpleJavaFileObject}s; diagnostics are mapped
 * back to JCI {@link CompilationProblem}s.
 */
public final class Jsr199JavaCompiler extends AbstractJavaCompiler {

    private final Log log = LogFactory.getLog(Jsr199JavaCompiler.class);

    /** Adapts one {@link ResourceReader}-backed source file to the JSR-199 file-object API. */
    private class CompilationUnit extends SimpleJavaFileObject {

        private final ResourceReader reader;
        private final String name;

        public CompilationUnit(final String pName, final ResourceReader pReader) {
            super(URI.create("reader:///" + pName), Kind.SOURCE);
            reader = pReader;
            name = pName;
        }

        @Override
        public boolean delete() {
            log.debug("delete");
            return super.delete();
        }

        @Override
        public CharSequence getCharContent(boolean encodingErrors) throws IOException {
            log.debug("getCharContent of " + name);
            byte[] content = reader.getBytes(name);
            // NOTE(review): decodes with the platform default charset — confirm the
            // sources are always in that encoding, or pass an explicit Charset.
            return new String(content);
        }

        @Override
        public long getLastModified() {
            log.debug("getLastModified");
            return super.getLastModified();
        }

        @Override
        public String getName() {
            log.debug("getName " + super.getName());
            return super.getName();
        }

        @Override
        public boolean isNameCompatible(String simpleName, Kind kind) {
            log.debug("isNameCompatible " + simpleName + " " + kind);
            // Always claim compatibility: the unit names come straight from the
            // resource paths rather than from javac's naming conventions.
            return true;
        }

        @Override
        public InputStream openInputStream() throws IOException {
            log.debug("openInputStream");
            return super.openInputStream();
        }

        @Override
        public OutputStream openOutputStream() throws IOException {
            log.debug("openOutputStream");
            return super.openOutputStream();
        }

        @Override
        public Reader openReader(boolean arg0) throws IOException {
            log.debug("openReader");
            return super.openReader(arg0);
        }

        @Override
        public Writer openWriter() throws IOException {
            log.debug("openWriter");
            return super.openWriter();
        }

        @Override
        public URI toUri() {
            return super.toUri();
        }
    }

    /**
     * Minimal {@link JavaFileManager} that serves the in-memory compilation units.
     * Most lookups return null/false and merely log, which is sufficient for the
     * compiler task built in {@link #compile}.
     */
    private class JciJavaFileManager implements JavaFileManager {

        private final Collection<JavaFileObject> units;

        public JciJavaFileManager(final Collection<JavaFileObject> pUnits, final ResourceStore pStore) {
            // pStore is currently unused; kept for interface symmetry with other JCI backends.
            units = pUnits;
        }

        public void close() {
            log.debug("close");
        }

        public void flush() {
            log.debug("flush");
        }

        public ClassLoader getClassLoader(JavaFileManager.Location location) {
            log.debug("getClassLoader");
            return null;
        }

        public FileObject getFileForInput(JavaFileManager.Location location, String packageName, String relativeName) {
            log.debug("getFileForInput");
            return null;
        }

        public FileObject getFileForOutput(JavaFileManager.Location location, String packageName, String relativeName, FileObject sibling) {
            log.debug("getFileForOutput");
            return null;
        }

        public JavaFileObject getJavaFileForInput(JavaFileManager.Location location, String className, JavaFileObject.Kind kind) {
            log.debug("getJavaFileForInput");
            return null;
        }

        public JavaFileObject getJavaFileForOutput(JavaFileManager.Location location, String className, JavaFileObject.Kind kind,
                                                   FileObject sibling) {
            log.debug("getJavaFileForOutput");
            return null;
        }

        public int isSupportedOption(String option) {
            log.debug("isSupportedOption " + option);
            return 0;
        }

        public boolean handleOption(String current, Iterator<String> remaining) {
            log.debug("handleOption " + current);
            return false;
        }

        public boolean hasLocation(JavaFileManager.Location location) {
            log.debug("hasLocation " + location);
            return false;
        }

        public String inferBinaryName(JavaFileManager.Location location, JavaFileObject file) {
            // Derive the binary name by swapping the source suffix for ".class".
            String s = file.getName().replaceFirst(".java", ".class");
            log.debug("inferBinaryName " + file.getName() + " -> " + s);
            return s;
        }

        public Iterable<JavaFileObject> list(JavaFileManager.Location location, String packageName, Set<JavaFileObject.Kind> kinds,
                                             boolean recurse) {
            // Never offer our units for core packages; let the platform resolve those.
            if (packageName.startsWith("java.")) {
                return new ArrayList<JavaFileObject>();
            }
            log.debug("list " + location + packageName + kinds + recurse);
            return units;
        }

        public boolean isSameFile(FileObject fileobject, FileObject fileobject1) {
            return false;
        }
    }

    private final Jsr199JavaCompilerSettings settings;

    public Jsr199JavaCompiler() {
        settings = new Jsr199JavaCompilerSettings();
    }

    public Jsr199JavaCompiler(final Jsr199JavaCompilerSettings pSettings) {
        settings = pSettings;
    }

    /**
     * Compiles the given resources in memory and collects diagnostics.
     *
     * @param pResourcePaths source paths resolvable via {@code pReader}
     * @param pReader        supplier of the source bytes
     * @param pStore         destination store (currently unused by this backend)
     * @param classLoader    class loader for dependencies (currently unused)
     * @param pSettings      per-call settings (currently unused; renamed from
     *                       {@code settings} to stop shadowing the field)
     * @return the compilation result holding any reported problems
     */
    public CompilationResult compile(final String[] pResourcePaths, final ResourceReader pReader, final ResourceStore pStore,
                                     final ClassLoader classLoader, JavaCompilerSettings pSettings) {
        final Collection<JavaFileObject> units = new ArrayList<JavaFileObject>();
        for (int i = 0; i < pResourcePaths.length; i++) {
            final String sourcePath = pResourcePaths[i];
            log.debug("compiling " + sourcePath);
            units.add(new CompilationUnit(sourcePath, pReader));
        }

        JavaCompiler compiler = ToolProvider.getSystemJavaCompiler();
        if (compiler == null) {
            // Fall back to any compiler registered via the service-provider mechanism.
            // Check hasNext() first: the original called next() unconditionally, which
            // threw NoSuchElementException instead of reaching the null check below.
            final Iterator<JavaCompiler> providers = ServiceLoader.load(JavaCompiler.class).iterator();
            if (providers.hasNext()) {
                compiler = providers.next();
            }
        }
        if (compiler == null) {
            throw new RuntimeException("No java compiler in class path");
        }

        final JavaFileManager fileManager = new JciJavaFileManager(units, pStore);
        final DiagnosticCollector<JavaFileObject> diagnostics = new DiagnosticCollector<JavaFileObject>();
        CompilationTask task = compiler.getTask(null, fileManager, diagnostics, null, null, units);
        if (task.call().booleanValue()) {
            log.debug("compiled");
        }

        final List<Diagnostic<? extends JavaFileObject>> jsrProblems = diagnostics.getDiagnostics();
        final CompilationProblem[] problems = new CompilationProblem[jsrProblems.size()];
        int i = 0;
        for (final Diagnostic<? extends JavaFileObject> jsrProblem : jsrProblems) {
            problems[i++] = new Jsr199CompilationProblem(jsrProblem);
        }
        return new CompilationResult(problems);
    }

    public JavaCompilerSettings createDefaultSettings() {
        return this.settings;
    }
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.stratos.rest.endpoint.util.converter;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.stratos.autoscaler.stub.autoscale.policy.AutoscalePolicy;
import org.apache.stratos.autoscaler.stub.deployment.policy.ApplicationPolicy;
import org.apache.stratos.autoscaler.stub.deployment.policy.DeploymentPolicy;
import org.apache.stratos.autoscaler.stub.partition.NetworkPartitionRef;
import org.apache.stratos.autoscaler.stub.partition.PartitionRef;
import org.apache.stratos.autoscaler.stub.pojo.*;
import org.apache.stratos.autoscaler.stub.pojo.Dependencies;
import org.apache.stratos.autoscaler.stub.pojo.ServiceGroup;
import org.apache.stratos.cloud.controller.stub.CloudControllerServiceCartridgeNotFoundExceptionException;
import org.apache.stratos.cloud.controller.stub.domain.*;
import org.apache.stratos.common.beans.IaasProviderInfoBean;
import org.apache.stratos.common.beans.application.*;
import org.apache.stratos.common.beans.application.domain.mapping.DomainMappingBean;
import org.apache.stratos.common.beans.application.signup.ApplicationSignUpBean;
import org.apache.stratos.common.beans.artifact.repository.ArtifactRepositoryBean;
import org.apache.stratos.common.beans.cartridge.*;
import org.apache.stratos.common.beans.kubernetes.*;
import org.apache.stratos.common.beans.partition.NetworkPartitionBean;
import org.apache.stratos.common.beans.partition.NetworkPartitionReferenceBean;
import org.apache.stratos.common.beans.partition.PartitionBean;
import org.apache.stratos.common.beans.partition.PartitionReferenceBean;
import org.apache.stratos.common.beans.policy.autoscale.*;
import org.apache.stratos.common.beans.policy.deployment.ApplicationPolicyBean;
import org.apache.stratos.common.beans.policy.deployment.DeploymentPolicyBean;
import org.apache.stratos.common.beans.topology.*;
import org.apache.stratos.common.client.AutoscalerServiceClient;
import org.apache.stratos.common.client.CloudControllerServiceClient;
import org.apache.stratos.common.util.CommonUtil;
import org.apache.stratos.manager.service.stub.domain.application.signup.ApplicationSignUp;
import org.apache.stratos.manager.service.stub.domain.application.signup.ArtifactRepository;
import org.apache.stratos.manager.service.stub.domain.application.signup.DomainMapping;
import org.apache.stratos.messaging.domain.application.Application;
import org.apache.stratos.messaging.domain.application.Group;
import org.apache.stratos.messaging.domain.instance.ApplicationInstance;
import org.apache.stratos.messaging.domain.instance.ClusterInstance;
import org.apache.stratos.messaging.domain.instance.GroupInstance;
import org.apache.stratos.messaging.domain.topology.Cluster;
import org.apache.stratos.messaging.domain.topology.KubernetesService;
import org.apache.stratos.messaging.domain.topology.Port;
import org.apache.stratos.rest.endpoint.api.StratosApiV41Utils;
import org.apache.stratos.rest.endpoint.exception.RestAPIException;
import org.apache.stratos.rest.endpoint.exception.ServiceGroupDefinitionException;
import org.wso2.carbon.context.PrivilegedCarbonContext;
import org.wso2.carbon.stratos.common.beans.TenantInfoBean;
import java.rmi.RemoteException;
import java.util.*;
public class ObjectConverter {
private static final Log log = LogFactory.getLog(ObjectConverter.class);
public static final String CLUSTER_PROPERTY = "cluster";
/**
 * Maps a REST-layer {@link CartridgeBean} onto the cloud controller stub
 * {@link Cartridge}, copying identity, deployment, port, persistence, IaaS
 * and property information.
 *
 * @param cartridgeBean REST bean; may be null
 * @param cartridgeUuid UUID to assign to the stub cartridge
 * @param tenantId      owning tenant
 * @return populated stub cartridge, or null when the bean is null
 * @throws RestAPIException propagated from nested property conversion
 */
public static Cartridge convertCartridgeBeanToStubCartridgeConfig(
        CartridgeBean cartridgeBean, String cartridgeUuid, int tenantId) throws RestAPIException {
    if (cartridgeBean == null) {
        return null;
    }

    final Cartridge stubCartridge = new Cartridge();

    // identity and descriptive fields
    stubCartridge.setUuid(cartridgeUuid);
    stubCartridge.setTenantId(tenantId);
    stubCartridge.setType(cartridgeBean.getType());
    stubCartridge.setHostName(cartridgeBean.getHost());
    stubCartridge.setProvider(cartridgeBean.getProvider());
    stubCartridge.setCategory(cartridgeBean.getCategory());
    stubCartridge.setVersion(cartridgeBean.getVersion());
    stubCartridge.setMultiTenant(cartridgeBean.isMultiTenant());
    stubCartridge.setDisplayName(cartridgeBean.getDisplayName());
    stubCartridge.setDescription(cartridgeBean.getDescription());
    stubCartridge.setLoadBalancingIPType(cartridgeBean.getLoadBalancingIPType());

    final List<String> metadataKeys = cartridgeBean.getMetadataKeys();
    if (metadataKeys != null) {
        stubCartridge.setMetadataKeys(metadataKeys.toArray(new String[metadataKeys.size()]));
    }

    // deployment information
    if (cartridgeBean.getDeployment() != null) {
        stubCartridge.setBaseDir(cartridgeBean.getDeployment().getBaseDir());
        final List<String> deploymentDirs = cartridgeBean.getDeployment().getDir();
        if (deploymentDirs != null && !deploymentDirs.isEmpty()) {
            stubCartridge.setDeploymentDirs(deploymentDirs.toArray(new String[deploymentDirs.size()]));
        }
    }

    // port mapping
    if (cartridgeBean.getPortMapping() != null && !cartridgeBean.getPortMapping().isEmpty()) {
        stubCartridge.setPortMappings(convertPortMappingBeansToStubPortMappings(cartridgeBean.getPortMapping()));
    }

    // persistence mapping
    if (cartridgeBean.getPersistence() != null) {
        stubCartridge.setPersistence(convertPersistenceBeanToStubPersistence(cartridgeBean.getPersistence()));
    }

    // iaas providers
    if (cartridgeBean.getIaasProvider() != null && !cartridgeBean.getIaasProvider().isEmpty()) {
        stubCartridge.setIaasConfigs(convertIaasProviderBeansToStubIaasConfig(cartridgeBean.getIaasProvider(),
                tenantId));
    }

    // properties
    if (cartridgeBean.getProperty() != null && !cartridgeBean.getProperty().isEmpty()) {
        stubCartridge.setProperties(convertPropertyBeansToCCStubProperties(cartridgeBean.getProperty(), tenantId));
    }

    return stubCartridge;
}
/**
 * Converts REST-layer {@link PortMappingBean}s into cloud controller stub
 * {@link PortMapping}s, preserving list order.
 *
 * @param portMappingBeans beans to convert; may be null
 * @return stub array in the same order, or null when the input is null
 */
private static PortMapping[] convertPortMappingBeansToStubPortMappings(List<PortMappingBean> portMappingBeans) {
    if (portMappingBeans == null) {
        return null;
    }
    final PortMapping[] stubPortMappings = new PortMapping[portMappingBeans.size()];
    int index = 0;
    for (PortMappingBean bean : portMappingBeans) {
        PortMapping stub = new PortMapping();
        stub.setName(bean.getName());
        stub.setProtocol(bean.getProtocol());
        stub.setPort(bean.getPort());
        stub.setProxyPort(bean.getProxyPort());
        stub.setKubernetesPortType(bean.getKubernetesPortType());
        stubPortMappings[index++] = stub;
    }
    return stubPortMappings;
}
/**
 * Converts cloud controller stub {@link PortMapping}s back into REST-layer
 * {@link PortMappingBean}s.
 *
 * @param portMappings stub port mappings; may be null or empty
 * @return bean list in the same order, or null when there is nothing to convert
 */
private static List<PortMappingBean> convertPortMappingsToStubPortMappingBeans(
        PortMapping[] portMappings) {
    // Guard the length too: the original indexed [0] unconditionally and threw
    // ArrayIndexOutOfBoundsException on an empty array. The [0]==null check is
    // kept because Axis-generated stubs can return a single-null-element array.
    if (portMappings == null || portMappings.length == 0 || portMappings[0] == null) {
        return null;
    }
    List<PortMappingBean> portMappingBeans = new ArrayList<PortMappingBean>();
    for (PortMapping portMapping : portMappings) {
        PortMappingBean portMappingBean = new PortMappingBean();
        portMappingBean.setName(portMapping.getName());
        portMappingBean.setProtocol(portMapping.getProtocol());
        portMappingBean.setPort(portMapping.getPort());
        portMappingBean.setProxyPort(portMapping.getProxyPort());
        portMappingBean.setKubernetesPortType(portMapping.getKubernetesPortType());
        portMappingBeans.add(portMappingBean);
    }
    return portMappingBeans;
}
/**
 * Converts cloud controller stub {@link IaasConfig} entries into REST-layer
 * {@link IaasProviderBean}s. (The previous javadoc wrongly described this as a
 * Persistence conversion.)
 *
 * @param iaasConfigs stub IaaS configurations; may be null or empty
 * @return bean list in the same order, or null when there is nothing to convert
 */
private static List<IaasProviderBean> convertIaaSProviderToIaaSProviderBean(IaasConfig[] iaasConfigs) {
    // Guard the length too: the original indexed [0] unconditionally and threw
    // ArrayIndexOutOfBoundsException on an empty array.
    if (iaasConfigs == null || iaasConfigs.length == 0 || iaasConfigs[0] == null) {
        return null;
    }
    List<IaasProviderBean> iaasProviderBeans = new ArrayList<IaasProviderBean>();
    for (IaasConfig iaasConfig : iaasConfigs) {
        IaasProviderBean iaasProviderBean = new IaasProviderBean();
        iaasProviderBean.setType(iaasConfig.getType());
        iaasProviderBean.setImageId(iaasConfig.getImageId());
        iaasProviderBean.setName(iaasConfig.getName());
        iaasProviderBean.setClassName(iaasConfig.getClassName());
        iaasProviderBean.setCredential(iaasConfig.getCredential());
        iaasProviderBean.setIdentity(iaasConfig.getIdentity());
        iaasProviderBean.setProvider(iaasConfig.getProvider());
        if (iaasConfig.getProperties() != null) {
            // set the Properties instance to IaasConfig instance
            iaasProviderBean.setProperty(convertCCStubPropertiesToPropertyBeanList(
                    iaasConfig.getProperties()));
        }
        if (iaasConfig.getNetworkInterfaces() != null) {
            iaasProviderBean.setNetworkInterfaces(ObjectConverter.
                    convertNetworkInterfacesToNetworkInterfaceBeans(
                            iaasConfig.getNetworkInterfaces()));
        }
        iaasProviderBeans.add(iaasProviderBean);
    }
    return iaasProviderBeans;
}
/**
 * Converts REST-layer {@link IaasProviderBean}s into cloud controller stub
 * {@link IaasConfig}s, including nested properties and network interfaces.
 *
 * @param iaasProviderBeans beans to convert; may be null
 * @param tenantId          owning tenant, forwarded to property conversion
 * @return stub array in the same order, or null when the input is null
 * @throws RestAPIException propagated from nested property conversion
 */
private static IaasConfig[] convertIaasProviderBeansToStubIaasConfig(List<IaasProviderBean> iaasProviderBeans, int tenantId)
        throws RestAPIException {
    if (iaasProviderBeans == null) {
        return null;
    }
    final IaasConfig[] iaasConfigs = new IaasConfig[iaasProviderBeans.size()];
    int index = 0;
    for (IaasProviderBean providerBean : iaasProviderBeans) {
        IaasConfig iaasConfig = new IaasConfig();
        iaasConfig.setType(providerBean.getType());
        iaasConfig.setImageId(providerBean.getImageId());
        iaasConfig.setName(providerBean.getName());
        iaasConfig.setClassName(providerBean.getClassName());
        iaasConfig.setCredential(providerBean.getCredential());
        iaasConfig.setIdentity(providerBean.getIdentity());
        iaasConfig.setProvider(providerBean.getProvider());
        if (providerBean.getProperty() != null && !providerBean.getProperty().isEmpty()) {
            // set the Properties instance to IaasConfig instance
            iaasConfig.setProperties(convertPropertyBeansToCCStubProperties(providerBean.getProperty(), tenantId));
        }
        if (providerBean.getNetworkInterfaces() != null
                && !providerBean.getNetworkInterfaces().isEmpty()) {
            iaasConfig.setNetworkInterfaces(ObjectConverter.convertNetworkInterfaceBeansToNetworkInterfaces(
                    providerBean.getNetworkInterfaces()));
        }
        iaasConfigs[index++] = iaasConfig;
    }
    return iaasConfigs;
}
/**
 * Converts a REST-layer {@code PersistenceBean} into the cloud controller stub
 * {@link Persistence}, converting each volume definition. A volume's size is
 * parsed only when no existing volume id is given.
 *
 * @param persistenceBean REST persistence bean; may be null
 * @return stub persistence (persistenceRequired always true), or null for null input
 */
private static Persistence convertPersistenceBeanToStubPersistence(
        org.apache.stratos.common.beans.cartridge.PersistenceBean persistenceBean) {
    if (persistenceBean == null) {
        return null;
    }
    Persistence persistence = new Persistence();
    persistence.setPersistenceRequired(true);
    List<VolumeBean> volumeBeans = persistenceBean.getVolume();
    if (volumeBeans == null) {
        // The original dereferenced the volume list unconditionally and NPE'd on
        // a persistence bean without volumes; treat that case as "no volumes".
        persistence.setVolumes(new Volume[0]);
        return persistence;
    }
    Volume[] volumes = new Volume[volumeBeans.size()];
    int i = 0;
    for (VolumeBean volumeBean : volumeBeans) {
        Volume volume = new Volume();
        volume.setId(volumeBean.getId());
        volume.setVolumeId(volumeBean.getVolumeId());
        if (StringUtils.isEmpty(volume.getVolumeId())) {
            // size only matters for volumes that still have to be created
            volume.setSize(Integer.parseInt(volumeBean.getSize()));
        }
        volume.setDevice(volumeBean.getDevice());
        volume.setRemoveOntermination(volumeBean.isRemoveOnTermination());
        volume.setMappingPath(volumeBean.getMappingPath());
        volume.setSnapshotId(volumeBean.getSnapshotId());
        volumes[i++] = volume;
    }
    persistence.setVolumes(volumes);
    return persistence;
}
/**
 * Converts REST-layer property beans into cloud controller stub
 * {@code Properties}. The value of the special {@code cluster} property is
 * translated from a Kubernetes cluster id to the tenant-scoped cluster UUID.
 *
 * @param propertyBeans properties to convert; may be null
 * @param tenantId      tenant used to resolve the Kubernetes cluster UUID
 * @return stub properties container, or null when the input is null
 * @throws RestAPIException when the Kubernetes cluster UUID lookup fails
 */
private static org.apache.stratos.cloud.controller.stub.Properties convertPropertyBeansToCCStubProperties(
        List<org.apache.stratos.common.beans.PropertyBean> propertyBeans, int tenantId) throws RestAPIException {
    if (propertyBeans == null) {
        return null;
    }
    final org.apache.stratos.cloud.controller.stub.Property[] stubPropertyArray =
            new org.apache.stratos.cloud.controller.stub.Property[propertyBeans.size()];
    int index = 0;
    for (org.apache.stratos.common.beans.PropertyBean propertyBean : propertyBeans) {
        org.apache.stratos.cloud.controller.stub.Property stubProperty
                = new org.apache.stratos.cloud.controller.stub.Property();
        stubProperty.setName(propertyBean.getName());
        if (propertyBean.getName().equals(CLUSTER_PROPERTY)) {
            // "cluster" carries a Kubernetes cluster id; resolve it to the tenant-scoped UUID
            stubProperty.setValue(StratosApiV41Utils.getKubernetesClusterUuidByTenant(propertyBean.getValue(), tenantId));
        } else {
            stubProperty.setValue(propertyBean.getValue());
        }
        stubPropertyArray[index++] = stubProperty;
    }
    org.apache.stratos.cloud.controller.stub.Properties stubProperties
            = new org.apache.stratos.cloud.controller.stub.Properties();
    stubProperties.setProperties(stubPropertyArray);
    return stubProperties;
}
/**
 * Converts REST-layer {@link NetworkInterfaceBean}s into the cloud controller
 * stub {@link NetworkInterfaces} container, including any floating networks.
 *
 * @param networkInterfaceBeans beans to convert; may be null
 * @return populated container, or null when the input is null
 */
private static NetworkInterfaces convertNetworkInterfaceBeansToNetworkInterfaces(
        List<NetworkInterfaceBean> networkInterfaceBeans) {
    if (networkInterfaceBeans == null) {
        return null;
    }
    final NetworkInterface[] stubInterfaces = new NetworkInterface[networkInterfaceBeans.size()];
    int index = 0;
    for (NetworkInterfaceBean interfaceBean : networkInterfaceBeans) {
        NetworkInterface stubInterface = new NetworkInterface();
        stubInterface.setNetworkUuid(interfaceBean.getNetworkUuid());
        stubInterface.setFixedIp(interfaceBean.getFixedIp());
        stubInterface.setPortUuid(interfaceBean.getPortUuid());
        if (interfaceBean.getFloatingNetworks() != null && !interfaceBean.getFloatingNetworks().isEmpty()) {
            stubInterface.setFloatingNetworks(
                    ObjectConverter.convertFloatingNetworkBeansToFloatingNetworks(interfaceBean.getFloatingNetworks()));
        }
        stubInterfaces[index++] = stubInterface;
    }
    NetworkInterfaces networkInterfaces = new NetworkInterfaces();
    networkInterfaces.setNetworkInterfaces(stubInterfaces);
    return networkInterfaces;
}
/**
 * Converts the cloud controller stub {@link NetworkInterfaces} container back
 * into REST-layer {@link NetworkInterfaceBean}s.
 *
 * @param networkInterfaces stub container; may be null or empty
 * @return bean list, or null when there is nothing to convert
 */
private static List<NetworkInterfaceBean> convertNetworkInterfacesToNetworkInterfaceBeans(
        NetworkInterfaces networkInterfaces) {
    // Guard the length too: the original indexed [0] unconditionally and threw
    // ArrayIndexOutOfBoundsException on an empty interface array.
    if (networkInterfaces == null || networkInterfaces.getNetworkInterfaces() == null ||
            networkInterfaces.getNetworkInterfaces().length == 0 ||
            networkInterfaces.getNetworkInterfaces()[0] == null) {
        return null;
    }
    List<NetworkInterfaceBean> networkInterfaceBeans = new ArrayList<NetworkInterfaceBean>();
    for (NetworkInterface networkInterface : networkInterfaces.getNetworkInterfaces()) {
        NetworkInterfaceBean networkInterfaceBean = new NetworkInterfaceBean();
        networkInterfaceBean.setNetworkUuid(networkInterface.getNetworkUuid());
        networkInterfaceBean.setFixedIp(networkInterface.getFixedIp());
        networkInterfaceBean.setPortUuid(networkInterface.getPortUuid());
        // Same empty-array guard as above for the nested floating networks.
        if (networkInterface.getFloatingNetworks() != null &&
                networkInterface.getFloatingNetworks().getFloatingNetworks() != null &&
                networkInterface.getFloatingNetworks().getFloatingNetworks().length > 0 &&
                networkInterface.getFloatingNetworks().getFloatingNetworks()[0] != null) {
            networkInterfaceBean.setFloatingNetworks(
                    ObjectConverter.convertFloatingNetworksToFloatingNetworkBeans(
                            networkInterface.getFloatingNetworks()));
        }
        networkInterfaceBeans.add(networkInterfaceBean);
    }
    return networkInterfaceBeans;
}
/**
 * Converts REST-layer {@link FloatingNetworkBean}s into the cloud controller
 * stub {@link FloatingNetworks} container.
 *
 * @param floatingNetworkBeans beans to convert; may be null
 * @return populated container, or null when the input is null
 */
private static FloatingNetworks convertFloatingNetworkBeansToFloatingNetworks(
        List<FloatingNetworkBean> floatingNetworkBeans) {
    if (floatingNetworkBeans == null) {
        return null;
    }
    final FloatingNetwork[] stubFloatingNetworks = new FloatingNetwork[floatingNetworkBeans.size()];
    int index = 0;
    for (FloatingNetworkBean networkBean : floatingNetworkBeans) {
        FloatingNetwork stubNetwork = new FloatingNetwork();
        stubNetwork.setName(networkBean.getName());
        stubNetwork.setNetworkUuid(networkBean.getNetworkUuid());
        stubNetwork.setFloatingIP(networkBean.getFloatingIP());
        stubFloatingNetworks[index++] = stubNetwork;
    }
    FloatingNetworks floatingNetworks = new FloatingNetworks();
    floatingNetworks.setFloatingNetworks(stubFloatingNetworks);
    return floatingNetworks;
}
/**
 * Converts the cloud controller stub {@link FloatingNetworks} container back
 * into REST-layer {@link FloatingNetworkBean}s.
 *
 * @param floatingNetworks stub container; may be null
 * @return bean list, or null when there is nothing to convert
 */
private static List<FloatingNetworkBean> convertFloatingNetworksToFloatingNetworkBeans(
        FloatingNetworks floatingNetworks) {
    // Every sibling converter in this class returns null for missing input;
    // do the same here instead of throwing a NullPointerException.
    if (floatingNetworks == null || floatingNetworks.getFloatingNetworks() == null) {
        return null;
    }
    List<FloatingNetworkBean> floatingNetworkBeans = new ArrayList<FloatingNetworkBean>();
    for (FloatingNetwork floatingNetwork : floatingNetworks.getFloatingNetworks()) {
        FloatingNetworkBean floatingNetworkBean = new FloatingNetworkBean();
        floatingNetworkBean.setName(floatingNetwork.getName());
        floatingNetworkBean.setNetworkUuid(floatingNetwork.getNetworkUuid());
        floatingNetworkBean.setFloatingIP(floatingNetwork.getFloatingIP());
        floatingNetworkBeans.add(floatingNetworkBean);
    }
    return floatingNetworkBeans;
}
/**
 * Converts a REST-layer {@link PartitionBean} into a cloud controller stub
 * partition. Note that a fresh random UUID is generated on every call.
 *
 * @param partition REST partition bean; may be null
 * @param tenantId  owning tenant
 * @return stub partition, or null when the bean is null
 * @throws RestAPIException propagated from property conversion
 */
private static org.apache.stratos.cloud.controller.stub.domain.Partition convertPartitionToStubPartition
        (PartitionBean partition, int tenantId) throws RestAPIException {
    if (partition == null) {
        return null;
    }
    final org.apache.stratos.cloud.controller.stub.domain.Partition stubPartition = new
            org.apache.stratos.cloud.controller.stub.domain.Partition();
    stubPartition.setId(partition.getId());
    // each converted partition gets its own newly generated UUID
    stubPartition.setUuid(UUID.randomUUID().toString());
    stubPartition.setTenantId(tenantId);
    stubPartition.setProperties(convertPropertyBeansToCCStubProperties(partition.getProperty(), tenantId));
    return stubPartition;
}
/**
 * Converts a REST-layer {@code AutoscalePolicyBean} into the autoscaler stub
 * {@code AutoscalePolicy}, including the optional load thresholds.
 *
 * @param autoscalePolicyBean REST policy bean; may be null
 * @param uuid                UUID to assign to the stub policy
 * @param tenantId            owning tenant
 * @return stub policy, or null when the bean is null
 */
public static org.apache.stratos.autoscaler.stub.autoscale.policy.AutoscalePolicy convertToCCAutoscalerPojo(
        AutoscalePolicyBean autoscalePolicyBean, String uuid, int tenantId) {
    if (autoscalePolicyBean == null) {
        return null;
    }
    org.apache.stratos.autoscaler.stub.autoscale.policy.AutoscalePolicy autoscalePolicy = new
            org.apache.stratos.autoscaler.stub.autoscale.policy.AutoscalePolicy();
    autoscalePolicy.setId(autoscalePolicyBean.getId());
    autoscalePolicy.setUuid(uuid);
    // Set once: the original called setTenantId(tenantId) twice with the same value.
    autoscalePolicy.setTenantId(tenantId);
    autoscalePolicy.setDescription(autoscalePolicyBean.getDescription());
    autoscalePolicy.setDisplayName(autoscalePolicyBean.getDisplayName());
    if (autoscalePolicyBean.getLoadThresholds() != null) {
        org.apache.stratos.autoscaler.stub.autoscale.policy.LoadThresholds loadThresholds = new
                org.apache.stratos.autoscaler.stub.autoscale.policy.LoadThresholds();
        if (autoscalePolicyBean.getLoadThresholds().getLoadAverage() != null) {
            // set load average information
            loadThresholds.setLoadAverageThreshold(
                    autoscalePolicyBean.getLoadThresholds().getLoadAverage().getThreshold());
        }
        if (autoscalePolicyBean.getLoadThresholds().getRequestsInFlight() != null) {
            // set request in flight information
            loadThresholds.setRequestsInFlightThreshold(
                    autoscalePolicyBean.getLoadThresholds().getRequestsInFlight().getThreshold());
        }
        if (autoscalePolicyBean.getLoadThresholds().getMemoryConsumption() != null) {
            // set memory consumption information
            loadThresholds.setMemoryConsumptionThreshold(
                    autoscalePolicyBean.getLoadThresholds().
                            getMemoryConsumption().getThreshold());
        }
        autoscalePolicy.setLoadThresholds(loadThresholds);
    }
    return autoscalePolicy;
}
/**
 * Converts a cloud controller stub network partition into a REST API bean,
 * including nested partitions and properties.
 *
 * @param stubNetworkPartition source stub object; may be null
 * @return the bean, or null when the input is null
 */
public static NetworkPartitionBean convertCCStubNetworkPartitionToNetworkPartition(
        org.apache.stratos.cloud.controller.stub.domain.NetworkPartition stubNetworkPartition) {
    if (stubNetworkPartition == null) {
        return null;
    }
    NetworkPartitionBean networkPartition = new NetworkPartitionBean();
    networkPartition.setId(stubNetworkPartition.getId());
    networkPartition.setProvider(stubNetworkPartition.getProvider());
    if (stubNetworkPartition.getPartitions() != null) {
        List<PartitionBean> partitionList = new ArrayList<PartitionBean>();
        for (org.apache.stratos.cloud.controller.stub.domain.Partition stubPartition
                : stubNetworkPartition.getPartitions()) {
            if (stubPartition != null) {
                partitionList.add(convertCCStubPartitionToPartition(stubPartition));
            }
        }
        networkPartition.setPartitions(partitionList);
    }
    if (stubNetworkPartition.getProperties() != null) {
        List<org.apache.stratos.common.beans.PropertyBean> propertyBeanList
                = new ArrayList<org.apache.stratos.common.beans.PropertyBean>();
        // Fix: the outer null check was duplicated here; only the inner
        // property array needs re-checking before iterating.
        if (stubNetworkPartition.getProperties().getProperties() != null) {
            for (org.apache.stratos.cloud.controller.stub.Property stubProperty :
                    stubNetworkPartition.getProperties().getProperties()) {
                if (stubProperty != null) {
                    propertyBeanList.add(convertStubPropertyToPropertyBean(stubProperty));
                }
            }
        }
        networkPartition.setProperties(propertyBeanList);
    }
    return networkPartition;
}
/**
 * Converts an autoscaler stub application policy to a REST API bean.
 *
 * @param applicationPolicy source stub policy; may be null
 * @return the bean, or null when the input is null
 */
public static ApplicationPolicyBean convertASStubApplicationPolicyToApplicationPolicy(ApplicationPolicy applicationPolicy) {
    if (applicationPolicy == null) {
        return null;
    }
    ApplicationPolicyBean applicationPolicyBean = new ApplicationPolicyBean();
    applicationPolicyBean.setId(applicationPolicy.getId());
    applicationPolicyBean.setAlgorithm(applicationPolicy.getAlgorithm());
    applicationPolicyBean.setNetworkPartitions(applicationPolicy.getNetworkPartitions());
    if (applicationPolicy.getProperties() != null) {
        List<org.apache.stratos.common.beans.PropertyBean> propertyBeanList
                = new ArrayList<org.apache.stratos.common.beans.PropertyBean>();
        // Robustness fix: guard against a null inner property array, which previously
        // caused an NPE in the for-each loop. Other converters in this class already
        // perform this nested check.
        if (applicationPolicy.getProperties().getProperties() != null) {
            for (org.apache.stratos.autoscaler.stub.Property stubProperty
                    : applicationPolicy.getProperties().getProperties()) {
                if (stubProperty != null) {
                    org.apache.stratos.common.beans.PropertyBean propertyBean
                            = new org.apache.stratos.common.beans.PropertyBean();
                    propertyBean.setName(stubProperty.getName());
                    propertyBean.setValue(String.valueOf(stubProperty.getValue()));
                    propertyBeanList.add(propertyBean);
                }
            }
        }
        applicationPolicyBean.setProperties(propertyBeanList);
    }
    return applicationPolicyBean;
}
/**
 * Maps an array of autoscaler stub application policies to REST API beans.
 * A null input yields an empty array, never null.
 */
public static ApplicationPolicyBean[] convertASStubApplicationPoliciesToApplicationPolicies(
        ApplicationPolicy[] applicationPolicies) {
    if (applicationPolicies == null) {
        return new ApplicationPolicyBean[0];
    }
    ApplicationPolicyBean[] beans = new ApplicationPolicyBean[applicationPolicies.length];
    for (int index = 0; index < beans.length; index++) {
        beans[index] = convertASStubApplicationPolicyToApplicationPolicy(applicationPolicies[index]);
    }
    return beans;
}
/**
 * Builds an autoscaler stub Properties object from a list of common property beans.
 * Returns null when the list is null or empty (kept for caller compatibility).
 */
private static org.apache.stratos.autoscaler.stub.Properties getASPropertiesFromCommonProperties(
        List<org.apache.stratos.common.beans.PropertyBean> propertyBeans) {
    if (propertyBeans == null || propertyBeans.isEmpty()) {
        return null;
    }
    org.apache.stratos.autoscaler.stub.Property[] propertyArray =
            new org.apache.stratos.autoscaler.stub.Property[propertyBeans.size()];
    int index = 0;
    // Copy name/value pairs in list order.
    for (org.apache.stratos.common.beans.PropertyBean bean : propertyBeans) {
        org.apache.stratos.autoscaler.stub.Property property =
                new org.apache.stratos.autoscaler.stub.Property();
        property.setName(bean.getName());
        property.setValue(bean.getValue());
        propertyArray[index++] = property;
    }
    org.apache.stratos.autoscaler.stub.Properties properties =
            new org.apache.stratos.autoscaler.stub.Properties();
    properties.setProperties(propertyArray);
    return properties;
}
/** Converts a cloud controller stub partition to a REST API partition bean; null-safe. */
private static PartitionBean convertCCStubPartitionToPartition(org.apache.stratos.cloud.controller.stub.domain.Partition stubPartition) {
    if (stubPartition == null) {
        return null;
    }
    PartitionBean bean = new PartitionBean();
    bean.setId(stubPartition.getId());
    bean.setDescription(stubPartition.getDescription());
    if (stubPartition.getProperties() != null) {
        List<org.apache.stratos.common.beans.PropertyBean> propertyBeans =
                new ArrayList<org.apache.stratos.common.beans.PropertyBean>();
        org.apache.stratos.cloud.controller.stub.Property[] stubProperties =
                stubPartition.getProperties().getProperties();
        if (stubProperties != null) {
            for (org.apache.stratos.cloud.controller.stub.Property stubProperty : stubProperties) {
                if (stubProperty != null) {
                    propertyBeans.add(convertStubPropertyToPropertyBean(stubProperty));
                }
            }
        }
        bean.setProperty(propertyBeans);
    }
    return bean;
}
/**
 * Converts a network partition REST bean into a cloud controller stub network partition.
 *
 * @throws RestAPIException if nested partition/property conversion fails
 */
public static org.apache.stratos.cloud.controller.stub.domain.NetworkPartition
convertNetworkPartitionToCCStubNetworkPartition(NetworkPartitionBean networkPartitionBean,
                                                String networkPartitionUuid, int tenantId) throws RestAPIException {
    org.apache.stratos.cloud.controller.stub.domain.NetworkPartition networkPartition =
            new org.apache.stratos.cloud.controller.stub.domain.NetworkPartition();
    networkPartition.setId(networkPartitionBean.getId());
    networkPartition.setProvider(networkPartitionBean.getProvider());
    networkPartition.setUuid(networkPartitionUuid);
    networkPartition.setTenantId(tenantId);
    List<PartitionBean> partitions = networkPartitionBean.getPartitions();
    if (partitions != null && !partitions.isEmpty()) {
        networkPartition.setPartitions(convertToStubPartitions(partitions, tenantId));
    }
    List<org.apache.stratos.common.beans.PropertyBean> properties = networkPartitionBean.getProperties();
    if (properties != null && !properties.isEmpty()) {
        networkPartition.setProperties(convertPropertyBeansToCCStubProperties(properties, tenantId));
    }
    return networkPartition;
}
/** Maps autoscaler stub network partition refs to REST API network partition reference beans. */
private static List<NetworkPartitionReferenceBean> convertASStubNetworkPartitionsToNetworkPartitionReferences(
        NetworkPartitionRef[] networkPartitions) {
    List<NetworkPartitionReferenceBean> references = new ArrayList<NetworkPartitionReferenceBean>();
    for (NetworkPartitionRef ref : networkPartitions) {
        NetworkPartitionReferenceBean reference = new NetworkPartitionReferenceBean();
        reference.setId(ref.getId());
        reference.setPartitionAlgo(ref.getPartitionAlgo());
        reference.setPartitions(convertASStubPartitionsToPartitions(ref.getPartitionRefs()));
        references.add(reference);
    }
    return references;
}
/**
 * Converts a messaging-topology Cluster into a REST API ClusterBean, copying
 * cluster-level metadata, every cluster instance, every member, and all host
 * names. Missing member default IPs are reported as the literal string "NULL".
 *
 * @param cluster topology cluster to convert (assumed non-null — TODO confirm callers)
 * @param alias   subscription alias to record on the bean
 * @return populated ClusterBean
 */
public static ClusterBean convertClusterToClusterBean(Cluster cluster, String alias) {
    ClusterBean clusterBean = new ClusterBean();
    clusterBean.setAlias(alias);
    clusterBean.setServiceName(cluster.getServiceName());
    clusterBean.setClusterId(cluster.getClusterId());
    clusterBean.setLbCluster(cluster.isLbCluster());
    clusterBean.setTenantRange(cluster.getTenantRange());
    clusterBean.setProperty(convertJavaUtilPropertiesToPropertyBeans(cluster.getProperties()));
    clusterBean.setMember(new ArrayList<MemberBean>());
    clusterBean.setHostNames(new ArrayList<String>());
    // Copy the cluster instance contexts (id + status), when any exist.
    Collection<ClusterInstance> clusterInstances = cluster.getClusterInstances();
    List<InstanceBean> instancesList = new ArrayList<InstanceBean>();
    if (clusterInstances != null) {
        for (ClusterInstance clusterInstance : clusterInstances) {
            InstanceBean instance = new InstanceBean();
            instance.setInstanceId(clusterInstance.getInstanceId());
            instance.setStatus(clusterInstance.getStatus().toString());
            instancesList.add(instance);
        }
        clusterBean.setInstances(instancesList);
    }
    // Flatten each topology member into a MemberBean on the cluster bean.
    for (org.apache.stratos.messaging.domain.topology.Member member : cluster.getMembers()) {
        MemberBean memberBean = new MemberBean();
        memberBean.setServiceName(member.getServiceName());
        memberBean.setClusterId(member.getClusterId());
        memberBean.setMemberId(member.getMemberId());
        memberBean.setClusterInstanceId(member.getClusterInstanceId());
        memberBean.setLbClusterId(member.getLbClusterId());
        memberBean.setNetworkPartitionId(member.getNetworkPartitionId());
        memberBean.setPartitionId(member.getPartitionId());
        // Absent IPs are rendered as the string "NULL" rather than a Java null.
        if (member.getDefaultPrivateIP() == null) {
            memberBean.setDefaultPrivateIP("NULL");
        } else {
            memberBean.setDefaultPrivateIP(member.getDefaultPrivateIP());
        }
        if (member.getDefaultPublicIP() == null) {
            memberBean.setDefaultPublicIP("NULL");
        } else {
            memberBean.setDefaultPublicIP(member.getDefaultPublicIP());
        }
        memberBean.setMemberPrivateIPs(member.getMemberPrivateIPs());
        memberBean.setMemberPublicIPs(member.getMemberPublicIPs());
        memberBean.setStatus(member.getStatus().toString());
        memberBean.setProperty(convertJavaUtilPropertiesToPropertyBeans(member.getProperties()));
        clusterBean.getMember().add(memberBean);
    }
    // Host names are copied one by one into the list created above.
    for (String hostname : cluster.getHostNames()) {
        clusterBean.getHostNames().add(hostname);
    }
    return clusterBean;
}
/**
 * Builds a ClusterInstanceBean for the given cluster instance, including only
 * the members whose clusterInstanceId matches {@code instanceId}. For each
 * matching member the network partition is resolved via the cloud controller
 * service so the member's partition UUID can be mapped back to a partition id.
 *
 * @param instanceId cluster instance to project
 * @param cluster    topology cluster containing the members
 * @param alias      subscription alias to record on the bean
 * @return populated ClusterInstanceBean
 * @throws RestAPIException if the cloud controller network partition lookup fails
 */
public static ClusterInstanceBean convertClusterToClusterInstanceBean(String instanceId,
                                                                      Cluster cluster, String alias) throws RestAPIException {
    ClusterInstanceBean clusterInstanceBean = new ClusterInstanceBean();
    clusterInstanceBean.setAlias(alias);
    clusterInstanceBean.setServiceName(cluster.getServiceName());
    clusterInstanceBean.setClusterId(cluster.getClusterId());
    clusterInstanceBean.setInstanceId(instanceId);
    // NOTE(review): parent instance id is set to the instance id itself — confirm intended.
    clusterInstanceBean.setParentInstanceId(instanceId);
    if (cluster.getInstanceContexts(instanceId) != null) {
        clusterInstanceBean.setStatus(cluster.getInstanceContexts(instanceId).
                getStatus().toString());
    }
    clusterInstanceBean.setTenantRange(cluster.getTenantRange());
    clusterInstanceBean.setMember(new ArrayList<MemberBean>());
    clusterInstanceBean.setHostNames(new ArrayList<String>());
    for (org.apache.stratos.messaging.domain.topology.Member member : cluster.getMembers()) {
        // Only members belonging to this cluster instance are included.
        if (member.getClusterInstanceId().equals(instanceId)) {
            MemberBean memberBean = new MemberBean();
            memberBean.setClusterId(member.getClusterId());
            memberBean.setLbClusterId(member.getLbClusterId());
            NetworkPartition netWorkPartition = null;
            try {
                // Remote call: resolve the member's network partition from the cloud controller.
                netWorkPartition = CloudControllerServiceClient.getInstance().getNetworkPartition(member.getNetworkPartitionId());
            } catch (RemoteException e) {
                log.error("Error when getting the network partition");
                throw new RestAPIException(e);
            }
            if (netWorkPartition != null) {
                memberBean.setNetworkPartitionId(netWorkPartition.getId());
                // Translate the member's partition UUID into the partition's id.
                Partition[] partition = netWorkPartition.getPartitions();
                for (int i = 0; i < partition.length; i++) {
                    if (partition[i].getUuid().equals(member.getPartitionId())) {
                        memberBean.setPartitionId(partition[i].getId());
                    }
                }
            }
            memberBean.setMemberId(member.getMemberId());
            memberBean.setClusterInstanceId(member.getClusterInstanceId());
            memberBean.setDefaultPrivateIP(member.getDefaultPrivateIP());
            memberBean.setDefaultPublicIP(member.getDefaultPublicIP());
            memberBean.setMemberPrivateIPs(member.getMemberPrivateIPs());
            memberBean.setMemberPublicIPs(member.getMemberPublicIPs());
            memberBean.setPorts(convertStubPortsToPortMappingBeans(member.getPorts()));
            memberBean.setServiceName(member.getServiceName());
            memberBean.setStatus(member.getStatus().toString());
            memberBean.setProperty(convertJavaUtilPropertiesToPropertyBeans(member.getProperties()));
            clusterInstanceBean.getMember().add(memberBean);
        }
    }
    clusterInstanceBean.setAccessUrls(cluster.getAccessUrls());
    for (String hostname : cluster.getHostNames()) {
        clusterInstanceBean.getHostNames().add(hostname);
    }
    clusterInstanceBean.setKubernetesServices(convertKubernetesServiceToKubernetesServiceBean(
            cluster.getKubernetesServices()));
    return clusterInstanceBean;
}
/** Maps topology ports to port mapping beans; a null collection yields an empty list. */
private static List<PortMappingBean> convertStubPortsToPortMappingBeans(Collection<Port> ports) {
    List<PortMappingBean> beans = new ArrayList<PortMappingBean>();
    if (ports == null) {
        return beans;
    }
    for (Port port : ports) {
        PortMappingBean bean = new PortMappingBean();
        bean.setProtocol(port.getProtocol());
        bean.setPort(port.getValue());
        bean.setProxyPort(port.getProxy());
        beans.add(bean);
    }
    return beans;
}
/** Maps kubernetes services to their REST API beans; a null list yields an empty list. */
private static List<KubernetesServiceBean> convertKubernetesServiceToKubernetesServiceBean(
        List<KubernetesService> kubernetesServices) {
    List<KubernetesServiceBean> beans = new ArrayList<KubernetesServiceBean>();
    if (kubernetesServices == null) {
        return beans;
    }
    for (KubernetesService service : kubernetesServices) {
        KubernetesServiceBean bean = new KubernetesServiceBean();
        bean.setId(service.getId());
        bean.setPublicIPs(service.getPublicIPs());
        bean.setPortalIP(service.getPortalIP());
        bean.setProtocol(service.getProtocol());
        bean.setPort(service.getPort());
        bean.setContainerPort(service.getContainerPort());
        beans.add(bean);
    }
    return beans;
}
/** Converts every partition bean in the list to a cloud controller stub partition. */
private static org.apache.stratos.cloud.controller.stub.domain.Partition[] convertToStubPartitions
        (List<PartitionBean> partitionList, int tenantId) throws RestAPIException {
    int count = partitionList.size();
    org.apache.stratos.cloud.controller.stub.domain.Partition[] stubPartitions =
            new org.apache.stratos.cloud.controller.stub.domain.Partition[count];
    for (int index = 0; index < count; index++) {
        stubPartitions[index] = convertPartitionToStubPartition(partitionList.get(index), tenantId);
    }
    return stubPartitions;
}
/** Maps autoscaler stub partition refs to partition reference beans. */
private static List<PartitionReferenceBean> convertASStubPartitionsToPartitions(
        PartitionRef[] partitions) {
    List<PartitionReferenceBean> references = new ArrayList<PartitionReferenceBean>();
    for (PartitionRef ref : partitions) {
        PartitionReferenceBean reference = new PartitionReferenceBean();
        reference.setId(ref.getId());
        reference.setPartitionMax(ref.getPartitionMax());
        references.add(reference);
    }
    return references;
}
/** Converts stub partitions to partition beans; a null input yields an empty array. */
public static PartitionBean[] populatePartitionPojos(org.apache.stratos.cloud.controller.stub.domain.Partition[]
                                                             partitions) {
    if (partitions == null) {
        return new PartitionBean[0];
    }
    PartitionBean[] beans = new PartitionBean[partitions.length];
    for (int index = 0; index < partitions.length; index++) {
        beans[index] = populatePartitionPojo(partitions[index]);
    }
    return beans;
}
/**
 * Populates a PartitionBean from a cloud controller stub partition.
 * Returns an empty bean (not null) when the input is null.
 *
 * NOTE(review): the partition id is not copied here, unlike
 * convertCCStubPartitionToPartition — looks like an omission; confirm intent
 * before changing, as callers may rely on the current behavior.
 */
public static PartitionBean populatePartitionPojo(org.apache.stratos.cloud.controller.stub.domain.Partition partition) {
    PartitionBean partitionBean = new PartitionBean();
    if (partition == null) {
        return partitionBean;
    }
    partitionBean.setDescription(partition.getDescription());
    //properties
    if (partition.getProperties() != null) {
        List<org.apache.stratos.common.beans.PropertyBean> propertyBeans
                = convertCCStubPropertiesToPropertyBeans(partition.getProperties());
        partitionBean.setProperty(propertyBeans);
    }
    return partitionBean;
}
/**
 * Converts java.util.Properties into a list of common property beans.
 * Returns null when the source is null or empty (kept for caller compatibility).
 */
private static List<org.apache.stratos.common.beans.PropertyBean> convertJavaUtilPropertiesToPropertyBeans(
        java.util.Properties properties) {
    if (properties == null || properties.isEmpty()) {
        return null;
    }
    List<org.apache.stratos.common.beans.PropertyBean> beans
            = new ArrayList<org.apache.stratos.common.beans.PropertyBean>();
    Enumeration<?> names = properties.propertyNames();
    while (names.hasMoreElements()) {
        String name = (String) names.nextElement();
        org.apache.stratos.common.beans.PropertyBean bean
                = new org.apache.stratos.common.beans.PropertyBean();
        bean.setName(name);
        bean.setValue(properties.getProperty(name));
        beans.add(bean);
    }
    return beans;
}
/** Converts stub autoscale policies to beans; a null input yields an empty array. */
public static AutoscalePolicyBean[] convertStubAutoscalePoliciesToAutoscalePolicies(
        org.apache.stratos.autoscaler.stub.autoscale.policy.AutoscalePolicy[] autoscalePolicies) {
    if (autoscalePolicies == null) {
        return new AutoscalePolicyBean[0];
    }
    AutoscalePolicyBean[] beans = new AutoscalePolicyBean[autoscalePolicies.length];
    for (int index = 0; index < autoscalePolicies.length; index++) {
        beans[index] = convertStubAutoscalePolicyToAutoscalePolicy(autoscalePolicies[index]);
    }
    return beans;
}
/** Converts a single stub autoscale policy to its REST API bean; null-safe. */
public static AutoscalePolicyBean convertStubAutoscalePolicyToAutoscalePolicy(
        org.apache.stratos.autoscaler.stub.autoscale.policy.AutoscalePolicy autoscalePolicy) {
    if (autoscalePolicy == null) {
        return null;
    }
    AutoscalePolicyBean bean = new AutoscalePolicyBean();
    bean.setId(autoscalePolicy.getId());
    bean.setDescription(autoscalePolicy.getDescription());
    bean.setDisplayName(autoscalePolicy.getDisplayName());
    org.apache.stratos.autoscaler.stub.autoscale.policy.LoadThresholds thresholds =
            autoscalePolicy.getLoadThresholds();
    if (thresholds != null) {
        bean.setLoadThresholds(convertStubLoadThresholdsToLoadThresholds(thresholds));
    }
    return bean;
}
/**
 * Converts stub load thresholds to a LoadThresholdsBean.
 * A threshold equal to zero is treated as "not configured" and omitted.
 */
private static LoadThresholdsBean convertStubLoadThresholdsToLoadThresholds(
        org.apache.stratos.autoscaler.stub.autoscale.policy.LoadThresholds loadThresholds) {
    LoadThresholdsBean bean = new LoadThresholdsBean();
    if (loadThresholds.getLoadAverageThreshold() != 0) {
        LoadAverageThresholdsBean loadAverage = new LoadAverageThresholdsBean();
        loadAverage.setThreshold(loadThresholds.getLoadAverageThreshold());
        bean.setLoadAverage(loadAverage);
    }
    if (loadThresholds.getMemoryConsumptionThreshold() != 0) {
        MemoryConsumptionThresholdsBean memory = new MemoryConsumptionThresholdsBean();
        memory.setThreshold(loadThresholds.getMemoryConsumptionThreshold());
        bean.setMemoryConsumption(memory);
    }
    if (loadThresholds.getRequestsInFlightThreshold() != 0) {
        RequestsInFlightThresholdsBean requests = new RequestsInFlightThresholdsBean();
        requests.setThreshold(loadThresholds.getRequestsInFlightThreshold());
        bean.setRequestsInFlight(requests);
    }
    return bean;
}
/** Converts cloud controller stub network partitions to beans; a null input yields an empty array. */
public static NetworkPartitionBean[] convertCCStubNetworkPartitionsToNetworkPartitions(
        org.apache.stratos.cloud.controller.stub.domain.NetworkPartition[] networkPartitions) {
    if (networkPartitions == null) {
        return new NetworkPartitionBean[0];
    }
    NetworkPartitionBean[] beans = new NetworkPartitionBean[networkPartitions.length];
    for (int index = 0; index < networkPartitions.length; index++) {
        beans[index] = convertCCStubNetworkPartitionToNetworkPartition(networkPartitions[index]);
    }
    return beans;
}
/**
 * Builds a cloud controller stub KubernetesCluster from its REST API bean.
 *
 * @throws RestAPIException if any nested conversion fails
 */
public static org.apache.stratos.cloud.controller.stub.domain.kubernetes.KubernetesCluster
convertToCCKubernetesClusterPojo(KubernetesClusterBean kubernetesClusterBean, String kubernetesClusterUuid,
                                 int tenantId) throws RestAPIException {
    org.apache.stratos.cloud.controller.stub.domain.kubernetes.KubernetesCluster cluster =
            new org.apache.stratos.cloud.controller.stub.domain.kubernetes.KubernetesCluster();
    cluster.setClusterUuid(kubernetesClusterUuid);
    cluster.setTenantId(tenantId);
    cluster.setClusterId(kubernetesClusterBean.getClusterId());
    cluster.setDescription(kubernetesClusterBean.getDescription());
    cluster.setKubernetesMaster(convertStubKubernetesMasterToKubernetesMaster(
            kubernetesClusterBean.getKubernetesMaster(), tenantId));
    cluster.setPortRange(convertPortRangeToStubPortRange(kubernetesClusterBean.getPortRange()));
    cluster.setKubernetesHosts(convertToASKubernetesHostsPojo(kubernetesClusterBean.getKubernetesHosts(), tenantId));
    cluster.setProperties(convertPropertyBeansToCCStubProperties(kubernetesClusterBean.getProperty(), tenantId));
    return cluster;
}
/** Converts kubernetes host beans to stub hosts; returns null for a null/empty list. */
private static org.apache.stratos.cloud.controller.stub.domain.kubernetes.KubernetesHost[]
convertToASKubernetesHostsPojo(List<KubernetesHostBean> kubernetesHosts, int tenantId) throws RestAPIException {
    if (kubernetesHosts == null || kubernetesHosts.isEmpty()) {
        return null;
    }
    int count = kubernetesHosts.size();
    org.apache.stratos.cloud.controller.stub.domain.kubernetes.KubernetesHost[] hosts =
            new org.apache.stratos.cloud.controller.stub.domain.kubernetes.KubernetesHost[count];
    for (int index = 0; index < count; index++) {
        hosts[index] = convertKubernetesHostToStubKubernetesHost(kubernetesHosts.get(index), tenantId);
    }
    return hosts;
}
/** Converts a port range bean to its stub counterpart; null-safe. */
private static org.apache.stratos.cloud.controller.stub.domain.kubernetes.PortRange
convertPortRangeToStubPortRange(PortRangeBean portRangeBean) {
    if (portRangeBean == null) {
        return null;
    }
    org.apache.stratos.cloud.controller.stub.domain.kubernetes.PortRange range =
            new org.apache.stratos.cloud.controller.stub.domain.kubernetes.PortRange();
    range.setUpper(portRangeBean.getUpper());
    range.setLower(portRangeBean.getLower());
    return range;
}
/**
 * Converts a kubernetes host bean to a cloud controller stub host; null-safe.
 *
 * @throws RestAPIException if property conversion fails
 */
public static org.apache.stratos.cloud.controller.stub.domain.kubernetes.KubernetesHost
convertKubernetesHostToStubKubernetesHost(KubernetesHostBean kubernetesHostBean, int tenantId)
        throws RestAPIException {
    if (kubernetesHostBean == null) {
        return null;
    }
    org.apache.stratos.cloud.controller.stub.domain.kubernetes.KubernetesHost host =
            new org.apache.stratos.cloud.controller.stub.domain.kubernetes.KubernetesHost();
    host.setHostId(kubernetesHostBean.getHostId());
    host.setHostname(kubernetesHostBean.getHostname());
    host.setPrivateIPAddress(kubernetesHostBean.getPrivateIPAddress());
    host.setPublicIPAddress(kubernetesHostBean.getPublicIPAddress());
    host.setProperties(convertPropertyBeansToCCStubProperties(kubernetesHostBean.getProperty(), tenantId));
    return host;
}
/**
 * Converts a kubernetes master bean to a cloud controller stub master; null-safe.
 *
 * @throws RestAPIException if property conversion fails
 */
public static org.apache.stratos.cloud.controller.stub.domain.kubernetes.KubernetesMaster
convertStubKubernetesMasterToKubernetesMaster(KubernetesMasterBean kubernetesMasterBean, int tenantId)
        throws RestAPIException {
    if (kubernetesMasterBean == null) {
        return null;
    }
    org.apache.stratos.cloud.controller.stub.domain.kubernetes.KubernetesMaster master =
            new org.apache.stratos.cloud.controller.stub.domain.kubernetes.KubernetesMaster();
    master.setHostId(kubernetesMasterBean.getHostId());
    master.setHostname(kubernetesMasterBean.getHostname());
    master.setPrivateIPAddress(kubernetesMasterBean.getPrivateIPAddress());
    master.setPublicIPAddress(kubernetesMasterBean.getPublicIPAddress());
    master.setProperties(convertPropertyBeansToCCStubProperties(kubernetesMasterBean.getProperty(), tenantId));
    return master;
}
/** Converts stub kubernetes clusters to beans; returns null when the input array is null. */
public static KubernetesClusterBean[] convertStubKubernetesClustersToKubernetesClusters(
        org.apache.stratos.cloud.controller.stub.domain.kubernetes.KubernetesCluster[] kubernetesClusters) {
    if (kubernetesClusters == null) {
        return null;
    }
    KubernetesClusterBean[] beans = new KubernetesClusterBean[kubernetesClusters.length];
    for (int index = 0; index < kubernetesClusters.length; index++) {
        beans[index] = convertStubKubernetesClusterToKubernetesCluster(kubernetesClusters[index]);
    }
    return beans;
}
/** Converts a stub kubernetes cluster to its REST API bean; null-safe. */
public static KubernetesClusterBean convertStubKubernetesClusterToKubernetesCluster(
        org.apache.stratos.cloud.controller.stub.domain.kubernetes.KubernetesCluster kubernetesCluster) {
    if (kubernetesCluster == null) {
        return null;
    }
    KubernetesClusterBean bean = new KubernetesClusterBean();
    bean.setClusterId(kubernetesCluster.getClusterId());
    bean.setDescription(kubernetesCluster.getDescription());
    bean.setPortRange(convertStubPortRangeToPortRange(kubernetesCluster.getPortRange()));
    bean.setKubernetesHosts(convertStubKubernetesHostsToKubernetesHosts(kubernetesCluster.getKubernetesHosts()));
    bean.setKubernetesMaster(convertStubKubernetesMasterToKubernetesMaster(kubernetesCluster.getKubernetesMaster()));
    bean.setProperty(convertCCStubPropertiesToPropertyBeans(kubernetesCluster.getProperties()));
    return bean;
}
/** Converts a stub kubernetes master to its REST API bean; null-safe. */
public static KubernetesMasterBean convertStubKubernetesMasterToKubernetesMaster(
        org.apache.stratos.cloud.controller.stub.domain.kubernetes.KubernetesMaster kubernetesMaster) {
    if (kubernetesMaster == null) {
        return null;
    }
    KubernetesMasterBean bean = new KubernetesMasterBean();
    bean.setHostId(kubernetesMaster.getHostId());
    bean.setHostname(kubernetesMaster.getHostname());
    bean.setPrivateIPAddress(kubernetesMaster.getPrivateIPAddress());
    bean.setPublicIPAddress(kubernetesMaster.getPublicIPAddress());
    bean.setProperty(convertCCStubPropertiesToPropertyBeans(kubernetesMaster.getProperties()));
    return bean;
}
/** Converts stub kubernetes hosts to a bean list; returns null when the input array is null. */
public static List<KubernetesHostBean> convertStubKubernetesHostsToKubernetesHosts(
        org.apache.stratos.cloud.controller.stub.domain.kubernetes.KubernetesHost[] kubernetesHosts) {
    if (kubernetesHosts == null) {
        return null;
    }
    List<KubernetesHostBean> beans = new ArrayList<KubernetesHostBean>();
    for (org.apache.stratos.cloud.controller.stub.domain.kubernetes.KubernetesHost host : kubernetesHosts) {
        beans.add(convertStubKubernetesHostToKubernetesHost(host));
    }
    return beans;
}
/** Converts one stub kubernetes host to its REST API bean; null-safe. */
private static KubernetesHostBean convertStubKubernetesHostToKubernetesHost(
        org.apache.stratos.cloud.controller.stub.domain.kubernetes.KubernetesHost kubernetesHost) {
    if (kubernetesHost == null) {
        return null;
    }
    KubernetesHostBean bean = new KubernetesHostBean();
    bean.setHostId(kubernetesHost.getHostId());
    bean.setHostname(kubernetesHost.getHostname());
    bean.setPrivateIPAddress(kubernetesHost.getPrivateIPAddress());
    bean.setPublicIPAddress(kubernetesHost.getPublicIPAddress());
    bean.setProperty(convertCCStubPropertiesToPropertyBeans(kubernetesHost.getProperties()));
    return bean;
}
/** Converts cloud controller stub Properties to a bean list; a null/absent array yields null. */
private static List<org.apache.stratos.common.beans.PropertyBean> convertCCStubPropertiesToPropertyBeans(
        org.apache.stratos.cloud.controller.stub.Properties properties) {
    if (properties == null || properties.getProperties() == null) {
        return null;
    }
    List<org.apache.stratos.common.beans.PropertyBean> beans
            = new ArrayList<org.apache.stratos.common.beans.PropertyBean>();
    for (org.apache.stratos.cloud.controller.stub.Property property : properties.getProperties()) {
        beans.add(convertStubPropertyToPropertyBean(property));
    }
    return beans;
}
/** Converts a single cloud controller stub property to a common property bean; null-safe. */
private static org.apache.stratos.common.beans.PropertyBean convertStubPropertyToPropertyBean(
        org.apache.stratos.cloud.controller.stub.Property propertyE) {
    if (propertyE == null) {
        return null;
    }
    org.apache.stratos.common.beans.PropertyBean bean =
            new org.apache.stratos.common.beans.PropertyBean();
    bean.setName(propertyE.getName());
    bean.setValue(propertyE.getValue());
    return bean;
}
/** Converts a stub port range to a PortRangeBean; null-safe. */
private static PortRangeBean convertStubPortRangeToPortRange(
        org.apache.stratos.cloud.controller.stub.domain.kubernetes.PortRange portRange) {
    if (portRange == null) {
        return null;
    }
    PortRangeBean bean = new PortRangeBean();
    bean.setLower(portRange.getLower());
    bean.setUpper(portRange.getUpper());
    return bean;
}
/**
 * Converts an application definition REST bean into an autoscaler stub
 * ApplicationContext, including its component tree (top-level groups,
 * dependency information, and cartridge contexts).
 *
 * @param applicationDefinition source application definition
 * @param applicationUuid       UUID to assign to the application
 * @param tenantId              owning tenant id
 * @return populated stub ApplicationContext
 * @throws RestAPIException if a nested component conversion fails
 */
public static ApplicationContext convertApplicationDefinitionToStubApplicationContext(
        ApplicationBean applicationDefinition, String applicationUuid, int tenantId) throws RestAPIException {
    org.apache.stratos.autoscaler.stub.pojo.ApplicationContext applicationContext =
            new org.apache.stratos.autoscaler.stub.pojo.ApplicationContext();
    applicationContext.setApplicationUuid(applicationUuid);
    applicationContext.setApplicationId(applicationDefinition.getApplicationId());
    applicationContext.setAlias(applicationDefinition.getAlias());
    applicationContext.setMultiTenant(applicationDefinition.isMultiTenant());
    applicationContext.setName(applicationDefinition.getName());
    applicationContext.setDescription(applicationDefinition.getDescription());
    applicationContext.setStatus(applicationDefinition.getStatus());
    applicationContext.setTenantId(tenantId);
    // convert and set components
    if (applicationDefinition.getComponents() != null) {
        org.apache.stratos.autoscaler.stub.pojo.ComponentContext componentContext =
                new org.apache.stratos.autoscaler.stub.pojo.ComponentContext();
        // top level Groups
        if (applicationDefinition.getComponents().getGroups() != null) {
            componentContext.setGroupContexts(
                    convertGroupDefinitionsToStubGroupContexts(applicationDefinition.getComponents().getGroups(),
                            tenantId));
        }
        // top level dependency information
        if (applicationDefinition.getComponents().getDependencies() != null) {
            componentContext.setDependencyContext(
                    convertDependencyDefinitionsToDependencyContexts(applicationDefinition.getComponents().getDependencies()));
        }
        // top level cartridge context information
        if (applicationDefinition.getComponents().getCartridges() != null) {
            componentContext.setCartridgeContexts(convertCartridgeReferenceBeansToStubCartridgeContexts
                    (applicationDefinition.getComponents().getCartridges(), tenantId));
        }
        applicationContext.setComponents(componentContext);
    }
    return applicationContext;
}
/**
 * Converts an autoscaler stub ApplicationContext back into an application
 * definition REST bean, mirroring convertApplicationDefinitionToStubApplicationContext.
 * Note: the UUID and tenant id carried by the stub context are not copied here.
 *
 * @param applicationContext stub context; may be null
 * @return the application definition bean, or null when the input is null
 */
public static ApplicationBean convertStubApplicationContextToApplicationDefinition(
        ApplicationContext applicationContext) {
    if (applicationContext == null) {
        return null;
    }
    ApplicationBean applicationDefinition = new ApplicationBean();
    applicationDefinition.setApplicationId(applicationContext.getApplicationId());
    applicationDefinition.setAlias(applicationContext.getAlias());
    applicationDefinition.setMultiTenant(applicationContext.getMultiTenant());
    applicationDefinition.setName(applicationContext.getName());
    applicationDefinition.setDescription(applicationContext.getDescription());
    applicationDefinition.setStatus(applicationContext.getStatus());
    // convert and set components
    if (applicationContext.getComponents() != null) {
        applicationDefinition.setComponents(new ComponentBean());
        // top level Groups
        if (applicationContext.getComponents().getGroupContexts() != null) {
            applicationDefinition.getComponents().setGroups(
                    convertStubGroupContextsToGroupDefinitions(applicationContext.getComponents().getGroupContexts()));
        }
        // top level dependency information
        if (applicationContext.getComponents().getDependencyContext() != null) {
            applicationDefinition.getComponents().setDependencies(
                    convertStubDependencyContextsToDependencyDefinitions(applicationContext.getComponents().getDependencyContext()));
        }
        // top level cartridge context information
        if (applicationContext.getComponents().getCartridgeContexts() != null) {
            applicationDefinition.getComponents().setCartridges(
                    convertStubCartridgeContextsToCartridgeReferenceBeans(applicationContext.getComponents().getCartridgeContexts()));
        }
    }
    return applicationDefinition;
}
/**
 * Recursively converts stub {@link GroupContext} entries into
 * {@link CartridgeGroupReferenceBean} instances; null entries are skipped.
 *
 * @param groupContexts stub group contexts; may be null
 * @return converted group definitions (never null, possibly empty)
 */
private static List<CartridgeGroupReferenceBean> convertStubGroupContextsToGroupDefinitions(GroupContext[] groupContexts) {
    List<CartridgeGroupReferenceBean> definitions = new ArrayList<CartridgeGroupReferenceBean>();
    if (groupContexts == null) {
        return definitions;
    }
    for (GroupContext context : groupContexts) {
        if (context == null) {
            continue;
        }
        CartridgeGroupReferenceBean definition = new CartridgeGroupReferenceBean();
        definition.setUuid(context.getUuid());
        definition.setAlias(context.getAlias());
        definition.setGroupMaxInstances(context.getGroupMaxInstances());
        definition.setGroupMinInstances(context.getGroupMinInstances());
        definition.setTenantId(context.getTenantId());
        definition.setName(context.getName());
        // Recurse into nested groups and convert the group's own cartridges.
        definition.setGroups(convertStubGroupContextsToGroupDefinitions(context.getGroupContexts()));
        definition.setCartridges(convertStubCartridgeContextsToCartridgeReferenceBeans(
                context.getCartridgeContexts()));
        definitions.add(definition);
    }
    return definitions;
}
/**
 * Converts a stub {@link DependencyContext} into a {@link DependencyBean},
 * carrying over termination behaviour, startup orders and scaling dependents.
 *
 * @param dependencyContext stub dependency context (must not be null)
 * @return the converted dependency bean
 */
private static DependencyBean convertStubDependencyContextsToDependencyDefinitions(DependencyContext dependencyContext) {
    DependencyBean bean = new DependencyBean();
    bean.setTerminationBehaviour(dependencyContext.getTerminationBehaviour());
    String[] startupOrders = dependencyContext.getStartupOrdersContexts();
    if (startupOrders != null) {
        bean.setStartupOrders(convertStringArrayToStartupOrderBeans(startupOrders));
    }
    String[] scalingDependents = dependencyContext.getScalingDependents();
    if (scalingDependents != null) {
        bean.setScalingDependents(convertStringArrayToDependentScalingBeans(scalingDependents));
    }
    return bean;
}
/**
 * Splits each comma-separated alias string into a {@link StartupOrderBean}.
 * Blank strings and blank alias fragments are skipped.
 *
 * @param startupOrders comma-separated alias strings; may be null
 * @return the converted startup order beans (never null, possibly empty)
 */
private static List<StartupOrderBean> convertStringArrayToStartupOrderBeans(String[] startupOrders) {
    List<StartupOrderBean> beans = new ArrayList<StartupOrderBean>();
    if (startupOrders == null) {
        return beans;
    }
    for (String csv : startupOrders) {
        if (StringUtils.isBlank(csv)) {
            continue;
        }
        StartupOrderBean bean = new StartupOrderBean();
        for (String alias : csv.split(",")) {
            if (StringUtils.isNotBlank(alias)) {
                bean.addAlias(alias);
            }
        }
        beans.add(bean);
    }
    return beans;
}
/**
 * Splits each comma-separated alias string into a {@link ScalingDependentsBean}.
 * Blank strings and blank alias fragments are skipped.
 *
 * @param scalingDependents comma-separated alias strings; may be null
 * @return the converted scaling-dependent beans (never null, possibly empty)
 */
private static List<ScalingDependentsBean> convertStringArrayToDependentScalingBeans(String[] scalingDependents) {
    List<ScalingDependentsBean> beans = new ArrayList<ScalingDependentsBean>();
    if (scalingDependents == null) {
        return beans;
    }
    for (String csv : scalingDependents) {
        if (StringUtils.isBlank(csv)) {
            continue;
        }
        ScalingDependentsBean bean = new ScalingDependentsBean();
        for (String alias : csv.split(",")) {
            if (StringUtils.isNotBlank(alias)) {
                bean.addAlias(alias);
            }
        }
        beans.add(bean);
    }
    return beans;
}
/**
 * Converts stub {@link CartridgeContext} entries into
 * {@link CartridgeReferenceBean} instances; null entries are skipped.
 *
 * @param cartridgeContexts stub cartridge contexts; may be null
 * @return converted cartridge references (never null, possibly empty)
 */
private static List<CartridgeReferenceBean> convertStubCartridgeContextsToCartridgeReferenceBeans(
        CartridgeContext[] cartridgeContexts) {
    List<CartridgeReferenceBean> beans = new ArrayList<CartridgeReferenceBean>();
    if (cartridgeContexts == null) {
        return beans;
    }
    for (CartridgeContext context : cartridgeContexts) {
        if (context == null) {
            continue;
        }
        CartridgeReferenceBean bean = new CartridgeReferenceBean();
        bean.setUuid(context.getUuid());
        bean.setTenantId(context.getTenantId());
        bean.setType(context.getType());
        bean.setCartridgeMin(context.getCartridgeMin());
        bean.setCartridgeMax(context.getCartridgeMax());
        bean.setSubscribableInfo(convertStubSubscribableInfoContextToSubscribableInfo(
                context.getSubscribableInfoContext()));
        beans.add(bean);
    }
    return beans;
}
/**
 * Converts a stub {@link SubscribableInfoContext} into a {@link SubscribableInfo}
 * bean, including the optional artifact-repository and persistence sections.
 *
 * @param subscribableInfoContext stub context (must not be null)
 * @return the converted subscribable info bean
 */
private static SubscribableInfo convertStubSubscribableInfoContextToSubscribableInfo(
        SubscribableInfoContext subscribableInfoContext) {
    SubscribableInfo info = new SubscribableInfo();
    info.setAlias(subscribableInfoContext.getAlias());
    info.setAutoscalingPolicy(subscribableInfoContext.getAutoscalingPolicy());
    if (!CommonUtil.isEmptyArray(subscribableInfoContext.getDependencyAliases())) {
        info.setDependencyAliases(subscribableInfoContext.getDependencyAliases());
    }
    info.setDeploymentPolicy(subscribableInfoContext.getDeploymentPolicy());
    info.setMinMembers(subscribableInfoContext.getMinMembers());
    info.setMaxMembers(subscribableInfoContext.getMaxMembers());
    info.setProperty(convertStubPropertiesToPropertyBeanList(subscribableInfoContext.getProperties()));

    ArtifactRepositoryContext repoContext = subscribableInfoContext.getArtifactRepositoryContext();
    if (repoContext != null) {
        ArtifactRepositoryBean repository = new ArtifactRepositoryBean();
        repository.setAlias(repoContext.getAlias());
        repository.setRepoUrl(repoContext.getRepoUrl());
        repository.setPrivateRepo(repoContext.getPrivateRepo());
        repository.setRepoUsername(repoContext.getRepoUsername());
        repository.setRepoPassword(repoContext.getRepoPassword());
        info.setArtifactRepository(repository);
    }

    PersistenceContext persistenceContext = subscribableInfoContext.getPersistenceContext();
    if (persistenceContext != null) {
        PersistenceBean persistence = new PersistenceBean();
        // Presence of a persistence context implies the persistence is required.
        persistence.setRequired(true);
        persistence.setVolume(convertStubVolumeToVolume(persistenceContext.getVolumes()));
        info.setPersistence(persistence);
    }
    return info;
}
/**
 * Converts stub {@link VolumeContext} entries into {@link VolumeBean} instances.
 *
 * Bug fix: the converted bean was never added to the result list, so this method
 * previously always returned an empty list. A null-guard on the input array was
 * also added to avoid a NullPointerException in the for-each loop.
 *
 * @param volumes stub volume contexts; may be null
 * @return converted volume beans (never null, possibly empty)
 */
private static List<VolumeBean> convertStubVolumeToVolume(VolumeContext[] volumes) {
    List<VolumeBean> volumeBeans = new ArrayList<VolumeBean>();
    if (volumes == null) {
        return volumeBeans;
    }
    for (VolumeContext volumeContext : volumes) {
        VolumeBean volumeBean = new VolumeBean();
        volumeBean.setRemoveOnTermination(volumeContext.getRemoveOntermination());
        volumeBean.setVolumeId(volumeContext.getVolumeId());
        volumeBean.setMappingPath(volumeContext.getMappingPath());
        volumeBean.setDevice(volumeContext.getDevice());
        volumeBean.setSize(String.valueOf(volumeContext.getSize()));
        volumeBean.setSnapshotId(volumeContext.getSnapshotId());
        volumeBean.setId(volumeContext.getId());
        // Was missing: the bean was created but never collected.
        volumeBeans.add(volumeBean);
    }
    return volumeBeans;
}
/**
 * Converts stub autoscaler {@code Properties} into a list of common
 * {@code PropertyBean} name/value pairs; null entries are skipped.
 *
 * @param properties stub properties container; may be null
 * @return converted property beans (never null, possibly empty)
 */
private static List<org.apache.stratos.common.beans.PropertyBean>
convertStubPropertiesToPropertyBeanList(org.apache.stratos.autoscaler.stub.Properties properties) {
    List<org.apache.stratos.common.beans.PropertyBean> beans =
            new ArrayList<org.apache.stratos.common.beans.PropertyBean>();
    if (properties == null || properties.getProperties() == null) {
        return beans;
    }
    for (org.apache.stratos.autoscaler.stub.Property property : properties.getProperties()) {
        if (property == null) {
            continue;
        }
        org.apache.stratos.common.beans.PropertyBean bean =
                new org.apache.stratos.common.beans.PropertyBean();
        bean.setName(property.getName());
        bean.setValue(String.valueOf(property.getValue()));
        beans.add(bean);
    }
    return beans;
}
/**
 * Converts cloud-controller stub {@code Properties} into a list of common
 * {@code PropertyBean} name/value pairs; null entries are skipped.
 *
 * @param properties stub properties container; may be null
 * @return converted property beans (never null, possibly empty)
 */
private static List<org.apache.stratos.common.beans.PropertyBean>
convertCCStubPropertiesToPropertyBeanList(org.apache.stratos.cloud.controller.stub.Properties properties) {
    List<org.apache.stratos.common.beans.PropertyBean> beans =
            new ArrayList<org.apache.stratos.common.beans.PropertyBean>();
    if (properties == null || properties.getProperties() == null) {
        return beans;
    }
    for (org.apache.stratos.cloud.controller.stub.Property property : properties.getProperties()) {
        if (property == null) {
            continue;
        }
        org.apache.stratos.common.beans.PropertyBean bean =
                new org.apache.stratos.common.beans.PropertyBean();
        bean.setName(property.getName());
        bean.setValue(String.valueOf(property.getValue()));
        beans.add(bean);
    }
    return beans;
}
/**
 * Converts {@link CartridgeReferenceBean} definitions into stub
 * {@link CartridgeContext} objects. The cartridge UUID is resolved through the
 * cloud controller for the given tenant.
 *
 * Fix: the UUID was first copied from the bean and then immediately overwritten
 * by the cloud-controller lookup; the redundant assignment has been removed.
 *
 * @param cartridges cartridge reference beans; may be null
 * @param tenantId   tenant used to resolve the cartridge UUID
 * @return converted cartridge contexts, or null when the input is null
 * @throws RestAPIException when the cloud-controller lookup fails or the cartridge is unknown
 */
private static CartridgeContext[] convertCartridgeReferenceBeansToStubCartridgeContexts(
        List<CartridgeReferenceBean> cartridges, int tenantId) throws RestAPIException {
    if (cartridges == null) {
        return null;
    }
    CartridgeContext[] cartridgeContextArray = new CartridgeContext[cartridges.size()];
    int i = 0;
    for (CartridgeReferenceBean cartridgeDefinition : cartridges) {
        CartridgeContext context = new CartridgeContext();
        context.setCartridgeMax(cartridgeDefinition.getCartridgeMax());
        context.setCartridgeMin(cartridgeDefinition.getCartridgeMin());
        context.setType(cartridgeDefinition.getType());
        context.setTenantId(tenantId);
        try {
            // Resolve the authoritative cartridge UUID for this tenant.
            context.setUuid(CloudControllerServiceClient.getInstance().getCartridgeByTenant(
                    cartridgeDefinition.getType(), tenantId).getUuid());
        } catch (RemoteException e) {
            throw new RestAPIException(e);
        } catch (CloudControllerServiceCartridgeNotFoundExceptionException e) {
            throw new RestAPIException(e);
        }
        context.setSubscribableInfoContext(convertSubscribableInfo(
                cartridgeDefinition.getSubscribableInfo(), tenantId));
        cartridgeContextArray[i++] = context;
    }
    return cartridgeContextArray;
}
/**
 * Converts a {@link SubscribableInfo} bean into a stub {@link SubscribableInfoContext},
 * resolving the tenant-scoped UUIDs of the referenced autoscaling and deployment
 * policies via the autoscaler service client.
 *
 * @param subscribableInfo subscribable info bean; may be null
 * @param tenantId         tenant whose policy UUIDs should be resolved
 * @return the converted context, or null when the input is null
 * @throws RestAPIException when a remote policy lookup fails
 */
private static SubscribableInfoContext convertSubscribableInfo(SubscribableInfo subscribableInfo, int tenantId)
throws RestAPIException {
if (subscribableInfo == null) {
return null;
}
SubscribableInfoContext infoContext = new SubscribableInfoContext();
infoContext.setAlias(subscribableInfo.getAlias());
infoContext.setAutoscalingPolicy(subscribableInfo.getAutoscalingPolicy());
infoContext.setDeploymentPolicy(subscribableInfo.getDeploymentPolicy());
// Resolve the tenant-specific UUID of the autoscaling policy; skipped when the
// policy cannot be found (null result leaves the UUID unset).
try {
AutoscalePolicy autoscalePolicy = AutoscalerServiceClient.getInstance().getAutoScalePolicyForTenant
(subscribableInfo.getAutoscalingPolicy(), tenantId);
if (autoscalePolicy != null) {
String autoScalerUuid= autoscalePolicy.getUuid();
infoContext.setAutoscalingPolicyUuid(autoScalerUuid);
}
} catch (RemoteException e) {
throw new RestAPIException(e);
}
infoContext.setDependencyAliases(subscribableInfo.getDependencyAliases());
// Resolve the tenant-specific UUID of the deployment policy, same pattern as above.
try {
DeploymentPolicy deploymentPolicy = AutoscalerServiceClient.getInstance().getDeploymentPolicyForTenant(
subscribableInfo.getDeploymentPolicy(), tenantId);
if (deploymentPolicy != null) {
String deploymentPolicyUuid = deploymentPolicy.getUuid();
infoContext.setDeploymentPolicyUuid(deploymentPolicyUuid);
}
} catch (RemoteException e) {
throw new RestAPIException(e);
}
infoContext.setMaxMembers(subscribableInfo.getMaxMembers());
infoContext.setMinMembers(subscribableInfo.getMinMembers());
if (subscribableInfo.getArtifactRepository() != null) {
ArtifactRepositoryBean artifactRepository = subscribableInfo.getArtifactRepository();
ArtifactRepositoryContext artifactRepositoryContext = new ArtifactRepositoryContext();
// NOTE(review): the repo context alias is taken from the subscription alias
// (infoContext), not from the repository bean — confirm this is intentional.
artifactRepositoryContext.setAlias(infoContext.getAlias());
artifactRepositoryContext.setPrivateRepo(artifactRepository.isPrivateRepo());
artifactRepositoryContext.setRepoUrl(artifactRepository.getRepoUrl());
artifactRepositoryContext.setRepoUsername(artifactRepository.getRepoUsername());
artifactRepositoryContext.setRepoPassword(artifactRepository.getRepoPassword());
infoContext.setArtifactRepositoryContext(artifactRepositoryContext);
}
if (subscribableInfo.getPersistence() != null) {
PersistenceBean persistenceBean = subscribableInfo.getPersistence();
PersistenceContext persistenceContext = new PersistenceContext();
// Presence of a persistence bean implies the persistence is required.
persistenceContext.setPersistenceRequired(true);
persistenceContext.setVolumes(convertVolumeToStubVolume(persistenceBean.getVolume()));
infoContext.setPersistenceContext(persistenceContext);
}
infoContext.setProperties(convertPropertyBeansToStubProperties(subscribableInfo.getProperty()));
return infoContext;
}
/**
 * Converts {@link VolumeBean} entries into stub {@link VolumeContext} objects.
 *
 * @param volumes volume beans; may be null
 * @return converted volume contexts, or null when the input is null
 */
private static VolumeContext[] convertVolumeToStubVolume(List<VolumeBean> volumes) {
    if (volumes == null) {
        return null;
    }
    VolumeContext[] contexts = new VolumeContext[volumes.size()];
    int index = 0;
    for (VolumeBean bean : volumes) {
        VolumeContext context = new VolumeContext();
        context.setRemoveOntermination(bean.isRemoveOnTermination());
        context.setVolumeId(bean.getVolumeId());
        context.setMappingPath(bean.getMappingPath());
        context.setDevice(bean.getDevice());
        // When a volumeId is specified, volume size is not relevant and may be null.
        if (bean.getSize() != null) {
            context.setSize(Integer.parseInt(bean.getSize()));
        }
        context.setSnapshotId(bean.getSnapshotId());
        context.setId(bean.getId());
        contexts[index++] = context;
    }
    return contexts;
}
/**
 * Wraps common {@code PropertyBean} name/value pairs into stub autoscaler
 * {@code Properties}.
 *
 * @param property property beans; may be null
 * @return a stub properties container (never null, possibly empty)
 */
private static org.apache.stratos.autoscaler.stub.Properties convertPropertyBeansToStubProperties(
        List<org.apache.stratos.common.beans.PropertyBean> property) {
    org.apache.stratos.autoscaler.stub.Properties properties =
            new org.apache.stratos.autoscaler.stub.Properties();
    if (property == null) {
        return properties;
    }
    for (org.apache.stratos.common.beans.PropertyBean bean : property) {
        org.apache.stratos.autoscaler.stub.Property stubProperty =
                new org.apache.stratos.autoscaler.stub.Property();
        stubProperty.setName(bean.getName());
        stubProperty.setValue(bean.getValue());
        properties.addProperties(stubProperty);
    }
    return properties;
}
/**
 * Converts a {@link DependencyBean} into a stub {@link DependencyContext},
 * flattening startup orders and scaling dependents back to comma-separated strings.
 *
 * @param dependencyBean dependency bean; may be null
 * @return the converted context, or null when the input is null
 */
private static DependencyContext convertDependencyDefinitionsToDependencyContexts(DependencyBean dependencyBean) {
    if (dependencyBean == null) {
        return null;
    }
    DependencyContext context = new DependencyContext();
    context.setTerminationBehaviour(dependencyBean.getTerminationBehaviour());
    if (dependencyBean.getStartupOrders() != null) {
        List<String> orders = convertStartupOrdersBeansToStringList(dependencyBean.getStartupOrders());
        context.setStartupOrdersContexts(orders.toArray(new String[orders.size()]));
    }
    if (dependencyBean.getScalingDependents() != null) {
        List<String> dependents = convertScalingDependentsBeansToStringList(dependencyBean.getScalingDependents());
        context.setScalingDependents(dependents.toArray(new String[dependents.size()]));
    }
    return context;
}
/**
 * Flattens each {@link ScalingDependentsBean}'s aliases into one
 * comma-separated string.
 *
 * @param scalingDependentsBeans scaling-dependent beans; may be null
 * @return comma-separated alias strings (never null, possibly empty)
 */
private static List<String> convertScalingDependentsBeansToStringList(List<ScalingDependentsBean> scalingDependentsBeans) {
    List<String> result = new ArrayList<String>();
    if (scalingDependentsBeans == null) {
        return result;
    }
    for (ScalingDependentsBean bean : scalingDependentsBeans) {
        StringBuilder csv = new StringBuilder();
        String separator = "";
        for (String alias : bean.getAliases()) {
            csv.append(separator).append(alias);
            separator = ",";
        }
        result.add(csv.toString());
    }
    return result;
}
/**
 * Recursively converts {@link CartridgeGroupReferenceBean} definitions into stub
 * {@link GroupContext} objects, resolving tenant-scoped service-group and
 * deployment-policy UUIDs via {@code StratosApiV41Utils}.
 *
 * @param groupDefinitions group reference beans; may be null
 * @param tenantId         tenant used to resolve UUIDs
 * @return converted group contexts, or null when the input is null
 * @throws RestAPIException when a UUID lookup or nested conversion fails
 */
private static org.apache.stratos.autoscaler.stub.pojo.GroupContext[]
convertGroupDefinitionsToStubGroupContexts(List<CartridgeGroupReferenceBean> groupDefinitions, int tenantId)
throws RestAPIException {
if (groupDefinitions == null) {
return null;
}
GroupContext[] groupContexts = new GroupContext[groupDefinitions.size()];
int i = 0;
for (CartridgeGroupReferenceBean groupDefinition : groupDefinitions) {
GroupContext groupContext = new GroupContext();
// Resolve the tenant-scoped UUID of the named service group.
groupContext.setUuid(StratosApiV41Utils.getServiceGroupUuidByTenant(groupDefinition.getName(),
tenantId));
// NOTE(review): this mutates the INPUT bean (side effect on the caller's
// object) — confirm callers rely on the tenant id being written back.
groupDefinition.setTenantId(tenantId);
groupContext.setName(groupDefinition.getName());
groupContext.setAlias(groupDefinition.getAlias());
groupContext.setGroupMaxInstances(groupDefinition.getGroupMaxInstances());
groupContext.setGroupMinInstances(groupDefinition.getGroupMinInstances());
groupContext.setDeploymentPolicy(groupDefinition.getDeploymentPolicy());
// Deployment policy is optional; only resolve its UUID when one is set.
if(groupDefinition.getDeploymentPolicy()!=null) {
groupContext.setDeploymentPolicyUuid(StratosApiV41Utils.getDeploymentPolicyUuidByTenant(groupDefinition
.getDeploymentPolicy(), tenantId));
}
// Recurse into nested groups
if (groupDefinition.getGroups() != null) {
groupContext.setGroupContexts(convertGroupDefinitionsToStubGroupContexts(groupDefinition.getGroups(),
tenantId));
}
// Convert the group's own cartridge references
if (groupDefinition.getCartridges() != null) {
groupContext.setCartridgeContexts(convertCartridgeReferenceBeansToStubCartridgeContexts
(groupDefinition.getCartridges(), tenantId));
}
groupContexts[i++] = groupContext;
}
return groupContexts;
}
/**
 * Converts a cartridge's deployment directory information to a DeploymentBean.
 *
 * Fixes: the javadoc was a copy-paste from the persistence converter; the
 * directory list was previously populated with the base directory instead of
 * the deployment directories; indexing {@code directories[0]} could throw
 * ArrayIndexOutOfBoundsException on an empty array.
 *
 * @param directories deployment directories
 * @param baseDir     base directory
 * @return the converted DeploymentBean, or null when input is absent
 */
private static DeploymentBean convertDeploymentToDeploymentBean(String[] directories,
                                                                String baseDir) {
    if (baseDir == null || directories == null || directories.length == 0 || directories[0] == null) {
        return null;
    }
    DeploymentBean deploymentBean = new DeploymentBean();
    deploymentBean.setBaseDir(baseDir);
    // Previously Arrays.asList(baseDir): the directories parameter was ignored.
    deploymentBean.setDir(Arrays.asList(directories));
    return deploymentBean;
}
/**
 * Convert Persistence To PersistenceBean
 *
 * @param persistence persistence configuration; may be null
 * @return the converted PersistenceBean, or null when the input is null
 */
private static PersistenceBean convertPersistenceToPersistenceBean(Persistence persistence) {
    if (persistence == null) {
        return null;
    }
    PersistenceBean bean = new PersistenceBean();
    bean.setRequired(persistence.isPersistenceRequiredSpecified());
    bean.setVolume(convertVolumesToVolumeBeans(persistence.getVolumes()));
    return bean;
}
/**
 * Convert Volumes To VolumeBeans
 *
 * Fix: guards against a null volumes array, which previously caused a
 * NullPointerException in the for-each loop; the list is also pre-sized.
 *
 * @param volumes Volumes; may be null
 * @return VolumeBeans (never null, possibly empty)
 */
private static List<VolumeBean> convertVolumesToVolumeBeans(Volume[] volumes) {
    if (volumes == null) {
        return new ArrayList<VolumeBean>();
    }
    List<VolumeBean> list = new ArrayList<VolumeBean>(volumes.length);
    for (Volume volume : volumes) {
        VolumeBean volumeBean = new VolumeBean();
        volumeBean.setId(volume.getId());
        volumeBean.setDevice(volume.getDevice());
        volumeBean.setSize(String.valueOf(volume.getSize()));
        volumeBean.setSnapshotId(volume.getSnapshotId());
        list.add(volumeBean);
    }
    return list;
}
/**
 * Converts Cartridge to CartridgeDefinitionBean
 *
 * Fixes: the description was set twice; the metadata-key guard indexed element 0
 * of a possibly null/empty array (NullPointerException /
 * ArrayIndexOutOfBoundsException).
 *
 * @param cartridgeInfo cartridge Info
 * @return CartridgeBean
 */
public static CartridgeBean convertCartridgeToCartridgeDefinitionBean(Cartridge cartridgeInfo) {
    CartridgeBean cartridgeBean = new CartridgeBean();
    cartridgeBean.setType(cartridgeInfo.getType());
    cartridgeBean.setProvider(cartridgeInfo.getProvider());
    cartridgeBean.setCategory(cartridgeInfo.getCategory());
    cartridgeBean.setHost(cartridgeInfo.getHostName());
    cartridgeBean.setDisplayName(cartridgeInfo.getDisplayName());
    cartridgeBean.setDescription(cartridgeInfo.getDescription());
    cartridgeBean.setVersion(cartridgeInfo.getVersion());
    cartridgeBean.setMultiTenant(cartridgeInfo.getMultiTenant());
    cartridgeBean.setLoadBalancingIPType(cartridgeInfo.getLoadBalancingIPType());
    String[] metadataKeys = cartridgeInfo.getMetadataKeys();
    // Length check added: [0] on an empty array throws ArrayIndexOutOfBoundsException.
    if (metadataKeys != null && metadataKeys.length > 0 && metadataKeys[0] != null) {
        cartridgeBean.setMetadataKeys(Arrays.asList(metadataKeys));
    }
    //convert persistence
    cartridgeBean.setPersistence(convertPersistenceToPersistenceBean(cartridgeInfo.getPersistence()));
    //convert deployment
    cartridgeBean.setDeployment(convertDeploymentToDeploymentBean(cartridgeInfo.getDeploymentDirs(),
            cartridgeInfo.getBaseDir()));
    //convert IaaSProvider
    cartridgeBean.setIaasProvider(convertIaaSProviderToIaaSProviderBean(
            cartridgeInfo.getIaasConfigs()));
    //Convert Port-mappings
    cartridgeBean.setPortMapping(convertPortMappingsToStubPortMappingBeans(
            cartridgeInfo.getPortMappings()));
    //convert properties
    cartridgeBean.setProperty(convertCCStubPropertiesToPropertyBeans(cartridgeInfo.getProperties()));
    return cartridgeBean;
}
/**
 * Builds an {@link ApplicationInfoBean} carrying the application's identity,
 * description and tenant information.
 *
 * @param application runtime application; may be null
 * @return the converted bean, or null when the input is null
 */
public static ApplicationInfoBean convertApplicationToApplicationBean(Application application) {
    if (application == null) {
        return null;
    }
    ApplicationInfoBean bean = new ApplicationInfoBean();
    bean.setId(application.getUniqueIdentifier());
    bean.setName(application.getName());
    bean.setDescription(application.getDescription());
    bean.setTenantDomain(application.getTenantDomain());
    bean.setTenantAdminUsername(application.getTenantAdminUserName());
    return bean;
}
/**
 * Builds an {@link ApplicationInfoBean} including the application's status and
 * its runtime instances.
 *
 * @param application runtime application; may be null
 * @return the converted bean, or null when the input is null
 */
public static ApplicationInfoBean convertApplicationToApplicationInstanceBean(Application application) {
    if (application == null) {
        return null;
    }
    ApplicationInfoBean bean = new ApplicationInfoBean();
    bean.setId(application.getId());
    bean.setName(application.getName());
    bean.setStatus(application.getStatus().name());
    bean.setDescription(application.getDescription());
    bean.setTenantDomain(application.getTenantDomain());
    bean.setTenantAdminUsername(application.getTenantAdminUserName());
    bean.setApplicationInstances(convertApplicationInstancesToApplicationInstances(application));
    return bean;
}
/**
 * Collects the application's runtime instances from the topology as
 * {@link ApplicationInstanceBean} entries.
 *
 * @param application runtime application; may be null
 * @return the instance beans, or null when the input is null
 */
private static List<ApplicationInstanceBean> convertApplicationInstancesToApplicationInstances(
        Application application) {
    if (application == null) {
        return null;
    }
    List<ApplicationInstanceBean> instances = new ArrayList<ApplicationInstanceBean>();
    for (ApplicationInstance applicationInstance :
            application.getInstanceIdToInstanceContextMap().values()) {
        ApplicationInstanceBean bean = new ApplicationInstanceBean();
        bean.setInstanceId(applicationInstance.getInstanceId());
        bean.setApplicationUuid(application.getUniqueIdentifier());
        bean.setApplicationId(application.getId());
        bean.setParentInstanceId(applicationInstance.getParentId());
        bean.setStatus(applicationInstance.getStatus().toString());
        instances.add(bean);
    }
    return instances;
}
/**
 * Builds {@link GroupInstanceBean} entries for a group: either the single group
 * instance matching the given instance id, or otherwise all group instances
 * whose parent is that id.
 *
 * @param instanceId instance id to match (direct or as parent)
 * @param group      topology group; may be null
 * @return the instance beans, or null when the group is null
 */
public static List<GroupInstanceBean> convertGroupToGroupInstancesBean(String instanceId, Group group) {
    if (group == null) {
        return null;
    }
    List<GroupInstanceBean> beans = new ArrayList<GroupInstanceBean>();
    GroupInstance directInstance = group.getInstanceContexts(instanceId);
    if (directInstance != null) {
        GroupInstanceBean bean = new GroupInstanceBean();
        bean.setParentInstanceId(instanceId);
        bean.setInstanceId(directInstance.getInstanceId());
        bean.setStatus(directInstance.getStatus().toString());
        bean.setGroupId(group.getUniqueIdentifier());
        beans.add(bean);
    } else {
        // No direct match: collect every instance parented by the given id.
        for (org.apache.stratos.messaging.domain.instance.Instance childInstance :
                group.getInstanceContextsWithParentId(instanceId)) {
            GroupInstanceBean bean = new GroupInstanceBean();
            bean.setParentInstanceId(instanceId);
            bean.setInstanceId(childInstance.getInstanceId());
            bean.setStatus(((GroupInstance) childInstance).getStatus().toString());
            bean.setGroupId(group.getUniqueIdentifier());
            beans.add(bean);
        }
    }
    return beans;
}
/**
 * Converts a Carbon {@link TenantInfoBean} to the Stratos common TenantInfoBean.
 *
 * Fix: guards against a null created date, which previously caused a
 * NullPointerException; the reverse conversion already guards this field.
 *
 * @param carbonTenantInfoBean Carbon tenant info; may be null
 * @return the converted bean, or null when the input is null
 */
public static org.apache.stratos.common.beans.TenantInfoBean convertCarbonTenantInfoBeanToTenantInfoBean(
        TenantInfoBean carbonTenantInfoBean) {
    if (carbonTenantInfoBean == null) {
        return null;
    }
    org.apache.stratos.common.beans.TenantInfoBean tenantInfoBean =
            new org.apache.stratos.common.beans.TenantInfoBean();
    tenantInfoBean.setTenantId(carbonTenantInfoBean.getTenantId());
    tenantInfoBean.setTenantDomain(carbonTenantInfoBean.getTenantDomain());
    tenantInfoBean.setActive(carbonTenantInfoBean.isActive());
    tenantInfoBean.setAdmin(carbonTenantInfoBean.getAdmin());
    tenantInfoBean.setEmail(carbonTenantInfoBean.getEmail());
    tenantInfoBean.setAdminPassword(carbonTenantInfoBean.getAdminPassword());
    tenantInfoBean.setFirstName(carbonTenantInfoBean.getFirstname());
    tenantInfoBean.setLastName(carbonTenantInfoBean.getLastname());
    if (carbonTenantInfoBean.getCreatedDate() != null) {
        tenantInfoBean.setCreatedDate(carbonTenantInfoBean.getCreatedDate().getTimeInMillis());
    }
    return tenantInfoBean;
}
/**
 * Converts a Stratos common TenantInfoBean back to a Carbon
 * {@link TenantInfoBean}. The created date is only set when positive.
 *
 * @param tenantInfoBean common tenant info; may be null
 * @return the converted Carbon bean, or null when the input is null
 */
public static TenantInfoBean convertTenantInfoBeanToCarbonTenantInfoBean(
        org.apache.stratos.common.beans.TenantInfoBean tenantInfoBean) {
    if (tenantInfoBean == null) {
        return null;
    }
    TenantInfoBean carbonBean = new TenantInfoBean();
    carbonBean.setTenantId(tenantInfoBean.getTenantId());
    carbonBean.setTenantDomain(tenantInfoBean.getTenantDomain());
    carbonBean.setActive(tenantInfoBean.isActive());
    carbonBean.setAdmin(tenantInfoBean.getAdmin());
    carbonBean.setEmail(tenantInfoBean.getEmail());
    carbonBean.setAdminPassword(tenantInfoBean.getAdminPassword());
    carbonBean.setFirstname(tenantInfoBean.getFirstName());
    carbonBean.setLastname(tenantInfoBean.getLastName());
    long createdMillis = tenantInfoBean.getCreatedDate();
    if (createdMillis > 0) {
        Calendar created = Calendar.getInstance();
        created.setTimeInMillis(createdMillis);
        carbonBean.setCreatedDate(created);
    }
    return carbonBean;
}
/**
 * Recursively converts a {@link CartridgeGroupBean} into a stub autoscaler
 * {@link ServiceGroup}, validating the termination behaviour along the way.
 *
 * @param groupBean service-group definition; may be null
 * @param groupUuid UUID to assign to the top-level group
 * @param tenantId  owning tenant id
 * @return the converted stub service group, or null when the input is null
 * @throws ServiceGroupDefinitionException when the termination behaviour is invalid
 */
public static ServiceGroup convertServiceGroupDefinitionToASStubServiceGroup(CartridgeGroupBean groupBean,String groupUuid,
int tenantId)
throws ServiceGroupDefinitionException {
if (groupBean == null) {
return null;
}
ServiceGroup servicegroup = new ServiceGroup();
List<CartridgeGroupBean> groupsDefinitions = groupBean.getGroups();
List<String> cartridgesDefinitions = groupBean.getCartridges();
servicegroup.setName(groupBean.getName());
servicegroup.setUuid(groupUuid);
servicegroup.setTenantId(tenantId);
// Normalize missing collections to empty so the arrays below are well-defined.
if (groupsDefinitions == null) {
groupsDefinitions = new ArrayList<CartridgeGroupBean>(0);
}
if (cartridgesDefinitions == null) {
cartridgesDefinitions = new ArrayList<String>(0);
}
ServiceGroup[] subGroups = new ServiceGroup[groupsDefinitions.size()];
String[] cartridges = new String[cartridgesDefinitions.size()];
int i = 0;
// Each nested sub-group receives a freshly generated random UUID.
for (CartridgeGroupBean groupDefinition : groupsDefinitions) {
subGroups[i] = convertServiceGroupDefinitionToASStubServiceGroup(groupDefinition,UUID.randomUUID().toString(), tenantId);
++i;
}
servicegroup.setGroups(subGroups);
cartridges = cartridgesDefinitions.toArray(cartridges);
servicegroup.setCartridges(cartridges);
DependencyBean dependencyBean = groupBean.getDependencies();
if (dependencyBean != null) {
Dependencies dependencies = new Dependencies();
List<StartupOrderBean> startupOrderBeans = dependencyBean.getStartupOrders();
if (startupOrderBeans != null) {
// Flatten each startup order's aliases into a comma-separated string.
List<String> startupOrders = convertStartupOrdersBeansToStringList(dependencyBean.getStartupOrders());
dependencies.setStartupOrders(startupOrders.toArray(new String[startupOrders.size()]));
}
// validate termination behavior (throws on unrecognized values)
validateTerminationBehavior(dependencyBean.getTerminationBehaviour());
dependencies.setTerminationBehaviour(dependencyBean.getTerminationBehaviour());
if (dependencyBean.getScalingDependents() != null) {
List<String> scalingDependents = convertScalingDependentsBeansToStringList(dependencyBean.getScalingDependents());
dependencies.setScalingDependants(scalingDependents.toArray(new String[scalingDependents.size()]));
}
servicegroup.setDependencies(dependencies);
}
return servicegroup;
}
/**
 * Flattens each {@link StartupOrderBean}'s aliases into one comma-separated
 * string.
 *
 * @param startupOrderBeans startup order beans; may be null
 * @return comma-separated alias strings (never null, possibly empty)
 */
private static List<String> convertStartupOrdersBeansToStringList(List<StartupOrderBean> startupOrderBeans) {
    List<String> result = new ArrayList<String>();
    if (startupOrderBeans == null) {
        return result;
    }
    for (StartupOrderBean bean : startupOrderBeans) {
        StringBuilder csv = new StringBuilder();
        String separator = "";
        for (String alias : bean.getAliases()) {
            csv.append(separator).append(alias);
            separator = ",";
        }
        result.add(csv.toString());
    }
    return result;
}
/**
 * Converts a stub autoscaler {@link ServiceGroup} back into a
 * {@link CartridgeGroupBean}, recursing into sub-groups.
 *
 * Fixes: guards against null groups/cartridges/startup/scaling arrays and
 * against indexing element 0 of an empty array, both of which previously threw
 * (NullPointerException / ArrayIndexOutOfBoundsException).
 *
 * @param serviceGroup stub service group; may be null
 * @return the converted bean, or null when the input is null
 */
public static CartridgeGroupBean convertStubServiceGroupToServiceGroupDefinition(ServiceGroup serviceGroup) {
    if (serviceGroup == null) {
        return null;
    }
    CartridgeGroupBean serviceGroupDefinition = new CartridgeGroupBean();
    serviceGroupDefinition.setName(serviceGroup.getName());

    // Recurse into sub-groups (array may be null on the stub).
    ServiceGroup[] groups = serviceGroup.getGroups();
    List<CartridgeGroupBean> groupDefinitions = new ArrayList<CartridgeGroupBean>(
            groups != null ? groups.length : 0);
    if (groups != null) {
        for (ServiceGroup group : groups) {
            if (group != null) {
                groupDefinitions.add(convertStubServiceGroupToServiceGroupDefinition(group));
            }
        }
    }

    org.apache.stratos.autoscaler.stub.pojo.Dependencies dependencies = serviceGroup.getDependencies();
    if (dependencies != null) {
        DependencyBean dependencyBean = new DependencyBean();
        String[] startupOrders = dependencies.getStartupOrders();
        // Length checks added: [0] on an empty array throws.
        if (startupOrders != null && startupOrders.length > 0 && startupOrders[0] != null) {
            dependencyBean.setStartupOrders(convertStringArrayToStartupOrderBeans(startupOrders));
        }
        String[] scalingDependants = dependencies.getScalingDependants();
        if (scalingDependants != null && scalingDependants.length > 0 && scalingDependants[0] != null) {
            dependencyBean.setScalingDependents(convertStringArrayToDependentScalingBeans(scalingDependants));
        }
        dependencyBean.setTerminationBehaviour(dependencies.getTerminationBehaviour());
        serviceGroupDefinition.setDependencies(dependencyBean);
    }

    String[] cartridges = serviceGroup.getCartridges();
    if (cartridges != null && cartridges.length > 0 && cartridges[0] != null) {
        serviceGroupDefinition.setCartridges(new ArrayList<String>(Arrays.asList(cartridges)));
    }
    serviceGroupDefinition.setGroups(groupDefinitions);
    return serviceGroupDefinition;
}
/**
 * Validates terminationBehavior. The terminationBehavior should be one of the following:
 * 1. terminate-none
 * 2. terminate-dependents
 * 3. terminate-all
 *
 * A null value is accepted (no behaviour specified).
 *
 * @throws ServiceGroupDefinitionException if terminationBehavior is different to what is
 *                                         listed above
 */
private static void validateTerminationBehavior(String terminationBehavior) throws ServiceGroupDefinitionException {
    if (terminationBehavior == null) {
        return;
    }
    switch (terminationBehavior) {
        case "terminate-none":
        case "terminate-dependents":
        case "terminate-all":
            return;
        default:
            throw new ServiceGroupDefinitionException("Invalid Termination Behaviour specified: [ " +
                    terminationBehavior + " ], should be one of 'terminate-none', 'terminate-dependents', " +
                    " 'terminate-all' ");
    }
}
/**
 * Converts an {@link ApplicationSignUpBean} into a stub
 * {@link ApplicationSignUp}, copying the artifact repositories when present.
 *
 * @param applicationSignUpBean sign-up bean; may be null
 * @return the converted stub sign-up, or null when the input is null
 */
public static ApplicationSignUp convertApplicationSignUpBeanToStubApplicationSignUp(
        ApplicationSignUpBean applicationSignUpBean) {
    if (applicationSignUpBean == null) {
        return null;
    }
    ApplicationSignUp applicationSignUp = new ApplicationSignUp();
    List<ArtifactRepositoryBean> repositoryBeans = applicationSignUpBean.getArtifactRepositories();
    if (repositoryBeans != null) {
        ArtifactRepository[] repositories = new ArtifactRepository[repositoryBeans.size()];
        int index = 0;
        for (ArtifactRepositoryBean bean : repositoryBeans) {
            ArtifactRepository repository = new ArtifactRepository();
            repository.setAlias(bean.getAlias());
            repository.setPrivateRepo(bean.isPrivateRepo());
            repository.setRepoUrl(bean.getRepoUrl());
            repository.setRepoUsername(bean.getRepoUsername());
            repository.setRepoPassword(bean.getRepoPassword());
            repositories[index++] = repository;
        }
        applicationSignUp.setArtifactRepositories(repositories);
    }
    return applicationSignUp;
}
/**
 * Converts a stub {@link ApplicationSignUp} into an
 * {@link ApplicationSignUpBean}; null repository entries are skipped.
 *
 * @param applicationSignUp stub sign-up; may be null
 * @return the converted bean, or null when the input is null
 */
public static ApplicationSignUpBean convertStubApplicationSignUpToApplicationSignUpBean(ApplicationSignUp applicationSignUp) {
    if (applicationSignUp == null) {
        return null;
    }
    ApplicationSignUpBean signUpBean = new ApplicationSignUpBean();
    if (applicationSignUp.getArtifactRepositories() != null) {
        List<ArtifactRepositoryBean> repositoryBeans = new ArrayList<ArtifactRepositoryBean>();
        for (ArtifactRepository repository : applicationSignUp.getArtifactRepositories()) {
            if (repository == null) {
                continue;
            }
            ArtifactRepositoryBean bean = new ArtifactRepositoryBean();
            bean.setAlias(repository.getAlias());
            bean.setPrivateRepo(repository.getPrivateRepo());
            bean.setRepoUrl(repository.getRepoUrl());
            bean.setRepoUsername(repository.getRepoUsername());
            bean.setRepoPassword(repository.getRepoPassword());
            repositoryBeans.add(bean);
        }
        signUpBean.setArtifactRepositories(repositoryBeans);
    }
    return signUpBean;
}
/**
 * Converts a domain-mapping REST bean into its stub counterpart.
 * Returns {@code null} for {@code null} input.
 */
public static DomainMapping convertDomainMappingBeanToStubDomainMapping(DomainMappingBean domainMappingBean) {
    if (domainMappingBean == null) {
        return null;
    }
    DomainMapping stubMapping = new DomainMapping();
    stubMapping.setDomainName(domainMappingBean.getDomainName());
    stubMapping.setContextPath(domainMappingBean.getContextPath());
    return stubMapping;
}
/**
 * Converts a stub domain mapping into its REST bean counterpart.
 * Returns {@code null} for {@code null} input.
 */
public static DomainMappingBean convertStubDomainMappingToDomainMappingBean(DomainMapping domainMapping) {
    if (domainMapping == null) {
        return null;
    }
    DomainMappingBean mappingBean = new DomainMappingBean();
    mappingBean.setDomainName(domainMapping.getDomainName());
    mappingBean.setContextPath(domainMapping.getContextPath());
    return mappingBean;
}
/**
 * Converts a cloud-controller stub deployment policy into its REST bean,
 * including its network-partition references. Returns {@code null} for
 * {@code null} input.
 */
public static DeploymentPolicyBean convertCCStubDeploymentPolicyToDeploymentPolicy(DeploymentPolicy deploymentPolicy) {
    if (deploymentPolicy == null) {
        return null;
    }
    DeploymentPolicyBean policyBean = new DeploymentPolicyBean();
    policyBean.setId(deploymentPolicy.getId());
    NetworkPartitionRef[] networkPartitionRefs = deploymentPolicy.getNetworkPartitionRefs();
    policyBean.setNetworkPartitions(
            convertASStubNetworkPartitionsToNetworkPartitionReferences(networkPartitionRefs));
    return policyBean;
}
/**
 * Converts an application-policy REST bean into a stub {@code ApplicationPolicy},
 * stamping it with the given UUID and tenant id. Properties are copied only when
 * present and non-empty. Returns {@code null} for {@code null} input.
 */
public static ApplicationPolicy convertApplicationPolicyBeanToStubAppPolicy(
        ApplicationPolicyBean applicationPolicyBean, String applicationPolicyUuid, int tenantId) {
    if (applicationPolicyBean == null) {
        return null;
    }
    ApplicationPolicy stubPolicy = new ApplicationPolicy();
    stubPolicy.setId(applicationPolicyBean.getId());
    stubPolicy.setAlgorithm(applicationPolicyBean.getAlgorithm());
    stubPolicy.setNetworkPartitions(applicationPolicyBean.getNetworkPartitions());
    stubPolicy.setUuid(applicationPolicyUuid);
    stubPolicy.setTenantId(tenantId);
    // Only propagate properties when there is at least one entry.
    if (applicationPolicyBean.getProperties() != null
            && !applicationPolicyBean.getProperties().isEmpty()) {
        stubPolicy.setProperties(
                getASPropertiesFromCommonProperties(applicationPolicyBean.getProperties()));
    }
    return stubPolicy;
}
/**
 * Converts a deployment-policy REST bean into an autoscaler stub policy,
 * stamping it with the given UUID and tenant id.
 *
 * @throws RemoteException if resolving network-partition UUIDs via the
 *         cloud controller fails
 */
public static DeploymentPolicy convertDeploymentPolicyBeanToASDeploymentPolicy(
        DeploymentPolicyBean deploymentPolicyBean, String deploymentPolicyUuid, int tenantId) throws RemoteException {
    if (deploymentPolicyBean == null) {
        return null;
    }
    DeploymentPolicy stubPolicy = new DeploymentPolicy();
    stubPolicy.setId(deploymentPolicyBean.getId());
    stubPolicy.setUuid(deploymentPolicyUuid);
    stubPolicy.setTenantId(tenantId);
    List<NetworkPartitionReferenceBean> networkPartitions = deploymentPolicyBean.getNetworkPartitions();
    if (networkPartitions != null) {
        stubPolicy.setNetworkPartitionRefs(
                convertNetworkPartitionToASStubNetworkPartition(networkPartitions));
    }
    return stubPolicy;
}
/**
 * Converts an array of autoscaler stub deployment policies into REST beans.
 * A {@code null} input yields an empty array, never {@code null}.
 */
public static DeploymentPolicyBean[] convertASStubDeploymentPoliciesToDeploymentPolicies(
        DeploymentPolicy[] deploymentPolicies) {
    if (deploymentPolicies == null) {
        return new DeploymentPolicyBean[0];
    }
    DeploymentPolicyBean[] policyBeans = new DeploymentPolicyBean[deploymentPolicies.length];
    for (int index = 0; index < deploymentPolicies.length; index++) {
        policyBeans[index] = convertASStubDeploymentPolicyToDeploymentPolicy(deploymentPolicies[index]);
    }
    return policyBeans;
}
/**
 * Converts a single autoscaler stub deployment policy into its REST bean,
 * including its network-partition references. Returns {@code null} for
 * {@code null} input.
 */
private static DeploymentPolicyBean convertASStubDeploymentPolicyToDeploymentPolicy(DeploymentPolicy deploymentPolicy) {
    if (deploymentPolicy == null) {
        return null;
    }
    DeploymentPolicyBean policyBean = new DeploymentPolicyBean();
    policyBean.setId(deploymentPolicy.getId());
    NetworkPartitionRef[] networkPartitionRefs = deploymentPolicy.getNetworkPartitionRefs();
    policyBean.setNetworkPartitions(
            convertASStubNetworkPartitionRefsToNetworkPartitions(networkPartitionRefs));
    return policyBean;
}
/**
 * Converts autoscaler stub network-partition references into REST beans.
 *
 * <p>Fix: the original dereferenced {@code networkPartitions} without a null
 * check, but its caller passes {@code deploymentPolicy.getNetworkPartitionRefs()}
 * unguarded; generated stub getters can return {@code null}, which caused an
 * NPE here. A {@code null} input now yields an empty list.
 *
 * @param networkPartitions stub refs, possibly {@code null}
 * @return converted beans, never {@code null}
 */
private static List<NetworkPartitionReferenceBean> convertASStubNetworkPartitionRefsToNetworkPartitions(
        NetworkPartitionRef[] networkPartitions) {
    List<NetworkPartitionReferenceBean> networkPartitionBeans = new ArrayList<NetworkPartitionReferenceBean>();
    if (networkPartitions == null) {
        return networkPartitionBeans;
    }
    for (NetworkPartitionRef networkPartition : networkPartitions) {
        NetworkPartitionReferenceBean networkPartitionReferenceBean = new NetworkPartitionReferenceBean();
        networkPartitionReferenceBean.setId(networkPartition.getId());
        networkPartitionReferenceBean.setPartitionAlgo(networkPartition.getPartitionAlgo());
        networkPartitionReferenceBean.setPartitions(
                convertASStubPartitionRefsToPartitionReferences(networkPartition.getPartitionRefs()));
        networkPartitionBeans.add(networkPartitionReferenceBean);
    }
    return networkPartitionBeans;
}
/**
 * Converts autoscaler stub partition references into REST beans.
 *
 * <p>Fix: added a null guard — callers pass {@code getPartitionRefs()} directly
 * and generated stub getters can return {@code null}; a {@code null} input now
 * yields an empty list instead of an NPE.
 *
 * <p>NOTE(review): this maps {@code partition.getId()} into the bean's
 * <em>uuid</em> field and never sets the bean's id, while the reverse converter
 * ({@code convertPartitionToASStubPartition}) sets both uuid and id. This looks
 * like a possible copy mistake — confirm intended field mapping before changing.
 *
 * @param partitions stub partition refs, possibly {@code null}
 * @return converted beans, never {@code null}
 */
private static List<PartitionReferenceBean> convertASStubPartitionRefsToPartitionReferences(
        PartitionRef[] partitions) {
    List<PartitionReferenceBean> partitionRefBeans = new ArrayList<PartitionReferenceBean>();
    if (partitions == null) {
        return partitionRefBeans;
    }
    for (PartitionRef partition : partitions) {
        PartitionReferenceBean partitionRefBean = new PartitionReferenceBean();
        partitionRefBean.setUuid(partition.getId());
        partitionRefBean.setPartitionMax(partition.getPartitionMax());
        partitionRefBeans.add(partitionRefBean);
    }
    return partitionRefBeans;
}
/**
 * Converts a partition-reference REST bean into an autoscaler stub partition,
 * stamping it with the current carbon-context tenant id. Returns {@code null}
 * for {@code null} input.
 */
private static PartitionRef convertPartitionToASStubPartition(
        PartitionReferenceBean partitionReferenceBean) {
    if (partitionReferenceBean == null) {
        return null;
    }
    PartitionRef stubPartition = new PartitionRef();
    stubPartition.setUuid(partitionReferenceBean.getUuid());
    stubPartition.setId(partitionReferenceBean.getId());
    // Tenant id comes from the thread-local carbon context of the caller.
    PrivilegedCarbonContext threadLocalContext = PrivilegedCarbonContext.getThreadLocalCarbonContext();
    stubPartition.setTenantId(threadLocalContext.getTenantId());
    stubPartition.setPartitionMax(partitionReferenceBean.getPartitionMax());
    return stubPartition;
}
/**
 * Converts a list of partition-reference beans into an array of autoscaler
 * stub partitions, preserving order.
 */
private static PartitionRef[] convertToASStubPartitions
        (List<PartitionReferenceBean> partitionReferenceBeanList) {
    int count = partitionReferenceBeanList.size();
    PartitionRef[] stubPartitions = new PartitionRef[count];
    int index = 0;
    for (PartitionReferenceBean referenceBean : partitionReferenceBeanList) {
        stubPartitions[index++] = convertPartitionToASStubPartition(referenceBean);
    }
    return stubPartitions;
}
/**
 * Converts network-partition reference beans into autoscaler stub refs.
 * For each bean, the matching network-partition UUID is resolved remotely via
 * the cloud controller service using the bean id and the current tenant id.
 *
 * @param networkPartitionReferenceBeans beans to convert; must not be null
 * @return stub refs in input order, never null (empty array for an empty list)
 * @throws RemoteException if the cloud controller UUID lookup fails
 */
private static NetworkPartitionRef[] convertNetworkPartitionToASStubNetworkPartition(
        List<NetworkPartitionReferenceBean> networkPartitionReferenceBeans) throws RemoteException {
    List<NetworkPartitionRef> networkPartitionRefList =
            new ArrayList<NetworkPartitionRef>();
    for (NetworkPartitionReferenceBean networkPartitionReferenceBean : networkPartitionReferenceBeans) {
        NetworkPartitionRef networkPartitionRef = new NetworkPartitionRef();
        networkPartitionRef.setId(networkPartitionReferenceBean.getId());
        // One remote lookup per bean: resolve the UUID for (id, tenant).
        // NOTE(review): the carbon context and service client are loop-invariant and
        // could be hoisted, at the cost of fetching the client even for an empty list.
        PrivilegedCarbonContext carbonContext = PrivilegedCarbonContext.getThreadLocalCarbonContext();
        String networkPartitionUuid = CloudControllerServiceClient.getInstance().getNetworkPartitionUuid
                (networkPartitionReferenceBean.getId(), carbonContext.getTenantId());
        networkPartitionRef.setUuid(networkPartitionUuid);
        networkPartitionRef.setPartitionAlgo(networkPartitionReferenceBean.getPartitionAlgo());
        // Partitions are optional on the bean; only convert when present.
        if (networkPartitionReferenceBean.getPartitions() != null) {
            networkPartitionRef.setPartitionRefs(convertToASStubPartitions(
                    networkPartitionReferenceBean.getPartitions()));
        }
        networkPartitionRefList.add(networkPartitionRef);
    }
    return networkPartitionRefList.toArray(new NetworkPartitionRef[networkPartitionRefList.size()]);
}
/**
 * Wraps an array of IaaS provider names in an {@code IaasProviderInfoBean}.
 * A {@code null} array leaves the bean's provider list unset.
 */
public static IaasProviderInfoBean convertStringArrayToIaasProviderInfoBean(String[] iaasProviders) {
    IaasProviderInfoBean providerInfoBean = new IaasProviderInfoBean();
    if (iaasProviders == null) {
        return providerInfoBean;
    }
    providerInfoBean.setIaasProviders(Arrays.asList(iaasProviders));
    return providerInfoBean;
}
}
| |
/*
* Copyright 2012-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.facebook.buck.rules;
import com.facebook.buck.event.BuckEventBus;
import com.facebook.buck.model.BuildTarget;
import com.facebook.buck.model.Pair;
import com.facebook.buck.parser.NoSuchBuildTargetException;
import com.facebook.buck.util.HumanReadableException;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.collect.ImmutableSortedSet;
import com.google.common.collect.Iterables;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import javax.annotation.Nullable;
/**
* Provides a mechanism for mapping between a {@link BuildTarget} and the {@link BuildRule} it
* represents. Once parsing is complete, instances of this class can be considered immutable.
*/
public class BuildRuleResolver {

  private final TargetGraph targetGraph;
  private final TargetNodeToBuildRuleTransformer buildRuleGenerator;

  /**
   * Event bus for reporting performance information.
   * Will likely be null in unit tests.
   */
  @Nullable
  private final BuckEventBus eventBus;

  // All rules created so far, keyed by their build target. ConcurrentHashMap because
  // requireRule/addToIndex may be invoked from multiple threads during rule creation.
  private final ConcurrentHashMap<BuildTarget, BuildRule> buildRuleIndex;

  // Memoizes metadata lookups per (target, metadata class) pair; populated lazily by
  // the CacheLoader installed in the constructor. See requireMetadata().
  private final LoadingCache<Pair<BuildTarget, Class<?>>, Optional<?>> metadataCache;

  /** Convenience constructor with no event bus (typical in unit tests). */
  public BuildRuleResolver(
      TargetGraph targetGraph,
      TargetNodeToBuildRuleTransformer buildRuleGenerator) {
    this(targetGraph, buildRuleGenerator, null);
  }

  /**
   * @param targetGraph graph used to look up target nodes for rule/metadata creation
   * @param buildRuleGenerator transformer invoked to build rules on demand
   * @param eventBus optional bus for performance reporting; may be null
   */
  public BuildRuleResolver(
      TargetGraph targetGraph,
      TargetNodeToBuildRuleTransformer buildRuleGenerator,
      @Nullable BuckEventBus eventBus) {
    this.targetGraph = targetGraph;
    this.buildRuleGenerator = buildRuleGenerator;
    this.eventBus = eventBus;
    this.buildRuleIndex = new ConcurrentHashMap<>();
    this.metadataCache = CacheBuilder.newBuilder()
        .build(
            new CacheLoader<Pair<BuildTarget, Class<?>>, Optional<?>>() {
              @Override
              public Optional<?> load(Pair<BuildTarget, Class<?>> key) throws Exception {
                // Resolve the target node, then delegate to the generic helper so the
                // node's constructor-arg type is captured as a type variable.
                TargetNode<?, ?> node = BuildRuleResolver.this.targetGraph.get(key.getFirst());
                return load(node, key.getSecond());
              }

              // Unchecked: the cast of description to MetadataProvidingDescription<T> is
              // guarded by the instanceof check below.
              @SuppressWarnings("unchecked")
              private <T, U> Optional<U> load(
                  TargetNode<T, ?> node,
                  Class<U> metadataClass) throws NoSuchBuildTargetException {
                T arg = node.getConstructorArg();
                // Fast path: the constructor arg itself satisfies the metadata request.
                if (metadataClass.isAssignableFrom(arg.getClass())) {
                  return Optional.of(metadataClass.cast(arg));
                }
                Description<?> description = node.getDescription();
                if (!(description instanceof MetadataProvidingDescription)) {
                  return Optional.empty();
                }
                MetadataProvidingDescription<T> metadataProvidingDescription =
                    (MetadataProvidingDescription<T>) description;
                return metadataProvidingDescription.createMetadata(
                    node.getBuildTarget(),
                    BuildRuleResolver.this,
                    arg,
                    metadataClass);
              }
            });
  }

  /**
   * @return an unmodifiable view of the rules in the index
   */
  public Iterable<BuildRule> getBuildRules() {
    return Iterables.unmodifiableIterable(buildRuleIndex.values());
  }

  // Converts an absent index entry into a HumanReadableException naming the target.
  private <T> T fromNullable(BuildTarget target, @Nullable T rule) {
    if (rule == null) {
      throw new HumanReadableException("Rule for target '%s' could not be resolved.", target);
    }
    return rule;
  }

  /**
   * Returns the {@link BuildRule} with the {@code buildTarget}.
   */
  public BuildRule getRule(BuildTarget buildTarget) {
    return fromNullable(buildTarget, buildRuleIndex.get(buildTarget));
  }

  /** Returns the already-created rule for {@code buildTarget}, if any; never creates one. */
  public Optional<BuildRule> getRuleOptional(BuildTarget buildTarget) {
    return Optional.ofNullable(buildRuleIndex.get(buildTarget));
  }

  /**
   * Returns the rule for {@code target}, creating it via the transformer on first
   * request and caching it in the index.
   *
   * @throws NoSuchBuildTargetException if the target cannot be transformed into a rule
   */
  public BuildRule requireRule(BuildTarget target) throws NoSuchBuildTargetException {
    BuildRule rule = buildRuleIndex.get(target);
    if (rule != null) {
      return rule;
    }
    TargetNode<?, ?> node = targetGraph.get(target);
    rule = buildRuleGenerator.transform(targetGraph, this, node);
    Preconditions.checkState(
        // TODO(k21): This should hold for flavored build targets as well.
        rule.getBuildTarget().getUnflavoredBuildTarget().equals(target.getUnflavoredBuildTarget()),
        "Description returned rule for '%s' instead of '%s'.",
        rule.getBuildTarget(),
        target);
    BuildRule oldRule = buildRuleIndex.put(target, rule);
    Preconditions.checkState(
        // TODO(k21): Eventually we should be able to remove the oldRule == rule part.
        // For now we need it to handle cases where a description adds a rule to the index before
        // returning it.
        oldRule == null || oldRule == rule,
        "Multiple rules created for target '%s':\n" +
            "new rule '%s' does not match existing rule '%s'.",
        target,
        rule,
        oldRule);
    return rule;
  }

  /** Creates (if needed) and returns the rules for all {@code buildTargets}, sorted. */
  public ImmutableSortedSet<BuildRule> requireAllRules(Iterable<BuildTarget> buildTargets)
      throws NoSuchBuildTargetException {
    ImmutableSortedSet.Builder<BuildRule> rules = ImmutableSortedSet.naturalOrder();
    for (BuildTarget target : buildTargets) {
      rules.add(requireRule(target));
    }
    return rules.build();
  }

  /**
   * Looks up (and memoizes) metadata of type {@code metadataClass} for {@code target}.
   * {@link NoSuchBuildTargetException} raised by the cache loader is rethrown as-is;
   * any other loader failure surfaces as a {@link RuntimeException}.
   */
  @SuppressWarnings("unchecked")
  public <T> Optional<T> requireMetadata(BuildTarget target, Class<T> metadataClass)
      throws NoSuchBuildTargetException {
    try {
      return (Optional<T>) metadataCache.get(
          new Pair<BuildTarget, Class<?>>(target, metadataClass));
    } catch (ExecutionException e) {
      Throwables.propagateIfInstanceOf(e.getCause(), NoSuchBuildTargetException.class);
      throw new RuntimeException(e);
    }
  }

  /**
   * Returns the indexed rule for {@code buildTarget} cast to {@code cls}, empty if the
   * target has no rule yet, or throws if a rule exists but has the wrong type.
   */
  @SuppressWarnings("unchecked")
  public <T> Optional<T> getRuleOptionalWithType(
      BuildTarget buildTarget,
      Class<T> cls) {
    BuildRule rule = buildRuleIndex.get(buildTarget);
    if (rule != null) {
      if (cls.isInstance(rule)) {
        return Optional.of((T) rule);
      } else {
        throw new HumanReadableException(
            "Rule for target '%s' is present but not of expected type %s (got %s)",
            buildTarget,
            cls,
            rule.getClass());
      }
    }
    return Optional.empty();
  }

  /** Like {@link #getRuleOptionalWithType} but throws when the rule is absent. */
  public <T> T getRuleWithType(BuildTarget buildTarget, Class<T> cls) {
    return fromNullable(buildTarget, getRuleOptionalWithType(buildTarget, cls).orElse(null));
  }

  /** Returns the already-created rules for all {@code targets}, sorted; throws if any is absent. */
  public ImmutableSortedSet<BuildRule> getAllRules(Iterable<BuildTarget> targets) {
    ImmutableSortedSet.Builder<BuildRule> rules = ImmutableSortedSet.naturalOrder();
    for (BuildTarget target : targets) {
      rules.add(getRule(target));
    }
    return rules.build();
  }

  /**
   * Adds to the index a mapping from {@code buildRule}'s target to itself and returns
   * {@code buildRule}.
   */
  @VisibleForTesting
  public <T extends BuildRule> T addToIndex(T buildRule) {
    BuildRule oldValue = buildRuleIndex.put(buildRule.getBuildTarget(), buildRule);
    // Yuck! This is here to make it possible for a rule to depend on a flavor of itself but it
    // would be much much better if we just got rid of the BuildRuleResolver entirely.
    if (oldValue != null && oldValue != buildRule) {
      throw new IllegalStateException("A build rule for this target has already been created: " +
          oldValue.getBuildTarget());
    }
    return buildRule;
  }

  /**
   * Adds an iterable of build rules to the index.
   */
  public <T extends BuildRule, C extends Iterable<T>> C addAllToIndex(C buildRules) {
    for (T buildRule : buildRules) {
      addToIndex(buildRule);
    }
    return buildRules;
  }

  /** @return the event bus passed at construction; null when none was provided */
  @Nullable
  public BuckEventBus getEventBus() {
    return eventBus;
  }
}
| |
package org.manufacturingoot.view;

import java.awt.event.WindowEvent;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
import javax.persistence.EntityManagerFactory;
import javax.swing.JFrame;
import javax.swing.JOptionPane;
import org.manufacturingoot.model.ManufacturingOrder;
import org.manufacturingoot.model.ProductionDepartment;
import org.manufacturingoot.service.ManufacturingOrderService;
import org.manufacturingoot.util.Constants;
import org.manufacturingoot.util.SessionUtil;

/**
 * Swing form for creating or editing a {@link ManufacturingOrder}.
 * Pass {@code mo == null} to create a new order, or an existing order to edit it;
 * the form fields are pre-populated accordingly by {@link #prepareForm()}.
 */
public class ManufacturingOrderForm extends javax.swing.JFrame {

    // Order being edited; null until "Save" is pressed in create mode.
    private ManufacturingOrder currentItem;
    // Factory handed to ManufacturingOrderService for persistence operations.
    private EntityManagerFactory emf;

    /**
     * @param emf entity manager factory used for persistence
     * @param mo  order to edit, or null to create a new one
     */
    public ManufacturingOrderForm(EntityManagerFactory emf, ManufacturingOrder mo) {
        initComponents();
        this.emf = emf;
        currentItem = mo;
        // Override the generated EXIT_ON_CLOSE (set in initComponents) so closing
        // this form disposes only this window instead of terminating the app.
        setDefaultCloseOperation(JFrame.DISPOSE_ON_CLOSE);
        prepareForm();
        setLocationRelativeTo(null);
    }

    /**
     * This method is called from within the constructor to initialize the form.
     * WARNING: Do NOT modify this code. The content of this method is always
     * regenerated by the Form Editor.
     */
    @SuppressWarnings("unchecked")
    // <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents
    private void initComponents() {
        jPanel1 = new javax.swing.JPanel();
        jLabel1 = new javax.swing.JLabel();
        jLabel2 = new javax.swing.JLabel();
        textEmail = new javax.swing.JTextField();
        jLabel3 = new javax.swing.JLabel();
        jLabel4 = new javax.swing.JLabel();
        jLabel5 = new javax.swing.JLabel();
        comboStatus = new javax.swing.JComboBox();
        buttonSave = new javax.swing.JButton();
        jLabel6 = new javax.swing.JLabel();
        textId = new javax.swing.JTextField();
        textDate = new javax.swing.JFormattedTextField();
        jScrollPane1 = new javax.swing.JScrollPane();
        textMessage = new javax.swing.JTextArea();
        setDefaultCloseOperation(javax.swing.WindowConstants.EXIT_ON_CLOSE);
        jLabel1.setText("Manufacturing Order");
        jLabel2.setText("Email");
        jLabel3.setText("Message");
        jLabel4.setText("Receive Date");
        jLabel5.setText("Status");
        comboStatus.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "UNPROCESSED", "PROCESSING", "PROCESSED" }));
        buttonSave.setText("Save");
        buttonSave.addActionListener(new java.awt.event.ActionListener() {
            public void actionPerformed(java.awt.event.ActionEvent evt) {
                buttonSaveActionPerformed(evt);
            }
        });
        jLabel6.setText("Id");
        textId.setEnabled(false);
        textDate.setFormatterFactory(new javax.swing.text.DefaultFormatterFactory(new javax.swing.text.DateFormatter(new java.text.SimpleDateFormat("yyyy-MM-dd hh:mm:ss"))));
        textMessage.setColumns(20);
        textMessage.setRows(5);
        jScrollPane1.setViewportView(textMessage);
        javax.swing.GroupLayout jPanel1Layout = new javax.swing.GroupLayout(jPanel1);
        jPanel1.setLayout(jPanel1Layout);
        jPanel1Layout.setHorizontalGroup(
            jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
            .addGroup(jPanel1Layout.createSequentialGroup()
                .addContainerGap()
                .addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                    .addGroup(javax.swing.GroupLayout.Alignment.TRAILING, jPanel1Layout.createSequentialGroup()
                        .addGap(0, 0, Short.MAX_VALUE)
                        .addComponent(buttonSave))
                    .addGroup(jPanel1Layout.createSequentialGroup()
                        .addComponent(jLabel1)
                        .addGap(0, 0, Short.MAX_VALUE))
                    .addGroup(jPanel1Layout.createSequentialGroup()
                        .addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                            .addComponent(jLabel3)
                            .addComponent(jLabel4)
                            .addComponent(jLabel2)
                            .addComponent(jLabel5)
                            .addComponent(jLabel6))
                        .addGap(16, 16, 16)
                        .addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                            .addComponent(comboStatus, 0, 300, Short.MAX_VALUE)
                            .addComponent(textEmail)
                            .addComponent(textId)
                            .addComponent(textDate, javax.swing.GroupLayout.Alignment.TRAILING)
                            .addComponent(jScrollPane1))))
                .addContainerGap())
        );
        jPanel1Layout.setVerticalGroup(
            jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
            .addGroup(jPanel1Layout.createSequentialGroup()
                .addContainerGap()
                .addComponent(jLabel1)
                .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
                .addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
                    .addComponent(jLabel6)
                    .addComponent(textId, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
                .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
                .addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
                    .addComponent(jLabel2)
                    .addComponent(textEmail, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
                .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
                .addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                    .addComponent(jScrollPane1, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
                    .addComponent(jLabel3))
                .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
                .addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
                    .addComponent(jLabel4)
                    .addComponent(textDate, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
                .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
                .addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
                    .addComponent(jLabel5)
                    .addComponent(comboStatus, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
                .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
                .addComponent(buttonSave)
                .addContainerGap())
        );
        javax.swing.GroupLayout layout = new javax.swing.GroupLayout(getContentPane());
        getContentPane().setLayout(layout);
        layout.setHorizontalGroup(
            layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
            .addGap(0, 420, Short.MAX_VALUE)
            .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                .addGroup(javax.swing.GroupLayout.Alignment.TRAILING, layout.createSequentialGroup()
                    .addContainerGap(javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
                    .addComponent(jPanel1, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
                    .addContainerGap(javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)))
        );
        layout.setVerticalGroup(
            layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
            .addGap(0, 318, Short.MAX_VALUE)
            .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                .addGroup(javax.swing.GroupLayout.Alignment.TRAILING, layout.createSequentialGroup()
                    .addContainerGap(javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
                    .addComponent(jPanel1, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
                    .addContainerGap(javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)))
        );
        pack();
    }// </editor-fold>//GEN-END:initComponents

    // "Save" handler: in edit mode, copies the form into currentItem and updates it
    // (failures show a dialog — message text is Indonesian, "update failed"); in
    // create mode, builds a fresh order and persists it. In both cases the window
    // is then closed by dispatching WINDOW_CLOSING.
    private void buttonSaveActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_buttonSaveActionPerformed
        ManufacturingOrderService mos = new ManufacturingOrderService(emf);
        if (currentItem != null) {
            loadForm();
            try {
                mos.edit(currentItem);
                textEmail.requestFocus();
            } catch (Exception ex) {
                ex.printStackTrace();
                JOptionPane.showMessageDialog(null, "Gagal melakukan update");
            }
        } else {
            currentItem = new ManufacturingOrder();
            loadForm();
            mos.create(currentItem);
        }
        WindowEvent event = new WindowEvent(this, WindowEvent.WINDOW_CLOSING);
        dispatchEvent(event);
    }//GEN-LAST:event_buttonSaveActionPerformed

    // Copies the form fields into currentItem. An unparseable receive date is
    // reported via a dialog ("date could not be processed") and left unset; all
    // other fields are still copied. The creator is taken from the login session.
    // NOTE(review): assumes SessionUtil.getSession() holds a ProductionDepartment —
    // confirm against the login flow.
    private void loadForm() {
        currentItem.setEmail(textEmail.getText().trim());
        currentItem.setMessage(textMessage.getText().trim());
        String date = textDate.getText().trim();
        try {
            currentItem.setReceiveDate(
                    new SimpleDateFormat(Constants.DATETIME_FORMAT).parse(date));
        } catch (ParseException ex) {
            ex.printStackTrace();
            JOptionPane.showMessageDialog(null, "Date tidak dapat diolah");
        }
        currentItem.setStatus(comboStatus.getSelectedItem().toString());
        currentItem.setCreatedBy((ProductionDepartment) SessionUtil.getSession());
    }

    // Populates the form: blank fields plus the current timestamp in create mode,
    // or the existing order's values in edit mode.
    private void prepareForm() {
        if (currentItem == null) {
            textEmail.setText("");
            textMessage.setText("");
            textDate.setText(
                    new SimpleDateFormat(Constants.DATETIME_FORMAT).format(new Date()));
        } else {
            textId.setText(currentItem.getId().toString());
            textEmail.setText(currentItem.getEmail());
            textMessage.setText(currentItem.getMessage());
            String date = new SimpleDateFormat(Constants.DATETIME_FORMAT).format(currentItem.getReceiveDate());
            textDate.setText(date);
            comboStatus.setSelectedItem(currentItem.getStatus());
        }
    }

    // Variables declaration - do not modify//GEN-BEGIN:variables
    private javax.swing.JButton buttonSave;
    private javax.swing.JComboBox comboStatus;
    private javax.swing.JLabel jLabel1;
    private javax.swing.JLabel jLabel2;
    private javax.swing.JLabel jLabel3;
    private javax.swing.JLabel jLabel4;
    private javax.swing.JLabel jLabel5;
    private javax.swing.JLabel jLabel6;
    private javax.swing.JPanel jPanel1;
    private javax.swing.JScrollPane jScrollPane1;
    private javax.swing.JFormattedTextField textDate;
    private javax.swing.JTextField textEmail;
    private javax.swing.JTextField textId;
    private javax.swing.JTextArea textMessage;
    // End of variables declaration//GEN-END:variables
}
| |
/*
* Copyright 2002-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.security.saml2.provider.service.authentication;
import java.security.cert.X509Certificate;
import java.util.ArrayList;
import java.util.Base64;
import java.util.List;
import java.util.UUID;
import javax.crypto.SecretKey;
import javax.crypto.spec.SecretKeySpec;
import javax.xml.namespace.QName;
import org.apache.xml.security.encryption.XMLCipherParameters;
import org.joda.time.DateTime;
import org.joda.time.Duration;
import org.opensaml.core.xml.XMLObject;
import org.opensaml.core.xml.config.XMLObjectProviderRegistrySupport;
import org.opensaml.core.xml.io.MarshallingException;
import org.opensaml.core.xml.schema.XSAny;
import org.opensaml.core.xml.schema.XSBoolean;
import org.opensaml.core.xml.schema.XSBooleanValue;
import org.opensaml.core.xml.schema.XSDateTime;
import org.opensaml.core.xml.schema.XSInteger;
import org.opensaml.core.xml.schema.XSString;
import org.opensaml.core.xml.schema.XSURI;
import org.opensaml.core.xml.schema.impl.XSAnyBuilder;
import org.opensaml.core.xml.schema.impl.XSBooleanBuilder;
import org.opensaml.core.xml.schema.impl.XSDateTimeBuilder;
import org.opensaml.core.xml.schema.impl.XSIntegerBuilder;
import org.opensaml.core.xml.schema.impl.XSStringBuilder;
import org.opensaml.core.xml.schema.impl.XSURIBuilder;
import org.opensaml.saml.common.SAMLVersion;
import org.opensaml.saml.common.SignableSAMLObject;
import org.opensaml.saml.saml2.core.Assertion;
import org.opensaml.saml.saml2.core.Attribute;
import org.opensaml.saml.saml2.core.AttributeStatement;
import org.opensaml.saml.saml2.core.AttributeValue;
import org.opensaml.saml.saml2.core.AuthnRequest;
import org.opensaml.saml.saml2.core.Conditions;
import org.opensaml.saml.saml2.core.EncryptedAssertion;
import org.opensaml.saml.saml2.core.EncryptedAttribute;
import org.opensaml.saml.saml2.core.EncryptedID;
import org.opensaml.saml.saml2.core.Issuer;
import org.opensaml.saml.saml2.core.NameID;
import org.opensaml.saml.saml2.core.Response;
import org.opensaml.saml.saml2.core.Subject;
import org.opensaml.saml.saml2.core.SubjectConfirmation;
import org.opensaml.saml.saml2.core.SubjectConfirmationData;
import org.opensaml.saml.saml2.core.impl.AttributeBuilder;
import org.opensaml.saml.saml2.core.impl.AttributeStatementBuilder;
import org.opensaml.saml.saml2.encryption.Encrypter;
import org.opensaml.security.SecurityException;
import org.opensaml.security.credential.BasicCredential;
import org.opensaml.security.credential.Credential;
import org.opensaml.security.credential.CredentialSupport;
import org.opensaml.security.credential.UsageType;
import org.opensaml.xmlsec.SignatureSigningParameters;
import org.opensaml.xmlsec.encryption.support.DataEncryptionParameters;
import org.opensaml.xmlsec.encryption.support.EncryptionException;
import org.opensaml.xmlsec.encryption.support.KeyEncryptionParameters;
import org.opensaml.xmlsec.signature.support.SignatureConstants;
import org.opensaml.xmlsec.signature.support.SignatureException;
import org.opensaml.xmlsec.signature.support.SignatureSupport;
import org.springframework.security.saml2.Saml2Exception;
import org.springframework.security.saml2.core.OpenSamlInitializationService;
import org.springframework.security.saml2.core.Saml2X509Credential;
import org.springframework.security.saml2.core.TestSaml2X509Credentials;
/**
 * Static factories for OpenSAML objects used in SAML 2.0 provider tests.
 * Not instantiable; all builders share the constants declared below.
 */
public final class TestOpenSamlObjects {

    static {
        // Bootstrap OpenSAML (builders/marshallers) before any factory method runs.
        OpenSamlInitializationService.initialize();
    }

    // Canonical test values shared by the factory methods below.
    private static String USERNAME = "test@saml.user";
    private static String DESTINATION = "https://localhost/login/saml2/sso/idp-alias";
    private static String RELYING_PARTY_ENTITY_ID = "https://localhost/saml2/service-provider-metadata/idp-alias";
    private static String ASSERTING_PARTY_ENTITY_ID = "https://some.idp.test/saml2/idp";
    // Fixed AES key (base64) used where symmetric encryption material is needed.
    private static SecretKey SECRET_KEY = new SecretKeySpec(
            Base64.getDecoder().decode("shOnwNMoCv88HKMEa91+FlYoD5RNvzMTAL5LGxZKIFk="), "AES");

    // Utility class: no instances.
    private TestOpenSamlObjects() {
    }
    // Builds a SAML Response aimed at the default destination and asserting party.
    static Response response() {
        return response(DESTINATION, ASSERTING_PARTY_ENTITY_ID);
    }
static Response response(String destination, String issuerEntityId) {
Response response = build(Response.DEFAULT_ELEMENT_NAME);
response.setID("R" + UUID.randomUUID().toString());
response.setIssueInstant(DateTime.now());
response.setVersion(SAMLVersion.VERSION_20);
response.setID("_" + UUID.randomUUID().toString());
response.setDestination(destination);
response.setIssuer(issuer(issuerEntityId));
return response;
}
static Response signedResponseWithOneAssertion() {
Response response = response();
response.getAssertions().add(assertion());
return signed(response, TestSaml2X509Credentials.assertingPartySigningCredential(), RELYING_PARTY_ENTITY_ID);
}
    // Builds an assertion using the default user, parties, and destination.
    static Assertion assertion() {
        return assertion(USERNAME, ASSERTING_PARTY_ENTITY_ID, RELYING_PARTY_ENTITY_ID, DESTINATION);
    }
static Assertion assertion(String username, String issuerEntityId, String recipientEntityId, String recipientUri) {
Assertion assertion = build(Assertion.DEFAULT_ELEMENT_NAME);
assertion.setID("A" + UUID.randomUUID().toString());
assertion.setIssueInstant(DateTime.now());
assertion.setVersion(SAMLVersion.VERSION_20);
assertion.setIssueInstant(DateTime.now());
assertion.setIssuer(issuer(issuerEntityId));
assertion.setSubject(subject(username));
assertion.setConditions(conditions());
SubjectConfirmation subjectConfirmation = subjectConfirmation();
subjectConfirmation.setMethod(SubjectConfirmation.METHOD_BEARER);
SubjectConfirmationData confirmationData = subjectConfirmationData(recipientEntityId);
confirmationData.setRecipient(recipientUri);
subjectConfirmation.setSubjectConfirmationData(confirmationData);
assertion.getSubject().getSubjectConfirmations().add(subjectConfirmation);
return assertion;
}
    // Builds an Issuer element carrying the given entity id.
    static Issuer issuer(String entityId) {
        Issuer issuer = build(Issuer.DEFAULT_ELEMENT_NAME);
        issuer.setValue(entityId);
        return issuer;
    }
static Subject subject(String principalName) {
Subject subject = build(Subject.DEFAULT_ELEMENT_NAME);
if (principalName != null) {
subject.setNameID(nameId(principalName));
}
return subject;
}
    // Builds a NameID element whose value is the given principal name.
    static NameID nameId(String principalName) {
        NameID nameId = build(NameID.DEFAULT_ELEMENT_NAME);
        nameId.setValue(principalName);
        return nameId;
    }
    // Builds an empty SubjectConfirmation element (method/data set by callers).
    static SubjectConfirmation subjectConfirmation() {
        return build(SubjectConfirmation.DEFAULT_ELEMENT_NAME);
    }
static SubjectConfirmationData subjectConfirmationData(String recipient) {
SubjectConfirmationData subject = build(SubjectConfirmationData.DEFAULT_ELEMENT_NAME);
subject.setRecipient(recipient);
subject.setNotBefore(DateTime.now().minus(Duration.millis(5 * 60 * 1000)));
subject.setNotOnOrAfter(DateTime.now().plus(Duration.millis(5 * 60 * 1000)));
return subject;
}
/** Creates {@code Conditions} spanning five minutes before to five minutes after now. */
static Conditions conditions() {
    Conditions result = build(Conditions.DEFAULT_ELEMENT_NAME);
    result.setNotBefore(DateTime.now().minus(Duration.standardMinutes(5)));
    result.setNotOnOrAfter(DateTime.now().plus(Duration.standardMinutes(5)));
    return result;
}
/**
 * Builds a test {@code AuthnRequest} issued by the asserting party, addressed to
 * its SSO endpoint, with the shared test destination as the ACS URL.
 */
public static AuthnRequest authnRequest() {
    Issuer requestIssuer = build(Issuer.DEFAULT_ELEMENT_NAME);
    requestIssuer.setValue(ASSERTING_PARTY_ENTITY_ID);
    AuthnRequest request = build(AuthnRequest.DEFAULT_ELEMENT_NAME);
    request.setIssuer(requestIssuer);
    request.setDestination(ASSERTING_PARTY_ENTITY_ID + "/SSO.saml2");
    request.setAssertionConsumerServiceURL(DESTINATION);
    return request;
}
/** Adapts a {@code Saml2X509Credential} to an OpenSAML signing {@code Credential} owned by {@code entityId}. */
static Credential getSigningCredential(Saml2X509Credential credential, String entityId) {
    BasicCredential signing = getBasicCredential(credential);
    signing.setEntityId(entityId);
    signing.setUsageType(UsageType.SIGNING);
    return signing;
}
/** Wraps the credential's certificate and private key in an OpenSAML {@code BasicCredential}. */
static BasicCredential getBasicCredential(Saml2X509Credential credential) {
    return CredentialSupport.getSimpleCredential(credential.getCertificate(), credential.getPrivateKey());
}
/**
 * Signs {@code signable} in place with the given credential and signature algorithm,
 * using a SHA-256 reference digest and exclusive canonicalization.
 *
 * @return the same {@code signable} instance, now signed
 * @throws Saml2Exception if marshalling or signing fails
 */
static <T extends SignableSAMLObject> T signed(T signable, Saml2X509Credential credential, String entityId,
        String signAlgorithmUri) {
    SignatureSigningParameters parameters = new SignatureSigningParameters();
    parameters.setSigningCredential(getSigningCredential(credential, entityId));
    parameters.setSignatureAlgorithm(signAlgorithmUri);
    parameters.setSignatureReferenceDigestMethod(SignatureConstants.ALGO_ID_DIGEST_SHA256);
    parameters.setSignatureCanonicalizationAlgorithm(SignatureConstants.ALGO_ID_C14N_EXCL_OMIT_COMMENTS);
    try {
        SignatureSupport.signObject(signable, parameters);
        return signable;
    }
    catch (MarshallingException | SignatureException | SecurityException ex) {
        throw new Saml2Exception(ex);
    }
}
/** Signs {@code signable} with RSA-SHA256, the default signature algorithm for these tests. */
static <T extends SignableSAMLObject> T signed(T signable, Saml2X509Credential credential, String entityId) {
    return signed(signable, credential, entityId, SignatureConstants.ALGO_ID_SIGNATURE_RSA_SHA256);
}
/** Encrypts the assertion for the holder of {@code credential}'s certificate. */
static EncryptedAssertion encrypted(Assertion assertion, Saml2X509Credential credential) {
    Encrypter encrypter = getEncrypter(credential.getCertificate());
    try {
        return encrypter.encrypt(assertion);
    }
    catch (EncryptionException ex) {
        throw new Saml2Exception("Unable to encrypt assertion.", ex);
    }
}
/** Encrypts the NameID for the holder of {@code credential}'s certificate. */
static EncryptedID encrypted(NameID nameId, Saml2X509Credential credential) {
    Encrypter encrypter = getEncrypter(credential.getCertificate());
    try {
        return encrypter.encrypt(nameId);
    }
    catch (EncryptionException ex) {
        throw new Saml2Exception("Unable to encrypt nameID.", ex);
    }
}
/**
 * Builds an attribute with the given name and value and encrypts it for the holder
 * of {@code credential}'s certificate.
 *
 * @throws Saml2Exception if encryption fails
 */
static EncryptedAttribute encrypted(String name, String value, Saml2X509Credential credential) {
    Attribute attribute = attribute(name, value);
    X509Certificate certificate = credential.getCertificate();
    Encrypter encrypter = getEncrypter(certificate);
    try {
        return encrypter.encrypt(attribute);
    }
    catch (EncryptionException ex) {
        // fixed copy-paste error: the message previously said "nameID"
        throw new Saml2Exception("Unable to encrypt attribute.", ex);
    }
}
/**
 * Builds an {@link Encrypter} that encrypts the payload with an AES-256 data key
 * (the shared test {@code SECRET_KEY}) and wraps that key for the holder of
 * {@code certificate} using RSA 1.5, placing the wrapped key next to the
 * encrypted data (PEER key placement).
 *
 * @param certificate the recipient's certificate used for key transport
 * @return a fully configured encrypter
 */
private static Encrypter getEncrypter(X509Certificate certificate) {
    String dataAlgorithm = XMLCipherParameters.AES_256;
    String keyAlgorithm = XMLCipherParameters.RSA_1_5;
    BasicCredential dataCredential = new BasicCredential(SECRET_KEY);
    DataEncryptionParameters dataEncryptionParameters = new DataEncryptionParameters();
    dataEncryptionParameters.setEncryptionCredential(dataCredential);
    dataEncryptionParameters.setAlgorithm(dataAlgorithm);
    Credential credential = CredentialSupport.getSimpleCredential(certificate, null);
    KeyEncryptionParameters keyEncryptionParameters = new KeyEncryptionParameters();
    keyEncryptionParameters.setEncryptionCredential(credential);
    keyEncryptionParameters.setAlgorithm(keyAlgorithm);
    Encrypter encrypter = new Encrypter(dataEncryptionParameters, keyEncryptionParameters);
    // use the enum constant directly rather than the fragile valueOf("PEER") lookup
    encrypter.setKeyPlacement(Encrypter.KeyPlacement.PEER);
    return encrypter;
}
/** Builds an {@code Attribute} with the given name and a single {@code XSString} value. */
static Attribute attribute(String name, String value) {
    Attribute result = build(Attribute.DEFAULT_ELEMENT_NAME);
    result.setName(name);
    XSString stringValue = new XSStringBuilder().buildObject(AttributeValue.DEFAULT_ELEMENT_NAME, XSString.TYPE_NAME);
    stringValue.setValue(value);
    result.getAttributeValues().add(stringValue);
    return result;
}
/**
 * Builds two sample {@code AttributeStatement}s covering the XML value types an
 * attribute consumer must handle: {@code XSAny} (typed and untyped),
 * {@code XSString}, {@code XSInteger}, {@code XSURI}, {@code XSBoolean} and
 * {@code XSDateTime}.
 */
static List<AttributeStatement> attributeStatements() {
    List<AttributeStatement> statements = new ArrayList<>();
    AttributeStatementBuilder statementBuilder = new AttributeStatementBuilder();
    AttributeBuilder attrBuilder = new AttributeBuilder();
    // first statement: email (two values), name, age
    AttributeStatement personal = statementBuilder.buildObject();
    Attribute email = attrBuilder.buildObject();
    email.setName("email");
    // gh-8864: one value carries an explicit xsi:type, the other does not
    XSAny primaryEmail = new XSAnyBuilder().buildObject(AttributeValue.DEFAULT_ELEMENT_NAME, XSAny.TYPE_NAME);
    primaryEmail.setTextContent("john.doe@example.com");
    email.getAttributeValues().add(primaryEmail);
    XSAny secondaryEmail = new XSAnyBuilder().buildObject(AttributeValue.DEFAULT_ELEMENT_NAME);
    secondaryEmail.setTextContent("doe.john@example.com");
    email.getAttributeValues().add(secondaryEmail);
    personal.getAttributes().add(email);
    Attribute nameAttribute = attrBuilder.buildObject();
    nameAttribute.setName("name");
    XSString fullName = new XSStringBuilder().buildObject(AttributeValue.DEFAULT_ELEMENT_NAME, XSString.TYPE_NAME);
    fullName.setValue("John Doe");
    nameAttribute.getAttributeValues().add(fullName);
    personal.getAttributes().add(nameAttribute);
    Attribute ageAttribute = attrBuilder.buildObject();
    ageAttribute.setName("age");
    XSInteger ageValue = new XSIntegerBuilder().buildObject(AttributeValue.DEFAULT_ELEMENT_NAME, XSInteger.TYPE_NAME);
    ageValue.setValue(21);
    ageAttribute.getAttributeValues().add(ageValue);
    personal.getAttributes().add(ageAttribute);
    statements.add(personal);
    // second statement: website, registered flag, registration date
    AttributeStatement profile = statementBuilder.buildObject();
    Attribute website = attrBuilder.buildObject();
    website.setName("website");
    XSURI websiteValue = new XSURIBuilder().buildObject(AttributeValue.DEFAULT_ELEMENT_NAME, XSURI.TYPE_NAME);
    websiteValue.setValue("https://johndoe.com/");
    website.getAttributeValues().add(websiteValue);
    profile.getAttributes().add(website);
    Attribute registered = attrBuilder.buildObject();
    registered.setName("registered");
    XSBoolean registeredValue = new XSBooleanBuilder().buildObject(AttributeValue.DEFAULT_ELEMENT_NAME,
            XSBoolean.TYPE_NAME);
    registeredValue.setValue(new XSBooleanValue(true, false));
    registered.getAttributeValues().add(registeredValue);
    profile.getAttributes().add(registered);
    Attribute registeredDate = attrBuilder.buildObject();
    registeredDate.setName("registeredDate");
    XSDateTime registeredDateValue = new XSDateTimeBuilder().buildObject(AttributeValue.DEFAULT_ELEMENT_NAME,
            XSDateTime.TYPE_NAME);
    registeredDateValue.setValue(DateTime.parse("1970-01-01T00:00:00Z"));
    registeredDate.getAttributeValues().add(registeredDateValue);
    profile.getAttributes().add(registeredDate);
    statements.add(profile);
    return statements;
}
/**
 * Builds an OpenSAML {@link XMLObject} for the given element name via the global
 * builder factory.
 *
 * @param qName the element's qualified name; a builder must be registered for it
 * @param <T> the XMLObject subtype the caller expects
 * @return the freshly built object, cast to the caller's expected type
 */
@SuppressWarnings("unchecked") // builders are registered per qName; callers request the matching T
static <T extends XMLObject> T build(QName qName) {
    return (T) XMLObjectProviderRegistrySupport.getBuilderFactory().getBuilder(qName).buildObject(qName);
}
}
| |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.routing.allocation;
import org.elasticsearch.cluster.ClusterInfoService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.RestoreInProgress;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.health.ClusterStateHealth;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.gateway.GatewayAllocator;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.function.Function;
import java.util.stream.Collectors;
import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING;
/**
* This service manages the node allocation of a cluster. For this reason the
* {@link AllocationService} keeps {@link AllocationDeciders} to choose nodes
* for shard allocation. This class also manages new nodes joining the cluster
* and rerouting of shards.
*/
public class AllocationService extends AbstractComponent {

    // Pluggable policy: deciders veto placements, the gateway allocator handles recovery of
    // existing shard copies, and the shards allocator balances the rest.
    private final AllocationDeciders allocationDeciders;
    private final GatewayAllocator gatewayAllocator;
    private final ShardsAllocator shardsAllocator;
    private final ClusterInfoService clusterInfoService;

    @Inject
    public AllocationService(Settings settings, AllocationDeciders allocationDeciders, GatewayAllocator gatewayAllocator,
                             ShardsAllocator shardsAllocator, ClusterInfoService clusterInfoService) {
        super(settings);
        this.allocationDeciders = allocationDeciders;
        this.gatewayAllocator = gatewayAllocator;
        this.shardsAllocator = shardsAllocator;
        this.clusterInfoService = clusterInfoService;
    }

    /**
     * Applies the started shards. Note, only initializing ShardRouting instances that exist in the routing table should be
     * provided as parameter and no duplicates should be contained.
     * <p>
     * If the same instance of the {@link ClusterState} is returned, then no change has been made.</p>
     */
    public ClusterState applyStartedShards(ClusterState clusterState, List<ShardRouting> startedShards) {
        if (startedShards.isEmpty()) {
            return clusterState;
        }
        RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
        // shuffle the unassigned nodes, just so we won't have things like poison failed shards
        routingNodes.unassigned().shuffle();
        RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState,
            clusterInfoService.getClusterInfo(), currentNanoTime(), false);
        // as starting a primary relocation target can reinitialize replica shards, start replicas first
        // (sorting by primary flag puts replicas, primary == false, ahead of primaries)
        startedShards = new ArrayList<>(startedShards);
        Collections.sort(startedShards, Comparator.comparing(ShardRouting::primary));
        applyStartedShards(allocation, startedShards);
        gatewayAllocator.applyStartedShards(allocation, startedShards);
        reroute(allocation);
        String startedShardsAsString = firstListElementsToCommaDelimitedString(startedShards, s -> s.shardId().toString());
        return buildResultAndLogHealthChange(clusterState, allocation, "shards started [" + startedShardsAsString + "] ...");
    }

    /**
     * Builds the resulting {@link ClusterState} from the (mutated) allocation: a new routing table,
     * metadata updated with the routing changes, and any updated restore-in-progress custom. Logs a
     * message when the cluster health status changed as a result.
     *
     * @param oldState the cluster state the allocation was based on
     * @param allocation the allocation whose routing nodes carry the changes
     * @param reason human-readable reason, used for the health-change log line
     */
    protected ClusterState buildResultAndLogHealthChange(ClusterState oldState, RoutingAllocation allocation, String reason) {
        RoutingTable oldRoutingTable = oldState.routingTable();
        RoutingNodes newRoutingNodes = allocation.routingNodes();
        final RoutingTable newRoutingTable = new RoutingTable.Builder().updateNodes(oldRoutingTable.version(), newRoutingNodes).build();
        MetaData newMetaData = allocation.updateMetaDataWithRoutingChanges(newRoutingTable);
        assert newRoutingTable.validate(newMetaData); // validates the routing table is coherent with the cluster state metadata
        final ClusterState.Builder newStateBuilder = ClusterState.builder(oldState)
            .routingTable(newRoutingTable)
            .metaData(newMetaData);
        final RestoreInProgress restoreInProgress = allocation.custom(RestoreInProgress.TYPE);
        if (restoreInProgress != null) {
            RestoreInProgress updatedRestoreInProgress = allocation.updateRestoreInfoWithRoutingChanges(restoreInProgress);
            if (updatedRestoreInProgress != restoreInProgress) {
                ImmutableOpenMap.Builder<String, ClusterState.Custom> customsBuilder = ImmutableOpenMap.builder(allocation.getCustoms());
                customsBuilder.put(RestoreInProgress.TYPE, updatedRestoreInProgress);
                newStateBuilder.customs(customsBuilder.build());
            }
        }
        final ClusterState newState = newStateBuilder.build();
        logClusterHealthStateChange(
            new ClusterStateHealth(oldState),
            new ClusterStateHealth(newState),
            reason
        );
        return newState;
    }

    /** Convenience over {@link #applyFailedShards(ClusterState, List, List)} for a single failed shard with no failure cause. */
    public ClusterState applyFailedShard(ClusterState clusterState, ShardRouting failedShard) {
        return applyFailedShards(clusterState, Collections.singletonList(new FailedShard(failedShard, null, null)),
            Collections.emptyList());
    }

    /** Convenience over {@link #applyFailedShards(ClusterState, List, List)} with no stale shards to remove. */
    public ClusterState applyFailedShards(ClusterState clusterState, List<FailedShard> failedShards) {
        return applyFailedShards(clusterState, failedShards, Collections.emptyList());
    }

    /**
     * Applies the failed shards. Note, only assigned ShardRouting instances that exist in the routing table should be
     * provided as parameter. Also applies a list of allocation ids to remove from the in-sync set for shard copies for which there
     * are no routing entries in the routing table.
     *
     * <p>
     * If the same instance of ClusterState is returned, then no change has been made.</p>
     */
    public ClusterState applyFailedShards(final ClusterState clusterState, final List<FailedShard> failedShards,
                                          final List<StaleShard> staleShards) {
        if (staleShards.isEmpty() && failedShards.isEmpty()) {
            return clusterState;
        }
        ClusterState tmpState = IndexMetaDataUpdater.removeStaleIdsWithoutRoutings(clusterState, staleShards);
        RoutingNodes routingNodes = getMutableRoutingNodes(tmpState);
        // shuffle the unassigned nodes, just so we won't have things like poison failed shards
        routingNodes.unassigned().shuffle();
        long currentNanoTime = currentNanoTime();
        RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, tmpState,
            clusterInfoService.getClusterInfo(), currentNanoTime, false);
        for (FailedShard failedShardEntry : failedShards) {
            ShardRouting shardToFail = failedShardEntry.getRoutingEntry();
            IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardToFail.shardId().getIndex());
            // keep the reroute from putting the shard straight back on the node it just failed on
            allocation.addIgnoreShardForNode(shardToFail.shardId(), shardToFail.currentNodeId());
            // failing a primary also fails initializing replica shards, re-resolve ShardRouting
            ShardRouting failedShard = routingNodes.getByAllocationId(shardToFail.shardId(), shardToFail.allocationId().getId());
            if (failedShard != null) {
                if (failedShard != shardToFail) {
                    logger.trace("{} shard routing modified in an earlier iteration (previous: {}, current: {})",
                        shardToFail.shardId(), shardToFail, failedShard);
                }
                int failedAllocations = failedShard.unassignedInfo() != null ? failedShard.unassignedInfo().getNumFailedAllocations() : 0;
                UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, failedShardEntry.getMessage(),
                    failedShardEntry.getFailure(), failedAllocations + 1, currentNanoTime, System.currentTimeMillis(), false,
                    AllocationStatus.NO_ATTEMPT);
                routingNodes.failShard(logger, failedShard, unassignedInfo, indexMetaData, allocation.changes());
            } else {
                logger.trace("{} shard routing failed in an earlier iteration (routing: {})", shardToFail.shardId(), shardToFail);
            }
        }
        gatewayAllocator.applyFailedShards(allocation, failedShards);
        reroute(allocation);
        String failedShardsAsString = firstListElementsToCommaDelimitedString(failedShards, s -> s.getRoutingEntry().shardId().toString());
        return buildResultAndLogHealthChange(clusterState, allocation, "shards failed [" + failedShardsAsString + "] ...");
    }

    /**
     * Unassigns shards that are associated with nodes that are no longer part of the cluster, potentially promoting replicas
     * if needed.
     */
    public ClusterState deassociateDeadNodes(final ClusterState clusterState, boolean reroute, String reason) {
        RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
        // shuffle the unassigned nodes, just so we won't have things like poison failed shards
        routingNodes.unassigned().shuffle();
        RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState,
            clusterInfoService.getClusterInfo(), currentNanoTime(), false);
        // first, clear from the shards any node id they used to belong to that is now dead
        deassociateDeadNodes(allocation);
        if (reroute) {
            reroute(allocation);
        }
        if (allocation.routingNodesChanged() == false) {
            return clusterState;
        }
        return buildResultAndLogHealthChange(clusterState, allocation, reason);
    }

    /**
     * Removes delay markers from unassigned shards based on current time stamp.
     */
    private void removeDelayMarkers(RoutingAllocation allocation) {
        final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = allocation.routingNodes().unassigned().iterator();
        final MetaData metaData = allocation.metaData();
        while (unassignedIterator.hasNext()) {
            ShardRouting shardRouting = unassignedIterator.next();
            UnassignedInfo unassignedInfo = shardRouting.unassignedInfo();
            if (unassignedInfo.isDelayed()) {
                final long newComputedLeftDelayNanos = unassignedInfo.getRemainingDelay(allocation.getCurrentNanoTime(),
                    metaData.getIndexSafe(shardRouting.index()).getSettings());
                if (newComputedLeftDelayNanos == 0) {
                    // the delay expired: rebuild the UnassignedInfo with delayed == false so the
                    // shard becomes eligible for allocation again
                    unassignedIterator.updateUnassigned(new UnassignedInfo(unassignedInfo.getReason(), unassignedInfo.getMessage(),
                        unassignedInfo.getFailure(), unassignedInfo.getNumFailedAllocations(), unassignedInfo.getUnassignedTimeInNanos(),
                        unassignedInfo.getUnassignedTimeInMillis(), false, unassignedInfo.getLastAllocationStatus()),
                        shardRouting.recoverySource(), allocation.changes());
                }
            }
        }
    }

    /**
     * Internal helper to cap the number of elements in a potentially long list for logging.
     *
     * @param elements The elements to log. May be any non-null list. Must not be null.
     * @param formatter A function that can convert list elements to a String. Must not be null.
     * @param <T> The list element type.
     * @return A comma-separated string of the first few elements.
     */
    private <T> String firstListElementsToCommaDelimitedString(List<T> elements, Function<T, String> formatter) {
        final int maxNumberOfElements = 10;
        return elements
                .stream()
                .limit(maxNumberOfElements)
                .map(formatter)
                .collect(Collectors.joining(", "));
    }

    /**
     * Executes explicit allocation commands (move, allocate, cancel, ...) against the cluster state.
     *
     * @param clusterState the current cluster state
     * @param commands the commands to execute
     * @param explain whether to collect and return an explanation for each command
     * @param retryFailed whether shards that previously exhausted their allocation retries may be retried
     * @return the explanations together with the resulting cluster state
     */
    public CommandsResult reroute(final ClusterState clusterState, AllocationCommands commands, boolean explain, boolean retryFailed) {
        RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
        // we don't shuffle the unassigned shards here, to try and get as close as possible to
        // a consistent result of the effect the commands have on the routing
        // this allows systems to dry run the commands, see the resulting cluster state, and act on it
        RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState,
            clusterInfoService.getClusterInfo(), currentNanoTime(), retryFailed);
        // don't short circuit deciders, we want a full explanation
        allocation.debugDecision(true);
        // we ignore disable allocation, because commands are explicit
        allocation.ignoreDisable(true);
        RoutingExplanations explanations = commands.execute(allocation, explain);
        // we revert the ignore disable flag, since when rerouting, we want the original setting to take place
        allocation.ignoreDisable(false);
        // the assumption is that commands will move / act on shards (or fail through exceptions)
        // so, there will always be shard "movements", so no need to check on reroute
        reroute(allocation);
        return new CommandsResult(explanations, buildResultAndLogHealthChange(clusterState, allocation, "reroute commands"));
    }

    /**
     * Reroutes the routing table based on the live nodes.
     * <p>
     * If the same instance of ClusterState is returned, then no change has been made.
     */
    public ClusterState reroute(ClusterState clusterState, String reason) {
        return reroute(clusterState, reason, false);
    }

    /**
     * Reroutes the routing table based on the live nodes.
     * <p>
     * If the same instance of ClusterState is returned, then no change has been made.
     */
    protected ClusterState reroute(final ClusterState clusterState, String reason, boolean debug) {
        RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
        // shuffle the unassigned nodes, just so we won't have things like poison failed shards
        routingNodes.unassigned().shuffle();
        RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState,
            clusterInfoService.getClusterInfo(), currentNanoTime(), false);
        allocation.debugDecision(debug);
        reroute(allocation);
        if (allocation.routingNodesChanged() == false) {
            return clusterState;
        }
        return buildResultAndLogHealthChange(clusterState, allocation, reason);
    }

    /** Logs an INFO line when the overall cluster health status (green/yellow/red) changed. */
    private void logClusterHealthStateChange(ClusterStateHealth previousStateHealth, ClusterStateHealth newStateHealth, String reason) {
        ClusterHealthStatus previousHealth = previousStateHealth.getStatus();
        ClusterHealthStatus currentHealth = newStateHealth.getStatus();
        if (!previousHealth.equals(currentHealth)) {
            logger.info("Cluster health status changed from [{}] to [{}] (reason: [{}]).", previousHealth, currentHealth, reason);
        }
    }

    /** Returns true if any routing node refers to a node id that is no longer a data node in the cluster. */
    private boolean hasDeadNodes(RoutingAllocation allocation) {
        for (RoutingNode routingNode : allocation.routingNodes()) {
            if (allocation.nodes().getDataNodes().containsKey(routingNode.nodeId()) == false) {
                return true;
            }
        }
        return false;
    }

    /** Runs the actual allocation round: gateway allocator for unassigned shards, then the balanced allocator. */
    private void reroute(RoutingAllocation allocation) {
        assert hasDeadNodes(allocation) == false : "dead nodes should be explicitly cleaned up. See deassociateDeadNodes";
        // now allocate all the unassigned to available nodes
        if (allocation.routingNodes().unassigned().size() > 0) {
            removeDelayMarkers(allocation);
            gatewayAllocator.allocateUnassigned(allocation);
        }
        shardsAllocator.allocate(allocation);
        assert RoutingNodes.assertShardStats(allocation.routingNodes());
    }

    /** Fails all shards on nodes that left the cluster and removes those nodes from the routing nodes. */
    private void deassociateDeadNodes(RoutingAllocation allocation) {
        for (Iterator<RoutingNode> it = allocation.routingNodes().mutableIterator(); it.hasNext(); ) {
            RoutingNode node = it.next();
            if (allocation.nodes().getDataNodes().containsKey(node.nodeId())) {
                // its a live node, continue
                continue;
            }
            // now, go over all the shards routing on the node, and fail them
            for (ShardRouting shardRouting : node.copyShards()) {
                final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
                // a per-index setting decides whether reallocation is delayed to give the node a chance to come back
                boolean delayed = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexMetaData.getSettings()).nanos() > 0;
                UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, "node_left[" + node.nodeId() + "]",
                    null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), delayed, AllocationStatus.NO_ATTEMPT);
                allocation.routingNodes().failShard(logger, shardRouting, unassignedInfo, indexMetaData, allocation.changes());
            }
            // its a dead node, remove it, note, its important to remove it *after* we apply failed shard
            // since it relies on the fact that the RoutingNode exists in the list of nodes
            it.remove();
        }
    }

    /** Marks the given initializing shards as started in the routing nodes. */
    private void applyStartedShards(RoutingAllocation routingAllocation, List<ShardRouting> startedShardEntries) {
        assert startedShardEntries.isEmpty() == false : "non-empty list of started shard entries expected";
        RoutingNodes routingNodes = routingAllocation.routingNodes();
        for (ShardRouting startedShard : startedShardEntries) {
            assert startedShard.initializing() : "only initializing shards can be started";
            assert routingAllocation.metaData().index(startedShard.shardId().getIndex()) != null :
                "shard started for unknown index (shard entry: " + startedShard + ")";
            assert startedShard == routingNodes.getByAllocationId(startedShard.shardId(), startedShard.allocationId().getId()) :
                "shard routing to start does not exist in routing table, expected: " + startedShard + " but was: " +
                    routingNodes.getByAllocationId(startedShard.shardId(), startedShard.allocationId().getId());
            routingNodes.startShard(logger, startedShard, routingAllocation.changes());
        }
    }

    /** Creates a mutable {@link RoutingNodes} view of the cluster state. */
    private RoutingNodes getMutableRoutingNodes(ClusterState clusterState) {
        RoutingNodes routingNodes = new RoutingNodes(clusterState, false); // this is a costly operation - only call this once!
        return routingNodes;
    }

    /** override this to control time based decisions during allocation */
    protected long currentNanoTime() {
        return System.nanoTime();
    }

    /**
     * this class is used to describe results of applying a set of
     * {@link org.elasticsearch.cluster.routing.allocation.command.AllocationCommand}
     */
    public static class CommandsResult {

        private final RoutingExplanations explanations;
        private final ClusterState clusterState;

        /**
         * Creates a new {@link CommandsResult}
         * @param explanations Explanation for the reroute actions
         * @param clusterState Resulting cluster state
         */
        private CommandsResult(RoutingExplanations explanations, ClusterState clusterState) {
            this.clusterState = clusterState;
            this.explanations = explanations;
        }

        /**
         * Get the explanation of this result
         */
        public RoutingExplanations explanations() {
            return explanations;
        }

        /**
         * the resulting cluster state, after the commands were applied
         */
        public ClusterState getClusterState() {
            return clusterState;
        }
    }
}
| |
/* ========================================================================
*
* This file is part of CODEC, which is a Java package for encoding
* and decoding ASN.1 data structures.
*
* Author: Fraunhofer Institute for Computer Graphics Research IGD
* Department A8: Security Technology
* Fraunhoferstr. 5, 64283 Darmstadt, Germany
*
* Rights: Copyright (c) 2004 by Fraunhofer-Gesellschaft
* zur Foerderung der angewandten Forschung e.V.
* Hansastr. 27c, 80686 Munich, Germany.
*
* ------------------------------------------------------------------------
*
* The software package is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of the
* License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software package; if not, write to the Free
* Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA or obtain a copy of the license at
* http://www.fsf.org/licensing/licenses/lgpl.txt.
*
* ------------------------------------------------------------------------
*
* The CODEC library can solely be used and distributed according to
* the terms and conditions of the GNU Lesser General Public License for
* non-commercial research purposes and shall not be embedded in any
* products or services of any user or of any third party and shall not
* be linked with any products or services of any user or of any third
* party that will be commercially exploited.
*
* The CODEC library has not been tested for the use or application
* for a determined purpose. It is a developing version that can
* possibly contain errors. Therefore, Fraunhofer-Gesellschaft zur
* Foerderung der angewandten Forschung e.V. does not warrant that the
* operation of the CODEC library will be uninterrupted or error-free.
* Neither does Fraunhofer-Gesellschaft zur Foerderung der angewandten
* Forschung e.V. warrant that the CODEC library will operate and
* interact in an uninterrupted or error-free way together with the
* computer program libraries of third parties which the CODEC library
* accesses and which are distributed together with the CODEC library.
*
* Fraunhofer-Gesellschaft zur Foerderung der angewandten Forschung e.V.
* does not warrant that the operation of the third parties's computer
* program libraries themselves which the CODEC library accesses will
* be uninterrupted or error-free.
*
* Fraunhofer-Gesellschaft zur Foerderung der angewandten Forschung e.V.
* shall not be liable for any errors or direct, indirect, special,
* incidental or consequential damages, including lost profits resulting
* from the combination of the CODEC library with software of any user
* or of any third party or resulting from the implementation of the
* CODEC library in any products, systems or services of any user or
* of any third party.
*
* Fraunhofer-Gesellschaft zur Foerderung der angewandten Forschung e.V.
* does not provide any warranty nor any liability that utilization of
* the CODEC library will not interfere with third party intellectual
* property rights or with any other protected third party rights or will
* cause damage to third parties. Fraunhofer Gesellschaft zur Foerderung
* der angewandten Forschung e.V. is currently not aware of any such
* rights.
*
* The CODEC library is supplied without any accompanying services.
*
* ========================================================================
*/
package codec.x509.extensions;
import java.util.Iterator;
import codec.asn1.ASN1BitString;
import codec.asn1.ASN1Exception;
import codec.asn1.ASN1Sequence;
import codec.asn1.ASN1SequenceOf;
import codec.asn1.ASN1TaggedType;
import codec.x509.GeneralName;
/**
* <pre>
* DistributionPoint ::= SEQUENCE {
* distributionPoint [0] EXPLICIT DistributionPointName OPTIONAL,
* reasons [1] IMPLICIT ReasonFlags OPTIONAL,
* cRLIssuer [2] IMPLICIT GeneralNames OPTIONAL
* }
* DistributionPointName ::= CHOICE {
* fullName [0] IMPLICIT GeneralNames,
* nameRelativeToCRLIssuer [1] IMPLICIT RelativeDistinguishedName
* } NOT IMPLEMENTED YET
* ReasonFlags ::= BIT STRING {
* unused (0),
* keyCompromise (1),
* cACompromise (2),
* affiliationChanged (3),
* superseded (4),
* cessationOfOperation (5),
* certificateHold (6)
* }
* </pre>
*/
public class DistributionPoint extends ASN1Sequence {
    /** Context tag [0] of the distributionPoint element. */
    public static final int TAG_DISTRIBUTION_POINT = 0;

    /** Context tag [1] of the reasons element. */
    public static final int TAG_REASONS = 1;

    /** Context tag [2] of the cRLIssuer element. */
    public static final int TAG_CRL_ISSUER = 2;

    /** Context tag [0] of the fullName CHOICE inside DistributionPointName. */
    public static final int TAG_FULL_NAME = 0;

    private ASN1TaggedType distributionPointTag_;

    private ASN1TaggedType distributionPoint_;

    /*
     * The only CHOICE we support. This is linked into distributionPoint by
     * means of an ASN1TaggedType.
     */
    private ASN1Sequence fullName_;

    private ASN1TaggedType reasonsTag_;

    private ASN1BitString reasons_;

    private ASN1TaggedType cRLIssuerTag_;

    private ASN1Sequence cRLIssuer_;

    /**
     * Creates an empty DistributionPoint. All three elements are OPTIONAL and
     * initially absent; they become present when a setter/adder is called.
     * Note that only the fullName CHOICE of DistributionPointName is supported.
     */
    public DistributionPoint() {
        /*
         * We do not support both choices of DistributionPointName, hence we
         * directly initialize the one we support rather than going through an
         * ASN1Choice (which just adds another layer of complication).
         */
        fullName_ = new ASN1SequenceOf(GeneralName.class);
        /*
         * We wrap the only CHOICE we support into the appropriate tagged type.
         */
        distributionPoint_ = new ASN1TaggedType(TAG_FULL_NAME, fullName_,
                false, false);
        /*
         * We wrap again. Flags are EXPLICIT and OPTIONAL.
         */
        distributionPointTag_ = new ASN1TaggedType(TAG_DISTRIBUTION_POINT,
                distributionPoint_, true, true);
        /*
         * Finally, we add the tagged type. Elements must be added in the order
         * the SEQUENCE declares them.
         */
        add(distributionPointTag_);
        /*
         * Next element with tag [1].
         */
        reasons_ = new ASN1BitString();
        reasonsTag_ = new ASN1TaggedType(TAG_REASONS, reasons_, false, true);
        add(reasonsTag_);
        /*
         * Final element with tag [2].
         */
        cRLIssuer_ = new ASN1SequenceOf(GeneralName.class);
        cRLIssuerTag_ = new ASN1TaggedType(TAG_CRL_ISSUER, cRLIssuer_, false,
                true);
        add(cRLIssuerTag_);
    }

    /**
     * Sets the ReasonFlags BIT STRING and marks the reasons element present.
     *
     * @param flags at most 7 booleans, one per ReasonFlags bit (unused .. certificateHold)
     * @throws ASN1Exception if more than 7 flags are given
     */
    public void setReasons(boolean flags[]) throws ASN1Exception {
        if (flags.length > 7) {
            throw new ASN1Exception("Wrong number of flags!");
        }
        reasons_.setBits(flags);
        reasonsTag_.setOptional(false);
    }

    /**
     * Adds a GeneralName to the fullName CHOICE and marks the
     * distributionPoint element present.
     */
    public void addDistributionPointName(GeneralName aName) {
        fullName_.add(aName);
        distributionPointTag_.setOptional(false);
    }

    /** Adds a GeneralName to the cRLIssuer list and marks that element present. */
    public void addCRLIssuer(GeneralName aName) {
        cRLIssuer_.add(aName);
        cRLIssuerTag_.setOptional(false);
    }

    /**
     * returns the ASN1SequenceOf that contains the GeneralNames with the
     * distribution point URL's. Note that RelativeDistinguishedNames are NOT
     * supported yet and will return null in this case!
     */
    public ASN1Sequence getDistributionPointNames() {
        // an element still marked OPTIONAL was never populated (or was absent on decode)
        if (distributionPointTag_.isOptional()) {
            return null;
        }
        return fullName_;
    }

    /**
     * returns an array of Strings containing all CRL-DP URL's.
     * RelativeDistinguishedName not implemented yet!
     *
     * @return the URL strings, or null if no distribution point names are present;
     *         entries that cannot be decoded are replaced by a placeholder string
     */
    public String[] getDPURLs() {
        ASN1Sequence names;
        GeneralName gn;
        Iterator i;
        String[] res;
        int n;

        names = getDistributionPointNames();
        if (names == null) {
            return null;
        }
        res = new String[names.size()];
        for (n = 0, i = names.iterator(); i.hasNext(); n++) {
            try {
                gn = (GeneralName) i.next();
                res[n] = gn.getGeneralName().getValue().toString();
            } catch (codec.x509.X509Exception ex) {
                res[n] = "<could not decode this URL!>";
            }
        }
        return res;
    }

    /**
     * Renders this DistributionPoint, one URL per line, prefixing each line with
     * the given offset (the CRL-issuer block intentionally keeps its original,
     * un-offset formatting).
     *
     * @param offset indentation prefix for each emitted line
     */
    public String toString(String offset) {
        // StringBuilder replaces the legacy synchronized StringBuffer; this is
        // a single-threaded local, so no locking is needed.
        StringBuilder buf = new StringBuilder(offset + "DistributionPoint {\n");
        String[] dps = getDPURLs();

        if (dps == null) {
            buf.append(offset + "No URLs\n");
        } else {
            for (int i = 0; i < dps.length; i++) {
                buf.append(offset + dps[i]);
                buf.append("\n");
            }
        }
        if (!this.cRLIssuerTag_.isOptional()) {
            buf.append("CRL Issuers:\n" + cRLIssuer_.toString());
        }
        buf.append(offset + "}\n");
        return buf.toString();
    }

    /** Renders this DistributionPoint with no indentation. */
    public String toString() {
        return toString("");
    }
}
| |
/*
* Copyright 2010-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.services.inspector.model;
import java.io.Serializable;
/**
*
*/
public class ListAttachedRulesPackagesResult implements Serializable, Cloneable {
    /**
     * <p>
     * A list of ARNs specifying the rules packages returned by the action.
     * </p>
     */
    private java.util.List<String> rulesPackageArnList;
    /**
     * <p>
     * When a response is generated, if there is more data to be listed, this
     * parameter is present in the response and contains the value to use for
     * the <b>nextToken</b> parameter in a subsequent pagination request. If
     * there is no more data to be listed, this parameter is set to 'null'.
     * </p>
     */
    private String nextToken;

    /**
     * <p>
     * A list of ARNs specifying the rules packages returned by the action.
     * </p>
     *
     * @return A list of ARNs specifying the rules packages returned by the
     *         action, or null if none were set.
     */
    public java.util.List<String> getRulesPackageArnList() {
        return rulesPackageArnList;
    }

    /**
     * <p>
     * A list of ARNs specifying the rules packages returned by the action.
     * </p>
     *
     * @param rulesPackageArnList
     *        A list of ARNs specifying the rules packages returned by the
     *        action; may be null.
     */
    public void setRulesPackageArnList(
            java.util.Collection<String> rulesPackageArnList) {
        // Defensive copy so later mutation of the caller's collection does
        // not affect this object.
        this.rulesPackageArnList = (rulesPackageArnList == null) ? null
                : new java.util.ArrayList<String>(rulesPackageArnList);
    }

    /**
     * <p>
     * A list of ARNs specifying the rules packages returned by the action.
     * </p>
     * <p>
     * <b>NOTE:</b> This method appends the values to the existing list (if
     * any). Use {@link #setRulesPackageArnList(java.util.Collection)} or
     * {@link #withRulesPackageArnList(java.util.Collection)} if you want to
     * override the existing values.
     * </p>
     *
     * @param rulesPackageArnList
     *        A list of ARNs specifying the rules packages returned by the
     *        action.
     * @return Returns a reference to this object so that method calls can be
     *         chained together.
     */
    public ListAttachedRulesPackagesResult withRulesPackageArnList(
            String... rulesPackageArnList) {
        if (this.rulesPackageArnList == null) {
            this.rulesPackageArnList = new java.util.ArrayList<String>(
                    rulesPackageArnList.length);
        }
        java.util.Collections.addAll(this.rulesPackageArnList,
                rulesPackageArnList);
        return this;
    }

    /**
     * <p>
     * A list of ARNs specifying the rules packages returned by the action.
     * </p>
     *
     * @param rulesPackageArnList
     *        A list of ARNs specifying the rules packages returned by the
     *        action.
     * @return Returns a reference to this object so that method calls can be
     *         chained together.
     */
    public ListAttachedRulesPackagesResult withRulesPackageArnList(
            java.util.Collection<String> rulesPackageArnList) {
        setRulesPackageArnList(rulesPackageArnList);
        return this;
    }

    /**
     * <p>
     * When a response is generated, if there is more data to be listed, this
     * parameter is present in the response and contains the value to use for
     * the <b>nextToken</b> parameter in a subsequent pagination request. If
     * there is no more data to be listed, this parameter is set to 'null'.
     * </p>
     *
     * @param nextToken
     *        The pagination token for the next request, or null when no more
     *        data remains.
     */
    public void setNextToken(String nextToken) {
        this.nextToken = nextToken;
    }

    /**
     * <p>
     * When a response is generated, if there is more data to be listed, this
     * parameter is present in the response and contains the value to use for
     * the <b>nextToken</b> parameter in a subsequent pagination request. If
     * there is no more data to be listed, this parameter is set to 'null'.
     * </p>
     *
     * @return The pagination token for the next request, or null when no more
     *         data remains.
     */
    public String getNextToken() {
        return this.nextToken;
    }

    /**
     * <p>
     * When a response is generated, if there is more data to be listed, this
     * parameter is present in the response and contains the value to use for
     * the <b>nextToken</b> parameter in a subsequent pagination request. If
     * there is no more data to be listed, this parameter is set to 'null'.
     * </p>
     *
     * @param nextToken
     *        The pagination token for the next request, or null when no more
     *        data remains.
     * @return Returns a reference to this object so that method calls can be
     *         chained together.
     */
    public ListAttachedRulesPackagesResult withNextToken(String nextToken) {
        setNextToken(nextToken);
        return this;
    }

    /**
     * Returns a string representation of this object; useful for testing and
     * debugging.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("{");
        if (getRulesPackageArnList() != null) {
            sb.append("RulesPackageArnList: ").append(getRulesPackageArnList())
                    .append(",");
        }
        if (getNextToken() != null) {
            sb.append("NextToken: ").append(getNextToken());
        }
        sb.append("}");
        return sb.toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        // Also rejects null, matching the original's explicit null check.
        if (!(obj instanceof ListAttachedRulesPackagesResult)) {
            return false;
        }
        ListAttachedRulesPackagesResult other = (ListAttachedRulesPackagesResult) obj;
        return java.util.Objects.equals(getRulesPackageArnList(),
                other.getRulesPackageArnList())
                && java.util.Objects.equals(getNextToken(),
                        other.getNextToken());
    }

    @Override
    public int hashCode() {
        // Objects.hash uses the same 31-based accumulation as the original
        // hand-rolled implementation, so hash values are unchanged.
        return java.util.Objects.hash(getRulesPackageArnList(), getNextToken());
    }

    @Override
    public ListAttachedRulesPackagesResult clone() {
        try {
            return (ListAttachedRulesPackagesResult) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new IllegalStateException(
                    "Got a CloneNotSupportedException from Object.clone() "
                            + "even though we're Cloneable!", e);
        }
    }
}
| |
/*******************************************************************************
* Manchester Centre for Integrative Systems Biology
* University of Manchester
* Manchester M1 7ND
* United Kingdom
*
* Copyright (C) 2008 University of Manchester
*
* This program is released under the Academic Free License ("AFL") v3.0.
* (http://www.opensource.org/licenses/academic.php)
*******************************************************************************/
package org.mcisb.subliminal.metacyc;
import java.io.*;
import java.net.*;
import java.nio.charset.*;
import java.util.*;
import java.util.regex.*;
import org.mcisb.subliminal.*;
/**
* @author Neil Swainston
*/
class MetaCycDownloader
{
	/**
	 * Marker text that precedes each organism name on the MetaCyc index page.
	 */
	private final static String TARBALL = " [Download tarball] "; //$NON-NLS-1$

	/**
	 * Empty string constant.
	 */
	private final static String EMPTY_STRING = ""; //$NON-NLS-1$

	/**
	 * Pre-compiled pattern matching the tarball download URL within a line.
	 * Hoisted to a constant so the regex is compiled once instead of once per
	 * matching line.
	 */
	private final static Pattern ZIP_PATTERN = Pattern.compile( "(?=.*)http://.*\\.tar.gz(?=.*)" ); //$NON-NLS-1$

	/**
	 * URL of the MetaCyc index page listing organism tarballs.
	 */
	private final URL source;

	/**
	 * Name of the organism whose data should be downloaded.
	 */
	private final String organismName;

	/**
	 * Directory into which the tarball is unpacked.
	 */
	private final File destination;

	/**
	 * Ensures the MetaCyc SBML source for the given organism is available in
	 * the given directory, downloading and unpacking it if necessary.
	 *
	 * @param metacycDirectory directory expected to contain metabolic-reactions.sbml
	 * @param organismName organism to download if the file is absent
	 * @return File the (now populated) metacycDirectory
	 * @throws Exception
	 */
	public static File getMetaCycSource( final File metacycDirectory, final String organismName ) throws Exception
	{
		if( SubliminalUtils.find( metacycDirectory, "metabolic-reactions.sbml" ) == null ) //$NON-NLS-1$
		{
			final URL source = new URL( System.getProperty( "org.mcisb.subliminal.metacyc.MetaCycSource" ) ); //$NON-NLS-1$
			final String username = System.getProperty( "org.mcisb.subliminal.metacyc.MetaCycUsername" ); //$NON-NLS-1$
			final String password = System.getProperty( "org.mcisb.subliminal.metacyc.MetaCycPassword" ); //$NON-NLS-1$
			final MetaCycDownloader downloader = new MetaCycDownloader( source, username, password, organismName, metacycDirectory );
			downloader.doTask();
		}

		return metacycDirectory;
	}

	/**
	 * @param source index page URL
	 * @param username HTTP auth user name
	 * @param password HTTP auth password
	 * @param organismName organism to download
	 * @param destination directory to unpack into
	 */
	private MetaCycDownloader( final URL source, final String username, final String password, final String organismName, final File destination )
	{
		this.source = source;
		this.organismName = organismName;
		this.destination = destination;

		// Install the credentials process-wide: all subsequent URL
		// connections authenticate with them.
		Authenticator.setDefault( new PasswordAuthenticator( username, password ) );
	}

	/**
	 * Lists all organism names advertised on the index page, in page order.
	 *
	 * @return String[]
	 * @throws Exception
	 */
	protected String[] getOrganisms() throws Exception
	{
		final Collection<String> organisms = new LinkedHashSet<>();

		try( final BufferedReader reader = new BufferedReader( new InputStreamReader( source.openStream(), Charset.defaultCharset() ) ) )
		{
			String line = null;

			while( ( line = reader.readLine() ) != null )
			{
				final String stripped = SubliminalUtils.stripTags( line );

				if( stripped.contains( TARBALL ) )
				{
					organisms.add( stripped.replace( TARBALL, EMPTY_STRING ).trim() );
				}
			}

			return organisms.toArray( new String[ organisms.size() ] );
		}
	}

	/**
	 * Scans the index page for the requested organism and untars the first
	 * tarball URL found from that point onwards into the destination
	 * directory.
	 *
	 * @return null on success
	 * @throws UnsupportedOperationException if the organism is not listed
	 * @throws Exception on I/O failure
	 */
	protected Serializable doTask() throws Exception
	{
		final Collection<String> organisms = new LinkedHashSet<>();

		try( final BufferedReader reader = new BufferedReader( new InputStreamReader( source.openStream(), Charset.defaultCharset() ) ) )
		{
			String line = null;
			boolean found = false;

			while( ( line = reader.readLine() ) != null )
			{
				String organism = null;
				final String stripped = SubliminalUtils.stripTags( line );

				if( stripped.contains( TARBALL ) )
				{
					organism = stripped.replace( TARBALL, EMPTY_STRING ).trim();
				}

				if( organism != null )
				{
					organisms.add( organism );

					if( !found && ( organism.equals( organismName ) || organismName.contains( organism ) ) )
					{
						found = true;
					}

					if( found )
					{
						System.out.println( "MetaCyc: Downloading " + organism ); //$NON-NLS-1$

						final Matcher matcher = ZIP_PATTERN.matcher( line );

						while( matcher.find() )
						{
							final String zipUrl = line.substring( matcher.start(), matcher.end() );
							SubliminalUtils.untar( new URL( zipUrl ), destination );
							return null;
						}
					}
				}
			}

			throw new UnsupportedOperationException( "MetaCyc data for " + organismName + " unavailable. Supported organisms are " + Arrays.toString( organisms.toArray( new String[ organisms.size() ] ) ) + "." ); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
		}
	}

	/**
	 * Authenticator supplying a fixed username/password pair. Declared static
	 * since it does not reference the enclosing instance.
	 *
	 * @author Neil Swainston
	 */
	private static class PasswordAuthenticator extends Authenticator
	{
		/**
		 * HTTP auth user name.
		 */
		private final String username;

		/**
		 * HTTP auth password.
		 */
		private final String password;

		/**
		 * @param username
		 * @param password
		 */
		public PasswordAuthenticator( final String username, final String password )
		{
			this.username = username;
			this.password = password;
		}

		/*
		 * (non-Javadoc)
		 *
		 * @see java.net.Authenticator#getPasswordAuthentication()
		 */
		@Override
		protected PasswordAuthentication getPasswordAuthentication()
		{
			return new PasswordAuthentication( username, password.toCharArray() );
		}
	}
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.xml.jaxb;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.io.StringWriter;
import java.nio.charset.StandardCharsets;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicBoolean;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBException;
import javax.xml.bind.Marshaller;
import javax.xml.transform.OutputKeys;
import javax.xml.transform.TransformerException;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NamedNodeMap;
import org.w3c.dom.Node;
import org.apache.camel.CamelContext;
import org.apache.camel.DelegateEndpoint;
import org.apache.camel.Endpoint;
import org.apache.camel.Expression;
import org.apache.camel.ExtendedCamelContext;
import org.apache.camel.NamedNode;
import org.apache.camel.TypeConversionException;
import org.apache.camel.converter.jaxp.XmlConverter;
import org.apache.camel.model.ExpressionNode;
import org.apache.camel.model.RouteDefinition;
import org.apache.camel.model.RoutesDefinition;
import org.apache.camel.model.language.ExpressionDefinition;
import org.apache.camel.spi.ModelJAXBContextFactory;
import org.apache.camel.spi.ModelToXMLDumper;
import org.apache.camel.spi.NamespaceAware;
import org.apache.camel.spi.TypeConverterRegistry;
import org.apache.camel.spi.annotations.JdkService;
import org.apache.camel.util.xml.XmlLineNumberParser;
import static org.apache.camel.model.ProcessorDefinitionHelper.filterTypeInOutputs;
/**
 * JAXB based {@link ModelToXMLDumper}.
 * <p>
 * Marshals a Camel model node to XML via JAXB, re-attaches XML namespaces
 * gathered from the route's expressions onto the document root, and can
 * optionally resolve property placeholders and delegate endpoint URIs in the
 * resulting XML.
 */
@JdkService(ModelToXMLDumper.FACTORY)
public class JaxbModelToXMLDumper implements ModelToXMLDumper {
    @Override
    public String dumpModelAsXml(CamelContext context, NamedNode definition) throws Exception {
        JAXBContext jaxbContext = getJAXBContext(context);
        // LinkedHashMap keeps namespace declarations in discovery order
        final Map<String, String> namespaces = new LinkedHashMap<>();
        // gather all namespaces from the routes or route which is stored on the
        // expression nodes
        if (definition instanceof RoutesDefinition) {
            List<RouteDefinition> routes = ((RoutesDefinition)definition).getRoutes();
            for (RouteDefinition route : routes) {
                extractNamespaces(route, namespaces);
            }
        } else if (definition instanceof RouteDefinition) {
            RouteDefinition route = (RouteDefinition)definition;
            extractNamespaces(route, namespaces);
        }
        // marshal the model to XML text (pretty-printed, UTF-8)
        Marshaller marshaller = jaxbContext.createMarshaller();
        marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
        marshaller.setProperty(Marshaller.JAXB_ENCODING, "UTF-8");
        StringWriter buffer = new StringWriter();
        marshaller.marshal(definition, buffer);
        // re-parse into a DOM so the gathered namespaces can be attached to
        // the document root
        XmlConverter xmlConverter = newXmlConverter(context);
        String xml = buffer.toString();
        Document dom;
        try {
            dom = xmlConverter.toDOMDocument(xml, null);
        } catch (Exception e) {
            throw new TypeConversionException(xml, Document.class, e);
        }
        // Add additional namespaces to the document root element
        Element documentElement = dom.getDocumentElement();
        for (String nsPrefix : namespaces.keySet()) {
            // the default namespace keeps the bare "xmlns" name; others get a
            // "xmlns:" prefix
            String prefix = nsPrefix.equals("xmlns") ? nsPrefix : "xmlns:" + nsPrefix;
            documentElement.setAttribute(prefix, namespaces.get(nsPrefix));
        }
        // We invoke the type converter directly because we need to pass some
        // custom XML output options
        Properties outputProperties = new Properties();
        outputProperties.put(OutputKeys.INDENT, "yes");
        outputProperties.put(OutputKeys.STANDALONE, "yes");
        outputProperties.put(OutputKeys.ENCODING, "UTF-8");
        try {
            return xmlConverter.toStringFromDocument(dom, outputProperties);
        } catch (TransformerException e) {
            throw new IllegalStateException("Failed converting document object to string", e);
        }
    }
    @Override
    public String dumpModelAsXml(CamelContext context, NamedNode definition, boolean resolvePlaceholders, boolean resolveDelegateEndpoints) throws Exception {
        // first dump the model untouched, then post-process the XML text
        String xml = dumpModelAsXml(context, definition);
        // if resolving placeholders we parse the xml, and resolve the property
        // placeholders during parsing
        if (resolvePlaceholders || resolveDelegateEndpoints) {
            // tracks whether any text token was actually rewritten so the
            // model is only re-created when something changed
            final AtomicBoolean changed = new AtomicBoolean();
            InputStream is = new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8));
            Document dom = XmlLineNumberParser.parseXml(is, new XmlLineNumberParser.XmlTextTransformer() {
                // previous text token seen; when it equals "uri" the current
                // token is the value of a uri attribute (see note below)
                private String prev;
                @Override
                public String transform(String text) {
                    String after = text;
                    if (resolveDelegateEndpoints && "uri".equals(prev)) {
                        try {
                            // must resolve placeholder as the endpoint may use
                            // property placeholders
                            String uri = context.resolvePropertyPlaceholders(text);
                            Endpoint endpoint = context.hasEndpoint(uri);
                            if (endpoint instanceof DelegateEndpoint) {
                                // swap the delegate's URI in place of the
                                // original
                                endpoint = ((DelegateEndpoint)endpoint).getEndpoint();
                                after = endpoint.getEndpointUri();
                            }
                        } catch (Exception e) {
                            // ignore
                        }
                    }
                    if (resolvePlaceholders) {
                        try {
                            after = context.resolvePropertyPlaceholders(after);
                        } catch (Exception e) {
                            // ignore
                        }
                    }
                    if (!changed.get()) {
                        changed.set(!text.equals(after));
                    }
                    // okay the previous must be the attribute key with uri, so
                    // it refers to an endpoint
                    prev = text;
                    return after;
                }
            });
            // okay there were some property placeholder or delegate endpoints
            // replaced so re-create the model
            if (changed.get()) {
                // round-trip: DOM -> XML -> model -> XML so the output is a
                // clean dump of the resolved model
                xml = context.getTypeConverter().mandatoryConvertTo(String.class, dom);
                ExtendedCamelContext ecc = context.adapt(ExtendedCamelContext.class);
                NamedNode copy = ecc.getXMLRoutesDefinitionLoader().createModelFromXml(context, xml, NamedNode.class);
                xml = ecc.getModelToXMLDumper().dumpModelAsXml(context, copy);
            }
        }
        return xml;
    }
    /**
     * Obtains the model JAXB context from the Camel context's configured
     * {@link ModelJAXBContextFactory}.
     */
    private static JAXBContext getJAXBContext(CamelContext context) throws Exception {
        ModelJAXBContextFactory factory = context.adapt(ExtendedCamelContext.class).getModelJAXBContextFactory();
        return (JAXBContext) factory.newJAXBContext();
    }
    /**
     * Extract all XML namespaces from the root element in a DOM Document
     *
     * NOTE(review): not referenced anywhere in this class as currently
     * visible; presumably kept for external or future use — confirm before
     * removing.
     *
     * @param document the DOM document
     * @param namespaces the map of namespaces to add new found XML namespaces
     */
    private static void extractNamespaces(Document document, Map<String, String> namespaces) throws JAXBException {
        NamedNodeMap attributes = document.getDocumentElement().getAttributes();
        for (int i = 0; i < attributes.getLength(); i++) {
            Node item = attributes.item(i);
            String nsPrefix = item.getNodeName();
            if (nsPrefix != null && nsPrefix.startsWith("xmlns")) {
                String nsValue = item.getNodeValue();
                String[] nsParts = nsPrefix.split(":");
                if (nsParts.length == 1) {
                    // default namespace declaration: plain "xmlns"
                    namespaces.put(nsParts[0], nsValue);
                } else if (nsParts.length == 2) {
                    // prefixed declaration: "xmlns:foo" -> key "foo"
                    namespaces.put(nsParts[1], nsValue);
                } else {
                    // Fallback on adding the namespace prefix as we find it
                    namespaces.put(nsPrefix, nsValue);
                }
            }
        }
    }
    /**
     * Extract all XML namespaces from the expressions in the route
     *
     * @param route the route
     * @param namespaces the map of namespaces to add discovered XML namespaces
     *            into
     */
    private static void extractNamespaces(RouteDefinition route, Map<String, String> namespaces) {
        Iterator<ExpressionNode> it = filterTypeInOutputs(route.getOutputs(), ExpressionNode.class);
        while (it.hasNext()) {
            NamespaceAware na = getNamespaceAwareFromExpression(it.next());
            if (na != null) {
                Map<String, String> map = na.getNamespaces();
                if (map != null && !map.isEmpty()) {
                    namespaces.putAll(map);
                }
            }
        }
    }
    /**
     * Returns the {@link NamespaceAware} carried by the node's expression:
     * either the expression value itself or the expression definition,
     * whichever implements the interface (value takes precedence); null when
     * neither does.
     */
    private static NamespaceAware getNamespaceAwareFromExpression(ExpressionNode expressionNode) {
        ExpressionDefinition ed = expressionNode.getExpression();
        NamespaceAware na = null;
        Expression exp = ed.getExpressionValue();
        if (exp instanceof NamespaceAware) {
            na = (NamespaceAware)exp;
        } else if (ed instanceof NamespaceAware) {
            na = (NamespaceAware)ed;
        }
        return na;
    }
    /**
     * Creates a new {@link XmlConverter}
     *
     * @param context CamelContext if provided
     * @return a new XmlConverter instance
     */
    private static XmlConverter newXmlConverter(CamelContext context) {
        XmlConverter xmlConverter;
        if (context != null) {
            // let the context's injector create it so dependency injection
            // applies; fall back to a plain instance otherwise
            TypeConverterRegistry registry = context.getTypeConverterRegistry();
            xmlConverter = registry.getInjector().newInstance(XmlConverter.class, false);
        } else {
            xmlConverter = new XmlConverter();
        }
        return xmlConverter;
    }
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.pluto.container.impl;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.ResourceBundle;
import java.util.Set;
import javax.portlet.PortletConfig;
import javax.portlet.PortletContext;
import javax.xml.XMLConstants;
import javax.xml.namespace.QName;
import org.apache.pluto.container.om.portlet.ContainerRuntimeOption;
import org.apache.pluto.container.om.portlet.EventDefinitionReference;
import org.apache.pluto.container.om.portlet.InitParam;
import org.apache.pluto.container.om.portlet.PortletDefinition;
/**
* Abstract PortletConfig base class Implementation.
* <p>
* An embedding Portal can extend this base class and is only required to provide
* an implementation of the getResourceBundle bundle method.
* </p>
*
* @version $Id$
*/
public abstract class AbstractPortletConfigImpl implements PortletConfig
{
    /** The portlet context this portlet runs in. */
    protected PortletContext portletContext;

    /**
     * The portlet descriptor.
     */
    protected PortletDefinition portlet;

    /**
     * Lazily built map of container runtime options (portlet-level options
     * override application-level ones); guarded by {@code this}.
     */
    protected Map<String, String[]> containerRuntimeOptions;

    /** Names of the runtime options the embedding container supports. */
    protected Set<String> supportedContainerRuntimeOptions;

    public AbstractPortletConfigImpl(PortletContext portletContext, PortletDefinition portlet)
    {
        this.portletContext = portletContext;
        this.portlet = portlet;
        this.supportedContainerRuntimeOptions = new HashSet<String>();
        for (Enumeration<String> e = portletContext.getContainerRuntimeOptions(); e.hasMoreElements(); )
        {
            supportedContainerRuntimeOptions.add(e.nextElement());
        }
    }

    /**
     * Returns the resource bundle for the given locale; must be implemented
     * by the embedding portal.
     */
    public abstract ResourceBundle getResourceBundle(Locale locale);

    public String getPortletName() {
        return portlet.getPortletName();
    }

    public PortletContext getPortletContext() {
        return portletContext;
    }

    /**
     * Returns the value of the named init parameter, or null when not
     * defined.
     *
     * @throws IllegalArgumentException if name is null.
     */
    public String getInitParameter(String name) {
        if (name == null) {
            throw new IllegalArgumentException("Parameter name == null");
        }
        for (InitParam param : portlet.getInitParams()) {
            if (param.getParamName().equals(name)) {
                return param.getParamValue();
            }
        }
        return null;
    }

    /**
     * Enumerates the init parameter names over a snapshot of the descriptor's
     * parameter list.
     */
    public Enumeration<String> getInitParameterNames() {
        return new java.util.Enumeration<String>() {
            private Iterator<InitParam> iterator =
                new ArrayList<InitParam>(portlet.getInitParams()).iterator();

            public boolean hasMoreElements() {
                return iterator.hasNext();
            }

            public String nextElement() {
                if (!iterator.hasNext()) {
                    // Enumeration contract: signal exhaustion explicitly
                    // rather than returning null.
                    throw new java.util.NoSuchElementException();
                }
                return iterator.next().getParamName();
            }
        };
    }

    public PortletDefinition getPortletDefinition() {
        return portlet;
    }

    // --------------------------------------------------------------------------------------------

    public Enumeration<String> getPublicRenderParameterNames() {
        if (portlet.getSupportedPublicRenderParameters() != null){
            return Collections.enumeration(portlet.getSupportedPublicRenderParameters());
        }
        return Collections.enumeration(new ArrayList<String>());
    }

    public String getDefaultNamespace() {
        if (portlet.getApplication().getDefaultNamespace() == null)
            return XMLConstants.NULL_NS_URI;
        return portlet.getApplication().getDefaultNamespace();
    }

    /**
     * Enumerates the qualified names of supported processing events, skipping
     * references without a qualified name.
     */
    public Enumeration<QName> getProcessingEventQNames() {
        ArrayList<QName> qnameList = new ArrayList<QName>();
        for (EventDefinitionReference ref : portlet.getSupportedProcessingEvents())
        {
            QName name = ref.getQualifiedName();
            if (name == null)
            {
                continue;
            }
            qnameList.add(name);
        }
        return Collections.enumeration(qnameList);
    }

    /**
     * Enumerates the qualified names of supported publishing events, skipping
     * references without a qualified name.
     */
    public Enumeration<QName> getPublishingEventQNames() {
        ArrayList<QName> qnameList = new ArrayList<QName>();
        for (EventDefinitionReference ref : portlet.getSupportedPublishingEvents())
        {
            QName name = ref.getQualifiedName();
            if (name == null)
            {
                continue;
            }
            qnameList.add(name);
        }
        return Collections.enumeration(qnameList);
    }

    /**
     * Enumerates the locales declared in the portlet descriptor, converting
     * each language id string to a Locale.
     */
    public Enumeration<Locale> getSupportedLocales() {
        List<Locale> locals = new ArrayList<Locale>();
        List<String> languageIds = portlet.getSupportedLocales();
        if (languageIds!=null){
            for (String languageId : languageIds) {
                locals.add(getLocale(languageId));
            }
        }
        return Collections.enumeration(locals);
    }

    /**
     * Returns a defensive copy of the supported container runtime options.
     * The whole method is synchronized so the lazily built map is never read
     * outside the lock that publishes it.
     */
    public Map<String, String[]> getContainerRuntimeOptions()
    {
        synchronized(this)
        {
            if (containerRuntimeOptions == null)
            {
                containerRuntimeOptions = new HashMap<String, String[]>();
                // application-level options first, then portlet-level options
                // so the latter override the former
                for (ContainerRuntimeOption option : portlet.getApplication().getContainerRuntimeOptions())
                {
                    List<String> values = option.getValues();
                    containerRuntimeOptions.put(option.getName(), values.toArray(new String[values.size()]));
                }
                for (ContainerRuntimeOption option : portlet.getContainerRuntimeOptions())
                {
                    List<String> values = option.getValues();
                    containerRuntimeOptions.put(option.getName(), values.toArray(new String[values.size()]));
                }
                // drop any option the container does not support
                containerRuntimeOptions.keySet().retainAll(supportedContainerRuntimeOptions);
            }
            if (containerRuntimeOptions.isEmpty())
            {
                return Collections.emptyMap();
            }
            // copy values so callers cannot mutate the cached arrays
            Map<String, String[]> result = new HashMap<String, String[]>(containerRuntimeOptions.size());
            for (Map.Entry<String,String[]> entry : containerRuntimeOptions.entrySet())
            {
                if (entry.getValue() != null)
                {
                    result.put(entry.getKey(), entry.getValue().clone());
                }
            }
            return Collections.unmodifiableMap(result);
        }
    }

    /**
     * Converts a descriptor language id ("en", "en_US", "en_US_variant") to a
     * Locale. Plain ids go through forLanguageTag; underscore-separated ids
     * are split into language/country/variant.
     */
    private Locale getLocale(String languageId) {
        Locale locale;
        int pos = languageId.indexOf("_");
        if (pos == -1) {
            locale = Locale.forLanguageTag(languageId);
        }
        else {
            String[] languageIdParts = languageId.split("_");
            String languageCode = languageIdParts[0];
            String countryCode = languageIdParts[1];
            String variant = null;
            if (languageIdParts.length > 2) {
                variant = languageIdParts[2];
            }
            if ((variant != null) && (variant.trim().length() > 0)) {
                locale = new Locale(languageCode, countryCode, variant);
            }
            else {
                locale = new Locale(languageCode, countryCode);
            }
        }
        return locale;
    }
}
| |
/*
* Copyright Strimzi authors.
* License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
*/
package io.strimzi.systemtest.operators;
import io.fabric8.kubernetes.api.model.DeletionPropagation;
import io.fabric8.kubernetes.api.model.Quantity;
import io.fabric8.kubernetes.api.model.ResourceRequirementsBuilder;
import io.fabric8.kubernetes.api.model.Service;
import io.strimzi.api.kafka.model.KafkaBridgeResources;
import io.strimzi.api.kafka.model.KafkaBuilder;
import io.strimzi.api.kafka.model.KafkaConnectResources;
import io.strimzi.api.kafka.model.KafkaMirrorMaker2;
import io.strimzi.api.kafka.model.KafkaMirrorMaker2Resources;
import io.strimzi.api.kafka.model.KafkaResources;
import io.strimzi.api.kafka.model.KafkaTopic;
import io.strimzi.api.kafka.model.connect.ConnectorPlugin;
import io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListener;
import io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder;
import io.strimzi.api.kafka.model.listener.arraylistener.KafkaListenerType;
import io.strimzi.api.kafka.model.status.Condition;
import io.strimzi.api.kafka.model.status.KafkaBridgeStatus;
import io.strimzi.api.kafka.model.status.KafkaConnectStatus;
import io.strimzi.api.kafka.model.status.KafkaConnectorStatus;
import io.strimzi.api.kafka.model.status.KafkaMirrorMaker2Status;
import io.strimzi.api.kafka.model.status.KafkaMirrorMakerStatus;
import io.strimzi.api.kafka.model.status.KafkaStatus;
import io.strimzi.api.kafka.model.status.KafkaTopicStatus;
import io.strimzi.api.kafka.model.status.ListenerStatus;
import io.strimzi.operator.common.Annotations;
import io.strimzi.operator.common.model.Labels;
import io.strimzi.systemtest.AbstractST;
import io.strimzi.systemtest.BeforeAllOnce;
import io.strimzi.systemtest.Constants;
import io.strimzi.systemtest.Environment;
import io.strimzi.systemtest.annotations.IsolatedSuite;
import io.strimzi.systemtest.kafkaclients.externalClients.ExternalKafkaClient;
import io.strimzi.systemtest.resources.operator.SetupClusterOperator;
import io.strimzi.systemtest.annotations.ParallelTest;
import io.strimzi.systemtest.resources.ResourceManager;
import io.strimzi.systemtest.resources.crd.KafkaBridgeResource;
import io.strimzi.systemtest.resources.crd.KafkaConnectResource;
import io.strimzi.systemtest.resources.crd.KafkaConnectorResource;
import io.strimzi.systemtest.resources.crd.KafkaMirrorMaker2Resource;
import io.strimzi.systemtest.resources.crd.KafkaMirrorMakerResource;
import io.strimzi.systemtest.resources.crd.KafkaResource;
import io.strimzi.systemtest.resources.crd.KafkaTopicResource;
import io.strimzi.systemtest.resources.crd.KafkaUserResource;
import io.strimzi.systemtest.templates.crd.KafkaBridgeTemplates;
import io.strimzi.systemtest.templates.crd.KafkaClientsTemplates;
import io.strimzi.systemtest.templates.crd.KafkaConnectTemplates;
import io.strimzi.systemtest.templates.crd.KafkaConnectorTemplates;
import io.strimzi.systemtest.templates.crd.KafkaMirrorMaker2Templates;
import io.strimzi.systemtest.templates.crd.KafkaMirrorMakerTemplates;
import io.strimzi.systemtest.templates.crd.KafkaTemplates;
import io.strimzi.systemtest.templates.crd.KafkaTopicTemplates;
import io.strimzi.systemtest.templates.crd.KafkaUserTemplates;
import io.strimzi.systemtest.utils.ClientUtils;
import io.strimzi.systemtest.utils.kafkaUtils.KafkaBridgeUtils;
import io.strimzi.systemtest.utils.kafkaUtils.KafkaConnectUtils;
import io.strimzi.systemtest.utils.kafkaUtils.KafkaConnectorUtils;
import io.strimzi.systemtest.utils.kafkaUtils.KafkaMirrorMaker2Utils;
import io.strimzi.systemtest.utils.kafkaUtils.KafkaMirrorMakerUtils;
import io.strimzi.systemtest.utils.kafkaUtils.KafkaTopicUtils;
import io.strimzi.systemtest.utils.kafkaUtils.KafkaUserUtils;
import io.strimzi.systemtest.utils.kafkaUtils.KafkaUtils;
import io.strimzi.systemtest.utils.kubeUtils.controllers.DeploymentUtils;
import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.extension.ExtensionContext;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import static io.strimzi.api.kafka.model.KafkaResources.externalBootstrapServiceName;
import static io.strimzi.systemtest.Constants.BRIDGE;
import static io.strimzi.systemtest.Constants.CONNECT;
import static io.strimzi.systemtest.Constants.CONNECTOR_OPERATOR;
import static io.strimzi.systemtest.Constants.CONNECT_COMPONENTS;
import static io.strimzi.systemtest.Constants.EXTERNAL_CLIENTS_USED;
import static io.strimzi.systemtest.Constants.INFRA_NAMESPACE;
import static io.strimzi.systemtest.Constants.MIRROR_MAKER;
import static io.strimzi.systemtest.Constants.MIRROR_MAKER2;
import static io.strimzi.systemtest.Constants.NODEPORT_SUPPORTED;
import static io.strimzi.systemtest.Constants.REGRESSION;
import static io.strimzi.systemtest.enums.CustomResourceStatus.NotReady;
import static io.strimzi.systemtest.enums.CustomResourceStatus.Ready;
import static io.strimzi.systemtest.utils.kafkaUtils.KafkaUtils.getKafkaSecretCertificates;
import static io.strimzi.systemtest.utils.kafkaUtils.KafkaUtils.getKafkaStatusCertificates;
import static io.strimzi.test.k8s.KubeClusterResource.cmdKubeClient;
import static io.strimzi.test.k8s.KubeClusterResource.kubeClient;
import static java.util.Arrays.asList;
import static org.hamcrest.CoreMatchers.hasItems;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.core.StringContains.containsString;
/**
 * System tests verifying that the {@code status} subresource of Strimzi custom resources
 * (Kafka, KafkaUser, KafkaTopic, KafkaConnect, KafkaConnector, KafkaBridge,
 * KafkaMirrorMaker and KafkaMirrorMaker2) is populated correctly — both for healthy
 * deployments and after deliberately corrupting and then restoring each component.
 *
 * <p>The common pattern is: deploy → assert status at observedGeneration 1 →
 * break the resource (impossible CPU request, bogus bootstrap address, bad class name)
 * → wait for NotReady → restore → assert status again at observedGeneration 3.
 *
 * <p>All tests share the single Kafka cluster created in {@link #setup(ExtensionContext)}.
 */
@Tag(REGRESSION)
@IsolatedSuite
class CustomResourceStatusST extends AbstractST {
    private static final Logger LOGGER = LogManager.getLogger(CustomResourceStatusST.class);
    // Milliseconds to wait for a Topic Operator re-reconciliation; computed in setup()
    // as twice the configured reconciliation interval plus a 5 s safety margin.
    private static int topicOperatorReconciliationInterval;
    private static final String CUSTOM_RESOURCE_STATUS_CLUSTER_NAME = "custom-resource-status-cluster-name";
    private static final String EXAMPLE_TOPIC_NAME = "example-topic-name";

    /**
     * Verifies the Kafka CR status: listener addresses are correct while Ready,
     * the cluster goes NotReady when the Topic Operator requests an unsatisfiable
     * amount of CPU, and the status recovers (observedGeneration 3) after the
     * request is restored to a sane value.
     */
    @ParallelTest
    @Tag(NODEPORT_SUPPORTED)
    @Tag(EXTERNAL_CLIENTS_USED)
    void testKafkaStatus(ExtensionContext extensionContext) {
        LOGGER.info("Checking status of deployed kafka cluster");
        KafkaUtils.waitForKafkaReady(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME);

        // Smoke-test the external (nodeport) listener advertised in the status.
        ExternalKafkaClient externalKafkaClient = new ExternalKafkaClient.Builder()
            .withTopicName(TOPIC_NAME)
            .withNamespaceName(INFRA_NAMESPACE)
            .withClusterName(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME)
            .withMessageCount(MESSAGE_COUNT)
            .withListenerName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
            .build();

        externalKafkaClient.verifyProducedAndConsumedMessages(
            externalKafkaClient.sendMessagesPlain(),
            externalKafkaClient.receiveMessagesPlain()
        );

        assertKafkaStatus(1, KafkaResources.bootstrapServiceName(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME) + "." + INFRA_NAMESPACE + ".svc");

        // 100000m CPU (100 cores) is unschedulable, forcing the cluster NotReady.
        KafkaResource.replaceKafkaResource(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, k -> {
            k.getSpec().getEntityOperator().getTopicOperator().setResources(new ResourceRequirementsBuilder()
                .addToRequests("cpu", new Quantity("100000m"))
                .build());
        });

        LOGGER.info("Wait until cluster will be in NotReady state ...");
        KafkaUtils.waitForKafkaNotReady(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME);

        LOGGER.info("Recovery cluster to Ready state ...");
        KafkaResource.replaceKafkaResource(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, k -> {
            k.getSpec().getEntityOperator().getTopicOperator().setResources(new ResourceRequirementsBuilder()
                .addToRequests("cpu", new Quantity("100m"))
                .build());
        });
        KafkaUtils.waitForKafkaReady(INFRA_NAMESPACE, CUSTOM_RESOURCE_STATUS_CLUSTER_NAME);
        assertKafkaStatus(3, KafkaResources.bootstrapServiceName(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME) + "." + INFRA_NAMESPACE + ".svc");
    }

    /** Verifies a freshly created TLS KafkaUser reaches the Ready condition. */
    @ParallelTest
    void testKafkaUserStatus(ExtensionContext extensionContext) {
        String userName = mapWithTestUsers.get(extensionContext.getDisplayName());
        resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, userName).build());

        LOGGER.info("Checking status of deployed KafkaUser");
        Condition kafkaCondition = KafkaUserResource.kafkaUserClient().inNamespace(INFRA_NAMESPACE).withName(userName).get().getStatus().getConditions().get(0);
        LOGGER.info("KafkaUser Status: {}", kafkaCondition.getStatus());
        LOGGER.info("KafkaUser Type: {}", kafkaCondition.getType());
        assertThat("KafkaUser is in wrong state!", kafkaCondition.getType(), is(Ready.toString()));
        LOGGER.info("KafkaUser is in desired state: Ready");
    }

    /**
     * Verifies a KafkaUser with an invalid name (longer than the 64-character
     * Kubernetes limit) is reported as NotReady, then cleans it up.
     */
    @ParallelTest
    void testKafkaUserStatusNotReady(ExtensionContext extensionContext) {
        // Simulate NotReady state with userName longer than 64 characters
        String userName = "sasl-use-rabcdefghijklmnopqrstuvxyzabcdefghijklmnopqrstuvxyzabcdef";
        resourceManager.createResource(extensionContext, false, KafkaUserTemplates.defaultUser(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, userName).build());
        KafkaUserUtils.waitForKafkaUserNotReady(userName);

        LOGGER.info("Checking status of deployed KafkaUser {}", userName);
        Condition kafkaCondition = KafkaUserResource.kafkaUserClient().inNamespace(INFRA_NAMESPACE).withName(userName).get().getStatus().getConditions().get(0);
        LOGGER.info("KafkaUser Status: {}", kafkaCondition.getStatus());
        LOGGER.info("KafkaUser Type: {}", kafkaCondition.getType());
        LOGGER.info("KafkaUser Message: {}", kafkaCondition.getMessage());
        LOGGER.info("KafkaUser Reason: {}", kafkaCondition.getReason());
        assertThat("KafkaUser is in wrong state!", kafkaCondition.getType(), is(NotReady.toString()));
        LOGGER.info("KafkaUser {} is in desired state: {}", userName, kafkaCondition.getType());

        KafkaUserResource.kafkaUserClient().inNamespace(INFRA_NAMESPACE).withName(userName).delete();
        KafkaUserUtils.waitForKafkaUserDeletion(userName);
    }

    /**
     * Verifies the KafkaMirrorMaker status survives a corrupt-and-restore cycle
     * driven by an unsatisfiable CPU request.
     */
    @ParallelTest
    @Tag(MIRROR_MAKER)
    void testKafkaMirrorMakerStatus(ExtensionContext extensionContext) {
        String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
        String mirrorMakerName = clusterName + "-mirror-maker";
        // Deploy Mirror Maker
        resourceManager.createResource(extensionContext, KafkaMirrorMakerTemplates.kafkaMirrorMaker(mirrorMakerName, CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, ClientUtils.generateRandomConsumerGroup(), 1, false).build());
        KafkaMirrorMakerUtils.waitForKafkaMirrorMakerReady(mirrorMakerName);
        assertKafkaMirrorMakerStatus(1, mirrorMakerName);
        // Corrupt Mirror Maker pods
        KafkaMirrorMakerResource.replaceMirrorMakerResource(mirrorMakerName, mm -> mm.getSpec().setResources(new ResourceRequirementsBuilder()
            .addToRequests("cpu", new Quantity("100000000m"))
            .build()));
        KafkaMirrorMakerUtils.waitForKafkaMirrorMakerNotReady(mirrorMakerName);
        // Restore Mirror Maker pod
        KafkaMirrorMakerResource.replaceMirrorMakerResource(mirrorMakerName, mm -> mm.getSpec().setResources(new ResourceRequirementsBuilder()
            .addToRequests("cpu", new Quantity("100m"))
            .build()));
        KafkaMirrorMakerUtils.waitForKafkaMirrorMakerReady(mirrorMakerName);
        assertKafkaMirrorMakerStatus(3, mirrorMakerName);
    }

    /**
     * Verifies the KafkaMirrorMaker status goes NotReady when the consumer
     * bootstrap address is invalid and recovers once it is corrected.
     */
    @ParallelTest
    @Tag(MIRROR_MAKER)
    void testKafkaMirrorMakerStatusWrongBootstrap(ExtensionContext extensionContext) {
        String mirrorMakerName = mapWithClusterNames.get(extensionContext.getDisplayName());
        resourceManager.createResource(extensionContext, KafkaMirrorMakerTemplates.kafkaMirrorMaker(mirrorMakerName, CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, ClientUtils.generateRandomConsumerGroup(), 1, false).build());
        KafkaMirrorMakerUtils.waitForKafkaMirrorMakerReady(mirrorMakerName);
        assertKafkaMirrorMakerStatus(1, mirrorMakerName);
        // Corrupt Mirror Maker pods
        KafkaMirrorMakerResource.replaceMirrorMakerResource(mirrorMakerName, mm -> mm.getSpec().getConsumer().setBootstrapServers("non-exists-bootstrap"));
        KafkaMirrorMakerUtils.waitForKafkaMirrorMakerNotReady(mirrorMakerName);
        // Restore Mirror Maker pods
        KafkaMirrorMakerResource.replaceMirrorMakerResource(mirrorMakerName, mm -> mm.getSpec().getConsumer().setBootstrapServers(KafkaResources.plainBootstrapAddress(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME)));
        KafkaMirrorMakerUtils.waitForKafkaMirrorMakerReady(mirrorMakerName);
        assertKafkaMirrorMakerStatus(3, mirrorMakerName);
    }

    /**
     * Verifies the KafkaBridge status URL and observedGeneration across a
     * corrupt-and-restore cycle driven by an unsatisfiable CPU request.
     */
    @ParallelTest
    @Tag(BRIDGE)
    void testKafkaBridgeStatus(ExtensionContext extensionContext) {
        String bridgeUrl = KafkaBridgeResources.url(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, INFRA_NAMESPACE, 8080);

        resourceManager.createResource(extensionContext, KafkaBridgeTemplates.kafkaBridge(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, KafkaResources.plainBootstrapAddress(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME), 1).build());
        KafkaBridgeUtils.waitForKafkaBridgeReady(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME);
        assertKafkaBridgeStatus(1, bridgeUrl);

        KafkaBridgeResource.replaceBridgeResource(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, kb -> kb.getSpec().setResources(new ResourceRequirementsBuilder()
            .addToRequests("cpu", new Quantity("100000000m"))
            .build()));
        KafkaBridgeUtils.waitForKafkaBridgeNotReady(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME);

        KafkaBridgeResource.replaceBridgeResource(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, kb -> kb.getSpec().setResources(new ResourceRequirementsBuilder()
            .addToRequests("cpu", new Quantity("10m"))
            .build()));
        KafkaBridgeUtils.waitForKafkaBridgeReady(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME);
        assertKafkaBridgeStatus(3, bridgeUrl);
    }

    /**
     * Verifies Connect and Connector statuses: healthy deployment, Connect
     * corruption via CPU request, and two Connector failure modes (wrong
     * {@code strimzi.io/cluster} label, non-existing connector class) — the
     * connectorStatus must be null while broken and repopulated after repair.
     */
    @ParallelTest
    @Tag(CONNECT)
    @Tag(CONNECTOR_OPERATOR)
    @Tag(CONNECT_COMPONENTS)
    void testKafkaConnectAndConnectorStatus(ExtensionContext extensionContext) {
        String connectUrl = KafkaConnectResources.url(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, INFRA_NAMESPACE, 8083);

        resourceManager.createResource(extensionContext, KafkaConnectTemplates.kafkaConnect(extensionContext, CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, 1)
            .editMetadata()
                // Tell the operator to manage connectors through KafkaConnector CRs.
                .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
            .endMetadata()
            .build());
        resourceManager.createResource(extensionContext, KafkaConnectorTemplates.kafkaConnector(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME)
            .editSpec()
                .addToConfig("topic", EXAMPLE_TOPIC_NAME)
            .endSpec()
            .build());

        assertKafkaConnectStatus(1, connectUrl);
        assertKafkaConnectorStatus(1, "RUNNING|UNASSIGNED", "source", List.of());

        KafkaConnectResource.replaceKafkaConnectResource(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, kb -> kb.getSpec().setResources(new ResourceRequirementsBuilder()
            .addToRequests("cpu", new Quantity("100000000m"))
            .build()));
        KafkaConnectUtils.waitForConnectNotReady(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME);

        KafkaConnectResource.replaceKafkaConnectResource(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, kb -> kb.getSpec().setResources(new ResourceRequirementsBuilder()
            .addToRequests("cpu", new Quantity("100m"))
            .build()));
        KafkaConnectUtils.waitForConnectReady(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME);
        assertKafkaConnectStatus(3, connectUrl);

        // Point the connector at a non-existing Connect cluster -> NotReady, no connectorStatus.
        KafkaConnectorResource.replaceKafkaConnectorResource(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME,
            kc -> kc.getMetadata().setLabels(Collections.singletonMap(Labels.STRIMZI_CLUSTER_LABEL, "non-existing-connect-cluster")));
        KafkaConnectorUtils.waitForConnectorNotReady(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME);
        assertThat(KafkaConnectorResource.kafkaConnectorClient().inNamespace(INFRA_NAMESPACE).withName(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME).get().getStatus().getConnectorStatus(), is(nullValue()));

        KafkaConnectorResource.replaceKafkaConnectorResource(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME,
            kc -> kc.getMetadata().setLabels(Collections.singletonMap(Labels.STRIMZI_CLUSTER_LABEL, CUSTOM_RESOURCE_STATUS_CLUSTER_NAME)));
        KafkaConnectorUtils.waitForConnectorReady(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME);
        assertKafkaConnectorStatus(1, "RUNNING|UNASSIGNED", "source", List.of(EXAMPLE_TOPIC_NAME));

        // Break the connector with a bogus class name, then restore the original one.
        String defaultClass = KafkaConnectorResource.kafkaConnectorClient().inNamespace(INFRA_NAMESPACE).withName(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME).get().getSpec().getClassName();

        KafkaConnectorResource.replaceKafkaConnectorResource(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME,
            kc -> kc.getSpec().setClassName("non-existing-class"));
        KafkaConnectorUtils.waitForConnectorNotReady(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME);
        assertThat(KafkaConnectorResource.kafkaConnectorClient().inNamespace(INFRA_NAMESPACE).withName(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME).get().getStatus().getConnectorStatus(), is(nullValue()));

        KafkaConnectorResource.replaceKafkaConnectorResource(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME,
            kc -> {
                kc.getMetadata().setLabels(Collections.singletonMap(Labels.STRIMZI_CLUSTER_LABEL, CUSTOM_RESOURCE_STATUS_CLUSTER_NAME));
                kc.getSpec().setClassName(defaultClass);
            });

        KafkaConnectorUtils.waitForConnectorReady(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME);
        assertKafkaConnectorStatus(3, "RUNNING|UNASSIGNED", "source", List.of(EXAMPLE_TOPIC_NAME));
    }

    /**
     * Regression test: a KafkaConnector without a Connect-cluster label must go
     * NotReady without causing an operator NPE (the CO log is checked after
     * every test in the base class).
     */
    @ParallelTest
    @Tag(CONNECTOR_OPERATOR)
    void testKafkaConnectorWithoutClusterConfig(ExtensionContext extensionContext) {
        String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());

        // This test check NPE when connect cluster is not specified in labels
        // Check for NPE in CO logs is performed after every test in BaseST
        resourceManager.createResource(extensionContext, false, KafkaConnectorTemplates.kafkaConnector(clusterName, CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, 2)
            .withNewMetadata()
                .withName(clusterName)
                .withNamespace(ResourceManager.kubeClient().getNamespace())
            .endMetadata()
            .build());

        KafkaConnectorUtils.waitForConnectorNotReady(clusterName);

        KafkaConnectorResource.kafkaConnectorClient().inNamespace(INFRA_NAMESPACE).withName(clusterName).withPropagationPolicy(DeletionPropagation.FOREGROUND).delete();
        KafkaConnectorUtils.waitForConnectorDeletion(clusterName);
    }

    /** Verifies a valid KafkaTopic reaches Ready with observedGeneration 1. */
    @ParallelTest
    void testKafkaTopicStatus(ExtensionContext extensionContext) {
        String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
        resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, topicName).build());
        KafkaTopicUtils.waitForKafkaTopicReady(topicName);
        assertKafkaTopicStatus(1, topicName);
    }

    /**
     * Verifies a KafkaTopic whose replication factor (10) exceeds the broker
     * count goes NotReady, then deletes it.
     */
    @ParallelTest
    void testKafkaTopicStatusNotReady(ExtensionContext extensionContext) {
        String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
        resourceManager.createResource(extensionContext, false, KafkaTopicTemplates.topic(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, topicName, 1, 10, 10).build());
        KafkaTopicUtils.waitForKafkaTopicNotReady(topicName);
        assertKafkaTopicStatus(1, topicName);

        cmdKubeClient().deleteByName(KafkaTopic.RESOURCE_KIND, topicName);
        KafkaTopicUtils.waitForKafkaTopicDeletion(topicName);
    }

    /**
     * Verifies the TLS listener certificates published in the Kafka status
     * match the ones stored in the cluster CA certificate secret.
     */
    @ParallelTest
    void testKafkaStatusCertificate(ExtensionContext extensionContext) {
        String certs = getKafkaStatusCertificates(Constants.TLS_LISTENER_DEFAULT_NAME, INFRA_NAMESPACE, CUSTOM_RESOURCE_STATUS_CLUSTER_NAME);
        String secretCerts = getKafkaSecretCertificates(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME + "-cluster-ca-cert", "ca.crt");

        LOGGER.info("Check if KafkaStatus certificates are the same as secret certificates");
        assertThat(secretCerts, is(certs));
    }

    /**
     * Verifies the KafkaMirrorMaker2 status (URL, connector states) across a
     * corrupt-and-restore cycle, and that the pods are not rolled afterwards.
     */
    @ParallelTest
    @Tag(MIRROR_MAKER2)
    @Tag(CONNECT_COMPONENTS)
    void testKafkaMirrorMaker2Status(ExtensionContext extensionContext) {
        String targetClusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
        String mirrorMaker2Name = targetClusterName + "-mirror-maker-2";
        String mm2Url = KafkaMirrorMaker2Resources.url(mirrorMaker2Name, INFRA_NAMESPACE, 8083);

        resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(targetClusterName, 1, 1).build());
        resourceManager.createResource(extensionContext, KafkaMirrorMaker2Templates.kafkaMirrorMaker2(mirrorMaker2Name, CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, targetClusterName, 1, false).build());
        KafkaMirrorMaker2Utils.waitForKafkaMirrorMaker2Ready(mirrorMaker2Name);
        KafkaMirrorMaker2Utils.waitForKafkaMirrorMaker2ConnectorReadiness(INFRA_NAMESPACE, mirrorMaker2Name);
        assertKafkaMirrorMaker2Status(1, mm2Url, mirrorMaker2Name);

        // Corrupt Mirror Maker pods
        KafkaMirrorMaker2Resource.replaceKafkaMirrorMaker2Resource(mirrorMaker2Name, mm2 -> mm2.getSpec().setResources(new ResourceRequirementsBuilder()
            .addToRequests("cpu", new Quantity("100000000m"))
            .build()));
        KafkaMirrorMaker2Utils.waitForKafkaMirrorMaker2NotReady(mirrorMaker2Name);

        // Restore Mirror Maker pod
        KafkaMirrorMaker2Resource.replaceKafkaMirrorMaker2Resource(mirrorMaker2Name, mm2 -> mm2.getSpec().setResources(new ResourceRequirementsBuilder()
            .addToRequests("cpu", new Quantity("100m"))
            .build()));
        KafkaMirrorMaker2Utils.waitForKafkaMirrorMaker2Ready(mirrorMaker2Name);
        KafkaMirrorMaker2Utils.waitForKafkaMirrorMaker2ConnectorReadiness(INFRA_NAMESPACE, mirrorMaker2Name);
        assertKafkaMirrorMaker2Status(3, mm2Url, mirrorMaker2Name);

        // Wait for pods stability and check that pods weren't rolled
        PodUtils.verifyThatRunningPodsAreStable(INFRA_NAMESPACE, KafkaMirrorMaker2Resources.deploymentName(mirrorMaker2Name));
        assertKafkaMirrorMaker2Status(3, mm2Url, mirrorMaker2Name);
        KafkaMirrorMaker2Utils.waitForKafkaMirrorMaker2ConnectorReadiness(INFRA_NAMESPACE, mirrorMaker2Name);
    }

    /**
     * Verifies a MirrorMaker2 pointing at non-existing source and target
     * clusters goes NotReady, then deletes it and waits for the Deployment
     * to disappear.
     */
    @ParallelTest
    @Tag(MIRROR_MAKER2)
    void testKafkaMirrorMaker2WrongBootstrap(ExtensionContext extensionContext) {
        String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
        String mirrorMaker2Name = clusterName + "-mirror-maker-2";

        KafkaMirrorMaker2 kafkaMirrorMaker2 = KafkaMirrorMaker2Templates.kafkaMirrorMaker2(mirrorMaker2Name,
            "non-existing-source", "non-existing-target", 1, false).build();

        resourceManager.createResource(extensionContext, false, kafkaMirrorMaker2);

        KafkaMirrorMaker2Utils.waitForKafkaMirrorMaker2NotReady(mirrorMaker2Name);

        // delete
        KafkaMirrorMaker2Resource.kafkaMirrorMaker2Client().inNamespace(INFRA_NAMESPACE).withName(mirrorMaker2Name).withPropagationPolicy(DeletionPropagation.FOREGROUND).delete();

        DeploymentUtils.waitForDeploymentDeletion(KafkaMirrorMaker2Resources.deploymentName(mirrorMaker2Name));
    }

    /**
     * Verifies that decreasing partitions of a KafkaTopic is rejected and the
     * error persists in the status across a full reconciliation cycle.
     */
    @ParallelTest
    void testKafkaTopicDecreaseStatus(ExtensionContext extensionContext) throws InterruptedException {
        String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
        resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, topicName, 5).build());
        int decreaseTo = 1;

        LOGGER.info("Decreasing number of partitions to {}", decreaseTo);
        KafkaTopicResource.replaceTopicResource(topicName, kafkaTopic -> kafkaTopic.getSpec().setPartitions(decreaseTo));
        KafkaTopicUtils.waitForKafkaTopicPartitionChange(topicName, decreaseTo);
        KafkaTopicUtils.waitForKafkaTopicNotReady(topicName);

        assertKafkaTopicDecreasePartitionsStatus(topicName);

        // Wait some time to check if error is still present in KafkaTopic status
        LOGGER.info("Wait {} ms for next reconciliation", topicOperatorReconciliationInterval);
        Thread.sleep(topicOperatorReconciliationInterval);
        assertKafkaTopicDecreasePartitionsStatus(topicName);
    }

    /**
     * Verifies that an invalid {@code min.insync.replicas} value is rejected
     * and the error persists in the status across a full reconciliation cycle.
     */
    @ParallelTest
    void testKafkaTopicChangingInSyncReplicasStatus(ExtensionContext extensionContext) throws InterruptedException {
        String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
        resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, topicName, 5).build());
        String invalidValue = "x";

        LOGGER.info("Changing min.insync.replicas to random char");
        KafkaTopicResource.replaceTopicResource(topicName, kafkaTopic -> kafkaTopic.getSpec().getConfig().replace("min.insync.replicas", invalidValue));
        KafkaTopicUtils.waitForKafkaTopicNotReady(topicName);

        assertKafkaTopicWrongMinInSyncReplicasStatus(topicName, invalidValue);

        // Wait some time to check if error is still present in KafkaTopic status
        LOGGER.info("Wait {} ms for next reconciliation", topicOperatorReconciliationInterval);
        Thread.sleep(topicOperatorReconciliationInterval);
        assertKafkaTopicWrongMinInSyncReplicasStatus(topicName, invalidValue);
    }

    /**
     * Installs the Cluster Operator and deploys the shared Kafka cluster with
     * plain, TLS and (unless RBAC is namespace-scoped) nodeport listeners, a
     * shared topic and a clients deployment. Also derives the wait period used
     * by the reconciliation-persistence tests.
     */
    @BeforeAll
    void setup(ExtensionContext extensionContext) {
        install = new SetupClusterOperator.SetupClusterOperatorBuilder()
            .withExtensionContext(BeforeAllOnce.getSharedExtensionContext())
            .withNamespace(INFRA_NAMESPACE)
            .withOperationTimeout(Constants.CO_OPERATION_TIMEOUT_SHORT)
            .createInstallation()
            .runInstallation();

        GenericKafkaListener plain = new GenericKafkaListenerBuilder()
            .withName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
            .withPort(9092)
            .withType(KafkaListenerType.INTERNAL)
            .withTls(false)
            .build();
        GenericKafkaListener tls = new GenericKafkaListenerBuilder()
            .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
            .withPort(9093)
            .withType(KafkaListenerType.INTERNAL)
            .withTls(true)
            .build();
        GenericKafkaListener nodePort = new GenericKafkaListenerBuilder()
            .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
            .withPort(9094)
            .withType(KafkaListenerType.NODEPORT)
            .withTls(false)
            .build();

        // Nodeport listeners need cluster-wide RBAC; skip them in namespace-scoped mode.
        List<GenericKafkaListener> listeners;
        if (Environment.isNamespaceRbacScope()) {
            listeners = asList(plain, tls);
        } else {
            listeners = asList(plain, tls, nodePort);
        }

        KafkaBuilder kafkaBuilder = KafkaTemplates.kafkaEphemeral(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, 3, 3)
            .editSpec()
                .editKafka()
                    .withListeners(listeners)
                .endKafka()
            .endSpec();

        String kafkaClientsName = INFRA_NAMESPACE + "-shared-" + Constants.KAFKA_CLIENTS;

        resourceManager.createResource(extensionContext, kafkaBuilder.build());
        resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME, TOPIC_NAME).build());
        resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, kafkaClientsName).build());

        // Twice the reconciliation interval (converted to ms) plus a 5 s margin,
        // guaranteeing at least one full reconciliation happens during the wait.
        topicOperatorReconciliationInterval = KafkaResource.kafkaClient().inNamespace(INFRA_NAMESPACE).withName(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME).get()
            .getSpec().getEntityOperator().getTopicOperator().getReconciliationIntervalSeconds() * 1_000 * 2 + 5_000;
    }

    /**
     * Asserts the Kafka status observedGeneration and, per listener type, that
     * the advertised bootstrap host/port match the expected service addresses.
     */
    void assertKafkaStatus(long expectedObservedGeneration, String internalAddress) {
        KafkaStatus kafkaStatus = KafkaResource.kafkaClient().inNamespace(INFRA_NAMESPACE).withName(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME).get().getStatus();
        assertThat("Kafka cluster status has incorrect Observed Generation", kafkaStatus.getObservedGeneration(), is(expectedObservedGeneration));

        for (ListenerStatus listener : kafkaStatus.getListeners()) {
            switch (listener.getType()) {
                case Constants.TLS_LISTENER_DEFAULT_NAME:
                    assertThat("TLS bootstrap has incorrect port", listener.getAddresses().get(0).getPort(), is(9093));
                    assertThat("TLS bootstrap has incorrect host", listener.getAddresses().get(0).getHost(), is(internalAddress));
                    break;
                case Constants.PLAIN_LISTENER_DEFAULT_NAME:
                    assertThat("Plain bootstrap has incorrect port", listener.getAddresses().get(0).getPort(), is(9092));
                    assertThat("Plain bootstrap has incorrect host", listener.getAddresses().get(0).getHost(), is(internalAddress));
                    break;
                case Constants.EXTERNAL_LISTENER_DEFAULT_NAME:
                    // Nodeport is allocated dynamically; compare against the live Service.
                    Service extBootstrapService = kubeClient(INFRA_NAMESPACE).getClient().services()
                        .inNamespace(INFRA_NAMESPACE)
                        .withName(externalBootstrapServiceName(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME))
                        .get();
                    assertThat("External bootstrap has incorrect port", listener.getAddresses().get(0).getPort(), is(extBootstrapService.getSpec().getPorts().get(0).getNodePort()));
                    assertThat("External bootstrap has incorrect host", listener.getAddresses().get(0).getHost() != null);
                    break;
            }
        }
    }

    /** Asserts the MirrorMaker status observedGeneration. */
    void assertKafkaMirrorMakerStatus(long expectedObservedGeneration, String mirrorMakerName) {
        KafkaMirrorMakerStatus kafkaMirrorMakerStatus = KafkaMirrorMakerResource.kafkaMirrorMakerClient().inNamespace(INFRA_NAMESPACE).withName(mirrorMakerName).get().getStatus();
        assertThat("Kafka MirrorMaker cluster status has incorrect Observed Generation", kafkaMirrorMakerStatus.getObservedGeneration(), is(expectedObservedGeneration));
    }

    /**
     * Asserts the MirrorMaker2 status observedGeneration, REST URL and that
     * every listed connector is in state RUNNING.
     */
    @SuppressWarnings("unchecked")
    void assertKafkaMirrorMaker2Status(long expectedObservedGeneration, String apiUrl, String mirrorMaker2Name) {
        KafkaMirrorMaker2Status kafkaMirrorMaker2Status = KafkaMirrorMaker2Resource.kafkaMirrorMaker2Client().inNamespace(INFRA_NAMESPACE).withName(mirrorMaker2Name).get().getStatus();
        assertThat("Kafka MirrorMaker2 cluster status has incorrect Observed Generation", kafkaMirrorMaker2Status.getObservedGeneration(), is(expectedObservedGeneration));
        assertThat("Kafka MirrorMaker2 cluster status has incorrect URL", kafkaMirrorMaker2Status.getUrl(), is(apiUrl));
        for (Map<String, Object> connector : kafkaMirrorMaker2Status.getConnectors()) {
            assertThat("One of the connectors is not RUNNING:\n" + connector.toString(), ((Map<String, String>) connector.get("connector")).get("state"), is("RUNNING"));
        }
    }

    /** Asserts the Bridge status observedGeneration and URL. */
    void assertKafkaBridgeStatus(long expectedObservedGeneration, String bridgeAddress) {
        KafkaBridgeStatus kafkaBridgeStatus = KafkaBridgeResource.kafkaBridgeClient().inNamespace(INFRA_NAMESPACE).withName(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME).get().getStatus();
        assertThat("Kafka Bridge cluster status has incorrect Observed Generation", kafkaBridgeStatus.getObservedGeneration(), is(expectedObservedGeneration));
        assertThat("Kafka Bridge cluster status has incorrect URL", kafkaBridgeStatus.getUrl(), is(bridgeAddress));
    }

    /** Asserts the Connect status observedGeneration, URL and plugin list. */
    void assertKafkaConnectStatus(long expectedObservedGeneration, String expectedUrl) {
        KafkaConnectStatus kafkaConnectStatus = KafkaConnectResource.kafkaConnectClient().inNamespace(INFRA_NAMESPACE).withName(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME).get().getStatus();
        assertThat("Kafka Connect cluster status has incorrect Observed Generation", kafkaConnectStatus.getObservedGeneration(), is(expectedObservedGeneration));
        assertThat("Kafka Connect cluster status has incorrect URL", kafkaConnectStatus.getUrl(), is(expectedUrl));

        validateConnectPlugins(kafkaConnectStatus.getConnectorPlugins());
    }

    /** Asserts the Connect status advertises the stock file and MirrorMaker connector plugins. */
    void validateConnectPlugins(List<ConnectorPlugin> pluginsList) {
        assertThat(pluginsList, notNullValue());
        List<String> pluginsClasses = pluginsList.stream().map(p -> p.getConnectorClass()).collect(Collectors.toList());
        assertThat(pluginsClasses, hasItems("org.apache.kafka.connect.file.FileStreamSinkConnector",
                "org.apache.kafka.connect.file.FileStreamSourceConnector",
                "org.apache.kafka.connect.mirror.MirrorCheckpointConnector",
                "org.apache.kafka.connect.mirror.MirrorHeartbeatConnector",
                "org.apache.kafka.connect.mirror.MirrorSourceConnector"));
    }

    /**
     * Asserts the Connector status observedGeneration, name, type, topics and
     * that its current state matches one of the states in the
     * {@code connectorStates} string (e.g. {@code "RUNNING|UNASSIGNED"}).
     */
    @SuppressWarnings("unchecked")
    void assertKafkaConnectorStatus(long expectedObservedGeneration, String connectorStates, String type, List<String> topics) {
        KafkaConnectorStatus kafkaConnectorStatus = KafkaConnectorResource.kafkaConnectorClient().inNamespace(INFRA_NAMESPACE).withName(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME).get().getStatus();
        assertThat(kafkaConnectorStatus.getObservedGeneration(), is(expectedObservedGeneration));
        Map<String, Object> connectorStatus = kafkaConnectorStatus.getConnectorStatus();
        String currentState = ((LinkedHashMap<String, String>) connectorStatus.get("connector")).get("state");
        assertThat(connectorStates, containsString(currentState));
        assertThat(connectorStatus.get("name"), is(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME));
        assertThat(connectorStatus.get("type"), is(type));
        assertThat(connectorStatus.get("tasks"), notNullValue());
        assertThat(kafkaConnectorStatus.getTopics(), is(topics));
    }

    /** Asserts the KafkaTopic status observedGeneration. */
    void assertKafkaTopicStatus(long expectedObservedGeneration, String topicName) {
        KafkaTopicStatus kafkaTopicStatus = KafkaTopicResource.kafkaTopicClient().inNamespace(INFRA_NAMESPACE).withName(topicName).get().getStatus();
        assertThat("KafkaTopic status has incorrect Observed Generation", kafkaTopicStatus.getObservedGeneration(), is(expectedObservedGeneration));
    }

    /** Asserts the KafkaTopic carries a NotReady/PartitionDecreaseException condition. */
    void assertKafkaTopicDecreasePartitionsStatus(String topicName) {
        KafkaTopicStatus kafkaTopicStatus = KafkaTopicResource.kafkaTopicClient().inNamespace(INFRA_NAMESPACE).withName(topicName).get().getStatus();

        assertThat(kafkaTopicStatus.getConditions().stream()
            .anyMatch(condition -> condition.getType().equals(NotReady.toString())), is(true));
        assertThat(kafkaTopicStatus.getConditions().stream()
            .anyMatch(condition -> condition.getReason().equals("PartitionDecreaseException")), is(true));
        assertThat(kafkaTopicStatus.getConditions().stream()
            .anyMatch(condition -> condition.getMessage().contains("Number of partitions cannot be decreased")), is(true));
    }

    /** Asserts the KafkaTopic carries a NotReady/InvalidRequestException condition for min.insync.replicas. */
    void assertKafkaTopicWrongMinInSyncReplicasStatus(String topicName, String invalidValue) {
        KafkaTopicStatus kafkaTopicStatus = KafkaTopicResource.kafkaTopicClient().inNamespace(INFRA_NAMESPACE).withName(topicName).get().getStatus();

        assertThat(kafkaTopicStatus.getConditions().stream()
            .anyMatch(condition -> condition.getType().equals(NotReady.toString())), is(true));
        assertThat(kafkaTopicStatus.getConditions().stream()
            .anyMatch(condition -> condition.getReason().equals("InvalidRequestException")), is(true));
        assertThat(kafkaTopicStatus.getConditions().stream()
            .anyMatch(condition -> condition.getMessage().contains(String.format("Invalid value %s for configuration min.insync.replicas", invalidValue))), is(true));
    }
}
| |
/***
* ASM: a very small and fast Java bytecode manipulation framework
* Copyright (c) 2000-2011 INRIA, France Telecom
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
package edu.columbia.cs.psl.phosphor.org.objectweb.asm;
import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
/**
* A Java field or method type. This class can be used to make it easier to
* manipulate type and method descriptors.
*
* @author Eric Bruneton
* @author Chris Nokleberg
*/
public class Type {

    /** The sort of the {@code void} type. See {@link #getSort getSort}. */
    public static final int VOID = 0;

    /** The sort of the {@code boolean} type. See {@link #getSort getSort}. */
    public static final int BOOLEAN = 1;

    /** The sort of the {@code char} type. See {@link #getSort getSort}. */
    public static final int CHAR = 2;

    /** The sort of the {@code byte} type. See {@link #getSort getSort}. */
    public static final int BYTE = 3;

    /** The sort of the {@code short} type. See {@link #getSort getSort}. */
    public static final int SHORT = 4;

    /** The sort of the {@code int} type. See {@link #getSort getSort}. */
    public static final int INT = 5;

    /** The sort of the {@code float} type. See {@link #getSort getSort}. */
    public static final int FLOAT = 6;

    /** The sort of the {@code long} type. See {@link #getSort getSort}. */
    public static final int LONG = 7;

    /** The sort of the {@code double} type. See {@link #getSort getSort}. */
    public static final int DOUBLE = 8;

    /** The sort of array reference types. See {@link #getSort getSort}. */
    public static final int ARRAY = 9;

    /** The sort of object reference types. See {@link #getSort getSort}. */
    public static final int OBJECT = 10;

    /** The sort of method types. See {@link #getSort getSort}. */
    public static final int METHOD = 11;

    // For the primitive singletons below, the int passed as 'off' is a packed
    // value encoding, from most to least significant byte: the descriptor
    // character, the opcode offset for all instructions except xALOAD/xASTORE,
    // the opcode offset for xALOAD/xASTORE, and the size in stack slots.
    // See the javadoc of the 'off' field.

    /** The {@code void} type. */
    public static final Type VOID_TYPE = new Type(VOID, null, ('V' << 24)
            | (5 << 16) | (0 << 8) | 0, 1);

    /** The {@code boolean} type. */
    public static final Type BOOLEAN_TYPE = new Type(BOOLEAN, null, ('Z' << 24)
            | (0 << 16) | (5 << 8) | 1, 1);

    /** The {@code char} type. */
    public static final Type CHAR_TYPE = new Type(CHAR, null, ('C' << 24)
            | (0 << 16) | (6 << 8) | 1, 1);

    /** The {@code byte} type. */
    public static final Type BYTE_TYPE = new Type(BYTE, null, ('B' << 24)
            | (0 << 16) | (5 << 8) | 1, 1);

    /** The {@code short} type. */
    public static final Type SHORT_TYPE = new Type(SHORT, null, ('S' << 24)
            | (0 << 16) | (7 << 8) | 1, 1);

    /** The {@code int} type. */
    public static final Type INT_TYPE = new Type(INT, null, ('I' << 24)
            | (0 << 16) | (0 << 8) | 1, 1);

    /** The {@code float} type. */
    public static final Type FLOAT_TYPE = new Type(FLOAT, null, ('F' << 24)
            | (2 << 16) | (2 << 8) | 1, 1);

    /** The {@code long} type. */
    public static final Type LONG_TYPE = new Type(LONG, null, ('J' << 24)
            | (1 << 16) | (1 << 8) | 2, 1);

    /** The {@code double} type. */
    public static final Type DOUBLE_TYPE = new Type(DOUBLE, null, ('D' << 24)
            | (3 << 16) | (3 << 8) | 2, 1);

    // ------------------------------------------------------------------------
    // Fields
    // ------------------------------------------------------------------------

    /**
     * The sort of this Java type.
     */
    private final int sort;

    /**
     * A buffer containing the internal name of this Java type. This field is
     * only used for reference types; it is {@code null} for primitive types.
     */
    private final char[] buf;

    /**
     * The offset of the internal name of this Java type in {@link #buf buf}
     * or, for primitive types, the packed size, descriptor and getOpcode
     * offsets for this type (byte 0 contains the size in stack slots, byte 1
     * the opcode offset for IALOAD or IASTORE, byte 2 the opcode offset for
     * all other instructions, byte 3 the descriptor character).
     */
    private final int off;

    /**
     * The length of the internal name of this Java type.
     */
    private final int len;

    // ------------------------------------------------------------------------
    // Constructors
    // ------------------------------------------------------------------------

    /**
     * Constructs a reference type.
     *
     * @param sort
     *            the sort of the reference type to be constructed.
     * @param buf
     *            a buffer containing the descriptor of the previous type.
     * @param off
     *            the offset of this descriptor in the previous buffer.
     * @param len
     *            the length of this descriptor.
     */
    private Type(final int sort, final char[] buf, final int off, final int len) {
        this.sort = sort;
        this.buf = buf;
        this.off = off;
        this.len = len;
    }

    /**
     * Returns the Java type corresponding to the given type descriptor.
     *
     * @param typeDescriptor
     *            a field or method type descriptor.
     * @return the Java type corresponding to the given type descriptor.
     */
    public static Type getType(final String typeDescriptor) {
        return getType(typeDescriptor.toCharArray(), 0);
    }

    /**
     * Returns the Java type corresponding to the given internal name.
     *
     * @param internalName
     *            an internal name.
     * @return the Java type corresponding to the given internal name.
     */
    public static Type getObjectType(final String internalName) {
        char[] buf = internalName.toCharArray();
        return new Type(buf[0] == '[' ? ARRAY : OBJECT, buf, 0, buf.length);
    }

    /**
     * Returns the Java type corresponding to the given method descriptor.
     * Equivalent to <code>Type.getType(methodDescriptor)</code>.
     *
     * @param methodDescriptor
     *            a method descriptor.
     * @return the Java type corresponding to the given method descriptor.
     */
    public static Type getMethodType(final String methodDescriptor) {
        return getType(methodDescriptor.toCharArray(), 0);
    }

    /**
     * Returns the Java method type corresponding to the given argument and
     * return types.
     *
     * @param returnType
     *            the return type of the method.
     * @param argumentTypes
     *            the argument types of the method.
     * @return the Java type corresponding to the given argument and return
     *         types.
     */
    public static Type getMethodType(final Type returnType,
            final Type... argumentTypes) {
        return getType(getMethodDescriptor(returnType, argumentTypes));
    }

    /**
     * Returns the Java type corresponding to the given class.
     *
     * @param c
     *            a class.
     * @return the Java type corresponding to the given class.
     */
    public static Type getType(final Class<?> c) {
        if (c.isPrimitive()) {
            if (c == Integer.TYPE) {
                return INT_TYPE;
            } else if (c == Void.TYPE) {
                return VOID_TYPE;
            } else if (c == Boolean.TYPE) {
                return BOOLEAN_TYPE;
            } else if (c == Byte.TYPE) {
                return BYTE_TYPE;
            } else if (c == Character.TYPE) {
                return CHAR_TYPE;
            } else if (c == Short.TYPE) {
                return SHORT_TYPE;
            } else if (c == Double.TYPE) {
                return DOUBLE_TYPE;
            } else if (c == Float.TYPE) {
                return FLOAT_TYPE;
            } else /* if (c == Long.TYPE) */{
                return LONG_TYPE;
            }
        } else {
            return getType(getDescriptor(c));
        }
    }

    /**
     * Returns the Java method type corresponding to the given constructor.
     *
     * @param c
     *            a {@link Constructor Constructor} object.
     * @return the Java method type corresponding to the given constructor.
     */
    public static Type getType(final Constructor<?> c) {
        return getType(getConstructorDescriptor(c));
    }

    /**
     * Returns the Java method type corresponding to the given method.
     *
     * @param m
     *            a {@link Method Method} object.
     * @return the Java method type corresponding to the given method.
     */
    public static Type getType(final Method m) {
        return getType(getMethodDescriptor(m));
    }

    /**
     * Returns the Java types corresponding to the argument types of the given
     * method descriptor.
     *
     * @param methodDescriptor
     *            a method descriptor.
     * @return the Java types corresponding to the argument types of the given
     *         method descriptor.
     */
    public static Type[] getArgumentTypes(final String methodDescriptor) {
        char[] buf = methodDescriptor.toCharArray();
        // First pass: count the arguments. Each 'L...;' or primitive counts
        // as one; '[' prefixes are folded into the element type that follows.
        int off = 1;
        int size = 0;
        while (true) {
            char car = buf[off++];
            if (car == ')') {
                break;
            } else if (car == 'L') {
                while (buf[off++] != ';') {
                }
                ++size;
            } else if (car != '[') {
                ++size;
            }
        }
        // Second pass: parse each argument type. For OBJECT types, len does
        // not include the leading 'L' and trailing ';', hence the +2.
        Type[] args = new Type[size];
        off = 1;
        size = 0;
        while (buf[off] != ')') {
            args[size] = getType(buf, off);
            off += args[size].len + (args[size].sort == OBJECT ? 2 : 0);
            size += 1;
        }
        return args;
    }

    /**
     * Returns the Java types corresponding to the argument types of the given
     * method.
     *
     * @param method
     *            a method.
     * @return the Java types corresponding to the argument types of the given
     *         method.
     */
    public static Type[] getArgumentTypes(final Method method) {
        Class<?>[] classes = method.getParameterTypes();
        Type[] types = new Type[classes.length];
        for (int i = classes.length - 1; i >= 0; --i) {
            types[i] = getType(classes[i]);
        }
        return types;
    }

    /**
     * Returns the Java type corresponding to the return type of the given
     * method descriptor.
     *
     * @param methodDescriptor
     *            a method descriptor.
     * @return the Java type corresponding to the return type of the given
     *         method descriptor.
     */
    public static Type getReturnType(final String methodDescriptor) {
        char[] buf = methodDescriptor.toCharArray();
        return getType(buf, methodDescriptor.indexOf(')') + 1);
    }

    /**
     * Returns the Java type corresponding to the return type of the given
     * method.
     *
     * @param method
     *            a method.
     * @return the Java type corresponding to the return type of the given
     *         method.
     */
    public static Type getReturnType(final Method method) {
        return getType(method.getReturnType());
    }

    /**
     * Computes the size of the arguments and of the return value of a method.
     *
     * @param desc
     *            the descriptor of a method.
     * @return the size of the arguments of the method (plus one for the
     *         implicit this argument), argSize, and the size of its return
     *         value, retSize, packed into a single int i =
     *         <tt>(argSize &lt;&lt; 2) | retSize</tt> (argSize is therefore
     *         equal to <tt>i &gt;&gt; 2</tt>, and retSize to
     *         <tt>i &amp; 0x03</tt>).
     */
    public static int getArgumentsAndReturnSizes(final String desc) {
        int n = 1;
        int c = 1;
        while (true) {
            char car = desc.charAt(c++);
            if (car == ')') {
                car = desc.charAt(c);
                return n << 2
                        | (car == 'V' ? 0 : (car == 'D' || car == 'J' ? 2 : 1));
            } else if (car == 'L') {
                while (desc.charAt(c++) != ';') {
                }
                n += 1;
            } else if (car == '[') {
                while ((car = desc.charAt(c)) == '[') {
                    ++c;
                }
                // an array always occupies one slot; compensate for the +2
                // that a 'D' or 'J' element type adds below.
                if (car == 'D' || car == 'J') {
                    n -= 1;
                }
            } else if (car == 'D' || car == 'J') {
                n += 2;
            } else {
                n += 1;
            }
        }
    }

    /**
     * Returns the Java type corresponding to the given type descriptor. For
     * method descriptors, buf is supposed to contain nothing more than the
     * descriptor itself.
     *
     * @param buf
     *            a buffer containing a type descriptor.
     * @param off
     *            the offset of this descriptor in the previous buffer.
     * @return the Java type corresponding to the given type descriptor.
     */
    private static Type getType(final char[] buf, final int off) {
        int len;
        switch (buf[off]) {
        case 'V':
            return VOID_TYPE;
        case 'Z':
            return BOOLEAN_TYPE;
        case 'C':
            return CHAR_TYPE;
        case 'B':
            return BYTE_TYPE;
        case 'S':
            return SHORT_TYPE;
        case 'I':
            return INT_TYPE;
        case 'F':
            return FLOAT_TYPE;
        case 'J':
            return LONG_TYPE;
        case 'D':
            return DOUBLE_TYPE;
        case '[':
            len = 1;
            while (buf[off + len] == '[') {
                ++len;
            }
            if (buf[off + len] == 'L') {
                ++len;
                while (buf[off + len] != ';') {
                    ++len;
                }
            }
            return new Type(ARRAY, buf, off, len + 1);
        case 'L':
            // the stored name excludes the leading 'L' and trailing ';'
            len = 1;
            while (buf[off + len] != ';') {
                ++len;
            }
            return new Type(OBJECT, buf, off + 1, len - 1);
            // case '(':
        default:
            return new Type(METHOD, buf, off, buf.length - off);
        }
    }

    // ------------------------------------------------------------------------
    // Accessors
    // ------------------------------------------------------------------------

    /**
     * Returns the sort of this Java type.
     *
     * @return {@link #VOID VOID}, {@link #BOOLEAN BOOLEAN}, {@link #CHAR CHAR},
     *         {@link #BYTE BYTE}, {@link #SHORT SHORT}, {@link #INT INT},
     *         {@link #FLOAT FLOAT}, {@link #LONG LONG}, {@link #DOUBLE DOUBLE},
     *         {@link #ARRAY ARRAY}, {@link #OBJECT OBJECT} or {@link #METHOD
     *         METHOD}.
     */
    public int getSort() {
        return sort;
    }

    /**
     * Returns the number of dimensions of this array type. This method should
     * only be used for an array type.
     *
     * @return the number of dimensions of this array type.
     */
    public int getDimensions() {
        int i = 1;
        while (buf[off + i] == '[') {
            ++i;
        }
        return i;
    }

    /**
     * Returns the type of the elements of this array type. This method should
     * only be used for an array type.
     *
     * @return Returns the type of the elements of this array type.
     */
    public Type getElementType() {
        return getType(buf, off + getDimensions());
    }

    /**
     * Returns the binary name of the class corresponding to this type. This
     * method must not be used on method types.
     *
     * @return the binary name of the class corresponding to this type.
     */
    public String getClassName() {
        switch (sort) {
        case VOID:
            return "void";
        case BOOLEAN:
            return "boolean";
        case CHAR:
            return "char";
        case BYTE:
            return "byte";
        case SHORT:
            return "short";
        case INT:
            return "int";
        case FLOAT:
            return "float";
        case LONG:
            return "long";
        case DOUBLE:
            return "double";
        case ARRAY:
            StringBuilder sb = new StringBuilder(getElementType().getClassName());
            for (int i = getDimensions(); i > 0; --i) {
                sb.append("[]");
            }
            return sb.toString();
        case OBJECT:
            return new String(buf, off, len).replace('/', '.');
        default:
            // METHOD sorts have no class name
            return null;
        }
    }

    /**
     * Returns the internal name of the class corresponding to this object or
     * array type. The internal name of a class is its fully qualified name (as
     * returned by Class.getName(), where '.' are replaced by '/'). This method
     * should only be used for an object or array type.
     *
     * @return the internal name of the class corresponding to this object type.
     */
    public String getInternalName() {
        return new String(buf, off, len);
    }

    /**
     * Returns the argument types of methods of this type. This method should
     * only be used for method types.
     *
     * @return the argument types of methods of this type.
     */
    public Type[] getArgumentTypes() {
        return getArgumentTypes(getDescriptor());
    }

    /**
     * Returns the return type of methods of this type. This method should only
     * be used for method types.
     *
     * @return the return type of methods of this type.
     */
    public Type getReturnType() {
        return getReturnType(getDescriptor());
    }

    /**
     * Returns the size of the arguments and of the return value of methods of
     * this type. This method should only be used for method types.
     *
     * @return the size of the arguments (plus one for the implicit this
     *         argument), argSize, and the size of the return value, retSize,
     *         packed into a single int i =
     *         <tt>(argSize &lt;&lt; 2) | retSize</tt> (argSize is therefore
     *         equal to <tt>i &gt;&gt; 2</tt>, and retSize to
     *         <tt>i &amp; 0x03</tt>).
     */
    public int getArgumentsAndReturnSizes() {
        return getArgumentsAndReturnSizes(getDescriptor());
    }

    // ------------------------------------------------------------------------
    // Conversion to type descriptors
    // ------------------------------------------------------------------------

    /**
     * Returns the descriptor corresponding to this Java type.
     *
     * @return the descriptor corresponding to this Java type.
     */
    public String getDescriptor() {
        StringBuilder buf = new StringBuilder();
        getDescriptor(buf);
        return buf.toString();
    }

    /**
     * Returns the descriptor corresponding to the given argument and return
     * types.
     *
     * @param returnType
     *            the return type of the method.
     * @param argumentTypes
     *            the argument types of the method.
     * @return the descriptor corresponding to the given argument and return
     *         types.
     */
    public static String getMethodDescriptor(final Type returnType,
            final Type... argumentTypes) {
        StringBuilder buf = new StringBuilder();
        buf.append('(');
        for (int i = 0; i < argumentTypes.length; ++i) {
            argumentTypes[i].getDescriptor(buf);
        }
        buf.append(')');
        returnType.getDescriptor(buf);
        return buf.toString();
    }

    /**
     * Appends the descriptor corresponding to this Java type to the given
     * string builder.
     *
     * @param buf
     *            the string builder to which the descriptor must be appended.
     */
    private void getDescriptor(final StringBuilder buf) {
        if (this.buf == null) {
            // descriptor is in byte 3 of 'off' for primitive types (buf ==
            // null)
            buf.append((char) ((off & 0xFF000000) >>> 24));
        } else if (sort == OBJECT) {
            buf.append('L');
            buf.append(this.buf, off, len);
            buf.append(';');
        } else { // sort == ARRAY || sort == METHOD
            buf.append(this.buf, off, len);
        }
    }

    // ------------------------------------------------------------------------
    // Direct conversion from classes to type descriptors,
    // without intermediate Type objects
    // ------------------------------------------------------------------------

    /**
     * Returns the internal name of the given class. The internal name of a
     * class is its fully qualified name, as returned by Class.getName(), where
     * '.' are replaced by '/'.
     *
     * @param c
     *            an object or array class.
     * @return the internal name of the given class.
     */
    public static String getInternalName(final Class<?> c) {
        return c.getName().replace('.', '/');
    }

    /**
     * Returns the descriptor corresponding to the given Java type.
     *
     * @param c
     *            an object class, a primitive class or an array class.
     * @return the descriptor corresponding to the given class.
     */
    public static String getDescriptor(final Class<?> c) {
        StringBuilder buf = new StringBuilder();
        getDescriptor(buf, c);
        return buf.toString();
    }

    /**
     * Returns the descriptor corresponding to the given constructor.
     *
     * @param c
     *            a {@link Constructor Constructor} object.
     * @return the descriptor of the given constructor.
     */
    public static String getConstructorDescriptor(final Constructor<?> c) {
        Class<?>[] parameters = c.getParameterTypes();
        StringBuilder buf = new StringBuilder();
        buf.append('(');
        for (int i = 0; i < parameters.length; ++i) {
            getDescriptor(buf, parameters[i]);
        }
        // a constructor always "returns" void
        return buf.append(")V").toString();
    }

    /**
     * Returns the descriptor corresponding to the given method.
     *
     * @param m
     *            a {@link Method Method} object.
     * @return the descriptor of the given method.
     */
    public static String getMethodDescriptor(final Method m) {
        Class<?>[] parameters = m.getParameterTypes();
        StringBuilder buf = new StringBuilder();
        buf.append('(');
        for (int i = 0; i < parameters.length; ++i) {
            getDescriptor(buf, parameters[i]);
        }
        buf.append(')');
        getDescriptor(buf, m.getReturnType());
        return buf.toString();
    }

    /**
     * Appends the descriptor of the given class to the given string builder.
     *
     * @param buf
     *            the string builder to which the descriptor must be appended.
     * @param c
     *            the class whose descriptor must be computed.
     */
    private static void getDescriptor(final StringBuilder buf, final Class<?> c) {
        Class<?> d = c;
        while (true) {
            if (d.isPrimitive()) {
                char car;
                if (d == Integer.TYPE) {
                    car = 'I';
                } else if (d == Void.TYPE) {
                    car = 'V';
                } else if (d == Boolean.TYPE) {
                    car = 'Z';
                } else if (d == Byte.TYPE) {
                    car = 'B';
                } else if (d == Character.TYPE) {
                    car = 'C';
                } else if (d == Short.TYPE) {
                    car = 'S';
                } else if (d == Double.TYPE) {
                    car = 'D';
                } else if (d == Float.TYPE) {
                    car = 'F';
                } else /* if (d == Long.TYPE) */{
                    car = 'J';
                }
                buf.append(car);
                return;
            } else if (d.isArray()) {
                buf.append('[');
                d = d.getComponentType();
            } else {
                buf.append('L');
                String name = d.getName();
                int len = name.length();
                for (int i = 0; i < len; ++i) {
                    char car = name.charAt(i);
                    buf.append(car == '.' ? '/' : car);
                }
                buf.append(';');
                return;
            }
        }
    }

    // ------------------------------------------------------------------------
    // Corresponding size and opcodes
    // ------------------------------------------------------------------------

    /**
     * Returns the size of values of this type. This method must not be used for
     * method types.
     *
     * @return the size of values of this type, i.e., 2 for <tt>long</tt> and
     *         <tt>double</tt>, 0 for <tt>void</tt> and 1 otherwise.
     */
    public int getSize() {
        // the size is in byte 0 of 'off' for primitive types (buf == null)
        return buf == null ? (off & 0xFF) : 1;
    }

    /**
     * Returns a JVM instruction opcode adapted to this Java type. This method
     * must not be used for method types.
     *
     * @param opcode
     *            a JVM instruction opcode. This opcode must be one of ILOAD,
     *            ISTORE, IALOAD, IASTORE, IADD, ISUB, IMUL, IDIV, IREM, INEG,
     *            ISHL, ISHR, IUSHR, IAND, IOR, IXOR and IRETURN.
     * @return an opcode that is similar to the given opcode, but adapted to
     *         this Java type. For example, if this type is <tt>float</tt> and
     *         <tt>opcode</tt> is IRETURN, this method returns FRETURN.
     */
    public int getOpcode(final int opcode) {
        if (opcode == Opcodes.IALOAD || opcode == Opcodes.IASTORE) {
            // the offset for IALOAD or IASTORE is in byte 1 of 'off' for
            // primitive types (buf == null)
            return opcode + (buf == null ? (off & 0xFF00) >> 8 : 4);
        } else {
            // the offset for other instructions is in byte 2 of 'off' for
            // primitive types (buf == null)
            return opcode + (buf == null ? (off & 0xFF0000) >> 16 : 4);
        }
    }

    // ------------------------------------------------------------------------
    // Equals, hashCode and toString
    // ------------------------------------------------------------------------

    /**
     * Tests if the given object is equal to this type.
     *
     * @param o
     *            the object to be compared to this type.
     * @return <tt>true</tt> if the given object is equal to this type.
     */
    @Override
    public boolean equals(final Object o) {
        if (this == o) {
            return true;
        }
        if (!(o instanceof Type)) {
            return false;
        }
        Type t = (Type) o;
        if (sort != t.sort) {
            return false;
        }
        // reference sorts (ARRAY, OBJECT, METHOD) compare their name buffers;
        // primitive sorts are singletons distinguished by 'sort' alone.
        if (sort >= ARRAY) {
            if (len != t.len) {
                return false;
            }
            for (int i = off, j = t.off, end = i + len; i < end; i++, j++) {
                if (buf[i] != t.buf[j]) {
                    return false;
                }
            }
        }
        return true;
    }

    /**
     * Returns a hash code value for this type.
     *
     * @return a hash code value for this type.
     */
    @Override
    public int hashCode() {
        int hc = 13 * sort;
        if (sort >= ARRAY) {
            for (int i = off, end = i + len; i < end; i++) {
                hc = 17 * (hc + buf[i]);
            }
        }
        return hc;
    }

    /**
     * Returns a string representation of this type.
     *
     * @return the descriptor of this type.
     */
    @Override
    public String toString() {
        return getDescriptor();
    }
}
| |
/*
* Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.applicationautoscaling.model;
import java.io.Serializable;
import javax.annotation.Generated;
/**
*
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06/DescribeScalingPolicies"
* target="_top">AWS API Documentation</a>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class DescribeScalingPoliciesResult extends com.amazonaws.AmazonWebServiceResult<com.amazonaws.ResponseMetadata> implements Serializable, Cloneable {

    /**
     * <p>
     * Information about the scaling policies.
     * </p>
     */
    private java.util.List<ScalingPolicy> scalingPolicies;

    /**
     * <p>
     * The token required to get the next set of results. This value is <code>null</code> if there are no more results
     * to return.
     * </p>
     */
    private String nextToken;

    /**
     * Returns the scaling policies carried by this result.
     *
     * @return Information about the scaling policies.
     */
    public java.util.List<ScalingPolicy> getScalingPolicies() {
        return scalingPolicies;
    }

    /**
     * Replaces the scaling policies with a defensive copy of the given collection
     * (or clears them when the collection is <code>null</code>).
     *
     * @param scalingPolicies
     *        Information about the scaling policies.
     */
    public void setScalingPolicies(java.util.Collection<ScalingPolicy> scalingPolicies) {
        this.scalingPolicies = (scalingPolicies == null)
                ? null
                : new java.util.ArrayList<ScalingPolicy>(scalingPolicies);
    }

    /**
     * Appends the given scaling policies to this result.
     * <p>
     * <b>NOTE:</b> This method appends the values to the existing list (if any). Use
     * {@link #setScalingPolicies(java.util.Collection)} or {@link #withScalingPolicies(java.util.Collection)} if you
     * want to override the existing values.
     * </p>
     *
     * @param scalingPolicies
     *        Information about the scaling policies.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public DescribeScalingPoliciesResult withScalingPolicies(ScalingPolicy... scalingPolicies) {
        if (this.scalingPolicies == null) {
            this.scalingPolicies = new java.util.ArrayList<ScalingPolicy>(scalingPolicies.length);
        }
        java.util.Collections.addAll(this.scalingPolicies, scalingPolicies);
        return this;
    }

    /**
     * Replaces the scaling policies (fluent variant of
     * {@link #setScalingPolicies(java.util.Collection)}).
     *
     * @param scalingPolicies
     *        Information about the scaling policies.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public DescribeScalingPoliciesResult withScalingPolicies(java.util.Collection<ScalingPolicy> scalingPolicies) {
        setScalingPolicies(scalingPolicies);
        return this;
    }

    /**
     * Sets the pagination token required to get the next set of results. The value is
     * <code>null</code> if there are no more results to return.
     *
     * @param nextToken
     *        The token required to get the next set of results.
     */
    public void setNextToken(String nextToken) {
        this.nextToken = nextToken;
    }

    /**
     * Returns the pagination token required to get the next set of results.
     *
     * @return The token required to get the next set of results. This value is <code>null</code> if there are no more
     *         results to return.
     */
    public String getNextToken() {
        return this.nextToken;
    }

    /**
     * Sets the pagination token (fluent variant of {@link #setNextToken(String)}).
     *
     * @param nextToken
     *        The token required to get the next set of results.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public DescribeScalingPoliciesResult withNextToken(String nextToken) {
        setNextToken(nextToken);
        return this;
    }

    /**
     * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
     * redacted from this string using a placeholder value.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder text = new StringBuilder();
        text.append("{");
        if (getScalingPolicies() != null) {
            text.append("ScalingPolicies: ").append(getScalingPolicies()).append(",");
        }
        if (getNextToken() != null) {
            text.append("NextToken: ").append(getNextToken());
        }
        return text.append("}").toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (!(obj instanceof DescribeScalingPoliciesResult)) {
            return false;
        }
        DescribeScalingPoliciesResult that = (DescribeScalingPoliciesResult) obj;
        // Objects.equals reproduces the generated null-XOR-then-equals logic.
        return java.util.Objects.equals(that.getScalingPolicies(), this.getScalingPolicies())
                && java.util.Objects.equals(that.getNextToken(), this.getNextToken());
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + java.util.Objects.hashCode(getScalingPolicies());
        result = prime * result + java.util.Objects.hashCode(getNextToken());
        return result;
    }

    @Override
    public DescribeScalingPoliciesResult clone() {
        try {
            return (DescribeScalingPoliciesResult) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
        }
    }
}
| |
package gov.nasa.jpl.mbee.mdk.docgen.validation.impl;
import gov.nasa.jpl.mbee.mdk.docgen.validation.DocGenValidationPackage;
import gov.nasa.jpl.mbee.mdk.docgen.validation.Rule;
import gov.nasa.jpl.mbee.mdk.docgen.validation.Suite;
import org.eclipse.emf.common.notify.Notification;
import org.eclipse.emf.common.notify.NotificationChain;
import org.eclipse.emf.common.util.EList;
import org.eclipse.emf.ecore.EClass;
import org.eclipse.emf.ecore.InternalEObject;
import org.eclipse.emf.ecore.impl.ENotificationImpl;
import org.eclipse.emf.ecore.impl.EObjectImpl;
import org.eclipse.emf.ecore.util.EObjectContainmentEList;
import org.eclipse.emf.ecore.util.InternalEList;
import java.util.Collection;
/**
* <!-- begin-user-doc --> An implementation of the model object '
* <em><b>Suite</b></em>'. <!-- end-user-doc -->
* <p>
* The following features are implemented:
* <ul>
* <li>
* {@link SuiteImpl#isShowDetail
* <em>Show Detail</em>}</li>
* <li>
* {@link SuiteImpl#isShowSummary
* <em>Show Summary</em>}</li>
* <li>
* {@link SuiteImpl#isOwnSection
* <em>Own Section</em>}</li>
* <li>{@link SuiteImpl#getName
* <em>Name</em>}</li>
* <li>
* {@link SuiteImpl#getRules
* <em>Rules</em>}</li>
* </ul>
* </p>
*
* @generated
*/
public class SuiteImpl extends EObjectImpl implements Suite {
    // EMF-generated attribute storage: each attribute pairs a static default
    // (X_EDEFAULT) with a cached per-instance value initialized to it.

    /**
     * The default value of the '{@link #isShowDetail() <em>Show Detail</em>}'
     * attribute. <!-- begin-user-doc --> <!-- end-user-doc -->
     *
     * @generated
     * @ordered
     * @see #isShowDetail()
     */
    protected static final boolean SHOW_DETAIL_EDEFAULT = false;

    /**
     * The cached value of the '{@link #isShowDetail() <em>Show Detail</em>}'
     * attribute. <!-- begin-user-doc --> <!-- end-user-doc -->
     *
     * @generated
     * @ordered
     * @see #isShowDetail()
     */
    protected boolean showDetail = SHOW_DETAIL_EDEFAULT;

    /**
     * The default value of the '{@link #isShowSummary() <em>Show Summary</em>}'
     * attribute. <!-- begin-user-doc --> <!-- end-user-doc -->
     *
     * @generated
     * @ordered
     * @see #isShowSummary()
     */
    protected static final boolean SHOW_SUMMARY_EDEFAULT = false;

    /**
     * The cached value of the '{@link #isShowSummary() <em>Show Summary</em>}'
     * attribute. <!-- begin-user-doc --> <!-- end-user-doc -->
     *
     * @generated
     * @ordered
     * @see #isShowSummary()
     */
    protected boolean showSummary = SHOW_SUMMARY_EDEFAULT;

    /**
     * The default value of the '{@link #isOwnSection() <em>Own Section</em>}'
     * attribute. <!-- begin-user-doc --> <!-- end-user-doc -->
     *
     * @generated
     * @ordered
     * @see #isOwnSection()
     */
    protected static final boolean OWN_SECTION_EDEFAULT = false;

    /**
     * The cached value of the '{@link #isOwnSection() <em>Own Section</em>}'
     * attribute. <!-- begin-user-doc --> <!-- end-user-doc -->
     *
     * @generated
     * @ordered
     * @see #isOwnSection()
     */
    protected boolean ownSection = OWN_SECTION_EDEFAULT;

    /**
     * The default value of the '{@link #getName() <em>Name</em>}' attribute.
     * <!-- begin-user-doc --> <!-- end-user-doc -->
     *
     * @generated
     * @ordered
     * @see #getName()
     */
    protected static final String NAME_EDEFAULT = null;

    /**
     * The cached value of the '{@link #getName() <em>Name</em>}' attribute.
     * <!-- begin-user-doc --> <!-- end-user-doc -->
     *
     * @generated
     * @ordered
     * @see #getName()
     */
    protected String name = NAME_EDEFAULT;

    /**
     * The cached value of the '{@link #getRules() <em>Rules</em>}' containment
     * reference list. <!-- begin-user-doc --> <!-- end-user-doc -->
     * Lazily created in {@link #getRules()}; may be null until first access.
     *
     * @generated
     * @ordered
     * @see #getRules()
     */
    protected EList<Rule> rules;
    /**
     * <!-- begin-user-doc -->
     * Default constructor; attribute values start at their EDEFAULT constants.
     * <!-- end-user-doc -->
     *
     * @generated
     */
    protected SuiteImpl() {
        super();
    }
    /**
     * <!-- begin-user-doc -->
     * Returns the static EMF metaclass for Suite, used by the reflective API.
     * <!-- end-user-doc -->
     *
     * @generated
     */
    @Override
    protected EClass eStaticClass() {
        return DocGenValidationPackage.Literals.SUITE;
    }
    /**
     * <!-- begin-user-doc -->
     * Returns the cached 'Show Detail' flag.
     * <!-- end-user-doc -->
     *
     * @generated
     */
    @Override
    public boolean isShowDetail() {
        return showDetail;
    }
    /**
     * <!-- begin-user-doc -->
     * Sets the 'Show Detail' flag and, when adapters are attached, emits a SET
     * notification carrying the old and new values.
     * <!-- end-user-doc -->
     *
     * @generated
     */
    @Override
    public void setShowDetail(boolean newShowDetail) {
        boolean oldShowDetail = showDetail;
        showDetail = newShowDetail;
        if (eNotificationRequired()) {
            eNotify(new ENotificationImpl(this, Notification.SET, DocGenValidationPackage.SUITE__SHOW_DETAIL,
                    oldShowDetail, showDetail));
        }
    }
    /**
     * <!-- begin-user-doc -->
     * Returns the cached 'Show Summary' flag.
     * <!-- end-user-doc -->
     *
     * @generated
     */
    @Override
    public boolean isShowSummary() {
        return showSummary;
    }
    /**
     * <!-- begin-user-doc -->
     * Sets the 'Show Summary' flag and, when adapters are attached, emits a SET
     * notification carrying the old and new values.
     * <!-- end-user-doc -->
     *
     * @generated
     */
    @Override
    public void setShowSummary(boolean newShowSummary) {
        boolean oldShowSummary = showSummary;
        showSummary = newShowSummary;
        if (eNotificationRequired()) {
            eNotify(new ENotificationImpl(this, Notification.SET, DocGenValidationPackage.SUITE__SHOW_SUMMARY,
                    oldShowSummary, showSummary));
        }
    }
/**
 * <!-- begin-user-doc --> Returns the cached own-section flag. <!-- end-user-doc -->
 *
 * @generated
 */
@Override
public boolean isOwnSection() {
	return ownSection;
}
/**
 * <!-- begin-user-doc --> Sets the own-section flag and, if any adapters require
 * notification, emits a SET notification carrying the old and new values. <!-- end-user-doc -->
 *
 * @generated
 */
@Override
public void setOwnSection(boolean newOwnSection) {
	boolean oldOwnSection = ownSection;
	ownSection = newOwnSection;
	if (eNotificationRequired()) {
		eNotify(new ENotificationImpl(this, Notification.SET, DocGenValidationPackage.SUITE__OWN_SECTION,
				oldOwnSection, ownSection));
	}
}
/**
 * <!-- begin-user-doc --> Returns the cached name; may be null (the default). <!-- end-user-doc -->
 *
 * @generated
 */
@Override
public String getName() {
	return name;
}
/**
 * <!-- begin-user-doc --> Sets the name and, if any adapters require notification,
 * emits a SET notification carrying the old and new values. <!-- end-user-doc -->
 *
 * @generated
 */
@Override
public void setName(String newName) {
	String oldName = name;
	name = newName;
	if (eNotificationRequired()) {
		eNotify(new ENotificationImpl(this, Notification.SET, DocGenValidationPackage.SUITE__NAME, oldName,
				name));
	}
}
/**
 * <!-- begin-user-doc --> Returns the containment list of rules, creating the backing
 * {@link EObjectContainmentEList} lazily on first access. <!-- end-user-doc -->
 *
 * @generated
 */
@Override
public EList<Rule> getRules() {
	if (rules == null) {
		rules = new EObjectContainmentEList<Rule>(Rule.class, this, DocGenValidationPackage.SUITE__RULES);
	}
	return rules;
}
/**
 * <!-- begin-user-doc --> Removes the given object from the rules containment list when
 * the inverse end of the reference is severed; other features are delegated to the
 * superclass. <!-- end-user-doc -->
 *
 * @generated
 */
@Override
public NotificationChain eInverseRemove(InternalEObject otherEnd, int featureID, NotificationChain msgs) {
	switch (featureID) {
	case DocGenValidationPackage.SUITE__RULES:
		return ((InternalEList<?>) getRules()).basicRemove(otherEnd, msgs);
	}
	return super.eInverseRemove(otherEnd, featureID, msgs);
}
/**
 * <!-- begin-user-doc --> Reflectively reads the feature identified by {@code featureID},
 * delegating unknown features to the superclass. <!-- end-user-doc -->
 *
 * @generated
 */
@Override
public Object eGet(int featureID, boolean resolve, boolean coreType) {
	switch (featureID) {
	case DocGenValidationPackage.SUITE__SHOW_DETAIL:
		return isShowDetail();
	case DocGenValidationPackage.SUITE__SHOW_SUMMARY:
		return isShowSummary();
	case DocGenValidationPackage.SUITE__OWN_SECTION:
		return isOwnSection();
	case DocGenValidationPackage.SUITE__NAME:
		return getName();
	case DocGenValidationPackage.SUITE__RULES:
		return getRules();
	}
	return super.eGet(featureID, resolve, coreType);
}
/**
 * <!-- begin-user-doc --> Reflectively writes the feature identified by {@code featureID};
 * the rules list is replaced wholesale (clear + addAll). Unknown features are delegated
 * to the superclass. <!-- end-user-doc -->
 *
 * @generated
 */
@SuppressWarnings("unchecked")
@Override
public void eSet(int featureID, Object newValue) {
	switch (featureID) {
	case DocGenValidationPackage.SUITE__SHOW_DETAIL:
		setShowDetail((Boolean) newValue);
		return;
	case DocGenValidationPackage.SUITE__SHOW_SUMMARY:
		setShowSummary((Boolean) newValue);
		return;
	case DocGenValidationPackage.SUITE__OWN_SECTION:
		setOwnSection((Boolean) newValue);
		return;
	case DocGenValidationPackage.SUITE__NAME:
		setName((String) newValue);
		return;
	case DocGenValidationPackage.SUITE__RULES:
		getRules().clear();
		getRules().addAll((Collection<? extends Rule>) newValue);
		return;
	}
	super.eSet(featureID, newValue);
}
/**
 * <!-- begin-user-doc --> Reflectively resets the feature identified by {@code featureID}
 * to its default value (rules are simply cleared). Unknown features are delegated to the
 * superclass. <!-- end-user-doc -->
 *
 * @generated
 */
@Override
public void eUnset(int featureID) {
	switch (featureID) {
	case DocGenValidationPackage.SUITE__SHOW_DETAIL:
		setShowDetail(SHOW_DETAIL_EDEFAULT);
		return;
	case DocGenValidationPackage.SUITE__SHOW_SUMMARY:
		setShowSummary(SHOW_SUMMARY_EDEFAULT);
		return;
	case DocGenValidationPackage.SUITE__OWN_SECTION:
		setOwnSection(OWN_SECTION_EDEFAULT);
		return;
	case DocGenValidationPackage.SUITE__NAME:
		setName(NAME_EDEFAULT);
		return;
	case DocGenValidationPackage.SUITE__RULES:
		getRules().clear();
		return;
	}
	super.eUnset(featureID);
}
/**
 * <!-- begin-user-doc --> Reports whether the feature identified by {@code featureID}
 * differs from its default value; unknown features are delegated to the superclass. <!-- end-user-doc -->
 *
 * @generated
 */
@Override
public boolean eIsSet(int featureID) {
	switch (featureID) {
	case DocGenValidationPackage.SUITE__SHOW_DETAIL:
		return showDetail != SHOW_DETAIL_EDEFAULT;
	case DocGenValidationPackage.SUITE__SHOW_SUMMARY:
		return showSummary != SHOW_SUMMARY_EDEFAULT;
	case DocGenValidationPackage.SUITE__OWN_SECTION:
		return ownSection != OWN_SECTION_EDEFAULT;
	case DocGenValidationPackage.SUITE__NAME:
		return NAME_EDEFAULT == null ? name != null : !NAME_EDEFAULT.equals(name);
	case DocGenValidationPackage.SUITE__RULES:
		return rules != null && !rules.isEmpty();
	}
	return super.eIsSet(featureID);
}
/**
 * <!-- begin-user-doc --> Returns a debug string listing the attribute values; proxies
 * fall back to the superclass representation. <!-- end-user-doc -->
 *
 * @generated
 */
@Override
public String toString() {
	if (eIsProxy()) {
		return super.toString();
	}
	StringBuffer result = new StringBuffer(super.toString());
	result.append(" (showDetail: ");
	result.append(showDetail);
	result.append(", showSummary: ");
	result.append(showSummary);
	result.append(", ownSection: ");
	result.append(ownSection);
	result.append(", name: ");
	result.append(name);
	result.append(')');
	return result.toString();
}
} // SuiteImpl
| |
package org.jgroups.protocols;
import org.jgroups.*;
import org.jgroups.annotations.Experimental;
import org.jgroups.annotations.MBean;
import org.jgroups.annotations.ManagedAttribute;
import org.jgroups.annotations.ManagedOperation;
import org.jgroups.stack.Protocol;
import org.jgroups.util.Bits;
import org.jgroups.util.MessageBatch;
import org.jgroups.util.Table;
import org.jgroups.util.Util;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.BiConsumer;
import java.util.function.Supplier;
/**
 * Implementation of a total order protocol using a sequencer (unicast-unicast-multicast):
 * a sender queues its to-be-multicast message, unicasts a seqno REQUEST to the coordinator
 * (the sequencer), receives a RESPONSE with a contiguous range of seqnos, and then multicasts
 * the queued messages tagged with those seqnos. Receivers deliver BCASTs in seqno order.
 *
 * Todo 1: on a sequencer change, the new coordinator needs to determine the highest seqno from all members
 * Todo 2: on a sequencer change, if a member has pending messages in the forward-queue, they need to be resent
 * Todo 3: this protocol is currently broken, as a new member doesn't get the highest seqno and thus creates its table
 * at offset=0, which means it will queue all messages higher than 0, and eventually run out of memory!!!
 *
 * @author Bela Ban
 * @author Andrei Palade
 */
@Experimental
@MBean(description="Implementation of total order protocol using a sequencer (unicast-unicast-multicast)")
public class SEQUENCER2 extends Protocol {
    protected volatile Address                coord;          // the current coordinator (= sequencer)
    protected volatile View                   view;
    protected volatile boolean                is_coord=false;

    /** Seqno generator; only used by the sequencer */
    protected final AtomicLong                seqno=new AtomicLong(0);

    // messages to be multicast are added to this queue; when seqnos are received from the sequencer, we remove and
    // send messages from the queue
    protected final BlockingQueue<Message>    fwd_queue=new LinkedBlockingQueue<>(20000); // make this configurable

    // the number of seqno requests sent to the sequencer
    protected final AtomicInteger             seqno_reqs=new AtomicInteger(0);

    protected volatile boolean                running=true;

    protected static final BiConsumer<MessageBatch,Message> BATCH_ACCUMULATOR=MessageBatch::add;

    @ManagedAttribute protected long request_msgs;
    @ManagedAttribute protected long response_msgs;
    @ManagedAttribute protected long bcasts_sent;
    @ManagedAttribute protected long bcasts_received;
    @ManagedAttribute protected long bcasts_delivered;
    @ManagedAttribute protected long sent_requests;
    @ManagedAttribute protected long received_requests;
    @ManagedAttribute protected long sent_responses;
    @ManagedAttribute protected long received_responses;

    /** Received BCAST messages, ordered by seqno; removed and delivered in total order */
    protected Table<Message> received_msgs = new Table<>();

    @ManagedAttribute
    public boolean isCoordinator()  {return is_coord;}
    public Address getCoordinator() {return coord;}

    @ManagedAttribute(description="Number of messages in the forward-queue")
    public int getFwdQueueSize() {return fwd_queue.size();}

    @ManagedOperation
    public void resetStats() {
        request_msgs=response_msgs=bcasts_sent=bcasts_received=bcasts_delivered=0L;
        sent_requests=received_requests=sent_responses=received_responses=0L; // reset number of sent and received requests and responses
    }

    public void start() throws Exception {
        super.start();
        running=true;
    }

    public void stop() {
        running=false;
        super.stop();
    }

    /** Intercepts view changes travelling down the stack; everything else is passed through */
    public Object down(Event evt) {
        switch(evt.getType()) {
            case Event.VIEW_CHANGE:
                handleViewChange(evt.getArg());
                break;
            case Event.TMP_VIEW:
                handleTmpView(evt.getArg());
                break;
        }
        return down_prot.down(evt);
    }

    /**
     * Queues a to-be-multicast message and asks the sequencer for seqnos. Unicasts and messages
     * flagged NO_TOTAL_ORDER or OOB bypass total ordering and are passed down unchanged.
     * @return always null for queued multicasts (they are sent later, on RESPONSE)
     */
    public Object down(Message msg) {
        if(msg.getDest() != null || msg.isFlagSet(Message.Flag.NO_TOTAL_ORDER) || msg.isFlagSet(Message.Flag.OOB))
            return down_prot.down(msg);

        if(msg.getSrc() == null)
            msg.setSrc(local_addr);

        try {
            fwd_queue.put(msg); // may block when the queue is full
            // Only the thread whose increment took the counter from 0 sends a REQUEST; increments
            // done concurrently by other threads are folded into that single request (num_reqs).
            if(seqno_reqs.getAndIncrement() == 0) {
                int num_reqs=seqno_reqs.get();
                sendSeqnoRequest(num_reqs);
            }
        }
        catch(InterruptedException e) {
            if(!running)
                return null;
            throw new RuntimeException(e);
        }
        return null; // don't pass down
    }

    public Object up(Event evt) {
        switch(evt.getType()) {
            case Event.VIEW_CHANGE:
                Object retval=up_prot.up(evt);
                handleViewChange(evt.getArg());
                return retval;
            case Event.TMP_VIEW:
                handleTmpView(evt.getArg());
                break;
        }
        return up_prot.up(evt);
    }

    /**
     * Dispatches on the header type: REQUEST (handled by the coordinator only), RESPONSE
     * (handled by the requester) and BCAST (delivered in total order by everyone).
     */
    public Object up(Message msg) {
        SequencerHeader hdr;

        if(msg.isFlagSet(Message.Flag.NO_TOTAL_ORDER) || msg.isFlagSet(Message.Flag.OOB))
            return up_prot.up(msg);

        hdr=msg.getHeader(this.id);
        if(hdr == null)
            return up_prot.up(msg); // pass up

        switch(hdr.type) {
            case SequencerHeader.REQUEST:
                if(!is_coord) {
                    log.error("%s: non-coord; dropping REQUEST request from %s", local_addr, msg.getSrc());
                    return null;
                }
                Address sender=msg.getSrc();
                if(view != null && !view.containsMember(sender)) {
                    // fixed: the format string previously had "... view=%s" + view concatenated onto it,
                    // duplicating the view and leaving a dangling %s
                    log.error("%s: dropping REQUEST from non-member %s; view=%s", local_addr, sender, view);
                    return null;
                }
                // hand out the contiguous range [new_seqno .. new_seqno + num_seqnos - 1]
                long new_seqno=seqno.getAndAdd(hdr.num_seqnos) +1;
                sendSeqnoResponse(sender, new_seqno, hdr.num_seqnos);
                received_requests++;
                break;

            case SequencerHeader.RESPONSE:
                Address coordinator=msg.getSrc();
                if(view != null && !view.containsMember(coordinator)) {
                    // fixed: local_addr was previously concatenated in front of the format string
                    // AND passed as the first argument, producing a mangled message
                    log.error("%s: dropping RESPONSE from non-coordinator %s; view=%s", local_addr, coordinator, view);
                    return null;
                }
                long send_seqno=hdr.seqno;
                for(int i=0; i < hdr.num_seqnos; i++) {
                    Message bcast_msg=fwd_queue.poll();
                    if(bcast_msg == null) {
                        log.error(Util.getMessage("Received%DSeqnosButFwdqueueIsEmpty"), hdr.num_seqnos);
                        break;
                    }
                    if(log.isTraceEnabled())
                        log.trace("%s: broadcasting %d", local_addr, send_seqno);
                    broadcast(bcast_msg, send_seqno++);
                }
                // if more requests were queued while this response was in flight, ask again for the rest
                int num_reqs=seqno_reqs.addAndGet(-hdr.num_seqnos); // was: redundant "> 0 && num_reqs > 0"
                if(num_reqs > 0)
                    sendSeqnoRequest(num_reqs);
                break;

            case SequencerHeader.BCAST:
                deliver(msg, hdr);
                bcasts_received++;
                break;
        }
        return null;
    }

    // todo: better impl: add all sequenced messages of a batch into the table in a single operation
    // instead of calling up(Message) per message (a sketch existed here; see version control history)
    public void up(MessageBatch batch) {
        Iterator<Message> it=batch.iterator();
        while(it.hasNext()) {
            Message msg=it.next();
            if(msg.isFlagSet(Message.Flag.NO_TOTAL_ORDER) || msg.isFlagSet(Message.Flag.OOB) || msg.getHeader(id) == null)
                continue;
            it.remove();
            // simplistic implementation: process sequenced messages one at a time
            try {
                up(msg);
            }
            catch(Throwable t) {
                log.error(Util.getMessage("FailedPassingUpMessage"), t);
            }
        }
        if(!batch.isEmpty())
            up_prot.up(batch);
    }

    /* --------------------------------- Private Methods ----------------------------------- */

    /** Installs a newer view (older views are ignored) and recomputes coord / is_coord */
    protected void handleViewChange(View v) {
        List<Address> mbrs=v.getMembers();
        if(mbrs.isEmpty()) return;

        if(view == null || view.compareTo(v) < 0)
            view=v;
        else
            return; // stale view: keep the one we have

        Address existing_coord=coord, new_coord=mbrs.get(0);
        boolean coord_changed=!Objects.equals(existing_coord, new_coord);
        if(coord_changed && new_coord != null) {
            coord=new_coord;
            // todo: if I'm the new coord, get the highest seqno from all members. If not, re-send my pending seqno reqs
        }
        if(new_coord != null)
            is_coord=new_coord.equals(local_addr);
    }

    // If we're becoming coordinator, we need to handle TMP_VIEW as
    // an immediate change of view. See JGRP-1452.
    private void handleTmpView(View v) {
        Address new_coord=v.getCoord();
        if(new_coord != null && !new_coord.equals(coord) && local_addr != null && local_addr.equals(new_coord))
            handleViewChange(v);
    }

    /** Unicasts a REQUEST for num_seqnos seqnos to the coordinator; a no-op if no coord is known yet */
    protected void sendSeqnoRequest(int num_seqnos) {
        Address target=coord;
        if(target == null)
            return;
        SequencerHeader hdr=new SequencerHeader(SequencerHeader.REQUEST, 0, num_seqnos);
        Message forward_msg=new EmptyMessage(target).putHeader(this.id, hdr);
        down_prot.down(forward_msg);
        sent_requests++;
    }

    /** Unicasts a RESPONSE with the start seqno and count back to the requester */
    protected void sendSeqnoResponse(Address original_sender, long seqno, int num_seqnos) {
        SequencerHeader hdr=new SequencerHeader(SequencerHeader.RESPONSE, seqno, num_seqnos);
        Message ucast_msg=new EmptyMessage(original_sender).putHeader(this.id, hdr);

        if(log.isTraceEnabled())
            log.trace(local_addr + ": sending seqno response to " + original_sender + ":: new_seqno=" + seqno + ", num_seqnos=" + num_seqnos);

        down_prot.down(ucast_msg);
        sent_responses++;
    }

    /** Multicasts msg tagged with the given seqno */
    protected void broadcast(final Message msg, long seqno) {
        msg.putHeader(this.id, new SequencerHeader(SequencerHeader.BCAST, seqno));

        if(log.isTraceEnabled())
            log.trace(local_addr + ": broadcasting ::" + seqno);

        down_prot.down(msg);
        bcasts_sent++;
    }

    /** Adds a received BCAST to the table and triggers in-order removal/delivery */
    protected void deliver(Message msg, SequencerHeader hdr) {
        Address sender=msg.getSrc();
        if(sender == null) {
            if(log.isErrorEnabled())
                log.error(local_addr + ": sender is null, cannot deliver " + "::" + hdr.getSeqno());
            return;
        }
        final Table<Message> win=received_msgs;
        win.add(hdr.seqno, msg);
        removeAndDeliver(win, sender);
    }

    /**
     * Removes as many consecutive messages as possible from the table and delivers them as
     * batches. The adders counter ensures only one thread drains the table at a time while
     * concurrent adders signal it to loop again.
     */
    protected void removeAndDeliver(Table<Message> win, Address sender) {
        AtomicInteger adders=win.getAdders();
        if(adders.getAndIncrement() != 0)
            return; // another thread is already draining; it will pick up our addition
        final MessageBatch     batch=new MessageBatch(win.size()).dest(local_addr).sender(sender).multicast(false);
        Supplier<MessageBatch> batch_creator=() -> batch;
        do {
            try {
                batch.reset();
                win.removeMany(true, 0, null, batch_creator, BATCH_ACCUMULATOR);
            }
            catch(Throwable t) {
                log.error("failed removing messages from table for " + sender, t);
            }
            if(!batch.isEmpty()) {
                // batch is guaranteed to NOT contain any OOB messages as the drop_oob_msgs_filter removed them
                deliverBatch(batch);
            }
        }
        while(adders.decrementAndGet() != 0);
    }

    /** Passes a non-empty batch up the stack, tracing the delivered seqno range if enabled */
    protected void deliverBatch(MessageBatch batch) {
        try {
            if(batch.isEmpty())
                return;
            if(log.isTraceEnabled()) {
                Message first=batch.first(), last=batch.last();
                StringBuilder sb=new StringBuilder(local_addr + ": delivering");
                if(first != null && last != null) {
                    SequencerHeader hdr1=first.getHeader(id), hdr2=last.getHeader(id);
                    sb.append(" #").append(hdr1.seqno).append(" - #").append(hdr2.seqno);
                }
                sb.append(" (" + batch.size()).append(" messages)");
                log.trace(sb);
            }
            up_prot.up(batch);
        }
        catch(Throwable t) {
            log.error(Util.getMessage("FailedToDeliverMsg"), local_addr, "batch", batch, t);
        }
    }

    /* ----------------------------- End of Private Methods -------------------------------- */

    /** Protocol header: REQUEST (ask for seqnos), RESPONSE (grant seqnos), BCAST (sequenced msg) */
    public static class SequencerHeader extends Header {
        protected static final byte REQUEST       = 1;
        protected static final byte BCAST         = 2;
        protected static final byte RESPONSE      = 3;

        protected byte type;
        protected long seqno;
        protected int  num_seqnos=1; // the number of seqnos requested (REQUEST) or returned (on a RESPONSE)

        public SequencerHeader() {}

        public SequencerHeader(byte type) {this.type=type;}

        public SequencerHeader(byte type, long seqno) {
            this(type, seqno, 1);
        }

        public SequencerHeader(byte type, long seqno, int num_seqnos) {
            this(type);
            this.seqno=seqno;
            this.num_seqnos=num_seqnos;
        }

        public short getMagicId() {return 86;}

        public Supplier<? extends Header> create() {
            return SequencerHeader::new;
        }

        public long getSeqno() {return seqno;}

        public String toString() {
            StringBuilder sb=new StringBuilder(64);
            sb.append(printType());
            if(seqno >= 0)
                sb.append(" seqno=" + seqno);
            if(num_seqnos > 1)
                sb.append(", num_seqnos=" + num_seqnos);
            return sb.toString();
        }

        protected final String printType() {
            switch(type) {
                case REQUEST:  return "REQUEST";
                case BCAST:    return "BCAST";
                case RESPONSE: return "RESPONSE";
                default:       return "n/a";
            }
        }

        @Override
        public void writeTo(DataOutput out) throws IOException {
            out.writeByte(type);
            Bits.writeLongCompressed(seqno, out);
            out.writeShort(num_seqnos); // written as unsigned short: num_seqnos must be <= 65535
        }

        @Override
        public void readFrom(DataInput in) throws IOException {
            type=in.readByte();
            seqno=Bits.readLongCompressed(in);
            num_seqnos=in.readUnsignedShort();
        }

        // type + seqno (compressed) + num_seqnos (unsigned short)
        @Override
        public int serializedSize() {
            return Global.BYTE_SIZE + Bits.size(seqno) + Global.SHORT_SIZE;
        }
    }
}
| |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.datatorrent.lib.io.fs;
import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.GetObjectRequest;
import com.amazonaws.services.s3.model.S3Object;
import com.amazonaws.services.s3.model.S3ObjectInputStream;
import com.google.common.io.ByteStreams;
import com.datatorrent.api.Context;
import com.datatorrent.lib.io.block.BlockMetadata;
import com.datatorrent.lib.io.block.FSSliceReader;
import com.datatorrent.lib.io.block.ReaderContext;
/**
 * S3BlockReader extends FSSliceReader and serves the functionality of reading S3 objects and
 * parsing block metadata. Reads are performed through the AmazonS3 client (ranged GETs) rather
 * than through an FSDataInputStream.
 *
 * @since 3.5.0
 */
@org.apache.hadoop.classification.InterfaceStability.Evolving
public class S3BlockReader extends FSSliceReader
{
  private transient AmazonS3 s3Client;
  private String bucketName;
  private String accessKey;
  private String secretAccessKey;

  public S3BlockReader()
  {
    this.readerContext = new S3BlockReaderContext();
  }

  /**
   * Creates the AmazonS3 client from the configured credentials and wires the bucket name and
   * client into the reader context, which performs the actual reads.
   * @param context operator context
   */
  @Override
  public void setup(Context.OperatorContext context)
  {
    super.setup(context);
    s3Client = new AmazonS3Client(new BasicAWSCredentials(accessKey, secretAccessKey));
    ((S3BlockReaderContext)readerContext).setBucketName(bucketName);
    ((S3BlockReaderContext)readerContext).setS3Client(s3Client);
  }

  /**
   * Extracts the bucket name from the given uri.
   * NOTE(review): assumes the uri has the shape scheme://accessKey:secretKey@bucket/path — confirm with callers.
   * @param s3uri s3 uri
   * @return name of the bucket
   */
  public static String extractBucket(String s3uri)
  {
    return s3uri.substring(s3uri.indexOf('@') + 1, s3uri.indexOf("/", s3uri.indexOf('@')));
  }

  /**
   * Extracts the accessKey (the text between "://" and the following ':') from the given uri.
   * @param s3uri given s3 uri
   * @return the accessKey
   */
  public static String extractAccessKey(String s3uri)
  {
    return s3uri.substring(s3uri.indexOf("://") + 3, s3uri.indexOf(':', s3uri.indexOf("://") + 3));
  }

  /**
   * Extracts the secretAccessKey (the text between the credentials ':' and '@') from the given uri.
   * @param s3uri given s3uri
   * @return the secretAccessKey
   */
  public static String extractSecretAccessKey(String s3uri)
  {
    return s3uri.substring(s3uri.indexOf(':', s3uri.indexOf("://") + 1) + 1, s3uri.indexOf('@'));
  }

  /**
   * Extracts the file path from the given block and sets it on the readerContext.
   * Returns null deliberately: reading is done via the AmazonS3 client in
   * {@link S3BlockReaderContext#readEntity()}, not via an FSDataInputStream.
   * @param block block metadata
   * @return always null (no hadoop stream is opened)
   * @throws IOException
   */
  @Override
  protected FSDataInputStream setupStream(BlockMetadata.FileBlockMetadata block) throws IOException
  {
    String filePath = block.getFilePath();
    // File path would be the path after bucket name.
    // Check if the file path starts with "/"
    if (filePath.startsWith("/")) {
      filePath = filePath.substring(1);
    }
    ((S3BlockReaderContext)readerContext).setFilePath(filePath);
    return null;
  }

  /**
   * BlockReadeContext for reading S3 Blocks.
   */
  private static class S3BlockReaderContext extends ReaderContext.FixedBytesReaderContext<FSDataInputStream>
  {
    private transient AmazonS3 s3Client;
    private transient String bucketName;
    private transient String filePath;

    /**
     * S3 block read would be achieved through the AmazonS3 client. Following are the steps to achieve:
     * (1) Create the objectRequest from bucketName and filePath.
     * (2) Set the range to the above created objectRequest.
     * (3) Get the object portion through AmazonS3 client API.
     * (4) Get the object content from the above object portion.
     * @return the block entity
     * @throws IOException
     */
    @Override
    protected Entity readEntity() throws IOException
    {
      entity.clear();
      GetObjectRequest rangeObjectRequest = new GetObjectRequest(
          bucketName, filePath);
      rangeObjectRequest.setRange(offset, blockMetadata.getLength() - 1);
      S3Object objectPortion = s3Client.getObject(rangeObjectRequest);
      // try-with-resources: the previous code only closed the stream on the success path,
      // leaking the underlying HTTP connection if toByteArray() threw
      try (S3ObjectInputStream wrappedStream = objectPortion.getObjectContent()) {
        byte[] record = ByteStreams.toByteArray(wrappedStream);
        entity.setUsedBytes(record.length);
        entity.setRecord(record);
      }
      return entity;
    }

    /**
     * Return the AmazonS3 service
     * @return the s3Client
     */
    public AmazonS3 getS3Client()
    {
      return s3Client;
    }

    /**
     * Set the AmazonS3 service
     * @param s3Client given s3Client
     */
    public void setS3Client(AmazonS3 s3Client)
    {
      this.s3Client = s3Client;
    }

    /**
     * Get the bucket name
     * @return the bucketName
     */
    public String getBucketName()
    {
      return bucketName;
    }

    /**
     * Set the bucket name
     * @param bucketName given bucketName
     */
    public void setBucketName(String bucketName)
    {
      this.bucketName = bucketName;
    }

    /**
     * Get the file path
     * @return the file path
     */
    public String getFilePath()
    {
      return filePath;
    }

    /**
     * Sets the file path
     * @param filePath given filePath
     */
    public void setFilePath(String filePath)
    {
      this.filePath = filePath;
    }
  }

  /**
   * Get the S3 bucket name
   * @return bucket
   */
  public String getBucketName()
  {
    return bucketName;
  }

  /**
   * Set the bucket name where the file resides
   * @param bucketName bucket name
   */
  public void setBucketName(String bucketName)
  {
    this.bucketName = bucketName;
  }

  /**
   * Return the access key
   * @return the accessKey
   */
  public String getAccessKey()
  {
    return accessKey;
  }

  /**
   * Set the access key
   * @param accessKey given accessKey
   */
  public void setAccessKey(String accessKey)
  {
    this.accessKey = accessKey;
  }

  /**
   * Return the secretAccessKey
   * @return the secretAccessKey
   */
  public String getSecretAccessKey()
  {
    return secretAccessKey;
  }

  /**
   * Set the secretAccessKey
   * @param secretAccessKey secretAccessKey
   */
  public void setSecretAccessKey(String secretAccessKey)
  {
    this.secretAccessKey = secretAccessKey;
  }
}
| |
// Generated from Swift.g4 by ANTLR 4.5
import org.antlr.v4.runtime.Lexer;
import org.antlr.v4.runtime.CharStream;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.TokenStream;
import org.antlr.v4.runtime.*;
import org.antlr.v4.runtime.atn.*;
import org.antlr.v4.runtime.dfa.DFA;
import org.antlr.v4.runtime.misc.*;
@SuppressWarnings({"all", "warnings", "unchecked", "unused", "cast"})
public class SwiftLexer extends Lexer {
static { RuntimeMetaData.checkVersion("4.5", RuntimeMetaData.VERSION); }
protected static final DFA[] _decisionToDFA;
protected static final PredictionContextCache _sharedContextCache =
new PredictionContextCache();
public static final int
T__0=1, T__1=2, T__2=3, T__3=4, T__4=5, T__5=6, T__6=7, T__7=8, T__8=9,
T__9=10, T__10=11, T__11=12, T__12=13, T__13=14, T__14=15, T__15=16, T__16=17,
T__17=18, T__18=19, T__19=20, T__20=21, T__21=22, T__22=23, T__23=24,
T__24=25, T__25=26, T__26=27, T__27=28, T__28=29, T__29=30, T__30=31,
T__31=32, T__32=33, T__33=34, T__34=35, T__35=36, T__36=37, T__37=38,
T__38=39, T__39=40, T__40=41, T__41=42, T__42=43, T__43=44, T__44=45,
T__45=46, T__46=47, T__47=48, T__48=49, T__49=50, T__50=51, T__51=52,
T__52=53, T__53=54, T__54=55, T__55=56, T__56=57, T__57=58, T__58=59,
T__59=60, T__60=61, T__61=62, T__62=63, T__63=64, T__64=65, T__65=66,
T__66=67, T__67=68, T__68=69, T__69=70, T__70=71, T__71=72, T__72=73,
T__73=74, T__74=75, T__75=76, T__76=77, T__77=78, T__78=79, T__79=80,
T__80=81, T__81=82, T__82=83, T__83=84, T__84=85, T__85=86, T__86=87,
T__87=88, T__88=89, T__89=90, T__90=91, T__91=92, T__92=93, T__93=94,
T__94=95, T__95=96, T__96=97, T__97=98, T__98=99, T__99=100, Platform=101,
Regular_identifier=102, DOT=103, LCURLY=104, LPAREN=105, LBRACK=106, RCURLY=107,
RPAREN=108, RBRACK=109, COMMA=110, COLON=111, SEMI=112, LT=113, GT=114,
UNDERSCORE=115, BANG=116, QUESTION=117, AT=118, AND=119, SUB=120, EQUAL=121,
OR=122, DIV=123, ADD=124, MUL=125, MOD=126, CARET=127, TILDE=128, Operator_head_other=129,
Operator_following_character=130, Implicit_parameter_name=131, Binary_literal=132,
Octal_literal=133, Decimal_literal=134, Pure_decimal_digits=135, Hexadecimal_literal=136,
Floating_point_literal=137, Static_string_literal=138, Interpolated_string_literal=139,
WS=140, Block_comment=141, Line_comment=142;
public static String[] modeNames = {
"DEFAULT_MODE"
};
public static final String[] ruleNames = {
"T__0", "T__1", "T__2", "T__3", "T__4", "T__5", "T__6", "T__7", "T__8",
"T__9", "T__10", "T__11", "T__12", "T__13", "T__14", "T__15", "T__16",
"T__17", "T__18", "T__19", "T__20", "T__21", "T__22", "T__23", "T__24",
"T__25", "T__26", "T__27", "T__28", "T__29", "T__30", "T__31", "T__32",
"T__33", "T__34", "T__35", "T__36", "T__37", "T__38", "T__39", "T__40",
"T__41", "T__42", "T__43", "T__44", "T__45", "T__46", "T__47", "T__48",
"T__49", "T__50", "T__51", "T__52", "T__53", "T__54", "T__55", "T__56",
"T__57", "T__58", "T__59", "T__60", "T__61", "T__62", "T__63", "T__64",
"T__65", "T__66", "T__67", "T__68", "T__69", "T__70", "T__71", "T__72",
"T__73", "T__74", "T__75", "T__76", "T__77", "T__78", "T__79", "T__80",
"T__81", "T__82", "T__83", "T__84", "T__85", "T__86", "T__87", "T__88",
"T__89", "T__90", "T__91", "T__92", "T__93", "T__94", "T__95", "T__96",
"T__97", "T__98", "T__99", "Platform", "Platform_name", "Platform_version",
"Regular_identifier", "Identifier_head", "Identifier_character", "Identifier_characters",
"DOT", "LCURLY", "LPAREN", "LBRACK", "RCURLY", "RPAREN", "RBRACK", "COMMA",
"COLON", "SEMI", "LT", "GT", "UNDERSCORE", "BANG", "QUESTION", "AT", "AND",
"SUB", "EQUAL", "OR", "DIV", "ADD", "MUL", "MOD", "CARET", "TILDE", "Operator_head_other",
"Operator_following_character", "Implicit_parameter_name", "Binary_literal",
"Binary_digit", "Binary_literal_character", "Binary_literal_characters",
"Octal_literal", "Octal_digit", "Octal_literal_character", "Octal_literal_characters",
"Decimal_literal", "Pure_decimal_digits", "Decimal_digit", "Decimal_literal_character",
"Decimal_literal_characters", "Hexadecimal_literal", "Hexadecimal_digit",
"Hexadecimal_literal_character", "Hexadecimal_literal_characters", "Floating_point_literal",
"Decimal_fraction", "Decimal_exponent", "Hexadecimal_fraction", "Hexadecimal_exponent",
"Floating_point_e", "Floating_point_p", "Sign", "Static_string_literal",
"Quoted_text", "Quoted_text_item", "Escaped_character", "Interpolated_string_literal",
"Interpolated_text_item", "WS", "Block_comment", "Line_comment"
};
private static final String[] _LITERAL_NAMES = {
null, "'for'", "'case'", "'in'", "'while'", "'let'", "'var'", "'repeat'",
"'if'", "'else'", "'guard'", "'switch'", "'default'", "'where'", "'break'",
"'continue'", "'fallthrough'", "'return'", "'#available'", "'throw'",
"'defer'", "'do'", "'catch'", "'#if'", "'#endif'", "'#elseif'", "'#else'",
"'os'", "'arch'", "'OSX'", "'iOS'", "'watchOS'", "'tvOS'", "'i386'", "'x86_64'",
"'arm'", "'arm64'", "'#line'", "'import'", "'typealias'", "'struct'",
"'class'", "'enum'", "'protocol'", "'func'", "'get'", "'set'", "'willSet'",
"'didSet'", "'throws'", "'rethrows'", "'indirect'", "'associatedtype'",
"'init'", "'deinit'", "'extension'", "'subscript'", "'prefix'", "'operator'",
"'postfix'", "'infix'", "'precedence'", "'associativity'", "'left'", "'right'",
"'none'", "'convenience'", "'dynamic'", "'final'", "'lazy'", "'mutating'",
"'nonmutating'", "'optional'", "'override'", "'required'", "'static'",
"'unowned'", "'safe'", "'unsafe'", "'weak'", "'internal'", "'private'",
"'public'", "'is'", "'as'", "'try'", "'__FILE__'", "'__LINE__'", "'__COLUMN__'",
"'__FUNCTION__'", "'unowned(safe)'", "'unowned(unsafe)'", "'#selector'",
"'dynamicType'", "'Type'", "'Protocol'", "'inout'", "'`'", "'true'", "'false'",
"'nil'", null, null, "'.'", "'{'", "'('", "'['", "'}'", "')'", "']'",
"','", "':'", "';'", "'<'", "'>'", "'_'", "'!'", "'?'", "'@'", "'&'",
"'-'", "'='", "'|'", "'/'", "'+'", "'*'", "'%'", "'^'", "'~'"
};
private static final String[] _SYMBOLIC_NAMES = {
null, null, null, null, null, null, null, null, null, null, null, null,
null, null, null, null, null, null, null, null, null, null, null, null,
null, null, null, null, null, null, null, null, null, null, null, null,
null, null, null, null, null, null, null, null, null, null, null, null,
null, null, null, null, null, null, null, null, null, null, null, null,
null, null, null, null, null, null, null, null, null, null, null, null,
null, null, null, null, null, null, null, null, null, null, null, null,
null, null, null, null, null, null, null, null, null, null, null, null,
null, null, null, null, null, "Platform", "Regular_identifier", "DOT",
"LCURLY", "LPAREN", "LBRACK", "RCURLY", "RPAREN", "RBRACK", "COMMA", "COLON",
"SEMI", "LT", "GT", "UNDERSCORE", "BANG", "QUESTION", "AT", "AND", "SUB",
"EQUAL", "OR", "DIV", "ADD", "MUL", "MOD", "CARET", "TILDE", "Operator_head_other",
"Operator_following_character", "Implicit_parameter_name", "Binary_literal",
"Octal_literal", "Decimal_literal", "Pure_decimal_digits", "Hexadecimal_literal",
"Floating_point_literal", "Static_string_literal", "Interpolated_string_literal",
"WS", "Block_comment", "Line_comment"
};
public static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES);
/**
* @deprecated Use {@link #VOCABULARY} instead.
*/
@Deprecated
public static final String[] tokenNames;
// Builds the deprecated tokenNames array from VOCABULARY: prefer the literal name,
// fall back to the symbolic name, then to "<INVALID>" (ANTLR-generated boilerplate).
static {
	tokenNames = new String[_SYMBOLIC_NAMES.length];
	for (int i = 0; i < tokenNames.length; i++) {
		tokenNames[i] = VOCABULARY.getLiteralName(i);
		if (tokenNames[i] == null) {
			tokenNames[i] = VOCABULARY.getSymbolicName(i);
		}
		if (tokenNames[i] == null) {
			tokenNames[i] = "<INVALID>";
		}
	}
}
// Deprecated accessor kept for backwards compatibility; use getVocabulary() instead.
@Override
@Deprecated
public String[] getTokenNames() {
	return tokenNames;
}

@Override
public Vocabulary getVocabulary() {
	return VOCABULARY;
}

// ANTLR-generated constructor: wires this lexer to the shared ATN / DFA caches.
public SwiftLexer(CharStream input) {
	super(input);
	_interp = new LexerATNSimulator(this,_ATN,_decisionToDFA,_sharedContextCache);
}

// Generated metadata accessors required by the ANTLR runtime.
@Override
public String getGrammarFileName() { return "Swift.g4"; }

@Override
public String[] getRuleNames() { return ruleNames; }

@Override
public String getSerializedATN() { return _serializedATN; }

@Override
public String[] getModeNames() { return modeNames; }

@Override
public ATN getATN() { return _ATN; }
public static final String _serializedATN =
"\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2\u0090\u059d\b\1\4"+
"\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n"+
"\4\13\t\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22"+
"\t\22\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31"+
"\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36\4\37\t\37\4 \t"+
" \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t"+
"+\4,\t,\4-\t-\4.\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64"+
"\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:\4;\t;\4<\t<\4=\t"+
"=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\tC\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH\4"+
"I\tI\4J\tJ\4K\tK\4L\tL\4M\tM\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\4S\tS\4T\t"+
"T\4U\tU\4V\tV\4W\tW\4X\tX\4Y\tY\4Z\tZ\4[\t[\4\\\t\\\4]\t]\4^\t^\4_\t_"+
"\4`\t`\4a\ta\4b\tb\4c\tc\4d\td\4e\te\4f\tf\4g\tg\4h\th\4i\ti\4j\tj\4k"+
"\tk\4l\tl\4m\tm\4n\tn\4o\to\4p\tp\4q\tq\4r\tr\4s\ts\4t\tt\4u\tu\4v\tv"+
"\4w\tw\4x\tx\4y\ty\4z\tz\4{\t{\4|\t|\4}\t}\4~\t~\4\177\t\177\4\u0080\t"+
"\u0080\4\u0081\t\u0081\4\u0082\t\u0082\4\u0083\t\u0083\4\u0084\t\u0084"+
"\4\u0085\t\u0085\4\u0086\t\u0086\4\u0087\t\u0087\4\u0088\t\u0088\4\u0089"+
"\t\u0089\4\u008a\t\u008a\4\u008b\t\u008b\4\u008c\t\u008c\4\u008d\t\u008d"+
"\4\u008e\t\u008e\4\u008f\t\u008f\4\u0090\t\u0090\4\u0091\t\u0091\4\u0092"+
"\t\u0092\4\u0093\t\u0093\4\u0094\t\u0094\4\u0095\t\u0095\4\u0096\t\u0096"+
"\4\u0097\t\u0097\4\u0098\t\u0098\4\u0099\t\u0099\4\u009a\t\u009a\4\u009b"+
"\t\u009b\4\u009c\t\u009c\4\u009d\t\u009d\4\u009e\t\u009e\4\u009f\t\u009f"+
"\4\u00a0\t\u00a0\4\u00a1\t\u00a1\4\u00a2\t\u00a2\4\u00a3\t\u00a3\4\u00a4"+
"\t\u00a4\4\u00a5\t\u00a5\4\u00a6\t\u00a6\4\u00a7\t\u00a7\4\u00a8\t\u00a8"+
"\4\u00a9\t\u00a9\4\u00aa\t\u00aa\4\u00ab\t\u00ab\3\2\3\2\3\2\3\2\3\3\3"+
"\3\3\3\3\3\3\3\3\4\3\4\3\4\3\5\3\5\3\5\3\5\3\5\3\5\3\6\3\6\3\6\3\6\3\7"+
"\3\7\3\7\3\7\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\t\3\t\3\t\3\n\3\n\3\n\3\n\3"+
"\n\3\13\3\13\3\13\3\13\3\13\3\13\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\r\3\r\3"+
"\r\3\r\3\r\3\r\3\r\3\r\3\16\3\16\3\16\3\16\3\16\3\16\3\17\3\17\3\17\3"+
"\17\3\17\3\17\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\21\3\21\3"+
"\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\22\3\22\3\22\3\22\3"+
"\22\3\22\3\22\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3"+
"\24\3\24\3\24\3\24\3\24\3\24\3\25\3\25\3\25\3\25\3\25\3\25\3\26\3\26\3"+
"\26\3\27\3\27\3\27\3\27\3\27\3\27\3\30\3\30\3\30\3\30\3\31\3\31\3\31\3"+
"\31\3\31\3\31\3\31\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\33\3\33\3"+
"\33\3\33\3\33\3\33\3\34\3\34\3\34\3\35\3\35\3\35\3\35\3\35\3\36\3\36\3"+
"\36\3\36\3\37\3\37\3\37\3\37\3 \3 \3 \3 \3 \3 \3 \3 \3!\3!\3!\3!\3!\3"+
"\"\3\"\3\"\3\"\3\"\3#\3#\3#\3#\3#\3#\3#\3$\3$\3$\3$\3%\3%\3%\3%\3%\3%"+
"\3&\3&\3&\3&\3&\3&\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3(\3(\3(\3(\3(\3(\3(\3"+
"(\3(\3(\3)\3)\3)\3)\3)\3)\3)\3*\3*\3*\3*\3*\3*\3+\3+\3+\3+\3+\3,\3,\3"+
",\3,\3,\3,\3,\3,\3,\3-\3-\3-\3-\3-\3.\3.\3.\3.\3/\3/\3/\3/\3\60\3\60\3"+
"\60\3\60\3\60\3\60\3\60\3\60\3\61\3\61\3\61\3\61\3\61\3\61\3\61\3\62\3"+
"\62\3\62\3\62\3\62\3\62\3\62\3\63\3\63\3\63\3\63\3\63\3\63\3\63\3\63\3"+
"\63\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\65\3\65\3\65\3\65\3"+
"\65\3\65\3\65\3\65\3\65\3\65\3\65\3\65\3\65\3\65\3\65\3\66\3\66\3\66\3"+
"\66\3\66\3\67\3\67\3\67\3\67\3\67\3\67\3\67\38\38\38\38\38\38\38\38\3"+
"8\38\39\39\39\39\39\39\39\39\39\39\3:\3:\3:\3:\3:\3:\3:\3;\3;\3;\3;\3"+
";\3;\3;\3;\3;\3<\3<\3<\3<\3<\3<\3<\3<\3=\3=\3=\3=\3=\3=\3>\3>\3>\3>\3"+
">\3>\3>\3>\3>\3>\3>\3?\3?\3?\3?\3?\3?\3?\3?\3?\3?\3?\3?\3?\3?\3@\3@\3"+
"@\3@\3@\3A\3A\3A\3A\3A\3A\3B\3B\3B\3B\3B\3C\3C\3C\3C\3C\3C\3C\3C\3C\3"+
"C\3C\3C\3D\3D\3D\3D\3D\3D\3D\3D\3E\3E\3E\3E\3E\3E\3F\3F\3F\3F\3F\3G\3"+
"G\3G\3G\3G\3G\3G\3G\3G\3H\3H\3H\3H\3H\3H\3H\3H\3H\3H\3H\3H\3I\3I\3I\3"+
"I\3I\3I\3I\3I\3I\3J\3J\3J\3J\3J\3J\3J\3J\3J\3K\3K\3K\3K\3K\3K\3K\3K\3"+
"K\3L\3L\3L\3L\3L\3L\3L\3M\3M\3M\3M\3M\3M\3M\3M\3N\3N\3N\3N\3N\3O\3O\3"+
"O\3O\3O\3O\3O\3P\3P\3P\3P\3P\3Q\3Q\3Q\3Q\3Q\3Q\3Q\3Q\3Q\3R\3R\3R\3R\3"+
"R\3R\3R\3R\3S\3S\3S\3S\3S\3S\3S\3T\3T\3T\3U\3U\3U\3V\3V\3V\3V\3W\3W\3"+
"W\3W\3W\3W\3W\3W\3W\3X\3X\3X\3X\3X\3X\3X\3X\3X\3Y\3Y\3Y\3Y\3Y\3Y\3Y\3"+
"Y\3Y\3Y\3Y\3Z\3Z\3Z\3Z\3Z\3Z\3Z\3Z\3Z\3Z\3Z\3Z\3Z\3[\3[\3[\3[\3[\3[\3"+
"[\3[\3[\3[\3[\3[\3[\3[\3\\\3\\\3\\\3\\\3\\\3\\\3\\\3\\\3\\\3\\\3\\\3\\"+
"\3\\\3\\\3\\\3\\\3]\3]\3]\3]\3]\3]\3]\3]\3]\3]\3^\3^\3^\3^\3^\3^\3^\3"+
"^\3^\3^\3^\3^\3_\3_\3_\3_\3_\3`\3`\3`\3`\3`\3`\3`\3`\3`\3a\3a\3a\3a\3"+
"a\3a\3b\3b\3c\3c\3c\3c\3c\3d\3d\3d\3d\3d\3d\3e\3e\3e\3e\3f\3f\5f\u041d"+
"\nf\3f\3f\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g"+
"\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g"+
"\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\3g\5g\u0460"+
"\ng\3h\3h\3h\3h\3h\3h\3h\3h\3h\3h\3h\5h\u046d\nh\3i\3i\5i\u0471\ni\3j"+
"\5j\u0474\nj\3k\3k\5k\u0478\nk\3l\6l\u047b\nl\rl\16l\u047c\3m\3m\3n\3"+
"n\3o\3o\3p\3p\3q\3q\3r\3r\3s\3s\3t\3t\3u\3u\3v\3v\3w\3w\3x\3x\3y\3y\3"+
"z\3z\3{\3{\3|\3|\3}\3}\3~\3~\3\177\3\177\3\u0080\3\u0080\3\u0081\3\u0081"+
"\3\u0082\3\u0082\3\u0083\3\u0083\3\u0084\3\u0084\3\u0085\3\u0085\3\u0086"+
"\3\u0086\3\u0087\5\u0087\u04b4\n\u0087\3\u0088\5\u0088\u04b7\n\u0088\3"+
"\u0089\3\u0089\3\u0089\3\u008a\3\u008a\3\u008a\3\u008a\3\u008a\5\u008a"+
"\u04c1\n\u008a\3\u008b\3\u008b\3\u008c\3\u008c\5\u008c\u04c7\n\u008c\3"+
"\u008d\6\u008d\u04ca\n\u008d\r\u008d\16\u008d\u04cb\3\u008e\3\u008e\3"+
"\u008e\3\u008e\3\u008e\5\u008e\u04d3\n\u008e\3\u008f\3\u008f\3\u0090\3"+
"\u0090\5\u0090\u04d9\n\u0090\3\u0091\6\u0091\u04dc\n\u0091\r\u0091\16"+
"\u0091\u04dd\3\u0092\3\u0092\7\u0092\u04e2\n\u0092\f\u0092\16\u0092\u04e5"+
"\13\u0092\3\u0093\6\u0093\u04e8\n\u0093\r\u0093\16\u0093\u04e9\3\u0094"+
"\3\u0094\3\u0095\3\u0095\5\u0095\u04f0\n\u0095\3\u0096\6\u0096\u04f3\n"+
"\u0096\r\u0096\16\u0096\u04f4\3\u0097\3\u0097\3\u0097\3\u0097\3\u0097"+
"\5\u0097\u04fc\n\u0097\3\u0098\3\u0098\3\u0099\3\u0099\5\u0099\u0502\n"+
"\u0099\3\u009a\6\u009a\u0505\n\u009a\r\u009a\16\u009a\u0506\3\u009b\3"+
"\u009b\5\u009b\u050b\n\u009b\3\u009b\5\u009b\u050e\n\u009b\3\u009b\3\u009b"+
"\5\u009b\u0512\n\u009b\3\u009b\3\u009b\5\u009b\u0516\n\u009b\3\u009c\3"+
"\u009c\3\u009c\3\u009d\3\u009d\5\u009d\u051d\n\u009d\3\u009d\3\u009d\3"+
"\u009e\3\u009e\3\u009e\5\u009e\u0524\n\u009e\3\u009f\3\u009f\5\u009f\u0528"+
"\n\u009f\3\u009f\3\u009f\3\u00a0\3\u00a0\3\u00a1\3\u00a1\3\u00a2\3\u00a2"+
"\3\u00a3\3\u00a3\5\u00a3\u0534\n\u00a3\3\u00a3\3\u00a3\3\u00a4\6\u00a4"+
"\u0539\n\u00a4\r\u00a4\16\u00a4\u053a\3\u00a5\3\u00a5\5\u00a5\u053f\n"+
"\u00a5\3\u00a6\3\u00a6\3\u00a6\3\u00a6\3\u00a6\3\u00a6\3\u00a6\3\u00a6"+
"\3\u00a6\3\u00a6\3\u00a6\3\u00a6\3\u00a6\3\u00a6\3\u00a6\3\u00a6\3\u00a6"+
"\3\u00a6\3\u00a6\3\u00a6\3\u00a6\3\u00a6\3\u00a6\3\u00a6\3\u00a6\3\u00a6"+
"\3\u00a6\3\u00a6\3\u00a6\3\u00a6\3\u00a6\3\u00a6\5\u00a6\u0561\n\u00a6"+
"\3\u00a7\3\u00a7\7\u00a7\u0565\n\u00a7\f\u00a7\16\u00a7\u0568\13\u00a7"+
"\3\u00a7\3\u00a7\3\u00a8\3\u00a8\3\u00a8\3\u00a8\3\u00a8\6\u00a8\u0571"+
"\n\u00a8\r\u00a8\16\u00a8\u0572\3\u00a8\3\u00a8\3\u00a8\5\u00a8\u0578"+
"\n\u00a8\3\u00a9\6\u00a9\u057b\n\u00a9\r\u00a9\16\u00a9\u057c\3\u00a9"+
"\3\u00a9\3\u00aa\3\u00aa\3\u00aa\3\u00aa\3\u00aa\7\u00aa\u0586\n\u00aa"+
"\f\u00aa\16\u00aa\u0589\13\u00aa\3\u00aa\3\u00aa\3\u00aa\3\u00aa\3\u00aa"+
"\3\u00ab\3\u00ab\3\u00ab\3\u00ab\7\u00ab\u0594\n\u00ab\f\u00ab\16\u00ab"+
"\u0597\13\u00ab\3\u00ab\5\u00ab\u059a\n\u00ab\3\u00ab\3\u00ab\4\u0587"+
"\u0595\2\u00ac\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23\13\25\f\27\r\31\16"+
"\33\17\35\20\37\21!\22#\23%\24\'\25)\26+\27-\30/\31\61\32\63\33\65\34"+
"\67\359\36;\37= ?!A\"C#E$G%I&K\'M(O)Q*S+U,W-Y.[/]\60_\61a\62c\63e\64g"+
"\65i\66k\67m8o9q:s;u<w=y>{?}@\177A\u0081B\u0083C\u0085D\u0087E\u0089F"+
"\u008bG\u008dH\u008fI\u0091J\u0093K\u0095L\u0097M\u0099N\u009bO\u009d"+
"P\u009fQ\u00a1R\u00a3S\u00a5T\u00a7U\u00a9V\u00abW\u00adX\u00afY\u00b1"+
"Z\u00b3[\u00b5\\\u00b7]\u00b9^\u00bb_\u00bd`\u00bfa\u00c1b\u00c3c\u00c5"+
"d\u00c7e\u00c9f\u00cbg\u00cd\2\u00cf\2\u00d1h\u00d3\2\u00d5\2\u00d7\2"+
"\u00d9i\u00dbj\u00ddk\u00dfl\u00e1m\u00e3n\u00e5o\u00e7p\u00e9q\u00eb"+
"r\u00eds\u00eft\u00f1u\u00f3v\u00f5w\u00f7x\u00f9y\u00fbz\u00fd{\u00ff"+
"|\u0101}\u0103~\u0105\177\u0107\u0080\u0109\u0081\u010b\u0082\u010d\u0083"+
"\u010f\u0084\u0111\u0085\u0113\u0086\u0115\2\u0117\2\u0119\2\u011b\u0087"+
"\u011d\2\u011f\2\u0121\2\u0123\u0088\u0125\u0089\u0127\2\u0129\2\u012b"+
"\2\u012d\u008a\u012f\2\u0131\2\u0133\2\u0135\u008b\u0137\2\u0139\2\u013b"+
"\2\u013d\2\u013f\2\u0141\2\u0143\2\u0145\u008c\u0147\2\u0149\2\u014b\2"+
"\u014d\u008d\u014f\2\u0151\u008e\u0153\u008f\u0155\u0090\3\2\22%\2C\\"+
"aac|\u00aa\u00aa\u00ac\u00ac\u00af\u00af\u00b1\u00b1\u00b4\u00b7\u00b9"+
"\u00bc\u00be\u00c0\u00c2\u00d8\u00da\u00f8\u00fa\u0301\u0372\u1681\u1683"+
"\u180f\u1811\u1dc1\u1e02\u2001\u200d\u200f\u202c\u2030\u2041\u2042\u2056"+
"\u2056\u2062\u20d1\u2102\u2191\u2462\u2501\u2778\u2795\u2c02\u2e01\u2e82"+
"\u3001\u3006\u3009\u3023\u3031\u3033\ud801\uf902\ufd3f\ufd42\ufdd1\ufdf2"+
"\ufe21\ufe32\ufe46\ufe49\uffff\7\2\62;\u0302\u0371\u1dc2\u1e01\u20d2\u2101"+
"\ufe22\ufe31\27\2\u00a3\u00a9\u00ab\u00ab\u00ad\u00ae\u00b0\u00b0\u00b2"+
"\u00b3\u00b8\u00b8\u00bd\u00bd\u00c1\u00c1\u00d9\u00d9\u00f9\u00f9\u2018"+
"\u2019\u2022\u2029\u2032\u2040\u2043\u2055\u2057\u2060\u2192\u2401\u2502"+
"\u2777\u2796\u2c01\u2e02\u2e81\u3003\u3005\u300a\u3032\r\2\u0302\u0302"+
"\u0371\u0371\u1dc2\u1dc2\u1e01\u1e01\u2015\u2015\u20d2\u20d2\u2101\u2101"+
"\ufe02\ufe02\ufe11\ufe11\ufe22\ufe22\ufe31\ufe31\3\2\62\63\3\2\629\3\2"+
"\62;\4\2\62;aa\5\2\62;CHch\4\2GGgg\4\2RRrr\4\2--//\6\2\f\f\17\17$$^^\t"+
"\2$$))\62\62^^ppttvv\5\2\2\2\13\17\"\"\3\3\f\f\u05ad\2\3\3\2\2\2\2\5\3"+
"\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2"+
"\21\3\2\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2\2\2\33\3"+
"\2\2\2\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2\2\2\2#\3\2\2\2\2%\3\2\2\2\2\'"+
"\3\2\2\2\2)\3\2\2\2\2+\3\2\2\2\2-\3\2\2\2\2/\3\2\2\2\2\61\3\2\2\2\2\63"+
"\3\2\2\2\2\65\3\2\2\2\2\67\3\2\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2\2"+
"?\3\2\2\2\2A\3\2\2\2\2C\3\2\2\2\2E\3\2\2\2\2G\3\2\2\2\2I\3\2\2\2\2K\3"+
"\2\2\2\2M\3\2\2\2\2O\3\2\2\2\2Q\3\2\2\2\2S\3\2\2\2\2U\3\2\2\2\2W\3\2\2"+
"\2\2Y\3\2\2\2\2[\3\2\2\2\2]\3\2\2\2\2_\3\2\2\2\2a\3\2\2\2\2c\3\2\2\2\2"+
"e\3\2\2\2\2g\3\2\2\2\2i\3\2\2\2\2k\3\2\2\2\2m\3\2\2\2\2o\3\2\2\2\2q\3"+
"\2\2\2\2s\3\2\2\2\2u\3\2\2\2\2w\3\2\2\2\2y\3\2\2\2\2{\3\2\2\2\2}\3\2\2"+
"\2\2\177\3\2\2\2\2\u0081\3\2\2\2\2\u0083\3\2\2\2\2\u0085\3\2\2\2\2\u0087"+
"\3\2\2\2\2\u0089\3\2\2\2\2\u008b\3\2\2\2\2\u008d\3\2\2\2\2\u008f\3\2\2"+
"\2\2\u0091\3\2\2\2\2\u0093\3\2\2\2\2\u0095\3\2\2\2\2\u0097\3\2\2\2\2\u0099"+
"\3\2\2\2\2\u009b\3\2\2\2\2\u009d\3\2\2\2\2\u009f\3\2\2\2\2\u00a1\3\2\2"+
"\2\2\u00a3\3\2\2\2\2\u00a5\3\2\2\2\2\u00a7\3\2\2\2\2\u00a9\3\2\2\2\2\u00ab"+
"\3\2\2\2\2\u00ad\3\2\2\2\2\u00af\3\2\2\2\2\u00b1\3\2\2\2\2\u00b3\3\2\2"+
"\2\2\u00b5\3\2\2\2\2\u00b7\3\2\2\2\2\u00b9\3\2\2\2\2\u00bb\3\2\2\2\2\u00bd"+
"\3\2\2\2\2\u00bf\3\2\2\2\2\u00c1\3\2\2\2\2\u00c3\3\2\2\2\2\u00c5\3\2\2"+
"\2\2\u00c7\3\2\2\2\2\u00c9\3\2\2\2\2\u00cb\3\2\2\2\2\u00d1\3\2\2\2\2\u00d9"+
"\3\2\2\2\2\u00db\3\2\2\2\2\u00dd\3\2\2\2\2\u00df\3\2\2\2\2\u00e1\3\2\2"+
"\2\2\u00e3\3\2\2\2\2\u00e5\3\2\2\2\2\u00e7\3\2\2\2\2\u00e9\3\2\2\2\2\u00eb"+
"\3\2\2\2\2\u00ed\3\2\2\2\2\u00ef\3\2\2\2\2\u00f1\3\2\2\2\2\u00f3\3\2\2"+
"\2\2\u00f5\3\2\2\2\2\u00f7\3\2\2\2\2\u00f9\3\2\2\2\2\u00fb\3\2\2\2\2\u00fd"+
"\3\2\2\2\2\u00ff\3\2\2\2\2\u0101\3\2\2\2\2\u0103\3\2\2\2\2\u0105\3\2\2"+
"\2\2\u0107\3\2\2\2\2\u0109\3\2\2\2\2\u010b\3\2\2\2\2\u010d\3\2\2\2\2\u010f"+
"\3\2\2\2\2\u0111\3\2\2\2\2\u0113\3\2\2\2\2\u011b\3\2\2\2\2\u0123\3\2\2"+
"\2\2\u0125\3\2\2\2\2\u012d\3\2\2\2\2\u0135\3\2\2\2\2\u0145\3\2\2\2\2\u014d"+
"\3\2\2\2\2\u0151\3\2\2\2\2\u0153\3\2\2\2\2\u0155\3\2\2\2\3\u0157\3\2\2"+
"\2\5\u015b\3\2\2\2\7\u0160\3\2\2\2\t\u0163\3\2\2\2\13\u0169\3\2\2\2\r"+
"\u016d\3\2\2\2\17\u0171\3\2\2\2\21\u0178\3\2\2\2\23\u017b\3\2\2\2\25\u0180"+
"\3\2\2\2\27\u0186\3\2\2\2\31\u018d\3\2\2\2\33\u0195\3\2\2\2\35\u019b\3"+
"\2\2\2\37\u01a1\3\2\2\2!\u01aa\3\2\2\2#\u01b6\3\2\2\2%\u01bd\3\2\2\2\'"+
"\u01c8\3\2\2\2)\u01ce\3\2\2\2+\u01d4\3\2\2\2-\u01d7\3\2\2\2/\u01dd\3\2"+
"\2\2\61\u01e1\3\2\2\2\63\u01e8\3\2\2\2\65\u01f0\3\2\2\2\67\u01f6\3\2\2"+
"\29\u01f9\3\2\2\2;\u01fe\3\2\2\2=\u0202\3\2\2\2?\u0206\3\2\2\2A\u020e"+
"\3\2\2\2C\u0213\3\2\2\2E\u0218\3\2\2\2G\u021f\3\2\2\2I\u0223\3\2\2\2K"+
"\u0229\3\2\2\2M\u022f\3\2\2\2O\u0236\3\2\2\2Q\u0240\3\2\2\2S\u0247\3\2"+
"\2\2U\u024d\3\2\2\2W\u0252\3\2\2\2Y\u025b\3\2\2\2[\u0260\3\2\2\2]\u0264"+
"\3\2\2\2_\u0268\3\2\2\2a\u0270\3\2\2\2c\u0277\3\2\2\2e\u027e\3\2\2\2g"+
"\u0287\3\2\2\2i\u0290\3\2\2\2k\u029f\3\2\2\2m\u02a4\3\2\2\2o\u02ab\3\2"+
"\2\2q\u02b5\3\2\2\2s\u02bf\3\2\2\2u\u02c6\3\2\2\2w\u02cf\3\2\2\2y\u02d7"+
"\3\2\2\2{\u02dd\3\2\2\2}\u02e8\3\2\2\2\177\u02f6\3\2\2\2\u0081\u02fb\3"+
"\2\2\2\u0083\u0301\3\2\2\2\u0085\u0306\3\2\2\2\u0087\u0312\3\2\2\2\u0089"+
"\u031a\3\2\2\2\u008b\u0320\3\2\2\2\u008d\u0325\3\2\2\2\u008f\u032e\3\2"+
"\2\2\u0091\u033a\3\2\2\2\u0093\u0343\3\2\2\2\u0095\u034c\3\2\2\2\u0097"+
"\u0355\3\2\2\2\u0099\u035c\3\2\2\2\u009b\u0364\3\2\2\2\u009d\u0369\3\2"+
"\2\2\u009f\u0370\3\2\2\2\u00a1\u0375\3\2\2\2\u00a3\u037e\3\2\2\2\u00a5"+
"\u0386\3\2\2\2\u00a7\u038d\3\2\2\2\u00a9\u0390\3\2\2\2\u00ab\u0393\3\2"+
"\2\2\u00ad\u0397\3\2\2\2\u00af\u03a0\3\2\2\2\u00b1\u03a9\3\2\2\2\u00b3"+
"\u03b4\3\2\2\2\u00b5\u03c1\3\2\2\2\u00b7\u03cf\3\2\2\2\u00b9\u03df\3\2"+
"\2\2\u00bb\u03e9\3\2\2\2\u00bd\u03f5\3\2\2\2\u00bf\u03fa\3\2\2\2\u00c1"+
"\u0403\3\2\2\2\u00c3\u0409\3\2\2\2\u00c5\u040b\3\2\2\2\u00c7\u0410\3\2"+
"\2\2\u00c9\u0416\3\2\2\2\u00cb\u041a\3\2\2\2\u00cd\u045f\3\2\2\2\u00cf"+
"\u046c\3\2\2\2\u00d1\u046e\3\2\2\2\u00d3\u0473\3\2\2\2\u00d5\u0477\3\2"+
"\2\2\u00d7\u047a\3\2\2\2\u00d9\u047e\3\2\2\2\u00db\u0480\3\2\2\2\u00dd"+
"\u0482\3\2\2\2\u00df\u0484\3\2\2\2\u00e1\u0486\3\2\2\2\u00e3\u0488\3\2"+
"\2\2\u00e5\u048a\3\2\2\2\u00e7\u048c\3\2\2\2\u00e9\u048e\3\2\2\2\u00eb"+
"\u0490\3\2\2\2\u00ed\u0492\3\2\2\2\u00ef\u0494\3\2\2\2\u00f1\u0496\3\2"+
"\2\2\u00f3\u0498\3\2\2\2\u00f5\u049a\3\2\2\2\u00f7\u049c\3\2\2\2\u00f9"+
"\u049e\3\2\2\2\u00fb\u04a0\3\2\2\2\u00fd\u04a2\3\2\2\2\u00ff\u04a4\3\2"+
"\2\2\u0101\u04a6\3\2\2\2\u0103\u04a8\3\2\2\2\u0105\u04aa\3\2\2\2\u0107"+
"\u04ac\3\2\2\2\u0109\u04ae\3\2\2\2\u010b\u04b0\3\2\2\2\u010d\u04b3\3\2"+
"\2\2\u010f\u04b6\3\2\2\2\u0111\u04b8\3\2\2\2\u0113\u04bb\3\2\2\2\u0115"+
"\u04c2\3\2\2\2\u0117\u04c6\3\2\2\2\u0119\u04c9\3\2\2\2\u011b\u04cd\3\2"+
"\2\2\u011d\u04d4\3\2\2\2\u011f\u04d8\3\2\2\2\u0121\u04db\3\2\2\2\u0123"+
"\u04df\3\2\2\2\u0125\u04e7\3\2\2\2\u0127\u04eb\3\2\2\2\u0129\u04ef\3\2"+
"\2\2\u012b\u04f2\3\2\2\2\u012d\u04f6\3\2\2\2\u012f\u04fd\3\2\2\2\u0131"+
"\u0501\3\2\2\2\u0133\u0504\3\2\2\2\u0135\u0515\3\2\2\2\u0137\u0517\3\2"+
"\2\2\u0139\u051a\3\2\2\2\u013b\u0520\3\2\2\2\u013d\u0525\3\2\2\2\u013f"+
"\u052b\3\2\2\2\u0141\u052d\3\2\2\2\u0143\u052f\3\2\2\2\u0145\u0531\3\2"+
"\2\2\u0147\u0538\3\2\2\2\u0149\u053e\3\2\2\2\u014b\u0560\3\2\2\2\u014d"+
"\u0562\3\2\2\2\u014f\u0577\3\2\2\2\u0151\u057a\3\2\2\2\u0153\u0580\3\2"+
"\2\2\u0155\u058f\3\2\2\2\u0157\u0158\7h\2\2\u0158\u0159\7q\2\2\u0159\u015a"+
"\7t\2\2\u015a\4\3\2\2\2\u015b\u015c\7e\2\2\u015c\u015d\7c\2\2\u015d\u015e"+
"\7u\2\2\u015e\u015f\7g\2\2\u015f\6\3\2\2\2\u0160\u0161\7k\2\2\u0161\u0162"+
"\7p\2\2\u0162\b\3\2\2\2\u0163\u0164\7y\2\2\u0164\u0165\7j\2\2\u0165\u0166"+
"\7k\2\2\u0166\u0167\7n\2\2\u0167\u0168\7g\2\2\u0168\n\3\2\2\2\u0169\u016a"+
"\7n\2\2\u016a\u016b\7g\2\2\u016b\u016c\7v\2\2\u016c\f\3\2\2\2\u016d\u016e"+
"\7x\2\2\u016e\u016f\7c\2\2\u016f\u0170\7t\2\2\u0170\16\3\2\2\2\u0171\u0172"+
"\7t\2\2\u0172\u0173\7g\2\2\u0173\u0174\7r\2\2\u0174\u0175\7g\2\2\u0175"+
"\u0176\7c\2\2\u0176\u0177\7v\2\2\u0177\20\3\2\2\2\u0178\u0179\7k\2\2\u0179"+
"\u017a\7h\2\2\u017a\22\3\2\2\2\u017b\u017c\7g\2\2\u017c\u017d\7n\2\2\u017d"+
"\u017e\7u\2\2\u017e\u017f\7g\2\2\u017f\24\3\2\2\2\u0180\u0181\7i\2\2\u0181"+
"\u0182\7w\2\2\u0182\u0183\7c\2\2\u0183\u0184\7t\2\2\u0184\u0185\7f\2\2"+
"\u0185\26\3\2\2\2\u0186\u0187\7u\2\2\u0187\u0188\7y\2\2\u0188\u0189\7"+
"k\2\2\u0189\u018a\7v\2\2\u018a\u018b\7e\2\2\u018b\u018c\7j\2\2\u018c\30"+
"\3\2\2\2\u018d\u018e\7f\2\2\u018e\u018f\7g\2\2\u018f\u0190\7h\2\2\u0190"+
"\u0191\7c\2\2\u0191\u0192\7w\2\2\u0192\u0193\7n\2\2\u0193\u0194\7v\2\2"+
"\u0194\32\3\2\2\2\u0195\u0196\7y\2\2\u0196\u0197\7j\2\2\u0197\u0198\7"+
"g\2\2\u0198\u0199\7t\2\2\u0199\u019a\7g\2\2\u019a\34\3\2\2\2\u019b\u019c"+
"\7d\2\2\u019c\u019d\7t\2\2\u019d\u019e\7g\2\2\u019e\u019f\7c\2\2\u019f"+
"\u01a0\7m\2\2\u01a0\36\3\2\2\2\u01a1\u01a2\7e\2\2\u01a2\u01a3\7q\2\2\u01a3"+
"\u01a4\7p\2\2\u01a4\u01a5\7v\2\2\u01a5\u01a6\7k\2\2\u01a6\u01a7\7p\2\2"+
"\u01a7\u01a8\7w\2\2\u01a8\u01a9\7g\2\2\u01a9 \3\2\2\2\u01aa\u01ab\7h\2"+
"\2\u01ab\u01ac\7c\2\2\u01ac\u01ad\7n\2\2\u01ad\u01ae\7n\2\2\u01ae\u01af"+
"\7v\2\2\u01af\u01b0\7j\2\2\u01b0\u01b1\7t\2\2\u01b1\u01b2\7q\2\2\u01b2"+
"\u01b3\7w\2\2\u01b3\u01b4\7i\2\2\u01b4\u01b5\7j\2\2\u01b5\"\3\2\2\2\u01b6"+
"\u01b7\7t\2\2\u01b7\u01b8\7g\2\2\u01b8\u01b9\7v\2\2\u01b9\u01ba\7w\2\2"+
"\u01ba\u01bb\7t\2\2\u01bb\u01bc\7p\2\2\u01bc$\3\2\2\2\u01bd\u01be\7%\2"+
"\2\u01be\u01bf\7c\2\2\u01bf\u01c0\7x\2\2\u01c0\u01c1\7c\2\2\u01c1\u01c2"+
"\7k\2\2\u01c2\u01c3\7n\2\2\u01c3\u01c4\7c\2\2\u01c4\u01c5\7d\2\2\u01c5"+
"\u01c6\7n\2\2\u01c6\u01c7\7g\2\2\u01c7&\3\2\2\2\u01c8\u01c9\7v\2\2\u01c9"+
"\u01ca\7j\2\2\u01ca\u01cb\7t\2\2\u01cb\u01cc\7q\2\2\u01cc\u01cd\7y\2\2"+
"\u01cd(\3\2\2\2\u01ce\u01cf\7f\2\2\u01cf\u01d0\7g\2\2\u01d0\u01d1\7h\2"+
"\2\u01d1\u01d2\7g\2\2\u01d2\u01d3\7t\2\2\u01d3*\3\2\2\2\u01d4\u01d5\7"+
"f\2\2\u01d5\u01d6\7q\2\2\u01d6,\3\2\2\2\u01d7\u01d8\7e\2\2\u01d8\u01d9"+
"\7c\2\2\u01d9\u01da\7v\2\2\u01da\u01db\7e\2\2\u01db\u01dc\7j\2\2\u01dc"+
".\3\2\2\2\u01dd\u01de\7%\2\2\u01de\u01df\7k\2\2\u01df\u01e0\7h\2\2\u01e0"+
"\60\3\2\2\2\u01e1\u01e2\7%\2\2\u01e2\u01e3\7g\2\2\u01e3\u01e4\7p\2\2\u01e4"+
"\u01e5\7f\2\2\u01e5\u01e6\7k\2\2\u01e6\u01e7\7h\2\2\u01e7\62\3\2\2\2\u01e8"+
"\u01e9\7%\2\2\u01e9\u01ea\7g\2\2\u01ea\u01eb\7n\2\2\u01eb\u01ec\7u\2\2"+
"\u01ec\u01ed\7g\2\2\u01ed\u01ee\7k\2\2\u01ee\u01ef\7h\2\2\u01ef\64\3\2"+
"\2\2\u01f0\u01f1\7%\2\2\u01f1\u01f2\7g\2\2\u01f2\u01f3\7n\2\2\u01f3\u01f4"+
"\7u\2\2\u01f4\u01f5\7g\2\2\u01f5\66\3\2\2\2\u01f6\u01f7\7q\2\2\u01f7\u01f8"+
"\7u\2\2\u01f88\3\2\2\2\u01f9\u01fa\7c\2\2\u01fa\u01fb\7t\2\2\u01fb\u01fc"+
"\7e\2\2\u01fc\u01fd\7j\2\2\u01fd:\3\2\2\2\u01fe\u01ff\7Q\2\2\u01ff\u0200"+
"\7U\2\2\u0200\u0201\7Z\2\2\u0201<\3\2\2\2\u0202\u0203\7k\2\2\u0203\u0204"+
"\7Q\2\2\u0204\u0205\7U\2\2\u0205>\3\2\2\2\u0206\u0207\7y\2\2\u0207\u0208"+
"\7c\2\2\u0208\u0209\7v\2\2\u0209\u020a\7e\2\2\u020a\u020b\7j\2\2\u020b"+
"\u020c\7Q\2\2\u020c\u020d\7U\2\2\u020d@\3\2\2\2\u020e\u020f\7v\2\2\u020f"+
"\u0210\7x\2\2\u0210\u0211\7Q\2\2\u0211\u0212\7U\2\2\u0212B\3\2\2\2\u0213"+
"\u0214\7k\2\2\u0214\u0215\7\65\2\2\u0215\u0216\7:\2\2\u0216\u0217\78\2"+
"\2\u0217D\3\2\2\2\u0218\u0219\7z\2\2\u0219\u021a\7:\2\2\u021a\u021b\7"+
"8\2\2\u021b\u021c\7a\2\2\u021c\u021d\78\2\2\u021d\u021e\7\66\2\2\u021e"+
"F\3\2\2\2\u021f\u0220\7c\2\2\u0220\u0221\7t\2\2\u0221\u0222\7o\2\2\u0222"+
"H\3\2\2\2\u0223\u0224\7c\2\2\u0224\u0225\7t\2\2\u0225\u0226\7o\2\2\u0226"+
"\u0227\78\2\2\u0227\u0228\7\66\2\2\u0228J\3\2\2\2\u0229\u022a\7%\2\2\u022a"+
"\u022b\7n\2\2\u022b\u022c\7k\2\2\u022c\u022d\7p\2\2\u022d\u022e\7g\2\2"+
"\u022eL\3\2\2\2\u022f\u0230\7k\2\2\u0230\u0231\7o\2\2\u0231\u0232\7r\2"+
"\2\u0232\u0233\7q\2\2\u0233\u0234\7t\2\2\u0234\u0235\7v\2\2\u0235N\3\2"+
"\2\2\u0236\u0237\7v\2\2\u0237\u0238\7{\2\2\u0238\u0239\7r\2\2\u0239\u023a"+
"\7g\2\2\u023a\u023b\7c\2\2\u023b\u023c\7n\2\2\u023c\u023d\7k\2\2\u023d"+
"\u023e\7c\2\2\u023e\u023f\7u\2\2\u023fP\3\2\2\2\u0240\u0241\7u\2\2\u0241"+
"\u0242\7v\2\2\u0242\u0243\7t\2\2\u0243\u0244\7w\2\2\u0244\u0245\7e\2\2"+
"\u0245\u0246\7v\2\2\u0246R\3\2\2\2\u0247\u0248\7e\2\2\u0248\u0249\7n\2"+
"\2\u0249\u024a\7c\2\2\u024a\u024b\7u\2\2\u024b\u024c\7u\2\2\u024cT\3\2"+
"\2\2\u024d\u024e\7g\2\2\u024e\u024f\7p\2\2\u024f\u0250\7w\2\2\u0250\u0251"+
"\7o\2\2\u0251V\3\2\2\2\u0252\u0253\7r\2\2\u0253\u0254\7t\2\2\u0254\u0255"+
"\7q\2\2\u0255\u0256\7v\2\2\u0256\u0257\7q\2\2\u0257\u0258\7e\2\2\u0258"+
"\u0259\7q\2\2\u0259\u025a\7n\2\2\u025aX\3\2\2\2\u025b\u025c\7h\2\2\u025c"+
"\u025d\7w\2\2\u025d\u025e\7p\2\2\u025e\u025f\7e\2\2\u025fZ\3\2\2\2\u0260"+
"\u0261\7i\2\2\u0261\u0262\7g\2\2\u0262\u0263\7v\2\2\u0263\\\3\2\2\2\u0264"+
"\u0265\7u\2\2\u0265\u0266\7g\2\2\u0266\u0267\7v\2\2\u0267^\3\2\2\2\u0268"+
"\u0269\7y\2\2\u0269\u026a\7k\2\2\u026a\u026b\7n\2\2\u026b\u026c\7n\2\2"+
"\u026c\u026d\7U\2\2\u026d\u026e\7g\2\2\u026e\u026f\7v\2\2\u026f`\3\2\2"+
"\2\u0270\u0271\7f\2\2\u0271\u0272\7k\2\2\u0272\u0273\7f\2\2\u0273\u0274"+
"\7U\2\2\u0274\u0275\7g\2\2\u0275\u0276\7v\2\2\u0276b\3\2\2\2\u0277\u0278"+
"\7v\2\2\u0278\u0279\7j\2\2\u0279\u027a\7t\2\2\u027a\u027b\7q\2\2\u027b"+
"\u027c\7y\2\2\u027c\u027d\7u\2\2\u027dd\3\2\2\2\u027e\u027f\7t\2\2\u027f"+
"\u0280\7g\2\2\u0280\u0281\7v\2\2\u0281\u0282\7j\2\2\u0282\u0283\7t\2\2"+
"\u0283\u0284\7q\2\2\u0284\u0285\7y\2\2\u0285\u0286\7u\2\2\u0286f\3\2\2"+
"\2\u0287\u0288\7k\2\2\u0288\u0289\7p\2\2\u0289\u028a\7f\2\2\u028a\u028b"+
"\7k\2\2\u028b\u028c\7t\2\2\u028c\u028d\7g\2\2\u028d\u028e\7e\2\2\u028e"+
"\u028f\7v\2\2\u028fh\3\2\2\2\u0290\u0291\7c\2\2\u0291\u0292\7u\2\2\u0292"+
"\u0293\7u\2\2\u0293\u0294\7q\2\2\u0294\u0295\7e\2\2\u0295\u0296\7k\2\2"+
"\u0296\u0297\7c\2\2\u0297\u0298\7v\2\2\u0298\u0299\7g\2\2\u0299\u029a"+
"\7f\2\2\u029a\u029b\7v\2\2\u029b\u029c\7{\2\2\u029c\u029d\7r\2\2\u029d"+
"\u029e\7g\2\2\u029ej\3\2\2\2\u029f\u02a0\7k\2\2\u02a0\u02a1\7p\2\2\u02a1"+
"\u02a2\7k\2\2\u02a2\u02a3\7v\2\2\u02a3l\3\2\2\2\u02a4\u02a5\7f\2\2\u02a5"+
"\u02a6\7g\2\2\u02a6\u02a7\7k\2\2\u02a7\u02a8\7p\2\2\u02a8\u02a9\7k\2\2"+
"\u02a9\u02aa\7v\2\2\u02aan\3\2\2\2\u02ab\u02ac\7g\2\2\u02ac\u02ad\7z\2"+
"\2\u02ad\u02ae\7v\2\2\u02ae\u02af\7g\2\2\u02af\u02b0\7p\2\2\u02b0\u02b1"+
"\7u\2\2\u02b1\u02b2\7k\2\2\u02b2\u02b3\7q\2\2\u02b3\u02b4\7p\2\2\u02b4"+
"p\3\2\2\2\u02b5\u02b6\7u\2\2\u02b6\u02b7\7w\2\2\u02b7\u02b8\7d\2\2\u02b8"+
"\u02b9\7u\2\2\u02b9\u02ba\7e\2\2\u02ba\u02bb\7t\2\2\u02bb\u02bc\7k\2\2"+
"\u02bc\u02bd\7r\2\2\u02bd\u02be\7v\2\2\u02ber\3\2\2\2\u02bf\u02c0\7r\2"+
"\2\u02c0\u02c1\7t\2\2\u02c1\u02c2\7g\2\2\u02c2\u02c3\7h\2\2\u02c3\u02c4"+
"\7k\2\2\u02c4\u02c5\7z\2\2\u02c5t\3\2\2\2\u02c6\u02c7\7q\2\2\u02c7\u02c8"+
"\7r\2\2\u02c8\u02c9\7g\2\2\u02c9\u02ca\7t\2\2\u02ca\u02cb\7c\2\2\u02cb"+
"\u02cc\7v\2\2\u02cc\u02cd\7q\2\2\u02cd\u02ce\7t\2\2\u02cev\3\2\2\2\u02cf"+
"\u02d0\7r\2\2\u02d0\u02d1\7q\2\2\u02d1\u02d2\7u\2\2\u02d2\u02d3\7v\2\2"+
"\u02d3\u02d4\7h\2\2\u02d4\u02d5\7k\2\2\u02d5\u02d6\7z\2\2\u02d6x\3\2\2"+
"\2\u02d7\u02d8\7k\2\2\u02d8\u02d9\7p\2\2\u02d9\u02da\7h\2\2\u02da\u02db"+
"\7k\2\2\u02db\u02dc\7z\2\2\u02dcz\3\2\2\2\u02dd\u02de\7r\2\2\u02de\u02df"+
"\7t\2\2\u02df\u02e0\7g\2\2\u02e0\u02e1\7e\2\2\u02e1\u02e2\7g\2\2\u02e2"+
"\u02e3\7f\2\2\u02e3\u02e4\7g\2\2\u02e4\u02e5\7p\2\2\u02e5\u02e6\7e\2\2"+
"\u02e6\u02e7\7g\2\2\u02e7|\3\2\2\2\u02e8\u02e9\7c\2\2\u02e9\u02ea\7u\2"+
"\2\u02ea\u02eb\7u\2\2\u02eb\u02ec\7q\2\2\u02ec\u02ed\7e\2\2\u02ed\u02ee"+
"\7k\2\2\u02ee\u02ef\7c\2\2\u02ef\u02f0\7v\2\2\u02f0\u02f1\7k\2\2\u02f1"+
"\u02f2\7x\2\2\u02f2\u02f3\7k\2\2\u02f3\u02f4\7v\2\2\u02f4\u02f5\7{\2\2"+
"\u02f5~\3\2\2\2\u02f6\u02f7\7n\2\2\u02f7\u02f8\7g\2\2\u02f8\u02f9\7h\2"+
"\2\u02f9\u02fa\7v\2\2\u02fa\u0080\3\2\2\2\u02fb\u02fc\7t\2\2\u02fc\u02fd"+
"\7k\2\2\u02fd\u02fe\7i\2\2\u02fe\u02ff\7j\2\2\u02ff\u0300\7v\2\2\u0300"+
"\u0082\3\2\2\2\u0301\u0302\7p\2\2\u0302\u0303\7q\2\2\u0303\u0304\7p\2"+
"\2\u0304\u0305\7g\2\2\u0305\u0084\3\2\2\2\u0306\u0307\7e\2\2\u0307\u0308"+
"\7q\2\2\u0308\u0309\7p\2\2\u0309\u030a\7x\2\2\u030a\u030b\7g\2\2\u030b"+
"\u030c\7p\2\2\u030c\u030d\7k\2\2\u030d\u030e\7g\2\2\u030e\u030f\7p\2\2"+
"\u030f\u0310\7e\2\2\u0310\u0311\7g\2\2\u0311\u0086\3\2\2\2\u0312\u0313"+
"\7f\2\2\u0313\u0314\7{\2\2\u0314\u0315\7p\2\2\u0315\u0316\7c\2\2\u0316"+
"\u0317\7o\2\2\u0317\u0318\7k\2\2\u0318\u0319\7e\2\2\u0319\u0088\3\2\2"+
"\2\u031a\u031b\7h\2\2\u031b\u031c\7k\2\2\u031c\u031d\7p\2\2\u031d\u031e"+
"\7c\2\2\u031e\u031f\7n\2\2\u031f\u008a\3\2\2\2\u0320\u0321\7n\2\2\u0321"+
"\u0322\7c\2\2\u0322\u0323\7|\2\2\u0323\u0324\7{\2\2\u0324\u008c\3\2\2"+
"\2\u0325\u0326\7o\2\2\u0326\u0327\7w\2\2\u0327\u0328\7v\2\2\u0328\u0329"+
"\7c\2\2\u0329\u032a\7v\2\2\u032a\u032b\7k\2\2\u032b\u032c\7p\2\2\u032c"+
"\u032d\7i\2\2\u032d\u008e\3\2\2\2\u032e\u032f\7p\2\2\u032f\u0330\7q\2"+
"\2\u0330\u0331\7p\2\2\u0331\u0332\7o\2\2\u0332\u0333\7w\2\2\u0333\u0334"+
"\7v\2\2\u0334\u0335\7c\2\2\u0335\u0336\7v\2\2\u0336\u0337\7k\2\2\u0337"+
"\u0338\7p\2\2\u0338\u0339\7i\2\2\u0339\u0090\3\2\2\2\u033a\u033b\7q\2"+
"\2\u033b\u033c\7r\2\2\u033c\u033d\7v\2\2\u033d\u033e\7k\2\2\u033e\u033f"+
"\7q\2\2\u033f\u0340\7p\2\2\u0340\u0341\7c\2\2\u0341\u0342\7n\2\2\u0342"+
"\u0092\3\2\2\2\u0343\u0344\7q\2\2\u0344\u0345\7x\2\2\u0345\u0346\7g\2"+
"\2\u0346\u0347\7t\2\2\u0347\u0348\7t\2\2\u0348\u0349\7k\2\2\u0349\u034a"+
"\7f\2\2\u034a\u034b\7g\2\2\u034b\u0094\3\2\2\2\u034c\u034d\7t\2\2\u034d"+
"\u034e\7g\2\2\u034e\u034f\7s\2\2\u034f\u0350\7w\2\2\u0350\u0351\7k\2\2"+
"\u0351\u0352\7t\2\2\u0352\u0353\7g\2\2\u0353\u0354\7f\2\2\u0354\u0096"+
"\3\2\2\2\u0355\u0356\7u\2\2\u0356\u0357\7v\2\2\u0357\u0358\7c\2\2\u0358"+
"\u0359\7v\2\2\u0359\u035a\7k\2\2\u035a\u035b\7e\2\2\u035b\u0098\3\2\2"+
"\2\u035c\u035d\7w\2\2\u035d\u035e\7p\2\2\u035e\u035f\7q\2\2\u035f\u0360"+
"\7y\2\2\u0360\u0361\7p\2\2\u0361\u0362\7g\2\2\u0362\u0363\7f\2\2\u0363"+
"\u009a\3\2\2\2\u0364\u0365\7u\2\2\u0365\u0366\7c\2\2\u0366\u0367\7h\2"+
"\2\u0367\u0368\7g\2\2\u0368\u009c\3\2\2\2\u0369\u036a\7w\2\2\u036a\u036b"+
"\7p\2\2\u036b\u036c\7u\2\2\u036c\u036d\7c\2\2\u036d\u036e\7h\2\2\u036e"+
"\u036f\7g\2\2\u036f\u009e\3\2\2\2\u0370\u0371\7y\2\2\u0371\u0372\7g\2"+
"\2\u0372\u0373\7c\2\2\u0373\u0374\7m\2\2\u0374\u00a0\3\2\2\2\u0375\u0376"+
"\7k\2\2\u0376\u0377\7p\2\2\u0377\u0378\7v\2\2\u0378\u0379\7g\2\2\u0379"+
"\u037a\7t\2\2\u037a\u037b\7p\2\2\u037b\u037c\7c\2\2\u037c\u037d\7n\2\2"+
"\u037d\u00a2\3\2\2\2\u037e\u037f\7r\2\2\u037f\u0380\7t\2\2\u0380\u0381"+
"\7k\2\2\u0381\u0382\7x\2\2\u0382\u0383\7c\2\2\u0383\u0384\7v\2\2\u0384"+
"\u0385\7g\2\2\u0385\u00a4\3\2\2\2\u0386\u0387\7r\2\2\u0387\u0388\7w\2"+
"\2\u0388\u0389\7d\2\2\u0389\u038a\7n\2\2\u038a\u038b\7k\2\2\u038b\u038c"+
"\7e\2\2\u038c\u00a6\3\2\2\2\u038d\u038e\7k\2\2\u038e\u038f\7u\2\2\u038f"+
"\u00a8\3\2\2\2\u0390\u0391\7c\2\2\u0391\u0392\7u\2\2\u0392\u00aa\3\2\2"+
"\2\u0393\u0394\7v\2\2\u0394\u0395\7t\2\2\u0395\u0396\7{\2\2\u0396\u00ac"+
"\3\2\2\2\u0397\u0398\7a\2\2\u0398\u0399\7a\2\2\u0399\u039a\7H\2\2\u039a"+
"\u039b\7K\2\2\u039b\u039c\7N\2\2\u039c\u039d\7G\2\2\u039d\u039e\7a\2\2"+
"\u039e\u039f\7a\2\2\u039f\u00ae\3\2\2\2\u03a0\u03a1\7a\2\2\u03a1\u03a2"+
"\7a\2\2\u03a2\u03a3\7N\2\2\u03a3\u03a4\7K\2\2\u03a4\u03a5\7P\2\2\u03a5"+
"\u03a6\7G\2\2\u03a6\u03a7\7a\2\2\u03a7\u03a8\7a\2\2\u03a8\u00b0\3\2\2"+
"\2\u03a9\u03aa\7a\2\2\u03aa\u03ab\7a\2\2\u03ab\u03ac\7E\2\2\u03ac\u03ad"+
"\7Q\2\2\u03ad\u03ae\7N\2\2\u03ae\u03af\7W\2\2\u03af\u03b0\7O\2\2\u03b0"+
"\u03b1\7P\2\2\u03b1\u03b2\7a\2\2\u03b2\u03b3\7a\2\2\u03b3\u00b2\3\2\2"+
"\2\u03b4\u03b5\7a\2\2\u03b5\u03b6\7a\2\2\u03b6\u03b7\7H\2\2\u03b7\u03b8"+
"\7W\2\2\u03b8\u03b9\7P\2\2\u03b9\u03ba\7E\2\2\u03ba\u03bb\7V\2\2\u03bb"+
"\u03bc\7K\2\2\u03bc\u03bd\7Q\2\2\u03bd\u03be\7P\2\2\u03be\u03bf\7a\2\2"+
"\u03bf\u03c0\7a\2\2\u03c0\u00b4\3\2\2\2\u03c1\u03c2\7w\2\2\u03c2\u03c3"+
"\7p\2\2\u03c3\u03c4\7q\2\2\u03c4\u03c5\7y\2\2\u03c5\u03c6\7p\2\2\u03c6"+
"\u03c7\7g\2\2\u03c7\u03c8\7f\2\2\u03c8\u03c9\7*\2\2\u03c9\u03ca\7u\2\2"+
"\u03ca\u03cb\7c\2\2\u03cb\u03cc\7h\2\2\u03cc\u03cd\7g\2\2\u03cd\u03ce"+
"\7+\2\2\u03ce\u00b6\3\2\2\2\u03cf\u03d0\7w\2\2\u03d0\u03d1\7p\2\2\u03d1"+
"\u03d2\7q\2\2\u03d2\u03d3\7y\2\2\u03d3\u03d4\7p\2\2\u03d4\u03d5\7g\2\2"+
"\u03d5\u03d6\7f\2\2\u03d6\u03d7\7*\2\2\u03d7\u03d8\7w\2\2\u03d8\u03d9"+
"\7p\2\2\u03d9\u03da\7u\2\2\u03da\u03db\7c\2\2\u03db\u03dc\7h\2\2\u03dc"+
"\u03dd\7g\2\2\u03dd\u03de\7+\2\2\u03de\u00b8\3\2\2\2\u03df\u03e0\7%\2"+
"\2\u03e0\u03e1\7u\2\2\u03e1\u03e2\7g\2\2\u03e2\u03e3\7n\2\2\u03e3\u03e4"+
"\7g\2\2\u03e4\u03e5\7e\2\2\u03e5\u03e6\7v\2\2\u03e6\u03e7\7q\2\2\u03e7"+
"\u03e8\7t\2\2\u03e8\u00ba\3\2\2\2\u03e9\u03ea\7f\2\2\u03ea\u03eb\7{\2"+
"\2\u03eb\u03ec\7p\2\2\u03ec\u03ed\7c\2\2\u03ed\u03ee\7o\2\2\u03ee\u03ef"+
"\7k\2\2\u03ef\u03f0\7e\2\2\u03f0\u03f1\7V\2\2\u03f1\u03f2\7{\2\2\u03f2"+
"\u03f3\7r\2\2\u03f3\u03f4\7g\2\2\u03f4\u00bc\3\2\2\2\u03f5\u03f6\7V\2"+
"\2\u03f6\u03f7\7{\2\2\u03f7\u03f8\7r\2\2\u03f8\u03f9\7g\2\2\u03f9\u00be"+
"\3\2\2\2\u03fa\u03fb\7R\2\2\u03fb\u03fc\7t\2\2\u03fc\u03fd\7q\2\2\u03fd"+
"\u03fe\7v\2\2\u03fe\u03ff\7q\2\2\u03ff\u0400\7e\2\2\u0400\u0401\7q\2\2"+
"\u0401\u0402\7n\2\2\u0402\u00c0\3\2\2\2\u0403\u0404\7k\2\2\u0404\u0405"+
"\7p\2\2\u0405\u0406\7q\2\2\u0406\u0407\7w\2\2\u0407\u0408\7v\2\2\u0408"+
"\u00c2\3\2\2\2\u0409\u040a\7b\2\2\u040a\u00c4\3\2\2\2\u040b\u040c\7v\2"+
"\2\u040c\u040d\7t\2\2\u040d\u040e\7w\2\2\u040e\u040f\7g\2\2\u040f\u00c6"+
"\3\2\2\2\u0410\u0411\7h\2\2\u0411\u0412\7c\2\2\u0412\u0413\7n\2\2\u0413"+
"\u0414\7u\2\2\u0414\u0415\7g\2\2\u0415\u00c8\3\2\2\2\u0416\u0417\7p\2"+
"\2\u0417\u0418\7k\2\2\u0418\u0419\7n\2\2\u0419\u00ca\3\2\2\2\u041a\u041c"+
"\5\u00cdg\2\u041b\u041d\5\u0151\u00a9\2\u041c\u041b\3\2\2\2\u041c\u041d"+
"\3\2\2\2\u041d\u041e\3\2\2\2\u041e\u041f\5\u00cfh\2\u041f\u00cc\3\2\2"+
"\2\u0420\u0421\7k\2\2\u0421\u0422\7Q\2\2\u0422\u0460\7U\2\2\u0423\u0424"+
"\7k\2\2\u0424\u0425\7Q\2\2\u0425\u0426\7U\2\2\u0426\u0427\7C\2\2\u0427"+
"\u0428\7r\2\2\u0428\u0429\7r\2\2\u0429\u042a\7n\2\2\u042a\u042b\7k\2\2"+
"\u042b\u042c\7e\2\2\u042c\u042d\7c\2\2\u042d\u042e\7v\2\2\u042e\u042f"+
"\7k\2\2\u042f\u0430\7q\2\2\u0430\u0431\7p\2\2\u0431\u0432\7G\2\2\u0432"+
"\u0433\7z\2\2\u0433\u0434\7v\2\2\u0434\u0435\7g\2\2\u0435\u0436\7p\2\2"+
"\u0436\u0437\7u\2\2\u0437\u0438\7k\2\2\u0438\u0439\7q\2\2\u0439\u0460"+
"\7p\2\2\u043a\u043b\7Q\2\2\u043b\u043c\7U\2\2\u043c\u0460\7Z\2\2\u043d"+
"\u043e\7Q\2\2\u043e\u043f\7U\2\2\u043f\u0440\7Z\2\2\u0440\u0441\7C\2\2"+
"\u0441\u0442\7r\2\2\u0442\u0443\7r\2\2\u0443\u0444\7n\2\2\u0444\u0445"+
"\7k\2\2\u0445\u0446\7e\2\2\u0446\u0447\7c\2\2\u0447\u0448\7v\2\2\u0448"+
"\u0449\7k\2\2\u0449\u044a\7q\2\2\u044a\u044b\7p\2\2\u044b\u044c\7G\2\2"+
"\u044c\u044d\7z\2\2\u044d\u044e\7v\2\2\u044e\u044f\7g\2\2\u044f\u0450"+
"\7p\2\2\u0450\u0451\7u\2\2\u0451\u0452\7k\2\2\u0452\u0453\7q\2\2\u0453"+
"\u0460\7p\2\2\u0454\u0455\7y\2\2\u0455\u0456\7c\2\2\u0456\u0457\7v\2\2"+
"\u0457\u0458\7e\2\2\u0458\u0459\7j\2\2\u0459\u045a\7Q\2\2\u045a\u0460"+
"\7U\2\2\u045b\u045c\7v\2\2\u045c\u045d\7x\2\2\u045d\u045e\7Q\2\2\u045e"+
"\u0460\7U\2\2\u045f\u0420\3\2\2\2\u045f\u0423\3\2\2\2\u045f\u043a\3\2"+
"\2\2\u045f\u043d\3\2\2\2\u045f\u0454\3\2\2\2\u045f\u045b\3\2\2\2\u0460"+
"\u00ce\3\2\2\2\u0461\u046d\5\u0125\u0093\2\u0462\u0463\5\u0125\u0093\2"+
"\u0463\u0464\7\60\2\2\u0464\u0465\5\u0125\u0093\2\u0465\u046d\3\2\2\2"+
"\u0466\u0467\5\u0125\u0093\2\u0467\u0468\7\60\2\2\u0468\u0469\5\u0125"+
"\u0093\2\u0469\u046a\7\60\2\2\u046a\u046b\5\u0125\u0093\2\u046b\u046d"+
"\3\2\2\2\u046c\u0461\3\2\2\2\u046c\u0462\3\2\2\2\u046c\u0466\3\2\2\2\u046d"+
"\u00d0\3\2\2\2\u046e\u0470\5\u00d3j\2\u046f\u0471\5\u00d7l\2\u0470\u046f"+
"\3\2\2\2\u0470\u0471\3\2\2\2\u0471\u00d2\3\2\2\2\u0472\u0474\t\2\2\2\u0473"+
"\u0472\3\2\2\2\u0474\u00d4\3\2\2\2\u0475\u0478\t\3\2\2\u0476\u0478\5\u00d3"+
"j\2\u0477\u0475\3\2\2\2\u0477\u0476\3\2\2\2\u0478\u00d6\3\2\2\2\u0479"+
"\u047b\5\u00d5k\2\u047a\u0479\3\2\2\2\u047b\u047c\3\2\2\2\u047c\u047a"+
"\3\2\2\2\u047c\u047d\3\2\2\2\u047d\u00d8\3\2\2\2\u047e\u047f\7\60\2\2"+
"\u047f\u00da\3\2\2\2\u0480\u0481\7}\2\2\u0481\u00dc\3\2\2\2\u0482\u0483"+
"\7*\2\2\u0483\u00de\3\2\2\2\u0484\u0485\7]\2\2\u0485\u00e0\3\2\2\2\u0486"+
"\u0487\7\177\2\2\u0487\u00e2\3\2\2\2\u0488\u0489\7+\2\2\u0489\u00e4\3"+
"\2\2\2\u048a\u048b\7_\2\2\u048b\u00e6\3\2\2\2\u048c\u048d\7.\2\2\u048d"+
"\u00e8\3\2\2\2\u048e\u048f\7<\2\2\u048f\u00ea\3\2\2\2\u0490\u0491\7=\2"+
"\2\u0491\u00ec\3\2\2\2\u0492\u0493\7>\2\2\u0493\u00ee\3\2\2\2\u0494\u0495"+
"\7@\2\2\u0495\u00f0\3\2\2\2\u0496\u0497\7a\2\2\u0497\u00f2\3\2\2\2\u0498"+
"\u0499\7#\2\2\u0499\u00f4\3\2\2\2\u049a\u049b\7A\2\2\u049b\u00f6\3\2\2"+
"\2\u049c\u049d\7B\2\2\u049d\u00f8\3\2\2\2\u049e\u049f\7(\2\2\u049f\u00fa"+
"\3\2\2\2\u04a0\u04a1\7/\2\2\u04a1\u00fc\3\2\2\2\u04a2\u04a3\7?\2\2\u04a3"+
"\u00fe\3\2\2\2\u04a4\u04a5\7~\2\2\u04a5\u0100\3\2\2\2\u04a6\u04a7\7\61"+
"\2\2\u04a7\u0102\3\2\2\2\u04a8\u04a9\7-\2\2\u04a9\u0104\3\2\2\2\u04aa"+
"\u04ab\7,\2\2\u04ab\u0106\3\2\2\2\u04ac\u04ad\7\'\2\2\u04ad\u0108\3\2"+
"\2\2\u04ae\u04af\7`\2\2\u04af\u010a\3\2\2\2\u04b0\u04b1\7\u0080\2\2\u04b1"+
"\u010c\3\2\2\2\u04b2\u04b4\t\4\2\2\u04b3\u04b2\3\2\2\2\u04b4\u010e\3\2"+
"\2\2\u04b5\u04b7\t\5\2\2\u04b6\u04b5\3\2\2\2\u04b7\u0110\3\2\2\2\u04b8"+
"\u04b9\7&\2\2\u04b9\u04ba\5\u0125\u0093\2\u04ba\u0112\3\2\2\2\u04bb\u04bc"+
"\7\62\2\2\u04bc\u04bd\7d\2\2\u04bd\u04be\3\2\2\2\u04be\u04c0\5\u0115\u008b"+
"\2\u04bf\u04c1\5\u0119\u008d\2\u04c0\u04bf\3\2\2\2\u04c0\u04c1\3\2\2\2"+
"\u04c1\u0114\3\2\2\2\u04c2\u04c3\t\6\2\2\u04c3\u0116\3\2\2\2\u04c4\u04c7"+
"\5\u0115\u008b\2\u04c5\u04c7\7a\2\2\u04c6\u04c4\3\2\2\2\u04c6\u04c5\3"+
"\2\2\2\u04c7\u0118\3\2\2\2\u04c8\u04ca\5\u0117\u008c\2\u04c9\u04c8\3\2"+
"\2\2\u04ca\u04cb\3\2\2\2\u04cb\u04c9\3\2\2\2\u04cb\u04cc\3\2\2\2\u04cc"+
"\u011a\3\2\2\2\u04cd\u04ce\7\62\2\2\u04ce\u04cf\7q\2\2\u04cf\u04d0\3\2"+
"\2\2\u04d0\u04d2\5\u011d\u008f\2\u04d1\u04d3\5\u0121\u0091\2\u04d2\u04d1"+
"\3\2\2\2\u04d2\u04d3\3\2\2\2\u04d3\u011c\3\2\2\2\u04d4\u04d5\t\7\2\2\u04d5"+
"\u011e\3\2\2\2\u04d6\u04d9\5\u011d\u008f\2\u04d7\u04d9\7a\2\2\u04d8\u04d6"+
"\3\2\2\2\u04d8\u04d7\3\2\2\2\u04d9\u0120\3\2\2\2\u04da\u04dc\5\u011f\u0090"+
"\2\u04db\u04da\3\2\2\2\u04dc\u04dd\3\2\2\2\u04dd\u04db\3\2\2\2\u04dd\u04de"+
"\3\2\2\2\u04de\u0122\3\2\2\2\u04df\u04e3\t\b\2\2\u04e0\u04e2\t\t\2\2\u04e1"+
"\u04e0\3\2\2\2\u04e2\u04e5\3\2\2\2\u04e3\u04e1\3\2\2\2\u04e3\u04e4\3\2"+
"\2\2\u04e4\u0124\3\2\2\2\u04e5\u04e3\3\2\2\2\u04e6\u04e8\t\b\2\2\u04e7"+
"\u04e6\3\2\2\2\u04e8\u04e9\3\2\2\2\u04e9\u04e7\3\2\2\2\u04e9\u04ea\3\2"+
"\2\2\u04ea\u0126\3\2\2\2\u04eb\u04ec\t\b\2\2\u04ec\u0128\3\2\2\2\u04ed"+
"\u04f0\5\u0127\u0094\2\u04ee\u04f0\7a\2\2\u04ef\u04ed\3\2\2\2\u04ef\u04ee"+
"\3\2\2\2\u04f0\u012a\3\2\2\2\u04f1\u04f3\5\u0129\u0095\2\u04f2\u04f1\3"+
"\2\2\2\u04f3\u04f4\3\2\2\2\u04f4\u04f2\3\2\2\2\u04f4\u04f5\3\2\2\2\u04f5"+
"\u012c\3\2\2\2\u04f6\u04f7\7\62\2\2\u04f7\u04f8\7z\2\2\u04f8\u04f9\3\2"+
"\2\2\u04f9\u04fb\5\u012f\u0098\2\u04fa\u04fc\5\u0133\u009a\2\u04fb\u04fa"+
"\3\2\2\2\u04fb\u04fc\3\2\2\2\u04fc\u012e\3\2\2\2\u04fd\u04fe\t\n\2\2\u04fe"+
"\u0130\3\2\2\2\u04ff\u0502\5\u012f\u0098\2\u0500\u0502\7a\2\2\u0501\u04ff"+
"\3\2\2\2\u0501\u0500\3\2\2\2\u0502\u0132\3\2\2\2\u0503\u0505\5\u0131\u0099"+
"\2\u0504\u0503\3\2\2\2\u0505\u0506\3\2\2\2\u0506\u0504\3\2\2\2\u0506\u0507"+
"\3\2\2\2\u0507\u0134\3\2\2\2\u0508\u050a\5\u0123\u0092\2\u0509\u050b\5"+
"\u0137\u009c\2\u050a\u0509\3\2\2\2\u050a\u050b\3\2\2\2\u050b\u050d\3\2"+
"\2\2\u050c\u050e\5\u0139\u009d\2\u050d\u050c\3\2\2\2\u050d\u050e\3\2\2"+
"\2\u050e\u0516\3\2\2\2\u050f\u0511\5\u012d\u0097\2\u0510\u0512\5\u013b"+
"\u009e\2\u0511\u0510\3\2\2\2\u0511\u0512\3\2\2\2\u0512\u0513\3\2\2\2\u0513"+
"\u0514\5\u013d\u009f\2\u0514\u0516\3\2\2\2\u0515\u0508\3\2\2\2\u0515\u050f"+
"\3\2\2\2\u0516\u0136\3\2\2\2\u0517\u0518\7\60\2\2\u0518\u0519\5\u0123"+
"\u0092\2\u0519\u0138\3\2\2\2\u051a\u051c\5\u013f\u00a0\2\u051b\u051d\5"+
"\u0143\u00a2\2\u051c\u051b\3\2\2\2\u051c\u051d\3\2\2\2\u051d\u051e\3\2"+
"\2\2\u051e\u051f\5\u0123\u0092\2\u051f\u013a\3\2\2\2\u0520\u0521\7\60"+
"\2\2\u0521\u0523\5\u012f\u0098\2\u0522\u0524\5\u0133\u009a\2\u0523\u0522"+
"\3\2\2\2\u0523\u0524\3\2\2\2\u0524\u013c\3\2\2\2\u0525\u0527\5\u0141\u00a1"+
"\2\u0526\u0528\5\u0143\u00a2\2\u0527\u0526\3\2\2\2\u0527\u0528\3\2\2\2"+
"\u0528\u0529\3\2\2\2\u0529\u052a\5\u0123\u0092\2\u052a\u013e\3\2\2\2\u052b"+
"\u052c\t\13\2\2\u052c\u0140\3\2\2\2\u052d\u052e\t\f\2\2\u052e\u0142\3"+
"\2\2\2\u052f\u0530\t\r\2\2\u0530\u0144\3\2\2\2\u0531\u0533\7$\2\2\u0532"+
"\u0534\5\u0147\u00a4\2\u0533\u0532\3\2\2\2\u0533\u0534\3\2\2\2\u0534\u0535"+
"\3\2\2\2\u0535\u0536\7$\2\2\u0536\u0146\3\2\2\2\u0537\u0539\5\u0149\u00a5"+
"\2\u0538\u0537\3\2\2\2\u0539\u053a\3\2\2\2\u053a\u0538\3\2\2\2\u053a\u053b"+
"\3\2\2\2\u053b\u0148\3\2\2\2\u053c\u053f\5\u014b\u00a6\2\u053d\u053f\n"+
"\16\2\2\u053e\u053c\3\2\2\2\u053e\u053d\3\2\2\2\u053f\u014a\3\2\2\2\u0540"+
"\u0541\7^\2\2\u0541\u0561\t\17\2\2\u0542\u0543\7^\2\2\u0543\u0544\7z\2"+
"\2\u0544\u0545\3\2\2\2\u0545\u0546\5\u012f\u0098\2\u0546\u0547\5\u012f"+
"\u0098\2\u0547\u0561\3\2\2\2\u0548\u0549\7^\2\2\u0549\u054a\7w\2\2\u054a"+
"\u054b\3\2\2\2\u054b\u054c\7}\2\2\u054c\u054d\5\u012f\u0098\2\u054d\u054e"+
"\5\u012f\u0098\2\u054e\u054f\5\u012f\u0098\2\u054f\u0550\5\u012f\u0098"+
"\2\u0550\u0551\7\177\2\2\u0551\u0561\3\2\2\2\u0552\u0553\7^\2\2\u0553"+
"\u0554\7w\2\2\u0554\u0555\3\2\2\2\u0555\u0556\7}\2\2\u0556\u0557\5\u012f"+
"\u0098\2\u0557\u0558\5\u012f\u0098\2\u0558\u0559\5\u012f\u0098\2\u0559"+
"\u055a\5\u012f\u0098\2\u055a\u055b\5\u012f\u0098\2\u055b\u055c\5\u012f"+
"\u0098\2\u055c\u055d\5\u012f\u0098\2\u055d\u055e\5\u012f\u0098\2\u055e"+
"\u055f\7\177\2\2\u055f\u0561\3\2\2\2\u0560\u0540\3\2\2\2\u0560\u0542\3"+
"\2\2\2\u0560\u0548\3\2\2\2\u0560\u0552\3\2\2\2\u0561\u014c\3\2\2\2\u0562"+
"\u0566\7$\2\2\u0563\u0565\5\u014f\u00a8\2\u0564\u0563\3\2\2\2\u0565\u0568"+
"\3\2\2\2\u0566\u0564\3\2\2\2\u0566\u0567\3\2\2\2\u0567\u0569\3\2\2\2\u0568"+
"\u0566\3\2\2\2\u0569\u056a\7$\2\2\u056a\u014e\3\2\2\2\u056b\u056c\7^\2"+
"\2\u056c\u056d\7*\2\2\u056d\u0570\3\2\2\2\u056e\u0571\5\u014d\u00a7\2"+
"\u056f\u0571\5\u014f\u00a8\2\u0570\u056e\3\2\2\2\u0570\u056f\3\2\2\2\u0571"+
"\u0572\3\2\2\2\u0572\u0570\3\2\2\2\u0572\u0573\3\2\2\2\u0573\u0574\3\2"+
"\2\2\u0574\u0575\7+\2\2\u0575\u0578\3\2\2\2\u0576\u0578\5\u0149\u00a5"+
"\2\u0577\u056b\3\2\2\2\u0577\u0576\3\2\2\2\u0578\u0150\3\2\2\2\u0579\u057b"+
"\t\20\2\2\u057a\u0579\3\2\2\2\u057b\u057c\3\2\2\2\u057c\u057a\3\2\2\2"+
"\u057c\u057d\3\2\2\2\u057d\u057e\3\2\2\2\u057e\u057f\b\u00a9\2\2\u057f"+
"\u0152\3\2\2\2\u0580\u0581\7\61\2\2\u0581\u0582\7,\2\2\u0582\u0587\3\2"+
"\2\2\u0583\u0586\5\u0153\u00aa\2\u0584\u0586\13\2\2\2\u0585\u0583\3\2"+
"\2\2\u0585\u0584\3\2\2\2\u0586\u0589\3\2\2\2\u0587\u0588\3\2\2\2\u0587"+
"\u0585\3\2\2\2\u0588\u058a\3\2\2\2\u0589\u0587\3\2\2\2\u058a\u058b\7,"+
"\2\2\u058b\u058c\7\61\2\2\u058c\u058d\3\2\2\2\u058d\u058e\b\u00aa\2\2"+
"\u058e\u0154\3\2\2\2\u058f\u0590\7\61\2\2\u0590\u0591\7\61\2\2\u0591\u0595"+
"\3\2\2\2\u0592\u0594\13\2\2\2\u0593\u0592\3\2\2\2\u0594\u0597\3\2\2\2"+
"\u0595\u0596\3\2\2\2\u0595\u0593\3\2\2\2\u0596\u0599\3\2\2\2\u0597\u0595"+
"\3\2\2\2\u0598\u059a\t\21\2\2\u0599\u0598\3\2\2\2\u059a\u059b\3\2\2\2"+
"\u059b\u059c\b\u00ab\2\2\u059c\u0156\3\2\2\2-\2\u041c\u045f\u046c\u0470"+
"\u0473\u0477\u047c\u04b3\u04b6\u04c0\u04c6\u04cb\u04d2\u04d8\u04dd\u04e3"+
"\u04e9\u04ef\u04f4\u04fb\u0501\u0506\u050a\u050d\u0511\u0515\u051c\u0523"+
"\u0527\u0533\u053a\u053e\u0560\u0566\u0570\u0572\u0577\u057c\u0585\u0587"+
"\u0595\u0599\3\2\3\2";
// Augmented transition network for this lexer, reconstructed from the
// serialized form in _serializedATN above (appears ANTLR-generated; such
// files are normally regenerated rather than edited by hand).
public static final ATN _ATN =
new ATNDeserializer().deserialize(_serializedATN.toCharArray());
static {
// Allocate one DFA cache per ATN decision point; each starts from the
// decision's start state and is filled in lazily at lex time.
_decisionToDFA = new DFA[_ATN.getNumberOfDecisions()];
for (int i = 0; i < _ATN.getNumberOfDecisions(); i++) {
_decisionToDFA[i] = new DFA(_ATN.getDecisionState(i), i);
}
}
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.salesforce.dto.generated;
import java.util.ArrayList;
import java.util.List;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.thoughtworks.xstream.annotations.XStreamAlias;
import com.thoughtworks.xstream.annotations.XStreamConverter;
import org.apache.camel.component.salesforce.api.PicklistEnumConverter;
import org.apache.camel.component.salesforce.api.dto.AbstractDescribedSObjectBase;
import org.apache.camel.component.salesforce.api.dto.SObjectDescription;
import org.apache.camel.component.salesforce.api.dto.SObjectDescriptionUrls;
import org.apache.camel.component.salesforce.api.dto.SObjectField;
//CHECKSTYLE:OFF
/**
* Salesforce DTO for SObject Account
*/
@XStreamAlias("Account")
public class Account extends AbstractDescribedSObjectBase {

    public Account() {
        getAttributes().setType("Account");
    }

    /** Statically-built describe metadata for the Account SObject (built once, shared). */
    private static final SObjectDescription DESCRIPTION = createSObjectDescription();

    private String MasterRecordId;

    @JsonProperty("MasterRecordId")
    public String getMasterRecordId() {
        return this.MasterRecordId;
    }

    @JsonProperty("MasterRecordId")
    public void setMasterRecordId(String MasterRecordId) {
        this.MasterRecordId = MasterRecordId;
    }

    @XStreamAlias("MasterRecord")
    private Account MasterRecord;

    @JsonProperty("MasterRecord")
    public Account getMasterRecord() {
        return this.MasterRecord;
    }

    // FIX: this setter was missing. The field is private and @JsonProperty was
    // present only on the getter, so Jackson could not populate MasterRecord on
    // deserialization; every sibling relationship property (Parent,
    // ChildAccounts, Contacts) exposes both getter and setter.
    @JsonProperty("MasterRecord")
    public void setMasterRecord(Account MasterRecord) {
        this.MasterRecord = MasterRecord;
    }

    private String ParentId;

    @JsonProperty("ParentId")
    public String getParentId() {
        return this.ParentId;
    }

    @JsonProperty("ParentId")
    public void setParentId(String ParentId) {
        this.ParentId = ParentId;
    }

    @XStreamAlias("Parent")
    private Account Parent;

    @JsonProperty("Parent")
    public Account getParent() {
        return this.Parent;
    }

    @JsonProperty("Parent")
    public void setParent(Account Parent) {
        this.Parent = Parent;
    }

    // --- Billing address component fields ---

    private String BillingStreet;

    @JsonProperty("BillingStreet")
    public String getBillingStreet() {
        return this.BillingStreet;
    }

    @JsonProperty("BillingStreet")
    public void setBillingStreet(String BillingStreet) {
        this.BillingStreet = BillingStreet;
    }

    private String BillingCity;

    @JsonProperty("BillingCity")
    public String getBillingCity() {
        return this.BillingCity;
    }

    @JsonProperty("BillingCity")
    public void setBillingCity(String BillingCity) {
        this.BillingCity = BillingCity;
    }

    private String BillingState;

    @JsonProperty("BillingState")
    public String getBillingState() {
        return this.BillingState;
    }

    @JsonProperty("BillingState")
    public void setBillingState(String BillingState) {
        this.BillingState = BillingState;
    }

    private String BillingPostalCode;

    @JsonProperty("BillingPostalCode")
    public String getBillingPostalCode() {
        return this.BillingPostalCode;
    }

    @JsonProperty("BillingPostalCode")
    public void setBillingPostalCode(String BillingPostalCode) {
        this.BillingPostalCode = BillingPostalCode;
    }

    private String BillingCountry;

    @JsonProperty("BillingCountry")
    public String getBillingCountry() {
        return this.BillingCountry;
    }

    @JsonProperty("BillingCountry")
    public void setBillingCountry(String BillingCountry) {
        this.BillingCountry = BillingCountry;
    }

    private Double BillingLatitude;

    @JsonProperty("BillingLatitude")
    public Double getBillingLatitude() {
        return this.BillingLatitude;
    }

    @JsonProperty("BillingLatitude")
    public void setBillingLatitude(Double BillingLatitude) {
        this.BillingLatitude = BillingLatitude;
    }

    private Double BillingLongitude;

    @JsonProperty("BillingLongitude")
    public Double getBillingLongitude() {
        return this.BillingLongitude;
    }

    @JsonProperty("BillingLongitude")
    public void setBillingLongitude(Double BillingLongitude) {
        this.BillingLongitude = BillingLongitude;
    }

    private org.apache.camel.component.salesforce.api.dto.Address BillingAddress;

    @JsonProperty("BillingAddress")
    public org.apache.camel.component.salesforce.api.dto.Address getBillingAddress() {
        return this.BillingAddress;
    }

    @JsonProperty("BillingAddress")
    public void setBillingAddress(org.apache.camel.component.salesforce.api.dto.Address BillingAddress) {
        this.BillingAddress = BillingAddress;
    }

    // --- Shipping address component fields ---

    private String ShippingStreet;

    @JsonProperty("ShippingStreet")
    public String getShippingStreet() {
        return this.ShippingStreet;
    }

    @JsonProperty("ShippingStreet")
    public void setShippingStreet(String ShippingStreet) {
        this.ShippingStreet = ShippingStreet;
    }

    private String ShippingCity;

    @JsonProperty("ShippingCity")
    public String getShippingCity() {
        return this.ShippingCity;
    }

    @JsonProperty("ShippingCity")
    public void setShippingCity(String ShippingCity) {
        this.ShippingCity = ShippingCity;
    }

    private String ShippingState;

    @JsonProperty("ShippingState")
    public String getShippingState() {
        return this.ShippingState;
    }

    @JsonProperty("ShippingState")
    public void setShippingState(String ShippingState) {
        this.ShippingState = ShippingState;
    }

    private String ShippingPostalCode;

    @JsonProperty("ShippingPostalCode")
    public String getShippingPostalCode() {
        return this.ShippingPostalCode;
    }

    @JsonProperty("ShippingPostalCode")
    public void setShippingPostalCode(String ShippingPostalCode) {
        this.ShippingPostalCode = ShippingPostalCode;
    }

    private String ShippingCountry;

    @JsonProperty("ShippingCountry")
    public String getShippingCountry() {
        return this.ShippingCountry;
    }

    @JsonProperty("ShippingCountry")
    public void setShippingCountry(String ShippingCountry) {
        this.ShippingCountry = ShippingCountry;
    }

    private Double ShippingLatitude;

    @JsonProperty("ShippingLatitude")
    public Double getShippingLatitude() {
        return this.ShippingLatitude;
    }

    @JsonProperty("ShippingLatitude")
    public void setShippingLatitude(Double ShippingLatitude) {
        this.ShippingLatitude = ShippingLatitude;
    }

    private Double ShippingLongitude;

    @JsonProperty("ShippingLongitude")
    public Double getShippingLongitude() {
        return this.ShippingLongitude;
    }

    @JsonProperty("ShippingLongitude")
    public void setShippingLongitude(Double ShippingLongitude) {
        this.ShippingLongitude = ShippingLongitude;
    }

    private org.apache.camel.component.salesforce.api.dto.Address ShippingAddress;

    @JsonProperty("ShippingAddress")
    public org.apache.camel.component.salesforce.api.dto.Address getShippingAddress() {
        return this.ShippingAddress;
    }

    @JsonProperty("ShippingAddress")
    public void setShippingAddress(org.apache.camel.component.salesforce.api.dto.Address ShippingAddress) {
        this.ShippingAddress = ShippingAddress;
    }

    // --- Contact / company identification fields ---

    private String Phone;

    @JsonProperty("Phone")
    public String getPhone() {
        return this.Phone;
    }

    @JsonProperty("Phone")
    public void setPhone(String Phone) {
        this.Phone = Phone;
    }

    private String Fax;

    @JsonProperty("Fax")
    public String getFax() {
        return this.Fax;
    }

    @JsonProperty("Fax")
    public void setFax(String Fax) {
        this.Fax = Fax;
    }

    private String AccountNumber;

    @JsonProperty("AccountNumber")
    public String getAccountNumber() {
        return this.AccountNumber;
    }

    @JsonProperty("AccountNumber")
    public void setAccountNumber(String AccountNumber) {
        this.AccountNumber = AccountNumber;
    }

    private String Website;

    @JsonProperty("Website")
    public String getWebsite() {
        return this.Website;
    }

    @JsonProperty("Website")
    public void setWebsite(String Website) {
        this.Website = Website;
    }

    private String PhotoUrl;

    @JsonProperty("PhotoUrl")
    public String getPhotoUrl() {
        return this.PhotoUrl;
    }

    @JsonProperty("PhotoUrl")
    public void setPhotoUrl(String PhotoUrl) {
        this.PhotoUrl = PhotoUrl;
    }

    private String Sic;

    @JsonProperty("Sic")
    public String getSic() {
        return this.Sic;
    }

    @JsonProperty("Sic")
    public void setSic(String Sic) {
        this.Sic = Sic;
    }

    // Picklist field: serialized via the enum converter for XStream.
    @XStreamConverter(PicklistEnumConverter.class)
    private Account_IndustryEnum Industry;

    @JsonProperty("Industry")
    public Account_IndustryEnum getIndustry() {
        return this.Industry;
    }

    @JsonProperty("Industry")
    public void setIndustry(Account_IndustryEnum Industry) {
        this.Industry = Industry;
    }

    private Double AnnualRevenue;

    @JsonProperty("AnnualRevenue")
    public Double getAnnualRevenue() {
        return this.AnnualRevenue;
    }

    @JsonProperty("AnnualRevenue")
    public void setAnnualRevenue(Double AnnualRevenue) {
        this.AnnualRevenue = AnnualRevenue;
    }

    private Integer NumberOfEmployees;

    @JsonProperty("NumberOfEmployees")
    public Integer getNumberOfEmployees() {
        return this.NumberOfEmployees;
    }

    @JsonProperty("NumberOfEmployees")
    public void setNumberOfEmployees(Integer NumberOfEmployees) {
        this.NumberOfEmployees = NumberOfEmployees;
    }

    private String TickerSymbol;

    @JsonProperty("TickerSymbol")
    public String getTickerSymbol() {
        return this.TickerSymbol;
    }

    @JsonProperty("TickerSymbol")
    public void setTickerSymbol(String TickerSymbol) {
        this.TickerSymbol = TickerSymbol;
    }

    private String Description;

    @JsonProperty("Description")
    public String getDescription() {
        return this.Description;
    }

    @JsonProperty("Description")
    public void setDescription(String Description) {
        this.Description = Description;
    }

    private String Site;

    @JsonProperty("Site")
    public String getSite() {
        return this.Site;
    }

    @JsonProperty("Site")
    public void setSite(String Site) {
        this.Site = Site;
    }

    private String Jigsaw;

    @JsonProperty("Jigsaw")
    public String getJigsaw() {
        return this.Jigsaw;
    }

    @JsonProperty("Jigsaw")
    public void setJigsaw(String Jigsaw) {
        this.Jigsaw = Jigsaw;
    }

    private String JigsawCompanyId;

    @JsonProperty("JigsawCompanyId")
    public String getJigsawCompanyId() {
        return this.JigsawCompanyId;
    }

    @JsonProperty("JigsawCompanyId")
    public void setJigsawCompanyId(String JigsawCompanyId) {
        this.JigsawCompanyId = JigsawCompanyId;
    }

    private String DunsNumber;

    @JsonProperty("DunsNumber")
    public String getDunsNumber() {
        return this.DunsNumber;
    }

    @JsonProperty("DunsNumber")
    public void setDunsNumber(String DunsNumber) {
        this.DunsNumber = DunsNumber;
    }

    private String Tradestyle;

    @JsonProperty("Tradestyle")
    public String getTradestyle() {
        return this.Tradestyle;
    }

    @JsonProperty("Tradestyle")
    public void setTradestyle(String Tradestyle) {
        this.Tradestyle = Tradestyle;
    }

    private String NaicsCode;

    @JsonProperty("NaicsCode")
    public String getNaicsCode() {
        return this.NaicsCode;
    }

    @JsonProperty("NaicsCode")
    public void setNaicsCode(String NaicsCode) {
        this.NaicsCode = NaicsCode;
    }

    private String NaicsDesc;

    @JsonProperty("NaicsDesc")
    public String getNaicsDesc() {
        return this.NaicsDesc;
    }

    @JsonProperty("NaicsDesc")
    public void setNaicsDesc(String NaicsDesc) {
        this.NaicsDesc = NaicsDesc;
    }

    private String YearStarted;

    @JsonProperty("YearStarted")
    public String getYearStarted() {
        return this.YearStarted;
    }

    @JsonProperty("YearStarted")
    public void setYearStarted(String YearStarted) {
        this.YearStarted = YearStarted;
    }

    private String SicDesc;

    @JsonProperty("SicDesc")
    public String getSicDesc() {
        return this.SicDesc;
    }

    @JsonProperty("SicDesc")
    public void setSicDesc(String SicDesc) {
        this.SicDesc = SicDesc;
    }

    private String DandbCompanyId;

    @JsonProperty("DandbCompanyId")
    public String getDandbCompanyId() {
        return this.DandbCompanyId;
    }

    @JsonProperty("DandbCompanyId")
    public void setDandbCompanyId(String DandbCompanyId) {
        this.DandbCompanyId = DandbCompanyId;
    }

    private String OperatingHoursId;

    @JsonProperty("OperatingHoursId")
    public String getOperatingHoursId() {
        return this.OperatingHoursId;
    }

    @JsonProperty("OperatingHoursId")
    public void setOperatingHoursId(String OperatingHoursId) {
        this.OperatingHoursId = OperatingHoursId;
    }

    // --- Custom fields (__c / __s suffixes) ---

    private Double Shipping_Location__Latitude__s;

    @JsonProperty("Shipping_Location__Latitude__s")
    public Double getShipping_Location__Latitude__s() {
        return this.Shipping_Location__Latitude__s;
    }

    @JsonProperty("Shipping_Location__Latitude__s")
    public void setShipping_Location__Latitude__s(Double Shipping_Location__Latitude__s) {
        this.Shipping_Location__Latitude__s = Shipping_Location__Latitude__s;
    }

    private Double Shipping_Location__Longitude__s;

    @JsonProperty("Shipping_Location__Longitude__s")
    public Double getShipping_Location__Longitude__s() {
        return this.Shipping_Location__Longitude__s;
    }

    @JsonProperty("Shipping_Location__Longitude__s")
    public void setShipping_Location__Longitude__s(Double Shipping_Location__Longitude__s) {
        this.Shipping_Location__Longitude__s = Shipping_Location__Longitude__s;
    }

    private org.apache.camel.component.salesforce.api.dto.GeoLocation Shipping_Location__c;

    @JsonProperty("Shipping_Location__c")
    public org.apache.camel.component.salesforce.api.dto.GeoLocation getShipping_Location__c() {
        return this.Shipping_Location__c;
    }

    @JsonProperty("Shipping_Location__c")
    public void setShipping_Location__c(org.apache.camel.component.salesforce.api.dto.GeoLocation Shipping_Location__c) {
        this.Shipping_Location__c = Shipping_Location__c;
    }

    private String External_Id__c;

    @JsonProperty("External_Id__c")
    public String getExternal_Id__c() {
        return this.External_Id__c;
    }

    @JsonProperty("External_Id__c")
    public void setExternal_Id__c(String External_Id__c) {
        this.External_Id__c = External_Id__c;
    }

    // --- Child relationship query results ---

    private QueryRecordsAccount ChildAccounts;

    @JsonProperty("ChildAccounts")
    public QueryRecordsAccount getChildAccounts() {
        return ChildAccounts;
    }

    @JsonProperty("ChildAccounts")
    public void setChildAccounts(QueryRecordsAccount ChildAccounts) {
        this.ChildAccounts = ChildAccounts;
    }

    private QueryRecordsContact Contacts;

    @JsonProperty("Contacts")
    public QueryRecordsContact getContacts() {
        return Contacts;
    }

    @JsonProperty("Contacts")
    public void setContacts(QueryRecordsContact Contacts) {
        this.Contacts = Contacts;
    }

    /**
     * Returns the statically-built describe metadata for this SObject.
     */
    @Override
    public final SObjectDescription description() {
        return DESCRIPTION;
    }

    /**
     * Builds the {@link SObjectDescription} (field metadata plus REST URL
     * templates) corresponding to the Salesforce describe result this class
     * was generated from. Called once to initialize {@link #DESCRIPTION}.
     */
    private static SObjectDescription createSObjectDescription() {
        final SObjectDescription description = new SObjectDescription();

        final List<SObjectField> fields1 = new ArrayList<>();
        description.setFields(fields1);

        final SObjectField sObjectField1 = createField("Id", "Account ID", "id", "tns:ID", 18, false, false, false, false, false, false, true);
        fields1.add(sObjectField1);
        final SObjectField sObjectField2 = createField("IsDeleted", "Deleted", "boolean", "xsd:boolean", 0, false, false, false, false, false, false, false);
        fields1.add(sObjectField2);
        final SObjectField sObjectField3 = createField("MasterRecordId", "Master Record ID", "reference", "tns:ID", 18, false, true, false, false, false, false, false);
        fields1.add(sObjectField3);
        final SObjectField sObjectField4 = createField("Name", "Account Name", "string", "xsd:string", 255, false, false, true, false, false, false, false);
        fields1.add(sObjectField4);
        final SObjectField sObjectField5 = createField("Type", "Account Type", "picklist", "xsd:string", 40, false, true, false, false, false, false, false);
        fields1.add(sObjectField5);
        final SObjectField sObjectField6 = createField("ParentId", "Parent Account ID", "reference", "tns:ID", 18, false, true, false, false, false, false, false);
        fields1.add(sObjectField6);
        final SObjectField sObjectField7 = createField("BillingStreet", "Billing Street", "textarea", "xsd:string", 255, false, true, false, false, false, false, false);
        fields1.add(sObjectField7);
        final SObjectField sObjectField8 = createField("BillingCity", "Billing City", "string", "xsd:string", 40, false, true, false, false, false, false, false);
        fields1.add(sObjectField8);
        final SObjectField sObjectField9 = createField("BillingState", "Billing State/Province", "string", "xsd:string", 80, false, true, false, false, false, false, false);
        fields1.add(sObjectField9);
        final SObjectField sObjectField10 = createField("BillingPostalCode", "Billing Zip/Postal Code", "string", "xsd:string", 20, false, true, false, false, false, false, false);
        fields1.add(sObjectField10);
        final SObjectField sObjectField11 = createField("BillingCountry", "Billing Country", "string", "xsd:string", 80, false, true, false, false, false, false, false);
        fields1.add(sObjectField11);
        final SObjectField sObjectField12 = createField("BillingLatitude", "Billing Latitude", "double", "xsd:double", 0, false, true, false, false, false, false, false);
        fields1.add(sObjectField12);
        final SObjectField sObjectField13 = createField("BillingLongitude", "Billing Longitude", "double", "xsd:double", 0, false, true, false, false, false, false, false);
        fields1.add(sObjectField13);
        final SObjectField sObjectField14 = createField("BillingGeocodeAccuracy", "Billing Geocode Accuracy", "picklist", "xsd:string", 40, false, true, false, false, false, false,
            false);
        fields1.add(sObjectField14);
        final SObjectField sObjectField15 = createField("BillingAddress", "Billing Address", "address", "urn:address", 0, false, true, false, false, false, false, false);
        fields1.add(sObjectField15);
        final SObjectField sObjectField16 = createField("ShippingStreet", "Shipping Street", "textarea", "xsd:string", 255, false, true, false, false, false, false, false);
        fields1.add(sObjectField16);
        final SObjectField sObjectField17 = createField("ShippingCity", "Shipping City", "string", "xsd:string", 40, false, true, false, false, false, false, false);
        fields1.add(sObjectField17);
        final SObjectField sObjectField18 = createField("ShippingState", "Shipping State/Province", "string", "xsd:string", 80, false, true, false, false, false, false, false);
        fields1.add(sObjectField18);
        final SObjectField sObjectField19 = createField("ShippingPostalCode", "Shipping Zip/Postal Code", "string", "xsd:string", 20, false, true, false, false, false, false,
            false);
        fields1.add(sObjectField19);
        final SObjectField sObjectField20 = createField("ShippingCountry", "Shipping Country", "string", "xsd:string", 80, false, true, false, false, false, false, false);
        fields1.add(sObjectField20);
        final SObjectField sObjectField21 = createField("ShippingLatitude", "Shipping Latitude", "double", "xsd:double", 0, false, true, false, false, false, false, false);
        fields1.add(sObjectField21);
        final SObjectField sObjectField22 = createField("ShippingLongitude", "Shipping Longitude", "double", "xsd:double", 0, false, true, false, false, false, false, false);
        fields1.add(sObjectField22);
        final SObjectField sObjectField23 = createField("ShippingGeocodeAccuracy", "Shipping Geocode Accuracy", "picklist", "xsd:string", 40, false, true, false, false, false,
            false, false);
        fields1.add(sObjectField23);
        final SObjectField sObjectField24 = createField("ShippingAddress", "Shipping Address", "address", "urn:address", 0, false, true, false, false, false, false, false);
        fields1.add(sObjectField24);
        final SObjectField sObjectField25 = createField("Phone", "Account Phone", "phone", "xsd:string", 40, false, true, false, false, false, false, false);
        fields1.add(sObjectField25);
        final SObjectField sObjectField26 = createField("Fax", "Account Fax", "phone", "xsd:string", 40, false, true, false, false, false, false, false);
        fields1.add(sObjectField26);
        final SObjectField sObjectField27 = createField("AccountNumber", "Account Number", "string", "xsd:string", 40, false, true, false, false, false, false, false);
        fields1.add(sObjectField27);
        final SObjectField sObjectField28 = createField("Website", "Website", "url", "xsd:string", 255, false, true, false, false, false, false, false);
        fields1.add(sObjectField28);
        final SObjectField sObjectField29 = createField("PhotoUrl", "Photo URL", "url", "xsd:string", 255, false, true, false, false, false, false, false);
        fields1.add(sObjectField29);
        final SObjectField sObjectField30 = createField("Sic", "SIC Code", "string", "xsd:string", 20, false, true, false, false, false, false, false);
        fields1.add(sObjectField30);
        final SObjectField sObjectField31 = createField("Industry", "Industry", "picklist", "xsd:string", 40, false, true, false, false, false, false, false);
        fields1.add(sObjectField31);
        final SObjectField sObjectField32 = createField("AnnualRevenue", "Annual Revenue", "currency", "xsd:double", 0, false, true, false, false, false, false, false);
        fields1.add(sObjectField32);
        final SObjectField sObjectField33 = createField("NumberOfEmployees", "Employees", "int", "xsd:int", 0, false, true, false, false, false, false, false);
        fields1.add(sObjectField33);
        final SObjectField sObjectField34 = createField("Ownership", "Ownership", "picklist", "xsd:string", 40, false, true, false, false, false, false, false);
        fields1.add(sObjectField34);
        final SObjectField sObjectField35 = createField("TickerSymbol", "Ticker Symbol", "string", "xsd:string", 20, false, true, false, false, false, false, false);
        fields1.add(sObjectField35);
        final SObjectField sObjectField36 = createField("Description", "Account Description", "textarea", "xsd:string", 32000, false, true, false, false, false, false, false);
        fields1.add(sObjectField36);
        final SObjectField sObjectField37 = createField("Rating", "Account Rating", "picklist", "xsd:string", 40, false, true, false, false, false, false, false);
        fields1.add(sObjectField37);
        final SObjectField sObjectField38 = createField("Site", "Account Site", "string", "xsd:string", 80, false, true, false, false, false, false, false);
        fields1.add(sObjectField38);
        final SObjectField sObjectField39 = createField("OwnerId", "Owner ID", "reference", "tns:ID", 18, false, false, false, false, false, false, false);
        fields1.add(sObjectField39);
        final SObjectField sObjectField40 = createField("CreatedDate", "Created Date", "datetime", "xsd:dateTime", 0, false, false, false, false, false, false, false);
        fields1.add(sObjectField40);
        final SObjectField sObjectField41 = createField("CreatedById", "Created By ID", "reference", "tns:ID", 18, false, false, false, false, false, false, false);
        fields1.add(sObjectField41);
        final SObjectField sObjectField42 = createField("LastModifiedDate", "Last Modified Date", "datetime", "xsd:dateTime", 0, false, false, false, false, false, false, false);
        fields1.add(sObjectField42);
        final SObjectField sObjectField43 = createField("LastModifiedById", "Last Modified By ID", "reference", "tns:ID", 18, false, false, false, false, false, false, false);
        fields1.add(sObjectField43);
        final SObjectField sObjectField44 = createField("SystemModstamp", "System Modstamp", "datetime", "xsd:dateTime", 0, false, false, false, false, false, false, false);
        fields1.add(sObjectField44);
        final SObjectField sObjectField45 = createField("LastActivityDate", "Last Activity", "date", "xsd:date", 0, false, true, false, false, false, false, false);
        fields1.add(sObjectField45);
        final SObjectField sObjectField46 = createField("LastViewedDate", "Last Viewed Date", "datetime", "xsd:dateTime", 0, false, true, false, false, false, false, false);
        fields1.add(sObjectField46);
        final SObjectField sObjectField47 = createField("LastReferencedDate", "Last Referenced Date", "datetime", "xsd:dateTime", 0, false, true, false, false, false, false,
            false);
        fields1.add(sObjectField47);
        final SObjectField sObjectField48 = createField("Jigsaw", "Data.com Key", "string", "xsd:string", 20, false, true, false, false, false, false, false);
        fields1.add(sObjectField48);
        final SObjectField sObjectField49 = createField("JigsawCompanyId", "Jigsaw Company ID", "string", "xsd:string", 20, false, true, false, false, false, false, false);
        fields1.add(sObjectField49);
        final SObjectField sObjectField50 = createField("CleanStatus", "Clean Status", "picklist", "xsd:string", 40, false, true, false, false, false, false, false);
        fields1.add(sObjectField50);
        final SObjectField sObjectField51 = createField("AccountSource", "Account Source", "picklist", "xsd:string", 40, false, true, false, false, false, false, false);
        fields1.add(sObjectField51);
        final SObjectField sObjectField52 = createField("DunsNumber", "D-U-N-S Number", "string", "xsd:string", 9, false, true, false, false, false, false, false);
        fields1.add(sObjectField52);
        final SObjectField sObjectField53 = createField("Tradestyle", "Tradestyle", "string", "xsd:string", 255, false, true, false, false, false, false, false);
        fields1.add(sObjectField53);
        final SObjectField sObjectField54 = createField("NaicsCode", "NAICS Code", "string", "xsd:string", 8, false, true, false, false, false, false, false);
        fields1.add(sObjectField54);
        final SObjectField sObjectField55 = createField("NaicsDesc", "NAICS Description", "string", "xsd:string", 120, false, true, false, false, false, false, false);
        fields1.add(sObjectField55);
        final SObjectField sObjectField56 = createField("YearStarted", "Year Started", "string", "xsd:string", 4, false, true, false, false, false, false, false);
        fields1.add(sObjectField56);
        final SObjectField sObjectField57 = createField("SicDesc", "SIC Description", "string", "xsd:string", 80, false, true, false, false, false, false, false);
        fields1.add(sObjectField57);
        final SObjectField sObjectField58 = createField("DandbCompanyId", "D&B Company ID", "reference", "tns:ID", 18, false, true, false, false, false, false, false);
        fields1.add(sObjectField58);
        final SObjectField sObjectField59 = createField("OperatingHoursId", "Operating Hour ID", "reference", "tns:ID", 18, false, true, false, false, false, false, false);
        fields1.add(sObjectField59);
        final SObjectField sObjectField60 = createField("Shipping_Location__Latitude__s", "Shipping_Location (Latitude)", "double", "xsd:double", 0, false, true, false, false,
            true, false, false);
        fields1.add(sObjectField60);
        final SObjectField sObjectField61 = createField("Shipping_Location__Longitude__s", "Shipping_Location (Longitude)", "double", "xsd:double", 0, false, true, false, false,
            true, false, false);
        fields1.add(sObjectField61);
        final SObjectField sObjectField62 = createField("Shipping_Location__c", "Shipping_Location", "location", "urn:location", 0, false, true, false, false, true, false, false);
        fields1.add(sObjectField62);
        final SObjectField sObjectField63 = createField("External_Id__c", "External Id", "string", "xsd:string", 255, true, true, false, true, true, false, true);
        fields1.add(sObjectField63);

        description.setLabel("Account");
        description.setLabelPlural("Accounts");
        description.setName("Account");

        // REST URL templates as reported by the describe call this DTO was generated from.
        final SObjectDescriptionUrls sObjectDescriptionUrls1 = new SObjectDescriptionUrls();
        sObjectDescriptionUrls1.setApprovalLayouts("/services/data/v45.0/sobjects/Account/describe/approvalLayouts");
        sObjectDescriptionUrls1.setCompactLayouts("/services/data/v45.0/sobjects/Account/describe/compactLayouts");
        sObjectDescriptionUrls1.setDefaultValues("/services/data/v45.0/sobjects/Account/defaultValues?recordTypeId&fields");
        sObjectDescriptionUrls1.setDescribe("/services/data/v45.0/sobjects/Account/describe");
        sObjectDescriptionUrls1.setLayouts("/services/data/v45.0/sobjects/Account/describe/layouts");
        sObjectDescriptionUrls1.setListviews("/services/data/v45.0/sobjects/Account/listviews");
        sObjectDescriptionUrls1.setQuickActions("/services/data/v45.0/sobjects/Account/quickActions");
        sObjectDescriptionUrls1.setRowTemplate("/services/data/v45.0/sobjects/Account/{ID}");
        sObjectDescriptionUrls1.setSobject("/services/data/v45.0/sobjects/Account");
        sObjectDescriptionUrls1.setUiDetailTemplate("https://customer-flow-8168-dev-ed.cs42.my.salesforce.com/{ID}");
        sObjectDescriptionUrls1.setUiEditTemplate("https://customer-flow-8168-dev-ed.cs42.my.salesforce.com/{ID}/e");
        sObjectDescriptionUrls1.setUiNewRecord("https://customer-flow-8168-dev-ed.cs42.my.salesforce.com/001/e");
        description.setUrls(sObjectDescriptionUrls1);

        return description;
    }
}
//CHECKSTYLE:ON
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.druid.query.aggregation.histogram.sql;
import com.fasterxml.jackson.databind.Module;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import org.apache.calcite.schema.SchemaPlus;
import org.apache.druid.common.config.NullHandling;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.java.util.common.io.Closer;
import org.apache.druid.query.Druids;
import org.apache.druid.query.QueryDataSource;
import org.apache.druid.query.QueryRunnerFactoryConglomerate;
import org.apache.druid.query.aggregation.CountAggregatorFactory;
import org.apache.druid.query.aggregation.DoubleSumAggregatorFactory;
import org.apache.druid.query.aggregation.FilteredAggregatorFactory;
import org.apache.druid.query.aggregation.histogram.ApproximateHistogramAggregatorFactory;
import org.apache.druid.query.aggregation.histogram.ApproximateHistogramDruidModule;
import org.apache.druid.query.aggregation.histogram.ApproximateHistogramFoldingAggregatorFactory;
import org.apache.druid.query.aggregation.histogram.QuantilePostAggregator;
import org.apache.druid.query.aggregation.post.ArithmeticPostAggregator;
import org.apache.druid.query.aggregation.post.FieldAccessPostAggregator;
import org.apache.druid.query.dimension.DefaultDimensionSpec;
import org.apache.druid.query.expression.TestExprMacroTable;
import org.apache.druid.query.filter.NotDimFilter;
import org.apache.druid.query.filter.SelectorDimFilter;
import org.apache.druid.query.groupby.GroupByQuery;
import org.apache.druid.query.spec.MultipleIntervalSegmentSpec;
import org.apache.druid.segment.IndexBuilder;
import org.apache.druid.segment.QueryableIndex;
import org.apache.druid.segment.TestHelper;
import org.apache.druid.segment.column.ValueType;
import org.apache.druid.segment.incremental.IncrementalIndexSchema;
import org.apache.druid.segment.virtual.ExpressionVirtualColumn;
import org.apache.druid.segment.writeout.OffHeapMemorySegmentWriteOutMediumFactory;
import org.apache.druid.server.QueryStackTests;
import org.apache.druid.server.security.AuthTestUtils;
import org.apache.druid.server.security.AuthenticationResult;
import org.apache.druid.sql.SqlLifecycle;
import org.apache.druid.sql.SqlLifecycleFactory;
import org.apache.druid.sql.calcite.filtration.Filtration;
import org.apache.druid.sql.calcite.planner.DruidOperatorTable;
import org.apache.druid.sql.calcite.planner.PlannerConfig;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.planner.PlannerFactory;
import org.apache.druid.sql.calcite.util.CalciteTestBase;
import org.apache.druid.sql.calcite.util.CalciteTests;
import org.apache.druid.sql.calcite.util.QueryLogHook;
import org.apache.druid.sql.calcite.util.SpecificSegmentsQuerySegmentWalker;
import org.apache.druid.timeline.DataSegment;
import org.apache.druid.timeline.partition.LinearShardSpec;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import java.io.IOException;
import java.util.List;
import java.util.Map;
/**
 * Integration-style tests for the APPROX_QUANTILE SQL aggregator backed by Druid's
 * ApproximateHistogram. Each test plans and runs a SQL statement against an in-memory
 * segment, then asserts both the returned rows and the exact native Druid query that
 * the planner produced (captured via {@link QueryLogHook}).
 */
public class QuantileSqlAggregatorTest extends CalciteTestBase
{
  private static final String DATA_SOURCE = "foo";

  // Shared across all tests in this class; created once in setUpClass and released
  // through resourceCloser in tearDownClass.
  private static QueryRunnerFactoryConglomerate conglomerate;
  private static Closer resourceCloser;
  private static AuthenticationResult authenticationResult = CalciteTests.REGULAR_USER_AUTH_RESULT;

  // Minimal query context: only a fixed sqlQueryId so planned queries compare equal.
  private static final Map<String, Object> QUERY_CONTEXT_DEFAULT = ImmutableMap.of(
      PlannerContext.CTX_SQL_QUERY_ID, "dummy"
  );

  @BeforeClass
  public static void setUpClass()
  {
    resourceCloser = Closer.create();
    conglomerate = QueryStackTests.createQueryRunnerFactoryConglomerate(resourceCloser);
  }

  @AfterClass
  public static void tearDownClass() throws IOException
  {
    resourceCloser.close();
  }

  @Rule
  public TemporaryFolder temporaryFolder = new TemporaryFolder();

  // Records every native query issued during a test so it can be asserted against.
  @Rule
  public QueryLogHook queryLogHook = QueryLogHook.create();

  private SpecificSegmentsQuerySegmentWalker walker;
  private SqlLifecycleFactory sqlLifecycleFactory;

  @Before
  public void setUp() throws Exception
  {
    // Register the histogram complex-type serde and Jackson modules so the
    // "hist_m1" column below can be serialized/deserialized.
    ApproximateHistogramDruidModule.registerSerde();
    for (Module mod : new ApproximateHistogramDruidModule().getJacksonModules()) {
      CalciteTests.getJsonMapper().registerModule(mod);
      TestHelper.JSON_MAPPER.registerModule(mod);
    }

    // Build a small mmapped segment from CalciteTests.ROWS1 that carries both a raw
    // double column ("m1") and a pre-aggregated histogram column ("hist_m1").
    final QueryableIndex index = IndexBuilder.create()
        .tmpDir(temporaryFolder.newFolder())
        .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
        .schema(
            new IncrementalIndexSchema.Builder()
                .withMetrics(
                    new CountAggregatorFactory("cnt"),
                    new DoubleSumAggregatorFactory("m1", "m1"),
                    new ApproximateHistogramAggregatorFactory(
                        "hist_m1",
                        "m1",
                        null,
                        null,
                        null,
                        null,
                        false
                    )
                )
                .withRollup(false)
                .build()
        )
        .rows(CalciteTests.ROWS1)
        .buildMMappedIndex();

    walker = new SpecificSegmentsQuerySegmentWalker(conglomerate).add(
        DataSegment.builder()
                   .dataSource(DATA_SOURCE)
                   .interval(index.getDataInterval())
                   .version("1")
                   .shardSpec(new LinearShardSpec(0))
                   .size(0)
                   .build(),
        index
    );

    // Wire up a planner whose operator table knows only the aggregator under test.
    final PlannerConfig plannerConfig = new PlannerConfig();
    final DruidOperatorTable operatorTable = new DruidOperatorTable(
        ImmutableSet.of(new QuantileSqlAggregator()),
        ImmutableSet.of()
    );
    SchemaPlus rootSchema =
        CalciteTests.createMockRootSchema(conglomerate, walker, plannerConfig, AuthTestUtils.TEST_AUTHORIZER_MAPPER);
    sqlLifecycleFactory = CalciteTests.createSqlLifecycleFactory(
        new PlannerFactory(
            rootSchema,
            CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
            operatorTable,
            CalciteTests.createExprMacroTable(),
            plannerConfig,
            AuthTestUtils.TEST_AUTHORIZER_MAPPER,
            CalciteTests.getJsonMapper(),
            CalciteTests.DRUID_SCHEMA_NAME
        )
    );
  }

  @After
  public void tearDown() throws Exception
  {
    walker.close();
    walker = null;
  }

  @Test
  public void testQuantileOnFloatAndLongs() throws Exception
  {
    SqlLifecycle sqlLifecycle = sqlLifecycleFactory.factorize();
    final String sql = "SELECT\n"
                       + "APPROX_QUANTILE(m1, 0.01),\n"
                       + "APPROX_QUANTILE(m1, 0.5, 50),\n"
                       + "APPROX_QUANTILE(m1, 0.98, 200),\n"
                       + "APPROX_QUANTILE(m1, 0.99),\n"
                       + "APPROX_QUANTILE(m1 * 2, 0.97),\n"
                       + "APPROX_QUANTILE(m1, 0.99) FILTER(WHERE dim1 = 'abc'),\n"
                       + "APPROX_QUANTILE(m1, 0.999) FILTER(WHERE dim1 <> 'abc'),\n"
                       + "APPROX_QUANTILE(m1, 0.999) FILTER(WHERE dim1 = 'abc'),\n"
                       + "APPROX_QUANTILE(cnt, 0.5)\n"
                       + "FROM foo";

    // Verify results
    // NOTE: DEFAULT_PARAMETERS is inherited from CalciteTestBase — presumably an
    // empty SQL-parameter list; confirm against the base class.
    final List<Object[]> results = sqlLifecycle.runSimple(
        sql,
        QUERY_CONTEXT_DEFAULT,
        DEFAULT_PARAMETERS,
        authenticationResult
    ).toList();
    final List<Object[]> expectedResults = ImmutableList.of(
        new Object[]{
            1.0,
            3.0,
            5.880000114440918,
            5.940000057220459,
            11.640000343322754,
            6.0,
            4.994999885559082,
            6.0,
            1.0
        }
    );
    Assert.assertEquals(expectedResults.size(), results.size());
    for (int i = 0; i < expectedResults.size(); i++) {
      Assert.assertArrayEquals(expectedResults.get(i), results.get(i));
    }

    // Verify query
    // The planner is expected to build one histogram aggregator per distinct
    // (column, resolution) pair and share it among quantile post-aggregators —
    // e.g. "a0:agg" serves quantiles a0, a1 and a3 on column m1.
    Assert.assertEquals(
        Druids.newTimeseriesQueryBuilder()
              .dataSource(CalciteTests.DATASOURCE1)
              .intervals(new MultipleIntervalSegmentSpec(ImmutableList.of(Filtration.eternity())))
              .granularity(Granularities.ALL)
              .virtualColumns(
                  new ExpressionVirtualColumn(
                      "v0",
                      "(\"m1\" * 2)",
                      ValueType.FLOAT,
                      TestExprMacroTable.INSTANCE
                  )
              )
              .aggregators(ImmutableList.of(
                  new ApproximateHistogramAggregatorFactory("a0:agg", "m1", null, null, null, null, false),
                  new ApproximateHistogramAggregatorFactory("a2:agg", "m1", 200, null, null, null, false),
                  new ApproximateHistogramAggregatorFactory("a4:agg", "v0", null, null, null, null, false),
                  new FilteredAggregatorFactory(
                      new ApproximateHistogramAggregatorFactory("a5:agg", "m1", null, null, null, null, false),
                      new SelectorDimFilter("dim1", "abc", null)
                  ),
                  new FilteredAggregatorFactory(
                      new ApproximateHistogramAggregatorFactory("a6:agg", "m1", null, null, null, null, false),
                      new NotDimFilter(new SelectorDimFilter("dim1", "abc", null))
                  ),
                  new ApproximateHistogramAggregatorFactory("a8:agg", "cnt", null, null, null, null, false)
              ))
              .postAggregators(
                  new QuantilePostAggregator("a0", "a0:agg", 0.01f),
                  new QuantilePostAggregator("a1", "a0:agg", 0.50f),
                  new QuantilePostAggregator("a2", "a2:agg", 0.98f),
                  new QuantilePostAggregator("a3", "a0:agg", 0.99f),
                  new QuantilePostAggregator("a4", "a4:agg", 0.97f),
                  new QuantilePostAggregator("a5", "a5:agg", 0.99f),
                  new QuantilePostAggregator("a6", "a6:agg", 0.999f),
                  new QuantilePostAggregator("a7", "a5:agg", 0.999f),
                  new QuantilePostAggregator("a8", "a8:agg", 0.50f)
              )
              .context(ImmutableMap.of("skipEmptyBuckets", true, PlannerContext.CTX_SQL_QUERY_ID, "dummy"))
              .build(),
        Iterables.getOnlyElement(queryLogHook.getRecordedQueries())
    );
  }

  @Test
  public void testQuantileOnComplexColumn() throws Exception
  {
    SqlLifecycle lifecycle = sqlLifecycleFactory.factorize();
    // Same shape as the float/long test, but aggregating the pre-built histogram
    // column, which must plan to the *folding* aggregator factory.
    final String sql = "SELECT\n"
                       + "APPROX_QUANTILE(hist_m1, 0.01),\n"
                       + "APPROX_QUANTILE(hist_m1, 0.5, 50),\n"
                       + "APPROX_QUANTILE(hist_m1, 0.98, 200),\n"
                       + "APPROX_QUANTILE(hist_m1, 0.99),\n"
                       + "APPROX_QUANTILE(hist_m1, 0.99) FILTER(WHERE dim1 = 'abc'),\n"
                       + "APPROX_QUANTILE(hist_m1, 0.999) FILTER(WHERE dim1 <> 'abc'),\n"
                       + "APPROX_QUANTILE(hist_m1, 0.999) FILTER(WHERE dim1 = 'abc')\n"
                       + "FROM foo";

    // Verify results
    final List<Object[]> results = lifecycle.runSimple(
        sql,
        QUERY_CONTEXT_DEFAULT,
        DEFAULT_PARAMETERS,
        authenticationResult
    ).toList();
    final List<Object[]> expectedResults = ImmutableList.of(
        new Object[]{1.0, 3.0, 5.880000114440918, 5.940000057220459, 6.0, 4.994999885559082, 6.0}
    );
    Assert.assertEquals(expectedResults.size(), results.size());
    for (int i = 0; i < expectedResults.size(); i++) {
      Assert.assertArrayEquals(expectedResults.get(i), results.get(i));
    }

    // Verify query
    Assert.assertEquals(
        Druids.newTimeseriesQueryBuilder()
              .dataSource(CalciteTests.DATASOURCE1)
              .intervals(new MultipleIntervalSegmentSpec(ImmutableList.of(Filtration.eternity())))
              .granularity(Granularities.ALL)
              .aggregators(ImmutableList.of(
                  new ApproximateHistogramFoldingAggregatorFactory("a0:agg", "hist_m1", null, null, null, null, false),
                  new ApproximateHistogramFoldingAggregatorFactory("a2:agg", "hist_m1", 200, null, null, null, false),
                  new FilteredAggregatorFactory(
                      new ApproximateHistogramFoldingAggregatorFactory("a4:agg", "hist_m1", null, null, null, null, false),
                      new SelectorDimFilter("dim1", "abc", null)
                  ),
                  new FilteredAggregatorFactory(
                      new ApproximateHistogramFoldingAggregatorFactory("a5:agg", "hist_m1", null, null, null, null, false),
                      new NotDimFilter(new SelectorDimFilter("dim1", "abc", null))
                  )
              ))
              .postAggregators(
                  new QuantilePostAggregator("a0", "a0:agg", 0.01f),
                  new QuantilePostAggregator("a1", "a0:agg", 0.50f),
                  new QuantilePostAggregator("a2", "a2:agg", 0.98f),
                  new QuantilePostAggregator("a3", "a0:agg", 0.99f),
                  new QuantilePostAggregator("a4", "a4:agg", 0.99f),
                  new QuantilePostAggregator("a5", "a5:agg", 0.999f),
                  new QuantilePostAggregator("a6", "a4:agg", 0.999f)
              )
              .context(ImmutableMap.of("skipEmptyBuckets", true, PlannerContext.CTX_SQL_QUERY_ID, "dummy"))
              .build(),
        Iterables.getOnlyElement(queryLogHook.getRecordedQueries())
    );
  }

  @Test
  public void testQuantileOnInnerQuery() throws Exception
  {
    SqlLifecycle sqlLifecycle = sqlLifecycleFactory.factorize();
    final String sql = "SELECT AVG(x), APPROX_QUANTILE(x, 0.98)\n"
                       + "FROM (SELECT dim2, SUM(m1) AS x FROM foo GROUP BY dim2)";

    // Verify results
    final List<Object[]> results = sqlLifecycle.runSimple(
        sql,
        QUERY_CONTEXT_DEFAULT,
        DEFAULT_PARAMETERS,
        authenticationResult
    ).toList();
    // Expected values differ by null-handling mode: in default-value mode the
    // empty/null dim2 rows collapse into one group, changing both AVG and quantile.
    final List<Object[]> expectedResults;
    if (NullHandling.replaceWithDefault()) {
      expectedResults = ImmutableList.of(new Object[]{7.0, 8.26386833190918});
    } else {
      expectedResults = ImmutableList.of(new Object[]{5.25, 6.59091854095459});
    }
    Assert.assertEquals(expectedResults.size(), results.size());
    for (int i = 0; i < expectedResults.size(); i++) {
      Assert.assertArrayEquals(expectedResults.get(i), results.get(i));
    }

    // Verify query
    Assert.assertEquals(
        GroupByQuery.builder()
                    .setDataSource(
                        new QueryDataSource(
                            GroupByQuery.builder()
                                        .setDataSource(CalciteTests.DATASOURCE1)
                                        .setInterval(new MultipleIntervalSegmentSpec(ImmutableList.of(Filtration.eternity())))
                                        .setGranularity(Granularities.ALL)
                                        .setDimensions(new DefaultDimensionSpec("dim2", "d0"))
                                        .setAggregatorSpecs(
                                            ImmutableList.of(
                                                new DoubleSumAggregatorFactory("a0", "m1")
                                            )
                                        )
                                        .setContext(ImmutableMap.of(PlannerContext.CTX_SQL_QUERY_ID, "dummy"))
                                        .build()
                        )
                    )
                    .setInterval(new MultipleIntervalSegmentSpec(ImmutableList.of(Filtration.eternity())))
                    .setGranularity(Granularities.ALL)
                    .setAggregatorSpecs(
                        new DoubleSumAggregatorFactory("_a0:sum", "a0"),
                        new CountAggregatorFactory("_a0:count"),
                        new ApproximateHistogramAggregatorFactory(
                            "_a1:agg",
                            "a0",
                            null,
                            null,
                            null,
                            null,
                            false
                        )
                    )
                    .setPostAggregatorSpecs(
                        ImmutableList.of(
                            // AVG(x) is planned as sum/count via an arithmetic post-aggregator.
                            new ArithmeticPostAggregator(
                                "_a0",
                                "quotient",
                                ImmutableList.of(
                                    new FieldAccessPostAggregator(null, "_a0:sum"),
                                    new FieldAccessPostAggregator(null, "_a0:count")
                                )
                            ),
                            new QuantilePostAggregator("_a1", "_a1:agg", 0.98f)
                        )
                    )
                    .setContext(ImmutableMap.of(PlannerContext.CTX_SQL_QUERY_ID, "dummy"))
                    .build(),
        Iterables.getOnlyElement(queryLogHook.getRecordedQueries())
    );
  }
}
| |
/*
* Copyright 2014 Mark Borner
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package au.com.borner.salesforce.plugin.apex.psi;
import org.jetbrains.annotations.*;
import com.intellij.psi.PsiElementVisitor;
import com.intellij.psi.PsiElement;
/**
 * PSI visitor for the Apex language plugin.
 *
 * <p>Each {@code visitXxx} method is a hook for one PSI node type and, by default,
 * delegates to a more general hook: expression nodes funnel through
 * {@link #visitExpression}, declarations through {@link #visitDeclaration} /
 * {@link #visitDeclarationElement}, references through {@link #visitReference} /
 * {@link #visitReferenceElement}, and everything ultimately reaches
 * {@link #visitPsiElement}. Subclasses override only the hooks they care about.
 *
 * <p>NOTE(review): the mechanical structure looks like Grammar-Kit generated code
 * (one delegating method per BNF rule) — confirm before hand-editing, as manual
 * changes to generated files are usually lost on regeneration.
 */
public class ApexVisitor extends PsiElementVisitor {

    // --- General category hooks: subclasses can intercept whole node families here. ---

    public void visitDeclaration(@NotNull ApexDeclaration o) {
        visitDeclarationElement(o);
    }

    public void visitReference(@NotNull ApexReference o) {
        visitReferenceElement(o);
    }

    // --- Per-node-type hooks, in alphabetical order. ---

    public void visitAdditionAssignExpression(@NotNull ApexAdditionAssignExpression o) {
        visitExpression(o);
    }

    public void visitAdditionExpression(@NotNull ApexAdditionExpression o) {
        visitExpression(o);
    }

    public void visitAndAssignExpression(@NotNull ApexAndAssignExpression o) {
        visitExpression(o);
    }

    public void visitAndExpression(@NotNull ApexAndExpression o) {
        visitExpression(o);
    }

    public void visitAnnotation(@NotNull ApexAnnotation o) {
        visitPsiElement(o);
    }

    public void visitAnnotationParameter(@NotNull ApexAnnotationParameter o) {
        visitPsiElement(o);
    }

    public void visitAnnotationParameterValue(@NotNull ApexAnnotationParameterValue o) {
        visitPsiElement(o);
    }

    public void visitArguments(@NotNull ApexArguments o) {
        visitPsiElement(o);
    }

    public void visitArrayCreatorRest(@NotNull ApexArrayCreatorRest o) {
        visitPsiElement(o);
    }

    public void visitArrayInitializer(@NotNull ApexArrayInitializer o) {
        visitPsiElement(o);
    }

    public void visitArrayPositionExpression(@NotNull ApexArrayPositionExpression o) {
        visitExpression(o);
    }

    public void visitAssignExpression(@NotNull ApexAssignExpression o) {
        visitExpression(o);
    }

    public void visitBitwiseAndExpression(@NotNull ApexBitwiseAndExpression o) {
        visitExpression(o);
    }

    public void visitBitwiseExclusiveOr1expression(@NotNull ApexBitwiseExclusiveOr1expression o) {
        visitExpression(o);
    }

    public void visitBitwiseExclusiveOr2expression(@NotNull ApexBitwiseExclusiveOr2expression o) {
        visitExpression(o);
    }

    public void visitBitwiseOrExpression(@NotNull ApexBitwiseOrExpression o) {
        visitExpression(o);
    }

    public void visitBitwiseShiftLeftExpression(@NotNull ApexBitwiseShiftLeftExpression o) {
        visitExpression(o);
    }

    public void visitBitwiseShiftRightExpression(@NotNull ApexBitwiseShiftRightExpression o) {
        visitExpression(o);
    }

    public void visitBitwiseShiftRightUnsignedExpression(@NotNull ApexBitwiseShiftRightUnsignedExpression o) {
        visitExpression(o);
    }

    public void visitBlock(@NotNull ApexBlock o) {
        visitPsiElement(o);
    }

    public void visitBlockStatement(@NotNull ApexBlockStatement o) {
        visitPsiElement(o);
    }

    public void visitBooleanLiteral(@NotNull ApexBooleanLiteral o) {
        visitPsiElement(o);
    }

    public void visitCastExpression(@NotNull ApexCastExpression o) {
        visitExpression(o);
    }

    public void visitCatchClause(@NotNull ApexCatchClause o) {
        visitPsiElement(o);
    }

    public void visitClassBody(@NotNull ApexClassBody o) {
        visitPsiElement(o);
    }

    public void visitClassCreatorRest(@NotNull ApexClassCreatorRest o) {
        visitPsiElement(o);
    }

    public void visitClassDeclaration(@NotNull ApexClassDeclaration o) {
        visitDeclaration(o);
    }

    public void visitClassInterfaceOrPrimitiveType(@NotNull ApexClassInterfaceOrPrimitiveType o) {
        visitPsiElement(o);
    }

    public void visitClassOrInterfaceReference(@NotNull ApexClassOrInterfaceReference o) {
        visitReference(o);
    }

    public void visitClassicForStatement(@NotNull ApexClassicForStatement o) {
        visitPsiElement(o);
    }

    public void visitConstructorBody(@NotNull ApexConstructorBody o) {
        visitPsiElement(o);
    }

    public void visitConstructorDeclaration(@NotNull ApexConstructorDeclaration o) {
        visitDeclaration(o);
    }

    public void visitCreatedName(@NotNull ApexCreatedName o) {
        visitPsiElement(o);
    }

    public void visitCreator(@NotNull ApexCreator o) {
        visitPsiElement(o);
    }

    public void visitDecrementAfterExpression(@NotNull ApexDecrementAfterExpression o) {
        visitExpression(o);
    }

    public void visitDecrementBeforeExpression(@NotNull ApexDecrementBeforeExpression o) {
        visitExpression(o);
    }

    public void visitDivideAssignExpression(@NotNull ApexDivideAssignExpression o) {
        visitExpression(o);
    }

    public void visitDivisionExpression(@NotNull ApexDivisionExpression o) {
        visitExpression(o);
    }

    public void visitDoStatement(@NotNull ApexDoStatement o) {
        visitPsiElement(o);
    }

    public void visitEnhancedForStatement(@NotNull ApexEnhancedForStatement o) {
        visitPsiElement(o);
    }

    public void visitEnumDeclaration(@NotNull ApexEnumDeclaration o) {
        visitDeclaration(o);
    }

    public void visitEqualityExpression(@NotNull ApexEqualityExpression o) {
        visitExpression(o);
    }

    public void visitExactEqualityExpression(@NotNull ApexExactEqualityExpression o) {
        visitExpression(o);
    }

    public void visitExactInequalityExpression(@NotNull ApexExactInequalityExpression o) {
        visitExpression(o);
    }

    public void visitExplicitGenericInvocation(@NotNull ApexExplicitGenericInvocation o) {
        visitPsiElement(o);
    }

    public void visitExplicitGenericInvocationSuffix(@NotNull ApexExplicitGenericInvocationSuffix o) {
        visitPsiElement(o);
    }

    public void visitExpression(@NotNull ApexExpression o) {
        visitPsiElement(o);
    }

    public void visitExpressionList(@NotNull ApexExpressionList o) {
        visitPsiElement(o);
    }

    public void visitExpressionListExpression(@NotNull ApexExpressionListExpression o) {
        visitExpression(o);
    }

    public void visitExtendsClause(@NotNull ApexExtendsClause o) {
        visitPsiElement(o);
    }

    public void visitFieldDeclarator(@NotNull ApexFieldDeclarator o) {
        visitPsiElement(o);
    }

    public void visitFieldVisibility(@NotNull ApexFieldVisibility o) {
        visitPsiElement(o);
    }

    public void visitFinallyBlock(@NotNull ApexFinallyBlock o) {
        visitPsiElement(o);
    }

    public void visitForInit(@NotNull ApexForInit o) {
        visitPsiElement(o);
    }

    public void visitForUpdate(@NotNull ApexForUpdate o) {
        visitPsiElement(o);
    }

    public void visitGenericExpression(@NotNull ApexGenericExpression o) {
        visitExpression(o);
    }

    public void visitGreaterEqualExpression(@NotNull ApexGreaterEqualExpression o) {
        visitExpression(o);
    }

    public void visitGreaterThanExpression(@NotNull ApexGreaterThanExpression o) {
        visitExpression(o);
    }

    public void visitIdentifierExpression(@NotNull ApexIdentifierExpression o) {
        visitExpression(o);
    }

    public void visitIfStatement(@NotNull ApexIfStatement o) {
        visitPsiElement(o);
    }

    public void visitImplementsClause(@NotNull ApexImplementsClause o) {
        visitPsiElement(o);
    }

    public void visitIncrementAfterExpression(@NotNull ApexIncrementAfterExpression o) {
        visitExpression(o);
    }

    public void visitIncrementBeforeExpression(@NotNull ApexIncrementBeforeExpression o) {
        visitExpression(o);
    }

    public void visitInequalityExpression(@NotNull ApexInequalityExpression o) {
        visitExpression(o);
    }

    public void visitInnerCreator(@NotNull ApexInnerCreator o) {
        visitPsiElement(o);
    }

    public void visitInstanceOfExpression(@NotNull ApexInstanceOfExpression o) {
        visitExpression(o);
    }

    public void visitInstantiationExpression(@NotNull ApexInstantiationExpression o) {
        visitExpression(o);
    }

    public void visitInterfaceBody(@NotNull ApexInterfaceBody o) {
        visitPsiElement(o);
    }

    public void visitInterfaceDeclaration(@NotNull ApexInterfaceDeclaration o) {
        visitDeclaration(o);
    }

    public void visitLessEqualExpression(@NotNull ApexLessEqualExpression o) {
        visitExpression(o);
    }

    public void visitLessThanExpression(@NotNull ApexLessThanExpression o) {
        visitExpression(o);
    }

    public void visitListCollection(@NotNull ApexListCollection o) {
        visitPsiElement(o);
    }

    public void visitLiteral(@NotNull ApexLiteral o) {
        visitPsiElement(o);
    }

    public void visitLocalVariableDeclarationStatement(@NotNull ApexLocalVariableDeclarationStatement o) {
        visitPsiElement(o);
    }

    public void visitLocalVariableDeclarator(@NotNull ApexLocalVariableDeclarator o) {
        visitPsiElement(o);
    }

    // "Compliment" mirrors the (misspelled) generated PSI class name; it cannot be
    // corrected here without renaming ApexLogicalComplimentExpression itself.
    public void visitLogicalComplimentExpression(@NotNull ApexLogicalComplimentExpression o) {
        visitExpression(o);
    }

    public void visitMapCollection(@NotNull ApexMapCollection o) {
        visitPsiElement(o);
    }

    public void visitMethodBody(@NotNull ApexMethodBody o) {
        visitPsiElement(o);
    }

    public void visitMethodDeclaration(@NotNull ApexMethodDeclaration o) {
        visitDeclaration(o);
    }

    public void visitMultiplicationAssignExpression(@NotNull ApexMultiplicationAssignExpression o) {
        visitExpression(o);
    }

    public void visitMultiplicationExpression(@NotNull ApexMultiplicationExpression o) {
        visitExpression(o);
    }

    public void visitNonWildcardTypeArguments(@NotNull ApexNonWildcardTypeArguments o) {
        visitPsiElement(o);
    }

    public void visitNonWildcardTypeArgumentsOrDiamond(@NotNull ApexNonWildcardTypeArgumentsOrDiamond o) {
        visitPsiElement(o);
    }

    public void visitOrAssignExpression(@NotNull ApexOrAssignExpression o) {
        visitExpression(o);
    }

    public void visitOrExpression(@NotNull ApexOrExpression o) {
        visitExpression(o);
    }

    public void visitParameterDefinition(@NotNull ApexParameterDefinition o) {
        visitPsiElement(o);
    }

    public void visitParameters(@NotNull ApexParameters o) {
        visitPsiElement(o);
    }

    public void visitPrimaryExpression(@NotNull ApexPrimaryExpression o) {
        visitExpression(o);
    }

    public void visitPrimitiveType(@NotNull ApexPrimitiveType o) {
        visitPsiElement(o);
    }

    public void visitSetCollection(@NotNull ApexSetCollection o) {
        visitPsiElement(o);
    }

    public void visitSharingModifier(@NotNull ApexSharingModifier o) {
        visitPsiElement(o);
    }

    public void visitShiftLeftAssignExpression(@NotNull ApexShiftLeftAssignExpression o) {
        visitExpression(o);
    }

    public void visitShiftRightAssignExpression(@NotNull ApexShiftRightAssignExpression o) {
        visitExpression(o);
    }

    public void visitShiftRightUnsignedAssignExpression(@NotNull ApexShiftRightUnsignedAssignExpression o) {
        visitExpression(o);
    }

    public void visitStatement(@NotNull ApexStatement o) {
        visitPsiElement(o);
    }

    public void visitStaticBlock(@NotNull ApexStaticBlock o) {
        visitPsiElement(o);
    }

    public void visitStaticOrTransientModifier(@NotNull ApexStaticOrTransientModifier o) {
        visitPsiElement(o);
    }

    public void visitSubtractionAssignExpression(@NotNull ApexSubtractionAssignExpression o) {
        visitExpression(o);
    }

    public void visitSubtractionExpression(@NotNull ApexSubtractionExpression o) {
        visitExpression(o);
    }

    public void visitSuperSuffix(@NotNull ApexSuperSuffix o) {
        visitPsiElement(o);
    }

    public void visitSuperExpression(@NotNull ApexSuperExpression o) {
        visitExpression(o);
    }

    public void visitTernaryExpression(@NotNull ApexTernaryExpression o) {
        visitExpression(o);
    }

    public void visitThisExpression(@NotNull ApexThisExpression o) {
        visitExpression(o);
    }

    public void visitTriggerBody(@NotNull ApexTriggerBody o) {
        visitPsiElement(o);
    }

    public void visitTriggerDefinition(@NotNull ApexTriggerDefinition o) {
        visitPsiElement(o);
    }

    public void visitTriggerParameter(@NotNull ApexTriggerParameter o) {
        visitPsiElement(o);
    }

    public void visitTryStatement(@NotNull ApexTryStatement o) {
        visitPsiElement(o);
    }

    public void visitTypeArgument(@NotNull ApexTypeArgument o) {
        visitPsiElement(o);
    }

    public void visitTypeArgumentsOrDiamond(@NotNull ApexTypeArgumentsOrDiamond o) {
        visitPsiElement(o);
    }

    public void visitUnaryNegationExpression(@NotNull ApexUnaryNegationExpression o) {
        visitExpression(o);
    }

    public void visitVariableDefinition(@NotNull ApexVariableDefinition o) {
        visitPsiElement(o);
    }

    public void visitVariableInitializer(@NotNull ApexVariableInitializer o) {
        visitPsiElement(o);
    }

    public void visitVariableModifier(@NotNull ApexVariableModifier o) {
        visitPsiElement(o);
    }

    public void visitVirtualOrAbstractModifier(@NotNull ApexVirtualOrAbstractModifier o) {
        visitPsiElement(o);
    }

    public void visitVisibility(@NotNull ApexVisibility o) {
        visitPsiElement(o);
    }

    public void visitWeirdExpression(@NotNull ApexWeirdExpression o) {
        visitExpression(o);
    }

    public void visitWhileStatement(@NotNull ApexWhileStatement o) {
        visitPsiElement(o);
    }

    // --- Terminal hooks: every delegation chain above ends in one of these. ---

    public void visitDeclarationElement(@NotNull ApexDeclarationElement o) {
        visitPsiElement(o);
    }

    public void visitReferenceElement(@NotNull ApexReferenceElement o) {
        visitPsiElement(o);
    }

    // Final funnel into the platform's generic element hook.
    public void visitPsiElement(@NotNull PsiElement o) {
        visitElement(o);
    }
}
| |
/*******************************************************************************
* Copyright 2009-2017 Amazon Services. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
*
* You may not use this file except in compliance with the License.
* You may obtain a copy of the License at: http://aws.amazon.com/apache2.0
* This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*******************************************************************************
* Seller Review Enrollment Payment Event
* API Version: 2015-05-01
* Library Version: 2017-07-26
* Generated: Tue Jul 25 12:48:56 UTC 2017
*/
package com.amazonservices.mws.finances.model;
import javax.xml.datatype.XMLGregorianCalendar;
import com.amazonservices.mws.client.*;
/**
* SellerReviewEnrollmentPaymentEvent complex type.
*
* XML schema:
*
* <pre>
* <complexType name="SellerReviewEnrollmentPaymentEvent">
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <sequence>
* <element name="PostedDate" type="{http://www.w3.org/2001/XMLSchema}dateTime" minOccurs="0"/>
* <element name="EnrollmentId" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/>
* <element name="ParentASIN" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/>
* <element name="FeeComponent" type="{http://mws.amazonservices.com/Finances/2015-05-01}FeeComponent" minOccurs="0"/>
* <element name="ChargeComponent" type="{http://mws.amazonservices.com/Finances/2015-05-01}ChargeComponent" minOccurs="0"/>
* <element name="TotalAmount" type="{http://mws.amazonservices.com/Finances/2015-05-01}Currency" minOccurs="0"/>
* </sequence>
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*/
public class SellerReviewEnrollmentPaymentEvent extends AbstractMwsObject {
private XMLGregorianCalendar postedDate;
private String enrollmentId;
private String parentASIN;
private FeeComponent feeComponent;
private ChargeComponent chargeComponent;
private Currency totalAmount;
/**
 * Returns the posted date of this payment event.
 *
 * @return The PostedDate value, or null when it has not been set.
 */
public XMLGregorianCalendar getPostedDate() {
    return this.postedDate;
}
/**
 * Replaces the posted date of this payment event.
 *
 * @param postedDate The value to store; may be null to clear the field.
 */
public void setPostedDate(XMLGregorianCalendar postedDate) {
    this.postedDate = postedDate;
}
/**
 * Tells whether PostedDate currently holds a value.
 *
 * @return true when PostedDate is non-null.
 */
public boolean isSetPostedDate() {
    return this.postedDate != null;
}
/**
 * Fluent setter for PostedDate.
 *
 * @param postedDate The value to store.
 * @return This instance, enabling call chaining.
 */
public SellerReviewEnrollmentPaymentEvent withPostedDate(XMLGregorianCalendar postedDate) {
    this.postedDate = postedDate;
    return this;
}
/**
 * Returns the enrollment identifier for this event.
 *
 * @return The EnrollmentId value, or null when it has not been set.
 */
public String getEnrollmentId() {
    return this.enrollmentId;
}
/**
 * Replaces the enrollment identifier for this event.
 *
 * @param enrollmentId The value to store; may be null to clear the field.
 */
public void setEnrollmentId(String enrollmentId) {
    this.enrollmentId = enrollmentId;
}
/**
 * Tells whether EnrollmentId currently holds a value.
 *
 * @return true when EnrollmentId is non-null.
 */
public boolean isSetEnrollmentId() {
    return this.enrollmentId != null;
}
/**
 * Fluent setter for EnrollmentId.
 *
 * @param enrollmentId The value to store.
 * @return This instance, enabling call chaining.
 */
public SellerReviewEnrollmentPaymentEvent withEnrollmentId(String enrollmentId) {
    this.enrollmentId = enrollmentId;
    return this;
}
/**
 * Returns the parent ASIN associated with this event.
 *
 * @return The ParentASIN value, or null when it has not been set.
 */
public String getParentASIN() {
    return this.parentASIN;
}
/**
 * Replaces the parent ASIN associated with this event.
 *
 * @param parentASIN The value to store; may be null to clear the field.
 */
public void setParentASIN(String parentASIN) {
    this.parentASIN = parentASIN;
}
/**
 * Tells whether ParentASIN currently holds a value.
 *
 * @return true when ParentASIN is non-null.
 */
public boolean isSetParentASIN() {
    return this.parentASIN != null;
}
/**
 * Fluent setter for ParentASIN.
 *
 * @param parentASIN The value to store.
 * @return This instance, enabling call chaining.
 */
public SellerReviewEnrollmentPaymentEvent withParentASIN(String parentASIN) {
    this.parentASIN = parentASIN;
    return this;
}
/**
 * Returns the fee component of this event.
 *
 * @return The FeeComponent value, or null when it has not been set.
 */
public FeeComponent getFeeComponent() {
    return this.feeComponent;
}
/**
 * Replaces the fee component of this event.
 *
 * @param feeComponent The value to store; may be null to clear the field.
 */
public void setFeeComponent(FeeComponent feeComponent) {
    this.feeComponent = feeComponent;
}
/**
 * Tells whether FeeComponent currently holds a value.
 *
 * @return true when FeeComponent is non-null.
 */
public boolean isSetFeeComponent() {
    return this.feeComponent != null;
}
/**
 * Fluent setter for FeeComponent.
 *
 * @param feeComponent The value to store.
 * @return This instance, enabling call chaining.
 */
public SellerReviewEnrollmentPaymentEvent withFeeComponent(FeeComponent feeComponent) {
    this.feeComponent = feeComponent;
    return this;
}
/**
 * Returns the charge component of this event.
 *
 * @return The ChargeComponent value, or null when it has not been set.
 */
public ChargeComponent getChargeComponent() {
    return this.chargeComponent;
}
/**
 * Replaces the charge component of this event.
 *
 * @param chargeComponent The value to store; may be null to clear the field.
 */
public void setChargeComponent(ChargeComponent chargeComponent) {
    this.chargeComponent = chargeComponent;
}
/**
 * Tells whether ChargeComponent currently holds a value.
 *
 * @return true when ChargeComponent is non-null.
 */
public boolean isSetChargeComponent() {
    return this.chargeComponent != null;
}
/**
* Set the value of ChargeComponent, return this.
*
* @param chargeComponent
* The new value to set.
*
* @return This instance.
*/
public SellerReviewEnrollmentPaymentEvent withChargeComponent(ChargeComponent chargeComponent) {
this.chargeComponent = chargeComponent;
return this;
}
/**
* Get the value of TotalAmount.
*
* @return The value of TotalAmount.
*/
public Currency getTotalAmount() {
return totalAmount;
}
/**
* Set the value of TotalAmount.
*
* @param totalAmount
* The new value to set.
*/
public void setTotalAmount(Currency totalAmount) {
this.totalAmount = totalAmount;
}
/**
* Check to see if TotalAmount is set.
*
* @return true if TotalAmount is set.
*/
public boolean isSetTotalAmount() {
return totalAmount != null;
}
/**
* Set the value of TotalAmount, return this.
*
* @param totalAmount
* The new value to set.
*
* @return This instance.
*/
public SellerReviewEnrollmentPaymentEvent withTotalAmount(Currency totalAmount) {
this.totalAmount = totalAmount;
return this;
}
    /**
     * Read members from a MwsReader.
     * <p>
     * The elements are consumed in declaration order; do not reorder these
     * calls, since the reader presumably expects the XML schema sequence
     * (NOTE(review): confirm against the Finances 2015-05-01 XSD).
     *
     * @param r
     *            The reader to read from.
     */
    @Override
    public void readFragmentFrom(MwsReader r) {
        postedDate = r.read("PostedDate", XMLGregorianCalendar.class);
        enrollmentId = r.read("EnrollmentId", String.class);
        parentASIN = r.read("ParentASIN", String.class);
        feeComponent = r.read("FeeComponent", FeeComponent.class);
        chargeComponent = r.read("ChargeComponent", ChargeComponent.class);
        totalAmount = r.read("TotalAmount", Currency.class);
    }
    /**
     * Write members to a MwsWriter.
     * <p>
     * Members are emitted in the same order that {@code readFragmentFrom}
     * consumes them; keep the two methods in sync.
     *
     * @param w
     *            The writer to write to.
     */
    @Override
    public void writeFragmentTo(MwsWriter w) {
        w.write("PostedDate", postedDate);
        w.write("EnrollmentId", enrollmentId);
        w.write("ParentASIN", parentASIN);
        w.write("FeeComponent", feeComponent);
        w.write("ChargeComponent", chargeComponent);
        w.write("TotalAmount", totalAmount);
    }
    /**
     * Write tag, xmlns and members to a MwsWriter.
     * <p>
     * Emits this object as a complete element in the MWS Finances
     * 2015-05-01 namespace.
     *
     * @param w
     *            The Writer to write to.
     */
    @Override
    public void writeTo(MwsWriter w) {
        w.write("http://mws.amazonservices.com/Finances/2015-05-01", "SellerReviewEnrollmentPaymentEvent",this);
    }
/** Default constructor. */
public SellerReviewEnrollmentPaymentEvent() {
super();
}
}
| |
/*
* Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.services.s3.model;
import static com.amazonaws.util.DateUtils.cloneDate;
import java.io.Serializable;
import java.util.Collections;
import java.util.Date;
import java.util.TreeMap;
import java.util.Map;
import com.amazonaws.services.s3.Headers;
import com.amazonaws.services.s3.internal.Constants;
import com.amazonaws.services.s3.internal.ObjectExpirationResult;
import com.amazonaws.services.s3.internal.ObjectRestoreResult;
import com.amazonaws.services.s3.internal.S3RequesterChargedResult;
import com.amazonaws.services.s3.internal.ServerSideEncryptionResult;
/**
* Represents the object metadata that is stored with Amazon S3. This includes custom
* user-supplied metadata, as well as the standard HTTP headers that Amazon S3
* sends and receives (Content-Length, ETag, Content-MD5, etc.).
*/
public class ObjectMetadata implements ServerSideEncryptionResult, S3RequesterChargedResult,
ObjectExpirationResult, ObjectRestoreResult, Cloneable, Serializable
{
/*
* TODO: Might be nice to get as many of the internal use only methods out
* of here so users never even see them.
* Example: we could set the ETag header directly through the raw
* metadata map instead of having a setter for it.
*/
/**
* Custom user metadata, represented in responses with the x-amz-meta-
* header prefix
*/
private Map<String, String> userMetadata = new TreeMap<String, String>(String.CASE_INSENSITIVE_ORDER);
/**
* All other (non user custom) headers such as Content-Length, Content-Type,
* etc.
*/
private Map<String, Object> metadata = new TreeMap<String, Object>(String.CASE_INSENSITIVE_ORDER);
public static final String AES_256_SERVER_SIDE_ENCRYPTION =
SSEAlgorithm.AES256.getAlgorithm();
/**
* The date when the object is no longer cacheable.
*/
private Date httpExpiresDate;
/**
* The time this object will expire and be completely removed from S3, or
* null if this object will never expire.
* <p>
* This and the expiration time rule aren't stored in the metadata map
* because the header contains both the time and the rule.
*/
private Date expirationTime;
/**
* The expiration rule id for this object.
*/
private String expirationTimeRuleId;
/**
* Boolean value indicating whether there is an ongoing request to restore
* an archived copy of this object from Amazon Glacier.
*/
private Boolean ongoingRestore;
/**
* The time at which an object that has been temporarily restored from
* Glacier will expire, and will need to be restored again in order to be
* accessed. Null if this object has not been restored from Glacier.
*/
private Date restoreExpirationTime;
public ObjectMetadata() {}
private ObjectMetadata(ObjectMetadata from) {
this.userMetadata = from.userMetadata == null
? null
: new TreeMap<String,String>(from.userMetadata);
// shallow clone the meata data
this.metadata = from.metadata == null
? null
: new TreeMap<String, Object>(from.metadata);
this.expirationTime = cloneDate(from.expirationTime);
this.expirationTimeRuleId = from.expirationTimeRuleId;
this.httpExpiresDate = cloneDate(from.httpExpiresDate);
this.ongoingRestore = from.ongoingRestore;
this.restoreExpirationTime = cloneDate(from.restoreExpirationTime);
}
/**
* <p>
* Gets the custom user-metadata for the associated object.
* </p>
* <p>
* Amazon S3 can store additional metadata on objects by internally
* representing it as HTTP headers prefixed with "x-amz-meta-". Use
* user-metadata to store arbitrary metadata alongside their data in Amazon
* S3. When setting user metadata, callers <i>should not</i> include the
* internal "x-amz-meta-" prefix; this library will handle that for them.
* Likewise, when callers retrieve custom user-metadata, they will not see
* the "x-amz-meta-" header prefix.
* </p>
* <p>
* User-metadata keys are <b>case insensitive</b> and will be returned as
* lowercase strings, even if they were originally specified with uppercase
* strings.
* </p>
* <p>
* Note that user-metadata for an object is limited by the HTTP request
* header limit. All HTTP headers included in a request (including user
* metadata headers and other standard HTTP headers) must be less than 8KB.
* </p>
*
* @return The custom user metadata for the associated object.
*
* @see ObjectMetadata#setUserMetadata(Map)
* @see ObjectMetadata#addUserMetadata(String, String)
*/
public Map<String, String> getUserMetadata() {
return userMetadata;
}
/**
* <p>
* Sets the custom user-metadata for the associated object.
* </p>
* <p>
* Amazon S3 can store additional metadata on objects by internally
* representing it as HTTP headers prefixed with "x-amz-meta-". Use
* user-metadata to store arbitrary metadata alongside their data in Amazon
* S3. When setting user metadata, callers <i>should not</i> include the
* internal "x-amz-meta-" prefix; this library will handle that for them.
* Likewise, when callers retrieve custom user-metadata, they will not see
* the "x-amz-meta-" header prefix.
* </p>
* <p>
* User-metadata keys are <b>case insensitive</b> and will be returned as
* lowercase strings, even if they were originally specified with uppercase
* strings.
* </p>
* <p>
* Note that user-metadata for an object is limited by the HTTP request
* header limit. All HTTP headers included in a request (including user
* metadata headers and other standard HTTP headers) must be less than 8KB.
* </p>
*
* @param userMetadata
* The custom user-metadata for the associated object. Note that
* the key should not include the internal S3 HTTP header prefix.
* @see ObjectMetadata#getUserMetadata()
* @see ObjectMetadata#addUserMetadata(String, String)
*/
public void setUserMetadata(Map<String, String> userMetadata) {
this.userMetadata = userMetadata;
}
/**
* For internal use only. Sets a specific metadata header value. Not
* intended to be called by external code.
*
* @param key
* The name of the header being set.
* @param value
* The value for the header.
*/
public void setHeader(String key, Object value) {
metadata.put(key, value);
}
/**
* <p>
* Adds the key value pair of custom user-metadata for the associated
* object. If the entry in the custom user-metadata map already contains the
* specified key, it will be replaced with these new contents.
* </p>
* <p>
* Amazon S3 can store additional metadata on objects by internally
* representing it as HTTP headers prefixed with "x-amz-meta-".
* Use user-metadata to store arbitrary metadata alongside their data in
* Amazon S3. When setting user metadata, callers <i>should not</i> include
* the internal "x-amz-meta-" prefix; this library will handle that for
* them. Likewise, when callers retrieve custom user-metadata, they will not
* see the "x-amz-meta-" header prefix.
* </p>
* <p>
* Note that user-metadata for an object is limited by the HTTP request
* header limit. All HTTP headers included in a request (including user
* metadata headers and other standard HTTP headers) must be less than 8KB.
* </p>
*
* @param key
* The key for the custom user metadata entry. Note that the key
* should not include
* the internal S3 HTTP header prefix.
* @param value
* The value for the custom user-metadata entry.
*
* @see ObjectMetadata#setUserMetadata(Map)
* @see ObjectMetadata#getUserMetadata()
*/
public void addUserMetadata(String key, String value) {
this.userMetadata.put(key, value);
}
/**
* For internal use only. Gets a map of the raw metadata/headers
* for the associated object.
*
* @return A map of the raw metadata/headers for the associated object.
*/
public Map<String, Object> getRawMetadata() {
return Collections.unmodifiableMap(new TreeMap<String,Object>(metadata));
}
/**
* For internal use only. Returns the raw value of the metadata/headers
* for the specified key.
*/
public Object getRawMetadataValue(String key) {
return metadata.get(key);
}
/**
* Gets the value of the Last-Modified header, indicating the date
* and time at which Amazon S3 last recorded a modification to the
* associated object.
*
* @return The date and time at which Amazon S3 last recorded a modification
* to the associated object. Returns <code>null</code> if
* the Last-Modified header hasn't been set.
*/
public Date getLastModified() {
return cloneDate((Date)metadata.get(Headers.LAST_MODIFIED));
}
/**
* For internal use only. Sets the Last-Modified header value
* indicating the date and time at which Amazon S3 last recorded a
* modification to the associated object.
*
* @param lastModified
* The date and time at which Amazon S3 last recorded a
* modification to the associated object.
*/
public void setLastModified(Date lastModified) {
metadata.put(Headers.LAST_MODIFIED, lastModified);
}
/**
* <p>
* Gets the Content-Length HTTP header indicating the size of the
* associated object in bytes.
* </p>
* <p>
* This field is required when uploading objects to S3, but the AWS S3 Java
* client will automatically set it when working directly with files. When
* uploading directly from a stream, set this field if
* possible. Otherwise the client must buffer the entire stream in
* order to calculate the content length before sending the data to
* Amazon S3.
* </p>
* <p>
* For more information on the Content-Length HTTP header, see <a
* href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13">
* http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13</a>
* </p>
*
* @return The Content-Length HTTP header indicating the size of the
* associated object in bytes. Returns <code>null</code>
* if it hasn't been set yet.
*
* @see ObjectMetadata#setContentLength(long)
*/
public long getContentLength() {
Long contentLength = (Long)metadata.get(Headers.CONTENT_LENGTH);
if (contentLength == null) return 0;
return contentLength.longValue();
}
/**
* Returns the physical length of the entire object stored in S3.
* This is useful during, for example, a range get operation.
*/
public long getInstanceLength() {
// See Content-Range in
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
String contentRange = (String)metadata.get(Headers.CONTENT_RANGE);
if (contentRange != null) {
int pos = contentRange.lastIndexOf("/");
if (pos >= 0)
return Long.parseLong(contentRange.substring(pos+1));
}
return getContentLength();
}
/**
* <p>
* Sets the Content-Length HTTP header indicating the size of the
* associated object in bytes.
* </p>
* <p>
* This field is required when uploading objects to S3, but the AWS S3 Java
* client will automatically set it when working directly with files. When
* uploading directly from a stream, set this field if
* possible. Otherwise the client must buffer the entire stream in
* order to calculate the content length before sending the data to
* Amazon S3.
* </p>
* <p>
* For more information on the Content-Length HTTP header, see <a
* href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13">
* http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13</a>
* </p>
*
* @param contentLength
* The Content-Length HTTP header indicating the size of the
* associated object in bytes.
*
* @see ObjectMetadata#getContentLength()
*/
public void setContentLength(long contentLength) {
metadata.put(Headers.CONTENT_LENGTH, contentLength);
}
/**
* <p>
* Gets the Content-Type HTTP header, which indicates the type of content
* stored in the associated object. The value of this header is a standard
* MIME type.
* </p>
* <p>
* When uploading files, the AWS S3 Java client will attempt to determine
* the correct content type if one hasn't been set yet. Users are
* responsible for ensuring a suitable content type is set when uploading
* streams. If no content type is provided and cannot be determined by
* the filename, the default content type, "application/octet-stream", will
* be used.
* </p>
* <p>
* For more information on the Content-Type header, see <a
* href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17">
* http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17</a>
* </p>
*
* @return The HTTP Content-Type header, indicating the type of content
* stored in the associated S3 object. Returns <code>null</code>
* if it hasn't been
* set.
*
* @see ObjectMetadata#setContentType(String)
*/
public String getContentType() {
return (String)metadata.get(Headers.CONTENT_TYPE);
}
/**
* <p>
* Sets the Content-Type HTTP header indicating the type of content
* stored in the associated object. The value of this header is a standard
* MIME type.
* </p>
* <p>
* When uploading files, the AWS S3 Java client will attempt to determine
* the correct content type if one hasn't been set yet. Users are
* responsible for ensuring a suitable content type is set when uploading
* streams. If no content type is provided and cannot be determined by
* the filename, the default content type "application/octet-stream" will
* be used.
* </p>
* <p>
* For more information on the Content-Type header, see <a
* href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17">
* http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17</a>
* </p>
*
* @param contentType
* The HTTP Content-Type header indicating the type of content
* stored in the associated S3 object.
*
* @see ObjectMetadata#getContentType()
*/
public void setContentType(String contentType) {
metadata.put(Headers.CONTENT_TYPE, contentType);
}
/**
* <p>
* Gets the Content-Language HTTP header, which describes the natural language(s) of the
* intended audience for the enclosed entity.
* </p>
* <p>
* For more information on the Content-Type header, see <a
* href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17">
* http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17</a>
* </p>
*
* @return The HTTP Content-Language header, which describes the natural language(s) of the
* intended audience for the enclosed entity. Returns <code>null</code>
* if it hasn't been set.
*
* @see ObjectMetadata#setContentLanguage(String)
*/
public String getContentLanguage() {
return (String)metadata.get(Headers.CONTENT_LANGUAGE);
}
/**
* <p>
* Sets the Content-Language HTTP header which describes the natural language(s) of the
* intended audience for the enclosed entity.
* </p>
* <p>
* For more information on the Content-Type header, see <a
* href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17">
* http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17</a>
* </p>
*
* @param contentLanguage
* The HTTP Content-Language header which describes the natural language(s) of the
* intended audience for the enclosed entity.
*
* @see ObjectMetadata#getContentLanguage()
*/
public void setContentLanguage(String contentLanguage) {
metadata.put(Headers.CONTENT_LANGUAGE, contentLanguage);
}
/**
* <p>
* Gets the optional Content-Encoding HTTP header specifying what
* content encodings have been applied to the object and what decoding
* mechanisms must be applied in order to obtain the media-type referenced
* by the Content-Type field.
* </p>
* <p>
* For more information on how the Content-Encoding HTTP header works, see
* <a
* href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11">
* http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11</a>
* </p>
*
* @return The HTTP Content-Encoding header.
* Returns <code>null</code> if it hasn't been set.
*
* @see ObjectMetadata#setContentType(String)
*/
public String getContentEncoding() {
return (String)metadata.get(Headers.CONTENT_ENCODING);
}
/**
* <p>
* Sets the optional Content-Encoding HTTP header specifying what
* content encodings have been applied to the object and what decoding
* mechanisms must be applied in order to obtain the media-type referenced
* by the Content-Type field.
* </p>
* <p>
* For more information on how the Content-Encoding HTTP header works, see
* <a
* href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11">
* http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11</a>
* </p>
*
* @param encoding
* The HTTP Content-Encoding header, as defined in RFC 2616.
*
* @see <a
* href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11"
* >http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11</a>
*
* @see ObjectMetadata#getContentType()
*/
public void setContentEncoding(String encoding) {
metadata.put(Headers.CONTENT_ENCODING, encoding);
}
/**
* <p>
* Gets the optional Cache-Control HTTP header which allows the user to
* specify caching behavior along the HTTP request/reply chain.
* </p>
* <p>
* For more information on how the Cache-Control HTTP header affects HTTP
* requests and responses, see <a
* href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9">
* http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9</a>
* </p>
*
* @return The HTTP Cache-Control header as defined in RFC 2616.
* Returns <code>null</code> if
* it hasn't been set.
*
* @see ObjectMetadata#setCacheControl(String)
*/
public String getCacheControl() {
return (String)metadata.get(Headers.CACHE_CONTROL);
}
/**
* <p>
* Sets the optional Cache-Control HTTP header which allows the user to
* specify caching behavior along the HTTP request/reply chain.
* </p>
* <p>
* For more information on how the Cache-Control HTTP header affects HTTP
* requests and responses see <a
* href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9">
* http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9</a>
* </p>
*
* @param cacheControl
* The HTTP Cache-Control header as defined in RFC 2616.
*
* @see ObjectMetadata#getCacheControl()
*/
public void setCacheControl(String cacheControl) {
metadata.put(Headers.CACHE_CONTROL, cacheControl);
}
/**
* <p>
* Sets the base64 encoded 128-bit MD5 digest of the associated object
* (content - not including headers) according to RFC 1864. This data is
* used as a message integrity check to verify that the data received by
* Amazon S3 is the same data that the caller sent. If set to null,then the
* MD5 digest is removed from the metadata.
* </p>
* <p>
* This field represents the base64 encoded 128-bit MD5 digest digest of an
* object's content as calculated on the caller's side. The ETag metadata
* field represents the hex encoded 128-bit MD5 digest as computed by Amazon
* S3.
* </p>
* <p>
* The AWS S3 Java client will attempt to calculate this field automatically
* when uploading files to Amazon S3.
* </p>
*
* @param md5Base64
* The base64 encoded MD5 hash of the content for the object
* associated with this metadata.
*
* @see ObjectMetadata#getContentMD5()
*/
public void setContentMD5(String md5Base64) {
if(md5Base64 == null){
metadata.remove(Headers.CONTENT_MD5);
}else{
metadata.put(Headers.CONTENT_MD5, md5Base64);
}
}
/**
* <p>
* Gets the base64 encoded 128-bit MD5 digest of the associated object
* (content - not including headers) according to RFC 1864. This data is
* used as a message integrity check to verify that the data received by
* Amazon S3 is the same data that the caller sent.
* </p>
* <p>
* This field represents the base64 encoded 128-bit MD5 digest digest of an
* object's content as calculated on the caller's side. The ETag metadata
* field represents the hex encoded 128-bit MD5 digest as computed by Amazon
* S3.
* </p>
* <p>
* The AWS S3 Java client will attempt to calculate this field automatically
* when uploading files to Amazon S3.
* </p>
*
* @return The base64 encoded MD5 hash of the content for the associated
* object. Returns <code>null</code> if the MD5 hash of the content
* hasn't been set.
*
* @see ObjectMetadata#setContentMD5(String)
*/
public String getContentMD5() {
return (String)metadata.get(Headers.CONTENT_MD5);
}
/**
* <p>
* Sets the optional Content-Disposition HTTP header, which specifies
* presentational information such as the recommended filename for the
* object to be saved as.
* </p>
* <p>
* For more information on how the Content-Disposition header affects HTTP
* client behavior, see <a
* href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1">
* http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1</a>
* </p>
*
* @param disposition
* The value for the Content-Disposition header.
*
* @see ObjectMetadata#getContentDisposition()
*/
public void setContentDisposition(String disposition) {
metadata.put(Headers.CONTENT_DISPOSITION, disposition);
}
/**
* <p>
* Gets the optional Content-Disposition HTTP header, which specifies
* presentation information for the object such as the recommended filename
* for the object to be saved as.
* </p>
* <p>
* For more information on how the Content-Disposition header affects HTTP
* client behavior, see <a
* href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1">
* http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1</a>
* </p>
*
* @return The value of the Content-Disposition header.
* Returns <code>null</code> if the Content-Disposition header
* hasn't been set.
*
* @see <a
* href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1"
* >http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1</a>
*
* @see ObjectMetadata#setCacheControl(String)
*/
public String getContentDisposition() {
return (String)metadata.get(Headers.CONTENT_DISPOSITION);
}
/**
* Gets the hex encoded 128-bit MD5 digest of the associated object
* according to RFC 1864. This data is used as an integrity check to verify
* that the data received by the caller is the same data that was sent by
* Amazon S3.
* <p>
* This field represents the hex encoded 128-bit MD5 digest of an object's
* content as calculated by Amazon S3. The ContentMD5 field represents the
* base64 encoded 128-bit MD5 digest as calculated on the caller's side.
* </p>
*
* @return The hex encoded MD5 hash of the content for the associated object
* as calculated by Amazon S3.
* Returns <code>null</code> if it hasn't been set yet.
*/
public String getETag() {
return (String)metadata.get(Headers.ETAG);
}
/**
* Gets the version ID of the associated Amazon S3 object if available.
* Version IDs are only assigned to objects when an object is uploaded to an
* Amazon S3 bucket that has object versioning enabled.
*
* @return The version ID of the associated Amazon S3 object if available.
*/
public String getVersionId() {
return (String)metadata.get(Headers.S3_VERSION_ID);
}
/**
* Returns the server-side encryption algorithm when encrypting the object
* using AWS-managed keys .
*/
@Override
public String getSSEAlgorithm() {
return (String)metadata.get(Headers.SERVER_SIDE_ENCRYPTION);
}
/**
* @deprecated Replaced by {@link #getSSEAlgorithm()}
*/
@Deprecated
public String getServerSideEncryption() {
return (String)metadata.get(Headers.SERVER_SIDE_ENCRYPTION);
}
/**
* Sets the server-side encryption algorithm when encrypting the object
* using AWS-managed keys.
*
* @param algorithm
* The server-side encryption algorithm when encrypting the
* object using AWS-managed keys .
*/
@Override
public void setSSEAlgorithm(String algorithm) {
metadata.put(Headers.SERVER_SIDE_ENCRYPTION, algorithm);
}
/**
* @deprecated Replaced by {@link #setSSEAlgorithm(String)}
*/
@Deprecated
public void setServerSideEncryption(String algorithm) {
metadata.put(Headers.SERVER_SIDE_ENCRYPTION, algorithm);
}
/**
* {@inheritDoc}
*/
@Override
public String getSSECustomerAlgorithm() {
return (String) metadata.get(Headers.SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM);
}
/**
* For internal use only. This method is only used to set the value in the
* object after receiving the value in a response from S3. When sending
* requests, use {@link SSECustomerKey} members in request objects.
*/
@Override
public void setSSECustomerAlgorithm(String algorithm) {
metadata.put(Headers.SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM, algorithm);
}
/**
* {@inheritDoc}
*/
@Override
public String getSSECustomerKeyMd5() {
return (String)metadata.get(Headers.SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5);
}
/**
* For internal use only. This method is only used to set the value in the
* object after receiving the value in a response from S3. When sending
* requests, use {@link SSECustomerKey} members in request objects.
*/
public void setSSECustomerKeyMd5(String md5Digest) {
metadata.put(Headers.SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5, md5Digest);
}
/**
* Returns the time this object will expire and be completely removed from
* S3. Returns null if this object will never expire.
*/
public Date getExpirationTime() {
return cloneDate(expirationTime);
}
/**
* For internal use only. This will *not* set the object's expiration time,
* and is only used to set the value in the object after receiving the value
* in a response from S3.
*
* @param expirationTime
* The expiration time for the object.
*/
public void setExpirationTime(Date expirationTime) {
this.expirationTime = expirationTime;
}
/**
* Returns the {@link BucketLifecycleConfiguration} rule ID for this
* object's expiration, or null if it doesn't expire.
*/
public String getExpirationTimeRuleId() {
return expirationTimeRuleId;
}
/**
* Sets the {@link BucketLifecycleConfiguration} rule ID for this object's
* expiration
*
* @param expirationTimeRuleId
* The rule ID for this object's expiration
*/
public void setExpirationTimeRuleId(String expirationTimeRuleId) {
this.expirationTimeRuleId = expirationTimeRuleId;
}
/**
* Returns the time at which an object that has been temporarily restored
* from Amazon Glacier will expire, and will need to be restored again in
* order to be accessed. Returns null if this is not a temporary copy of an
* object restored from Glacier.
*/
public Date getRestoreExpirationTime() {
return cloneDate(restoreExpirationTime);
}
/**
* For internal use only. This will *not* set the object's restore
* expiration time, and is only used to set the value in the object after
* receiving the value in a response from S3.
*
* @param restoreExpirationTime
* The new restore expiration time for the object.
*/
public void setRestoreExpirationTime(Date restoreExpirationTime) {
this.restoreExpirationTime = restoreExpirationTime;
}
/**
* For internal use only. Sets the boolean value which indicates whether
* there is ongoing restore request. Not intended to be called by external
* code.
*/
public void setOngoingRestore(boolean ongoingRestore) {
this.ongoingRestore = Boolean.valueOf(ongoingRestore);
}
/**
* Returns the boolean value which indicates whether there is ongoing restore request.
*/
public Boolean getOngoingRestore() {
return this.ongoingRestore;
}
/**
* Set the date when the object is no longer cacheable.
*/
public void setHttpExpiresDate(Date httpExpiresDate) {
this.httpExpiresDate = httpExpiresDate;
}
/**
* Returns the date when the object is no longer cacheable.
*/
public Date getHttpExpiresDate() {
return cloneDate(httpExpiresDate);
}
/**
* @return The storage class of the object. Returns null if the object is in STANDARD storage.
* See {@link StorageClass} for possible values
*/
public String getStorageClass() {
final Object storageClass = metadata.get(Headers.STORAGE_CLASS);
if (storageClass == null) {
return null;
}
return storageClass.toString();
}
/**
* Returns the value of the specified user meta datum.
*/
public String getUserMetaDataOf(String key) {
return userMetadata == null ? null : userMetadata.get(key);
}
/**
* Returns a clone of this <code>ObjectMetadata</code>. Note the clone of
* the internal {@link #metadata} is limited to a shallow copy due to the
* unlimited type of value in the map. Other fields can be regarded as deep
* clone.
*/
public ObjectMetadata clone() {
return new ObjectMetadata(this);
}
/**
* Returns the AWS Key Management System key id used for Server Side
* Encryption of the Amazon S3 object.
*/
public String getSSEAwsKmsKeyId() {
return (String) metadata
.get(Headers.SERVER_SIDE_ENCRYPTION_AWS_KMS_KEYID);
}
@Override
public boolean isRequesterCharged() {
return metadata.get(Headers.REQUESTER_CHARGED_HEADER) != null;
}
@Override
public void setRequesterCharged(boolean isRequesterCharged) {
if (isRequesterCharged) {
metadata.put(Headers.REQUESTER_CHARGED_HEADER, Constants.REQUESTER_PAYS);
}
}
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.internal.processors.platform.cache;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.cache.CacheEntryProcessor;
import org.apache.ignite.cache.CacheMetrics;
import org.apache.ignite.cache.CachePartialUpdateException;
import org.apache.ignite.cache.CachePeekMode;
import org.apache.ignite.cache.query.Query;
import org.apache.ignite.cache.query.ScanQuery;
import org.apache.ignite.cache.query.SqlFieldsQuery;
import org.apache.ignite.cache.query.SqlQuery;
import org.apache.ignite.cache.query.TextQuery;
import org.apache.ignite.internal.binary.BinaryRawReaderEx;
import org.apache.ignite.internal.binary.BinaryRawWriterEx;
import org.apache.ignite.internal.processors.cache.CacheOperationContext;
import org.apache.ignite.internal.processors.cache.CachePartialUpdateCheckedException;
import org.apache.ignite.internal.processors.cache.IgniteCacheProxy;
import org.apache.ignite.internal.processors.cache.query.QueryCursorEx;
import org.apache.ignite.internal.processors.platform.PlatformAbstractTarget;
import org.apache.ignite.internal.processors.platform.PlatformContext;
import org.apache.ignite.internal.processors.platform.PlatformNativeException;
import org.apache.ignite.internal.processors.platform.cache.query.PlatformContinuousQuery;
import org.apache.ignite.internal.processors.platform.cache.query.PlatformFieldsQueryCursor;
import org.apache.ignite.internal.processors.platform.cache.query.PlatformQueryCursor;
import org.apache.ignite.internal.processors.platform.utils.PlatformFutureUtils;
import org.apache.ignite.internal.processors.platform.utils.PlatformUtils;
import org.apache.ignite.internal.util.GridConcurrentFactory;
import org.apache.ignite.internal.util.typedef.C1;
import org.apache.ignite.lang.IgniteFuture;
import org.jetbrains.annotations.Nullable;
import javax.cache.Cache;
import javax.cache.expiry.Duration;
import javax.cache.expiry.ExpiryPolicy;
import javax.cache.processor.EntryProcessorException;
import javax.cache.processor.EntryProcessorResult;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Lock;
/**
* Native cache wrapper implementation.
*/
@SuppressWarnings({"unchecked", "UnusedDeclaration", "TryFinallyCanBeTryWithResources"})
public class PlatformCache extends PlatformAbstractTarget {
/** Operation code: clear entry for a single key. */
public static final int OP_CLEAR = 1;
/** Operation code: clear entries for a set of keys. */
public static final int OP_CLEAR_ALL = 2;
/** Operation code: contains-key check. */
public static final int OP_CONTAINS_KEY = 3;
/** Operation code: contains-keys check for a set of keys. */
public static final int OP_CONTAINS_KEYS = 4;
/** Operation code: get. */
public static final int OP_GET = 5;
/** Operation code: get-all for a set of keys. */
public static final int OP_GET_ALL = 6;
/** Operation code: get-and-put. */
public static final int OP_GET_AND_PUT = 7;
/** Operation code: get-and-put-if-absent. */
public static final int OP_GET_AND_PUT_IF_ABSENT = 8;
/** Operation code: get-and-remove. */
public static final int OP_GET_AND_REMOVE = 9;
/** Operation code: get-and-replace. */
public static final int OP_GET_AND_REPLACE = 10;
/** Operation code: get cache name. */
public static final int OP_GET_NAME = 11;
/** Operation code: invoke entry processor on a single key. */
public static final int OP_INVOKE = 12;
/** Operation code: invoke entry processor on a set of keys. */
public static final int OP_INVOKE_ALL = 13;
/** Operation code: local lock check. */
public static final int OP_IS_LOCAL_LOCKED = 14;
/** Operation code: load cache (cluster-wide). */
public static final int OP_LOAD_CACHE = 15;
/** Operation code: local evict. */
public static final int OP_LOC_EVICT = 16;
/** Operation code: local load cache. */
public static final int OP_LOC_LOAD_CACHE = 17;
/** Operation code: local promote. */
public static final int OP_LOC_PROMOTE = 18;
/** Operation code: local clear single key. Note: code 19 is unassigned. */
public static final int OP_LOCAL_CLEAR = 20;
/** Operation code: local clear for a set of keys. */
public static final int OP_LOCAL_CLEAR_ALL = 21;
/** Operation code: create lock for a single key. */
public static final int OP_LOCK = 22;
/** Operation code: create lock for multiple keys. */
public static final int OP_LOCK_ALL = 23;
/** Operation code: read cache metrics. */
public static final int OP_METRICS = 24;
/** Operation code: local peek. NOTE(review): private unlike most op codes — confirm intended. */
private static final int OP_PEEK = 25;
/** Operation code: put. */
private static final int OP_PUT = 26;
/** Operation code: put-all. */
private static final int OP_PUT_ALL = 27;
/** Operation code: put-if-absent. */
public static final int OP_PUT_IF_ABSENT = 28;
/** Operation code: start continuous query. */
public static final int OP_QRY_CONTINUOUS = 29;
/** Operation code: scan query. */
public static final int OP_QRY_SCAN = 30;
/** Operation code: SQL query. */
public static final int OP_QRY_SQL = 31;
/** Operation code: SQL fields query. */
public static final int OP_QRY_SQL_FIELDS = 32;
/** Operation code: text query. */
public static final int OP_QRY_TXT = 33;
/** Operation code: remove a set of keys. */
public static final int OP_REMOVE_ALL = 34;
/** Operation code: remove key if mapped to given value (boolean result). */
public static final int OP_REMOVE_BOOL = 35;
/** Operation code: remove single key (boolean result). */
public static final int OP_REMOVE_OBJ = 36;
/** Operation code: two-argument replace. */
public static final int OP_REPLACE_2 = 37;
/** Operation code: three-argument (compare-and-set) replace. */
public static final int OP_REPLACE_3 = 38;
/** Underlying JCache. */
private final IgniteCacheProxy cache;
/** Whether this cache is created with "keepBinary" flag on the other side. */
private final boolean keepBinary;
/** Shared stateless writer for async get-all results. */
private static final GetAllWriter WRITER_GET_ALL = new GetAllWriter();
/** Shared stateless writer for async invoke results. */
private static final EntryProcessorInvokeWriter WRITER_INVOKE = new EntryProcessorInvokeWriter();
/** Shared stateless writer for async invoke-all results. */
private static final EntryProcessorInvokeAllWriter WRITER_INVOKE_ALL = new EntryProcessorInvokeAllWriter();
/** Map with currently active locks. */
private final ConcurrentMap<Long, Lock> lockMap = GridConcurrentFactory.newMap();
/** Lock ID sequence. Static, so ids are unique across all PlatformCache instances. */
private static final AtomicLong LOCK_ID_GEN = new AtomicLong();
/**
 * Constructor.
 *
 * @param platformCtx Context.
 * @param cache Underlying cache.
 * @param keepBinary Keep binary flag.
 */
public PlatformCache(PlatformContext platformCtx, IgniteCache cache, boolean keepBinary) {
    super(platformCtx);
    // Internally the proxy type is required (delegate(), operationContext(), future()).
    this.cache = (IgniteCacheProxy)cache;
    this.keepBinary = keepBinary;
}
/**
 * Gets cache with "skip-store" flag set.
 *
 * @return This instance if the flag is already set, otherwise a new wrapper.
 */
public PlatformCache withSkipStore() {
    return cache.delegate().skipStore()
        ? this
        : new PlatformCache(platformCtx, cache.withSkipStore(), keepBinary);
}
/**
 * Gets cache with "keep binary" flag.
 *
 * @return This instance if the flag is already set, otherwise a new wrapper.
 */
public PlatformCache withKeepBinary() {
    return keepBinary ? this : new PlatformCache(platformCtx, cache.withKeepBinary(), true);
}
/**
 * Gets cache with provided expiry policy.
 *
 * @param create Encoded expiry for create.
 * @param update Encoded expiry for update.
 * @param access Encoded expiry for access.
 * @return New wrapper over a cache decorated with the policy.
 */
public PlatformCache withExpiryPolicy(final long create, final long update, final long access) {
    InteropExpiryPolicy plc = new InteropExpiryPolicy(create, update, access);
    return new PlatformCache(platformCtx, cache.withExpiryPolicy(plc), keepBinary);
}
/**
 * Gets cache with asynchronous mode enabled.
 *
 * @return This instance if already async, otherwise a new wrapper.
 */
public PlatformCache withAsync() {
    return cache.isAsync()
        ? this
        : new PlatformCache(platformCtx, (IgniteCache)cache.withAsync(), keepBinary);
}
/**
 * Gets cache with no-retries mode enabled.
 *
 * @return This instance if no-retries is already active, otherwise a new wrapper.
 */
public PlatformCache withNoRetries() {
    CacheOperationContext opCtx = cache.operationContext();
    boolean alreadyNoRetries = opCtx != null && opCtx.noRetries();
    return alreadyNoRetries ? this : new PlatformCache(platformCtx, cache.withNoRetries(), keepBinary);
}
/** {@inheritDoc} */
@Override protected long processInStreamOutLong(int type, BinaryRawReaderEx reader) throws IgniteCheckedException {
    // Dispatches stream-in/long-out cache operations by op code. The order of
    // reader.read* calls within each case is part of the wire protocol and must
    // match the platform (writer) side; Java's left-to-right argument evaluation
    // keeps paired reads (e.g. key then value) in stream order.
    switch (type) {
        case OP_PUT:
            cache.put(reader.readObjectDetached(), reader.readObjectDetached());
            return TRUE;
        case OP_REMOVE_BOOL:
            return cache.remove(reader.readObjectDetached(), reader.readObjectDetached()) ? TRUE : FALSE;
        case OP_REMOVE_ALL:
            cache.removeAll(PlatformUtils.readSet(reader));
            return TRUE;
        case OP_PUT_ALL:
            cache.putAll(PlatformUtils.readMap(reader));
            return TRUE;
        case OP_LOC_EVICT:
            cache.localEvict(PlatformUtils.readCollection(reader));
            return TRUE;
        case OP_CONTAINS_KEY:
            return cache.containsKey(reader.readObjectDetached()) ? TRUE : FALSE;
        case OP_CONTAINS_KEYS:
            return cache.containsKeys(PlatformUtils.readSet(reader)) ? TRUE : FALSE;
        case OP_LOC_PROMOTE: {
            cache.localPromote(PlatformUtils.readSet(reader));
            break; // Falls through to the common "return TRUE" below.
        }
        case OP_REPLACE_3:
            // Compare-and-set replace: key, old value, new value.
            return cache.replace(reader.readObjectDetached(), reader.readObjectDetached(),
                reader.readObjectDetached()) ? TRUE : FALSE;
        case OP_LOC_LOAD_CACHE:
            loadCache0(reader, true);
            break;
        case OP_LOAD_CACHE:
            loadCache0(reader, false);
            break;
        case OP_CLEAR:
            cache.clear(reader.readObjectDetached());
            break;
        case OP_CLEAR_ALL:
            cache.clearAll(PlatformUtils.readSet(reader));
            break;
        case OP_LOCAL_CLEAR:
            cache.localClear(reader.readObjectDetached());
            break;
        case OP_LOCAL_CLEAR_ALL:
            cache.localClearAll(PlatformUtils.readSet(reader));
            break;
        case OP_PUT_IF_ABSENT: {
            return cache.putIfAbsent(reader.readObjectDetached(), reader.readObjectDetached()) ? TRUE : FALSE;
        }
        case OP_REPLACE_2: {
            return cache.replace(reader.readObjectDetached(), reader.readObjectDetached()) ? TRUE : FALSE;
        }
        case OP_REMOVE_OBJ: {
            return cache.remove(reader.readObjectDetached()) ? TRUE : FALSE;
        }
        case OP_IS_LOCAL_LOCKED:
            // Second boolean on the wire: whether to check for the current thread only.
            return cache.isLocalLocked(reader.readObjectDetached(), reader.readBoolean()) ? TRUE : FALSE;
        default:
            return super.processInStreamOutLong(type, reader);
    }
    return TRUE;
}
/**
 * Loads cache via localLoadCache or loadCache.
 *
 * @param reader Reader positioned at the optional predicate followed by the argument array.
 * @param loc {@code true} to load locally, {@code false} for cluster-wide load.
 */
private void loadCache0(BinaryRawReaderEx reader, boolean loc) {
    // Stream order: predicate first, then arguments.
    Object predHolder = reader.readObjectDetached();
    PlatformCacheEntryFilter filter = predHolder == null
        ? null
        : platformCtx.createCacheEntryFilter(predHolder, 0);
    Object[] args = reader.readObjectArray();
    if (loc)
        cache.localLoadCache(filter, args);
    else
        cache.loadCache(filter, args);
}
/** {@inheritDoc} */
@Override protected Object processInStreamOutObject(int type, BinaryRawReaderEx reader)
    throws IgniteCheckedException {
    // Dispatches query operations; each returns a platform-side handle object.
    switch (type) {
        case OP_QRY_SQL:
            return runQuery(reader, readSqlQuery(reader));
        case OP_QRY_SQL_FIELDS:
            return runFieldsQuery(reader, readFieldsQuery(reader));
        case OP_QRY_TXT:
            return runQuery(reader, readTextQuery(reader));
        case OP_QRY_SCAN:
            return runQuery(reader, readScanQuery(reader));
        case OP_QRY_CONTINUOUS: {
            // Wire order: pointer, local flag, filter presence, filter object,
            // buffer size, time interval, auto-unsubscribe flag, initial query.
            long ptr = reader.readLong();
            boolean loc = reader.readBoolean();
            boolean hasFilter = reader.readBoolean();
            Object filter = reader.readObjectDetached();
            int bufSize = reader.readInt();
            long timeInterval = reader.readLong();
            boolean autoUnsubscribe = reader.readBoolean();
            Query initQry = readInitialQuery(reader);
            PlatformContinuousQuery qry = platformCtx.createContinuousQuery(ptr, hasFilter, filter);
            qry.start(cache, loc, bufSize, timeInterval, autoUnsubscribe, initQry);
            return qry;
        }
        default:
            return super.processInStreamOutObject(type, reader);
    }
}
/**
 * Read arguments for SQL query.
 *
 * @param reader Reader positioned at the argument count.
 * @return Arguments array, or {@code null} when the count is non-positive.
 */
@Nullable private Object[] readQueryArgs(BinaryRawReaderEx reader) {
    int cnt = reader.readInt();
    if (cnt <= 0)
        return null;
    Object[] args = new Object[cnt];
    for (int i = 0; i < cnt; i++)
        args[i] = reader.readObjectDetached();
    return args;
}
/** {@inheritDoc} */
@Override protected void processOutStream(int type, BinaryRawWriterEx writer) throws IgniteCheckedException {
    switch (type) {
        case OP_GET_NAME:
            writer.writeObject(cache.getName());
            break;
        case OP_METRICS:
            // Serializes the full metrics snapshot. The field order below is part
            // of the wire protocol and must match the platform-side reader exactly;
            // do not reorder, insert, or remove writes.
            CacheMetrics metrics = cache.metrics();
            writer.writeLong(metrics.getCacheGets());
            writer.writeLong(metrics.getCachePuts());
            writer.writeLong(metrics.getCacheHits());
            writer.writeLong(metrics.getCacheMisses());
            writer.writeLong(metrics.getCacheTxCommits());
            writer.writeLong(metrics.getCacheTxRollbacks());
            writer.writeLong(metrics.getCacheEvictions());
            writer.writeLong(metrics.getCacheRemovals());
            writer.writeFloat(metrics.getAveragePutTime());
            writer.writeFloat(metrics.getAverageGetTime());
            writer.writeFloat(metrics.getAverageRemoveTime());
            writer.writeFloat(metrics.getAverageTxCommitTime());
            writer.writeFloat(metrics.getAverageTxRollbackTime());
            writer.writeString(metrics.name());
            writer.writeLong(metrics.getOverflowSize());
            writer.writeLong(metrics.getOffHeapEntriesCount());
            writer.writeLong(metrics.getOffHeapAllocatedSize());
            writer.writeInt(metrics.getSize());
            writer.writeInt(metrics.getKeySize());
            writer.writeBoolean(metrics.isEmpty());
            writer.writeInt(metrics.getDhtEvictQueueCurrentSize());
            writer.writeInt(metrics.getTxThreadMapSize());
            writer.writeInt(metrics.getTxXidMapSize());
            writer.writeInt(metrics.getTxCommitQueueSize());
            writer.writeInt(metrics.getTxPrepareQueueSize());
            writer.writeInt(metrics.getTxStartVersionCountsSize());
            writer.writeInt(metrics.getTxCommittedVersionsSize());
            writer.writeInt(metrics.getTxRolledbackVersionsSize());
            writer.writeInt(metrics.getTxDhtThreadMapSize());
            writer.writeInt(metrics.getTxDhtXidMapSize());
            writer.writeInt(metrics.getTxDhtCommitQueueSize());
            writer.writeInt(metrics.getTxDhtPrepareQueueSize());
            writer.writeInt(metrics.getTxDhtStartVersionCountsSize());
            writer.writeInt(metrics.getTxDhtCommittedVersionsSize());
            writer.writeInt(metrics.getTxDhtRolledbackVersionsSize());
            writer.writeBoolean(metrics.isWriteBehindEnabled());
            writer.writeInt(metrics.getWriteBehindFlushSize());
            writer.writeInt(metrics.getWriteBehindFlushThreadCount());
            writer.writeLong(metrics.getWriteBehindFlushFrequency());
            writer.writeInt(metrics.getWriteBehindStoreBatchSize());
            writer.writeInt(metrics.getWriteBehindTotalCriticalOverflowCount());
            writer.writeInt(metrics.getWriteBehindCriticalOverflowCount());
            writer.writeInt(metrics.getWriteBehindErrorRetryCount());
            writer.writeInt(metrics.getWriteBehindBufferSize());
            writer.writeString(metrics.getKeyType());
            writer.writeString(metrics.getValueType());
            writer.writeBoolean(metrics.isStoreByValue());
            writer.writeBoolean(metrics.isStatisticsEnabled());
            writer.writeBoolean(metrics.isManagementEnabled());
            writer.writeBoolean(metrics.isReadThrough());
            writer.writeBoolean(metrics.isWriteThrough());
            writer.writeFloat(metrics.getCacheHitPercentage());
            writer.writeFloat(metrics.getCacheMissPercentage());
            break;
        default:
            super.processOutStream(type, writer);
    }
}
/** {@inheritDoc} */
@SuppressWarnings({"IfMayBeConditional", "ConstantConditions"})
@Override protected void processInStreamOutStream(int type, BinaryRawReaderEx reader, BinaryRawWriterEx writer)
    throws IgniteCheckedException {
    // Dispatches operations that both consume arguments from the stream and
    // write a result back. Read order within a case is part of the wire protocol.
    switch (type) {
        case OP_GET: {
            writer.writeObjectDetached(cache.get(reader.readObjectDetached()));
            break;
        }
        case OP_GET_AND_PUT: {
            writer.writeObjectDetached(cache.getAndPut(reader.readObjectDetached(), reader.readObjectDetached()));
            break;
        }
        case OP_GET_AND_REPLACE: {
            writer.writeObjectDetached(cache.getAndReplace(reader.readObjectDetached(),
                reader.readObjectDetached()));
            break;
        }
        case OP_GET_AND_REMOVE: {
            writer.writeObjectDetached(cache.getAndRemove(reader.readObjectDetached()));
            break;
        }
        case OP_GET_AND_PUT_IF_ABSENT: {
            writer.writeObjectDetached(cache.getAndPutIfAbsent(reader.readObjectDetached(), reader.readObjectDetached()));
            break;
        }
        case OP_PEEK: {
            Object key = reader.readObjectDetached();
            CachePeekMode[] modes = PlatformUtils.decodeCachePeekModes(reader.readInt());
            writer.writeObjectDetached(cache.localPeek(key, modes));
            break;
        }
        case OP_GET_ALL: {
            Set keys = PlatformUtils.readSet(reader);
            Map entries = cache.getAll(keys);
            PlatformUtils.writeNullableMap(writer, entries);
            break;
        }
        case OP_INVOKE: {
            Object key = reader.readObjectDetached();
            CacheEntryProcessor proc = platformCtx.createCacheEntryProcessor(reader.readObjectDetached(), 0);
            try {
                writer.writeObjectDetached(cache.invoke(key, proc));
            }
            catch (EntryProcessorException ex)
            {
                // A platform-native failure is unwrapped and streamed back so the
                // platform side can rethrow its original exception; anything else
                // propagates as a normal Java exception.
                if (ex.getCause() instanceof PlatformNativeException)
                    writer.writeObjectDetached(((PlatformNativeException)ex.getCause()).cause());
                else
                    throw ex;
            }
            break;
        }
        case OP_INVOKE_ALL: {
            Set<Object> keys = PlatformUtils.readSet(reader);
            CacheEntryProcessor proc = platformCtx.createCacheEntryProcessor(reader.readObjectDetached(), 0);
            writeInvokeAllResult(writer, cache.invokeAll(keys, proc));
            break;
        }
        case OP_LOCK:
            // Lock objects cannot cross the interop boundary; an id is returned instead.
            writer.writeLong(registerLock(cache.lock(reader.readObjectDetached())));
            break;
        case OP_LOCK_ALL:
            writer.writeLong(registerLock(cache.lockAll(PlatformUtils.readCollection(reader))));
            break;
        default:
            super.processInStreamOutStream(type, reader, writer);
    }
}
/** {@inheritDoc} */
@Override public Exception convertException(Exception e) {
    // Partial-update failures are rewrapped into a platform-aware exception so
    // the failed keys can be marshalled back; the unchecked variant carries the
    // checked exception as its cause.
    if (e instanceof CachePartialUpdateException)
        return new PlatformCachePartialUpdateException((CachePartialUpdateCheckedException)e.getCause(),
            platformCtx, keepBinary);
    if (e instanceof CachePartialUpdateCheckedException)
        return new PlatformCachePartialUpdateException((CachePartialUpdateCheckedException)e, platformCtx, keepBinary);
    // Entry-processor failures surface directly, without the outer wrapper.
    if (e.getCause() instanceof EntryProcessorException)
        return (EntryProcessorException) e.getCause();
    return super.convertException(e);
}
/**
 * Writes the result of InvokeAll cache method.
 *
 * Wire format: -1 for a null map; otherwise size followed by, per entry,
 * the key, a boolean error flag, and either the result or the error payload.
 *
 * @param writer Writer.
 * @param results Results.
 */
private static void writeInvokeAllResult(BinaryRawWriterEx writer, Map<Object, EntryProcessorResult> results) {
    if (results == null) {
        writer.writeInt(-1);
        return;
    }
    writer.writeInt(results.size());
    for (Map.Entry<Object, EntryProcessorResult> entry : results.entrySet()) {
        writer.writeObjectDetached(entry.getKey());
        EntryProcessorResult procRes = entry.getValue();
        try {
            // EntryProcessorResult.get() rethrows the processor's exception, if any.
            Object res = procRes.get();
            writer.writeBoolean(false); // No exception
            writer.writeObjectDetached(res);
        }
        catch (Exception ex) {
            writer.writeBoolean(true); // Exception
            writeError(writer, ex);
        }
    }
}
/**
 * Writes an error to the writer either as a native exception, or as a couple of strings
 * (exception class name followed by message).
 *
 * @param writer Writer.
 * @param ex Exception.
 */
private static void writeError(BinaryRawWriterEx writer, Exception ex) {
    Throwable cause = ex.getCause();
    if (cause instanceof PlatformNativeException)
        writer.writeObjectDetached(((PlatformNativeException)cause).cause());
    else {
        writer.writeObjectDetached(ex.getClass().getName());
        writer.writeObjectDetached(ex.getMessage());
    }
}
/** {@inheritDoc} */
@Override protected IgniteFuture currentFuture() throws IgniteCheckedException {
    // Future of the most recent async operation on the underlying proxy.
    return cache.future();
}
/** {@inheritDoc} */
@Nullable @Override protected PlatformFutureUtils.Writer futureWriter(int opId) {
    // Only a few operations need a custom result serializer; all others use defaults.
    switch (opId) {
        case OP_GET_ALL:
            return WRITER_GET_ALL;
        case OP_INVOKE:
            return WRITER_INVOKE;
        case OP_INVOKE_ALL:
            return WRITER_INVOKE_ALL;
        default:
            return null;
    }
}
/**
 * Clears the contents of the cache, without notifying listeners or
 * {@ignitelink javax.cache.integration.CacheWriter}s.
 *
 * @throws IllegalStateException if the cache is closed.
 * @throws javax.cache.CacheException if there is a problem during the clear
 */
public void clear() throws IgniteCheckedException {
    cache.clear();
}
/**
 * Removes all entries.
 *
 * @throws org.apache.ignite.IgniteCheckedException In case of error.
 */
public void removeAll() throws IgniteCheckedException {
    cache.removeAll();
}
/**
 * Read cache size.
 *
 * @param peekModes Encoded peek modes.
 * @param loc Local mode flag.
 * @return Local or cluster-wide size, per {@code loc}.
 */
public int size(int peekModes, boolean loc) {
    CachePeekMode[] modes = PlatformUtils.decodeCachePeekModes(peekModes);
    if (loc)
        return cache.localSize(modes);
    return cache.size(modes);
}
/**
 * Create cache iterator.
 *
 * @return Platform wrapper around the cache entry iterator.
 */
public PlatformCacheIterator iterator() {
    return new PlatformCacheIterator(platformCtx, cache.iterator());
}
/**
 * Create cache iterator over local entries.
 *
 * @param peekModes Encoded peek modes.
 * @return Platform wrapper around the local-entries iterator.
 */
public PlatformCacheIterator localIterator(int peekModes) {
    CachePeekMode[] modes = PlatformUtils.decodeCachePeekModes(peekModes);
    return new PlatformCacheIterator(platformCtx, cache.localEntries(modes).iterator());
}
/**
 * Enters a lock.
 *
 * @param id Lock id.
 * @throws InterruptedException If the current thread is interrupted while waiting.
 */
public void enterLock(long id) throws InterruptedException {
    lock(id).lockInterruptibly();
}
/**
 * Exits a lock.
 *
 * @param id Lock id.
 */
public void exitLock(long id) {
    lock(id).unlock();
}
/**
 * Attempts to enter a lock.
 *
 * @param id Lock id.
 * @param timeout Timeout, in milliseconds. -1 for infinite timeout.
 * @return {@code true} if the lock was acquired.
 * @throws InterruptedException If interrupted while waiting (timed variant only).
 */
public boolean tryEnterLock(long id, long timeout) throws InterruptedException {
    if (timeout == -1)
        return lock(id).tryLock();
    return lock(id).tryLock(timeout, TimeUnit.MILLISECONDS);
}
/**
 * Rebalances the cache.
 *
 * @param futId Future id.
 */
public void rebalance(long futId) {
    // The chained closure discards the rebalance future's value: the platform
    // side only needs completion/error notification via the registered listener.
    PlatformFutureUtils.listen(platformCtx, cache.rebalance().chain(new C1<IgniteFuture, Object>() {
        @Override public Object apply(IgniteFuture fut) {
            return null;
        }
    }), futId, PlatformFutureUtils.TYP_OBJ, this);
}
/**
 * Unregister lock.
 *
 * @param id Lock id.
 */
public void closeLock(long id){
    Lock lock = lockMap.remove(id);
    // Only checked under -ea; with assertions disabled an unknown id is silently ignored.
    assert lock != null : "Failed to unregister lock: " + id;
}
/**
 * Get lock by id.
 *
 * @param id Id.
 * @return Lock.
 */
private Lock lock(long id) {
    Lock lock = lockMap.get(id);
    // With assertions disabled an unknown id would surface as an NPE at the call site.
    assert lock != null : "Lock not found for ID: " + id;
    return lock;
}
/**
 * Registers a lock in a map.
 *
 * @param lock Lock to register.
 * @return Registered lock id, unique across all instances.
 */
private long registerLock(Lock lock) {
    long lockId = LOCK_ID_GEN.incrementAndGet();
    lockMap.put(lockId, lock);
    return lockId;
}
/**
 * Runs specified query.
 *
 * @param reader Reader. NOTE(review): currently unused; kept for signature
 *     symmetry with {@code runFieldsQuery} — confirm before removing.
 * @param qry Query to execute.
 * @return Platform cursor over the query results.
 * @throws IgniteCheckedException If the query fails (unwrapped from the raw cause).
 */
private PlatformQueryCursor runQuery(BinaryRawReaderEx reader, Query qry) throws IgniteCheckedException {
    try {
        QueryCursorEx cursor = (QueryCursorEx) cache.query(qry);
        // Page size 0 means "not set"; fall back to the default.
        return new PlatformQueryCursor(platformCtx, cursor,
            qry.getPageSize() > 0 ? qry.getPageSize(): Query.DFLT_PAGE_SIZE);
    }
    catch (Exception err) {
        throw PlatformUtils.unwrapQueryException(err);
    }
}
/**
 * Runs specified fields query.
 *
 * @param reader Reader. NOTE(review): currently unused; see {@code runQuery}.
 * @param qry Fields query to execute.
 * @return Platform cursor over the field rows.
 * @throws IgniteCheckedException If the query fails (unwrapped from the raw cause).
 */
private PlatformFieldsQueryCursor runFieldsQuery(BinaryRawReaderEx reader, Query qry)
    throws IgniteCheckedException {
    try {
        QueryCursorEx cursor = (QueryCursorEx) cache.query(qry);
        // Page size 0 means "not set"; fall back to the default.
        return new PlatformFieldsQueryCursor(platformCtx, cursor,
            qry.getPageSize() > 0 ? qry.getPageSize() : Query.DFLT_PAGE_SIZE);
    }
    catch (Exception err) {
        throw PlatformUtils.unwrapQueryException(err);
    }
}
/**
 * Reads the query of specified type.
 *
 * @param reader Reader positioned at the query type code.
 * @return Deserialized query, or {@code null} when the type code is -1.
 * @throws IgniteCheckedException On an unrecognized query type code.
 */
private Query readInitialQuery(BinaryRawReaderEx reader) throws IgniteCheckedException {
    int typ = reader.readInt();
    switch (typ) {
        case -1:
            return null;
        case OP_QRY_SCAN:
            return readScanQuery(reader);
        case OP_QRY_SQL:
            return readSqlQuery(reader);
        case OP_QRY_TXT:
            return readTextQuery(reader);
        default:
            throw new IgniteCheckedException("Unsupported query type: " + typ);
    }
}
/**
 * Reads sql query.
 *
 * Wire order: local flag, SQL text, target type name, page size, arguments.
 *
 * @param reader Reader.
 * @return Deserialized SQL query.
 */
private Query readSqlQuery(BinaryRawReaderEx reader) {
    boolean isLocal = reader.readBoolean();
    String sqlText = reader.readString();
    String valType = reader.readString();
    int pageSize = reader.readInt();
    Object[] qryArgs = readQueryArgs(reader);
    SqlQuery qry = new SqlQuery(valType, sqlText);
    qry.setPageSize(pageSize);
    qry.setArgs(qryArgs);
    qry.setLocal(isLocal);
    return qry;
}
/**
 * Reads fields query.
 *
 * Wire order: local flag, SQL text, page size, arguments.
 *
 * @param reader Reader.
 * @return Deserialized SQL fields query.
 */
private Query readFieldsQuery(BinaryRawReaderEx reader) {
    boolean isLocal = reader.readBoolean();
    String sqlText = reader.readString();
    int pageSize = reader.readInt();
    Object[] qryArgs = readQueryArgs(reader);
    SqlFieldsQuery qry = new SqlFieldsQuery(sqlText);
    qry.setPageSize(pageSize);
    qry.setArgs(qryArgs);
    qry.setLocal(isLocal);
    return qry;
}
/**
 * Reads text query.
 *
 * Wire order: local flag, search text, target type name, page size.
 *
 * @param reader Reader.
 * @return Deserialized text query.
 */
private Query readTextQuery(BinaryRawReaderEx reader) {
    boolean isLocal = reader.readBoolean();
    String searchText = reader.readString();
    String valType = reader.readString();
    int pageSize = reader.readInt();
    TextQuery qry = new TextQuery(valType, searchText);
    qry.setPageSize(pageSize);
    qry.setLocal(isLocal);
    return qry;
}
/**
 * Reads scan query.
 *
 * Wire order: local flag, page size, partition presence flag, optional
 * partition, filter predicate (nullable).
 *
 * @param reader Reader.
 * @return Deserialized scan query.
 */
private Query readScanQuery(BinaryRawReaderEx reader) {
    boolean isLocal = reader.readBoolean();
    int pageSize = reader.readInt();
    boolean hasPart = reader.readBoolean();
    Integer part = hasPart ? reader.readInt() : null;
    ScanQuery qry = new ScanQuery();
    qry.setPageSize(pageSize);
    qry.setPartition(part);
    Object pred = reader.readObjectDetached();
    if (pred != null)
        qry.setFilter(platformCtx.createCacheEntryFilter(pred, 0));
    qry.setLocal(isLocal);
    return qry;
}
/**
 * Writes the result of an async GetAll operation (a nullable map) back to the
 * platform. (Previous comment describing error writing was a copy-paste error.)
 */
private static class GetAllWriter implements PlatformFutureUtils.Writer {
    /** {@inheritDoc} */
    @Override public void write(BinaryRawWriterEx writer, Object obj, Throwable err) {
        assert obj instanceof Map;
        PlatformUtils.writeNullableMap(writer, (Map) obj);
    }
    /** Only handles successful results; errors fall back to default handling. */
    @Override public boolean canWrite(Object obj, Throwable err) {
        return err == null;
    }
}
/**
 * Writes the result of an async Invoke operation: an error flag followed by
 * either the result object or the serialized error.
 */
private static class EntryProcessorInvokeWriter implements PlatformFutureUtils.Writer {
    /** {@inheritDoc} */
    @Override public void write(BinaryRawWriterEx writer, Object obj, Throwable err) {
        boolean hasErr = err != null;
        writer.writeBoolean(hasErr);
        if (hasErr)
            writeError(writer, (Exception) err);
        else
            writer.writeObjectDetached(obj);
    }
    /** Handles both success and failure outcomes. */
    @Override public boolean canWrite(Object obj, Throwable err) {
        return true;
    }
}
/**
 * Writes results of InvokeAll method, delegating to the shared
 * invoke-all serialization helper.
 */
private static class EntryProcessorInvokeAllWriter implements PlatformFutureUtils.Writer {
    /** {@inheritDoc} */
    @Override public void write(BinaryRawWriterEx writer, Object obj, Throwable err) {
        writeInvokeAllResult(writer, (Map)obj);
    }
    /** Only handles non-null successful results. */
    @Override public boolean canWrite(Object obj, Throwable err) {
        return err == null && obj != null;
    }
}
/**
 * Interop expiry policy: decodes platform-encoded longs into JCache durations.
 * Encoding: -2 = leave unchanged (null duration), -1 = eternal, 0 = zero,
 * positive = milliseconds.
 */
private static class InteropExpiryPolicy implements ExpiryPolicy {
    /** Duration: unchanged. */
    private static final long DUR_UNCHANGED = -2;
    /** Duration: eternal. */
    private static final long DUR_ETERNAL = -1;
    /** Duration: zero. */
    private static final long DUR_ZERO = 0;
    /** Expiry for create. */
    private final Duration create;
    /** Expiry for update. */
    private final Duration update;
    /** Expiry for access. */
    private final Duration access;
    /**
     * Constructor.
     *
     * @param create Encoded expiry for create.
     * @param update Encoded expiry for update.
     * @param access Encoded expiry for access.
     */
    public InteropExpiryPolicy(long create, long update, long access) {
        this.create = convert(create);
        this.update = convert(update);
        this.access = convert(access);
    }
    /** {@inheritDoc} */
    @Override public Duration getExpiryForCreation() {
        return create;
    }
    /** {@inheritDoc} */
    @Override public Duration getExpiryForUpdate() {
        return update;
    }
    /** {@inheritDoc} */
    @Override public Duration getExpiryForAccess() {
        return access;
    }
    /**
     * Convert encoded duration to actual duration.
     *
     * @param dur Encoded duration.
     * @return Actual duration, or {@code null} for "unchanged".
     */
    private static Duration convert(long dur) {
        if (dur == DUR_UNCHANGED)
            return null;
        if (dur == DUR_ETERNAL)
            return Duration.ETERNAL;
        if (dur == DUR_ZERO)
            return Duration.ZERO;
        assert dur > 0;
        return new Duration(TimeUnit.MILLISECONDS, dur);
    }
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.