gt stringclasses 1
value | context stringlengths 2.05k 161k |
|---|---|
/*
* Copyright (c) 2016-2017, Adam <Adam@sigterm.info>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package net.runelite.cache.definitions.loaders;
import net.runelite.cache.definitions.NpcDefinition;
import net.runelite.cache.io.InputStream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.HashMap;
public class NpcLoader
{
	private static final Logger logger = LoggerFactory.getLogger(NpcLoader.class);

	/**
	 * Loads an NPC definition from its serialized cache representation.
	 *
	 * @param id the NPC id assigned to the new definition
	 * @param b the raw cache data for this NPC
	 * @return the decoded definition
	 */
	public NpcDefinition load(int id, byte[] b)
	{
		NpcDefinition def = new NpcDefinition(id);
		InputStream is = new InputStream(b);

		// The payload is a sequence of opcode-tagged fields terminated by opcode 0.
		for (;;)
		{
			int opcode = is.readUnsignedByte();
			if (opcode == 0)
			{
				break;
			}

			decodeValues(opcode, def, is);
		}

		return def;
	}

	/**
	 * Decodes a single opcode-tagged field into the definition.
	 * Unknown opcodes are logged and skipped; no payload is consumed for them.
	 */
	private void decodeValues(int opcode, NpcDefinition def, InputStream stream)
	{
		switch (opcode)
		{
			case 1:
				def.models = readUnsignedShortArray(stream);
				break;
			case 2:
				def.name = stream.readString();
				break;
			case 12:
				def.tileSpacesOccupied = stream.readUnsignedByte();
				break;
			case 13:
				def.stanceAnimation = stream.readUnsignedShort();
				break;
			case 14:
				def.walkAnimation = stream.readUnsignedShort();
				break;
			case 15:
				def.anInt2165 = stream.readUnsignedShort();
				break;
			case 16:
				def.anInt2189 = stream.readUnsignedShort();
				break;
			case 17:
				// Directional movement animations.
				def.walkAnimation = stream.readUnsignedShort();
				def.rotate180Animation = stream.readUnsignedShort();
				def.rotate90RightAnimation = stream.readUnsignedShort();
				def.rotate90LeftAnimation = stream.readUnsignedShort();
				break;
			case 30:
			case 31:
			case 32:
			case 33:
			case 34:
			{
				// Right-click menu options; the literal "Hidden" marks an unused slot.
				String option = stream.readString();
				def.options[opcode - 30] = option.equalsIgnoreCase("Hidden") ? null : option;
				break;
			}
			case 40:
			{
				int length = stream.readUnsignedByte();
				def.recolorToFind = new short[length];
				def.recolorToReplace = new short[length];
				for (int i = 0; i < length; ++i)
				{
					def.recolorToFind[i] = (short) stream.readUnsignedShort();
					def.recolorToReplace[i] = (short) stream.readUnsignedShort();
				}
				break;
			}
			case 41:
			{
				int length = stream.readUnsignedByte();
				def.retextureToFind = new short[length];
				def.retextureToReplace = new short[length];
				for (int i = 0; i < length; ++i)
				{
					def.retextureToFind[i] = (short) stream.readUnsignedShort();
					def.retextureToReplace[i] = (short) stream.readUnsignedShort();
				}
				break;
			}
			case 60:
				def.models_2 = readUnsignedShortArray(stream);
				break;
			case 93:
				def.renderOnMinimap = false;
				break;
			case 95:
				def.combatLevel = stream.readUnsignedShort();
				break;
			case 97:
				def.resizeX = stream.readUnsignedShort();
				break;
			case 98:
				def.resizeY = stream.readUnsignedShort();
				break;
			case 99:
				def.hasRenderPriority = true;
				break;
			case 100:
				def.ambient = stream.readByte();
				break;
			case 101:
				def.contrast = stream.readByte();
				break;
			case 102:
				def.headIcon = stream.readUnsignedShort();
				break;
			case 103:
				def.anInt2156 = stream.readUnsignedShort();
				break;
			case 106:
				// Same layout as opcode 118, but with no explicit trailing entry.
				readTransforms(def, stream, false);
				break;
			case 107:
				def.isClickable = false;
				break;
			case 109:
				def.aBool2170 = false;
				break;
			case 111:
				def.aBool2190 = true;
				break;
			case 118:
				readTransforms(def, stream, true);
				break;
			case 249:
			{
				// Arbitrary key/value parameters; a flag byte selects string vs int values.
				int length = stream.readUnsignedByte();
				def.params = new HashMap<>(length);
				for (int i = 0; i < length; i++)
				{
					boolean isString = stream.readUnsignedByte() == 1;
					int key = stream.read24BitInt();
					Object value;
					if (isString)
					{
						value = stream.readString();
					}
					else
					{
						value = stream.readInt();
					}
					def.params.put(key, value);
				}
				break;
			}
			default:
				logger.warn("Unrecognized opcode {}", opcode);
		}
	}

	/**
	 * Reads a byte-prefixed array of unsigned shorts (used for model id lists).
	 */
	private static int[] readUnsignedShortArray(InputStream stream)
	{
		int length = stream.readUnsignedByte();
		int[] values = new int[length];
		for (int i = 0; i < length; ++i)
		{
			values[i] = stream.readUnsignedShort();
		}
		return values;
	}

	/**
	 * Maps the sentinel 0xFFFF ("no value") to -1; all other values pass through.
	 */
	private static int replaceMax(int value)
	{
		return value == 0xFFFF ? -1 : value;
	}

	/**
	 * Decodes the shared transform/morph structure used by opcodes 106 and 118.
	 *
	 * @param readLastEntry when true (opcode 118), an extra unsigned short is
	 *        read and stored as the final array entry; otherwise -1 is used
	 */
	private void readTransforms(NpcDefinition def, InputStream stream, boolean readLastEntry)
	{
		def.anInt2174 = replaceMax(stream.readUnsignedShort());
		def.anInt2187 = replaceMax(stream.readUnsignedShort());

		int lastEntry = -1;
		if (readLastEntry)
		{
			lastEntry = replaceMax(stream.readUnsignedShort());
		}

		int length = stream.readUnsignedByte();
		def.anIntArray2185 = new int[length + 2];
		for (int i = 0; i <= length; ++i)
		{
			def.anIntArray2185[i] = replaceMax(stream.readUnsignedShort());
		}
		def.anIntArray2185[length + 1] = lastEntry;
	}
}
| |
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.security.transport;
import org.elasticsearch.ElasticsearchSecurityException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.MockIndicesRequest;
import org.elasticsearch.action.admin.indices.close.CloseIndexAction;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction;
import org.elasticsearch.action.admin.indices.open.OpenIndexAction;
import org.elasticsearch.action.support.DestructiveOperations;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.transport.TcpTransport;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.xpack.core.security.SecurityContext;
import org.elasticsearch.xpack.core.security.authc.Authentication;
import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef;
import org.elasticsearch.xpack.core.security.authz.permission.Role;
import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore;
import org.elasticsearch.xpack.core.security.user.SystemUser;
import org.elasticsearch.xpack.core.security.user.User;
import org.elasticsearch.xpack.core.security.user.XPackUser;
import org.elasticsearch.xpack.security.authc.AuthenticationService;
import org.elasticsearch.xpack.security.authz.AuthorizationService;
import org.junit.Before;
import java.io.IOException;
import java.util.Collections;
import static org.elasticsearch.mock.orig.Mockito.times;
import static org.elasticsearch.xpack.core.security.support.Exceptions.authenticationError;
import static org.elasticsearch.xpack.core.security.support.Exceptions.authorizationError;
import static org.hamcrest.Matchers.equalTo;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.eq;
import static org.mockito.Matchers.isA;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.verifyZeroInteractions;
import static org.mockito.Mockito.when;
/**
 * Tests for the security server transport filter: inbound requests must be
 * authenticated and then authorized, destructive operations must be rejected
 * when index-name requirements are enabled, and the client profile must refuse
 * internal/shard-level actions that only nodes may execute.
 */
public class ServerTransportFilterTests extends ESTestCase {
    private AuthenticationService authcService;
    private AuthorizationService authzService;
    private TransportChannel channel;
    // Randomized each test run; mirrors DestructiveOperations.REQUIRES_NAME_SETTING below.
    private boolean failDestructiveOperations;
    private DestructiveOperations destructiveOperations;

    @Before
    public void init() throws Exception {
        authcService = mock(AuthenticationService.class);
        authzService = mock(AuthorizationService.class);
        channel = mock(TransportChannel.class);
        when(channel.getProfileName()).thenReturn(TcpTransport.DEFAULT_PROFILE);
        when(channel.getVersion()).thenReturn(Version.CURRENT);
        failDestructiveOperations = randomBoolean();
        Settings settings = Settings.builder()
            .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), failDestructiveOperations).build();
        destructiveOperations = new DestructiveOperations(settings,
            new ClusterSettings(settings, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)));
    }

    /**
     * Happy path: successful authentication of "_action" must be followed by an
     * authorize() call with the same authentication and request.
     */
    public void testInbound() throws Exception {
        TransportRequest request = mock(TransportRequest.class);
        Authentication authentication = mock(Authentication.class);
        when(authentication.getVersion()).thenReturn(Version.CURRENT);
        when(authentication.getUser()).thenReturn(SystemUser.INSTANCE);
        // Stub authenticate(...) to complete its ActionListener (argument index 3)
        // with the mocked authentication.
        doAnswer((i) -> {
            ActionListener callback =
                (ActionListener) i.getArguments()[3];
            callback.onResponse(authentication);
            return Void.TYPE;
        }).when(authcService).authenticate(eq("_action"), eq(request), eq((User)null), any(ActionListener.class));
        ServerTransportFilter filter = getClientOrNodeFilter();
        PlainActionFuture<Void> future = new PlainActionFuture<>();
        filter.inbound("_action", request, channel, future);
        //future.get(); // don't block it's not called really just mocked
        verify(authzService).authorize(authentication, "_action", request, null, null);
    }

    /**
     * Destructive actions (close/open/delete index) with wildcard index patterns
     * must fail when name-requiring is enabled, and pass through to
     * authorization otherwise.
     */
    public void testInboundDestructiveOperations() throws Exception {
        String action = randomFrom(CloseIndexAction.NAME, OpenIndexAction.NAME, DeleteIndexAction.NAME);
        TransportRequest request = new MockIndicesRequest(
            IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()),
            randomFrom("*", "_all", "test*"));
        Authentication authentication = mock(Authentication.class);
        when(authentication.getVersion()).thenReturn(Version.CURRENT);
        when(authentication.getUser()).thenReturn(SystemUser.INSTANCE);
        doAnswer((i) -> {
            ActionListener callback =
                (ActionListener) i.getArguments()[3];
            callback.onResponse(authentication);
            return Void.TYPE;
        }).when(authcService).authenticate(eq(action), eq(request), eq((User)null), any(ActionListener.class));
        ServerTransportFilter filter = getClientOrNodeFilter();
        PlainActionFuture listener = mock(PlainActionFuture.class);
        filter.inbound(action, request, channel, listener);
        if (failDestructiveOperations) {
            // The request is rejected before authorization is even attempted.
            verify(listener).onFailure(isA(IllegalArgumentException.class));
            verifyNoMoreInteractions(authzService);
        } else {
            verify(authzService).authorize(authentication, action, request, null, null);
        }
    }

    /**
     * An authentication failure must surface to the caller as an
     * ElasticsearchSecurityException and never reach the authorization service.
     */
    public void testInboundAuthenticationException() throws Exception {
        TransportRequest request = mock(TransportRequest.class);
        Exception authE = authenticationError("authc failed");
        doAnswer((i) -> {
            ActionListener callback =
                (ActionListener) i.getArguments()[3];
            callback.onFailure(authE);
            return Void.TYPE;
        }).when(authcService).authenticate(eq("_action"), eq(request), eq((User)null), any(ActionListener.class));
        ServerTransportFilter filter = getClientOrNodeFilter();
        try {
            PlainActionFuture<Void> future = new PlainActionFuture<>();
            filter.inbound("_action", request, channel, future);
            future.actionGet();
            fail("expected filter inbound to throw an authentication exception on authentication error");
        } catch (ElasticsearchSecurityException e) {
            assertThat(e.getMessage(), equalTo("authc failed"));
        }
        verifyZeroInteractions(authzService);
    }

    /**
     * An authorization failure (after successful authentication and role
     * resolution) must propagate out of inbound() unchanged.
     */
    public void testInboundAuthorizationException() throws Exception {
        ServerTransportFilter filter = getClientOrNodeFilter();
        TransportRequest request = mock(TransportRequest.class);
        Authentication authentication = mock(Authentication.class);
        doAnswer((i) -> {
            ActionListener callback =
                (ActionListener) i.getArguments()[3];
            callback.onResponse(authentication);
            return Void.TYPE;
        }).when(authcService).authenticate(eq("_action"), eq(request), eq((User)null), any(ActionListener.class));
        final Role empty = Role.EMPTY;
        // Role resolution (listener at argument index 1) yields the empty role.
        doAnswer((i) -> {
            ActionListener callback =
                (ActionListener) i.getArguments()[1];
            callback.onResponse(empty);
            return Void.TYPE;
        }).when(authzService).roles(any(User.class), any(ActionListener.class));
        when(authentication.getVersion()).thenReturn(Version.CURRENT);
        when(authentication.getUser()).thenReturn(XPackUser.INSTANCE);
        PlainActionFuture<Void> future = new PlainActionFuture<>();
        doThrow(authorizationError("authz failed")).when(authzService).authorize(authentication, "_action", request,
            empty, null);
        ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, () -> {
            filter.inbound("_action", request, channel, future);
            future.actionGet();
        });
        assertThat(e.getMessage(), equalTo("authz failed"));
    }

    /**
     * The client profile must reject internal actions and shard-level action
     * variants outright, without ever consulting the authentication service.
     */
    public void testClientProfileRejectsNodeActions() throws Exception {
        TransportRequest request = mock(TransportRequest.class);
        ServerTransportFilter filter = getClientFilter(true);
        ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class,
            () -> filter.inbound("internal:foo/bar", request, channel, new PlainActionFuture<>()));
        assertEquals("executing internal/shard actions is considered malicious and forbidden", e.getMessage());
        e = expectThrows(ElasticsearchSecurityException.class,
            () -> filter.inbound("indices:action" + randomFrom("[s]", "[p]", "[r]", "[n]", "[s][p]", "[s][r]", "[f]"),
                request, channel, new PlainActionFuture<>()));
        assertEquals("executing internal/shard actions is considered malicious and forbidden", e.getMessage());
        verifyZeroInteractions(authcService);
    }

    /**
     * The node profile must allow the same internal and shard-level actions,
     * running full authentication, role resolution, and authorization for each.
     * Note the interaction counts are cumulative across the two inbound calls.
     */
    public void testNodeProfileAllowsNodeActions() throws Exception {
        final String internalAction = "internal:foo/bar";
        final String nodeOrShardAction = "indices:action" + randomFrom("[s]", "[p]", "[r]", "[n]", "[s][p]", "[s][r]", "[f]");
        ServerTransportFilter filter = getNodeFilter(true);
        TransportRequest request = mock(TransportRequest.class);
        Authentication authentication = new Authentication(new User("test", "superuser"), new RealmRef("test", "test", "node1"), null);
        // Resolve the superuser role only for the expected user.
        doAnswer((i) -> {
            ActionListener callback =
                (ActionListener) i.getArguments()[1];
            callback.onResponse(authentication.getUser().equals(i.getArguments()[0]) ? ReservedRolesStore.SUPERUSER_ROLE : null);
            return Void.TYPE;
        }).when(authzService).roles(any(User.class), any(ActionListener.class));
        doAnswer((i) -> {
            ActionListener callback =
                (ActionListener) i.getArguments()[3];
            callback.onResponse(authentication);
            return Void.TYPE;
        }).when(authcService).authenticate(eq(internalAction), eq(request), eq((User)null), any(ActionListener.class));
        doAnswer((i) -> {
            ActionListener callback =
                (ActionListener) i.getArguments()[3];
            callback.onResponse(authentication);
            return Void.TYPE;
        }).when(authcService).authenticate(eq(nodeOrShardAction), eq(request), eq((User)null), any(ActionListener.class));
        filter.inbound(internalAction, request, channel, new PlainActionFuture<>());
        verify(authcService).authenticate(eq(internalAction), eq(request), eq((User)null), any(ActionListener.class));
        verify(authzService).roles(eq(authentication.getUser()), any(ActionListener.class));
        verify(authzService).authorize(authentication, internalAction, request, ReservedRolesStore.SUPERUSER_ROLE, null);
        filter.inbound(nodeOrShardAction, request, channel, new PlainActionFuture<>());
        verify(authcService).authenticate(eq(nodeOrShardAction), eq(request), eq((User)null), any(ActionListener.class));
        // times(2) counts the roles() call from the first inbound as well.
        verify(authzService, times(2)).roles(eq(authentication.getUser()), any(ActionListener.class));
        verify(authzService).authorize(authentication, nodeOrShardAction, request, ReservedRolesStore.SUPERUSER_ROLE, null);
        verifyNoMoreInteractions(authcService, authzService);
    }

    // Randomly exercise either profile for tests where behavior is identical.
    private ServerTransportFilter getClientOrNodeFilter() throws IOException {
        return randomBoolean() ? getNodeFilter(true) : getClientFilter(true);
    }

    private ServerTransportFilter.ClientProfile getClientFilter(boolean reservedRealmEnabled) throws IOException {
        Settings settings = Settings.builder().put("path.home", createTempDir()).build();
        ThreadContext threadContext = new ThreadContext(settings);
        return new ServerTransportFilter.ClientProfile(authcService, authzService, threadContext, false, destructiveOperations,
            reservedRealmEnabled, new SecurityContext(settings, threadContext));
    }

    private ServerTransportFilter.NodeProfile getNodeFilter(boolean reservedRealmEnabled) throws IOException {
        Settings settings = Settings.builder().put("path.home", createTempDir()).build();
        ThreadContext threadContext = new ThreadContext(settings);
        return new ServerTransportFilter.NodeProfile(authcService, authzService, threadContext, false, destructiveOperations,
            reservedRealmEnabled, new SecurityContext(settings, threadContext));
    }
}
| |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.io.PrintStream;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.annotations.VisibleForTesting;

import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.ConfigurationException;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerLivenessContext;
import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerPrepareContext;
import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerReacquisitionContext;
import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerReapContext;
import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext;
import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;
import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
import org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext;
import org.apache.hadoop.yarn.server.nodemanager.util.NodeManagerHardwareUtils;
import org.apache.hadoop.yarn.server.nodemanager.util.ProcessIdFileReader;

import static org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.CONTAINER_PRE_LAUNCH_STDERR;
import static org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.CONTAINER_PRE_LAUNCH_STDOUT;
/**
* This class is abstraction of the mechanism used to launch a container on the
* underlying OS. All executor implementations must extend ContainerExecutor.
*/
public abstract class ContainerExecutor implements Configurable {
  private static final Logger LOG =
      LoggerFactory.getLogger(ContainerExecutor.class);

  // Link name that expands to every file in a localized directory (see
  // writeLaunchEnv's wildcard handling).
  protected static final String WILDCARD = "*";

  /**
   * The permissions to use when creating the launch script.
   */
  public static final FsPermission TASK_LAUNCH_SCRIPT_PERMISSION =
      FsPermission.createImmutable((short)0700);

  /**
   * The relative path to which debug information will be written.
   *
   * @see ContainerLaunch.ShellScriptBuilder#listDebugInformation
   */
  public static final String DIRECTORY_CONTENTS = "directory.info";

  private Configuration conf;

  // Per-container pid-file locations, registered while the container runs.
  private final ConcurrentMap<ContainerId, Path> pidFiles =
      new ConcurrentHashMap<>();

  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private final ReadLock readLock = lock.readLock();
  private final WriteLock writeLock = lock.writeLock();

  // Env var names from NM_ENV_WHITELIST; refreshed in setConf().
  private String[] whitelistVars;
@Override
public void setConf(Configuration conf) {
this.conf = conf;
if (conf != null) {
whitelistVars = conf.get(YarnConfiguration.NM_ENV_WHITELIST,
YarnConfiguration.DEFAULT_NM_ENV_WHITELIST).split(",");
}
}
  /**
   * Returns the configuration last set via {@link #setConf(Configuration)},
   * or null if none has been set.
   */
  @Override
  public Configuration getConf() {
    return conf;
  }
  /**
   * Run the executor initialization steps.
   * Verify that the necessary configs and permissions are in place.
   *
   * @param nmContext Context of NM
   * @throws IOException if initialization fails
   */
  public abstract void init(Context nmContext) throws IOException;
  /**
   * This function localizes the JAR file on-demand.
   * On Windows the ContainerLaunch creates a temporary special JAR manifest of
   * other JARs to workaround the CLASSPATH length. In a secure cluster this
   * JAR must be localized so that the container has access to it.
   * The default implementation simply returns the classpath passed to it
   * unchanged, which is expected to have been created in the node manager's
   * <i>fprivate</i> folder, which will not work with secure Windows clusters.
   *
   * @param jarPath the path to the JAR to localize
   * @param target the directory where the JAR file should be localized
   * @param owner the name of the user who should own the localized file
   * @return the path to the localized JAR file
   * @throws IOException if localization fails
   */
  public Path localizeClasspathJar(Path jarPath, Path target, String owner)
      throws IOException {
    // Default: no localization is performed; subclasses override as needed.
    return jarPath;
  }
  /**
   * Prepare the environment for containers in this application to execute.
   * <pre>
   * For $x in local.dirs
   *   create $x/$user/$appId
   * Copy $nmLocal/appTokens {@literal ->} $N/$user/$appId
   * For $rsrc in private resources
   *   Copy $rsrc {@literal ->} $N/$user/filecache/[idef]
   * For $rsrc in job resources
   *   Copy $rsrc {@literal ->} $N/$user/$appId/filecache/idef
   * </pre>
   *
   * @param ctx LocalizerStartContext that encapsulates necessary information
   *            for starting a localizer.
   * @throws IOException for most application init failures
   * @throws InterruptedException if application init thread is halted by NM
   */
  public abstract void startLocalizer(LocalizerStartContext ctx)
      throws IOException, InterruptedException;
/**
* Prepare the container prior to the launch environment being written.
* @param ctx Encapsulates information necessary for launching containers.
* @throws IOException if errors occur during container preparation
*/
public void prepareContainer(ContainerPrepareContext ctx) throws
IOException{
}
  /**
   * Launch the container on the node. This is a blocking call and returns only
   * when the container exits.
   *
   * @param ctx Encapsulates information necessary for launching containers.
   * @return the return status of the launch
   * @throws IOException if the container launch fails
   * @throws ConfigurationException if config error was found
   */
  public abstract int launchContainer(ContainerStartContext ctx) throws
      IOException, ConfigurationException;
  /**
   * Relaunch the container on the node. This is a blocking call and returns
   * only when the container exits.
   *
   * @param ctx Encapsulates information necessary for relaunching containers.
   * @return the return status of the relaunch
   * @throws IOException if the container relaunch fails
   * @throws ConfigurationException if config error was found
   */
  public abstract int relaunchContainer(ContainerStartContext ctx) throws
      IOException, ConfigurationException;
  /**
   * Signal container with the specified signal.
   *
   * @param ctx Encapsulates information necessary for signaling containers.
   * @return returns true if the operation succeeded
   * @throws IOException if signaling the container fails
   */
  public abstract boolean signalContainer(ContainerSignalContext ctx)
      throws IOException;
  /**
   * Perform the steps necessary to reap the container.
   *
   * @param ctx Encapsulates information necessary for reaping containers.
   * @return returns true if the operation succeeded.
   * @throws IOException if reaping the container fails.
   */
  public abstract boolean reapContainer(ContainerReapContext ctx)
      throws IOException;
  /**
   * Delete specified directories as a given user.
   *
   * @param ctx Encapsulates information necessary for deletion.
   * @throws IOException if delete fails
   * @throws InterruptedException if interrupted while waiting for the deletion
   *         operation to complete
   */
  public abstract void deleteAsUser(DeletionAsUserContext ctx)
      throws IOException, InterruptedException;
  /**
   * Create a symlink file which points to the target.
   *
   * @param target The target for symlink
   * @param symlink the symlink file
   * @throws IOException Error when creating symlinks
   */
  public abstract void symLink(String target, String symlink)
      throws IOException;
  /**
   * Check if a container is alive.
   *
   * @param ctx Encapsulates information necessary for container liveness check.
   * @return true if container is still alive
   * @throws IOException if there is a failure while checking the container
   *         status
   */
  public abstract boolean isContainerAlive(ContainerLivenessContext ctx)
      throws IOException;
/**
* Recover an already existing container. This is a blocking call and returns
* only when the container exits. Note that the container must have been
* activated prior to this call.
*
* @param ctx encapsulates information necessary to reacquire container
* @return The exit code of the pre-existing container
* @throws IOException if there is a failure while reacquiring the container
* @throws InterruptedException if interrupted while waiting to reacquire
* the container
*/
public int reacquireContainer(ContainerReacquisitionContext ctx)
throws IOException, InterruptedException {
Container container = ctx.getContainer();
String user = ctx.getUser();
ContainerId containerId = ctx.getContainerId();
Path pidPath = getPidFilePath(containerId);
if (pidPath == null) {
LOG.warn(containerId + " is not active, returning terminated error");
return ExitCode.TERMINATED.getExitCode();
}
String pid = ProcessIdFileReader.getProcessId(pidPath);
if (pid == null) {
throw new IOException("Unable to determine pid for " + containerId);
}
LOG.info("Reacquiring " + containerId + " with pid " + pid);
ContainerLivenessContext livenessContext = new ContainerLivenessContext
.Builder()
.setContainer(container)
.setUser(user)
.setPid(pid)
.build();
while (isContainerAlive(livenessContext)) {
Thread.sleep(1000);
}
// wait for exit code file to appear
final int sleepMsec = 100;
int msecLeft = 2000;
String exitCodeFile = ContainerLaunch.getExitCodeFile(pidPath.toString());
File file = new File(exitCodeFile);
while (!file.exists() && msecLeft >= 0) {
if (!isContainerActive(containerId)) {
LOG.info(containerId + " was deactivated");
return ExitCode.TERMINATED.getExitCode();
}
Thread.sleep(sleepMsec);
msecLeft -= sleepMsec;
}
if (msecLeft < 0) {
throw new IOException("Timeout while waiting for exit code from "
+ containerId);
}
try {
return Integer.parseInt(FileUtils.readFileToString(file).trim());
} catch (NumberFormatException e) {
throw new IOException("Error parsing exit code from pid " + pid, e);
}
}
  /**
   * This method writes out the launch environment of a container to the
   * default container launch script. For the default container script path see
   * {@link ContainerLaunch#CONTAINER_SCRIPT}.
   *
   * @param out the output stream to which the environment is written (usually
   *        a script file which will be executed by the Launcher)
   * @param environment the environment variables and their values
   * @param resources the resources which have been localized for this
   *        container. Symlinks will be created to these localized resources
   * @param command the command that will be run
   * @param logDir the log dir to which to copy debugging information
   * @param user the username of the job owner
   * @param nmVars the set of environment vars that are explicitly set by NM
   * @throws IOException if any errors happened writing to the OutputStream,
   *         while creating symlinks
   */
  public void writeLaunchEnv(OutputStream out, Map<String, String> environment,
      Map<Path, List<String>> resources, List<String> command, Path logDir,
      String user, LinkedHashSet<String> nmVars) throws IOException {
    // Delegate to the general overload using the default script filename.
    this.writeLaunchEnv(out, environment, resources, command, logDir, user,
        ContainerLaunch.CONTAINER_SCRIPT, nmVars);
  }
/**
* This method writes out the launch environment of a container to a specified
* path.
*
* @param out the output stream to which the environment is written (usually
* a script file which will be executed by the Launcher)
* @param environment the environment variables and their values
* @param resources the resources which have been localized for this
* container. Symlinks will be created to these localized resources
* @param command the command that will be run
* @param logDir the log dir to which to copy debugging information
* @param user the username of the job owner
* @param outFilename the path to which to write the launch environment
* @param nmVars the set of environment vars that are explicitly set by NM
* @throws IOException if any errors happened writing to the OutputStream,
* while creating symlinks
*/
@VisibleForTesting
public void writeLaunchEnv(OutputStream out, Map<String, String> environment,
Map<Path, List<String>> resources, List<String> command, Path logDir,
String user, String outFilename, LinkedHashSet<String> nmVars)
throws IOException {
ContainerLaunch.ShellScriptBuilder sb =
ContainerLaunch.ShellScriptBuilder.create();
// Add "set -o pipefail -e" to validate launch_container script.
sb.setExitOnFailure();
//Redirect stdout and stderr for launch_container script
sb.stdout(logDir, CONTAINER_PRE_LAUNCH_STDOUT);
sb.stderr(logDir, CONTAINER_PRE_LAUNCH_STDERR);
if (environment != null) {
sb.echo("Setting up env variables");
// Whitelist environment variables are treated specially.
// Only add them if they are not already defined in the environment.
// Add them using special syntax to prevent them from eclipsing
// variables that may be set explicitly in the container image (e.g,
// in a docker image). Put these before the others to ensure the
// correct expansion is used.
for(String var : whitelistVars) {
if (!environment.containsKey(var)) {
String val = getNMEnvVar(var);
if (val != null) {
sb.whitelistedEnv(var, val);
}
}
}
// Now write vars that were set explicitly by nodemanager, preserving
// the order they were written in.
for (String nmEnvVar : nmVars) {
sb.env(nmEnvVar, environment.get(nmEnvVar));
}
// Now write the remaining environment variables.
for (Map.Entry<String, String> env :
sb.orderEnvByDependencies(environment).entrySet()) {
if (!nmVars.contains(env.getKey())) {
sb.env(env.getKey(), env.getValue());
}
}
// Add the whitelist vars to the environment. Do this after writing
// environment variables so they are not written twice.
for(String var : whitelistVars) {
if (!environment.containsKey(var)) {
String val = getNMEnvVar(var);
if (val != null) {
environment.put(var, val);
}
}
}
}
if (resources != null) {
sb.echo("Setting up job resources");
for (Map.Entry<Path, List<String>> resourceEntry :
resources.entrySet()) {
for (String linkName : resourceEntry.getValue()) {
if (new Path(linkName).getName().equals(WILDCARD)) {
// If this is a wildcarded path, link to everything in the
// directory from the working directory
for (File wildLink : readDirAsUser(user, resourceEntry.getKey())) {
sb.symlink(new Path(wildLink.toString()),
new Path(wildLink.getName()));
}
} else {
sb.symlink(resourceEntry.getKey(), new Path(linkName));
}
}
}
}
// dump debugging information if configured
if (getConf() != null &&
getConf().getBoolean(YarnConfiguration.NM_LOG_CONTAINER_DEBUG_INFO,
YarnConfiguration.DEFAULT_NM_LOG_CONTAINER_DEBUG_INFO)) {
sb.echo("Copying debugging information");
sb.copyDebugInformation(new Path(outFilename),
new Path(logDir, outFilename));
sb.listDebugInformation(new Path(logDir, DIRECTORY_CONTENTS));
}
sb.echo("Launching container");
sb.command(command);
PrintStream pout = null;
try {
pout = new PrintStream(out, false, "UTF-8");
sb.write(pout);
} finally {
if (out != null) {
out.close();
}
}
}
/**
* Return the files in the target directory. If retrieving the list of files
* requires specific access rights, that access will happen as the
* specified user. The list will not include entries for "." or "..".
*
* @param user the user as whom to access the target directory
* @param dir the target directory
* @return a list of files in the target directory
*/
protected File[] readDirAsUser(String user, Path dir) {
return new File(dir.toString()).listFiles();
}
/**
* The container exit code.
*/
public enum ExitCode {
SUCCESS(0),
FORCE_KILLED(137),
TERMINATED(143),
LOST(154);
private final int code;
private ExitCode(int exitCode) {
this.code = exitCode;
}
/**
* Get the exit code as an int.
* @return the exit code as an int
*/
public int getExitCode() {
return code;
}
@Override
public String toString() {
return String.valueOf(code);
}
}
/**
* The constants for the signals.
*/
public enum Signal {
NULL(0, "NULL"),
QUIT(3, "SIGQUIT"),
KILL(9, "SIGKILL"),
TERM(15, "SIGTERM");
private final int value;
private final String str;
private Signal(int value, String str) {
this.str = str;
this.value = value;
}
/**
* Get the signal number.
* @return the signal number
*/
public int getValue() {
return value;
}
@Override
public String toString() {
return str;
}
}
/**
* Log each line of the output string as INFO level log messages.
*
* @param output the output string to log
*/
protected void logOutput(String output) {
String shExecOutput = output;
if (shExecOutput != null) {
for (String str : shExecOutput.split("\n")) {
LOG.info(str);
}
}
}
/**
* Get the pidFile of the container.
*
* @param containerId the container ID
* @return the path of the pid-file for the given containerId.
*/
protected Path getPidFilePath(ContainerId containerId) {
try {
readLock.lock();
return (this.pidFiles.get(containerId));
} finally {
readLock.unlock();
}
}
/**
* Return a command line to execute the given command in the OS shell.
* On Windows, the {code}groupId{code} parameter can be used to launch
* and associate the given GID with a process group. On
* non-Windows hosts, the {code}groupId{code} parameter is ignored.
*
* @param command the command to execute
* @param groupId the job owner's GID
* @param userName the job owner's username
* @param pidFile the path to the container's PID file
* @param config the configuration
* @return the command line to execute
*/
protected String[] getRunCommand(String command, String groupId,
String userName, Path pidFile, Configuration config) {
return getRunCommand(command, groupId, userName, pidFile, config, null);
}
/**
* Return a command line to execute the given command in the OS shell.
* On Windows, the {code}groupId{code} parameter can be used to launch
* and associate the given GID with a process group. On
* non-Windows hosts, the {code}groupId{code} parameter is ignored.
*
* @param command the command to execute
* @param groupId the job owner's GID for Windows. On other operating systems
* it is ignored.
* @param userName the job owner's username for Windows. On other operating
* systems it is ignored.
* @param pidFile the path to the container's PID file on Windows. On other
* operating systems it is ignored.
* @param config the configuration
* @param resource on Windows this parameter controls memory and CPU limits.
* If null, no limits are set. On other operating systems it is ignored.
* @return the command line to execute
*/
protected String[] getRunCommand(String command, String groupId,
String userName, Path pidFile, Configuration config, Resource resource) {
if (Shell.WINDOWS) {
return getRunCommandForWindows(command, groupId, userName, pidFile,
config, resource);
} else {
return getRunCommandForOther(command, config);
}
}
/**
* Return a command line to execute the given command in the OS shell.
* The {code}groupId{code} parameter can be used to launch
* and associate the given GID with a process group.
*
* @param command the command to execute
* @param groupId the job owner's GID
* @param userName the job owner's username
* @param pidFile the path to the container's PID file
* @param config the configuration
* @param resource this parameter controls memory and CPU limits.
* If null, no limits are set.
* @return the command line to execute
*/
protected String[] getRunCommandForWindows(String command, String groupId,
String userName, Path pidFile, Configuration config, Resource resource) {
int cpuRate = -1;
int memory = -1;
if (resource != null) {
if (config.getBoolean(
YarnConfiguration.NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED,
YarnConfiguration.
DEFAULT_NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED)) {
memory = (int) resource.getMemorySize();
}
if (config.getBoolean(
YarnConfiguration.NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED,
YarnConfiguration.DEFAULT_NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED)) {
int containerVCores = resource.getVirtualCores();
int nodeVCores = NodeManagerHardwareUtils.getVCores(config);
int nodeCpuPercentage =
NodeManagerHardwareUtils.getNodeCpuPercentage(config);
float containerCpuPercentage =
(float)(nodeCpuPercentage * containerVCores) / nodeVCores;
// CPU should be set to a percentage * 100, e.g. 20% cpu rate limit
// should be set as 20 * 100.
cpuRate = Math.min(10000, (int)(containerCpuPercentage * 100));
}
}
return new String[] {
Shell.getWinUtilsPath(),
"task",
"create",
"-m",
String.valueOf(memory),
"-c",
String.valueOf(cpuRate),
groupId,
"cmd /c " + command
};
}
/**
* Return a command line to execute the given command in the OS shell.
*
* @param command the command to execute
* @param config the configuration
* @return the command line to execute
*/
protected String[] getRunCommandForOther(String command,
Configuration config) {
List<String> retCommand = new ArrayList<>();
boolean containerSchedPriorityIsSet = false;
int containerSchedPriorityAdjustment =
YarnConfiguration.DEFAULT_NM_CONTAINER_EXECUTOR_SCHED_PRIORITY;
if (config.get(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY) !=
null) {
containerSchedPriorityIsSet = true;
containerSchedPriorityAdjustment = config
.getInt(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY,
YarnConfiguration.DEFAULT_NM_CONTAINER_EXECUTOR_SCHED_PRIORITY);
}
if (containerSchedPriorityIsSet) {
retCommand.addAll(Arrays.asList("nice", "-n",
Integer.toString(containerSchedPriorityAdjustment)));
}
retCommand.addAll(Arrays.asList("bash", command));
return retCommand.toArray(new String[retCommand.size()]);
}
/**
* Return whether the container is still active.
*
* @param containerId the target container's ID
* @return true if the container is active
*/
protected boolean isContainerActive(ContainerId containerId) {
try {
readLock.lock();
return (this.pidFiles.containsKey(containerId));
} finally {
readLock.unlock();
}
}
  /**
   * Read a variable from the NodeManager's own process environment.
   * Isolated into a protected method so tests can override it.
   *
   * @param varname the name of the environment variable
   * @return the variable's value, or null if it is not set
   */
  @VisibleForTesting
  protected String getNMEnvVar(String varname) {
    return System.getenv(varname);
  }
/**
* Mark the container as active.
*
* @param containerId the container ID
* @param pidFilePath the path where the executor should write the PID
* of the launched process
*/
public void activateContainer(ContainerId containerId, Path pidFilePath) {
try {
writeLock.lock();
this.pidFiles.put(containerId, pidFilePath);
} finally {
writeLock.unlock();
}
}
  // LinuxContainerExecutor overrides this method and behaves differently.
  public String[] getIpAndHost(Container container)
      throws ContainerExecutionException {
    // Default implementation: resolve from the NodeManager host itself.
    return getLocalIpAndHost(container);
  }
// ipAndHost[0] contains ip.
// ipAndHost[1] contains hostname.
public static String[] getLocalIpAndHost(Container container) {
String[] ipAndHost = new String[2];
try {
InetAddress address = InetAddress.getLocalHost();
ipAndHost[0] = address.getHostAddress();
ipAndHost[1] = address.getHostName();
} catch (UnknownHostException e) {
LOG.error("Unable to get Local hostname and ip for " + container
.getContainerId(), e);
}
return ipAndHost;
}
/**
* Mark the container as inactive. For inactive containers this
* method has no effect.
*
* @param containerId the container ID
*/
public void deactivateContainer(ContainerId containerId) {
try {
writeLock.lock();
this.pidFiles.remove(containerId);
} finally {
writeLock.unlock();
}
}
/**
* Pause the container. The default implementation is to raise a kill event.
* Specific executor implementations can override this behavior.
* @param container
* the Container
*/
public void pauseContainer(Container container) {
LOG.warn(container.getContainerId() + " doesn't support pausing.");
throw new UnsupportedOperationException();
}
/**
* Resume the container from pause state. The default implementation ignores
* this event. Specific implementations can override this behavior.
* @param container
* the Container
*/
public void resumeContainer(Container container) {
LOG.warn(container.getContainerId() + " doesn't support resume.");
throw new UnsupportedOperationException();
}
/**
* Get the process-identifier for the container.
*
* @param containerID the container ID
* @return the process ID of the container if it has already launched,
* or null otherwise
*/
public String getProcessId(ContainerId containerID) {
String pid = null;
Path pidFile = pidFiles.get(containerID);
// If PID is null, this container hasn't launched yet.
if (pidFile != null) {
try {
pid = ProcessIdFileReader.getProcessId(pidFile);
} catch (IOException e) {
LOG.error("Got exception reading pid from pid-file " + pidFile, e);
}
}
return pid;
}
  /**
   * This class will signal a target container after a specified delay.
   * @see #signalContainer
   */
  public static class DelayedProcessKiller extends Thread {
    // Immutable state captured at construction; the thread only reads it.
    private final Container container;
    private final String user;
    private final String pid;
    private final long delay;
    private final Signal signal;
    private final ContainerExecutor containerExecutor;
    /**
     * Basic constructor.
     *
     * @param container the container to signal
     * @param user the user as whom to send the signal
     * @param pid the PID of the container process
     * @param delayMS the period of time to wait in millis before signaling
     * the container
     * @param signal the signal to send
     * @param containerExecutor the executor to use to send the signal
     */
    public DelayedProcessKiller(Container container, String user, String pid,
        long delayMS, Signal signal, ContainerExecutor containerExecutor) {
      this.container = container;
      this.user = user;
      this.pid = pid;
      this.delay = delayMS;
      this.signal = signal;
      this.containerExecutor = containerExecutor;
      setName("Task killer for " + pid);
      // Non-daemon: the JVM waits for an in-flight kill to finish.
      setDaemon(false);
    }
    @Override
    public void run() {
      try {
        // Wait out the grace period, then deliver the signal.
        Thread.sleep(delay);
        containerExecutor.signalContainer(new ContainerSignalContext.Builder()
            .setContainer(container)
            .setUser(user)
            .setPid(pid)
            .setSignal(signal)
            .build());
      } catch (InterruptedException e) {
        // Interrupted during the delay: re-set this thread's interrupt flag
        // and exit without signaling.
        interrupt();
      } catch (IOException e) {
        // Signal delivery failed: log and surface the failure as a
        // container diagnostics update rather than killing this thread.
        String message = "Exception when user " + user + " killing task " + pid
            + " in DelayedProcessKiller: " + StringUtils.stringifyException(e);
        LOG.warn(message);
        container.handle(new ContainerDiagnosticsUpdateEvent(
            container.getContainerId(), message));
      }
    }
  }
}
| |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.operator.scalar;
import com.facebook.presto.common.Page;
import com.facebook.presto.common.block.Block;
import com.facebook.presto.common.block.BlockBuilder;
import com.facebook.presto.common.block.DictionaryBlock;
import com.facebook.presto.common.type.MapType;
import com.facebook.presto.metadata.MetadataManager;
import com.facebook.presto.operator.DriverYieldSignal;
import com.facebook.presto.operator.project.PageProcessor;
import com.facebook.presto.spi.function.FunctionHandle;
import com.facebook.presto.spi.relation.CallExpression;
import com.facebook.presto.spi.relation.RowExpression;
import com.facebook.presto.sql.gen.ExpressionCompiler;
import com.facebook.presto.sql.gen.PageFunctionCompiler;
import com.google.common.collect.ImmutableList;
import io.airlift.slice.Slice;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OperationsPerInvocation;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;
import org.openjdk.jmh.runner.options.WarmupMode;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import static com.facebook.presto.block.BlockAssertions.createSlicesBlock;
import static com.facebook.presto.common.type.DoubleType.DOUBLE;
import static com.facebook.presto.common.type.VarcharType.createUnboundedVarcharType;
import static com.facebook.presto.memory.context.AggregatedMemoryContext.newSimpleAggregatedMemoryContext;
import static com.facebook.presto.metadata.MetadataManager.createTestMetadataManager;
import static com.facebook.presto.sql.analyzer.TypeSignatureProvider.fromTypes;
import static com.facebook.presto.sql.relational.Expressions.field;
import static com.facebook.presto.testing.TestingConnectorSession.SESSION;
import static com.facebook.presto.util.StructuralTestUtil.mapType;
import static io.airlift.slice.Slices.utf8Slice;
/**
 * JMH microbenchmark for the {@code map_concat} scalar function. A compiled
 * {@link PageProcessor} applies {@code map_concat(col0, col1)} to a page of
 * two {@code map(varchar, double)} columns. The {@code mapConfig} param
 * selects which of the two input maps are empty and {@code keyCount} sizes
 * the maps.
 */
@SuppressWarnings("MethodMayBeStatic")
@State(Scope.Thread)
@OutputTimeUnit(TimeUnit.NANOSECONDS)
@Fork(2)
@Warmup(iterations = 10, time = 500, timeUnit = TimeUnit.MILLISECONDS)
@Measurement(iterations = 10, time = 500, timeUnit = TimeUnit.MILLISECONDS)
@BenchmarkMode(Mode.AverageTime)
public class BenchmarkMapConcat
{
    // Number of rows in the benchmark page (also the ops-per-invocation).
    private static final int POSITIONS = 1000;
    /**
     * Runs the pre-compiled map_concat projection over the prepared page.
     *
     * @param data the benchmark state holding the page and page processor
     * @return the produced output pages (consumed so the work is not
     * dead-code eliminated)
     */
    @Benchmark
    @OperationsPerInvocation(POSITIONS)
    public List<Optional<Page>> mapConcat(BenchmarkData data)
    {
        return ImmutableList.copyOf(
                data.getPageProcessor().process(
                        SESSION.getSqlFunctionProperties(),
                        new DriverYieldSignal(),
                        newSimpleAggregatedMemoryContext().newLocalMemoryContext(PageProcessor.class.getSimpleName()),
                        data.getPage()));
    }
    /**
     * Benchmark state: builds the input page of two map columns and compiles
     * the map_concat projection. Fields annotated with {@code @Param} are
     * injected by JMH before {@link #setup()} runs.
     */
    @SuppressWarnings("FieldMayBeFinal")
    @State(Scope.Thread)
    public static class BenchmarkData
    {
        // Name of the scalar function under test.
        private String name = "map_concat";
        // Which of the two map arguments are empty.
        @Param({"left_empty", "right_empty", "both_empty", "non_empty"})
        private String mapConfig = "non_empty";
        // Number of entries per (non-empty) map.
        @Param({"10", "100", "1000"})
        private int keyCount = 100;
        private Page page;
        private PageProcessor pageProcessor;
        @Setup
        public void setup()
        {
            MetadataManager metadata = createTestMetadataManager();
            ExpressionCompiler compiler = new ExpressionCompiler(metadata, new PageFunctionCompiler(metadata, 0));
            List<String> keyList1 = createRandomStringListFromSet(keyCount);
            List<String> keyList2 = createRandomStringListFromSet(keyCount);
            List<String> leftKeys;
            List<String> rightKeys;
            // Choose the key lists for each side according to the benchmark
            // parameter.
            switch (mapConfig) {
                case "left_empty":
                    leftKeys = ImmutableList.of();
                    rightKeys = keyList1;
                    break;
                case "right_empty":
                    leftKeys = keyList1;
                    rightKeys = ImmutableList.of();
                    break;
                case "both_empty":
                    leftKeys = ImmutableList.of();
                    rightKeys = ImmutableList.of();
                    break;
                case "non_empty":
                    leftKeys = keyList1;
                    rightKeys = keyList2;
                    break;
                default:
                    throw new UnsupportedOperationException();
            }
            MapType mapType = mapType(createUnboundedVarcharType(), DOUBLE);
            Block leftKeyBlock = createKeyBlock(POSITIONS, leftKeys);
            Block leftValueBlock = createValueBlock(POSITIONS, leftKeys.size());
            Block leftBlock = createMapBlock(mapType, POSITIONS, leftKeyBlock, leftValueBlock);
            Block rightKeyBlock = createKeyBlock(POSITIONS, rightKeys);
            Block rightValueBlock = createValueBlock(POSITIONS, rightKeys.size());
            Block rightBlock = createMapBlock(mapType, POSITIONS, rightKeyBlock, rightValueBlock);
            // Compile the projection map_concat(field0, field1).
            ImmutableList.Builder<RowExpression> projectionsBuilder = ImmutableList.builder();
            FunctionHandle functionHandle = metadata.getFunctionAndTypeManager().lookupFunction(name, fromTypes(mapType, mapType));
            projectionsBuilder.add(new CallExpression(
                    name,
                    functionHandle,
                    mapType,
                    ImmutableList.of(field(0, mapType), field(1, mapType))));
            ImmutableList<RowExpression> projections = projectionsBuilder.build();
            pageProcessor = compiler.compilePageProcessor(SESSION.getSqlFunctionProperties(), Optional.empty(), projections).get();
            page = new Page(leftBlock, rightBlock);
        }
        public PageProcessor getPageProcessor()
        {
            return pageProcessor;
        }
        public Page getPage()
        {
            return page;
        }
        // Builds a map-typed block of positionCount maps, each with
        // keyBlock.getPositionCount() / positionCount entries.
        private static Block createMapBlock(MapType mapType, int positionCount, Block keyBlock, Block valueBlock)
        {
            int[] offsets = new int[positionCount + 1];
            int mapSize = keyBlock.getPositionCount() / positionCount;
            for (int i = 0; i < offsets.length; i++) {
                offsets[i] = mapSize * i;
            }
            return mapType.createBlockFromKeyValue(positionCount, Optional.empty(), offsets, keyBlock, valueBlock);
        }
        // Repeats the key set once per position via a dictionary block.
        private static Block createKeyBlock(int positionCount, List<String> keys)
        {
            Block keyDictionaryBlock = createSliceArrayBlock(keys);
            int[] keyIds = new int[positionCount * keys.size()];
            for (int i = 0; i < keyIds.length; i++) {
                keyIds[i] = i % keys.size();
            }
            return new DictionaryBlock(keyDictionaryBlock, keyIds);
        }
        // Random double values, one per map entry per position.
        private static Block createValueBlock(int positionCount, int mapSize)
        {
            BlockBuilder valueBlockBuilder = DOUBLE.createBlockBuilder(null, positionCount * mapSize);
            for (int i = 0; i < positionCount * mapSize; i++) {
                DOUBLE.writeDouble(valueBlockBuilder, ThreadLocalRandom.current().nextDouble());
            }
            return valueBlockBuilder.build();
        }
        private static Block createSliceArrayBlock(List<String> keys)
        {
            // last position is reserved for null
            Slice[] sliceArray = new Slice[keys.size() + 1];
            for (int i = 0; i < keys.size(); i++) {
                sliceArray[i] = utf8Slice(keys.get(i));
            }
            return createSlicesBlock(sliceArray);
        }
        // Seeded for reproducibility across forks.
        // NOTE(review): random.nextInt(keyCount) can repeat, so the returned
        // list may contain duplicate keys — confirm this is intended for the
        // generated map inputs.
        private static List<String> createRandomStringListFromSet(int keyCount)
        {
            Random random = new Random(0);
            List<String> keyList = new ArrayList<>();
            for (int i = 0; i < keyCount; i++) {
                keyList.add(Integer.toString(random.nextInt(keyCount)));
            }
            return keyList;
        }
    }
    /**
     * Entry point: sanity-runs the benchmark once, then launches JMH.
     */
    public static void main(String[] args)
            throws Throwable
    {
        // assure the benchmarks are valid before running
        BenchmarkData data = new BenchmarkData();
        data.setup();
        new BenchmarkMapConcat().mapConcat(data);
        Options options = new OptionsBuilder()
                .verbosity(VerboseMode.NORMAL)
                .warmupMode(WarmupMode.INDI)
                .include(".*" + BenchmarkMapConcat.class.getSimpleName() + ".*")
                .build();
        new Runner(options).run();
    }
}
| |
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.engine.datacenter.entity.api.db.dao;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.ejb.Local;
import javax.inject.Inject;
import org.apache.cloudstack.engine.datacenter.entity.api.DataCenterResourceEntity;
import org.apache.cloudstack.engine.datacenter.entity.api.DataCenterResourceEntity.State;
import org.apache.cloudstack.engine.datacenter.entity.api.DataCenterResourceEntity.State.Event;
import org.apache.cloudstack.engine.datacenter.entity.api.db.EngineClusterVO;
import org.apache.cloudstack.engine.datacenter.entity.api.db.EngineHostPodVO;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.org.Grouping;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.GenericSearchBuilder;
import com.cloud.utils.db.JoinBuilder;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Func;
import com.cloud.utils.db.SearchCriteria.Op;
import com.cloud.utils.db.Transaction;
import com.cloud.utils.db.UpdateBuilder;
import com.cloud.utils.exception.CloudRuntimeException;
@Component(value="EngineClusterDao")
@Local(value=EngineClusterDao.class)
public class EngineClusterDaoImpl extends GenericDaoBase<EngineClusterVO, Long> implements EngineClusterDao {
    private static final Logger s_logger = Logger.getLogger(EngineClusterDaoImpl.class);

    // Search builders are constructed once in the constructor and reused by
    // the query methods below.
    protected final SearchBuilder<EngineClusterVO> PodSearch;
    protected final SearchBuilder<EngineClusterVO> HyTypeWithoutGuidSearch;
    protected final SearchBuilder<EngineClusterVO> AvailHyperSearch;
    protected final SearchBuilder<EngineClusterVO> ZoneSearch;
    protected final SearchBuilder<EngineClusterVO> ZoneHyTypeSearch;
    protected SearchBuilder<EngineClusterVO> StateChangeSearch;
    protected SearchBuilder<EngineClusterVO> UUIDSearch;

    private static final String GET_POD_CLUSTER_MAP_PREFIX = "SELECT pod_id, id FROM cloud.cluster WHERE cluster.id IN( ";
    private static final String GET_POD_CLUSTER_MAP_SUFFIX = " )";

    @Inject protected EngineHostPodDao _hostPodDao;

    protected EngineClusterDaoImpl() {
        super();

        HyTypeWithoutGuidSearch = createSearchBuilder();
        HyTypeWithoutGuidSearch.and("hypervisorType", HyTypeWithoutGuidSearch.entity().getHypervisorType(), SearchCriteria.Op.EQ);
        HyTypeWithoutGuidSearch.and("guid", HyTypeWithoutGuidSearch.entity().getGuid(), SearchCriteria.Op.NULL);
        HyTypeWithoutGuidSearch.done();

        ZoneHyTypeSearch = createSearchBuilder();
        ZoneHyTypeSearch.and("hypervisorType", ZoneHyTypeSearch.entity().getHypervisorType(), SearchCriteria.Op.EQ);
        ZoneHyTypeSearch.and("dataCenterId", ZoneHyTypeSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
        ZoneHyTypeSearch.done();

        PodSearch = createSearchBuilder();
        PodSearch.and("pod", PodSearch.entity().getPodId(), SearchCriteria.Op.EQ);
        PodSearch.and("name", PodSearch.entity().getName(), SearchCriteria.Op.EQ);
        PodSearch.done();

        ZoneSearch = createSearchBuilder();
        ZoneSearch.and("dataCenterId", ZoneSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
        ZoneSearch.groupBy(ZoneSearch.entity().getHypervisorType());
        ZoneSearch.done();

        AvailHyperSearch = createSearchBuilder();
        AvailHyperSearch.and("zoneId", AvailHyperSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
        AvailHyperSearch.select(null, Func.DISTINCT, AvailHyperSearch.entity().getHypervisorType());
        AvailHyperSearch.done();

        UUIDSearch = createSearchBuilder();
        UUIDSearch.and("uuid", UUIDSearch.entity().getUuid(), SearchCriteria.Op.EQ);
        UUIDSearch.done();

        StateChangeSearch = createSearchBuilder();
        StateChangeSearch.and("id", StateChangeSearch.entity().getId(), SearchCriteria.Op.EQ);
        StateChangeSearch.and("state", StateChangeSearch.entity().getState(), SearchCriteria.Op.EQ);
        StateChangeSearch.done();
    }

    @Override
    public List<EngineClusterVO> listByZoneId(long zoneId) {
        SearchCriteria<EngineClusterVO> sc = ZoneSearch.create();
        sc.setParameters("dataCenterId", zoneId);
        return listBy(sc);
    }

    @Override
    public List<EngineClusterVO> listByPodId(long podId) {
        SearchCriteria<EngineClusterVO> sc = PodSearch.create();
        sc.setParameters("pod", podId);
        return listBy(sc);
    }

    @Override
    public EngineClusterVO findBy(String name, long podId) {
        SearchCriteria<EngineClusterVO> sc = PodSearch.create();
        sc.setParameters("pod", podId);
        sc.setParameters("name", name);
        return findOneBy(sc);
    }

    @Override
    public List<EngineClusterVO> listByHyTypeWithoutGuid(String hyType) {
        SearchCriteria<EngineClusterVO> sc = HyTypeWithoutGuidSearch.create();
        sc.setParameters("hypervisorType", hyType);
        return listBy(sc);
    }

    @Override
    public List<EngineClusterVO> listByDcHyType(long dcId, String hyType) {
        SearchCriteria<EngineClusterVO> sc = ZoneHyTypeSearch.create();
        sc.setParameters("dataCenterId", dcId);
        sc.setParameters("hypervisorType", hyType);
        return listBy(sc);
    }

    @Override
    public List<HypervisorType> getAvailableHypervisorInZone(Long zoneId) {
        SearchCriteria<EngineClusterVO> sc = AvailHyperSearch.create();
        if (zoneId != null) {
            sc.setParameters("zoneId", zoneId);
        }
        List<EngineClusterVO> clusters = listBy(sc);
        List<HypervisorType> hypers = new ArrayList<HypervisorType>(4);
        for (EngineClusterVO cluster : clusters) {
            hypers.add(cluster.getHypervisorType());
        }
        return hypers;
    }

    @Override
    public Map<Long, List<Long>> getPodClusterIdMap(List<Long> clusterIds) {
        Transaction txn = Transaction.currentTxn();
        PreparedStatement pstmt = null;
        Map<Long, List<Long>> result = new HashMap<Long, List<Long>>();
        // Guard against an empty/null id list: the previous code would emit
        // "... WHERE cluster.id IN( " with no closing parenthesis (the
        // suffix was only appended inside the non-empty branch), which is
        // invalid SQL and fails at execution time.
        if (clusterIds == null || clusterIds.isEmpty()) {
            return result;
        }
        try {
            StringBuilder sql = new StringBuilder(GET_POD_CLUSTER_MAP_PREFIX);
            for (Long clusterId : clusterIds) {
                sql.append(clusterId).append(",");
            }
            // Drop the trailing comma and close the IN (...) clause.
            sql.delete(sql.length() - 1, sql.length());
            sql.append(GET_POD_CLUSTER_MAP_SUFFIX);

            pstmt = txn.prepareAutoCloseStatement(sql.toString());
            ResultSet rs = pstmt.executeQuery();
            while (rs.next()) {
                Long podId = rs.getLong(1);
                Long clusterIdInPod = rs.getLong(2);
                // Accumulate cluster ids per pod.
                List<Long> clusterList = result.get(podId);
                if (clusterList == null) {
                    clusterList = new ArrayList<Long>();
                    result.put(podId, clusterList);
                }
                clusterList.add(clusterIdInPod);
            }
            return result;
        } catch (SQLException e) {
            throw new CloudRuntimeException("DB Exception on: " + GET_POD_CLUSTER_MAP_PREFIX, e);
        } catch (Throwable e) {
            throw new CloudRuntimeException("Caught: " + GET_POD_CLUSTER_MAP_PREFIX, e);
        }
    }

    @Override
    public List<Long> listDisabledClusters(long zoneId, Long podId) {
        GenericSearchBuilder<EngineClusterVO, Long> clusterIdSearch = createSearchBuilder(Long.class);
        clusterIdSearch.selectField(clusterIdSearch.entity().getId());
        clusterIdSearch.and("dataCenterId", clusterIdSearch.entity().getDataCenterId(), Op.EQ);
        if (podId != null) {
            clusterIdSearch.and("podId", clusterIdSearch.entity().getPodId(), Op.EQ);
        }
        clusterIdSearch.and("allocationState", clusterIdSearch.entity().getAllocationState(), Op.EQ);
        clusterIdSearch.done();

        SearchCriteria<Long> sc = clusterIdSearch.create();
        sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, zoneId);
        if (podId != null) {
            sc.addAnd("podId", SearchCriteria.Op.EQ, podId);
        }
        sc.addAnd("allocationState", SearchCriteria.Op.EQ, Grouping.AllocationState.Disabled);
        return customSearch(sc, null);
    }

    @Override
    public List<Long> listClustersWithDisabledPods(long zoneId) {
        // Inner join clusters against pods that are disabled in the zone.
        GenericSearchBuilder<EngineHostPodVO, Long> disabledPodIdSearch = _hostPodDao.createSearchBuilder(Long.class);
        disabledPodIdSearch.selectField(disabledPodIdSearch.entity().getId());
        disabledPodIdSearch.and("dataCenterId", disabledPodIdSearch.entity().getDataCenterId(), Op.EQ);
        disabledPodIdSearch.and("allocationState", disabledPodIdSearch.entity().getAllocationState(), Op.EQ);

        GenericSearchBuilder<EngineClusterVO, Long> clusterIdSearch = createSearchBuilder(Long.class);
        clusterIdSearch.selectField(clusterIdSearch.entity().getId());
        clusterIdSearch.join("disabledPodIdSearch", disabledPodIdSearch, clusterIdSearch.entity().getPodId(), disabledPodIdSearch.entity().getId(), JoinBuilder.JoinType.INNER);
        clusterIdSearch.done();

        SearchCriteria<Long> sc = clusterIdSearch.create();
        sc.setJoinParameters("disabledPodIdSearch", "dataCenterId", zoneId);
        sc.setJoinParameters("disabledPodIdSearch", "allocationState", Grouping.AllocationState.Disabled);
        return customSearch(sc, null);
    }

    @Override
    public boolean remove(Long id) {
        Transaction txn = Transaction.currentTxn();
        txn.start();
        // Null out name/guid before removal so the row cannot collide with a
        // re-created cluster of the same name.
        EngineClusterVO cluster = createForUpdate();
        cluster.setName(null);
        cluster.setGuid(null);
        update(id, cluster);
        boolean result = super.remove(id);
        txn.commit();
        return result;
    }

    @Override
    public boolean updateState(State currentState, Event event, State nextState, DataCenterResourceEntity clusterEntity, Object data) {
        EngineClusterVO vo = findById(clusterEntity.getId());
        Date oldUpdatedTime = vo.getLastUpdated();

        // Conditional update: only transition if the row is still in
        // currentState (optimistic state-machine guard).
        SearchCriteria<EngineClusterVO> sc = StateChangeSearch.create();
        sc.setParameters("id", vo.getId());
        sc.setParameters("state", currentState);

        UpdateBuilder builder = getUpdateBuilder(vo);
        builder.set(vo, "state", nextState);
        builder.set(vo, "lastUpdated", new Date());

        int rows = update(vo, sc);
        if (rows == 0 && s_logger.isDebugEnabled()) {
            EngineClusterVO dbCluster = findByIdIncludingRemoved(vo.getId());
            if (dbCluster != null) {
                StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString());
                str.append(": DB Data={id=").append(dbCluster.getId()).append("; state=").append(dbCluster.getState()).append(";updatedTime=")
                    .append(dbCluster.getLastUpdated());
                str.append(": New Data={id=").append(vo.getId()).append("; state=").append(nextState).append("; event=").append(event).append("; updatedTime=").append(vo.getLastUpdated());
                str.append(": stale Data={id=").append(vo.getId()).append("; state=").append(currentState).append("; event=").append(event).append("; updatedTime=").append(oldUpdatedTime);
                // The previous code built this message but never emitted it,
                // silently discarding the diagnostics.
                s_logger.debug(str.toString());
            } else {
                s_logger.debug("Unable to update dataCenter: id=" + vo.getId() + ", as there is no such dataCenter exists in the database anymore");
            }
        }
        return rows > 0;
    }
}
| |
/*******************************************************************************
* Copyright (c) 2006-2010 eBay Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*******************************************************************************/
package org.ebayopensource.turmeric.runtime.common.impl.internal.monitoring;
import java.util.HashMap;
import java.util.Map;
import org.ebayopensource.turmeric.runtime.common.exceptions.ServiceException;
import org.ebayopensource.turmeric.runtime.common.impl.internal.config.GlobalConfigHolder;
import org.ebayopensource.turmeric.runtime.common.impl.internal.config.StorageProviderConfig;
import org.ebayopensource.turmeric.runtime.common.impl.monitoring.storage.DiffBasedSnapshotFileLogger;
import org.ebayopensource.turmeric.runtime.common.impl.utils.ReflectionUtils;
import org.ebayopensource.turmeric.runtime.common.monitoring.MetricsCollector;
import org.ebayopensource.turmeric.runtime.common.monitoring.MetricsRegistry;
import org.ebayopensource.turmeric.runtime.common.monitoring.MetricsStorageProvider;
import org.ebayopensource.turmeric.runtime.sif.impl.internal.monitoring.ClientServiceMonitoringCompStatus;
/**
* @author wdeng
*/
/**
 * Bootstraps the Turmeric monitoring subsystem for the client and server sides.
 * Each side is initialized at most once: a metrics registry is created, the
 * system metric definitions are registered, and a snapshot scheduler is started
 * that periodically persists metrics through the configured storage providers.
 */
public final class MonitoringSystem {

    private static final int DEFAULT_SNAPSHOT_INTERVAL = 60;

    /** Option key identifying where metrics are collected. */
    public final static String COLLECTION_LOCATION = "collectionLocation";
    /** {@link #COLLECTION_LOCATION} value for server-side collection. */
    public final static String COLLECTION_LOCATION_SERVER = "server";
    /** {@link #COLLECTION_LOCATION} value for client-side collection. */
    public final static String COLLECTION_LOCATION_CLIENT = "client";

    private static boolean s_clientInitialized;
    private static boolean s_serverInitialized;
    private static MonitoringDesc s_clientMonDesc;
    private static MonitoringDesc s_serverMonDesc;
    private static MetricsSnapshotScheduler s_serverScheduler;
    private static MetricsSnapshotScheduler s_clientScheduler;

    private MonitoringSystem() {
        // Static-only holder; never instantiated.
    }

    /**
     * Initializes client-side monitoring exactly once; subsequent calls are no-ops.
     *
     * @param globalConfig global configuration, may be null (defaults are used)
     * @throws ServiceException if a configured storage provider cannot be created
     */
    public synchronized static void initializeClient(
            GlobalConfigHolder globalConfig) throws ServiceException {
        if (s_clientInitialized) {
            return;
        }
        s_clientInitialized = true;
        MetricsRegistryImpl.createClientInstance();
        MetricsRegistry.getClientInstance().registerAllMetricsForClass(
            SystemMetricDefs.class);
        ClientServiceMonitoringCompStatus.initializeCompStatus();
        s_clientMonDesc = startSnapshotScheduler(globalConfig,
            MetricsCollector.getClientInstance(), COLLECTION_LOCATION_CLIENT, false);
    }

    /**
     * Initializes server-side monitoring exactly once; subsequent calls are no-ops.
     *
     * @param globalConfig global configuration, may be null (defaults are used)
     * @throws ServiceException if a configured storage provider cannot be created
     */
    public synchronized static void initializeServer(
            GlobalConfigHolder globalConfig) throws ServiceException {
        if (s_serverInitialized) {
            return;
        }
        s_serverInitialized = true;
        MetricsRegistryImpl.createServerInstance();
        MetricsRegistry.getServerInstance().registerAllMetricsForClass(
            SystemMetricDefs.class);
        s_serverMonDesc = startSnapshotScheduler(globalConfig,
            MetricsCollector.getServerInstance(), COLLECTION_LOCATION_SERVER, true);
    }

    /** @return the server-side monitoring descriptor, or null if not initialized */
    public static MonitoringDesc getServerMonitoringDesc() {
        return s_serverMonDesc;
    }

    /** @return the client-side monitoring descriptor, or null if not initialized */
    public static MonitoringDesc getClientMonitoringDesc() {
        return s_clientMonDesc;
    }

    /**
     * Builds the monitoring descriptor, records the scheduler for the given side,
     * and starts it.
     */
    private static MonitoringDesc startSnapshotScheduler(
            GlobalConfigHolder globalConfig, MetricsCollector collector,
            String collectionLocation, boolean isServer) throws ServiceException {
        MonitoringDesc desc = createMonitoringDesc(globalConfig, collectionLocation);
        MetricsSnapshotScheduler scheduler = new MetricsSnapshotScheduler(desc, collector);
        if (isServer) {
            s_serverScheduler = scheduler;
        } else {
            s_clientScheduler = scheduler;
        }
        scheduler.start();
        return desc;
    }

    /**
     * Forces an immediate metrics snapshot for the given admin name on the
     * requested side, if that side's scheduler has been started.
     */
    public static void persistMetricsSnapSnapshot(String adminName, boolean isServer)
    {
        MetricsSnapshotScheduler scheduler = isServer ? s_serverScheduler : s_clientScheduler;
        if (scheduler != null) {
            scheduler.persistSnapshot(adminName);
        }
    }

    /** Builds the fallback descriptor: one diff-based file logger plus internal providers. */
    private static MonitoringDesc createDefaultMonitoringDesc(String collectionLocation) {
        final String name = "DiffBasedFileLogger";
        DiffBasedSnapshotFileLogger fileLogger = new DiffBasedSnapshotFileLogger();
        fileLogger.init(null, name, collectionLocation, DEFAULT_SNAPSHOT_INTERVAL);
        Map<String, StorageProviderDesc> descs = new HashMap<String, StorageProviderDesc>(1);
        descs.put(name, new StorageProviderDesc(name, fileLogger, null, null));
        addInternalProviders(descs, collectionLocation, DEFAULT_SNAPSHOT_INTERVAL);
        return new MonitoringDesc(DEFAULT_SNAPSHOT_INTERVAL, descs);
    }

    /**
     * Builds the descriptor from configuration; falls back to the default
     * descriptor when config or the snapshot interval is absent.
     */
    private static MonitoringDesc createMonitoringDesc(GlobalConfigHolder globalConfig,
        String collectionLocation) throws ServiceException
    {
        Integer snapshotInterval =
            (null == globalConfig) ? null : globalConfig.getMonitorSnapshotInterval();
        if (null == snapshotInterval) {
            return createDefaultMonitoringDesc(collectionLocation);
        }
        Map<String, StorageProviderDesc> providerDescs = createStorageProviderDescs(
            globalConfig, collectionLocation);
        addInternalProviders(providerDescs, collectionLocation, snapshotInterval);
        return new MonitoringDesc(snapshotInterval.longValue(), providerDescs);
    }

    /**
     * Instantiates and initializes every configured storage provider via
     * reflection on the context class loader.
     */
    private static Map<String, StorageProviderDesc> createStorageProviderDescs(
        GlobalConfigHolder globalConfig, String collectionLocation)
        throws ServiceException
    {
        Map<String, StorageProviderConfig> providerConfigs = globalConfig.getStorageProviders();
        if (null == providerConfigs || providerConfigs.isEmpty()) {
            return new HashMap<String, StorageProviderDesc>();
        }
        HashMap<String, StorageProviderDesc> descs =
            new HashMap<String, StorageProviderDesc>(providerConfigs.size());
        ClassLoader loader = Thread.currentThread().getContextClassLoader();
        Integer snapshotInterval = globalConfig.getMonitorSnapshotInterval();
        for (StorageProviderConfig config : providerConfigs.values()) {
            String providerName = config.getName();
            Map<String, String> options = config.getOptions();
            MetricsStorageProvider provider = ReflectionUtils.createInstance(
                config.getClassname(), MetricsStorageProvider.class, loader);
            provider.init(options, providerName, collectionLocation, snapshotInterval);
            descs.put(providerName, new StorageProviderDesc(
                providerName, provider, options, collectionLocation));
        }
        return descs;
    }

    /**
     * Ensures a DiffSnapshotView provider is always present; adds the internal
     * one only when no configured provider already is a DiffSnapshotView.
     */
    private static void addInternalProviders(Map<String, StorageProviderDesc> providerDescs,
        String collectionLocation, Integer snapshotInterval)
    {
        boolean hasDiffView = false;
        for (StorageProviderDesc provider : providerDescs.values()) {
            if (provider.getProvider() instanceof DiffSnapshotView) {
                hasDiffView = true;
            }
        }
        if (hasDiffView) {
            return;
        }
        String name = "__sys_DiffSnapshotView_";
        DiffSnapshotView view = new DiffSnapshotView();
        Map<String, String> options = new HashMap<String, String>();
        MetricsConfigManager configManager =
            COLLECTION_LOCATION_CLIENT.equalsIgnoreCase(collectionLocation)
                ? MetricsConfigManager.getClientInstance()
                : MetricsConfigManager.getServerInstance();
        view.init(options, name, collectionLocation, snapshotInterval);
        view.setConfigManager(configManager);
        providerDescs.put(name, new StorageProviderDesc(name, view, options, collectionLocation));
    }
}
| |
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
Sparse, yet its (present) values can be changed. You can't, however, add
values that were (zero and) missing.
@author Andrew McCallum <a href="mailto:mccallum@cs.umass.edu">mccallum@cs.umass.edu</a>
*/
package cc.mallet.types;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Arrays;
import java.util.logging.*;
import java.io.*;
import cc.mallet.types.Alphabet;
import cc.mallet.types.FeatureSequence;
import cc.mallet.types.Vector;
import cc.mallet.util.MalletLogger;
import cc.mallet.util.PropertyList;
import gnu.trove.TIntIntHashMap;
public class HashedSparseVector extends SparseVector implements Serializable
{
	private static Logger logger = MalletLogger.getLogger(SparseVector.class.getName());

	// Lazily-built map from feature index -> location (array position) in the
	// inherited indices[]/values[] arrays.  Null until first needed (see
	// setIndex2Location()); shared, not copied, by cloneMatrixZeroed().
	TIntIntHashMap index2location;
	// Highest feature index present, i.e. indices[indices.length-1].
	// NOTE(review): assumes indices[] is sorted ascending — presumably ensured
	// by the SparseVector constructors (checkIndicesSorted); confirm.
	int maxIndex;

	/**
	 * Creates a vector over the given indices and values; all storage handling
	 * (copying, sorting, de-duplication) is delegated to SparseVector.
	 */
	public HashedSparseVector (int[] indices, double[] values,
														 int capacity, int size,
														 boolean copy,
														 boolean checkIndicesSorted,
														 boolean removeDuplicates)
	{
		super (indices, values, capacity, size, copy, checkIndicesSorted, removeDuplicates);
		assert (indices != null);
	}

	/** Create an empty vector */
	public HashedSparseVector ()
	{
		super (new int[0], new double[0], 0, 0, false, false, false);
	}

	/** Create non-binary vector, possibly dense if "featureIndices" or possibly sparse, if not */
	public HashedSparseVector (int[] featureIndices,
										 double[] values)
	{
		super (featureIndices, values);
	}

	/** Create binary vector */
	public HashedSparseVector (int[] featureIndices)
	{
		super (featureIndices);
	}

	// xxx We need to implement this in FeatureVector subclasses
	// Shallow clone: the new vector shares this vector's indices/values arrays.
	public ConstantMatrix cloneMatrix ()
	{
		return new HashedSparseVector (indices, values);
	}

	/**
	 * Returns a vector with the same sparsity pattern but all values zero.
	 * The indices array is copied; the index2location map (if already built)
	 * is shared with the clone, which is safe because both vectors have the
	 * same index layout.
	 */
	public ConstantMatrix cloneMatrixZeroed () {
		assert (values != null);
		int[] newIndices = new int[indices.length];
		System.arraycopy (indices, 0, newIndices, 0, indices.length);
		HashedSparseVector sv = new HashedSparseVector (newIndices, new double[values.length],
																	 values.length, values.length, false, false, false);
		// share index2location trick ala IndexedSparseVector
		if (index2location != null) {
			sv.index2location = index2location;
			sv.maxIndex = maxIndex;
		}
		return sv;
	}

	// Methods that change values

	// Forces construction of the index->location map (no-op if already built
	// or if the vector is empty).
	public void indexVector ()
	{
		if ((index2location == null) && (indices.length > 0))
			setIndex2Location ();
	}

	// Builds index2location and maxIndex from the current indices[].
	// Must be called at most once (asserted) and only on a non-empty vector.
	private void setIndex2Location ()
	{
		//System.out.println ("HashedSparseVector setIndex2Location indices.length="+indices.length+" maxindex="+indices[indices.length-1]);
		assert (index2location == null);
		assert (indices.length > 0);
		this.maxIndex = indices[indices.length - 1];
		this.index2location = new TIntIntHashMap (numLocations ());
		//index2location.setDefaultValue (-1);
		for (int i = 0; i < indices.length; i++)
			index2location.put (indices[i], i);
	}

	/**
	 * Sets the value at a feature index that is already present.
	 * Note: Trove's get() returns 0 for a missing key, so the contains()
	 * check is what distinguishes "missing" from "stored at location 0".
	 *
	 * @throws IllegalArgumentException if the index has no entry in this vector
	 */
	public final void setValue (int index, double value) {
		if (index2location == null)
			setIndex2Location ();
		int location = index2location.get(index);
		if (index2location.contains (index))
			values[location] = value;
		else
			throw new IllegalArgumentException ("Trying to set value that isn't present in HashedSparseVector");
	}

	// Sets the value directly by array position, bypassing the index map.
	public final void setValueAtLocation (int location, double value)
	{
		values[location] = value;
	}

	// I dislike this name, but it's consistent with DenseVector. -cas
	/**
	 * Adds {@code value} to the entry at feature {@code index}.
	 *
	 * @throws IllegalArgumentException if the index has no entry in this vector
	 */
	public void columnPlusEquals (int index, double value) {
		if (index2location == null)
			setIndex2Location ();
		int location = index2location.get(index);
		if (index2location.contains (index))
			values[location] += value;
		else
			throw new IllegalArgumentException ("Trying to set value that isn't present in HashedSparseVector");
	}

	/**
	 * Dot product with a dense vector.  A null values[] means this vector is
	 * binary (all present entries implicitly 1.0).
	 */
	public final double dotProduct (DenseVector v) {
		double ret = 0;
		if (values == null)
			for (int i = 0; i < indices.length; i++)
				ret += v.value(indices[i]);
		else
			for (int i = 0; i < indices.length; i++)
				ret += values[i] * v.value(indices[i]);
		return ret;
	}

	/**
	 * Dot product with another sparse vector, iterating v's locations and
	 * probing this vector's hash map.  The early break relies on v's indices
	 * being in ascending order — NOTE(review): presumably guaranteed by
	 * SparseVector; confirm.
	 */
	public final double dotProduct (SparseVector v)
	{
		if (indices.length == 0)
			return 0;
		if (index2location == null)
			setIndex2Location ();
		double ret = 0;
		int vNumLocs = v.numLocations();
		if (values == null) {
			// this vector is binary
			for (int i = 0; i < vNumLocs; i++) {
				int index = v.indexAtLocation(i);
				if (index > maxIndex)
					break;
				if (index2location.contains(index))
					ret += v.valueAtLocation (i);
			}
		} else {
			for (int i = 0; i < vNumLocs; i++) {
				int index = v.indexAtLocation(i);
				if (index > maxIndex)
					break;
				if (index2location.containsKey(index)) {
					ret += values[ index2location.get(index) ] * v.valueAtLocation (i);
				}
				//int location = index2location.get(index);
				//if (location >= 0)
				//  ret += values[location] * v.valueAtLocation (i);
			}
		}
		return ret;
	}

	/**
	 * this += v * factor, restricted to indices present in this vector;
	 * entries of v at indices missing here are silently ignored.
	 */
	public final void plusEqualsSparse (SparseVector v, double factor)
	{
		if (indices.length == 0)
			return;
		if (index2location == null)
			setIndex2Location ();
		int vNumLocs = v.numLocations();
		for (int i = 0; i < vNumLocs; i++) {
			int index = v.indexAtLocation(i);
			if (index > maxIndex)
				break;
			if (index2location.containsKey(index)) {
				values[ index2location.get(index) ] += v.valueAtLocation (i) * factor;
			}
			//int location = index2location.get(index);
			//if (location >= 0)
			//  values[location] += v.valueAtLocation (i) * factor;
		}
	}

	/** this += v, restricted to indices present in this vector (factor 1 case). */
	public final void plusEqualsSparse (SparseVector v)
	{
		if (indices.length == 0)
			return;
		if (index2location == null)
			setIndex2Location ();
		for (int i = 0; i < v.numLocations(); i++) {
			int index = v.indexAtLocation(i);
			if (index > maxIndex)
				break;
			int location = index2location.get(index);
			if (index2location.contains (index))
				values[location] += v.valueAtLocation (i);
		}
	}

	// Sets every present entry to v (sparsity pattern unchanged).
	public final void setAll (double v)
	{
		for (int i = 0; i < values.length; i++)
			values[i] = v;
	}

	//Serialization

	private static final long serialVersionUID = 1;
	// Version history:
	// 0 == Wrote out index2location. Probably a bad idea.
	private static final int CURRENT_SERIAL_VERSION = 1;
	static final int NULL_INTEGER = -1;

	// Serializes only maxIndex; index2location is rebuilt lazily after
	// deserialization by setIndex2Location().
	private void writeObject (ObjectOutputStream out) throws IOException {
		out.writeInt (CURRENT_SERIAL_VERSION);
		out.writeInt (maxIndex);
	}

	// Reads maxIndex; for legacy version-0 streams, reads and discards the
	// serialized index2location map (it is rebuilt on demand instead).
	private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
		int version = in.readInt ();
		maxIndex = in.readInt ();
		if (version == 0) {
			// gobble up index2location
			Object obj = in.readObject ();
			if (obj != null && !(obj instanceof TIntIntHashMap)) {
				throw new IOException ("Unexpected object in de-serialization: "+obj);
			}
		}
	}
}
| |
// Copyright 2014 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.devtools.build.lib.rules.extra;
import com.google.common.base.Function;
import com.google.common.base.Predicates;
import com.google.common.collect.Collections2;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.devtools.build.lib.actions.AbstractAction;
import com.google.devtools.build.lib.actions.Action;
import com.google.devtools.build.lib.actions.ActionExecutionContext;
import com.google.devtools.build.lib.actions.ActionExecutionException;
import com.google.devtools.build.lib.actions.Artifact;
import com.google.devtools.build.lib.actions.ArtifactResolver;
import com.google.devtools.build.lib.actions.DelegateSpawn;
import com.google.devtools.build.lib.actions.PackageRootResolutionException;
import com.google.devtools.build.lib.actions.PackageRootResolver;
import com.google.devtools.build.lib.actions.Spawn;
import com.google.devtools.build.lib.actions.SpawnActionContext;
import com.google.devtools.build.lib.analysis.actions.CommandLine;
import com.google.devtools.build.lib.analysis.actions.SpawnAction;
import com.google.devtools.build.lib.collect.nestedset.NestedSet;
import com.google.devtools.build.lib.collect.nestedset.NestedSetBuilder;
import com.google.devtools.build.lib.collect.nestedset.Order;
import com.google.devtools.build.lib.util.Preconditions;
import com.google.devtools.build.lib.vfs.FileSystemUtils;
import com.google.devtools.build.lib.vfs.PathFragment;
import java.io.IOException;
import java.util.Collection;
import java.util.Map;
import java.util.Set;
import javax.annotation.Nullable;
import javax.annotation.concurrent.GuardedBy;
/**
* Action used by extra_action rules to create an action that shadows an existing action. Runs a
* command-line using {@link SpawnActionContext} for executions.
*/
public final class ExtraAction extends SpawnAction {
  /** The action this extra action shadows; most queries delegate to it. */
  private final Action shadowedAction;
  /** If true, this action has exactly one output, which execute() touches afterwards. */
  private final boolean createDummyOutput;
  /** Additional runfiles manifests merged into the spawn's manifests in getSpawn(). */
  private final ImmutableMap<PathFragment, Artifact> runfilesManifests;
  /** Inputs belonging to the extra action itself, on top of the shadowed action's inputs. */
  private final ImmutableSet<Artifact> extraActionInputs;
  // This can be read/written from multiple threads, and so accesses should be synchronized.
  @GuardedBy("this")
  private boolean inputsKnown;

  /**
   * A long way to say (ExtraAction xa) -> xa.getShadowedAction().
   */
  public static final Function<ExtraAction, Action> GET_SHADOWED_ACTION =
      new Function<ExtraAction, Action>() {
        @Nullable
        @Override
        public Action apply(@Nullable ExtraAction extraAction) {
          return extraAction != null ? extraAction.getShadowedAction() : null;
        }
      };

  /**
   * Creates an extra action whose inputs are the union of the shadowed action's
   * inputs and {@code extraActionInputs}, and whose owner/manifests come from
   * the shadowed action.  inputsKnown starts out mirroring the shadowed action.
   */
  public ExtraAction(
      ImmutableSet<Artifact> extraActionInputs,
      Map<PathFragment, Artifact> runfilesManifests,
      Collection<Artifact> outputs,
      Action shadowedAction,
      boolean createDummyOutput,
      CommandLine argv,
      Map<String, String> environment,
      Map<String, String> executionInfo,
      String progressMessage,
      String mnemonic) {
    super(
        shadowedAction.getOwner(),
        ImmutableList.<Artifact>of(),
        createInputs(shadowedAction.getInputs(), extraActionInputs),
        outputs,
        AbstractAction.DEFAULT_RESOURCE_SET,
        argv,
        ImmutableMap.copyOf(environment),
        ImmutableMap.copyOf(executionInfo),
        progressMessage,
        getManifests(shadowedAction),
        mnemonic,
        false,
        null);
    this.shadowedAction = shadowedAction;
    this.runfilesManifests = ImmutableMap.copyOf(runfilesManifests);
    this.createDummyOutput = createDummyOutput;
    this.extraActionInputs = extraActionInputs;
    inputsKnown = shadowedAction.inputsKnown();
    if (createDummyOutput) {
      // Expecting just a single dummy file in the outputs.
      Preconditions.checkArgument(outputs.size() == 1, outputs);
    }
  }

  /** Returns the shadowed action's input manifests when it is a SpawnAction, else empty. */
  private static ImmutableMap<PathFragment, Artifact> getManifests(Action shadowedAction) {
    // If the shadowed action is a SpawnAction, then we also add the input manifests to this
    // action's input manifests.
    // TODO(bazel-team): Also handle other action classes correctly.
    if (shadowedAction instanceof SpawnAction) {
      return ((SpawnAction) shadowedAction).getInputManifests();
    }
    return ImmutableMap.of();
  }

  /** Delegates input discovery capability to the shadowed action. */
  @Override
  public boolean discoversInputs() {
    return shadowedAction.discoversInputs();
  }

  /**
   * Discovers inputs through the shadowed action and widens this action's
   * input set accordingly.  Returns null when the shadowed action is not an
   * AbstractAction (nothing discovered here).
   */
  @Nullable
  @Override
  public Collection<Artifact> discoverInputs(ActionExecutionContext actionExecutionContext)
      throws ActionExecutionException, InterruptedException {
    Preconditions.checkState(discoversInputs(), this);
    // We need to update our inputs to take account of any additional
    // inputs the shadowed action may need to do its work.
    if (shadowedAction.discoversInputs() && shadowedAction instanceof AbstractAction) {
      Iterable<Artifact> additionalInputs =
          ((AbstractAction) shadowedAction).getInputFilesForExtraAction(actionExecutionContext);
      updateInputs(createInputs(additionalInputs, extraActionInputs));
      return ImmutableSet.copyOf(additionalInputs);
    }
    return null;
  }

  @Override
  public synchronized boolean inputsKnown() {
    return inputsKnown;
  }

  /**
   * Unions the shadowed action's inputs with the extra action's own inputs,
   * preserving nested-set structure when the former is already a NestedSet.
   */
  private static NestedSet<Artifact> createInputs(
      Iterable<Artifact> shadowedActionInputs, ImmutableSet<Artifact> extraActionInputs) {
    NestedSetBuilder<Artifact> result = new NestedSetBuilder<>(Order.STABLE_ORDER);
    if (shadowedActionInputs instanceof NestedSet) {
      result.addTransitive((NestedSet<Artifact>) shadowedActionInputs);
    } else {
      result.addAll(shadowedActionInputs);
    }
    return result.addAll(extraActionInputs).build();
  }

  /** Replaces this action's inputs and marks them as fully known. */
  @Override
  public synchronized void updateInputs(Iterable<Artifact> discoveredInputs) {
    setInputs(discoveredInputs);
    inputsKnown = true;
  }

  /**
   * Resolves inputs via the shadowed action, after filtering the requested
   * paths down to those belonging to this extra action's own inputs.
   */
  @Nullable
  @Override
  public Iterable<Artifact> resolveInputsFromCache(
      ArtifactResolver artifactResolver,
      PackageRootResolver resolver,
      Collection<PathFragment> inputPaths)
      throws PackageRootResolutionException, InterruptedException {
    // We update the inputs directly from the shadowed action.
    Set<PathFragment> extraActionPathFragments =
        ImmutableSet.copyOf(Artifact.asPathFragments(extraActionInputs));
    return shadowedAction.resolveInputsFromCache(artifactResolver, resolver,
        Collections2.filter(inputPaths, Predicates.in(extraActionPathFragments)));
  }

  /**
   * {@inheritDoc}
   *
   * This method calls in to {@link AbstractAction#getInputFilesForExtraAction} and
   * {@link Action#getExtraActionInfo} of the action being shadowed from the thread executing this
   * ExtraAction. It assumes these methods are safe to call from a different thread than the thread
   * responsible for the execution of the action being shadowed.
   */
  @Override
  public void execute(ActionExecutionContext actionExecutionContext)
      throws ActionExecutionException, InterruptedException {
    // PHASE 2: execution of extra_action.
    super.execute(actionExecutionContext);

    // PHASE 3: create dummy output.
    // If the user didn't specify output, we need to create dummy output
    // to make blaze schedule this action.
    if (createDummyOutput) {
      for (Artifact output : getOutputs()) {
        try {
          FileSystemUtils.touchFile(output.getPath());
        } catch (IOException e) {
          throw new ActionExecutionException(e.getMessage(), e, this, false);
        }
      }
    }
    synchronized (this) {
      inputsKnown = true;
    }
  }

  /**
   * The spawn command for ExtraAction needs to be slightly modified from
   * regular SpawnActions:
   * -the extraActionInfo file needs to be added to the list of inputs.
   * -the extraActionInfo file that is an output file of this task is created
   * before the SpawnAction so should not be listed as one of its outputs.
   */
  // TODO(bazel-team): Add more tests that execute this code path!
  @Override
  public Spawn getSpawn() {
    final Spawn base = super.getSpawn();
    // Delegate to the base spawn, overlaying this action's runfiles manifests
    // and reporting this action's mnemonic.
    return new DelegateSpawn(base) {
      @Override public ImmutableMap<PathFragment, Artifact> getRunfilesManifests() {
        ImmutableMap.Builder<PathFragment, Artifact> builder = ImmutableMap.builder();
        builder.putAll(super.getRunfilesManifests());
        builder.putAll(runfilesManifests);
        return builder.build();
      }
      @Override public String getMnemonic() { return ExtraAction.this.getMnemonic(); }
    };
  }

  /**
   * Returns the action this extra action is 'shadowing'.
   */
  public Action getShadowedAction() {
    return shadowedAction;
  }
}
| |
/**
* Copyright 2011 Google Inc.
* Copyright 2013 Ronald W Hoffman
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.ScripterRon.BitcoinCore;
import java.io.EOFException;
import java.io.InputStream;
import java.io.IOException;
/**
* A VarInt is an unsigned variable-length encoded integer using the bitcoin encoding (called the 'compact size'
* in the reference client). It consists of a marker byte and zero or more data bytes as follows:
* <pre>
* Value Marker Data
* ====== ====== ====
* 0-252 0-252 0 bytes
* 253 to 2^16-1 253 2 bytes
* 2^16 to 2^32-1 254 4 bytes
* 2^32 to 2^64-1 255 8 bytes
* </pre>
*/
public final class VarInt {

    /** The value of this VarInt */
    private final long value;

    /** The encoded size of this VarInt */
    private int encodedSize;

    /**
     * Creates a new VarInt with the requested value
     *
     * @param       value           Requested value
     */
    public VarInt(long value) {
        this.value = value;
        encodedSize = sizeOf(value);
    }

    /**
     * Creates a new VarInt from a byte array in little-endian format
     *
     * @param       buf             Byte array
     * @param       offset          Starting offset into the array
     * @throws      EOFException    Buffer is too small
     */
    public VarInt(byte[]buf, int offset) throws EOFException {
        // Bug fix: the marker byte itself lives at buf[offset], so offset must be
        // strictly less than buf.length (the old '>' check allowed an
        // ArrayIndexOutOfBoundsException when offset == buf.length).
        if (offset >= buf.length)
            throw new EOFException("End-of-data while processing VarInt");
        int first = 0x00FF&(int)buf[offset];
        if (first < 253) {
            // 8 bits: the marker byte is the value.
            value = first;
            encodedSize = 1;
        } else if (first == 253) {
            // 16 bits: reads buf[offset+1] and buf[offset+2], so the buffer must
            // hold at least offset+3 bytes (bug fix: old check of offset+2 allowed
            // an out-of-bounds read of buf[offset+2]).
            if (offset+3 > buf.length)
                throw new EOFException("End-of-data while processing VarInt");
            value = (0x00FF&(int)buf[offset+1]) | ((0x00FF&(int)buf[offset+2])<<8);
            encodedSize = 3;
        } else if (first == 254) {
            // 32 bits: marker + 4 data bytes.
            if (offset+5 > buf.length)
                throw new EOFException("End-of-data while processing VarInt");
            value = Utils.readUint32LE(buf, offset+1);
            encodedSize = 5;
        } else {
            // 64 bits: marker + 8 data bytes.
            if (offset+9 > buf.length)
                throw new EOFException("End-of-data while processing VarInt");
            value = Utils.readUint64LE(buf, offset+1);
            encodedSize = 9;
        }
    }

    /**
     * Creates a new VarInt from an input stream encoded in little-endian format
     *
     * @param       in              Input stream
     * @throws      EOFException    End-of-data processing stream
     * @throws      IOException     I/O error processing stream
     */
    public VarInt(InputStream in) throws EOFException, IOException {
        int first = in.read();
        if (first < 0)
            throw new EOFException("End-of-data while processing VarInt");
        if (first < 253) {
            // 8 bits: the marker byte is the value.
            value = first;
            encodedSize = 1;
        } else if (first == 253) {
            // 16 bits.
            byte[] buf = new byte[2];
            readFully(in, buf);
            value = (0x00FF&(int)buf[0]) | ((0x00FF&(int)buf[1])<<8);
            encodedSize = 3;
        } else if (first == 254) {
            // 32 bits.
            byte[] buf = new byte[4];
            readFully(in, buf);
            value = Utils.readUint32LE(buf, 0);
            encodedSize = 5;
        } else {
            // 64 bits.
            byte[] buf = new byte[8];
            readFully(in, buf);
            value = Utils.readUint64LE(buf, 0);
            encodedSize = 9;
        }
    }

    /**
     * Fills the buffer from the stream, looping over partial reads.
     * Bug fix: InputStream.read(byte[],int,int) may legally return fewer bytes
     * than requested before end-of-stream (e.g. on network streams); the old
     * single-read code misreported such short reads as EOF.
     *
     * @param       in              Input stream
     * @param       buf             Buffer to fill completely
     * @throws      EOFException    Stream ended before the buffer was filled
     * @throws      IOException     I/O error processing stream
     */
    private static void readFully(InputStream in, byte[] buf) throws EOFException, IOException {
        int off = 0;
        while (off < buf.length) {
            int count = in.read(buf, off, buf.length-off);
            if (count < 0)
                throw new EOFException("End-of-data while processing VarInt");
            off += count;
        }
    }

    /**
     * Returns the value of this VarInt as an int
     *
     * @return                      Integer value
     */
    public int toInt() {
        return (int)value;
    }

    /**
     * Returns the value of this VarInt as a long
     *
     * @return                      Long value
     */
    public long toLong() {
        return value;
    }

    /**
     * Returns the encoded size of this VarInt
     *
     * @return                      Encoded size
     */
    public int getEncodedSize() {
        return encodedSize;
    }

    /**
     * Returns the encoded VarInt size based on its marker byte
     *
     * @param       bytes           Encoded byte stream
     * @param       offset          Offset of the encoded VarInt
     * @return                      Encoded size
     */
    public static int sizeOf(byte[] bytes, int offset) {
        int length;
        int varLength = (int)bytes[offset]&0xff;
        if (varLength < 253)
            length = 1;                 // Single data byte
        else if (varLength == 253)
            length = 3;                 // 1 marker + 2 data bytes
        else if (varLength == 254)
            length = 5;                 // 1 marker + 4 data bytes
        else
            length = 9;                 // 1 marker + 8 data bytes
        return length;
    }

    /**
     * Returns the encoded size of the given unsigned integer value.
     * The int is treated as unsigned (values up to 2^32-1), so the result
     * is never larger than 5.
     *
     * @param       value           Value to be encoded
     * @return                      Encoded size
     */
    public static int sizeOf(int value) {
        int minSize;
        long tValue = ((long)value)&0xffffffffL;
        if (tValue < 253L)
            minSize = 1;                // Single data byte
        else if (tValue < 65536L)
            minSize = 3;                // 1 marker + 2 data bytes
        else
            minSize = 5;                // 1 marker + 4 data bytes
        return minSize;
    }

    /**
     * Returns the encoded size of the given unsigned long value
     *
     * @param       value           Value to be encoded
     * @return                      Encoded size
     */
    public static int sizeOf(long value) {
        int minSize;
        if ((value&0xFFFFFFFF00000000L) != 0) {
            // 1 marker + 8 data bytes
            minSize = 9;
        } else if ((value&0x00000000FFFF0000L) != 0) {
            // 1 marker + 4 data bytes
            minSize = 5;
        } else if (value >= 253L) {
            // 1 marker + 2 data bytes
            minSize = 3;
        } else {
            // Single data byte
            minSize = 1;
        }
        return minSize;
    }

    /**
     * Encode the value in little-endian format
     *
     * @return                      Encoded byte stream
     */
    public byte[] encode() {
        return encode(value);
    }

    /**
     * Encode the value in little-endian format
     *
     * @param       value           Value to encode
     * @return                      Byte array
     */
    public static byte[] encode(long value) {
        byte[] bytes;
        if ((value&0xFFFFFFFF00000000L) != 0) {
            // 1 marker + 8 data bytes
            bytes = new byte[9];
            bytes[0] = (byte)255;
            Utils.uint64ToByteArrayLE(value, bytes, 1);
        } else if ((value&0x00000000FFFF0000L) != 0) {
            // 1 marker + 4 data bytes
            bytes = new byte[5];
            bytes[0] = (byte)254;
            Utils.uint32ToByteArrayLE(value, bytes, 1);
        } else if (value >= 253L) {
            // 1 marker + 2 data bytes
            bytes = new byte[]{(byte)253, (byte)value, (byte)(value>>8)};
        } else {
            // Single data byte
            bytes = new byte[]{(byte)value};
        }
        return bytes;
    }
}
| |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.buck.util.json;
import com.facebook.buck.core.exceptions.HumanReadableException;
import com.facebook.buck.core.model.UnconfiguredBuildTarget;
import com.facebook.buck.core.model.targetgraph.raw.UnconfiguredTargetNodeWithDeps;
import com.facebook.buck.core.parser.buildtargetpattern.UnconfiguredBuildTargetParser;
import com.facebook.buck.core.path.ForwardRelativePath;
import com.fasterxml.jackson.annotation.JsonInclude.Include;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.JsonToken;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.core.type.WritableTypeId;
import com.fasterxml.jackson.databind.DeserializationContext;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import com.fasterxml.jackson.databind.ObjectWriter;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.fasterxml.jackson.databind.SerializerProvider;
import com.fasterxml.jackson.databind.deser.std.FromStringDeserializer;
import com.fasterxml.jackson.databind.jsontype.TypeSerializer;
import com.fasterxml.jackson.databind.module.SimpleModule;
import com.fasterxml.jackson.databind.ser.std.ToStringSerializer;
import com.fasterxml.jackson.datatype.guava.GuavaModule;
import com.fasterxml.jackson.datatype.jdk8.Jdk8Module;
import com.fasterxml.jackson.module.kotlin.KotlinModule;
import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Reader;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Map;
import java.util.function.Function;
/**
 * Central factory for Buck's shared Jackson {@link ObjectMapper}s, readers and writers.
 *
 * <p>The pre-built READER/WRITER instances must not be reconfigured by callers; re-using
 * them avoids the significant cost of rebuilding Jackson state on every (de)serialization.
 */
public class ObjectMappers {

  // It's important to re-use these objects for perf:
  // https://github.com/FasterXML/jackson-docs/wiki/Presentation:-Jackson-Performance
  public static final ObjectReader READER;
  public static final ObjectWriter WRITER;

  /** ObjectReader that deserializes objects that had type information preserved */
  public static final ObjectReader READER_WITH_TYPE;

  /** ObjectWrite that serializes objects along with their type information */
  public static final ObjectWriter WRITER_WITH_TYPE;

  /** ObjectReader that interns custom objects on serialization, like UnconfiguredBuildTarget */
  public static final ObjectReader READER_INTERNED;

  /** Reads JSON from {@code file} into an instance of {@code clazz} using the shared reader. */
  public static <T> T readValue(Path file, Class<T> clazz) throws IOException {
    try (JsonParser parser = createParser(file)) {
      return READER.readValue(parser, clazz);
    }
  }

  /** Same as {@link #readValue(Path, Class)} but for generic types via {@link TypeReference}. */
  public static <T> T readValue(Path file, TypeReference<T> clazz) throws IOException {
    try (JsonParser parser = createParser(file)) {
      return READER.readValue(parser, clazz);
    }
  }

  /** Reads JSON from an in-memory string into an instance of {@code clazz}. */
  public static <T> T readValue(String json, Class<T> clazz) throws IOException {
    try (JsonParser parser = createParser(json)) {
      return READER.readValue(parser, clazz);
    }
  }

  /** Same as {@link #readValue(String, Class)} but for generic types via {@link TypeReference}. */
  public static <T> T readValue(String json, TypeReference<T> clazz) throws IOException {
    try (JsonParser parser = createParser(json)) {
      return READER.readValue(parser, clazz);
    }
  }

  /** Creates a parser over {@code path}; the buffered stream is closed with the parser. */
  public static JsonParser createParser(Path path) throws IOException {
    return jsonFactory.createParser(new BufferedInputStream(Files.newInputStream(path)));
  }

  public static JsonParser createParser(String json) throws IOException {
    return jsonFactory.createParser(json);
  }

  public static JsonParser createParser(byte[] json) throws IOException {
    return jsonFactory.createParser(json);
  }

  public static JsonParser createParser(InputStream stream) throws IOException {
    return jsonFactory.createParser(stream);
  }

  public static JsonParser createParser(Reader reader) throws IOException {
    return jsonFactory.createParser(reader);
  }

  public static JsonGenerator createGenerator(OutputStream stream) throws IOException {
    return jsonFactory.createGenerator(stream);
  }

  /** Converts an already-parsed JSON object map into an instance of {@code clazz}. */
  public static <T> T convertValue(Map<String, Object> map, Class<T> clazz) {
    return mapper.convertValue(map, clazz);
  }

  /** Returns a function serializing its input to a JSON string with the shared writer. */
  public static <T> Function<T, String> toJsonFunction() {
    return input -> {
      try {
        return WRITER.writeValueAsString(input);
      } catch (JsonProcessingException e) {
        throw new HumanReadableException(e, "Failed to serialize to json: " + input);
      }
    };
  }

  /** Returns a function deserializing a JSON string into an instance of {@code type}. */
  public static <T> Function<String, T> fromJsonFunction(Class<T> type) {
    return input -> {
      try {
        return readValue(input, type);
      } catch (IOException e) {
        throw new HumanReadableException(e, "Failed to read from json: " + input);
      }
    };
  }

  // This is mutable, and doesn't share a cache with the rest of Buck.
  // All uses of it should be removed.
  // Any new code should instead use READER or WRITER.
  public static ObjectMapper legacyCreate() {
    return create();
  }

  /**
   * Creates an {@link ObjectMapper} that allows to use objects without fields.
   *
   * @see SerializationFeature#FAIL_ON_EMPTY_BEANS
   */
  public static ObjectMapper createWithEmptyBeansPermitted() {
    ObjectMapper objectMapper = create();
    objectMapper.configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false);
    return objectMapper;
  }

  // Callers must not modify (i.e. reconfigure) this ObjectMapper.
  private static final ObjectMapper mapper;
  private static final ObjectMapper mapper_interned;

  // Callers must not modify (i.e. reconfigure) this JsonFactory.
  private static final JsonFactory jsonFactory;

  // NOTE: initialization order matters — jsonFactory is taken from the shared non-typed
  // mapper, so that mapper must be fully built before jsonFactory is read.
  static {
    mapper = create_without_type();
    mapper_interned = create_without_type_interned();
    READER = mapper.reader();
    READER_INTERNED = mapper_interned.reader();
    WRITER = mapper.writer();
    jsonFactory = mapper.getFactory();
    ObjectMapper mapper_with_type = create_with_type();
    READER_WITH_TYPE = mapper_with_type.reader();
    WRITER_WITH_TYPE = mapper_with_type.writer();
  }

  /** Builds the base mapper: Guava/Jdk8/Kotlin modules plus Path-as-toString handling. */
  private static ObjectMapper create() {
    ObjectMapper mapper = new ObjectMapper();
    // Disable automatic flush() after mapper.write() call, because it is usually unnecessary,
    // and it makes BufferedOutputStreams to be useless
    mapper.disable(SerializationFeature.FLUSH_AFTER_WRITE_VALUE);
    mapper.setSerializationInclusion(Include.NON_ABSENT);
    mapper.configure(JsonParser.Feature.AUTO_CLOSE_SOURCE, true);
    // Add support for serializing Guava collections.
    mapper.registerModule(new GuavaModule());
    mapper.registerModule(new Jdk8Module());
    mapper.registerModule(new KotlinModule());
    // With some version of Jackson JDK8 module, it starts to serialize Path objects using
    // getURI() function, this results for serialized paths to be absolute paths with 'file:///'
    // prefix. That does not work well with custom filesystems that Buck uses. Following hack
    // restores legacy behavior to serialize Paths using toString().
    SimpleModule pathModule = new SimpleModule("PathToString");

    /**
     * Custom Path serializer that serializes using {@link Object#toString()} method and also
     * translates all {@link Path} implementations to use generic base type
     */
    class PathSerializer extends ToStringSerializer {
      public PathSerializer() {
        super(Path.class);
      }

      @Override
      public void serializeWithType(
          Object value, JsonGenerator g, SerializerProvider provider, TypeSerializer typeSer)
          throws IOException {
        // Wrap the plain toString() value in a type prefix/suffix keyed to the generic
        // Path base type, so typed round-trips do not record concrete Path subclasses.
        WritableTypeId typeIdDef =
            typeSer.writeTypePrefix(g, typeSer.typeId(value, Path.class, JsonToken.VALUE_STRING));
        serialize(value, g, provider);
        typeSer.writeTypeSuffix(g, typeIdDef);
      }
    }

    pathModule.addSerializer(Path.class, new PathSerializer());
    pathModule.addDeserializer(
        Path.class,
        new FromStringDeserializer<Path>(Path.class) {
          @Override
          protected Path _deserialize(String value, DeserializationContext ctxt) {
            return Paths.get(value);
          }

          @Override
          protected Path _deserializeFromEmptyString() {
            // by default it returns null but we want empty Path
            return Paths.get("");
          }
        });
    mapper.registerModule(pathModule);
    return mapper;
  }

  private static ObjectMapper create_without_type() {
    ObjectMapper mapper = create();
    return addCustomModules(mapper, false);
  }

  /** Like {@link #create_without_type()} but parsed build targets are interned. */
  private static ObjectMapper create_without_type_interned() {
    ObjectMapper mapper = create();
    return addCustomModules(mapper, true);
  }

  /** Registers Buck-specific (de)serializers; {@code intern} controls build-target interning. */
  private static ObjectMapper addCustomModules(ObjectMapper mapper, boolean intern) {
    // with this mixin UnconfiguredTargetNode properties are flattened with
    // UnconfiguredTargetNodeWithDeps
    // properties
    // for prettier view. It only works for non-typed serialization.
    mapper.addMixIn(
        UnconfiguredTargetNodeWithDeps.class,
        UnconfiguredTargetNodeWithDeps.UnconfiguredTargetNodeWithDepsUnwrappedMixin.class);

    // Serialize and deserialize UnconfiguredBuildTarget as string
    SimpleModule buildTargetModule = new SimpleModule("BuildTarget");
    buildTargetModule.addSerializer(UnconfiguredBuildTarget.class, new ToStringSerializer());
    buildTargetModule.addDeserializer(
        UnconfiguredBuildTarget.class,
        new FromStringDeserializer<UnconfiguredBuildTarget>(UnconfiguredBuildTarget.class) {
          @Override
          protected UnconfiguredBuildTarget _deserialize(
              String value, DeserializationContext ctxt) {
            return UnconfiguredBuildTargetParser.parse(value, intern);
          }
        });
    mapper.registerModule(buildTargetModule);
    mapper.registerModule(forwardRelativePathModule());
    return mapper;
  }

  /** Module that (de)serializes {@link ForwardRelativePath} as its string form. */
  private static SimpleModule forwardRelativePathModule() {
    SimpleModule module = new SimpleModule();
    module.addSerializer(ForwardRelativePath.class, new ToStringSerializer());
    module.addDeserializer(
        ForwardRelativePath.class,
        new FromStringDeserializer<ForwardRelativePath>(ForwardRelativePath.class) {
          @Override
          protected ForwardRelativePath _deserialize(String value, DeserializationContext ctxt)
              throws IOException {
            return ForwardRelativePath.of(value);
          }

          @Override
          protected ForwardRelativePath _deserializeFromEmptyString() throws IOException {
            return ForwardRelativePath.EMPTY;
          }
        });
    return module;
  }

  // NOTE(review): enableDefaultTyping() is deprecated in newer Jackson and unsafe on untrusted
  // input; presumably only Buck-internal trusted data flows through WITH_TYPE — confirm.
  private static ObjectMapper create_with_type() {
    return create().enableDefaultTyping();
  }

  private ObjectMappers() {}
}
| |
/*
* Copyright 2016 KairosDB Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kairosdb.core.datastore;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.SetMultimap;
import com.google.gson.JsonObject;
import lombok.ToString;
import org.kairosdb.plugin.Aggregator;
import org.kairosdb.plugin.GroupBy;
import org.kairosdb.util.Preconditions;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import static java.util.Objects.requireNonNull;
/**
 * A single metric query: the metric name, time range, tag filters, group-bys,
 * aggregators and plugins requested by the caller.
 *
 * <p>Not thread-safe; instances are intended to be built and consumed by one query.
 */
@ToString
public class QueryMetric implements DatastoreMetricQuery
{
    private long startTime;
    private long endTime;
    // True once endTime has been explicitly supplied (constructor or setEndTime).
    private boolean endTimeSet;
    private int cacheTime;
    private String name;
    private SetMultimap<String, String> tags = HashMultimap.create();
    private List<GroupBy> groupBys = new ArrayList<>();
    private List<Aggregator> aggregators;
    private String cacheString;
    private boolean excludeTags = false;
    private int limit;
    private Order order = Order.ASC;
    private List<QueryPlugin> plugins;
    private boolean explicitTags = false;
    private JsonObject m_jsonObj;

    /**
     * Creates a query with no explicit end time (treated as Long.MAX_VALUE on read).
     *
     * @param start_time inclusive query start, epoch millis
     * @param cacheTime  seconds a cached result may be reused
     * @param name       metric name; must be non-null and non-empty
     */
    public QueryMetric(long start_time, int cacheTime, String name)
    {
        this.aggregators = new ArrayList<>();
        this.plugins = new ArrayList<>();
        this.startTime = start_time;
        this.cacheTime = cacheTime;
        this.name = Preconditions.requireNonNullOrEmpty(name);
    }

    /**
     * Creates a query with an explicit end time.
     * Delegates to the three-arg constructor to avoid duplicated initialization.
     */
    public QueryMetric(long start_time, long end_time, int cacheTime, String name)
    {
        this(start_time, cacheTime, name);
        setEndTime(end_time);
    }

    /** Appends an aggregator to the processing pipeline. @throws NullPointerException if null */
    public QueryMetric addAggregator(Aggregator aggregator)
    {
        requireNonNull(aggregator);
        this.aggregators.add(aggregator);
        return this;
    }

    /** Replaces the tag filter multimap wholesale (stored by reference, not copied). */
    public QueryMetric setTags(SetMultimap<String, String> tags)
    {
        this.tags = tags;
        return this;
    }

    /** Clears existing tag filters and copies in the given single-valued tags. */
    public QueryMetric setTags(Map<String, String> tags)
    {
        this.tags.clear();
        // Iterate entries directly instead of keySet() + get() — one lookup per entry.
        for (Map.Entry<String, String> entry : tags.entrySet())
        {
            this.tags.put(entry.getKey(), entry.getValue());
        }
        return this;
    }

    public QueryMetric setExplicitTags(boolean explicitTags)
    {
        this.explicitTags = explicitTags;
        return this;
    }

    /** Adds a single tag filter value under the given tag name. */
    public QueryMetric addTag(String name, String value)
    {
        this.tags.put(name, value);
        return this;
    }

    @Override
    public String getName()
    {
        return name;
    }

    public List<Aggregator> getAggregators()
    {
        return aggregators;
    }

    @Override
    public SetMultimap<String, String> getTags()
    {
        return tags;
    }

    @Override
    public boolean isExplicitTags()
    {
        return explicitTags;
    }

    @Override
    public long getStartTime()
    {
        return startTime;
    }

    @Override
    public long getEndTime()
    {
        // Lazily default to "no upper bound" when the caller never set an end time.
        if (!endTimeSet)
            endTime = Long.MAX_VALUE;
        return endTime;
    }

    public int getCacheTime()
    {
        return cacheTime;
    }

    public void setEndTime(long endTime)
    {
        this.endTime = endTime;
        this.endTimeSet = true;
    }

    public void setStartTime(long startTime)
    {
        this.startTime = startTime;
    }

    /** @return an unmodifiable view of the group-by clauses */
    public List<GroupBy> getGroupBys()
    {
        return Collections.unmodifiableList(groupBys);
    }

    public void addGroupBy(GroupBy groupBy)
    {
        this.groupBys.add(groupBy);
    }

    public void setCacheString(String cacheString)
    {
        this.cacheString = cacheString;
    }

    public String getCacheString()
    {
        return cacheString;
    }

    public boolean isExcludeTags()
    {
        return excludeTags;
    }

    public void setExcludeTags(boolean excludeTags)
    {
        this.excludeTags = excludeTags;
    }

    public void setLimit(int limit)
    {
        this.limit = limit;
    }

    public int getLimit()
    {
        return limit;
    }

    public void setOrder(Order order)
    {
        this.order = order;
    }

    public Order getOrder()
    {
        return order;
    }

    /** @return an unmodifiable view of the registered query plugins */
    @Override
    public List<QueryPlugin> getPlugins()
    {
        return Collections.unmodifiableList(plugins);
    }

    public void addPlugin(QueryPlugin plugin)
    {
        this.plugins.add(plugin);
    }

    // Hand-written toString superseded by Lombok's @ToString; kept (renamed) for reference.
    //@Override
    public String toString_Not()
    {
        return "QueryMetric{" +
                "startTime=" + startTime +
                ", endTime=" + endTime +
                ", endTimeSet=" + endTimeSet +
                ", cacheTime=" + cacheTime +
                ", name='" + name + '\'' +
                ", tags=" + tags +
                ", groupBys=" + groupBys +
                ", aggregators=" + aggregators +
                ", cacheString='" + cacheString + '\'' +
                ", excludeTags=" + excludeTags +
                ", limit=" + limit +
                ", order=" + order +
                ", plugins=" + plugins +
                '}';
    }

    public void setJsonObj(JsonObject obj)
    {
        m_jsonObj = obj;
    }

    public JsonObject getJsonObj()
    {
        return m_jsonObj;
    }
}
| |
/*
* Copyright (c) 2014, Oracle America, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* * Neither the name of Oracle nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.openjdk.jmh.samples;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.results.Result;
import org.openjdk.jmh.results.RunResult;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.TimeValue;
import org.openjdk.jmh.runner.options.VerboseMode;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
@State(Scope.Thread)
public class JMHSample_25_API_GA {
/**
* This example shows the rather convoluted, but fun way to exploit
* JMH API in complex scenarios. Up to this point, we haven't consumed
* the results programmatically, and hence we are missing all the fun.
*
* Let's consider this naive code, which obviously suffers from the
* performance anomalies, since current HotSpot is resistant to make
* the tail-call optimizations.
*/
    // Live input read by the benchmark method so the computation cannot be constant-folded.
    private int v;

    @Benchmark
    public int test() {
        return veryImportantCode(1000, v);
    }

    /**
     * Deliberately deep self-recursion. As the class comment explains, HotSpot is
     * resistant to tail-call optimization, and exposing that cost is the whole point
     * of this sample — do not "fix" the recursion into a loop.
     *
     * @param d remaining recursion depth
     * @param v value threaded through and eventually returned unchanged
     */
    public int veryImportantCode(int d, int v) {
        if (d == 0) {
            return v;
        } else {
            return veryImportantCode(d - 1, v);
        }
    }
/*
* We could probably make up for the absence of TCO with better inlining
* policy. But hand-tuning the policy requires knowing a lot about VM
* internals. Let's instead construct the layman's genetic algorithm
* which sifts through inlining settings trying to find the better policy.
*
* If you are not familiar with the concept of Genetic Algorithms,
* read the Wikipedia article first:
* http://en.wikipedia.org/wiki/Genetic_algorithm
*
* VM experts can guess which option should be tuned to get the max
* performance. Try to run the sample and see if it improves performance.
*/
public static void main(String[] args) throws RunnerException {
// These are our base options. We will mix these options into the
// measurement runs. That is, all measurement runs will inherit these,
// see how it's done below.
Options baseOpts = new OptionsBuilder()
.include(JMHSample_25_API_GA.class.getName())
.warmupTime(TimeValue.milliseconds(200))
.measurementTime(TimeValue.milliseconds(200))
.warmupIterations(5)
.measurementIterations(5)
.forks(1)
.verbosity(VerboseMode.SILENT)
.build();
// Initial population
Population pop = new Population();
final int POPULATION = 10;
for (int c = 0; c < POPULATION; c++) {
pop.addChromosome(new Chromosome(baseOpts));
}
// Make a few rounds of optimization:
final int GENERATIONS = 100;
for (int g = 0; g < GENERATIONS; g++) {
System.out.println("Entering generation " + g);
// Get the baseline score.
// We opt to remeasure it in order to get reliable current estimate.
RunResult runner = new Runner(baseOpts).runSingle();
Result baseResult = runner.getPrimaryResult();
// Printing a nice table...
System.out.println("---------------------------------------");
System.out.printf("Baseline score: %10.2f %s%n",
baseResult.getScore(),
baseResult.getScoreUnit()
);
for (Chromosome c : pop.getAll()) {
System.out.printf("%10.2f %s (%+10.2f%%) %s%n",
c.getScore(),
baseResult.getScoreUnit(),
(c.getScore() / baseResult.getScore() - 1) * 100,
c.toString()
);
}
System.out.println();
Population newPop = new Population();
// Copy out elite solutions
final int ELITE = 2;
for (Chromosome c : pop.getAll().subList(0, ELITE)) {
newPop.addChromosome(c);
}
// Cross-breed the rest of new population
while (newPop.size() < pop.size()) {
Chromosome p1 = pop.selectToBreed();
Chromosome p2 = pop.selectToBreed();
newPop.addChromosome(p1.crossover(p2).mutate());
newPop.addChromosome(p2.crossover(p1).mutate());
}
pop = newPop;
}
}
/**
* Population.
*/
public static class Population {
private final List<Chromosome> list = new ArrayList<Chromosome>();
public void addChromosome(Chromosome c) {
list.add(c);
Collections.sort(list);
}
/**
* Select the breeding material.
* Solutions with better score have better chance to be selected.
* @return breed
*/
public Chromosome selectToBreed() {
double totalScore = 0D;
for (Chromosome c : list) {
totalScore += c.score();
}
double thresh = Math.random() * totalScore;
for (Chromosome c : list) {
if (thresh < 0) return c;
thresh =- c.score();
}
throw new IllegalStateException("Can not choose");
}
public int size() {
return list.size();
}
public List<Chromosome> getAll() {
return list;
}
}
/**
* Chromosome: encodes solution.
*/
public static class Chromosome implements Comparable<Chromosome> {
// Current score is not yet computed.
double score = Double.NEGATIVE_INFINITY;
// Base options to mix in
final Options baseOpts;
// These are current HotSpot defaults.
int freqInlineSize = 325;
int inlineSmallCode = 1000;
int maxInlineLevel = 9;
int maxInlineSize = 35;
int maxRecursiveInlineLevel = 1;
int minInliningThreshold = 250;
public Chromosome(Options baseOpts) {
this.baseOpts = baseOpts;
}
public double score() {
if (score != Double.NEGATIVE_INFINITY) {
// Already got the score, shortcutting
return score;
}
try {
// Add the options encoded by this solution:
// a) Mix in base options.
// b) Add JVM arguments: we opt to parse the
// stringly representation to make the example
// shorter. There are, of course, cleaner ways
// to do this.
Options theseOpts = new OptionsBuilder()
.parent(baseOpts)
.jvmArgs(toString().split("[ ]"))
.build();
// Run through JMH and get the result back.
RunResult runResult = new Runner(theseOpts).runSingle();
score = runResult.getPrimaryResult().getScore();
} catch (RunnerException e) {
// Something went wrong, the solution is defective
score = Double.MIN_VALUE;
}
return score;
}
@Override
public int compareTo(Chromosome o) {
// Order by score, descending.
return -Double.valueOf(score()).compareTo(o.score());
}
@Override
public String toString() {
return "-XX:FreqInlineSize=" + freqInlineSize +
" -XX:InlineSmallCode=" + inlineSmallCode +
" -XX:MaxInlineLevel=" + maxInlineLevel +
" -XX:MaxInlineSize=" + maxInlineSize +
" -XX:MaxRecursiveInlineLevel=" + maxRecursiveInlineLevel +
" -XX:MinInliningThreshold=" + minInliningThreshold;
}
public Chromosome crossover(Chromosome other) {
// Perform crossover:
// While this is a very naive way to perform crossover, it still works.
final double CROSSOVER_PROB = 0.1;
Chromosome result = new Chromosome(baseOpts);
result.freqInlineSize = (Math.random() < CROSSOVER_PROB) ?
this.freqInlineSize : other.freqInlineSize;
result.inlineSmallCode = (Math.random() < CROSSOVER_PROB) ?
this.inlineSmallCode : other.inlineSmallCode;
result.maxInlineLevel = (Math.random() < CROSSOVER_PROB) ?
this.maxInlineLevel : other.maxInlineLevel;
result.maxInlineSize = (Math.random() < CROSSOVER_PROB) ?
this.maxInlineSize : other.maxInlineSize;
result.maxRecursiveInlineLevel = (Math.random() < CROSSOVER_PROB) ?
this.maxRecursiveInlineLevel : other.maxRecursiveInlineLevel;
result.minInliningThreshold = (Math.random() < CROSSOVER_PROB) ?
this.minInliningThreshold : other.minInliningThreshold;
return result;
}
public Chromosome mutate() {
// Perform mutation:
// Again, this is a naive way to do mutation, but it still works.
Chromosome result = new Chromosome(baseOpts);
result.freqInlineSize = (int) randomChange(freqInlineSize);
result.inlineSmallCode = (int) randomChange(inlineSmallCode);
result.maxInlineLevel = (int) randomChange(maxInlineLevel);
result.maxInlineSize = (int) randomChange(maxInlineSize);
result.maxRecursiveInlineLevel = (int) randomChange(maxRecursiveInlineLevel);
result.minInliningThreshold = (int) randomChange(minInliningThreshold);
return result;
}
private double randomChange(double v) {
final double MUTATE_PROB = 0.5;
if (Math.random() < MUTATE_PROB) {
if (Math.random() < 0.5) {
return v / (Math.random() * 2);
} else {
return v * (Math.random() * 2);
}
} else {
return v;
}
}
public double getScore() {
return score;
}
}
}
| |
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client;
import static com.google.common.base.Charsets.UTF_8;
import static org.apache.bookkeeper.util.BookKeeperConstants.FEATURE_DISABLE_ENSEMBLE_CHANGE;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import com.google.common.util.concurrent.RateLimiter;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.bookkeeper.conf.ClientConfiguration;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.feature.SettableFeature;
import org.apache.bookkeeper.feature.SettableFeatureProvider;
import org.apache.bookkeeper.net.BookieSocketAddress;
import org.apache.bookkeeper.test.BookKeeperClusterTestCase;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Test Case on Disabling Ensemble Change Feature.
*/
public class TestDisableEnsembleChange extends BookKeeperClusterTestCase {

    private static final Logger logger = LoggerFactory.getLogger(TestDisableEnsembleChange.class);

    public TestDisableEnsembleChange() {
        // Cluster of 4 bookies.
        super(4);
    }

    @Test
    public void testDisableEnsembleChange() throws Exception {
        disableEnsembleChangeTest(true);
    }

    @Test
    public void testDisableEnsembleChangeNotEnoughBookies() throws Exception {
        disableEnsembleChangeTest(false);
    }

    /**
     * Writes with ensemble change disabled, kills a bookie, verifies the ensemble is
     * unchanged, then re-enables ensemble change and checks recovery behavior.
     *
     * @param startNewBookie whether a replacement bookie is started after re-enabling
     */
    void disableEnsembleChangeTest(boolean startNewBookie) throws Exception {
        ClientConfiguration conf = new ClientConfiguration();
        conf.setMetadataServiceUri(metadataServiceUri)
                .setDelayEnsembleChange(false)
                .setDisableEnsembleChangeFeatureName(FEATURE_DISABLE_ENSEMBLE_CHANGE);

        // NOTE(review): bkc is never closed in this test; consider try-with-resources.
        SettableFeatureProvider featureProvider = new SettableFeatureProvider("test", 0);
        BookKeeper bkc = BookKeeper.forConfig(conf)
                .featureProvider(featureProvider)
                .build();

        SettableFeature disableEnsembleChangeFeature = featureProvider.getFeature(FEATURE_DISABLE_ENSEMBLE_CHANGE);
        disableEnsembleChangeFeature.set(true);

        final byte[] password = new byte[0];
        final LedgerHandle lh = bkc.createLedger(4, 3, 2, BookKeeper.DigestType.CRC32, password);
        final AtomicBoolean finished = new AtomicBoolean(false);
        final AtomicBoolean failTest = new AtomicBoolean(false);
        final byte[] entry = "test-disable-ensemble-change".getBytes(UTF_8);

        assertEquals(1, lh.getLedgerMetadata().getEnsembles().size());
        ArrayList<BookieSocketAddress> ensembleBeforeFailure =
                new ArrayList<>(lh.getLedgerMetadata().getEnsembles().entrySet().iterator().next().getValue());

        // Throttled background writer keeps appending entries during the bookie failure.
        final RateLimiter rateLimiter = RateLimiter.create(10);
        Thread addThread = new Thread() {
            @Override
            public void run() {
                try {
                    while (!finished.get()) {
                        rateLimiter.acquire();
                        lh.addEntry(entry);
                    }
                } catch (Exception e) {
                    logger.error("Exception on adding entry : ", e);
                    failTest.set(true);
                }
            }
        };
        addThread.start();
        Thread.sleep(2000);
        killBookie(0);
        Thread.sleep(2000);
        finished.set(true);
        addThread.join();
        assertFalse("Should not fail adding entries facing one bookie failure when disable ensemble change",
                failTest.get());

        // check the ensemble after failure
        assertEquals("No new ensemble should be added when disable ensemble change.",
                1, lh.getLedgerMetadata().getEnsembles().size());
        ArrayList<BookieSocketAddress> ensembleAfterFailure =
                new ArrayList<>(lh.getLedgerMetadata().getEnsembles().entrySet().iterator().next().getValue());
        assertArrayEquals(ensembleBeforeFailure.toArray(new BookieSocketAddress[ensembleBeforeFailure.size()]),
                ensembleAfterFailure.toArray(new BookieSocketAddress[ensembleAfterFailure.size()]));

        // enable ensemble change
        disableEnsembleChangeFeature.set(false);
        if (startNewBookie) {
            startNewBookie();
        }

        // reset add thread
        finished.set(false);
        final CountDownLatch failLatch = new CountDownLatch(1);
        addThread = new Thread() {
            @Override
            public void run() {
                try {
                    while (!finished.get()) {
                        lh.addEntry(entry);
                    }
                } catch (Exception e) {
                    logger.error("Exception on adding entry : ", e);
                    failLatch.countDown();
                    failTest.set(true);
                }
            }
        };
        addThread.start();
        failLatch.await(4000, TimeUnit.MILLISECONDS);
        finished.set(true);
        addThread.join();

        if (startNewBookie) {
            assertFalse("Should not fail adding entries when enable ensemble change again.",
                    failTest.get());
            assertFalse("Ledger should be closed when enable ensemble change again.",
                    lh.getLedgerMetadata().isClosed());
            assertEquals("New ensemble should be added when enable ensemble change again.",
                    2, lh.getLedgerMetadata().getEnsembles().size());
        } else {
            assertTrue("Should fail adding entries when enable ensemble change again.",
                    failTest.get());
            assertTrue("Ledger should be closed when enable ensemble change again.",
                    lh.getLedgerMetadata().isClosed());
        }
    }

    /**
     * With ensemble change disabled, an add stalls on a killed bookie and completes
     * once that same bookie is restarted.
     */
    @Test
    public void testRetryFailureBookie() throws Exception {
        ClientConfiguration conf = new ClientConfiguration();
        conf.setMetadataServiceUri(metadataServiceUri)
                .setDelayEnsembleChange(false)
                .setDisableEnsembleChangeFeatureName(FEATURE_DISABLE_ENSEMBLE_CHANGE);

        SettableFeatureProvider featureProvider = new SettableFeatureProvider("test", 0);
        BookKeeper bkc = BookKeeper.forConfig(conf)
                .featureProvider(featureProvider)
                .build();

        SettableFeature disableEnsembleChangeFeature = featureProvider.getFeature(FEATURE_DISABLE_ENSEMBLE_CHANGE);
        disableEnsembleChangeFeature.set(true);

        LedgerHandle lh = bkc.createLedger(4, 4, 4, BookKeeper.DigestType.CRC32, new byte[] {});
        // FIX: was getBytes() with the platform default charset; use UTF-8 explicitly.
        byte[] entry = "testRetryFailureBookie".getBytes(UTF_8);
        for (int i = 0; i < 10; i++) {
            lh.addEntry(entry);
        }
        // kill a bookie
        ServerConfiguration killedConf = killBookie(0);

        final AtomicInteger res = new AtomicInteger(0xdeadbeef);
        final CountDownLatch addLatch = new CountDownLatch(1);
        AsyncCallback.AddCallback cb = new AsyncCallback.AddCallback() {
            @Override
            public void addComplete(int rc, LedgerHandle lh, long entryId, Object ctx) {
                logger.info("Add entry {} completed : rc {}.", entryId, rc);
                res.set(rc);
                addLatch.countDown();
            }
        };
        lh.asyncAddEntry(entry, cb, null);
        assertFalse("Add entry operation should not complete.",
                addLatch.await(1000, TimeUnit.MILLISECONDS));
        // FIX: assertEquals takes (expected, actual) — arguments were reversed.
        assertEquals(0xdeadbeef, res.get());
        // start the original bookie
        bsConfs.add(killedConf);
        bs.add(startBookie(killedConf));
        assertTrue("Add entry operation should complete at this point.",
                addLatch.await(10000, TimeUnit.MILLISECONDS));
        assertEquals(BKException.Code.OK, res.get());
    }

    /**
     * With ensemble change disabled, an add stalls on a slow (sleeping) bookie across
     * multiple timeouts and completes once the bookie wakes up.
     */
    @Test
    public void testRetrySlowBookie() throws Exception {
        final int readTimeout = 2;

        ClientConfiguration conf = new ClientConfiguration();
        conf.setReadEntryTimeout(readTimeout)
                .setAddEntryTimeout(readTimeout)
                .setDelayEnsembleChange(false)
                .setDisableEnsembleChangeFeatureName(FEATURE_DISABLE_ENSEMBLE_CHANGE)
                .setMetadataServiceUri(metadataServiceUri);

        SettableFeatureProvider featureProvider = new SettableFeatureProvider("test", 0);
        BookKeeper bkc = BookKeeper.forConfig(conf)
                .featureProvider(featureProvider)
                .build();

        SettableFeature disableEnsembleChangeFeature = featureProvider.getFeature(FEATURE_DISABLE_ENSEMBLE_CHANGE);
        disableEnsembleChangeFeature.set(true);

        LedgerHandle lh = bkc.createLedger(4, 4, 4, BookKeeper.DigestType.CRC32, new byte[] {});
        // FIX: was getBytes() with the platform default charset; use UTF-8 explicitly.
        byte[] entry = "testRetryFailureBookie".getBytes(UTF_8);
        for (int i = 0; i < 10; i++) {
            lh.addEntry(entry);
        }

        List<BookieSocketAddress> curEns = lh.getLedgerMetadata().currentEnsemble;

        final CountDownLatch wakeupLatch = new CountDownLatch(1);
        final CountDownLatch suspendLatch = new CountDownLatch(1);
        sleepBookie(curEns.get(2), wakeupLatch, suspendLatch);

        suspendLatch.await();

        final AtomicInteger res = new AtomicInteger(0xdeadbeef);
        final CountDownLatch addLatch = new CountDownLatch(1);
        AsyncCallback.AddCallback cb = new AsyncCallback.AddCallback() {
            @Override
            public void addComplete(int rc, LedgerHandle lh, long entryId, Object ctx) {
                logger.info("Add entry {} completed : rc {}.", entryId, rc);
                res.set(rc);
                addLatch.countDown();
            }
        };
        lh.asyncAddEntry(entry, cb, null);
        assertFalse("Add entry operation should not complete.",
                addLatch.await(1000, TimeUnit.MILLISECONDS));
        // FIX: assertEquals takes (expected, actual) — arguments were reversed.
        assertEquals(0xdeadbeef, res.get());
        // wait until read timeout
        assertFalse("Add entry operation should not complete even timeout.",
                addLatch.await(readTimeout, TimeUnit.SECONDS));
        assertEquals(0xdeadbeef, res.get());
        // wait one more read timeout, to ensure we resend multiple retries
        // to ensure it works correctly
        assertFalse("Add entry operation should not complete even timeout.",
                addLatch.await(readTimeout, TimeUnit.SECONDS));
        assertEquals(0xdeadbeef, res.get());
        // wakeup the sleep bookie
        wakeupLatch.countDown();
        assertTrue("Add entry operation should complete at this point.",
                addLatch.await(10000, TimeUnit.MILLISECONDS));
        assertEquals(BKException.Code.OK, res.get());
    }
}
| |
/**
* Copyright 2010 the original author or authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.zkclient;
import com.github.zkclient.exception.ZkException;
import org.apache.zookeeper.client.FourLetterWordMain;
import org.apache.zookeeper.server.ServerConfig;
import org.apache.zookeeper.server.ZooKeeperServerMain;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import java.io.File;
import java.net.ConnectException;
/**
 * Embedded single-node ZooKeeper server for use in tests.
 *
 * <p>{@link #start()} launches the server on a background daemon thread and
 * connects a {@link ZkClient} to it; {@link #shutdown()} closes the client,
 * stops the server and waits until the port stops answering.
 */
public class ZkServer extends ZooKeeperServerMain {
    private final static Logger LOG = LoggerFactory.getLogger(ZkServer.class);
    public static final int DEFAULT_PORT = 2181;
    public static final int DEFAULT_TICK_TIME = 5000;
    public static final int DEFAULT_MIN_SESSION_TIMEOUT = 2 * DEFAULT_TICK_TIME;
    private final String _dataDir;
    // NOTE(review): _logDir is stored but never passed to the server config, so
    // transaction logs end up under _dataDir as well — confirm this is intended.
    private final String _logDir;
    private ZkClient _zkClient;
    private final int _port;
    private final int _tickTime;
    private final int _minSessionTimeout;
    // volatile: shutdown() may run on a different thread than start()
    private volatile boolean shutdown = false;
    private boolean daemon = true;
    public ZkServer(String dataDir, String logDir) {
        this(dataDir, logDir, DEFAULT_PORT);
    }
    public ZkServer(String dataDir, String logDir, int port) {
        this(dataDir, logDir, port, DEFAULT_TICK_TIME);
    }
    public ZkServer(String dataDir, String logDir, int port, int tickTime) {
        this(dataDir, logDir, port, tickTime, DEFAULT_MIN_SESSION_TIMEOUT);
    }
    /**
     * @param dataDir           directory for snapshots (and, currently, txn logs too)
     * @param logDir            directory intended for transaction logs (see field note)
     * @param port              client port the server listens on
     * @param tickTime          ZooKeeper tick time in milliseconds
     * @param minSessionTimeout minimum session timeout in milliseconds
     */
    public ZkServer(String dataDir, String logDir, int port, int tickTime, int minSessionTimeout) {
        _dataDir = dataDir;
        _logDir = logDir;
        _port = port;
        _tickTime = tickTime;
        _minSessionTimeout = minSessionTimeout;
    }
    public int getPort() {
        return _port;
    }
    /** Starts the server and connects the embedded {@link ZkClient}. */
    @PostConstruct
    public void start() {
        shutdown = false;
        startZkServer();
        _zkClient = new ZkClient("localhost:" + _port, 10000);
    }
    private void startZkServer() {
        final int port = _port;
        if (!ZkClientUtils.isPortFree(port)) {
            throw new IllegalStateException("Zookeeper port " + port + " was already in use. Running in single machine mode?");
        }
        final File dataDir = new File(_dataDir);
        dataDir.mkdirs();
        // single zk server
        LOG.info("Start single zookeeper server, port={} data={} ", port, dataDir.getAbsolutePath());
        final ZooKeeperServerMain serverMain = this;
        // args: clientPort, dataDir, tickTime, maxClientCnxns
        final InnerServerConfig config = new InnerServerConfig();
        config.parse(new String[]{"" + port, dataDir.getAbsolutePath(), "" + _tickTime, "60"});
        config.setMinSessionTimeout(_minSessionTimeout);
        final String threadName = "inner-zkserver-" + port;
        final Thread innerThread = new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    serverMain.runFromConfig(config);
                } catch (Exception e) {
                    // NOTE(review): this only kills the server thread; callers of
                    // start() are not directly notified of the failure.
                    throw new ZkException("Unable to start single ZooKeeper server.", e);
                }
            }
        }, threadName);
        innerThread.setDaemon(daemon);
        innerThread.start();
        // Block until the server answers "stat" (or the 30s timeout expires).
        waitForServerUp(port, 30000, false);
    }
    /** Stops the server (idempotent): closes the client, shuts down, waits for the port. */
    @PreDestroy
    public void shutdown() {
        if (!shutdown) {
            shutdown = true;
            LOG.info("Shutting down ZkServer port={}...", _port);
            if (_zkClient != null) {
                try {
                    _zkClient.close();
                } catch (ZkException e) {
                    // keep the exception so close failures are diagnosable
                    LOG.warn("Error on closing zkclient: " + e.getClass().getName(), e);
                }
                _zkClient = null;
            }
            super.shutdown();
            waitForServerDown(_port, 30000, false);
            LOG.info("Shutting down ZkServer port={}...done", _port);
        }
    }
    public ZkClient getZkClient() {
        return _zkClient;
    }
    /** ServerConfig subclass exposing the protected minSessionTimeout field. */
    class InnerServerConfig extends ServerConfig {
        public void setMinSessionTimeout(int minSessionTimeout) {
            this.minSessionTimeout = minSessionTimeout;
        }
    }
    /**
     * Polls the "stat" four-letter command until the server reports itself up.
     *
     * @param port    client port to probe on 127.0.0.1
     * @param timeout maximum time to wait, in milliseconds
     * @param secure  unused; kept for API compatibility
     * @return true if a read-write server came up within the timeout
     */
    public static boolean waitForServerUp(int port, long timeout, boolean secure) {
        long start = System.currentTimeMillis();
        while (true) {
            try {
                // if there are multiple hostports, just take the first one
                String result = FourLetterWordMain.send4LetterWord("127.0.0.1", port, "stat");
                if (result.startsWith("Zookeeper version:") &&
                        !result.contains("READ-ONLY")) {
                    return true;
                }
            } catch (ConnectException e) {
                // ignore as this is expected, do not log stacktrace
                LOG.debug("server {} not up: {}", port, e.toString());
            } catch (Exception e) {
                // ignore as this is expected
                LOG.info("server {} not up", port, e);
            }
            if (System.currentTimeMillis() > start + timeout) {
                break;
            }
            try {
                Thread.sleep(250);
            } catch (InterruptedException e) {
                // Restore interrupt status and stop waiting instead of busy-spinning.
                Thread.currentThread().interrupt();
                return false;
            }
        }
        return false;
    }
    /**
     * Polls the "stat" four-letter command until the server stops answering.
     *
     * @param port    client port to probe on 127.0.0.1
     * @param timeout maximum time to wait, in milliseconds
     * @param secure  unused; kept for API compatibility
     * @return true if the server went down within the timeout
     */
    public static boolean waitForServerDown(int port, long timeout, boolean secure) {
        long start = System.currentTimeMillis();
        while (true) {
            try {
                FourLetterWordMain.send4LetterWord("127.0.0.1", port, "stat");
            } catch (Exception e) {
                // any failure to answer means the server is down
                return true;
            }
            if (System.currentTimeMillis() > start + timeout) {
                break;
            }
            try {
                Thread.sleep(250);
            } catch (InterruptedException e) {
                // Restore interrupt status and stop waiting instead of busy-spinning.
                Thread.currentThread().interrupt();
                return false;
            }
        }
        return false;
    }
}
| |
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver.wal;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
import java.io.FilterInputStream;
import java.io.IOException;
import java.lang.reflect.Field;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.NavigableMap;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
import org.apache.hadoop.hbase.regionserver.DefaultStoreFlusher;
import org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker;
import org.apache.hadoop.hbase.regionserver.FlushRequestListener;
import org.apache.hadoop.hbase.regionserver.FlushRequester;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.MemStoreSizing;
import org.apache.hadoop.hbase.regionserver.MemStoreSnapshot;
import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils.StreamLacksCapabilityException;
import org.apache.hadoop.hbase.util.EnvironmentEdge;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HFileTestUtil;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.wal.WALKeyImpl;
import org.apache.hadoop.hbase.wal.WALSplitUtil;
import org.apache.hadoop.hbase.wal.WALSplitter;
import org.apache.hadoop.hdfs.DFSInputStream;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Test replay of edits out of a WAL split.
*/
public abstract class AbstractTestWALReplay {
private static final Logger LOG = LoggerFactory.getLogger(AbstractTestWALReplay.class);
static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private final EnvironmentEdge ee = EnvironmentEdgeManager.getDelegate();
private Path hbaseRootDir = null;
private String logName;
private Path oldLogDir;
private Path logDir;
private FileSystem fs;
private Configuration conf;
private WALFactory wals;
@Rule
public final TestName currentTest = new TestName();
  /**
   * Boots a 3-datanode mini DFS/HBase cluster once for the whole class and
   * points hbase.rootdir at a qualified /hbase path on that cluster.
   */
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    // The below config supported by 0.20-append and CDH3b2
    conf.setInt("dfs.client.block.recovery.retries", 2);
    TEST_UTIL.startMiniCluster(3);
    Path hbaseRootDir =
      TEST_UTIL.getDFSCluster().getFileSystem().makeQualified(new Path("/hbase"));
    LOG.info("hbase.rootdir=" + hbaseRootDir);
    FSUtils.setRootDir(conf, hbaseRootDir);
  }
  /** Tears the shared mini cluster down after all tests in the class have run. */
  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }
  /**
   * Per-test setup: fresh Configuration, a WAL directory derived from the
   * (unique) test method name, and a clean root dir on DFS.
   */
  @Before
  public void setUp() throws Exception {
    this.conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
    this.fs = TEST_UTIL.getDFSCluster().getFileSystem();
    this.hbaseRootDir = FSUtils.getRootDir(this.conf);
    this.oldLogDir = new Path(this.hbaseRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
    // Fake server name keyed off the test method so each test gets its own WAL dir.
    String serverName =
      ServerName.valueOf(currentTest.getMethodName() + "-manual", 16010, System.currentTimeMillis())
        .toString();
    this.logName = AbstractFSWALProvider.getWALDirectoryName(serverName);
    this.logDir = new Path(this.hbaseRootDir, logName);
    // Start from an empty root dir so leftovers from earlier tests can't interfere.
    if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseRootDir)) {
      TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true);
    }
    this.wals = new WALFactory(conf, currentTest.getMethodName());
  }
  /** Closes the per-test WALFactory and removes everything under the root dir. */
  @After
  public void tearDown() throws Exception {
    this.wals.close();
    TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true);
  }
/*
* @param p Directory to cleanup
*/
private void deleteDir(final Path p) throws IOException {
if (this.fs.exists(p)) {
if (!this.fs.delete(p, true)) {
throw new IOException("Failed remove of " + p);
}
}
}
/**
*
* @throws Exception
*/
@Test
public void testReplayEditsAfterRegionMovedWithMultiCF() throws Exception {
final TableName tableName =
TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
byte[] family1 = Bytes.toBytes("cf1");
byte[] family2 = Bytes.toBytes("cf2");
byte[] qualifier = Bytes.toBytes("q");
byte[] value = Bytes.toBytes("testV");
byte[][] familys = { family1, family2 };
TEST_UTIL.createTable(tableName, familys);
Table htable = TEST_UTIL.getConnection().getTable(tableName);
Put put = new Put(Bytes.toBytes("r1"));
put.addColumn(family1, qualifier, value);
htable.put(put);
ResultScanner resultScanner = htable.getScanner(new Scan());
int count = 0;
while (resultScanner.next() != null) {
count++;
}
resultScanner.close();
assertEquals(1, count);
MiniHBaseCluster hbaseCluster = TEST_UTIL.getMiniHBaseCluster();
List<HRegion> regions = hbaseCluster.getRegions(tableName);
assertEquals(1, regions.size());
// move region to another regionserver
Region destRegion = regions.get(0);
int originServerNum = hbaseCluster.getServerWith(destRegion.getRegionInfo().getRegionName());
assertTrue("Please start more than 1 regionserver",
hbaseCluster.getRegionServerThreads().size() > 1);
int destServerNum = 0;
while (destServerNum == originServerNum) {
destServerNum++;
}
HRegionServer originServer = hbaseCluster.getRegionServer(originServerNum);
HRegionServer destServer = hbaseCluster.getRegionServer(destServerNum);
// move region to destination regionserver
TEST_UTIL.moveRegionAndWait(destRegion.getRegionInfo(), destServer.getServerName());
// delete the row
Delete del = new Delete(Bytes.toBytes("r1"));
htable.delete(del);
resultScanner = htable.getScanner(new Scan());
count = 0;
while (resultScanner.next() != null) {
count++;
}
resultScanner.close();
assertEquals(0, count);
// flush region and make major compaction
HRegion region =
(HRegion) destServer.getOnlineRegion(destRegion.getRegionInfo().getRegionName());
region.flush(true);
// wait to complete major compaction
for (HStore store : region.getStores()) {
store.triggerMajorCompaction();
}
region.compact(true);
// move region to origin regionserver
TEST_UTIL.moveRegionAndWait(destRegion.getRegionInfo(), originServer.getServerName());
// abort the origin regionserver
originServer.abort("testing");
// see what we get
Result result = htable.get(new Get(Bytes.toBytes("r1")));
if (result != null) {
assertTrue("Row is deleted, but we get" + result.toString(),
(result == null) || result.isEmpty());
}
resultScanner.close();
}
/**
* Tests for hbase-2727.
* @throws Exception
* @see <a href="https://issues.apache.org/jira/browse/HBASE-2727">HBASE-2727</a>
*/
  @Test
  public void test2727() throws Exception {
    // Test being able to have > 1 set of edits in the recovered.edits directory.
    // Ensure edits are replayed properly.
    final TableName tableName =
        TableName.valueOf("test2727");
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    Path basedir = FSUtils.getTableDir(hbaseRootDir, tableName);
    deleteDir(basedir);
    HTableDescriptor htd = createBasic3FamilyHTD(tableName);
    // Create the region once (so metadata exists) and close it immediately.
    Region region2 = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
    HBaseTestingUtility.closeRegionAndWAL(region2);
    final byte [] rowName = tableName.getName();
    WAL wal1 = createWAL(this.conf, hbaseRootDir, logName);
    // Add 1k to each family.
    final int countPerFamily = 1000;
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for(byte[] fam : htd.getFamiliesKeys()) {
      scopes.put(fam, 0);
    }
    for (HColumnDescriptor hcd: htd.getFamilies()) {
      addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily, ee,
          wal1, htd, mvcc, scopes);
    }
    // First split: produces the first set of recovered.edits files.
    wal1.shutdown();
    runWALSplit(this.conf);
    WAL wal2 = createWAL(this.conf, hbaseRootDir, logName);
    // Add 1k to each family.
    for (HColumnDescriptor hcd: htd.getFamilies()) {
      addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily,
          ee, wal2, htd, mvcc, scopes);
    }
    // Second split: a second set of recovered.edits now coexists with the first.
    wal2.shutdown();
    runWALSplit(this.conf);
    WAL wal3 = createWAL(this.conf, hbaseRootDir, logName);
    try {
      // Opening must replay both recovered.edits sets.
      HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal3);
      long seqid = region.getOpenSeqNum();
      // The regions opens with sequenceId as 1. With 6k edits, its sequence number reaches 6k + 1.
      // When opened, this region would apply 6k edits, and increment the sequenceId by 1
      assertTrue(seqid > mvcc.getWritePoint());
      assertEquals(seqid - 1, mvcc.getWritePoint());
      LOG.debug("region.getOpenSeqNum(): " + region.getOpenSeqNum() + ", wal3.id: "
          + mvcc.getReadPoint());
      // TODO: Scan all.
      region.close();
    } finally {
      wal3.close();
    }
  }
/**
* Test case of HRegion that is only made out of bulk loaded files. Assert
* that we don't 'crash'.
* @throws IOException
* @throws IllegalAccessException
* @throws NoSuchFieldException
* @throws IllegalArgumentException
* @throws SecurityException
*/
  @Test
  public void testRegionMadeOfBulkLoadedFilesOnly()
      throws IOException, SecurityException, IllegalArgumentException,
      NoSuchFieldException, IllegalAccessException, InterruptedException {
    final TableName tableName =
        TableName.valueOf("testRegionMadeOfBulkLoadedFilesOnly");
    final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = new Path(this.hbaseRootDir, tableName.getNameAsString());
    deleteDir(basedir);
    final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
    Region region2 = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
    HBaseTestingUtility.closeRegionAndWAL(region2);
    WAL wal = createWAL(this.conf, hbaseRootDir, logName);
    HRegion region = HRegion.openHRegion(hri, htd, wal, this.conf);
    byte [] family = htd.getFamilies().iterator().next().getName();
    // Bulk-load a single HFile containing 10 rows spanning ""..."z".
    Path f = new Path(basedir, "hfile");
    HFileTestUtil.createHFile(this.conf, fs, f, family, family, Bytes.toBytes(""),
        Bytes.toBytes("z"), 10);
    List<Pair<byte[], String>> hfs = new ArrayList<>(1);
    hfs.add(Pair.newPair(family, f.toString()));
    region.bulkLoadHFiles(hfs, true, null);
    // Add an edit so something in the WAL
    byte[] row = tableName.getName();
    region.put((new Put(row)).addColumn(family, family, family));
    wal.sync();
    // 10 bulk-loaded rows + 1 put.
    final int rowsInsertedCount = 11;
    assertEquals(rowsInsertedCount, getScannedCount(region.getScanner(new Scan())));
    // Now 'crash' the region by stealing its wal
    final Configuration newConf = HBaseConfiguration.create(this.conf);
    User user = HBaseTestingUtility.getDifferentUser(newConf,
        tableName.getNameAsString());
    user.runAs(new PrivilegedExceptionAction() {
      @Override
      public Object run() throws Exception {
        runWALSplit(newConf);
        WAL wal2 = createWAL(newConf, hbaseRootDir, logName);
        // Reopen: replay must restore all rows, bulk-loaded and WAL-only alike.
        HRegion region2 = HRegion.openHRegion(newConf, FileSystem.get(newConf),
            hbaseRootDir, hri, htd, wal2);
        long seqid2 = region2.getOpenSeqNum();
        assertTrue(seqid2 > -1);
        assertEquals(rowsInsertedCount, getScannedCount(region2.getScanner(new Scan())));
        // I can't close wal1. Its been appropriated when we split.
        region2.close();
        wal2.close();
        return null;
      }
    });
  }
/**
* HRegion test case that is made of a major compacted HFile (created with three bulk loaded
* files) and an edit in the memstore.
* This is for HBASE-10958 "[dataloss] Bulk loading with seqids can prevent some log entries
* from being replayed"
* @throws IOException
* @throws IllegalAccessException
* @throws NoSuchFieldException
* @throws IllegalArgumentException
* @throws SecurityException
*/
  @Test
  public void testCompactedBulkLoadedFiles()
      throws IOException, SecurityException, IllegalArgumentException,
      NoSuchFieldException, IllegalAccessException, InterruptedException {
    final TableName tableName =
        TableName.valueOf("testCompactedBulkLoadedFiles");
    final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = new Path(this.hbaseRootDir, tableName.getNameAsString());
    deleteDir(basedir);
    final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
    HRegion region2 = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
    HBaseTestingUtility.closeRegionAndWAL(region2);
    WAL wal = createWAL(this.conf, hbaseRootDir, logName);
    HRegion region = HRegion.openHRegion(hri, htd, wal, this.conf);
    // Add an edit so something in the WAL
    byte [] row = tableName.getName();
    byte [] family = htd.getFamilies().iterator().next().getName();
    region.put((new Put(row)).addColumn(family, family, family));
    wal.sync();
    // Bulk-load three HFiles of 10 rows each with disjoint key ranges.
    List <Pair<byte[],String>> hfs= new ArrayList<>(1);
    for (int i = 0; i < 3; i++) {
      Path f = new Path(basedir, "hfile"+i);
      HFileTestUtil.createHFile(this.conf, fs, f, family, family, Bytes.toBytes(i + "00"),
          Bytes.toBytes(i + "50"), 10);
      hfs.add(Pair.newPair(family, f.toString()));
    }
    region.bulkLoadHFiles(hfs, true, null);
    // 1 put + 3 * 10 bulk-loaded rows.
    final int rowsInsertedCount = 31;
    assertEquals(rowsInsertedCount, getScannedCount(region.getScanner(new Scan())));
    // major compact to turn all the bulk loaded files into one normal file
    region.compact(true);
    assertEquals(rowsInsertedCount, getScannedCount(region.getScanner(new Scan())));
    // Now 'crash' the region by stealing its wal
    final Configuration newConf = HBaseConfiguration.create(this.conf);
    User user = HBaseTestingUtility.getDifferentUser(newConf,
        tableName.getNameAsString());
    user.runAs(new PrivilegedExceptionAction() {
      @Override
      public Object run() throws Exception {
        runWALSplit(newConf);
        WAL wal2 = createWAL(newConf, hbaseRootDir, logName);
        // Reopen after split: the compacted file plus replayed edit must yield all rows.
        HRegion region2 = HRegion.openHRegion(newConf, FileSystem.get(newConf),
            hbaseRootDir, hri, htd, wal2);
        long seqid2 = region2.getOpenSeqNum();
        assertTrue(seqid2 > -1);
        assertEquals(rowsInsertedCount, getScannedCount(region2.getScanner(new Scan())));
        // I can't close wal1. Its been appropriated when we split.
        region2.close();
        wal2.close();
        return null;
      }
    });
  }
/**
* Test writing edits into an HRegion, closing it, splitting logs, opening
* Region again. Verify seqids.
* @throws IOException
* @throws IllegalAccessException
* @throws NoSuchFieldException
* @throws IllegalArgumentException
* @throws SecurityException
*/
  @Test
  public void testReplayEditsWrittenViaHRegion()
      throws IOException, SecurityException, IllegalArgumentException,
      NoSuchFieldException, IllegalAccessException, InterruptedException {
    final TableName tableName =
        TableName.valueOf("testReplayEditsWrittenViaHRegion");
    final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = FSUtils.getTableDir(this.hbaseRootDir, tableName);
    deleteDir(basedir);
    final byte[] rowName = tableName.getName();
    final int countPerFamily = 10;
    final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
    HRegion region3 = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
    HBaseTestingUtility.closeRegionAndWAL(region3);
    // Write countPerFamily edits into the three families. Do a flush on one
    // of the families during the load of edits so its seqid is not same as
    // others to test we do right thing when different seqids.
    WAL wal = createWAL(this.conf, hbaseRootDir, logName);
    HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal);
    long seqid = region.getOpenSeqNum();
    boolean first = true;
    for (HColumnDescriptor hcd: htd.getFamilies()) {
      addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region, "x");
      if (first) {
        // If first, so we have at least one family w/ different seqid to rest.
        region.flush(true);
        first = false;
      }
    }
    // Now assert edits made it in.
    final Get g = new Get(rowName);
    Result result = region.get(g);
    assertEquals(countPerFamily * htd.getFamilies().size(),
      result.size());
    // Now close the region (without flush), split the log, reopen the region and assert that
    // replay of log has the correct effect, that our seqids are calculated correctly so
    // all edits in logs are seen as 'stale'/old.
    region.close(true);
    wal.shutdown();
    runWALSplit(this.conf);
    WAL wal2 = createWAL(this.conf, hbaseRootDir, logName);
    HRegion region2 = HRegion.openHRegion(conf, this.fs, hbaseRootDir, hri, htd, wal2);
    long seqid2 = region2.getOpenSeqNum();
    assertTrue(seqid + result.size() < seqid2);
    final Result result1b = region2.get(g);
    assertEquals(result.size(), result1b.size());
    // Next test. Add more edits, then 'crash' this region by stealing its wal
    // out from under it and assert that replay of the log adds the edits back
    // correctly when region is opened again.
    for (HColumnDescriptor hcd: htd.getFamilies()) {
      addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region2, "y");
    }
    // Get count of edits.
    final Result result2 = region2.get(g);
    assertEquals(2 * result.size(), result2.size());
    wal2.sync();
    final Configuration newConf = HBaseConfiguration.create(this.conf);
    User user = HBaseTestingUtility.getDifferentUser(newConf,
      tableName.getNameAsString());
    user.runAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        runWALSplit(newConf);
        FileSystem newFS = FileSystem.get(newConf);
        // Make a new wal for new region open.
        WAL wal3 = createWAL(newConf, hbaseRootDir, logName);
        final AtomicInteger countOfRestoredEdits = new AtomicInteger(0);
        // Override restoreEdit so we can count exactly how many edits replay applies.
        HRegion region3 = new HRegion(basedir, wal3, newFS, newConf, hri, htd, null) {
          @Override
          protected void restoreEdit(HStore s, Cell cell, MemStoreSizing memstoreSizing) {
            super.restoreEdit(s, cell, memstoreSizing);
            countOfRestoredEdits.incrementAndGet();
          }
        };
        long seqid3 = region3.initialize();
        Result result3 = region3.get(g);
        // Assert that count of cells is same as before crash.
        assertEquals(result2.size(), result3.size());
        // Only the second ("y") batch should need replay; the "x" batch was persisted.
        assertEquals(htd.getFamilies().size() * countPerFamily,
          countOfRestoredEdits.get());
        // I can't close wal1. Its been appropriated when we split.
        region3.close();
        wal3.close();
        return null;
      }
    });
  }
/**
* Test that we recover correctly when there is a failure in between the
* flushes. i.e. Some stores got flushed but others did not.
*
* Unfortunately, there is no easy hook to flush at a store level. The way
* we get around this is by flushing at the region level, and then deleting
* the recently flushed store file for one of the Stores. This would put us
* back in the situation where all but that store got flushed and the region
* died.
*
* We restart Region again, and verify that the edits were replayed.
*
* @throws IOException
* @throws IllegalAccessException
* @throws NoSuchFieldException
* @throws IllegalArgumentException
* @throws SecurityException
*/
  @Test
  public void testReplayEditsAfterPartialFlush()
      throws IOException, SecurityException, IllegalArgumentException,
      NoSuchFieldException, IllegalAccessException, InterruptedException {
    final TableName tableName =
        TableName.valueOf("testReplayEditsWrittenViaHRegion");
    final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = FSUtils.getTableDir(this.hbaseRootDir, tableName);
    deleteDir(basedir);
    final byte[] rowName = tableName.getName();
    final int countPerFamily = 10;
    final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
    HRegion region3 = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
    HBaseTestingUtility.closeRegionAndWAL(region3);
    // Write countPerFamily edits into the three families. Do a flush on one
    // of the families during the load of edits so its seqid is not same as
    // others to test we do right thing when different seqids.
    WAL wal = createWAL(this.conf, hbaseRootDir, logName);
    HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal);
    long seqid = region.getOpenSeqNum();
    for (HColumnDescriptor hcd: htd.getFamilies()) {
      addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region, "x");
    }
    // Now assert edits made it in.
    final Get g = new Get(rowName);
    Result result = region.get(g);
    assertEquals(countPerFamily * htd.getFamilies().size(),
      result.size());
    // Let us flush the region
    region.flush(true);
    region.close(true);
    wal.shutdown();
    // delete the store files in the second column family to simulate a failure
    // in between the flushcache();
    // we have 3 families. killing the middle one ensures that taking the maximum
    // will make us fail.
    int cf_count = 0;
    for (HColumnDescriptor hcd: htd.getFamilies()) {
      cf_count++;
      if (cf_count == 2) {
        region.getRegionFileSystem().deleteFamily(hcd.getNameAsString());
      }
    }
    // Let us try to split and recover
    runWALSplit(this.conf);
    WAL wal2 = createWAL(this.conf, hbaseRootDir, logName);
    // Reopen: replay must restore the family whose flushed files were deleted.
    HRegion region2 = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal2);
    long seqid2 = region2.getOpenSeqNum();
    assertTrue(seqid + result.size() < seqid2);
    final Result result1b = region2.get(g);
    assertEquals(result.size(), result1b.size());
  }
// StoreFlusher implementation used in testReplayEditsAfterAbortingFlush.
// Only throws exception if throwExceptionWhenFlushing is set true.
public static class CustomStoreFlusher extends DefaultStoreFlusher {
// Switch between throw and not throw exception in flush
static final AtomicBoolean throwExceptionWhenFlushing = new AtomicBoolean(false);
public CustomStoreFlusher(Configuration conf, HStore store) {
super(conf, store);
}
@Override
public List<Path> flushSnapshot(MemStoreSnapshot snapshot, long cacheFlushId,
MonitoredTask status, ThroughputController throughputController,
FlushLifeCycleTracker tracker) throws IOException {
if (throwExceptionWhenFlushing.get()) {
throw new IOException("Simulated exception by tests");
}
return super.flushSnapshot(snapshot, cacheFlushId, status, throughputController, tracker);
}
}
/**
* Test that we could recover the data correctly after aborting flush. In the
* test, first we abort flush after writing some data, then writing more data
* and flush again, at last verify the data.
* @throws IOException
*/
  @Test
  public void testReplayEditsAfterAbortingFlush() throws IOException {
    final TableName tableName =
        TableName.valueOf("testReplayEditsAfterAbortingFlush");
    final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = FSUtils.getTableDir(this.hbaseRootDir, tableName);
    deleteDir(basedir);
    final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
    HRegion region3 = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
    HBaseTestingUtility.closeRegionAndWAL(region3);
    // Write countPerFamily edits into the three families. Do a flush on one
    // of the families during the load of edits so its seqid is not same as
    // others to test we do right thing when different seqids.
    WAL wal = createWAL(this.conf, hbaseRootDir, logName);
    // Mocked services so the region believes it runs on a live (later aborted) server.
    RegionServerServices rsServices = Mockito.mock(RegionServerServices.class);
    Mockito.doReturn(false).when(rsServices).isAborted();
    when(rsServices.getServerName()).thenReturn(ServerName.valueOf("foo", 10, 10));
    when(rsServices.getConfiguration()).thenReturn(conf);
    // Swap in CustomStoreFlusher so flushes can be made to fail on demand.
    Configuration customConf = new Configuration(this.conf);
    customConf.set(DefaultStoreEngine.DEFAULT_STORE_FLUSHER_CLASS_KEY,
        CustomStoreFlusher.class.getName());
    HRegion region =
      HRegion.openHRegion(this.hbaseRootDir, hri, htd, wal, customConf, rsServices, null);
    int writtenRowCount = 10;
    List<HColumnDescriptor> families = new ArrayList<>(htd.getFamilies());
    for (int i = 0; i < writtenRowCount; i++) {
      Put put = new Put(Bytes.toBytes(tableName + Integer.toString(i)));
      put.addColumn(families.get(i % families.size()).getName(), Bytes.toBytes("q"),
          Bytes.toBytes("val"));
      region.put(put);
    }
    // Now assert edits made it in.
    RegionScanner scanner = region.getScanner(new Scan());
    assertEquals(writtenRowCount, getScannedCount(scanner));
    // Let us flush the region
    CustomStoreFlusher.throwExceptionWhenFlushing.set(true);
    try {
      region.flush(true);
      fail("Injected exception hasn't been thrown");
    } catch (IOException e) {
      LOG.info("Expected simulated exception when flushing region, {}", e.getMessage());
      // simulated to abort server
      Mockito.doReturn(true).when(rsServices).isAborted();
      region.setClosing(false); // region normally does not accept writes after
      // DroppedSnapshotException. We mock around it for this test.
    }
    // writing more data
    int moreRow = 10;
    for (int i = writtenRowCount; i < writtenRowCount + moreRow; i++) {
      Put put = new Put(Bytes.toBytes(tableName + Integer.toString(i)));
      put.addColumn(families.get(i % families.size()).getName(), Bytes.toBytes("q"),
          Bytes.toBytes("val"));
      region.put(put);
    }
    writtenRowCount += moreRow;
    // call flush again
    CustomStoreFlusher.throwExceptionWhenFlushing.set(false);
    try {
      region.flush(true);
    } catch (IOException t) {
      LOG.info("Expected exception when flushing region because server is stopped,"
        + t.getMessage());
    }
    region.close(true);
    wal.shutdown();
    // Let us try to split and recover
    runWALSplit(this.conf);
    WAL wal2 = createWAL(this.conf, hbaseRootDir, logName);
    Mockito.doReturn(false).when(rsServices).isAborted();
    // Reopen on a "live" server: replay must restore all 20 rows.
    HRegion region2 =
      HRegion.openHRegion(this.hbaseRootDir, hri, htd, wal2, this.conf, rsServices, null);
    scanner = region2.getScanner(new Scan());
    assertEquals(writtenRowCount, getScannedCount(scanner));
  }
/**
 * Counts the rows returned by the given scanner.
 * <p>
 * A "row" is any non-empty batch of cells handed back by {@link RegionScanner#next},
 * including the final batch returned together with the end-of-scan signal.
 * @param scanner scanner positioned at the start of the region; not closed here
 * @return number of non-empty row batches seen
 * @throws IOException if the underlying scan fails
 */
private int getScannedCount(RegionScanner scanner) throws IOException {
  int rows = 0;
  List<Cell> cellBuffer = new ArrayList<>();
  boolean hasMore = true;
  while (hasMore) {
    hasMore = scanner.next(cellBuffer);
    if (!cellBuffer.isEmpty()) {
      rows++;
    }
    // Reuse the buffer across iterations; next() appends rather than replaces.
    cellBuffer.clear();
  }
  return rows;
}
/**
 * Writes edits directly into a WAL, splits it, then opens an HRegion over the split
 * output and verifies that only the good edits are replayed: edits for an unknown
 * family are skipped, a family delete makes it across, and replay triggers memstore
 * flushes during {@code HRegion#initialize}.
 * @throws Exception on any test failure
 */
@Test
public void testReplayEditsWrittenIntoWAL() throws Exception {
  final TableName tableName =
      TableName.valueOf("testReplayEditsWrittenIntoWAL");
  final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
  final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
  final Path basedir = FSUtils.getTableDir(hbaseRootDir, tableName);
  deleteDir(basedir);
  final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
  // Create region on disk once (to lay down the region structure), then close it;
  // the rest of the test works against the WAL directly.
  HRegion region2 = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
  HBaseTestingUtility.closeRegionAndWAL(region2);
  final WAL wal = createWAL(this.conf, hbaseRootDir, logName);
  final byte[] rowName = tableName.getName();
  final byte[] regionName = hri.getEncodedNameAsBytes();
  // Add 1k edits to each of the three families.
  final int countPerFamily = 1000;
  Set<byte[]> familyNames = new HashSet<>();
  NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  for(byte[] fam : htd.getFamiliesKeys()) {
    scopes.put(fam, 0);
  }
  for (HColumnDescriptor hcd: htd.getFamilies()) {
    addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily,
        ee, wal, htd, mvcc, scopes);
    familyNames.add(hcd.getName());
  }
  // Add a cache flush marker; it should have no effect on replay.
  wal.startCacheFlush(regionName, familyNames);
  wal.completeCacheFlush(regionName);
  // Add an edit to a family that does not exist in the table; replay must skip it.
  WALEdit edit = new WALEdit();
  long now = ee.currentTime();
  edit.add(new KeyValue(rowName, Bytes.toBytes("another family"), rowName,
      now, rowName));
  wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes),
      edit);
  // Delete the 'c' family to verify deletes make it over.
  edit = new WALEdit();
  now = ee.currentTime();
  edit.add(new KeyValue(rowName, Bytes.toBytes("c"), null, now, KeyValue.Type.DeleteFamily));
  wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes),
      edit);
  // Sync so everything above is durable before the split.
  wal.sync();
  // Make a new conf and run the splitter as a different user so we can take
  // over the old wal.
  final Configuration newConf = HBaseConfiguration.create(this.conf);
  User user = HBaseTestingUtility.getDifferentUser(newConf,
      ".replay.wal.secondtime");
  user.runAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      runWALSplit(newConf);
      FileSystem newFS = FileSystem.get(newConf);
      // 100k memstore flush size seems to make for about 4 flushes during
      // HRegion#initialize, which is what the flush-count assertion below relies on.
      newConf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 100);
      // Make a new wal for the new region.
      WAL newWal = createWAL(newConf, hbaseRootDir, logName);
      final AtomicInteger flushcount = new AtomicInteger(0);
      try {
        // Subclass HRegion to count internal flushes triggered by replay.
        final HRegion region = new HRegion(basedir, newWal, newFS, newConf, hri, htd, null) {
          @Override
          protected FlushResultImpl internalFlushcache(final WAL wal, final long myseqid,
              final Collection<HStore> storesToFlush, MonitoredTask status,
              boolean writeFlushWalMarker, FlushLifeCycleTracker tracker) throws IOException {
            LOG.info("InternalFlushCache Invoked");
            FlushResultImpl fs = super.internalFlushcache(wal, myseqid, storesToFlush,
                Mockito.mock(MonitoredTask.class), writeFlushWalMarker, tracker);
            flushcount.incrementAndGet();
            return fs;
          }
        };
        // The seq id this region has opened up with.
        long seqid = region.initialize();
        // The mvcc readpoint from inserting data.
        long writePoint = mvcc.getWritePoint();
        // We must have flushed during init, and the open seqid must sit just
        // past the last replayed write point.
        assertTrue("Flushcount=" + flushcount.get(), flushcount.get() > 0);
        assertTrue((seqid - 1) == writePoint);
        Get get = new Get(rowName);
        Result result = region.get(get);
        // Only two of the three families should have edits ('c' was family-deleted,
        // and the unknown family was skipped).
        assertEquals(countPerFamily * (htd.getFamilies().size() - 1),
            result.size());
        region.close();
      } finally {
        newWal.close();
      }
      return null;
    }
  });
}
@Test
// Test for HBASE-6065: the sequence number encoded into the recovered.edits file
// name must match the region's latest edit sequence number.
public void testSequentialEditLogSeqNum() throws IOException {
  final TableName tableName = TableName.valueOf(currentTest.getMethodName());
  final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
  final Path basedir =
      FSUtils.getWALTableDir(conf, tableName);
  deleteDir(basedir);
  final byte[] rowName = tableName.getName();
  final int countPerFamily = 10;
  final HTableDescriptor htd = createBasic1FamilyHTD(tableName);
  // Mock the WAL so that completeCacheFlush is a no-op until we allow it below.
  MockWAL wal = createMockWAL();
  HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal);
  for (HColumnDescriptor hcd : htd.getFamilies()) {
    addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region, "x");
  }
  // Flush the region; completeCacheFlush is still suppressed by the mock at this point.
  region.flush(true);
  for (HColumnDescriptor hcd : htd.getFamilies()) {
    addRegionEdits(rowName, hcd.getName(), 5, this.ee, region, "x");
  }
  // Latest edit sequence number after the second batch of edits.
  long lastestSeqNumber = region.getReadPoint(null);
  // Now allow complete cache flush with the seq number obtained after the first
  // set of edits.
  wal.doCompleteCacheFlush = true;
  wal.completeCacheFlush(hri.getEncodedNameAsBytes());
  wal.shutdown();
  FileStatus[] listStatus = wal.getFiles();
  assertNotNull(listStatus);
  assertTrue(listStatus.length > 0);
  WALSplitter.splitLogFile(hbaseRootDir, listStatus[0],
      this.fs, this.conf, null, null, null, wals);
  // List the recovered.edits files, excluding the sequence-id marker file.
  FileStatus[] listStatus1 = this.fs.listStatus(new Path(FSUtils.getWALTableDir(conf, tableName),
      new Path(hri.getEncodedName(), "recovered.edits")),
      new PathFilter() {
        @Override
        public boolean accept(Path p) {
          return !WALSplitUtil.isSequenceIdFile(p);
        }
      });
  int editCount = 0;
  // A recovered.edits file is named after its highest sequence number; with a single
  // split file the loop leaves that number in editCount.
  for (FileStatus fileStatus : listStatus1) {
    editCount = Integer.parseInt(fileStatus.getPath().getName());
  }
  // The two sequence numbers should be the same.
  assertEquals(
      "The sequence number of the recoverd.edits and the current edit seq should be same",
      lastestSeqNumber, editCount);
}
/**
 * Test for HBASE-15252: if reading a recovered-edits file fails part-way
 * (simulated here by an input stream that errors right after the WAL header),
 * region open must fail rather than silently losing the unread edits.
 * See https://issues.apache.org/jira/browse/HBASE-15252.
 */
@Test
public void testDatalossWhenInputError() throws Exception {
  final TableName tableName = TableName.valueOf("testDatalossWhenInputError");
  final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
  final Path basedir = FSUtils.getWALTableDir(conf, tableName);
  deleteDir(basedir);
  final byte[] rowName = tableName.getName();
  final int countPerFamily = 10;
  final HTableDescriptor htd = createBasic1FamilyHTD(tableName);
  HRegion region1 = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
  Path regionDir = region1.getWALRegionDir();
  HBaseTestingUtility.closeRegionAndWAL(region1);
  WAL wal = createWAL(this.conf, hbaseRootDir, logName);
  HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal);
  for (HColumnDescriptor hcd : htd.getFamilies()) {
    addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region, "x");
  }
  // Now assert edits made it in.
  final Get g = new Get(rowName);
  Result result = region.get(g);
  assertEquals(countPerFamily * htd.getFamilies().size(), result.size());
  // Close the region without flushing so everything lives only in the WAL,
  // then split the log.
  region.close(true);
  wal.shutdown();
  runWALSplit(this.conf);
  // Figure out where the data portion of the split edit file starts: open it with
  // the configured reader and record the stream position right after the header.
  Path editFile = WALSplitUtil.getSplitEditFilesSorted(this.fs, regionDir).first();
  FSDataInputStream stream = fs.open(editFile);
  stream.seek(ProtobufLogReader.PB_WAL_MAGIC.length);
  Class<? extends AbstractFSWALProvider.Reader> logReaderClass =
      conf.getClass("hbase.regionserver.hlog.reader.impl", ProtobufLogReader.class,
        AbstractFSWALProvider.Reader.class);
  AbstractFSWALProvider.Reader reader = logReaderClass.getDeclaredConstructor().newInstance();
  reader.init(this.fs, editFile, conf, stream);
  final long headerLength = stream.getPos();
  reader.close();
  // Build a spy filesystem whose open() of the edit file returns a stream wrapping a
  // spied DFSInputStream that throws IOException once the header has been consumed.
  FileSystem spyFs = spy(this.fs);
  doAnswer(new Answer<FSDataInputStream>() {
    @Override
    public FSDataInputStream answer(InvocationOnMock invocation) throws Throwable {
      FSDataInputStream stream = (FSDataInputStream) invocation.callRealMethod();
      // Reach into FilterInputStream's wrapped stream via reflection so we can
      // substitute the spy underneath the FSDataInputStream wrapper.
      Field field = FilterInputStream.class.getDeclaredField("in");
      field.setAccessible(true);
      final DFSInputStream in = (DFSInputStream) field.get(stream);
      DFSInputStream spyIn = spy(in);
      doAnswer(new Answer<Integer>() {
        private long pos;
        @Override
        public Integer answer(InvocationOnMock invocation) throws Throwable {
          // Let reads through until the header is past, then fail every read.
          if (pos >= headerLength) {
            throw new IOException("read over limit");
          }
          int b = (Integer) invocation.callRealMethod();
          if (b > 0) {
            pos += b;
          }
          return b;
        }
      }).when(spyIn).read(any(byte[].class), anyInt(), anyInt());
      doAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
          invocation.callRealMethod();
          // Also close the real underlying stream the spy wraps.
          in.close();
          return null;
        }
      }).when(spyIn).close();
      field.set(stream, spyIn);
      return stream;
    }
  }).when(spyFs).open(eq(editFile));
  WAL wal2 = createWAL(this.conf, hbaseRootDir, logName);
  HRegion region2;
  try {
    // Log replay should fail with the injected IOException; if it somehow succeeds,
    // every edit must still be present (otherwise we lost data).
    region2 = HRegion.openHRegion(conf, spyFs, hbaseRootDir, hri, htd, wal2);
    assertEquals(result.size(), region2.get(g).size());
  } catch (IOException e) {
    assertEquals("read over limit", e.getMessage());
  }
  // With the real filesystem, replay succeeds and all edits are recovered.
  region2 = HRegion.openHRegion(conf, fs, hbaseRootDir, hri, htd, wal2);
  assertEquals(result.size(), region2.get(g).size());
}
/**
 * Test for HBASE-14949: splitting two WAL files that contain overlapping sequence
 * ids (here, a large file with entries 1-2 and a small file repeating entry 2)
 * must not lose edits, regardless of which file is split first.
 * See https://issues.apache.org/jira/browse/HBASE-14949.
 * @param largeFirst whether the larger WAL file is split before the smaller one
 */
private void testNameConflictWhenSplit(boolean largeFirst) throws IOException,
    StreamLacksCapabilityException {
  final TableName tableName = TableName.valueOf("testReplayEditsWrittenIntoWAL");
  final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
  final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
  final Path basedir = FSUtils.getTableDir(hbaseRootDir, tableName);
  deleteDir(basedir);
  final HTableDescriptor htd = createBasic1FamilyHTD(tableName);
  NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  for (byte[] fam : htd.getFamiliesKeys()) {
    scopes.put(fam, 0);
  }
  HRegion region = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
  HBaseTestingUtility.closeRegionAndWAL(region);
  final byte[] family = htd.getColumnFamilies()[0].getName();
  final byte[] rowName = tableName.getName();
  // Two entries; entry2 appears in both files to create the seqid conflict.
  FSWALEntry entry1 = createFSWALEntry(htd, hri, 1L, rowName, family, ee, mvcc, 1, scopes);
  FSWALEntry entry2 = createFSWALEntry(htd, hri, 2L, rowName, family, ee, mvcc, 2, scopes);
  Path largeFile = new Path(logDir, "wal-1");
  Path smallFile = new Path(logDir, "wal-2");
  writerWALFile(largeFile, Arrays.asList(entry1, entry2));
  writerWALFile(smallFile, Arrays.asList(entry2));
  FileStatus first, second;
  if (largeFirst) {
    first = fs.getFileStatus(largeFile);
    second = fs.getFileStatus(smallFile);
  } else {
    first = fs.getFileStatus(smallFile);
    second = fs.getFileStatus(largeFile);
  }
  WALSplitter.splitLogFile(hbaseRootDir, first, fs, conf, null, null, null, wals);
  WALSplitter.splitLogFile(hbaseRootDir, second, fs, conf, null, null, null, wals);
  WAL wal = createWAL(this.conf, hbaseRootDir, logName);
  region = HRegion.openHRegion(conf, this.fs, hbaseRootDir, hri, htd, wal);
  // Open seqnum must advance past all replayed edits, and both qualifiers must be present.
  assertTrue(region.getOpenSeqNum() > mvcc.getWritePoint());
  assertEquals(2, region.get(new Get(rowName)).size());
}
/** Splits the larger WAL file first; see {@code testNameConflictWhenSplit}. */
@Test
public void testNameConflictWhenSplit0() throws IOException, StreamLacksCapabilityException {
  testNameConflictWhenSplit(true);
}
/** Splits the smaller WAL file first; see {@code testNameConflictWhenSplit}. */
@Test
public void testNameConflictWhenSplit1() throws IOException, StreamLacksCapabilityException {
  testNameConflictWhenSplit(false);
}
/**
 * FSHLog test double that suppresses {@link #completeCacheFlush} until the test
 * explicitly enables it via {@link #doCompleteCacheFlush}, letting tests control
 * exactly when the flush-complete marker is recorded.
 */
static class MockWAL extends FSHLog {
  // Flipped to true by the test once the flush-complete should actually happen.
  boolean doCompleteCacheFlush = false;
  public MockWAL(FileSystem fs, Path rootDir, String logName, Configuration conf)
      throws IOException {
    super(fs, rootDir, logName, HConstants.HREGION_OLDLOGDIR_NAME, conf, null, true, null, null);
  }
  @Override
  public void completeCacheFlush(byte[] encodedRegionName) {
    // Swallow the call entirely while disabled; delegate otherwise.
    if (doCompleteCacheFlush) {
      super.completeCacheFlush(encodedRegionName);
    }
  }
}
/**
 * Builds a table descriptor with a single column family named "a".
 * @param tableName name for the new descriptor
 * @return descriptor containing exactly one family, "a"
 */
private HTableDescriptor createBasic1FamilyHTD(final TableName tableName) {
  final HTableDescriptor descriptor = new HTableDescriptor(tableName);
  descriptor.addFamily(new HColumnDescriptor(Bytes.toBytes("a")));
  return descriptor;
}
/**
 * Creates and initializes a {@link MockWAL} rooted at {@code hbaseRootDir}.
 * @return an initialized MockWAL with a capped recovery-retry count
 * @throws IOException if WAL creation fails
 */
private MockWAL createMockWAL() throws IOException {
  MockWAL wal = new MockWAL(fs, hbaseRootDir, logName, conf);
  wal.init();
  // Set down maximum recovery so the dfsclient doesn't linger retrying something
  // long gone.
  HBaseTestingUtility.setMaxRecoveryErrorCount(wal.getOutputStream(), 1);
  return wal;
}
// Flusher used in this test. Runs the flush synchronously inside requestFlush
// instead of queueing it.
static class TestFlusher implements FlushRequester {
  // Region to flush. Never assigned inside this class; NOTE(review): presumably
  // set directly by the enclosing test (field is visible to the outer class)
  // before any flush is requested — confirm at the use site.
  private HRegion r;
  @Override
  public boolean requestFlush(HRegion region, boolean force, FlushLifeCycleTracker tracker) {
    try {
      // Intentionally flushes the captured region 'r', not the 'region' argument.
      r.flush(force);
      return true;
    } catch (IOException e) {
      throw new RuntimeException("Exception flushing", e);
    }
  }
  @Override
  public boolean requestDelayedFlush(HRegion region, long when, boolean forceFlushAllStores) {
    // Pretend the delayed flush was scheduled; nothing actually happens.
    return true;
  }
  @Override
  public void registerFlushRequestListener(FlushRequestListener listener) {
    // No-op: listeners are not needed for these tests.
  }
  @Override
  public boolean unregisterFlushRequestListener(FlushRequestListener listener) {
    return false;
  }
  @Override
  public void setGlobalMemStoreLimit(long globalMemStoreSize) {
    // No-op: global memstore accounting is not exercised here.
  }
}
/**
 * Builds a WAL key for the given region/table with a fixed, arbitrary write time.
 * @return key carrying the region's encoded name, the table, and the given mvcc/scopes
 */
private WALKeyImpl createWALKey(final TableName tableName, final HRegionInfo hri,
    final MultiVersionConcurrencyControl mvcc, NavigableMap<byte[], Integer> scopes) {
  // Every test key uses the same constant timestamp; ordering comes from seqids.
  final long fixedWriteTime = 999;
  return new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, fixedWriteTime, mvcc, scopes);
}
/**
 * Builds a one-cell WAL edit whose qualifier is the index and whose value is
 * "family:index", timestamped with the supplied clock.
 */
private WALEdit createWALEdit(final byte[] rowName, final byte[] family, EnvironmentEdge ee,
    int index) {
  final String indexText = Integer.toString(index);
  final byte[] qualifier = Bytes.toBytes(indexText);
  final byte[] value = Bytes.toBytes(Bytes.toString(family) + ":" + indexText);
  WALEdit walEdit = new WALEdit();
  walEdit.add(new KeyValue(rowName, family, qualifier, ee.currentTime(), value));
  return walEdit;
}
/**
 * Builds an FSWALEntry with the given sequence number and a freshly stamped
 * region sequence id taken from the mvcc.
 * @throws IOException if stamping the sequence id fails
 */
private FSWALEntry createFSWALEntry(HTableDescriptor htd, HRegionInfo hri, long sequence,
    byte[] rowName, byte[] family, EnvironmentEdge ee, MultiVersionConcurrencyControl mvcc,
    int index, NavigableMap<byte[], Integer> scopes) throws IOException {
  final WALKeyImpl key = createWALKey(htd.getTableName(), hri, mvcc, scopes);
  final WALEdit edit = createWALEdit(rowName, family, ee, index);
  FSWALEntry walEntry = new FSWALEntry(sequence, key, edit, hri, true, null);
  walEntry.stampRegionSequenceId(mvcc.begin());
  return walEntry;
}
/**
 * Appends {@code count} single-cell edits for one family directly to the WAL,
 * then syncs once so all of them are durable.
 * @throws IOException if an append or the sync fails
 */
private void addWALEdits(final TableName tableName, final HRegionInfo hri, final byte[] rowName,
    final byte[] family, final int count, EnvironmentEdge ee, final WAL wal,
    final HTableDescriptor htd, final MultiVersionConcurrencyControl mvcc,
    NavigableMap<byte[], Integer> scopes) throws IOException {
  for (int i = 0; i < count; i++) {
    final WALKeyImpl key = createWALKey(tableName, hri, mvcc, scopes);
    final WALEdit edit = createWALEdit(rowName, family, ee, i);
    wal.appendData(hri, key, edit);
  }
  // Single sync at the end covers every append above.
  wal.sync();
}
/**
 * Puts {@code count} cells into one family of the region, with qualifiers
 * "{prefix}0" .. "{prefix}{count-1}" and the row name as the value.
 * @return the Puts that were applied, in order
 * @throws IOException if any put fails
 */
static List<Put> addRegionEdits(final byte[] rowName, final byte[] family, final int count,
    EnvironmentEdge ee, final Region r, final String qualifierPrefix) throws IOException {
  final List<Put> applied = new ArrayList<>(count);
  for (int i = 0; i < count; i++) {
    Put put = new Put(rowName);
    put.addColumn(family, Bytes.toBytes(qualifierPrefix + Integer.toString(i)),
        ee.currentTime(), rowName);
    r.put(put);
    applied.add(put);
  }
  return applied;
}
/**
 * Creates an HRegionInfo spanning the whole key space for a table expected to
 * have three column families named 'a', 'b', and 'c'.
 * @param tableName name of table to use when we create the HRegionInfo
 * @return region info with null start/end keys (single region covering the table)
 */
private HRegionInfo createBasic3FamilyHRegionInfo(final TableName tableName) {
  return new HRegionInfo(tableName, null, null, false);
}
/**
 * Runs the WAL split and verifies exactly one split file was produced
 * (there is only one region in these tests).
 * @param c configuration to run the split with
 * @return the single split file made
 * @throws IOException if the split fails
 */
private Path runWALSplit(final Configuration c) throws IOException {
  List<Path> splits = WALSplitter.split(
    hbaseRootDir, logDir, oldLogDir, FileSystem.get(c), c, wals);
  // Split should generate only 1 file since there's only 1 region.
  assertEquals("splits=" + splits, 1, splits.size());
  // Make sure the file exists.
  assertTrue(fs.exists(splits.get(0)));
  LOG.info("Split file=" + splits.get(0));
  return splits.get(0);
}
/**
 * Builds a table descriptor with three column families: "a", "b", and "c".
 * @param tableName name for the new descriptor
 * @return descriptor containing the three families in order
 */
private HTableDescriptor createBasic3FamilyHTD(final TableName tableName) {
  HTableDescriptor descriptor = new HTableDescriptor(tableName);
  for (String familyName : new String[] { "a", "b", "c" }) {
    descriptor.addFamily(new HColumnDescriptor(Bytes.toBytes(familyName)));
  }
  return descriptor;
}
/**
 * Writes the given entries into a brand-new protobuf WAL file at {@code file},
 * creating parent directories as needed, and syncs before closing.
 * @param file destination WAL file path
 * @param entries entries to append, in order
 * @throws IOException if directory creation, writing, or syncing fails
 * @throws StreamLacksCapabilityException if the filesystem cannot provide
 *         the required output-stream capabilities
 */
private void writerWALFile(Path file, List<FSWALEntry> entries) throws IOException,
    StreamLacksCapabilityException {
  fs.mkdirs(file.getParent());
  // try-with-resources fixes a leak in the original: the writer (and its underlying
  // output stream) was never closed when init/append/sync threw.
  try (ProtobufLogWriter writer = new ProtobufLogWriter()) {
    writer.init(fs, file, conf, true, WALUtil.getWALBlockSize(conf, fs, file));
    for (FSWALEntry entry : entries) {
      writer.append(entry);
    }
    writer.sync(false);
  }
}
/**
 * Creates the WAL implementation under test; concrete subclasses supply the
 * provider-specific construction (e.g. FSHLog vs AsyncFSWAL).
 * @param c configuration to build the WAL with
 * @param hbaseRootDir root directory for WAL files
 * @param logName name of the log directory
 * @return a ready-to-use WAL instance
 * @throws IOException if WAL creation fails
 */
protected abstract WAL createWAL(Configuration c, Path hbaseRootDir, String logName)
    throws IOException;
}
| |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode;
import org.elasticsearch.search.aggregations.metrics.Cardinality;
import org.elasticsearch.search.aggregations.metrics.GeoBounds;
import org.elasticsearch.search.aggregations.metrics.GeoCentroid;
import org.elasticsearch.search.aggregations.metrics.Percentiles;
import org.elasticsearch.search.aggregations.metrics.Stats;
import org.elasticsearch.test.ESIntegTestCase;
import static org.elasticsearch.search.aggregations.AggregationBuilders.cardinality;
import static org.elasticsearch.search.aggregations.AggregationBuilders.dateHistogram;
import static org.elasticsearch.search.aggregations.AggregationBuilders.geoBounds;
import static org.elasticsearch.search.aggregations.AggregationBuilders.geoCentroid;
import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
import static org.elasticsearch.search.aggregations.AggregationBuilders.percentiles;
import static org.elasticsearch.search.aggregations.AggregationBuilders.stats;
import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
import static org.hamcrest.Matchers.closeTo;
/**
 * Integration tests for the {@code missing} option on aggregations: one document
 * is indexed with no fields at all and one with every field, so every aggregation
 * below should treat the empty document as carrying the configured missing value.
 */
@ESIntegTestCase.SuiteScopeTestCase
public class MissingValueIT extends ESIntegTestCase {
  @Override
  protected int maximumNumberOfShards() {
    // Keep shard count small and deterministic for the doc-count assertions.
    return 2;
  }
  @Override
  protected void setupSuiteScopeCluster() throws Exception {
    // Doc 1 is entirely empty; doc 2 has str/long/double/date/location values.
    assertAcked(prepareCreate("idx")
        .setMapping("date", "type=date", "location", "type=geo_point", "str", "type=keyword").get());
    indexRandom(true,
        client().prepareIndex("idx").setId("1").setSource(),
        client().prepareIndex("idx").setId("2")
          .setSource("str", "foo", "long", 3L, "double", 5.5, "date", "2015-05-07", "location", "1,2"));
  }
  // Field absent from the mapping: both docs fall into the missing bucket.
  public void testUnmappedTerms() {
    SearchResponse response = client().prepareSearch("idx")
        .addAggregation(terms("my_terms").field("non_existing_field").missing("bar")).get();
    assertSearchResponse(response);
    Terms terms = response.getAggregations().get("my_terms");
    assertEquals(1, terms.getBuckets().size());
    assertEquals(2, terms.getBucketByKey("bar").getDocCount());
  }
  // Keyword terms, exercised under every execution hint; missing value either
  // creates a new bucket ("bar") or merges into an existing one ("foo").
  public void testStringTerms() {
    for (ExecutionMode mode : ExecutionMode.values()) {
      SearchResponse response = client().prepareSearch("idx").addAggregation(
          terms("my_terms")
              .field("str")
              .executionHint(mode.toString())
              .missing("bar")).get();
      assertSearchResponse(response);
      Terms terms = response.getAggregations().get("my_terms");
      assertEquals(2, terms.getBuckets().size());
      assertEquals(1, terms.getBucketByKey("foo").getDocCount());
      assertEquals(1, terms.getBucketByKey("bar").getDocCount());
      response = client().prepareSearch("idx").addAggregation(terms("my_terms").field("str").missing("foo")).get();
      assertSearchResponse(response);
      terms = response.getAggregations().get("my_terms");
      assertEquals(1, terms.getBuckets().size());
      assertEquals(2, terms.getBucketByKey("foo").getDocCount());
    }
  }
  // Long terms: missing(4) adds a bucket; missing(3) merges with the real value.
  public void testLongTerms() {
    SearchResponse response = client().prepareSearch("idx")
        .addAggregation(terms("my_terms").field("long").missing(4)).get();
    assertSearchResponse(response);
    Terms terms = response.getAggregations().get("my_terms");
    assertEquals(2, terms.getBuckets().size());
    assertEquals(1, terms.getBucketByKey("3").getDocCount());
    assertEquals(1, terms.getBucketByKey("4").getDocCount());
    response = client().prepareSearch("idx")
        .addAggregation(terms("my_terms").field("long").missing(3)).get();
    assertSearchResponse(response);
    terms = response.getAggregations().get("my_terms");
    assertEquals(1, terms.getBuckets().size());
    assertEquals(2, terms.getBucketByKey("3").getDocCount());
  }
  // Same shape as testLongTerms but on the double field.
  public void testDoubleTerms() {
    SearchResponse response = client().prepareSearch("idx")
        .addAggregation(terms("my_terms").field("double").missing(4.5)).get();
    assertSearchResponse(response);
    Terms terms = response.getAggregations().get("my_terms");
    assertEquals(2, terms.getBuckets().size());
    assertEquals(1, terms.getBucketByKey("4.5").getDocCount());
    assertEquals(1, terms.getBucketByKey("5.5").getDocCount());
    response = client().prepareSearch("idx").addAggregation(terms("my_terms").field("double").missing(5.5)).get();
    assertSearchResponse(response);
    terms = response.getAggregations().get("my_terms");
    assertEquals(1, terms.getBuckets().size());
    assertEquals(2, terms.getBucketByKey("5.5").getDocCount());
  }
  // Unmapped field: both docs land in the bucket containing the missing value 12.
  public void testUnmappedHistogram() {
    SearchResponse response = client().prepareSearch("idx")
        .addAggregation(histogram("my_histogram").field("non-existing_field").interval(5).missing(12)).get();
    assertSearchResponse(response);
    Histogram histogram = response.getAggregations().get("my_histogram");
    assertEquals(1, histogram.getBuckets().size());
    assertEquals(10d, histogram.getBuckets().get(0).getKey());
    assertEquals(2, histogram.getBuckets().get(0).getDocCount());
  }
  // missing(7) puts the empty doc in bucket [5,10); missing(3) joins the real value in [0,5).
  public void testHistogram() {
    SearchResponse response = client().prepareSearch("idx")
        .addAggregation(histogram("my_histogram").field("long").interval(5).missing(7)).get();
    assertSearchResponse(response);
    Histogram histogram = response.getAggregations().get("my_histogram");
    assertEquals(2, histogram.getBuckets().size());
    assertEquals(0d, histogram.getBuckets().get(0).getKey());
    assertEquals(1, histogram.getBuckets().get(0).getDocCount());
    assertEquals(5d, histogram.getBuckets().get(1).getKey());
    assertEquals(1, histogram.getBuckets().get(1).getDocCount());
    response = client().prepareSearch("idx")
        .addAggregation(histogram("my_histogram").field("long").interval(5).missing(3)).get();
    assertSearchResponse(response);
    histogram = response.getAggregations().get("my_histogram");
    assertEquals(1, histogram.getBuckets().size());
    assertEquals(0d, histogram.getBuckets().get(0).getKey());
    assertEquals(2, histogram.getBuckets().get(0).getDocCount());
  }
  // Year buckets: missing date in 2014 creates a second bucket; 2015 merges with the real date.
  public void testDateHistogram() {
    SearchResponse response = client().prepareSearch("idx")
        .addAggregation(
            dateHistogram("my_histogram").field("date").calendarInterval(DateHistogramInterval.YEAR).missing("2014-05-07"))
        .get();
    assertSearchResponse(response);
    Histogram histogram = response.getAggregations().get("my_histogram");
    assertEquals(2, histogram.getBuckets().size());
    assertEquals("2014-01-01T00:00:00.000Z", histogram.getBuckets().get(0).getKeyAsString());
    assertEquals(1, histogram.getBuckets().get(0).getDocCount());
    assertEquals("2015-01-01T00:00:00.000Z", histogram.getBuckets().get(1).getKeyAsString());
    assertEquals(1, histogram.getBuckets().get(1).getDocCount());
    response = client().prepareSearch("idx")
        .addAggregation(
            dateHistogram("my_histogram").field("date").calendarInterval(DateHistogramInterval.YEAR).missing("2015-05-07"))
        .get();
    assertSearchResponse(response);
    histogram = response.getAggregations().get("my_histogram");
    assertEquals(1, histogram.getBuckets().size());
    assertEquals("2015-01-01T00:00:00.000Z", histogram.getBuckets().get(0).getKeyAsString());
    assertEquals(2, histogram.getBuckets().get(0).getDocCount());
  }
  // Distinct values are {3, 2 (missing)} -> cardinality 2.
  public void testCardinality() {
    SearchResponse response = client().prepareSearch("idx")
        .addAggregation(cardinality("card").field("long").missing(2)).get();
    assertSearchResponse(response);
    Cardinality cardinality = response.getAggregations().get("card");
    assertEquals(2, cardinality.getValue());
  }
  // The missing value 1000 dominates, so the 100th percentile is exactly 1000.
  public void testPercentiles() {
    SearchResponse response = client().prepareSearch("idx")
        .addAggregation(percentiles("percentiles").field("long").missing(1000)).get();
    assertSearchResponse(response);
    Percentiles percentiles = response.getAggregations().get("percentiles");
    assertEquals(1000, percentiles.percentile(100), 0);
  }
  // Values are {3, 5 (missing)} -> count 2, avg 4.
  public void testStats() {
    SearchResponse response = client().prepareSearch("idx")
        .addAggregation(stats("stats").field("long").missing(5)).get();
    assertSearchResponse(response);
    Stats stats = response.getAggregations().get("stats");
    assertEquals(2, stats.getCount());
    assertEquals(4, stats.getAvg(), 0);
  }
  // Unmapped geo field: bounds collapse to the single missing point (lat 2, lon 1).
  public void testUnmappedGeoBounds() {
    SearchResponse response = client().prepareSearch("idx")
        .addAggregation(geoBounds("bounds").field("non_existing_field").missing("2,1")).get();
    assertSearchResponse(response);
    GeoBounds bounds = response.getAggregations().get("bounds");
    assertThat(bounds.bottomRight().lat(), closeTo(2.0, 1E-5));
    assertThat(bounds.bottomRight().lon(), closeTo(1.0, 1E-5));
    assertThat(bounds.topLeft().lat(), closeTo(2.0, 1E-5));
    assertThat(bounds.topLeft().lon(), closeTo(1.0, 1E-5));
  }
  // Bounds span the indexed point (1,2) and the missing point (2,1).
  public void testGeoBounds() {
    SearchResponse response = client().prepareSearch("idx")
        .addAggregation(geoBounds("bounds").field("location").missing("2,1")).get();
    assertSearchResponse(response);
    GeoBounds bounds = response.getAggregations().get("bounds");
    assertThat(bounds.bottomRight().lat(), closeTo(1.0, 1E-5));
    assertThat(bounds.bottomRight().lon(), closeTo(2.0, 1E-5));
    assertThat(bounds.topLeft().lat(), closeTo(2.0, 1E-5));
    assertThat(bounds.topLeft().lon(), closeTo(1.0, 1E-5));
  }
  // Centroid of (1,2) and the missing (2,1) is (1.5, 1.5).
  public void testGeoCentroid() {
    SearchResponse response = client().prepareSearch("idx")
        .addAggregation(geoCentroid("centroid").field("location").missing("2,1")).get();
    assertSearchResponse(response);
    GeoCentroid centroid = response.getAggregations().get("centroid");
    GeoPoint point = new GeoPoint(1.5, 1.5);
    assertThat(point.lat(), closeTo(centroid.centroid().lat(), 1E-5));
    assertThat(point.lon(), closeTo(centroid.centroid().lon(), 1E-5));
  }
}
| |
package org.estatio.module.registration.dom;
import java.math.BigDecimal;
import javax.inject.Inject;
import javax.jdo.annotations.Column;
import javax.jdo.annotations.InheritanceStrategy;
import com.google.common.base.Joiner;
import org.joda.time.LocalDate;
import org.apache.isis.applib.annotation.MemberOrder;
import org.apache.isis.applib.annotation.Named;
import org.apache.isis.applib.annotation.Optional;
import org.apache.isis.applib.annotation.Optionality;
import org.apache.isis.applib.annotation.Property;
import org.incode.module.base.dom.types.DescriptionType;
import org.incode.module.base.dom.types.NameType;
import org.estatio.module.asset.dom.registration.FixedAssetRegistration;
import lombok.Getter;
import lombok.Setter;
@javax.jdo.annotations.PersistenceCapable(
schema = "dbo" // Isis' ObjectSpecId inferred from @Discriminator
)
@javax.jdo.annotations.Inheritance(
strategy = InheritanceStrategy.NEW_TABLE)
@javax.jdo.annotations.Discriminator("org.estatio.dom.asset.registration.LandRegister")
public class LandRegister extends FixedAssetRegistration {
/** Isis UI title for this land register; delegates to {@link #getName()}. */
public String title() {
    return getName();
}
/**
 * Derived display name: the cadastral identifier in the form
 * {@code comuneAmministrativo-foglio.particella.subalterno}, with null parts
 * skipped by the joiners. Falls back to "&lt;type&gt;: &lt;subject&gt;" when
 * every component is null (i.e. the joined title is empty).
 */
@Override
public String getName() {
    String title = Joiner.on("-").skipNulls().join(
            getComuneAmministrativo(),
            Joiner.on(".").skipNulls().join(
                    getFoglio(),
                    getParticella(),
                    getSubalterno()));
    // BUG FIX: the original used `title == ""`, which compares object identity.
    // Joiner.join returns a fresh String, so that check was always false and the
    // fallback title was unreachable. Compare content instead.
    if (title.isEmpty()) {
        return getContainer().titleOf(getType()).concat(": ").concat(getContainer().titleOf(getSubject()));
    }
    return title;
}
// //////////////////////////////////////
// NOTE(review): field names below are Italian land-registry (catasto) terms —
// presumably: comune = municipality, foglio = map sheet, particella = parcel,
// subalterno = sub-unit, rendita = cadastral income, categoria/classe =
// cadastral category/class, consistenza = size/consistency. Confirm with a
// domain expert before relying on these glosses.

// Administrative municipality; first component of the derived name.
@Property(optionality = Optionality.OPTIONAL)
@Column(length= NameType.Meta.MAX_LEN)
@MemberOrder(sequence = "10")
@Getter @Setter
private String comuneAmministrativo;
// //////////////////////////////////////
// Cadastral municipality (may differ from the administrative one).
@Property(optionality = Optionality.OPTIONAL)
@Column(length=NameType.Meta.MAX_LEN)
@MemberOrder(sequence = "11")
@Getter @Setter
private String comuneCatastale;
// //////////////////////////////////////
// Code identifying the cadastral municipality.
@Property(optionality = Optionality.OPTIONAL)
@Column(length=NameType.Meta.MAX_LEN)
@MemberOrder(sequence = "12")
@Getter @Setter
private String codiceComuneCatastale;
// //////////////////////////////////////
// Cadastral income; stored with 2 decimal places.
@Property(optionality = Optionality.OPTIONAL)
@javax.jdo.annotations.Column(scale = 2, allowsNull = "true")
@MemberOrder(sequence = "13")
@Getter @Setter
private BigDecimal rendita;
// //////////////////////////////////////
// Map sheet; note this property is mandatory (no OPTIONAL), unlike its siblings.
@Property
@MemberOrder(sequence = "14")
@Column(length=NameType.Meta.MAX_LEN)
@Getter @Setter
private String foglio;
// //////////////////////////////////////
// Parcel number within the sheet.
@Property(optionality = Optionality.OPTIONAL)
@MemberOrder(sequence = "15")
@Column(length=NameType.Meta.MAX_LEN)
@Getter @Setter
private String particella;
// //////////////////////////////////////
// Sub-unit within the parcel.
@Property(optionality = Optionality.OPTIONAL)
@MemberOrder(sequence = "16")
@Column(length=NameType.Meta.MAX_LEN)
@Getter @Setter
private String subalterno;
// //////////////////////////////////////
// Cadastral category.
@Property(optionality = Optionality.OPTIONAL)
@MemberOrder(sequence = "17")
@Column(length=NameType.Meta.MAX_LEN)
@Getter @Setter
private String categoria;
// //////////////////////////////////////
// Cadastral class.
@Property(optionality = Optionality.OPTIONAL)
@Column(length=NameType.Meta.MAX_LEN)
@MemberOrder(sequence = "18")
@Getter @Setter
private String classe;
// //////////////////////////////////////
// Size/consistency of the registered asset.
@Property(optionality = Optionality.OPTIONAL)
@Column(length=NameType.Meta.MAX_LEN)
@MemberOrder(sequence = "19")
@Getter @Setter
private String consistenza;
// //////////////////////////////////////
// Free-text description; also set from the change description in changeRegistration.
@Property(optionality = Optionality.OPTIONAL)
@Column(length= DescriptionType.Meta.MAX_LEN)
@MemberOrder(sequence = "20")
@Getter @Setter
private String description;
// //////////////////////////////////////
/**
 * Updates the cadastral registration data.
 *
 * <p>When a change start date is supplied, a new {@code LandRegister} is
 * created via the repository to supersede this one from that date; otherwise
 * this registration is modified in place and returned.
 */
public LandRegister changeRegistration(
        final @Named("Comune amministrativo") @Optional String comuneAmministrativo,
        final @Named("Comune catastale") @Optional String comuneCatastale,
        final @Named("Codice comuneCatastale") @Optional String codiceComuneCatastale,
        final @Named("Rendita") @Optional BigDecimal rendita,
        final @Named("Foglio") @Optional String foglio,
        final @Named("Particella") @Optional String particella,
        final @Named("Subalterno") @Optional String subalterno,
        final @Named("Categoria") @Optional String categoria,
        final @Named("Classe") @Optional String classe,
        final @Named("Consistenza") @Optional String consistenza,
        final @Named("Change start date") @Optional LocalDate changeStartDate,
        final @Named("Change description") @Optional String changeDescription) {
    // No start date: apply the changes directly to this registration.
    if (changeStartDate == null) {
        setComuneAmministrativo(comuneAmministrativo);
        setComuneCatastale(comuneCatastale);
        setCodiceComuneCatastale(codiceComuneCatastale);
        setRendita(rendita);
        setFoglio(foglio);
        setParticella(particella);
        setSubalterno(subalterno);
        setCategoria(categoria);
        setClasse(classe);
        setConsistenza(consistenza);
        setDescription(changeDescription);
        return this;
    }
    // A start date means a new registration supersedes this one from that date.
    final LandRegister registration = landRegisters.newRegistration(
            getSubject(),
            this,
            comuneAmministrativo,
            comuneCatastale,
            codiceComuneCatastale,
            rendita,
            foglio,
            particella,
            subalterno,
            categoria,
            classe,
            consistenza,
            changeStartDate,
            changeDescription);
    registration.changeDates(changeStartDate, null);
    return registration;
}
// Defaults for the changeRegistration action: each parameter is pre-filled
// with the corresponding current value of this registration.
public String default0ChangeRegistration() {
    return getComuneAmministrativo();
}
public String default1ChangeRegistration() {
    return getComuneCatastale();
}
public String default2ChangeRegistration() {
    return getCodiceComuneCatastale();
}
public BigDecimal default3ChangeRegistration() {
    return getRendita();
}
public String default4ChangeRegistration() {
    return getFoglio();
}
public String default5ChangeRegistration() {
    return getParticella();
}
public String default6ChangeRegistration() {
    return getSubalterno();
}
public String default7ChangeRegistration() {
    return getCategoria();
}
public String default8ChangeRegistration() {
    return getClasse();
}
public String default9ChangeRegistration() {
    return getConsistenza();
}
// //////////////////////////////////////
// Repository used by changeRegistration to create superseding registrations;
// injected by the framework.
@Inject
LandRegisters landRegisters;
}
| |
// Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.codeInsight.guess.impl;
import com.intellij.codeInsight.guess.GuessManager;
import com.intellij.codeInspection.dataFlow.*;
import com.intellij.codeInspection.dataFlow.instructions.InstanceofInstruction;
import com.intellij.codeInspection.dataFlow.instructions.Instruction;
import com.intellij.codeInspection.dataFlow.instructions.TypeCastInstruction;
import com.intellij.codeInspection.dataFlow.value.DfaValue;
import com.intellij.codeInspection.dataFlow.value.DfaVariableValue;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.TextRange;
import com.intellij.psi.*;
import com.intellij.psi.search.LocalSearchScope;
import com.intellij.psi.search.PsiElementProcessor;
import com.intellij.psi.search.PsiElementProcessorAdapter;
import com.intellij.psi.search.SearchScope;
import com.intellij.psi.search.searches.ClassInheritorsSearch;
import com.intellij.psi.search.searches.ReferencesSearch;
import com.intellij.psi.util.*;
import com.intellij.util.ArrayUtil;
import com.intellij.util.BitUtil;
import com.intellij.util.containers.ContainerUtil;
import com.intellij.util.containers.MultiMap;
import com.siyeh.ig.callMatcher.CallMatcher;
import com.siyeh.ig.psiutils.ExpressionUtils;
import it.unimi.dsi.fastutil.objects.Object2ObjectOpenCustomHashMap;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import java.util.*;
/**
 * Default implementation of {@link GuessManager}.
 *
 * <p>Guesses likely runtime types for expressions by two complementary means:
 * pattern-matching usages of well-known collection/Vector methods (to infer a
 * container's element type), and running an intraprocedural dataflow analysis
 * (to infer the types an expression has been cast to or checked against).
 */
public final class GuessManagerImpl extends GuessManager {
  // Signatures of collection-like methods whose argument (parameterIndex >= 0)
  // or return value (parameterIndex < 0) carries the container's element type.
  private final MethodPatternMap myMethodPatternMap = new MethodPatternMap();
  // Instance initializer: populate the pattern map once per instance.
  {
    initMethodPatterns();
  }
  // Registers (name, argument count, element-parameter index) triples;
  // an index of -1 means the return value carries the element type.
  private void initMethodPatterns() {
    // Collection
    myMethodPatternMap.addPattern(new MethodPattern("add", 1, 0));
    myMethodPatternMap.addPattern(new MethodPattern("contains", 1, 0));
    myMethodPatternMap.addPattern(new MethodPattern("remove", 1, 0));
    // Vector
    myMethodPatternMap.addPattern(new MethodPattern("add", 2, 1));
    myMethodPatternMap.addPattern(new MethodPattern("addElement", 1, 0));
    myMethodPatternMap.addPattern(new MethodPattern("elementAt", 1, -1));
    myMethodPatternMap.addPattern(new MethodPattern("firstElement", 0, -1));
    myMethodPatternMap.addPattern(new MethodPattern("lastElement", 0, -1));
    myMethodPatternMap.addPattern(new MethodPattern("get", 1, -1));
    myMethodPatternMap.addPattern(new MethodPattern("indexOf", 1, 0));
    myMethodPatternMap.addPattern(new MethodPattern("indexOf", 2, 0));
    myMethodPatternMap.addPattern(new MethodPattern("lastIndexOf", 1, 0));
    myMethodPatternMap.addPattern(new MethodPattern("lastIndexOf", 2, 0));
    myMethodPatternMap.addPattern(new MethodPattern("insertElementAt", 2, 0));
    myMethodPatternMap.addPattern(new MethodPattern("removeElement", 1, 0));
    myMethodPatternMap.addPattern(new MethodPattern("set", 2, 1));
    myMethodPatternMap.addPattern(new MethodPattern("setElementAt", 2, 0));
  }
  // Project this manager instance is bound to.
  private final Project myProject;
  public GuessManagerImpl(Project project) {
    myProject = project;
  }
  /**
   * Guesses the element type of a container expression.
   *
   * <p>If the container's type has exactly one generic argument, that argument
   * is returned directly; otherwise, for a reference to a variable, usages of
   * that variable are scanned for collection-method patterns.
   */
  @Override
  public PsiType @NotNull [] guessContainerElementType(PsiExpression containerExpr, TextRange rangeToIgnore) {
    HashSet<PsiType> typesSet = new HashSet<>();
    PsiType type = containerExpr.getType();
    PsiType elemType;
    // A generic container with exactly one type argument answers directly.
    if ((elemType = getGenericElementType(type)) != null) return new PsiType[]{elemType};
    if (containerExpr instanceof PsiReferenceExpression){
      PsiElement refElement = ((PsiReferenceExpression)containerExpr).resolve();
      if (refElement instanceof PsiVariable){
        PsiFile file = refElement.getContainingFile();
        if (file == null){
          file = containerExpr.getContainingFile(); // implicit variable in jsp
        }
        HashSet<PsiVariable> checkedVariables = new HashSet<>();
        // First pass: direct usages plus values flowing down into callees.
        addTypesByVariable(typesSet, (PsiVariable)refElement, file, checkedVariables, CHECK_USAGE | CHECK_DOWN, rangeToIgnore);
        checkedVariables.clear();
        // Second pass: values flowing up from callers into this parameter.
        addTypesByVariable(typesSet, (PsiVariable)refElement, file, checkedVariables, CHECK_UP, rangeToIgnore);
      }
    }
    return typesSet.toArray(PsiType.createArray(typesSet.size()));
  }
  // Returns the single type argument of a generic class type, or null when the
  // type is not a class type or has a different number of type parameters.
  @Nullable
  private static PsiType getGenericElementType(PsiType collectionType) {
    if (collectionType instanceof PsiClassType) {
      PsiClassType classType = (PsiClassType) collectionType;
      PsiType[] parameters = classType.getParameters();
      if (parameters.length == 1) {
        return parameters[0];
      }
    }
    return null;
  }
  // Collects candidate cast targets: dataflow conjuncts first (order kept),
  // then container-element guesses, then known derived classes.
  @Override
  public PsiType @NotNull [] guessTypeToCast(PsiExpression expr) {
    LinkedHashSet<PsiType> types = new LinkedHashSet<>(getControlFlowExpressionTypeConjuncts(expr));
    addExprTypesWhenContainerElement(types, expr);
    addExprTypesByDerivedClasses(types, expr);
    return types.toArray(PsiType.createArray(types.size()));
  }
  /**
   * Runs dataflow over the topmost enclosing block (or code fragment) and
   * maps each expression equivalent to {@code forPlace} to the types recorded
   * for it. Returns an empty map when no suitable scope exists or the
   * analysis fails.
   */
  @NotNull
  @Override
  public MultiMap<PsiExpression, PsiType> getControlFlowExpressionTypes(@NotNull PsiExpression forPlace, boolean honorAssignments) {
    PsiElement scope = DfaPsiUtil.getTopmostBlockInSameClass(forPlace);
    if (scope == null) {
      PsiFile file = forPlace.getContainingFile();
      if (!(file instanceof PsiCodeFragment)) {
        return MultiMap.empty();
      }
      scope = file;
    }
    GuessManagerRunner runner = createRunner(honorAssignments, scope);
    final ExpressionTypeInstructionVisitor visitor = new ExpressionTypeInstructionVisitor(runner, forPlace);
    RunnerResult result = runner.analyzeMethodWithInlining(scope, visitor);
    // CANCELLED is fine: the runner cancels itself once the place was visited
    // outside of all loops (see GuessManagerRunner.afterInstruction).
    if (result == RunnerResult.OK || result == RunnerResult.CANCELLED) {
      return visitor.getResult();
    }
    return MultiMap.empty();
  }
  // Computes, via dataflow, the narrowest type constraint observed for
  // forPlace, meet-ed with its declared type; null when analysis fails.
  @Nullable
  private static PsiType getTypeFromDataflow(PsiExpression forPlace, boolean honorAssignments) {
    PsiType type = forPlace.getType();
    TypeConstraint initial = type == null ? TypeConstraints.TOP : TypeConstraints.instanceOf(type);
    PsiElement scope = DfaPsiUtil.getTopmostBlockInSameClass(forPlace);
    if (scope == null) {
      PsiFile file = forPlace.getContainingFile();
      if (!(file instanceof PsiCodeFragment)) {
        return null;
      }
      scope = file;
    }
    GuessManagerRunner runner = createRunner(honorAssignments, scope);
    class Visitor extends CastTrackingVisitor {
      // Join of all states seen at forPlace; BOTTOM until the place is reached.
      TypeConstraint constraint = TypeConstraints.BOTTOM;
      @Override
      protected void beforeExpressionPush(@NotNull DfaValue value,
                                          @NotNull PsiExpression expression,
                                          @Nullable TextRange range,
                                          @NotNull DfaMemoryState state) {
        if (expression == forPlace && range == null) {
          // Re-key call-flushable values by the expression itself so the
          // state lookup below finds the recorded type.
          if (!(value instanceof DfaVariableValue) || ((DfaVariableValue)value).isFlushableByCalls()) {
            value = runner.getFactory().getVarFactory().createVariableValue(new ExpressionVariableDescriptor(expression));
          }
          constraint = constraint.join(TypeConstraint.fromDfType(state.getDfType(value)));
          runner.placeVisited();
        }
        super.beforeExpressionPush(value, expression, range, state);
      }
      @Override
      boolean isInteresting(@NotNull DfaValue value, @NotNull PsiExpression expression) {
        return (!(value instanceof DfaVariableValue) || ((DfaVariableValue)value).isFlushableByCalls()) &&
               ExpressionVariableDescriptor.EXPRESSION_HASHING_STRATEGY.equals(expression, forPlace);
      }
    }
    final Visitor visitor = new Visitor();
    RunnerResult result = runner.analyzeMethodWithInlining(scope, visitor);
    if (result == RunnerResult.OK || result == RunnerResult.CANCELLED) {
      return visitor.constraint.meet(initial).getPsiType(scope.getProject());
    }
    return null;
  }
  @NotNull
  private static GuessManagerRunner createRunner(boolean honorAssignments, PsiElement scope) {
    return new GuessManagerRunner(scope.getProject(), honorAssignments);
  }
  /**
   * Dataflow runner tuned for type guessing: lowered complexity limit,
   * optional filtering of assignment-induced states, and early cancellation
   * once the target expression has been visited outside of any loop.
   */
  private static class GuessManagerRunner extends DataFlowRunner {
    private final boolean myAssignments;
    private boolean myPlaceVisited;
    private int[] myLoopNumbers;
    GuessManagerRunner(@NotNull Project project, boolean honorAssignments) {
      super(project);
      myAssignments = honorAssignments;
    }
    @Override
    protected int getComplexityLimit() {
      // Limit analysis complexity for completion as it could be relaunched many times
      return DEFAULT_MAX_STATES_PER_BRANCH / 3;
    }
    void placeVisited() {
      myPlaceVisited = true;
    }
    @Override
    protected @NotNull List<DfaInstructionState> createInitialInstructionStates(@NotNull PsiElement psiBlock,
                                                                               @NotNull Collection<? extends DfaMemoryState> memStates,
                                                                               @NotNull ControlFlow flow) {
      // Remember loop membership per instruction for the cancellation check.
      myLoopNumbers = flow.getLoopNumbers();
      return super.createInitialInstructionStates(psiBlock, memStates, flow);
    }
    @Override
    protected void afterInstruction(Instruction instruction) {
      super.afterInstruction(instruction);
      if (myPlaceVisited && myLoopNumbers[instruction.getIndex()] == 0) {
        // We cancel the analysis first time we exit all the loops
        // after the target expression is visited (in this case,
        // we can be sure we'll not reach it again)
        cancel();
      }
    }
    @NotNull
    @Override
    protected DfaMemoryState createMemoryState() {
      return myAssignments ? super.createMemoryState() : new AssignmentFilteringMemoryState(getFactory());
    }
  }
  // Finds the outermost enclosing code block; falls back to the containing
  // code fragment when the element is not inside any code block at all.
  private static PsiElement getTopmostBlock(PsiElement scope) {
    assert scope.isValid();
    PsiElement lastScope = scope;
    while (true) {
      final PsiCodeBlock lastCodeBlock = PsiTreeUtil.getParentOfType(lastScope, PsiCodeBlock.class, true);
      if (lastCodeBlock == null) {
        break;
      }
      lastScope = lastCodeBlock;
    }
    if (lastScope == scope) {
      PsiFile file = scope.getContainingFile();
      if (file instanceof PsiCodeFragment) {
        return file;
      }
    }
    return lastScope;
  }
  // Adds up to five named inheritors of the expression's class as candidate
  // cast targets; gives up silently when the inheritor search overflows.
  private void addExprTypesByDerivedClasses(LinkedHashSet<? super PsiType> set, PsiExpression expr) {
    PsiType type = expr.getType();
    if (!(type instanceof PsiClassType)) return;
    PsiClass refClass = PsiUtil.resolveClassInType(type);
    if (refClass == null) return;
    PsiManager manager = PsiManager.getInstance(myProject);
    PsiElementProcessor.CollectElementsWithLimit<PsiClass> processor = new PsiElementProcessor.CollectElementsWithLimit<>(5);
    ClassInheritorsSearch.search(refClass).forEach(new PsiElementProcessorAdapter<>(processor));
    if (processor.isOverflow()) return;
    for (PsiClass derivedClass : processor.getCollection()) {
      if (derivedClass instanceof PsiAnonymousClass) continue;
      PsiType derivedType = JavaPsiFacade.getElementFactory(manager.getProject()).createType(derivedClass);
      set.add(derivedType);
    }
  }
  // When the expression is a call whose return value carries the container's
  // element type (e.g. list.get(i)), adds the qualifier's guessed element
  // types to the set, skipping anonymous-class types.
  private void addExprTypesWhenContainerElement(LinkedHashSet<? super PsiType> set, PsiExpression expr) {
    if (expr instanceof PsiMethodCallExpression){
      PsiMethodCallExpression callExpr = (PsiMethodCallExpression)expr;
      PsiReferenceExpression methodExpr = callExpr.getMethodExpression();
      String methodName = methodExpr.getReferenceName();
      MethodPattern pattern = myMethodPatternMap.findPattern(methodName, callExpr.getArgumentList().getExpressionCount());
      if (pattern != null && pattern.parameterIndex < 0/* return value */){
        PsiExpression qualifier = methodExpr.getQualifierExpression();
        if (qualifier != null) {
          PsiType[] types = guessContainerElementType(qualifier, null);
          for (PsiType type : types) {
            if (type instanceof PsiClassType) {
              if (((PsiClassType)type).resolve() instanceof PsiAnonymousClass) continue;
            }
            set.add(type);
          }
        }
      }
    }
  }
  // Flags for addTypesByVariable: inspect direct usages, follow a parameter
  // up to its callers, or follow an argument down into callees.
  private static final int CHECK_USAGE = 0x01;
  private static final int CHECK_UP = 0x02;
  private static final int CHECK_DOWN = 0x04;
  /**
   * Accumulates element-type guesses for {@code var} by scanning its usages
   * in {@code scopeFile}; recursion through call arguments/parameters is
   * bounded by {@code checkedVariables}.
   */
  private void addTypesByVariable(HashSet<? super PsiType> typesSet,
                                  PsiVariable var,
                                  PsiFile scopeFile,
                                  HashSet<? super PsiVariable> checkedVariables,
                                  int flags,
                                  TextRange rangeToIgnore) {
    if (!checkedVariables.add(var)) return;
    //System.out.println("analyzing usages of " + var + " in file " + scopeFile);
    SearchScope searchScope = new LocalSearchScope(scopeFile);
    if (BitUtil.isSet(flags, CHECK_USAGE) || BitUtil.isSet(flags, CHECK_DOWN)) {
      for (PsiReference varRef : ReferencesSearch.search(var, searchScope, false)) {
        PsiElement ref = varRef.getElement();
        if (BitUtil.isSet(flags, CHECK_USAGE)) {
          PsiType type = guessElementTypeFromReference(myMethodPatternMap, ref, rangeToIgnore);
          if (type != null && !(type instanceof PsiPrimitiveType)) {
            typesSet.add(type);
          }
        }
        if (BitUtil.isSet(flags, CHECK_DOWN)) {
          if (ref.getParent() instanceof PsiExpressionList && ref.getParent().getParent() instanceof PsiMethodCallExpression) { //TODO : new
            PsiExpressionList list = (PsiExpressionList)ref.getParent();
            int argIndex = ArrayUtil.indexOf(list.getExpressions(), ref);
            PsiMethodCallExpression methodCall = (PsiMethodCallExpression)list.getParent();
            PsiMethod method = (PsiMethod)methodCall.getMethodExpression().resolve();
            if (method != null) {
              PsiParameter[] parameters = method.getParameterList().getParameters();
              if (argIndex < parameters.length) {
                // Follow the value into the callee's corresponding parameter.
                addTypesByVariable(typesSet, parameters[argIndex], method.getContainingFile(), checkedVariables, flags | CHECK_USAGE,
                                   rangeToIgnore);
              }
            }
          }
        }
      }
    }
    if (BitUtil.isSet(flags, CHECK_UP)){
      if (var instanceof PsiParameter && var.getParent() instanceof PsiParameterList && var.getParent().getParent() instanceof PsiMethod){
        PsiParameterList list = (PsiParameterList)var.getParent();
        PsiParameter[] parameters = list.getParameters();
        int argIndex = -1;
        for(int i = 0; i < parameters.length; i++){
          PsiParameter parameter = parameters[i];
          if (parameter.equals(var)){
            argIndex = i;
            break;
          }
        }
        PsiMethod method = (PsiMethod)var.getParent().getParent();
        //System.out.println("analyzing usages of " + method + " in file " + scopeFile);
        for (PsiReference methodRef : ReferencesSearch.search(method, searchScope, false)) {
          PsiElement ref = methodRef.getElement();
          if (ref.getParent() instanceof PsiMethodCallExpression) {
            PsiMethodCallExpression methodCall = (PsiMethodCallExpression)ref.getParent();
            PsiExpression[] args = methodCall.getArgumentList().getExpressions();
            if (args.length <= argIndex) continue;
            PsiExpression arg = args[argIndex];
            if (arg instanceof PsiReferenceExpression) {
              PsiElement refElement = ((PsiReferenceExpression)arg).resolve();
              if (refElement instanceof PsiVariable) {
                // Follow the argument a caller passes for this parameter.
                addTypesByVariable(typesSet, (PsiVariable)refElement, scopeFile, checkedVariables, flags | CHECK_USAGE, rangeToIgnore);
              }
            }
            //TODO : constructor
          }
        }
      }
    }
  }
  // Derives an element type from one usage of a container variable: either
  // the argument at the pattern's parameter index, or — for "return value"
  // patterns — the target type of a cast applied to the call result.
  @Nullable
  private static PsiType guessElementTypeFromReference(MethodPatternMap methodPatternMap,
                                                       PsiElement ref,
                                                       TextRange rangeToIgnore) {
    PsiElement refParent = ref.getParent();
    if (refParent instanceof PsiReferenceExpression){
      PsiReferenceExpression parentExpr = (PsiReferenceExpression)refParent;
      if (ref.equals(parentExpr.getQualifierExpression()) && parentExpr.getParent() instanceof PsiMethodCallExpression){
        String methodName = parentExpr.getReferenceName();
        PsiMethodCallExpression methodCall = (PsiMethodCallExpression)parentExpr.getParent();
        PsiExpression[] args = methodCall.getArgumentList().getExpressions();
        MethodPattern pattern = methodPatternMap.findPattern(methodName, args.length);
        if (pattern != null){
          if (pattern.parameterIndex < 0){ // return value
            if (methodCall.getParent() instanceof PsiTypeCastExpression &&
                (rangeToIgnore == null || !rangeToIgnore.contains(methodCall.getTextRange()))) {
              return ((PsiTypeCastExpression)methodCall.getParent()).getType();
            }
          }
          else{
            return args[pattern.parameterIndex].getType();
          }
        }
      }
    }
    return null;
  }
  /**
   * Returns the list of type conjuncts provable for {@code expr}, filtered to
   * accessible classes; empty when nothing beyond the declared (erased) type
   * is known. Tries a cheap AST-only guess before resorting to dataflow.
   */
  @NotNull
  @Override
  public List<PsiType> getControlFlowExpressionTypeConjuncts(@NotNull PsiExpression expr, boolean honorAssignments) {
    if (expr.getType() instanceof PsiPrimitiveType) {
      return Collections.emptyList();
    }
    PsiExpression place = PsiUtil.skipParenthesizedExprDown(expr);
    if (place == null) return Collections.emptyList();
    List<PsiType> result = null;
    if (!ControlFlowAnalyzer.inlinerMayInferPreciseType(place)) {
      GuessTypeVisitor visitor = tryGuessingTypeWithoutDfa(place, honorAssignments);
      if (!visitor.isDfaNeeded()) {
        result = visitor.mySpecificType == null ?
                 Collections.emptyList() : Collections.singletonList(DfaPsiUtil.tryGenerify(expr, visitor.mySpecificType));
      }
    }
    if (result == null) {
      PsiType psiType = getTypeFromDataflow(expr, honorAssignments);
      if (psiType instanceof PsiIntersectionType) {
        result = ContainerUtil.mapNotNull(((PsiIntersectionType)psiType).getConjuncts(), type -> DfaPsiUtil.tryGenerify(expr, type));
      }
      else if (psiType != null) {
        result = Collections.singletonList(DfaPsiUtil.tryGenerify(expr, psiType));
      }
      else {
        result = Collections.emptyList();
      }
    }
    result = ContainerUtil.filter(result, t -> {
      PsiClass typeClass = PsiUtil.resolveClassInType(t);
      return typeClass == null || PsiUtil.isAccessible(typeClass, expr, null);
    });
    // A single conjunct equal to the declared erasure adds no information.
    if (result.equals(Collections.singletonList(TypeConversionUtil.erasure(expr.getType())))) {
      return Collections.emptyList();
    }
    return result;
  }
  // Feeds potentially affecting expressions/locals, in traversal order, to a
  // GuessTypeVisitor until the place is reached or DFA becomes required.
  @NotNull
  private static GuessTypeVisitor tryGuessingTypeWithoutDfa(PsiExpression place, boolean honorAssignments) {
    List<PsiElement> exprsAndVars = getPotentiallyAffectingElements(place);
    GuessTypeVisitor visitor = new GuessTypeVisitor(place, honorAssignments);
    for (PsiElement e : exprsAndVars) {
      e.accept(visitor);
      if (e == place || visitor.isDfaNeeded()) {
        break;
      }
    }
    return visitor;
  }
  // Caches, per topmost block, all expressions and local variables inside it
  // that could affect a type guess.
  private static List<PsiElement> getPotentiallyAffectingElements(PsiExpression place) {
    PsiElement topmostBlock = getTopmostBlock(place);
    return CachedValuesManager.getCachedValue(topmostBlock, () -> {
      List<PsiElement> list = SyntaxTraverser.psiTraverser(topmostBlock).filter(e -> e instanceof PsiExpression || e instanceof PsiLocalVariable).toList();
      return new CachedValueProvider.Result<>(list, topmostBlock);
    });
  }
  /**
   * Cheap AST-only type guesser: tracks assignments to the target expression
   * and flags when full dataflow is required (casts, instanceof, getClass()
   * calls, or conflicting assigned types).
   */
  private static class GuessTypeVisitor extends JavaElementVisitor {
    private static final CallMatcher OBJECT_GET_CLASS =
      CallMatcher.exactInstanceCall(CommonClassNames.JAVA_LANG_OBJECT, "getClass").parameterCount(0);
    private final @NotNull PsiExpression myPlace;
    // The single raw type assigned so far; null until an assignment is seen.
    PsiType mySpecificType;
    private boolean myNeedDfa;
    private boolean myDeclared;
    private final boolean myHonorAssignments;
    GuessTypeVisitor(@NotNull PsiExpression place, boolean honorAssignments) {
      myPlace = place;
      myHonorAssignments = honorAssignments;
    }
    // Records the (boxed, raw) type of a value assigned to the place; demands
    // DFA when two different types have been assigned.
    protected void handleAssignment(@Nullable PsiExpression expression) {
      if (!myHonorAssignments || expression == null) return;
      PsiType type = expression.getType();
      if (type instanceof PsiPrimitiveType) {
        type = ((PsiPrimitiveType)type).getBoxedType(expression);
      }
      PsiType rawType = type instanceof PsiClassType ? ((PsiClassType)type).rawType() : type;
      if (rawType == null || rawType.equals(PsiType.NULL)) return;
      if (mySpecificType == null) {
        mySpecificType = rawType;
      }
      else if (!mySpecificType.equals(rawType)) {
        myNeedDfa = true;
      }
    }
    @Override
    public void visitAssignmentExpression(PsiAssignmentExpression expression) {
      if (ExpressionVariableDescriptor.EXPRESSION_HASHING_STRATEGY.equals(expression.getLExpression(), myPlace)) {
        handleAssignment(expression.getRExpression());
      }
      super.visitAssignmentExpression(expression);
    }
    @Override
    public void visitLocalVariable(PsiLocalVariable variable) {
      if (ExpressionUtils.isReferenceTo(myPlace, variable)) {
        myDeclared = true;
        handleAssignment(variable.getInitializer());
      }
      super.visitLocalVariable(variable);
    }
    @Override
    public void visitTypeCastExpression(PsiTypeCastExpression expression) {
      PsiExpression operand = expression.getOperand();
      if (operand != null && ExpressionVariableDescriptor.EXPRESSION_HASHING_STRATEGY.equals(operand, myPlace)) {
        myNeedDfa = true;
      }
      super.visitTypeCastExpression(expression);
    }
    @Override
    public void visitMethodCallExpression(PsiMethodCallExpression call) {
      if (OBJECT_GET_CLASS.test(call)) {
        PsiExpression qualifier = ExpressionUtils.getEffectiveQualifier(call.getMethodExpression());
        if (qualifier != null && ExpressionVariableDescriptor.EXPRESSION_HASHING_STRATEGY.equals(qualifier, myPlace)) {
          myNeedDfa = true;
        }
      }
      super.visitMethodCallExpression(call);
    }
    @Override
    public void visitInstanceOfExpression(PsiInstanceOfExpression expression) {
      if (ExpressionVariableDescriptor.EXPRESSION_HASHING_STRATEGY.equals(expression.getOperand(), myPlace)) {
        myNeedDfa = true;
      }
      super.visitInstanceOfExpression(expression);
    }
    // DFA is needed when explicitly flagged, or when the assigned type differs
    // from the place's declared raw type (unless the variable was declared
    // inside the scanned range).
    public boolean isDfaNeeded() {
      if (myNeedDfa) return true;
      if (myDeclared || mySpecificType == null) return false;
      PsiType type = myPlace.getType();
      PsiType rawType = type instanceof PsiClassType ? ((PsiClassType)type).rawType() : type;
      return !mySpecificType.equals(rawType);
    }
  }
  /**
   * Instruction visitor that re-keys interesting stack values at casts and
   * instanceof checks as expression-based variables, so their refined types
   * are tracked in the memory state.
   */
  abstract static class CastTrackingVisitor extends StandardInstructionVisitor {
    @Override
    public DfaInstructionState[] visitTypeCast(TypeCastInstruction instruction, DataFlowRunner runner, DfaMemoryState memState) {
      DfaValue value = memState.pop();
      memState.push(adjustValue(runner, value, instruction.getCasted()));
      return super.visitTypeCast(instruction, runner, memState);
    }
    @Override
    public DfaInstructionState[] visitInstanceof(InstanceofInstruction instruction, DataFlowRunner runner, DfaMemoryState memState) {
      DfaValue dfaRight = memState.pop();
      DfaValue dfaLeft = memState.pop();
      memState.push(adjustValue(runner, dfaLeft, instruction.getLeft()));
      memState.push(dfaRight);
      return super.visitInstanceof(instruction, runner, memState);
    }
    private DfaValue adjustValue(DataFlowRunner runner, DfaValue value, @Nullable PsiExpression expression) {
      if (expression != null && isInteresting(value, expression)) {
        value = runner.getFactory().getVarFactory().createVariableValue(new ExpressionVariableDescriptor(expression));
      }
      return value;
    }
    // Subclasses may narrow which values get re-keyed; the default takes all.
    boolean isInteresting(@NotNull DfaValue value, @NotNull PsiExpression expression) {
      return true;
    }
  }
  /**
   * Records, whenever the target place is pushed, the type constraints of all
   * expression-keyed variables in the current memory state; the collected map
   * is later converted into per-expression type results.
   */
  private static final class ExpressionTypeInstructionVisitor extends CastTrackingVisitor {
    private final Map<DfaVariableValue, TypeConstraint> myResult = new HashMap<>();
    private final GuessManagerRunner myRunner;
    private final PsiElement myForPlace;
    private ExpressionTypeInstructionVisitor(GuessManagerRunner runner,
                                             @NotNull PsiElement forPlace) {
      myRunner = runner;
      myForPlace = PsiUtil.skipParenthesizedExprUp(forPlace);
    }
    MultiMap<PsiExpression, PsiType> getResult() {
      MultiMap<PsiExpression, PsiType> result = MultiMap.createSet(
        new Object2ObjectOpenCustomHashMap<>(ExpressionVariableDescriptor.EXPRESSION_HASHING_STRATEGY));
      Project project = myForPlace.getProject();
      myResult.forEach((value, constraint) -> {
        if (value.getDescriptor() instanceof ExpressionVariableDescriptor) {
          PsiExpression expression = ((ExpressionVariableDescriptor)value.getDescriptor()).getExpression();
          PsiType type = constraint.getPsiType(project);
          // Intersection types are flattened into individual conjuncts.
          if (type instanceof PsiIntersectionType) {
            result.putValues(expression, Arrays.asList(((PsiIntersectionType)type).getConjuncts()));
          }
          else if (type != null) {
            result.putValue(expression, type);
          }
        }
      });
      return result;
    }
    @Override
    protected void beforeExpressionPush(@NotNull DfaValue value,
                                        @NotNull PsiExpression expression,
                                        @Nullable TextRange range,
                                        @NotNull DfaMemoryState state) {
      if (range == null && myForPlace == expression) {
        ((DfaMemoryStateImpl)state).forRecordedVariableTypes((var, dfType) -> {
          myResult.merge(var, TypeConstraint.fromDfType(dfType), TypeConstraint::join);
        });
        myRunner.placeVisited();
      }
      super.beforeExpressionPush(value, expression, range, state);
    }
  }
}
| |
/*
* =========================================================================
* Copyright (c) 2002-2014 Pivotal Software, Inc. All Rights Reserved.
* This product is protected by U.S. and international copyright
* and intellectual property laws. Pivotal products are covered by
* more patents listed at http://www.pivotal.io/patents.
* =========================================================================
*/
package com.gemstone.gemfire.internal.cache.tier.sockets.command;
import java.io.IOException;
import java.io.Serializable;
import java.util.HashSet;
import java.util.Set;
import com.gemstone.gemfire.cache.Region;
import com.gemstone.gemfire.cache.execute.Function;
import com.gemstone.gemfire.cache.execute.FunctionException;
import com.gemstone.gemfire.cache.execute.FunctionInvocationTargetException;
import com.gemstone.gemfire.cache.execute.FunctionService;
import com.gemstone.gemfire.cache.operations.ExecuteFunctionOperationContext;
import com.gemstone.gemfire.i18n.LogWriterI18n;
import com.gemstone.gemfire.internal.cache.DistributedRegion;
import com.gemstone.gemfire.internal.cache.PartitionedRegion;
import com.gemstone.gemfire.internal.cache.execute.AbstractExecution;
import com.gemstone.gemfire.internal.cache.execute.DistributedRegionFunctionExecutor;
import com.gemstone.gemfire.internal.cache.execute.InternalFunctionInvocationTargetException;
import com.gemstone.gemfire.internal.cache.execute.MemberMappedArgument;
import com.gemstone.gemfire.internal.cache.execute.PartitionedRegionFunctionExecutor;
import com.gemstone.gemfire.internal.cache.execute.ServerToClientFunctionResultSender;
import com.gemstone.gemfire.internal.cache.execute.ServerToClientFunctionResultSender65;
import com.gemstone.gemfire.internal.cache.tier.CachedRegionHelper;
import com.gemstone.gemfire.internal.cache.tier.Command;
import com.gemstone.gemfire.internal.cache.tier.MessageType;
import com.gemstone.gemfire.internal.cache.tier.sockets.BaseCommand;
import com.gemstone.gemfire.internal.cache.tier.sockets.ChunkedMessage;
import com.gemstone.gemfire.internal.cache.tier.sockets.HandShake;
import com.gemstone.gemfire.internal.cache.tier.sockets.Message;
import com.gemstone.gemfire.internal.cache.tier.sockets.Part;
import com.gemstone.gemfire.internal.cache.tier.sockets.ServerConnection;
import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage;
import com.gemstone.gemfire.internal.security.AuthorizeRequest;
/**
*
* @author ymahajan
* @since 6.5
*/
public class ExecuteRegionFunction65 extends BaseCommand {
// Shared singleton instance of this command.
private final static ExecuteRegionFunction65 singleton = new ExecuteRegionFunction65();
// Returns the shared command instance used to dispatch this message type.
public static Command getCommand() {
    return singleton;
}
// Private: instances are only obtained through getCommand().
private ExecuteRegionFunction65() {
}
@Override
public void cmdExecute(Message msg, ServerConnection servConn, long start)
throws IOException {
String regionName = null;
Object function = null;
Object args = null;
MemberMappedArgument memberMappedArg = null;
byte isReExecute = 0;
Set<Object> filter = null;
byte hasResult = 0;
int removedNodesSize = 0;
Set<Object> removedNodesSet = null;
int filterSize = 0, partNumber = 0;
CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
byte functionState = 0;
try {
functionState = msg.getPart(0).getSerializedForm()[0];
if(functionState != 1) {
hasResult = (byte) ((functionState & 2) - 1);
}
else {
hasResult = functionState;
}
if (hasResult == 1) {
servConn.setAsTrue(REQUIRES_RESPONSE);
servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
}
regionName = msg.getPart(1).getString();
function = msg.getPart(2).getStringOrObject();
args = msg.getPart(3).getObject();
Part part = msg.getPart(4);
if (part != null) {
Object obj = part.getObject();
if (obj instanceof MemberMappedArgument) {
memberMappedArg = (MemberMappedArgument)obj;
}
}
isReExecute = msg.getPart(5).getSerializedForm()[0];
filterSize = msg.getPart(6).getInt();
if (filterSize != 0) {
filter = new HashSet<Object>();
partNumber = 7;
for (int i = 0; i < filterSize; i++) {
filter.add(msg.getPart(partNumber + i).getStringOrObject());
}
}
partNumber = 7 + filterSize;
removedNodesSize = msg.getPart(partNumber).getInt();
if(removedNodesSize != 0){
removedNodesSet = new HashSet<Object>();
partNumber = partNumber + 1;
for (int i = 0; i < removedNodesSize; i++) {
removedNodesSet.add(msg.getPart(partNumber + i).getStringOrObject());
}
}
}
catch (ClassNotFoundException exception) {
logger.warn(LocalizedMessage.create(LocalizedStrings.ExecuteRegionFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0, function), exception);
if (hasResult == 1) {
writeChunkedException(msg, exception, false, servConn);
servConn.setAsTrue(RESPONDED);
return;
}
}
if (function == null || regionName == null) {
String message = null;
if (function == null) {
message = LocalizedStrings.ExecuteRegionFunction_THE_INPUT_0_FOR_THE_EXECUTE_FUNCTION_REQUEST_IS_NULL.toLocalizedString("function");
}
if (regionName == null) {
message = LocalizedStrings.ExecuteRegionFunction_THE_INPUT_0_FOR_THE_EXECUTE_FUNCTION_REQUEST_IS_NULL.toLocalizedString("region");
}
logger.warn("{}: {}", servConn.getName(), message);
sendError(hasResult, msg, message, servConn);
return;
}
else {
Region region = crHelper.getRegion(regionName);
if (region == null) {
String message =
LocalizedStrings.ExecuteRegionFunction_THE_REGION_NAMED_0_WAS_NOT_FOUND_DURING_EXECUTE_FUNCTION_REQUEST
.toLocalizedString(regionName);
logger.warn("{}: {}", servConn.getName(), message);
sendError(hasResult, msg, message, servConn);
return;
}
HandShake handShake = (HandShake)servConn.getHandshake();
int earlierClientReadTimeout = handShake.getClientReadTimeout();
handShake.setClientReadTimeout(0);
ServerToClientFunctionResultSender resultSender = null;
Function functionObject = null;
try {
if (function instanceof String) {
functionObject = FunctionService.getFunction((String)function);
if (functionObject == null) {
String message = LocalizedStrings.
ExecuteRegionFunction_THE_FUNCTION_0_HAS_NOT_BEEN_REGISTERED
.toLocalizedString(function);
logger.warn("{}: {}", servConn.getName(), message);
sendError(hasResult, msg, message, servConn);
return;
}
else {
byte functionStateOnServerSide = AbstractExecution.getFunctionState(
functionObject.isHA(), functionObject.hasResult(),
functionObject.optimizeForWrite());
if (logger.isDebugEnabled()) {
logger.debug("Function State on server side: {} on client: {}", functionStateOnServerSide, functionState);
}
if (functionStateOnServerSide != functionState) {
String message = LocalizedStrings.FunctionService_FUNCTION_ATTRIBUTE_MISMATCH_CLIENT_SERVER
.toLocalizedString(function);
logger.warn("{}: {}", servConn.getName(), message);
sendError(hasResult, msg, message, servConn);
return;
}
}
}
else {
functionObject = (Function)function;
}
// check if the caller is authorized to do this operation on server
AuthorizeRequest authzRequest = servConn.getAuthzRequest();
final String functionName = functionObject.getId();
final String regionPath = region.getFullPath();
ExecuteFunctionOperationContext executeContext = null;
if (authzRequest != null) {
executeContext = authzRequest.executeFunctionAuthorize(functionName,
regionPath, filter, args, functionObject.optimizeForWrite());
}
//Construct execution
AbstractExecution execution = (AbstractExecution)FunctionService.onRegion(region);
ChunkedMessage m = servConn.getFunctionResponseMessage();
m.setTransactionId(msg.getTransactionId());
resultSender = new ServerToClientFunctionResultSender65(m,
MessageType.EXECUTE_REGION_FUNCTION_RESULT, servConn,functionObject,executeContext);
if (execution instanceof PartitionedRegionFunctionExecutor) {
execution = new PartitionedRegionFunctionExecutor(
(PartitionedRegion)region, filter, args, memberMappedArg,
resultSender, removedNodesSet, false);
}
else {
execution = new DistributedRegionFunctionExecutor(
(DistributedRegion)region, filter, args, memberMappedArg,
resultSender);
}
if (isReExecute == 1) {
execution = execution.setIsReExecute();
}
if (logger.isDebugEnabled()) {
logger.debug("Executing Function: {} on Server: {} with Execution: {} functionState={} reexecute={} hasResult={}", functionObject.getId(), servConn, execution, functionState, isReExecute, hasResult);
}
if (hasResult == 1) {
if (function instanceof String) {
switch (functionState) {
case AbstractExecution.NO_HA_HASRESULT_NO_OPTIMIZEFORWRITE:
execution.execute((String)function, true, false, false).getResult();
break;
case AbstractExecution.HA_HASRESULT_NO_OPTIMIZEFORWRITE:
execution.execute((String)function, true, true, false).getResult();
break;
case AbstractExecution.HA_HASRESULT_OPTIMIZEFORWRITE:
execution.execute((String)function,true, true, true).getResult();
break;
case AbstractExecution.NO_HA_HASRESULT_OPTIMIZEFORWRITE:
execution.execute((String)function, true, false, true).getResult();
break;
}
}
else {
execution.execute(functionObject).getResult();
}
}else {
if (function instanceof String) {
switch (functionState) {
case AbstractExecution.NO_HA_NO_HASRESULT_NO_OPTIMIZEFORWRITE:
execution.execute((String)function, false, false, false);
break;
case AbstractExecution.NO_HA_NO_HASRESULT_OPTIMIZEFORWRITE:
execution.execute((String)function, false, false, true);
break;
}
}
else {
execution.execute(functionObject);
}
}
}
catch (IOException ioe) {
logger.warn(LocalizedMessage.create(LocalizedStrings.ExecuteRegionFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0, function), ioe);
final String message = LocalizedStrings.
ExecuteRegionFunction_SERVER_COULD_NOT_SEND_THE_REPLY
.toLocalizedString();
sendException(hasResult, msg, message, servConn,ioe);
}
catch (FunctionException fe) {
String message = fe.getMessage();
if (fe.getCause() instanceof FunctionInvocationTargetException) {
if (fe.getCause() instanceof InternalFunctionInvocationTargetException) {
// Fix for #44709: User should not be aware of
// InternalFunctionInvocationTargetException. No instance of
// InternalFunctionInvocationTargetException is giving useful
// information to user to take any corrective action hence logging
// this at fine level logging
// 1> When bucket is moved
// 2> In case of HA a FunctionInvocationTargetException is thrown. Since
// it is HA, the function will be re-executed on the right node
// 3> Multiple target nodes found for single hop operation
// 4> in case of HA member departed
if (logger.isDebugEnabled()) {
logger.debug(LocalizedMessage.create(LocalizedStrings.ExecuteFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0, new Object[] { function }), fe);
}
}
else if (functionObject.isHA()) {
logger.warn(LocalizedMessage.create(LocalizedStrings.ExecuteRegionFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0, function + " :" + message));
}
else {
logger.warn(LocalizedMessage.create(LocalizedStrings.ExecuteRegionFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0, function), fe);
}
resultSender.setException(fe);
}
else {
logger.warn(LocalizedMessage.create(LocalizedStrings.ExecuteRegionFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0, function), fe);
sendException(hasResult, msg, message, servConn, fe);
}
}
catch (Exception e) {
logger.warn(LocalizedMessage.create(LocalizedStrings.ExecuteRegionFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0, function), e);
String message = e.getMessage();
sendException(hasResult, msg, message, servConn,e);
}
finally{
handShake.setClientReadTimeout(earlierClientReadTimeout);
}
}
}
/**
 * Sends an exception reply for a function execution back to the client.
 * A reply is only written when the client expects results
 * ({@code hasResult == 1}); fire-and-forget executions get no reply.
 * Writes are serialized by synchronizing on the message.
 */
private void sendException(byte hasResult, Message msg, String message,
    ServerConnection servConn, Throwable e) throws IOException {
  if (hasResult != 1) {
    return; // client did not ask for a result; nothing to send back
  }
  synchronized (msg) {
    writeFunctionResponseException(msg, MessageType.EXCEPTION, message,
        servConn, e);
    servConn.setAsTrue(RESPONDED);
  }
}
/**
 * Sends an error reply for a function execution back to the client.
 * A reply is only written when the client expects results
 * ({@code hasResult == 1}); fire-and-forget executions get no reply.
 * Writes are serialized by synchronizing on the message.
 */
private void sendError(byte hasResult, Message msg, String message,
    ServerConnection servConn) throws IOException {
  if (hasResult != 1) {
    return; // client did not ask for a result; nothing to send back
  }
  synchronized (msg) {
    writeFunctionResponseError(msg,
        MessageType.EXECUTE_REGION_FUNCTION_ERROR, message, servConn);
    servConn.setAsTrue(RESPONDED);
  }
}
/**
 * Writes an exception chunk of the function response to the client.
 * If the chunked function response header has already been sent, the exception
 * is appended as the last chunk of the in-progress message; otherwise a fresh
 * chunked response of the given message type is started. When the cause is an
 * {@link InternalFunctionInvocationTargetException}, the set of failed nodes
 * is included as an extra part so the caller can react to the failed members.
 *
 * @param origMsg the request message (used for the transaction id)
 * @param messageType message type for a newly-started chunked response
 * @param message error text (currently unused; the exception itself is sent)
 * @param servConn the connection to write to
 * @param e the exception to propagate to the client
 * @throws IOException if writing the chunk to the client fails
 */
protected static void writeFunctionResponseException(Message origMsg,
    int messageType, String message, ServerConnection servConn, Throwable e)
    throws IOException {
  ChunkedMessage functionResponseMsg = servConn.getFunctionResponseMessage();
  ChunkedMessage chunkedResponseMsg = servConn.getChunkedResponseMessage();
  if (functionResponseMsg.headerHasBeenSent()) {
    // Reply already streaming: append the exception as the final chunk.
    int numParts = addExceptionParts(functionResponseMsg, e);
    if (logger.isDebugEnabled()) {
      logger.debug("{}: Sending exception chunk while reply in progress: ", servConn.getName(), e);
    }
    functionResponseMsg.setServerConnection(servConn);
    functionResponseMsg.setLastChunkAndNumParts(true, numParts);
    functionResponseMsg.sendChunk(servConn);
  }
  else {
    // No reply started yet: open a new chunked response for the exception.
    chunkedResponseMsg.setMessageType(messageType);
    chunkedResponseMsg.setTransactionId(origMsg.getTransactionId());
    chunkedResponseMsg.sendHeader();
    int numParts = addExceptionParts(chunkedResponseMsg, e);
    if (logger.isDebugEnabled()) {
      logger.debug("{}: Sending exception chunk: ", servConn.getName(), e);
    }
    chunkedResponseMsg.setServerConnection(servConn);
    chunkedResponseMsg.setLastChunkAndNumParts(true, numParts);
    chunkedResponseMsg.sendChunk(servConn);
  }
}

/**
 * Adds the exception parts shared by both reply paths of
 * {@link #writeFunctionResponseException}: the exception object and its stack
 * trace, plus the failed-node set when the cause is an
 * {@link InternalFunctionInvocationTargetException}.
 *
 * @return the number of parts added (2 or 3)
 */
private static int addExceptionParts(ChunkedMessage responseMsg, Throwable e) {
  if (e instanceof FunctionException
      && e.getCause() instanceof InternalFunctionInvocationTargetException) {
    responseMsg.setNumberOfParts(3);
    responseMsg.addObjPart(e);
    responseMsg.addStringPart(BaseCommand.getExceptionTrace(e));
    InternalFunctionInvocationTargetException fe = (InternalFunctionInvocationTargetException)e
        .getCause();
    responseMsg.addObjPart(fe.getFailedNodeSet());
    return 3;
  }
  responseMsg.setNumberOfParts(2);
  responseMsg.addObjPart(e);
  responseMsg.addStringPart(BaseCommand.getExceptionTrace(e));
  return 2;
}
}
| |
/*
* (c) Copyright Christian P. Fries, Germany. Contact: email@christian-fries.de.
*
* Created on 12.07.2014
*/
package net.finmath.optimizer;
import java.util.ArrayList;
import java.util.Arrays;
import org.junit.Assert;
import org.junit.Test;
/**
* Unit tests for the LevenbergMarquardt optimizer.
*
* @author Christian Fries
*/
public class LevenbergMarquardtTest {

	/**
	 * Calibrates the linear model f(p) = (p1, 2 p0 + p1) to the target values (5, 10),
	 * whose exact solution is p0 = 2.5, p1 = 5. Afterwards clones the optimizer with
	 * modified target values (keeping the objective function) and checks the solution
	 * of the cloned problem.
	 *
	 * @throws SolverException Thrown if the solver fails to find a solution.
	 * @throws CloneNotSupportedException Thrown if the optimizer cannot be cloned.
	 */
	@Test
	public void testSmallLinearSystem() throws CloneNotSupportedException, SolverException {
		final LevenbergMarquardt optimizer = new LevenbergMarquardt() {
			private static final long serialVersionUID = -6582160713209444489L;

			// Objective function: values = (p1, 2*p0 + p1).
			@Override
			public void setValues(final double[] parameters, final double[] values) {
				values[0] = parameters[0] * 0.0 + parameters[1];
				values[1] = parameters[0] * 2.0 + parameters[1];
			}
		};

		// Set solver parameters
		optimizer.setInitialParameters(new double[] { 0, 0 });
		optimizer.setWeights(new double[] { 1, 1 });
		optimizer.setMaxIteration(100);
		optimizer.setTargetValues(new double[] { 5, 10 });

		optimizer.run();

		final double[] bestParameters = optimizer.getBestFitParameters();
		System.out.println("The solver for problem 1 required " + optimizer.getIterations() + " iterations. Accuracy is " + optimizer.getRootMeanSquaredError() + ". The best fit parameters are:");
		for (int i = 0; i < bestParameters.length; i++) {
			System.out.println("\tparameter[" + i + "]: " + bestParameters[i]);
		}
		System.out.println();

		// Exact solution of the 2x2 linear system.
		Assert.assertTrue(Math.abs(bestParameters[0] - 2.5) < 1E-12);
		Assert.assertTrue(Math.abs(bestParameters[1] - 5.0) < 1E-12);

		/*
		 * Creating a clone, continuing the search with new target values.
		 * Note that we do not re-define the setValues method.
		 */
		final Optimizer optimizer2 = optimizer.getCloneWithModifiedTargetValues(new double[] { 5.1, 10.2 }, new double[] { 1, 1 }, true);
		optimizer2.run();

		final double[] bestParameters2 = optimizer2.getBestFitParameters();
		System.out.println("The solver for problem 2 required " + optimizer2.getIterations() + " iterations. Accuracy is " + optimizer2.getRootMeanSquaredError() + ". The best fit parameters are:");
		for (int i = 0; i < bestParameters2.length; i++) {
			System.out.println("\tparameter[" + i + "]: " + bestParameters2[i]);
		}
		System.out.println("________________________________________________________________________________");
		System.out.println();

		// The linearly scaled targets give proportionally scaled parameters.
		Assert.assertTrue(Math.abs(bestParameters2[0] - 2.55) < 1E-12);
		Assert.assertTrue(Math.abs(bestParameters2[1] - 5.10) < 1E-12);
	}

	/**
	 * Calibrates a small non-linear model using the multi-threaded constructor
	 * (10 worker threads) and checks that the residual error is small.
	 *
	 * @throws SolverException Thrown if the solver fails to find a solution.
	 */
	@Test
	public void testMultiThreaddedOptimizer() throws SolverException {
		final LevenbergMarquardt optimizer = new LevenbergMarquardt(
				new double[] { 0, 0, 0 },	// Initial parameters
				new double[] { 5, 10, 2 },	// Target values
				100,						// Max iterations
				10							// Number of threads
				) {
			private static final long serialVersionUID = -4656732051928259036L;

			// Override your objective function here
			@Override
			public void setValues(final double[] parameters, final double[] values) {
				values[0] = 1.0 * parameters[0] + 2.0 * parameters[1] + parameters[2] + parameters[0] * parameters[1];
				values[1] = 2.0 * parameters[0] + 1.0 * parameters[1] + parameters[2] + parameters[1] * parameters[2];
				values[2] = 3.0 * parameters[0] + 0.0 * parameters[1] + parameters[2];
			}
		};

		optimizer.run();

		final double[] bestParameters = optimizer.getBestFitParameters();
		System.out.println("The solver for problem 3 required " + optimizer.getIterations() + " iterations. Accuracy is " + optimizer.getRootMeanSquaredError() + ". The best fit parameters are:");
		for (int i = 0; i < bestParameters.length; i++) {
			System.out.println("\tparameter[" + i + "]: " + bestParameters[i]);
		}

		// Evaluate the model at the best fit parameters for diagnostic output.
		final double[] values = new double[3];
		optimizer.setValues(bestParameters, values);
		for (int i = 0; i < bestParameters.length; i++) {
			System.out.println("\tvalue[" + i + "]: " + values[i]);
		}
		System.out.println("________________________________________________________________________________");
		System.out.println();

		// Non-linear problem: only require a small root-mean-squared residual.
		Assert.assertTrue(optimizer.getRootMeanSquaredError() < 1E-1);
	}

	/**
	 * Minimizes the Rosenbrock function, written as a two-dimensional
	 * least-squares problem with residuals (10 (y - x^2), 1 - x) and target (0, 0).
	 * The minimum is at (1, 1).
	 *
	 * @throws SolverException Thrown if the solver fails to find a solution.
	 */
	@Test
	public void testRosenbrockFunction() throws SolverException {
		final LevenbergMarquardt optimizer = new LevenbergMarquardt(
				new double[] { 0.5, 0.5 },	// Initial parameters
				new double[] { 0.0, 0.0 },	// Target values
				100,						// Max iterations
				10							// Number of threads
				) {
			private static final long serialVersionUID = 1636120150299382088L;

			// Override your objective function here
			@Override
			public void setValues(final double[] parameters, final double[] values) {
				values[0] = 10.0 * (parameters[1] - parameters[0]*parameters[0]);
				values[1] = 1.0 - parameters[0];
			}
		};

		optimizer.run();

		final double[] bestParameters = optimizer.getBestFitParameters();
		System.out.println("The solver for problem 'Rosebrock' required " + optimizer.getIterations() + " iterations. Accuracy is " + optimizer.getRootMeanSquaredError() + ". The best fit parameters are:");
		for (int i = 0; i < bestParameters.length; i++) {
			System.out.println("\tparameter[" + i + "]: " + bestParameters[i]);
		}

		// Evaluate the residuals at the best fit parameters for diagnostic output.
		final double[] values = new double[2];
		optimizer.setValues(bestParameters, values);
		for (int i = 0; i < values.length; i++) {
			System.out.println("\tvalue[" + i + "]: " + values[i]);
		}
		System.out.println("________________________________________________________________________________");
		System.out.println();

		// Known global minimum of the Rosenbrock function.
		Assert.assertTrue(Math.abs(bestParameters[0] - 1.0) < 1E-10);
		Assert.assertTrue(Math.abs(bestParameters[1] - 1.0) < 1E-10);
	}

	/**
	 * Same Rosenbrock minimization as {@link #testRosenbrockFunction()}, but
	 * exercising the constructor taking {@code List<Number>} arguments for the
	 * initial parameters and target values.
	 *
	 * @throws SolverException Thrown if the solver fails to find a solution.
	 */
	@Test
	public void testRosenbrockFunctionWithList() throws SolverException {
		final ArrayList<Number> initialParams = new ArrayList<>();
		initialParams.add(0.5);
		initialParams.add(0.5);

		final ArrayList<Number> targetValues = new ArrayList<>();
		targetValues.add(0.0);
		targetValues.add(0.0);

		final LevenbergMarquardt optimizer = new LevenbergMarquardt(
				initialParams,	// Initial parameters
				targetValues,	// Target values
				100,			// Max iterations
				10				// Number of threads
				) {
			private static final long serialVersionUID = 5999706680609011046L;

			// Override your objective function here
			@Override
			public void setValues(final double[] parameters, final double[] values) {
				values[0] = 10.0 * (parameters[1] - parameters[0]*parameters[0]);
				values[1] = 1.0 - parameters[0];
			}
		};

		optimizer.run();

		final double[] bestParameters = optimizer.getBestFitParameters();
		System.out.println("The solver for problem 'Rosebrock' required " + optimizer.getIterations() + " iterations. Accuracy is " + optimizer.getRootMeanSquaredError() + ". The best fit parameters are:");
		for (int i = 0; i < bestParameters.length; i++) {
			System.out.println("\tparameter[" + i + "]: " + bestParameters[i]);
		}

		// Evaluate the residuals at the best fit parameters for diagnostic output.
		final double[] values = new double[2];
		optimizer.setValues(bestParameters, values);
		for (int i = 0; i < values.length; i++) {
			System.out.println("\tvalue[" + i + "]: " + values[i]);
		}
		System.out.println("________________________________________________________________________________");
		System.out.println();

		// Known global minimum of the Rosenbrock function.
		Assert.assertTrue(Math.abs(bestParameters[0] - 1.0) < 1E-10);
		Assert.assertTrue(Math.abs(bestParameters[1] - 1.0) < 1E-10);
	}

	/**
	 * Optimization of booth function \( f(x,y) = \left(x+2y-7\right)^{2}+\left(2x+y-5\right)^{2} \).
	 * The solution of \( f(x,y) = 0 \) is \( x=1 \), \( y=3 \).
	 *
	 * The test uses a finite difference approximation for the derivative.
	 *
	 * @throws SolverException Thrown if the solver fails to find a solution.
	 */
	@Test
	public void testBoothFunction() throws SolverException {
		final int numberOfParameters = 2;

		final double[] initialParameters = new double[numberOfParameters];
		final double[] parameterSteps = new double[numberOfParameters];
		Arrays.fill(initialParameters, 2.0);
		// Step size for the finite difference approximation of the derivative.
		Arrays.fill(parameterSteps, 1E-8);

		final double[] targetValues = new double[] { 0.0 };

		final int maxIteration = 1000;

		final LevenbergMarquardt optimizer = new LevenbergMarquardt(
				initialParameters,
				targetValues,
				maxIteration, null) {
			private static final long serialVersionUID = -282626938650139518L;

			// Booth's function as a single residual to be driven to zero.
			@Override
			public void setValues(final double[] parameters, final double[] values) {
				values[0] = Math.pow(parameters[0] + 2* parameters[1] - 7,2) + Math.pow(2 * parameters[0] + parameters[1] - 5,2);
			}
		};
		optimizer.setParameterSteps(parameterSteps);

		// Set solver parameters

		optimizer.run();

		final double[] bestParameters = optimizer.getBestFitParameters();
		System.out.println("The solver for Booth's function required " + optimizer.getIterations() + " iterations. The best fit parameters are:");
		for (int i = 0; i < bestParameters.length; i++) {
			System.out.println("\tparameter[" + i + "]: " + bestParameters[i]);
		}
		System.out.println("The solver accuracy is " + optimizer.getRootMeanSquaredError());
		System.out.println("________________________________________________________________________________");
		System.out.println();

		Assert.assertEquals(0.0, optimizer.getRootMeanSquaredError(), 2E-4);
	}

	/**
	 * Optimization of booth function \( f(x,y) = \left(x+2y-7\right)^{2}+\left(2x+y-5\right)^{2} \).
	 * The solution of \( f(x,y) = 0 \) is \( x=1 \), \( y=3 \).
	 *
	 * The test uses an analytic calculation of the derivative.
	 *
	 * @throws SolverException Thrown if the solver fails to find a solution.
	 */
	@Test
	public void testBoothFunctionWithAnalyticDerivative() throws SolverException {
		final int numberOfParameters = 2;

		final double[] initialParameters = new double[numberOfParameters];
		Arrays.fill(initialParameters, 2.0);

		final double[] targetValues = new double[] { 0.0 };

		final int maxIteration = 1000;

		final LevenbergMarquardt optimizer = new LevenbergMarquardt(initialParameters, targetValues, maxIteration, null) {
			private static final long serialVersionUID = -282626938650139518L;

			// Booth's function as a single residual to be driven to zero.
			@Override
			public void setValues(final double[] parameters, final double[] values) {
				values[0] = Math.pow(parameters[0] + 2* parameters[1] - 7,2) + Math.pow(2 * parameters[0] + parameters[1] - 5,2);
			}

			// Analytic partial derivatives of Booth's function (chain rule).
			@Override
			public void setDerivatives(final double[] parameters, final double[][] derivatives) {
				derivatives[0][0] = Math.pow(parameters[0] + 2 * parameters[1] - 7,1) * 2 + Math.pow(2 * parameters[0] + parameters[1] - 5,1) * 4;
				derivatives[1][0] = Math.pow(parameters[0] + 2 * parameters[1] - 7,1) * 4 + Math.pow(2 * parameters[0] + parameters[1] - 5,1) * 2;
			}
		};

		// Set solver parameters

		optimizer.run();

		final double[] bestParameters = optimizer.getBestFitParameters();
		System.out.println("The solver for Booth's function with analytic derivative required " + optimizer.getIterations() + " iterations. The best fit parameters are:");
		for (int i = 0; i < bestParameters.length; i++) {
			System.out.println("\tparameter[" + i + "]: " + bestParameters[i]);
		}
		System.out.println("The solver accuracy is " + optimizer.getRootMeanSquaredError());
		System.out.println("________________________________________________________________________________");
		System.out.println();

		Assert.assertEquals(0.0, optimizer.getRootMeanSquaredError(), 2E-4);
	}
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.phoenix.end2end;
import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.compat.hbase.coprocessor.CompatBaseScannerRegionObserver;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.mapreduce.index.IndexTool;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.util.*;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import java.io.IOException;
import java.sql.*;
import java.util.Map;
import java.util.Properties;
import java.util.Random;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import static org.apache.phoenix.mapreduce.index.PhoenixIndexToolJobCounters.AFTER_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT;
import static org.apache.phoenix.mapreduce.index.PhoenixIndexToolJobCounters.AFTER_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT;
import static org.apache.phoenix.mapreduce.index.PhoenixIndexToolJobCounters.AFTER_REBUILD_INVALID_INDEX_ROW_COUNT;
import static org.apache.phoenix.mapreduce.index.PhoenixIndexToolJobCounters.AFTER_REBUILD_MISSING_INDEX_ROW_COUNT;
import static org.apache.phoenix.mapreduce.index.PhoenixIndexToolJobCounters.AFTER_REBUILD_VALID_INDEX_ROW_COUNT;
import static org.apache.phoenix.mapreduce.index.PhoenixIndexToolJobCounters.BEFORE_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT;
import static org.apache.phoenix.mapreduce.index.PhoenixIndexToolJobCounters.BEFORE_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT;
import static org.apache.phoenix.mapreduce.index.PhoenixIndexToolJobCounters.BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT;
import static org.apache.phoenix.mapreduce.index.PhoenixIndexToolJobCounters.BEFORE_REBUILD_MISSING_INDEX_ROW_COUNT;
import static org.apache.phoenix.mapreduce.index.PhoenixIndexToolJobCounters.BEFORE_REBUILD_OLD_INDEX_ROW_COUNT;
import static org.apache.phoenix.mapreduce.index.PhoenixIndexToolJobCounters.BEFORE_REBUILD_UNKNOWN_INDEX_ROW_COUNT;
import static org.apache.phoenix.mapreduce.index.PhoenixIndexToolJobCounters.BEFORE_REBUILD_UNVERIFIED_INDEX_ROW_COUNT;
import static org.apache.phoenix.mapreduce.index.PhoenixIndexToolJobCounters.REBUILT_INDEX_ROW_COUNT;
import static org.junit.Assert.*;
@Category(NeedsOwnMiniClusterTest.class)
@RunWith(RunUntilFailure.class)
public class ConcurrentMutationsExtendedIT extends ParallelStatsDisabledIT {
private static final Random RAND = new Random(5);
private static final String MVCC_LOCK_TEST_TABLE_PREFIX = "MVCCLOCKTEST_";
private static final String LOCK_TEST_TABLE_PREFIX = "LOCKTEST_";
private static final int ROW_LOCK_WAIT_TIME = 10000;
private static final int MAX_LOOKBACK_AGE = 1000000;
private final Object lock = new Object();
/**
 * Starts the test driver with an index row age threshold of 0 ms (so that
 * unverified index rows are repaired or deleted immediately) and a large max
 * lookback window for the rebuild/verification runs.
 */
@BeforeClass
public static synchronized void doSetup() throws Exception {
    // Two properties are added below; size the map for two entries.
    Map<String, String> props = Maps.newHashMapWithExpectedSize(2);
    props.put(QueryServices.GLOBAL_INDEX_ROW_AGE_THRESHOLD_TO_DELETE_MS_ATTRIB, Long.toString(0));
    props.put(CompatBaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY,
        Integer.toString(MAX_LOOKBACK_AGE));
    setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
}
/**
 * Verifies the consistency of the given index against its data table in four
 * stages: (1) verify-only run over the raw index rows, (2) index scrutiny
 * (which also triggers read repair), (3) full rebuild with after-verification,
 * and (4) truncate + rebuild + scrutiny, asserting that every stage reports
 * zero invalid/missing/unknown rows.
 *
 * @return the number of index rows found by the scrutiny run
 */
static long verifyIndexTable(String tableName, String indexName,
        Connection conn) throws Exception {
    // This checks the state of every raw index row without rebuilding any row
    IndexTool indexTool = IndexToolIT.runIndexTool(false, "", tableName,
            indexName, null, 0, IndexTool.IndexVerifyType.ONLY);
    System.out.println(indexTool.getJob().getCounters());
    assertEquals(0, indexTool.getJob().getCounters().findCounter(REBUILT_INDEX_ROW_COUNT).getValue());
    assertEquals(0, indexTool.getJob().getCounters().findCounter(BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT).getValue());
    assertEquals(0, indexTool.getJob().getCounters().findCounter(BEFORE_REBUILD_MISSING_INDEX_ROW_COUNT).getValue());
    assertEquals(0, indexTool.getJob().getCounters().findCounter(BEFORE_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT).getValue());
    assertEquals(0, indexTool.getJob().getCounters().findCounter(BEFORE_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT).getValue());
    assertEquals(0, indexTool.getJob().getCounters().findCounter(BEFORE_REBUILD_OLD_INDEX_ROW_COUNT).getValue());
    assertEquals(0, indexTool.getJob().getCounters().findCounter(BEFORE_REBUILD_UNKNOWN_INDEX_ROW_COUNT).getValue());
    // This checks the state of an index row after it is repaired
    long actualRowCount = IndexScrutiny.scrutinizeIndex(conn, tableName, indexName);
    // We want to check the index rows again as they may be modified by the read repair
    indexTool = IndexToolIT.runIndexTool(false, "", tableName, indexName,
            null, 0, IndexTool.IndexVerifyType.ONLY);
    System.out.println(indexTool.getJob().getCounters());
    assertEquals(0, indexTool.getJob().getCounters().findCounter(REBUILT_INDEX_ROW_COUNT).getValue());
    assertEquals(0, indexTool.getJob().getCounters().findCounter(BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT).getValue());
    assertEquals(0, indexTool.getJob().getCounters().findCounter(BEFORE_REBUILD_MISSING_INDEX_ROW_COUNT).getValue());
    assertEquals(0, indexTool.getJob().getCounters().findCounter(BEFORE_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT).getValue());
    assertEquals(0, indexTool.getJob().getCounters().findCounter(BEFORE_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT).getValue());
    // The index scrutiny run will trigger index repair on all unverified rows, and those rows
    // will be made verified or deleted (since the age threshold is set to zero ms for these tests)
    assertEquals(0, indexTool.getJob().getCounters().findCounter(BEFORE_REBUILD_UNVERIFIED_INDEX_ROW_COUNT).getValue());
    assertEquals(0, indexTool.getJob().getCounters().findCounter(BEFORE_REBUILD_OLD_INDEX_ROW_COUNT).getValue());
    assertEquals(0, indexTool.getJob().getCounters().findCounter(BEFORE_REBUILD_UNKNOWN_INDEX_ROW_COUNT).getValue());
    // Now we rebuild the entire index table and expect that it is still good after the full rebuild
    indexTool = IndexToolIT.runIndexTool(false, "", tableName, indexName,
            null, 0, IndexTool.IndexVerifyType.AFTER);
    assertEquals(indexTool.getJob().getCounters().findCounter(AFTER_REBUILD_VALID_INDEX_ROW_COUNT).getValue(),
            indexTool.getJob().getCounters().findCounter(REBUILT_INDEX_ROW_COUNT).getValue());
    assertEquals(0, indexTool.getJob().getCounters().findCounter(AFTER_REBUILD_INVALID_INDEX_ROW_COUNT).getValue());
    assertEquals(0, indexTool.getJob().getCounters().findCounter(AFTER_REBUILD_MISSING_INDEX_ROW_COUNT).getValue());
    assertEquals(0, indexTool.getJob().getCounters().findCounter(AFTER_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT).getValue());
    assertEquals(0, indexTool.getJob().getCounters().findCounter(AFTER_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT).getValue());
    // Truncate, rebuild and verify the index table
    PTable pIndexTable = PhoenixRuntime.getTable(conn, indexName);
    TableName physicalTableName = TableName.valueOf(pIndexTable.getPhysicalName().getBytes());
    PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class);
    try (Admin admin = pConn.getQueryServices().getAdmin()) {
        admin.disableTable(physicalTableName);
        admin.truncateTable(physicalTableName, true);
    }
    indexTool = IndexToolIT.runIndexTool(false, "", tableName, indexName,
            null, 0, IndexTool.IndexVerifyType.AFTER);
    assertEquals(0, indexTool.getJob().getCounters().findCounter(AFTER_REBUILD_INVALID_INDEX_ROW_COUNT).getValue());
    assertEquals(0, indexTool.getJob().getCounters().findCounter(AFTER_REBUILD_MISSING_INDEX_ROW_COUNT).getValue());
    assertEquals(0, indexTool.getJob().getCounters().findCounter(AFTER_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT).getValue());
    assertEquals(0, indexTool.getJob().getCounters().findCounter(AFTER_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT).getValue());
    // The row count after the truncate + rebuild must match the pre-truncate count.
    long actualRowCountAfterCompaction = IndexScrutiny.scrutinizeIndex(conn, tableName, indexName);
    assertEquals(actualRowCount, actualRowCountAfterCompaction);
    return actualRowCount;
}
/**
 * Interleaves full-table DELETEs with batched UPSERTs from two threads that
 * serialize their statements on a shared lock, then verifies the index is
 * consistent with the data table.
 */
@Test
public void testSynchronousDeletesAndUpsertValues() throws Exception {
    final String tableName = generateUniqueName();
    final String indexName = generateUniqueName();
    Connection conn = DriverManager.getConnection(getUrl());
    conn.createStatement().execute("CREATE TABLE " + tableName
            + "(k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, v1 INTEGER, CONSTRAINT pk PRIMARY KEY (k1,k2)) COLUMN_ENCODED_BYTES = 0");
    TestUtil.addCoprocessor(conn, tableName, DelayingRegionObserver.class);
    conn.createStatement().execute("CREATE INDEX " + indexName + " ON " + tableName + "(v1)");
    final CountDownLatch doneSignal = new CountDownLatch(2);
    // Deleter thread: repeatedly wipes the table while holding the shared lock.
    Runnable r1 = new Runnable() {
        @Override public void run() {
            try {
                Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
                for (int i = 0; i < 50; i++) {
                    Thread.sleep(20);
                    synchronized (lock) {
                        try (PhoenixConnection conn = DriverManager.getConnection(getUrl(), props)
                                .unwrap(PhoenixConnection.class)) {
                            conn.setAutoCommit(true);
                            conn.createStatement().execute("DELETE FROM " + tableName);
                        }
                    }
                }
            } catch (SQLException e) {
                throw new RuntimeException(e);
            } catch (InterruptedException e) {
                // Restore the interrupt status (Thread.interrupted() would clear it).
                Thread.currentThread().interrupt();
                throw new RuntimeException(e);
            } finally {
                doneSignal.countDown();
            }
        }
    };
    // Upserter thread: writes rows in batches while holding the shared lock.
    Runnable r2 = new Runnable() {
        @Override public void run() {
            try {
                Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
                int nRowsToUpsert = 1000;
                for (int i = 0; i < nRowsToUpsert; i++) {
                    synchronized (lock) {
                        try (PhoenixConnection conn = DriverManager.getConnection(getUrl(), props)
                                .unwrap(PhoenixConnection.class)) {
                            conn.createStatement().execute(
                                    "UPSERT INTO " + tableName + " VALUES (" + (i % 10)
                                            + ", 0, 1)");
                            if ((i % 20) == 0 || i == nRowsToUpsert - 1) {
                                conn.commit();
                            }
                        }
                    }
                }
            } catch (SQLException e) {
                throw new RuntimeException(e);
            } finally {
                doneSignal.countDown();
            }
        }
    };
    Thread t1 = new Thread(r1);
    t1.start();
    Thread t2 = new Thread(r2);
    t2.start();
    // Fail loudly on timeout instead of verifying a partially-run workload.
    assertTrue("Ran out of time", doneSignal.await(60, TimeUnit.SECONDS));
    verifyIndexTable(tableName, indexName, conn);
}
/**
 * Runs full-table DELETEs and batched UPSERTs concurrently (no shared lock),
 * then verifies both a plain index and a SINGLE_CELL_ARRAY_WITH_OFFSETS index
 * remain consistent with the data table.
 */
@Test
public void testConcurrentDeletesAndUpsertValues() throws Exception {
    final String tableName = generateUniqueName();
    final String indexName = generateUniqueName();
    final String singleCellindexName = "SC_" + generateUniqueName();
    Connection conn = DriverManager.getConnection(getUrl());
    conn.createStatement().execute("CREATE TABLE " + tableName
            + "(k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, v1 INTEGER, CONSTRAINT pk PRIMARY KEY (k1,k2))");
    TestUtil.addCoprocessor(conn, tableName, DelayingRegionObserver.class);
    conn.createStatement().execute("CREATE INDEX " + indexName + " ON " + tableName + "(v1)");
    conn.createStatement().execute("CREATE INDEX " + singleCellindexName + " ON " + tableName + "(v1) IMMUTABLE_STORAGE_SCHEME=SINGLE_CELL_ARRAY_WITH_OFFSETS, COLUMN_ENCODED_BYTES=2");
    final CountDownLatch doneSignal = new CountDownLatch(2);
    // Deleter thread: repeatedly wipes the table with auto-commit on.
    Runnable r1 = new Runnable() {
        @Override public void run() {
            try {
                Connection conn = DriverManager.getConnection(getUrl());
                conn.setAutoCommit(true);
                for (int i = 0; i < 50; i++) {
                    Thread.sleep(20);
                    conn.createStatement().execute("DELETE FROM " + tableName);
                }
            } catch (SQLException e) {
                throw new RuntimeException(e);
            } catch (InterruptedException e) {
                // Restore the interrupt status (Thread.interrupted() would clear it).
                Thread.currentThread().interrupt();
                throw new RuntimeException(e);
            } finally {
                doneSignal.countDown();
            }
        }
    };
    // Upserter thread: writes rows, committing every 20 statements.
    Runnable r2 = new Runnable() {
        @Override public void run() {
            try {
                Connection conn = DriverManager.getConnection(getUrl());
                for (int i = 0; i < 1000; i++) {
                    conn.createStatement().execute(
                            "UPSERT INTO " + tableName + " VALUES (" + (i % 10) + ", 0, 1)");
                    if ((i % 20) == 0) {
                        conn.commit();
                    }
                }
                conn.commit();
            } catch (SQLException e) {
                throw new RuntimeException(e);
            } finally {
                doneSignal.countDown();
            }
        }
    };
    Thread t1 = new Thread(r1);
    t1.start();
    Thread t2 = new Thread(r2);
    t2.start();
    // Fail loudly on timeout instead of verifying a partially-run workload.
    assertTrue("Ran out of time", doneSignal.await(60, TimeUnit.SECONDS));
    verifyIndexTable(tableName, indexName, conn);
    verifyIndexTable(tableName, singleCellindexName, conn);
}
/**
 * Hammers the data table with randomized concurrent upserts from several
 * worker threads, then verifies the covered index stays consistent and the
 * final row count equals the number of distinct primary keys written.
 */
@Test
public void testConcurrentUpserts() throws Exception {
    final int workerCount = 4;
    final int batchSize = 200;
    final int nRows = 51;
    final int nIndexValues = 23;
    final String tableName = generateUniqueName();
    final String indexName = generateUniqueName();
    Connection conn = DriverManager.getConnection(getUrl());
    conn.createStatement().execute("CREATE TABLE " + tableName
            + "(k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, a.v1 INTEGER, b.v2 INTEGER, c.v3 INTEGER, d.v4 INTEGER," +
            "CONSTRAINT pk PRIMARY KEY (k1,k2)) COLUMN_ENCODED_BYTES = 0, VERSIONS=1");
    conn.createStatement().execute("CREATE INDEX " + indexName + " ON " + tableName + "(v1) INCLUDE(v2, v3)");
    final CountDownLatch doneSignal = new CountDownLatch(workerCount);
    // All workers run the identical workload, so one shared Runnable suffices.
    final Runnable upsertWorkload = new Runnable() {
        @Override public void run() {
            try {
                Connection workerConn = DriverManager.getConnection(getUrl());
                for (int row = 0; row < 10000; row++) {
                    workerConn.createStatement().execute(
                            "UPSERT INTO " + tableName + " VALUES (" + (row % nRows) + ", 0, "
                                    + (RAND.nextBoolean() ? null : (RAND.nextInt() % nIndexValues)) + ", "
                                    + (RAND.nextBoolean() ? null : RAND.nextInt()) + ", "
                                    + (RAND.nextBoolean() ? null : RAND.nextInt()) + ", "
                                    + (RAND.nextBoolean() ? null : RAND.nextInt()) + ")");
                    if ((row % batchSize) == 0) {
                        workerConn.commit();
                    }
                }
                workerConn.commit();
            } catch (SQLException e) {
                throw new RuntimeException(e);
            } finally {
                doneSignal.countDown();
            }
        }
    };
    for (int t = 0; t < workerCount; t++) {
        new Thread(upsertWorkload).start();
    }
    assertTrue("Ran out of time", doneSignal.await(120, TimeUnit.SECONDS));
    long actualRowCount = verifyIndexTable(tableName, indexName, conn);
    assertEquals(nRows, actualRowCount);
}
@Test
public void testRowLockDuringPreBatchMutateWhenIndexed() throws Exception {
    final String tableName = LOCK_TEST_TABLE_PREFIX + generateUniqueName();
    final String indexName = generateUniqueName();
    // try-with-resources: the verification connection was previously leaked
    try (Connection conn = DriverManager.getConnection(getUrl())) {
        conn.createStatement().execute("CREATE TABLE " + tableName
                + "(k VARCHAR PRIMARY KEY, v INTEGER) COLUMN_ENCODED_BYTES = 0");
        TestUtil.addCoprocessor(conn, tableName, DelayingRegionObserver.class);
        conn.createStatement().execute("CREATE INDEX " + indexName + " ON " + tableName + "(v)");
        final CountDownLatch doneSignal = new CountDownLatch(2);
        final String[] failedMsg = new String[1];
        Runnable r1 = new Runnable() {
            @Override public void run() {
                try (Connection conn = DriverManager.getConnection(getUrl())) {
                    conn.createStatement()
                            .execute("UPSERT INTO " + tableName + " VALUES ('foo',0)");
                    conn.createStatement()
                            .execute("UPSERT INTO " + tableName + " VALUES ('foo',1)");
                    conn.commit();
                } catch (Exception e) {
                    failedMsg[0] = e.getMessage();
                    throw new RuntimeException(e);
                } finally {
                    doneSignal.countDown();
                }
            }
        };
        Runnable r2 = new Runnable() {
            @Override public void run() {
                try (Connection conn = DriverManager.getConnection(getUrl())) {
                    conn.createStatement()
                            .execute("UPSERT INTO " + tableName + " VALUES ('foo',2)");
                    conn.createStatement()
                            .execute("UPSERT INTO " + tableName + " VALUES ('foo',3)");
                    conn.commit();
                } catch (Exception e) {
                    failedMsg[0] = e.getMessage();
                    throw new RuntimeException(e);
                } finally {
                    doneSignal.countDown();
                }
            }
        };
        Thread t1 = new Thread(r1);
        t1.start();
        Thread t2 = new Thread(r2);
        t2.start();
        // NOTE(review): TimeUnit.SECONDS looks suspicious — ROW_LOCK_WAIT_TIME is used
        // with Thread.sleep() (milliseconds) elsewhere; kept as-is since a longer wait
        // is harmless, but the latch result is now asserted instead of being ignored.
        assertTrue("Ran out of time", doneSignal.await(ROW_LOCK_WAIT_TIME + 5000, TimeUnit.SECONDS));
        assertNull(failedMsg[0], failedMsg[0]);
        long actualRowCount = IndexScrutiny.scrutinizeIndex(conn, tableName, indexName);
        assertEquals(1, actualRowCount);
    }
}
@Test
public void testLockUntilMVCCAdvanced() throws Exception {
    final String tableName = MVCC_LOCK_TEST_TABLE_PREFIX + generateUniqueName();
    final String indexName = generateUniqueName();
    // try-with-resources: the verification connection was previously leaked
    try (Connection conn = DriverManager.getConnection(getUrl())) {
        conn.createStatement().execute("CREATE TABLE " + tableName
                + "(k VARCHAR PRIMARY KEY, v INTEGER) COLUMN_ENCODED_BYTES = 0");
        conn.createStatement().execute("CREATE INDEX " + indexName + " ON " + tableName + "(v,k)");
        conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES ('foo',0)");
        conn.commit();
        TestUtil.addCoprocessor(conn, tableName, DelayingRegionObserver.class);
        final CountDownLatch doneSignal = new CountDownLatch(2);
        final String[] failedMsg = new String[1];
        Runnable r1 = new Runnable() {
            @Override public void run() {
                try (Connection conn = DriverManager.getConnection(getUrl())) {
                    conn.createStatement()
                            .execute("UPSERT INTO " + tableName + " VALUES ('foo',1)");
                    conn.commit();
                } catch (Exception e) {
                    failedMsg[0] = e.getMessage();
                    throw new RuntimeException(e);
                } finally {
                    doneSignal.countDown();
                }
            }
        };
        Runnable r2 = new Runnable() {
            @Override public void run() {
                try (Connection conn = DriverManager.getConnection(getUrl())) {
                    conn.createStatement()
                            .execute("UPSERT INTO " + tableName + " VALUES ('foo',2)");
                    conn.commit();
                } catch (Exception e) {
                    failedMsg[0] = e.getMessage();
                    throw new RuntimeException(e);
                } finally {
                    doneSignal.countDown();
                }
            }
        };
        Thread t1 = new Thread(r1);
        t1.start();
        Thread t2 = new Thread(r2);
        t2.start();
        // NOTE(review): TimeUnit.SECONDS looks like it should be MILLISECONDS (see
        // Thread.sleep(ROW_LOCK_WAIT_TIME ...) in DelayingRegionObserver) — kept
        // as-is since a longer wait is harmless; the latch result is now asserted.
        assertTrue("Ran out of time", doneSignal.await(ROW_LOCK_WAIT_TIME + 5000, TimeUnit.SECONDS));
        // Previously failedMsg was captured but never checked (unlike the sibling
        // row-lock test); assert it so worker-thread failures surface here.
        assertNull(failedMsg[0], failedMsg[0]);
        long actualRowCount = IndexScrutiny.scrutinizeIndex(conn, tableName, indexName);
        assertEquals(1, actualRowCount);
    }
}
/**
 * Test coprocessor that injects delays around batch mutations.
 *
 * For tables whose name starts with MVCC_LOCK_TEST_TABLE_PREFIX it delays
 * postBatchMutate so two racing commits observe the same MVCC read point.
 * For tables whose name starts with LOCK_TEST_TABLE_PREFIX it verifies that
 * the row lock held during preBatchMutate is exclusive: if a second batch
 * enters preBatchMutate while the flag is set, the lock was not exclusive.
 */
public static class DelayingRegionObserver extends SimpleRegionObserver {
    // Set while a batch for the lock-test table is inside preBatchMutate;
    // volatile so the racing handler thread sees the flag.
    private volatile boolean lockedTableRow;

    @Override public void postBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
            MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
        try {
            String tableName = c.getEnvironment().getRegionInfo().getTable().getNameAsString();
            if (tableName.startsWith(MVCC_LOCK_TEST_TABLE_PREFIX)) {
                Thread.sleep(ROW_LOCK_WAIT_TIME
                        / 2); // Wait long enough that they'll both have the same mvcc
            }
        } catch (InterruptedException e) {
            // Restore the interrupt status instead of swallowing it
            Thread.currentThread().interrupt();
        }
    }

    @Override public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
            MiniBatchOperationInProgress<Mutation> miniBatchOp) throws HBaseIOException {
        try {
            String tableName = c.getEnvironment().getRegionInfo().getTable().getNameAsString();
            if (tableName.startsWith(LOCK_TEST_TABLE_PREFIX)) {
                if (lockedTableRow) {
                    // A second batch got in while the first was still here: the
                    // row lock was not exclusive as expected.
                    throw new DoNotRetryIOException(
                            "Expected lock in preBatchMutate to be exclusive, but it wasn't for row "
                                    + Bytes
                                    .toStringBinary(miniBatchOp.getOperation(0).getRow()));
                }
                lockedTableRow = true;
                // Hold the lock long enough for the competing batch to arrive
                Thread.sleep(ROW_LOCK_WAIT_TIME + 2000);
            }
            // Small random jitter for all tables to shake out ordering issues
            Thread.sleep(Math.abs(RAND.nextInt()) % 10);
        } catch (InterruptedException e) {
            // Restore the interrupt status instead of swallowing it
            Thread.currentThread().interrupt();
        } finally {
            lockedTableRow = false;
        }
    }
}
}
| |
/*
* ARX: Powerful Data Anonymization
* Copyright 2012 - 2021 Fabian Prasser and contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.deidentifier.arx.gui.view.impl.common;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.deidentifier.arx.gui.Controller;
import org.deidentifier.arx.gui.resources.Resources;
import org.deidentifier.arx.gui.view.SWTUtil;
import org.deidentifier.arx.gui.view.def.IComponent;
import org.eclipse.swt.SWT;
import org.eclipse.swt.custom.CTabFolder;
import org.eclipse.swt.custom.CTabFolder2Adapter;
import org.eclipse.swt.custom.CTabFolderEvent;
import org.eclipse.swt.custom.CTabItem;
import org.eclipse.swt.events.SelectionAdapter;
import org.eclipse.swt.events.SelectionEvent;
import org.eclipse.swt.events.SelectionListener;
import org.eclipse.swt.graphics.Image;
import org.eclipse.swt.graphics.Point;
import org.eclipse.swt.layout.GridLayout;
import org.eclipse.swt.widgets.Composite;
import org.eclipse.swt.widgets.Control;
import org.eclipse.swt.widgets.Event;
import org.eclipse.swt.widgets.Layout;
import org.eclipse.swt.widgets.ToolBar;
import org.eclipse.swt.widgets.ToolItem;
/**
* This class implements a titled folder.
*
* @author Fabian Prasser
*/
/**
 * This class implements a titled folder: a tab folder with an optional
 * tool bar, help button and a menu for hiding/showing individual tabs.
 *
 * @author Fabian Prasser
 */
public class ComponentTitledFolder implements IComponent {

    /**
     * An entry in a folder
     *
     * @author Fabian Prasser
     */
    private class TitledFolderEntry {

        /** Tab title */
        private String  text;
        /** Control displayed by the tab */
        private Control control;
        /** Tab icon, may be null */
        private Image   image;
        /** Creation index, used to restore the position when re-shown */
        private int     index;
        /** Whether the tab may be hidden via the hiding menu */
        private boolean hideable;

        /**
         * Creates a new instance
         * @param text
         * @param control
         * @param image
         * @param index
         * @param hideable
         */
        public TitledFolderEntry(String text, Control control, Image image, int index, boolean hideable) {
            this.text = text;
            this.control = control;
            this.image = image;
            this.index = index;
            this.hideable = hideable;
        }
    }

    /** All entries ever created, including currently hidden ones */
    private List<TitledFolderEntry> entries = new ArrayList<TitledFolderEntry>();
    /** The underlying folder widget */
    private final ComponentTabFolder folder;
    /** Whether the hiding menu is shown in the tool bar */
    private final boolean hasHidingMenu;
    /** Listener notified when tab visibility changes */
    private SelectionListener itemVisibilityListener;

    /**
     * Creates a new instance.
     *
     * @param parent
     * @param controller
     * @param bar
     * @param id
     */
    public ComponentTitledFolder(Composite parent, Controller controller, ComponentTitledFolderButtonBar bar, String id) {
        this(parent, controller, bar, id, null, false, false);
    }

    /**
     * Creates a new instance.
     *
     * @param parent
     * @param controller
     * @param bar
     * @param id
     * @param bottom
     * @param supportsHidingElements
     */
    public ComponentTitledFolder(Composite parent,
                                 Controller controller,
                                 ComponentTitledFolderButtonBar bar,
                                 String id,
                                 boolean bottom,
                                 boolean supportsHidingElements) {
        this(parent, controller, bar, id, null, bottom, supportsHidingElements);
    }

    /**
     * Creates a new instance.
     *
     * @param parent
     * @param controller
     * @param bar
     * @param id
     * @param helpids
     */
    public ComponentTitledFolder(Composite parent, Controller controller, ComponentTitledFolderButtonBar bar, String id, Map<Composite, String> helpids) {
        this(parent, controller, bar, id, helpids, false, false);
    }

    /**
     * Creates a new instance.
     *
     * @param parent
     * @param controller
     * @param bar
     * @param id
     * @param helpids
     * @param bottom
     * @param hasHidingMenu
     */
    public ComponentTitledFolder(Composite parent,
                                 Controller controller,
                                 ComponentTitledFolderButtonBar bar,
                                 String id,
                                 Map<Composite, String> helpids,
                                 boolean bottom,
                                 boolean hasHidingMenu) {

        int flags = SWT.BORDER | SWT.FLAT;
        if (bottom) flags |= SWT.BOTTOM;
        else flags |= SWT.TOP;

        this.hasHidingMenu = hasHidingMenu;
        this.folder = new ComponentTabFolder(parent, flags);
        this.folder.setUnselectedCloseVisible(false);
        this.folder.setSimple(false);

        // Create help button
        if (bar != null || controller != null) {
            if (bar == null) SWTUtil.createHelpButton(controller, folder, id, helpids);
            else createBar(controller, folder, bar);
        }

        // Prevent closing
        this.folder.addCTabFolder2Listener(new CTabFolder2Adapter() {
            @Override
            public void close(final CTabFolderEvent event) {
                event.doit = false;
            }
        });
    }

    /**
     * Adds a selection listener.
     *
     * @param listener
     */
    public void addSelectionListener(SelectionListener listener) {
        folder.addSelectionListener(listener);
    }

    /**
     * Creates a new entry in the folder.
     *
     * @param title
     * @param image
     * @return
     */
    public Composite createItem(String title, Image image) {
        return createItem(title, image, getItemCount(), false);
    }

    /**
     * Creates a new entry in the folder.
     *
     * @param title
     * @param image
     * @param hideable
     * @return
     */
    public Composite createItem(String title, Image image, boolean hideable) {
        return createItem(title, image, hideable, new GridLayout());
    }

    /**
     * Creates a new entry in the folder.
     *
     * @param title
     * @param image
     * @param hideable
     * @param layout
     * @return
     */
    public Composite createItem(String title, Image image, boolean hideable, Layout layout) {
        return createItem(title, image, getItemCount(), hideable, layout);
    }

    /**
     * Creates a new entry in the folder.
     *
     * @param title
     * @param image
     * @param index
     * @param hideable
     * @return
     */
    public Composite createItem(String title, Image image, int index, boolean hideable) {
        return createItem(title, image, index, hideable, new GridLayout());
    }

    /**
     * Creates a new entry in the folder.
     *
     * @param title
     * @param image
     * @param index
     * @param hideable
     * @param layout
     * @return
     */
    public Composite createItem(String title, Image image, int index, boolean hideable, Layout layout) {
        Composite composite = new Composite(folder, SWT.NONE);
        composite.setLayout(layout);
        CTabItem item = new CTabItem(folder, SWT.NULL, index);
        item.setText(title);
        if (image != null) item.setImage(image);
        item.setShowClose(false);
        item.setControl(composite);
        // Remember the entry so the tab can be re-created when un-hidden
        entries.add(new TitledFolderEntry(title, composite, image, index, hideable));
        return composite;
    }

    /**
     * Returns the tool bar item with the given tool tip text, or null if the
     * folder has no tool bar or no such item exists.
     *
     * @param text
     * @return
     */
    public ToolItem getButtonItem(String text) {
        Control c = folder.getTopRight();
        if (c == null) return null;
        if (!(c instanceof ToolBar)) return null;
        ToolBar t = (ToolBar) c;
        for (ToolItem i : t.getItems()) {
            // Compare null-safely: an item without a tool tip previously caused an NPE
            if (text.equals(i.getToolTipText())) return i;
        }
        return null;
    }

    /**
     * Returns the number of items in the folder.
     *
     * @return
     */
    public int getItemCount() {
        return folder.getItemCount();
    }

    /**
     * Returns the selected control
     * @return
     */
    public Control getSelectedControl() {
        return folder.getSelection().getControl();
    }

    /**
     * Returns the currently selected index.
     *
     * @return
     */
    public int getSelectionIndex() {
        return folder.getSelectionIndex();
    }

    /**
     * @return
     * @see org.eclipse.swt.widgets.Control#getSize()
     */
    public Point getSize() {
        return folder.getSize();
    }

    /**
     * Returns the titles of all currently visible items
     * @return
     */
    public List<String> getVisibleItems() {
        List<String> result = new ArrayList<String>();
        for (CTabItem item : folder.getItems()) {
            result.add(item.getText());
        }
        return result;
    }

    /**
     * Enables/disables the component.
     *
     * @param b
     */
    public void setEnabled(boolean b) {
        folder.setEnabled(b);
    }

    /**
     * Sets the item visibility listener
     * @param listener
     */
    public void setItemVisibilityListener(SelectionListener listener) {
        this.itemVisibilityListener = listener;
    }

    /**
     * Sets layout data.
     *
     * @param data
     */
    public void setLayoutData(Object data) {
        folder.setLayoutData(data);
    }

    /**
     * Selects the item with the given control
     * @param c
     */
    public void setSelectedControl(Control c) {
        for (CTabItem item : folder.getItems()) {
            if (item.getControl() == c) {
                folder.setSelection(item);
                return;
            }
        }
    }

    /**
     * Sets the current selection.
     *
     * @param index
     */
    public void setSelection(int index) {
        folder.setSelection(index);
    }

    /**
     * Sets the according item visible
     * @param item
     * @param visible
     */
    public void setVisible(String item, boolean visible) {

        // Update and track whether anything actually changed
        boolean changed = visible ? this.setVisible(item) : this.setInvisible(item);

        // Notify the listener only on an actual change
        if (changed) {
            fireItemVisibilityEvent();
        }
    }

    /**
     * Sets the given items as visible, hiding all other hideable items
     * @param items
     */
    public void setVisibleItems(List<String> items) {

        // Change flag
        boolean changed = false;

        // Hide/show items
        for (String item : getAllHideableItems()) {
            if (items.contains(item)) {
                changed |= setVisible(item);
                // Select the first tab when the folder was previously empty
                if (this.folder.getItemCount() == 1) {
                    this.folder.setSelection(0);
                }
            } else {
                changed |= setInvisible(item);
            }
        }

        // If something has changed, fire event
        if (changed) {
            fireItemVisibilityEvent();
        }
    }

    /**
     * Creates the bar .
     *
     * @param controller
     * @param folder
     * @param bar
     */
    private void createBar(final Controller controller, final CTabFolder folder, final ComponentTitledFolderButtonBar bar) {

        ToolBar toolbar = new ToolBar(folder, SWT.FLAT);
        folder.setTopRight(toolbar, SWT.RIGHT);

        // Button opening the hide/show dialog
        if (this.hasHidingMenu) {
            ToolItem item = new ToolItem(toolbar, SWT.PUSH);
            item.setImage(controller.getResources().getManagedImage("manage.png")); //$NON-NLS-1$
            item.setToolTipText(Resources.getMessage("General.1")); //$NON-NLS-1$
            SWTUtil.createDisabledImage(item);
            item.addSelectionListener(new SelectionAdapter() {
                @Override
                public void widgetSelected(SelectionEvent arg0) {
                    List<String> result = controller.actionShowMultiSelectionDialog(folder.getShell(),
                                                                                    Resources.getMessage("ComponentTitledFolder.0"), //$NON-NLS-1$
                                                                                    Resources.getMessage("ComponentTitledFolder.1"), //$NON-NLS-1$
                                                                                    getAllHideableItems(),
                                                                                    getVisibleItems());
                    if (result != null) {
                        setVisibleItems(result);
                    }
                }
            });
        }

        // One button per configured title
        for (String title : bar.getTitles()) {
            final String key = title;
            ToolItem item = null;
            if (bar.isToggle(title)) item = new ToolItem(toolbar, SWT.CHECK);
            else item = new ToolItem(toolbar, SWT.PUSH);
            item.setImage(bar.getImage(key));
            item.setToolTipText(title);
            SWTUtil.createDisabledImage(item);
            item.addSelectionListener(new SelectionAdapter() {
                @Override
                public void widgetSelected(SelectionEvent arg0) {
                    bar.getRunnable(key).run();
                }
            });
        }

        // Help button, dispatching on the currently selected tab if per-tab ids exist
        if (bar.getHelpId() != null || (bar.getHelpIds() != null && !bar.getHelpIds().isEmpty())) {
            ToolItem item = new ToolItem(toolbar, SWT.PUSH);
            item.setImage(controller.getResources().getManagedImage("help.png")); //$NON-NLS-1$
            item.setToolTipText(Resources.getMessage("General.0")); //$NON-NLS-1$
            SWTUtil.createDisabledImage(item);
            item.addSelectionListener(new SelectionAdapter() {
                @Override
                public void widgetSelected(SelectionEvent arg0) {
                    if (bar.getHelpIds() == null || bar.getHelpIds().get(folder.getSelection().getControl()) == null) {
                        controller.actionShowHelpDialog(bar.getHelpId());
                    } else {
                        controller.actionShowHelpDialog(bar.getHelpIds().get(folder.getSelection().getControl()));
                    }
                }
            });
        }
    }

    /**
     * Notifies the item visibility listener, if one is registered
     */
    private void fireItemVisibilityEvent() {
        if (this.itemVisibilityListener != null) {
            Event event = new Event();
            event.widget = this.folder;
            this.itemVisibilityListener.widgetSelected(new SelectionEvent(event));
        }
    }

    /**
     * Returns the titles of all hideable items
     * @return
     */
    private List<String> getAllHideableItems() {
        List<String> result = new ArrayList<String>();
        for (TitledFolderEntry entry : this.entries) {
            if (entry.hideable) {
                result.add(entry.text);
            }
        }
        return result;
    }

    /**
     * Returns a list of all entries that are currently not shown in the folder
     * @return
     */
    private List<TitledFolderEntry> getInvisibleEntries() {
        // Start from all entries and remove those that have a visible tab
        List<TitledFolderEntry> result = new ArrayList<TitledFolderEntry>();
        result.addAll(this.entries);
        for (CTabItem item : folder.getItems()) {
            Iterator<TitledFolderEntry> iter = result.iterator();
            while (iter.hasNext()) {
                if (item.getText().equals(iter.next().text)) {
                    iter.remove();
                }
            }
        }
        return result;
    }

    /**
     * Returns whether the entry with the given title may be hidden
     * @param text
     * @return
     */
    private boolean isHideable(String text) {
        for (TitledFolderEntry entry : this.entries) {
            if (entry.text.equals(text) && !entry.hideable) {
                return false;
            }
        }
        return true;
    }

    /**
     * Sets the given item invisible
     * @param text
     * @return whether anything changed
     */
    private boolean setInvisible(String text) {
        for (CTabItem item : folder.getItems()) {
            // Only dispose tabs that are actually hideable
            if (item.getText().equals(text) && isHideable(text)) {
                item.dispose();
                return true;
            }
        }
        return false;
    }

    /**
     * Sets an entry visible
     * @param text
     * @return whether anything changed
     */
    private boolean setVisible(String text) {

        List<TitledFolderEntry> list = getInvisibleEntries();

        // Find
        for (TitledFolderEntry entry : list) {
            if (entry.text.equals(text)) {

                // Shift the target index left by one for each invisible entry
                // that would have preceded this one
                int index = entry.index;
                for (TitledFolderEntry other : list) {
                    if (other.index < entry.index) {
                        index--;
                    }
                }

                // Fix MacOS bug: store height
                int height = folder.getTabHeight();

                // Show
                CTabItem item = new CTabItem(folder, SWT.NULL, index);
                item.setText(entry.text);
                if (entry.image != null) item.setImage(entry.image);
                item.setShowClose(false);
                item.setControl(entry.control);

                // Fix MacOS bug: update
                folder.setTabHeight(height);

                // Done
                return true;
            }
        }
        return false;
    }
}
| |
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.drive.model;
/**
* The apps resource provides a list of the apps that a user has installed, with information about
* each app's supported MIME types, file extensions, and other details.
*
* <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is
* transmitted over HTTP when working with the Drive API. For a detailed explanation see:
* <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a>
* </p>
*
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public final class App extends com.google.api.client.json.GenericJson {
/**
* Whether the app is authorized to access data on the user's Drive.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.Boolean authorized;
/**
* The template url to create a new file with this app in a given folder. The template will
* contain {folderId} to be replaced by the folder to create the new file in.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String createInFolderTemplate;
/**
* The url to create a new file with this app.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String createUrl;
/**
* Whether the app has drive-wide scope. An app with drive-wide scope can access all files in the
* user's drive.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.Boolean hasDriveWideScope;
/**
* The various icons for the app.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.util.List<Icons> icons;
static {
// hack to force ProGuard to consider Icons used, since otherwise it would be stripped out
// see https://github.com/google/google-api-java-client/issues/543
com.google.api.client.util.Data.nullOf(Icons.class);
}
/**
* The ID of the app.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String id;
/**
* Whether the app is installed.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.Boolean installed;
/**
* This is always drive#app.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String kind;
/**
* A long description of the app.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String longDescription;
/**
* The name of the app.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String name;
/**
* The type of object this app creates (e.g. Chart). If empty, the app name should be used
* instead.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String objectType;
/**
* The template url for opening files with this app. The template will contain {ids} and/or
* {exportIds} to be replaced by the actual file ids. See Open Files for the full documentation.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String openUrlTemplate;
/**
* The list of primary file extensions.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.util.List<java.lang.String> primaryFileExtensions;
/**
* The list of primary mime types.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.util.List<java.lang.String> primaryMimeTypes;
/**
* The ID of the product listing for this app.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String productId;
/**
* A link to the product listing for this app.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String productUrl;
/**
* The list of secondary file extensions.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.util.List<java.lang.String> secondaryFileExtensions;
/**
* The list of secondary mime types.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.util.List<java.lang.String> secondaryMimeTypes;
/**
* A short description of the app.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String shortDescription;
/**
* Whether this app supports creating new objects.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.Boolean supportsCreate;
/**
* Whether this app supports importing Google Docs.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.Boolean supportsImport;
/**
* Whether this app supports opening more than one file.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.Boolean supportsMultiOpen;
/**
* Whether this app supports creating new files when offline.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.Boolean supportsOfflineCreate;
/**
* Whether the app is selected as the default handler for the types it supports.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.Boolean useByDefault;
/**
* Whether the app is authorized to access data on the user's Drive.
* @return value or {@code null} for none
*/
public java.lang.Boolean getAuthorized() {
return authorized;
}
/**
* Whether the app is authorized to access data on the user's Drive.
* @param authorized authorized or {@code null} for none
*/
public App setAuthorized(java.lang.Boolean authorized) {
this.authorized = authorized;
return this;
}
/**
* The template url to create a new file with this app in a given folder. The template will
* contain {folderId} to be replaced by the folder to create the new file in.
* @return value or {@code null} for none
*/
public java.lang.String getCreateInFolderTemplate() {
return createInFolderTemplate;
}
/**
* The template url to create a new file with this app in a given folder. The template will
* contain {folderId} to be replaced by the folder to create the new file in.
* @param createInFolderTemplate createInFolderTemplate or {@code null} for none
*/
public App setCreateInFolderTemplate(java.lang.String createInFolderTemplate) {
this.createInFolderTemplate = createInFolderTemplate;
return this;
}
/**
* The url to create a new file with this app.
* @return value or {@code null} for none
*/
public java.lang.String getCreateUrl() {
return createUrl;
}
/**
* The url to create a new file with this app.
* @param createUrl createUrl or {@code null} for none
*/
public App setCreateUrl(java.lang.String createUrl) {
this.createUrl = createUrl;
return this;
}
/**
* Whether the app has drive-wide scope. An app with drive-wide scope can access all files in the
* user's drive.
* @return value or {@code null} for none
*/
public java.lang.Boolean getHasDriveWideScope() {
return hasDriveWideScope;
}
/**
* Whether the app has drive-wide scope. An app with drive-wide scope can access all files in the
* user's drive.
* @param hasDriveWideScope hasDriveWideScope or {@code null} for none
*/
public App setHasDriveWideScope(java.lang.Boolean hasDriveWideScope) {
this.hasDriveWideScope = hasDriveWideScope;
return this;
}
/**
* The various icons for the app.
* @return value or {@code null} for none
*/
public java.util.List<Icons> getIcons() {
return icons;
}
/**
* The various icons for the app.
* @param icons icons or {@code null} for none
*/
public App setIcons(java.util.List<Icons> icons) {
this.icons = icons;
return this;
}
/**
* The ID of the app.
* @return value or {@code null} for none
*/
public java.lang.String getId() {
return id;
}
/**
* The ID of the app.
* @param id id or {@code null} for none
*/
public App setId(java.lang.String id) {
this.id = id;
return this;
}
/**
* Whether the app is installed.
* @return value or {@code null} for none
*/
public java.lang.Boolean getInstalled() {
return installed;
}
/**
* Whether the app is installed.
* @param installed installed or {@code null} for none
*/
public App setInstalled(java.lang.Boolean installed) {
this.installed = installed;
return this;
}
/**
* This is always drive#app.
* @return value or {@code null} for none
*/
public java.lang.String getKind() {
return kind;
}
/**
* This is always drive#app.
* @param kind kind or {@code null} for none
*/
public App setKind(java.lang.String kind) {
this.kind = kind;
return this;
}
/**
* A long description of the app.
* @return value or {@code null} for none
*/
public java.lang.String getLongDescription() {
return longDescription;
}
/**
* A long description of the app.
* @param longDescription longDescription or {@code null} for none
*/
public App setLongDescription(java.lang.String longDescription) {
this.longDescription = longDescription;
return this;
}
/**
* The name of the app.
* @return value or {@code null} for none
*/
public java.lang.String getName() {
return name;
}
/**
* The name of the app.
* @param name name or {@code null} for none
*/
public App setName(java.lang.String name) {
this.name = name;
return this;
}
/**
* The type of object this app creates (e.g. Chart). If empty, the app name should be used
* instead.
* @return value or {@code null} for none
*/
public java.lang.String getObjectType() {
return objectType;
}
/**
* The type of object this app creates (e.g. Chart). If empty, the app name should be used
* instead.
* @param objectType objectType or {@code null} for none
*/
public App setObjectType(java.lang.String objectType) {
this.objectType = objectType;
return this;
}
/**
* The template url for opening files with this app. The template will contain {ids} and/or
* {exportIds} to be replaced by the actual file ids. See Open Files for the full documentation.
* @return value or {@code null} for none
*/
public java.lang.String getOpenUrlTemplate() {
return openUrlTemplate;
}
/**
* The template url for opening files with this app. The template will contain {ids} and/or
* {exportIds} to be replaced by the actual file ids. See Open Files for the full documentation.
* @param openUrlTemplate openUrlTemplate or {@code null} for none
*/
public App setOpenUrlTemplate(java.lang.String openUrlTemplate) {
this.openUrlTemplate = openUrlTemplate;
return this;
}
/**
* The list of primary file extensions.
* @return value or {@code null} for none
*/
public java.util.List<java.lang.String> getPrimaryFileExtensions() {
return primaryFileExtensions;
}
/**
* The list of primary file extensions.
* @param primaryFileExtensions primaryFileExtensions or {@code null} for none
*/
public App setPrimaryFileExtensions(java.util.List<java.lang.String> primaryFileExtensions) {
this.primaryFileExtensions = primaryFileExtensions;
return this;
}
/**
* The list of primary mime types.
* @return value or {@code null} for none
*/
public java.util.List<java.lang.String> getPrimaryMimeTypes() {
return primaryMimeTypes;
}
/**
* The list of primary mime types.
* @param primaryMimeTypes primaryMimeTypes or {@code null} for none
*/
public App setPrimaryMimeTypes(java.util.List<java.lang.String> primaryMimeTypes) {
this.primaryMimeTypes = primaryMimeTypes;
return this;
}
/**
* The ID of the product listing for this app.
* @return value or {@code null} for none
*/
public java.lang.String getProductId() {
return productId;
}
/**
* The ID of the product listing for this app.
* @param productId productId or {@code null} for none
*/
public App setProductId(java.lang.String productId) {
this.productId = productId;
return this;
}
/**
* A link to the product listing for this app.
* @return value or {@code null} for none
*/
public java.lang.String getProductUrl() {
return productUrl;
}
/**
* A link to the product listing for this app.
* @param productUrl productUrl or {@code null} for none
*/
public App setProductUrl(java.lang.String productUrl) {
this.productUrl = productUrl;
return this;
}
/**
* The list of secondary file extensions.
* @return value or {@code null} for none
*/
public java.util.List<java.lang.String> getSecondaryFileExtensions() {
return secondaryFileExtensions;
}
/**
* The list of secondary file extensions.
* @param secondaryFileExtensions secondaryFileExtensions or {@code null} for none
*/
public App setSecondaryFileExtensions(java.util.List<java.lang.String> secondaryFileExtensions) {
this.secondaryFileExtensions = secondaryFileExtensions;
return this;
}
/**
* The list of secondary mime types.
* @return value or {@code null} for none
*/
public java.util.List<java.lang.String> getSecondaryMimeTypes() {
return secondaryMimeTypes;
}
/**
 * Sets the list of secondary mime types handled by this app.
 * @param secondaryMimeTypes secondaryMimeTypes or {@code null} for none
 * @return this {@code App}, for call chaining
 */
public App setSecondaryMimeTypes(java.util.List<java.lang.String> secondaryMimeTypes) {
  this.secondaryMimeTypes = secondaryMimeTypes;
  return this;
}
/**
 * Returns a short description of the app.
 * @return value or {@code null} for none
 */
public java.lang.String getShortDescription() {
  return shortDescription;
}
/**
 * Sets the short description of the app.
 * @param shortDescription shortDescription or {@code null} for none
 * @return this {@code App}, for call chaining
 */
public App setShortDescription(java.lang.String shortDescription) {
  this.shortDescription = shortDescription;
  return this;
}
/**
 * Returns whether this app supports creating new objects.
 * @return value or {@code null} for none
 */
public java.lang.Boolean getSupportsCreate() {
  return supportsCreate;
}
/**
 * Sets whether this app supports creating new objects.
 * @param supportsCreate supportsCreate or {@code null} for none
 * @return this {@code App}, for call chaining
 */
public App setSupportsCreate(java.lang.Boolean supportsCreate) {
  this.supportsCreate = supportsCreate;
  return this;
}
/**
 * Returns whether this app supports importing Google Docs.
 * @return value or {@code null} for none
 */
public java.lang.Boolean getSupportsImport() {
  return supportsImport;
}
/**
 * Sets whether this app supports importing Google Docs.
 * @param supportsImport supportsImport or {@code null} for none
 * @return this {@code App}, for call chaining
 */
public App setSupportsImport(java.lang.Boolean supportsImport) {
  this.supportsImport = supportsImport;
  return this;
}
/**
 * Returns whether this app supports opening more than one file.
 * @return value or {@code null} for none
 */
public java.lang.Boolean getSupportsMultiOpen() {
  return supportsMultiOpen;
}
/**
 * Sets whether this app supports opening more than one file.
 * @param supportsMultiOpen supportsMultiOpen or {@code null} for none
 * @return this {@code App}, for call chaining
 */
public App setSupportsMultiOpen(java.lang.Boolean supportsMultiOpen) {
  this.supportsMultiOpen = supportsMultiOpen;
  return this;
}
/**
 * Returns whether this app supports creating new files when offline.
 * @return value or {@code null} for none
 */
public java.lang.Boolean getSupportsOfflineCreate() {
  return supportsOfflineCreate;
}
/**
 * Sets whether this app supports creating new files when offline.
 * @param supportsOfflineCreate supportsOfflineCreate or {@code null} for none
 * @return this {@code App}, for call chaining
 */
public App setSupportsOfflineCreate(java.lang.Boolean supportsOfflineCreate) {
  this.supportsOfflineCreate = supportsOfflineCreate;
  return this;
}
/**
 * Returns whether the app is selected as the default handler for the types it supports.
 * @return value or {@code null} for none
 */
public java.lang.Boolean getUseByDefault() {
  return useByDefault;
}
/**
 * Sets whether the app is selected as the default handler for the types it supports.
 * @param useByDefault useByDefault or {@code null} for none
 * @return this {@code App}, for call chaining
 */
public App setUseByDefault(java.lang.Boolean useByDefault) {
  this.useByDefault = useByDefault;
  return this;
}
/**
 * Sets an arbitrary field by name, delegating to the generic JSON superclass;
 * overridden only to narrow the return type for call chaining.
 */
@Override
public App set(String fieldName, Object value) {
  return (App) super.set(fieldName, value);
}
/**
 * Makes a copy via the generic JSON superclass; overridden only to narrow the
 * return type.
 */
@Override
public App clone() {
  return (App) super.clone();
}
/**
 * Model definition for AppIcons.
 * Generated JSON model: each field is mapped by {@code @Key} and may be {@code null}
 * when absent from the wire representation.
 */
public static final class Icons extends com.google.api.client.json.GenericJson {

  /**
   * Category of the icon. Allowed values are: - application - icon for the application - document
   * - icon for a file associated with the app - documentShared - icon for a shared file associated
   * with the app
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.lang.String category;

  /**
   * URL for the icon.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.lang.String iconUrl;

  /**
   * Size of the icon. Represented as the maximum of the width and height.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.lang.Integer size;

  /**
   * Returns the category of the icon. Allowed values are: - application - icon for the
   * application - document - icon for a file associated with the app - documentShared - icon for
   * a shared file associated with the app
   * @return value or {@code null} for none
   */
  public java.lang.String getCategory() {
    return category;
  }

  /**
   * Sets the category of the icon. Allowed values are: - application - icon for the application -
   * document - icon for a file associated with the app - documentShared - icon for a shared file
   * associated with the app
   * @param category category or {@code null} for none
   * @return this {@code Icons}, for call chaining
   */
  public Icons setCategory(java.lang.String category) {
    this.category = category;
    return this;
  }

  /**
   * Returns the URL for the icon.
   * @return value or {@code null} for none
   */
  public java.lang.String getIconUrl() {
    return iconUrl;
  }

  /**
   * Sets the URL for the icon.
   * @param iconUrl iconUrl or {@code null} for none
   * @return this {@code Icons}, for call chaining
   */
  public Icons setIconUrl(java.lang.String iconUrl) {
    this.iconUrl = iconUrl;
    return this;
  }

  /**
   * Returns the size of the icon. Represented as the maximum of the width and height.
   * @return value or {@code null} for none
   */
  public java.lang.Integer getSize() {
    return size;
  }

  /**
   * Sets the size of the icon. Represented as the maximum of the width and height.
   * @param size size or {@code null} for none
   * @return this {@code Icons}, for call chaining
   */
  public Icons setSize(java.lang.Integer size) {
    this.size = size;
    return this;
  }

  /**
   * Sets an arbitrary field by name; overridden only to narrow the return type.
   */
  @Override
  public Icons set(String fieldName, Object value) {
    return (Icons) super.set(fieldName, value);
  }

  /**
   * Makes a copy; overridden only to narrow the return type.
   */
  @Override
  public Icons clone() {
    return (Icons) super.clone();
  }
}
}
| |
/**
* $RCSfile: ,v $
* $Revision: $
* $Date: $
*
* Copyright (C) 2004-2011 Jive Software. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jivesoftware;
import org.jivesoftware.resource.Res;
import org.jivesoftware.smack.AccountManager;
import org.jivesoftware.smack.ConnectionConfiguration;
import org.jivesoftware.smack.XMPPConnection;
import org.jivesoftware.smack.XMPPException;
import org.jivesoftware.smack.packet.XMPPError;
import org.jivesoftware.smack.util.StringUtils;
import org.jivesoftware.spark.component.TitlePanel;
import org.jivesoftware.spark.util.DummySSLSocketFactory;
import org.jivesoftware.spark.util.ModelUtil;
import org.jivesoftware.spark.util.ResourceUtils;
import org.jivesoftware.spark.util.SwingWorker;
import org.jivesoftware.sparkimpl.settings.local.LocalPreferences;
import org.jivesoftware.sparkimpl.settings.local.SettingsManager;
import java.awt.BorderLayout;
import java.awt.Component;
import java.awt.GridBagConstraints;
import java.awt.GridBagLayout;
import java.awt.Insets;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import javax.swing.JButton;
import javax.swing.JDialog;
import javax.swing.JFrame;
import javax.swing.JLabel;
import javax.swing.JOptionPane;
import javax.swing.JPanel;
import javax.swing.JPasswordField;
import javax.swing.JProgressBar;
import javax.swing.JTextField;
/**
 * Allows the creation of accounts on an XMPP server.
 *
 * <p>Presents a small registration form (username, password, confirmation, server),
 * performs the in-band registration on a background {@link SwingWorker}, and reports
 * success or failure via dialogs. Not thread-safe; intended to be used from the EDT
 * via {@link #invoke(JFrame)}.</p>
 */
public class AccountCreationWizard extends JPanel {
    private static final long serialVersionUID = -7808507939643878212L;

    // Registration form widgets.
    private JLabel usernameLabel = new JLabel();
    private JTextField usernameField = new JTextField();
    private JLabel passwordLabel = new JLabel();
    private JPasswordField passwordField = new JPasswordField();
    private JLabel confirmPasswordLabel = new JLabel();
    private JPasswordField confirmPasswordField = new JPasswordField();
    private JLabel serverLabel = new JLabel();
    private JTextField serverField = new JTextField();

    private JButton createAccountButton = new JButton();
    private JButton closeButton = new JButton();

    // Hosting dialog, assigned in invoke(); the close button assumes it is non-null.
    private JDialog dialog;
    // True once an account has been created successfully.
    private boolean registered;
    // Connection used for registration; null until getConnection() succeeds.
    private XMPPConnection connection = null;
    private JProgressBar progressBar;

    /**
     * Construct the AccountCreationWizard UI.
     */
    public AccountCreationWizard() {
        // Associate Mnemonics
        ResourceUtils.resLabel(usernameLabel, usernameField, Res.getString("label.username") + ":");
        ResourceUtils.resLabel(passwordLabel, passwordField, Res.getString("label.password") + ":");
        ResourceUtils.resLabel(confirmPasswordLabel, confirmPasswordField, Res.getString("label.confirm.password") + ":");
        ResourceUtils.resLabel(serverLabel, serverField, Res.getString("label.server") + ":");
        ResourceUtils.resButton(createAccountButton, Res.getString("button.create.account"));

        setLayout(new GridBagLayout());

        // Add component to UI
        add(usernameLabel, new GridBagConstraints(0, 0, 1, 1, 0.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.NONE, new Insets(5, 5, 5, 5), 0, 0));
        add(usernameField, new GridBagConstraints(1, 0, 3, 1, 1.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.HORIZONTAL, new Insets(5, 5, 5, 5), 150, 0));
        add(passwordLabel, new GridBagConstraints(0, 1, 1, 1, 0.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.NONE, new Insets(5, 5, 5, 5), 0, 0));
        add(passwordField, new GridBagConstraints(1, 1, 3, 1, 0.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.HORIZONTAL, new Insets(5, 5, 5, 5), 0, 0));
        add(confirmPasswordLabel, new GridBagConstraints(0, 2, 1, 1, 0.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.NONE, new Insets(5, 5, 5, 5), 0, 0));
        add(confirmPasswordField, new GridBagConstraints(1, 2, 3, 1, 1.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.HORIZONTAL, new Insets(5, 5, 5, 5), 0, 0));
        add(serverLabel, new GridBagConstraints(0, 3, 1, 1, 0.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.NONE, new Insets(5, 5, 5, 5), 0, 0));
        add(serverField, new GridBagConstraints(1, 3, 3, 1, 0.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.HORIZONTAL, new Insets(5, 5, 5, 5), 0, 0));

        // Hidden until a registration attempt is running.
        progressBar = new JProgressBar();
        add(progressBar, new GridBagConstraints(1, 4, 3, 1, 1.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.HORIZONTAL, new Insets(5, 5, 5, 5), 0, 0));
        progressBar.setVisible(false);

        add(createAccountButton, new GridBagConstraints(2, 5, 1, 1, 1.0, 0.0, GridBagConstraints.EAST, GridBagConstraints.NONE, new Insets(5, 5, 5, 5), 0, 0));
        ResourceUtils.resButton(closeButton, Res.getString("button.close"));
        add(closeButton, new GridBagConstraints(3, 5, 1, 1, 0.0, 0.0, GridBagConstraints.EAST, GridBagConstraints.NONE, new Insets(5, 5, 5, 5), 0, 0));

        createAccountButton.addActionListener(new ActionListener() {
            public void actionPerformed(ActionEvent actionEvent) {
                createAccount();
            }
        });

        closeButton.addActionListener(new ActionListener() {
            public void actionPerformed(ActionEvent actionEvent) {
                dialog.dispose();
            }
        });
    }

    /**
     * Returns the username to use for the new account, lower-cased and escaped
     * for use as an XMPP node.
     *
     * @return the username.
     */
    public String getUsername() {
        return StringUtils.escapeNode(usernameField.getText().toLowerCase());
    }

    /**
     * Returns the username to use for the new account, exactly as typed.
     *
     * @return the username.
     */
    public String getUsernameWithoutEscape() {
        return usernameField.getText();
    }

    /**
     * Returns the password to use for the new account.
     *
     * @return the password to use for the new account.
     */
    public String getPassword() {
        return new String(passwordField.getPassword());
    }

    /**
     * Returns the confirmation password to use for the new account.
     *
     * @return the password to use for the new account.
     */
    public String getConfirmPassword() {
        return new String(confirmPasswordField.getPassword());
    }

    /**
     * Returns the server to use with the new account.
     *
     * @return the server to use.
     */
    public String getServer() {
        return serverField.getText();
    }

    /**
     * Returns true if the passwords match.
     *
     * @return true if the passwords match.
     */
    public boolean isPasswordValid() {
        return getPassword().equals(getConfirmPassword());
    }

    /**
     * Creates the new account using the supplied information.
     * Validates the form, then registers on a background thread so the EDT stays
     * responsive; result handling happens back on the EDT in finished().
     */
    private void createAccount() {
        // Validate form input before touching the network.
        boolean errors = false;
        String errorMessage = "";

        if (!ModelUtil.hasLength(getUsername())) {
            errors = true;
            usernameField.requestFocus();
            errorMessage = Res.getString("message.username.error");
        }
        else if (!ModelUtil.hasLength(getPassword())) {
            errors = true;
            errorMessage = Res.getString("message.password.error");
        }
        else if (!ModelUtil.hasLength(getConfirmPassword())) {
            errors = true;
            errorMessage = Res.getString("message.confirmation.password.error");
        }
        else if (!ModelUtil.hasLength(getServer())) {
            errors = true;
            errorMessage = Res.getString("message.account.error");
        }
        else if (!isPasswordValid()) {
            errors = true;
            errorMessage = Res.getString("message.confirmation.password.error");
        }

        if (errors) {
            JOptionPane.showMessageDialog(this, errorMessage, Res.getString("title.create.problem"), JOptionPane.ERROR_MESSAGE);
            return;
        }

        final Component ui = this;
        progressBar.setIndeterminate(true);
        progressBar.setStringPainted(true);
        progressBar.setString(Res.getString("message.registering", getServer()));
        progressBar.setVisible(true);

        final SwingWorker worker = new SwingWorker() {
            // XMPP error code from a failed registration; 0 means success.
            int errorCode;

            public Object construct() {
                try {
                    createAccountButton.setEnabled(false);
                    connection = getConnection();
                }
                catch (XMPPException e) {
                    // Connection failed; finished() detects this via connection == null.
                    return e;
                }
                try {
                    final AccountManager accountManager = new AccountManager(connection);
                    accountManager.createAccount(getUsername(), getPassword());
                }
                catch (XMPPException e) {
                    XMPPError error = e.getXMPPError();
                    if (error != null) {
                        errorCode = error.getCode();
                    }
                    else {
                        // No structured error available; treat as an internal server error.
                        errorCode = 500;
                    }
                }
                return "ok";
            }

            public void finished() {
                progressBar.setVisible(false);
                if (connection == null) {
                    // Could not even connect to the server.
                    if (ui.isShowing()) {
                        createAccountButton.setEnabled(true);
                        JOptionPane.showMessageDialog(ui, Res.getString("message.connection.failed", getServer()), Res.getString("title.create.problem"), JOptionPane.ERROR_MESSAGE);
                    }
                    return;
                }

                if (errorCode == 0) {
                    accountCreationSuccessful();
                }
                else {
                    accountCreationFailed(errorCode);
                }
            }
        };
        worker.start();
    }

    /**
     * Called if the account creation failed.
     *
     * @param errorCode the error code.
     */
    private void accountCreationFailed(int errorCode) {
        String message = Res.getString("message.create.account");
        // 409 Conflict: the username is already taken on this server.
        if (errorCode == 409) {
            message = Res.getString("message.already.exists");
            usernameField.setText("");
            usernameField.requestFocus();
        }
        JOptionPane.showMessageDialog(this, message, Res.getString("title.create.problem"), JOptionPane.ERROR_MESSAGE);
        createAccountButton.setEnabled(true);
    }

    /**
     * Called if the account was created successfully.
     */
    private void accountCreationSuccessful() {
        registered = true;
        JOptionPane.showMessageDialog(this, Res.getString("message.account.created"), Res.getString("title.account.created"), JOptionPane.INFORMATION_MESSAGE);
        dialog.dispose();
    }

    /**
     * Invokes the AccountCreationWizard.
     *
     * @param parent the parent frame to use.
     */
    public void invoke(JFrame parent) {
        dialog = new JDialog(parent, Res.getString("title.create.new.account"), true);
        TitlePanel titlePanel = new TitlePanel(Res.getString("title.account.create.registration"), Res.getString("message.account.create"), null, true);
        dialog.getContentPane().setLayout(new BorderLayout());
        dialog.getContentPane().add(titlePanel, BorderLayout.NORTH);
        dialog.getContentPane().add(this, BorderLayout.CENTER);
        dialog.pack();
        dialog.setSize(400, 300);
        dialog.setLocationRelativeTo(parent);
        dialog.setVisible(true);
    }

    /**
     * Creates an XMPPConnection based on the users settings.
     *
     * @return the XMPPConnection created.
     * @throws XMPPException thrown if an exception occured creating the connection.
     */
    private XMPPConnection getConnection() throws XMPPException {
        final LocalPreferences localPreferences = SettingsManager.getLocalPreferences();

        // Default port, possibly overridden by an explicit "host:port" server value.
        int port = localPreferences.getXmppPort();
        String serverName = getServer();
        int checkForPort = serverName.indexOf(":");
        if (checkForPort != -1) {
            String portString = serverName.substring(checkForPort + 1);
            if (ModelUtil.hasLength(portString)) {
                // Set new port.
                port = Integer.parseInt(portString);
            }
        }

        boolean useSSL = localPreferences.isSSL();
        boolean hostPortConfigured = localPreferences.isHostAndPortConfigured();

        ConnectionConfiguration config;
        if (useSSL) {
            if (!hostPortConfigured) {
                // Legacy SSL connections use the dedicated 5223 port.
                config = new ConnectionConfiguration(serverName, 5223);
            }
            else {
                config = new ConnectionConfiguration(localPreferences.getXmppHost(), port, serverName);
            }
            config.setSocketFactory(new DummySSLSocketFactory());
        }
        else {
            if (!hostPortConfigured) {
                config = new ConnectionConfiguration(serverName);
            }
            else {
                config = new ConnectionConfiguration(localPreferences.getXmppHost(), port, serverName);
            }
        }

        config.setReconnectionAllowed(true);
        config.setCompressionEnabled(localPreferences.isCompressionEnabled());

        XMPPConnection connection = new XMPPConnection(config);
        connection.connect();
        return connection;
    }

    /**
     * Returns true if the user is registered.
     *
     * @return true if the user is registered.
     */
    public boolean isRegistered() {
        return registered;
    }
}
| |
/*
* Copyright 2016 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.workbench.common.screens.examples.client.wizard;
import java.util.List;
import javax.enterprise.event.Event;
import org.guvnor.common.services.project.context.WorkspaceProjectContextChangeEvent;
import org.jboss.errai.common.client.api.Caller;
import org.jboss.errai.ui.client.local.spi.TranslationService;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.kie.workbench.common.screens.examples.client.wizard.model.ExamplesWizardModel;
import org.kie.workbench.common.screens.examples.client.wizard.pages.project.ProjectPage;
import org.kie.workbench.common.screens.examples.client.wizard.pages.sourcerepository.SourceRepositoryPage;
import org.kie.workbench.common.screens.examples.model.ExampleOrganizationalUnit;
import org.kie.workbench.common.screens.examples.model.ExampleRepository;
import org.kie.workbench.common.screens.examples.model.ExamplesMetaData;
import org.kie.workbench.common.screens.examples.service.ExamplesService;
import org.mockito.ArgumentCaptor;
import org.mockito.Captor;
import org.mockito.Mock;
import org.mockito.Spy;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.runners.MockitoJUnitRunner;
import org.mockito.stubbing.Answer;
import org.uberfire.client.callbacks.Callback;
import org.uberfire.ext.widgets.common.client.common.BusyIndicatorView;
import org.uberfire.ext.widgets.core.client.wizards.WizardView;
import org.uberfire.mocks.CallerMock;
import org.uberfire.mocks.EventSourceMock;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
@RunWith(MockitoJUnitRunner.class)
public class ExamplesWizardTest {

    // Fixture constants: a playground repository URL and two organizational units.
    private static final String EXAMPLE_REPOSITORY1 = "https://github.com/guvnorngtestuser1/guvnorng-playground.git";
    private static final String EXAMPLE_ORGANIZATIONAL_UNIT1 = "ou1";
    private static final String EXAMPLE_ORGANIZATIONAL_UNIT2 = "ou2";

    private final WizardView mockView = mock(WizardView.class);
    private final ExampleRepository repository = new ExampleRepository(EXAMPLE_REPOSITORY1);

    @Mock
    private SourceRepositoryPage sourceRepositoryPage;

    @Mock
    private ProjectPage projectPage;

    @Mock
    private BusyIndicatorView busyIndicatorView;

    // Service behind a synchronous CallerMock so RPC behavior is deterministic in tests.
    private ExamplesService examplesService = mock(ExamplesService.class);
    private Caller<ExamplesService> examplesServiceCaller = new CallerMock<ExamplesService>(examplesService);

    // Spy so fire() invocations can be verified without the event bus.
    @Spy
    private Event<WorkspaceProjectContextChangeEvent> event = new EventSourceMock<WorkspaceProjectContextChangeEvent>() {
        @Override
        public void fire(final WorkspaceProjectContextChangeEvent event) {
            //Do nothing. Default implementation throws an exception.
        }
    };

    @Mock
    private TranslationService translator;

    @Captor
    private ArgumentCaptor<ExampleRepository> repositoryArgumentCaptor;

    @Mock
    private Callback<Boolean> callback;

    private ExamplesMetaData metaData = new ExamplesMetaData(repository);

    // Class under test; built in setup() with the mock view injected.
    private ExamplesWizard wizard;

    @Before
    public void setup() {
        wizard = new ExamplesWizard(sourceRepositoryPage,
                                    projectPage,
                                    busyIndicatorView,
                                    examplesServiceCaller,
                                    event,
                                    translator) {
            {
                this.view = mockView;
            }
        };
        when(examplesService.getMetaData()).thenReturn(metaData);
    }

    // start() must initialise both pages, share one model between them, and push
    // the playground repository into the source-repository page.
    @Test
    public void testStart() {
        final ArgumentCaptor<ExamplesWizardModel> modelArgumentCaptor = ArgumentCaptor.forClass(ExamplesWizardModel.class);

        wizard.start();
        verify(sourceRepositoryPage,
               times(1)).initialise();
        verify(projectPage,
               times(1)).initialise();

        verify(sourceRepositoryPage,
               times(1)).setModel(modelArgumentCaptor.capture());
        verify(projectPage,
               times(1)).setModel(modelArgumentCaptor.getValue());

        verify(sourceRepositoryPage,
               times(1)).setPlaygroundRepository(repositoryArgumentCaptor.capture());
        assertEquals(repository,
                     repositoryArgumentCaptor.getValue());
    }

    // close() must destroy every page.
    @Test
    public void testClose() {
        wizard.close();
        verify(sourceRepositoryPage,
               times(1)).destroy();
        verify(projectPage,
               times(1)).destroy();
    }

    // Page index 0 is the source-repository page, index 1 the project page;
    // each must be prepared before its widget is returned.
    @Test
    public void testGetPageWidget() {
        wizard.getPageWidget(0);
        verify(sourceRepositoryPage,
               times(1)).prepareView();
        verify(sourceRepositoryPage,
               times(1)).asWidget();

        wizard.getPageWidget(1);
        verify(projectPage,
               times(1)).prepareView();
        verify(projectPage,
               times(1)).asWidget();
    }

    // An incomplete repository page must surface callback(false) to the caller.
    @Test
    @SuppressWarnings("unchecked")
    public void testIsComplete_RepositoryPageIncomplete() {
        doAnswer(new Answer<Boolean>() {
            @Override
            public Boolean answer(final InvocationOnMock invocation) throws Throwable {
                final Callback<Boolean> callback = (Callback<Boolean>) invocation.getArguments()[0];
                callback.callback(false);
                return null;
            }
        }).when(sourceRepositoryPage).isComplete(any(Callback.class));

        wizard.isComplete(callback);

        verify(callback,
               times(1)).callback(eq(true));
        verify(callback,
               times(1)).callback(eq(false));
    }

    // An incomplete project page must surface callback(false) to the caller.
    @Test
    @SuppressWarnings("unchecked")
    public void testIsComplete_ProjectPageIncomplete() {
        doAnswer(new Answer<Boolean>() {
            @Override
            public Boolean answer(final InvocationOnMock invocation) throws Throwable {
                final Callback<Boolean> callback = (Callback<Boolean>) invocation.getArguments()[0];
                callback.callback(false);
                return null;
            }
        }).when(projectPage).isComplete(any(Callback.class));

        wizard.isComplete(callback);

        verify(callback,
               times(1)).callback(eq(true));
        verify(callback,
               times(1)).callback(eq(false));
    }

    // When every page reports complete, the caller must see only callback(true).
    @Test
    @SuppressWarnings("unchecked")
    public void testIsComplete_AllPagesComplete() {
        doAnswer(new Answer<Boolean>() {
            @Override
            public Boolean answer(final InvocationOnMock invocation) throws Throwable {
                final Callback<Boolean> callback = (Callback<Boolean>) invocation.getArguments()[0];
                callback.callback(true);
                return null;
            }
        }).when(sourceRepositoryPage).isComplete(any(Callback.class));
        doAnswer(new Answer<Boolean>() {
            @Override
            public Boolean answer(final InvocationOnMock invocation) throws Throwable {
                final Callback<Boolean> callback = (Callback<Boolean>) invocation.getArguments()[0];
                callback.callback(true);
                return null;
            }
        }).when(projectPage).isComplete(any(Callback.class));

        wizard.isComplete(callback);

        verify(callback,
               times(1)).callback(eq(true));
        verify(callback,
               never()).callback(eq(false));
    }

    // complete() must show/hide the busy indicator around the service call and
    // fire a project-context change event afterwards.
    @Test
    @SuppressWarnings("unchecked")
    public void testOnComplete() {
        wizard.start();
        wizard.complete();

        verify(busyIndicatorView,
               times(1)).showBusyIndicator(any(String.class));
        verify(busyIndicatorView,
               times(1)).hideBusyIndicator();
        verify(examplesService,
               times(1)).setupExamples(any(ExampleOrganizationalUnit.class),
                                       any(List.class));
        verify(event,
               times(1)).fire(any(WorkspaceProjectContextChangeEvent.class));
    }

    // The default target OU set on the wizard must be reflected in its model.
    @Test
    public void testSetDefaultTargetOrganizationalUnit() {
        wizard.setDefaultTargetOrganizationalUnit("testOU");
        ExampleOrganizationalUnit targetOrganizationalUnit = wizard.getModel().getTargetOrganizationalUnit();
        assertNotNull(targetOrganizationalUnit);
        assertEquals("testOU",
                     targetOrganizationalUnit.getName());
    }
}
| |
package drishti.assisted.com.drishti;
import android.app.Activity;
import android.content.Context;
import android.media.AudioManager;
import android.net.wifi.ScanResult;
import android.net.wifi.WifiInfo;
import android.net.wifi.WifiManager;
import android.os.AsyncTask;
import android.speech.tts.TextToSpeech;
import android.support.v7.app.ActionBarActivity;
import android.os.Bundle;
import android.text.method.ScrollingMovementMethod;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.widget.Button;
import android.widget.EditText;
import android.widget.TextView;
import android.widget.Toast;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.message.BasicHeader;
import org.apache.http.protocol.HTTP;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.UnsupportedEncodingException;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
/**
 * Posts three Wi-Fi RSS readings as JSON to a remote localisation service and
 * shows the reply. The readings are pre-filled from a scan of three known
 * access points ("AndroidAP", "Manish", "Vipul-Ubuntu").
 */
public class MainActivity extends Activity {

    TextView text;                 // displays the server reply
    EditText edit1, edit2, edit3;  // RSS values for the three access points
    Button btn;                    // triggers the HTTP POST
    TextToSpeech ttsobj;           // never initialised here; onPause() null-checks it
    WifiManager wifiManager;
    Context context;
    WifiInfo mywifiInfo;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);

        text = (TextView) findViewById(R.id.textId);
        text.setMovementMethod(new ScrollingMovementMethod());
        btn = (Button) findViewById(R.id.buttonId);
        edit1 = (EditText) findViewById(R.id.editId1);
        edit2 = (EditText) findViewById(R.id.editId2);
        edit3 = (EditText) findViewById(R.id.editId3);

        wifiManager = (WifiManager) getSystemService(Context.WIFI_SERVICE);
        mywifiInfo = wifiManager.getConnectionInfo();

        btn.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                // Network work must run off the main thread.
                new connection().execute();
            }
        });

        Toast.makeText(getApplicationContext(), "before Wifi", Toast.LENGTH_SHORT).show();
        scanAccessPoints();
    }

    /**
     * Background task that POSTs the three RSS values as JSON to the
     * localisation service and returns the server's reply. Transport failures
     * are logged and reported as an empty string instead of crashing with a
     * NullPointerException (the original dereferenced possibly-null
     * {@code se}/{@code httpresponse}).
     */
    public class connection extends AsyncTask<Void, Void, String> {

        @Override
        protected String doInBackground(Void... params) {
            // Build the JSON payload from the three RSS fields.
            JSONObject jsonobject = new JSONObject();
            try {
                jsonobject.put("rss1", edit1.getText().toString());
                jsonobject.put("rss2", edit2.getText().toString());
                jsonobject.put("rss3", edit3.getText().toString());
            } catch (JSONException e) {
                e.printStackTrace();
            }

            StringEntity se;
            try {
                se = new StringEntity(jsonobject.toString());
            } catch (UnsupportedEncodingException e) {
                // Cannot build the request body; abort instead of NPE-ing below.
                e.printStackTrace();
                return "";
            }
            se.setContentType("application/json;charset=UTF-8");
            se.setContentEncoding(new BasicHeader(HTTP.CONTENT_TYPE, "application/json;charset=UTF-8"));

            String url = "http://lamp-test212.appspot.com";
            HttpPost httppostreq = new HttpPost(url);
            httppostreq.setEntity(se);
            httppostreq.setHeader("Content-type", "application/json");
            httppostreq.setHeader("Accept", "application/json");

            HttpResponse httpresponse;
            try {
                httpresponse = new DefaultHttpClient().execute(httppostreq);
            } catch (IOException e) {
                // Network failure; abort instead of dereferencing a null response.
                e.printStackTrace();
                return "";
            }

            HttpEntity resultentity = httpresponse.getEntity();
            if (resultentity == null) {
                return "";
            }

            String resultstring;
            try {
                InputStream inputstream = resultentity.getContent();
                try {
                    resultstring = convertStreamToString(inputstream);
                } finally {
                    // Always release the connection's stream.
                    inputstream.close();
                }
            } catch (IOException e) {
                e.printStackTrace();
                return "";
            }

            // Strip the quote characters wrapping the reply; guard against
            // replies too short to carry them (original threw on length < 2).
            if (resultstring.length() >= 2) {
                resultstring = resultstring.substring(1, resultstring.length() - 1);
            }
            return resultstring;
        }

        /** Drains the stream into a single string, one read line at a time. */
        private String convertStreamToString(InputStream is) {
            String line;
            StringBuilder total = new StringBuilder();
            BufferedReader rd = new BufferedReader(new InputStreamReader(is));
            try {
                while ((line = rd.readLine()) != null) {
                    total.append(line);
                }
            } catch (Exception e) {
                e.printStackTrace();
            }
            return total.toString();
        }

        @Override
        protected void onPostExecute(String result) {
            // Back on the UI thread: show the server reply.
            text.setText(result);
        }
    }

    /**
     * Scans for the three known access points and fills edit1..edit3 with the
     * signal level of the first match for each. Stops as soon as all three
     * values are filled (the original's unlabeled break only exited the inner
     * loop, so scanning continued past maxCount matches).
     */
    public void scanAccessPoints() {
        String[] access_points = {"AndroidAP", "Manish", "Vipul-Ubuntu"};
        int maxCount = 3;
        int apCount = 0;
        int value;
        Toast.makeText(getApplicationContext(), "in WifiScan", Toast.LENGTH_SHORT).show();

        if (!wifiManager.isWifiEnabled()) {
            return;
        }
        Toast.makeText(getApplicationContext(), "in Wifi", Toast.LENGTH_SHORT).show();
        if (!wifiManager.startScan()) {
            return;
        }
        List<ScanResult> scanResults = wifiManager.getScanResults();
        if (scanResults == null) {
            return;
        }

        scan:
        for (ScanResult scan_result : scanResults) {
            // Filter by the known AP names.
            for (String temp : access_points) {
                if ((scan_result.SSID).contains(temp)) {
                    value = scan_result.level;
                    if (apCount == 0) {
                        edit1.setText(String.valueOf(value));
                    }
                    else if (apCount == 1) {
                        edit2.setText(String.valueOf(value));
                    }
                    else if (apCount == 2) {
                        edit3.setText(String.valueOf(value));
                    }
                    Toast.makeText(getApplicationContext(), "SSID " + scan_result.SSID, Toast.LENGTH_SHORT).show();
                    apCount++;
                    if (apCount == maxCount) {
                        break scan; // all three fields filled; stop scanning entirely
                    }
                }
            }
        }
    }

    @Override
    protected void onPause() {
        // Release text-to-speech resources if they were ever created.
        if (ttsobj != null) {
            ttsobj.stop();
            ttsobj.shutdown();
        }
        super.onPause();
    }
}
| |
package com.forbesdigital.jee.oauth.rest.api;
import com.forbesdigital.jee.oauth.OAuthTokenError;
import com.forbesdigital.jee.oauth.OAuthTokenResponse;
import com.forbesdigital.jee.oauth.configuration.EOAuthGrantType;
import com.forbesdigital.jee.oauth.configuration.IOAuthConfiguration;
import com.forbesdigital.jee.oauth.configuration.OAuthContext;
import com.forbesdigital.jee.oauth.model.IOAuthClient;
import com.forbesdigital.jee.oauth.model.IOAuthToken;
import com.forbesdigital.jee.oauth.model.IOAuthUser;
import com.forbesdigital.jee.oauth.spring.client.exceptions.AbstractOAuthTokenRequestException;
import com.forbesdigital.jee.oauth.spring.client.exceptions.InvalidGrantException;
import com.forbesdigital.jee.oauth.spring.client.exceptions.InvalidRequestException;
import com.forbesdigital.jee.oauth.spring.client.exceptions.InvalidScopeException;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import javax.ws.rs.core.Response;
import org.springframework.http.HttpStatus;
/**
* Abstract implementation of a resource for requesting OAuth Tokens.
* Provides implementation for default OAuth specification and also abstract and protected methods
* which can/have to be overwritten in order to provide custom functionality to the resource.
*
* @param <Client> A class implementing IOAuthClient
* @param <User> A class implementing IOAuthUser
* @param <Token> A class implementing IOAuthToken
* @author Cristian Calugar <cristian.calugar@fortech.ro>
*/
public abstract class AbstractAccessTokenResource<Client extends IOAuthClient, User extends IOAuthUser, Token extends IOAuthToken> {

    // Header names
    private static final String CACHE_CONTROL = "Cache-Control";
    private static final String PRAGMA = "Pragma";

    // Header values
    private static final String CACHE_CONTROL_NO_STORE = "no-store";
    private static final String PRAGMA_NO_CACHE = "no-cache";

    // Single source of truth for the expires_in validation error (previously duplicated).
    private static final String INVALID_EXPIRES_IN_MESSAGE =
            "The 'expires_in' parameter is not a valid strictly positive integer value.";

    /**
     * A method for requesting an OAuth Token.
     *
     * @param grantType The requested OAuth grant type (a valid {@link EOAuthGrantType} string)
     * @param scope The requested list of scopes, formatted as a space separated String
     * @param username The provided OAuth User username
     * @param password The provided OAuth User password
     * @param expiresIn The provided value in seconds for the token lifetime
     * @return The generated OAuth Token if the provided parameters were correct,
     *         or an OAuth error response if any validation step threw an
     *         {@link AbstractOAuthTokenRequestException}
     */
    protected Response requestToken(String grantType, String scope,
            String username, String password, String expiresIn) {
        try {
            Client client = getAuthenticatedClient();
            // NOTE(review): 'password' is not verified in this method; confirm that
            // password verification happens upstream (e.g. in the security layer or
            // in the subclass's getOAuthUser implementation).
            User user = getAuthenticatedUser(grantType, username);
            Set<String> grantedScopes = validateOAuthScope(scope, client, grantType);

            // Calculate the token lifetime (and, downstream, the expiration date).
            Integer expiresInParsed = validateExpiresIn(expiresIn);
            Integer tokenLifetime = getTokenLifetime(client, expiresInParsed, user);

            Token token = createOAuthToken(client, tokenLifetime, grantedScopes, user);
            OAuthTokenResponse tokenTO = toTransferObject(token);

            // Per the OAuth 2.0 spec, token responses must not be cached.
            Map<String, String> headers = new HashMap<>();
            headers.put(CACHE_CONTROL, CACHE_CONTROL_NO_STORE);
            headers.put(PRAGMA, PRAGMA_NO_CACHE);

            // Return the generated access token with the Cache-Control and Pragma headers.
            return buildSuccessfulResponse(tokenTO, headers);
        } catch (AbstractOAuthTokenRequestException cae) {
            OAuthTokenError errorTO = new OAuthTokenError(cae.getOAuth2ErrorCode(), cae.getMessage());
            return buildErrorResponse(errorTO, cae.getHttpStatusCode());
        }
    }

    /**
     * @return The authenticated OAuth Client
     */
    protected abstract Client getAuthenticatedClient();

    /**
     * Retrieves the OAuth User with the provided username.
     * Should be overwritten if password grant type is used. Otherwise not.
     *
     * @param username The provided username
     * @return The OAuth User, based on the provided username.
     */
    protected User getOAuthUser(String username) {
        throw new UnsupportedOperationException("This method has to be overwritten if password grant type is used.");
    }

    /**
     * Creates an oAuth Token based on the provided information.
     *
     * @param client The authenticated OAuth Client
     * @param tokenLifetime The requested Token Lifetime
     * @param grantedScopes The granted OAuth Scopes
     * @param user The authenticated OAuth user, if there is one
     * @return The generated Token
     */
    protected abstract Token createOAuthToken(Client client, Integer tokenLifetime, Set<String> grantedScopes, User user);

    /**
     * Method provided to be overwritten in case a custom processing of the granted scopes is desired
     * after the default OAuth validation.
     *
     * @param grantedScopes The Granted OAuth Scopes
     * @return The processed list of Granted OAuth Scopes
     */
    protected Set<String> afterOAuthScopesResolution(Set<String> grantedScopes) {
        // Default behaviour - the granted scopes list is not modified
        return grantedScopes;
    }

    /**
     * Method provided to be overwritten in case a custom processing of the token lifetime is desired
     * after the default OAuth validation.
     *
     * @param client The authenticated OAuth Client
     * @param tokenLifetime The requested Token Lifetime
     * @param user The authenticated OAuth user, if there is one
     * @return The processed token lifetime
     */
    protected Integer afterTokenLifetimeResolution(Client client, Integer tokenLifetime, User user) {
        // Default behaviour - the token lifetime is not modified
        return tokenLifetime;
    }

    /**
     * Method provided to be overwritten in case additional information need to be added in the response body.
     *
     * @param token The OAuth Token
     * @return A map containing the name and the value of the additional parameters, or null for none
     */
    protected Map<String, Object> getAdditionalResponseParameters(Token token) {
        // Default behaviour - no additional information needed
        return null;
    }

    /**
     * Builds a successful response with the provided token TO as body and with the provided headers.
     *
     * @param tokenResponse The token response containing all information about the generated OAuth Token
     * @param headers The list of headers to be added to the response
     * @return The built response
     */
    protected Response buildSuccessfulResponse(OAuthTokenResponse tokenResponse, Map<String, String> headers) {
        return buildResponse(tokenResponse, headers, HttpStatus.OK.value());
    }

    /**
     * Builds an error response using the provided error TO as body and with the provided status code.
     *
     * @param errorResponse The error response
     * @param statusCode The status code
     * @return The built response
     */
    protected Response buildErrorResponse(OAuthTokenError errorResponse, int statusCode) {
        return buildResponse(errorResponse, null, statusCode);
    }

    /**
     * Builds a HTTP Response with the provided response body, headers and status code.
     *
     * @param responseBody The response body
     * @param headers The headers (may be null or empty)
     * @param statusCode The status code
     * @return The built response
     */
    private Response buildResponse(Object responseBody, Map<String, String> headers, int statusCode) {
        // Constructs an HTTP 200 OK response with the specified entity as the body;
        // the status is overwritten below.
        Response.ResponseBuilder responseBuilder = Response.ok(responseBody);
        // Set the headers (entrySet avoids one lookup per header key).
        if (headers != null && !headers.isEmpty()) {
            for (Map.Entry<String, String> header : headers.entrySet()) {
                responseBuilder.header(header.getKey(), header.getValue());
            }
        }
        // Set the status code
        responseBuilder.status(statusCode);
        return responseBuilder.build();
    }

    //<editor-fold defaultstate="collapsed" desc="Validations">
    /**
     * @param grantTypeStr The requested grant type (a valid {@link EOAuthGrantType} string)
     * @param username The provided username
     * @return The Authenticated OAuth User, if there is one. Otherwise null.
     */
    private User getAuthenticatedUser(String grantTypeStr, String username) {
        EOAuthGrantType grantType = EOAuthGrantType.fromValue(grantTypeStr);
        if (EOAuthGrantType.RESOURCE_OWNER_PASSWORD_CREDENTIALS.equals(grantType)) {
            User user = getOAuthUser(username);
            if (user == null) {
                throw new InvalidGrantException("The provided authorization grant is not valid.");
            }
            return user;
        }
        // Only the password grant resolves a user; all other grants are client-only.
        return null;
    }

    /**
     * Validates the requested scope and builds the granted scope which will be associated to the token.
     *
     * @param requestedScope The requested scope
     * @param client The Authenticated Client
     * @param grantTypeStr The requested grant type (a valid {@link EOAuthGrantType} string)
     * @return The list of granted scopes
     */
    private Set<String> validateOAuthScope(String requestedScope, Client client, String grantTypeStr) {
        Set<String> grantedScopes = new TreeSet<>();
        // In case no scope is requested, return the (empty) default scope set.
        // NOTE(review): the original comment claimed a BASIC scope is granted by
        // default, but no scope is added here — subclasses may add defaults via
        // afterOAuthScopesResolution; confirm the intended behaviour.
        if (requestedScope == null || requestedScope.isEmpty()) {
            return afterOAuthScopesResolution(grantedScopes);
        }
        // Check Scope format
        if (!requestedScope.matches(Token.SCOPE_PATTERN)) {
            throw new InvalidScopeException("The requested scope is malformed.");
        }
        // Split Scope by space and validate each individual scope
        String[] requestedScopes = requestedScope.split(Token.SCOPES_SEPARATOR);
        Set<String> allScopes = OAuthContext.getConfig().getAllScopes();
        Set<String> allowedScopes = OAuthContext.getConfig().getClientRole(client.getClientRole()).getAllowedScopes();
        // Loop-invariant: parse the grant type once instead of once per scope.
        EOAuthGrantType grantType = EOAuthGrantType.fromValue(grantTypeStr);
        for (String clientScope : requestedScopes) {
            // Handle the case when double spaces are found in the requested Scope.
            if (clientScope.isEmpty()) {
                continue;
            }
            // Check if the scope is valid
            if (!allScopes.contains(clientScope)) {
                throw new InvalidScopeException("The requested scope is invalid.");
            }
            // Check if the scope is allowed
            if (!allowedScopes.contains(clientScope)) {
                throw new InvalidScopeException("The requested scope exceeds the scope granted by the resource owner.");
            }
            // Check that the scope is allowed for the given grant type
            if (!OAuthContext.getConfig().getAllowedScopes(grantType).contains(clientScope)) {
                throw new InvalidScopeException("The requested scope requires a different grant_type.");
            }
            // Add the scope to the granted scopes list
            grantedScopes.add(clientScope);
        }
        return afterOAuthScopesResolution(grantedScopes);
    }

    /**
     * Calculates the token lifetime based on the required expiresIn, the client token lifetime
     * and the default token lifetime for the client role.
     *
     * @param client The authenticated OAuth Client
     * @param expiresIn The requested value in seconds for the token lifetime
     * @param user The authenticated OAuth user, if there is one
     * @return The calculated token lifetime.
     */
    private Integer getTokenLifetime(Client client, Integer expiresIn, User user) {
        Integer tokenLifetime = client.getTokenLifetime();
        if (tokenLifetime == null) {
            // Use default tokenLifetime in case this is not defined for the client
            IOAuthConfiguration config = OAuthContext.getConfig();
            tokenLifetime = config.getClientRole(client.getClientRole()).getTokenLifetime();
        }
        // The requested lifetime may only shorten the configured one, never extend it.
        if (expiresIn != null && expiresIn > 0) {
            tokenLifetime = Math.min(expiresIn, tokenLifetime);
        }
        return afterTokenLifetimeResolution(client, tokenLifetime, user);
    }

    /**
     * Validates expiresIn parameter to be a valid strictly positive integer value.
     *
     * @param expiresIn The requested value in seconds for the token lifetime
     * @return the parsed expiresIn value, or null if the parameter was absent.
     */
    private Integer validateExpiresIn(String expiresIn) {
        if (expiresIn == null) {
            return null;
        }
        // Try to parse expires_in to an integer.
        Integer expiresInParsed = null;
        try {
            expiresInParsed = Integer.parseInt(expiresIn);
        } catch (NumberFormatException e) {
            throw new InvalidRequestException(INVALID_EXPIRES_IN_MESSAGE);
        }
        if (expiresInParsed <= 0) {
            throw new InvalidRequestException(INVALID_EXPIRES_IN_MESSAGE);
        }
        return expiresInParsed;
    }
    //</editor-fold>

    //<editor-fold defaultstate="collapsed" desc="Transfer methods">
    /**
     * Converts a OAuth Token to a transfer object.
     *
     * @param token The OAuth Token to convert to a transfer object
     * @return A transfer object corresponding to the inputted token
     */
    private OAuthTokenResponse toTransferObject(Token token) {
        OAuthTokenResponse tokenTO = new OAuthTokenResponse();
        tokenTO.setAccessToken(token.getAccessToken());
        tokenTO.setTokenType(token.getTokenType());
        tokenTO.setExpiresIn(token.getExpiresIn());
        tokenTO.setScope(buildScope(token.getScopes()));
        String dateStr = new DateHelper().convertToUtcString(token.getExpirationDate());
        tokenTO.setExpirationDate(dateStr);
        // Add additional information in the token TO
        Map<String, Object> additionalInfo = getAdditionalResponseParameters(token);
        if (additionalInfo != null && !additionalInfo.isEmpty()) {
            tokenTO.getAdditionalInformation().putAll(additionalInfo);
        }
        return tokenTO;
    }

    /**
     * Build a scope string containing all granted scopes, separated by one white space.
     *
     * @param grantedScopes The granted scopes list
     * @return The generated scope string.
     */
    private String buildScope(Set<String> grantedScopes) {
        StringBuilder scopeBuilder = new StringBuilder();
        for (String scope : grantedScopes) {
            scopeBuilder.append(scope);
            scopeBuilder.append(Token.SCOPES_SEPARATOR);
        }
        // Trim drops the trailing separator appended after the last scope.
        return scopeBuilder.toString().trim();
    }
    //</editor-fold>
}
| |
/**
* Licensed to Apereo under one or more contributor license
* agreements. See the NOTICE file distributed with this work
* for additional information regarding copyright ownership.
* Apereo licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a
* copy of the License at the following location:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apereo.portal.spring.spel;
import javax.servlet.http.HttpServletRequest;
import net.sf.ehcache.Ehcache;
import net.sf.ehcache.Element;
import org.apache.commons.lang.Validate;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apereo.portal.security.IPerson;
import org.apereo.portal.url.IPortalRequestUtils;
import org.apereo.portal.user.IUserInstance;
import org.apereo.portal.user.IUserInstanceManager;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.BeanFactory;
import org.springframework.beans.factory.BeanFactoryAware;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.context.expression.BeanFactoryResolver;
import org.springframework.expression.BeanResolver;
import org.springframework.expression.EvaluationContext;
import org.springframework.expression.Expression;
import org.springframework.expression.ExpressionParser;
import org.springframework.expression.ParseException;
import org.springframework.expression.ParserContext;
import org.springframework.expression.spel.standard.SpelExpressionParser;
import org.springframework.expression.spel.support.StandardEvaluationContext;
import org.springframework.stereotype.Service;
import org.springframework.web.context.request.WebRequest;
/**
* PortalSpELServiceImpl provides the default implementation of
* IPortalSpELService.
*
* @author Jen Bourey, jbourey@unicon.net
* @version $Revision$
*/
@Service
public class PortalSpELServiceImpl implements IPortalSpELService, BeanFactoryAware {

    protected final Log logger = LogFactory.getLog(this.getClass());

    private ExpressionParser expressionParser = new SpelExpressionParser();
    private Ehcache expressionCache;
    private BeanResolver beanResolver;
    private IPortalRequestUtils portalRequestUtils;
    private IUserInstanceManager userInstanceManager;

    @Autowired
    public void setPortalRequestUtils(IPortalRequestUtils portalRequestUtils) {
        this.portalRequestUtils = portalRequestUtils;
    }

    @Autowired
    public void setUserInstanceManager(IUserInstanceManager userInstanceManager) {
        this.userInstanceManager = userInstanceManager;
    }

    public void setExpressionParser(ExpressionParser expressionParser) {
        Validate.notNull(expressionParser);
        this.expressionParser = expressionParser;
    }

    @Autowired
    public void setExpressionCache(@Qualifier("SpELExpressionCache") Ehcache expressionCache) {
        this.expressionCache = expressionCache;
    }

    @Override
    public void setBeanFactory(BeanFactory beanFactory) throws BeansException {
        // Allow SpEL expressions to reference Spring beans via @beanName syntax.
        this.beanResolver = new BeanFactoryResolver(beanFactory);
    }

    @Override
    public Expression parseExpression(String expressionString) throws ParseException {
        return this.parseCachedExpression(expressionString, null);
    }

    @Override
    public Expression parseExpression(String expressionString, ParserContext parserContext) throws ParseException {
        if (parserContext == null) {
            return this.expressionParser.parseExpression(expressionString);
        }
        return this.expressionParser.parseExpression(expressionString, parserContext);
    }

    /**
     * Parses the expression, caching the parsed {@link Expression} when a cache is configured.
     *
     * @param expressionString the expression to parse
     * @param parserContext the parser context, or null for a standard (non-template) expression
     * @return the parsed expression
     */
    protected Expression parseCachedExpression(String expressionString, ParserContext parserContext) throws ParseException {
        if (this.expressionCache == null) {
            return parseExpression(expressionString, parserContext);
        }

        // BUG FIX: the cache was previously keyed on the expression string alone, so the
        // same string parsed with different parser contexts (e.g. as a "${...}" template
        // via parseString and as a plain expression via parseExpression) would collide
        // and return an Expression parsed with the wrong context. Include the context
        // type in the key so the cached entries stay distinct.
        final String cacheKey =
                (parserContext == null ? "null" : parserContext.getClass().getName()) + ":" + expressionString;

        Element element = this.expressionCache.get(cacheKey);
        if (element != null) {
            return (Expression) element.getObjectValue();
        }

        final Expression expression = parseExpression(expressionString, parserContext);
        element = new Element(cacheKey, expression);
        this.expressionCache.put(element);
        return expression;
    }

    @Override
    public String parseString(String expressionString, WebRequest request) {
        // parseString treats the input as a "${...}" template, unlike parseExpression.
        final Expression expression = this.parseCachedExpression(expressionString, TemplateParserContext.INSTANCE);
        return this.getValue(expression, request, String.class);
    }

    @Override
    public <T> T getValue(String expressionString, WebRequest request, Class<T> desiredResultType) {
        final Expression expression = this.parseExpression(expressionString);
        return this.getValue(expression, request, desiredResultType);
    }

    @Override
    public <T> T getValue(Expression expression, WebRequest request, Class<T> desiredResultType) {
        final EvaluationContext evaluationContext = this.getEvaluationContext(request);
        return expression.getValue(evaluationContext, desiredResultType);
    }

    @Override
    public Object getValue(String expressionString, WebRequest request) {
        final Expression expression = this.parseExpression(expressionString);
        return this.getValue(expression, request);
    }

    @Override
    public Object getValue(Expression expression, WebRequest request) {
        final EvaluationContext evaluationContext = this.getEvaluationContext(request);
        return expression.getValue(evaluationContext);
    }

    @Override
    public String getValue(String expressionString, Object spelEnvironment) {
        // Evaluate a "${...}" template against an arbitrary root object instead of a web request.
        final StandardEvaluationContext context = new StandardEvaluationContext(spelEnvironment);
        context.setBeanResolver(this.beanResolver);
        final Expression expression = this.parseCachedExpression(expressionString, TemplateParserContext.INSTANCE);
        return expression.getValue(context, String.class);
    }

    /**
     * Return a SpEL evaluation context for the supplied web request. The context's root
     * exposes the request and the current person, and bean references are resolved
     * against the Spring bean factory.
     *
     * @param request the current web request
     * @return the evaluation context to evaluate expressions against
     */
    protected EvaluationContext getEvaluationContext(WebRequest request) {
        final HttpServletRequest httpRequest = this.portalRequestUtils.getOriginalPortalRequest(request);
        final IUserInstance userInstance = this.userInstanceManager.getUserInstance(httpRequest);
        final IPerson person = userInstance.getPerson();

        final SpELEnvironmentRoot root = new SpELEnvironmentRoot(request, person);
        final StandardEvaluationContext context = new StandardEvaluationContext(root);
        context.setBeanResolver(this.beanResolver);
        return context;
    }

    /**
     * Limited-use POJO representing the root of a SpEL environment. At the
     * current moment, we're only using the request object in the evaluation
     * context, but we'd like to be able to add additional objects in the
     * future.
     */
    @SuppressWarnings("unused")
    private static class SpELEnvironmentRoot {

        private final WebRequest request;
        private final IPerson person;

        /**
         * Create a new SpEL environment root for use in a SpEL evaluation
         * context.
         *
         * @param request web request
         * @param person the person associated with the request
         */
        private SpELEnvironmentRoot(WebRequest request, IPerson person) {
            this.request = request;
            this.person = person;
        }

        /**
         * Get the request associated with this environment root.
         */
        public WebRequest getRequest() {
            return request;
        }

        /**
         * The person associated with this environment root
         */
        public IPerson getPerson() {
            return this.person;
        }
    }

    /**
     * Parser context that treats expressions as "${...}" templates.
     */
    public static class TemplateParserContext implements ParserContext {

        public static final TemplateParserContext INSTANCE = new TemplateParserContext();

        @Override
        public String getExpressionPrefix() {
            return "${";
        }

        @Override
        public String getExpressionSuffix() {
            return "}";
        }

        @Override
        public boolean isTemplate() {
            return true;
        }
    }
}
| |
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
package org.elasticsearch.xpack.core.monitoring.exporter;
import org.elasticsearch.Version;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.time.DateFormatter;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.xpack.core.monitoring.MonitoredSystem;
import org.elasticsearch.xpack.core.template.TemplateUtils;
import java.io.IOException;
import java.time.Instant;
import java.util.Locale;
public final class MonitoringTemplateUtils {

    // Classpath location of a template resource; %s is the template id (e.g. "es").
    private static final String TEMPLATE_FILE = "/monitoring-%s.json";
    // Property in the template resource that is substituted with TEMPLATE_VERSION on load.
    private static final String TEMPLATE_VERSION_PROPERTY = "monitoring.template.version";

    /**
     * The last version of X-Pack that updated the templates and pipelines.
     * <p>
     * It may be possible for this to diverge between templates and pipelines, but for now they're the same.
     *
     * Note that the templates were last updated in 7.11.0, but the versions were not updated, meaning that upgrades
     * to 7.11.0 would not have updated the templates. See https://github.com/elastic/elasticsearch/pull/69317.
     */
    public static final int LAST_UPDATED_VERSION = Version.V_7_12_0.id;

    /**
     * Current version of templates used in their name to differentiate from breaking changes (separate from product version).
     * Version 7 has the same structure as version 6, but uses the `_doc` type.
     */
    public static final String TEMPLATE_VERSION = "7";

    /**
     * The previous version of templates, which we still support via the REST /_monitoring/bulk endpoint because
     * nothing changed for those documents.
     */
    public static final String OLD_TEMPLATE_VERSION = "6";

    /**
     * IDs of templates that can be used with {@linkplain #loadTemplate(String) loadTemplate}.
     */
    public static final String[] TEMPLATE_IDS = { "alerts-7", "es", "kibana", "logstash", "beats" };

    /**
     * IDs of templates that can be used with {@linkplain #createEmptyTemplate(String) createEmptyTemplate} that are not managed by a
     * Resolver.
     * <p>
     * These should only be used by the HTTP Exporter to create old templates so that older versions can be properly upgraded. Older
     * instances will attempt to create a named template based on the templates that they expect (e.g., ".monitoring-es-2") and not the
     * ones that we are creating.
     */
    public static final String[] OLD_TEMPLATE_IDS = { "data", "es", "kibana", "logstash" }; //excluding alerts since 6.x watches use it

    /**
     * IDs of pipelines that can be used with {@linkplain #loadPipeline(String, XContentType) loadPipeline}.
     */
    public static final String[] PIPELINE_IDS = { TEMPLATE_VERSION, OLD_TEMPLATE_VERSION };

    // Utility class: not meant to be instantiated.
    private MonitoringTemplateUtils() { }

    /**
     * Get a template name for any template ID.
     *
     * @param id The template identifier.
     * @return Never {@code null} {@link String} prefixed by ".monitoring-".
     * @see #TEMPLATE_IDS
     */
    public static String templateName(final String id) {
        return ".monitoring-" + id;
    }

    /**
     * Get a template name for any template ID for old templates in the previous version.
     *
     * @param id The template identifier.
     * @return Never {@code null} {@link String} prefixed by ".monitoring-" and ended by the {@code OLD_TEMPLATE_VERSION}.
     * @see #OLD_TEMPLATE_IDS
     */
    public static String oldTemplateName(final String id) {
        return ".monitoring-" + id + "-" + OLD_TEMPLATE_VERSION;
    }

    /**
     * Load the template resource for the given ID from the classpath, substituting the
     * current {@link #TEMPLATE_VERSION} into the {@code monitoring.template.version} property.
     *
     * @param id The template identifier.
     * @return The template body as a {@link String}.
     * @see #TEMPLATE_IDS
     */
    public static String loadTemplate(final String id) {
        String resource = String.format(Locale.ROOT, TEMPLATE_FILE, id);
        return TemplateUtils.loadTemplate(resource, TEMPLATE_VERSION, TEMPLATE_VERSION_PROPERTY);
    }

    /**
     * Create a template that does nothing but exist and provide a newer {@code version} so that we know that <em>we</em> created it.
     *
     * @param id The template identifier.
     * @return Never {@code null}.
     * @see #OLD_TEMPLATE_IDS
     * @see #OLD_TEMPLATE_VERSION
     */
    public static String createEmptyTemplate(final String id) {
        // e.g., { "index_patterns": [ ".monitoring-data-6*" ], "version": 6000002 }
        // (the actual version emitted is LAST_UPDATED_VERSION)
        return "{\"index_patterns\":[\".monitoring-" + id + "-" + OLD_TEMPLATE_VERSION + "*\"],\"version\":" + LAST_UPDATED_VERSION + "}";
    }

    /**
     * Get a pipeline name for any template ID.
     *
     * @param id The template identifier.
     * @return Never {@code null} {@link String} prefixed by "xpack_monitoring_" and the {@code id}.
     * @see #TEMPLATE_IDS
     */
    public static String pipelineName(String id) {
        return "xpack_monitoring_" + id;
    }

    /**
     * Create a pipeline that allows documents for different template versions to be upgraded.
     * <p>
     * The expectation is that you will call either {@link Strings#toString(XContentBuilder)} or
     * {@link BytesReference#bytes(XContentBuilder)}}.
     *
     * @param id The API version (e.g., "6") to use
     * @param type The type of data you want to format for the request
     * @return Never {@code null}. Always an ended-object.
     * @throws IllegalArgumentException if {@code id} is unrecognized
     * @see #PIPELINE_IDS
     */
    public static XContentBuilder loadPipeline(final String id, final XContentType type) {
        switch (id) {
            case TEMPLATE_VERSION:
                // Current version: no transformation needed, only a placeholder.
                return emptyPipeline(type);
            case OLD_TEMPLATE_VERSION:
                // Previous version: documents need upgrading before indexing.
                return pipelineForApiVersion6(type);
        }

        throw new IllegalArgumentException("unrecognized pipeline API version [" + id + "]");
    }

    /**
     * Create a pipeline to upgrade documents from {@link MonitoringTemplateUtils#OLD_TEMPLATE_VERSION}
     * The expectation is that you will call either {@link Strings#toString(XContentBuilder)} or
     * {@link BytesReference#bytes(XContentBuilder)}}.
     *
     * @param type The type of data you want to format for the request
     * @return Never {@code null}. Always an ended-object.
     * @see #LAST_UPDATED_VERSION
     */
    static XContentBuilder pipelineForApiVersion6(final XContentType type) {
        try {
            return XContentBuilder.builder(type.xContent()).startObject()
                    .field("description", "This pipeline upgrades documents from the older version of the Monitoring API to " +
                            "the newer version (" + TEMPLATE_VERSION + ") by fixing breaking " +
                            "changes in those older documents before they are indexed from the older version (" +
                            OLD_TEMPLATE_VERSION + ").")
                    .field("version", LAST_UPDATED_VERSION)
                    .startArray("processors")
                        .startObject()
                            // remove the type
                            .startObject("script")
                                .field("source","ctx._type = null" )
                            .endObject()
                        .endObject()
                        .startObject()
                            // ensure the data lands in the correct index
                            .startObject("gsub")
                                .field("field", "_index")
                                .field("pattern", "(.monitoring-\\w+-)6(-.+)")
                                .field("replacement", "$1" + TEMPLATE_VERSION + "$2")
                            .endObject()
                        .endObject()
                    .endArray()
                    .endObject();
        } catch (final IOException e) {
            // The builder writes to memory, so this should never happen in practice.
            throw new RuntimeException("Failed to create pipeline to upgrade from older version [" + OLD_TEMPLATE_VERSION +
                    "] to the newer version [" + TEMPLATE_VERSION + "].", e);
        }
    }

    /**
     * Create an empty pipeline.
     * The expectation is that you will call either {@link Strings#toString(XContentBuilder)} or
     * {@link BytesReference#bytes(XContentBuilder)}}.
     *
     * @param type The type of data you want to format for the request
     * @return Never {@code null}. Always an ended-object.
     * @see #LAST_UPDATED_VERSION
     */
    public static XContentBuilder emptyPipeline(final XContentType type) {
        try {
            // For now: We prepend the API version to the string so that it's easy to parse in the future; if we ever add metadata
            // to pipelines, then it would better serve this use case
            return XContentBuilder.builder(type.xContent()).startObject()
                    .field("description", "This is a placeholder pipeline for Monitoring API version " + TEMPLATE_VERSION +
                            " so that future versions may fix breaking changes.")
                    .field("version", LAST_UPDATED_VERSION)
                    .startArray("processors").endArray()
                    .endObject();
        } catch (final IOException e) {
            // The builder writes to memory, so this should never happen in practice.
            throw new RuntimeException("Failed to create empty pipeline", e);
        }
    }

    /**
     * Get the index name given a specific date format, a monitored system and a timestamp.
     *
     * @param formatter the {@link DateFormatter} to use to compute the timestamped index name
     * @param system the {@link MonitoredSystem} for which the index name is computed
     * @param timestamp the timestamp value to use to compute the timestamped index name
     * @return the index name as a @{link String}
     */
    public static String indexName(final DateFormatter formatter, final MonitoredSystem system, final long timestamp) {
        return ".monitoring-" + system.getSystem() + "-" + TEMPLATE_VERSION + "-" + formatter.format(Instant.ofEpochMilli(timestamp));
    }
}
| |
/**
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for
* license information.
*/
package com.microsoft.azure.management;
import com.microsoft.azure.management.network.PublicIPAddress;
import com.microsoft.azure.management.network.PublicIPAddresses;
import com.microsoft.azure.management.resources.ResourceGroup;
import com.microsoft.azure.management.resources.fluentcore.arm.Region;
import com.microsoft.azure.management.resources.fluentcore.utils.SdkContext;
import com.microsoft.azure.management.trafficmanager.EndpointType;
import com.microsoft.azure.management.trafficmanager.TargetAzureResourceType;
import com.microsoft.azure.management.trafficmanager.TrafficManagerAzureEndpoint;
import com.microsoft.azure.management.trafficmanager.TrafficManagerExternalEndpoint;
import com.microsoft.azure.management.trafficmanager.TrafficManagerNestedProfileEndpoint;
import com.microsoft.azure.management.trafficmanager.TrafficManagerProfile;
import com.microsoft.azure.management.trafficmanager.TrafficManagerProfiles;
import org.junit.Assert;
import java.util.Map;
/**
* Test of traffic manager management.
*/
public class TestTrafficManager extends TestTemplate<TrafficManagerProfile, TrafficManagerProfiles> {
private final PublicIPAddresses publicIPAddresses;
private final String externalEndpointName21 = "external-ep-1";
private final String externalEndpointName22 = "external-ep-2";
private final String externalEndpointName23 = "external-ep-3";
private final String externalFqdn21 = "www.azure.microsoft.com";
private final String externalFqdn22 = "www.bing.com";
private final String externalFqdn23 = "www.github.com";
private final String azureEndpointName = "azure-ep-1";
private final String nestedProfileEndpointName = "nested-profile-ep-1";
/**
 * Creates the traffic manager test.
 *
 * @param publicIPAddresses entry point used later to create the public IP address
 *        that backs the Azure endpoint of the test profile
 */
public TestTrafficManager(PublicIPAddresses publicIPAddresses) {
this.publicIPAddresses = publicIPAddresses;
}
/**
 * Creates the full test topology: a nested Traffic Manager profile with a single external
 * endpoint, a public IP address, and a parent profile with two external endpoints, one Azure
 * (public IP) endpoint and one nested-profile endpoint. Asserts the created state of both
 * profiles and every endpoint.
 *
 * @param profiles entry point for defining Traffic Manager profiles
 * @return the created parent profile
 * @throws Exception if any Azure resource creation fails
 */
@Override
public TrafficManagerProfile createResource(TrafficManagerProfiles profiles) throws Exception {
final Region region = Region.US_EAST;
// testId is declared on the enclosing class (outside this view) — presumably a per-run
// suffix that keeps resource names unique; TODO confirm.
final String groupName = "rg" + this.testId;
final String pipName = "pip" + this.testId;
final String pipDnsLabel = SdkContext.randomResourceName("contoso", 15);
final String tmProfileName = "tm" + this.testId;
final String nestedTmProfileName = "nested" + tmProfileName;
final String tmProfileDnsLabel = SdkContext.randomResourceName("tmdns", 15);
final String nestedTmProfileDnsLabel = "nested" + tmProfileDnsLabel;
// The resource group is created lazily as part of the first profile's create() call.
ResourceGroup.DefinitionStages.WithCreate rgCreatable = profiles.manager().resourceManager().resourceGroups().define(groupName)
.withRegion(region);
// Creates a TM profile that will be used as a nested profile endpoint in parent TM profile
//
TrafficManagerProfile nestedProfile = profiles.define(nestedTmProfileName)
.withNewResourceGroup(rgCreatable)
.withLeafDomainLabel(nestedTmProfileDnsLabel)
.withPriorityBasedRouting()
.defineExternalTargetEndpoint("external-ep-1")
.toFqdn("www.gitbook.com")
.fromRegion(Region.INDIA_CENTRAL)
.attach()
.withHttpsMonitoring()
.withTimeToLive(500)
.create();
// HTTPS monitoring implies port 443 and the default path "/".
Assert.assertTrue(nestedProfile.isEnabled());
Assert.assertNotNull(nestedProfile.monitorStatus());
Assert.assertEquals(nestedProfile.monitoringPort(), 443);
Assert.assertEquals(nestedProfile.monitoringPath(), "/");
Assert.assertEquals(nestedProfile.azureEndpoints().size(), 0);
Assert.assertEquals(nestedProfile.nestedProfileEndpoints().size(), 0);
Assert.assertEquals(nestedProfile.externalEndpoints().size(), 1);
Assert.assertEquals(nestedProfile.fqdn(), nestedTmProfileDnsLabel + ".trafficmanager.net");
Assert.assertEquals(nestedProfile.timeToLive(), 500);
// Creates a public ip to be used as an Azure endpoint
//
PublicIPAddress publicIPAddress = this.publicIPAddresses.define(pipName)
.withRegion(region)
.withNewResourceGroup(rgCreatable)
.withLeafDomainLabel(pipDnsLabel)
.create();
// An Azure endpoint requires the target public IP to have a DNS label (FQDN).
Assert.assertNotNull(publicIPAddress.fqdn());
// Creates a TM profile
//
// externalEndpointName21/22 and externalFqdn21/22 are fields declared on the enclosing
// class outside this view.
TrafficManagerProfile profile = profiles.define(tmProfileName)
.withNewResourceGroup(rgCreatable)
.withLeafDomainLabel(tmProfileDnsLabel)
.withWeightBasedRouting()
.defineExternalTargetEndpoint(externalEndpointName21)
.toFqdn(externalFqdn21)
.fromRegion(Region.US_EAST)
.withRoutingPriority(1)
.withRoutingWeight(1)
.attach()
.defineExternalTargetEndpoint(externalEndpointName22)
.toFqdn(externalFqdn22)
.fromRegion(Region.US_EAST2)
.withRoutingPriority(2)
.withRoutingWeight(1)
.withTrafficDisabled()
.attach()
.defineAzureTargetEndpoint(azureEndpointName)
.toResourceId(publicIPAddress.id())
.withRoutingPriority(3)
.attach()
.defineNestedTargetEndpoint(nestedProfileEndpointName)
.toProfile(nestedProfile)
.fromRegion(Region.INDIA_CENTRAL)
.withMinimumEndpointsToEnableTraffic(1)
.withRoutingPriority(4)
.attach()
.withHttpMonitoring()
.create();
// HTTP monitoring implies port 80; TTL was not set so the service default (300) applies.
Assert.assertTrue(profile.isEnabled());
Assert.assertNotNull(profile.monitorStatus());
Assert.assertEquals(profile.monitoringPort(), 80);
Assert.assertEquals(profile.monitoringPath(), "/");
Assert.assertEquals(profile.azureEndpoints().size(), 1);
Assert.assertEquals(profile.nestedProfileEndpoints().size(), 1);
Assert.assertEquals(profile.externalEndpoints().size(), 2);
Assert.assertEquals(profile.fqdn(), tmProfileDnsLabel + ".trafficmanager.net");
Assert.assertEquals(profile.timeToLive(), 300); // Default
// Refresh and re-verify the endpoint collections survive a round trip to the service.
profile = profile.refresh();
Assert.assertEquals(profile.azureEndpoints().size(), 1);
Assert.assertEquals(profile.nestedProfileEndpoints().size(), 1);
Assert.assertEquals(profile.externalEndpoints().size(), 2);
// Verify both external endpoints by name; c counts the expected matches.
int c = 0;
for (TrafficManagerExternalEndpoint endpoint : profile.externalEndpoints().values()) {
Assert.assertEquals(endpoint.endpointType(), EndpointType.EXTERNAL);
if (endpoint.name().equalsIgnoreCase(externalEndpointName21)) {
Assert.assertEquals(endpoint.routingPriority(), 1);
Assert.assertEquals(endpoint.fqdn(), externalFqdn21);
Assert.assertNotNull(endpoint.monitorStatus());
Assert.assertEquals(endpoint.sourceTrafficLocation(), Region.US_EAST);
c++;
} else if (endpoint.name().equalsIgnoreCase(externalEndpointName22)) {
Assert.assertEquals(endpoint.routingPriority(), 2);
Assert.assertEquals(endpoint.fqdn(), externalFqdn22);
Assert.assertNotNull(endpoint.monitorStatus());
Assert.assertEquals(endpoint.sourceTrafficLocation(), Region.US_EAST2);
c++;
}
}
Assert.assertEquals(c, 2);
// Verify the Azure (public IP) endpoint.
c = 0;
for (TrafficManagerAzureEndpoint endpoint : profile.azureEndpoints().values()) {
Assert.assertEquals(endpoint.endpointType(), EndpointType.AZURE);
if (endpoint.name().equalsIgnoreCase(azureEndpointName)) {
Assert.assertEquals(endpoint.routingPriority(), 3);
Assert.assertNotNull(endpoint.monitorStatus());
Assert.assertEquals(endpoint.targetAzureResourceId(), publicIPAddress.id());
Assert.assertEquals(endpoint.targetResourceType(), TargetAzureResourceType.PUBLICIP);
c++;
}
}
Assert.assertEquals(c, 1);
// Verify the nested-profile endpoint points at the profile created above.
c = 0;
for (TrafficManagerNestedProfileEndpoint endpoint : profile.nestedProfileEndpoints().values()) {
Assert.assertEquals(endpoint.endpointType(), EndpointType.NESTED_PROFILE);
if (endpoint.name().equalsIgnoreCase(nestedProfileEndpointName)) {
Assert.assertEquals(endpoint.routingPriority(), 4);
Assert.assertNotNull(endpoint.monitorStatus());
Assert.assertEquals(endpoint.minimumChildEndpointCount(), 1);
Assert.assertEquals(endpoint.nestedProfileId(), nestedProfile.id());
Assert.assertEquals(endpoint.sourceTrafficLocation(), Region.INDIA_CENTRAL);
c++;
}
}
Assert.assertEquals(c, 1);
return profile;
}
/**
 * Updates the Traffic Manager profile: switches to performance-based routing, changes
 * monitoring port and TTL, removes one external endpoint, updates the Azure and
 * nested-profile endpoints, and adds a new external endpoint — then verifies the result.
 *
 * @param profile the profile created by {@code createResource}
 * @return the updated profile
 * @throws Exception if the Azure update operation fails
 */
@Override
public TrafficManagerProfile updateResource(TrafficManagerProfile profile) throws Exception {
    // Remove an endpoint, update two endpoints and add new one
    //
    profile.update()
            .withTimeToLive(600)
            .withHttpMonitoring(8080, "/")
            .withPerformanceBasedRouting()
            .withoutEndpoint(externalEndpointName21)
            .updateAzureTargetEndpoint(azureEndpointName)
                .withRoutingPriority(5)
                .withRoutingWeight(2)
                .parent()
            .updateNestedProfileTargetEndpoint(nestedProfileEndpointName)
                .withTrafficDisabled()
                .parent()
            .defineExternalTargetEndpoint(externalEndpointName23)
                .toFqdn(externalFqdn23)
                .fromRegion(Region.US_CENTRAL)
                .withRoutingPriority(6)
                .attach()
            .apply();
    // JUnit convention: expected value first, actual second.
    Assert.assertEquals(8080, profile.monitoringPort());
    Assert.assertEquals("/", profile.monitoringPath());
    Assert.assertEquals(1, profile.azureEndpoints().size());
    Assert.assertEquals(1, profile.nestedProfileEndpoints().size());
    Assert.assertEquals(2, profile.externalEndpoints().size());
    Assert.assertEquals(600, profile.timeToLive());
    // Count only the two endpoints we expect by name.
    int matchedExternal = 0;
    for (TrafficManagerExternalEndpoint endpoint : profile.externalEndpoints().values()) {
        Assert.assertEquals(EndpointType.EXTERNAL, endpoint.endpointType());
        if (endpoint.name().equalsIgnoreCase(externalEndpointName22)) {
            Assert.assertEquals(2, endpoint.routingPriority());
            Assert.assertEquals(externalFqdn22, endpoint.fqdn());
            Assert.assertEquals(Region.US_EAST2, endpoint.sourceTrafficLocation());
            Assert.assertNotNull(endpoint.monitorStatus());
            matchedExternal++;
        } else if (endpoint.name().equalsIgnoreCase(externalEndpointName23)) {
            Assert.assertEquals(6, endpoint.routingPriority());
            Assert.assertEquals(externalFqdn23, endpoint.fqdn());
            Assert.assertNotNull(endpoint.monitorStatus());
            Assert.assertEquals(Region.US_CENTRAL, endpoint.sourceTrafficLocation());
            matchedExternal++;
        }
        // BUG FIX: the original also incremented the counter in an else-branch for
        // unrecognized endpoints, which let the "both expected endpoints exist" assertion
        // below pass even when one of them was missing.
    }
    Assert.assertEquals(2, matchedExternal);
    int matchedAzure = 0;
    for (TrafficManagerAzureEndpoint endpoint : profile.azureEndpoints().values()) {
        Assert.assertEquals(EndpointType.AZURE, endpoint.endpointType());
        if (endpoint.name().equalsIgnoreCase(azureEndpointName)) {
            Assert.assertEquals(5, endpoint.routingPriority());
            Assert.assertEquals(2, endpoint.routingWeight());
            Assert.assertEquals(TargetAzureResourceType.PUBLICIP, endpoint.targetResourceType());
            matchedAzure++;
        }
    }
    Assert.assertEquals(1, matchedAzure);
    return profile;
}
/**
 * Prints a human-readable summary of the given Traffic Manager profile — its core settings
 * plus every Azure, external and nested-profile endpoint — to standard output.
 *
 * @param profile the profile to print
 */
@Override
public void print(TrafficManagerProfile profile) {
    StringBuilder builder = new StringBuilder();
    builder.append("Traffic Manager Profile: ").append(profile.id())
            .append("\n\tName: ").append(profile.name())
            .append("\n\tResource group: ").append(profile.resourceGroupName())
            .append("\n\tRegion: ").append(profile.regionName())
            .append("\n\tTags: ").append(profile.tags())
            .append("\n\tDNSLabel: ").append(profile.dnsLabel())
            .append("\n\tFQDN: ").append(profile.fqdn())
            .append("\n\tTTL: ").append(profile.timeToLive())
            .append("\n\tEnabled: ").append(profile.isEnabled())
            .append("\n\tRoutingMethod: ").append(profile.trafficRoutingMethod())
            .append("\n\tMonitor status: ").append(profile.monitorStatus())
            .append("\n\tMonitoring port: ").append(profile.monitoringPort())
            .append("\n\tMonitoring path: ").append(profile.monitoringPath());
    Map<String, TrafficManagerAzureEndpoint> azureEndpointMap = profile.azureEndpoints();
    if (!azureEndpointMap.isEmpty()) {
        builder.append("\n\tAzure endpoints:");
        int position = 1;
        for (TrafficManagerAzureEndpoint azureEndpoint : azureEndpointMap.values()) {
            builder.append("\n\t\tAzure endpoint: #").append(position++)
                    .append("\n\t\t\tId: ").append(azureEndpoint.id())
                    .append("\n\t\t\tType: ").append(azureEndpoint.endpointType())
                    .append("\n\t\t\tTarget resourceId: ").append(azureEndpoint.targetAzureResourceId())
                    .append("\n\t\t\tTarget resourceType: ").append(azureEndpoint.targetResourceType())
                    .append("\n\t\t\tMonitor status: ").append(azureEndpoint.monitorStatus())
                    .append("\n\t\t\tEnabled: ").append(azureEndpoint.isEnabled())
                    .append("\n\t\t\tRouting priority: ").append(azureEndpoint.routingPriority())
                    .append("\n\t\t\tRouting weight: ").append(azureEndpoint.routingWeight());
        }
    }
    Map<String, TrafficManagerExternalEndpoint> externalEndpointMap = profile.externalEndpoints();
    if (!externalEndpointMap.isEmpty()) {
        builder.append("\n\tExternal endpoints:");
        int position = 1;
        for (TrafficManagerExternalEndpoint externalEndpoint : externalEndpointMap.values()) {
            builder.append("\n\t\tExternal endpoint: #").append(position++)
                    .append("\n\t\t\tId: ").append(externalEndpoint.id())
                    .append("\n\t\t\tType: ").append(externalEndpoint.endpointType())
                    .append("\n\t\t\tFQDN: ").append(externalEndpoint.fqdn())
                    .append("\n\t\t\tSource Traffic Location: ").append(externalEndpoint.sourceTrafficLocation())
                    .append("\n\t\t\tMonitor status: ").append(externalEndpoint.monitorStatus())
                    .append("\n\t\t\tEnabled: ").append(externalEndpoint.isEnabled())
                    .append("\n\t\t\tRouting priority: ").append(externalEndpoint.routingPriority())
                    .append("\n\t\t\tRouting weight: ").append(externalEndpoint.routingWeight());
        }
    }
    Map<String, TrafficManagerNestedProfileEndpoint> nestedEndpointMap = profile.nestedProfileEndpoints();
    if (!nestedEndpointMap.isEmpty()) {
        builder.append("\n\tNested profile endpoints:");
        int position = 1;
        for (TrafficManagerNestedProfileEndpoint nestedEndpoint : nestedEndpointMap.values()) {
            builder.append("\n\t\tNested profile endpoint: #").append(position++)
                    .append("\n\t\t\tId: ").append(nestedEndpoint.id())
                    .append("\n\t\t\tType: ").append(nestedEndpoint.endpointType())
                    .append("\n\t\t\tNested profileId: ").append(nestedEndpoint.nestedProfileId())
                    .append("\n\t\t\tMinimum child threshold: ").append(nestedEndpoint.minimumChildEndpointCount())
                    .append("\n\t\t\tSource Traffic Location: ").append(nestedEndpoint.sourceTrafficLocation())
                    .append("\n\t\t\tMonitor status: ").append(nestedEndpoint.monitorStatus())
                    .append("\n\t\t\tEnabled: ").append(nestedEndpoint.isEnabled())
                    .append("\n\t\t\tRouting priority: ").append(nestedEndpoint.routingPriority())
                    .append("\n\t\t\tRouting weight: ").append(nestedEndpoint.routingWeight());
        }
    }
    System.out.println(builder.toString());
}
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.kubernetes.kubeclient.parameters;
import org.apache.flink.client.deployment.ClusterSpecification;
import org.apache.flink.configuration.BlobServerOptions;
import org.apache.flink.configuration.HighAvailabilityOptions;
import org.apache.flink.configuration.IllegalConfigurationException;
import org.apache.flink.configuration.JobManagerOptions;
import org.apache.flink.configuration.ResourceManagerOptions;
import org.apache.flink.configuration.RestOptions;
import org.apache.flink.kubernetes.KubernetesTestBase;
import org.apache.flink.kubernetes.configuration.KubernetesConfigOptions;
import org.apache.flink.kubernetes.configuration.KubernetesConfigOptionsInternal;
import org.apache.flink.kubernetes.highavailability.KubernetesHaServicesFactory;
import org.apache.flink.kubernetes.utils.Constants;
import org.apache.flink.util.FlinkRuntimeException;
import org.junit.Test;
import java.util.HashMap;
import java.util.Map;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/**
 * General tests for the {@link KubernetesJobManagerParameters}.
 *
 * <p>Each test mutates the shared {@code flinkConfig} inherited from
 * {@link KubernetesTestBase} before reading the corresponding value back through
 * {@code kubernetesJobManagerParameters}.
 */
public class KubernetesJobManagerParametersTest extends KubernetesTestBase {
private static final double JOB_MANAGER_CPU = 2.0;
private static final double JOB_MANAGER_CPU_LIMIT_FACTOR = 2.5;
private static final double JOB_MANAGER_MEMORY_LIMIT_FACTOR = 2.0;
// JOB_MANAGER_MEMORY is presumably declared on KubernetesTestBase — TODO confirm.
// NOTE: field order matters — clusterSpecification must be initialized before
// kubernetesJobManagerParameters, which captures it.
private final ClusterSpecification clusterSpecification =
new ClusterSpecification.ClusterSpecificationBuilder()
.setMasterMemoryMB(JOB_MANAGER_MEMORY)
.setTaskManagerMemoryMB(1024)
.setSlotsPerTaskManager(1)
.createClusterSpecification();
private final KubernetesJobManagerParameters kubernetesJobManagerParameters =
new KubernetesJobManagerParameters(flinkConfig, clusterSpecification);
// Environment variables set with the containerized-master prefix are exposed verbatim.
@Test
public void testGetEnvironments() {
final Map<String, String> expectedEnvironments = new HashMap<>();
expectedEnvironments.put("k1", "v1");
expectedEnvironments.put("k2", "v2");
expectedEnvironments.forEach(
(k, v) ->
flinkConfig.setString(
ResourceManagerOptions.CONTAINERIZED_MASTER_ENV_PREFIX + k, v));
final Map<String, String> resultEnvironments =
kubernetesJobManagerParameters.getEnvironments();
assertEquals(expectedEnvironments, resultEnvironments);
}
// No annotations configured -> empty map, not null.
@Test
public void testGetEmptyAnnotations() {
assertTrue(kubernetesJobManagerParameters.getAnnotations().isEmpty());
}
@Test
public void testGetJobManagerAnnotations() {
final Map<String, String> expectedAnnotations = new HashMap<>();
expectedAnnotations.put("a1", "v1");
expectedAnnotations.put("a2", "v2");
flinkConfig.set(KubernetesConfigOptions.JOB_MANAGER_ANNOTATIONS, expectedAnnotations);
final Map<String, String> resultAnnotations =
kubernetesJobManagerParameters.getAnnotations();
assertThat(resultAnnotations, is(equalTo(expectedAnnotations)));
}
@Test
public void testGetServiceAnnotations() {
final Map<String, String> expectedAnnotations = new HashMap<>();
expectedAnnotations.put("a1", "v1");
expectedAnnotations.put("a2", "v2");
flinkConfig.set(KubernetesConfigOptions.REST_SERVICE_ANNOTATIONS, expectedAnnotations);
final Map<String, String> resultAnnotations =
kubernetesJobManagerParameters.getRestServiceAnnotations();
assertThat(resultAnnotations, is(equalTo(expectedAnnotations)));
}
// Memory comes from the cluster specification, not from flinkConfig.
@Test
public void testGetJobManagerMemoryMB() {
assertEquals(JOB_MANAGER_MEMORY, kubernetesJobManagerParameters.getJobManagerMemoryMB());
}
@Test
public void testGetJobManagerCPU() {
flinkConfig.set(KubernetesConfigOptions.JOB_MANAGER_CPU, JOB_MANAGER_CPU);
assertEquals(JOB_MANAGER_CPU, kubernetesJobManagerParameters.getJobManagerCPU(), 0.00001);
}
@Test
public void testGetJobManagerCPULimitFactor() {
flinkConfig.set(
KubernetesConfigOptions.JOB_MANAGER_CPU_LIMIT_FACTOR, JOB_MANAGER_CPU_LIMIT_FACTOR);
assertEquals(
JOB_MANAGER_CPU_LIMIT_FACTOR,
kubernetesJobManagerParameters.getJobManagerCPULimitFactor(),
0.00001);
}
@Test
public void testGetJobManagerMemoryLimitFactor() {
flinkConfig.set(
KubernetesConfigOptions.JOB_MANAGER_MEMORY_LIMIT_FACTOR,
JOB_MANAGER_MEMORY_LIMIT_FACTOR);
assertEquals(
JOB_MANAGER_MEMORY_LIMIT_FACTOR,
kubernetesJobManagerParameters.getJobManagerMemoryLimitFactor(),
0.00001);
}
@Test
public void testGetRestPort() {
flinkConfig.set(RestOptions.PORT, 12345);
assertEquals(12345, kubernetesJobManagerParameters.getRestPort());
}
@Test
public void testGetRpcPort() {
flinkConfig.set(JobManagerOptions.PORT, 1234);
assertEquals(1234, kubernetesJobManagerParameters.getRPCPort());
}
// The blob server port option is a string because it may hold a range; only a fixed,
// non-zero port is accepted by the parameters class (see the two exception tests below).
@Test
public void testGetBlobServerPort() {
flinkConfig.set(BlobServerOptions.PORT, "2345");
assertEquals(2345, kubernetesJobManagerParameters.getBlobServerPort());
}
@Test
public void testGetBlobServerPortException1() {
flinkConfig.set(BlobServerOptions.PORT, "1000-2000");
try {
kubernetesJobManagerParameters.getBlobServerPort();
fail("Should fail with an exception.");
} catch (FlinkRuntimeException e) {
assertThat(
e.getMessage(),
containsString(
BlobServerOptions.PORT.key()
+ " should be specified to a fixed port. Do not support a range of ports."));
}
}
@Test
public void testGetBlobServerPortException2() {
flinkConfig.set(BlobServerOptions.PORT, "0");
try {
kubernetesJobManagerParameters.getBlobServerPort();
fail("Should fail with an exception.");
} catch (IllegalArgumentException e) {
assertThat(
e.getMessage(),
containsString(BlobServerOptions.PORT.key() + " should not be 0."));
}
}
// Service account resolution order: JM-specific option, then generic option, then "default".
@Test
public void testGetServiceAccount() {
flinkConfig.set(KubernetesConfigOptions.JOB_MANAGER_SERVICE_ACCOUNT, "flink");
assertThat(kubernetesJobManagerParameters.getServiceAccount(), is("flink"));
}
@Test
public void testGetServiceAccountFallback() {
flinkConfig.set(KubernetesConfigOptions.KUBERNETES_SERVICE_ACCOUNT, "flink-fallback");
assertThat(kubernetesJobManagerParameters.getServiceAccount(), is("flink-fallback"));
}
@Test
public void testGetServiceAccountShouldReturnDefaultIfNotExplicitlySet() {
assertThat(kubernetesJobManagerParameters.getServiceAccount(), is("default"));
}
@Test
public void testGetEntrypointMainClass() {
final String entrypointClass = "org.flink.kubernetes.Entrypoint";
flinkConfig.set(KubernetesConfigOptionsInternal.ENTRY_POINT_CLASS, entrypointClass);
assertEquals(entrypointClass, kubernetesJobManagerParameters.getEntrypointClass());
}
@Test
public void testGetRestServiceExposedType() {
flinkConfig.set(
KubernetesConfigOptions.REST_SERVICE_EXPOSED_TYPE,
KubernetesConfigOptions.ServiceExposedType.NodePort);
assertEquals(
KubernetesConfigOptions.ServiceExposedType.NodePort,
kubernetesJobManagerParameters.getRestServiceExposedType());
}
// User-supplied labels must never override Flink's built-in type/app/component labels.
@Test
public void testPrioritizeBuiltInLabels() {
final Map<String, String> userLabels = new HashMap<>();
userLabels.put(Constants.LABEL_TYPE_KEY, "user-label-type");
userLabels.put(Constants.LABEL_APP_KEY, "user-label-app");
userLabels.put(Constants.LABEL_COMPONENT_KEY, "user-label-component-jm");
flinkConfig.set(KubernetesConfigOptions.JOB_MANAGER_LABELS, userLabels);
final Map<String, String> expectedLabels = new HashMap<>(getCommonLabels());
expectedLabels.put(Constants.LABEL_COMPONENT_KEY, Constants.LABEL_COMPONENT_JOB_MANAGER);
assertThat(kubernetesJobManagerParameters.getLabels(), is(equalTo(expectedLabels)));
}
// More than one JobManager replica is only valid when Kubernetes HA is enabled.
@Test(expected = IllegalConfigurationException.class)
public void testGetReplicasWithTwoShouldFailWhenHAIsNotEnabled() {
flinkConfig.set(KubernetesConfigOptions.KUBERNETES_JOBMANAGER_REPLICAS, 2);
kubernetesJobManagerParameters.getReplicas();
}
@Test(expected = IllegalConfigurationException.class)
public void testGetReplicasWithInvalidValue() {
flinkConfig.set(KubernetesConfigOptions.KUBERNETES_JOBMANAGER_REPLICAS, 0);
kubernetesJobManagerParameters.getReplicas();
}
@Test
public void testGetReplicas() {
flinkConfig.set(
HighAvailabilityOptions.HA_MODE,
KubernetesHaServicesFactory.class.getCanonicalName());
flinkConfig.set(KubernetesConfigOptions.KUBERNETES_JOBMANAGER_REPLICAS, 2);
assertThat(kubernetesJobManagerParameters.getReplicas(), is(2));
}
}
| |
/**
* Copyright 2005-2015 The Kuali Foundation
*
* Licensed under the Educational Community License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.opensource.org/licenses/ecl2.php
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kuali.rice.kew.doctype.service.impl;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.kuali.rice.core.api.CoreConstants;
import org.kuali.rice.core.api.datetime.DateTimeService;
import org.kuali.rice.core.api.reflect.ObjectDefinition;
import org.kuali.rice.core.api.resourceloader.GlobalResourceLoader;
import org.kuali.rice.core.api.util.KeyValue;
import org.kuali.rice.kew.api.KewApiServiceLocator;
import org.kuali.rice.kew.api.WorkflowRuntimeException;
import org.kuali.rice.kew.api.document.Document;
import org.kuali.rice.kew.api.document.search.DocumentSearchResult;
import org.kuali.rice.kew.api.document.search.DocumentSearchResults;
import org.kuali.rice.kew.api.extension.ExtensionDefinition;
import org.kuali.rice.kew.api.extension.ExtensionRepositoryService;
import org.kuali.rice.kew.doctype.DocumentTypeSecurity;
import org.kuali.rice.kew.framework.KewFrameworkServiceLocator;
import org.kuali.rice.kew.framework.document.security.DocumentSecurityDirective;
import org.kuali.rice.kew.framework.document.security.DocumentSecurityHandlerService;
import org.kuali.rice.kew.framework.document.security.DocumentSecurityAttribute;
import org.kuali.rice.kew.doctype.SecurityPermissionInfo;
import org.kuali.rice.kew.doctype.SecuritySession;
import org.kuali.rice.kew.doctype.bo.DocumentType;
import org.kuali.rice.kew.doctype.service.DocumentSecurityService;
import org.kuali.rice.kew.routeheader.DocumentRouteHeaderValue;
import org.kuali.rice.kew.service.KEWServiceLocator;
import org.kuali.rice.kew.user.UserUtils;
import org.kuali.rice.kew.api.KewApiConstants;
import org.kuali.rice.kim.api.group.Group;
import org.kuali.rice.kim.api.services.KimApiServiceLocator;
import org.springframework.util.LinkedMultiValueMap;
import org.springframework.util.MultiValueMap;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
public class DocumentSecurityServiceImpl implements DocumentSecurityService {
// Shared log4j logger for all document-security checks in this service.
public static final org.apache.log4j.Logger LOG = org.apache.log4j.Logger.getLogger(
DocumentSecurityServiceImpl.class);
// Resolves security-attribute extension definitions by name; presumably injected via a
// setter declared outside this view — TODO confirm.
private ExtensionRepositoryService extensionRepositoryService;
/**
 * Checks whether the given principal is authorized to view the route log of the supplied
 * document.
 *
 * @param principalId the principal whose access is being checked
 * @param routeHeader the document whose route log is being viewed
 * @param securitySession per-request cache of security lookups
 * @return true when the principal is authorized for the document's route log
 */
@Override
public boolean routeLogAuthorized(String principalId, DocumentRouteHeaderValue routeHeader,
        SecuritySession securitySession) {
    Document apiDocument = DocumentRouteHeaderValue.to(routeHeader);
    if (apiDocument == null) {
        return false;
    }
    Set<String> authorizedDocumentIds =
            checkAuthorizations(principalId, securitySession, Collections.singletonList(apiDocument));
    return authorizedDocumentIds.contains(routeHeader.getDocumentId());
}
/**
 * Filters document search results down to the ids of documents the principal may see.
 *
 * @param principalId the principal whose access is being checked
 * @param results the raw document search results
 * @param securitySession per-request cache of security lookups
 * @return ids of the documents in {@code results} the principal is authorized for
 */
@Override
public Set<String> documentSearchResultAuthorized(String principalId, DocumentSearchResults results,
        SecuritySession securitySession) {
    List<Document> searchDocuments = new ArrayList<Document>(results.getSearchResults().size());
    for (DocumentSearchResult searchResult : results.getSearchResults()) {
        searchDocuments.add(searchResult.getDocument());
    }
    return checkAuthorizations(principalId, securitySession, searchDocuments);
}
/**
 * Determines which of the given documents the principal is authorized to see.
 *
 * <p>Admins are authorized for everything. Otherwise each document first goes through the
 * standard checks ({@code checkStandardAuthorization}); documents that fail those but whose
 * document type declares security-attribute extensions are collected and evaluated in a
 * second pass by {@code processDocumentRequiringExtensionProcessing}.
 *
 * @param principalId the principal whose access is being checked
 * @param securitySession per-request cache of security lookups
 * @param documents the documents to check
 * @return the set of authorized document ids (subset of the given documents' ids)
 */
protected Set<String> checkAuthorizations(String principalId, SecuritySession securitySession,
List<Document> documents) {
Set<String> authorizations = new HashSet<String>();
// a list of documents which need to be processed with security extension attributes after the standard set of
// security has been attempted
List<Document> documentsRequiringExtensionProcessing = new ArrayList<Document>();
boolean admin = isAdmin(securitySession);
for (Document document : documents) {
if (admin) {
// Admins bypass all per-document security.
authorizations.add(document.getDocumentId());
continue;
}
DocumentTypeSecurity security = null;
try {
security = getDocumentTypeSecurity(document.getDocumentTypeName(), securitySession);
// No security (or inactive security) on the document type means open access.
if (security == null || !security.isActive() || checkStandardAuthorization(security, principalId,
document, securitySession)) {
authorizations.add(document.getDocumentId());
} else {
// if we get to this point, it means we aren't authorized yet, last chance for authorization will be
// security extension attributes, so prepare for execution of those after the main loop is complete
if (CollectionUtils.isNotEmpty(security.getSecurityAttributeExtensionNames())) {
documentsRequiringExtensionProcessing.add(document);
}
}
} catch (Exception e) {
// A failure for one document must not block authorization checks for the rest;
// the failing document is simply treated as not authorized.
LOG.warn(
"Not able to retrieve DocumentTypeSecurity from remote system for documentTypeName: " + document
.getDocumentTypeName(), e);
continue;
}
}
processDocumentRequiringExtensionProcessing(documentsRequiringExtensionProcessing, securitySession,
authorizations);
return authorizations;
}
/**
 * Runs security-attribute extension checks for documents that the standard security checks
 * did not authorize, adding any newly authorized document ids to {@code authorizations}.
 *
 * <p>Documents are partitioned by (application id, extension definitions), the directives
 * are grouped per application, and each application's security handler is asked once for
 * the authorized document ids.
 *
 * @param documentsRequiringExtensionProcessing documents still needing extension checks
 * @param securitySession per-request cache of security lookups (supplies the principal)
 * @param authorizations mutable set of authorized document ids; results are added to it
 */
protected void processDocumentRequiringExtensionProcessing(List<Document> documentsRequiringExtensionProcessing,
        SecuritySession securitySession, Set<String> authorizations) {
    if (CollectionUtils.isEmpty(documentsRequiringExtensionProcessing)) {
        return;
    }
    LOG.info("Beginning processing of documents requiring extension processing (total: "
            + documentsRequiringExtensionProcessing.size()
            + " documents)");
    long start = System.currentTimeMillis();
    MultiValueMap<PartitionKey, Document> partitions = partitionDocumentsForSecurity(
            documentsRequiringExtensionProcessing, securitySession);
    // Group the security directives by the application that will evaluate them, so each
    // remote application is called exactly once.
    MultiValueMap<String, DocumentSecurityDirective> applicationSecurityDirectives =
            new LinkedMultiValueMap<String, DocumentSecurityDirective>();
    for (PartitionKey partitionKey : partitions.keySet()) {
        DocumentSecurityDirective directive = DocumentSecurityDirective.create(
                partitionKey.getDocumentSecurityAttributeNameList(), partitions.get(partitionKey));
        applicationSecurityDirectives.add(partitionKey.applicationId, directive);
    }
    for (String applicationId : applicationSecurityDirectives.keySet()) {
        List<DocumentSecurityDirective> documentSecurityDirectives = applicationSecurityDirectives.get(
                applicationId);
        DocumentSecurityHandlerService securityHandler = loadSecurityHandler(applicationId);
        List<String> authorizedDocumentIds = securityHandler.getAuthorizedDocumentIds(
                securitySession.getPrincipalId(), documentSecurityDirectives);
        if (CollectionUtils.isNotEmpty(authorizedDocumentIds)) {
            authorizations.addAll(authorizedDocumentIds);
        }
    }
    long end = System.currentTimeMillis();
    // BUG FIX: elapsed time is (end - start); the original logged (start - end), which is
    // always negative.
    LOG.info("Finished processing of documents requiring extension processing (total time: "
            + (end - start)
            + ")");
}
/**
 * Groups the given documents by the (application id, extension definitions) partition they
 * belong to, so each application's security handler can be invoked once per partition.
 *
 * @param documents documents requiring extension-based security evaluation
 * @param securitySession per-request cache of security lookups
 * @return documents keyed by partition
 */
protected MultiValueMap<PartitionKey, Document> partitionDocumentsForSecurity(List<Document> documents,
        SecuritySession securitySession) {
    MultiValueMap<PartitionKey, Document> documentPartitions =
            new LinkedMultiValueMap<PartitionKey, Document>();
    for (Document currentDocument : documents) {
        DocumentTypeSecurity documentSecurity =
                getDocumentTypeSecurity(currentDocument.getDocumentTypeName(), securitySession);
        MultiValueMap<String, ExtensionDefinition> extensionsByApplication =
                loadExtensionDefinitions(documentSecurity, securitySession);
        for (String applicationId : extensionsByApplication.keySet()) {
            List<ExtensionDefinition> definitionsForApplication =
                    extensionsByApplication.get(applicationId);
            PartitionKey partitionKey = new PartitionKey(applicationId, definitionsForApplication);
            documentPartitions.add(partitionKey, currentDocument);
        }
    }
    return documentPartitions;
}
/**
 * Resolves the security-attribute extension definitions referenced by the given document
 * type security, grouped by the application id that owns each extension.
 *
 * @param security document type security listing the extension names
 * @param securitySession per-request cache of security lookups (not consulted here)
 * @return extension definitions keyed by owning application id
 */
protected MultiValueMap<String, ExtensionDefinition> loadExtensionDefinitions(DocumentTypeSecurity security,
        SecuritySession securitySession) {
    MultiValueMap<String, ExtensionDefinition> definitionsByApplication =
            new LinkedMultiValueMap<String, ExtensionDefinition>();
    for (String extensionName : security.getSecurityAttributeExtensionNames()) {
        ExtensionDefinition definition = extensionRepositoryService.getExtensionByName(extensionName);
        definitionsByApplication.add(definition.getApplicationId(), definition);
    }
    return definitionsByApplication;
}
/**
 * Loads the document security handler service registered for the given application.
 *
 * @param applicationId the application whose handler is required
 * @return the handler service, never null
 * @throws WorkflowRuntimeException when no handler is registered for the application
 */
protected DocumentSecurityHandlerService loadSecurityHandler(String applicationId) {
    DocumentSecurityHandlerService handlerService =
            KewFrameworkServiceLocator.getDocumentSecurityHandlerService(applicationId);
    if (handlerService != null) {
        return handlerService;
    }
    throw new WorkflowRuntimeException(
            "Failed to locate DocumentSecurityHandlerService for applicationId: " + applicationId);
}
/**
 * Checks whether the session principal holds the unrestricted-document-search permission,
 * which exempts them from all per-document security checks.
 *
 * @param session the current security session
 * @return true when the principal has the unrestricted document search permission
 */
protected boolean isAdmin(SecuritySession session) {
    String principalId = session.getPrincipalId();
    // An anonymous session can never be an admin.
    return principalId != null
            && KimApiServiceLocator.getPermissionService().isAuthorized(principalId,
                    KewApiConstants.KEW_NAMESPACE,
                    KewApiConstants.PermissionNames.UNRESTRICTED_DOCUMENT_SEARCH,
                    new HashMap<String, String>());
}
/**
 * Runs the standard (non-extension) authorization cascade for one document. The checks are
 * evaluated in order and the first one that passes grants access: initiator, KIM permission,
 * workgroup membership, searchable-attribute match, route-log involvement, and finally any
 * locally-resolvable security attribute classes.
 *
 * @param security the document type's security configuration (must not be null)
 * @param principalId the principal whose access is being checked
 * @param document the document being checked
 * @param securitySession per-request cache of security lookups
 * @return true when any of the standard checks authorizes the principal
 */
protected boolean checkStandardAuthorization(DocumentTypeSecurity security, String principalId, Document document,
SecuritySession securitySession) {
String documentId = document.getDocumentId();
String initiatorPrincipalId = document.getInitiatorPrincipalId();
LOG.debug("auth check user=" + principalId + " docId=" + documentId);
// Doc Initiator Authorization
if (security.getInitiatorOk() != null && security.getInitiatorOk()) {
boolean isInitiator = StringUtils.equals(initiatorPrincipalId, principalId);
if (isInitiator) {
return true;
}
}
// Permission Authorization
List<SecurityPermissionInfo> securityPermissions = security.getPermissions();
if (securityPermissions != null) {
for (SecurityPermissionInfo securityPermission : securityPermissions) {
if (isAuthenticatedByPermission(documentId, securityPermission.getPermissionNamespaceCode(),
securityPermission.getPermissionName(), securityPermission.getPermissionDetails(),
securityPermission.getQualifications(), securitySession)) {
return true;
}
}
}
// Group Authorization
List<Group> securityWorkgroups = security.getWorkgroups();
if (securityWorkgroups != null) {
for (Group securityWorkgroup : securityWorkgroups) {
if (isGroupAuthenticated(securityWorkgroup.getNamespaceCode(), securityWorkgroup.getName(),
securitySession)) {
return true;
}
}
}
// Searchable Attribute Authorization
// Authorizes when the document carries a searchable attribute value matching an id
// derived from the principal (e.g. principal id, network id — depends on idType).
Collection searchableAttributes = security.getSearchableAttributes();
if (searchableAttributes != null) {
for (Iterator iterator = searchableAttributes.iterator(); iterator.hasNext(); ) {
KeyValue searchableAttr = (KeyValue) iterator.next();
String attrName = searchableAttr.getKey();
String idType = searchableAttr.getValue();
String idValue = UserUtils.getIdValue(idType, principalId);
if (!StringUtils.isEmpty(idValue)) {
if (KEWServiceLocator.getRouteHeaderService().hasSearchableAttributeValue(documentId, attrName,
idValue)) {
return true;
}
}
}
}
// Route Log Authorization
// Grants access to anyone who initiated the document, took an action on it, or has
// (had) an action request for it.
if (security.getRouteLogAuthenticatedOk() != null && security.getRouteLogAuthenticatedOk()) {
boolean isInitiator = StringUtils.equals(initiatorPrincipalId, principalId);
if (isInitiator) {
return true;
}
boolean hasTakenAction = KEWServiceLocator.getActionTakenService().hasUserTakenAction(principalId,
documentId);
if (hasTakenAction) {
return true;
}
boolean hasRequest = KEWServiceLocator.getActionRequestService().doesPrincipalHaveRequest(principalId,
documentId);
if (hasRequest) {
return true;
}
}
// local security attribute authorization
List<DocumentSecurityAttribute> immediateSecurityAttributes = getImmediateSecurityAttributes(document, security,
securitySession);
if (immediateSecurityAttributes != null) {
for (DocumentSecurityAttribute immediateSecurityAttribute : immediateSecurityAttributes) {
boolean isAuthorized = immediateSecurityAttribute.isAuthorizedForDocument(principalId, document);
if (isAuthorized) {
return true;
}
}
}
LOG.debug("user not authorized");
return false;
}
/**
 * Resolves the locally-instantiable security attribute objects declared by the document
 * type security, caching each instance on the session by its class name.
 *
 * @param document the document being checked (not consulted here, only by the caller)
 * @param security document type security listing the attribute class names
 * @param securitySession per-request cache of resolved security attributes
 * @return the resolved security attribute instances, in declaration order
 */
protected List<DocumentSecurityAttribute> getImmediateSecurityAttributes(Document document, DocumentTypeSecurity security,
        SecuritySession securitySession) {
    List<DocumentSecurityAttribute> resolvedAttributes = new ArrayList<DocumentSecurityAttribute>();
    for (String attributeClassName : security.getSecurityAttributeClassNames()) {
        DocumentSecurityAttribute attribute =
                securitySession.getSecurityAttributeForClass(attributeClassName);
        if (attribute == null) {
            // Cache miss: instantiate through the GlobalResourceLoader and remember the
            // instance on the session for subsequent documents in this request.
            attribute = GlobalResourceLoader.getObject(new ObjectDefinition(attributeClassName));
            securitySession.setSecurityAttributeForClass(attributeClassName, attribute);
        }
        resolvedAttributes.add(attribute);
    }
    return resolvedAttributes;
}
/**
 * Looks up the security configuration for a document type, caching the result on the
 * session so repeated checks within one request hit the database only once.
 *
 * @param documentTypeName the document type to look up
 * @param session per-request cache of security lookups
 * @return the document type's security configuration, or null when the type is unknown
 */
protected DocumentTypeSecurity getDocumentTypeSecurity(String documentTypeName, SecuritySession session) {
    DocumentTypeSecurity cachedSecurity = session.getDocumentTypeSecurity().get(documentTypeName);
    if (cachedSecurity != null) {
        return cachedSecurity;
    }
    DocumentType documentType = KEWServiceLocator.getDocumentTypeService().findByName(documentTypeName);
    if (documentType == null) {
        return null;
    }
    DocumentTypeSecurity loadedSecurity = documentType.getDocumentTypeSecurity();
    session.getDocumentTypeSecurity().put(documentTypeName, loadedSecurity);
    return loadedSecurity;
}
/**
 * Checks whether the session's principal belongs to the named group, caching the answer in
 * the session under a "namespace:groupName" style key so repeated checks avoid service calls.
 *
 * @param namespace namespace code of the group (trimmed for the cache key)
 * @param groupName name of the group (trimmed for the cache key)
 * @param session security session carrying the principal and the membership cache
 * @return true if the principal is a member of the group
 */
protected boolean isGroupAuthenticated(String namespace, String groupName, SecuritySession session) {
    String cacheKey = namespace.trim() + KewApiConstants.KIM_GROUP_NAMESPACE_NAME_DELIMITER_CHARACTER + groupName.trim();
    Boolean cachedResult = session.getAuthenticatedWorkgroups().get(cacheKey);
    if (cachedResult == null) {
        // First check for this group in the session: ask KIM and remember the verdict.
        boolean isMember = isMemberOfGroupWithName(namespace, groupName, session.getPrincipalId());
        session.getAuthenticatedWorkgroups().put(cacheKey, isMember);
        return isMember;
    }
    return cachedResult;
}
/**
 * Returns true if any of the principal's KIM groups matches both the given namespace code
 * and group name (null-safe string comparison).
 */
private boolean isMemberOfGroupWithName(String namespace, String groupName, String principalId) {
    for (Group candidate : KimApiServiceLocator.getGroupService().getGroupsByPrincipalId(principalId)) {
        if (!StringUtils.equals(namespace, candidate.getNamespaceCode())) {
            continue;
        }
        if (StringUtils.equals(groupName, candidate.getName())) {
            return true;
        }
    }
    return false;
}
/**
 * Determines whether the session's principal holds the given KIM permission for the document,
 * after substituting any {@code ${document.<field>}} tokens in the qualification and permission
 * detail values with the corresponding document field values.
 *
 * @param documentId id of the document whose fields drive token substitution
 * @param permissionNamespaceCode namespace of the permission to check
 * @param permissionName name of the permission to check
 * @param permissionDetails permission detail values; token values are resolved in place
 * @param qualification role qualification values; token values are resolved in place
 * @param session security session carrying the principal id
 * @return true if the permission service authorizes the principal; false if the document
 *         cannot be loaded or token resolution fails (failures are logged, not propagated)
 */
protected boolean isAuthenticatedByPermission(String documentId, String permissionNamespaceCode,
        String permissionName, Map<String, String> permissionDetails, Map<String, String> qualification,
        SecuritySession session) {
    try {
        Document document = KewApiServiceLocator.getWorkflowDocumentService().getDocument(documentId);
        resolveDocumentTokens(document, qualification);
        // Fix: this loop previously read from (and wrote back into) the qualification map while
        // iterating the permission detail keys, so detail tokens were never resolved and a key
        // missing from qualification produced a null that blew up in getReplacementString.
        resolveDocumentTokens(document, permissionDetails);
    } catch (Exception e) {
        // Any failure here is deliberately treated as "not authorized" rather than propagated.
        LOG.error(e.getMessage(), e);
        return false;
    }
    // NOTE(review): permissionDetails is resolved but not forwarded to the permission service;
    // confirm whether a template-based check was intended here.
    return KimApiServiceLocator.getPermissionService().isAuthorized(session.getPrincipalId(),
            permissionNamespaceCode, permissionName, qualification);
}

/** Replaces each {@code ${document.<field>}} token among the map's values, in place. */
private void resolveDocumentTokens(Document document, Map<String, String> values) throws Exception {
    for (Map.Entry<String, String> entry : values.entrySet()) {
        entry.setValue(getReplacementString(document, entry.getValue()));
    }
}
/**
 * Resolves a {@code ${document.<field>}} token to the named field's value on the given document.
 * Values that do not start with the token prefix are returned unchanged.
 *
 * @param document document whose field supplies the replacement value
 * @param value raw configured value, possibly containing a leading token; may be null
 * @return the resolved field value for a token, otherwise the original value (null passes through)
 * @throws RuntimeException if a token has no closing brace
 * @throws Exception propagated from reflective field access
 */
private String getReplacementString(Document document, String value) throws Exception {
    final String tokenPrefix = "${document.";
    final String tokenSuffix = "}";
    // Robustness: a null value cannot contain a token; previously this threw a NullPointerException.
    if (value == null || !value.startsWith(tokenPrefix)) {
        return value;
    }
    // The token is anchored at index 0 by the startsWith check above.
    int tokenEnd = value.indexOf(tokenSuffix, tokenPrefix.length());
    if (tokenEnd == -1) {
        throw new RuntimeException("No ending bracket on token in value " + value);
    }
    String fieldName = value.substring(tokenPrefix.length(), tokenEnd);
    return getRouteHeaderVariableValue(document, fieldName);
}
/**
 * Reads the named field off the document via reflection and renders it as a String:
 * booleans become "Y"/"N", Calendars are formatted through the date/time service, and
 * everything else goes through {@link String#valueOf(Object)}.
 *
 * @param document document instance to read the field from
 * @param variableName declared field name on the document's concrete class
 * @return the rendered value, or null if the field does not exist or its value is null
 * @throws Exception propagated from reflective access or date formatting
 */
private String getRouteHeaderVariableValue(Document document, String variableName) throws Exception {
    Field field;
    try {
        field = document.getClass().getDeclaredField(variableName);
    } catch (NoSuchFieldException nsfe) {
        LOG.error("Field '" + variableName + "' not found on Document object.");
        // instead of raising an exception, return null as a value
        // this leaves it up to proper permission configuration to fail the check if a field value
        // is required
        return null;
    }
    field.setAccessible(true);
    Object fieldValue = field.get(document);
    // Robustness: a null field value previously caused an NPE when unboxing Boolean or casting
    // Calendar, and stringified to the literal "null" for other types; treat it like a missing
    // field instead and let configuration decide whether that fails the check.
    if (fieldValue == null) {
        return null;
    }
    Class<?> clazzType = field.getType();
    if (clazzType.equals(String.class)) {
        return (String) fieldValue;
    }
    if (clazzType.getName().equals("boolean") || clazzType.getName().equals("java.lang.Boolean")) {
        return ((Boolean) fieldValue) ? "Y" : "N";
    }
    if (clazzType.getName().equals("java.util.Calendar")) {
        DateTimeService dateTimeService = GlobalResourceLoader.getService(CoreConstants.Services.DATETIME_SERVICE);
        return dateTimeService.toDateString(((Calendar) fieldValue).getTime());
    }
    return String.valueOf(fieldValue);
}
/**
 * Returns the extension repository service used to resolve security attribute extension
 * definitions.
 */
public ExtensionRepositoryService getExtensionRepositoryService() {
return extensionRepositoryService;
}
/**
 * Injects the extension repository service (typically wired by the service container).
 */
public void setExtensionRepositoryService(ExtensionRepositoryService extensionRepositoryService) {
this.extensionRepositoryService = extensionRepositoryService;
}
/**
* Simple class which defines the key of a partition of security attributes associated with an application id.
*
* <p>This class allows direct field access since it is intended for internal use only.</p>
*/
private static final class PartitionKey {
    // Package-visible fields by design: the enclosing class documents direct field access.
    String applicationId;
    Set<String> documentSecurityAttributeNames;

    /**
     * Builds a key from an application id and the names of its security attribute
     * extension definitions (duplicates collapse into the set).
     */
    PartitionKey(String applicationId, Collection<ExtensionDefinition> extensionDefinitions) {
        this.applicationId = applicationId;
        this.documentSecurityAttributeNames = new HashSet<String>();
        for (ExtensionDefinition definition : extensionDefinitions) {
            documentSecurityAttributeNames.add(definition.getName());
        }
    }

    /** Returns the attribute names as a fresh, independently mutable list. */
    List<String> getDocumentSecurityAttributeNameList() {
        return new ArrayList<String>(documentSecurityAttributeNames);
    }

    @Override
    public boolean equals(Object o) {
        if (!(o instanceof PartitionKey)) {
            return false;
        }
        PartitionKey other = (PartitionKey) o;
        // Builders kept so equality/hash semantics match the original implementation exactly.
        return new EqualsBuilder()
                .append(applicationId, other.applicationId)
                .append(documentSecurityAttributeNames, other.documentSecurityAttributeNames)
                .isEquals();
    }

    @Override
    public int hashCode() {
        return new HashCodeBuilder()
                .append(applicationId)
                .append(documentSecurityAttributeNames)
                .hashCode();
    }
}
}
| |
package at.linuxtage.companion.activities;
import android.annotation.SuppressLint;
import android.app.AlertDialog;
import android.app.Dialog;
import android.app.SearchManager;
import android.content.BroadcastReceiver;
import android.content.Context;
import android.content.DialogInterface;
import android.content.Intent;
import android.content.IntentFilter;
import android.content.SharedPreferences;
import android.content.pm.PackageManager.NameNotFoundException;
import android.content.res.TypedArray;
import android.graphics.Color;
import android.graphics.PorterDuff;
import android.graphics.drawable.Drawable;
import android.os.AsyncTask;
import android.os.Build;
import android.os.Bundle;
import android.support.annotation.DrawableRes;
import android.support.annotation.NonNull;
import android.support.annotation.StringRes;
import android.support.v4.app.DialogFragment;
import android.support.v4.app.Fragment;
import android.support.v4.app.FragmentManager;
import android.support.v4.app.FragmentTransaction;
import android.support.v4.content.LocalBroadcastManager;
import android.support.v4.view.MenuItemCompat;
import android.support.v4.widget.DrawerLayout;
import android.support.v7.app.ActionBarActivity;
import android.support.v7.app.ActionBarDrawerToggle;
import android.support.v7.widget.SearchView;
import android.support.v7.widget.Toolbar;
import android.text.SpannableString;
import android.text.Spanned;
import android.text.method.LinkMovementMethod;
import android.text.style.ForegroundColorSpan;
import android.view.Gravity;
import android.view.LayoutInflater;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.view.ViewGroup;
import android.view.animation.AnimationUtils;
import android.widget.AdapterView;
import android.widget.BaseAdapter;
import android.widget.ListView;
import android.widget.ProgressBar;
import android.widget.TextView;
import android.widget.Toast;
import java.text.DateFormat;
import java.util.Date;
import java.util.Locale;
import at.linuxtage.companion.R;
import at.linuxtage.companion.api.GLTApi;
import at.linuxtage.companion.db.DatabaseManager;
import at.linuxtage.companion.fragments.BookmarksListFragment;
import at.linuxtage.companion.fragments.LiveFragment;
import at.linuxtage.companion.fragments.MapFragment;
import at.linuxtage.companion.fragments.PersonsListFragment;
import at.linuxtage.companion.fragments.TracksFragment;
/**
* Main entry point of the application. Allows to switch between section fragments and update the database.
*
* @author Christophe Beyls
*/
public class MainActivity extends ActionBarActivity implements ListView.OnItemClickListener {
// Top-level sections reachable from the navigation drawer. Each entry binds a fragment class
// to its menu title and icon; "keep" decides whether the fragment is detached (and later
// re-attached) instead of destroyed when the user switches away from it.
private enum Section {
TRACKS(TracksFragment.class, R.string.menu_tracks, R.drawable.ic_event_grey600_24dp, true),
BOOKMARKS(BookmarksListFragment.class, R.string.menu_bookmarks, R.drawable.ic_bookmark_grey600_24dp, false),
LIVE(LiveFragment.class, R.string.menu_live, R.drawable.ic_play_circle_outline_grey600_24dp, false),
SPEAKERS(PersonsListFragment.class, R.string.menu_speakers, R.drawable.ic_people_grey600_24dp, false),
MAP(MapFragment.class, R.string.menu_map, R.drawable.ic_map_grey600_24dp, false);
private final String fragmentClassName;
private final int titleResId;
private final int iconResId;
private final boolean keep;
private Section(Class<? extends Fragment> fragmentClass, @StringRes int titleResId,
@DrawableRes int iconResId, boolean keep) {
this.fragmentClassName = fragmentClass.getName();
this.titleResId = titleResId;
this.iconResId = iconResId;
this.keep = keep;
}
public String getFragmentClassName() {
return fragmentClassName;
}
public int getTitleResId() {
return titleResId;
}
public int getIconResId() {
return iconResId;
}
// True if the fragment instance should survive a section switch (detach/attach vs. remove).
public boolean shouldKeep() {
return keep;
}
}
// How long the local schedule database stays "fresh" before a download reminder may be shown.
private static final long DATABASE_VALIDITY_DURATION = 24L * 60L * 60L * 1000L; // 24h
// Minimum interval between two reminder dialogs once one has been shown.
private static final long DOWNLOAD_REMINDER_SNOOZE_DURATION = 24L * 60L * 60L * 1000L; // 24h
private static final String PREF_LAST_DOWNLOAD_REMINDER_TIME = "last_download_reminder_time";
private static final String STATE_CURRENT_SECTION = "current_section";
// Formatter for the "last update" label; locale is captured at class-load time.
private static final DateFormat LAST_UPDATE_DATE_FORMAT = DateFormat.getDateTimeInstance(DateFormat.MEDIUM, DateFormat.MEDIUM, Locale.getDefault());
// Section currently displayed in R.id.content; persisted across recreation via STATE_CURRENT_SECTION.
private Section currentSection;
private ProgressBar progressBar;
private DrawerLayout drawerLayout;
private ActionBarDrawerToggle drawerToggle;
private View mainMenu;
private TextView lastUpdateTextView;
private MainMenuAdapter menuAdapter;
// Only assigned on API >= FROYO (see onCreateOptionsMenu), so it may be null.
private MenuItem searchMenuItem;
// Drives the determinate progress bar while the schedule download is running.
private final BroadcastReceiver scheduleDownloadProgressReceiver = new BroadcastReceiver() {
@Override
public void onReceive(Context context, Intent intent) {
progressBar.setIndeterminate(false);
progressBar.setProgress(intent.getIntExtra(GLTApi.EXTRA_PROGRESS, 0));
}
};
// Hides the progress bar and shows a toast summarizing the schedule download result.
private final BroadcastReceiver scheduleDownloadResultReceiver = new BroadcastReceiver() {
@Override
public void onReceive(Context context, Intent intent) {
// Hide the progress bar with a fill and fade out animation
progressBar.setIndeterminate(false);
progressBar.setProgress(100);
progressBar.startAnimation(AnimationUtils.loadAnimation(MainActivity.this, android.R.anim.fade_out));
progressBar.setVisibility(View.GONE);
int result = intent.getIntExtra(GLTApi.EXTRA_RESULT, GLTApi.RESULT_ERROR);
String message;
switch (result) {
case GLTApi.RESULT_ERROR:
message = getString(R.string.schedule_loading_error);
break;
case GLTApi.RESULT_UP_TO_DATE:
message = getString(R.string.events_download_up_to_date);
break;
case 0:
// Zero events downloaded: the schedule was empty.
message = getString(R.string.events_download_empty);
break;
default:
// Positive result carries the number of downloaded events.
message = getResources().getQuantityString(R.plurals.events_download_completed, result, result);
}
Toast.makeText(MainActivity.this, message, Toast.LENGTH_LONG).show();
}
};
// Refreshes the "last update" label in the drawer whenever the database signals a refresh.
private final BroadcastReceiver scheduleRefreshedReceiver = new BroadcastReceiver() {
@Override
public void onReceive(Context context, Intent intent) {
updateLastUpdateTime();
}
};
/**
 * Dialog prompting the user to download/update the schedule; OK starts the download.
 * Must be static so the framework can recreate it after configuration changes.
 */
public static class DownloadScheduleReminderDialogFragment extends DialogFragment {
@NonNull
@Override
public Dialog onCreateDialog(Bundle savedInstanceState) {
return new AlertDialog.Builder(getActivity()).setTitle(R.string.download_reminder_title).setMessage(R.string.download_reminder_message)
.setPositiveButton(android.R.string.ok, new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int which) {
((MainActivity) getActivity()).startDownloadSchedule();
}
}).setNegativeButton(android.R.string.cancel, null).create();
}
}
/**
 * Builds the toolbar, navigation drawer and main menu list, then installs the initial
 * section fragment (or restores the previously selected section after recreation).
 */
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.main);
setSupportActionBar((Toolbar) findViewById(R.id.toolbar));
progressBar = (ProgressBar) findViewById(R.id.progress);
// Setup drawer layout
getSupportActionBar().setDisplayHomeAsUpEnabled(true);
drawerLayout = (DrawerLayout) findViewById(R.id.drawer_layout);
drawerLayout.setDrawerShadow(getResources().getDrawable(R.drawable.drawer_shadow), Gravity.LEFT);
drawerToggle = new ActionBarDrawerToggle(this, drawerLayout, R.string.main_menu, R.string.close_menu) {
@Override
public void onDrawerOpened(View drawerView) {
updateActionBar();
supportInvalidateOptionsMenu();
// Make keypad navigation easier
mainMenu.requestFocus();
}
@Override
public void onDrawerClosed(View drawerView) {
updateActionBar();
supportInvalidateOptionsMenu();
}
};
drawerToggle.setDrawerIndicatorEnabled(true);
drawerLayout.setDrawerListener(drawerToggle);
// Disable drawerLayout focus to allow trackball navigation.
// We handle the drawer closing on back press ourselves.
drawerLayout.setFocusable(false);
// Setup Main menu
mainMenu = findViewById(R.id.main_menu);
ListView menuListView = (ListView) findViewById(R.id.main_menu_list);
LayoutInflater inflater = LayoutInflater.from(this);
// Header and footer are non-selectable (addHeaderView/addFooterView with isSelectable=false).
View menuHeaderView = inflater.inflate(R.layout.header_main_menu, null);
menuListView.addHeaderView(menuHeaderView, null, false);
View menuFooterView = inflater.inflate(R.layout.footer_main_menu, null);
menuFooterView.findViewById(R.id.settings).setOnClickListener(menuFooterClickListener);
menuFooterView.findViewById(R.id.about).setOnClickListener(menuFooterClickListener);
menuListView.addFooterView(menuFooterView, null, false);
// Registered for the whole activity lifetime; unregistered in onDestroy.
LocalBroadcastManager.getInstance(this).registerReceiver(scheduleRefreshedReceiver, new IntentFilter(DatabaseManager.ACTION_SCHEDULE_REFRESHED));
menuAdapter = new MainMenuAdapter(inflater);
menuListView.setAdapter(menuAdapter);
menuListView.setOnItemClickListener(this);
// Last update date, below the menu
lastUpdateTextView = (TextView) findViewById(R.id.last_update);
updateLastUpdateTime();
// Restore current section
if (savedInstanceState == null) {
currentSection = Section.TRACKS;
String fragmentClassName = currentSection.getFragmentClassName();
Fragment f = Fragment.instantiate(this, fragmentClassName);
getSupportFragmentManager().beginTransaction().add(R.id.content, f, fragmentClassName).commit();
} else {
// The fragment itself is restored by the framework; only the enum ordinal is ours.
currentSection = Section.values()[savedInstanceState.getInt(STATE_CURRENT_SECTION)];
}
// Ensure the current section is visible in the menu
menuListView.setSelection(currentSection.ordinal());
updateActionBar();
}
// Shows no title while the drawer is open, otherwise the current section's title.
private void updateActionBar() {
if (drawerLayout.isDrawerOpen(mainMenu)) {
getSupportActionBar().setTitle(null);
} else {
getSupportActionBar().setTitle(currentSection.getTitleResId());
}
}
// Renders the database's last update time into the drawer label (-1L means "never updated").
private void updateLastUpdateTime() {
long lastUpdateTime = DatabaseManager.getInstance().getLastUpdateTime();
lastUpdateTextView.setText(getString(R.string.last_update,
(lastUpdateTime == -1L) ? getString(R.string.never) : LAST_UPDATE_DATE_FORMAT.format(new Date(lastUpdateTime))));
}
@Override
protected void onPostCreate(Bundle savedInstanceState) {
super.onPostCreate(savedInstanceState);
if (drawerLayout.isDrawerOpen(mainMenu)) {
updateActionBar();
}
// Sync the drawer indicator after state restoration, as the toggle docs require.
drawerToggle.syncState();
}
// Back press closes the drawer first if it is open; otherwise default behavior.
@Override
public void onBackPressed() {
if (drawerLayout.isDrawerOpen(mainMenu)) {
drawerLayout.closeDrawer(mainMenu);
} else {
super.onBackPressed();
}
}
@Override
protected void onSaveInstanceState(Bundle outState) {
super.onSaveInstanceState(outState);
// Persist only the enum ordinal; fragments are saved by the framework.
outState.putInt(STATE_CURRENT_SECTION, currentSection.ordinal());
}
/**
 * Registers the download receivers and, if the database is stale and the reminder is not
 * snoozed, shows the download reminder dialog (at most once per snooze period).
 */
@Override
protected void onStart() {
super.onStart();
// Ensure the progress bar is hidden when starting
progressBar.setVisibility(View.GONE);
// Monitor the schedule download
LocalBroadcastManager lbm = LocalBroadcastManager.getInstance(this);
lbm.registerReceiver(scheduleDownloadProgressReceiver, new IntentFilter(GLTApi.ACTION_DOWNLOAD_SCHEDULE_PROGRESS));
lbm.registerReceiver(scheduleDownloadResultReceiver, new IntentFilter(GLTApi.ACTION_DOWNLOAD_SCHEDULE_RESULT));
// Download reminder
long now = System.currentTimeMillis();
long time = DatabaseManager.getInstance().getLastUpdateTime();
if ((time == -1L) || (time < (now - DATABASE_VALIDITY_DURATION))) {
SharedPreferences prefs = getPreferences(Context.MODE_PRIVATE);
// "time" is reused here for the last reminder timestamp, not the last update time.
time = prefs.getLong(PREF_LAST_DOWNLOAD_REMINDER_TIME, -1L);
if ((time == -1L) || (time < (now - DOWNLOAD_REMINDER_SNOOZE_DURATION))) {
prefs.edit().putLong(PREF_LAST_DOWNLOAD_REMINDER_TIME, now).commit();
FragmentManager fm = getSupportFragmentManager();
// Avoid stacking a second dialog if one is already showing.
if (fm.findFragmentByTag("download_reminder") == null) {
new DownloadScheduleReminderDialogFragment().show(fm, "download_reminder");
}
}
}
}
@Override
protected void onStop() {
// Collapse the search view so its state is not restored expanded.
if ((searchMenuItem != null) && (MenuItemCompat.isActionViewExpanded(searchMenuItem))) {
MenuItemCompat.collapseActionView(searchMenuItem);
}
LocalBroadcastManager lbm = LocalBroadcastManager.getInstance(this);
lbm.unregisterReceiver(scheduleDownloadProgressReceiver);
lbm.unregisterReceiver(scheduleDownloadResultReceiver);
super.onStop();
}
@Override
protected void onDestroy() {
super.onDestroy();
LocalBroadcastManager.getInstance(this).unregisterReceiver(scheduleRefreshedReceiver);
}
/**
 * Inflates the options menu. On FROYO and later a SearchView is wired to the searchable
 * configuration; on older platforms the action view is dropped and a legacy search dialog
 * is used instead (see onOptionsItemSelected).
 */
@SuppressLint("NewApi")
@Override
public boolean onCreateOptionsMenu(Menu menu) {
getMenuInflater().inflate(R.menu.main, menu);
// Local variable intentionally shadows the field: the field is only kept on FROYO+.
MenuItem searchMenuItem = menu.findItem(R.id.search);
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.FROYO) {
this.searchMenuItem = searchMenuItem;
// Associate searchable configuration with the SearchView
SearchManager searchManager = (SearchManager) getSystemService(Context.SEARCH_SERVICE);
SearchView searchView = (SearchView) MenuItemCompat.getActionView(searchMenuItem);
searchView.setSearchableInfo(searchManager.getSearchableInfo(getComponentName()));
} else {
// Legacy search mode for Eclair
MenuItemCompat.setActionView(searchMenuItem, null);
MenuItemCompat.setShowAsAction(searchMenuItem, MenuItemCompat.SHOW_AS_ACTION_IF_ROOM);
}
return true;
}
@Override
public boolean onPrepareOptionsMenu(Menu menu) {
// Hide & disable primary (contextual) action items when the main menu is opened
if (drawerLayout.isDrawerOpen(mainMenu)) {
final int size = menu.size();
for (int i = 0; i < size; ++i) {
MenuItem item = menu.getItem(i);
// Items with a zero category (upper 16 bits of the order) are the contextual ones.
if ((item.getOrder() & 0xFFFF0000) == 0) {
item.setVisible(false).setEnabled(false);
}
}
}
return super.onPrepareOptionsMenu(menu);
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
// Will close the drawer if the home button is pressed
if (drawerToggle.onOptionsItemSelected(item)) {
return true;
}
switch (item.getItemId()) {
case R.id.search:
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.FROYO) {
// Let the SearchView action view handle the tap itself.
return false;
} else {
// Legacy search mode for Eclair
onSearchRequested();
return true;
}
case R.id.refresh:
startDownloadSchedule();
return true;
}
return false;
}
/**
 * Starts the schedule download in the background and shows indeterminate progress until the
 * first progress broadcast arrives. Runs on the thread pool executor on HONEYCOMB+ so it is
 * not serialized behind other AsyncTasks.
 */
@SuppressLint("NewApi")
public void startDownloadSchedule() {
// Start by displaying indeterminate progress, determinate will come later
progressBar.clearAnimation();
progressBar.setIndeterminate(true);
progressBar.setVisibility(View.VISIBLE);
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.HONEYCOMB) {
new DownloadScheduleAsyncTask(this).executeOnExecutor(AsyncTask.THREAD_POOL_EXECUTOR);
} else {
new DownloadScheduleAsyncTask(this).execute();
}
}
// Static task holding only the application context, so it cannot leak the activity.
private static class DownloadScheduleAsyncTask extends AsyncTask<Void, Void, Void> {
private final Context appContext;
public DownloadScheduleAsyncTask(Context context) {
appContext = context.getApplicationContext();
}
@Override
protected Void doInBackground(Void... args) {
GLTApi.downloadSchedule(appContext);
return null;
}
}
// MAIN MENU
// Handles taps on the drawer footer (settings and about), then closes the drawer.
private final View.OnClickListener menuFooterClickListener = new View.OnClickListener() {
@Override
public void onClick(View view) {
switch (view.getId()) {
case R.id.settings:
startActivity(new Intent(MainActivity.this, SettingsActivity.class));
overridePendingTransition(R.anim.slide_in_right, R.anim.partial_zoom_out);
break;
case R.id.about:
new AboutDialogFragment().show(getSupportFragmentManager(), "about");
break;
}
drawerLayout.closeDrawer(mainMenu);
}
};
/**
 * List adapter for the drawer menu: one row per {@link Section}, with the current section
 * tinted in the theme's primary color over a translucent background.
 */
private class MainMenuAdapter extends BaseAdapter {
private Section[] sections = Section.values();
private LayoutInflater inflater;
private int currentSectionForegroundColor;
private int currentSectionBackgroundColor;
public MainMenuAdapter(LayoutInflater inflater) {
this.inflater = inflater;
// Select the primary color to tint the current section
TypedArray a = getTheme().obtainStyledAttributes(new int[]{R.attr.colorPrimary});
try {
currentSectionForegroundColor = a.getColor(0, Color.TRANSPARENT);
} finally {
a.recycle();
}
currentSectionBackgroundColor = getResources().getColor(R.color.translucent_grey);
}
@Override
public int getCount() {
return sections.length;
}
@Override
public Section getItem(int position) {
return sections[position];
}
@Override
public long getItemId(int position) {
return position;
}
@Override
public View getView(int position, View convertView, ViewGroup parent) {
if (convertView == null) {
convertView = inflater.inflate(R.layout.item_main_menu, parent, false);
}
Section section = getItem(position);
TextView tv = (TextView) convertView.findViewById(R.id.section_text);
SpannableString sectionTitle = new SpannableString(getString(section.getTitleResId()));
Drawable sectionIcon = getResources().getDrawable(section.getIconResId());
int backgroundColor;
if (section == currentSection) {
// Special color for the current section
//sectionTitle.setSpan(new StyleSpan(Typeface.BOLD), 0, sectionTitle.length(), Spanned.SPAN_EXCLUSIVE_EXCLUSIVE);
sectionTitle.setSpan(new ForegroundColorSpan(currentSectionForegroundColor), 0, sectionTitle.length(), Spanned.SPAN_EXCLUSIVE_EXCLUSIVE);
// We need to mutate the drawable before applying the ColorFilter, or else all the similar drawable instances will be tinted.
sectionIcon.mutate().setColorFilter(currentSectionForegroundColor, PorterDuff.Mode.SRC_IN);
backgroundColor = currentSectionBackgroundColor;
} else {
backgroundColor = Color.TRANSPARENT;
}
tv.setText(sectionTitle);
tv.setCompoundDrawablesWithIntrinsicBounds(sectionIcon, null, null, null);
tv.setBackgroundColor(backgroundColor);
return convertView;
}
}
/**
 * Switches to the tapped section: the outgoing fragment is detached (if its section is
 * marked "keep") or removed, and the incoming fragment is re-attached or newly created.
 */
@Override
public void onItemClick(AdapterView<?> parent, View view, int position, long id) {
// Decrease position by 1 since the listView has a header view.
Section section = menuAdapter.getItem(position - 1);
if (section != currentSection) {
// Switch to new section
FragmentManager fm = getSupportFragmentManager();
FragmentTransaction ft = fm.beginTransaction().setTransition(FragmentTransaction.TRANSIT_FRAGMENT_FADE);
Fragment f = fm.findFragmentById(R.id.content);
if (f != null) {
if (currentSection.shouldKeep()) {
ft.detach(f);
} else {
ft.remove(f);
}
}
String fragmentClassName = section.getFragmentClassName();
if (section.shouldKeep() && ((f = fm.findFragmentByTag(fragmentClassName)) != null)) {
ft.attach(f);
} else {
f = Fragment.instantiate(this, fragmentClassName);
ft.add(R.id.content, f, fragmentClassName);
}
ft.commit();
currentSection = section;
// Repaint the menu so the newly selected row gets the highlight.
menuAdapter.notifyDataSetChanged();
}
drawerLayout.closeDrawer(mainMenu);
}
/**
 * "About" dialog showing the app name, version (when resolvable) and the about text with
 * clickable links.
 */
public static class AboutDialogFragment extends DialogFragment {
@NonNull
@Override
public Dialog onCreateDialog(Bundle savedInstanceState) {
Context context = getActivity();
String title;
try {
String versionName = context.getPackageManager().getPackageInfo(context.getPackageName(), 0).versionName;
title = String.format("%1$s %2$s", getString(R.string.app_name), versionName);
} catch (NameNotFoundException e) {
// Fall back to the bare app name if our own package info cannot be resolved.
title = getString(R.string.app_name);
}
return new AlertDialog.Builder(context).setTitle(title).setIcon(R.drawable.ic_launcher).setMessage(getResources().getText(R.string.about_text))
.setPositiveButton(android.R.string.ok, null).create();
}
@Override
public void onStart() {
super.onStart();
// Make links clickable; must be called after the dialog is shown
((TextView) getDialog().findViewById(android.R.id.message)).setMovementMethod(LinkMovementMethod.getInstance());
}
}
}
| |
/* ownCloud Android Library is available under MIT license
* Copyright (C) 2016 ownCloud GmbH.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*/
package com.owncloud.android.lib.test_project.test;
import java.io.IOException;
import java.io.InputStream;
import java.net.SocketTimeoutException;
import java.security.GeneralSecurityException;
import org.apache.commons.httpclient.ConnectTimeoutException;
import org.apache.commons.httpclient.HttpException;
import org.apache.commons.httpclient.methods.HeadMethod;
import org.apache.commons.httpclient.protocol.Protocol;
import org.apache.commons.httpclient.protocol.ProtocolSocketFactory;
import org.apache.commons.httpclient.HttpStatus;
import org.apache.jackrabbit.webdav.DavConstants;
import org.apache.jackrabbit.webdav.client.methods.PropFindMethod;
import junit.framework.AssertionFailedError;
import android.net.Uri;
import android.test.AndroidTestCase;
import android.util.Log;
import com.owncloud.android.lib.common.OwnCloudClient;
import com.owncloud.android.lib.common.OwnCloudCredentials;
import com.owncloud.android.lib.common.OwnCloudCredentialsFactory;
import com.owncloud.android.lib.common.accounts.AccountUtils;
import com.owncloud.android.lib.common.network.NetworkUtils;
import com.owncloud.android.lib.test_project.R;
import com.owncloud.android.lib.test_project.SelfSignedConfidentSslSocketFactory;
/**
* Unit test for OwnCloudClient
*
* @author David A. Velasco
*/
public class OwnCloudClientTest extends AndroidTestCase {
private static final String TAG = OwnCloudClientTest.class.getSimpleName();
private Uri mServerUri;
private String mUsername;
private String mPassword;
/**
 * Registers a self-signed-certificate-tolerant SSL socket factory for "https" once per
 * process, so tests can talk to a test server with an untrusted certificate.
 */
public OwnCloudClientTest() {
    super();
    Protocol registered = Protocol.getProtocol("https");
    boolean alreadyInstalled = registered != null
            && registered.getSocketFactory() instanceof SelfSignedConfidentSslSocketFactory;
    if (!alreadyInstalled) {
        try {
            ProtocolSocketFactory factory = new SelfSignedConfidentSslSocketFactory();
            Protocol.registerProtocol(
                    "https",
                    new Protocol("https", factory, 443));
        } catch (GeneralSecurityException e) {
            throw new AssertionFailedError(
                    "Self-signed confident SSL context could not be loaded");
        }
    }
}
@Override
protected void setUp() throws Exception {
super.setUp();
mServerUri = Uri.parse(getContext().getString(R.string.server_base_url));
mUsername = getContext().getString(R.string.username);
mPassword = getContext().getString(R.string.password);
}
/** Constructor must reject null arguments and succeed with valid ones. */
public void testConstructor() {
    // A null base URI must be rejected with IllegalArgumentException.
    try {
        new OwnCloudClient(null, NetworkUtils.getMultiThreadedConnManager());
        throw new AssertionFailedError("Accepted NULL parameter");
    } catch (Exception e) {
        assertTrue("Unexpected exception passing NULL baseUri",
                e instanceof IllegalArgumentException);
    }
    // A null connection manager must be rejected as well.
    try {
        new OwnCloudClient(mServerUri, null);
        throw new AssertionFailedError("Accepted NULL parameter");
    } catch (Exception e) {
        assertTrue("Unexpected exception passing NULL connectionMgr",
                e instanceof IllegalArgumentException);
    }
    // With both arguments present, construction succeeds.
    OwnCloudClient builtClient =
            new OwnCloudClient(mServerUri, NetworkUtils.getMultiThreadedConnManager());
    assertNotNull("OwnCloudClient instance not built", builtClient);
}
/** Credentials default to anonymous and each supported type is stored and returned unchanged. */
public void testGetSetCredentials() {
    OwnCloudClient client =
            new OwnCloudClient(mServerUri, NetworkUtils.getMultiThreadedConnManager());
    // A freshly built client carries non-null, anonymous credentials.
    assertNotNull("Returned NULL credentials", client.getCredentials());
    assertEquals("Not instanced without credentials",
            client.getCredentials(), OwnCloudCredentialsFactory.getAnonymousCredentials());
    // Basic (username/password) credentials.
    OwnCloudCredentials basic =
            OwnCloudCredentialsFactory.newBasicCredentials("user", "pass");
    client.setCredentials(basic);
    assertEquals("Basic credentials not set", basic, client.getCredentials());
    // OAuth2 bearer token credentials.
    OwnCloudCredentials bearer =
            OwnCloudCredentialsFactory.newBearerCredentials("bearerToken");
    client.setCredentials(bearer);
    assertEquals("Bearer credentials not set", bearer, client.getCredentials());
    // SAML SSO session cookie credentials.
    OwnCloudCredentials saml =
            OwnCloudCredentialsFactory.newSamlSsoCredentials("user", "samlSessionCookie=124");
    client.setCredentials(saml);
    assertEquals("SAML2 session credentials not set", saml, client.getCredentials());
}
/**
 * Verifies that executeMethod with explicit per-request timeouts fails fast when a timeout
 * is impossibly small, and that the client's configured default timeouts are left untouched
 * for subsequent requests.
 */
public void testExecuteMethodWithTimeouts() throws HttpException, IOException {
    OwnCloudClient client =
            new OwnCloudClient(mServerUri, NetworkUtils.getMultiThreadedConnManager());
    int connectionTimeout = client.getConnectionTimeout();
    int readTimeout = client.getDataTimeout();
    HeadMethod head = new HeadMethod(client.getWebdavUri() + "/");
    try {
        // A 1 ms read timeout cannot be met by a real server round-trip.
        client.executeMethod(head, 1, 1000);
        throw new AssertionFailedError("Completed HEAD with impossible read timeout");
    } catch (Exception e) {
        // Fix: use the class TAG constant instead of a duplicated literal, and correct the
        // "Unexcepted" typo in the assertion messages.
        Log.e(TAG, "EXCEPTION", e);
        assertTrue("Unexpected exception " + e.getLocalizedMessage(),
                (e instanceof ConnectTimeoutException) ||
                (e instanceof SocketTimeoutException));
    } finally {
        head.releaseConnection();
    }
    assertEquals("Connection timeout was changed for future requests",
            connectionTimeout, client.getConnectionTimeout());
    assertEquals("Read timeout was changed for future requests",
            readTimeout, client.getDataTimeout());
    try {
        // A 1 ms connection timeout cannot be met either.
        client.executeMethod(head, 1000, 1);
        throw new AssertionFailedError("Completed HEAD with impossible connection timeout");
    } catch (Exception e) {
        Log.e(TAG, "EXCEPTION", e);
        assertTrue("Unexpected exception " + e.getLocalizedMessage(),
                (e instanceof ConnectTimeoutException) ||
                (e instanceof SocketTimeoutException));
    } finally {
        head.releaseConnection();
    }
    assertEquals("Connection timeout was changed for future requests",
            connectionTimeout, client.getConnectionTimeout());
    assertEquals("Read timeout was changed for future requests",
            readTimeout, client.getDataTimeout());
}
/** A plain HEAD against the WebDAV root must yield some well-formed HTTP status code. */
public void testExecuteMethod() {
    OwnCloudClient client =
            new OwnCloudClient(mServerUri, NetworkUtils.getMultiThreadedConnManager());
    HeadMethod head = new HeadMethod(client.getWebdavUri() + "/");
    try {
        // Any valid HTTP status (1xx..5xx) passes; this only checks the request plumbing.
        int status = client.executeMethod(head);
        assertTrue("Wrong status code returned: " + status,
                status > 99 && status < 600);
    } catch (IOException e) {
        Log.e(TAG, "Exception in HEAD method execution", e);
        // TODO - make it fail? ; try several times, and make it fail if none
        // is right?
    } finally {
        head.releaseConnection();
    }
}
/**
 * Verifies that exhaustResponse fully consumes a PROPFIND response body (a subsequent read
 * returns EOF or fails) and that a null argument is accepted without throwing.
 */
public void testExhaustResponse() {
    OwnCloudClient client =
            new OwnCloudClient(mServerUri, NetworkUtils.getMultiThreadedConnManager());
    PropFindMethod propfind = null;
    try {
        propfind = new PropFindMethod(client.getWebdavUri() + "/",
                DavConstants.PROPFIND_ALL_PROP,
                DavConstants.DEPTH_0);
        client.executeMethod(propfind);
        InputStream responseBody = propfind.getResponseBodyAsStream();
        if (responseBody != null) {
            client.exhaustResponse(responseBody);
            try {
                int character = responseBody.read();
                assertEquals("Response body was not fully exhausted",
                        character, -1); // -1 is acceptable
            } catch (IOException e) {
                // this is the preferred result
            }
        } else {
            Log.e(TAG, "Could not test exhaustResponse due to wrong response");
            // TODO - make it fail? ; try several times, and make it fail if none
            // is right?
        }
    } catch (IOException e) {
        Log.e(TAG, "Exception in PROPFIND method execution", e);
        // TODO - make it fail? ; try several times, and make it fail if none
        // is right?
    } finally {
        // Fix: guard against NPE when the PropFindMethod constructor itself threw (propfind
        // still null), which would otherwise mask the original exception.
        if (propfind != null) {
            propfind.releaseConnection();
        }
    }
    client.exhaustResponse(null); // must run with no exception
}
/**
 * Checks the setter/getter contract of the default timeouts: non-negative
 * values are applied (zero included), negative values are ignored and keep
 * the previously effective setting.
 */
public void testGetSetDefaultTimeouts() {
    OwnCloudClient client =
        new OwnCloudClient(mServerUri, NetworkUtils.getMultiThreadedConnManager());
    final int previousDataTimeout = client.getDataTimeout();
    final int previousConnectionTimeout = client.getConnectionTimeout();

    // positive values must be applied verbatim
    client.setDefaultTimeouts(previousDataTimeout + 1000, previousConnectionTimeout + 1000);
    assertEquals("Data timeout not set",
        previousDataTimeout + 1000, client.getDataTimeout());
    assertEquals("Connection timeout not set",
        previousConnectionTimeout + 1000, client.getConnectionTimeout());

    // zero is a valid value
    client.setDefaultTimeouts(0, 0);
    assertEquals("Zero data timeout not set",
        0, client.getDataTimeout());
    assertEquals("Zero connection timeout not set",
        0, client.getConnectionTimeout());

    // both negative: both ignored, previous (zero) settings kept
    client.setDefaultTimeouts(-1, -1);
    assertEquals("Negative data timeout not ignored",
        0, client.getDataTimeout());
    assertEquals("Negative connection timeout not ignored",
        0, client.getConnectionTimeout());

    // mixed: only the non-negative member of the pair is applied
    client.setDefaultTimeouts(-1, 1000);
    assertEquals("Negative data timeout not ignored",
        0, client.getDataTimeout());
    assertEquals("Connection timeout not set",
        1000, client.getConnectionTimeout());

    client.setDefaultTimeouts(1000, -1);
    assertEquals("Data timeout not set",
        1000, client.getDataTimeout());
    assertEquals("Negative connection timeout not ignored",
        1000, client.getConnectionTimeout());
}
/**
 * Verifies that {@link OwnCloudClient#getWebdavUri()} points to the right
 * WebDAV entry point for each credentials type (OAuth2 bearer vs. basic),
 * and that the resulting URI actually answers a PROPFIND request.
 */
public void testGetWebdavUri() {
    OwnCloudClient client =
        new OwnCloudClient(mServerUri, NetworkUtils.getMultiThreadedConnManager());
    client.setCredentials(OwnCloudCredentialsFactory.newBearerCredentials("fakeToken"));
    Uri webdavUri = client.getWebdavUri();
    assertTrue("WebDAV URI does not point to the right entry point for OAuth2 " +
            "authenticated servers",
        webdavUri.getPath().endsWith(AccountUtils.ODAV_PATH));
    assertTrue("WebDAV URI is not a subpath of base URI",
        webdavUri.getAuthority().equals(mServerUri.getAuthority()) &&
        webdavUri.getPath().startsWith(mServerUri.getPath()));
    client.setCredentials(OwnCloudCredentialsFactory.newBasicCredentials(
        mUsername, mPassword));
    webdavUri = client.getWebdavUri();
    assertTrue("WebDAV URI does not point to the right entry point",
        webdavUri.getPath().endsWith(AccountUtils.WEBDAV_PATH_4_0));
    PropFindMethod propfind = null;
    try {
        propfind = new PropFindMethod(webdavUri + "/",
            DavConstants.PROPFIND_ALL_PROP,
            DavConstants.DEPTH_0);
        int status = client.executeMethod(propfind);
        assertEquals("WebDAV request did not work on WebDAV URI",
            HttpStatus.SC_MULTI_STATUS, status);
    } catch (IOException e) {
        Log.e(TAG, "Exception in PROPFIND method execution", e);
        // TODO - make it fail? ; try several times, and make it fail if none
        // is right?
    } finally {
        // fix: guard against NPE when the PropFindMethod constructor threw
        // before 'propfind' was assigned
        if (propfind != null) {
            propfind.releaseConnection();
        }
    }
}
/**
 * Checks the base-URI getter/setter round trip, and that a null base URI is
 * rejected with an {@link IllegalArgumentException}.
 */
public void testGetSetBaseUri() {
    OwnCloudClient client =
        new OwnCloudClient(mServerUri, NetworkUtils.getMultiThreadedConnManager());

    // the constructor argument must be reported back unchanged
    assertEquals("Returned base URI different that URI passed to constructor",
        mServerUri, client.getBaseUri());

    final Uri replacementUri = Uri.parse("https://whatever.com/basePath/here");
    client.setBaseUri(replacementUri);
    assertEquals("Returned base URI different that URI passed to constructor",
        replacementUri, client.getBaseUri());

    // a null base URI must be rejected
    try {
        client.setBaseUri(null);
        throw new AssertionFailedError("Accepted NULL parameter");
    } catch (Exception e) {
        assertTrue("Unexpected exception passing NULL base URI",
            (e instanceof IllegalArgumentException));
    }
}
/**
 * TODO: not yet implemented. Should verify that the client concatenates all
 * cookies of its HTTP state, ';'-separated, as the production method below
 * (kept here only as a reference for the future test) does.
 */
public void testGetCookiesString() {
    // TODO implement test body
    /*public String getCookiesString(){
        Cookie[] cookies = getState().getCookies();
        String cookiesString ="";
        for (Cookie cookie: cookies) {
            cookiesString = cookiesString + cookie.toString() + ";";
            logCookie(cookie);
        }
        return cookiesString;
    }
    */
}
/**
 * TODO: not yet implemented. Requires a server that answers with a 3xx
 * redirect so the follow-redirects flag can be observed end to end.
 */
public void testSetFollowRedirects() {
    // TODO - to implement this test we need a redirected server
}
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.streams.processor.internals;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Value;
import org.apache.kafka.common.serialization.Deserializer;
import org.apache.kafka.common.serialization.IntegerDeserializer;
import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.common.serialization.Serializer;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.MockTime;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.streams.errors.LogAndContinueExceptionHandler;
import org.apache.kafka.streams.processor.TimestampExtractor;
import org.apache.kafka.test.InternalMockProcessorContext;
import org.apache.kafka.test.MockSourceNode;
import org.apache.kafka.test.MockTimestampExtractor;
import org.junit.Test;
import java.util.Arrays;
import java.util.List;
import static org.apache.kafka.common.utils.Utils.mkEntry;
import static org.apache.kafka.common.utils.Utils.mkMap;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertThrows;
/**
 * Unit tests for {@link PartitionGroup}: per-partition buffering of raw
 * records, stream-time / partition-time tracking, timestamp-ordered head
 * record selection across partitions, clear/close semantics, and error
 * handling for unknown partitions.
 */
public class PartitionGroupTest {
    private final LogContext logContext = new LogContext();
    private final Time time = new MockTime();
    private final Serializer<Integer> intSerializer = new IntegerSerializer();
    private final Deserializer<Integer> intDeserializer = new IntegerDeserializer();
    private final TimestampExtractor timestampExtractor = new MockTimestampExtractor();
    private final TopicPartition unknownPartition = new TopicPartition("unknown-partition", 0);
    private final String errMessage = "Partition " + unknownPartition + " not found.";
    private final String[] topics = {"topic"};
    private final TopicPartition partition1 = new TopicPartition(topics[0], 1);
    private final TopicPartition partition2 = new TopicPartition(topics[0], 2);
    private final RecordQueue queue1 = new RecordQueue(
        partition1,
        new MockSourceNode<>(topics, intDeserializer, intDeserializer),
        timestampExtractor,
        new LogAndContinueExceptionHandler(),
        new InternalMockProcessorContext(),
        logContext
    );
    private final RecordQueue queue2 = new RecordQueue(
        partition2,
        new MockSourceNode<>(topics, intDeserializer, intDeserializer),
        timestampExtractor,
        new LogAndContinueExceptionHandler(),
        new InternalMockProcessorContext(),
        logContext
    );
    private final byte[] recordValue = intSerializer.serialize(null, 10);
    private final byte[] recordKey = intSerializer.serialize(null, 1);
    private final Metrics metrics = new Metrics();
    // gauge exposing the lateness of the last record pulled from the group
    private final MetricName lastLatenessValue = new MetricName("record-lateness-last-value", "", "", mkMap());
    private final PartitionGroup group = new PartitionGroup(
        mkMap(mkEntry(partition1, queue1), mkEntry(partition2, queue2)),
        getValueSensor(metrics, lastLatenessValue)
    );

    /** Builds a sensor that records the most recent value under {@code metricName}. */
    private static Sensor getValueSensor(final Metrics metrics, final MetricName metricName) {
        final Sensor lastRecordedValue = metrics.sensor(metricName.name());
        lastRecordedValue.add(metricName, new Value());
        return lastRecordedValue;
    }

    @Test
    public void testTimeTracking() {
        // the two batches share the 'group' field, so order matters here
        testFirstBatch();
        testSecondBatch();
    }

    /** First batch: interleaved timestamps across two partitions advance stream time in order. */
    private void testFirstBatch() {
        StampedRecord record;
        final PartitionGroup.RecordInfo info = new PartitionGroup.RecordInfo();
        assertThat(group.numBuffered(), is(0));
        // add three 3 records with timestamp 1, 3, 5 to partition-1
        final List<ConsumerRecord<byte[], byte[]>> list1 = Arrays.asList(
            new ConsumerRecord<>("topic", 1, 1L, recordKey, recordValue),
            new ConsumerRecord<>("topic", 1, 3L, recordKey, recordValue),
            new ConsumerRecord<>("topic", 1, 5L, recordKey, recordValue));
        group.addRawRecords(partition1, list1);
        // add three 3 records with timestamp 2, 4, 6 to partition-2
        final List<ConsumerRecord<byte[], byte[]>> list2 = Arrays.asList(
            new ConsumerRecord<>("topic", 2, 2L, recordKey, recordValue),
            new ConsumerRecord<>("topic", 2, 4L, recordKey, recordValue),
            new ConsumerRecord<>("topic", 2, 6L, recordKey, recordValue));
        group.addRawRecords(partition2, list2);
        // 1:[1, 3, 5]
        // 2:[2, 4, 6]
        // st: -1 since no records was being processed yet
        verifyBuffered(6, 3, 3);
        assertThat(group.partitionTimestamp(partition1), is(RecordQueue.UNKNOWN));
        assertThat(group.partitionTimestamp(partition2), is(RecordQueue.UNKNOWN));
        assertThat(group.headRecordOffset(partition1), is(1L));
        assertThat(group.headRecordOffset(partition2), is(2L));
        assertThat(group.streamTime(), is(RecordQueue.UNKNOWN));
        assertThat(metrics.metric(lastLatenessValue).metricValue(), is(0.0));
        // get one record, now the time should be advanced
        record = group.nextRecord(info, time.milliseconds());
        // 1:[3, 5]
        // 2:[2, 4, 6]
        // st: 1
        assertThat(info.partition(), equalTo(partition1));
        assertThat(group.partitionTimestamp(partition1), is(1L));
        assertThat(group.partitionTimestamp(partition2), is(RecordQueue.UNKNOWN));
        assertThat(group.headRecordOffset(partition1), is(3L));
        assertThat(group.headRecordOffset(partition2), is(2L));
        verifyTimes(record, 1L, 1L);
        assertThat(metrics.metric(lastLatenessValue).metricValue(), is(0.0));
        // get one record, now the time should be advanced
        record = group.nextRecord(info, time.milliseconds());
        // 1:[3, 5]
        // 2:[4, 6]
        // st: 2
        assertThat(info.partition(), equalTo(partition2));
        assertThat(group.partitionTimestamp(partition1), is(1L));
        assertThat(group.partitionTimestamp(partition2), is(2L));
        assertThat(group.headRecordOffset(partition1), is(3L));
        assertThat(group.headRecordOffset(partition2), is(4L));
        verifyTimes(record, 2L, 2L);
        verifyBuffered(4, 2, 2);
        // consistency fix: use assertThat(..., is(...)) like the rest of the class
        assertThat(metrics.metric(lastLatenessValue).metricValue(), is(0.0));
    }

    /** Second batch: late records must not move stream time backwards, and lateness is reported. */
    private void testSecondBatch() {
        StampedRecord record;
        final PartitionGroup.RecordInfo info = new PartitionGroup.RecordInfo();
        // add 2 more records with timestamp 2, 4 to partition-1
        final List<ConsumerRecord<byte[], byte[]>> list3 = Arrays.asList(
            new ConsumerRecord<>("topic", 1, 2L, recordKey, recordValue),
            new ConsumerRecord<>("topic", 1, 4L, recordKey, recordValue));
        group.addRawRecords(partition1, list3);
        // 1:[3, 5, 2, 4]
        // 2:[4, 6]
        // st: 2 (just adding records shouldn't change it)
        verifyBuffered(6, 4, 2);
        assertThat(group.partitionTimestamp(partition1), is(1L));
        assertThat(group.partitionTimestamp(partition2), is(2L));
        assertThat(group.headRecordOffset(partition1), is(3L));
        assertThat(group.headRecordOffset(partition2), is(4L));
        assertThat(group.streamTime(), is(2L));
        assertThat(metrics.metric(lastLatenessValue).metricValue(), is(0.0));
        // get one record, time should be advanced
        record = group.nextRecord(info, time.milliseconds());
        // 1:[5, 2, 4]
        // 2:[4, 6]
        // st: 3
        assertThat(info.partition(), equalTo(partition1));
        assertThat(group.partitionTimestamp(partition1), is(3L));
        assertThat(group.partitionTimestamp(partition2), is(2L));
        assertThat(group.headRecordOffset(partition1), is(5L));
        assertThat(group.headRecordOffset(partition2), is(4L));
        verifyTimes(record, 3L, 3L);
        verifyBuffered(5, 3, 2);
        assertThat(metrics.metric(lastLatenessValue).metricValue(), is(0.0));
        // get one record, time should be advanced
        record = group.nextRecord(info, time.milliseconds());
        // 1:[5, 2, 4]
        // 2:[6]
        // st: 4
        assertThat(info.partition(), equalTo(partition2));
        assertThat(group.partitionTimestamp(partition1), is(3L));
        assertThat(group.partitionTimestamp(partition2), is(4L));
        assertThat(group.headRecordOffset(partition1), is(5L));
        assertThat(group.headRecordOffset(partition2), is(6L));
        verifyTimes(record, 4L, 4L);
        verifyBuffered(4, 3, 1);
        assertThat(metrics.metric(lastLatenessValue).metricValue(), is(0.0));
        // get one more record, time should be advanced
        record = group.nextRecord(info, time.milliseconds());
        // 1:[2, 4]
        // 2:[6]
        // st: 5
        assertThat(info.partition(), equalTo(partition1));
        assertThat(group.partitionTimestamp(partition1), is(5L));
        assertThat(group.partitionTimestamp(partition2), is(4L));
        assertThat(group.headRecordOffset(partition1), is(2L));
        assertThat(group.headRecordOffset(partition2), is(6L));
        verifyTimes(record, 5L, 5L);
        verifyBuffered(3, 2, 1);
        assertThat(metrics.metric(lastLatenessValue).metricValue(), is(0.0));
        // get one more record, time should not be advanced
        record = group.nextRecord(info, time.milliseconds());
        // 1:[4]
        // 2:[6]
        // st: 5
        assertThat(info.partition(), equalTo(partition1));
        assertThat(group.partitionTimestamp(partition1), is(5L));
        assertThat(group.partitionTimestamp(partition2), is(4L));
        assertThat(group.headRecordOffset(partition1), is(4L));
        assertThat(group.headRecordOffset(partition2), is(6L));
        verifyTimes(record, 2L, 5L);
        verifyBuffered(2, 1, 1);
        assertThat(metrics.metric(lastLatenessValue).metricValue(), is(3.0));
        // get one more record, time should not be advanced
        record = group.nextRecord(info, time.milliseconds());
        // 1:[]
        // 2:[6]
        // st: 5
        assertThat(info.partition(), equalTo(partition1));
        assertThat(group.partitionTimestamp(partition1), is(5L));
        assertThat(group.partitionTimestamp(partition2), is(4L));
        assertNull(group.headRecordOffset(partition1));
        assertThat(group.headRecordOffset(partition2), is(6L));
        verifyTimes(record, 4L, 5L);
        verifyBuffered(1, 0, 1);
        assertThat(metrics.metric(lastLatenessValue).metricValue(), is(1.0));
        // get one more record, time should be advanced
        record = group.nextRecord(info, time.milliseconds());
        // 1:[]
        // 2:[]
        // st: 6
        assertThat(info.partition(), equalTo(partition2));
        assertThat(group.partitionTimestamp(partition1), is(5L));
        assertThat(group.partitionTimestamp(partition2), is(6L));
        assertNull(group.headRecordOffset(partition1));
        assertNull(group.headRecordOffset(partition2));
        verifyTimes(record, 6L, 6L);
        verifyBuffered(0, 0, 0);
        assertThat(metrics.metric(lastLatenessValue).metricValue(), is(0.0));
    }

    @Test
    public void shouldChooseNextRecordBasedOnHeadTimestamp() {
        assertEquals(0, group.numBuffered());
        // add three 3 records with timestamp 1, 5, 3 to partition-1
        final List<ConsumerRecord<byte[], byte[]>> list1 = Arrays.asList(
            new ConsumerRecord<>("topic", 1, 1L, recordKey, recordValue),
            new ConsumerRecord<>("topic", 1, 5L, recordKey, recordValue),
            new ConsumerRecord<>("topic", 1, 3L, recordKey, recordValue));
        group.addRawRecords(partition1, list1);
        verifyBuffered(3, 3, 0);
        assertEquals(-1L, group.streamTime());
        // consistency fix: use assertThat(..., is(...)) like the rest of the class
        assertThat(metrics.metric(lastLatenessValue).metricValue(), is(0.0));
        StampedRecord record;
        final PartitionGroup.RecordInfo info = new PartitionGroup.RecordInfo();
        // get first two records from partition 1
        // fix: assertEquals takes the expected value first, then the actual
        record = group.nextRecord(info, time.milliseconds());
        assertEquals(1L, record.timestamp);
        record = group.nextRecord(info, time.milliseconds());
        assertEquals(5L, record.timestamp);
        // add three 3 records with timestamp 2, 4, 6 to partition-2
        final List<ConsumerRecord<byte[], byte[]>> list2 = Arrays.asList(
            new ConsumerRecord<>("topic", 2, 2L, recordKey, recordValue),
            new ConsumerRecord<>("topic", 2, 4L, recordKey, recordValue),
            new ConsumerRecord<>("topic", 2, 6L, recordKey, recordValue));
        group.addRawRecords(partition2, list2);
        // 1:[3]
        // 2:[2, 4, 6]
        // get one record, next record should be ts=2 from partition 2
        record = group.nextRecord(info, time.milliseconds());
        // 1:[3]
        // 2:[4, 6]
        assertEquals(2L, record.timestamp);
        // get one record, next up should have ts=3 from partition 1 (even though it has seen a larger max timestamp =5)
        record = group.nextRecord(info, time.milliseconds());
        // 1:[]
        // 2:[4, 6]
        assertEquals(3L, record.timestamp);
    }

    /** Asserts the record's timestamp and the group's resulting stream time. */
    private void verifyTimes(final StampedRecord record, final long recordTime, final long streamTime) {
        assertThat(record.timestamp, is(recordTime));
        assertThat(group.streamTime(), is(streamTime));
    }

    /** Asserts the total and per-partition buffered record counts. */
    private void verifyBuffered(final int totalBuffered, final int partitionOneBuffered, final int partitionTwoBuffered) {
        assertEquals(totalBuffered, group.numBuffered());
        assertEquals(partitionOneBuffered, group.numBuffered(partition1));
        assertEquals(partitionTwoBuffered, group.numBuffered(partition2));
    }

    @Test
    public void shouldSetPartitionTimestampAndStreamTime() {
        group.setPartitionTime(partition1, 100L);
        assertEquals(100L, group.partitionTimestamp(partition1));
        assertEquals(100L, group.streamTime());
        // stream time is the maximum over partitions, so a smaller
        // partition time must not pull it back
        group.setPartitionTime(partition2, 50L);
        assertEquals(50L, group.partitionTimestamp(partition2));
        assertEquals(100L, group.streamTime());
    }

    @Test
    public void shouldThrowIllegalStateExceptionUponAddRecordsIfPartitionUnknown() {
        final IllegalStateException exception = assertThrows(
            IllegalStateException.class,
            () -> group.addRawRecords(unknownPartition, null));
        // fix: assertThat takes the actual value first, then the matcher
        assertThat(exception.getMessage(), equalTo(errMessage));
    }

    @Test
    public void shouldThrowIllegalStateExceptionUponNumBufferedIfPartitionUnknown() {
        final IllegalStateException exception = assertThrows(
            IllegalStateException.class,
            () -> group.numBuffered(unknownPartition));
        assertThat(exception.getMessage(), equalTo(errMessage));
    }

    @Test
    public void shouldThrowIllegalStateExceptionUponSetPartitionTimestampIfPartitionUnknown() {
        final IllegalStateException exception = assertThrows(
            IllegalStateException.class,
            () -> group.setPartitionTime(unknownPartition, 0L));
        assertThat(exception.getMessage(), equalTo(errMessage));
    }

    @Test
    public void shouldThrowIllegalStateExceptionUponGetPartitionTimestampIfPartitionUnknown() {
        final IllegalStateException exception = assertThrows(
            IllegalStateException.class,
            () -> group.partitionTimestamp(unknownPartition));
        assertThat(exception.getMessage(), equalTo(errMessage));
    }

    @Test
    public void shouldThrowIllegalStateExceptionUponGetHeadRecordOffsetIfPartitionUnknown() {
        final IllegalStateException exception = assertThrows(
            IllegalStateException.class,
            () -> group.headRecordOffset(unknownPartition));
        assertThat(exception.getMessage(), equalTo(errMessage));
    }

    @Test
    public void shouldEmptyPartitionsOnClear() {
        final List<ConsumerRecord<byte[], byte[]>> list = Arrays.asList(
            new ConsumerRecord<>("topic", 1, 1L, recordKey, recordValue),
            new ConsumerRecord<>("topic", 1, 3L, recordKey, recordValue),
            new ConsumerRecord<>("topic", 1, 5L, recordKey, recordValue));
        group.addRawRecords(partition1, list);
        group.nextRecord(new PartitionGroup.RecordInfo(), time.milliseconds());
        group.nextRecord(new PartitionGroup.RecordInfo(), time.milliseconds());
        group.clear();
        assertThat(group.numBuffered(), equalTo(0));
        assertThat(group.streamTime(), equalTo(RecordQueue.UNKNOWN));
        assertThat(group.nextRecord(new PartitionGroup.RecordInfo(), time.milliseconds()), equalTo(null));
        assertThat(group.partitionTimestamp(partition1), equalTo(RecordQueue.UNKNOWN));
        // the partition must still accept records after clear()
        group.addRawRecords(partition1, list);
    }

    @Test
    public void shouldCleanPartitionsOnClose() {
        final List<ConsumerRecord<byte[], byte[]>> list = Arrays.asList(
            new ConsumerRecord<>("topic", 1, 1L, recordKey, recordValue),
            new ConsumerRecord<>("topic", 1, 3L, recordKey, recordValue),
            new ConsumerRecord<>("topic", 1, 5L, recordKey, recordValue));
        group.addRawRecords(partition1, list);
        group.nextRecord(new PartitionGroup.RecordInfo(), time.milliseconds());
        group.close();
        assertThat(group.numBuffered(), equalTo(0));
        assertThat(group.streamTime(), equalTo(RecordQueue.UNKNOWN));
        assertThat(group.nextRecord(new PartitionGroup.RecordInfo(), time.milliseconds()), equalTo(null));
        assertThat(group.partitionTimestamp(partition1), equalTo(RecordQueue.UNKNOWN));
        // The partition1 should still be able to find.
        assertThat(group.addRawRecords(partition1, list), equalTo(3));
    }
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.cql3.statements;
import java.util.*;
import com.google.common.base.Optional;
import com.google.common.base.Strings;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.cql3.CFName;
import org.apache.cassandra.cql3.ColumnIdentifier;
import org.apache.cassandra.cql3.IndexName;
import org.apache.cassandra.db.marshal.MapType;
import org.apache.cassandra.exceptions.InvalidRequestException;
import org.apache.cassandra.exceptions.RequestValidationException;
import org.apache.cassandra.exceptions.UnauthorizedException;
import org.apache.cassandra.schema.ColumnMetadata;
import org.apache.cassandra.schema.IndexMetadata;
import org.apache.cassandra.schema.Indexes;
import org.apache.cassandra.schema.MigrationManager;
import org.apache.cassandra.schema.Schema;
import org.apache.cassandra.schema.TableMetadata;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.QueryState;
import org.apache.cassandra.transport.Event;
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.commons.lang3.builder.ToStringStyle;
import static org.apache.cassandra.cql3.statements.RequestValidations.checkFalse;
import static org.apache.cassandra.cql3.statements.RequestValidations.invalidRequest;
/** A <code>CREATE INDEX</code> statement parsed from a CQL query. */
public class CreateIndexStatement extends SchemaAlteringStatement
{
    private static final Logger logger = LoggerFactory.getLogger(CreateIndexStatement.class);

    // Requested index name; may be empty, in which case a name is generated at migration time.
    private final String indexName;
    // Target columns/expressions, not yet prepared against a concrete table.
    private final List<IndexTarget.Raw> rawTargets;
    // WITH-clause options (custom index class, options map, ...).
    private final IndexPropDefs properties;
    // True when IF NOT EXISTS was specified.
    private final boolean ifNotExists;

    public CreateIndexStatement(CFName name,
                                IndexName indexName,
                                List<IndexTarget.Raw> targets,
                                IndexPropDefs properties,
                                boolean ifNotExists)
    {
        super(name);
        this.indexName = indexName.getIdx();
        this.rawTargets = targets;
        this.properties = properties;
        this.ifNotExists = ifNotExists;
    }

    // Creating an index requires ALTER permission on the base table.
    public void checkAccess(ClientState state) throws UnauthorizedException, InvalidRequestException
    {
        state.hasColumnFamilyAccess(keyspace(), columnFamily(), Permission.ALTER);
    }

    /**
     * Validates that the index can be created: the base table must support
     * secondary indexes, every target column must exist and be of an
     * indexable type, and a non-empty index name must not clash with an
     * existing one (unless IF NOT EXISTS was given).
     *
     * @throws RequestValidationException when any of those constraints is violated
     */
    public void validate(ClientState state) throws RequestValidationException
    {
        TableMetadata table = Schema.instance.validateTable(keyspace(), columnFamily());

        // table-level restrictions first
        if (table.isCounter())
            throw new InvalidRequestException("Secondary indexes are not supported on counter tables");
        if (table.isView())
            throw new InvalidRequestException("Secondary indexes are not supported on materialized views");
        if (table.isCompactTable() && !table.isStaticCompactTable())
            throw new InvalidRequestException("Secondary indexes are not supported on COMPACT STORAGE tables that have clustering columns");

        // resolve raw targets against the actual table schema
        List<IndexTarget> targets = new ArrayList<>(rawTargets.size());
        for (IndexTarget.Raw rawTarget : rawTargets)
            targets.add(rawTarget.prepare(table));

        // only CUSTOM indexes may omit targets or name several columns
        if (targets.isEmpty() && !properties.isCustom)
            throw new InvalidRequestException("Only CUSTOM indexes can be created without specifying a target column");
        if (targets.size() > 1)
            validateTargetsForMultiColumnIndex(targets);

        // per-column restrictions
        for (IndexTarget target : targets)
        {
            ColumnMetadata cd = table.getColumn(target.column);
            if (cd == null)
                throw new InvalidRequestException("No column definition found for column " + target.column);

            // durations are not indexable, directly or nested in a complex type;
            // the checkFalse calls pick the most specific message before the generic one
            if (cd.type.referencesDuration())
            {
                checkFalse(cd.type.isCollection(), "Secondary indexes are not supported on collections containing durations");
                checkFalse(cd.type.isTuple(), "Secondary indexes are not supported on tuples containing durations");
                checkFalse(cd.type.isUDT(), "Secondary indexes are not supported on UDTs containing durations");
                throw invalidRequest("Secondary indexes are not supported on duration columns");
            }

            // TODO: we could lift that limitation
            if (table.isCompactTable() && cd.isPrimaryKeyColumn())
                throw new InvalidRequestException("Secondary indexes are not supported on PRIMARY KEY columns in COMPACT STORAGE tables");

            // a single-column partition key is already the primary lookup path
            if (cd.kind == ColumnMetadata.Kind.PARTITION_KEY && table.partitionKeyColumns().size() == 1)
                throw new InvalidRequestException(String.format("Cannot create secondary index on partition key column %s", target.column));

            // frozen collections take only full() indexes; everything else takes
            // simple / keys / values indexes depending on its type
            boolean isMap = cd.type instanceof MapType;
            boolean isFrozenCollection = cd.type.isCollection() && !cd.type.isMultiCell();
            if (isFrozenCollection)
            {
                validateForFrozenCollection(target);
            }
            else
            {
                validateNotFullIndex(target);
                validateIsSimpleIndexIfTargetColumnNotCollection(cd, target);
                validateTargetColumnIsMapIfIndexInvolvesKeys(isMap, target);
            }

            checkFalse(cd.type.isUDT() && cd.type.isMultiCell(), "Secondary indexes are not supported on non-frozen UDTs");
        }

        // explicit names must be unique within the keyspace
        if (!Strings.isNullOrEmpty(indexName))
        {
            if (Schema.instance.getKeyspaceMetadata(keyspace()).existingIndexNames(null).contains(indexName))
            {
                if (ifNotExists)
                    return;
                else
                    throw new InvalidRequestException(String.format("Index %s already exists", indexName));
            }
        }

        properties.validate();
    }

    /** Frozen collections support only full() indexes. */
    private void validateForFrozenCollection(IndexTarget target) throws InvalidRequestException
    {
        if (target.type != IndexTarget.Type.FULL)
            throw new InvalidRequestException(String.format("Cannot create %s() index on frozen column %s. " +
                                                            "Frozen collections only support full() indexes",
                                                            target.type, target.column));
    }

    /** Conversely, full() indexes are only valid on frozen collections. */
    private void validateNotFullIndex(IndexTarget target) throws InvalidRequestException
    {
        if (target.type == IndexTarget.Type.FULL)
            throw new InvalidRequestException("full() indexes can only be created on frozen collections");
    }

    /** Non-collection columns only take simple indexes (no keys()/values()/entries()). */
    private void validateIsSimpleIndexIfTargetColumnNotCollection(ColumnMetadata cd, IndexTarget target) throws InvalidRequestException
    {
        if (!cd.type.isCollection() && target.type != IndexTarget.Type.SIMPLE)
            throw new InvalidRequestException(String.format("Cannot create %s() index on %s. " +
                                                            "Non-collection columns support only simple indexes",
                                                            target.type.toString(), target.column));
    }

    /** keys() / entries() indexes only make sense on map columns. */
    private void validateTargetColumnIsMapIfIndexInvolvesKeys(boolean isMap, IndexTarget target) throws InvalidRequestException
    {
        if (target.type == IndexTarget.Type.KEYS || target.type == IndexTarget.Type.KEYS_AND_VALUES)
        {
            if (!isMap)
                throw new InvalidRequestException(String.format("Cannot create index on %s of column %s with non-map type",
                                                                target.type, target.column));
        }
    }

    /** Multi-column targets are only allowed for CUSTOM indexes and must not repeat a column. */
    private void validateTargetsForMultiColumnIndex(List<IndexTarget> targets)
    {
        if (!properties.isCustom)
            throw new InvalidRequestException("Only CUSTOM indexes support multiple columns");

        Set<ColumnIdentifier> columns = Sets.newHashSetWithExpectedSize(targets.size());
        for (IndexTarget target : targets)
            if (!columns.add(target.column))
                throw new InvalidRequestException("Duplicate column " + target.column + " in index target list");
    }

    /**
     * Builds the final {@link IndexMetadata} (generating a name when none was
     * given), rejects duplicates, and announces the schema migration.
     *
     * @return the resulting schema-change event, or null when IF NOT EXISTS
     *         suppressed a duplicate-index error
     */
    public Event.SchemaChange announceMigration(QueryState queryState, boolean isLocalOnly) throws RequestValidationException
    {
        TableMetadata current = Schema.instance.getTableMetadata(keyspace(), columnFamily());
        List<IndexTarget> targets = new ArrayList<>(rawTargets.size());
        for (IndexTarget.Raw rawTarget : rawTargets)
            targets.add(rawTarget.prepare(current));

        String acceptedName = indexName;
        if (Strings.isNullOrEmpty(acceptedName))
        {
            // derive a free name from the single target column when possible
            acceptedName = Indexes.getAvailableIndexName(keyspace(),
                                                         columnFamily(),
                                                         targets.size() == 1 ? targets.get(0).column.toString() : null);
        }

        // re-check the name: the schema may have changed since validate() ran
        if (Schema.instance.getKeyspaceMetadata(keyspace()).existingIndexNames(null).contains(acceptedName))
        {
            if (ifNotExists)
                return null;
            else
                throw new InvalidRequestException(String.format("Index %s already exists", acceptedName));
        }

        IndexMetadata.Kind kind;
        Map<String, String> indexOptions;
        if (properties.isCustom)
        {
            kind = IndexMetadata.Kind.CUSTOM;
            indexOptions = properties.getOptions();
        }
        else
        {
            indexOptions = Collections.emptyMap();
            kind = current.isCompound() ? IndexMetadata.Kind.COMPOSITES : IndexMetadata.Kind.KEYS;
        }

        IndexMetadata index = IndexMetadata.fromIndexTargets(targets, acceptedName, kind, indexOptions);

        // check to disallow creation of an index which duplicates an existing one in all but name
        Optional<IndexMetadata> existingIndex = Iterables.tryFind(current.indexes, existing -> existing.equalsWithoutName(index));
        if (existingIndex.isPresent())
        {
            if (ifNotExists)
                return null;
            else
                throw new InvalidRequestException(String.format("Index %s is a duplicate of existing index %s",
                                                                index.name,
                                                                existingIndex.get().name));
        }

        TableMetadata updated =
            current.unbuild()
                   .indexes(current.indexes.with(index))
                   .build();

        logger.trace("Updating index definition for {}", indexName);
        MigrationManager.announceTableUpdate(updated, isLocalOnly);

        // Creating an index is akin to updating the CF
        return new Event.SchemaChange(Event.SchemaChange.Change.UPDATED, Event.SchemaChange.Target.TABLE, keyspace(), columnFamily());
    }

    @Override
    public String toString()
    {
        return ToStringBuilder.reflectionToString(this, ToStringStyle.SHORT_PREFIX_STYLE);
    }
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* StructMemberAcessJUnitTest.java
* JUnit based test
*
* Created on March 24, 2005, 5:54 PM
*/
package com.gemstone.gemfire.cache.query.functional;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import com.gemstone.gemfire.cache.AttributesFactory;
import com.gemstone.gemfire.cache.Region;
import com.gemstone.gemfire.cache.RegionAttributes;
import com.gemstone.gemfire.cache.query.CacheUtils;
import com.gemstone.gemfire.cache.query.Query;
import com.gemstone.gemfire.cache.query.SelectResults;
import com.gemstone.gemfire.cache.query.Utils;
import com.gemstone.gemfire.cache.query.data.Address;
import com.gemstone.gemfire.cache.query.data.Employee;
import com.gemstone.gemfire.cache.query.data.Manager;
import com.gemstone.gemfire.cache.query.data.Portfolio;
import com.gemstone.gemfire.cache.query.internal.StructSet;
import com.gemstone.gemfire.cache.query.types.CollectionType;
import com.gemstone.gemfire.cache.query.types.StructType;
import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
/**
* @author vaibhav
*/
@Category(IntegrationTest.class)
public class StructMemberAccessJUnitTest {
/** Starts the cache and seeds a "Portfolios" region with four entries keyed "0".."3". */
@Before
public void setUp() throws java.lang.Exception {
    CacheUtils.startCache();
    Region region = CacheUtils.createRegion("Portfolios", Portfolio.class);
    int key = 0;
    while (key < 4) {
        region.put("" + key, new Portfolio(key));
        key++;
    }
}
@After
public void tearDown() throws java.lang.Exception {
CacheUtils.closeCache();
}
@Test
public void testUnsupportedQueries() throws Exception {
String queries[] = {
"SELECT DISTINCT * FROM"
+ " (SELECT DISTINCT * FROM /Portfolios ptf, positions pos)"
+ " WHERE value.secId = 'IBM'",
"SELECT DISTINCT * FROM"
+ " (SELECT DISTINCT * FROM /Portfolios ptf, positions pos) p"
+ " WHERE p.get(1).value.secId = 'IBM'",
"SELECT DISTINCT * FROM"
+ " (SELECT DISTINCT * FROM /Portfolios ptf, positions pos) p"
+ " WHERE p[1].value.secId = 'IBM'",
"SELECT DISTINCT * FROM"
+ " (SELECT DISTINCT * FROM /Portfolios ptf, positions pos) p"
+ " WHERE p.value.secId = 'IBM'"};
for (int i = 0; i < queries.length; i++) {
try {
Query q = CacheUtils.getQueryService().newQuery(queries[i]);
Object r = q.execute();
CacheUtils.log(Utils.printResult(r));
fail(queries[i]);
} catch (Exception e) {
//e.printStackTrace();
}
}
}
@Test
public void testSupportedQueries() throws Exception {
String queries[] = {
"SELECT DISTINCT * FROM"
+ " (SELECT DISTINCT * FROM /Portfolios ptf, positions pos)"
+ " WHERE pos.value.secId = 'IBM'",
"SELECT DISTINCT * FROM"
+ " (SELECT DISTINCT * FROM /Portfolios AS ptf, positions AS pos)"
+ " WHERE pos.value.secId = 'IBM'",
"SELECT DISTINCT * FROM"
+ " (SELECT DISTINCT * FROM ptf IN /Portfolios, pos IN positions)"
+ " WHERE pos.value.secId = 'IBM'",
"SELECT DISTINCT * FROM"
+ " (SELECT DISTINCT pos AS myPos FROM /Portfolios ptf, positions pos)"
+ " WHERE myPos.value.secId = 'IBM'",
"SELECT DISTINCT * FROM"
+ " (SELECT DISTINCT * FROM /Portfolios ptf, positions pos) p"
+ " WHERE p.pos.value.secId = 'IBM'",
"SELECT DISTINCT * FROM"
+ " (SELECT DISTINCT * FROM /Portfolios ptf, positions pos) p"
+ " WHERE pos.value.secId = 'IBM'",
"SELECT DISTINCT * FROM"
+ " (SELECT DISTINCT * FROM /Portfolios, positions) p"
+ " WHERE p.positions.value.secId = 'IBM'",
"SELECT DISTINCT * FROM"
+ " (SELECT DISTINCT * FROM /Portfolios, positions)"
+ " WHERE positions.value.secId = 'IBM'",
"SELECT DISTINCT * FROM"
+ " (SELECT DISTINCT * FROM /Portfolios ptf, positions pos) p"
+ " WHERE p.get('pos').value.secId = 'IBM'",
"SELECT DISTINCT name FROM"
+ " /Portfolios , secIds name where length > 0 ",};
for (int i = 0; i < queries.length; i++) {
try {
Query q = CacheUtils.getQueryService().newQuery(queries[i]);
Object r = q.execute();
CacheUtils.log(Utils.printResult(r));
} catch (Exception e) {
e.printStackTrace();
fail(queries[i]);
}
}
}
@Test
public void testResultComposition() throws Exception {
String queries[] = { "select distinct p from /Portfolios p where p.ID > 0",
"select distinct p.getID from /Portfolios p where p.ID > 0 ",
"select distinct p.getID as secID from /Portfolios p where p.ID > 0 "};
for (int i = 0; i < queries.length; i++) {
Query q = CacheUtils.getQueryService().newQuery(queries[i]);
Object o = q.execute();
if (o instanceof SelectResults) {
SelectResults sr = (SelectResults) o;
if (sr instanceof StructSet && i != 2)
fail(" StructMemberAccess::testResultComposition: Got StrcutSet when expecting ResultSet");
CollectionType ct = sr.getCollectionType();
CacheUtils.log("***Elememt Type of Colelction = "
+ ct.getElementType());
CacheUtils.log((sr.getCollectionType())
.getElementType().getSimpleClassName());
List ls = sr.asList();
for (int j = 0; j < ls.size(); ++j)
CacheUtils.log("Object in the resultset = "
+ ls.get(j).getClass());
switch (i) {
case 0:
if (ct.getElementType().getSimpleClassName().equals("Portfolio")) {
assertTrue(true);
} else {
System.out
.println("StructMemberAcessJUnitTest::testResultComposition:Colelction Element's class="
+ ct.getElementType().getSimpleClassName());
fail();
}
break;
case 1:
if (ct.getElementType().getSimpleClassName().equals("int")) {
assertTrue(true);
} else {
System.out
.println("StructMemberAcessJUnitTest::testResultComposition:Colelction Element's class="
+ ct.getElementType().getSimpleClassName());
fail();
}
break;
case 2:
if (ct.getElementType().getSimpleClassName().equals("Struct")) {
assertTrue(true);
} else {
System.out
.println("StructMemberAcessJUnitTest::testResultComposition:Colelction Element's class="
+ ct.getElementType().getSimpleClassName());
fail();
}
}
}
}
}
public void _BUGtestSubClassQuery() throws Exception {
Set add1 = new HashSet();
Set add2 = new HashSet();
add1.add(new Address("Hp3 9yf", "Apsley"));
add1.add(new Address("Hp4 9yf", "Apsleyss"));
add2.add(new Address("Hp3 8DZ", "Hemel"));
add2.add(new Address("Hp4 8DZ", "Hemel"));
Region region = CacheUtils.createRegion("employees", Employee.class);
region.put("1", new Manager("aaa", 27, 270, "QA", 1800, add1, 2701));
region.put("2", new Manager("bbb", 28, 280, "QA", 1900, add2, 2801));
String queries[] = { "SELECT DISTINCT e.manager_id FROM /employees e"};
for (int i = 0; i < queries.length; i++) {
Query q = CacheUtils.getQueryService().newQuery(queries[i]);
Object r = q.execute();
CacheUtils.log(Utils.printResult(r));
String className = (((SelectResults) r).getCollectionType())
.getElementType().getSimpleClassName();
if (className.equals("Employee")) {
CacheUtils.log("pass");
} else {
fail("StructMemberAccessTest::testSubClassQuery:failed .Expected class name Employee. Actualy obtained="
+ className);
}
}
}
@Test
public void testBugNumber_32354() {
String queries[] = { "select distinct * from /root/portfolios.values, positions.values ",};
int i = 0;
try {
tearDown();
CacheUtils.startCache();
Region rootRegion = CacheUtils.createRegion("root", null);
AttributesFactory attributesFactory = new AttributesFactory();
attributesFactory.setValueConstraint(Portfolio.class);
RegionAttributes regionAttributes = attributesFactory
.create();
Region region = rootRegion
.createSubregion("portfolios", regionAttributes);
for (i = 0; i < 4; i++) {
region.put("" + i, new Portfolio(i));
}
for (i = 0; i < queries.length; i++) {
Query q = CacheUtils.getQueryService().newQuery(queries[i]);
Object r = q.execute();
CacheUtils.log(Utils.printResult(r));
StructType type = ((StructType) ((SelectResults) r).getCollectionType()
.getElementType());
String fieldNames[] = type.getFieldNames();
for (i = 0; i < fieldNames.length; ++i) {
String name = fieldNames[i];
CacheUtils.log("Struct Field name = " + name);
if (name.equals("/root/portfolios")
|| name.equals("positions.values")) {
fail("The field name in struct = " + name);
}
}
}
} catch (Exception e) {
e.printStackTrace();
fail(queries[i]);
}
}
@Test
public void testBugNumber_32355() {
String queries[] = { "select distinct positions.values.toArray[0], positions.values.toArray[0],status from /Portfolios",};
int i = 0;
try {
for (i = 0; i < queries.length; i++) {
Query q = CacheUtils.getQueryService().newQuery(queries[i]);
Object r = q.execute();
CacheUtils.log(Utils.printResult(r));
StructType type = ((StructType) ((SelectResults) r).getCollectionType()
.getElementType());
String fieldNames[] = type.getFieldNames();
for (i = 0; i < fieldNames.length; ++i) {
String name = fieldNames[i];
CacheUtils.log("Struct Field name = " + name);
}
}
} catch (Exception e) {
e.printStackTrace();
fail(queries[i]);
}
}
}
| |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.util;
import com.facebook.presto.metadata.FunctionRegistry;
import com.facebook.presto.spi.PrestoException;
import com.facebook.presto.spi.type.Type;
import com.google.common.collect.ImmutableList;
import it.unimi.dsi.fastutil.Hash;
import it.unimi.dsi.fastutil.booleans.BooleanOpenHashSet;
import it.unimi.dsi.fastutil.doubles.DoubleHash;
import it.unimi.dsi.fastutil.doubles.DoubleOpenCustomHashSet;
import it.unimi.dsi.fastutil.longs.LongHash;
import it.unimi.dsi.fastutil.longs.LongOpenCustomHashSet;
import it.unimi.dsi.fastutil.objects.ObjectOpenCustomHashSet;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodType;
import java.util.Collection;
import java.util.Set;
import static com.facebook.presto.spi.StandardErrorCode.GENERIC_INTERNAL_ERROR;
import static com.facebook.presto.spi.function.OperatorType.EQUAL;
import static com.facebook.presto.spi.function.OperatorType.HASH_CODE;
import static com.google.common.base.Throwables.throwIfInstanceOf;
import static java.lang.Math.toIntExact;
/**
 * Builds type-specialized fastutil hash sets whose hashing and equality are
 * delegated to the Presto type's HASH_CODE and EQUAL operators, plus the
 * matching {@code in(...)} membership helpers invoked from generated code.
 */
public final class FastutilSetHelper
{
    private FastutilSetHelper() {}

    /**
     * Converts {@code set} into a fastutil set specialized for the Java
     * representation of {@code type}.
     *
     * @param set values to copy into the specialized set
     * @param type Presto type of the values; supplies hash/equals operators
     * @param registry used to resolve the HASH_CODE and EQUAL operators
     * @return a specialized fastutil set containing the same values
     * @throws UnsupportedOperationException for unsupported primitive types
     */
    @SuppressWarnings({"unchecked"})
    public static Set<?> toFastutilHashSet(Set<?> set, Type type, FunctionRegistry registry)
    {
        // 0.25 as the load factor is chosen because the argument set is assumed to be small (<10000),
        // and the return set is assumed to be read-heavy.
        // The performance of InCodeGenerator heavily depends on the load factor being small.
        Class<?> javaElementType = type.getJavaType();
        if (javaElementType == long.class) {
            return new LongOpenCustomHashSet((Collection<Long>) set, 0.25f, new LongStrategy(registry, type));
        }
        else if (javaElementType == double.class) {
            return new DoubleOpenCustomHashSet((Collection<Double>) set, 0.25f, new DoubleStrategy(registry, type));
        }
        else if (javaElementType == boolean.class) {
            // Booleans need no custom strategy: identity hash/equals is exact.
            return new BooleanOpenHashSet((Collection<Boolean>) set, 0.25f);
        }
        else if (!javaElementType.isPrimitive()) {
            return new ObjectOpenCustomHashSet(set, 0.25f, new ObjectStrategy(registry, type));
        }
        else {
            throw new UnsupportedOperationException("Unsupported native type in set: " + javaElementType + " with type " + type.getTypeSignature());
        }
    }

    /** Membership check for boolean-typed IN lists. */
    public static boolean in(boolean booleanValue, BooleanOpenHashSet set)
    {
        return set.contains(booleanValue);
    }

    /** Membership check for double-typed IN lists. */
    public static boolean in(double doubleValue, DoubleOpenCustomHashSet set)
    {
        return set.contains(doubleValue);
    }

    /** Membership check for long-typed IN lists. */
    public static boolean in(long longValue, LongOpenCustomHashSet set)
    {
        return set.contains(longValue);
    }

    /** Membership check for object-typed (Slice, Block, ...) IN lists. */
    public static boolean in(Object objectValue, ObjectOpenCustomHashSet<?> set)
    {
        return set.contains(objectValue);
    }

    /** Hash/equality strategy for long values backed by the type's operators. */
    private static final class LongStrategy
            implements LongHash.Strategy
    {
        private final MethodHandle hashCodeHandle;
        private final MethodHandle equalsHandle;

        private LongStrategy(FunctionRegistry registry, Type type)
        {
            hashCodeHandle = registry.getScalarFunctionImplementation(registry.resolveOperator(HASH_CODE, ImmutableList.of(type))).getMethodHandle();
            equalsHandle = registry.getScalarFunctionImplementation(registry.resolveOperator(EQUAL, ImmutableList.of(type, type))).getMethodHandle();
        }

        @Override
        public int hashCode(long value)
        {
            try {
                // Operator returns a 64-bit hash; fold it into 32 bits.
                return Long.hashCode((long) hashCodeHandle.invokeExact(value));
            }
            catch (Throwable t) {
                throwIfInstanceOf(t, Error.class);
                throwIfInstanceOf(t, PrestoException.class);
                throw new PrestoException(GENERIC_INTERNAL_ERROR, t);
            }
        }

        @Override
        public boolean equals(long a, long b)
        {
            try {
                return (boolean) equalsHandle.invokeExact(a, b);
            }
            catch (Throwable t) {
                throwIfInstanceOf(t, Error.class);
                throwIfInstanceOf(t, PrestoException.class);
                throw new PrestoException(GENERIC_INTERNAL_ERROR, t);
            }
        }
    }

    /** Hash/equality strategy for double values backed by the type's operators. */
    private static final class DoubleStrategy
            implements DoubleHash.Strategy
    {
        private final MethodHandle hashCodeHandle;
        private final MethodHandle equalsHandle;

        private DoubleStrategy(FunctionRegistry registry, Type type)
        {
            hashCodeHandle = registry.getScalarFunctionImplementation(registry.resolveOperator(HASH_CODE, ImmutableList.of(type))).getMethodHandle();
            equalsHandle = registry.getScalarFunctionImplementation(registry.resolveOperator(EQUAL, ImmutableList.of(type, type))).getMethodHandle();
        }

        @Override
        public int hashCode(double value)
        {
            try {
                // Operator returns a 64-bit hash; fold it into 32 bits.
                return Long.hashCode((long) hashCodeHandle.invokeExact(value));
            }
            catch (Throwable t) {
                throwIfInstanceOf(t, Error.class);
                throwIfInstanceOf(t, PrestoException.class);
                throw new PrestoException(GENERIC_INTERNAL_ERROR, t);
            }
        }

        @Override
        public boolean equals(double a, double b)
        {
            try {
                return (boolean) equalsHandle.invokeExact(a, b);
            }
            catch (Throwable t) {
                throwIfInstanceOf(t, Error.class);
                throwIfInstanceOf(t, PrestoException.class);
                throw new PrestoException(GENERIC_INTERNAL_ERROR, t);
            }
        }
    }

    /** Hash/equality strategy for reference-typed values backed by the type's operators. */
    private static final class ObjectStrategy
            implements Hash.Strategy
    {
        private final MethodHandle hashCodeHandle;
        private final MethodHandle equalsHandle;

        private ObjectStrategy(FunctionRegistry registry, Type type)
        {
            // Adapt the handles to (Object) signatures so invokeExact works
            // against the erased Hash.Strategy callbacks.
            hashCodeHandle = registry.getScalarFunctionImplementation(registry.resolveOperator(HASH_CODE, ImmutableList.of(type)))
                    .getMethodHandle()
                    .asType(MethodType.methodType(long.class, Object.class));
            equalsHandle = registry.getScalarFunctionImplementation(registry.resolveOperator(EQUAL, ImmutableList.of(type, type)))
                    .getMethodHandle()
                    .asType(MethodType.methodType(boolean.class, Object.class, Object.class));
        }

        @Override
        public int hashCode(Object value)
        {
            try {
                // Long.hashCode already returns an int; the previous toIntExact
                // wrapper was redundant.
                return Long.hashCode((long) hashCodeHandle.invokeExact(value));
            }
            catch (Throwable t) {
                throwIfInstanceOf(t, Error.class);
                throwIfInstanceOf(t, PrestoException.class);
                throw new PrestoException(GENERIC_INTERNAL_ERROR, t);
            }
        }

        @Override
        public boolean equals(Object a, Object b)
        {
            try {
                return (boolean) equalsHandle.invokeExact(a, b);
            }
            catch (Throwable t) {
                throwIfInstanceOf(t, Error.class);
                throwIfInstanceOf(t, PrestoException.class);
                throw new PrestoException(GENERIC_INTERNAL_ERROR, t);
            }
        }
    }
}
| |
package org.gw4e.eclipse.wizard.convert.page;
/*-
* #%L
* gw4e
* $Id:$
* $HeadURL:$
* %%
* Copyright (C) 2017 gw4e-project
* %%
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
* #L%
*/
import java.io.IOException;
import java.util.List;
import java.util.regex.Pattern;
import org.eclipse.core.resources.IFile;
import org.eclipse.core.resources.IFolder;
import org.eclipse.core.resources.IResource;
import org.eclipse.core.runtime.IPath;
import org.eclipse.jdt.core.ICompilationUnit;
import org.eclipse.jdt.core.IJavaElement;
import org.eclipse.jdt.core.IPackageFragment;
import org.eclipse.jdt.core.IPackageFragmentRoot;
import org.eclipse.jdt.core.JavaCore;
import org.eclipse.jdt.core.JavaModelException;
import org.eclipse.jdt.ui.JavaElementLabelProvider;
import org.eclipse.jface.viewers.ComboViewer;
import org.eclipse.jface.viewers.ISelectionChangedListener;
import org.eclipse.jface.viewers.IStructuredContentProvider;
import org.eclipse.jface.viewers.IStructuredSelection;
import org.eclipse.jface.viewers.SelectionChangedEvent;
import org.eclipse.jface.viewers.StructuredSelection;
import org.eclipse.swt.SWT;
import org.eclipse.swt.custom.BusyIndicator;
import org.eclipse.swt.events.ModifyEvent;
import org.eclipse.swt.events.ModifyListener;
import org.eclipse.swt.layout.GridData;
import org.eclipse.swt.layout.GridLayout;
import org.eclipse.swt.widgets.Button;
import org.eclipse.swt.widgets.Combo;
import org.eclipse.swt.widgets.Composite;
import org.eclipse.swt.widgets.Display;
import org.eclipse.swt.widgets.Event;
import org.eclipse.swt.widgets.Label;
import org.eclipse.swt.widgets.Listener;
import org.eclipse.swt.widgets.Text;
import org.gw4e.eclipse.facade.GraphWalkerFacade;
import org.gw4e.eclipse.facade.JDTManager;
import org.gw4e.eclipse.facade.ResourceManager;
import org.gw4e.eclipse.message.MessageUtil;
import org.gw4e.eclipse.wizard.convert.ResourceContext;
public class GeneratorChoiceComposite extends Composite {
public static final String GW4E_CONVERSION_WIDGET_ID = "id.gw4e.conversion.widget.id";
public static final String GW4E_CONVERSION_COMBO_ANCESTOR_APPEND_TEST = "id.gw4e.conversion.combo.ancestor.append.id";
public static final String GW4E_CONVERSION_COMBO_ANCESTOR_EXTEND_TEST = "id.gw4e.conversion.combo.ancestor.extend.id";
public static final String GW4E_APPEND_CHECKBOX = "id.gw4e.conversion.choice.append.class";
public static final String GW4E_EXTEND_CHECKBOX = "id.gw4e.conversion.choice.extend.class";
public static final String GW4E_NEWCLASS_CHECKBOX = "id.gw4e.conversion.choice.new.class";
public static final String GW4E_EXTEND_CLASS_TEXT = "id.gw4e.conversion.text.extend.class";
public static final String GW4E_NEWCLASS_TEXT = "id.gw4e.conversion.text.new.class";
private Button btnAppendRadioButton;
private Label lblAppendClassNameLabel;
private AncestorViewer comboAppendClassnameViewer;
private Button btnExtendRadioButton;
private Label lblExtendedLabel;
private AncestorViewer comboExtendedClassnameViewer;
private Label lblExtendingLabel;
private Text extendingClassnameText;
private Button btnCreateNewRadioButton;
private Label lblNewClassnameLabel;
private Text newClassnameText;
private Listener listener;
List<IFile> ancestors = null;
IStructuredSelection selection = null;
IPackageFragment pkgf = null;
String classname;
IPath path;
String startElement = null;
public GeneratorChoiceComposite(Composite parent, int style, IStructuredSelection selection, Listener listener) {
super(parent, style);
setLayout(new GridLayout(12, false));
GridData gridData = new GridData();
gridData.horizontalAlignment = GridData.FILL;
gridData.grabExcessHorizontalSpace = true;
setLayoutData(gridData);
this.listener = listener;
this.selection = selection;
IFile file = (IFile) selection.getFirstElement();
loadAncestor(file);
findStartElement () ;
Label explanationLabel = new Label(this, SWT.NONE);
explanationLabel.setLayoutData(new GridData(SWT.FILL));
explanationLabel.setText(MessageUtil.getString("three_modes_explanation"));
skip(this);
createAppendMode();
createExtendMode();
createNewMode();
}
public IPackageFragment getPackageFragment() {
return pkgf;
}
public ResourceContext.GENERATION_MODE getMode() {
if (isAppendMode())
return ResourceContext.GENERATION_MODE.APPEND;
if (isExtendMode())
return ResourceContext.GENERATION_MODE.EXTEND;
return ResourceContext.GENERATION_MODE.CREATE;
}
public IPackageFragmentRoot getRoot() {
IFile file = (IFile) selection.getFirstElement();
IPackageFragmentRoot root = null;
try {
root = JDTManager.findPackageFragmentRoot(file.getProject(), pkgf.getPath());
} catch (JavaModelException e) {
ResourceManager.logException(e);
}
return root;
}
public boolean isAppendMode() {
return btnAppendRadioButton.getSelection();
}
public boolean isExtendMode() {
return btnExtendRadioButton.getSelection();
}
public boolean isCreateMode() {
return btnCreateNewRadioButton.getSelection();
}
private void findStartElement () {
IFile file = (IFile) selection.getFirstElement();
try {
startElement = GraphWalkerFacade.getStartElement(ResourceManager.toFile(file.getFullPath()));
} catch (IOException e) {
ResourceManager.logException(e);
}
}
public String getClassName() {
if (isCreateMode()) {
return newClassnameText.getText();
}
if (isExtendMode()) {
return extendingClassnameText.getText();
}
IStructuredSelection selection = (IStructuredSelection) comboAppendClassnameViewer.getSelection();
ICompilationUnit unit = (ICompilationUnit) selection.getFirstElement();
return unit.getElementName();
}
public String validate() {
if ((startElement == null) || (startElement.trim().length() == 0)) {
String msg = MessageUtil.getString("no_start_element_defined_in_the_graph");
return msg;
}
if (btnAppendRadioButton != null && btnAppendRadioButton.getSelection()) {
if (comboAppendClassnameViewer.getCombo().getText().trim().length() == 0) {
String msg = MessageUtil.getString("you_must_select_an_existing_test");
return msg;
}
}
if (btnCreateNewRadioButton != null && btnCreateNewRadioButton.getSelection()) {
String value = newClassnameText.getText();
if (value == null) {
String msg = MessageUtil.getString("you_must_select_a_new_test_class");
newClassnameText.setFocus();
return msg;
}
if (value.trim().length() == 0) {
String msg = MessageUtil.getString("you_must_select_a_new_test_class");
newClassnameText.setFocus();
return msg;
}
if (!validateClassName(value.trim())) {
String msg = MessageUtil.getString("you_must_select_a_valid_test_class_name");
newClassnameText.setFocus();
return msg;
}
String path = this.getPackageFragment().getPath().append(value + ".java").toString();
IResource resource = ResourceManager.getResource(path);
if (resource != null && resource.exists()) {
String msg = MessageUtil.getString("you_must_select_a_different_test_class_name");
newClassnameText.setFocus();
return msg;
}
}
if (btnExtendRadioButton != null && btnExtendRadioButton.getSelection()) {
String extendedClass = comboExtendedClassnameViewer.getCombo().getText().trim();
if (extendedClass.length() == 0) {
String msg = MessageUtil.getString("you_must_select_an_existing_test");
comboExtendedClassnameViewer.getCombo().setFocus();
return msg;
}
String value = extendingClassnameText.getText();
if (value == null) {
String msg = MessageUtil.getString("you_must_select_a_new_test_class");
extendingClassnameText.setFocus();
return msg;
}
if (value.trim().length() == 0) {
String msg = MessageUtil.getString("you_must_select_a_new_test_class");
extendingClassnameText.setFocus();
return msg;
}
if (!validateClassName(value.trim())) {
String msg = MessageUtil.getString("you_must_select_a_valid_test_class_name");
extendingClassnameText.setFocus();
return msg;
}
IStructuredSelection selection = (IStructuredSelection) comboExtendedClassnameViewer.getSelection();
ICompilationUnit unit = (ICompilationUnit) selection.getFirstElement();
String selectedAncestor = unit.getElementName().split(Pattern.quote("."))[0];
if (selectedAncestor.equals(value)) {
String msg = MessageUtil.getString("you_must_select_a_different_test_class_name");
extendingClassnameText.setFocus();
return msg;
}
String path = this.getPackageFragment().getPath().append(value + ".java").toString();
IResource resource = ResourceManager.getResource(path);
if (resource != null && resource.exists()) {
String msg = MessageUtil.getString("you_must_select_a_different_test_class_name");
extendingClassnameText.setFocus();
return msg;
}
}
if (pkgf == null) {
String msg = MessageUtil.getString("invalid_pkg");
return msg;
}
if (getRoot() == null) {
String msg = MessageUtil.getString("invalid_root");
return msg;
}
return null;
}
public void setTarget(IPath p, String name) {
IFolder folder = (IFolder) ResourceManager.getResource(p.toString());
IJavaElement element = JavaCore.create(folder);
if (element instanceof IPackageFragmentRoot) {
this.pkgf = ((IPackageFragmentRoot) element).getPackageFragment(IPackageFragment.DEFAULT_PACKAGE_NAME);
} else {
this.pkgf = (IPackageFragment) element;
}
String value = name.split(Pattern.quote(".")) [0];
newClassnameText.setText(value);
extendingClassnameText.setText(value);
}
private void updateUI() {
lblAppendClassNameLabel.setEnabled(false);
comboAppendClassnameViewer.setEnabled(false);
lblExtendedLabel.setEnabled(false);
comboExtendedClassnameViewer.setEnabled(false);
lblExtendingLabel.setEnabled(false);
extendingClassnameText.setEnabled(false);
lblNewClassnameLabel.setEnabled(false);
newClassnameText.setEnabled(false);
if (btnAppendRadioButton.getSelection()) {
lblAppendClassNameLabel.setEnabled(true);
comboAppendClassnameViewer.setEnabled(true);
comboAppendClassnameViewer.getCombo().setFocus();
}
if (btnExtendRadioButton.getSelection()) {
lblExtendedLabel.setEnabled(true);
comboExtendedClassnameViewer.setEnabled(true);
comboExtendedClassnameViewer.getCombo().setFocus();
lblExtendingLabel.setEnabled(true);
extendingClassnameText.setEnabled(true);
}
if (btnCreateNewRadioButton.getSelection()) {
lblNewClassnameLabel.setEnabled(true);
newClassnameText.setEnabled(true);
newClassnameText.setFocus();
}
listener.handleEvent(null);
}
/**
* @param parent
*/
private void skip(Composite parent) {
Label lblDummy = new Label(parent, SWT.NONE);
lblDummy.setText("");
GridData gd = new GridData(GridData.FILL);
lblDummy.setLayoutData(gd);
}
private void loadAncestor(IFile file) {
Display display = Display.getCurrent();
Runnable longJob = new Runnable() {
public void run() {
display.syncExec(new Runnable() {
public void run() {
ancestors = JDTManager.findAvailableExecutionContextAncestors(file);
}
});
display.wake();
}
};
BusyIndicator.showWhile(display, longJob);
}
private void createAppendMode() {
btnAppendRadioButton = new Button(this, SWT.RADIO);
btnAppendRadioButton.setLayoutData(new GridData(SWT.FILL, SWT.CENTER, true, false, 12, 1));
btnAppendRadioButton.setText(MessageUtil.getString("append_mode"));
btnAppendRadioButton.setSelection(true);
btnAppendRadioButton.addListener(SWT.Selection, new Listener() {
public void handleEvent(Event e) {
switch (e.type) {
case SWT.Selection:
updateUI();
break;
}
}
});
btnAppendRadioButton.setData(GW4E_CONVERSION_WIDGET_ID, GW4E_APPEND_CHECKBOX);
Composite composite = new Composite(this, SWT.NONE);
composite.setLayoutData(new GridData(SWT.FILL, SWT.CENTER, true, false, 12, 1));
composite.setLayout(new GridLayout(12, false));
lblAppendClassNameLabel = new Label(composite, SWT.NONE);
lblAppendClassNameLabel.setLayoutData(new GridData(SWT.LEFT, SWT.CENTER, false, false, 4, 1));
lblAppendClassNameLabel.setText("Class name");
comboAppendClassnameViewer = new AncestorViewer(composite);
comboAppendClassnameViewer.initialize(GW4E_CONVERSION_COMBO_ANCESTOR_EXTEND_TEST, false);
comboAppendClassnameViewer.getCombo().setData(GW4E_CONVERSION_WIDGET_ID, GW4E_CONVERSION_COMBO_ANCESTOR_APPEND_TEST);
Combo combo = comboAppendClassnameViewer.getCombo();
combo.setLayoutData(new GridData(SWT.FILL, SWT.CENTER, true, false, 8, 1));
combo.setEnabled(true);
}
private boolean validateClassName(String name) {
return JDTManager.validateClassName(name);
}
public String getExtendedClassName() {
if (btnExtendRadioButton.getSelection()) {
IStructuredSelection selection = (IStructuredSelection) comboExtendedClassnameViewer.getSelection();
ICompilationUnit unit = (ICompilationUnit) selection.getFirstElement();
String selectedAncestor = unit.getElementName().split(Pattern.quote("."))[0];
return selectedAncestor;
}
return null;
}
private void createExtendMode() {
btnExtendRadioButton = new Button(this, SWT.RADIO);
btnExtendRadioButton.setLayoutData(new GridData(SWT.FILL, SWT.CENTER, true, false, 12, 1));
btnExtendRadioButton.setText(MessageUtil.getString("extending_class"));
btnExtendRadioButton.setSelection(false);
btnExtendRadioButton.addListener(SWT.Selection, new Listener() {
public void handleEvent(Event e) {
switch (e.type) {
case SWT.Selection:
updateUI();
break;
}
}
});
btnExtendRadioButton.setData(GW4E_CONVERSION_WIDGET_ID, GW4E_EXTEND_CHECKBOX);
Composite composite = new Composite(this, SWT.NONE);
composite.setLayoutData(new GridData(SWT.FILL, SWT.CENTER, true, false, 12, 1));
composite.setLayout(new GridLayout(12, false));
lblExtendedLabel = new Label(composite, SWT.NONE);
lblExtendedLabel.setLayoutData(new GridData(SWT.LEFT, SWT.CENTER, false, false, 4, 1));
lblExtendedLabel.setText(MessageUtil.getString("class_extended"));
lblExtendedLabel.setEnabled(false);
comboExtendedClassnameViewer = new AncestorViewer(composite);
comboExtendedClassnameViewer.initialize(GW4E_CONVERSION_COMBO_ANCESTOR_EXTEND_TEST, false);
comboExtendedClassnameViewer.getCombo().setData(GW4E_CONVERSION_WIDGET_ID, GW4E_CONVERSION_COMBO_ANCESTOR_EXTEND_TEST);
Combo combo = comboExtendedClassnameViewer.getCombo();
combo.setLayoutData(new GridData(SWT.FILL, SWT.CENTER, true, false, 8, 1));
combo.setEnabled(false);
lblExtendingLabel = new Label(composite, SWT.NONE);
lblExtendingLabel.setLayoutData(new GridData(SWT.LEFT, SWT.CENTER, false, false, 4, 1));
lblExtendingLabel.setText(MessageUtil.getString("classname"));
lblExtendingLabel.setEnabled(false);
extendingClassnameText = new Text(composite, SWT.BORDER);
extendingClassnameText.setEnabled(false);
extendingClassnameText.addModifyListener(new ModifyListener() {
public void modifyText(ModifyEvent event) {
listener.handleEvent(null);
}
});
extendingClassnameText.setLayoutData(new GridData(SWT.FILL, SWT.CENTER, true, false, 8, 1));
extendingClassnameText.setEnabled(false);
extendingClassnameText.setData(GW4E_CONVERSION_WIDGET_ID, GW4E_EXTEND_CLASS_TEXT);
}
private void createNewMode() {
btnCreateNewRadioButton = new Button(this, SWT.RADIO);
btnCreateNewRadioButton.setLayoutData(new GridData(SWT.FILL, SWT.CENTER, true, false, 12, 1));
btnCreateNewRadioButton.setText(MessageUtil.getString("standalone_mode"));
btnCreateNewRadioButton.setSelection(false);
btnCreateNewRadioButton.addListener(SWT.Selection, new Listener() {
public void handleEvent(Event e) {
switch (e.type) {
case SWT.Selection:
updateUI();
break;
}
}
});
btnCreateNewRadioButton.setData(GW4E_CONVERSION_WIDGET_ID, GW4E_NEWCLASS_CHECKBOX);
Composite composite = new Composite(this, SWT.NONE);
composite.setLayoutData(new GridData(SWT.FILL, SWT.CENTER, true, false, 12, 1));
composite.setLayout(new GridLayout(12, false));
lblNewClassnameLabel = new Label(composite, SWT.NONE);
lblNewClassnameLabel.setLayoutData(new GridData(SWT.LEFT, SWT.CENTER, false, false, 4, 1));
lblNewClassnameLabel.setText("Class name");
lblNewClassnameLabel.setEnabled(false);
newClassnameText = new Text(composite, SWT.BORDER);
newClassnameText.setEnabled(false);
newClassnameText.addModifyListener(new ModifyListener() {
public void modifyText(ModifyEvent event) {
listener.handleEvent(null);
}
});
newClassnameText.setLayoutData(new GridData(SWT.FILL, SWT.CENTER, true, false, 8, 1));
newClassnameText.setEnabled(false);
newClassnameText.setData(GW4E_CONVERSION_WIDGET_ID, GW4E_NEWCLASS_TEXT);
}
public class AncestorViewer extends ComboViewer {
public AncestorViewer(Composite parent) {
super(parent);
}
public void setEnabled(boolean enable) {
Combo comboAncestor = getCombo();
comboAncestor.setEnabled(enable);
}
public void initialize(String widgetid, boolean active) {
setEnabled(active);
setContentProvider(new IStructuredContentProvider() {
@Override
public Object[] getElements(Object inputElement) {
List<IFile> files = (List<IFile>) inputElement;
Object[] ret = new Object[files.size()];
int index = 0;
for (IFile file : files) {
ret[index++] = JavaCore.create(file);
}
return ret;
}
});
setLabelProvider(new JavaElementLabelProvider(
JavaElementLabelProvider.SHOW_QUALIFIED | JavaElementLabelProvider.SHOW_ROOT));
addSelectionChangedListener(new ISelectionChangedListener() {
@Override
public void selectionChanged(SelectionChangedEvent event) {
IStructuredSelection selection = (IStructuredSelection) event.getSelection();
if (selection.size() > 0) {
ICompilationUnit element = (ICompilationUnit) selection.getFirstElement();
GeneratorChoiceComposite.this.pkgf = (IPackageFragment) element.getParent();
listener.handleEvent(null);
}
}
});
setData(GW4E_CONVERSION_WIDGET_ID, widgetid);
setInput(ancestors);
if (active && hasItems()) {
setSelection(new StructuredSelection(JavaCore.create(ancestors.get(0))));
}
}
public boolean hasItems() {
return ancestors.size() > 0;
}
}
}
| |
package com.codepath.socialshopper.socialshopper.Adapters;
import android.content.Context;
import android.content.Intent;
import android.support.v7.widget.RecyclerView;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.ImageView;
import android.widget.TextView;
import android.widget.Toast;
import com.bumptech.glide.Glide;
import com.codepath.socialshopper.socialshopper.Activities.PaymentActivity;
import com.codepath.socialshopper.socialshopper.Activities.TrackStatusActivity;
import com.codepath.socialshopper.socialshopper.Models.TimeLineModel;
import com.codepath.socialshopper.socialshopper.R;
import com.codepath.socialshopper.socialshopper.Utils.DatabaseUtils;
import com.codepath.socialshopper.socialshopper.Utils.DateTimeUtils;
import com.codepath.socialshopper.socialshopper.Utils.VectorDrawableUtils;
import com.github.vipulasri.timelineview.TimelineView;
import com.google.android.gms.wallet.Cart;
import com.google.firebase.database.DataSnapshot;
import com.google.firebase.database.DatabaseError;
import com.google.firebase.database.DatabaseReference;
import com.google.firebase.database.FirebaseDatabase;
import com.google.firebase.database.ValueEventListener;
import com.stripe.wrap.pay.activity.StripeAndroidPayActivity;
import com.stripe.wrap.pay.utils.CartContentException;
import com.stripe.wrap.pay.utils.CartManager;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.List;
import java.util.Scanner;
import butterknife.BindView;
import butterknife.ButterKnife;
import okhttp3.Call;
import okhttp3.Callback;
import okhttp3.MediaType;
import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.RequestBody;
import okhttp3.Response;
/**
* Created by saripirala on 10/28/17.
*/
/**
 * RecyclerView adapter rendering the shopping-trip status timeline.
 *
 * For a COMPLETED entry it loads the receipt image URL from Firebase, shows
 * the image via Glide, sends the image to the Google Vision API to OCR the
 * bill total, and — when the user taps the receipt — starts the Stripe /
 * Android Pay payment flow with the extracted amount.
 */
public class TimeLineAdapter extends RecyclerView.Adapter<TimeLineAdapter.TimeLineViewHolder> {

    /** Timeline entries in display order. */
    private List<TimeLineModel> mFeedList;
    private Context mContext;
    private boolean mWithLinePadding;
    private LayoutInflater mLayoutInflater;
    // Written by the Firebase / Vision API callbacks, read when payment starts.
    // NOTE(review): static mutable state is shared across adapter instances —
    // consider making these instance fields.
    private static String receiptURL;
    private static Long extractReceiptAmount;

    public TimeLineAdapter(List<TimeLineModel> feedList, boolean withLinePadding) {
        mFeedList = feedList;
        mWithLinePadding = withLinePadding;
    }

    @Override
    public TimeLineViewHolder onCreateViewHolder(ViewGroup parent, int viewType) {
        mContext = parent.getContext();
        mLayoutInflater = LayoutInflater.from(mContext);
        View view = mLayoutInflater.inflate(R.layout.item_timeline, parent, false);
        return new TimeLineViewHolder(view, viewType);
    }

    @Override
    public void onBindViewHolder(final TimeLineViewHolder holder, int position) {
        TimeLineModel timeLineModel = mFeedList.get(position);
        holder.mTimelineView.setMarker(VectorDrawableUtils.getDrawable(mContext, R.drawable.ic_marker_active, android.R.color.holo_green_dark));
        if (!timeLineModel.getDate().isEmpty()) {
            holder.mDate.setVisibility(View.VISIBLE);
            holder.mDate.setText(DateTimeUtils.parseDateTime(timeLineModel.getDate(), "yyyy-MM-dd HH:mm", "hh:mm a, dd-MMM-yyyy"));
        } else {
            holder.mDate.setVisibility(View.GONE);
        }
        holder.mMessage.setText(timeLineModel.getMessage());
        // Fixed: compare string content with equals() — the original used ==,
        // which tests reference identity and only works for interned literals.
        if ("COMPLETED".equals(timeLineModel.getmStatus())) {
            holder.iVReceiptImage.setVisibility(View.VISIBLE);
            DatabaseReference mDatabase = FirebaseDatabase.getInstance().getReference();
            DatabaseReference ref = mDatabase.child("lists").child(TimeLineModel.listID).child("receiptImageURL");
            ref.addListenerForSingleValueEvent(
                    new ValueEventListener() {
                        @Override
                        public void onDataChange(DataSnapshot dataSnapshot) {
                            // Snapshot value is the receipt image URL stored for this list.
                            String image = (String) dataSnapshot.getValue();
                            receiptURL = image;
                            Glide.with(mContext)
                                    .load(image)
                                    .into(holder.iVReceiptImage);
                            // Build the Vision API request body from the raw JSON
                            // template, substituting the receipt URL placeholder.
                            String postBody = readStringFromJSON();
                            postBody = postBody.replace("$__RECEIPT_URL__", receiptURL);
                            try {
                                extractBillAmount(postBody);
                            } catch (IOException e) {
                                e.printStackTrace();
                            }
                        }

                        @Override
                        public void onCancelled(DatabaseError databaseError) {
                            // Best-effort: a cancelled read simply leaves the receipt
                            // image / amount unset.
                        }
                    });
        }
    }

    @Override
    public int getItemCount() {
        return (mFeedList != null ? mFeedList.size() : 0);
    }

    @Override
    public int getItemViewType(int position) {
        return TimelineView.getTimeLineViewType(position, getItemCount());
    }

    /**
     * Builds an Android Pay cart priced at the OCR-extracted receipt amount and
     * launches {@link PaymentActivity}. No-op (with a toast) when the amount has
     * not been extracted yet.
     */
    private void initializeCart() {
        if (extractReceiptAmount == null) {
            // The Vision API callback has not completed (or failed) — avoid the
            // NPE the original code would hit here.
            Toast.makeText(mContext, "receipt amount not available yet", Toast.LENGTH_SHORT).show();
            return;
        }
        CartManager cartManager = new CartManager();
        cartManager.setTotalPrice(extractReceiptAmount);
        try {
            Cart cart = cartManager.buildCart();
            Intent intent = new Intent(mContext, PaymentActivity.class)
                    .putExtra(StripeAndroidPayActivity.EXTRA_CART, cart)
                    .putExtra("receiptURL", receiptURL)
                    .putExtra("amount", extractReceiptAmount);
            ((TrackStatusActivity) mContext).startActivityForResult(intent, 1);
        } catch (CartContentException e) {
            Toast.makeText(mContext, "error preparing cart", Toast.LENGTH_SHORT).show();
            e.printStackTrace();
        }
    }

    /** Holds one timeline row; tapping the receipt image starts the payment flow. */
    public class TimeLineViewHolder extends RecyclerView.ViewHolder {
        @BindView(R.id.text_timeline_date)
        TextView mDate;
        @BindView(R.id.text_timeline_title)
        TextView mMessage;
        @BindView(R.id.time_marker)
        TimelineView mTimelineView;
        @BindView(R.id.receiptImage)
        ImageView iVReceiptImage;

        public TimeLineViewHolder(View itemView, int viewType) {
            super(itemView);
            ButterKnife.bind(this, itemView);
            mTimelineView.initLine(viewType);
            iVReceiptImage.setOnClickListener(new View.OnClickListener() {
                @Override
                public void onClick(View view) {
                    initializeCart();
                }
            });
        }
    }

    /**
     * Sends the OCR request to the Google Vision API asynchronously and parses
     * the response: the largest decimal number found in the extracted text
     * (after stripping '$') is assumed to be the bill total and is stored,
     * rounded, in {@link #extractReceiptAmount}.
     *
     * NOTE(review): the API key is hard-coded in the URL — move it out of
     * source control (BuildConfig / gradle.properties) and restrict it.
     *
     * @param postBody JSON request body for images:annotate
     * @throws IOException declared for callers; the enqueue itself is async
     */
    void extractBillAmount(String postBody) throws IOException {
        OkHttpClient client = new OkHttpClient();
        MediaType JSON = MediaType.parse("application/json; charset=utf-8");
        RequestBody body = RequestBody.create(JSON, postBody);
        Request request = new Request.Builder()
                .url("https://vision.googleapis.com/v1/images:annotate?key=AIzaSyCbshrDoUFiWNVYMTqPeT9687NABAgFAjs")
                .post(body)
                .build();
        client.newCall(request).enqueue(new Callback() {
            @Override
            public void onFailure(Call call, IOException e) {
                call.cancel();
            }

            @Override
            public void onResponse(Call call, Response response) throws IOException {
                // string() consumes the body; it can only be read once.
                String jsonData = response.body().string();
                try {
                    JSONObject result = new JSONObject(jsonData);
                    JSONArray arr = result.getJSONArray("responses");
                    JSONObject responseObj = arr.getJSONObject(0);
                    JSONArray textAnnotationsArr = responseObj.getJSONArray("textAnnotations");
                    // First annotation is assumed to carry the full extracted text.
                    String extractedOutput = textAnnotationsArr.getJSONObject(0).getString("description");
                    String[] elements = extractedOutput.split("\n");
                    double maxValue = 0.0;
                    for (String element : elements) {
                        try {
                            element = element.replace("$", "");
                            if (isNumericAndHasDot(element)) {
                                double value = Double.parseDouble(element);
                                maxValue = value > maxValue ? value : maxValue;
                            }
                        } catch (NumberFormatException ignored) {
                            // Not a price token — skip it deliberately.
                        }
                    }
                    extractReceiptAmount = Math.round(maxValue);
                } catch (JSONException e) {
                    Log.d("Exception", e.getLocalizedMessage());
                }
            }
        });
    }

    /**
     * @return true when the whole string parses as a double AND contains a
     *         decimal point (filters out bare integers such as quantities).
     */
    public boolean isNumericAndHasDot(String inputData) {
        // Fixed: close the Scanner (it is a Closeable resource).
        try (Scanner sc = new Scanner(inputData)) {
            return sc.hasNextDouble() && inputData.contains(".");
        }
    }

    /** Reads the raw Vision API request template (res/raw/image_to_text) as a string. */
    private String readStringFromJSON() {
        ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
        // Fixed: try-with-resources — the original leaked the stream when read() threw.
        try (InputStream inputStream = mContext.getResources().openRawResource(R.raw.image_to_text)) {
            int i = inputStream.read();
            while (i != -1) {
                byteArrayOutputStream.write(i);
                i = inputStream.read();
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
        return byteArrayOutputStream.toString();
    }
}
| |
/*
* Copyright 2000-2009 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package git4idea.status;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.Comparing;
import com.intellij.openapi.vcs.*;
import com.intellij.openapi.vcs.changes.Change;
import com.intellij.openapi.vcs.changes.ChangeListManager;
import com.intellij.openapi.vcs.changes.ContentRevision;
import com.intellij.openapi.vcs.changes.VcsDirtyScope;
import com.intellij.openapi.vfs.VirtualFile;
import git4idea.GitContentRevision;
import git4idea.GitRevisionNumber;
import git4idea.GitUtil;
import git4idea.changes.GitChangeUtils;
import git4idea.commands.GitCommand;
import git4idea.commands.GitSimpleHandler;
import git4idea.util.StringScanner;
import org.jetbrains.annotations.NotNull;
import java.util.*;
/**
* <p>
* Collects changes from the Git repository in the specified {@link com.intellij.openapi.vcs.changes.VcsDirtyScope}
* using the older technique that is replaced by {@link GitNewChangesCollector} for Git later than 1.7.0 inclusive.
* This class is used for Git older than 1.7.0 not inclusive, that don't have <code>'git status --porcelain'</code>.
* </p>
* <p>
* The method used by this class is less efficient and more error-prone than {@link GitNewChangesCollector} method.
* Thus this class is considered as a legacy code for Git 1.6.*. Read further for the implementation details and the ground for
* transferring to use {@code 'git status --porcelain'}.
* </p>
* <p>
* The following Git commands are called to get the changes, i.e. the state of the working tree combined with the state of index.
* <ul>
* <li>
* <b><code>'git update-index --refresh'</code></b> (called on the whole repository) - probably unnecessary (especially before 'git diff'),
* but is left not to break some older Gits occasionally. See the following links for some details:
* <a href="http://us.generation-nt.com/answer/bug-596126-git-status-does-not-refresh-index-fixed-since-1-7-1-1-please-consider-upgrading-1-7-1-2-squeeze-help-200234171.html">
* gitk doesn't refresh the index statinfo</a>;
* <a href="http://thread.gmane.org/gmane.comp.version-control.git/144176/focus">
* "Most git porcelain silently refreshes stat-dirty index entries"</a>;
* <a href="https://git.wiki.kernel.org/index.php/GitFaq#Can_I_import_from_tar_files_.28archives.29.3">update-index to import from tar files</a>.
* </li>
* <li>
* <b><code>'git ls-files --unmerged'</code></b> (called on the whole repository) - to get the list of unmerged files.
* It is not clear why it should be called on the whole repository. The decision to call it on the whole repository was made in
* <code>45687fe "<a href="http://youtrack.jetbrains.net/issue/IDEA-50573">IDEADEV-40577</a>: The ignored unmerged files are now reported"</code>,
* but neither the rollback & test nor the analysis recovered the need for it. It is left, however, since this is legacy code.
* </li>
* <li>
* <b><code>'git ls-files --others --exclude-standard'</code></b> (called on the dirty scope) - to get the list of unversioned files.
* Note that this command is the only way to get the list of unversioned files, besides <code>'git status'</code>.
* </li>
* <li>
* <b><code>'git diff --name-status -M HEAD -- </code></b> (called on the dirty scope) - to get all other changes (except unversioned and
* unmerged).
* Note that there is also no way to get all tracked changes by a single command (except <code>'git status'</code>), since
* <code>'git diff'</code> returns either only not-staged changes, either (<code>'git diff HEAD'</code>) treats unmerged as modified.
* </li>
* </ul>
* </p>
* <p>
* <b>Performance measurement</b>
* was performed on a large repository (like IntelliJ IDEA), on a single machine, after several "warm-ups" when <code>'git status'</code> duration
* stabilizes.
* For the whole repository:
* <code>'git status'</code> takes ~ 1300 ms while these 4 commands take ~ 1870 ms
* ('update-index' ~ 270 ms, 'ls-files --unmerged' ~ 46 ms, 'ls files --others' ~ 820 ms, 'diff' ~ 650 ms)
* ; for a single file:
* <code>'git status'</code> takes ~ 375 ms, these 4 commands take ~ 750 ms.
* </p>
* <p>
* The class is immutable: collect changes and get the instance from where they can be retrieved by {@link #collect}.
* </p>
*
* @author Constantine Plotnikov
* @author Kirill Likhodedov
*/
class GitOldChangesCollector extends GitChangesCollector {
private final List<VirtualFile> myUnversioned = new ArrayList<>(); // Unversioned files
private final Set<String> myUnmergedNames = new HashSet<>(); // Names of unmerged files
private final List<Change> myChanges = new ArrayList<>(); // all changes
/**
 * Collects the changes from the git command line and returns the instance of
 * {@link GitOldChangesCollector} from which these changes can be retrieved.
 * This may be lengthy.
 */
@NotNull
static GitOldChangesCollector collect(@NotNull Project project, @NotNull ChangeListManager changeListManager,
@NotNull ProjectLevelVcsManager vcsManager, @NotNull AbstractVcs vcs,
@NotNull VcsDirtyScope dirtyScope, @NotNull VirtualFile vcsRoot) throws VcsException {
return new GitOldChangesCollector(project, changeListManager, vcsManager, vcs, dirtyScope, vcsRoot);
}
/** @return files from the dirty scope that are not tracked by git */
@NotNull
@Override
Collection<VirtualFile> getUnversionedFiles() {
return myUnversioned;
}
/** @return all collected tracked changes: diff vs HEAD plus unmerged conflicts */
@NotNull
@Override
Collection<Change> getChanges(){
return myChanges;
}
private GitOldChangesCollector(@NotNull Project project, @NotNull ChangeListManager changeListManager,
@NotNull ProjectLevelVcsManager vcsManager, @NotNull AbstractVcs vcs, @NotNull VcsDirtyScope dirtyScope,
@NotNull VirtualFile vcsRoot) throws VcsException {
super(project, changeListManager, vcsManager, vcs, dirtyScope, vcsRoot);
// Collection order is deliberate: refresh index stat-info first (see class
// javadoc — possibly unnecessary for modern git, kept for older versions),
// then gather unmerged/unversioned files so that myUnmergedNames is
// populated before the diff parse consults it.
updateIndex();
collectUnmergedAndUnversioned();
collectDiffChanges();
}
/** Runs 'git update-index --refresh' on the whole repository to refresh stat-info. */
private void updateIndex() throws VcsException {
GitSimpleHandler handler = new GitSimpleHandler(myProject, myVcsRoot, GitCommand.UPDATE_INDEX);
handler.addParameters("--refresh", "--ignore-missing");
handler.setSilent(true);
handler.setStdoutSuppressed(true);
// Exit code 1 is tolerated — update-index --refresh reports stat-dirty
// files this way rather than signalling a real failure.
handler.ignoreErrorCode(1);
handler.run();
}
/**
 * Collect diff with head
 *
 * @throws VcsException if there is a problem with running git
 */
private void collectDiffChanges() throws VcsException {
Collection<FilePath> dirtyPaths = dirtyPaths(true);
if (dirtyPaths.isEmpty()) {
return;
}
try {
String output = GitChangeUtils.getDiffOutput(myProject, myVcsRoot, "HEAD", dirtyPaths);
// Unmerged paths collected earlier are passed in so the diff parse can
// skip them (they are reported separately as conflicts).
GitChangeUtils.parseChanges(myProject, myVcsRoot, null, GitChangeUtils.resolveReference(myProject, myVcsRoot, "HEAD"), output, myChanges,
myUnmergedNames);
}
catch (VcsException ex) {
if (!GitChangeUtils.isHeadMissing(ex)) {
throw ex;
}
GitSimpleHandler handler = new GitSimpleHandler(myProject, myVcsRoot, GitCommand.LS_FILES);
handler.addParameters("--cached");
handler.setSilent(true);
handler.setStdoutSuppressed(true);
// 'git diff HEAD' fails during repository initialization because HEAD
// exists only after the first commit. In that case files that were
// added are present in the index, so report everything listed by
// 'ls-files --cached' as ADDED.
String output = handler.run();
if (output.length() > 0) {
StringTokenizer tokenizer = new StringTokenizer(output, "\n\r");
while (tokenizer.hasMoreTokens()) {
final String s = tokenizer.nextToken();
Change ch = new Change(null, GitContentRevision.createRevision(myVcsRoot, s, null, myProject, false, true), FileStatus.ADDED);
myChanges.add(ch);
}
}
}
}
/**
 * Collect unversioned and unmerged files
 *
 * @throws VcsException if there is a problem with running git
 */
private void collectUnmergedAndUnversioned() throws VcsException {
Collection<FilePath> dirtyPaths = dirtyPaths(false);
if (dirtyPaths.isEmpty()) {
return;
}
// prepare handler: 'ls-files -v --unmerged' on the whole repository
// (see class javadoc for why it is not limited to the dirty scope)
GitSimpleHandler handler = new GitSimpleHandler(myProject, myVcsRoot, GitCommand.LS_FILES);
handler.addParameters("-v", "--unmerged");
handler.setSilent(true);
handler.setStdoutSuppressed(true);
// run handler and collect changes
parseFiles(handler.run());
// prepare handler: 'ls-files -v --others --exclude-standard' limited to the dirty paths
handler = new GitSimpleHandler(myProject, myVcsRoot, GitCommand.LS_FILES);
handler.addParameters("-v", "--others", "--exclude-standard");
handler.setSilent(true);
handler.setStdoutSuppressed(true);
handler.endOptions();
handler.addRelativePaths(dirtyPaths);
if(handler.isLargeCommandLine()) {
// Too many dirty paths to fit on the command line: fall back to running
// the same command on the whole repository without path limiting.
handler = new GitSimpleHandler(myProject, myVcsRoot, GitCommand.LS_FILES);
handler.addParameters("-v", "--others", "--exclude-standard");
handler.setSilent(true);
handler.setStdoutSuppressed(true);
handler.endOptions();
}
// run handler and collect changes
parseFiles(handler.run());
}
/**
 * Parses 'git ls-files -v' output: the first character of each line is a
 * status tag ('?' — untracked from the --others run, 'M' — unmerged from the
 * --unmerged run), followed by one separator character, then the path.
 * Untracked files go to {@link #myUnversioned}; unmerged files are recorded
 * in {@link #myUnmergedNames} and reported as modify-modify conflicts.
 */
private void parseFiles(String list) throws VcsException {
for (StringScanner sc = new StringScanner(list); sc.hasMoreData();) {
if (sc.isEol()) {
sc.nextLine();
continue;
}
char status = sc.peek();
// skip the status tag and the separator character
sc.skipChars(2);
if ('?' == status) {
VirtualFile file = myVcsRoot.findFileByRelativePath(GitUtil.unescapePath(sc.line()));
// Ignore files that actually belong to a nested git root.
if (Comparing.equal(GitUtil.gitRootOrNull(file), myVcsRoot)) {
myUnversioned.add(file);
}
}
else { //noinspection HardCodedStringLiteral
if ('M' == status) {
// --unmerged output carries extra columns before the path; skip to the tab.
sc.boundedToken('\t');
String file = GitUtil.unescapePath(sc.line());
VirtualFile vFile = myVcsRoot.findFileByRelativePath(file);
if (!Comparing.equal(GitUtil.gitRootOrNull(vFile), myVcsRoot)) {
continue;
}
// Unmerged files are listed once per stage; report each path only once.
if (!myUnmergedNames.add(file)) {
continue;
}
// assume modify-modify conflict
ContentRevision before = GitContentRevision.createRevision(myVcsRoot, file, new GitRevisionNumber("orig_head"), myProject, true,
true);
ContentRevision after = GitContentRevision.createRevision(myVcsRoot, file, null, myProject, false, true);
myChanges.add(new Change(before, after, FileStatus.MERGED_WITH_CONFLICTS));
}
else {
throw new VcsException("Unsupported type of the merge conflict detected: " + status);
}
}
}
}
}
| |
/*
* Copyright (c) 2008-2017, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.query.impl.getters;
import com.hazelcast.config.Config;
import com.hazelcast.config.MapAttributeConfig;
import com.hazelcast.query.extractor.ValueCollector;
import com.hazelcast.query.extractor.ValueExtractor;
import com.hazelcast.test.HazelcastParametersRunnerFactory;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import static com.hazelcast.query.impl.getters.ExtractorHelper.extractArgumentsFromAttributeName;
import static com.hazelcast.query.impl.getters.ExtractorHelper.extractAttributeNameNameWithoutArguments;
import static groovy.util.GroovyTestCase.assertEquals;
import static java.util.Arrays.asList;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.isA;
import static org.junit.Assert.assertNull;
@RunWith(Parameterized.class)
@Parameterized.UseParametersRunnerFactory(HazelcastParametersRunnerFactory.class)
public class ExtractorHelperTest {

    // NOTE(review): assertEquals is statically imported from
    // groovy.util.GroovyTestCase (see imports); prefer org.junit.Assert so the
    // test does not depend on Groovy being on the classpath.

    /** Runs every test twice: once passing an explicit class loader, once passing null. */
    @Parameterized.Parameters(name = "useClassloader:{0}")
    public static Collection<Object[]> parameters() {
        return Arrays.asList(new Object[][]{
                {false},
                {true}
        });
    }

    @Parameterized.Parameter(0)
    public boolean useClassloader;

    @Rule
    public ExpectedException expected = ExpectedException.none();

    @Test
    public void instantiate_extractor() {
        // GIVEN
        MapAttributeConfig config = new MapAttributeConfig("iq", "com.hazelcast.query.impl.getters.ExtractorHelperTest$IqExtractor");
        // WHEN
        ValueExtractor extractor = instantiateExtractor(config);
        // THEN
        assertThat(extractor, instanceOf(IqExtractor.class));
    }

    @Test
    public void instantiate_extractor_notExistingClass() {
        // GIVEN
        MapAttributeConfig config = new MapAttributeConfig("iq", "not.existing.class");
        // EXPECT
        expected.expect(IllegalArgumentException.class);
        expected.expectCause(isA(ClassNotFoundException.class));
        // WHEN
        instantiateExtractor(config);
    }

    @Test
    public void instantiate_extractors() {
        // GIVEN
        MapAttributeConfig iqExtractor = new MapAttributeConfig("iq", "com.hazelcast.query.impl.getters.ExtractorHelperTest$IqExtractor");
        MapAttributeConfig nameExtractor = new MapAttributeConfig("name", "com.hazelcast.query.impl.getters.ExtractorHelperTest$NameExtractor");
        // WHEN
        Map<String, ValueExtractor> extractors =
                instantiateExtractors(asList(iqExtractor, nameExtractor));
        // THEN
        assertThat(extractors.get("iq"), instanceOf(IqExtractor.class));
        assertThat(extractors.get("name"), instanceOf(NameExtractor.class));
    }

    @Test
    public void instantiate_extractors_withCustomClassLoader() {
        // GIVEN
        MapAttributeConfig iqExtractor =
                new MapAttributeConfig("iq", "com.hazelcast.query.impl.getters.ExtractorHelperTest$IqExtractor");
        MapAttributeConfig nameExtractor =
                new MapAttributeConfig("name", "com.hazelcast.query.impl.getters.ExtractorHelperTest$NameExtractor");
        // Stands in for other custom class loaders (from OSGi bundles, for example).
        ClassLoader customClassLoader = getClass().getClassLoader();
        // WHEN
        // Fixed: pass the custom class loader to the helper under test.
        // Previously a Config was populated with it but never used, so this
        // test was identical to instantiate_extractors().
        Map<String, ValueExtractor> extractors =
                ExtractorHelper.instantiateExtractors(asList(iqExtractor, nameExtractor), customClassLoader);
        // THEN
        assertThat(extractors.get("iq"), instanceOf(IqExtractor.class));
        assertThat(extractors.get("name"), instanceOf(NameExtractor.class));
    }

    @Test
    public void instantiate_extractors_oneClassNotExisting() {
        // GIVEN
        MapAttributeConfig iqExtractor = new MapAttributeConfig("iq", "com.hazelcast.query.impl.getters.ExtractorHelperTest$IqExtractor");
        MapAttributeConfig nameExtractor = new MapAttributeConfig("name", "not.existing.class");
        // EXPECT
        expected.expect(IllegalArgumentException.class);
        expected.expectCause(isA(ClassNotFoundException.class));
        // WHEN
        instantiateExtractors(asList(iqExtractor, nameExtractor));
    }

    @Test
    public void instantiate_extractors_duplicateExtractor() {
        // GIVEN
        MapAttributeConfig iqExtractor = new MapAttributeConfig("iq", "com.hazelcast.query.impl.getters.ExtractorHelperTest$IqExtractor");
        MapAttributeConfig iqExtractorDuplicate = new MapAttributeConfig("iq", "com.hazelcast.query.impl.getters.ExtractorHelperTest$IqExtractor");
        // EXPECT
        expected.expect(IllegalArgumentException.class);
        // WHEN
        instantiateExtractors(asList(iqExtractor, iqExtractorDuplicate));
    }

    @Test
    public void instantiate_extractors_wrongType() {
        // GIVEN - a class that is not a ValueExtractor
        MapAttributeConfig string = new MapAttributeConfig("iq", "java.lang.String");
        // EXPECT
        expected.expect(IllegalArgumentException.class);
        // WHEN
        instantiateExtractors(asList(string));
    }

    @Test
    public void instantiate_extractors_initException() {
        // GIVEN - an abstract extractor, so instantiation fails
        MapAttributeConfig string = new MapAttributeConfig("iq", "com.hazelcast.query.impl.getters.ExtractorHelperTest$InitExceptionExtractor");
        // EXPECT
        expected.expect(IllegalArgumentException.class);
        // WHEN
        instantiateExtractors(asList(string));
    }

    @Test
    public void instantiate_extractors_accessException() {
        // GIVEN - an extractor with a private constructor, so access fails
        MapAttributeConfig string = new MapAttributeConfig("iq", "com.hazelcast.query.impl.getters.ExtractorHelperTest$AccessExceptionExtractor");
        // EXPECT
        expected.expect(IllegalArgumentException.class);
        // WHEN
        instantiateExtractors(asList(string));
    }

    @Test
    public void extractArgument_correctArguments() {
        assertEquals("left-front", extractArgumentsFromAttributeName("car.wheel[left-front]"));
        assertEquals("123", extractArgumentsFromAttributeName("car.wheel[123]"));
        assertEquals(".';'.", extractArgumentsFromAttributeName("car.wheel[.';'.]"));
        assertEquals("", extractArgumentsFromAttributeName("car.wheel[]"));
        assertNull(extractArgumentsFromAttributeName("car.wheel"));
    }

    @Test(expected = IllegalArgumentException.class)
    public void extractArgument_wrongArguments_noClosing() {
        extractArgumentsFromAttributeName("car.wheel[left");
    }

    @Test(expected = IllegalArgumentException.class)
    public void extractArgument_wrongArguments_noArgument() {
        extractArgumentsFromAttributeName("car.wheel[");
    }

    @Test(expected = IllegalArgumentException.class)
    public void extractArgument_wrongArguments_noOpening() {
        extractArgumentsFromAttributeName("car.wheelleft]");
    }

    @Test(expected = IllegalArgumentException.class)
    public void extractArgument_wrongArguments_noArgument_noOpening() {
        extractArgumentsFromAttributeName("car.wheel]");
    }

    @Test
    public void extractArgument_wrongArguments_tooManySquareBrackets_lastExtracted() {
        assertEquals("BAR", extractArgumentsFromAttributeName("car.wheel[2].pressure[BAR]"));
    }

    @Test
    public void extractAttributeName_correctArguments() {
        assertEquals("car.wheel", extractAttributeNameNameWithoutArguments("car.wheel[left-front]"));
        assertEquals("car.wheel", extractAttributeNameNameWithoutArguments("car.wheel[123]"));
        assertEquals("car.wheel", extractAttributeNameNameWithoutArguments("car.wheel[.';'.]"));
        assertEquals("car.wheel", extractAttributeNameNameWithoutArguments("car.wheel[]"));
        assertEquals("car.wheel", extractAttributeNameNameWithoutArguments("car.wheel"));
    }

    @Test(expected = IllegalArgumentException.class)
    public void extractAttributeName_wrongArguments_noClosing() {
        extractAttributeNameNameWithoutArguments("car.wheel[left");
    }

    @Test(expected = IllegalArgumentException.class)
    public void extractAttributeName_wrongArguments_noArgument() {
        extractAttributeNameNameWithoutArguments("car.wheel[");
    }

    @Test(expected = IllegalArgumentException.class)
    public void extractAttributeName_wrongArguments_noOpening() {
        extractAttributeNameNameWithoutArguments("car.wheelleft]");
    }

    @Test(expected = IllegalArgumentException.class)
    public void extractAttributeName_wrongArguments_noArgument_noOpening() {
        extractAttributeNameNameWithoutArguments("car.wheel]");
    }

    @Test
    public void extractAttributeName_wrongArguments_tooManySquareBrackets_lastExtracted() {
        assertEquals("car.wheel[2].pressure", extractAttributeNameNameWithoutArguments("car.wheel[2].pressure[BAR]"));
    }

    /** No-op extractor used to verify successful instantiation. */
    public static class IqExtractor extends ValueExtractor<Object, Object> {
        @Override
        public void extract(Object target, Object arguments, ValueCollector collector) {
        }
    }

    /** Private constructor makes reflective instantiation fail with an access error. */
    public static class AccessExceptionExtractor extends NameExtractor {
        private AccessExceptionExtractor() {
        }
    }

    /** Abstract, so reflective instantiation fails with an instantiation error. */
    public static abstract class InitExceptionExtractor extends NameExtractor {
    }

    /** No-op extractor used to verify successful instantiation. */
    public static class NameExtractor extends ValueExtractor<Object, Object> {
        @Override
        public void extract(Object target, Object arguments, ValueCollector collector) {
        }
    }

    /** Delegates to ExtractorHelper with or without this test's class loader, per the parameter. */
    private ValueExtractor instantiateExtractor(MapAttributeConfig mapAttributeConfig) {
        return ExtractorHelper.instantiateExtractor(mapAttributeConfig,
                useClassloader ? this.getClass().getClassLoader() : null);
    }

    /** Delegates to ExtractorHelper with or without this test's class loader, per the parameter. */
    private Map<String, ValueExtractor> instantiateExtractors(List<MapAttributeConfig> mapAttributeConfigs) {
        return ExtractorHelper.instantiateExtractors(mapAttributeConfigs,
                useClassloader ? this.getClass().getClassLoader() : null);
    }
}
| |
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package BreakReminder;
import PaginaPrincipalAdmin.Fondo;
import java.awt.BorderLayout;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.concurrent.TimeUnit;
import javax.swing.JOptionPane;
import javax.swing.Timer;
/**
*
* @author ozzIE
*/
public class SetBreak extends javax.swing.JFrame {
DateFormat df6 = new SimpleDateFormat("HH:mm");
String Box1 ="No Break",Box2 ="No Lunch",Box3 ="No Break";
Date currentTime = new Date();
Timer t1;
Timer t2;
Timer t3;
Timer t4;
Timer t5;
Timer t6;
int flag=0;
/**
 * Creates the Break Reminder window: builds the generated form, centers it
 * on screen, fixes the size at 500x300 and disables resizing.
 */
public SetBreak() {
initComponents();
// null parent => center the window on the screen
setLocationRelativeTo(null);
setTitle("Break Reminder");
setSize(500, 300);
setResizable(false);
// Background panel left disabled — TODO confirm whether Fondo should be shown.
//Fondo f = new Fondo();
//this.add(f,BorderLayout.CENTER);
//this.pack();
}
/**
* This method is called from within the constructor to initialize the form.
* WARNING: Do NOT modify this code. The content of this method is always
* regenerated by the Form Editor.
*/
@SuppressWarnings("unchecked")
// <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents
private void initComponents() {
jPanel1 = new javax.swing.JPanel();
jLabel4 = new javax.swing.JLabel();
jPanel8 = new javax.swing.JPanel();
jScrollPane6 = new javax.swing.JScrollPane();
jTextArea6 = new javax.swing.JTextArea();
jPanel3 = new javax.swing.JPanel();
jLabel1 = new javax.swing.JLabel();
jLabel2 = new javax.swing.JLabel();
jLabel3 = new javax.swing.JLabel();
jComboBox1 = new javax.swing.JComboBox();
jComboBox2 = new javax.swing.JComboBox();
jComboBox3 = new javax.swing.JComboBox();
jButton1 = new javax.swing.JButton();
jButton3 = new javax.swing.JButton();
jButton2 = new javax.swing.JButton();
jLabel4.setFont(new java.awt.Font("Arial", 1, 18)); // NOI18N
jLabel4.setText("Ingresa la hora de tus breaks y lunch:");
javax.swing.GroupLayout jPanel1Layout = new javax.swing.GroupLayout(jPanel1);
jPanel1.setLayout(jPanel1Layout);
jPanel1Layout.setHorizontalGroup(
jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(jPanel1Layout.createSequentialGroup()
.addGap(50, 50, 50)
.addComponent(jLabel4, javax.swing.GroupLayout.PREFERRED_SIZE, 355, javax.swing.GroupLayout.PREFERRED_SIZE)
.addContainerGap(82, Short.MAX_VALUE))
);
jPanel1Layout.setVerticalGroup(
jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(jPanel1Layout.createSequentialGroup()
.addContainerGap()
.addComponent(jLabel4, javax.swing.GroupLayout.PREFERRED_SIZE, 46, javax.swing.GroupLayout.PREFERRED_SIZE)
.addContainerGap(javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE))
);
getContentPane().add(jPanel1, java.awt.BorderLayout.PAGE_START);
jTextArea6.setColumns(20);
jTextArea6.setRows(5);
jTextArea6.setText("\t \n Aqui puedes tomar notas de extra breaks, \n desconecciones que tuviste o etcetera");
jScrollPane6.setViewportView(jTextArea6);
javax.swing.GroupLayout jPanel8Layout = new javax.swing.GroupLayout(jPanel8);
jPanel8.setLayout(jPanel8Layout);
jPanel8Layout.setHorizontalGroup(
jPanel8Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(jPanel8Layout.createSequentialGroup()
.addGap(21, 21, 21)
.addComponent(jScrollPane6, javax.swing.GroupLayout.PREFERRED_SIZE, 438, javax.swing.GroupLayout.PREFERRED_SIZE)
.addContainerGap(28, Short.MAX_VALUE))
);
jPanel8Layout.setVerticalGroup(
jPanel8Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(javax.swing.GroupLayout.Alignment.TRAILING, jPanel8Layout.createSequentialGroup()
.addContainerGap(20, Short.MAX_VALUE)
.addComponent(jScrollPane6, javax.swing.GroupLayout.PREFERRED_SIZE, 85, javax.swing.GroupLayout.PREFERRED_SIZE)
.addContainerGap())
);
getContentPane().add(jPanel8, java.awt.BorderLayout.PAGE_END);
jLabel1.setText("Ingresa tu primer break:");
jLabel2.setText("Ingresa tu hora de Lunch:");
jLabel3.setText("Ingresa tu segundo break:");
jComboBox1.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "No Break", "4:30", "4:45", "5:00", "5:15", "5:30", "5:45", "6:00", "6:15", "6:30", "6:45", "7:00", "7:15", "7:30", "7:45", "8:00", "8:15", "8:30", "8:45", "9:00", "9:15", "9:30", "9:45", "10:00", "10:15", "10:30", "10:45", "11:00", "11:15", "11:30", "11:45", "12:00", "12:15", "12:30", "12:45", "13:00", "13:15", "13:30", "13:45", "14:00", "14:15", "14:30", "14:45", "15:00", "15:15", "15:30", "15:45", "16:00", "16:15", "16:30", "16:45", "17:00", "17:15", "17:30", "17:45", "18:00", "18:15", "18:30", "18:45", "19:00", "19:15", "19:30", "19:45", "20:00", "20:15", "20:30", "20:45", "21:00", "21:15", "21:30", "21:45", "22:00", "22:15", "22:30", "22:45", "23:00", "23:15", "23:30", "23:45" }));
jComboBox1.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
jComboBox1ActionPerformed(evt);
}
});
jComboBox2.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "No Lunch", "4:30", "4:45", "5:00", "5:15", "5:30", "5:45", "6:00", "6:15", "6:30", "6:45", "7:00", "7:15", "7:30", "7:45", "8:00", "8:15", "8:30", "8:45", "9:00", "9:15", "9:30", "9:45", "10:00", "10:15", "10:30", "10:45", "11:00", "11:15", "11:30", "11:45", "12:00", "12:15", "12:30", "12:45", "13:00", "13:15", "13:30", "13:45", "14:00", "14:15", "14:30", "14:45", "15:00", "15:15", "15:30", "15:45", "16:00", "16:15", "16:30", "16:45", "17:00", "17:15", "17:30", "17:45", "18:00", "18:15", "18:30", "18:45", "19:00", "19:15", "19:30", "19:45", "20:00", "20:15", "20:30", "20:45", "21:00", "21:15", "21:30", "21:45", "22:00", "22:15", "22:30", "22:45", "23:00", "23:15", "23:30", "23:45" }));
jComboBox2.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
jComboBox2ActionPerformed(evt);
}
});
jComboBox3.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "No Break", "4:30", "4:45", "5:00", "5:15", "5:30", "5:45", "6:00", "6:15", "6:30", "6:45", "7:00", "7:15", "7:30", "7:45", "8:00", "8:15", "8:30", "8:45", "9:00", "9:15", "9:30", "9:45", "10:00", "10:15", "10:30", "10:45", "11:00", "11:15", "11:30", "11:45", "12:00", "12:15", "12:30", "12:45", "13:00", "13:15", "13:30", "13:45", "14:00", "14:15", "14:30", "14:45", "15:00", "15:15", "15:30", "15:45", "16:00", "16:15", "16:30", "16:45", "17:00", "17:15", "17:30", "17:45", "18:00", "18:15", "18:30", "18:45", "19:00", "19:15", "19:30", "19:45", "20:00", "20:15", "20:30", "20:45", "21:00", "21:15", "21:30", "21:45", "22:00", "22:15", "22:30", "22:45", "23:00", "23:15", "23:30", "23:45" }));
jComboBox3.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
jComboBox3ActionPerformed(evt);
}
});
jButton1.setText("Aceptar");
jButton1.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
jButton1ActionPerformed(evt);
}
});
jButton3.setText("Cancelar");
jButton3.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
jButton3ActionPerformed(evt);
}
});
jButton2.setText("Actualizar");
jButton2.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
jButton2ActionPerformed(evt);
}
});
javax.swing.GroupLayout jPanel3Layout = new javax.swing.GroupLayout(jPanel3);
jPanel3.setLayout(jPanel3Layout);
jPanel3Layout.setHorizontalGroup(
jPanel3Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(jPanel3Layout.createSequentialGroup()
.addGap(57, 57, 57)
.addGroup(jPanel3Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jLabel3, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
.addComponent(jLabel2, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
.addComponent(jLabel1, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addGroup(jPanel3Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING, false)
.addComponent(jComboBox1, 0, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
.addComponent(jComboBox2, 0, 91, Short.MAX_VALUE)
.addComponent(jComboBox3, 0, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE))
.addGap(30, 30, 30)
.addGroup(jPanel3Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING, false)
.addComponent(jButton1, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
.addComponent(jButton3, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
.addComponent(jButton2, javax.swing.GroupLayout.DEFAULT_SIZE, 103, Short.MAX_VALUE))
.addGap(74, 74, 74))
);
jPanel3Layout.setVerticalGroup(
jPanel3Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(javax.swing.GroupLayout.Alignment.TRAILING, jPanel3Layout.createSequentialGroup()
.addGroup(jPanel3Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING)
.addGroup(jPanel3Layout.createSequentialGroup()
.addGroup(jPanel3Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jLabel1)
.addComponent(jComboBox1, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(jButton1))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addGroup(jPanel3Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING, false)
.addComponent(jComboBox2, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(jLabel2, javax.swing.GroupLayout.DEFAULT_SIZE, 23, Short.MAX_VALUE))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addGroup(jPanel3Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jLabel3, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
.addComponent(jComboBox3, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(jButton2, javax.swing.GroupLayout.PREFERRED_SIZE, 23, javax.swing.GroupLayout.PREFERRED_SIZE)))
.addGroup(jPanel3Layout.createSequentialGroup()
.addContainerGap(javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
.addComponent(jButton3, javax.swing.GroupLayout.PREFERRED_SIZE, 23, javax.swing.GroupLayout.PREFERRED_SIZE)
.addGap(31, 31, 31)))
.addContainerGap())
);
getContentPane().add(jPanel3, java.awt.BorderLayout.CENTER);
pack();
}// </editor-fold>//GEN-END:initComponents
private void jButton2ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButton2ActionPerformed
    // "Actualizar": restart every reminder timer that was actually created so
    // its countdown is measured from now. The original code attempted
    // t1.restart() .. t6.restart() in six copy-pasted try/catch blocks and used
    // a swallowed NullPointerException to detect uninitialised timers; an
    // explicit null check expresses the same behavior directly.
    for (javax.swing.Timer timer : new javax.swing.Timer[] { t1, t2, t3, t4, t5, t6 }) {
        if (timer != null) {
            timer.restart();
        } else {
            // Same console note the original printed from its catch block.
            System.out.println("Este timer no se a inicializado");
        }
    }
}//GEN-LAST:event_jButton2ActionPerformed
private void jButton3ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButton3ActionPerformed
    // "Cancelar": stop every reminder timer that was created, tell the user,
    // and hide the window. The original stopped t1..t6 in six copy-pasted
    // try/catch blocks, using a swallowed NullPointerException to skip timers
    // that were never initialised; a null check does the same thing explicitly.
    for (javax.swing.Timer timer : new javax.swing.Timer[] { t1, t2, t3, t4, t5, t6 }) {
        if (timer != null) {
            timer.stop();
        } else {
            // Same console note the original printed from its catch block.
            System.out.println("Este timer no se a inicializado");
        }
    }
    JOptionPane.showMessageDialog(null, "Se han cancelado los breaks!");
    setVisible(false);
}//GEN-LAST:event_jButton3ActionPerformed
private void jButton1ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButton1ActionPerformed
    // "Aceptar": for each selected time (Box1 = first break, Box2 = lunch,
    // Box3 = second break, all "H:mm" strings from the combo boxes), schedule
    // two reminders — a pre-warning 4 minutes before the time, and a second
    // alert at the time itself. Times already in the past are reported instead.
    // First break.
    if (!Box1.equals("No Break")) {
        try {
            long now = currentTimeInMinutes();
            long firstBreak = clockToMinutes(Box1);
            if (firstBreak >= now) {
                t1 = startReminder(firstBreak - now - 4, actions1);
                flag = 1;
                t4 = startReminder(firstBreak - now, actions4);
                flag = 4;
            } else {
                JOptionPane.showMessageDialog(null, "Tu primer break ya paso!");
            }
        } catch (Exception ex) {
            System.out.println("No se a seleccionado un break");
        }
    } else {
        JOptionPane.showMessageDialog(null, "No ingresaste un primer break");
    }
    // Lunch.
    if (!Box2.equals("No Lunch")) {
        try {
            long now = currentTimeInMinutes();
            long lunch = clockToMinutes(Box2);
            if (lunch >= now) {
                t2 = startReminder(lunch - now - 4, actions2);
                flag = 2;
                t5 = startReminder(lunch - now, actions5);
                flag = 5;
            } else {
                JOptionPane.showMessageDialog(null, "Tu Lunch ya paso!");
            }
        } catch (Exception ex) {
            System.out.println("No se a seleccionado un Lunch");
        }
    } else {
        JOptionPane.showMessageDialog(null, "No ingresaste un Lunch");
    }
    // Second break.
    if (!Box3.equals("No Break")) {
        try {
            long now = currentTimeInMinutes();
            long thirdBreak = clockToMinutes(Box3);
            if (thirdBreak >= now) {
                t3 = startReminder(thirdBreak - now - 4, actions3);
                flag = 3;
                t6 = startReminder(thirdBreak - now, actions6);
                flag = 6;
            } else {
                JOptionPane.showMessageDialog(null, "Tu otro break ya paso!");
            }
        } catch (Exception ex) {
            System.out.println("No seleccionaste tu segundo break");
        }
    } else {
        JOptionPane.showMessageDialog(null, "No seleccionaste tu segundo break");
    }
}//GEN-LAST:event_jButton1ActionPerformed

/**
 * Parses an "H:mm" clock string into minutes since midnight.
 * Malformed input throws (NumberFormatException /
 * ArrayIndexOutOfBoundsException) and is handled by the callers' catch blocks,
 * exactly as in the original inline parsing.
 */
private static long clockToMinutes(String clock) {
    String[] parts = clock.split(":");
    return TimeUnit.HOURS.toMinutes(Integer.parseInt(parts[0])) + Integer.parseInt(parts[1]);
}

/**
 * Current wall-clock time in minutes since midnight, read through the df6
 * formatter (assumed to produce an "H:mm"-style string — TODO confirm df6's
 * pattern; only the first two ':'-separated fields are used, as before).
 */
private long currentTimeInMinutes() {
    return clockToMinutes(df6.format(new Date()));
}

/**
 * Creates and starts a Swing timer firing after the given delay in minutes.
 * Bug fix: the delay is clamped at 0 — the original passed a negative delay
 * whenever a break was less than 4 minutes away, making the Timer constructor
 * throw IllegalArgumentException, which the broad catch swallowed and thereby
 * silently cancelled BOTH reminders for that break.
 */
private javax.swing.Timer startReminder(long delayMinutes, ActionListener listener) {
    javax.swing.Timer timer = new javax.swing.Timer(
            (int) TimeUnit.MINUTES.toMillis(Math.max(0, delayMinutes)), listener);
    timer.start();
    return timer;
}
private void jComboBox3ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jComboBox3ActionPerformed
// Cache the selected second-break time ("H:mm" or "No Break") for jButton1.
Box3 =(String)jComboBox3.getSelectedItem( );
}//GEN-LAST:event_jComboBox3ActionPerformed
private void jComboBox2ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jComboBox2ActionPerformed
// Cache the selected lunch time ("H:mm" or "No Lunch") for jButton1.
Box2 =(String)jComboBox2.getSelectedItem( );
}//GEN-LAST:event_jComboBox2ActionPerformed
private void jComboBox1ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jComboBox1ActionPerformed
// Cache the selected first-break time ("H:mm" or "No Break") for jButton1.
Box1 =(String)jComboBox1.getSelectedItem( );
}//GEN-LAST:event_jComboBox1ActionPerformed
// Timer callbacks. actions1..actions3 are the 4-minute pre-warnings for the
// first break, lunch and second break; actions4..actions6 fire at the break
// time itself. Each listener stops its own timer so the reminder is one-shot
// (javax.swing.Timer repeats by default).
// Pre-warning for the first break (driven by t1).
public ActionListener actions1 = new ActionListener() {
@Override
public void actionPerformed(ActionEvent e) {
JOptionPane.showMessageDialog(null,"Recuerda que tu break es a las "+Box1);
t1.stop();
}
};
// Pre-warning for lunch (driven by t2).
public ActionListener actions2 = new ActionListener() {
@Override
public void actionPerformed(ActionEvent e) {
JOptionPane.showMessageDialog(null,"Recuerda que tu Lunch es a las "+Box2);
t2.stop();
}
};
// Pre-warning for the second break (driven by t3).
public ActionListener actions3 = new ActionListener() {
@Override
public void actionPerformed(ActionEvent e) {
JOptionPane.showMessageDialog(null,"Recuerda que tu break es a las "+Box3);
t3.stop();
}
};
// Fires at the first break time itself (driven by t4).
public ActionListener actions4 = new ActionListener() {
@Override
public void actionPerformed(ActionEvent e) {
JOptionPane.showMessageDialog(null,"Ya te desconectaste? Es hora de tu break");
t4.stop();
}
};
// Fires at lunch time itself (driven by t5).
public ActionListener actions5 = new ActionListener() {
@Override
public void actionPerformed(ActionEvent e) {
JOptionPane.showMessageDialog(null,"Ya te desconectaste? Es hora de tu Lunch");
t5.stop();
}
};
// Fires at the second break time itself (driven by t6).
public ActionListener actions6 = new ActionListener() {
@Override
public void actionPerformed(ActionEvent e) {
JOptionPane.showMessageDialog(null,"Ya te desconectaste? Es hora de tu break");
t6.stop();
}
};
/**
 * Entry point: applies the Nimbus look and feel when available, then creates
 * and shows the break-configuration window on the Swing event dispatch thread.
 *
 * @param args the command line arguments (unused)
 */
public static void main(String args[]) {
    /* Set the Nimbus look and feel */
    //<editor-fold defaultstate="collapsed" desc=" Look and feel setting code (optional) ">
    /* If Nimbus (introduced in Java SE 6) is not available, stay with the default look and feel.
     * For details see http://download.oracle.com/javase/tutorial/uiswing/lookandfeel/plaf.html
     */
    try {
        for (javax.swing.UIManager.LookAndFeelInfo info : javax.swing.UIManager.getInstalledLookAndFeels()) {
            if ("Nimbus".equals(info.getName())) {
                javax.swing.UIManager.setLookAndFeel(info.getClassName());
                break;
            }
        }
    } catch (ClassNotFoundException ex) {
        java.util.logging.Logger.getLogger(SetBreak.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);
    } catch (InstantiationException ex) {
        java.util.logging.Logger.getLogger(SetBreak.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);
    } catch (IllegalAccessException ex) {
        java.util.logging.Logger.getLogger(SetBreak.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);
    } catch (javax.swing.UnsupportedLookAndFeelException ex) {
        java.util.logging.Logger.getLogger(SetBreak.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);
    }
    //</editor-fold>
    /* Create and display the form */
    java.awt.EventQueue.invokeLater(new Runnable() {
        public void run() {
            // Bug fix: the original built TWO SetBreak instances — one made
            // visible, and a second, never-shown one that received the close
            // operation — so the visible frame kept the default close behavior.
            SetBreak frame = new SetBreak();
            frame.setDefaultCloseOperation(HIDE_ON_CLOSE);
            frame.setVisible(true);
        }
    });
}
// Variables declaration - do not modify//GEN-BEGIN:variables
// NOTE(review): jLabel4, jPanel1-jPanel7, jScrollPane1-jScrollPane5 and
// jTextArea1-jTextArea5 are not referenced anywhere in the visible portion of
// this file — presumably wired up in the part of initComponents outside this
// chunk; verify before considering them dead.
private javax.swing.JButton jButton1;
private javax.swing.JButton jButton2;
private javax.swing.JButton jButton3;
private javax.swing.JComboBox jComboBox1;
private javax.swing.JComboBox jComboBox2;
private javax.swing.JComboBox jComboBox3;
private javax.swing.JLabel jLabel1;
private javax.swing.JLabel jLabel2;
private javax.swing.JLabel jLabel3;
private javax.swing.JLabel jLabel4;
private javax.swing.JPanel jPanel1;
private javax.swing.JPanel jPanel2;
private javax.swing.JPanel jPanel3;
private javax.swing.JPanel jPanel4;
private javax.swing.JPanel jPanel5;
private javax.swing.JPanel jPanel6;
private javax.swing.JPanel jPanel7;
private javax.swing.JPanel jPanel8;
private javax.swing.JScrollPane jScrollPane1;
private javax.swing.JScrollPane jScrollPane2;
private javax.swing.JScrollPane jScrollPane3;
private javax.swing.JScrollPane jScrollPane4;
private javax.swing.JScrollPane jScrollPane5;
private javax.swing.JScrollPane jScrollPane6;
private javax.swing.JTextArea jTextArea1;
private javax.swing.JTextArea jTextArea2;
private javax.swing.JTextArea jTextArea3;
private javax.swing.JTextArea jTextArea4;
private javax.swing.JTextArea jTextArea5;
private javax.swing.JTextArea jTextArea6;
// End of variables declaration//GEN-END:variables
}
| |
package co.com.codesoftware.servicio.general;
import java.math.BigDecimal;
import java.util.List;
import javax.jws.WebMethod;
import javax.jws.WebParam;
import javax.jws.WebResult;
import javax.jws.WebService;
import javax.xml.bind.annotation.XmlSeeAlso;
import javax.xml.datatype.XMLGregorianCalendar;
import javax.xml.ws.Action;
import javax.xml.ws.RequestWrapper;
import javax.xml.ws.ResponseWrapper;
/**
* This class was generated by the JAX-WS RI.
* JAX-WS RI 2.2.9-b130926.1035
* Generated source version: 2.2
*
*/
@WebService(name = "GeneralWS", targetNamespace = "http://general.servicio.codesoftware.com.co/")
@XmlSeeAlso({
ObjectFactory.class
})
public interface GeneralWS {
/**
 * Fetches every sede (branch office / site) registered in the system.
 *
 * @return
 * returns java.util.List<co.com.codesoftware.servicio.general.SedeEntity>
 */
@WebMethod
@WebResult(targetNamespace = "")
@RequestWrapper(localName = "obtenerSedes", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.ObtenerSedes")
@ResponseWrapper(localName = "obtenerSedesResponse", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.ObtenerSedesResponse")
@Action(input = "http://general.servicio.codesoftware.com.co/GeneralWS/obtenerSedesRequest", output = "http://general.servicio.codesoftware.com.co/GeneralWS/obtenerSedesResponse")
public List<SedeEntity> obtenerSedes();
/**
 * Updates an existing sede with the data carried by the given entity.
 *
 * @param sede the sede to persist
 * @return
 * returns java.lang.String (presumably a status/outcome message — confirm
 * against the service implementation)
 */
@WebMethod
@WebResult(targetNamespace = "")
@RequestWrapper(localName = "actualizarSede", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.ActualizarSede")
@ResponseWrapper(localName = "actualizarSedeResponse", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.ActualizarSedeResponse")
@Action(input = "http://general.servicio.codesoftware.com.co/GeneralWS/actualizarSedeRequest", output = "http://general.servicio.codesoftware.com.co/GeneralWS/actualizarSedeResponse")
public String actualizarSede(
@WebParam(name = "sede", targetNamespace = "")
SedeEntity sede);
/**
 * Generates a report from the two key/value maps supplied.
 * NOTE(review): the parameter names are generator defaults (arg0/arg1), so
 * their meaning cannot be determined from this interface alone — check the
 * service implementation or the originating WSDL for what each map carries.
 *
 * @param arg1 second map of report inputs (semantics unknown here)
 * @param arg0 first map of report inputs (semantics unknown here)
 * @return
 * returns java.lang.String
 */
@WebMethod
@WebResult(targetNamespace = "")
@RequestWrapper(localName = "generaReportes", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.GeneraReportes")
@ResponseWrapper(localName = "generaReportesResponse", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.GeneraReportesResponse")
@Action(input = "http://general.servicio.codesoftware.com.co/GeneralWS/generaReportesRequest", output = "http://general.servicio.codesoftware.com.co/GeneralWS/generaReportesResponse")
public String generaReportes(
@WebParam(name = "arg0", targetNamespace = "")
List<MapaEntity> arg0,
@WebParam(name = "arg1", targetNamespace = "")
List<MapaEntity> arg1);
/**
 * Fetches the full list of cities.
 *
 * @return
 * returns java.util.List<co.com.codesoftware.servicio.general.CiudadEntity>
 */
@WebMethod
@WebResult(name = "listaCiudades", targetNamespace = "")
@RequestWrapper(localName = "obtenerCiudades", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.ObtenerCiudades")
@ResponseWrapper(localName = "obtenerCiudadesResponse", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.ObtenerCiudadesResponse")
@Action(input = "http://general.servicio.codesoftware.com.co/GeneralWS/obtenerCiudadesRequest", output = "http://general.servicio.codesoftware.com.co/GeneralWS/obtenerCiudadesResponse")
public List<CiudadEntity> obtenerCiudades();
/**
 * Looks up the principal payment record tied to a remision (delivery note)
 * and/or an invoice.
 *
 * @param idRemision id of the remision to query
 * @param idFactura id of the invoice to query
 * @return
 * returns co.com.codesoftware.servicio.general.PagoRemisionEntity
 */
@WebMethod
@WebResult(name = "pagoRemision", targetNamespace = "")
@RequestWrapper(localName = "obtenerPrincPago", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.ObtenerPrincPago")
@ResponseWrapper(localName = "obtenerPrincPagoResponse", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.ObtenerPrincPagoResponse")
@Action(input = "http://general.servicio.codesoftware.com.co/GeneralWS/obtenerPrincPagoRequest", output = "http://general.servicio.codesoftware.com.co/GeneralWS/obtenerPrincPagoResponse")
public PagoRemisionEntity obtenerPrincPago(
@WebParam(name = "idRemision", targetNamespace = "")
int idRemision,
@WebParam(name = "idFactura", targetNamespace = "")
int idFactura);
/**
 * Sends an e-mail built from the given request wrapper.
 *
 * @param mensajeCorreo message payload (recipients, subject, body — see
 * CorreoWrapperRequest)
 * @return
 * returns java.lang.String
 */
@WebMethod
@WebResult(targetNamespace = "")
@RequestWrapper(localName = "enviaCorreo", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.EnviaCorreo")
@ResponseWrapper(localName = "enviaCorreoResponse", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.EnviaCorreoResponse")
@Action(input = "http://general.servicio.codesoftware.com.co/GeneralWS/enviaCorreoRequest", output = "http://general.servicio.codesoftware.com.co/GeneralWS/enviaCorreoResponse")
public String enviaCorreo(
@WebParam(name = "mensajeCorreo", targetNamespace = "")
CorreoWrapperRequest mensajeCorreo);
/**
 * Generates an Excel export from a raw SQL string supplied by the caller.
 * SECURITY NOTE(review): accepting arbitrary SQL over a web service is a
 * direct SQL-injection / data-exfiltration vector — the implementation must
 * restrict or validate this input; flagging rather than changing the
 * generated contract here.
 *
 * @param sql the SQL query to run and export
 * @return
 * returns java.lang.String
 */
@WebMethod
@WebResult(targetNamespace = "")
@RequestWrapper(localName = "generarExcelSql", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.GenerarExcelSql")
@ResponseWrapper(localName = "generarExcelSqlResponse", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.GenerarExcelSqlResponse")
@Action(input = "http://general.servicio.codesoftware.com.co/GeneralWS/generarExcelSqlRequest", output = "http://general.servicio.codesoftware.com.co/GeneralWS/generarExcelSqlResponse")
public String generarExcelSql(
@WebParam(name = "sql", targetNamespace = "")
String sql);
/**
 * Finds the payment documents related to a remision for the given document
 * type and id.
 *
 * @param idDocumento id of the document to look up
 * @param tipoDoc document type code (semantics defined by the service)
 * @return
 * returns co.com.codesoftware.servicio.general.RelFacRemiGenEntity
 */
@WebMethod
@WebResult(targetNamespace = "")
@RequestWrapper(localName = "buscaDocumentosPagosRemi", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.BuscaDocumentosPagosRemi")
@ResponseWrapper(localName = "buscaDocumentosPagosRemiResponse", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.BuscaDocumentosPagosRemiResponse")
@Action(input = "http://general.servicio.codesoftware.com.co/GeneralWS/buscaDocumentosPagosRemiRequest", output = "http://general.servicio.codesoftware.com.co/GeneralWS/buscaDocumentosPagosRemiResponse")
public RelFacRemiGenEntity buscaDocumentosPagosRemi(
@WebParam(name = "tipoDoc", targetNamespace = "")
String tipoDoc,
@WebParam(name = "idDocumento", targetNamespace = "")
int idDocumento);
/**
 * Creates an invoice (factura) from an existing remision.
 *
 * @param idRemision id of the remision to invoice
 * @param idRsfa id of the invoicing resolution to use
 * @param diasPlazo payment-term days
 * @param idTius id of the acting user
 * @param retefuente withholding-tax flag/value (string-encoded; confirm the
 * expected format with the service implementation)
 * @return
 * returns java.lang.String
 */
@WebMethod
@WebResult(name = "respuesta", targetNamespace = "")
@RequestWrapper(localName = "realizarFacturaXRemision", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.RealizarFacturaXRemision")
@ResponseWrapper(localName = "realizarFacturaXRemisionResponse", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.RealizarFacturaXRemisionResponse")
@Action(input = "http://general.servicio.codesoftware.com.co/GeneralWS/realizarFacturaXRemisionRequest", output = "http://general.servicio.codesoftware.com.co/GeneralWS/realizarFacturaXRemisionResponse")
public String realizarFacturaXRemision(
@WebParam(name = "idRemision", targetNamespace = "")
int idRemision,
@WebParam(name = "idTius", targetNamespace = "")
int idTius,
@WebParam(name = "idRsfa", targetNamespace = "")
int idRsfa,
@WebParam(name = "diasPlazo", targetNamespace = "")
int diasPlazo,
@WebParam(name = "retefuente", targetNamespace = "")
String retefuente);
/**
 * Returns the product detail lines of a remision.
 *
 * @param idRemision id of the remision whose detail is requested
 * @return
 * returns java.util.List<co.com.codesoftware.servicio.general.DetProdRemision>
 */
@WebMethod
@WebResult(name = "detalles", targetNamespace = "")
@RequestWrapper(localName = "obtenerDetalleRemision", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.ObtenerDetalleRemision")
@ResponseWrapper(localName = "obtenerDetalleRemisionResponse", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.ObtenerDetalleRemisionResponse")
@Action(input = "http://general.servicio.codesoftware.com.co/GeneralWS/obtenerDetalleRemisionRequest", output = "http://general.servicio.codesoftware.com.co/GeneralWS/obtenerDetalleRemisionResponse")
public List<DetProdRemision> obtenerDetalleRemision(
@WebParam(name = "idRemision", targetNamespace = "")
int idRemision);
/**
 * Returns the remisiones of a client within a date range.
 *
 * @param idCliente id of the client
 * @param fechaIni range start date
 * @param fechafin range end date
 * @return
 * returns java.util.List<co.com.codesoftware.servicio.general.RemisionEntity>
 */
@WebMethod
@WebResult(name = "remisiones", targetNamespace = "")
@RequestWrapper(localName = "obtenerRemisionesXCliente", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.ObtenerRemisionesXCliente")
@ResponseWrapper(localName = "obtenerRemisionesXClienteResponse", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.ObtenerRemisionesXClienteResponse")
@Action(input = "http://general.servicio.codesoftware.com.co/GeneralWS/obtenerRemisionesXClienteRequest", output = "http://general.servicio.codesoftware.com.co/GeneralWS/obtenerRemisionesXClienteResponse")
public List<RemisionEntity> obtenerRemisionesXCliente(
@WebParam(name = "idCliente", targetNamespace = "")
int idCliente,
@WebParam(name = "fechaIni", targetNamespace = "")
XMLGregorianCalendar fechaIni,
@WebParam(name = "fechafin", targetNamespace = "")
XMLGregorianCalendar fechafin);
/**
 * Returns the invoicing resolutions (resoluciones de facturación) available.
 *
 * @return
 * returns java.util.List<co.com.codesoftware.servicio.general.ResolucionFactEntity>
 */
@WebMethod
@WebResult(name = "resoluciones", targetNamespace = "")
@RequestWrapper(localName = "obtenerResolucionesFact", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.ObtenerResolucionesFact")
@ResponseWrapper(localName = "obtenerResolucionesFactResponse", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.ObtenerResolucionesFactResponse")
@Action(input = "http://general.servicio.codesoftware.com.co/GeneralWS/obtenerResolucionesFactRequest", output = "http://general.servicio.codesoftware.com.co/GeneralWS/obtenerResolucionesFactResponse")
public List<ResolucionFactEntity> obtenerResolucionesFact();
/**
 * Updates an existing invoicing resolution.
 *
 * @param objEntity resolution data to persist
 * @return
 * returns java.lang.String
 */
@WebMethod
@WebResult(name = "respuesta", targetNamespace = "")
@RequestWrapper(localName = "actualizaResolucion", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.ActualizaResolucion")
@ResponseWrapper(localName = "actualizaResolucionResponse", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.ActualizaResolucionResponse")
@Action(input = "http://general.servicio.codesoftware.com.co/GeneralWS/actualizaResolucionRequest", output = "http://general.servicio.codesoftware.com.co/GeneralWS/actualizaResolucionResponse")
public String actualizaResolucion(
@WebParam(name = "objEntity", targetNamespace = "")
ResolucionFactEntity objEntity);
/**
 * Inserts a new invoicing resolution.
 *
 * @param objEntity resolution data to create
 * @return
 * returns java.lang.String
 */
@WebMethod
@WebResult(name = "respuesta", targetNamespace = "")
@RequestWrapper(localName = "insertarResolucion", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.InsertarResolucion")
@ResponseWrapper(localName = "insertarResolucionResponse", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.InsertarResolucionResponse")
@Action(input = "http://general.servicio.codesoftware.com.co/GeneralWS/insertarResolucionRequest", output = "http://general.servicio.codesoftware.com.co/GeneralWS/insertarResolucionResponse")
public String insertarResolucion(
@WebParam(name = "objEntity", targetNamespace = "")
ResolucionFactEntity objEntity);
/**
 * Returns the company-level configuration parameters.
 *
 * @return
 * returns java.util.List<co.com.codesoftware.servicio.general.ParametrosEmpresaEntity>
 */
@WebMethod
@WebResult(name = "parametros", targetNamespace = "")
@RequestWrapper(localName = "obtenerParametrosEmpresa", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.ObtenerParametrosEmpresa")
@ResponseWrapper(localName = "obtenerParametrosEmpresaResponse", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.ObtenerParametrosEmpresaResponse")
@Action(input = "http://general.servicio.codesoftware.com.co/GeneralWS/obtenerParametrosEmpresaRequest", output = "http://general.servicio.codesoftware.com.co/GeneralWS/obtenerParametrosEmpresaResponse")
public List<ParametrosEmpresaEntity> obtenerParametrosEmpresa();
/**
*
* @param valor
* @return
* returns java.math.BigDecimal
*/
@WebMethod
@WebResult(name = "valor", targetNamespace = "")
@RequestWrapper(localName = "obtenerValorVentasMes", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.ObtenerValorVentasMes")
@ResponseWrapper(localName = "obtenerValorVentasMesResponse", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.ObtenerValorVentasMesResponse")
@Action(input = "http://general.servicio.codesoftware.com.co/GeneralWS/obtenerValorVentasMesRequest", output = "http://general.servicio.codesoftware.com.co/GeneralWS/obtenerValorVentasMesResponse")
public BigDecimal obtenerValorVentasMes(
@WebParam(name = "valor", targetNamespace = "")
Integer valor);
/**
*
* @param permiso
* @return
* returns java.util.List<co.com.codesoftware.servicio.general.UsuarioEntity>
*/
@WebMethod
@WebResult(name = "listaUsuarios", targetNamespace = "")
@RequestWrapper(localName = "obtenerUsuariosXPermiso", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.ObtenerUsuariosXPermiso")
@ResponseWrapper(localName = "obtenerUsuariosXPermisoResponse", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.ObtenerUsuariosXPermisoResponse")
@Action(input = "http://general.servicio.codesoftware.com.co/GeneralWS/obtenerUsuariosXPermisoRequest", output = "http://general.servicio.codesoftware.com.co/GeneralWS/obtenerUsuariosXPermisoResponse")
public List<UsuarioEntity> obtenerUsuariosXPermiso(
@WebParam(name = "permiso", targetNamespace = "")
String permiso);
    /**
     * Invokes the {@code obtenerDetallePagos} web-service operation
     * (per the operation name, obtains the detail entries of a payment;
     * confirm exact semantics against the WSDL — this stub is generated code).
     *
     * @param idPago identifier of the payment whose detail is requested
     * @return the {@code detallePago} response element;
     *         returns java.util.List&lt;co.com.codesoftware.servicio.general.DetallePagoRemision&gt;
     */
    @WebMethod
    @WebResult(name = "detallePago", targetNamespace = "")
    @RequestWrapper(localName = "obtenerDetallePagos", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.ObtenerDetallePagos")
    @ResponseWrapper(localName = "obtenerDetallePagosResponse", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.ObtenerDetallePagosResponse")
    @Action(input = "http://general.servicio.codesoftware.com.co/GeneralWS/obtenerDetallePagosRequest", output = "http://general.servicio.codesoftware.com.co/GeneralWS/obtenerDetallePagosResponse")
    public List<DetallePagoRemision> obtenerDetallePagos(
        @WebParam(name = "idPago", targetNamespace = "")
        int idPago);
    /**
     * Invokes the {@code actualizaParametro} web-service operation
     * (per the operation name, updates the parameter identified by {@code clave}
     * with {@code nuevoValor}; confirm exact semantics against the WSDL).
     *
     * NOTE(review): the {@code @WebResult} name "listaCiudades" looks copy-pasted from the
     * ciudades operations below. This file is generated from the WSDL, so the name must match
     * the wire element — verify against the WSDL and regenerate rather than hand-editing.
     *
     * @param clave key of the parameter to update
     * @param nuevoValor new value to assign
     * @return returns java.lang.String
     */
    @WebMethod
    @WebResult(name = "listaCiudades", targetNamespace = "")
    @RequestWrapper(localName = "actualizaParametro", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.ActualizaParametro")
    @ResponseWrapper(localName = "actualizaParametroResponse", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.ActualizaParametroResponse")
    @Action(input = "http://general.servicio.codesoftware.com.co/GeneralWS/actualizaParametroRequest", output = "http://general.servicio.codesoftware.com.co/GeneralWS/actualizaParametroResponse")
    public String actualizaParametro(
        @WebParam(name = "clave", targetNamespace = "")
        String clave,
        @WebParam(name = "nuevoValor", targetNamespace = "")
        String nuevoValor);
    /**
     * Invokes the {@code obtenerCiudadesXDepartamento} web-service operation
     * (per the operation name, obtains the cities of a department;
     * confirm exact semantics against the WSDL — this stub is generated code).
     *
     * @param idDepto identifier of the department
     * @return the {@code listaCiudades} response element;
     *         returns java.util.List&lt;co.com.codesoftware.servicio.general.CiudadEntity&gt;
     */
    @WebMethod
    @WebResult(name = "listaCiudades", targetNamespace = "")
    @RequestWrapper(localName = "obtenerCiudadesXDepartamento", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.ObtenerCiudadesXDepartamento")
    @ResponseWrapper(localName = "obtenerCiudadesXDepartamentoResponse", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.ObtenerCiudadesXDepartamentoResponse")
    @Action(input = "http://general.servicio.codesoftware.com.co/GeneralWS/obtenerCiudadesXDepartamentoRequest", output = "http://general.servicio.codesoftware.com.co/GeneralWS/obtenerCiudadesXDepartamentoResponse")
    public List<CiudadEntity> obtenerCiudadesXDepartamento(
        @WebParam(name = "idDepto", targetNamespace = "")
        Integer idDepto);
    /**
     * Invokes the {@code obtenerDepartamentos} web-service operation; takes no arguments.
     *
     * @return the {@code listaDepartamentos} response element;
     *         returns java.util.List&lt;co.com.codesoftware.servicio.general.DepartamentoEntity&gt;
     */
    @WebMethod
    @WebResult(name = "listaDepartamentos", targetNamespace = "")
    @RequestWrapper(localName = "obtenerDepartamentos", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.ObtenerDepartamentos")
    @ResponseWrapper(localName = "obtenerDepartamentosResponse", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.ObtenerDepartamentosResponse")
    @Action(input = "http://general.servicio.codesoftware.com.co/GeneralWS/obtenerDepartamentosRequest", output = "http://general.servicio.codesoftware.com.co/GeneralWS/obtenerDepartamentosResponse")
    public List<DepartamentoEntity> obtenerDepartamentos();
    /**
     * Invokes the {@code ejecutaPagoRemision} web-service operation
     * (per the operation name, executes a payment on a remission;
     * confirm exact semantics against the WSDL — this stub is generated code).
     *
     * Parameters listed in declaration order:
     *
     * @param idTius user (tius) identifier
     * @param idFact invoice/remission identifier
     * @param valorPago amount to pay
     * @param tipoPago payment type code
     * @param pagoTotal flag-like value indicating whether this is a full payment — TODO confirm expected values
     * @return returns java.lang.String
     */
    @WebMethod
    @WebResult(targetNamespace = "")
    @RequestWrapper(localName = "ejecutaPagoRemision", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.EjecutaPagoRemision")
    @ResponseWrapper(localName = "ejecutaPagoRemisionResponse", targetNamespace = "http://general.servicio.codesoftware.com.co/", className = "co.com.codesoftware.servicio.general.EjecutaPagoRemisionResponse")
    @Action(input = "http://general.servicio.codesoftware.com.co/GeneralWS/ejecutaPagoRemisionRequest", output = "http://general.servicio.codesoftware.com.co/GeneralWS/ejecutaPagoRemisionResponse")
    public String ejecutaPagoRemision(
        @WebParam(name = "idTius", targetNamespace = "")
        int idTius,
        @WebParam(name = "idFact", targetNamespace = "")
        int idFact,
        @WebParam(name = "valorPago", targetNamespace = "")
        BigDecimal valorPago,
        @WebParam(name = "tipoPago", targetNamespace = "")
        String tipoPago,
        @WebParam(name = "pagoTotal", targetNamespace = "")
        String pagoTotal);
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.processors.standard;
import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.PriorityBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.nifi.flowfile.FlowFile;
import org.apache.nifi.flowfile.attributes.CoreAttributes;
import org.apache.nifi.logging.ComponentLog;
import org.apache.nifi.processor.AbstractProcessor;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.processor.Relationship;
import org.apache.nifi.annotation.lifecycle.OnScheduled;
import org.apache.nifi.processor.exception.FlowFileAccessException;
import org.apache.nifi.processors.standard.util.FileInfo;
import org.apache.nifi.processors.standard.util.FileTransfer;
import org.apache.nifi.util.StopWatch;
/**
* Base class for GetSFTP and GetFTP
*/
public abstract class GetFileTransfer extends AbstractProcessor {

    public static final Relationship REL_SUCCESS = new Relationship.Builder()
        .name("success")
        .description("All FlowFiles that are received are routed to success")
        .build();

    private final Set<Relationship> relationships;

    // Names of FlowFile attributes populated from the remote file's metadata; see getAttributesFromFile().
    public static final String FILE_LAST_MODIFY_TIME_ATTRIBUTE = "file.lastModifiedTime";
    public static final String FILE_OWNER_ATTRIBUTE = "file.owner";
    public static final String FILE_GROUP_ATTRIBUTE = "file.group";
    public static final String FILE_PERMISSIONS_ATTRIBUTE = "file.permissions";
    // Pattern used to render the last-modified timestamp into FILE_LAST_MODIFY_TIME_ATTRIBUTE.
    public static final String FILE_MODIFY_DATE_ATTR_FORMAT = "yyyy-MM-dd'T'HH:mm:ssZ";

    // Epoch millis of the last successful remote listing; -1 until the first listing completes.
    private final AtomicLong lastPollTime = new AtomicLong(-1L);
    // Ensures only one thread performs (or resets) the remote listing at a time.
    private final Lock listingLock = new ReentrantLock();
    // Queue of files discovered by the listing and awaiting retrieval; reset to null on re-schedule
    // so a new queue (possibly of a different type) is created on the next listing.
    private final AtomicReference<BlockingQueue<FileInfo>> fileQueueRef = new AtomicReference<>();
    // Files currently being retrieved, so a concurrent listing does not re-enqueue them.
    private final Set<FileInfo> processing = Collections.synchronizedSet(new HashSet<FileInfo>());

    // Used when transferring filenames from the File Queue to the processing queue; multiple threads can do this
    // simultaneously using the sharableTransferLock; however, in order to check if either has a given file, the
    // mutually exclusive lock is required.
    private final ReadWriteLock transferLock = new ReentrantReadWriteLock();
    private final Lock sharableTransferLock = transferLock.readLock();
    private final Lock mutuallyExclusiveTransferLock = transferLock.writeLock();

    public GetFileTransfer() {
        final Set<Relationship> relationships = new HashSet<>();
        relationships.add(REL_SUCCESS);
        this.relationships = Collections.unmodifiableSet(relationships);
    }

    @Override
    public Set<Relationship> getRelationships() {
        return relationships;
    }

    /**
     * @param context the current process context
     * @return the FileTransfer implementation (supplied by the concrete subclass, e.g. FTP or SFTP)
     *         used for listing and retrieving remote files
     */
    protected abstract FileTransfer getFileTransfer(final ProcessContext context);

    /**
     * Discards any queued listing when the processor is (re)scheduled, so stale entries from a
     * previous configuration are not processed.
     */
    @OnScheduled
    public void onScheduled(final ProcessContext context) {
        listingLock.lock();
        try {
            final BlockingQueue<FileInfo> fileQueue = fileQueueRef.get();
            if (fileQueue != null) {
                fileQueue.clear();
            }
            fileQueueRef.set(null); // create new queue on next listing, in case queue type needs to change
        } finally {
            listingLock.unlock();
        }
    }

    /**
     * Performs a remote listing when the polling interval has elapsed (and the queue is not already
     * well-stocked), then retrieves up to MAX_SELECTS files from the queue, emitting one FlowFile per
     * file to REL_SUCCESS. The session is committed after each file so every retrieved file is
     * persisted independently; on retrieval failure the session is rolled back and the connection closed.
     */
    @Override
    public void onTrigger(final ProcessContext context, final ProcessSession session) {
        final long pollingIntervalMillis = context.getProperty(FileTransfer.POLLING_INTERVAL).asTimePeriod(TimeUnit.MILLISECONDS);
        final long nextPollTime = lastPollTime.get() + pollingIntervalMillis;
        BlockingQueue<FileInfo> fileQueue = fileQueueRef.get();
        final ComponentLog logger = getLogger();

        // do not do the listing if there are already 100 or more items in our queue
        // 100 is really just a magic number that seems to work out well in practice
        FileTransfer transfer = null;
        // tryLock (not lock): if another thread is already listing, skip the listing rather than block.
        if (System.currentTimeMillis() >= nextPollTime && (fileQueue == null || fileQueue.size() < 100) && listingLock.tryLock()) {
            try {
                transfer = getFileTransfer(context);
                try {
                    fetchListing(context, session, transfer);
                    lastPollTime.set(System.currentTimeMillis());
                } catch (final IOException e) {
                    context.yield();
                    try {
                        transfer.close();
                    } catch (final IOException e1) {
                        logger.warn("Unable to close connection due to {}", new Object[]{e1});
                    }
                    logger.error("Unable to fetch listing from remote server due to {}", new Object[]{e});
                    return;
                }
            } finally {
                listingLock.unlock();
            }
        }

        // re-read the reference: fetchListing may have just created the queue
        fileQueue = fileQueueRef.get();
        if (fileQueue == null || fileQueue.isEmpty()) {
            // nothing to do!
            context.yield();
            if (transfer != null) {
                try {
                    transfer.close();
                } catch (final IOException e1) {
                    logger.warn("Unable to close connection due to {}", new Object[]{e1});
                }
            }
            return;
        }

        final String hostname = context.getProperty(FileTransfer.HOSTNAME).evaluateAttributeExpressions().getValue();
        final boolean deleteOriginal = context.getProperty(FileTransfer.DELETE_ORIGINAL).asBoolean();
        final int maxSelects = context.getProperty(FileTransfer.MAX_SELECTS).asInteger();
        if (transfer == null) {
            transfer = getFileTransfer(context);
        }

        try {
            for (int i = 0; i < maxSelects && isScheduled(); i++) {
                final FileInfo file;
                // read lock: concurrent onTrigger threads may poll the queue simultaneously; the
                // write lock in fetchListing excludes them while duplicate checks are performed.
                sharableTransferLock.lock();
                try {
                    file = fileQueue.poll();
                    if (file == null) {
                        return;
                    }
                    // mark as in-flight before releasing the lock so a listing won't re-enqueue it
                    processing.add(file);
                } finally {
                    sharableTransferLock.unlock();
                }

                // derive path/filename attributes from the remote path
                File relativeFile = new File(file.getFullPathFileName());
                final String parentRelativePath = (null == relativeFile.getParent()) ? "" : relativeFile.getParent();
                final String parentRelativePathString = parentRelativePath + "/";
                final Path absPath = relativeFile.toPath().toAbsolutePath();
                final String absPathString = absPath.getParent().toString() + "/";

                try {
                    FlowFile flowFile = session.create();
                    final StopWatch stopWatch = new StopWatch(false);
                    stopWatch.start();
                    flowFile = transfer.getRemoteFile(file.getFullPathFileName(), flowFile, session);
                    stopWatch.stop();
                    final long millis = stopWatch.getDuration(TimeUnit.MILLISECONDS);
                    final String dataRate = stopWatch.calculateDataRate(flowFile.getSize());
                    // e.g. "getsftp.remote.source" / "getftp.remote.source" depending on the subclass
                    flowFile = session.putAttribute(flowFile, this.getClass().getSimpleName().toLowerCase() + ".remote.source", hostname);
                    flowFile = session.putAttribute(flowFile, CoreAttributes.PATH.key(), parentRelativePathString);
                    flowFile = session.putAttribute(flowFile, CoreAttributes.FILENAME.key(), relativeFile.getName());
                    flowFile = session.putAttribute(flowFile, CoreAttributes.ABSOLUTE_PATH.key(), absPathString);
                    Map<String, String> attributes = getAttributesFromFile(file);
                    if (attributes.size() > 0) {
                        flowFile = session.putAllAttributes(flowFile, attributes);
                    }

                    if (deleteOriginal) {
                        try {
                            transfer.deleteFile(flowFile, null, file.getFullPathFileName());
                        } catch (final IOException e) {
                            // note: message says "deleting local copy" but the FlowFile is removed
                            // so the file is not emitted without its remote copy being deleted
                            logger.error("Failed to remove remote file {} due to {}; deleting local copy",
                                new Object[]{file.getFullPathFileName(), e});
                            session.remove(flowFile);
                            return;
                        }
                    }

                    session.getProvenanceReporter().receive(flowFile, transfer.getProtocolName() + "://" + hostname + "/" + file.getFullPathFileName(), millis);
                    session.transfer(flowFile, REL_SUCCESS);
                    logger.info("Successfully retrieved {} from {} in {} milliseconds at a rate of {} and transferred to success",
                        new Object[]{flowFile, hostname, millis, dataRate});
                    // commit per file so each retrieved file is persisted independently
                    session.commit();
                } catch (final IOException e) {
                    context.yield();
                    logger.error("Unable to retrieve file {} due to {}", new Object[]{file.getFullPathFileName(), e});
                    try {
                        transfer.close();
                    } catch (IOException e1) {
                        logger.warn("Unable to close connection to remote host due to {}", new Object[]{e1});
                    }
                    session.rollback();
                    return;
                } catch (final FlowFileAccessException e) {
                    context.yield();
                    logger.error("Unable to retrieve file {} due to {}", new Object[]{file.getFullPathFileName(), e.getCause()}, e);
                    try {
                        transfer.close();
                    } catch (IOException e1) {
                        logger.warn("Unable to close connection to remote host due to {}", e1);
                    }
                    session.rollback();
                    return;
                } finally {
                    // no longer in-flight, whether the retrieval succeeded or failed
                    processing.remove(file);
                }
            }
        } finally {
            // the connection is closed after every trigger, including the early returns above
            try {
                transfer.close();
            } catch (final IOException e) {
                logger.warn("Failed to close connection to {} due to {}", new Object[]{hostname, e});
            }
        }
    }

    /**
     * Maps a listing entry's metadata to FlowFile attribute key/value pairs.
     *
     * @param info the listing entry; may be null
     * @return attributes for last-modified time, permissions, owner, and group;
     *         empty map when info is null
     */
    protected Map<String, String> getAttributesFromFile(FileInfo info) {
        Map<String, String> attributes = new HashMap<>();
        if (info != null) {
            // a fresh SimpleDateFormat per call (SimpleDateFormat is not thread-safe)
            final DateFormat formatter = new SimpleDateFormat(FILE_MODIFY_DATE_ATTR_FORMAT, Locale.US);
            attributes.put(FILE_LAST_MODIFY_TIME_ATTRIBUTE, formatter.format(new Date(info.getLastModifiedTime())));
            attributes.put(FILE_PERMISSIONS_ATTRIBUTE, info.getPermissions());
            attributes.put(FILE_OWNER_ATTRIBUTE, info.getOwner());
            attributes.put(FILE_GROUP_ATTRIBUTE, info.getGroup());
        }
        return attributes;
    }

    /**
     * Lists the remote directory and enqueues entries that are neither already queued nor currently
     * being processed. Creates the queue on first use: a PriorityBlockingQueue when natural ordering
     * is requested, otherwise a LinkedBlockingQueue (both capped at 25000 entries).
     *
     * must be called while holding the listingLock
     */
    private void fetchListing(final ProcessContext context, final ProcessSession session, final FileTransfer transfer) throws IOException {
        BlockingQueue<FileInfo> queue = fileQueueRef.get();
        if (queue == null) {
            final boolean useNaturalOrdering = context.getProperty(FileTransfer.USE_NATURAL_ORDERING).asBoolean();
            queue = useNaturalOrdering ? new PriorityBlockingQueue<FileInfo>(25000) : new LinkedBlockingQueue<FileInfo>(25000);
            fileQueueRef.set(queue);
        }

        final StopWatch stopWatch = new StopWatch(true);
        final List<FileInfo> listing = transfer.getListing();
        final long millis = stopWatch.getElapsed(TimeUnit.MILLISECONDS);

        int newItems = 0;
        // write lock: excludes the queue->processing transfers in onTrigger so the
        // contains() checks and offer() below see a consistent view
        mutuallyExclusiveTransferLock.lock();
        try {
            for (final FileInfo file : listing) {
                if (!queue.contains(file) && !processing.contains(file)) {
                    if (!queue.offer(file)) {
                        // queue is full (capacity 25000); remaining entries will be picked up by a later listing
                        break;
                    }
                    newItems++;
                }
            }
        } finally {
            mutuallyExclusiveTransferLock.unlock();
        }

        getLogger().info("Obtained file listing in {} milliseconds; listing had {} items, {} of which were new",
            new Object[]{millis, listing.size(), newItems});
    }
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.api.common.io;
import org.apache.flink.api.common.io.FileOutputFormat.OutputDirectoryMode;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.fs.FileSystem.WriteMode;
import org.apache.flink.core.fs.Path;
import org.apache.flink.types.IntValue;
import org.junit.Assert;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
import static org.junit.Assert.fail;
/**
 * Tests for {@code FileOutputFormat}: creation and overwrite behavior on the local
 * file system, for parallel and non-parallel writes and for both output-directory modes.
 *
 * <p>The original test bodies repeated the same "build format / configure / open / expect
 * failure-or-success" stanza many times; that boilerplate is factored into the private
 * helpers below. The sequence and semantics of each check are unchanged.
 */
public class FileOutputFormatTest {

	// ------------------------------------------------------------------------
	//  helpers
	// ------------------------------------------------------------------------

	/**
	 * Creates a {@link DummyFileOutputFormat} configured with the given output path,
	 * write mode, and output-directory mode.
	 */
	private static DummyFileOutputFormat createFormat(String outputPath, WriteMode writeMode, OutputDirectoryMode dirMode) {
		DummyFileOutputFormat dfof = new DummyFileOutputFormat();
		dfof.setOutputFilePath(new Path(outputPath));
		dfof.setWriteMode(writeMode);
		dfof.setOutputDirectoryMode(dirMode);
		dfof.configure(new Configuration());
		return dfof;
	}

	/** Opens and closes the format; fails the test unless an exception is thrown. */
	private static void openExpectFailure(DummyFileOutputFormat dfof, int taskIndex, int numTasks) {
		try {
			dfof.open(taskIndex, numTasks);
			dfof.close();
			fail("opening the output format was expected to fail");
		} catch (Exception e) {
			// exception expected
		}
	}

	/** Opens and closes the format; fails the test if an exception is thrown. */
	private static void openExpectSuccess(DummyFileOutputFormat dfof, int taskIndex, int numTasks) {
		try {
			dfof.open(taskIndex, numTasks);
			dfof.close();
		} catch (Exception e) {
			fail("opening the output format was expected to succeed: " + e.getMessage());
		}
	}

	// ------------------------------------------------------------------------
	//  tests
	// ------------------------------------------------------------------------

	@Test
	public void testCreateNonParallelLocalFS() throws IOException {
		File tmpOutPath = File.createTempFile("fileOutputFormatTest", "Test1");
		File tmpOutFile = new File(tmpOutPath.getAbsolutePath() + "/1");
		String tmpFilePath = tmpOutPath.toURI().toString();

		// check fail if file exists
		DummyFileOutputFormat dfof = createFormat(tmpFilePath, WriteMode.NO_OVERWRITE, OutputDirectoryMode.PARONLY);
		openExpectFailure(dfof, 0, 1);
		tmpOutPath.delete();

		// check fail if directory exists
		Assert.assertTrue("Directory could not be created.", tmpOutPath.mkdir());
		dfof = createFormat(tmpFilePath, WriteMode.NO_OVERWRITE, OutputDirectoryMode.PARONLY);
		openExpectFailure(dfof, 0, 1);
		tmpOutPath.delete();

		// check success
		dfof = createFormat(tmpFilePath, WriteMode.NO_OVERWRITE, OutputDirectoryMode.PARONLY);
		openExpectSuccess(dfof, 0, 1);
		Assert.assertTrue(tmpOutPath.exists() && tmpOutPath.isFile());
		tmpOutPath.delete();

		// check success for path with tailing '/' (a single file is still produced)
		dfof = createFormat(tmpFilePath + "/", WriteMode.NO_OVERWRITE, OutputDirectoryMode.PARONLY);
		openExpectSuccess(dfof, 0, 1);
		Assert.assertTrue(tmpOutPath.exists() && tmpOutPath.isFile());
		tmpOutPath.delete();

		// ----------- test again with always directory mode

		// check fail if file exists
		tmpOutPath.createNewFile();
		dfof = createFormat(tmpFilePath, WriteMode.NO_OVERWRITE, OutputDirectoryMode.ALWAYS);
		openExpectFailure(dfof, 0, 1);
		tmpOutPath.delete();

		// check success if directory exists
		Assert.assertTrue("Directory could not be created.", tmpOutPath.mkdir());
		dfof = createFormat(tmpFilePath, WriteMode.NO_OVERWRITE, OutputDirectoryMode.ALWAYS);
		openExpectSuccess(dfof, 0, 1);
		Assert.assertTrue(tmpOutPath.exists() && tmpOutPath.isDirectory());
		Assert.assertTrue(tmpOutFile.exists() && tmpOutFile.isFile());
		(new File(tmpOutPath.getAbsoluteFile() + "/1")).delete();

		// check custom file name inside directory if directory exists
		dfof = createFormat(tmpFilePath, WriteMode.NO_OVERWRITE, OutputDirectoryMode.ALWAYS);
		dfof.testFileName = true;
		openExpectSuccess(dfof, 0, 1);
		File customOutFile = new File(tmpOutPath.getAbsolutePath() + "/fancy-1-0.avro");
		Assert.assertTrue(tmpOutPath.exists() && tmpOutPath.isDirectory());
		Assert.assertTrue(customOutFile.exists() && customOutFile.isFile());
		customOutFile.delete();

		// check fail if file in directory exists
		// create file for test
		customOutFile = new File(tmpOutPath.getAbsolutePath() + "/1");
		customOutFile.createNewFile();
		dfof = createFormat(tmpFilePath, WriteMode.NO_OVERWRITE, OutputDirectoryMode.ALWAYS);
		openExpectFailure(dfof, 0, 1);
		(new File(tmpOutPath.getAbsoluteFile() + "/1")).delete();
		tmpOutPath.delete();

		// check success if no file exists
		dfof = createFormat(tmpFilePath, WriteMode.NO_OVERWRITE, OutputDirectoryMode.ALWAYS);
		openExpectSuccess(dfof, 0, 1);
		Assert.assertTrue(tmpOutPath.exists() && tmpOutPath.isDirectory());
		Assert.assertTrue(tmpOutFile.exists() && tmpOutFile.isFile());
		(new File(tmpOutPath.getAbsoluteFile() + "/1")).delete();
		tmpOutPath.delete();

		// check success for path with tailing '/'
		dfof = createFormat(tmpFilePath + "/", WriteMode.NO_OVERWRITE, OutputDirectoryMode.ALWAYS);
		openExpectSuccess(dfof, 0, 1);
		Assert.assertTrue(tmpOutPath.exists() && tmpOutPath.isDirectory());
		Assert.assertTrue(tmpOutFile.exists() && tmpOutFile.isFile());
		(new File(tmpOutPath.getAbsoluteFile() + "/1")).delete();
		tmpOutPath.delete();
	}

	@Test
	public void testCreateParallelLocalFS() throws IOException {
		File tmpOutPath = File.createTempFile("fileOutputFormatTest", "Test1");
		File tmpOutFile = new File(tmpOutPath.getAbsolutePath() + "/1");
		String tmpFilePath = tmpOutPath.toURI().toString();

		// check fail if file exists
		DummyFileOutputFormat dfof = createFormat(tmpFilePath, WriteMode.NO_OVERWRITE, OutputDirectoryMode.PARONLY);
		openExpectFailure(dfof, 0, 2);
		tmpOutPath.delete();

		// check success if directory exists
		Assert.assertTrue("Directory could not be created.", tmpOutPath.mkdir());
		dfof = createFormat(tmpFilePath, WriteMode.NO_OVERWRITE, OutputDirectoryMode.PARONLY);
		openExpectSuccess(dfof, 0, 2);
		Assert.assertTrue(tmpOutPath.exists() && tmpOutPath.isDirectory());
		Assert.assertTrue(tmpOutFile.exists() && tmpOutFile.isFile());
		tmpOutFile.delete();
		tmpOutPath.delete();

		// check fail if file in directory exists
		tmpOutPath.mkdir();
		tmpOutFile.createNewFile();
		dfof = createFormat(tmpFilePath, WriteMode.NO_OVERWRITE, OutputDirectoryMode.PARONLY);
		openExpectFailure(dfof, 0, 2);
		tmpOutFile.delete();
		tmpOutPath.delete();

		// check success if no file exists
		dfof = createFormat(tmpFilePath, WriteMode.NO_OVERWRITE, OutputDirectoryMode.PARONLY);
		openExpectSuccess(dfof, 0, 2);
		Assert.assertTrue(tmpOutPath.exists() && tmpOutPath.isDirectory());
		Assert.assertTrue(tmpOutFile.exists() && tmpOutFile.isFile());
		tmpOutFile.delete();
		tmpOutPath.delete();

		// check success for path with tailing '/'
		dfof = createFormat(tmpFilePath + "/", WriteMode.NO_OVERWRITE, OutputDirectoryMode.PARONLY);
		openExpectSuccess(dfof, 0, 2);
		Assert.assertTrue(tmpOutPath.exists() && tmpOutPath.isDirectory());
		Assert.assertTrue(tmpOutFile.exists() && tmpOutFile.isFile());
		tmpOutFile.delete();
		tmpOutPath.delete();
	}

	@Test
	public void testOverwriteNonParallelLocalFS() throws IOException {
		File tmpOutPath = File.createTempFile("fileOutputFormatTest", "Test1");
		File tmpOutFile = new File(tmpOutPath.getAbsolutePath() + "/1");
		String tmpFilePath = tmpOutPath.toURI().toString();

		// check success if file exists
		DummyFileOutputFormat dfof = createFormat(tmpFilePath, WriteMode.OVERWRITE, OutputDirectoryMode.PARONLY);
		openExpectSuccess(dfof, 0, 1);
		Assert.assertTrue(tmpOutPath.exists() && tmpOutPath.isFile());

		// check success if directory exists
		tmpOutPath.delete();
		Assert.assertTrue("Directory could not be created.", tmpOutPath.mkdir());
		dfof = createFormat(tmpFilePath, WriteMode.OVERWRITE, OutputDirectoryMode.PARONLY);
		openExpectSuccess(dfof, 0, 1);
		Assert.assertTrue(tmpOutPath.exists() && tmpOutPath.isFile());
		tmpOutPath.delete();

		// check success
		dfof = createFormat(tmpFilePath, WriteMode.OVERWRITE, OutputDirectoryMode.PARONLY);
		openExpectSuccess(dfof, 0, 1);
		Assert.assertTrue(tmpOutPath.exists() && tmpOutPath.isFile());
		tmpOutPath.delete();

		// check success for path with tailing '/' (a single file is still produced)
		dfof = createFormat(tmpFilePath + "/", WriteMode.OVERWRITE, OutputDirectoryMode.PARONLY);
		openExpectSuccess(dfof, 0, 1);
		Assert.assertTrue(tmpOutPath.exists() && tmpOutPath.isFile());
		tmpOutPath.delete();

		// ----------- test again with always directory mode

		// check success if file exists
		tmpOutPath.createNewFile();
		dfof = createFormat(tmpFilePath, WriteMode.OVERWRITE, OutputDirectoryMode.ALWAYS);
		openExpectSuccess(dfof, 0, 1);
		Assert.assertTrue(tmpOutPath.exists() && tmpOutPath.isDirectory());
		Assert.assertTrue(tmpOutFile.exists() && tmpOutFile.isFile());
		tmpOutFile.delete();
		tmpOutPath.delete();

		// check success if directory exists
		Assert.assertTrue("Directory could not be created.", tmpOutPath.mkdir());
		dfof = createFormat(tmpFilePath, WriteMode.OVERWRITE, OutputDirectoryMode.ALWAYS);
		openExpectSuccess(dfof, 0, 1);
		Assert.assertTrue(tmpOutPath.exists() && tmpOutPath.isDirectory());
		Assert.assertTrue(tmpOutFile.exists() && tmpOutFile.isFile());
		tmpOutPath.delete();
		tmpOutFile.delete();

		// check success if file in directory exists
		tmpOutPath.mkdir();
		tmpOutFile.createNewFile();
		dfof = createFormat(tmpFilePath, WriteMode.OVERWRITE, OutputDirectoryMode.ALWAYS);
		openExpectSuccess(dfof, 0, 1);
		Assert.assertTrue(tmpOutPath.exists() && tmpOutPath.isDirectory());
		Assert.assertTrue(tmpOutFile.exists() && tmpOutFile.isFile());
		tmpOutPath.delete();
		tmpOutFile.delete();

		// check success if no file exists
		dfof = createFormat(tmpFilePath, WriteMode.OVERWRITE, OutputDirectoryMode.ALWAYS);
		openExpectSuccess(dfof, 0, 1);
		Assert.assertTrue(tmpOutPath.exists() && tmpOutPath.isDirectory());
		Assert.assertTrue(tmpOutFile.exists() && tmpOutFile.isFile());
		tmpOutFile.delete();
		tmpOutPath.delete();

		// check success for path with tailing '/'
		dfof = createFormat(tmpFilePath + "/", WriteMode.OVERWRITE, OutputDirectoryMode.ALWAYS);
		openExpectSuccess(dfof, 0, 1);
		Assert.assertTrue(tmpOutPath.exists() && tmpOutPath.isDirectory());
		Assert.assertTrue(tmpOutFile.exists() && tmpOutFile.isFile());
		tmpOutFile.delete();
		tmpOutPath.delete();
	}

	@Test
	public void testOverwriteParallelLocalFS() throws IOException {
		File tmpOutPath = File.createTempFile("fileOutputFormatTest", "Test1");
		File tmpOutFile = new File(tmpOutPath.getAbsolutePath() + "/1");
		String tmpFilePath = tmpOutPath.toURI().toString();

		// check success if file exists
		DummyFileOutputFormat dfof = createFormat(tmpFilePath, WriteMode.OVERWRITE, OutputDirectoryMode.PARONLY);
		openExpectSuccess(dfof, 0, 2);
		Assert.assertTrue(tmpOutPath.exists() && tmpOutPath.isDirectory());
		Assert.assertTrue(tmpOutFile.exists() && tmpOutFile.isFile());
		tmpOutFile.delete();
		tmpOutPath.delete();

		// check success if directory exists
		Assert.assertTrue("Directory could not be created.", tmpOutPath.mkdir());
		dfof = createFormat(tmpFilePath, WriteMode.OVERWRITE, OutputDirectoryMode.PARONLY);
		openExpectSuccess(dfof, 0, 2);
		Assert.assertTrue(tmpOutPath.exists() && tmpOutPath.isDirectory());
		Assert.assertTrue(tmpOutFile.exists() && tmpOutFile.isFile());
		tmpOutFile.delete();
		tmpOutPath.delete();

		// check success if file in directory exists
		tmpOutPath.mkdir();
		tmpOutFile.createNewFile();
		dfof = createFormat(tmpFilePath, WriteMode.OVERWRITE, OutputDirectoryMode.PARONLY);
		openExpectSuccess(dfof, 0, 2);
		Assert.assertTrue(tmpOutPath.exists() && tmpOutPath.isDirectory());
		Assert.assertTrue(tmpOutFile.exists() && tmpOutFile.isFile());
		(new File(tmpOutPath.getAbsoluteFile() + "/1")).delete();
		tmpOutPath.delete();

		// check success if no file exists
		dfof = createFormat(tmpFilePath, WriteMode.OVERWRITE, OutputDirectoryMode.PARONLY);
		openExpectSuccess(dfof, 0, 2);
		Assert.assertTrue(tmpOutPath.exists() && tmpOutPath.isDirectory());
		Assert.assertTrue(tmpOutFile.exists() && tmpOutFile.isFile());
		tmpOutFile.delete();
		tmpOutPath.delete();

		// check success for path with tailing '/'
		dfof = createFormat(tmpFilePath + "/", WriteMode.OVERWRITE, OutputDirectoryMode.PARONLY);
		openExpectSuccess(dfof, 0, 2);
		Assert.assertTrue(tmpOutPath.exists() && tmpOutPath.isDirectory());
		Assert.assertTrue(tmpOutFile.exists() && tmpOutFile.isFile());
		tmpOutFile.delete();
		tmpOutPath.delete();
	}

	// -------------------------------------------------------------------------------------------

	/** Minimal FileOutputFormat that writes nothing; optionally reports a custom per-task file name. */
	public static class DummyFileOutputFormat extends FileOutputFormat<IntValue> {
		private static final long serialVersionUID = 1L;

		// when true, getDirectoryFileName returns the "fancy-..." test name instead of the default
		public boolean testFileName = false;

		@Override
		public void writeRecord(IntValue record) throws IOException {
			// DO NOTHING
		}

		@Override
		protected String getDirectoryFileName(int taskNumber) {
			if (testFileName) {
				return "fancy-" + (taskNumber + 1) + "-" + taskNumber + ".avro";
			} else {
				return super.getDirectoryFileName(taskNumber);
			}
		}
	}
}
| |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.operator.annotations;
import com.facebook.presto.common.function.OperatorType;
import com.facebook.presto.common.type.TypeSignature;
import com.facebook.presto.spi.function.Description;
import com.facebook.presto.spi.function.IsNull;
import com.facebook.presto.spi.function.LiteralParameters;
import com.facebook.presto.spi.function.LongVariableConstraint;
import com.facebook.presto.spi.function.Signature;
import com.facebook.presto.spi.function.SqlNullable;
import com.facebook.presto.spi.function.SqlType;
import com.facebook.presto.spi.function.TypeParameter;
import com.facebook.presto.spi.function.TypeParameterSpecialization;
import com.facebook.presto.spi.function.TypeVariableConstraint;
import com.facebook.presto.type.Constraint;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import javax.annotation.Nullable;
import java.lang.annotation.Annotation;
import java.lang.reflect.AnnotatedElement;
import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.function.Consumer;
import java.util.function.Predicate;
import java.util.stream.Stream;
import static com.facebook.presto.common.function.OperatorType.BETWEEN;
import static com.facebook.presto.common.function.OperatorType.CAST;
import static com.facebook.presto.common.function.OperatorType.EQUAL;
import static com.facebook.presto.common.function.OperatorType.GREATER_THAN;
import static com.facebook.presto.common.function.OperatorType.GREATER_THAN_OR_EQUAL;
import static com.facebook.presto.common.function.OperatorType.HASH_CODE;
import static com.facebook.presto.common.function.OperatorType.LESS_THAN;
import static com.facebook.presto.common.function.OperatorType.LESS_THAN_OR_EQUAL;
import static com.facebook.presto.common.function.OperatorType.NOT_EQUAL;
import static com.facebook.presto.common.type.StandardTypes.PARAMETRIC_TYPES;
import static com.facebook.presto.operator.annotations.ImplementationDependency.isImplementationDependencyAnnotation;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static com.google.common.collect.ImmutableSet.toImmutableSet;
import static java.lang.reflect.Modifier.isPublic;
import static java.lang.reflect.Modifier.isStatic;
import static java.util.Arrays.asList;
/**
 * Static helpers used while parsing annotated function implementations:
 * reflection lookups, annotation scanning, and signature/type-variable handling.
 */
public class FunctionsParserHelper
{
    private static final Set<OperatorType> COMPARABLE_TYPE_OPERATORS = ImmutableSet.of(EQUAL, NOT_EQUAL, HASH_CODE);
    private static final Set<OperatorType> ORDERABLE_TYPE_OPERATORS = ImmutableSet.of(LESS_THAN, LESS_THAN_OR_EQUAL, GREATER_THAN, GREATER_THAN_OR_EQUAL, BETWEEN);

    private FunctionsParserHelper()
    {}

    /** Returns true if any of the given annotations matches the predicate. */
    public static boolean containsAnnotation(Annotation[] annotations, Predicate<Annotation> predicate)
    {
        return Arrays.stream(annotations).anyMatch(predicate);
    }

    /** Returns true if any annotation is an implementation-dependency annotation. */
    public static boolean containsImplementationDependencyAnnotation(Annotation[] annotations)
    {
        return containsAnnotation(annotations, ImplementationDependency::isImplementationDependencyAnnotation);
    }

    /**
     * Builds type variable constraints for the declared type parameters, marking a
     * parameter comparable/orderable when an operator dependency requires it.
     */
    public static List<TypeVariableConstraint> createTypeVariableConstraints(Iterable<TypeParameter> typeParameters, List<ImplementationDependency> dependencies)
    {
        Set<String> orderableRequired = new HashSet<>();
        Set<String> comparableRequired = new HashSet<>();
        for (ImplementationDependency dependency : dependencies) {
            if (dependency instanceof OperatorImplementationDependency) {
                OperatorType operator = ((OperatorImplementationDependency) dependency).getOperator();
                // CAST imposes no comparable/orderable requirement on its argument type.
                if (operator == CAST) {
                    continue;
                }
                Set<String> argumentTypes = ((OperatorImplementationDependency) dependency).getArgumentTypes().stream()
                        .map(TypeSignature::getBase)
                        .collect(toImmutableSet());
                checkArgument(argumentTypes.size() == 1, "Operator dependency must only have arguments of a single type");
                String argumentType = Iterables.getOnlyElement(argumentTypes);
                if (COMPARABLE_TYPE_OPERATORS.contains(operator)) {
                    comparableRequired.add(argumentType);
                }
                if (ORDERABLE_TYPE_OPERATORS.contains(operator)) {
                    orderableRequired.add(argumentType);
                }
            }
        }
        ImmutableList.Builder<TypeVariableConstraint> typeVariableConstraints = ImmutableList.builder();
        for (TypeParameter typeParameter : typeParameters) {
            String name = typeParameter.value();
            String variadicBound = typeParameter.boundedBy().isEmpty() ? null : typeParameter.boundedBy();
            checkArgument(variadicBound == null || PARAMETRIC_TYPES.contains(variadicBound), "boundedBy must be a parametric type, got %s", variadicBound);
            if (orderableRequired.contains(name)) {
                typeVariableConstraints.add(new TypeVariableConstraint(name, false, true, variadicBound, false));
            }
            else if (comparableRequired.contains(name)) {
                typeVariableConstraints.add(new TypeVariableConstraint(name, true, false, variadicBound, false));
            }
            else {
                typeVariableConstraints.add(new TypeVariableConstraint(name, false, false, variadicBound, false));
            }
        }
        return typeVariableConstraints.build();
    }

    /**
     * Validates that a previously seen signature (if any) matches the new one;
     * all specializations of a parametric function must share one signature.
     */
    public static void validateSignaturesCompatibility(Optional<Signature> signatureOld, Signature signatureNew)
    {
        if (!signatureOld.isPresent()) {
            return;
        }
        checkArgument(signatureOld.get().equals(signatureNew), "Implementations with type parameters must all have matching signatures. %s does not match %s", signatureOld.get(), signatureNew);
    }

    /** Finds public static methods of {@code clazz} carrying any of the given annotations. */
    @SafeVarargs
    public static Set<Method> findPublicStaticMethods(Class<?> clazz, Class<? extends Annotation>... includedAnnotations)
    {
        return findPublicStaticMethods(clazz, ImmutableSet.copyOf(asList(includedAnnotations)), ImmutableSet.of());
    }

    /** Finds public static methods matching the include set and not the exclude set. */
    public static Set<Method> findPublicStaticMethods(Class<?> clazz, Set<Class<? extends Annotation>> includedAnnotations, Set<Class<? extends Annotation>> excludedAnnotations)
    {
        return findMethods(
                clazz.getMethods(),
                method -> checkArgument(isStatic(method.getModifiers()) && isPublic(method.getModifiers()), "Annotated method [%s] must be static and public", method.getName()),
                includedAnnotations,
                excludedAnnotations);
    }

    /** Finds public (declared) methods of {@code clazz} carrying any of the given annotations. */
    @SafeVarargs
    public static Set<Method> findPublicMethods(Class<?> clazz, Class<? extends Annotation>... includedAnnotations)
    {
        return findPublicMethods(clazz, ImmutableSet.copyOf(asList(includedAnnotations)), ImmutableSet.of());
    }

    /** Finds public declared methods matching the include set and not the exclude set. */
    public static Set<Method> findPublicMethods(Class<?> clazz, Set<Class<? extends Annotation>> includedAnnotations, Set<Class<? extends Annotation>> excludedAnnotations)
    {
        return findMethods(
                clazz.getDeclaredMethods(),
                // Bug fix: the message template has a %s placeholder, so the method name
                // must be supplied; previously the message printed a literal "%s".
                method -> checkArgument(isPublic(method.getModifiers()), "Annotated method [%s] must be public", method.getName()),
                includedAnnotations,
                excludedAnnotations);
    }

    /**
     * Selects methods that carry at least one included annotation and no excluded
     * annotation, running {@code methodChecker} on each selected method.
     */
    public static Set<Method> findMethods(
            Method[] allMethods,
            Consumer<Method> methodChecker,
            Set<Class<? extends Annotation>> includedAnnotations,
            Set<Class<? extends Annotation>> excludedAnnotations)
    {
        ImmutableSet.Builder<Method> methods = ImmutableSet.builder();
        for (Method method : allMethods) {
            boolean included = false;
            boolean excluded = false;
            for (Annotation annotation : method.getAnnotations()) {
                for (Class<?> annotationClass : excludedAnnotations) {
                    if (annotationClass.isInstance(annotation)) {
                        excluded = true;
                        break;
                    }
                }
                if (excluded) {
                    break;
                }
                if (included) {
                    continue;
                }
                for (Class<?> annotationClass : includedAnnotations) {
                    if (annotationClass.isInstance(annotation)) {
                        included = true;
                        break;
                    }
                }
            }
            if (included && !excluded) {
                methodChecker.accept(method);
                methods.add(method);
            }
        }
        return methods.build();
    }

    /**
     * Returns the single public constructor of {@code clazz}, or empty when there is none.
     *
     * @throws IllegalArgumentException if the class declares more than one public constructor
     */
    public static Optional<Constructor<?>> findConstructor(Class<?> clazz)
    {
        Constructor<?>[] constructors = clazz.getConstructors();
        // Bug fix: supply the class for the %s placeholder; it was previously omitted.
        checkArgument(constructors.length <= 1, "Class [%s] must have no more than 1 public constructor", clazz.getName());
        if (constructors.length == 0) {
            return Optional.empty();
        }
        return Optional.of(constructors[0]);
    }

    /** Returns the names declared by the method's {@code @LiteralParameters} annotation, if any. */
    public static Set<String> parseLiteralParameters(Method method)
    {
        LiteralParameters literalParametersAnnotation = method.getAnnotation(LiteralParameters.class);
        if (literalParametersAnnotation == null) {
            return ImmutableSet.of();
        }
        return ImmutableSet.copyOf(literalParametersAnnotation.value());
    }

    /** Returns true if any annotation is {@code javax.annotation.Nullable} (matched by name). */
    public static boolean containsLegacyNullable(Annotation[] annotations)
    {
        return Arrays.stream(annotations)
                .map(Annotation::annotationType)
                .map(Class::getName)
                .anyMatch(name -> name.equals(Nullable.class.getName()));
    }

    /** Returns true if the annotation is one of the Presto function-definition annotations. */
    public static boolean isPrestoAnnotation(Annotation annotation)
    {
        return isImplementationDependencyAnnotation(annotation) ||
                annotation instanceof SqlType ||
                annotation instanceof SqlNullable ||
                annotation instanceof IsNull;
    }

    /** Returns the override's description when present, falling back to the base element. */
    public static Optional<String> parseDescription(AnnotatedElement base, AnnotatedElement override)
    {
        Optional<String> overrideDescription = parseDescription(override);
        if (overrideDescription.isPresent()) {
            return overrideDescription;
        }
        return parseDescription(base);
    }

    /** Returns the element's {@code @Description} value, if the annotation is present. */
    public static Optional<String> parseDescription(AnnotatedElement base)
    {
        Description description = base.getAnnotation(Description.class);
        return (description == null) ? Optional.empty() : Optional.of(description.value());
    }

    /** Converts each {@code @Constraint} annotation on the method into a long variable constraint. */
    public static List<LongVariableConstraint> parseLongVariableConstraints(Method inputFunction)
    {
        return Stream.of(inputFunction.getAnnotationsByType(Constraint.class))
                .map(annotation -> new LongVariableConstraint(annotation.variable(), annotation.expression()))
                .collect(toImmutableList());
    }

    /**
     * Maps each specialized type parameter name to its native container type, validating
     * that specializations reference declared type parameters and do not conflict.
     */
    public static Map<String, Class<?>> getDeclaredSpecializedTypeParameters(Method method, Set<TypeParameter> typeParameters)
    {
        Map<String, Class<?>> specializedTypeParameters = new HashMap<>();
        TypeParameterSpecialization[] typeParameterSpecializations = method.getAnnotationsByType(TypeParameterSpecialization.class);
        Set<String> typeParameterNames = typeParameters.stream()
                .map(TypeParameter::value)
                .collect(toImmutableSet());
        for (TypeParameterSpecialization specialization : typeParameterSpecializations) {
            checkArgument(typeParameterNames.contains(specialization.name()), "%s does not match any declared type parameters (%s) [%s]", specialization.name(), typeParameters, method);
            Class<?> existingSpecialization = specializedTypeParameters.get(specialization.name());
            checkArgument(existingSpecialization == null || existingSpecialization.equals(specialization.nativeContainerType()),
                    "%s has conflicting specializations %s and %s [%s]", specialization.name(), existingSpecialization, specialization.nativeContainerType(), method);
            specializedTypeParameters.put(specialization.name(), specialization.nativeContainerType());
        }
        return specializedTypeParameters;
    }
}
| |
package gr.forth.ics.graph.event;
import java.util.*;
import gr.forth.ics.util.Args;
import gr.forth.ics.util.EventSupport;
/**
 * Dispatches node and edge events of a graph to registered listeners.
 *
 * Vetoable firing protocol (see the fireAdd*/fireRemove* methods): a pre-event is sent,
 * then every listener receives the "to be" notification, then the supplied command runs,
 * then every listener receives the "done" notification; the post-event is delivered in a
 * finally block so it fires even when a listener vetoes by throwing an exception.
 */
public class GraphEventSupport {
    // Shared no-op command used when an event is fired without an associated action.
    private static final Runnable NULL_RUNNABLE = new Runnable() { public void run() { } };
    private final EventSupport<NodeListener> nodeSupport = new EventSupport<NodeListener>();
    private final EventSupport<EdgeListener> edgeSupport = new EventSupport<EdgeListener>();
    // Cached total listener count (node + edge); kept current by calcListeners().
    private int listeners;

    // Recomputes the cached listener count; called after every add/remove.
    private void calcListeners() {
        listeners = nodeSupport.getListenerCount() + edgeSupport.getListenerCount();
    }

    /** Returns true when no node or edge listener is registered. */
    public boolean isEmpty() {
        return listeners == 0;
    }

    public void addEdgeListener(EdgeListener listener) {
        edgeSupport.addListener(listener);
        calcListeners();
    }

    public void addNodeListener(NodeListener listener) {
        nodeSupport.addListener(listener);
        calcListeners();
    }

    public void removeEdgeListener(EdgeListener listener) {
        edgeSupport.removeListener(listener);
        calcListeners();
    }

    public void removeNodeListener(NodeListener listener) {
        nodeSupport.removeListener(listener);
        calcListeners();
    }

    /** Registers the listener for both node and edge events. */
    public void addGraphListener(GraphListener listener) {
        addEdgeListener(listener);
        addNodeListener(listener);
    }

    /** Unregisters the listener from both node and edge events; null is silently ignored. */
    public void removeGraphListener(GraphListener listener) {
        if (listener == null) {
            return;
        }
        removeEdgeListener(listener);
        removeNodeListener(listener);
    }

    /** Fires the event with no follow-up command. */
    public void fire(GraphEvent e) {
        fire(e, e.getEventType(), NULL_RUNNABLE);
    }

    // Throws NullPointerException if e is null (e.getEventType() is dereferenced).
    public void fire(GraphEvent e, Runnable commandIfNoVeto) {
        fire(e, e.getEventType(), commandIfNoVeto);
    }

    // Throws NullPointerException if e or commandIfNoVeto is null.
    /**
     * Dispatches the event to the handler for the given type. When no listeners are
     * registered, the command runs immediately and no notifications are sent.
     */
    public void fire(GraphEvent e, GraphEvent.Type eventType, Runnable commandIfNoVeto) {
        if (listeners == 0) {
            commandIfNoVeto.run();
            return;
        }
        switch (eventType) {
            case NODE_ADDED: case NODE_REINSERTED:
                fireAddNode(e, commandIfNoVeto); break;
            case NODE_REMOVED:
                fireRemoveNode(e, commandIfNoVeto); break;
            case EDGE_ADDED: case EDGE_REINSERTED:
                fireAddEdge(e, commandIfNoVeto); break;
            case EDGE_REMOVED:
                fireRemoveEdge(e, commandIfNoVeto); break;
            case NODE_REORDERED:
                fireNodeReordered(e); break;
            case EDGE_REORDERED:
                fireEdgeReordered(e); break;
            default:
                throw new IllegalArgumentException("Unexpected event type: " + eventType);
        }
    }

    public void firePreEdge() {
        firePre(edgeSupport.getListeners());
    }

    public void firePostEdge() {
        firePost(edgeSupport.getListeners());
    }

    public void firePreNode() {
        firePre(nodeSupport.getListeners());
    }

    public void firePostNode() {
        firePost(nodeSupport.getListeners());
    }

    /** Notifies node listeners of a NODE_REORDERED event. */
    public void fireNodeReordered(GraphEvent e) {
        Args.isTrue(e.getEventType() == GraphEvent.Type.NODE_REORDERED);
        for (NodeListener listener : nodeSupport.getListeners()) {
            listener.nodeReordered(e);
        }
    }

    /** Notifies edge listeners of an EDGE_REORDERED event. */
    public void fireEdgeReordered(GraphEvent e) {
        Args.isTrue(e.getEventType() == GraphEvent.Type.EDGE_REORDERED);
        for (EdgeListener listener : edgeSupport.getListeners()) {
            listener.edgeReordered(e);
        }
    }

    /** Notifies node listeners that a node is about to be added (or reinserted). */
    public void fireNodeToBeAdded(GraphEvent e) {
        GraphEvent.Type eventType = e.getEventType();
        Args.isTrue(eventType == GraphEvent.Type.NODE_ADDED || eventType == GraphEvent.Type.NODE_REINSERTED);
        if (nodeSupport.isEmpty()) {
            return;
        }
        for (NodeListener listener : nodeSupport.getListeners()) {
            listener.nodeToBeAdded(e);
        }
    }

    // Full vetoable sequence: pre -> toBeAdded -> command -> added -> post (post in finally).
    // NOTE: the local 'listeners' intentionally shadows the int field of the same name.
    private void fireAddNode(GraphEvent e, Runnable commandIfNoVeto) {
        firePreNode();
        try {
            Iterable<NodeListener> listeners = nodeSupport.getListeners();
            for (NodeListener listener : listeners) {
                listener.nodeToBeAdded(e);
            }
            commandIfNoVeto.run();
            for (NodeListener listener : listeners) {
                listener.nodeAdded(e);
            }
        } finally {
            firePostNode();
        }
    }

    /** Notifies node listeners that a node has been added (or reinserted). */
    public void fireNodeAdded(GraphEvent e) {
        GraphEvent.Type eventType = e.getEventType();
        Args.isTrue(eventType == GraphEvent.Type.NODE_ADDED || eventType == GraphEvent.Type.NODE_REINSERTED);
        if (nodeSupport.isEmpty()) {
            return;
        }
        for (NodeListener listener : nodeSupport.getListeners()) {
            listener.nodeAdded(e);
        }
    }

    /** Notifies node listeners that a node is about to be removed. */
    public void fireNodeToBeRemoved(GraphEvent e) {
        Args.isTrue(e.getEventType() == GraphEvent.Type.NODE_REMOVED);
        if (nodeSupport.isEmpty()) {
            return;
        }
        for (NodeListener listener : nodeSupport.getListeners()) {
            listener.nodeToBeRemoved(e);
        }
    }

    // Full vetoable sequence: pre -> toBeRemoved -> command -> removed -> post (post in finally).
    private void fireRemoveNode(GraphEvent e, Runnable commandIfNoVeto) {
        firePreNode();
        try {
            Iterable<NodeListener> listeners = nodeSupport.getListeners();
            for (NodeListener listener : listeners) {
                listener.nodeToBeRemoved(e);
            }
            commandIfNoVeto.run();
            for (NodeListener listener : listeners) {
                listener.nodeRemoved(e);
            }
        } finally {
            firePostNode();
        }
    }

    /** Notifies node listeners that a node has been removed. */
    public void fireNodeRemoved(GraphEvent e) {
        Args.isTrue(e.getEventType() == GraphEvent.Type.NODE_REMOVED);
        if (nodeSupport.isEmpty()) {
            return;
        }
        for (NodeListener listener : nodeSupport.getListeners()) {
            listener.nodeRemoved(e);
        }
    }

    /** Notifies edge listeners that an edge is about to be added (or reinserted). */
    public void fireEdgeToBeAdded(GraphEvent e) {
        GraphEvent.Type eventType = e.getEventType();
        Args.isTrue(eventType == GraphEvent.Type.EDGE_ADDED || eventType == GraphEvent.Type.EDGE_REINSERTED);
        if (edgeSupport.isEmpty()) {
            return;
        }
        for (EdgeListener listener : edgeSupport.getListeners()) {
            listener.edgeToBeAdded(e);
        }
    }

    // Full vetoable sequence: pre -> edgeToBeAdded -> command -> edgeAdded -> post (post in finally).
    private void fireAddEdge(GraphEvent e, Runnable commandIfNoVeto) {
        firePreEdge();
        try {
            Iterable<EdgeListener> listeners = edgeSupport.getListeners();
            for (EdgeListener listener : listeners) {
                listener.edgeToBeAdded(e);
            }
            commandIfNoVeto.run();
            for (EdgeListener listener : listeners) {
                listener.edgeAdded(e);
            }
        } finally {
            firePostEdge();
        }
    }

    /** Notifies edge listeners that an edge has been added (or reinserted). */
    public void fireEdgeAdded(GraphEvent e) {
        GraphEvent.Type eventType = e.getEventType();
        Args.isTrue(eventType == GraphEvent.Type.EDGE_ADDED || eventType == GraphEvent.Type.EDGE_REINSERTED);
        if (edgeSupport.isEmpty()) {
            return;
        }
        for (EdgeListener listener : edgeSupport.getListeners()) {
            listener.edgeAdded(e);
        }
    }

    /** Notifies edge listeners that an edge is about to be removed. */
    public void fireEdgeToBeRemoved(GraphEvent e) {
        Args.isTrue(e.getEventType() == GraphEvent.Type.EDGE_REMOVED);
        if (edgeSupport.isEmpty()) {
            return;
        }
        for (EdgeListener listener : edgeSupport.getListeners()) {
            listener.edgeToBeRemoved(e);
        }
    }

    // Full vetoable sequence: pre -> edgeToBeRemoved -> command -> edgeRemoved -> post (post in finally).
    private void fireRemoveEdge(GraphEvent e, Runnable commandIfNoVeto) {
        firePreEdge();
        try {
            Iterable<EdgeListener> listeners = edgeSupport.getListeners();
            for (EdgeListener listener : listeners) {
                listener.edgeToBeRemoved(e);
            }
            commandIfNoVeto.run();
            for (EdgeListener listener : listeners) {
                listener.edgeRemoved(e);
            }
        } finally {
            firePostEdge();
        }
    }

    /** Notifies edge listeners that an edge has been removed. */
    public void fireEdgeRemoved(GraphEvent e) {
        Args.isTrue(e.getEventType() == GraphEvent.Type.EDGE_REMOVED);
        if (edgeSupport.isEmpty()) {
            return;
        }
        for (EdgeListener listener : edgeSupport.getListeners()) {
            listener.edgeRemoved(e);
        }
    }

    // Sends the pre-event notification to every given listener.
    private void firePre(Collection<? extends OperationListener> listeners) {
        for (OperationListener listener : listeners) {
            listener.preEvent();
        }
    }

    // Sends the post-event notification to every given listener.
    private void firePost(Collection<? extends OperationListener> listeners) {
        for (OperationListener listener : listeners) {
            listener.postEvent();
        }
    }

    public List<NodeListener> getNodeListeners() {
        return nodeSupport.getListeners();
    }

    public List<EdgeListener> getEdgeListeners() {
        return edgeSupport.getListeners();
    }
}
| |
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.trigger;
import java.io.IOException;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.commons.dbutils.DbUtils;
import org.apache.commons.dbutils.QueryRunner;
import org.apache.commons.dbutils.ResultSetHandler;
import org.apache.log4j.Logger;
import org.joda.time.DateTime;
import azkaban.database.AbstractJdbcLoader;
import azkaban.utils.GZIPUtils;
import azkaban.utils.JSONUtils;
import azkaban.utils.Props;
/**
 * JDBC-backed {@link TriggerLoader}: persists triggers as (optionally GZIP-compressed)
 * JSON blobs in the "triggers" table.
 */
public class JdbcTriggerLoader extends AbstractJdbcLoader implements
    TriggerLoader {
  private static final Logger logger = Logger.getLogger(JdbcTriggerLoader.class);

  // Encoding applied to trigger JSON before it is written to the data column.
  private EncodingType defaultEncodingType = EncodingType.GZIP;

  private static final String triggerTblName = "triggers";

  private static final String GET_UPDATED_TRIGGERS =
      "SELECT trigger_id, trigger_source, modify_time, enc_type, data FROM "
          + triggerTblName + " WHERE modify_time>=?";
  private static final String GET_ALL_TRIGGERS =
      "SELECT trigger_id, trigger_source, modify_time, enc_type, data FROM "
          + triggerTblName;
  private static final String GET_TRIGGER =
      "SELECT trigger_id, trigger_source, modify_time, enc_type, data FROM "
          + triggerTblName + " WHERE trigger_id=?";
  private static final String ADD_TRIGGER = "INSERT INTO " + triggerTblName
      + " ( modify_time) values (?)";
  private static final String REMOVE_TRIGGER = "DELETE FROM " + triggerTblName
      + " WHERE trigger_id=?";
  private static final String UPDATE_TRIGGER =
      "UPDATE "
          + triggerTblName
          + " SET trigger_source=?, modify_time=?, enc_type=?, data=? WHERE trigger_id=?";

  public EncodingType getDefaultEncodingType() {
    return defaultEncodingType;
  }

  public void setDefaultEncodingType(EncodingType defaultEncodingType) {
    this.defaultEncodingType = defaultEncodingType;
  }

  public JdbcTriggerLoader(Props props) {
    super(props);
  }

  /**
   * Loads all triggers modified at or after {@code lastUpdateTime}.
   *
   * @throws TriggerLoaderException on any database failure
   */
  @Override
  public List<Trigger> getUpdatedTriggers(long lastUpdateTime)
      throws TriggerLoaderException {
    logger.info("Loading triggers changed since "
        + new DateTime(lastUpdateTime).toString());
    Connection connection = getConnection();
    QueryRunner runner = new QueryRunner();
    ResultSetHandler<List<Trigger>> handler = new TriggerResultHandler();
    List<Trigger> triggers;
    try {
      triggers =
          runner.query(connection, GET_UPDATED_TRIGGERS, handler,
              lastUpdateTime);
    } catch (SQLException e) {
      // Bug fix: previously logged GET_ALL_TRIGGERS, which is not the query that failed.
      logger.error(GET_UPDATED_TRIGGERS + " failed.");
      throw new TriggerLoaderException("Loading triggers from db failed. ", e);
    } finally {
      DbUtils.closeQuietly(connection);
    }
    logger.info("Loaded " + triggers.size() + " triggers.");
    return triggers;
  }

  /**
   * Loads every trigger stored in the database.
   *
   * @throws TriggerLoaderException on any database failure
   */
  @Override
  public List<Trigger> loadTriggers() throws TriggerLoaderException {
    logger.info("Loading all triggers from db.");
    Connection connection = getConnection();
    QueryRunner runner = new QueryRunner();
    ResultSetHandler<List<Trigger>> handler = new TriggerResultHandler();
    List<Trigger> triggers;
    try {
      triggers = runner.query(connection, GET_ALL_TRIGGERS, handler);
    } catch (SQLException e) {
      logger.error(GET_ALL_TRIGGERS + " failed.");
      throw new TriggerLoaderException("Loading triggers from db failed. ", e);
    } finally {
      DbUtils.closeQuietly(connection);
    }
    logger.info("Loaded " + triggers.size() + " triggers.");
    return triggers;
  }

  /**
   * Deletes the trigger row for {@code t}.
   *
   * @throws TriggerLoaderException if the row does not exist or the delete fails
   */
  @Override
  public void removeTrigger(Trigger t) throws TriggerLoaderException {
    logger.info("Removing trigger " + t.toString() + " from db.");
    QueryRunner runner = createQueryRunner();
    try {
      int removes = runner.update(REMOVE_TRIGGER, t.getTriggerId());
      if (removes == 0) {
        throw new TriggerLoaderException("No trigger has been removed.");
      }
    } catch (SQLException e) {
      logger.error(REMOVE_TRIGGER + " failed.");
      throw new TriggerLoaderException("Remove trigger " + t.toString()
          + " from db failed. ", e);
    }
  }

  /**
   * Inserts a new trigger row, assigns the generated id to {@code t}, and then
   * persists the trigger payload via {@link #updateTrigger(Trigger)}.
   *
   * @throws TriggerLoaderException on any failure
   */
  @Override
  public void addTrigger(Trigger t) throws TriggerLoaderException {
    logger.info("Inserting trigger " + t.toString() + " into db.");
    t.setLastModifyTime(System.currentTimeMillis());
    Connection connection = getConnection();
    try {
      addTrigger(connection, t, defaultEncodingType);
    } catch (Exception e) {
      throw new TriggerLoaderException("Error uploading trigger", e);
    } finally {
      DbUtils.closeQuietly(connection);
    }
  }

  // Synchronized so the LAST_INSERT_ID() read cannot interleave with another insert
  // on this loader. The payload itself is written by updateTrigger, which uses the
  // loader's default encoding (the encType parameter is kept for signature stability).
  private synchronized void addTrigger(Connection connection, Trigger t,
      EncodingType encType) throws TriggerLoaderException {
    QueryRunner runner = new QueryRunner();
    long id;
    try {
      runner.update(connection, ADD_TRIGGER, DateTime.now().getMillis());
      connection.commit();
      id =
          runner.query(connection, LastInsertID.LAST_INSERT_ID,
              new LastInsertID());
      if (id == -1L) {
        logger.error("trigger id is not properly created.");
        throw new TriggerLoaderException("trigger id is not properly created.");
      }
      t.setTriggerId((int) id);
      updateTrigger(t);
      logger.info("uploaded trigger " + t.getDescription());
    } catch (SQLException e) {
      throw new TriggerLoaderException("Error creating trigger.", e);
    }
  }

  /**
   * Re-serializes and stores the trigger payload, refreshing its modify time.
   *
   * @throws TriggerLoaderException on any failure
   */
  @Override
  public void updateTrigger(Trigger t) throws TriggerLoaderException {
    if (logger.isDebugEnabled()) {
      logger.debug("Updating trigger " + t.getTriggerId() + " into db.");
    }
    t.setLastModifyTime(System.currentTimeMillis());
    Connection connection = getConnection();
    try {
      updateTrigger(connection, t, defaultEncodingType);
    } catch (Exception e) {
      // Bug fix: log through the logger and preserve the cause instead of
      // printStackTrace() plus a cause-less rethrow.
      logger.error("Failed to update trigger " + t.toString() + " into db!", e);
      throw new TriggerLoaderException("Failed to update trigger "
          + t.toString() + " into db!", e);
    } finally {
      DbUtils.closeQuietly(connection);
    }
  }

  // Serializes the trigger to JSON (gzipping when requested) and updates its row.
  private void updateTrigger(Connection connection, Trigger t,
      EncodingType encType) throws TriggerLoaderException {
    String json = JSONUtils.toJSON(t.toJson());
    byte[] data = null;
    try {
      byte[] stringData = json.getBytes("UTF-8");
      data = stringData;
      if (encType == EncodingType.GZIP) {
        data = GZIPUtils.gzipBytes(stringData);
      }
      logger.debug("NumChars: " + json.length() + " UTF-8:" + stringData.length
          + " Gzip:" + data.length);
    } catch (IOException e) {
      // Preserve the cause so encoding failures remain diagnosable.
      throw new TriggerLoaderException("Error encoding the trigger "
          + t.toString(), e);
    }
    QueryRunner runner = new QueryRunner();
    try {
      int updates =
          runner.update(connection, UPDATE_TRIGGER, t.getSource(),
              t.getLastModifyTime(), encType.getNumVal(), data,
              t.getTriggerId());
      connection.commit();
      if (updates == 0) {
        throw new TriggerLoaderException("No trigger has been updated.");
      } else {
        if (logger.isDebugEnabled()) {
          logger.debug("Updated " + updates + " records.");
        }
      }
    } catch (SQLException e) {
      logger.error(UPDATE_TRIGGER + " failed.");
      throw new TriggerLoaderException("Update trigger " + t.toString()
          + " into db failed. ", e);
    }
  }

  // Reads the auto-generated key of the last insert on this connection; -1 when absent.
  private static class LastInsertID implements ResultSetHandler<Long> {
    private static final String LAST_INSERT_ID = "SELECT LAST_INSERT_ID()";

    @Override
    public Long handle(ResultSet rs) throws SQLException {
      if (!rs.next()) {
        return -1L;
      }
      long id = rs.getLong(1);
      return id;
    }
  }

  /**
   * Materializes trigger rows into {@link Trigger} objects; rows whose payload
   * cannot be reconstructed are logged and skipped.
   */
  public class TriggerResultHandler implements ResultSetHandler<List<Trigger>> {
    @Override
    public List<Trigger> handle(ResultSet rs) throws SQLException {
      if (!rs.next()) {
        return Collections.<Trigger> emptyList();
      }
      ArrayList<Trigger> triggers = new ArrayList<Trigger>();
      do {
        int triggerId = rs.getInt(1);
        int encodingType = rs.getInt(4);
        byte[] data = rs.getBytes(5);
        Object jsonObj = null;
        if (data != null) {
          EncodingType encType = EncodingType.fromInteger(encodingType);
          try {
            // Inflate the stored blob back into a JSON object.
            if (encType == EncodingType.GZIP) {
              String jsonString = GZIPUtils.unGzipString(data, "UTF-8");
              jsonObj = JSONUtils.parseJSONFromString(jsonString);
            } else {
              String jsonString = new String(data, "UTF-8");
              jsonObj = JSONUtils.parseJSONFromString(jsonString);
            }
          } catch (IOException e) {
            // Preserve the cause instead of discarding it.
            throw new SQLException("Error reconstructing trigger data ", e);
          }
        }
        Trigger t = null;
        try {
          t = Trigger.fromJson(jsonObj);
          triggers.add(t);
        } catch (Exception e) {
          // Best-effort load: log the failure (with cause) and continue with other rows.
          logger.error("Failed to load trigger " + triggerId, e);
        }
      } while (rs.next());
      return triggers;
    }
  }

  // Opens a non-auto-commit connection; callers are responsible for closing it.
  private Connection getConnection() throws TriggerLoaderException {
    Connection connection = null;
    try {
      connection = super.getDBConnection(false);
    } catch (Exception e) {
      DbUtils.closeQuietly(connection);
      throw new TriggerLoaderException("Error getting DB connection.", e);
    }
    return connection;
  }

  /**
   * Loads a single trigger by id.
   *
   * @throws TriggerLoaderException if the trigger does not exist or loading fails
   */
  @Override
  public Trigger loadTrigger(int triggerId) throws TriggerLoaderException {
    logger.info("Loading trigger " + triggerId + " from db.");
    Connection connection = getConnection();
    QueryRunner runner = new QueryRunner();
    ResultSetHandler<List<Trigger>> handler = new TriggerResultHandler();
    List<Trigger> triggers;
    try {
      triggers = runner.query(connection, GET_TRIGGER, handler, triggerId);
    } catch (SQLException e) {
      logger.error(GET_TRIGGER + " failed.");
      throw new TriggerLoaderException("Loading trigger from db failed. ", e);
    } finally {
      DbUtils.closeQuietly(connection);
    }
    if (triggers.size() == 0) {
      logger.error("Loaded 0 triggers. Failed to load trigger " + triggerId);
      throw new TriggerLoaderException(
          "Loaded 0 triggers. Failed to load trigger " + triggerId);
    }
    return triggers.get(0);
  }
}
| |
package com.bazaarvoice.emodb.local;
import ch.qos.logback.classic.Level;
import ch.qos.logback.classic.Logger;
import com.bazaarvoice.emodb.auth.role.RoleIdentifier;
import com.bazaarvoice.emodb.common.dropwizard.guice.SelfHostAndPort;
import com.bazaarvoice.emodb.common.dropwizard.guice.SelfHostAndPortModule;
import com.bazaarvoice.emodb.common.dropwizard.guice.ServerCluster;
import com.bazaarvoice.emodb.common.json.CustomJsonObjectMapperFactory;
import com.bazaarvoice.emodb.uac.api.CreateEmoRoleRequest;
import com.bazaarvoice.emodb.uac.api.EmoRoleKey;
import com.bazaarvoice.emodb.uac.api.UpdateEmoRoleRequest;
import com.bazaarvoice.emodb.uac.api.UserAccessControl;
import com.bazaarvoice.emodb.uac.client.UserAccessControlClientFactory;
import com.bazaarvoice.emodb.uac.client.UserAccessControlFixedHostDiscoverySource;
import com.bazaarvoice.emodb.web.EmoConfiguration;
import com.bazaarvoice.emodb.web.EmoService;
import com.bazaarvoice.emodb.web.auth.ApiKeyEncryption;
import com.bazaarvoice.emodb.web.util.EmoServiceObjectMapperFactory;
import com.bazaarvoice.ostrich.pool.ServicePoolBuilder;
import com.bazaarvoice.ostrich.pool.ServicePoolProxies;
import com.bazaarvoice.ostrich.retry.ExponentialBackoffRetry;
import com.codahale.metrics.MetricRegistry;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableSet;
import com.google.common.io.ByteStreams;
import com.google.common.io.Files;
import com.google.common.net.HostAndPort;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Key;
import com.google.inject.Module;
import io.dropwizard.server.ServerFactory;
import net.sourceforge.argparse4j.ArgumentParsers;
import net.sourceforge.argparse4j.impl.Arguments;
import net.sourceforge.argparse4j.inf.ArgumentParser;
import net.sourceforge.argparse4j.inf.Namespace;
import org.apache.cassandra.service.CassandraDaemon;
import org.apache.commons.lang.ArrayUtils;
import org.apache.curator.test.TestingServer;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.net.Socket;
import java.nio.charset.Charset;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
/**
 * Local-development launcher for EmoDB.  Optionally starts an in-memory
 * Cassandra (when a cassandra.yaml template is supplied) and an in-memory
 * ZooKeeper (when -z/--zookeeper is passed) before delegating the remaining
 * arguments to {@code EmoService.main}.  After the server is up, any supplied
 * permissions YAML files are applied through the local UAC API.
 */
public class EmoServiceWithZK {
    /**
     * Single-threaded executor that runs the embedded Cassandra daemon off the
     * main thread.  Daemon threads so a hung Cassandra never blocks JVM exit.
     */
    private static final ExecutorService service = Executors.newSingleThreadExecutor(
            new ThreadFactoryBuilder()
                    .setDaemon(true)
                    .setNameFormat("EmbeddedCassandra-%d")
                    .build());

    /**
     * Entry point.
     *
     * @param args {@code server config.yaml config-ddl.yaml [cassandra.yaml] [-z] [-p permissions.yaml...]}
     * @throws Exception if argument parsing or any of the embedded services fail to start
     */
    public static void main(String... args) throws Exception {
        // Remove all nulls and empty strings from the argument list.  This can happen if the maven
        // command starts the service with no permission YAML files.
        args = Arrays.stream(args).filter(arg -> !Strings.isNullOrEmpty(arg)).toArray(String[]::new);

        // Start cassandra if necessary (cassandra.yaml is provided)
        ArgumentParser parser = ArgumentParsers.newArgumentParser("java -jar emodb-web-local*.jar");
        parser.addArgument("server").required(true).help("server");
        parser.addArgument("emo-config").required(true).help("config.yaml - EmoDB's config file");
        parser.addArgument("emo-config-ddl").required(true).help("config-ddl.yaml - EmoDB's cassandra schema file");
        parser.addArgument("cassandra-yaml").nargs("?").help("cassandra.yaml - Cassandra configuration file to start an" +
                " in memory embedded Cassandra.");
        parser.addArgument("-z","--zookeeper").dest("zookeeper").action(Arguments.storeTrue()).help("Starts zookeeper");
        parser.addArgument("-p","--permissions-yaml").dest("permissions").nargs("*").help("Permissions file(s)");

        // Get the path to cassandraYaml or if zookeeper is available
        Namespace result = parser.parseArgs(args);
        String cassandraYaml = result.getString("cassandra-yaml");
        boolean startZk = result.getBoolean("zookeeper");
        String emoConfigYaml = result.getString("emo-config");
        List<String> permissionsYamls = result.getList("permissions");
        String[] emoServiceArgs = args;

        // Start ZooKeeper, unless an instance is already listening on the default port.
        TestingServer zooKeeperServer = null;
        if (startZk) {
            zooKeeperServer = isLocalZooKeeperRunning() ? null : startLocalZooKeeper();
            // EmoService's own parser does not understand the ZooKeeper flags, so strip them.
            emoServiceArgs = (String[]) ArrayUtils.removeElement(args, "-z");
            emoServiceArgs = (String[]) ArrayUtils.removeElement(emoServiceArgs, "--zookeeper");
        }

        boolean success = false;

        if (cassandraYaml != null) {
            // Replace $DIR$ so we can correctly specify location during runtime
            File templateFile = new File(cassandraYaml);
            String baseFile = Files.toString(templateFile, Charset.defaultCharset());
            // Get the jar location
            String path = EmoServiceWithZK.class.getProtectionDomain().getCodeSource().getLocation().getPath();
            String parentDir = new File(path).getParent();
            // Point Cassandra's data/commitlog/saved_caches directories next to the jar.
            String newFile = baseFile.replace("$DATADIR$", new File(parentDir, "data").getAbsolutePath());
            newFile = newFile.replace("$COMMITDIR$", new File(parentDir, "commitlog").getAbsolutePath());
            newFile = newFile.replace("$CACHEDIR$", new File(parentDir, "saved_caches").getAbsolutePath());
            File newYamlFile = new File(templateFile.getParent(), "emo-cassandra.yaml");
            Files.write(newFile, newYamlFile, Charset.defaultCharset());

            startLocalCassandra(newYamlFile.getAbsolutePath());
            emoServiceArgs = (String[]) ArrayUtils.removeElement(emoServiceArgs, cassandraYaml);
        }

        // If permissions files were configured remove them from the argument list
        // (the flag itself plus one argument per permissions file).
        int permissionsIndex = Math.max(ArrayUtils.indexOf(emoServiceArgs, "-p"), ArrayUtils.indexOf(emoServiceArgs, "--permissions-yaml"));
        if (permissionsIndex >= 0) {
            int permissionsArgCount = 1 + permissionsYamls.size();
            for (int i=0; i < permissionsArgCount; i++) {
                emoServiceArgs = (String[]) ArrayUtils.remove(emoServiceArgs, permissionsIndex);
            }
        }

        try {
            EmoService.main(emoServiceArgs);
            success = true;
            setPermissionsFromFiles(permissionsYamls, emoConfigYaml);
        } catch (Throwable t) {
            t.printStackTrace();
        } finally {
            // The main web server command returns immediately--don't stop ZooKeeper/Cassandra in that case.
            if (zooKeeperServer != null && !(success && args.length > 0 && "server".equals(args[0]))) {
                zooKeeperServer.stop();
                service.shutdown();
            }
        }
    }

    /** Start an in-memory Cassandra. */
    private static void startLocalCassandra(String cassandraYamlPath) throws IOException {
        // CassandraDaemon reads its configuration from this system property.
        System.setProperty("cassandra.config", "file:" + cassandraYamlPath);
        final CassandraDaemon cassandra = new CassandraDaemon();
        cassandra.init(null);
        // Run start() on the background executor; getUnchecked blocks until startup completes
        // and rethrows any startup failure as an unchecked exception.
        Futures.getUnchecked(service.submit(new Callable<Object>(){
            @Override
            public Object call() throws Exception
            {
                cassandra.start();
                return null;
            }
        }));
    }

    /** Start an in-memory copy of ZooKeeper. */
    private static TestingServer startLocalZooKeeper() throws Exception {
        // ZooKeeper is too noisy by default.
        ((Logger) LoggerFactory.getLogger("org.apache.zookeeper")).setLevel(Level.ERROR);

        // Start the testing server on the standard ZooKeeper port.
        TestingServer zooKeeperServer = new TestingServer(2181);

        // Configure EmoDB to use the testing server.
        System.setProperty("dw.zooKeeper.connectString", zooKeeperServer.getConnectString());

        return zooKeeperServer;
    }

    /**
     * Probes localhost:2181 with ZooKeeper's "ruok" four-letter command.
     *
     * @return true if a ZooKeeper server answered "imok", false on any failure
     */
    private static boolean isLocalZooKeeperRunning() {
        try (Socket socket = new Socket("localhost", 2181)) {
            OutputStream out = socket.getOutputStream();
            // Send a 4-letter request
            out.write("ruok".getBytes());
            // Receive the 4-letter response
            byte[] response = new byte[4];
            ByteStreams.readFully(socket.getInputStream(), response);
            return Arrays.equals(response, "imok".getBytes());
        } catch (Throwable t) {
            // Nothing answering (or not a ZooKeeper): assume it isn't running.
            return false;
        }
    }

    /**
     * Creates or updates roles in the locally running EmoDB from the given permissions
     * YAML files (each a map of role identifier to a list of permission strings).
     * Failures are reported to stderr but never abort the launcher.
     */
    private static void setPermissionsFromFiles(List<String> permissionsYamls, String emoConfigYamlPath) {
        if (permissionsYamls.isEmpty()) {
            return;
        }

        ObjectMapper objectMapper = EmoServiceObjectMapperFactory.configure(
                CustomJsonObjectMapperFactory.build(new YAMLFactory()));

        EmoConfiguration emoConfig;
        try {
            emoConfig = objectMapper.readValue(new File(emoConfigYamlPath), EmoConfiguration.class);
        } catch (Exception e) {
            System.err.println("Failed to load EmoDB configuration from file " + emoConfigYamlPath);
            e.printStackTrace(System.err);
            return;
        }

        final String cluster = emoConfig.getCluster();
        final MetricRegistry metricRegistry = new MetricRegistry();

        // Easiest path to get server port and API key decryptor for the admin API key is to use the same Guice injection
        // modules as the server.
        Module module = new AbstractModule() {
            @Override
            protected void configure() {
                bind(String.class).annotatedWith(ServerCluster.class).toInstance(cluster);
                bind(ServerFactory.class).toInstance(emoConfig.getServerFactory());
                bind(ApiKeyEncryption.class).asEagerSingleton();
                install(new SelfHostAndPortModule());
            }
        };

        Injector injector = Guice.createInjector(module);
        HostAndPort selfHostAndPort = injector.getInstance(Key.get(HostAndPort.class, SelfHostAndPort.class));
        ApiKeyEncryption apiKeyEncryption = injector.getInstance(ApiKeyEncryption.class);

        String adminApiKey = emoConfig.getAuthorizationConfiguration().getAdminApiKey();
        try {
            adminApiKey = apiKeyEncryption.decrypt(adminApiKey);
        } catch (Exception e) {
            // Only propagate if the key looks encrypted; a plaintext key is used as-is.
            if (ApiKeyEncryption.isPotentiallyEncryptedApiKey(adminApiKey)) {
                throw e;
            }
        }

        // Create a client for the local EmoDB service
        UserAccessControl uac = ServicePoolBuilder.create(UserAccessControl.class)
                .withHostDiscoverySource(new UserAccessControlFixedHostDiscoverySource("http://localhost:" + selfHostAndPort.getPort()))
                .withServiceFactory(UserAccessControlClientFactory.forCluster(cluster, metricRegistry).usingCredentials(adminApiKey))
                .withMetricRegistry(metricRegistry)
                .buildProxy(new ExponentialBackoffRetry(5, 50, 1000, TimeUnit.MILLISECONDS));

        try {
            for (String permissionsYaml : permissionsYamls) {
                Map<String, List<String>> permissions;
                try {
                    permissions = objectMapper.readValue(new File(permissionsYaml), new TypeReference<Map<String, List<String>>>() {});
                } catch (Exception e) {
                    System.err.println("Failed to load permissions from file " + permissionsYaml);
                    e.printStackTrace(System.err);
                    return;
                }

                // Use the client to create or update all roles with permissions from the file
                for (Map.Entry<String, List<String>> entry : permissions.entrySet()) {
                    RoleIdentifier roleIdentifier = RoleIdentifier.fromString(entry.getKey());
                    EmoRoleKey roleKey = new EmoRoleKey(roleIdentifier.getGroup(), roleIdentifier.getId());
                    if (uac.getRole(roleKey) == null) {
                        uac.createRole(new CreateEmoRoleRequest(roleKey)
                                .setPermissions(ImmutableSet.copyOf(entry.getValue())));
                    } else {
                        uac.updateRole(new UpdateEmoRoleRequest(roleKey)
                                .grantPermissions(ImmutableSet.copyOf(entry.getValue())));
                    }
                }
            }
        } finally {
            ServicePoolProxies.close(uac);
        }
    }
}
| |
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.master;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import org.joda.beans.Bean;
import org.joda.beans.BeanBuilder;
import org.joda.beans.BeanDefinition;
import org.joda.beans.JodaBeanUtils;
import org.joda.beans.MetaProperty;
import org.joda.beans.Property;
import org.joda.beans.PropertyDefinition;
import org.joda.beans.impl.direct.DirectBean;
import org.joda.beans.impl.direct.DirectMetaBean;
import org.joda.beans.impl.direct.DirectMetaProperty;
import org.joda.beans.impl.direct.DirectMetaPropertyMap;
import com.opengamma.util.PublicSPI;
import com.opengamma.util.paging.Paging;
/**
 * Result providing a list of documents with paging.
 * <p>
 * Subclasses fix the concrete document type.  The {@code paging} property
 * describes which slice of the complete result set the {@code documents}
 * list represents.
 *
 * @param <D> the type of the document
 */
@PublicSPI
@BeanDefinition
public abstract class AbstractDocumentsResult<D extends AbstractDocument> extends DirectBean {

  /**
   * The paging information, not null if correctly created.
   */
  @PropertyDefinition
  private Paging _paging;
  /**
   * The documents, not null.
   */
  @PropertyDefinition
  private final List<D> _documents = new ArrayList<D>();

  /**
   * Creates an instance.
   */
  public AbstractDocumentsResult() {
  }

  /**
   * Creates an instance treating the supplied collection as the complete
   * result set (paging covers all elements).
   * @param coll  the collection of documents to add, not null
   */
  public AbstractDocumentsResult(Collection<D> coll) {
    _documents.addAll(coll);
    _paging = Paging.ofAll(coll);
  }

  //-------------------------------------------------------------------------
  /**
   * Gets the first document, or null if no documents.
   * @return the first document, null if none
   */
  public D getFirstDocument() {
    return getDocuments().size() > 0 ? getDocuments().get(0) : null;
  }

  // NOTE: everything between the AUTOGENERATED markers below is produced by the
  // Joda-Beans code generator from the @PropertyDefinition fields above.
  // Do not edit it by hand; change the annotated fields and regenerate instead.
  //------------------------- AUTOGENERATED START -------------------------
  ///CLOVER:OFF
  /**
   * The meta-bean for {@code AbstractDocumentsResult}.
   * @return the meta-bean, not null
   */
  @SuppressWarnings("rawtypes")
  public static AbstractDocumentsResult.Meta meta() {
    return AbstractDocumentsResult.Meta.INSTANCE;
  }

  /**
   * The meta-bean for {@code AbstractDocumentsResult}.
   * @param <R> the bean's generic type
   * @param cls  the bean's generic type
   * @return the meta-bean, not null
   */
  @SuppressWarnings("unchecked")
  public static <R extends AbstractDocument> AbstractDocumentsResult.Meta<R> metaAbstractDocumentsResult(Class<R> cls) {
    return AbstractDocumentsResult.Meta.INSTANCE;
  }

  static {
    JodaBeanUtils.registerMetaBean(AbstractDocumentsResult.Meta.INSTANCE);
  }

  @SuppressWarnings("unchecked")
  @Override
  public AbstractDocumentsResult.Meta<D> metaBean() {
    return AbstractDocumentsResult.Meta.INSTANCE;
  }

  //-----------------------------------------------------------------------
  /**
   * Gets the paging information, not null if correctly created.
   * @return the value of the property
   */
  public Paging getPaging() {
    return _paging;
  }

  /**
   * Sets the paging information, not null if correctly created.
   * @param paging  the new value of the property
   */
  public void setPaging(Paging paging) {
    this._paging = paging;
  }

  /**
   * Gets the the {@code paging} property.
   * @return the property, not null
   */
  public final Property<Paging> paging() {
    return metaBean().paging().createProperty(this);
  }

  //-----------------------------------------------------------------------
  /**
   * Gets the documents, not null.
   * @return the value of the property, not null
   */
  public List<D> getDocuments() {
    return _documents;
  }

  /**
   * Sets the documents, not null.
   * Replaces the current contents with a copy of the supplied list.
   * @param documents  the new value of the property, not null
   */
  public void setDocuments(List<D> documents) {
    JodaBeanUtils.notNull(documents, "documents");
    this._documents.clear();
    this._documents.addAll(documents);
  }

  /**
   * Gets the the {@code documents} property.
   * @return the property, not null
   */
  public final Property<List<D>> documents() {
    return metaBean().documents().createProperty(this);
  }

  //-----------------------------------------------------------------------
  @Override
  public AbstractDocumentsResult<D> clone() {
    return JodaBeanUtils.cloneAlways(this);
  }

  @Override
  public boolean equals(Object obj) {
    if (obj == this) {
      return true;
    }
    if (obj != null && obj.getClass() == this.getClass()) {
      AbstractDocumentsResult<?> other = (AbstractDocumentsResult<?>) obj;
      return JodaBeanUtils.equal(getPaging(), other.getPaging()) &&
          JodaBeanUtils.equal(getDocuments(), other.getDocuments());
    }
    return false;
  }

  @Override
  public int hashCode() {
    int hash = getClass().hashCode();
    hash = hash * 31 + JodaBeanUtils.hashCode(getPaging());
    hash = hash * 31 + JodaBeanUtils.hashCode(getDocuments());
    return hash;
  }

  @Override
  public String toString() {
    StringBuilder buf = new StringBuilder(96);
    buf.append("AbstractDocumentsResult{");
    int len = buf.length();
    toString(buf);
    if (buf.length() > len) {
      // Trim the trailing ", " appended by toString(StringBuilder).
      buf.setLength(buf.length() - 2);
    }
    buf.append('}');
    return buf.toString();
  }

  protected void toString(StringBuilder buf) {
    buf.append("paging").append('=').append(JodaBeanUtils.toString(getPaging())).append(',').append(' ');
    buf.append("documents").append('=').append(JodaBeanUtils.toString(getDocuments())).append(',').append(' ');
  }

  //-----------------------------------------------------------------------
  /**
   * The meta-bean for {@code AbstractDocumentsResult}.
   * @param <D> the type
   */
  public static class Meta<D extends AbstractDocument> extends DirectMetaBean {
    /**
     * The singleton instance of the meta-bean.
     */
    @SuppressWarnings("rawtypes")
    static final Meta INSTANCE = new Meta();

    /**
     * The meta-property for the {@code paging} property.
     */
    private final MetaProperty<Paging> _paging = DirectMetaProperty.ofReadWrite(
        this, "paging", AbstractDocumentsResult.class, Paging.class);
    /**
     * The meta-property for the {@code documents} property.
     */
    @SuppressWarnings({"unchecked", "rawtypes" })
    private final MetaProperty<List<D>> _documents = DirectMetaProperty.ofReadWrite(
        this, "documents", AbstractDocumentsResult.class, (Class) List.class);
    /**
     * The meta-properties.
     */
    private final Map<String, MetaProperty<?>> _metaPropertyMap$ = new DirectMetaPropertyMap(
        this, null,
        "paging",
        "documents");

    /**
     * Restricted constructor.
     */
    protected Meta() {
    }

    // Property dispatch keys off the hash code of the property name.
    @Override
    protected MetaProperty<?> metaPropertyGet(String propertyName) {
      switch (propertyName.hashCode()) {
        case -995747956:  // paging
          return _paging;
        case 943542968:  // documents
          return _documents;
      }
      return super.metaPropertyGet(propertyName);
    }

    @Override
    public BeanBuilder<? extends AbstractDocumentsResult<D>> builder() {
      throw new UnsupportedOperationException("AbstractDocumentsResult is an abstract class");
    }

    @SuppressWarnings({"unchecked", "rawtypes" })
    @Override
    public Class<? extends AbstractDocumentsResult<D>> beanType() {
      return (Class) AbstractDocumentsResult.class;
    }

    @Override
    public Map<String, MetaProperty<?>> metaPropertyMap() {
      return _metaPropertyMap$;
    }

    //-----------------------------------------------------------------------
    /**
     * The meta-property for the {@code paging} property.
     * @return the meta-property, not null
     */
    public final MetaProperty<Paging> paging() {
      return _paging;
    }

    /**
     * The meta-property for the {@code documents} property.
     * @return the meta-property, not null
     */
    public final MetaProperty<List<D>> documents() {
      return _documents;
    }

    //-----------------------------------------------------------------------
    @Override
    protected Object propertyGet(Bean bean, String propertyName, boolean quiet) {
      switch (propertyName.hashCode()) {
        case -995747956:  // paging
          return ((AbstractDocumentsResult<?>) bean).getPaging();
        case 943542968:  // documents
          return ((AbstractDocumentsResult<?>) bean).getDocuments();
      }
      return super.propertyGet(bean, propertyName, quiet);
    }

    @SuppressWarnings("unchecked")
    @Override
    protected void propertySet(Bean bean, String propertyName, Object newValue, boolean quiet) {
      switch (propertyName.hashCode()) {
        case -995747956:  // paging
          ((AbstractDocumentsResult<D>) bean).setPaging((Paging) newValue);
          return;
        case 943542968:  // documents
          ((AbstractDocumentsResult<D>) bean).setDocuments((List<D>) newValue);
          return;
      }
      super.propertySet(bean, propertyName, newValue, quiet);
    }

    @Override
    protected void validate(Bean bean) {
      JodaBeanUtils.notNull(((AbstractDocumentsResult<?>) bean)._documents, "documents");
    }

  }

  ///CLOVER:ON
  //-------------------------- AUTOGENERATED END --------------------------
}
| |
package org.littleshoot.proxy;
import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.lang.management.ManagementFactory;
import java.lang.management.MemoryUsage;
import java.lang.management.OperatingSystemMXBean;
import java.lang.reflect.Method;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.security.SecureRandom;
import java.security.cert.X509Certificate;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLException;
import javax.net.ssl.SSLSession;
import javax.net.ssl.SSLSocket;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.http.HttpHost;
import org.apache.http.client.HttpClient;
import org.apache.http.conn.params.ConnRoutePNames;
import org.apache.http.conn.scheme.Scheme;
import org.apache.http.conn.ssl.SSLSocketFactory;
import org.apache.http.conn.ssl.TrustSelfSignedStrategy;
import org.apache.http.conn.ssl.X509HostnameVerifier;
import org.apache.http.impl.client.DefaultHttpClient;
import org.eclipse.jetty.server.Request;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.handler.AbstractHandler;
import org.eclipse.jetty.server.ssl.SslSocketConnector;
import org.littleshoot.proxy.extras.SelfSignedSslEngineSource;
/**
 * Test helpers: embedded Jetty servers (plain and SSL), proxied HttpClient
 * instances, random free ports, and process resource diagnostics.
 */
public class TestUtils {

    private TestUtils() {
        // Static utility class; not instantiable.
    }

    /**
     * Creates and starts embedded web server that is running on given port.
     * Each response has a body that indicates how many bytes were received with
     * a message like "Received x bytes\n".
     *
     * @param port
     *            The port
     * @return Instance of Server
     * @throws Exception
     *             if failed to start
     */
    public static Server startWebServer(final int port) throws Exception {
        return startWebServer(port, null);
    }

    /**
     * Creates and starts embedded web server that is running on given port.
     * Each response has a body that contains the specified contents.
     *
     * @param port
     *            The port
     * @param content
     *            The response body the server will return
     * @return Instance of Server
     * @throws Exception
     *             if failed to start
     */
    public static Server startWebServerWithResponse(final int port, byte[] content) throws Exception {
        return startWebServerWithResponse(port, null, content);
    }

    /**
     * Creates and starts embedded web server that is running on given port,
     * including an SSL connector on the other given port. Each response has a
     * body that indicates how many bytes were received with a message like
     * "Received x bytes\n".
     *
     * @param port
     *            The port
     * @param sslPort
     *            (optional) The ssl port
     * @return Instance of Server
     * @throws Exception
     *             if failed to start
     */
    public static Server startWebServer(final int port, final Integer sslPort)
            throws Exception {
        return startServer(port, sslPort, null);
    }

    /**
     * Creates and starts embedded web server that is running on given port,
     * including an SSL connector on the other given port. Each response has a
     * body that contains the specified contents.
     *
     * @param port
     *            The port
     * @param sslPort
     *            (optional) The ssl port
     * @param content
     *            The response the server will return
     * @return Instance of Server
     * @throws Exception
     *             if failed to start
     */
    public static Server startWebServerWithResponse(final int port, final Integer sslPort, final byte[] content)
            throws Exception {
        return startServer(port, sslPort, content);
    }

    /**
     * Shared implementation behind the public startWebServer* variants.
     *
     * @param fixedContent
     *            the fixed response body, or null to echo the number of
     *            request bytes read as "Received x bytes\n"
     */
    private static Server startServer(final int port, final Integer sslPort, final byte[] fixedContent)
            throws Exception {
        final Server httpServer = new Server(port);
        httpServer.setHandler(new AbstractHandler() {
            public void handle(String target, Request baseRequest,
                    HttpServletRequest request, HttpServletResponse response)
                    throws IOException, ServletException {
                // Requests whose URI contains "hang" deliberately stall for 90s
                // so tests can exercise client-side timeout behavior.
                if (request.getRequestURI().contains("hang")) {
                    System.out.println("Hanging as requested");
                    try {
                        Thread.sleep(90000);
                    } catch (InterruptedException ie) {
                        System.out.println("Stopped hanging due to interruption");
                    }
                }

                // Consume the entire request body, counting the bytes.
                long numberOfBytesRead = 0;
                InputStream in = new BufferedInputStream(request.getInputStream());
                while (in.read() != -1) {
                    numberOfBytesRead += 1;
                }
                System.out.println("Done reading # of bytes: " + numberOfBytesRead);

                response.setStatus(HttpServletResponse.SC_OK);
                baseRequest.setHandled(true);
                byte[] content = fixedContent != null
                        ? fixedContent
                        : ("Received " + numberOfBytesRead + " bytes\n").getBytes();
                response.addHeader("Content-Length", Integer.toString(content.length));
                response.getOutputStream().write(content);
            }
        });
        if (sslPort != null) {
            addSslConnector(httpServer, sslPort);
        }
        httpServer.start();
        return httpServer;
    }

    /**
     * Adds an SSL connector (self-signed certificate) listening on the given
     * port to the server.
     */
    private static void addSslConnector(Server httpServer, int sslPort) {
        org.eclipse.jetty.util.ssl.SslContextFactory sslContextFactory = new org.eclipse.jetty.util.ssl.SslContextFactory();
        SelfSignedSslEngineSource contextSource = new SelfSignedSslEngineSource();
        SSLContext sslContext = contextSource.getSslContext();
        sslContextFactory.setSslContext(sslContext);
        SslSocketConnector connector = new SslSocketConnector(sslContextFactory);
        connector.setPort(sslPort);
        /*
         * <p>Ox: For some reason, on OS X, a non-zero timeout can causes
         * sporadic issues. <a href="http://stackoverflow.com/questions
         * /16191236/tomcat-startup-fails
         * -due-to-java-net-socketexception-invalid-argument-on-mac-o">This
         * StackOverflow thread</a> has some insights into it, but I don't
         * quite get it.</p>
         *
         * <p>This can cause problems with Jetty's SSL handshaking, so I
         * have to set the handshake timeout and the maxIdleTime to 0 so
         * that the SSLSocket has an infinite timeout.</p>
         */
        connector.setHandshakeTimeout(0);
        connector.setMaxIdleTime(0);
        httpServer.addConnector(connector);
    }

    /**
     * Creates instance HttpClient that is configured to use proxy server. The
     * proxy server should run on 127.0.0.1 and given port
     *
     * @param port
     *            the proxy port
     * @return instance of HttpClient
     */
    public static HttpClient createProxiedHttpClient(final int port)
            throws Exception {
        return createProxiedHttpClient(port, false);
    }

    /**
     * Creates instance HttpClient that is configured to use proxy server. The
     * proxy server should run on 127.0.0.1 and given port
     *
     * @param port
     *            the proxy port
     * @param supportSSL
     *            if true, client will support SSL connections to servers using
     *            self-signed certificates
     * @return instance of HttpClient
     */
    public static HttpClient createProxiedHttpClient(final int port,
            final boolean supportSSL) throws Exception {
        final HttpClient httpclient = new DefaultHttpClient();
        // Note: we use 127.0.0.1 here because on OS X, using straight up
        // localhost yields a connect exception.
        final HttpHost proxy = new HttpHost("127.0.0.1", port, "http");
        httpclient.getParams().setParameter(ConnRoutePNames.DEFAULT_PROXY,
                proxy);
        if (supportSSL) {
            // Trust self-signed certificates and skip all hostname verification.
            SSLSocketFactory sf = new SSLSocketFactory(
                    new TrustSelfSignedStrategy(),
                    new X509HostnameVerifier() {
                        public boolean verify(String arg0, SSLSession arg1) {
                            return true;
                        }

                        public void verify(String host, String[] cns,
                                String[] subjectAlts) throws SSLException {
                        }

                        public void verify(String host, X509Certificate cert)
                                throws SSLException {
                        }

                        public void verify(String host, SSLSocket ssl)
                                throws IOException {
                        }
                    });
            Scheme scheme = new Scheme("https", 443, sf);
            httpclient.getConnectionManager().getSchemeRegistry()
                    .register(scheme);
        }
        return httpclient;
    }

    /**
     * Finds a free port by securely choosing random candidate ports and
     * attempting to bind; falls back to an OS-assigned port after 20 tries.
     *
     * @return a port number that was free at the time of the check
     */
    public static int randomPort() {
        final SecureRandom secureRandom = new SecureRandom();
        for (int i = 0; i < 20; i++) {
            // The +1 on the random int is because
            // Math.abs(Integer.MIN_VALUE) == Integer.MIN_VALUE -- caught
            // by FindBugs.
            final int randomPort = 1024 + (Math.abs(secureRandom.nextInt() + 1) % 60000);
            ServerSocket sock = null;
            try {
                sock = new ServerSocket();
                sock.bind(new InetSocketAddress("127.0.0.1", randomPort));
                return sock.getLocalPort();
            } catch (final IOException e) {
                // Port in use (or otherwise unbindable); try another candidate.
            } finally {
                if (sock != null) {
                    try {
                        sock.close();
                    } catch (IOException e) {
                        // Ignore close failures on the probe socket.
                    }
                }
            }
        }

        // If we can't grab one of our securely chosen random ports, use
        // whatever port the OS assigns.
        ServerSocket sock = null;
        try {
            sock = new ServerSocket();
            sock.bind(null);
            return sock.getLocalPort();
        } catch (final IOException e) {
            // Last resort: return an unverified random candidate.
            return 1024 + (Math.abs(secureRandom.nextInt() + 1) % 60000);
        } finally {
            if (sock != null) {
                try {
                    sock.close();
                } catch (IOException e) {
                    // Ignore close failures on the probe socket.
                }
            }
        }
    }

    /**
     * Prints heap/non-heap memory usage and, on Sun/Oracle Unix JVMs, returns
     * the number of open file descriptors (0 on other platforms).
     *
     * @return the open file descriptor count, or 0 if unavailable
     */
    public static long getOpenFileDescriptorsAndPrintMemoryUsage() throws Exception {
        // Below courtesy of:
        // http://stackoverflow.com/questions/10999076/programmatically-print-the-heap-usage-that-is-typically-printed-on-jvm-exit-when
        MemoryUsage mu = ManagementFactory.getMemoryMXBean()
                .getHeapMemoryUsage();
        MemoryUsage muNH = ManagementFactory.getMemoryMXBean()
                .getNonHeapMemoryUsage();
        System.out.println("Init :" + mu.getInit() + "\nMax :" + mu.getMax()
                + "\nUsed :" + mu.getUsed() + "\nCommitted :"
                + mu.getCommitted() + "\nInit NH :" + muNH.getInit()
                + "\nMax NH :" + muNH.getMax() + "\nUsed NH:" + muNH.getUsed()
                + "\nCommitted NH:" + muNH.getCommitted());

        // Below courtesy of:
        // http://neopatel.blogspot.com/2011/05/java-count-open-file-handles.html
        OperatingSystemMXBean osStats = ManagementFactory
                .getOperatingSystemMXBean();
        long numberOfOpenFileDescriptors = 0;
        if (osStats.getClass().getName()
                .equals("com.sun.management.UnixOperatingSystem")) {
            // getOpenFileDescriptorCount is not part of the public MXBean
            // interface, so invoke it reflectively.
            Method method = osStats.getClass().getDeclaredMethod(
                    "getOpenFileDescriptorCount");
            method.setAccessible(true);
            numberOfOpenFileDescriptors = (Long) method.invoke(osStats);
            System.out.println("Open File Descriptors: "
                    + numberOfOpenFileDescriptors);
            method.setAccessible(false);
        }
        return numberOfOpenFileDescriptors;
    }
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law
* or agreed to in writing, software distributed under the License is
* distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*/
package org.apache.jackrabbit.oak.query;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.collect.Lists.newArrayList;
import static org.apache.jackrabbit.oak.query.ast.AstElementFactory.copyElementAndCheckReference;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import org.apache.jackrabbit.oak.api.PropertyValue;
import org.apache.jackrabbit.oak.api.Result.SizePrecision;
import org.apache.jackrabbit.oak.api.Tree;
import org.apache.jackrabbit.oak.namepath.JcrPathParser;
import org.apache.jackrabbit.oak.namepath.NamePathMapper;
import org.apache.jackrabbit.oak.plugins.index.counter.jmx.NodeCounter;
import org.apache.jackrabbit.oak.plugins.memory.PropertyValues;
import org.apache.jackrabbit.oak.query.QueryOptions.Traversal;
import org.apache.jackrabbit.oak.query.ast.AndImpl;
import org.apache.jackrabbit.oak.query.ast.AstVisitorBase;
import org.apache.jackrabbit.oak.query.ast.BindVariableValueImpl;
import org.apache.jackrabbit.oak.query.ast.ChildNodeImpl;
import org.apache.jackrabbit.oak.query.ast.ChildNodeJoinConditionImpl;
import org.apache.jackrabbit.oak.query.ast.CoalesceImpl;
import org.apache.jackrabbit.oak.query.ast.ColumnImpl;
import org.apache.jackrabbit.oak.query.ast.ComparisonImpl;
import org.apache.jackrabbit.oak.query.ast.ConstraintImpl;
import org.apache.jackrabbit.oak.query.ast.DescendantNodeImpl;
import org.apache.jackrabbit.oak.query.ast.DescendantNodeJoinConditionImpl;
import org.apache.jackrabbit.oak.query.ast.DynamicOperandImpl;
import org.apache.jackrabbit.oak.query.ast.EquiJoinConditionImpl;
import org.apache.jackrabbit.oak.query.ast.FirstImpl;
import org.apache.jackrabbit.oak.query.ast.FullTextSearchImpl;
import org.apache.jackrabbit.oak.query.ast.FullTextSearchScoreImpl;
import org.apache.jackrabbit.oak.query.ast.InImpl;
import org.apache.jackrabbit.oak.query.ast.JoinConditionImpl;
import org.apache.jackrabbit.oak.query.ast.JoinImpl;
import org.apache.jackrabbit.oak.query.ast.JoinType;
import org.apache.jackrabbit.oak.query.ast.LengthImpl;
import org.apache.jackrabbit.oak.query.ast.LiteralImpl;
import org.apache.jackrabbit.oak.query.ast.LowerCaseImpl;
import org.apache.jackrabbit.oak.query.ast.NativeFunctionImpl;
import org.apache.jackrabbit.oak.query.ast.NodeLocalNameImpl;
import org.apache.jackrabbit.oak.query.ast.NodeNameImpl;
import org.apache.jackrabbit.oak.query.ast.NotImpl;
import org.apache.jackrabbit.oak.query.ast.OrImpl;
import org.apache.jackrabbit.oak.query.ast.OrderingImpl;
import org.apache.jackrabbit.oak.query.ast.PathImpl;
import org.apache.jackrabbit.oak.query.ast.PropertyExistenceImpl;
import org.apache.jackrabbit.oak.query.ast.PropertyInexistenceImpl;
import org.apache.jackrabbit.oak.query.ast.PropertyValueImpl;
import org.apache.jackrabbit.oak.query.ast.SameNodeImpl;
import org.apache.jackrabbit.oak.query.ast.SameNodeJoinConditionImpl;
import org.apache.jackrabbit.oak.query.ast.SelectorImpl;
import org.apache.jackrabbit.oak.query.ast.SimilarImpl;
import org.apache.jackrabbit.oak.query.ast.SourceImpl;
import org.apache.jackrabbit.oak.query.ast.SpellcheckImpl;
import org.apache.jackrabbit.oak.query.ast.SuggestImpl;
import org.apache.jackrabbit.oak.query.ast.UpperCaseImpl;
import org.apache.jackrabbit.oak.query.index.FilterImpl;
import org.apache.jackrabbit.oak.query.index.TraversingIndex;
import org.apache.jackrabbit.oak.query.plan.ExecutionPlan;
import org.apache.jackrabbit.oak.query.plan.SelectorExecutionPlan;
import org.apache.jackrabbit.oak.query.stats.QueryStatsData.QueryExecutionStats;
import org.apache.jackrabbit.oak.spi.query.Filter;
import org.apache.jackrabbit.oak.spi.query.Filter.PathRestriction;
import org.apache.jackrabbit.oak.spi.query.QueryConstants;
import org.apache.jackrabbit.oak.spi.query.QueryIndex;
import org.apache.jackrabbit.oak.spi.query.QueryIndex.AdvancedQueryIndex;
import org.apache.jackrabbit.oak.spi.query.QueryIndex.IndexPlan;
import org.apache.jackrabbit.oak.spi.query.QueryIndex.OrderEntry;
import org.apache.jackrabbit.oak.spi.query.QueryIndex.OrderEntry.Order;
import org.apache.jackrabbit.oak.spi.query.QueryIndexProvider;
import org.apache.jackrabbit.oak.spi.state.NodeState;
import org.apache.jackrabbit.oak.spi.state.NodeStateUtils;
import org.apache.jackrabbit.oak.stats.HistogramStats;
import org.apache.jackrabbit.oak.stats.StatisticsProvider;
import org.apache.jackrabbit.oak.stats.StatsOptions;
import org.jetbrains.annotations.NotNull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Strings;
import com.google.common.collect.AbstractIterator;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Ordering;
/**
* Represents a parsed query.
*/
public class QueryImpl implements Query {
// Shared sentinel thrown when a query would expand into more than MAX_UNION union queries.
public static final UnsupportedOperationException TOO_MANY_UNION =
new UnsupportedOperationException("Too many union queries");
// Upper bound on the number of union queries; configurable via the "oak.sql2MaxUnion" system property.
public final static int MAX_UNION = Integer.getInteger("oak.sql2MaxUnion", 1000);
private static final Logger LOG = LoggerFactory.getLogger(QueryImpl.class);
// Metric name recorded when no suitable index is available for a query.
private static final String INDEX_UNAVAILABLE = "INDEX-UNAVAILABLE";
// Per-query execution statistics collector.
private final QueryExecutionStats stats;
// Ensures the "slow traversal" warning is logged at most once per query instance.
private boolean potentiallySlowTraversalQueryLogged;
// Orders indexes by ascending minimum cost, so cheap candidates are evaluated first.
private static final Ordering<QueryIndex> MINIMAL_COST_ORDERING = new Ordering<QueryIndex>() {
@Override
public int compare(QueryIndex left, QueryIndex right) {
return Double.compare(left.getMinimumCost(), right.getMinimumCost());
}
};
// The source (selector or join tree); may be replaced by prepare() for joins.
SourceImpl source;
private String statement;
// Bind variable name -> bound value (null until bindValue() is called).
final HashMap<String, PropertyValue> bindVariableMap = new HashMap<String, PropertyValue>();
/**
* The map of indexes (each selector uses one index)
*/
final HashMap<String, Integer> selectorIndexes = new HashMap<String, Integer>();
/**
* The list of selectors of this query. For a join, there can be multiple selectors.
*/
final ArrayList<SelectorImpl> selectors = new ArrayList<SelectorImpl>();
ConstraintImpl constraint;
/**
* Whether fallback to the traversing index is supported if no other index
* is available. This is enabled by default and can be disabled for testing
* purposes.
*/
private boolean traversalEnabled = true;
/**
* The query option to be used for this query.
*/
private QueryOptions queryOptions = new QueryOptions();
private OrderingImpl[] orderings;
private ColumnImpl[] columns;
/**
* The columns that make a row distinct. This is all columns
* except for "jcr:score".
*/
private boolean[] distinctColumns;
// Flags set from the query: EXPLAIN and MEASURE keywords.
private boolean explain, measure;
private boolean distinct;
// Result window; the defaults cover the whole result set.
private long limit = Long.MAX_VALUE;
private long offset;
// Known result size, or -1 if unknown (only known after an in-memory sort).
private long size = -1;
private boolean prepared;
private ExecutionContext context;
/**
* whether the object has been initialised or not
*/
private boolean init;
// Whether the chosen index already returns rows in the requested order.
private boolean isSortedByIndex;
private final NamePathMapper namePathMapper;
private double estimatedCost;
private final QueryEngineSettings settings;
// Ensures the "hidden tree traversed" warning is logged at most once.
private boolean warnedHidden;
// Internal (system-generated) queries log at a lower level; see logDebug().
private boolean isInternal;
private boolean potentiallySlowTraversalQuery;
/**
* Create a new query instance.
*
* @param statement the original query statement (may be null or empty; see getStatement())
* @param source the source (selector or join tree)
* @param constraint the "where" constraint, or null if none
* @param columns the columns to select
* @param mapper the name/path mapper for JCR-to-Oak conversion
* @param settings the query engine settings
* @param stats the execution statistics collector
*/
QueryImpl(String statement, SourceImpl source, ConstraintImpl constraint,
ColumnImpl[] columns, NamePathMapper mapper, QueryEngineSettings settings,
QueryExecutionStats stats) {
this.statement = statement;
this.source = source;
this.constraint = constraint;
this.columns = columns;
this.namePathMapper = mapper;
this.settings = settings;
this.stats = stats;
}
/**
* Initialise the query: simplify the constraint, wire every AST node back
* to this query, bind selectors, register selector names, and compute
* which columns make a row distinct. Must be called before prepare().
*/
@Override
public void init() {
final QueryImpl query = this;
if (constraint != null) {
// need to do this *before* the visitation below, as the
// simplify() method does not always keep the query reference
// passed in setQuery(). TODO: avoid that mutability concern
constraint = constraint.simplify();
}
// wire every AST node back to this query, and bind nodes that reference
// a selector to the source
new AstVisitorBase() {
@Override
public boolean visit(BindVariableValueImpl node) {
node.setQuery(query);
bindVariableMap.put(node.getBindVariableName(), null);
return true;
}
@Override
public boolean visit(ChildNodeImpl node) {
node.setQuery(query);
node.bindSelector(source);
return true;
}
@Override
public boolean visit(ChildNodeJoinConditionImpl node) {
node.setQuery(query);
node.bindSelector(source);
return true;
}
@Override
public boolean visit(CoalesceImpl node) {
node.setQuery(query);
return super.visit(node);
}
@Override
public boolean visit(FirstImpl node) {
node.setQuery(query);
return super.visit(node);
}
@Override
public boolean visit(ColumnImpl node) {
node.setQuery(query);
return true;
}
@Override
public boolean visit(DescendantNodeImpl node) {
node.setQuery(query);
node.bindSelector(source);
return true;
}
@Override
public boolean visit(DescendantNodeJoinConditionImpl node) {
node.setQuery(query);
node.bindSelector(source);
return true;
}
@Override
public boolean visit(EquiJoinConditionImpl node) {
node.setQuery(query);
node.bindSelector(source);
return true;
}
@Override
public boolean visit(FullTextSearchImpl node) {
node.setQuery(query);
node.bindSelector(source);
return super.visit(node);
}
@Override
public boolean visit(NativeFunctionImpl node) {
node.setQuery(query);
node.bindSelector(source);
return super.visit(node);
}
@Override
public boolean visit(SimilarImpl node) {
node.setQuery(query);
node.bindSelector(source);
return super.visit(node);
}
@Override
public boolean visit(SpellcheckImpl node) {
node.setQuery(query);
node.bindSelector(source);
return super.visit(node);
}
@Override
public boolean visit(SuggestImpl node) {
node.setQuery(query);
node.bindSelector(source);
return super.visit(node);
}
@Override
public boolean visit(FullTextSearchScoreImpl node) {
node.setQuery(query);
node.bindSelector(source);
return true;
}
@Override
public boolean visit(LiteralImpl node) {
node.setQuery(query);
return true;
}
@Override
public boolean visit(NodeLocalNameImpl node) {
node.setQuery(query);
node.bindSelector(source);
return true;
}
@Override
public boolean visit(NodeNameImpl node) {
node.setQuery(query);
node.bindSelector(source);
return true;
}
@Override
public boolean visit(PathImpl node) {
node.setQuery(query);
node.bindSelector(source);
return true;
}
@Override
public boolean visit(PropertyExistenceImpl node) {
node.setQuery(query);
node.bindSelector(source);
return true;
}
@Override
public boolean visit(PropertyInexistenceImpl node) {
node.setQuery(query);
node.bindSelector(source);
return true;
}
@Override
public boolean visit(PropertyValueImpl node) {
node.setQuery(query);
node.bindSelector(source);
return true;
}
@Override
public boolean visit(SameNodeImpl node) {
node.setQuery(query);
node.bindSelector(source);
return true;
}
@Override
public boolean visit(SameNodeJoinConditionImpl node) {
node.setQuery(query);
node.bindSelector(source);
return true;
}
@Override
public boolean visit(SelectorImpl node) {
// register the selector and remember its position in the list
String name = node.getSelectorName();
if (selectorIndexes.put(name, selectors.size()) != null) {
throw new IllegalArgumentException("Two selectors with the same name: " + name);
}
selectors.add(node);
node.setQuery(query);
return true;
}
@Override
public boolean visit(LengthImpl node) {
node.setQuery(query);
return super.visit(node);
}
@Override
public boolean visit(UpperCaseImpl node) {
node.setQuery(query);
return super.visit(node);
}
@Override
public boolean visit(LowerCaseImpl node) {
node.setQuery(query);
return super.visit(node);
}
@Override
public boolean visit(ComparisonImpl node) {
node.setQuery(query);
return super.visit(node);
}
@Override
public boolean visit(InImpl node) {
node.setQuery(query);
return super.visit(node);
}
@Override
public boolean visit(AndImpl node) {
node.setQuery(query);
return super.visit(node);
}
@Override
public boolean visit(OrImpl node) {
node.setQuery(query);
return super.visit(node);
}
@Override
public boolean visit(NotImpl node) {
node.setQuery(query);
return super.visit(node);
}
}.visit(this);
source.setQueryConstraint(constraint);
for (ColumnImpl column : columns) {
column.bindSelector(source);
}
// a row is distinct based on all columns except "jcr:score" and facets
distinctColumns = new boolean[columns.length];
for (int i = 0; i < columns.length; i++) {
ColumnImpl c = columns[i];
boolean distinct = true;
String propName = c.getPropertyName();
if (QueryConstants.JCR_SCORE.equals(propName) || propName.startsWith(QueryConstants.REP_FACET + "(")) {
distinct = false;
}
distinctColumns[i] = distinct;
}
init = true;
}
@Override
public ColumnImpl[] getColumns() {
return columns;
}
// the "where" constraint, or null if none
public ConstraintImpl getConstraint() {
return constraint;
}
// the "order by" clause, or null if none
public OrderingImpl[] getOrderings() {
return orderings;
}
// the source (selector or join tree); may be replaced by prepare() for joins
public SourceImpl getSource() {
return source;
}
/**
* Bind a value to a bind variable (see also getBindVariableValue).
*/
@Override
public void bindValue(String varName, PropertyValue value) {
bindVariableMap.put(varName, value);
}
@Override
public void setLimit(long limit) {
this.limit = limit;
}
@Override
public void setOffset(long offset) {
this.offset = offset;
}
// whether this is an "explain" query (return the plan instead of rows)
@Override
public void setExplain(boolean explain) {
this.explain = explain;
}
// whether this is a "measure" query (return read/scan counts instead of rows)
@Override
public void setMeasure(boolean measure) {
this.measure = measure;
}
@Override
public void setDistinct(boolean distinct) {
this.distinct = distinct;
}
/**
* Execute the query and return the (lazily evaluated) result.
*/
@Override
public ResultImpl executeQuery() {
return new ResultImpl(this);
}
/**
 * If one of the indexes wants a warning to be logged due to path mismatch,
 * then get the warning message. Otherwise, return null.
 *
 * @return null (in the normal case) or the comma-separated list of index
 *         plan names (if some index wants a warning to be logged)
 */
private String getWarningForPathFilterMismatch() {
    List<String> mismatchedPlans = new ArrayList<>();
    for (SelectorImpl selector : selectors) {
        if (selector.getExecutionPlan() != null
                && selector.getExecutionPlan().getIndexPlan() != null
                && selector.getExecutionPlan().getIndexPlan().logWarningForPathFilterMismatch()) {
            mismatchedPlans.add(selector.getExecutionPlan().getIndexPlanName());
        }
    }
    return mismatchedPlans.isEmpty() ? null : String.join(", ", mismatchedPlans);
}
/**
 * Forward any additional messages attached to the selected index plans to
 * the query log, each at the severity level requested by the index.
 */
private void logAdditionalMessages() {
    for (SelectorImpl selector : selectors) {
        if (selector.getExecutionPlan() == null
                || selector.getExecutionPlan().getIndexPlan() == null) {
            continue;
        }
        selector.getExecutionPlan().getIndexPlan().getAdditionalMessages().forEach((level, messages) -> {
            switch (level) {
            case TRACE:
                messages.forEach(LOG::trace);
                break;
            case DEBUG:
                messages.forEach(LOG::debug);
                break;
            case INFO:
                messages.forEach(LOG::info);
                break;
            case WARN:
                messages.forEach(LOG::warn);
                break;
            case ERROR:
                messages.forEach(LOG::error);
                break;
            }
        });
    }
}
/**
* Execute the query and iterate over the result rows. Handles "explain"
* (returns a single row with the plan), in-memory ordering, limit/offset,
* and "measure" (wraps the iterator to report read counts).
*/
@Override
public Iterator<ResultRowImpl> getRows() {
prepare();
String warn = getWarningForPathFilterMismatch();
if (warn != null) {
LOG.warn("Index definition of index used have path restrictions and query won't return nodes from " +
"those restricted paths; query={}, plan={}", statement, warn);
}
logAdditionalMessages();
if (explain) {
String plan = getPlan();
if (measure) {
plan += " cost: { " + getIndexCostInfo() + " }";
}
// the "explain" result has exactly two columns: the plan and the statement
columns = new ColumnImpl[] {
new ColumnImpl("explain", "plan", "plan"),
new ColumnImpl("explain", "statement", "statement")
};
ResultRowImpl r = new ResultRowImpl(this,
Tree.EMPTY_ARRAY,
new PropertyValue[] {
PropertyValues.newString(plan),
// remove "explain" keyword from query statement to produce explained statement
PropertyValues.newString(getStatement()
.replaceFirst("(?i)\\bexplain\\s+", ""))
},
null, null);
return Arrays.asList(r).iterator();
}
if (LOG.isDebugEnabled()) {
logDebug("query execute " + statement);
logDebug("query plan " + getPlan());
}
final RowIterator rowIt = new RowIterator(context.getBaseState());
Comparator<ResultRowImpl> orderBy;
if (isSortedByIndex) {
// the index already returns rows in the requested order
orderBy = null;
} else {
orderBy = ResultRowImpl.getComparator(orderings);
}
Iterator<ResultRowImpl> it =
FilterIterators.newCombinedFilter(rowIt, distinct, limit, offset, orderBy, settings);
if (orderBy != null) {
// this will force the rows to be read, so that the size is known
it.hasNext();
// we need the size, and there is no other way to get it right now
// but we also have to take limit and offset into account
long read = rowIt.getReadCount();
// we will ignore whatever is behind 'limit+offset'
read = Math.min(saturatedAdd(limit, offset), read);
// and we will skip 'offset' entries
read = Math.max(0, read - offset);
size = read;
}
if (measure) {
// return the measuring iterator delegating the readCounts to the rowIterator
it = new MeasuringIterator(this, it) {
@Override
protected void setColumns(ColumnImpl[] col) {
columns = col;
}
@Override
protected long getReadCount() {
return rowIt.getReadCount();
}
@Override
protected Map<String, Long> getSelectorScanCount() {
Map<String, Long> selectorReadCounts = Maps.newHashMap();
for (SelectorImpl selector : selectors) {
selectorReadCounts.put(selector.getSelectorName(), selector.getScanCount());
}
return selectorReadCounts;
}
};
}
return it;
}
/**
* Whether the result is already sorted by the index (no in-memory sort needed).
*/
@Override
public boolean isSortedByIndex() {
return isSortedByIndex;
}
/**
 * Whether the index chosen for the (single) selector already returns rows
 * in the order requested by the "order by" clause, so that no in-memory
 * sort is needed.
 *
 * @return true if the index sort order matches all orderings
 */
private boolean canSortByIndex() {
    // TODO add issue about order by optimization for multiple selectors
    if (orderings == null || selectors.size() != 1) {
        return false;
    }
    SelectorImpl selector = selectors.get(0);
    IndexPlan plan = selector.getExecutionPlan().getIndexPlan();
    if (plan == null) {
        return false;
    }
    List<OrderEntry> indexOrder = plan.getSortOrder();
    if (indexOrder == null || indexOrder.size() != orderings.length) {
        return false;
    }
    for (int i = 0; i < indexOrder.size(); i++) {
        OrderEntry entry = indexOrder.get(i);
        OrderingImpl ordering = orderings[i];
        // we only have one selector, so no need to check that
        // TODO support joins
        String propertyName = ordering.getOperand().getOrderEntryPropertyName(selector);
        if (propertyName == null || !propertyName.equals(entry.getPropertyName())) {
            // ordered by another property
            return false;
        }
        if (ordering.isDescending() != (entry.getOrder() == Order.DESCENDING)) {
            // ascending versus descending mismatch
            return false;
        }
    }
    return true;
}
// the textual query plan, as reported by the source
@Override
public String getPlan() {
return source.getPlan(context.getBaseState());
}
// per-index cost details, used by "explain" together with "measure"
@Override
public String getIndexCostInfo() {
return source.getIndexCostInfo(context.getBaseState());
}
// the estimated cost computed by prepare()
@Override
public double getEstimatedCost() {
return estimatedCost;
}
/**
* Prepare the query: select execution plans for all selectors. For joins,
* a greedy algorithm picks the cheapest join order and replaces the
* source with the re-ordered join tree. Idempotent.
*/
@Override
public void prepare() {
if (prepared) {
return;
}
prepared = true;
List<SourceImpl> sources = source.getInnerJoinSelectors();
List<JoinConditionImpl> conditions = source.getInnerJoinConditions();
if (sources.size() <= 1) {
// simple case (no join)
estimatedCost = source.prepare().getEstimatedCost();
isSortedByIndex = canSortByIndex();
return;
}
// use a greedy algorithm
SourceImpl result = null;
Set<SourceImpl> available = new HashSet<SourceImpl>();
// the query is only slow if all possible join orders are slow
// (in theory, due to using the greedy algorithm, a query might be considered
// slow even thought there is a plan that doesn't need to use traversal, but
// only for 3-way and higher joins, and only if traversal is considered very fast)
boolean isPotentiallySlowJoin = true;
while (sources.size() > 0) {
int bestIndex = 0;
double bestCost = Double.POSITIVE_INFINITY;
ExecutionPlan bestPlan = null;
SourceImpl best = null;
for (int i = 0; i < sources.size(); i++) {
SourceImpl test = buildJoin(result, sources.get(i), conditions);
if (test == null) {
// no join condition
continue;
}
ExecutionPlan testPlan = test.prepare();
double cost = testPlan.getEstimatedCost();
if (best == null || cost < bestCost) {
bestPlan = testPlan;
bestCost = cost;
bestIndex = i;
best = test;
}
if (!potentiallySlowTraversalQuery) {
isPotentiallySlowJoin = false;
}
test.unprepare();
}
// NOTE(review): if no source could be joined in this round (best == null),
// this removes sources.get(0) and then fails with a NullPointerException
// below - presumably the parser guarantees a connected join graph; verify
available.add(sources.remove(bestIndex));
result = best;
best.prepare(bestPlan);
}
potentiallySlowTraversalQuery = isPotentiallySlowJoin;
estimatedCost = result.prepare().getEstimatedCost();
source = result;
isSortedByIndex = canSortByIndex();
}
/**
 * Try to extend the partial join {@code result} with the selector
 * {@code last}, using the first join condition that only becomes
 * evaluatable once {@code last} is added.
 *
 * @param result the join built so far, or null if none yet
 * @param last the selector to add
 * @param conditions all inner-join conditions of the query
 * @return the new inner join, {@code last} itself if {@code result} is
 *         null, or null if no join condition connects the two
 */
private static SourceImpl buildJoin(SourceImpl result, SourceImpl last, List<JoinConditionImpl> conditions) {
    if (result == null) {
        return last;
    }
    Set<SourceImpl> previousSelectors = new HashSet<SourceImpl>(result.getInnerJoinSelectors());
    Set<SourceImpl> extendedSelectors = new HashSet<SourceImpl>(previousSelectors);
    extendedSelectors.add(last);
    for (JoinConditionImpl condition : conditions) {
        // pick a condition that can be evaluated now,
        // but could not be evaluated before adding "last"
        if (!condition.canEvaluate(previousSelectors) && condition.canEvaluate(extendedSelectors)) {
            return new JoinImpl(result, last, JoinType.INNER, condition);
        }
    }
    // no join condition connects "last" to the join built so far
    return null;
}
/**
* <b>!Test purpose only!</b>
*
* this creates a filter for the given query
*
* @param preparing whether the filter is created during the prepare phase
* @return the filter created by the source
*/
Filter createFilter(boolean preparing) {
return source.createFilter(preparing);
}
/**
* Abstract decorating iterator for measure queries. The iterator delegates to the underlying actual
* query iterator to lazily execute and return counts.
*/
abstract static class MeasuringIterator extends AbstractIterator<ResultRowImpl> {
// the actual query result iterator; fully consumed on first access
private Iterator<ResultRowImpl> delegate;
private Query query;
// the synthesized "measure" rows (one total row plus one row per selector)
private List<ResultRowImpl> results;
// whether the delegate has been consumed and the result rows built
private boolean init;
MeasuringIterator(Query query, Iterator<ResultRowImpl> delegate) {
this.query = query;
this.delegate = delegate;
results = Lists.newArrayList();
}
@Override
protected ResultRowImpl computeNext() {
if (!init) {
getRows();
}
if (!results.isEmpty()) {
return results.remove(0);
} else {
return endOfData();
}
}
// consume the delegate and build the measure result rows
void getRows() {
// run the query
while (delegate.hasNext()) {
delegate.next();
}
ColumnImpl[] columns = new ColumnImpl[] {
new ColumnImpl("measure", "selector", "selector"),
new ColumnImpl("measure", "scanCount", "scanCount")
};
setColumns(columns);
// first row: the total read count of the whole query
ResultRowImpl r = new ResultRowImpl(query,
Tree.EMPTY_ARRAY,
new PropertyValue[] {
PropertyValues.newString("query"),
PropertyValues.newLong(getReadCount())
},
null, null);
results.add(r);
// one row per selector with its scan count
Map<String, Long> selectorScanCount = getSelectorScanCount();
for (String selector : selectorScanCount.keySet()) {
r = new ResultRowImpl(query,
Tree.EMPTY_ARRAY,
new PropertyValue[] {
PropertyValues.newString(selector),
PropertyValues.newLong(selectorScanCount.get(selector)),
},
null, null);
results.add(r);
}
init = true;
}
/**
* Set the measure specific columns in the query object
* @param columns the measure specific columns
*/
protected abstract void setColumns(ColumnImpl[] columns);
/**
* Retrieve the selector scan count
* @return map of selector to scan count
*/
protected abstract Map<String, Long> getSelectorScanCount();
/**
* Retrieve the query read count
* @return count
*/
protected abstract long getReadCount();
/**
* Retrieves the actual query iterator
* @return the delegate
*/
protected Iterator<ResultRowImpl> getDelegate() {
return delegate;
}
}
/**
* An iterator over result rows.
*/
class RowIterator implements Iterator<ResultRowImpl> {
private final NodeState rootState;
// the prefetched row, or null if not yet fetched / already consumed
private ResultRowImpl current;
// started: source.execute() was called; end: no more rows
private boolean started, end;
// number of rows read so far (reported via getReadCount())
private long rowIndex;
RowIterator(NodeState rootState) {
this.rootState = rootState;
}
public long getReadCount() {
return rowIndex;
}
// advance to the next row matching the constraint, recording read statistics
private void fetchNext() {
if (end) {
return;
}
long nanos = System.nanoTime();
long oldIndex = rowIndex;
if (!started) {
source.execute(rootState);
started = true;
}
while (true) {
if (source.next()) {
if (constraint == null || constraint.evaluate()) {
current = currentRow();
rowIndex++;
break;
}
// the constraint can signal that no later row can match either
if (constraint != null && constraint.evaluateStop()) {
current = null;
end = true;
break;
}
} else {
current = null;
end = true;
break;
}
}
nanos = System.nanoTime() - nanos;
stats.read(rowIndex - oldIndex, rowIndex, nanos);
}
@Override
public boolean hasNext() {
if (end) {
return false;
}
if (current == null) {
fetchNext();
}
return !end;
}
@Override
public ResultRowImpl next() {
if (end) {
return null;
}
if (current == null) {
fetchNext();
}
ResultRowImpl r = current;
current = null;
return r;
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
}
/**
 * Build the result row for the current position of all selectors.
 *
 * @return the row holding the current trees, column values and (when
 *         sorting in memory) the values of the "order by" operands
 */
ResultRowImpl currentRow() {
    Tree[] trees = new Tree[selectors.size()];
    for (int i = 0; i < trees.length; i++) {
        trees[i] = selectors.get(i).currentTree();
    }
    PropertyValue[] values = new PropertyValue[columns.length];
    for (int i = 0; i < values.length; i++) {
        values[i] = columns[i].currentProperty();
    }
    PropertyValue[] orderValues;
    if (orderings == null) {
        orderValues = null;
    } else {
        orderValues = new PropertyValue[orderings.length];
        for (int i = 0; i < orderValues.length; i++) {
            orderValues[i] = orderings[i].getOperand().currentProperty();
        }
    }
    return new ResultRowImpl(this, trees, values, distinctColumns, orderValues);
}
/**
 * Get the index of the given selector within the selector list.
 *
 * @param selectorName the selector name
 * @return the index
 * @throws IllegalArgumentException if the selector is unknown
 */
@Override
public int getSelectorIndex(String selectorName) {
    Integer selectorIndex = selectorIndexes.get(selectorName);
    if (selectorIndex != null) {
        return selectorIndex;
    }
    throw new IllegalArgumentException("Unknown selector: " + selectorName);
}
/**
* Get the index of the column with the given name, or -1 if not found.
*/
@Override
public int getColumnIndex(String columnName) {
return getColumnIndex(columns, columnName);
}
/**
 * Find the index of the column with the given name.
 *
 * @param columns the columns to search
 * @param columnName the column name to look for
 * @return the index of the first match, or -1 if not found
 */
static int getColumnIndex(ColumnImpl[] columns, String columnName) {
    for (int index = 0; index < columns.length; index++) {
        String candidate = columns[index].getColumnName();
        if (candidate != null && candidate.equals(columnName)) {
            return index;
        }
    }
    return -1;
}
/**
 * Get the value bound to the given bind variable.
 *
 * @param bindVariableName the variable name
 * @return the bound value
 * @throws IllegalArgumentException if no value was bound
 */
public PropertyValue getBindVariableValue(String bindVariableName) {
    PropertyValue value = bindVariableMap.get(bindVariableName);
    if (value != null) {
        return value;
    }
    throw new IllegalArgumentException("Bind variable value not set: " + bindVariableName);
}
/**
 * Get all selector names, in reverse declaration order (for XPath, the
 * first selector has to match the node iterator).
 *
 * @return the selector names
 */
@Override
public String[] getSelectorNames() {
    int count = selectors.size();
    String[] names = new String[count];
    for (int i = 0; i < count; i++) {
        // fill in reverse order, so that for xpath
        // the first selector is the same as the node iterator
        names[count - 1 - i] = selectors.get(i).getSelectorName();
    }
    return names;
}
@Override
public List<String> getBindVariableNames() {
return new ArrayList<String>(bindVariableMap.keySet());
}
// disable fallback to the traversing index (used for testing)
@Override
public void setTraversalEnabled(boolean traversalEnabled) {
this.traversalEnabled = traversalEnabled;
}
@Override
public void setQueryOptions(QueryOptions options) {
this.queryOptions = options;
}
/**
* Select the best execution plan for one selector, using the current
* execution context (base state and index provider).
*/
public SelectorExecutionPlan getBestSelectorExecutionPlan(FilterImpl filter) {
return getBestSelectorExecutionPlan(context.getBaseState(), filter,
context.getIndexProvider(), traversalEnabled);
}
/**
 * Select the best (cheapest) index for the given filter. All available
 * query indexes are evaluated in order of ascending minimum cost, so the
 * search can stop early once no remaining index can beat the best cost
 * found so far. If enabled, the traversing index is considered as a
 * fallback. When two candidates end up with very similar costs, a debug
 * message is logged so index definitions can be reviewed.
 *
 * Fix: the "further evaluation skipped" debug message claimed
 * {@code minCost < bestCost}, but that branch is only reached when
 * {@code minCost > bestCost}; the message now shows the correct relation.
 *
 * @param rootState the root node state to evaluate costs against
 * @param filter the filter for the selector
 * @param indexProvider the provider of available query indexes
 * @param traversalEnabled whether the traversing index may be used as fallback
 * @return the chosen plan (index, index plan and cost) for the selector
 */
private SelectorExecutionPlan getBestSelectorExecutionPlan(
        NodeState rootState, FilterImpl filter,
        QueryIndexProvider indexProvider, boolean traversalEnabled) {
    QueryIndex bestIndex = null;
    if (LOG.isDebugEnabled()) {
        logDebug("cost using filter " + filter);
    }
    double bestCost = Double.POSITIVE_INFINITY;
    IndexPlan bestPlan = null;
    // track the runner-up, to warn when two indexes have very similar costs
    QueryIndex almostBestIndex = null;
    double almostBestCost = Double.POSITIVE_INFINITY;
    IndexPlan almostBestPlan = null;
    // Sort the indexes according to their minimum cost to be able to skip the remaining indexes if the cost of the
    // current index is below the minimum cost of the next index.
    List<? extends QueryIndex> queryIndexes = MINIMAL_COST_ORDERING
            .sortedCopy(indexProvider.getQueryIndexes(rootState));
    List<OrderEntry> sortOrder = getSortOrder(filter);
    for (int i = 0; i < queryIndexes.size(); i++) {
        QueryIndex index = queryIndexes.get(i);
        double minCost = index.getMinimumCost();
        // note: minCost > bestCost implies bestCost is finite, so bestIndex is non-null here
        if (minCost > bestCost) {
            if (Math.abs(minCost - bestIndex.getMinimumCost()) < .00001) {
                // Continue with cost evaluation if minimum cost of both indexes are same i.e both indexes are on par.
                LOG.debug("minCost: {} of index :{} > best Cost: {} from index: {}, but both indexes have same minimum cost - cost evaluation will continue", minCost, index.getIndexName(), bestCost, bestIndex.getIndexName());
            } else {
                // Stop looking if the minimum cost is higher than the current best cost
                LOG.debug("minCost: {} of index :{} > best Cost: {} from index: {}. Further index evaluation will be skipped", minCost, index.getIndexName(), bestCost, bestIndex.getIndexName());
                break;
            }
        }
        double cost;
        String indexName = index.getIndexName();
        IndexPlan indexPlan = null;
        if (index instanceof AdvancedQueryIndex) {
            AdvancedQueryIndex advIndex = (AdvancedQueryIndex) index;
            long maxEntryCount = limit;
            if (offset > 0) {
                if (offset + limit < 0) {
                    // long overflow
                    maxEntryCount = Long.MAX_VALUE;
                } else {
                    maxEntryCount = offset + limit;
                }
            }
            List<IndexPlan> ipList = advIndex.getPlans(
                    filter, sortOrder, rootState);
            cost = Double.POSITIVE_INFINITY;
            for (IndexPlan p : ipList) {
                long entryCount = p.getEstimatedEntryCount();
                if (p.getSupportsPathRestriction()) {
                    entryCount = scaleEntryCount(rootState, filter, entryCount);
                }
                if (sortOrder == null || p.getSortOrder() != null) {
                    // if the query is unordered, or
                    // if the query contains "order by" and the index can sort on that,
                    // then we don't need to read all entries from the index
                    entryCount = Math.min(maxEntryCount, entryCount);
                }
                double c = p.getCostPerExecution() + entryCount * p.getCostPerEntry();
                if (LOG.isDebugEnabled()) {
                    String plan = advIndex.getPlanDescription(p, rootState);
                    String msg = String.format("cost for [%s] of type (%s) with plan [%s] is %1.2f", p.getPlanName(), indexName, plan, c);
                    logDebug(msg);
                }
                if (c < bestCost) {
                    almostBestCost = bestCost;
                    almostBestIndex = bestIndex;
                    almostBestPlan = bestPlan;
                    bestCost = c;
                    bestIndex = index;
                    bestPlan = p;
                } else if (c - bestCost <= 0.1) {
                    almostBestCost = c;
                    almostBestIndex = index;
                    almostBestPlan = p;
                }
            }
            // NOTE(review): indexPlan is never assigned in this branch, so this
            // condition can currently not be true - presumably bestPlan was
            // intended; left unchanged to preserve behavior
            if (indexPlan != null && indexPlan.getPlanName() != null) {
                indexName += "[" + indexPlan.getPlanName() + "]";
            }
        } else {
            cost = index.getCost(filter, rootState);
        }
        if (LOG.isDebugEnabled()) {
            logDebug("cost for " + indexName + " is " + cost);
        }
        if (cost < 0) {
            LOG.error("cost below 0 for " + indexName + " is " + cost);
        }
        if (cost < bestCost) {
            almostBestCost = bestCost;
            almostBestIndex = bestIndex;
            bestCost = cost;
            bestIndex = index;
            bestPlan = indexPlan;
        } else if (cost - bestCost <= 0.1) {
            almostBestCost = cost;
            almostBestIndex = index;
        }
    }
    if (LOG.isDebugEnabled() && Math.abs(bestCost - almostBestCost) <= 0.1) {
        String msg = (bestPlan != null && almostBestPlan != null) ? String.format("selected index %s with plan %s and %s with plan %s have similar costs %s and %s for query %s - " +
                "check query explanation / index definitions",
                bestIndex, bestPlan.getPlanName(), almostBestIndex, almostBestPlan.getPlanName(), bestCost, almostBestCost, filter.toString())
                :String.format("selected index %s and %s have similar costs %s and %s for query %s - check query explanation / index definitions",
                bestIndex, almostBestIndex, bestCost, almostBestCost, filter.toString());
        LOG.debug(msg);
    }
    potentiallySlowTraversalQuery = bestIndex == null;
    if (traversalEnabled) {
        TraversingIndex traversal = new TraversingIndex();
        double cost = traversal.getCost(filter, rootState);
        if (LOG.isDebugEnabled()) {
            logDebug("cost for " + traversal.getIndexName() + " is " + cost);
        }
        if (cost < bestCost || bestCost == Double.POSITIVE_INFINITY) {
            bestCost = cost;
            bestPlan = null;
            bestIndex = traversal;
            if (potentiallySlowTraversalQuery) {
                potentiallySlowTraversalQuery = traversal.isPotentiallySlow(filter, rootState);
            }
        }
    }
    if (potentiallySlowTraversalQuery || bestIndex == null) {
        // Log warning for fulltext queries without index, since these cannot return results
        if(!filter.getFulltextConditions().isEmpty()) {
            LOG.warn("Fulltext query without index for filter {}; no results will be returned", filter);
        } else {
            LOG.debug("no proper index was found for filter {}", filter);
        }
        StatisticsProvider statisticsProvider = getSettings().getStatisticsProvider();
        if (statisticsProvider != null) {
            HistogramStats histogram = statisticsProvider.getHistogram(INDEX_UNAVAILABLE, StatsOptions.METRICS_ONLY);
            if (histogram != null) {
                histogram.update(1);
            }
        }
    }
    return new SelectorExecutionPlan(filter.getSelector(), bestIndex,
            bestPlan, bestCost);
}
/**
 * Scale down an index's estimated entry count when the filter restricts
 * the query to a subtree ("//" below some path), using node counter
 * estimates of the subtree size versus the whole repository.
 *
 * @param rootState the root node state
 * @param filter the filter (only ALL_CHILDREN path restrictions are scaled)
 * @param count the estimated entry count reported by the index
 * @return the (possibly reduced) entry count
 */
private long scaleEntryCount(NodeState rootState, FilterImpl filter, long count) {
    if (filter.getPathRestriction() != PathRestriction.ALL_CHILDREN) {
        return count;
    }
    String path = filter.getPath();
    if (path.startsWith(JoinConditionImpl.SPECIAL_PATH_PREFIX)) {
        // the path is not yet known (join), could be anywhere including the root
        return count;
    }
    long subtreeCount = NodeCounter.getEstimatedNodeCount(rootState, path, true);
    if (subtreeCount < 0) {
        // no estimate available for the subtree
        return count;
    }
    long totalCount = Math.max(1, NodeCounter.getEstimatedNodeCount(rootState, "/", true));
    // same logic as for the property index (see ContentMirrorStoreStrategy):
    // assume indexed nodes are evenly distributed over the repository (old idea)
    long evenlyDistributedShare = (long) ((double) count / totalCount * subtreeCount);
    // assume at most 80% of the indexed nodes are in this subtree
    long subtreeUpperBound = (long) (subtreeCount * 0.8);
    // the count can be at most the assumed subtree size; the lower bound is
    // a safety net in case the estimates above are off
    return Math.max(Math.min(count, subtreeUpperBound), evenlyDistributedShare);
}
// whether the query would traverse (no suitable index); see verifyNotPotentiallySlow()
@Override
public boolean isPotentiallySlow() {
return potentiallySlowTraversalQuery;
}
/**
 * Fail or warn if this is a traversal query (no index available), depending
 * on the query's traversal option and the engine-wide fail-traversal
 * setting.
 *
 * @throws IllegalArgumentException if traversal queries are configured to fail
 */
@Override
public void verifyNotPotentiallySlow() {
    if (!potentiallySlowTraversalQuery) {
        return;
    }
    QueryOptions.Traversal traversal = queryOptions.traversal;
    if (traversal == Traversal.DEFAULT) {
        // not explicitly set in the query: use the configured default
        traversal = settings.getFailTraversal() ? Traversal.FAIL : Traversal.WARN;
    }
    String message = "Traversal query (query without index): " + statement + "; consider creating an index";
    switch (traversal) {
    case OK:
        break;
    case WARN:
        if (!potentiallySlowTraversalQueryLogged) {
            LOG.warn(message);
            potentiallySlowTraversalQueryLogged = true;
        }
        break;
    case FAIL:
        LOG.debug(message);
        throw new IllegalArgumentException(message);
    case DEFAULT:
        // not possible (replaced by either FAIL or WARN above)
        throw new AssertionError();
    }
}
/**
 * Compute the sort order to pass to the index, derived from the "order by"
 * clause. Orderings whose operand cannot be mapped to an order entry are
 * skipped.
 *
 * @param filter the filter (provides the selector)
 * @return the sort order, or null if there is no usable ordering
 */
private List<OrderEntry> getSortOrder(FilterImpl filter) {
    if (orderings == null) {
        return null;
    }
    List<OrderEntry> sortOrder = new ArrayList<OrderEntry>();
    for (OrderingImpl ordering : orderings) {
        OrderEntry entry = ordering.getOperand().getOrderEntry(filter.getSelector(), ordering);
        if (entry != null) {
            sortOrder.add(entry);
        }
    }
    return sortOrder.isEmpty() ? null : sortOrder;
}
// internal (system-generated) queries log at trace level to reduce noise
private void logDebug(String msg) {
if (isInternal) {
LOG.trace(msg);
} else {
LOG.debug(msg);
}
}
@Override
public void setExecutionContext(ExecutionContext context) {
this.context = context;
}
@Override
public void setOrderings(OrderingImpl[] orderings) {
this.orderings = orderings;
}
public NamePathMapper getNamePathMapper() {
return namePathMapper;
}
/**
* Get the tree at the given path, or null for hidden (internal) paths.
* A warning is logged at most once per query when a hidden tree is requested.
*/
@Override
public Tree getTree(String path) {
if (NodeStateUtils.isHiddenPath(path)) {
if (!warnedHidden) {
warnedHidden = true;
LOG.warn("Hidden tree traversed: {}", path);
}
return null;
}
return context.getRoot().getTree(path);
}
// true for "explain" and/or "measure" queries
@Override
public boolean isMeasureOrExplainEnabled() {
return explain || measure;
}
/**
* Validate the path is syntactically correct, and convert it to an Oak
* internal path (including namespace remapping if needed).
*
* @param path the path, or null
* @return the converted path, or null if the input was null
* @throws IllegalArgumentException if the path is invalid or uses an
*         unknown namespace prefix
*/
public String getOakPath(String path) {
if (path == null) {
return null;
}
if (!JcrPathParser.validate(path)) {
throw new IllegalArgumentException("Invalid path: " + path);
}
String p = namePathMapper.getOakPath(path);
if (p == null) {
throw new IllegalArgumentException("Invalid path or namespace prefix: " + path);
}
return p;
}
/**
 * Recompose an SQL-2 style statement from the parsed representation
 * (select list, source, constraint and orderings).
 */
@Override
public String toString() {
    StringBuilder buff = new StringBuilder("select ");
    String separator = "";
    for (ColumnImpl column : columns) {
        buff.append(separator).append(column);
        separator = ", ";
    }
    buff.append(" from ").append(source);
    if (constraint != null) {
        buff.append(" where ").append(constraint);
    }
    if (orderings != null) {
        buff.append(" order by ");
        separator = "";
        for (OrderingImpl ordering : orderings) {
            buff.append(separator).append(ordering);
            separator = ", ";
        }
    }
    return buff.toString();
}
// the exact result size, or -1 if unknown (only known after an in-memory sort)
@Override
public long getSize() {
return size;
}
/**
* Get the (possibly estimated) result size, capped at the limit.
*/
@Override
public long getSize(SizePrecision precision, long max) {
// Note: DISTINCT is ignored
if (size != -1) {
// "order by" was used, so we know the size
return size;
}
return Math.min(limit, source.getSize(context.getBaseState(), precision, max));
}
/**
 * Get the query statement; if none was set, recompose one from the parsed
 * representation.
 *
 * @return the original statement, or the recomposed one if null or empty
 */
@Override
public String getStatement() {
    return statement == null || statement.isEmpty() ? toString() : statement;
}
public QueryEngineSettings getSettings() {
return settings;
}
// mark system-generated queries so they log at a lower level (see logDebug)
@Override
public void setInternal(boolean isInternal) {
this.isInternal = isInternal;
}
public ExecutionContext getExecutionContext() {
return context;
}
/**
 * Add two values, but don't let it overflow or underflow.
 *
 * @param x the first value
 * @param y the second value
 * @return the sum, or Long.MIN_VALUE for underflow, or Long.MAX_VALUE for
 *         overflow
 */
public static long saturatedAdd(long x, long y) {
    try {
        return Math.addExact(x, y);
    } catch (ArithmeticException overflow) {
        // overflow is only possible when both operands have the same sign,
        // so saturate in that direction
        return x > 0 ? Long.MAX_VALUE : Long.MIN_VALUE;
    }
}
/**
 * Try to convert this query's constraint into a UNION of simpler queries,
 * one per OR branch. Returns this query unchanged when the constraint is
 * null, cannot be split, or splits into a single branch.
 *
 * @return a union query covering the same results, or {@code this}
 */
@Override
public Query buildAlternativeQuery() {
Query result = this;
if (constraint != null) {
Set<ConstraintImpl> unionList;
try {
unionList = constraint.convertToUnion();
} catch (UnsupportedOperationException e) {
// too many union
return this;
}
if (unionList.size() > 1) {
// there are some cases where multiple ORs simplify into a single one. If we get a
// union list of just one we don't really have to UNION anything.
QueryImpl left = null;
Query right = null;
// we have something to do here.
// Folds the branches right-to-left: after each iteration `left` holds the
// newest branch and `right` the union of all previously seen branches.
for (ConstraintImpl c : unionList) {
if (right != null) {
right = newAlternativeUnionQuery(left, right);
} else {
// pulling left to the right
if (left != null) {
right = left;
}
}
// cloning original query
left = (QueryImpl) this.copyOf();
// cloning the constraints and assigning to new query
left.constraint = (ConstraintImpl) copyElementAndCheckReference(c);
// re-composing the statement for better debug messages
left.statement = recomposeStatement(left);
}
result = newAlternativeUnionQuery(left, right);
}
}
return result;
}
/**
 * Rebuild the textual statement of the given query so it reflects the
 * (possibly rewritten) constraint, keeping the original projection and any
 * ORDER BY clause. Used to produce readable debug messages for union parts.
 *
 * @param query the query whose statement should be recomposed
 * @return the recomposed statement
 */
private static String recomposeStatement(@NotNull QueryImpl query) {
    checkNotNull(query);
    final String where = " WHERE ";
    final String orderBy = " ORDER BY ";
    String original = query.getStatement();
    String upper = original.toUpperCase(Locale.ENGLISH);
    if (query.getConstraint() == null) {
        return original;
    }
    StringBuilder result = new StringBuilder();
    // Keep everything up to and including " WHERE ", then splice in the constraint.
    result.append(original, 0, upper.indexOf(where) + where.length());
    result.append(query.getConstraint());
    int orderByIndex = upper.indexOf(orderBy);
    if (orderByIndex > -1) {
        result.append(original.substring(orderByIndex));
    }
    return result.toString();
}
/**
 * Convenience method for creating a UnionQueryImpl with proper settings:
 * the union inherits this query's explain/measure/internal flags, query
 * options and orderings.
 *
 * @param left the first subquery (must not be null)
 * @param right the second subquery (must not be null)
 * @return the union query
 */
private UnionQueryImpl newAlternativeUnionQuery(@NotNull Query left, @NotNull Query right) {
    checkNotNull(left, "`left` cannot be null");
    checkNotNull(right, "`right` cannot be null");
    UnionQueryImpl union = new UnionQueryImpl(false, left, right, this.settings);
    union.setExplain(explain);
    union.setMeasure(measure);
    union.setInternal(isInternal);
    union.setQueryOptions(queryOptions);
    union.setOrderings(orderings);
    return union;
}
/**
 * Create an uninitialised copy of this query: same statement, source,
 * constraint, columns and flags. Cloning is only legal before init().
 *
 * @return the copy
 * @throws IllegalStateException if this query was already initialised
 */
@Override
public Query copyOf() {
    if (isInit()) {
        throw new IllegalStateException("QueryImpl cannot be cloned once initialised.");
    }
    List<ColumnImpl> clonedColumns = newArrayList();
    for (ColumnImpl column : columns) {
        clonedColumns.add((ColumnImpl) copyElementAndCheckReference(column));
    }
    QueryImpl clone = new QueryImpl(
            this.statement,
            (SourceImpl) copyElementAndCheckReference(this.source),
            this.constraint,
            clonedColumns.toArray(new ColumnImpl[0]),
            this.namePathMapper,
            this.settings,
            this.stats);
    clone.explain = this.explain;
    clone.measure = this.measure;
    clone.isInternal = this.isInternal;
    clone.distinct = this.distinct;
    clone.queryOptions = this.queryOptions;
    return clone;
}
/** @return whether this query has already been initialised. */
@Override
public boolean isInit() {
    return this.init;
}
/** @return whether this query is flagged as internal. */
@Override
public boolean isInternal() {
    return this.isInternal;
}
// NOTE(review): dereferences `constraint` without a null check, unlike
// toString()/buildAlternativeQuery() which guard with `constraint != null`.
// Callers presumably guarantee a non-null constraint here — confirm,
// otherwise this throws NullPointerException.
@Override
public boolean containsUnfilteredFullTextCondition() {
return constraint.containsUnfilteredFullTextCondition();
}
/** @return the options attached to this query. */
public QueryOptions getQueryOptions() {
    return this.queryOptions;
}
/** @return the execution statistics collector for this query. */
public QueryExecutionStats getQueryExecutionStats() {
    return this.stats;
}
}
| |
/*
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.cloud.spanner;
import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerException;
import com.google.cloud.Timestamp;
import com.google.cloud.grpc.GrpcTransportOptions;
import com.google.cloud.grpc.GrpcTransportOptions.ExecutorFactory;
import com.google.cloud.spanner.Options.QueryOption;
import com.google.cloud.spanner.Options.ReadOption;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;
import com.google.common.util.concurrent.Uninterruptibles;
import io.opencensus.trace.Annotation;
import io.opencensus.trace.AttributeValue;
import io.opencensus.trace.Span;
import io.opencensus.trace.Tracing;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.annotation.Nullable;
import javax.annotation.concurrent.GuardedBy;
import org.threeten.bp.Duration;
import org.threeten.bp.Instant;
/**
* Maintains a pool of sessions some of which might be prepared for write by invoking
* BeginTransaction rpc. It maintains two queues of sessions(read and write prepared) and two queues
* of waiters who are waiting for a session to become available. This class itself is thread safe
* and is meant to be used concurrently across multiple threads.
*/
final class SessionPool {
private static final Logger logger = Logger.getLogger(SessionPool.class.getName());
/**
 * Wrapper around current time so that we can fake it in tests. TODO(user): Replace with Java 8
 * Clock.
 */
static class Clock {
// Returns the current instant; tests override this to control time.
Instant instant() {
return Instant.now();
}
}
/**
 * Wrapper around {@code ReadContext} that releases the session to the pool once the call is
 * finished, if it is a single use context.
 */
private static class AutoClosingReadContext implements ReadContext {
// The real context all reads are forwarded to.
private final ReadContext delegate;
// The pooled session this context was created from; released on close().
private final PooledSession session;
// Single-use contexts auto-close (and release the session) after one read.
private final boolean isSingleUse;
// Guards against double-release; see close().
private boolean closed;
private AutoClosingReadContext(
ReadContext delegate, PooledSession session, boolean isSingleUse) {
this.delegate = delegate;
this.session = session;
this.isSingleUse = isSingleUse;
}
// Marks the session as used and, for single-use contexts, wraps the result
// set so that this context is closed when the result set is exhausted,
// closed, or fails.
private ResultSet wrap(final ResultSet resultSet) {
session.markUsed();
if (!isSingleUse) {
return resultSet;
}
return new ForwardingResultSet(resultSet) {
@Override
public boolean next() throws SpannerException {
try {
boolean ret = super.next();
if (!ret) {
// End of results: release the single-use context.
close();
}
return ret;
} catch (SpannerException e) {
if (!closed) {
// Record the failure on the session so close() can decide
// whether to invalidate it (e.g. "Session not found").
session.lastException = e;
AutoClosingReadContext.this.close();
}
throw e;
}
}
@Override
public void close() {
super.close();
AutoClosingReadContext.this.close();
}
};
}
@Override
public ResultSet read(
String table, KeySet keys, Iterable<String> columns, ReadOption... options) {
return wrap(delegate.read(table, keys, columns, options));
}
@Override
public ResultSet readUsingIndex(
String table, String index, KeySet keys, Iterable<String> columns, ReadOption... options) {
return wrap(delegate.readUsingIndex(table, index, keys, columns, options));
}
// Row reads complete synchronously, so single-use contexts are closed in a
// finally block instead of via the wrapped result set.
@Override
@Nullable
public Struct readRow(String table, Key key, Iterable<String> columns) {
try {
session.markUsed();
return delegate.readRow(table, key, columns);
} finally {
if (isSingleUse) {
close();
}
}
}
@Override
@Nullable
public Struct readRowUsingIndex(String table, String index, Key key, Iterable<String> columns) {
try {
session.markUsed();
return delegate.readRowUsingIndex(table, index, key, columns);
} finally {
if (isSingleUse) {
close();
}
}
}
@Override
public ResultSet executeQuery(Statement statement, QueryOption... options) {
return wrap(delegate.executeQuery(statement, options));
}
@Override
public ResultSet analyzeQuery(Statement statement, QueryAnalyzeMode queryMode) {
return wrap(delegate.analyzeQuery(statement, queryMode));
}
// Idempotent: closes the delegate and returns the session to the pool once.
@Override
public void close() {
if (closed) {
return;
}
closed = true;
delegate.close();
session.close();
}
}
/**
 * {@link ReadOnlyTransaction} flavour of {@link AutoClosingReadContext}:
 * forwards {@code getReadTimestamp()} to the wrapped transaction and inherits
 * the release-on-close behaviour.
 */
private static class AutoClosingReadTransaction extends AutoClosingReadContext
    implements ReadOnlyTransaction {
  private final ReadOnlyTransaction readOnlyTransaction;

  AutoClosingReadTransaction(
      ReadOnlyTransaction txn, PooledSession session, boolean isSingleUse) {
    super(txn, session, isSingleUse);
    this.readOnlyTransaction = txn;
  }

  @Override
  public Timestamp getReadTimestamp() {
    return readOnlyTransaction.getReadTimestamp();
  }
}
// Exception class used just to track the stack trace at the point when a session was handed out
// from the pool.
private final class LeakedSessionException extends RuntimeException {
private static final long serialVersionUID = 1451131180314064914L;
// The message embeds the checkout time so leak reports show when the
// session left the pool; the stack trace shows where.
private LeakedSessionException() {
super("Session was checked out from the pool at " + clock.instant());
}
}
// Lifecycle state of a pooled session.
private enum SessionState {
AVAILABLE, // sitting in the pool, ready to be handed out
BUSY, // checked out by a caller
CLOSING, // selected for closure by the maintainer; will not be reused
}
/**
 * A {@link Session} owned by the pool. Write and read-write operations release the session back
 * to the pool when they finish; read contexts are wrapped in {@link AutoClosingReadContext} so
 * they do the same.
 */
final class PooledSession implements Session {
@VisibleForTesting final Session delegate;
// Volatile: read by the maintainer thread without holding `lock`.
private volatile Instant lastUseTime;
// Last error seen on this session; "Session not found" triggers invalidation on close().
private volatile SpannerException lastException;
// Non-null while checked out; carries the checkout stack trace for leak reports.
private volatile LeakedSessionException leakedException;
@GuardedBy("lock")
private SessionState state;
private PooledSession(Session delegate) {
this.delegate = delegate;
this.state = SessionState.AVAILABLE;
markUsed();
}
// Called when the session is handed out; records the checkout site for leak detection.
private void markBusy() {
this.state = SessionState.BUSY;
this.leakedException = new LeakedSessionException();
}
private void markClosing() {
this.state = SessionState.CLOSING;
}
@Override
public Timestamp write(Iterable<Mutation> mutations) throws SpannerException {
try {
markUsed();
return delegate.write(mutations);
} catch (SpannerException e) {
// Remember the error so close() can invalidate a lost session.
throw lastException = e;
} finally {
close();
}
}
@Override
public Timestamp writeAtLeastOnce(Iterable<Mutation> mutations) throws SpannerException {
try {
markUsed();
return delegate.writeAtLeastOnce(mutations);
} catch (SpannerException e) {
throw lastException = e;
} finally {
close();
}
}
@Override
public ReadContext singleUse() {
try {
return new AutoClosingReadContext(delegate.singleUse(), this, true);
} catch (Exception e) {
// Failed to create the context: return the session instead of leaking it.
close();
throw e;
}
}
@Override
public ReadContext singleUse(TimestampBound bound) {
try {
return new AutoClosingReadContext(delegate.singleUse(bound), this, true);
} catch (Exception e) {
close();
throw e;
}
}
@Override
public ReadOnlyTransaction singleUseReadOnlyTransaction() {
try {
return new AutoClosingReadTransaction(delegate.singleUseReadOnlyTransaction(), this, true);
} catch (Exception e) {
close();
throw e;
}
}
@Override
public ReadOnlyTransaction singleUseReadOnlyTransaction(TimestampBound bound) {
try {
return new AutoClosingReadTransaction(
delegate.singleUseReadOnlyTransaction(bound), this, true);
} catch (Exception e) {
close();
throw e;
}
}
@Override
public ReadOnlyTransaction readOnlyTransaction() {
try {
return new AutoClosingReadTransaction(delegate.readOnlyTransaction(), this, false);
} catch (Exception e) {
close();
throw e;
}
}
@Override
public ReadOnlyTransaction readOnlyTransaction(TimestampBound bound) {
try {
return new AutoClosingReadTransaction(delegate.readOnlyTransaction(bound), this, false);
} catch (Exception e) {
close();
throw e;
}
}
@Override
public TransactionRunner readWriteTransaction() {
final TransactionRunner runner = delegate.readWriteTransaction();
// The session is released only when the runner completes, not when it is created.
return new TransactionRunner() {
@Override
@Nullable
public <T> T run(TransactionCallable<T> callable) {
try {
markUsed();
T result = runner.run(callable);
return result;
} catch (SpannerException e) {
throw lastException = e;
} finally {
close();
}
}
@Override
public Timestamp getCommitTimestamp() {
return runner.getCommitTimestamp();
}
};
}
// Returns the session to the pool (or drops it if the backend no longer knows it).
// NOTE(review): `state` is declared @GuardedBy("lock") but is read/written below
// outside the synchronized block — confirm whether this is a benign race.
@Override
public void close() {
synchronized (lock) {
numSessionsInUse--;
}
leakedException = null;
if (lastException != null && isSessionNotFound(lastException)) {
invalidateSession(this);
} else {
lastException = null;
if (state != SessionState.CLOSING) {
state = SessionState.AVAILABLE;
}
releaseSession(this);
}
}
@Override
public String getName() {
return delegate.getName();
}
@Override
public void prepareReadWriteTransaction() {
markUsed();
delegate.prepareReadWriteTransaction();
}
// Issues a trivial bounded-staleness query so the backend does not expire the session.
private void keepAlive() {
markUsed();
delegate
.singleUse(TimestampBound.ofMaxStaleness(60, TimeUnit.SECONDS))
.executeQuery(Statement.newBuilder("SELECT 1").build())
.next();
}
private void markUsed() {
lastUseTime = clock.instant();
}
}
/** Either a successfully acquired session or the exception that prevented it. */
private static final class SessionOrError {
  private final PooledSession session;
  private final SpannerException e;

  SessionOrError(PooledSession session) {
    this.session = session;
    this.e = null;
  }

  SessionOrError(SpannerException e) {
    this.e = e;
    this.session = null;
  }
}
/**
 * Rendezvous point for a thread blocked waiting on a session: the producer hands over either a
 * session or an error through a {@link SynchronousQueue}, and {@link #take()} rethrows the error
 * as a {@link SpannerException}.
 */
private static final class Waiter {
  private final SynchronousQueue<SessionOrError> waiter = new SynchronousQueue<>();

  private void put(PooledSession session) {
    Uninterruptibles.putUninterruptibly(waiter, new SessionOrError(session));
  }

  private void put(SpannerException e) {
    Uninterruptibles.putUninterruptibly(waiter, new SessionOrError(e));
  }

  private PooledSession take() throws SpannerException {
    SessionOrError result = Uninterruptibles.takeUninterruptibly(waiter);
    if (result.e == null) {
      return result.session;
    }
    throw newSpannerException(result.e);
  }
}
// Background task to maintain the pool. It closes idle sessions, keeps alive sessions that have
// not been used for a user configured time and creates session if needed to bring pool up to
// minimum required sessions. We keep track of the number of concurrent sessions being used.
// The maximum value of that over a window (10 minutes) tells us how many sessions we need in the
// pool. We close the remaining sessions. To prevent bursty traffic, we smear this out over the
// window length. We also smear out the keep alive traffic over the keep alive period.
final class PoolMaintainer {
  // Length of the window in millis over which we keep track of maximum number of concurrent
  // sessions in use.
  private final Duration windowLength = Duration.ofMillis(TimeUnit.MINUTES.toMillis(10));
  // Frequency of the timer loop.
  @VisibleForTesting static final long LOOP_FREQUENCY = 10 * 1000L;
  // Number of loop iterations in which we need to close all the sessions waiting for closure.
  @VisibleForTesting final long numClosureCycles = windowLength.toMillis() / LOOP_FREQUENCY;
  private final Duration keepAliveMilis =
      Duration.ofMillis(TimeUnit.MINUTES.toMillis(options.getKeepAliveIntervalMinutes()));
  // Number of loop iterations in which we need to keep alive all the sessions
  @VisibleForTesting final long numKeepAliveCycles = keepAliveMilis.toMillis() / LOOP_FREQUENCY;

  Instant lastResetTime = Instant.ofEpochMilli(0);
  int numSessionsToClose = 0;
  int sessionsToClosePerLoop = 0;

  @GuardedBy("lock")
  ScheduledFuture<?> scheduledFuture;

  @GuardedBy("lock")
  boolean running;

  /** Starts the scheduled pool maintenance worker. */
  void init() {
    synchronized (lock) {
      scheduledFuture =
          executor.scheduleAtFixedRate(
              new Runnable() {
                @Override
                public void run() {
                  maintainPool();
                }
              },
              LOOP_FREQUENCY,
              LOOP_FREQUENCY,
              TimeUnit.MILLISECONDS);
    }
  }

  /**
   * Cancels the maintenance worker. If no iteration is currently running this immediately counts
   * the maintainer towards the pool's pending-closure bookkeeping; otherwise the running
   * iteration does so when it finishes (see {@link #maintainPool()}).
   */
  void close() {
    synchronized (lock) {
      scheduledFuture.cancel(false);
      if (!running) {
        decrementPendingClosures();
      }
    }
  }

  // Does various pool maintenance activities.
  void maintainPool() {
    synchronized (lock) {
      if (isClosed()) {
        return;
      }
      running = true;
    }
    Instant currTime = clock.instant();
    closeIdleSessions(currTime);
    // Now go over all the remaining sessions and see if they need to be kept alive explicitly.
    keepAliveSessions(currTime);
    replenishPool();
    synchronized (lock) {
      running = false;
      if (isClosed()) {
        decrementPendingClosures();
      }
    }
  }

  /**
   * Once per window, decides how many sessions exceed recent peak demand plus the configured
   * idle allowance; then closes them smeared over the following window.
   */
  private void closeIdleSessions(Instant currTime) {
    LinkedList<PooledSession> sessionsToClose = new LinkedList<>();
    synchronized (lock) {
      // Every ten minutes figure out how many sessions need to be closed then close them over
      // next ten minutes.
      if (currTime.isAfter(lastResetTime.plus(windowLength))) {
        int sessionsToKeep =
            Math.max(options.getMinSessions(), maxSessionsInUse + options.getMaxIdleSessions());
        numSessionsToClose = totalSessions() - sessionsToKeep;
        sessionsToClosePerLoop = (int) Math.ceil((double) numSessionsToClose / numClosureCycles);
        maxSessionsInUse = 0;
        lastResetTime = currTime;
      }
      if (numSessionsToClose > 0) {
        while (sessionsToClose.size() < Math.min(numSessionsToClose, sessionsToClosePerLoop)) {
          PooledSession sess =
              readSessions.size() > 0 ? readSessions.poll() : writePreparedSessions.poll();
          if (sess != null) {
            if (sess.state != SessionState.CLOSING) {
              sess.markClosing();
              sessionsToClose.add(sess);
            }
          } else {
            break;
          }
        }
        numSessionsToClose -= sessionsToClose.size();
      }
    }
    for (PooledSession sess : sessionsToClose) {
      // Bug fix: java.util.logging substitutes MessageFormat-style {0} placeholders, not
      // printf-style %s, so the session name was never rendered into the message.
      logger.log(Level.FINE, "Closing session {0}", sess.getName());
      closeSession(sess);
    }
  }

  /** Keeps alive a bounded subset of sessions idle past the keep-alive threshold. */
  private void keepAliveSessions(Instant currTime) {
    long numSessionsToKeepAlive = 0;
    synchronized (lock) {
      // In each cycle only keep alive a subset of sessions to prevent burst of traffic.
      numSessionsToKeepAlive = (long) Math.ceil((double) totalSessions() / numKeepAliveCycles);
    }
    // Now go over all the remaining sessions and see if they need to be kept alive explicitly.
    Instant keepAliveThreshold = currTime.minus(keepAliveMilis);
    // Keep chugging till there is no session that needs to be kept alive.
    while (numSessionsToKeepAlive > 0) {
      PooledSession sessionToKeepAlive = null;
      synchronized (lock) {
        sessionToKeepAlive = findSessionToKeepAlive(readSessions, keepAliveThreshold);
        if (sessionToKeepAlive == null) {
          sessionToKeepAlive = findSessionToKeepAlive(writePreparedSessions, keepAliveThreshold);
        }
      }
      if (sessionToKeepAlive == null) {
        break;
      }
      try {
        logger.log(Level.FINE, "Keeping alive session " + sessionToKeepAlive.getName());
        numSessionsToKeepAlive--;
        sessionToKeepAlive.keepAlive();
        releaseSession(sessionToKeepAlive);
      } catch (SpannerException e) {
        handleException(e, sessionToKeepAlive);
      }
    }
  }

  /** Creates sessions (asynchronously) until the pool is back at its configured minimum. */
  private void replenishPool() {
    synchronized (lock) {
      // If we have gone below min pool size, create that many sessions.
      for (int i = 0;
          i < options.getMinSessions() - (totalSessions() + numSessionsBeingCreated);
          i++) {
        createSession();
      }
    }
  }
}
// Immutable pool configuration (min/max sessions, write fraction, keep-alive interval, ...).
private final SessionPoolOptions options;
private final DatabaseId db;
private final SpannerImpl spanner;
// Runs session creation/closure/preparation and the maintenance loop.
private final ScheduledExecutorService executor;
private final ExecutorFactory<ScheduledExecutorService> executorFactory;
final PoolMaintainer poolMaintainer;
private final Clock clock;
// Single monitor guarding all fields annotated @GuardedBy("lock") below.
private final Object lock = new Object();
// Number of outstanding closures (sessions + maintainer) still pending during shutdown.
@GuardedBy("lock")
private int pendingClosure;
// Non-null once closeAsync() has been invoked; completes when pendingClosure hits zero.
@GuardedBy("lock")
private SettableFuture<Void> closureFuture;
// Idle sessions not prepared for writes.
@GuardedBy("lock")
private final Queue<PooledSession> readSessions = new LinkedList<>();
// Idle sessions with a BeginTransaction already run.
@GuardedBy("lock")
private final Queue<PooledSession> writePreparedSessions = new LinkedList<>();
// Threads blocked in getReadSession() / getReadWriteSession() respectively.
@GuardedBy("lock")
private final Queue<Waiter> readWaiters = new LinkedList<>();
@GuardedBy("lock")
private final Queue<Waiter> readWriteWaiters = new LinkedList<>();
@GuardedBy("lock")
private int numSessionsBeingPrepared = 0;
@GuardedBy("lock")
private int numSessionsBeingCreated = 0;
@GuardedBy("lock")
private int numSessionsInUse = 0;
// High-water mark of numSessionsInUse within the current maintenance window.
@GuardedBy("lock")
private int maxSessionsInUse = 0;
// Every session owned by the pool, whether idle, prepared or checked out.
@GuardedBy("lock")
private final Set<PooledSession> allSessions = new HashSet<>();
/**
 * Create a session pool with the given options and for the given database. It will also start
 * eagerly creating sessions if {@link SessionPoolOptions#getMinSessions()} is greater than 0.
 * Return pool is immediately ready for use, though getting a session might block for sessions to
 * be created.
 */
static SessionPool createPool(SpannerOptions spannerOptions, DatabaseId db, SpannerImpl spanner) {
// Delegates with the pool options and executor factory extracted from the Spanner options.
return createPool(
spannerOptions.getSessionPoolOptions(),
((GrpcTransportOptions) spannerOptions.getTransportOptions()).getExecutorFactory(),
db,
spanner);
}
// Same as the full factory below, using the real wall clock.
static SessionPool createPool(
SessionPoolOptions poolOptions,
ExecutorFactory<ScheduledExecutorService> executorFactory,
DatabaseId db,
SpannerImpl spanner) {
return createPool(poolOptions, executorFactory, db, spanner, new Clock());
}
// Full factory (clock injectable for tests): constructs the pool and starts the
// maintainer plus eager minimum-session creation via initPool().
static SessionPool createPool(
SessionPoolOptions poolOptions,
ExecutorFactory<ScheduledExecutorService> executorFactory,
DatabaseId db,
SpannerImpl spanner,
Clock clock) {
SessionPool pool =
new SessionPool(poolOptions, executorFactory, executorFactory.get(), db, spanner, clock);
pool.initPool();
return pool;
}
// Private: instances must be created via the createPool(...) factories, which
// also call initPool(). Only stores collaborators; no I/O happens here.
private SessionPool(
SessionPoolOptions options,
ExecutorFactory<ScheduledExecutorService> executorFactory,
ScheduledExecutorService executor,
DatabaseId db,
SpannerImpl spanner,
Clock clock) {
this.options = options;
this.executorFactory = executorFactory;
this.executor = executor;
this.db = db;
this.spanner = spanner;
this.clock = clock;
this.poolMaintainer = new PoolMaintainer();
}
/** Starts the maintainer and eagerly creates the configured minimum number of sessions. */
private void initPool() {
    synchronized (lock) {
        poolMaintainer.init();
        int minSessions = options.getMinSessions();
        for (int i = 0; i < minSessions; i++) {
            createSession();
        }
    }
}
/** @return true once {@link #closeAsync()} has been invoked. */
private boolean isClosed() {
    synchronized (lock) {
        return this.closureFuture != null;
    }
}
/**
 * Returns the session to the pool, unless the error indicates the backend no
 * longer knows the session, in which case it is dropped and replaced.
 */
private void handleException(SpannerException e, PooledSession session) {
    if (!isSessionNotFound(e)) {
        releaseSession(session);
    } else {
        invalidateSession(session);
    }
}
/** @return whether the error means the backend has discarded this session. */
private boolean isSessionNotFound(SpannerException e) {
    if (e.getErrorCode() != ErrorCode.NOT_FOUND) {
        return false;
    }
    return e.getMessage().contains("Session not found");
}
/** Drops a broken session from the pool and asynchronously creates a replacement. */
private void invalidateSession(PooledSession session) {
    synchronized (lock) {
        if (!isClosed()) {
            allSessions.remove(session);
            // Replace the dropped session to keep the pool at size.
            createSession();
        }
    }
}
/**
 * Removes and returns the first session in {@code queue} whose last use predates
 * {@code keepAliveThreshold}, or null if none qualifies.
 */
private PooledSession findSessionToKeepAlive(
    Queue<PooledSession> queue, Instant keepAliveThreshold) {
    for (Iterator<PooledSession> it = queue.iterator(); it.hasNext(); ) {
        PooledSession candidate = it.next();
        if (candidate.lastUseTime.isBefore(keepAliveThreshold)) {
            it.remove();
            return candidate;
        }
    }
    return null;
}
/**
 * Returns a session to be used for read requests to spanner. It will block if a session is not
 * currently available. In case the pool is exhausted and {@link
 * SessionPoolOptions#isFailIfPoolExhausted()} has been set, it will throw an exception. Returned
 * session must be closed by calling {@link Session#close()}.
 *
 * <p>Implementation strategy:
 *
 * <ol>
 *   <li>If a read session is available, return that.
 *   <li>Otherwise if a writePreparedSession is available, return that.
 *   <li>Otherwise if a session can be created, fire a creation request.
 *   <li>Wait for a session to become available. Note that this can be unblocked either by a
 *       session being returned to the pool or a new session being created.
 * </ol>
 */
Session getReadSession() throws SpannerException {
Span span = Tracing.getTracer().getCurrentSpan();
span.addAnnotation("Acquiring session");
Waiter waiter = null;
PooledSession sess = null;
synchronized (lock) {
if (closureFuture != null) {
span.addAnnotation("Pool has been closed");
throw new IllegalStateException("Pool has been closed");
}
// Prefer plain read sessions; fall back to write-prepared ones.
sess = readSessions.poll();
if (sess == null) {
sess = writePreparedSessions.poll();
if (sess == null) {
span.addAnnotation("No session available");
maybeCreateSession();
waiter = new Waiter();
readWaiters.add(waiter);
} else {
span.addAnnotation("Acquired read write session");
}
} else {
span.addAnnotation("Acquired read only session");
}
}
// Block OUTSIDE the lock until a producer hands a session (or error) to the waiter.
if (waiter != null) {
logger.log(
Level.FINE,
"No session available in the pool. Blocking for one to become available/created");
span.addAnnotation("Waiting for read only session to be available");
sess = waiter.take();
}
sess.markBusy();
incrementNumSessionsInUse();
span.addAnnotation(sessionAnnotation(sess));
return sess;
}
/**
 * Returns a session which has been prepared for writes by invoking BeginTransaction rpc. It will
 * block if such a session is not currently available.In case the pool is exhausted and {@link
 * SessionPoolOptions#isFailIfPoolExhausted()} has been set, it will throw an exception. Returned
 * session must closed by invoking {@link Session#close()}.
 *
 * <p>Implementation strategy:
 *
 * <ol>
 *   <li>If a writePreparedSession is available, return that.
 *   <li>Otherwise if we have an extra session being prepared for write, wait for that.
 *   <li>Otherwise, if there is a read session available, start preparing that for write and wait.
 *   <li>Otherwise start creating a new session and wait.
 *   <li>Wait for write prepared session to become available. This can be unblocked either by the
 *       session create/prepare request we fired in above request or by a session being released
 *       to the pool which is then write prepared.
 * </ol>
 */
Session getReadWriteSession() {
Span span = Tracing.getTracer().getCurrentSpan();
span.addAnnotation("Acquiring read write session");
Waiter waiter = null;
PooledSession sess = null;
synchronized (lock) {
if (closureFuture != null) {
throw new IllegalStateException("Pool has been closed");
}
sess = writePreparedSessions.poll();
if (sess == null) {
// Only start preparing/creating when in-flight preparations cannot already
// cover everyone ahead of us in the write-waiter queue.
if (numSessionsBeingPrepared <= readWriteWaiters.size()) {
PooledSession readSession = readSessions.poll();
if (readSession != null) {
span.addAnnotation("Acquired read only session. Preparing for read write transaction");
prepareSession(readSession);
} else {
span.addAnnotation("No session available");
maybeCreateSession();
}
}
waiter = new Waiter();
readWriteWaiters.add(waiter);
} else {
span.addAnnotation("Acquired read write session");
}
}
// Block OUTSIDE the lock until a prepared session (or error) is handed over.
if (waiter != null) {
logger.log(
Level.FINE,
"No session available in the pool. Blocking for one to become available/created");
span.addAnnotation("Waiting for read write session to be available");
sess = waiter.take();
}
sess.markBusy();
incrementNumSessionsInUse();
span.addAnnotation(sessionAnnotation(sess));
return sess;
}
/** Builds a trace annotation carrying the id of the session being used. */
private Annotation sessionAnnotation(Session session) {
    ImmutableMap<String, AttributeValue> attributes =
        ImmutableMap.of("sessionId", AttributeValue.stringAttributeValue(session.getName()));
    return Annotation.fromDescriptionAndAttributes("Using Session", attributes);
}
/** Bumps the in-use counter and records the high-water mark for the maintenance window. */
private void incrementNumSessionsInUse() {
    synchronized (lock) {
        numSessionsInUse++;
        if (numSessionsInUse > maxSessionsInUse) {
            maxSessionsInUse = numSessionsInUse;
        }
    }
}
// Fires a session-creation request if demand warrants it. If the pool is at its
// maximum and configured to fail fast, throws RESOURCE_EXHAUSTED instead.
private void maybeCreateSession() {
Span span = Tracing.getTracer().getCurrentSpan();
synchronized (lock) {
// Only create when the sessions already being created cannot cover all waiters.
if (numWaiters() >= numSessionsBeingCreated) {
if (canCreateSession()) {
span.addAnnotation("Creating session");
createSession();
} else if (options.isFailIfPoolExhausted()) {
span.addAnnotation("Pool exhausted. Failing");
// throw specific exception
throw newSpannerException(
ErrorCode.RESOURCE_EXHAUSTED,
"No session available in the pool. Maximum number of sessions in the pool can be"
+ " overridden by invoking SessionPoolOptions#Builder#setMaxSessions. Client can be made to block"
+ " rather than fail by setting SessionPoolOptions#Builder#setBlockIfPoolExhausted.");
}
}
}
}
/**
 * Releases a session back to the pool. This might cause one of the waiters to be unblocked.
 *
 * <p>Implementation note:
 *
 * <ol>
 *   <li>If there are no pending waiters, either add to the read sessions queue or start preparing
 *       for write depending on what fraction of sessions are already prepared for writes.
 *   <li>Otherwise either unblock a waiting reader or start preparing for a write. Exact strategy
 *       on which option we chose, in case there are both waiting readers and writers, is
 *       implemented in {@link #shouldUnblockReader}
 * </ol>
 */
private void releaseSession(PooledSession session) {
Preconditions.checkNotNull(session);
synchronized (lock) {
// During shutdown sessions are not returned to the queues; closeAsync() closes them.
if (closureFuture != null) {
return;
}
// "No pending waiters": no blocked readers, and every blocked writer already has a
// session being prepared for it.
if (readWaiters.size() == 0 && numSessionsBeingPrepared >= readWriteWaiters.size()) {
// No pending waiters
if (shouldPrepareSession()) {
prepareSession(session);
} else {
readSessions.add(session);
}
} else if (shouldUnblockReader()) {
readWaiters.poll().put(session);
} else {
// A writer is waiting: kick off BeginTransaction; the prepared session will be
// handed to a write waiter when preparation completes.
prepareSession(session);
}
}
}
/** Propagates a session-creation failure to one blocked waiter, readers first. */
private void handleCreateSessionFailure(SpannerException e) {
    synchronized (lock) {
        Waiter waiter = readWaiters.poll();
        if (waiter == null) {
            waiter = readWriteWaiters.poll();
        }
        if (waiter != null) {
            waiter.put(e);
        }
    }
}
/**
 * Handles a failed BeginTransaction: invalid sessions are dropped; otherwise the error goes to
 * a waiting writer if any, else the (still healthy) session is returned to the pool.
 */
private void handlePrepareSessionFailure(SpannerException e, PooledSession session) {
    synchronized (lock) {
        if (isSessionNotFound(e)) {
            invalidateSession(session);
            return;
        }
        if (readWriteWaiters.size() > 0) {
            readWriteWaiters.poll().put(e);
        } else {
            releaseSession(session);
        }
    }
}
/** Counts down outstanding closures; completes the closure future when the last one finishes. */
private void decrementPendingClosures() {
    if (--pendingClosure == 0) {
        closureFuture.set(null);
    }
}
/**
 * Close all the sessions. Once this method is invoked {@link #getReadSession()} and {@link
 * #getReadWriteSession()} will start throwing {@code IllegalStateException}. The returned future
 * blocks till all the sessions created in this pool have been closed.
 */
ListenableFuture<Void> closeAsync() {
ListenableFuture<Void> retFuture = null;
synchronized (lock) {
if (closureFuture != null) {
throw new IllegalStateException("Close has already been invoked");
}
// Fail all pending waiters.
Waiter waiter = readWaiters.poll();
while (waiter != null) {
waiter.put(newSpannerException(ErrorCode.INTERNAL, "Client has been closed"));
waiter = readWaiters.poll();
}
waiter = readWriteWaiters.poll();
while (waiter != null) {
waiter.put(newSpannerException(ErrorCode.INTERNAL, "Client has been closed"));
waiter = readWriteWaiters.poll();
}
// Setting closureFuture marks the pool closed; every subsequent session/maintainer
// closure calls decrementPendingClosures() until the count below reaches zero.
closureFuture = SettableFuture.create();
retFuture = closureFuture;
pendingClosure =
totalSessions() + numSessionsBeingCreated + 1 /* For pool maintenance thread */;
poolMaintainer.close();
readSessions.clear();
writePreparedSessions.clear();
// Iterate over a snapshot: closing sessions mutates allSessions.
for (final PooledSession session : ImmutableList.copyOf(allSessions)) {
if (session.leakedException != null) {
// Session was never returned by its user; log the checkout stack trace.
logger.log(Level.WARNING, "Leaked session", session.leakedException);
}
if (session.state != SessionState.CLOSING) {
closeSessionAsync(session);
}
}
}
// Release the executor only after every closure has completed.
retFuture.addListener(
new Runnable() {
@Override
public void run() {
executorFactory.release(executor);
}
},
MoreExecutors.directExecutor());
return retFuture;
}
/**
 * Decides whether a released session should go to a waiting reader rather than a waiting
 * writer; writers that already have a session being prepared for them are not counted.
 *
 * <p>This might not be the best strategy since a continuous burst of read requests can starve
 * a write request. Maybe maintain a timestamp in the queue and unblock according to that, or
 * just flip a weighted coin.
 */
private boolean shouldUnblockReader() {
    synchronized (lock) {
        int unservedWriteWaiters = readWriteWaiters.size() - numSessionsBeingPrepared;
        return readWaiters.size() > unservedWriteWaiters;
    }
}
/**
 * @return whether the pool has fewer write-prepared (or being-prepared) sessions than the
 *     configured write-sessions fraction calls for.
 */
private boolean shouldPrepareSession() {
    synchronized (lock) {
        int preparedOrPreparing = writePreparedSessions.size() + numSessionsBeingPrepared;
        return preparedOrPreparing < Math.floor(options.getWriteSessionsFraction() * totalSessions());
    }
}
/** @return the total number of threads currently blocked waiting for a session. */
private int numWaiters() {
    synchronized (lock) {
        return readWaiters.size() + readWriteWaiters.size();
    }
}
/** @return the number of sessions currently owned by this pool. */
private int totalSessions() {
    synchronized (lock) {
        return allSessions.size();
    }
}
// Schedules the (potentially slow, RPC-issuing) session closure on the pool executor.
private void closeSessionAsync(final PooledSession sess) {
executor.submit(
new Runnable() {
@Override
public void run() {
closeSession(sess);
}
});
}
// Closes the underlying session and updates pool bookkeeping. Close failures are
// only logged: the backend reclaims abandoned sessions on its own eventually.
private void closeSession(PooledSession sess) {
try {
sess.delegate.close();
} catch (SpannerException e) {
// Backend will delete these sessions after a while even if we fail to close them.
if (logger.isLoggable(Level.FINE)) {
logger.log(Level.FINE, "Failed to close session: " + sess.getName(), e);
}
} finally {
synchronized (lock) {
allSessions.remove(sess);
if (isClosed()) {
decrementPendingClosures();
return;
}
// Create a new session if needed to unblock some waiter.
if (numWaiters() > numSessionsBeingCreated) {
createSession();
}
}
}
}
// Asynchronously runs BeginTransaction on the given session. On success the
// prepared session goes to a waiting writer, then a waiting reader, then the
// write-prepared queue; on failure handlePrepareSessionFailure() decides.
private void prepareSession(final PooledSession sess) {
synchronized (lock) {
numSessionsBeingPrepared++;
}
executor.submit(
new Runnable() {
@Override
public void run() {
try {
logger.log(Level.FINE, "Preparing session");
sess.prepareReadWriteTransaction();
logger.log(Level.FINE, "Session prepared");
synchronized (lock) {
numSessionsBeingPrepared--;
if (!isClosed()) {
if (readWriteWaiters.size() > 0) {
readWriteWaiters.poll().put(sess);
} else if (readWaiters.size() > 0) {
readWaiters.poll().put(sess);
} else {
writePreparedSessions.add(sess);
}
}
}
} catch (Throwable t) {
synchronized (lock) {
numSessionsBeingPrepared--;
if (!isClosed()) {
handlePrepareSessionFailure(newSpannerException(t), sess);
}
}
}
}
});
}
private boolean canCreateSession() {
synchronized (lock) {
return totalSessions() + numSessionsBeingCreated < options.getMaxSessions();
}
}
  /**
   * Asynchronously creates a backend session and either adds it to the pool (releasing it to
   * any waiter) or, if the pool started closing while the RPC was in flight, closes it again.
   * {@code numSessionsBeingCreated} is incremented under the lock before the task is submitted
   * and decremented exactly once when the RPC finishes, success or failure.
   */
  private void createSession() {
    logger.log(Level.FINE, "Creating session");
    synchronized (lock) {
      numSessionsBeingCreated++;
      executor.submit(
          new Runnable() {
            @Override
            public void run() {
              Session session = null;
              try {
                session = spanner.createSession(db);
                logger.log(Level.FINE, "Session created");
              } catch (Throwable t) {
                // Expose this to customer via a metric.
                synchronized (lock) {
                  numSessionsBeingCreated--;
                  if (isClosed()) {
                    decrementPendingClosures();
                  }
                  handleCreateSessionFailure(newSpannerException(t));
                }
                return;
              }
              boolean closeSession = false;
              PooledSession pooledSession = null;
              synchronized (lock) {
                pooledSession = new PooledSession(session);
                numSessionsBeingCreated--;
                if (closureFuture != null) {
                  // Pool is shutting down; the new session must be closed, but do that
                  // outside the lock.
                  closeSession = true;
                } else {
                  Preconditions.checkState(totalSessions() <= options.getMaxSessions() - 1);
                  allSessions.add(pooledSession);
                  releaseSession(pooledSession);
                }
              }
              if (closeSession) {
                closeSession(pooledSession);
              }
            }
          });
    }
  }
}
| |
/*
* Copyright 2014-2016 by Cloudsoft Corporation Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package brooklyn.location.docker;
import static com.google.common.base.Preconditions.checkNotNull;
import java.io.Closeable;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.brooklyn.api.entity.Entity;
import org.apache.brooklyn.api.entity.EntitySpec;
import org.apache.brooklyn.api.entity.Group;
import org.apache.brooklyn.api.location.LocationDefinition;
import org.apache.brooklyn.api.location.MachineProvisioningLocation;
import org.apache.brooklyn.api.location.NoMachinesAvailableException;
import org.apache.brooklyn.config.ConfigKey;
import org.apache.brooklyn.core.config.ConfigKeys;
import org.apache.brooklyn.core.config.Sanitizer;
import org.apache.brooklyn.core.entity.Entities;
import org.apache.brooklyn.core.entity.EntityInternal;
import org.apache.brooklyn.core.entity.trait.Startable;
import org.apache.brooklyn.core.location.AbstractLocation;
import org.apache.brooklyn.core.location.BasicLocationDefinition;
import org.apache.brooklyn.core.location.LocationConfigKeys;
import org.apache.brooklyn.core.location.dynamic.DynamicLocation;
import org.apache.brooklyn.entity.software.base.SoftwareProcess;
import org.apache.brooklyn.location.jclouds.JcloudsLocation;
import org.apache.brooklyn.location.ssh.SshMachineLocation;
import org.apache.brooklyn.util.collections.MutableMap;
import org.apache.brooklyn.util.core.flags.SetFromFlag;
import org.apache.brooklyn.util.exceptions.Exceptions;
import org.apache.brooklyn.util.net.Cidr;
import org.apache.brooklyn.util.ssh.BashCommands;
import org.apache.brooklyn.util.text.Strings;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Joiner;
import com.google.common.base.Objects.ToStringHelper;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import brooklyn.entity.container.DockerAttributes;
import brooklyn.entity.container.DockerCallbacks;
import brooklyn.entity.container.DockerUtils;
import brooklyn.entity.container.docker.DockerContainer;
import brooklyn.entity.container.docker.DockerHost;
import brooklyn.entity.container.docker.DockerInfrastructure;
import brooklyn.networking.common.subnet.PortForwarder;
import brooklyn.networking.sdn.SdnAgent;
import brooklyn.networking.sdn.SdnAttributes;
import brooklyn.networking.sdn.SdnProvider;
import brooklyn.networking.subnet.SubnetTier;
/**
 * A {@link MachineProvisioningLocation} backed by a single Docker host. Obtaining a
 * "machine" from this location resolves or builds a Docker image for the requesting entity
 * and then starts a new {@link DockerContainer} in the host's container cluster, returning
 * the container's {@link DockerContainerLocation}.
 */
public class DockerHostLocation extends AbstractLocation implements MachineProvisioningLocation<DockerContainerLocation>, DockerVirtualLocation,
        DynamicLocation<DockerHost, DockerHostLocation>, Closeable {

    private static final Logger LOG = LoggerFactory.getLogger(DockerHostLocation.class);

    /** Mutex name used to serialise container operations on this host. */
    public static final String CONTAINER_MUTEX = "container";

    public static final ConfigKey<String> LOCATION_NAME = ConfigKeys.newStringConfigKey("locationName");

    public static final ConfigKey<SshMachineLocation> MACHINE = ConfigKeys.newConfigKey(
            SshMachineLocation.class,
            "machine");

    public static final ConfigKey<PortForwarder> PORT_FORWARDER = ConfigKeys.newConfigKey(
            PortForwarder.class,
            "portForwarder");

    public static final ConfigKey<JcloudsLocation> JCLOUDS_LOCATION = ConfigKeys.newConfigKey(
            JcloudsLocation.class,
            "jcloudsLocation");

    @SetFromFlag("locationRegistrationId")
    private String locationRegistrationId;

    // obtain() and release() take the read lock; getLock() hands out the write lock so that
    // callers can exclude all concurrent container operations on this host.
    private transient ReadWriteLock lock = new ReentrantReadWriteLock();

    private transient DockerHost dockerHost;
    private transient SshMachineLocation machine;
    private transient PortForwarder portForwarder;
    private transient JcloudsLocation jcloudsLocation;

    // One latch per image name, counted down by markImage() once the image has been committed.
    private final ConcurrentMap<String, CountDownLatch> imageLatches = Maps.newConcurrentMap();

    public DockerHostLocation() {
        this(Maps.newLinkedHashMap());
    }

    public DockerHostLocation(Map properties) {
        super(properties);
        if (isLegacyConstruction()) {
            init();
        }
    }

    @Override
    public void init() {
        super.init();
        // TODO BasicLocationRebindsupport.addCustoms currently calls init() unfortunately!
        // Don't checkNotNull in that situation - it could be this location is orphaned!
        if (isRebinding()) {
            dockerHost = (DockerHost) getConfig(OWNER);
            machine = (SshMachineLocation) getConfig(MACHINE);
            portForwarder = (PortForwarder) getConfig(PORT_FORWARDER);
            jcloudsLocation = (JcloudsLocation) getConfig(JCLOUDS_LOCATION);
        } else {
            dockerHost = (DockerHost) checkNotNull(getConfig(OWNER), "owner");
            machine = (SshMachineLocation) checkNotNull(getConfig(MACHINE), "machine");
            portForwarder = (PortForwarder) getConfig(PORT_FORWARDER);
            jcloudsLocation = (JcloudsLocation) getConfig(JCLOUDS_LOCATION);
        }
    }

    @Override
    public void rebind() {
        super.rebind();
        // Re-resolve transient references from config after persistence rebind.
        dockerHost = (DockerHost) getConfig(OWNER);
        machine = (SshMachineLocation) getConfig(MACHINE);
        portForwarder = (PortForwarder) getConfig(PORT_FORWARDER);
        jcloudsLocation = (JcloudsLocation) getConfig(JCLOUDS_LOCATION);
        if (dockerHost != null && getConfig(LOCATION_NAME) != null) {
            register();
        }
    }

    /**
     * Registers this location in the management context's location registry under the
     * configured {@link #LOCATION_NAME}; fails if a location of that name already exists.
     */
    @Override
    public LocationDefinition register() {
        String locationName = checkNotNull(getConfig(LOCATION_NAME), "config %s", LOCATION_NAME.getName());
        LocationDefinition check = getManagementContext().getLocationRegistry().getDefinedLocationByName(locationName);
        if (check != null) {
            throw new IllegalStateException("Location " + locationName + " is already defined: " + check);
        }
        String hostLocId = getId();
        String infraLocId = (getParent() != null) ? getParent().getId() : "";
        String locationSpec = String.format(DockerResolver.DOCKER_HOST_MACHINE_SPEC, infraLocId, hostLocId) + String.format(":(name=\"%s\")", locationName);
        LocationDefinition definition = new BasicLocationDefinition(locationName, locationSpec, ImmutableMap.<String, Object>of());
        getManagementContext().getLocationRegistry().updateDefinedLocation(definition);
        locationRegistrationId = definition.getId();
        requestPersist();
        return definition;
    }

    @Override
    public void deregister() {
        if (locationRegistrationId != null) {
            getManagementContext().getLocationRegistry().removeDefinedLocation(locationRegistrationId);
            locationRegistrationId = null;
            requestPersist();
        }
    }

    public DockerContainerLocation obtain() throws NoMachinesAvailableException {
        return obtain(Maps.<String,Object>newLinkedHashMap());
    }

    /**
     * Provisions a new container for the calling entity (taken from the
     * {@code CALLER_CONTEXT} flag): resolves or builds the Docker image to use, then adds and
     * starts a {@link DockerContainer} member in the host's container cluster.
     *
     * @throws NoMachinesAvailableException if the container entity could not be created
     */
    @Override
    public DockerContainerLocation obtain(Map<?,?> flags) throws NoMachinesAvailableException {
        lock.readLock().lock();
        try {
            // Lookup entity from context or flags
            Object context = flags.get(LocationConfigKeys.CALLER_CONTEXT.getName());
            if (context == null || !(context instanceof Entity)) {
                throw new IllegalStateException("Invalid location context: " + context);
            }
            Entity entity = (Entity) context;

            // Flag to configure adding SSHable layer
            boolean useSsh = entity.config().get(DockerContainer.DOCKER_USE_SSH) &&
                    dockerHost.config().get(DockerContainer.DOCKER_USE_SSH);

            // Configure the entity
            LOG.info("Configuring entity {} via subnet {}", entity, dockerHost.getSubnetTier());
            entity.config().set(SubnetTier.PORT_FORWARDING_MANAGER, dockerHost.getSubnetTier().getPortForwardManager());
            entity.config().set(SubnetTier.PORT_FORWARDER, portForwarder);
            if (getOwner().config().get(SdnAttributes.SDN_ENABLE)) {
                SdnAgent agent = getOwner().sensors().get(SdnAgent.SDN_AGENT);
                if (agent == null) {
                    throw new IllegalStateException("SDN agent entity on " + getOwner() + " is null");
                }
                Map<String, Cidr> networks = agent.sensors().get(SdnAgent.SDN_PROVIDER).sensors().get(SdnProvider.SUBNETS);
                entity.config().set(SubnetTier.SUBNET_CIDR, networks.get(entity.getApplicationId()));
            } else {
                entity.config().set(SubnetTier.SUBNET_CIDR, Cidr.UNIVERSAL);
            }

            // Add the entity Dockerfile if configured
            String dockerfile = entity.config().get(DockerAttributes.DOCKERFILE_URL);
            String entrypoint = entity.config().get(DockerAttributes.DOCKERFILE_ENTRYPOINT_URL);
            String contextArchive = entity.config().get(DockerAttributes.DOCKERFILE_CONTEXT_URL);
            String imageId = entity.config().get(DockerAttributes.DOCKER_IMAGE_ID);
            Optional<String> baseImage = Optional.fromNullable(entity.config().get(DockerAttributes.DOCKER_IMAGE_NAME));
            String imageTag = Optional.fromNullable(entity.config().get(DockerAttributes.DOCKER_IMAGE_TAG)).or("latest");
            boolean autoCheckpointImagePostInstall = Boolean.TRUE.equals(entity.config().get(DockerAttributes.AUTO_CHECKPOINT_DOCKER_IMAGE_POST_INSTALL));

            // TODO incorporate more info (incl registry?)
            String imageName;
            if (autoCheckpointImagePostInstall) {
                // Deterministic name so an already-checkpointed image can be found and reused.
                imageName = DockerUtils.imageName(entity, dockerfile);
            } else {
                // Generate a random id, and avoid collisions
                boolean collision;
                do {
                    imageName = DockerUtils.randomImageName();
                    collision = dockerHost.getImageNamed(imageName, imageTag).isPresent();
                    if (collision) LOG.info("Random image name collision '{}' on host {}; generating new id", imageName, getOwner());
                } while (collision);
            }

            // Lookup image ID or build new image from Dockerfile
            LOG.info("ImageName ({}) for entity {}: {}", new Object[] {(autoCheckpointImagePostInstall ? "hash" : "random"), entity, imageName});

            if (dockerHost.getImageNamed(imageName, imageTag).isPresent()) {
                assert autoCheckpointImagePostInstall : "random imageName "+imageName+" collision on host "+getOwner();
                // Wait until committed before continuing - Brooklyn may be midway through its creation.
                waitForImage(imageName);
                // Look up imageId again
                imageId = dockerHost.getImageNamed(imageName, imageTag).get();
                LOG.info("Found image {} for entity: {}", imageName, imageId);
                // Skip install phase
                entity.config().set(SoftwareProcess.SKIP_INSTALLATION, true);
            } else if (baseImage.isPresent()) {
                // Use the repository configured on the entity if present
                Optional<String> imageRepo = Optional.fromNullable(entity.config().get(DockerAttributes.DOCKER_IMAGE_REGISTRY_URL));
                // Otherwise only use the configured repo here if it we created it or it is writeable
                Optional<String> localRepo = Optional.absent();
                if (config().get(DockerInfrastructure.DOCKER_SHOULD_START_REGISTRY) ||
                        config().get(DockerInfrastructure.DOCKER_IMAGE_REGISTRY_WRITEABLE)) {
                    localRepo = Optional.fromNullable(getDockerInfrastructure().sensors().get(DockerAttributes.DOCKER_IMAGE_REGISTRY_URL));;
                }
                imageName = Joiner.on('/').join(Optional.presentInstances(ImmutableList.of(imageRepo.or(localRepo), baseImage)));
                String fullyQualifiedName = imageName + ":" + imageTag;
                if (useSsh) {
                    // Create an SSHable image from the one configured
                    imageId = dockerHost.layerSshableImageOnFullyQualified(fullyQualifiedName);
                    LOG.info("Created SSHable image from {}: {}", fullyQualifiedName, imageId);
                } else {
                    try {
                        dockerHost.runDockerCommand(String.format("pull %s", fullyQualifiedName));
                    } catch (Exception e) {
                        // XXX pulls fail sometimes but issue fixed in Docker 1.9.1
                        LOG.debug("Caught exception pulling {}: {}", fullyQualifiedName, e.getMessage());
                    }
                    imageId = dockerHost.getImageNamed(imageName, imageTag).orNull();
                }
                entity.config().set(SoftwareProcess.SKIP_INSTALLATION, true);
            } else {
                // Push or commit the image, otherwise Clocker will make a new one for the entity once it is installed.
                if (autoCheckpointImagePostInstall) {
                    if (getDockerInfrastructure().config().get(DockerInfrastructure.DOCKER_IMAGE_REGISTRY_WRITEABLE) &&
                            (getDockerInfrastructure().config().get(DockerInfrastructure.DOCKER_SHOULD_START_REGISTRY) ||
                                    Strings.isNonBlank(getDockerInfrastructure().sensors().get(DockerInfrastructure.DOCKER_IMAGE_REGISTRY_URL)))) {
                        insertCallback(entity, SoftwareProcess.POST_INSTALL_COMMAND, DockerCallbacks.push());
                    } else {
                        insertCallback(entity, SoftwareProcess.POST_INSTALL_COMMAND, DockerCallbacks.commit());
                    }
                }
                if (Strings.isNonBlank(dockerfile)) {
                    if (imageId != null) {
                        LOG.warn("Ignoring container imageId {} as dockerfile URL is set: {}", imageId, dockerfile);
                    }
                    Map<String, Object> substitutions = getExtraTemplateSubstitutions(imageName, entity);
                    imageId = dockerHost.buildImage(dockerfile, entrypoint, contextArchive, imageName, useSsh, substitutions);
                }
                if (Strings.isBlank(imageId)) {
                    imageId = getOwner().sensors().get(DockerHost.DOCKER_IMAGE_ID);
                }

                // Tag the image name and create its latch
                imageLatches.putIfAbsent(imageName, new CountDownLatch(1));
                dockerHost.runDockerCommand(String.format("tag -f %s %s:latest", imageId, imageName));
            }

            // Look up hardware ID
            String hardwareId = entity.config().get(DockerAttributes.DOCKER_HARDWARE_ID);
            if (Strings.isEmpty(hardwareId)) {
                hardwareId = getOwner().config().get(DockerAttributes.DOCKER_HARDWARE_ID);
            }

            // Fix missing device link for urandom on some containers
            insertCallback(entity, SoftwareProcess.PRE_INSTALL_COMMAND,
                    "if [ ! -e /dev/random ] ; then ln -s /dev/urandom /dev/random ; fi");

            // Create new Docker container in the host cluster
            LOG.info("Starting container with imageId {} and hardwareId {} at {}", new Object[] { imageId, hardwareId, machine });
            Map<Object, Object> containerFlags = MutableMap.builder()
                    .putAll(flags)
                    .put("useSsh", useSsh)
                    .put("entity", entity)
                    .putIfNotNull("imageId", imageId)
                    .putIfNotNull("imageName", imageId == null ? imageName : null)
                    .putIfNotNull("imageTag", imageId == null ? imageTag : null)
                    .putIfNotNull("hardwareId", hardwareId)
                    .build();
            Group cluster = dockerHost.getDockerContainerCluster();
            EntitySpec<DockerContainer> spec = EntitySpec.create(getOwner().sensors().get(DockerHost.DOCKER_CONTAINER_SPEC));
            spec.configure(containerFlags);
            Entity added = cluster.addMemberChild(spec);
            if (added == null) {
                throw new NoMachinesAvailableException(String.format("Failed to create container at %s", dockerHost.getDockerHostName()));
            } else {
                if (LOG.isDebugEnabled()) LOG.debug("Starting container {} at {}, config {}",
                        new Object[] { added, machine, Sanitizer.sanitize(((EntityInternal)added).config().getBag()) });
                Entities.invokeEffector(entity, added, Startable.START, MutableMap.of("locations", ImmutableList.of(machine))).getUnchecked();
            }
            DockerContainer dockerContainer = (DockerContainer) added;

            // Save the container attributes
            dockerContainer.sensors().set(DockerContainer.IMAGE_ID, imageId);
            dockerContainer.sensors().set(DockerContainer.IMAGE_NAME, imageName);
            dockerContainer.sensors().set(DockerContainer.HARDWARE_ID, hardwareId);

            // record SDN application network details
            if (getOwner().config().get(SdnAttributes.SDN_ENABLE)) {
                SdnAgent agent = getOwner().sensors().get(SdnAgent.SDN_AGENT);
                Cidr applicationCidr = agent.sensors().get(SdnAgent.SDN_PROVIDER).getSubnetCidr(entity.getApplicationId());
                entity.sensors().set(SdnProvider.APPLICATION_CIDR, applicationCidr);
                dockerContainer.sensors().set(SdnProvider.APPLICATION_CIDR, applicationCidr);
            }

            return dockerContainer.getDynamicLocation();
        } finally {
            lock.readLock().unlock();
        }
    }

    private Map<String, Object> getExtraTemplateSubstitutions(String imageName, Entity context) {
        Map<String, Object> templateSubstitutions = MutableMap.<String, Object>of("fullyQualifiedImageName", imageName);
        templateSubstitutions.putAll(getOwner().config().get(DockerInfrastructure.DOCKERFILE_SUBSTITUTIONS));
        // Add any extra substitutions on the entity (if present)
        if (context != null) {
            templateSubstitutions.putAll(context.config().get(DockerInfrastructure.DOCKERFILE_SUBSTITUTIONS));
        }
        return templateSubstitutions;
    }

    /** Chains {@code callback} after any command already configured under {@code commandKey}. */
    private void insertCallback(Entity entity, ConfigKey<String> commandKey, String callback) {
        String command = entity.config().get(commandKey);
        if (Strings.isNonBlank(command)) {
            command = BashCommands.chain(String.format("( %s )", command), callback);
        } else {
            command = callback;
        }
        entity.config().set(commandKey, command);
    }

    /**
     * Blocks until {@link #markImage} is called for {@code imageName}, up to 15 minutes.
     * NOTE(review): an await() timeout is silently ignored here — the caller proceeds as if
     * the image were ready; confirm this is intentional.
     */
    public void waitForImage(String imageName) {
        try {
            CountDownLatch latch = imageLatches.get(imageName);
            if (latch != null) latch.await(15, TimeUnit.MINUTES);
        } catch (InterruptedException ie) {
            throw Exceptions.propagate(ie);
        }
    }

    /** Signals that the image {@code imageName} has been committed and is ready for use. */
    public void markImage(String imageName) {
        CountDownLatch latch = imageLatches.get(imageName);
        if (latch != null) latch.countDown();
    }

    /**
     * Releases a container previously obtained from this location: removes it from the host
     * cluster, stops it, and unmanages the entity.
     */
    @Override
    public void release(DockerContainerLocation machine) {
        lock.readLock().lock();
        try {
            LOG.info("Releasing {}", machine);

            Group cluster = dockerHost.getDockerContainerCluster();
            DockerContainer container = machine.getOwner();
            if (cluster.removeMember(container)) {
                LOG.info("Docker Host {}: member {} released", dockerHost.getDockerHostName(), machine);
            } else {
                LOG.warn("Docker Host {}: member {} not found for release", dockerHost.getDockerHostName(), machine);
            }

            // Now close and unmanage the container
            try {
                container.stop();
                machine.close();
            } catch (Exception e) {
                LOG.warn("Error stopping container: " + container, e);
                Exceptions.propagateIfFatal(e);
            } finally {
                Entities.unmanage(container);
            }
        } finally {
            lock.readLock().unlock();
        }
    }

    @Override
    public Map<String, Object> getProvisioningFlags(Collection<String> tags) {
        return MutableMap.of();
    }

    @Override
    public DockerHost getOwner() {
        return dockerHost;
    }

    public SshMachineLocation getMachine() {
        return machine;
    }

    public JcloudsLocation getJcloudsLocation() {
        return jcloudsLocation;
    }

    public PortForwarder getPortForwarder() {
        return portForwarder;
    }

    public int getCurrentSize() {
        return dockerHost.getCurrentSize();
    }

    @Override
    public MachineProvisioningLocation<DockerContainerLocation> newSubLocation(Map<?, ?> newFlags) {
        throw new UnsupportedOperationException();
    }

    @Override
    public List<Entity> getDockerContainerList() {
        return dockerHost.getDockerContainerList();
    }

    @Override
    public List<Entity> getDockerHostList() {
        return Lists.<Entity>newArrayList(dockerHost);
    }

    @Override
    public DockerInfrastructure getDockerInfrastructure() {
        return ((DockerLocation) getParent()).getDockerInfrastructure();
    }

    @Override
    public void close() throws IOException {
        LOG.info("Close called on Docker host {}: {}", machine, this);
        try {
            machine.close();
        } catch (Exception e) {
            LOG.info("{}: Closing Docker host: {}", e.getMessage(), this);
            throw Exceptions.propagate(e);
        } finally {
            LOG.info("Docker host closed: {}", this);
        }
    }

    /**
     * The host-wide write lock; holding it excludes all {@link #obtain} and {@link #release}
     * operations, which take the read lock.
     */
    public Lock getLock() {
        return lock.writeLock();
    }

    @Override
    public ToStringHelper string() {
        return super.string()
                .add("machine", machine)
                .add("jcloudsLocation", jcloudsLocation)
                .add("dockerHost", dockerHost);
    }

}
| |
package liquibase.database.core;
import liquibase.CatalogAndSchema;
import liquibase.database.AbstractJdbcDatabase;
import liquibase.database.DatabaseConnection;
import liquibase.database.OfflineConnection;
import liquibase.database.jvm.JdbcConnection;
import liquibase.exception.DatabaseException;
import liquibase.exception.UnexpectedLiquibaseException;
import liquibase.exception.ValidationErrors;
import liquibase.executor.ExecutorService;
import liquibase.logging.LogFactory;
import liquibase.statement.*;
import liquibase.statement.core.RawCallStatement;
import liquibase.statement.core.RawSqlStatement;
import liquibase.structure.DatabaseObject;
import liquibase.structure.core.*;
import liquibase.util.JdbcUtils;
import liquibase.util.StringUtils;
import java.lang.reflect.Method;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Encapsulates Oracle database support.
*/
public class OracleDatabase extends AbstractJdbcDatabase {
    public static final String PRODUCT_NAME = "oracle";

    // Identifiers that may not be used unquoted; seeded in setConnection() from a static list
    // plus whatever SQL keywords the JDBC driver reports.
    private Set<String> reservedWords = new HashSet<String>();

    // Lazily populated cache of user-defined type names; null until first requested.
    private Set<String> userDefinedTypes = null;

    // Tri-state cache; presumably resolved elsewhere by canAccessDbaRecycleBin() — not visible here.
    private Boolean canAccessDbaRecycleBin;

    // Parsed from the "compatible" init parameter in setConnection(); null if it could not be read.
    private Integer databaseMajorVersion;
    public OracleDatabase() {
        // Oracle folds unquoted identifiers to upper case.
        super.unquotedObjectsAreUppercased=true;
        super.setCurrentDateTimeFunction("SYSTIMESTAMP");
        // Setting list of Oracle's native functions
        dateFunctions.add(new DatabaseFunction("SYSDATE"));
        dateFunctions.add(new DatabaseFunction("SYSTIMESTAMP"));
        dateFunctions.add(new DatabaseFunction("CURRENT_TIMESTAMP"));
        super.sequenceNextValueFunction = "%s.nextval";
        super.sequenceCurrentValueFunction = "%s.currval";
    }
    /** Standard priority: this implementation is selected by product-name match, not preference. */
    @Override
    public int getPriority() {
        return PRIORITY_DEFAULT;
    }
@Override
public void setConnection(DatabaseConnection conn) {
reservedWords.addAll(Arrays.asList("GROUP", "USER", "SESSION", "PASSWORD", "RESOURCE", "START", "SIZE", "UID", "DESC", "ORDER")); //more reserved words not returned by driver
Connection sqlConn = null;
if (!(conn instanceof OfflineConnection)) {
try {
/**
* Don't try to call getWrappedConnection if the conn instance is
* is not a JdbcConnection. This happens for OfflineConnection.
* @see <a href="https://liquibase.jira.com/browse/CORE-2192">CORE-2192</a>
**/
if (conn instanceof JdbcConnection) {
Method wrappedConn = conn.getClass().getMethod("getWrappedConnection");
wrappedConn.setAccessible(true);
sqlConn = (Connection) wrappedConn.invoke(conn);
}
} catch (Exception e) {
throw new UnexpectedLiquibaseException(e);
}
if (sqlConn != null) {
try {
reservedWords.addAll(Arrays.asList(sqlConn.getMetaData().getSQLKeywords().toUpperCase().split(",\\s*")));
} catch (SQLException e) {
LogFactory.getLogger().info("Could get sql keywords on OracleDatabase: " + e.getMessage());
//can not get keywords. Continue on
}
try {
Method method = sqlConn.getClass().getMethod("setRemarksReporting", Boolean.TYPE);
method.setAccessible(true);
method.invoke(sqlConn, true);
} catch (Exception e) {
LogFactory.getLogger().info("Could not set remarks reporting on OracleDatabase: " + e.getMessage());
; //cannot set it. That is OK
}
Statement statement = null;
ResultSet resultSet = null;
try {
statement = sqlConn.createStatement();
resultSet = statement.executeQuery("SELECT value FROM v$parameter WHERE name = 'compatible'");
String compatibleVersion = null;
if (resultSet.next()) {
compatibleVersion = resultSet.getString("value");
}
if (compatibleVersion != null) {
Matcher majorVersionMatcher = Pattern.compile("(\\d+)\\..*").matcher(compatibleVersion);
if (majorVersionMatcher.matches()) {
this.databaseMajorVersion = Integer.valueOf(majorVersionMatcher.group(1));
}
}
} catch (SQLException e) {
String message = "Cannot read from v$parameter: "+e.getMessage();
LogFactory.getLogger().info("Could not set check compatibility mode on OracleDatabase, assuming not running in any sort of compatibility mode: " + message);
} finally {
JdbcUtils.close(resultSet, statement);
}
}
}
super.setConnection(conn);
}
    /** Short identifier used in changelog {@code dbms} attributes. */
    @Override
    public String getShortName() {
        return "oracle";
    }

    /** Product name assumed when no live connection is available to report one. */
    @Override
    protected String getDefaultDatabaseProductName() {
        return "Oracle";
    }
@Override
public int getDatabaseMajorVersion() throws DatabaseException {
if (databaseMajorVersion == null) {
return super.getDatabaseMajorVersion();
} else {
return databaseMajorVersion;
}
}
    /** Default Oracle listener port. */
    @Override
    public Integer getDefaultPort() {
        return 1521;
    }

    /** Oracle exposes schemas, not catalogs, through JDBC metadata. */
    @Override
    public String getJdbcCatalogName(CatalogAndSchema schema) {
        return null;
    }

    /** Liquibase "catalog" maps to the Oracle schema; prefer the catalog name when set. */
    @Override
    public String getJdbcSchemaName(CatalogAndSchema schema) {
        return correctObjectName(schema.getCatalogName() == null ? schema.getSchemaName() : schema.getCatalogName(), Schema.class);
    }
@Override
public String generatePrimaryKeyName(String tableName) {
if (tableName.length() > 27) {
return "PK_" + tableName.toUpperCase().substring(0, 27);
} else {
return "PK_" + tableName.toUpperCase();
}
}
    /** Oracle supports DEFERRABLE INITIALLY DEFERRED constraints. */
    @Override
    public boolean supportsInitiallyDeferrableColumns() {
        return true;
    }

    /** Case-insensitive check against the reserved-word set built in {@code setConnection}. */
    @Override
    public boolean isReservedWord(String objectName) {
        return reservedWords.contains(objectName.toUpperCase());
    }

    @Override
    public boolean supportsSequences() {
        return true;
    }
    /**
     * Oracle's user/schema concept is modelled as a Liquibase <em>catalog</em>, so separate
     * schema support is reported as false (see also {@code getJdbcSchemaName}).
     */
    @Override
    public boolean supportsSchemas() {
        return false;
    }
    /**
     * Returns the connection's current schema (Oracle's equivalent of the Liquibase catalog),
     * or null if it cannot be determined; lookup failures are logged, not propagated.
     */
    @Override
    protected String getConnectionCatalogName() throws DatabaseException {
        if (getConnection() instanceof OfflineConnection) {
            return getConnection().getCatalog();
        }
        try {
            return ExecutorService.getInstance().getExecutor(this).queryForObject(new RawCallStatement("select sys_context( 'userenv', 'current_schema' ) from dual"), String.class);
        } catch (Exception e) {
            LogFactory.getLogger().info("Error getting default schema", e);
        }
        return null;
    }
    /** Matches on the JDBC-reported product name ("oracle"), case-insensitively. */
    @Override
    public boolean isCorrectDatabaseImplementation(DatabaseConnection conn) throws DatabaseException {
        return PRODUCT_NAME.equalsIgnoreCase(conn.getDatabaseProductName());
    }
@Override
public String getDefaultDriver(String url) {
if (url.startsWith("jdbc:oracle")) {
return "oracle.jdbc.OracleDriver";
}
return null;
}
    /** Upper-cases the inherited default catalog name to match Oracle identifier folding. */
    @Override
    public String getDefaultCatalogName() {//NOPMD
        return super.getDefaultCatalogName() == null ? null : super.getDefaultCatalogName().toUpperCase();
    }
/**
* Return an Oracle date literal with the same value as a string formatted using ISO 8601.
* <p/>
* Convert an ISO8601 date string to one of the following results:
* to_date('1995-05-23', 'YYYY-MM-DD')
* to_date('1995-05-23 09:23:59', 'YYYY-MM-DD HH24:MI:SS')
* <p/>
* Implementation restriction:
* Currently, only the following subsets of ISO8601 are supported:
* YYYY-MM-DD
* YYYY-MM-DDThh:mm:ss
*/
@Override
public String getDateLiteral(String isoDate) {
String normalLiteral = super.getDateLiteral(isoDate);
if (isDateOnly(isoDate)) {
StringBuffer val = new StringBuffer();
val.append("to_date(");
val.append(normalLiteral);
val.append(", 'YYYY-MM-DD')");
return val.toString();
} else if (isTimeOnly(isoDate)) {
StringBuffer val = new StringBuffer();
val.append("to_date(");
val.append(normalLiteral);
val.append(", 'HH24:MI:SS')");
return val.toString();
} else if (isTimestamp(isoDate)) {
StringBuffer val = new StringBuffer(26);
val.append("to_timestamp(");
val.append(normalLiteral);
val.append(", 'YYYY-MM-DD HH24:MI:SS.FF')");
return val.toString();
} else if (isDateTime(isoDate)) {
normalLiteral = normalLiteral.substring(0, normalLiteral.lastIndexOf('.')) + "'";
StringBuffer val = new StringBuffer(26);
val.append("to_date(");
val.append(normalLiteral);
val.append(", 'YYYY-MM-DD HH24:MI:SS')");
return val.toString();
} else {
return "UNSUPPORTED:" + isoDate;
}
}
@Override
public boolean isSystemObject(DatabaseObject example) {
if (example == null) {
return false;
}
if (this.isLiquibaseObject(example)) {
return false;
}
if (example instanceof Schema) {
if ("SYSTEM".equals(example.getName()) || "SYS".equals(example.getName()) || "CTXSYS".equals(example.getName())|| "XDB".equals(example.getName())) {
return true;
}
if ("SYSTEM".equals(example.getSchema().getCatalogName()) || "SYS".equals(example.getSchema().getCatalogName()) || "CTXSYS".equals(example.getSchema().getCatalogName()) || "XDB".equals(example.getSchema().getCatalogName())) {
return true;
}
} else if (isSystemObject(example.getSchema())) {
return true;
}
if (example instanceof Catalog) {
if (("SYSTEM".equals(example.getName()) || "SYS".equals(example.getName()) || "CTXSYS".equals(example.getName()) || "XDB".equals(example.getName()))) {
return true;
}
} else if (example.getName() != null) {
if (example.getName().startsWith("BIN$")) { //oracle deleted table
boolean filteredInOriginalQuery = this.canAccessDbaRecycleBin();
if (!filteredInOriginalQuery) {
filteredInOriginalQuery = StringUtils.trimToEmpty(example.getSchema().getName()).equalsIgnoreCase(this.getConnection().getConnectionUserName());
}
if (filteredInOriginalQuery) {
if (example instanceof PrimaryKey || example instanceof Index || example instanceof liquibase.statement.UniqueConstraint) { //some objects don't get renamed back and so are already filtered in the metadata queries
return false;
} else {
return true;
}
} else {
return true;
}
} else if (example.getName().startsWith("AQ$")) { //oracle AQ tables
return true;
} else if (example.getName().startsWith("DR$")) { //oracle index tables
return true;
} else if (example.getName().startsWith("SYS_IOT_OVER")) { //oracle system table
return true;
} else if ((example.getName().startsWith("MDRT_") || example.getName().startsWith("MDRS_")) && example.getName().endsWith("$")) {
// CORE-1768 - Oracle creates these for spatial indices and will remove them when the index is removed.
return true;
} else if (example.getName().startsWith("MLOG$_")) { //Created by materliaized view logs for every table that is part of a materialized view. Not available for DDL operations.
return true;
} else if (example.getName().startsWith("RUPD$_")) { //Created by materialized view log tables using primary keys. Not available for DDL operations.
return true;
} else if (example.getName().startsWith("WM$_")) { //Workspace Manager backup tables.
return true;
} else if (example.getName().equals("CREATE$JAVA$LOB$TABLE")) { //This table contains the name of the Java object, the date it was loaded, and has a BLOB column to store the Java object.
return true;
} else if (example.getName().equals("JAVA$CLASS$MD5$TABLE")) { //This is a hash table that tracks the loading of Java objects into a schema.
return true;
} else if (example.getName().startsWith("ISEQ$$_")) { //System-generated sequence
return true;
} else if (example.getName().startsWith("USLOG$")) { //for update materialized view
return true;
}
}
return super.isSystemObject(example);
}
    /** Oracle supports tablespaces in object-creation statements. */
    @Override
    public boolean supportsTablespaces() {
        return true;
    }
/**
 * Reports whether this Oracle database supports auto-increment (IDENTITY)
 * columns. Oracle introduced identity columns in version 12c.
 *
 * <p>When this returns true, generated CREATE TABLE statements include an
 * 'IDENTITY' clause, e.g.:
 * CREATE TABLE AutoIncTest (IDPrimaryKey NUMBER(19) GENERATED BY DEFAULT AS IDENTITY NOT NULL, TypeID NUMBER(3) NOT NULL, Description NVARCHAR2(50), CONSTRAINT PK_AutoIncTest PRIMARY KEY (IDPrimaryKey));
 * When false, the clause is omitted:
 * CREATE TABLE AutoIncTest (IDPrimaryKey NUMBER(19) NOT NULL, TypeID NUMBER(3) NOT NULL, Description NVARCHAR2(50), CONSTRAINT PK_AutoIncTest PRIMARY KEY (IDPrimaryKey));
 *
 * @return true when the connected database major version is 12 or greater,
 *         false otherwise or when the version cannot be determined
 */
@Override
public boolean supportsAutoIncrement() {
    try {
        return getDatabaseMajorVersion() >= 12;
    } catch (DatabaseException ex) {
        // Version lookup failed (e.g. no usable connection): assume no identity support.
        return false;
    }
}
// public Set<UniqueConstraint> findUniqueConstraints(String schema) throws DatabaseException {
// Set<UniqueConstraint> returnSet = new HashSet<UniqueConstraint>();
//
// List<Map> maps = new Executor(this).queryForList(new RawSqlStatement("SELECT UC.CONSTRAINT_NAME, UCC.TABLE_NAME, UCC.COLUMN_NAME FROM USER_CONSTRAINTS UC, USER_CONS_COLUMNS UCC WHERE UC.CONSTRAINT_NAME=UCC.CONSTRAINT_NAME AND CONSTRAINT_TYPE='U' ORDER BY UC.CONSTRAINT_NAME"));
//
// UniqueConstraint constraint = null;
// for (Map map : maps) {
// if (constraint == null || !constraint.getName().equals(constraint.getName())) {
// returnSet.add(constraint);
// Table table = new Table((String) map.get("TABLE_NAME"));
// constraint = new UniqueConstraint(map.get("CONSTRAINT_NAME").toString(), table);
// }
// }
// if (constraint != null) {
// returnSet.add(constraint);
// }
//
// return returnSet;
// }
/**
 * @return false; Oracle foreign keys do not support an ON DELETE RESTRICT clause.
 */
@Override
public boolean supportsRestrictForeignKeys() {
    return false;
}
/**
 * Returns the maximum number of type parameters (precision/scale) allowed for
 * the given data type name.
 *
 * @param dataTypeName the data type name, compared case-insensitively
 * @return 0 for BINARY_FLOAT and BINARY_DOUBLE (they accept no parameters),
 *         otherwise the superclass default
 */
@Override
public int getDataTypeMaxParameters(String dataTypeName) {
    // Use equalsIgnoreCase rather than toUpperCase().equals(...): toUpperCase()
    // with the default locale mis-maps characters in locales such as Turkish
    // (dotted/dotless i), so "binary_float" would fail to match there.
    // Constant-first also avoids an NPE on a null dataTypeName.
    if ("BINARY_FLOAT".equalsIgnoreCase(dataTypeName)
            || "BINARY_DOUBLE".equalsIgnoreCase(dataTypeName)) {
        return 0;
    }
    return super.getDataTypeMaxParameters(dataTypeName);
}
/**
 * @return true — NOTE(review): appears to indicate that JDBC metadata calls
 *         for Oracle pass the schema where a catalog is expected; confirm
 *         against the Liquibase base class documentation.
 */
@Override
public boolean jdbcCallsCatalogsSchemas() {
    return true;
}
/**
 * Returns the names of user-defined types visible to the current connection.
 * The lookup runs lazily on first call and the result is cached in
 * {@code userDefinedTypes} for subsequent calls.
 *
 * @return cached set of user-defined type names; empty when the connection is
 *         null/offline or when both lookup queries fail
 */
public Set<String> getUserDefinedTypes() {
    if (userDefinedTypes == null) {
        userDefinedTypes = new HashSet<String>();
        if (getConnection() != null && !(getConnection() instanceof OfflineConnection)) {
            try {
                try {
                    // Prefer ALL_TYPES for the widest view of visible types.
                    userDefinedTypes.addAll(ExecutorService.getInstance().getExecutor(this).queryForList(new RawSqlStatement("SELECT DISTINCT TYPE_NAME FROM ALL_TYPES"), String.class));
                } catch (DatabaseException e) { // fall back to USER_TYPES if the user cannot see ALL_TYPES
                    userDefinedTypes.addAll(ExecutorService.getInstance().getExecutor(this).queryForList(new RawSqlStatement("SELECT TYPE_NAME FROM USER_TYPES"), String.class));
                }
            } catch (DatabaseException e) {
                // Both queries failed; deliberately leave the cache empty rather than failing.
            }
        }
    }
    return userDefinedTypes;
}
/**
 * Renders a database function as SQL, with Oracle-specific handling for
 * sequence functions.
 *
 * @param databaseFunction the function to render; may be null
 * @return the SQL text for the function
 */
@Override
public String generateDatabaseFunctionValue(DatabaseFunction databaseFunction) {
    // current_timestamp passes through untouched.
    if (databaseFunction != null && databaseFunction.toString().equalsIgnoreCase("current_timestamp")) {
        return databaseFunction.toString();
    }
    boolean sequenceFunction = (databaseFunction instanceof SequenceNextValueFunction)
            || (databaseFunction instanceof SequenceCurrentValueFunction);
    if (!sequenceFunction) {
        return super.generateDatabaseFunctionValue(databaseFunction);
    }
    // The superclass quotes the full name as one identifier, producing
    // "myschema.my_seq".nextval; rewrite it as "myschema"."my_seq".nextval.
    String quotedSequence = super.generateDatabaseFunctionValue(databaseFunction);
    return quotedSequence.replaceFirst("\"([^\\.\"]*)\\.([^\\.\"]*)\"", "\"$1\".\"$2\"");
}
/**
 * Validates this database configuration, adding a warning when the connected
 * user cannot read SYS.DBA_RECYCLEBIN.
 *
 * @return the accumulated validation errors and warnings
 */
@Override
public ValidationErrors validate() {
    ValidationErrors validationErrors = super.validate();
    DatabaseConnection conn = getConnection();
    boolean offline = (conn == null) || (conn instanceof OfflineConnection);
    if (offline) {
        // Nothing more we can check without a live connection.
        LogFactory.getInstance().getLog().info("Cannot validate offline database");
    } else if (!canAccessDbaRecycleBin()) {
        validationErrors.addWarning(getDbaRecycleBinWarning());
    }
    return validationErrors;
}
/**
 * Builds the warning shown when the connected user lacks SELECT access on
 * SYS.DBA_RECYCLEBIN, including the GRANT statement needed to remedy it.
 *
 * @return human-readable warning text with remediation SQL
 */
public String getDbaRecycleBinWarning() {
    return "Liquibase needs to access the DBA_RECYCLEBIN table so we can automatically handle the case where constraints are deleted and restored. Since Oracle doesn't properly restore the original table names referenced in the constraint, we use the information from the DBA_RECYCLEBIN to automatically correct this issue.\n" +
            "\n" +
            "The user you used to connect to the database ("+getConnection().getConnectionUserName()+") needs to have \"SELECT ON SYS.DBA_RECYCLEBIN\" permissions set before we can perform this operation. Please run the following SQL to set the appropriate permissions, and try running the command again.\n" +
            "\n" +
            "     GRANT SELECT ON SYS.DBA_RECYCLEBIN TO "+getConnection().getConnectionUserName()+";";
}
/**
 * Determines (and caches) whether the connected user can query
 * SYS.DBA_RECYCLEBIN by running a zero-row probe query.
 *
 * @return true if DBA_RECYCLEBIN is accessible; false when it is not, when
 *         the check fails, or when the connection is null/offline (the
 *         offline case is not cached)
 */
public boolean canAccessDbaRecycleBin() {
    if (canAccessDbaRecycleBin == null) {
        DatabaseConnection connection = getConnection();
        if (connection == null || connection instanceof OfflineConnection) {
            // Do not cache: a real connection may be attached later.
            return false;
        }
        Statement statement = null;
        ResultSet resultSet = null;
        try {
            statement = ((JdbcConnection) connection).createStatement();
            // WHERE 0=1 returns no rows; we only care whether the query is allowed.
            resultSet = statement.executeQuery("select 1 from dba_recyclebin where 0=1");
            this.canAccessDbaRecycleBin = true;
        } catch (Exception e) {
            // Guard against a null message: SQLException.getMessage() may be null,
            // and calling startsWith on it would throw an NPE out of this catch.
            String message = e.getMessage();
            if (e instanceof SQLException && message != null && message.startsWith("ORA-00942")) { //ORA-00942: table or view does not exist
                this.canAccessDbaRecycleBin = false;
            } else {
                LogFactory.getInstance().getLog().warning("Cannot check dba_recyclebin access", e);
                this.canAccessDbaRecycleBin = false;
            }
        } finally {
            // Close the ResultSet too (previously leaked until statement close).
            JdbcUtils.close(resultSet, statement);
        }
    }
    return canAccessDbaRecycleBin;
}
}
| |
/*
* Copyright (C) 2008 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.tum.in.schlichter.androidutils;
import android.annotation.TargetApi;
import android.os.Handler;
import android.os.Message;
import android.os.Process;
import java.util.ArrayDeque;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Callable;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;
import java.util.concurrent.FutureTask;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
/**
* ************************************* Copied from JB release framework:
 * https://android.googlesource.com/platform/frameworks/base/+/jb-release/core/java/android/os/AsyncTask.java
 * so that threading behavior on all OS versions is
* the same and we can tweak behavior by using executeOnExecutor() if needed.
* There are 3 changes in this copy of AsyncTask: -pre-HC a single thread
* executor is used for serial operation (Executors.newSingleThreadExecutor) and
* is the default -the default THREAD_POOL_EXECUTOR was changed to use
* DiscardOldestPolicy -a new fixed thread pool called DUAL_THREAD_EXECUTOR was
* added *************************************
* <p>
* AsyncTask enables proper and easy use of the UI thread. This class allows to
* perform background operations and publish results on the UI thread without
* having to manipulate threads and/or handlers.
* </p>
* <p>
* AsyncTask is designed to be a helper class around {@link Thread} and
* {@link Handler} and does not constitute a generic threading framework.
* AsyncTasks should ideally be used for short operations (a few seconds at the
* most.) If you need to keep threads running for long periods of time, it is
* highly recommended you use the various APIs provided by the
 * <code>java.util.concurrent</code> package such as {@link Executor},
* {@link ThreadPoolExecutor} and {@link FutureTask}.
* </p>
* <p>
* An asynchronous task is defined by a computation that runs on a background
* thread and whose result is published on the UI thread. An asynchronous task
* is defined by 3 generic types, called <code>Params</code>,
* <code>Progress</code> and <code>Result</code>, and 4 steps, called
* <code>onPreExecute</code>, <code>doInBackground</code>,
* <code>onProgressUpdate</code> and <code>onPostExecute</code>.
* </p>
* <div class="special reference"> <h3>Developer Guides</h3>
* <p>
* For more information about using tasks and threads, read the <a
* href="{@docRoot}
* guide/topics/fundamentals/processes-and-threads.html">Processes and
* Threads</a> developer guide.
* </p>
* </div> <h2>Usage</h2>
* <p>
* AsyncTask must be subclassed to be used. The subclass will override at least
* one method ({@link #doInBackground}), and most often will override a second
* one ({@link #onPostExecute}.)
* </p>
* <p>
* Here is an example of subclassing:
* </p>
*
* <pre class="prettyprint">
* private class DownloadFilesTask extends AsyncTask<URL, Integer, Long> {
* protected Long doInBackground(URL... urls) {
* int count = urls.length;
* long totalSize = 0;
* for (int i = 0; i < count; i++) {
* totalSize += Downloader.downloadFile(urls[i]);
* publishProgress((int) ((i / (float) count) * 100));
* // Escape early if cancel() is called
* if (isCancelled())
* break;
* }
* return totalSize;
* }
*
* protected void onProgressUpdate(Integer... progress) {
* setProgressPercent(progress[0]);
* }
*
* protected void onPostExecute(Long result) {
* showDialog("Downloaded " + result + " bytes");
* }
* }
* </pre>
* <p>
* Once created, a task is executed very simply:
* </p>
*
* <pre class="prettyprint">
* new DownloadFilesTask().execute(url1, url2, url3);
* </pre>
*
* <h2>AsyncTask's generic types</h2>
* <p>
* The three types used by an asynchronous task are the following:
* </p>
* <ol>
* <li><code>Params</code>, the type of the parameters sent to the task upon
* execution.</li>
* <li><code>Progress</code>, the type of the progress units published during
* the background computation.</li>
* <li><code>Result</code>, the type of the result of the background
* computation.</li>
* </ol>
* <p>
* Not all types are always used by an asynchronous task. To mark a type as
* unused, simply use the type {@link Void}:
* </p>
*
* <pre>
* private class MyTask extends AsyncTask<Void, Void, Void> { ... }
* </pre>
*
* <h2>The 4 steps</h2>
* <p>
* When an asynchronous task is executed, the task goes through 4 steps:
* </p>
* <ol>
* <li>{@link #onPreExecute()}, invoked on the UI thread immediately after the
* task is executed. This step is normally used to setup the task, for instance
* by showing a progress bar in the user interface.</li>
* <li>{@link #doInBackground}, invoked on the background thread immediately
* after {@link #onPreExecute()} finishes executing. This step is used to
* perform background computation that can take a long time. The parameters of
* the asynchronous task are passed to this step. The result of the computation
* must be returned by this step and will be passed back to the last step. This
* step can also use {@link #publishProgress} to publish one or more units of
* progress. These values are published on the UI thread, in the
* {@link #onProgressUpdate} step.</li>
* <li>{@link #onProgressUpdate}, invoked on the UI thread after a call to
* {@link #publishProgress}. The timing of the execution is undefined. This
* method is used to display any form of progress in the user interface while
* the background computation is still executing. For instance, it can be used
* to animate a progress bar or show logs in a text field.</li>
* <li>{@link #onPostExecute}, invoked on the UI thread after the background
* computation finishes. The result of the background computation is passed to
* this step as a parameter.</li>
* </ol>
* <h2>Cancelling a task</h2>
* <p>
* A task can be cancelled at any time by invoking {@link #cancel(boolean)}.
* Invoking this method will cause subsequent calls to {@link #isCancelled()} to
* return true. After invoking this method, {@link #onCancelled(Object)},
* instead of {@link #onPostExecute(Object)} will be invoked after
* {@link #doInBackground(Object[])} returns. To ensure that a task is cancelled
* as quickly as possible, you should always check the return value of
* {@link #isCancelled()} periodically from {@link #doInBackground(Object[])},
* if possible (inside a loop for instance.)
* </p>
* <h2>Threading rules</h2>
* <p>
* There are a few threading rules that must be followed for this class to work
* properly:
* </p>
* <ul>
* <li>The AsyncTask class must be loaded on the UI thread. This is done
* automatically as of {@link android.os.Build.VERSION_CODES#JELLY_BEAN}.</li>
* <li>The task instance must be created on the UI thread.</li>
* <li>{@link #execute} must be invoked on the UI thread.</li>
* <li>Do not call {@link #onPreExecute()}, {@link #onPostExecute},
* {@link #doInBackground}, {@link #onProgressUpdate} manually.</li>
* <li>The task can be executed only once (an exception will be thrown if a
* second execution is attempted.)</li>
* </ul>
* <h2>Memory observability</h2>
* <p>
* AsyncTask guarantees that all callback calls are synchronized in such a way
* that the following operations are safe without explicit synchronizations.
* </p>
* <ul>
* <li>Set member fields in the constructor or {@link #onPreExecute}, and refer
* to them in {@link #doInBackground}.
* <li>Set member fields in {@link #doInBackground}, and refer to them in
* {@link #onProgressUpdate} and {@link #onPostExecute}.
* </ul>
* <h2>Order of execution</h2>
* <p>
* When first introduced, AsyncTasks were executed serially on a single
* background thread. Starting with {@link android.os.Build.VERSION_CODES#DONUT}
* , this was changed to a pool of threads allowing multiple tasks to operate in
* parallel. Starting with {@link android.os.Build.VERSION_CODES#HONEYCOMB},
* tasks are executed on a single thread to avoid common application errors
* caused by parallel execution.
* </p>
* <p>
* If you truly want parallel execution, you can invoke
* {@link #executeOnExecutor(java.util.concurrent.Executor, Object[])} with
* {@link #THREAD_POOL_EXECUTOR}.
* </p>
*/
public abstract class AsyncTask<Params, Progress, Result> {
    private static final String LOG_TAG = "AsyncTask";

    // Sizing for THREAD_POOL_EXECUTOR (values copied from the JB framework).
    private static final int CORE_POOL_SIZE = 5;
    private static final int MAXIMUM_POOL_SIZE = 128;
    private static final int KEEP_ALIVE = 1; // seconds an idle worker thread is kept alive

    /** Names pool threads "AsyncTask #N" for easier debugging. */
    private static final ThreadFactory sThreadFactory = new ThreadFactory() {
        private final AtomicInteger mCount = new AtomicInteger(1);

        public Thread newThread(Runnable r) {
            return new Thread(r, "AsyncTask #" + mCount.getAndIncrement());
        }
    };

    // Bounded work queue; overflow is handled by DiscardOldestPolicy below.
    private static final BlockingQueue<Runnable> sPoolWorkQueue =
            new LinkedBlockingQueue<Runnable>(10);

    /**
     * An {@link Executor} that can be used to execute tasks in parallel.
     */
    public static final Executor THREAD_POOL_EXECUTOR = new ThreadPoolExecutor(CORE_POOL_SIZE,
            MAXIMUM_POOL_SIZE, KEEP_ALIVE,
            TimeUnit.SECONDS, sPoolWorkQueue, sThreadFactory,
            new ThreadPoolExecutor.DiscardOldestPolicy());

    /**
     * An {@link Executor} that executes tasks one at a time in serial order.
     * This serialization is global to a particular process. Pre-Honeycomb a
     * single-thread executor is used instead of SerialExecutor.
     */
    public static final Executor SERIAL_EXECUTOR = AndroidUtils.isHoneycombOrHigher() ? new SerialExecutor()
            : Executors.newSingleThreadExecutor(sThreadFactory);

    /** An {@link Executor} backed by a fixed pool of two threads. */
    public static final Executor DUAL_THREAD_EXECUTOR =
            Executors.newFixedThreadPool(2, sThreadFactory);

    // Message codes dispatched to InternalHandler on the UI thread.
    private static final int MESSAGE_POST_RESULT = 0x1;
    private static final int MESSAGE_POST_PROGRESS = 0x2;

    private static final InternalHandler sHandler = new InternalHandler();
    private static volatile Executor sDefaultExecutor = SERIAL_EXECUTOR;

    private final WorkerRunnable<Params, Result> mWorker;
    private final FutureTask<Result> mFuture;
    private volatile Status mStatus = Status.PENDING;
    private final AtomicBoolean mCancelled = new AtomicBoolean();
    private final AtomicBoolean mTaskInvoked = new AtomicBoolean();

    /**
     * Runs queued tasks one at a time, handing each to THREAD_POOL_EXECUTOR
     * and only scheduling the next task after the current one finishes.
     */
    @TargetApi(11)
    private static class SerialExecutor implements Executor {
        final ArrayDeque<Runnable> mTasks = new ArrayDeque<Runnable>();
        Runnable mActive;

        public synchronized void execute(final Runnable r) {
            mTasks.offer(new Runnable() {
                public void run() {
                    try {
                        r.run();
                    } finally {
                        // Always advance the queue, even if the task threw.
                        scheduleNext();
                    }
                }
            });
            if (mActive == null) {
                scheduleNext();
            }
        }

        protected synchronized void scheduleNext() {
            if ((mActive = mTasks.poll()) != null) {
                THREAD_POOL_EXECUTOR.execute(mActive);
            }
        }
    }

    /**
     * Indicates the current status of the task. Each status will be set only
     * once during the lifetime of a task.
     */
    public enum Status {
        /**
         * Indicates that the task has not been executed yet.
         */
        PENDING,
        /**
         * Indicates that the task is running.
         */
        RUNNING,
        /**
         * Indicates that {@link AsyncTask#onPostExecute} has finished.
         */
        FINISHED,
    }

    /** @hide Used to force static handler to be created. */
    public static void init() {
        sHandler.getLooper();
    }

    /** @hide */
    public static void setDefaultExecutor(Executor exec) {
        sDefaultExecutor = exec;
    }

    /**
     * Creates a new asynchronous task. This constructor must be invoked on the
     * UI thread.
     */
    public AsyncTask() {
        mWorker = new WorkerRunnable<Params, Result>() {
            public Result call() throws Exception {
                mTaskInvoked.set(true);
                Process.setThreadPriority(Process.THREAD_PRIORITY_BACKGROUND);
                // noinspection unchecked
                return postResult(doInBackground(mParams));
            }
        };
        mFuture = new FutureTask<Result>(mWorker) {
            @Override
            protected void done() {
                try {
                    postResultIfNotInvoked(get());
                } catch (InterruptedException e) {
                    android.util.Log.w(LOG_TAG, e);
                } catch (ExecutionException e) {
                    throw new RuntimeException("An error occured while executing doInBackground()",
                            e.getCause());
                } catch (CancellationException e) {
                    // Cancelled before doInBackground ran: deliver a null result.
                    postResultIfNotInvoked(null);
                }
            }
        };
    }

    // Posts the result only when doInBackground never ran (e.g. cancellation
    // before execution); otherwise the worker already posted it.
    private void postResultIfNotInvoked(Result result) {
        final boolean wasTaskInvoked = mTaskInvoked.get();
        if (!wasTaskInvoked) {
            postResult(result);
        }
    }

    // Sends the result to the UI thread via the internal handler.
    private Result postResult(Result result) {
        @SuppressWarnings("unchecked")
        Message message = sHandler.obtainMessage(MESSAGE_POST_RESULT,
                new AsyncTaskResult<Result>(this, result));
        message.sendToTarget();
        return result;
    }

    /**
     * Returns the current status of this task.
     *
     * @return The current status.
     */
    public final Status getStatus() {
        return mStatus;
    }

    /**
     * Override this method to perform a computation on a background thread. The
     * specified parameters are the parameters passed to {@link #execute} by the
     * caller of this task. This method can call {@link #publishProgress} to
     * publish updates on the UI thread.
     *
     * @param params The parameters of the task.
     * @return A result, defined by the subclass of this task.
     * @see #onPreExecute()
     * @see #onPostExecute
     * @see #publishProgress
     */
    protected abstract Result doInBackground(Params... params);

    /**
     * Runs on the UI thread before {@link #doInBackground}.
     *
     * @see #onPostExecute
     * @see #doInBackground
     */
    protected void onPreExecute() {
    }

    /**
     * <p>
     * Runs on the UI thread after {@link #doInBackground}. The specified result
     * is the value returned by {@link #doInBackground}.
     * </p>
     * <p>
     * This method won't be invoked if the task was cancelled.
     * </p>
     *
     * @param result The result of the operation computed by
     *            {@link #doInBackground}.
     * @see #onPreExecute
     * @see #doInBackground
     * @see #onCancelled(Object)
     */
    @SuppressWarnings({
            "UnusedDeclaration"
    })
    protected void onPostExecute(Result result) {
    }

    /**
     * Runs on the UI thread after {@link #publishProgress} is invoked. The
     * specified values are the values passed to {@link #publishProgress}.
     *
     * @param values The values indicating progress.
     * @see #publishProgress
     * @see #doInBackground
     */
    @SuppressWarnings({
            "UnusedDeclaration"
    })
    protected void onProgressUpdate(Progress... values) {
    }

    /**
     * <p>
     * Runs on the UI thread after {@link #cancel(boolean)} is invoked and
     * {@link #doInBackground(Object[])} has finished.
     * </p>
     * <p>
     * The default implementation simply invokes {@link #onCancelled()} and
     * ignores the result. If you write your own implementation, do not call
     * <code>super.onCancelled(result)</code>.
     * </p>
     *
     * @param result The result, if any, computed in
     *            {@link #doInBackground(Object[])}, can be null
     * @see #cancel(boolean)
     * @see #isCancelled()
     */
    @SuppressWarnings({
            "UnusedParameters"
    })
    protected void onCancelled(Result result) {
        onCancelled();
    }

    /**
     * <p>
     * Applications should preferably override {@link #onCancelled(Object)}.
     * This method is invoked by the default implementation of
     * {@link #onCancelled(Object)}.
     * </p>
     * <p>
     * Runs on the UI thread after {@link #cancel(boolean)} is invoked and
     * {@link #doInBackground(Object[])} has finished.
     * </p>
     *
     * @see #onCancelled(Object)
     * @see #cancel(boolean)
     * @see #isCancelled()
     */
    protected void onCancelled() {
    }

    /**
     * Returns <tt>true</tt> if this task was cancelled before it completed
     * normally. If you are calling {@link #cancel(boolean)} on the task, the
     * value returned by this method should be checked periodically from
     * {@link #doInBackground(Object[])} to end the task as soon as possible.
     *
     * @return <tt>true</tt> if task was cancelled before it completed
     * @see #cancel(boolean)
     */
    public final boolean isCancelled() {
        return mCancelled.get();
    }

    /**
     * <p>
     * Attempts to cancel execution of this task. This attempt will fail if the
     * task has already completed, already been cancelled, or could not be
     * cancelled for some other reason. If successful, and this task has not
     * started when <tt>cancel</tt> is called, this task should never run. If
     * the task has already started, then the <tt>mayInterruptIfRunning</tt>
     * parameter determines whether the thread executing this task should be
     * interrupted in an attempt to stop the task.
     * </p>
     * <p>
     * Calling this method will result in {@link #onCancelled(Object)} being
     * invoked on the UI thread after {@link #doInBackground(Object[])} returns.
     * Calling this method guarantees that {@link #onPostExecute(Object)} is
     * never invoked. After invoking this method, you should check the value
     * returned by {@link #isCancelled()} periodically from
     * {@link #doInBackground(Object[])} to finish the task as early as
     * possible.
     * </p>
     *
     * @param mayInterruptIfRunning <tt>true</tt> if the thread executing this
     *            task should be interrupted; otherwise, in-progress tasks are
     *            allowed to complete.
     * @return <tt>false</tt> if the task could not be cancelled, typically
     *         because it has already completed normally; <tt>true</tt>
     *         otherwise
     * @see #isCancelled()
     * @see #onCancelled(Object)
     */
    public final boolean cancel(boolean mayInterruptIfRunning) {
        mCancelled.set(true);
        return mFuture.cancel(mayInterruptIfRunning);
    }

    /**
     * Waits if necessary for the computation to complete, and then retrieves
     * its result.
     *
     * @return The computed result.
     * @throws CancellationException If the computation was cancelled.
     * @throws ExecutionException If the computation threw an exception.
     * @throws InterruptedException If the current thread was interrupted while
     *             waiting.
     */
    public final Result get() throws InterruptedException, ExecutionException {
        return mFuture.get();
    }

    /**
     * Waits if necessary for at most the given time for the computation to
     * complete, and then retrieves its result.
     *
     * @param timeout Time to wait before cancelling the operation.
     * @param unit The time unit for the timeout.
     * @return The computed result.
     * @throws CancellationException If the computation was cancelled.
     * @throws ExecutionException If the computation threw an exception.
     * @throws InterruptedException If the current thread was interrupted while
     *             waiting.
     * @throws TimeoutException If the wait timed out.
     */
    public final Result get(long timeout, TimeUnit unit) throws InterruptedException,
            ExecutionException, TimeoutException {
        return mFuture.get(timeout, unit);
    }

    /**
     * Executes the task with the specified parameters. The task returns itself
     * (this) so that the caller can keep a reference to it.
     * <p>
     * Note: this function schedules the task on a queue for a single background
     * thread or pool of threads depending on the platform version. When first
     * introduced, AsyncTasks were executed serially on a single background
     * thread. Starting with {@link android.os.Build.VERSION_CODES#DONUT}, this
     * was changed to a pool of threads allowing multiple tasks to operate in
     * parallel. Starting {@link android.os.Build.VERSION_CODES#HONEYCOMB},
     * tasks are back to being executed on a single thread to avoid common
     * application errors caused by parallel execution. If you truly want
     * parallel execution, you can use the {@link #executeOnExecutor} version of
     * this method with {@link #THREAD_POOL_EXECUTOR}; however, see commentary
     * there for warnings on its use.
     * <p>
     * This method must be invoked on the UI thread.
     *
     * @param params The parameters of the task.
     * @return This instance of AsyncTask.
     * @throws IllegalStateException If {@link #getStatus()} returns either
     *             {@link AsyncTask.Status#RUNNING} or
     *             {@link AsyncTask.Status#FINISHED}.
     * @see #executeOnExecutor(java.util.concurrent.Executor, Object[])
     * @see #execute(Runnable)
     */
    public final AsyncTask<Params, Progress, Result> execute(Params... params) {
        return executeOnExecutor(sDefaultExecutor, params);
    }

    /**
     * Executes the task with the specified parameters. The task returns itself
     * (this) so that the caller can keep a reference to it.
     * <p>
     * This method is typically used with {@link #THREAD_POOL_EXECUTOR} to allow
     * multiple tasks to run in parallel on a pool of threads managed by
     * AsyncTask, however you can also use your own {@link Executor} for custom
     * behavior.
     * <p>
     * <em>Warning:</em> Allowing multiple tasks to run in parallel from a
     * thread pool is generally <em>not</em> what one wants, because the order
     * of their operation is not defined. For example, if these tasks are used
     * to modify any state in common (such as writing a file due to a button
     * click), there are no guarantees on the order of the modifications.
     * Without careful work it is possible in rare cases for the newer version
     * of the data to be over-written by an older one, leading to obscure data
     * loss and stability issues. Such changes are best executed in serial; to
     * guarantee such work is serialized regardless of platform version you can
     * use this function with {@link #SERIAL_EXECUTOR}.
     * <p>
     * This method must be invoked on the UI thread.
     *
     * @param exec The executor to use. {@link #THREAD_POOL_EXECUTOR} is
     *            available as a convenient process-wide thread pool for tasks
     *            that are loosely coupled.
     * @param params The parameters of the task.
     * @return This instance of AsyncTask.
     * @throws IllegalStateException If {@link #getStatus()} returns either
     *             {@link AsyncTask.Status#RUNNING} or
     *             {@link AsyncTask.Status#FINISHED}.
     * @see #execute(Object[])
     */
    public final AsyncTask<Params, Progress, Result> executeOnExecutor(Executor exec,
            Params... params) {
        if (mStatus != Status.PENDING) {
            switch (mStatus) {
                case RUNNING:
                    throw new IllegalStateException("Cannot execute task:"
                            + " the task is already running.");
                case FINISHED:
                    throw new IllegalStateException("Cannot execute task:"
                            + " the task has already been executed "
                            + "(a task can be executed only once)");
            }
        }
        mStatus = Status.RUNNING;
        onPreExecute();
        mWorker.mParams = params;
        exec.execute(mFuture);
        return this;
    }

    /**
     * Convenience version of {@link #execute(Object...)} for use with a simple
     * Runnable object. See {@link #execute(Object[])} for more information on
     * the order of execution.
     *
     * @see #execute(Object[])
     * @see #executeOnExecutor(java.util.concurrent.Executor, Object[])
     */
    public static void execute(Runnable runnable) {
        sDefaultExecutor.execute(runnable);
    }

    /**
     * This method can be invoked from {@link #doInBackground} to publish
     * updates on the UI thread while the background computation is still
     * running. Each call to this method will trigger the execution of
     * {@link #onProgressUpdate} on the UI thread. {@link #onProgressUpdate}
     * will not be called if the task has been canceled.
     *
     * @param values The progress values to update the UI with.
     * @see #onProgressUpdate
     * @see #doInBackground
     */
    protected final void publishProgress(Progress... values) {
        if (!isCancelled()) {
            sHandler.obtainMessage(MESSAGE_POST_PROGRESS,
                    new AsyncTaskResult<Progress>(this, values)).sendToTarget();
        }
    }

    // Runs on the UI thread: dispatches to onCancelled/onPostExecute and marks
    // the task FINISHED.
    private void finish(Result result) {
        if (isCancelled()) {
            onCancelled(result);
        } else {
            onPostExecute(result);
        }
        mStatus = Status.FINISHED;
    }

    /** UI-thread handler that delivers results and progress updates. */
    private static class InternalHandler extends Handler {
        @SuppressWarnings({
                "unchecked", "RawUseOfParameterizedType"
        })
        @Override
        public void handleMessage(Message msg) {
            AsyncTaskResult result = (AsyncTaskResult) msg.obj;
            switch (msg.what) {
                case MESSAGE_POST_RESULT:
                    // There is only one result
                    result.mTask.finish(result.mData[0]);
                    break;
                case MESSAGE_POST_PROGRESS:
                    result.mTask.onProgressUpdate(result.mData);
                    break;
            }
        }
    }

    /** Callable that also carries the task parameters to the worker. */
    private static abstract class WorkerRunnable<Params, Result> implements Callable<Result> {
        Params[] mParams;
    }

    /** Pairs a task with the data (result or progress values) being delivered. */
    @SuppressWarnings({
            "RawUseOfParameterizedType"
    })
    private static class AsyncTaskResult<Data> {
        final AsyncTask mTask;
        final Data[] mData;

        AsyncTaskResult(AsyncTask task, Data... data) {
            mTask = task;
            mData = data;
        }
    }
}
| |
package mil.nga.geopackage;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Abstract GeoPackage Core Cache for maintaining and reusing open GeoPackage
* connections
*
* @author osbornb
*
* @param <T>
* templated GeoPackage object
*/
public abstract class GeoPackageCoreCache<T extends GeoPackageCore> {
/**
* Logger
*/
private static final Logger logger = Logger
.getLogger(GeoPackageCoreCache.class.getName());
/**
* Cache of GeoPackage names and GeoPackages
*/
private Map<String, T> cache = new HashMap<String, T>();
/**
* Close quietly flag
*/
private boolean closeQuietly = true;
/**
* Constructor
*/
public GeoPackageCoreCache() {
}
/**
* Is close quietly mode enabled
*
* @return true if close quiet mode
* @since 3.1.0
*/
public boolean isCloseQuietly() {
return closeQuietly;
}
/**
* Set the close quietly mode
*
* @param closeQuietly
* true to close quietly
* @since 3.1.0
*/
public void setCloseQuietly(boolean closeQuietly) {
this.closeQuietly = closeQuietly;
}
/**
* Get the names of the cached GeoPackages
*
* @return set of cached GeoPackage names
* @since 1.0.1
*/
public Set<String> getNames() {
return cache.keySet();
}
/**
* Get the cached GeoPackages
*
* @return collection of cached GeoPackages
* @since 1.0.1
*/
public Collection<T> getGeoPackages() {
return cache.values();
}
/**
* Determine if the cache has the GeoPackage name
*
* @param name
* GeoPackage name
* @return true if has cached GeoPackage
* @since 3.1.0
*/
public boolean has(String name) {
return cache.containsKey(name);
}
/**
* Get the GeoPackage with name
*
* @param name
* GeoPackage name
* @return cached GeoPackage
*/
public T get(String name) {
return cache.get(name);
}
/**
* Checks if the GeoPackage name exists in the cache
*
* @param name
* GeoPackage name
* @return true if exists
*/
public boolean exists(String name) {
return cache.containsKey(name);
}
/**
* Close all GeoPackages in the cache
*/
public void closeAll() {
for (T geoPackage : cache.values()) {
close(geoPackage);
}
cache.clear();
}
/**
* Add a GeoPackage to the cache
*
* @param geoPackage
* GeoPackage
*/
public void add(T geoPackage) {
cache.put(geoPackage.getName(), geoPackage);
}
/**
* Add the collection of GeoPackages
*
* @param geoPackages
* GeoPackages
* @since 3.0.2
*/
public void addAll(Collection<T> geoPackages) {
for (T geoPackage : geoPackages) {
add(geoPackage);
}
}
/**
* Remove the GeoPackage with the name but does not close it, call
* {@link #close(String)} to close and remove
*
* @param name
* GeoPackage name
* @return removed GeoPackage
*/
public T remove(String name) {
return cache.remove(name);
}
/**
* Clears all cached GeoPackages but does not close them, call
* {@link #closeAll()} to close and clear all GeoPackages
*
* @since 1.0.1
*/
public void clear() {
cache.clear();
}
/**
 * Remove and close the GeoPackage with name, same as {@link #close(String)}
 * <p>
 * Kept as a more descriptive alias; simply delegates.
 *
 * @param name
 *            GeoPackage name
 * @return true if found, removed, and closed
 */
public boolean removeAndClose(String name) {
return close(name);
}
/**
 * Remove the GeoPackage with the given name from the cache and close it.
 *
 * @param name
 *            GeoPackage name
 * @return true if found and closed
 * @since 1.0.1
 */
public boolean close(String name) {
    T removed = remove(name);
    boolean found = removed != null;
    if (found) {
        close(removed);
    }
    return found;
}
/**
 * Close and remove every cached GeoPackage whose name is not in the retain
 * collection.
 *
 * @param retain
 *            GeoPackages to retain
 * @since 1.0.1
 */
public void closeRetain(Collection<String> retain) {
    // Copy the key set so closing (which removes entries) does not
    // modify the collection being iterated
    Set<String> toClose = new HashSet<>(cache.keySet());
    toClose.removeAll(retain);
    for (String closeName : toClose) {
        close(closeName);
    }
}
/**
 * Close and remove each of the named GeoPackages.
 *
 * @param names
 *            GeoPackage names
 * @since 1.0.1
 */
public void close(Collection<String> names) {
    for (String geoPackageName : names) {
        close(geoPackageName);
    }
}
/**
 * Close the GeoPackage
 * <p>
 * A close failure is always logged; when {@link #setCloseQuietly(boolean)}
 * has been set to false, the exception is rethrown after logging. A null
 * argument is a no-op.
 *
 * @param geoPackage
 *            GeoPackage
 * @since 3.1.0
 */
public void close(T geoPackage) {
if (geoPackage != null) {
try {
geoPackage.close();
} catch (Exception e) {
// Log first so the failure is recorded even when rethrown below
logger.log(Level.SEVERE, "Error closing GeoPackage: "
+ geoPackage.getName(), e);
if (!closeQuietly) {
throw e;
}
}
}
}
/**
 * Close the GeoPackage if it is cached (same GeoPackage instance)
 * <p>
 * Closes only when the cache holds this exact instance under its name;
 * a differently cached instance with the same name is left alone.
 *
 * @param geoPackage
 *            GeoPackage
 * @return true if closed
 * @since 3.1.0
 */
public boolean closeIfCached(T geoPackage) {
    if (geoPackage == null) {
        return false;
    }
    T cached = get(geoPackage.getName());
    // Identity comparison: only close when the cache holds this instance
    return cached == geoPackage && close(geoPackage.getName());
}
/**
 * Close the GeoPackage if it is not cached (GeoPackage not cached or
 * different instance)
 *
 * @param geoPackage
 *            GeoPackage
 * @return true if closed
 * @since 3.1.0
 */
public boolean closeIfNotCached(T geoPackage) {
    if (geoPackage == null) {
        return false;
    }
    if (get(geoPackage.getName()) == geoPackage) {
        // This exact instance is cached; leave it open
        return false;
    }
    close(geoPackage);
    return true;
}
}
| |
/*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.cloudformation;
import javax.annotation.Generated;
import com.amazonaws.services.cloudformation.model.*;
/**
 * Abstract implementation of {@code AmazonCloudFormationAsync}. Convenient method forms pass through to the
 * corresponding overload that takes a request object and an {@code AsyncHandler}; that overload throws an
 * {@code UnsupportedOperationException}.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class AbstractAmazonCloudFormationAsync extends AbstractAmazonCloudFormation implements AmazonCloudFormationAsync {
/** Protected so instances are only created via subclasses (or same-package code). */
protected AbstractAmazonCloudFormationAsync() {
}
// Generated async stubs (ActivateType through CreateStackSet): each single-argument
// form delegates to its two-argument overload, and every two-argument overload throws
// UnsupportedOperationException so concrete subclasses override only what they support.
@Override
public java.util.concurrent.Future<ActivateTypeResult> activateTypeAsync(ActivateTypeRequest request) {
return activateTypeAsync(request, null);
}
@Override
public java.util.concurrent.Future<ActivateTypeResult> activateTypeAsync(ActivateTypeRequest request,
com.amazonaws.handlers.AsyncHandler<ActivateTypeRequest, ActivateTypeResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<BatchDescribeTypeConfigurationsResult> batchDescribeTypeConfigurationsAsync(
BatchDescribeTypeConfigurationsRequest request) {
return batchDescribeTypeConfigurationsAsync(request, null);
}
@Override
public java.util.concurrent.Future<BatchDescribeTypeConfigurationsResult> batchDescribeTypeConfigurationsAsync(
BatchDescribeTypeConfigurationsRequest request,
com.amazonaws.handlers.AsyncHandler<BatchDescribeTypeConfigurationsRequest, BatchDescribeTypeConfigurationsResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<CancelUpdateStackResult> cancelUpdateStackAsync(CancelUpdateStackRequest request) {
return cancelUpdateStackAsync(request, null);
}
@Override
public java.util.concurrent.Future<CancelUpdateStackResult> cancelUpdateStackAsync(CancelUpdateStackRequest request,
com.amazonaws.handlers.AsyncHandler<CancelUpdateStackRequest, CancelUpdateStackResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<ContinueUpdateRollbackResult> continueUpdateRollbackAsync(ContinueUpdateRollbackRequest request) {
return continueUpdateRollbackAsync(request, null);
}
@Override
public java.util.concurrent.Future<ContinueUpdateRollbackResult> continueUpdateRollbackAsync(ContinueUpdateRollbackRequest request,
com.amazonaws.handlers.AsyncHandler<ContinueUpdateRollbackRequest, ContinueUpdateRollbackResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<CreateChangeSetResult> createChangeSetAsync(CreateChangeSetRequest request) {
return createChangeSetAsync(request, null);
}
@Override
public java.util.concurrent.Future<CreateChangeSetResult> createChangeSetAsync(CreateChangeSetRequest request,
com.amazonaws.handlers.AsyncHandler<CreateChangeSetRequest, CreateChangeSetResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<CreateStackResult> createStackAsync(CreateStackRequest request) {
return createStackAsync(request, null);
}
@Override
public java.util.concurrent.Future<CreateStackResult> createStackAsync(CreateStackRequest request,
com.amazonaws.handlers.AsyncHandler<CreateStackRequest, CreateStackResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<CreateStackInstancesResult> createStackInstancesAsync(CreateStackInstancesRequest request) {
return createStackInstancesAsync(request, null);
}
@Override
public java.util.concurrent.Future<CreateStackInstancesResult> createStackInstancesAsync(CreateStackInstancesRequest request,
com.amazonaws.handlers.AsyncHandler<CreateStackInstancesRequest, CreateStackInstancesResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<CreateStackSetResult> createStackSetAsync(CreateStackSetRequest request) {
return createStackSetAsync(request, null);
}
@Override
public java.util.concurrent.Future<CreateStackSetResult> createStackSetAsync(CreateStackSetRequest request,
com.amazonaws.handlers.AsyncHandler<CreateStackSetRequest, CreateStackSetResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
// Generated async stubs (DeactivateType through DeregisterType); same delegate-or-throw
// pattern as above.
@Override
public java.util.concurrent.Future<DeactivateTypeResult> deactivateTypeAsync(DeactivateTypeRequest request) {
return deactivateTypeAsync(request, null);
}
@Override
public java.util.concurrent.Future<DeactivateTypeResult> deactivateTypeAsync(DeactivateTypeRequest request,
com.amazonaws.handlers.AsyncHandler<DeactivateTypeRequest, DeactivateTypeResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<DeleteChangeSetResult> deleteChangeSetAsync(DeleteChangeSetRequest request) {
return deleteChangeSetAsync(request, null);
}
@Override
public java.util.concurrent.Future<DeleteChangeSetResult> deleteChangeSetAsync(DeleteChangeSetRequest request,
com.amazonaws.handlers.AsyncHandler<DeleteChangeSetRequest, DeleteChangeSetResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<DeleteStackResult> deleteStackAsync(DeleteStackRequest request) {
return deleteStackAsync(request, null);
}
@Override
public java.util.concurrent.Future<DeleteStackResult> deleteStackAsync(DeleteStackRequest request,
com.amazonaws.handlers.AsyncHandler<DeleteStackRequest, DeleteStackResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<DeleteStackInstancesResult> deleteStackInstancesAsync(DeleteStackInstancesRequest request) {
return deleteStackInstancesAsync(request, null);
}
@Override
public java.util.concurrent.Future<DeleteStackInstancesResult> deleteStackInstancesAsync(DeleteStackInstancesRequest request,
com.amazonaws.handlers.AsyncHandler<DeleteStackInstancesRequest, DeleteStackInstancesResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<DeleteStackSetResult> deleteStackSetAsync(DeleteStackSetRequest request) {
return deleteStackSetAsync(request, null);
}
@Override
public java.util.concurrent.Future<DeleteStackSetResult> deleteStackSetAsync(DeleteStackSetRequest request,
com.amazonaws.handlers.AsyncHandler<DeleteStackSetRequest, DeleteStackSetResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<DeregisterTypeResult> deregisterTypeAsync(DeregisterTypeRequest request) {
return deregisterTypeAsync(request, null);
}
@Override
public java.util.concurrent.Future<DeregisterTypeResult> deregisterTypeAsync(DeregisterTypeRequest request,
com.amazonaws.handlers.AsyncHandler<DeregisterTypeRequest, DeregisterTypeResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
// Generated async stubs (DescribeAccountLimits through the simplified DescribeStacks
// forms); same delegate-or-throw pattern as above.
@Override
public java.util.concurrent.Future<DescribeAccountLimitsResult> describeAccountLimitsAsync(DescribeAccountLimitsRequest request) {
return describeAccountLimitsAsync(request, null);
}
@Override
public java.util.concurrent.Future<DescribeAccountLimitsResult> describeAccountLimitsAsync(DescribeAccountLimitsRequest request,
com.amazonaws.handlers.AsyncHandler<DescribeAccountLimitsRequest, DescribeAccountLimitsResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<DescribeChangeSetResult> describeChangeSetAsync(DescribeChangeSetRequest request) {
return describeChangeSetAsync(request, null);
}
@Override
public java.util.concurrent.Future<DescribeChangeSetResult> describeChangeSetAsync(DescribeChangeSetRequest request,
com.amazonaws.handlers.AsyncHandler<DescribeChangeSetRequest, DescribeChangeSetResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<DescribeChangeSetHooksResult> describeChangeSetHooksAsync(DescribeChangeSetHooksRequest request) {
return describeChangeSetHooksAsync(request, null);
}
@Override
public java.util.concurrent.Future<DescribeChangeSetHooksResult> describeChangeSetHooksAsync(DescribeChangeSetHooksRequest request,
com.amazonaws.handlers.AsyncHandler<DescribeChangeSetHooksRequest, DescribeChangeSetHooksResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<DescribePublisherResult> describePublisherAsync(DescribePublisherRequest request) {
return describePublisherAsync(request, null);
}
@Override
public java.util.concurrent.Future<DescribePublisherResult> describePublisherAsync(DescribePublisherRequest request,
com.amazonaws.handlers.AsyncHandler<DescribePublisherRequest, DescribePublisherResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<DescribeStackDriftDetectionStatusResult> describeStackDriftDetectionStatusAsync(
DescribeStackDriftDetectionStatusRequest request) {
return describeStackDriftDetectionStatusAsync(request, null);
}
@Override
public java.util.concurrent.Future<DescribeStackDriftDetectionStatusResult> describeStackDriftDetectionStatusAsync(
DescribeStackDriftDetectionStatusRequest request,
com.amazonaws.handlers.AsyncHandler<DescribeStackDriftDetectionStatusRequest, DescribeStackDriftDetectionStatusResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<DescribeStackEventsResult> describeStackEventsAsync(DescribeStackEventsRequest request) {
return describeStackEventsAsync(request, null);
}
@Override
public java.util.concurrent.Future<DescribeStackEventsResult> describeStackEventsAsync(DescribeStackEventsRequest request,
com.amazonaws.handlers.AsyncHandler<DescribeStackEventsRequest, DescribeStackEventsResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<DescribeStackInstanceResult> describeStackInstanceAsync(DescribeStackInstanceRequest request) {
return describeStackInstanceAsync(request, null);
}
@Override
public java.util.concurrent.Future<DescribeStackInstanceResult> describeStackInstanceAsync(DescribeStackInstanceRequest request,
com.amazonaws.handlers.AsyncHandler<DescribeStackInstanceRequest, DescribeStackInstanceResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<DescribeStackResourceResult> describeStackResourceAsync(DescribeStackResourceRequest request) {
return describeStackResourceAsync(request, null);
}
@Override
public java.util.concurrent.Future<DescribeStackResourceResult> describeStackResourceAsync(DescribeStackResourceRequest request,
com.amazonaws.handlers.AsyncHandler<DescribeStackResourceRequest, DescribeStackResourceResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<DescribeStackResourceDriftsResult> describeStackResourceDriftsAsync(DescribeStackResourceDriftsRequest request) {
return describeStackResourceDriftsAsync(request, null);
}
@Override
public java.util.concurrent.Future<DescribeStackResourceDriftsResult> describeStackResourceDriftsAsync(DescribeStackResourceDriftsRequest request,
com.amazonaws.handlers.AsyncHandler<DescribeStackResourceDriftsRequest, DescribeStackResourceDriftsResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<DescribeStackResourcesResult> describeStackResourcesAsync(DescribeStackResourcesRequest request) {
return describeStackResourcesAsync(request, null);
}
@Override
public java.util.concurrent.Future<DescribeStackResourcesResult> describeStackResourcesAsync(DescribeStackResourcesRequest request,
com.amazonaws.handlers.AsyncHandler<DescribeStackResourcesRequest, DescribeStackResourcesResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<DescribeStackSetResult> describeStackSetAsync(DescribeStackSetRequest request) {
return describeStackSetAsync(request, null);
}
@Override
public java.util.concurrent.Future<DescribeStackSetResult> describeStackSetAsync(DescribeStackSetRequest request,
com.amazonaws.handlers.AsyncHandler<DescribeStackSetRequest, DescribeStackSetResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<DescribeStackSetOperationResult> describeStackSetOperationAsync(DescribeStackSetOperationRequest request) {
return describeStackSetOperationAsync(request, null);
}
@Override
public java.util.concurrent.Future<DescribeStackSetOperationResult> describeStackSetOperationAsync(DescribeStackSetOperationRequest request,
com.amazonaws.handlers.AsyncHandler<DescribeStackSetOperationRequest, DescribeStackSetOperationResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<DescribeStacksResult> describeStacksAsync(DescribeStacksRequest request) {
return describeStacksAsync(request, null);
}
@Override
public java.util.concurrent.Future<DescribeStacksResult> describeStacksAsync(DescribeStacksRequest request,
com.amazonaws.handlers.AsyncHandler<DescribeStacksRequest, DescribeStacksResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
/**
 * Simplified method form for invoking the DescribeStacks operation; uses a default request.
 *
 * @see #describeStacksAsync(DescribeStacksRequest)
 */
@Override
public java.util.concurrent.Future<DescribeStacksResult> describeStacksAsync() {
return describeStacksAsync(new DescribeStacksRequest());
}
/**
 * Simplified method form for invoking the DescribeStacks operation with an AsyncHandler; uses a default request.
 *
 * @see #describeStacksAsync(DescribeStacksRequest, com.amazonaws.handlers.AsyncHandler)
 */
@Override
public java.util.concurrent.Future<DescribeStacksResult> describeStacksAsync(
com.amazonaws.handlers.AsyncHandler<DescribeStacksRequest, DescribeStacksResult> asyncHandler) {
return describeStacksAsync(new DescribeStacksRequest(), asyncHandler);
}
// Generated async stubs (DescribeType through the simplified GetTemplateSummary forms);
// same delegate-or-throw pattern as above.
@Override
public java.util.concurrent.Future<DescribeTypeResult> describeTypeAsync(DescribeTypeRequest request) {
return describeTypeAsync(request, null);
}
@Override
public java.util.concurrent.Future<DescribeTypeResult> describeTypeAsync(DescribeTypeRequest request,
com.amazonaws.handlers.AsyncHandler<DescribeTypeRequest, DescribeTypeResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<DescribeTypeRegistrationResult> describeTypeRegistrationAsync(DescribeTypeRegistrationRequest request) {
return describeTypeRegistrationAsync(request, null);
}
@Override
public java.util.concurrent.Future<DescribeTypeRegistrationResult> describeTypeRegistrationAsync(DescribeTypeRegistrationRequest request,
com.amazonaws.handlers.AsyncHandler<DescribeTypeRegistrationRequest, DescribeTypeRegistrationResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<DetectStackDriftResult> detectStackDriftAsync(DetectStackDriftRequest request) {
return detectStackDriftAsync(request, null);
}
@Override
public java.util.concurrent.Future<DetectStackDriftResult> detectStackDriftAsync(DetectStackDriftRequest request,
com.amazonaws.handlers.AsyncHandler<DetectStackDriftRequest, DetectStackDriftResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<DetectStackResourceDriftResult> detectStackResourceDriftAsync(DetectStackResourceDriftRequest request) {
return detectStackResourceDriftAsync(request, null);
}
@Override
public java.util.concurrent.Future<DetectStackResourceDriftResult> detectStackResourceDriftAsync(DetectStackResourceDriftRequest request,
com.amazonaws.handlers.AsyncHandler<DetectStackResourceDriftRequest, DetectStackResourceDriftResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<DetectStackSetDriftResult> detectStackSetDriftAsync(DetectStackSetDriftRequest request) {
return detectStackSetDriftAsync(request, null);
}
@Override
public java.util.concurrent.Future<DetectStackSetDriftResult> detectStackSetDriftAsync(DetectStackSetDriftRequest request,
com.amazonaws.handlers.AsyncHandler<DetectStackSetDriftRequest, DetectStackSetDriftResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<EstimateTemplateCostResult> estimateTemplateCostAsync(EstimateTemplateCostRequest request) {
return estimateTemplateCostAsync(request, null);
}
@Override
public java.util.concurrent.Future<EstimateTemplateCostResult> estimateTemplateCostAsync(EstimateTemplateCostRequest request,
com.amazonaws.handlers.AsyncHandler<EstimateTemplateCostRequest, EstimateTemplateCostResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
/**
 * Simplified method form for invoking the EstimateTemplateCost operation; uses a default request.
 *
 * @see #estimateTemplateCostAsync(EstimateTemplateCostRequest)
 */
@Override
public java.util.concurrent.Future<EstimateTemplateCostResult> estimateTemplateCostAsync() {
return estimateTemplateCostAsync(new EstimateTemplateCostRequest());
}
/**
 * Simplified method form for invoking the EstimateTemplateCost operation with an AsyncHandler; uses a default request.
 *
 * @see #estimateTemplateCostAsync(EstimateTemplateCostRequest, com.amazonaws.handlers.AsyncHandler)
 */
@Override
public java.util.concurrent.Future<EstimateTemplateCostResult> estimateTemplateCostAsync(
com.amazonaws.handlers.AsyncHandler<EstimateTemplateCostRequest, EstimateTemplateCostResult> asyncHandler) {
return estimateTemplateCostAsync(new EstimateTemplateCostRequest(), asyncHandler);
}
@Override
public java.util.concurrent.Future<ExecuteChangeSetResult> executeChangeSetAsync(ExecuteChangeSetRequest request) {
return executeChangeSetAsync(request, null);
}
@Override
public java.util.concurrent.Future<ExecuteChangeSetResult> executeChangeSetAsync(ExecuteChangeSetRequest request,
com.amazonaws.handlers.AsyncHandler<ExecuteChangeSetRequest, ExecuteChangeSetResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<GetStackPolicyResult> getStackPolicyAsync(GetStackPolicyRequest request) {
return getStackPolicyAsync(request, null);
}
@Override
public java.util.concurrent.Future<GetStackPolicyResult> getStackPolicyAsync(GetStackPolicyRequest request,
com.amazonaws.handlers.AsyncHandler<GetStackPolicyRequest, GetStackPolicyResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<GetTemplateResult> getTemplateAsync(GetTemplateRequest request) {
return getTemplateAsync(request, null);
}
@Override
public java.util.concurrent.Future<GetTemplateResult> getTemplateAsync(GetTemplateRequest request,
com.amazonaws.handlers.AsyncHandler<GetTemplateRequest, GetTemplateResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<GetTemplateSummaryResult> getTemplateSummaryAsync(GetTemplateSummaryRequest request) {
return getTemplateSummaryAsync(request, null);
}
@Override
public java.util.concurrent.Future<GetTemplateSummaryResult> getTemplateSummaryAsync(GetTemplateSummaryRequest request,
com.amazonaws.handlers.AsyncHandler<GetTemplateSummaryRequest, GetTemplateSummaryResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
/**
 * Simplified method form for invoking the GetTemplateSummary operation; uses a default request.
 *
 * @see #getTemplateSummaryAsync(GetTemplateSummaryRequest)
 */
@Override
public java.util.concurrent.Future<GetTemplateSummaryResult> getTemplateSummaryAsync() {
return getTemplateSummaryAsync(new GetTemplateSummaryRequest());
}
/**
 * Simplified method form for invoking the GetTemplateSummary operation with an AsyncHandler; uses a default request.
 *
 * @see #getTemplateSummaryAsync(GetTemplateSummaryRequest, com.amazonaws.handlers.AsyncHandler)
 */
@Override
public java.util.concurrent.Future<GetTemplateSummaryResult> getTemplateSummaryAsync(
com.amazonaws.handlers.AsyncHandler<GetTemplateSummaryRequest, GetTemplateSummaryResult> asyncHandler) {
return getTemplateSummaryAsync(new GetTemplateSummaryRequest(), asyncHandler);
}
// Generated async stubs (ImportStacksToStackSet through ListTypes, including the
// simplified ListStacks forms); same delegate-or-throw pattern as above.
@Override
public java.util.concurrent.Future<ImportStacksToStackSetResult> importStacksToStackSetAsync(ImportStacksToStackSetRequest request) {
return importStacksToStackSetAsync(request, null);
}
@Override
public java.util.concurrent.Future<ImportStacksToStackSetResult> importStacksToStackSetAsync(ImportStacksToStackSetRequest request,
com.amazonaws.handlers.AsyncHandler<ImportStacksToStackSetRequest, ImportStacksToStackSetResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<ListChangeSetsResult> listChangeSetsAsync(ListChangeSetsRequest request) {
return listChangeSetsAsync(request, null);
}
@Override
public java.util.concurrent.Future<ListChangeSetsResult> listChangeSetsAsync(ListChangeSetsRequest request,
com.amazonaws.handlers.AsyncHandler<ListChangeSetsRequest, ListChangeSetsResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<ListExportsResult> listExportsAsync(ListExportsRequest request) {
return listExportsAsync(request, null);
}
@Override
public java.util.concurrent.Future<ListExportsResult> listExportsAsync(ListExportsRequest request,
com.amazonaws.handlers.AsyncHandler<ListExportsRequest, ListExportsResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<ListImportsResult> listImportsAsync(ListImportsRequest request) {
return listImportsAsync(request, null);
}
@Override
public java.util.concurrent.Future<ListImportsResult> listImportsAsync(ListImportsRequest request,
com.amazonaws.handlers.AsyncHandler<ListImportsRequest, ListImportsResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<ListStackInstancesResult> listStackInstancesAsync(ListStackInstancesRequest request) {
return listStackInstancesAsync(request, null);
}
@Override
public java.util.concurrent.Future<ListStackInstancesResult> listStackInstancesAsync(ListStackInstancesRequest request,
com.amazonaws.handlers.AsyncHandler<ListStackInstancesRequest, ListStackInstancesResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<ListStackResourcesResult> listStackResourcesAsync(ListStackResourcesRequest request) {
return listStackResourcesAsync(request, null);
}
@Override
public java.util.concurrent.Future<ListStackResourcesResult> listStackResourcesAsync(ListStackResourcesRequest request,
com.amazonaws.handlers.AsyncHandler<ListStackResourcesRequest, ListStackResourcesResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<ListStackSetOperationResultsResult> listStackSetOperationResultsAsync(ListStackSetOperationResultsRequest request) {
return listStackSetOperationResultsAsync(request, null);
}
@Override
public java.util.concurrent.Future<ListStackSetOperationResultsResult> listStackSetOperationResultsAsync(ListStackSetOperationResultsRequest request,
com.amazonaws.handlers.AsyncHandler<ListStackSetOperationResultsRequest, ListStackSetOperationResultsResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<ListStackSetOperationsResult> listStackSetOperationsAsync(ListStackSetOperationsRequest request) {
return listStackSetOperationsAsync(request, null);
}
@Override
public java.util.concurrent.Future<ListStackSetOperationsResult> listStackSetOperationsAsync(ListStackSetOperationsRequest request,
com.amazonaws.handlers.AsyncHandler<ListStackSetOperationsRequest, ListStackSetOperationsResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<ListStackSetsResult> listStackSetsAsync(ListStackSetsRequest request) {
return listStackSetsAsync(request, null);
}
@Override
public java.util.concurrent.Future<ListStackSetsResult> listStackSetsAsync(ListStackSetsRequest request,
com.amazonaws.handlers.AsyncHandler<ListStackSetsRequest, ListStackSetsResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<ListStacksResult> listStacksAsync(ListStacksRequest request) {
return listStacksAsync(request, null);
}
@Override
public java.util.concurrent.Future<ListStacksResult> listStacksAsync(ListStacksRequest request,
com.amazonaws.handlers.AsyncHandler<ListStacksRequest, ListStacksResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
/**
 * Simplified method form for invoking the ListStacks operation; uses a default request.
 *
 * @see #listStacksAsync(ListStacksRequest)
 */
@Override
public java.util.concurrent.Future<ListStacksResult> listStacksAsync() {
return listStacksAsync(new ListStacksRequest());
}
/**
 * Simplified method form for invoking the ListStacks operation with an AsyncHandler; uses a default request.
 *
 * @see #listStacksAsync(ListStacksRequest, com.amazonaws.handlers.AsyncHandler)
 */
@Override
public java.util.concurrent.Future<ListStacksResult> listStacksAsync(com.amazonaws.handlers.AsyncHandler<ListStacksRequest, ListStacksResult> asyncHandler) {
return listStacksAsync(new ListStacksRequest(), asyncHandler);
}
@Override
public java.util.concurrent.Future<ListTypeRegistrationsResult> listTypeRegistrationsAsync(ListTypeRegistrationsRequest request) {
return listTypeRegistrationsAsync(request, null);
}
@Override
public java.util.concurrent.Future<ListTypeRegistrationsResult> listTypeRegistrationsAsync(ListTypeRegistrationsRequest request,
com.amazonaws.handlers.AsyncHandler<ListTypeRegistrationsRequest, ListTypeRegistrationsResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<ListTypeVersionsResult> listTypeVersionsAsync(ListTypeVersionsRequest request) {
return listTypeVersionsAsync(request, null);
}
@Override
public java.util.concurrent.Future<ListTypeVersionsResult> listTypeVersionsAsync(ListTypeVersionsRequest request,
com.amazonaws.handlers.AsyncHandler<ListTypeVersionsRequest, ListTypeVersionsResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<ListTypesResult> listTypesAsync(ListTypesRequest request) {
return listTypesAsync(request, null);
}
@Override
public java.util.concurrent.Future<ListTypesResult> listTypesAsync(ListTypesRequest request,
com.amazonaws.handlers.AsyncHandler<ListTypesRequest, ListTypesResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<PublishTypeResult> publishTypeAsync(PublishTypeRequest request) {
return publishTypeAsync(request, null);
}
@Override
public java.util.concurrent.Future<PublishTypeResult> publishTypeAsync(PublishTypeRequest request,
com.amazonaws.handlers.AsyncHandler<PublishTypeRequest, PublishTypeResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<RecordHandlerProgressResult> recordHandlerProgressAsync(RecordHandlerProgressRequest request) {
return recordHandlerProgressAsync(request, null);
}
@Override
public java.util.concurrent.Future<RecordHandlerProgressResult> recordHandlerProgressAsync(RecordHandlerProgressRequest request,
com.amazonaws.handlers.AsyncHandler<RecordHandlerProgressRequest, RecordHandlerProgressResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<RegisterPublisherResult> registerPublisherAsync(RegisterPublisherRequest request) {
return registerPublisherAsync(request, null);
}
@Override
public java.util.concurrent.Future<RegisterPublisherResult> registerPublisherAsync(RegisterPublisherRequest request,
com.amazonaws.handlers.AsyncHandler<RegisterPublisherRequest, RegisterPublisherResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<RegisterTypeResult> registerTypeAsync(RegisterTypeRequest request) {
return registerTypeAsync(request, null);
}
@Override
public java.util.concurrent.Future<RegisterTypeResult> registerTypeAsync(RegisterTypeRequest request,
com.amazonaws.handlers.AsyncHandler<RegisterTypeRequest, RegisterTypeResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<RollbackStackResult> rollbackStackAsync(RollbackStackRequest request) {
return rollbackStackAsync(request, null);
}
@Override
public java.util.concurrent.Future<RollbackStackResult> rollbackStackAsync(RollbackStackRequest request,
com.amazonaws.handlers.AsyncHandler<RollbackStackRequest, RollbackStackResult> asyncHandler) {
throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<SetStackPolicyResult> setStackPolicyAsync(SetStackPolicyRequest request) {
    // Convenience overload: delegates to the two-argument form with no AsyncHandler.
    return setStackPolicyAsync(request, null);
}

/**
 * Not implemented by this abstract base class.
 *
 * @throws java.lang.UnsupportedOperationException always; concrete subclasses must override.
 */
@Override
public java.util.concurrent.Future<SetStackPolicyResult> setStackPolicyAsync(SetStackPolicyRequest request,
        com.amazonaws.handlers.AsyncHandler<SetStackPolicyRequest, SetStackPolicyResult> asyncHandler) {
    throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<SetTypeConfigurationResult> setTypeConfigurationAsync(SetTypeConfigurationRequest request) {
    // Convenience overload: delegates to the two-argument form with no AsyncHandler.
    return setTypeConfigurationAsync(request, null);
}

/**
 * Not implemented by this abstract base class.
 *
 * @throws java.lang.UnsupportedOperationException always; concrete subclasses must override.
 */
@Override
public java.util.concurrent.Future<SetTypeConfigurationResult> setTypeConfigurationAsync(SetTypeConfigurationRequest request,
        com.amazonaws.handlers.AsyncHandler<SetTypeConfigurationRequest, SetTypeConfigurationResult> asyncHandler) {
    throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<SetTypeDefaultVersionResult> setTypeDefaultVersionAsync(SetTypeDefaultVersionRequest request) {
    // Convenience overload: delegates to the two-argument form with no AsyncHandler.
    return setTypeDefaultVersionAsync(request, null);
}

/**
 * Not implemented by this abstract base class.
 *
 * @throws java.lang.UnsupportedOperationException always; concrete subclasses must override.
 */
@Override
public java.util.concurrent.Future<SetTypeDefaultVersionResult> setTypeDefaultVersionAsync(SetTypeDefaultVersionRequest request,
        com.amazonaws.handlers.AsyncHandler<SetTypeDefaultVersionRequest, SetTypeDefaultVersionResult> asyncHandler) {
    throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<SignalResourceResult> signalResourceAsync(SignalResourceRequest request) {
    // Convenience overload: delegates to the two-argument form with no AsyncHandler.
    return signalResourceAsync(request, null);
}

/**
 * Not implemented by this abstract base class.
 *
 * @throws java.lang.UnsupportedOperationException always; concrete subclasses must override.
 */
@Override
public java.util.concurrent.Future<SignalResourceResult> signalResourceAsync(SignalResourceRequest request,
        com.amazonaws.handlers.AsyncHandler<SignalResourceRequest, SignalResourceResult> asyncHandler) {
    throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<StopStackSetOperationResult> stopStackSetOperationAsync(StopStackSetOperationRequest request) {
    // Convenience overload: delegates to the two-argument form with no AsyncHandler.
    return stopStackSetOperationAsync(request, null);
}

/**
 * Not implemented by this abstract base class.
 *
 * @throws java.lang.UnsupportedOperationException always; concrete subclasses must override.
 */
@Override
public java.util.concurrent.Future<StopStackSetOperationResult> stopStackSetOperationAsync(StopStackSetOperationRequest request,
        com.amazonaws.handlers.AsyncHandler<StopStackSetOperationRequest, StopStackSetOperationResult> asyncHandler) {
    throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<TestTypeResult> testTypeAsync(TestTypeRequest request) {
    // Convenience overload: delegates to the two-argument form with no AsyncHandler.
    return testTypeAsync(request, null);
}

/**
 * Not implemented by this abstract base class.
 *
 * @throws java.lang.UnsupportedOperationException always; concrete subclasses must override.
 */
@Override
public java.util.concurrent.Future<TestTypeResult> testTypeAsync(TestTypeRequest request,
        com.amazonaws.handlers.AsyncHandler<TestTypeRequest, TestTypeResult> asyncHandler) {
    throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<UpdateStackResult> updateStackAsync(UpdateStackRequest request) {
    // Convenience overload: delegates to the two-argument form with no AsyncHandler.
    return updateStackAsync(request, null);
}

/**
 * Not implemented by this abstract base class.
 *
 * @throws java.lang.UnsupportedOperationException always; concrete subclasses must override.
 */
@Override
public java.util.concurrent.Future<UpdateStackResult> updateStackAsync(UpdateStackRequest request,
        com.amazonaws.handlers.AsyncHandler<UpdateStackRequest, UpdateStackResult> asyncHandler) {
    throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<UpdateStackInstancesResult> updateStackInstancesAsync(UpdateStackInstancesRequest request) {
    // Convenience overload: delegates to the two-argument form with no AsyncHandler.
    return updateStackInstancesAsync(request, null);
}

/**
 * Not implemented by this abstract base class.
 *
 * @throws java.lang.UnsupportedOperationException always; concrete subclasses must override.
 */
@Override
public java.util.concurrent.Future<UpdateStackInstancesResult> updateStackInstancesAsync(UpdateStackInstancesRequest request,
        com.amazonaws.handlers.AsyncHandler<UpdateStackInstancesRequest, UpdateStackInstancesResult> asyncHandler) {
    throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<UpdateStackSetResult> updateStackSetAsync(UpdateStackSetRequest request) {
    // Convenience overload: delegates to the two-argument form with no AsyncHandler.
    return updateStackSetAsync(request, null);
}

/**
 * Not implemented by this abstract base class.
 *
 * @throws java.lang.UnsupportedOperationException always; concrete subclasses must override.
 */
@Override
public java.util.concurrent.Future<UpdateStackSetResult> updateStackSetAsync(UpdateStackSetRequest request,
        com.amazonaws.handlers.AsyncHandler<UpdateStackSetRequest, UpdateStackSetResult> asyncHandler) {
    throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<UpdateTerminationProtectionResult> updateTerminationProtectionAsync(UpdateTerminationProtectionRequest request) {
    // Convenience overload: delegates to the two-argument form with no AsyncHandler.
    return updateTerminationProtectionAsync(request, null);
}

/**
 * Not implemented by this abstract base class.
 *
 * @throws java.lang.UnsupportedOperationException always; concrete subclasses must override.
 */
@Override
public java.util.concurrent.Future<UpdateTerminationProtectionResult> updateTerminationProtectionAsync(UpdateTerminationProtectionRequest request,
        com.amazonaws.handlers.AsyncHandler<UpdateTerminationProtectionRequest, UpdateTerminationProtectionResult> asyncHandler) {
    throw new java.lang.UnsupportedOperationException();
}
@Override
public java.util.concurrent.Future<ValidateTemplateResult> validateTemplateAsync(ValidateTemplateRequest request) {
    // Convenience overload: delegates to the two-argument form with no AsyncHandler.
    return validateTemplateAsync(request, null);
}

/**
 * Not implemented by this abstract base class.
 *
 * @throws java.lang.UnsupportedOperationException always; concrete subclasses must override.
 */
@Override
public java.util.concurrent.Future<ValidateTemplateResult> validateTemplateAsync(ValidateTemplateRequest request,
        com.amazonaws.handlers.AsyncHandler<ValidateTemplateRequest, ValidateTemplateResult> asyncHandler) {
    throw new java.lang.UnsupportedOperationException();
}
}
| |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
import static org.junit.Assert.*;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.junit.After;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.internal.util.reflection.Whitebox;
/**
 * Unit tests for {@link FSNamesystem}: edits-directory de-duplication, lease
 * clearing on {@code clear()}, startup vs. low-resource safemode behavior,
 * replication-queue population around safemode, FS lock fairness, namespace
 * reset, and effective layout version computation.
 */
public class TestFSNamesystem {

  @After
  public void cleanUp() {
    // Wipe any namenode storage a test created under the mini-cluster base dir.
    FileUtil.fullyDeleteContents(new File(MiniDFSCluster.getBaseDirectory()));
  }

  /**
   * Tests that the namenode edits dirs are gotten with duplicates removed
   */
  @Test
  public void testUniqueEditDirs() throws IOException {
    Configuration config = new Configuration();
    config.set(DFS_NAMENODE_EDITS_DIR_KEY, "file://edits/dir, "
        + "file://edits/dir1,file://edits/dir1"); // overlapping internally
    // getNamespaceEditsDirs removes duplicates
    Collection<URI> editsDirs = FSNamesystem.getNamespaceEditsDirs(config);
    assertEquals(2, editsDirs.size());
  }

  /**
   * Test that FSNamesystem#clear clears all leases.
   */
  @Test
  public void testFSNamespaceClearLeases() throws Exception {
    Configuration conf = new HdfsConfiguration();
    File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
    conf.set(DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
    NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
    DFSTestUtil.formatNameNode(conf);
    FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
    LeaseManager leaseMan = fsn.getLeaseManager();
    leaseMan.addLease("client1", fsn.getFSDirectory().allocateNewInodeId());
    assertEquals(1, leaseMan.countLease());
    // clear() must drop outstanding leases along with the rest of the namespace.
    fsn.clear();
    leaseMan = fsn.getLeaseManager();
    assertEquals(0, leaseMan.countLease());
  }

  @Test
  /**
   * Test that isInStartupSafemode returns true only during startup safemode
   * and not also during low-resource safemode
   */
  public void testStartupSafemode() throws IOException {
    Configuration conf = new Configuration();
    FSImage fsImage = Mockito.mock(FSImage.class);
    FSEditLog fsEditLog = Mockito.mock(FSEditLog.class);
    Mockito.when(fsImage.getEditLog()).thenReturn(fsEditLog);
    FSNamesystem fsn = new FSNamesystem(conf, fsImage);
    fsn.leaveSafeMode();
    assertTrue("After leaving safemode FSNamesystem.isInStartupSafeMode still "
        + "returned true", !fsn.isInStartupSafeMode());
    assertTrue("After leaving safemode FSNamesystem.isInSafeMode still returned"
        + " true", !fsn.isInSafeMode());
    // enterSafeMode(true) simulates the low-resource case: in safemode, but
    // this must NOT be reported as *startup* safemode.
    fsn.enterSafeMode(true);
    assertTrue("After entering safemode due to low resources FSNamesystem."
        + "isInStartupSafeMode still returned true", !fsn.isInStartupSafeMode());
    assertTrue("After entering safemode due to low resources FSNamesystem."
        + "isInSafeMode still returned false", fsn.isInSafeMode());
  }

  @Test
  public void testReplQueuesActiveAfterStartupSafemode() throws IOException, InterruptedException{
    Configuration conf = new Configuration();
    FSEditLog fsEditLog = Mockito.mock(FSEditLog.class);
    FSImage fsImage = Mockito.mock(FSImage.class);
    Mockito.when(fsImage.getEditLog()).thenReturn(fsEditLog);
    FSNamesystem fsNamesystem = new FSNamesystem(conf, fsImage);
    FSNamesystem fsn = Mockito.spy(fsNamesystem);
    // Make shouldPopulateReplQueues return true
    HAContext haContext = Mockito.mock(HAContext.class);
    HAState haState = Mockito.mock(HAState.class);
    Mockito.when(haContext.getState()).thenReturn(haState);
    Mockito.when(haState.shouldPopulateReplQueues()).thenReturn(true);
    Whitebox.setInternalState(fsn, "haContext", haContext);
    // Make NameNode.getNameNodeMetrics() not return null
    NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
    // First safemode (startup): queues must stay unpopulated until it is left.
    fsn.enterSafeMode(false);
    assertTrue("FSNamesystem didn't enter safemode", fsn.isInSafeMode());
    assertTrue("Replication queues were being populated during very first "
        + "safemode", !fsn.isPopulatingReplQueues());
    fsn.leaveSafeMode();
    assertTrue("FSNamesystem didn't leave safemode", !fsn.isInSafeMode());
    assertTrue("Replication queues weren't being populated even after leaving "
        + "safemode", fsn.isPopulatingReplQueues());
    // Re-entering safemode after startup must keep the queues populated.
    fsn.enterSafeMode(false);
    assertTrue("FSNamesystem didn't enter safemode", fsn.isInSafeMode());
    assertTrue("Replication queues weren't being populated after entering "
        + "safemode 2nd time", fsn.isPopulatingReplQueues());
  }

  @Test
  public void testFsLockFairness() throws IOException, InterruptedException{
    Configuration conf = new Configuration();
    FSEditLog fsEditLog = Mockito.mock(FSEditLog.class);
    FSImage fsImage = Mockito.mock(FSImage.class);
    Mockito.when(fsImage.getEditLog()).thenReturn(fsEditLog);
    // Lock fairness is controlled by the dfs.namenode.fslock.fair config key.
    conf.setBoolean("dfs.namenode.fslock.fair", true);
    FSNamesystem fsNamesystem = new FSNamesystem(conf, fsImage);
    assertTrue(fsNamesystem.getFsLockForTests().isFair());
    conf.setBoolean("dfs.namenode.fslock.fair", false);
    fsNamesystem = new FSNamesystem(conf, fsImage);
    assertFalse(fsNamesystem.getFsLockForTests().isFair());
  }

  @Test
  public void testFSNamesystemLockCompatibility() {
    // FSNamesystemLock must mirror ReentrantReadWriteLock semantics:
    // reentrant hold counts for both read and write locks, and correct
    // current-thread write-ownership reporting.
    FSNamesystemLock rwLock = new FSNamesystemLock(true);
    assertEquals(0, rwLock.getReadHoldCount());
    rwLock.readLock().lock();
    assertEquals(1, rwLock.getReadHoldCount());
    rwLock.readLock().lock();
    assertEquals(2, rwLock.getReadHoldCount());
    rwLock.readLock().unlock();
    assertEquals(1, rwLock.getReadHoldCount());
    rwLock.readLock().unlock();
    assertEquals(0, rwLock.getReadHoldCount());
    assertFalse(rwLock.isWriteLockedByCurrentThread());
    assertEquals(0, rwLock.getWriteHoldCount());
    rwLock.writeLock().lock();
    assertTrue(rwLock.isWriteLockedByCurrentThread());
    assertEquals(1, rwLock.getWriteHoldCount());
    rwLock.writeLock().lock();
    assertTrue(rwLock.isWriteLockedByCurrentThread());
    assertEquals(2, rwLock.getWriteHoldCount());
    rwLock.writeLock().unlock();
    assertTrue(rwLock.isWriteLockedByCurrentThread());
    assertEquals(1, rwLock.getWriteHoldCount());
    rwLock.writeLock().unlock();
    assertFalse(rwLock.isWriteLockedByCurrentThread());
    assertEquals(0, rwLock.getWriteHoldCount());
  }

  @Test
  public void testReset() throws Exception {
    Configuration conf = new Configuration();
    FSEditLog fsEditLog = Mockito.mock(FSEditLog.class);
    FSImage fsImage = Mockito.mock(FSImage.class);
    Mockito.when(fsImage.getEditLog()).thenReturn(fsEditLog);
    FSNamesystem fsn = new FSNamesystem(conf, fsImage);
    fsn.imageLoadComplete();
    assertTrue(fsn.isImageLoaded());
    fsn.clear();
    assertFalse(fsn.isImageLoaded());
    // After clear() the root directory must still exist but be empty.
    final INodeDirectory root = (INodeDirectory) fsn.getFSDirectory()
        .getINode("/");
    assertTrue(root.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
    fsn.imageLoadComplete();
    assertTrue(fsn.isImageLoaded());
  }

  @Test
  public void testGetEffectiveLayoutVersion() {
    // Args: (isRollingUpgrade, storageLV, minCompatLV, currentLV).
    // NOTE(review): from these cases, during a rolling upgrade the storage
    // layout version appears to be retained when it is at or newer than the
    // minimum compatible version; otherwise the current version is used —
    // confirm against FSNamesystem.getEffectiveLayoutVersion.
    assertEquals(-63,
        FSNamesystem.getEffectiveLayoutVersion(true, -60, -61, -63));
    assertEquals(-61,
        FSNamesystem.getEffectiveLayoutVersion(true, -61, -61, -63));
    assertEquals(-62,
        FSNamesystem.getEffectiveLayoutVersion(true, -62, -61, -63));
    assertEquals(-63,
        FSNamesystem.getEffectiveLayoutVersion(true, -63, -61, -63));
    assertEquals(-63,
        FSNamesystem.getEffectiveLayoutVersion(false, -60, -61, -63));
    assertEquals(-63,
        FSNamesystem.getEffectiveLayoutVersion(false, -61, -61, -63));
    assertEquals(-63,
        FSNamesystem.getEffectiveLayoutVersion(false, -62, -61, -63));
    assertEquals(-63,
        FSNamesystem.getEffectiveLayoutVersion(false, -63, -61, -63));
  }
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.startup;
import java.io.File;
import java.net.MalformedURLException;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.CountDownLatch;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionGroup;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.PosixParser;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.IgniteState;
import org.apache.ignite.IgniteSystemProperties;
import org.apache.ignite.IgnitionListener;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.internal.util.typedef.F;
import org.apache.ignite.internal.util.typedef.G;
import org.apache.ignite.internal.util.typedef.internal.U;
import org.apache.ignite.testframework.GridTestUtils;
import org.apache.ignite.testframework.junits.logger.GridTestLog4jLogger;
import org.apache.log4j.Appender;
import org.apache.log4j.Logger;
import org.apache.log4j.varia.NullAppender;
import org.jetbrains.annotations.Nullable;
import org.springframework.beans.BeansException;
import org.springframework.context.ApplicationContext;
import org.springframework.context.support.FileSystemXmlApplicationContext;
import static org.apache.ignite.IgniteState.STOPPED;
/**
 * This class defines random command-line Ignite loader. This loader can be used
 * to randomly start and stop Ignite from command line for tests. This loader is a Java
 * application with {@link #main(String[])} method that accepts command line arguments.
 * See below for details.
 */
public final class GridRandomCommandLineLoader {
    /** Name of the system property defining name of command line program. */
    private static final String IGNITE_PROG_NAME = "IGNITE_PROG_NAME";

    /** Copyright text. Ant processed. */
    private static final String COPYRIGHT = "2016 Copyright(C) Apache Software Foundation.";

    /** Version. Ant processed. */
    private static final String VER = "x.x.x";

    /** CLI option: print usage and exit. */
    private static final String OPTION_HELP = "help";

    /** CLI option: path to Spring XML configuration file. */
    private static final String OPTION_CFG = "cfg";

    /** CLI option: minimum node time-to-live in milliseconds. */
    private static final String OPTION_MIN_TTL = "minTtl";

    /** CLI option: maximum node time-to-live in milliseconds. */
    private static final String OPTION_MAX_TTL = "maxTtl";

    /** CLI option: total run duration in milliseconds. */
    private static final String OPTION_DURATION = "duration";

    /** CLI option: path to log4j configuration file (mandatory). */
    private static final String OPTION_LOG_CFG = "logCfg";

    /** Minimal value for timeout in milliseconds. */
    private static final long DFLT_MIN_TIMEOUT = 1000;

    /** Maximum value for timeout in milliseconds. */
    private static final long DFLT_MAX_TIMEOUT = 1000 * 20;

    /** Work timeout in milliseconds. */
    private static final long DFLT_RUN_TIMEOUT = 1000 * 60 * 5;

    /** Latch counted down by the ignition listener as grids reach STOPPED. */
    private static CountDownLatch latch;

    /**
     * Enforces singleton.
     */
    private GridRandomCommandLineLoader() {
        // No-op.
    }

    /**
     * Echos the given messages.
     *
     * @param msg Message to echo.
     */
    private static void echo(String msg) {
        assert msg != null;

        System.out.println(msg);
    }

    /**
     * Echos exception stack trace.
     *
     * @param e Exception to print.
     */
    private static void echo(IgniteCheckedException e) {
        assert e != null;

        System.err.println(e);
    }

    /**
     * Exists with optional error message, usage show and exit code.
     *
     * @param errMsg Optional error message.
     * @param options Command line options to show usage information.
     * @param exitCode Exit code.
     */
    private static void exit(@Nullable String errMsg, @Nullable Options options, int exitCode) {
        if (errMsg != null)
            echo("ERROR: " + errMsg);

        String runner = System.getProperty(IGNITE_PROG_NAME, "randignite.{sh|bat}");

        // Strip any arguments appended to the program name so only the
        // executable name shows in the usage banner.
        int space = runner.indexOf(' ');

        runner = runner.substring(0, space == -1 ? runner.length() : space);

        if (options != null) {
            HelpFormatter formatter = new HelpFormatter();

            formatter.printHelp(runner, options);
        }

        System.exit(exitCode);
    }

    /**
     * Prints logo.
     */
    private static void logo() {
        echo("Ignite Random Command Line Loader, ver. " + VER);
        echo(COPYRIGHT);
        echo("");
    }

    /**
     * Main entry point.
     *
     * @param args Command line arguments.
     */
    @SuppressWarnings({"BusyWait"})
    public static void main(String[] args) {
        System.setProperty(IgniteSystemProperties.IGNITE_UPDATE_NOTIFIER, "false");

        logo();

        Options options = createOptions();

        // Create the command line parser.
        CommandLineParser parser = new PosixParser();

        String cfgPath = null;

        long minTtl = DFLT_MIN_TIMEOUT;
        long maxTtl = DFLT_MAX_TIMEOUT;
        long duration = DFLT_RUN_TIMEOUT;

        String logCfgPath = null;

        try {
            CommandLine cmd = parser.parse(options, args);

            if (cmd.hasOption(OPTION_HELP))
                exit(null, options, 0);

            if (!cmd.hasOption(OPTION_LOG_CFG))
                exit("-log should be set", options, -1);
            else
                logCfgPath = cmd.getOptionValue(OPTION_LOG_CFG);

            if (cmd.hasOption(OPTION_CFG))
                cfgPath = cmd.getOptionValue(OPTION_CFG);

            try {
                if (cmd.hasOption(OPTION_DURATION))
                    duration = Long.parseLong(cmd.getOptionValue(OPTION_DURATION));
            }
            catch (NumberFormatException ignored) {
                exit("Invalid argument for option: " + OPTION_DURATION, options, -1);
            }

            try {
                if (cmd.hasOption(OPTION_MIN_TTL))
                    minTtl = Long.parseLong(cmd.getOptionValue(OPTION_MIN_TTL));
            }
            catch (NumberFormatException ignored) {
                exit("Invalid argument for option: " + OPTION_MIN_TTL, options, -1);
            }

            try {
                if (cmd.hasOption(OPTION_MAX_TTL))
                    maxTtl = Long.parseLong(cmd.getOptionValue(OPTION_MAX_TTL));
            }
            catch (NumberFormatException ignored) {
                exit("Invalid argument for option: " + OPTION_MAX_TTL, options, -1);
            }

            // Strict inequality required: the random delay below draws from
            // the half-open interval [minTtl, maxTtl).
            if (minTtl >= maxTtl)
                exit("Invalid arguments for options: " + OPTION_MAX_TTL + ", " + OPTION_MIN_TTL, options, -1);
        }
        catch (ParseException e) {
            exit(e.getMessage(), options, -1);
        }

        System.out.println("Configuration path: " + cfgPath);
        System.out.println("Log4j configuration path: " + logCfgPath);
        System.out.println("Duration: " + duration);
        System.out.println("Minimum TTL: " + minTtl);
        System.out.println("Maximum TTL: " + maxTtl);

        // Count down the shutdown latch whenever a grid stops.
        G.addListener(new IgnitionListener() {
            @Override public void onStateChange(String name, IgniteState state) {
                if (state == STOPPED && latch != null)
                    latch.countDown();
            }
        });

        Random rand = new Random();

        long now = System.currentTimeMillis();

        long end = duration + System.currentTimeMillis();

        try {
            // Repeatedly start a grid, keep it alive for a random TTL within
            // [minTtl, maxTtl), then stop it — until the run duration elapses.
            while (now < end) {
                G.start(getConfiguration(cfgPath, logCfgPath));

                long delay = rand.nextInt((int)(maxTtl - minTtl)) + minTtl;

                // Clamp the last sleep so we never overshoot the end time.
                delay = (now + delay > end) ? (end - now) : delay;

                now = System.currentTimeMillis();

                echo("Time left (ms): " + (end - now));

                echo("Going to sleep for (ms): " + delay);

                Thread.sleep(delay);

                G.stopAll(false);

                now = System.currentTimeMillis();
            }
        }
        catch (IgniteCheckedException e) {
            echo(e);

            exit("Failed to start grid: " + e.getMessage(), null, -1);
        }
        catch (InterruptedException e) {
            echo("Loader was interrupted (exiting): " + e.getMessage());
        }

        // Wait for all remaining grids to report STOPPED before exiting.
        latch = new CountDownLatch(G.allGrids().size());

        try {
            while (latch.getCount() > 0)
                latch.await();
        }
        catch (InterruptedException e) {
            echo("Loader was interrupted (exiting): " + e.getMessage());
        }

        System.exit(0);
    }

    /**
     * Initializes configurations.
     *
     * @param springCfgPath Configuration file path.
     * @param logCfgPath Log file name.
     * @return List of configurations.
     * @throws IgniteCheckedException If an error occurs.
     */
    @SuppressWarnings("unchecked")
    private static IgniteConfiguration getConfiguration(String springCfgPath, @Nullable String logCfgPath)
        throws IgniteCheckedException {
        assert springCfgPath != null;

        File path = GridTestUtils.resolveIgnitePath(springCfgPath);

        if (path == null)
            throw new IgniteCheckedException("Spring XML configuration file path is invalid: " + new File(springCfgPath) +
                ". Note that this path should be either absolute path or a relative path to IGNITE_HOME.");

        if (!path.isFile())
            throw new IgniteCheckedException("Provided file path is not a file: " + path);

        // Add no-op logger to remove no-appender warning.
        Appender app = new NullAppender();

        Logger.getRootLogger().addAppender(app);

        ApplicationContext springCtx;

        try {
            springCtx = new FileSystemXmlApplicationContext(path.toURI().toURL().toString());
        }
        catch (BeansException | MalformedURLException e) {
            throw new IgniteCheckedException("Failed to instantiate Spring XML application context: " + e.getMessage(), e);
        }

        Map cfgMap;

        try {
            // Note: Spring is not generics-friendly.
            cfgMap = springCtx.getBeansOfType(IgniteConfiguration.class);
        }
        catch (BeansException e) {
            throw new IgniteCheckedException("Failed to instantiate bean [type=" + IgniteConfiguration.class + ", err=" +
                e.getMessage() + ']', e);
        }

        if (cfgMap == null)
            throw new IgniteCheckedException("Failed to find a single grid factory configuration in: " + path);

        // Remove previously added no-op logger.
        Logger.getRootLogger().removeAppender(app);

        if (cfgMap.size() != 1)
            throw new IgniteCheckedException("Spring configuration file should contain exactly 1 grid configuration: " + path);

        IgniteConfiguration cfg = (IgniteConfiguration)F.first(cfgMap.values());

        assert cfg != null;

        if (logCfgPath != null)
            cfg.setGridLogger(new GridTestLog4jLogger(U.resolveIgniteUrl(logCfgPath)));

        return cfg;
    }

    /**
     * Creates cli options.
     *
     * @return Command line options
     */
    private static Options createOptions() {
        Options options = new Options();

        Option help = new Option(OPTION_HELP, "print this message");

        Option cfg = new Option(null, OPTION_CFG, true, "path to Spring XML configuration file.");

        cfg.setValueSeparator('=');
        cfg.setType(String.class);

        Option minTtl = new Option(null, OPTION_MIN_TTL, true, "node minimum time to live.");

        minTtl.setValueSeparator('=');
        minTtl.setType(Long.class);

        Option maxTtl = new Option(null, OPTION_MAX_TTL, true, "node maximum time to live.");

        maxTtl.setValueSeparator('=');
        maxTtl.setType(Long.class);

        Option duration = new Option(null, OPTION_DURATION, true, "run timeout.");

        duration.setValueSeparator('=');
        duration.setType(Long.class);

        Option log = new Option(null, OPTION_LOG_CFG, true, "path to log4j configuration file.");

        log.setValueSeparator('=');
        log.setType(String.class);

        options.addOption(help);

        // NOTE(review): all remaining options share one required OptionGroup,
        // which in commons-cli makes them mutually exclusive — confirm this is
        // intended rather than marking each option required individually.
        OptionGroup grp = new OptionGroup();

        grp.setRequired(true);

        grp.addOption(cfg);
        grp.addOption(minTtl);
        grp.addOption(maxTtl);
        grp.addOption(duration);
        grp.addOption(log);

        options.addOptionGroup(grp);

        return options;
    }
}
| |
//
// Copyright 2015 Amazon.com, Inc. or its affiliates (Amazon). All Rights Reserved.
//
// Code generated by AWS Mobile Hub. Amazon gives unlimited permission to
// copy, distribute and modify it.
//
// Source code generated from template: aws-my-sample-app-android v0.4
//
package com.amazonaws.mobile.content;
import android.content.Context;
import android.util.Log;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.mobile.AWSConfiguration;
import com.amazonaws.mobile.user.IdentityManager;
import com.amazonaws.mobile.util.StringFormatUtils;
import com.amazonaws.mobile.util.ThreadUtils;
import com.amazonaws.regions.Region;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.ObjectMetadata;
import java.io.File;
import java.io.FileNotFoundException;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.TreeSet;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
/**
* The Content Manager manages caching and transfer of files from Amazon S3 and/or
* Amazon CloudFront. It lists files directly using S3, regardless of whether Amazon
* CloudFront is in use. It maintains a size-limited cache for files stored on the
* local device and provides operations to set the cache size limit and clear files
* from the local cache. It serves as the application's interface into the Content
* Delivery feature. The application can create and use any number of Content Managers
* simultaneously, each with its own Amazon S3 bucket, S3 folder prefix, and local
* on-device cache folder location.
*/
public class ContentManager implements Iterable<ContentItem> {
/**
* Logging tag for this class.
*/
private static final String LOG_TAG = ContentManager.class.getSimpleName();
/**
* The path suffix for storing local content.
*/
private static final String LOCAL_CONTENT_DIR_SUFFIX = "/content";
/**
* The path suffix for storing incoming content.
*/
private static final String LOCAL_CONTENT_XFER_DIR_SUFFIX = "/incoming";
private static final String DIR_DELIMITER = "/" ;
/**
* Amazon S3 Client to use for obtaining content.
*/
protected final AmazonS3Client s3Client;
/**
* The transfer manager to manage transfers.
*/
protected final TransferHelper transferHelper;
/**
* The S3 bucket to use for transfers.
*/
protected final String bucket;
/**
* s3 Objects managed by this content manager use this object prefix.
*/
protected final String s3DirPrefix;
/**
* The application context.
*/
Context context;
/**
* The local content cache.
*/
private final LocalContentCache localContentCache;
/**
* The local path to downloaded content.
*/
protected final String localContentPath;
/**
* The local path to content being downloaded.
*/
protected final String localTransferPath;
/** The thread that handles iterating through the content and adding it to the queue. */
protected final ExecutorService executorService = Executors.newFixedThreadPool(4);
/**
 * Callback receiving the {@link ContentManager} once {@link Builder#build}
 * has finished constructing it; invoked on the UI thread.
 */
public interface BuilderResultHandler {
    void onComplete(ContentManager contentManager);
}
/** Builder for convenience of instantiation. */
public static final class Builder {
    private Context context = null;
    private IdentityManager identityManager = null;
    private String bucket = null;
    private String s3DirPrefix = null;
    private String cloudFrontDomainName = null;
    private String basePath = null;
    private ClientConfiguration clientConfiguration;

    /** Sets the Android context. */
    public Builder withContext(final Context context) {
        this.context = context;
        return this;
    }

    /** Sets the identity manager supplying AWS credentials. */
    public Builder withIdentityManager(final IdentityManager identityManager) {
        this.identityManager = identityManager;
        return this;
    }

    /** Sets the S3 bucket from which content is managed. */
    public Builder withS3Bucket(final String s3Bucket) {
        this.bucket = s3Bucket;
        return this;
    }

    /** Sets the S3 key prefix to manage; null means the bucket root. */
    public Builder withS3DirPrefix(final String s3DirPrefix) {
        this.s3DirPrefix = s3DirPrefix;
        return this;
    }

    /** Sets the CloudFront domain to download through instead of S3, if any. */
    public Builder withCloudFrontDomainName(final String cloudFrontDomainName) {
        this.cloudFrontDomainName = cloudFrontDomainName;
        return this;
    }

    /** Sets the local base path under which cached content is stored. */
    public Builder withLocalBasePath(final String basePath) {
        this.basePath = basePath;
        return this;
    }

    /** Sets the AWS client configuration; a default is created if omitted. */
    public Builder withClientConfiguration(final ClientConfiguration clientConfiguration) {
        this.clientConfiguration = clientConfiguration;
        return this;
    }

    /**
     * Constructs the content manager on a background thread (construction
     * creates directories on disk) and hands it to {@code resultHandler}
     * on the UI thread.
     */
    public void build(final BuilderResultHandler resultHandler) {
        if (clientConfiguration == null) {
            clientConfiguration = new ClientConfiguration();
        }
        new Thread(new Runnable() {
            @Override
            public void run() {
                final ContentManager contentManager =
                    new ContentManager(context, identityManager, bucket, s3DirPrefix,
                        cloudFrontDomainName, basePath, clientConfiguration);
                ThreadUtils.runOnUiThread(new Runnable() {
                    @Override
                    public void run() {
                        resultHandler.onComplete(contentManager);
                    }
                });
            }
        }).start();
    }
}
/**
 * Constructs a content manager.
 *
 * @param context an Android context.
 * @param identityManager identity manager to use for credentials.
 * @param bucket the s3 bucket.
 * @param s3DirPrefix The directory within the bucket for which this content manager will manage content.
 *                    This may be passed as null if the root directory of the bucket should be used. The
 *                    object delimiter is always the standard directory separator of '/'.
 * @param cloudFrontDomainName The CloudFront domain name where this bucket's content may be
 *                             retrieved by downloading over http from a CloudFront edge
 *                             location.
 * @param basePath the base path under which to store the files managed by this content
 *                 manager. This path will have a subdirectory identifying the remote
 *                 location, and beneath that subdirectories 'content' and 'incoming'
 *                 will be created to store the locally cached content and incoming
 *                 transfers respectively.
 * @param clientConfiguration The client configuration for AWS clients.
 * @throws RuntimeException if the local base directory cannot be created or is not a directory.
 */
ContentManager(final Context context,
               final IdentityManager identityManager,
               final String bucket,
               final String s3DirPrefix,
               final String cloudFrontDomainName,
               final String basePath,
               final ClientConfiguration clientConfiguration) {
    this.context = context.getApplicationContext();
    // S3 client uses credentials from the identity manager; the region is
    // taken from the app-wide AWS configuration.
    s3Client = new AmazonS3Client(identityManager.getCredentialsProvider(), clientConfiguration);
    s3Client.setRegion(Region.getRegion(AWSConfiguration.AMAZON_COGNITO_REGION));
    this.bucket = bucket;
    // Normalize the prefix: the local directory prefix never has a trailing
    // '/', while the S3 prefix (when present) always ends with one.
    final String localDirPrefix;
    if (s3DirPrefix != null && !s3DirPrefix.isEmpty()) {
        if (s3DirPrefix.endsWith(DIR_DELIMITER)) {
            localDirPrefix = "/" + s3DirPrefix.substring(0, s3DirPrefix.length() - 1);
            this.s3DirPrefix = s3DirPrefix;
        } else {
            localDirPrefix = "/" + s3DirPrefix;
            this.s3DirPrefix = s3DirPrefix + DIR_DELIMITER;
        }
    } else {
        localDirPrefix = "";
        this.s3DirPrefix = null;
    }
    // Local layout: <basePath>/s3_<bucket><localDirPrefix>/{content,incoming}.
    final String baseContentPath = basePath + "/s3_" + bucket + localDirPrefix;
    final File prefixPathFile = new File(baseContentPath);
    if (!prefixPathFile.exists()) {
        if (!prefixPathFile.mkdirs()) {
            // Fixed previously garbled message ("Can't create directory the base directory").
            throw new RuntimeException(String.format(
                "Can't create the base directory ('%s') for storing local content.",
                baseContentPath));
        }
    }
    if (!prefixPathFile.isDirectory()) {
        throw new RuntimeException(
            String.format("Prefix content path '%s' is not a directory.", baseContentPath));
    }
    localContentPath = baseContentPath + LOCAL_CONTENT_DIR_SUFFIX;
    localTransferPath = baseContentPath + LOCAL_CONTENT_XFER_DIR_SUFFIX;
    localContentCache = new LocalContentCache(context, "com.amazonaws.mobile.content.cache.s3."
        + bucket + localDirPrefix.replace("/", "."), localContentPath);
    // Prefer CloudFront for downloads when a domain is configured; otherwise
    // transfer directly from S3.
    if (cloudFrontDomainName == null) {
        transferHelper = S3TransferHelper.build(context, s3Client, bucket,
            this.s3DirPrefix, localTransferPath, localContentCache);
    } else {
        transferHelper =
            new CloudFrontTransferHelper(context, cloudFrontDomainName,
                this.s3DirPrefix, localTransferPath, localContentCache);
    }
}
/**
 * Returns the configured cache capacity (see {@link #setContentCacheSize}).
 *
 * @return the maximum number of bytes this cache may hold.
 */
public long getContentCacheSize() {
    return localContentCache.getMaxCacheSize();
}
/**
 * Gets the amount of cache space currently in use.
 *
 * @return the number of bytes used by the cache.
 */
public long getCacheUsedSize() {
    final long usedBytes = localContentCache.getCacheSizeUsed();
    return usedBytes;
}
/**
 * Gets the amount of pinned content.
 *
 * @return the number of bytes pinned to the cache.
 */
public long getPinnedSize() {
    final long pinnedBytes = localContentCache.getBytesPinned();
    return pinnedBytes;
}
/**
 * Set the cache size to be used by this content manager. This immediately removes the
 * oldest content by last modified time until the new cache size is no longer exceeded.
 *
 * @param maxCacheSize the new maximum cache size in bytes.
 */
public void setContentCacheSize(final long maxCacheSize) {
    localContentCache.setMaxCacheSize(maxCacheSize);
}
/**
 * Remove local content from the cache.
 *
 * @param filePath the path to the content to remove.
 * @return true if the file will be removed asynchronously, false if the file could not
 *         be removed.
 */
public boolean removeLocal(final String filePath) {
    return localContentCache.removeFile(filePath);
}
/**
 * Clear the local content cache. The removal runs asynchronously on this manager's
 * executor.
 */
public void clearCache() {
    final Runnable clearTask = new Runnable() {
        @Override
        public void run() {
            localContentCache.clear();
        }
    };
    executorService.execute(clearTask);
}
/**
 * Gets the root directory of locally cached content.
 *
 * @return the path to local content managed by this content manager.
 */
public String getLocalContentPath() {
    return localContentPath;
}
/**
 * Determines the content state of a file with respect to any in-flight transfer,
 * taking into account whether an older version is already cached locally.
 */
/* package */ ContentState getContentStateForTransfer(final String filePath) {
    if (!transferHelper.isTransferring(filePath)) {
        // Nothing in flight: the content only exists remotely from the transfer's view.
        return ContentState.REMOTE;
    }
    final boolean cachedLocally = localContentCache.contains(filePath);
    if (transferHelper.isTransferWaiting(filePath)) {
        return cachedLocally
            ? ContentState.CACHED_NEW_VERSION_TRANSFER_WAITING
            : ContentState.TRANSFER_WAITING;
    }
    return cachedLocally
        ? ContentState.CACHED_TRANSFERRING_NEW_VERSION
        : ContentState.TRANSFERRING;
}
/** Package-private accessor for the cache backing this content manager. */
/* package */ LocalContentCache getLocalContentCache() {
    return localContentCache;
}
/** Package-private accessor for the name of the S3 bucket this manager uses. */
/* package */ String getS3bucket() {
    return bucket;
}
/** Package-private accessor for the S3 client used for metadata look-ups and transfers. */
/* package */ AmazonS3Client getS3Client() {
    return s3Client;
}
/**
 * Removes every download-progress listener registered with this content manager.
 */
public void clearProgressListeners() {
    transferHelper.clearProgressListeners();
}
/**
 * Removes every listener associated with this content manager: all progress listeners
 * and the content-removed listener.
 */
public void clearAllListeners() {
    // Progress listeners live on the transfer helper; the removal listener on the cache.
    clearProgressListeners();
    localContentCache.setContentRemovedListener(null);
}
/**
 * Set a listener to be notified of any content removed from the cache managed by this
 * content manager.
 *
 * @param listener the listening handler.
 */
public void setContentRemovedListener(final ContentRemovedListener listener) {
    localContentCache.setContentRemovedListener(listener);
}
/**
 * Registers the progress listener for a file managed by the content manager.
 *
 * @param filePath the relative path and file name.
 * @param listener the listening handler.
 */
public void setProgressListener(final String filePath, final ContentProgressListener listener) {
    transferHelper.setProgressListener(filePath, listener);
}
/**
 * Flags the file so the content manager keeps it and does not count it toward the cache
 * size. A download is triggered only when the content is not already cached.
 *
 * @param filePath the relative path and file name.
 */
public void pinContent(final String filePath) {
    // No listener is supplied for this variant.
    getContent(filePath, 0, ContentDownloadPolicy.DOWNLOAD_IF_NOT_CACHED, true, null);
}
/**
 * Flags the file so the content manager keeps it and does not count it toward the cache
 * size. The listener receives updates if a download is started because the content was
 * not yet cached.
 *
 * @param filePath the relative path and file name.
 * @param listener listener to receive progress updates for any triggered download.
 */
public void pinContent(final String filePath, final ContentProgressListener listener) {
    getContent(filePath, 0, ContentDownloadPolicy.DOWNLOAD_IF_NOT_CACHED, true, listener);
}
/**
 * Removes the flag that instructs the content manager to keep the file and not count it
 * toward the cache size.
 *
 * @param filePath the relative path and file name.
 * @param afterUnpinRunner runnable posted to the UI thread after the content is unpinned
 *                         from the cache; may be null.
 */
public void unPinContent(final String filePath, final Runnable afterUnpinRunner) {
    executorService.execute(new Runnable() {
        @Override
        public void run() {
            localContentCache.unPinFile(filePath);
            if (afterUnpinRunner == null) {
                return;
            }
            ThreadUtils.runOnUiThread(afterUnpinRunner);
        }
    });
}
/**
 * Checks whether a file has been pinned.
 *
 * @param filePath the relative path and file name.
 * @return true if content has been marked to be kept via {@link #pinContent(String)},
 *         false otherwise.
 */
public boolean isContentPinned(final String filePath) {
    return localContentCache.shouldPinFile(filePath);
}
/**
 * Get content by file name. Downloads and caches the remote content and calls the
 * listener's onSuccess method once the content is ready to be accessed.
 *
 * @param filePath the relative path and file name of the content to retrieve.
 * @param listener listener to receive the result.
 */
public void getContent(final String filePath, final ContentProgressListener listener) {
    // Unknown size (0), default download policy, no pinning.
    getContent(filePath, 0, ContentDownloadPolicy.DOWNLOAD_IF_NOT_CACHED, false, listener);
}
/**
 * Get content by file name. Downloads and caches the remote content if it is not available or
 * if the download policy requires it. Calls the listener's onSuccess method once the content
 * is ready to be accessed.
 *
 * @param filePath the relative path and file name of the content to retrieve.
 * @param optionalFileSize an optional file size to be checked against the space available in
 *                         the cache; 0 when unknown.
 * @param policy indicates the download policy. See {@link ContentDownloadPolicy}
 * @param pinOnCompletion when true, the file is pinned so it is kept and not counted toward
 *                        the cache size.
 * @param listener listener to receive the result; may be null (e.g. from
 *                 {@link #pinContent(String)}).
 */
public void getContent(final String filePath,
                       final long optionalFileSize,
                       final ContentDownloadPolicy policy,
                       final boolean pinOnCompletion,
                       final ContentProgressListener listener) {
    executorService.execute(new Runnable() {
        @Override
        public void run() {
            File localFile;
            // Pin up front so the file is retained regardless of how it is obtained below.
            if (pinOnCompletion) {
                localContentCache.pinFile(filePath);
            }
            if (policy == ContentDownloadPolicy.DOWNLOAD_ALWAYS) {
                // ignore that the file may be in cache when the policy is to always download.
                localFile = null;
            } else {
                localFile = localContentCache.get(filePath);
                if (policy == ContentDownloadPolicy.DOWNLOAD_METADATA_IF_NOT_CACHED && localFile == null) {
                    try {
                        final String s3Key = s3DirPrefix != null ? s3DirPrefix + filePath : filePath;
                        final ObjectMetadata objectMeta =
                            s3Client.getObjectMetadata(bucket, s3Key);
                        // Check if the object is transferring and adjust the state appropriately.
                        final ContentState contentState;
                        if (transferHelper.isTransferring(filePath)) {
                            if (transferHelper.isTransferWaiting(filePath)) {
                                contentState = ContentState.TRANSFER_WAITING;
                            } else {
                                contentState = ContentState.TRANSFERRING;
                            }
                        } else {
                            contentState = ContentState.REMOTE;
                        }
                        // Fix: null-guard the listener like the error paths already do;
                        // pinContent(String) passes a null listener.
                        if (listener != null) {
                            ThreadUtils.runOnUiThread(new Runnable() {
                                @Override
                                public void run() {
                                    listener.onSuccess(
                                        new S3ContentMeta(filePath, objectMeta, contentState));
                                }
                            });
                        }
                    } catch (final AmazonServiceException ex) {
                        Log.d(LOG_TAG, ex.getMessage(), ex);
                        if (listener != null) {
                            ThreadUtils.runOnUiThread(new Runnable() {
                                @Override
                                public void run() {
                                    listener.onError(filePath, ex);
                                }
                            });
                        }
                    }
                    // Metadata-only policy never downloads content.
                    return;
                }
            }
            final boolean isPolicyDownloadIfNewer =
                policy == ContentDownloadPolicy.DOWNLOAD_IF_NEWER_EXIST;
            final long fileSize;
            if (localFile != null && (isPolicyDownloadIfNewer || optionalFileSize == 0)) {
                try {
                    final String s3Key = s3DirPrefix != null ? s3DirPrefix + filePath : filePath;
                    final ObjectMetadata objectMeta =
                        s3Client.getObjectMetadata(bucket, s3Key);
                    // Set the file size from the retrieved meta data.
                    fileSize = objectMeta.getContentLength();
                    // If the remote file is newer.
                    if (isPolicyDownloadIfNewer &&
                        localFile.lastModified() < objectMeta.getLastModified().getTime()) {
                        // Ignore the local file and force a download.
                        localFile = null;
                    }
                } catch (final AmazonServiceException ex) {
                    Log.d(LOG_TAG, ex.getMessage(), ex);
                    if (listener != null) {
                        ThreadUtils.runOnUiThread(new Runnable() {
                            @Override
                            public void run() {
                                listener.onError(filePath, ex);
                            }
                        });
                    }
                    return;
                }
            } else {
                fileSize = optionalFileSize;
            }
            if (localFile != null) {
                final File result = localFile;
                // Fix: null-guard the listener like the error paths already do;
                // pinContent(String) passes a null listener for already-cached content.
                if (listener != null) {
                    ThreadUtils.runOnUiThread(new Runnable() {
                        @Override
                        public void run() {
                            listener.onSuccess(new FileContent(result,
                                localContentCache.absolutePathToRelativePath(result.getAbsolutePath())));
                        }
                    });
                }
                return;
            }
            // Don't attempt to download if the file doesn't exist and the policy is never to
            // download.
            if (policy == ContentDownloadPolicy.DOWNLOAD_NEVER) {
                Log.d(LOG_TAG, String.format(
                    "Policy set to never DOWNLOAD_NEVER and the file(%s) was not cached.",
                    filePath), new FileNotFoundException());
                if (listener != null) {
                    ThreadUtils.runOnUiThread(new Runnable() {
                        @Override
                        public void run() {
                            listener.onError(filePath, new FileNotFoundException(
                                "Policy set to DOWNLOAD_NEVER and the file was not cached."));
                        }
                    });
                }
                return;
            }
            // Pinned content is not counted against the cache budget, so only check space
            // for unpinned downloads.
            if (!pinOnCompletion && !localContentCache.shouldPinFile(filePath)) {
                final long sizeTransferring = transferHelper.getSizeTransferring();
                // Check if there is space if the file size is available
                final Exception ex = getExceptionIfNoSpace(filePath, fileSize, sizeTransferring);
                if (ex != null) {
                    Log.d(LOG_TAG, ex.getMessage(), ex);
                    if (listener != null) {
                        ThreadUtils.runOnUiThread(new Runnable() {
                            @Override
                            public void run() {
                                listener.onError(filePath, ex);
                            }
                        });
                    }
                    return;
                }
            }
            // Begin downloading content.
            transferHelper.download(filePath, fileSize, listener);
        }
    });
}
/**
 * Checks whether a file of the given size can fit in the cache alongside in-flight
 * transfers.
 *
 * @param filePath the relative path and file name, used only for messages.
 * @param fileSize the size of the candidate file in bytes.
 * @param sizeTransferring total bytes of transfers currently in progress.
 * @return an exception describing why the file cannot fit, or null when it can (a warning
 *         is logged if in-progress transfers would overflow the cache).
 */
private Exception getExceptionIfNoSpace(final String filePath, final long fileSize, final long sizeTransferring) {
    final long maxCacheSize = localContentCache.getMaxCacheSize();
    // if this file can't fit in our cache.
    if (fileSize > maxCacheSize) {
        return new IllegalStateException(
            String.format("Adding '%s' of size %s would exceed the cache size by %s bytes.",
                filePath, StringFormatUtils.getBytesString(fileSize, true),
                StringFormatUtils.getBytesString(fileSize - maxCacheSize, true)));
    }
    final long bytesOverSize = (sizeTransferring + fileSize) - maxCacheSize;
    if (bytesOverSize > 0) {
        // Not fatal: oldest completed content will simply be evicted.
        Log.w(LOG_TAG, String.format("Adding '%s' of size %s causes in progress transfers to" +
            " exceed the cache size by %s bytes. Content that completes downloading first" +
            " will be dropped.", filePath, StringFormatUtils.getBytesString(fileSize, true),
            StringFormatUtils.getBytesString(bytesOverSize, true)));
    }
    return null;
}
/**
 * Orders content items newest first by last modified time, breaking ties by file path.
 */
private static Comparator<ContentItem> compareContentItemsByDateAndName
    = new Comparator<ContentItem>() {
    @Override
    public int compare(ContentItem lhs, ContentItem rhs) {
        final long leftTime = lhs.getLastModifiedTime();
        final long rightTime = rhs.getLastModifiedTime();
        if (leftTime == rightTime) {
            // Same timestamp: fall back to lexicographic path order.
            return lhs.getFilePath().compareTo(rhs.getFilePath());
        }
        // Larger (newer) timestamps sort first.
        return rightTime > leftTime ? 1 : -1;
    }
};
/**
 * Downloads recent content managed by the content manager. Stops upon reaching the first item
 * that is too large to fit in the remaining available cache space.
 */
class DownloadRecentS3ContentRunner implements Runnable {
    // Full S3 object-key prefix (bucket directory prefix + file prefix); see getS3PathPrefix().
    final String s3Prefix;
    // File path prefix relative to the local content root.
    final String localPathPrefix;
    // Optional listener forwarded to every download that gets started; may be null.
    final ContentProgressListener listener;
    DownloadRecentS3ContentRunner(final String filePathPrefix, final ContentProgressListener listener) {
        this.localPathPrefix = filePathPrefix;
        this.s3Prefix = getS3PathPrefix(filePathPrefix);
        this.listener = listener;
    }
    @Override
    public void run() {
        // Iterate files only (last argument false excludes directory entries).
        final AvailableS3ContentIterator contentIterator = new AvailableS3ContentIterator(
            ContentManager.this, s3Prefix, localPathPrefix, null, executorService, false);
        // TreeSet ordered by compareContentItemsByDateAndName: newest first, ties by path.
        final TreeSet<ContentItem> sortedItems =
            new TreeSet<>(compareContentItemsByDateAndName);
        try {
            // Retrieve all content items and sort by most recent using TreeSet.
            for (final ContentItem contentItem : contentIterator) {
                sortedItems.add(contentItem);
            }
        } catch (final Exception ex) {
            Log.e(LOG_TAG, ex.getMessage(), ex);
            // Listing failures are reported with a null file path.
            // NOTE(review): listener is not null-checked here — confirm callers always
            // supply a listener before relying on this path.
            ThreadUtils.runOnUiThread(new Runnable() {
                @Override
                public void run() {
                    listener.onError(null, ex);
                }
            });
            return;
        }
        long availableSpaceInCache = localContentCache.getMaxCacheSize();
        // Load the cache with all items that can fit.
        for (final ContentItem contentItem : sortedItems) {
            // Stop at the first (oldest-so-far) item that no longer fits the remaining budget.
            if (contentItem.getSize() > availableSpaceInCache) {
                break;
            }
            final String filePath = contentItem.getFilePath();
            // Pinned files do not consume the cache budget.
            if (!localContentCache.shouldPinFile(filePath)) {
                availableSpaceInCache -= contentItem.getSize();
            }
            // Only fetch items that are remote or have a newer remote version.
            if (contentItem.getContentState() ==
                ContentState.CACHED_WITH_NEWER_VERSION_AVAILABLE ||
                contentItem.getContentState() == ContentState.REMOTE) {
                Log.d(LOG_TAG, "Downloading recent content for file: " + contentItem.getFilePath());
                getContent(contentItem.getFilePath(), contentItem.getSize(),
                    ContentDownloadPolicy.DOWNLOAD_ALWAYS, false, listener);
            }
        }
    }
}
/**
 * Preload most recent content until reaching the first content item that is too large to
 * fit in the remaining available cache space.
 *
 * @param listener an optional progress listener to use for all the downloads that are
 *                 created.
 */
public void downloadRecentContent(final ContentProgressListener listener) {
    // Null prefix: consider all files this manager can see.
    downloadRecentContent(null, listener);
}
/**
 * @return prefix of the folder in the S3 bucket, or null when the bucket root is used.
 */
public String getS3DirPrefix() {
    return s3DirPrefix;
}
/**
 * Retrieves the full S3 object prefix given a file name prefix.
 *
 * The S3 path to files beginning with a prefix must account for the directory within the
 * bucket that this content manager manages, as well as the file name prefix if one
 * exists.
 *
 * @param filePathPrefix the file name prefix for a file in S3; may be null or empty.
 * @return the full S3 object name prefix (may be null when neither prefix is set).
 */
protected String getS3PathPrefix(String filePathPrefix) {
    if (filePathPrefix == null || filePathPrefix.isEmpty()) {
        // No file prefix: the bucket directory prefix alone (possibly null) applies.
        return s3DirPrefix;
    }
    return (s3DirPrefix == null) ? filePathPrefix : s3DirPrefix + filePathPrefix;
}
/**
 * Preload most recent content with names beginning with a prefix until the cache is full.
 *
 * @param filePathPrefix file names must begin with this prefix to be considered for
 *                       download.
 * @param listener an optional progress listener to use for all the downloads that are
 *                 created. Note that if there is an error listing, the
 *                 {@link ContentProgressListener#onError(String, Exception)} method will
 *                 be invoked with a null filePath.
 */
public void downloadRecentContent(final String filePathPrefix,
                                  final ContentProgressListener listener) {
    final Runnable downloadTask = new DownloadRecentS3ContentRunner(filePathPrefix, listener);
    executorService.execute(downloadTask);
}
/**
 * Creates an iterator over all available content managed by this content manager,
 * including directory entries.
 *
 * @return the iterator.
 */
@Override
public AvailableS3ContentIterator iterator() {
    // Empty local prefix iterates everything under the manager's S3 directory prefix.
    return new AvailableS3ContentIterator(this, s3DirPrefix, "",
        DIR_DELIMITER, executorService, true);
}
/**
 * Get an iterator over all files beginning with a specified prefix, including directory
 * entries.
 *
 * @param filePathPrefix the prefix for all file names that will be included by the
 *                       iterator.
 * @return the iterator.
 */
public AvailableS3ContentIterator getAvailableContentIterator(final String filePathPrefix) {
    final String s3PathPrefix = getS3PathPrefix(filePathPrefix);
    return new AvailableS3ContentIterator(this, s3PathPrefix, filePathPrefix,
        DIR_DELIMITER, executorService, true);
}
/**
 * Get an iterator over all files beginning with a specified prefix.
 *
 * @param filePathPrefix the prefix for all file names that will be included by the
 *                       iterator.
 * @param includeDirectories whether directory entries are included by the iterator.
 * @return the iterator.
 */
public AvailableS3ContentIterator getAvailableContentIterator(final String filePathPrefix,
                                                              final boolean includeDirectories) {
    final String s3PathPrefix = getS3PathPrefix(filePathPrefix);
    return new AvailableS3ContentIterator(this, s3PathPrefix, filePathPrefix,
        DIR_DELIMITER, executorService, includeDirectories);
}
/**
 * Runnable that iterates available content and delivers it to a ContentListHandler on the
 * UI thread in batches, flushing a batch whenever the next iteration step would block.
 */
private class ContentLister implements Runnable {
    // Receives batches of results on the UI thread.
    final ContentListHandler listHandler;
    // File name prefix used to filter the listing.
    final String prefix;
    // Index of the first item in the next batch delivered to the handler.
    int startIndex;
    AvailableS3ContentIterator availableS3ContentIterator;
    ContentLister(final String prefix, final ContentListHandler listHandler) {
        this.listHandler = listHandler;
        this.prefix = prefix;
        startIndex = 0;
    }
    // Posts one batch to the UI thread; cancels iteration if the handler returns false.
    private void addContentItems(final int startIndex,
                                 final List<ContentItem> contentItems,
                                 final boolean hasMoreResults) {
        ThreadUtils.runOnUiThread(new Runnable() {
            @Override
            public void run() {
                if (!listHandler.onContentReceived(startIndex, contentItems, hasMoreResults)) {
                    // if the user has requested to cancel listing content.
                    availableS3ContentIterator.cancel();
                }
            }
        });
    }
    @Override
    public void run() {
        availableS3ContentIterator = getAvailableContentIterator(prefix);
        try {
            ArrayList<ContentItem> contentItems = new ArrayList<>();
            for (final ContentItem contentItem : availableS3ContentIterator) {
                Log.d(LOG_TAG, "Found file: " + contentItem.getFilePath());
                contentItems.add(contentItem);
                // When we determine getting more content will block
                if (availableS3ContentIterator.willNextBlock()) {
                    final int itemCount = contentItems.size();
                    // Add items so far to the UI.
                    addContentItems(startIndex, contentItems, true);
                    // Start a new list of items to add.
                    contentItems = new ArrayList<>();
                    startIndex += itemCount;
                }
            }
            // Final (possibly empty) batch signals completion via hasMoreResults == false.
            addContentItems(startIndex, contentItems, false);
        } catch (final Exception ex) {
            Log.e(LOG_TAG, ex.getMessage(), ex);
            ThreadUtils.runOnUiThread(new Runnable() {
                @Override
                public void run() {
                    listHandler.onError(ex);
                }
            });
        }
    }
};
/**
 * List all available content on the UI thread in batches of results.
 *
 * @param handler the handler for receiving results.
 */
public void listAvailableContent(final ContentListHandler handler) {
    // An empty prefix matches every file.
    executorService.execute(new ContentLister("", handler));
}
/**
 * List all available content whose file names begin with a specified prefix. Content
 * comes back through a handler on the UI thread in batches of results. The handler's
 * {@link ContentListHandler#onContentReceived(int, List, boolean)} allows requesting to
 * cancel listing; however, since the callbacks are on the UI thread, the handler may be
 * called several more times for any queued results that have already been received.
 *
 * @param filePathPrefix the path and file name prefix for listing.
 * @param handler the handler for receiving results.
 */
public void listAvailableContent(final String filePathPrefix,
                                 final ContentListHandler handler) {
    final Runnable listTask = new ContentLister(filePathPrefix, handler);
    executorService.execute(listTask);
}
/**
 * Releases resources held by this content manager. This must be called once the content
 * manager is no longer needed; no methods should be called on the ContentManager after
 * this method has been called.
 */
public synchronized void destroy() {
    transferHelper.destroy();
}
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.sling.i18n.it;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.fail;
import static org.ops4j.pax.exam.CoreOptions.frameworkProperty;
import static org.ops4j.pax.exam.CoreOptions.junitBundles;
import static org.ops4j.pax.exam.CoreOptions.mavenBundle;
import static org.ops4j.pax.exam.CoreOptions.options;
import static org.ops4j.pax.exam.CoreOptions.systemProperty;
import static org.ops4j.pax.exam.CoreOptions.when;
import java.io.File;
import java.net.URISyntaxException;
import java.util.Locale;
import java.util.ResourceBundle;
import javax.inject.Inject;
import javax.jcr.Node;
import javax.jcr.RepositoryException;
import javax.jcr.Session;
import org.apache.sling.i18n.ResourceBundleProvider;
import org.apache.sling.jcr.api.SlingRepository;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.ops4j.pax.exam.Configuration;
import org.ops4j.pax.exam.CoreOptions;
import org.ops4j.pax.exam.Option;
import org.ops4j.pax.exam.cm.ConfigurationAdminOptions;
import org.ops4j.pax.exam.junit.PaxExam;
import org.ops4j.pax.exam.spi.reactors.ExamReactorStrategy;
import org.ops4j.pax.exam.spi.reactors.PerClass;
@RunWith(PaxExam.class)
@ExamReactorStrategy(PerClass.class)
public class ResourceBundleProviderIT {
private static final String BUNDLE_JAR_SYS_PROP = "bundle.filename";
/** The property containing the build directory. */
private static final String SYS_PROP_BUILD_DIR = "bundle.build.dir";
private static final String DEFAULT_BUILD_DIR = "target";
private static final String PORT_CONFIG = "org.osgi.service.http.port";
public static final int RETRY_TIMEOUT_MSEC = 50000;
public static final String MSG_KEY1 = "foo";
public static final String MSG_KEY2 = "foo2";
@Inject
private SlingRepository repository;
@Inject
private ResourceBundleProvider resourceBundleProvider;
private Session session;
private Node i18nRoot;
private Node deRoot;
private Node deDeRoot;
private Node frRoot;
private Node enRoot;
@Configuration
public Option[] config() {
final String buildDir = System.getProperty(SYS_PROP_BUILD_DIR, DEFAULT_BUILD_DIR);
final String bundleFileName = System.getProperty( BUNDLE_JAR_SYS_PROP );
final File bundleFile = new File( bundleFileName );
if ( !bundleFile.canRead() ) {
throw new IllegalArgumentException( "Cannot read from bundle file " + bundleFileName + " specified in the "
+ BUNDLE_JAR_SYS_PROP + " system property" );
}
String localRepo = System.getProperty("maven.repo.local", "");
final String jackrabbitVersion = "2.13.1";
final String oakVersion = "1.5.7";
final String slingHome = new File(buildDir + File.separatorChar + "sling_" + System.currentTimeMillis()).getAbsolutePath();
return options(
frameworkProperty("sling.home").value(slingHome),
frameworkProperty("repository.home").value(slingHome + File.separatorChar + "repository"),
when( localRepo.length() > 0 ).useOptions(
systemProperty("org.ops4j.pax.url.mvn.localRepository").value(localRepo)
),
when( System.getProperty(PORT_CONFIG) != null ).useOptions(
systemProperty(PORT_CONFIG).value(System.getProperty(PORT_CONFIG))),
systemProperty("pax.exam.osgi.unresolved.fail").value("true"),
ConfigurationAdminOptions.newConfiguration("org.apache.felix.jaas.ConfigurationSpi")
.create(true)
.put("jaas.defaultRealmName", "jackrabbit.oak")
.put("jaas.configProviderName", "FelixJaasProvider")
.asOption(),
ConfigurationAdminOptions.factoryConfiguration("org.apache.felix.jaas.Configuration.factory")
.create(true)
.put("jaas.controlFlag", "optional")
.put("jaas.classname", "org.apache.jackrabbit.oak.spi.security.authentication.GuestLoginModule")
.put("jaas.ranking", 300)
.asOption(),
ConfigurationAdminOptions.factoryConfiguration("org.apache.felix.jaas.Configuration.factory")
.create(true)
.put("jaas.controlFlag", "required")
.put("jaas.classname", "org.apache.jackrabbit.oak.security.authentication.user.LoginModuleImpl")
.asOption(),
ConfigurationAdminOptions.factoryConfiguration("org.apache.felix.jaas.Configuration.factory")
.create(true)
.put("jaas.controlFlag", "sufficient")
.put("jaas.classname", "org.apache.jackrabbit.oak.security.authentication.token.TokenLoginModule")
.put("jaas.ranking", 200)
.asOption(),
ConfigurationAdminOptions.newConfiguration("org.apache.jackrabbit.oak.security.authentication.AuthenticationConfigurationImpl")
.create(true)
.put("org.apache.jackrabbit.oak.authentication.configSpiName", "FelixJaasProvider")
.asOption(),
ConfigurationAdminOptions.newConfiguration("org.apache.jackrabbit.oak.security.user.UserConfigurationImpl")
.create(true)
.put("groupsPath", "/home/groups")
.put("usersPath", "/home/users")
.put("defaultPath", "1")
.put("importBehavior", "besteffort")
.asOption(),
ConfigurationAdminOptions.newConfiguration("org.apache.jackrabbit.oak.security.user.RandomAuthorizableNodeName")
.create(true)
.put("enabledActions", new String[] {"org.apache.jackrabbit.oak.spi.security.user.action.AccessControlAction"})
.put("userPrivilegeNames", new String[] {"jcr:all"})
.put("groupPrivilegeNames", new String[] {"jcr:read"})
.asOption(),
ConfigurationAdminOptions.newConfiguration("org.apache.jackrabbit.oak.spi.security.user.action.DefaultAuthorizableActionProvider")
.create(true)
.put("length", 21)
.asOption(),
ConfigurationAdminOptions.newConfiguration("org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStoreService")
.create(true)
.put("name", "Default NodeStore")
.asOption(),
ConfigurationAdminOptions.newConfiguration("org.apache.sling.resourceresolver.impl.observation.OsgiObservationBridge")
.create(true)
.put("enabled", true)
.asOption(),
ConfigurationAdminOptions.factoryConfiguration("org.apache.sling.serviceusermapping.impl.ServiceUserMapperImpl.amended")
.create(true)
.put("user.mapping", new String[]{"org.apache.sling.i18n=sling-i18n"})
.asOption(),
ConfigurationAdminOptions.newConfiguration("org.apache.sling.jcr.repoinit.impl.RepositoryInitializer")
.put("references", new String[]{references()})
.asOption(),
// logging
systemProperty("pax.exam.logging").value("none"),
mavenBundle("org.apache.sling", "org.apache.sling.commons.log", "4.0.6"),
mavenBundle("org.apache.sling", "org.apache.sling.commons.logservice", "1.0.6"),
mavenBundle("org.slf4j", "slf4j-api", "1.7.13"),
mavenBundle("org.slf4j", "jcl-over-slf4j", "1.7.13"),
mavenBundle("org.slf4j", "log4j-over-slf4j", "1.7.13"),
mavenBundle("commons-io", "commons-io", "2.4"),
mavenBundle("commons-fileupload", "commons-fileupload", "1.3.1"),
mavenBundle("commons-collections", "commons-collections", "3.2.2"),
mavenBundle("commons-codec", "commons-codec", "1.10"),
mavenBundle("commons-lang", "commons-lang", "2.6"),
mavenBundle("commons-pool", "commons-pool", "1.6"),
mavenBundle("org.apache.servicemix.bundles", "org.apache.servicemix.bundles.concurrent", "1.3.4_1"),
mavenBundle("org.apache.geronimo.bundles", "commons-httpclient", "3.1_1"),
mavenBundle("org.apache.tika", "tika-core", "1.9"),
mavenBundle("org.apache.tika", "tika-bundle", "1.9"),
// infrastructure
mavenBundle("org.apache.felix", "org.apache.felix.http.servlet-api", "1.1.2"),
mavenBundle("org.apache.felix", "org.apache.felix.http.jetty", "3.1.6"),
mavenBundle("org.apache.felix", "org.apache.felix.eventadmin", "1.4.4"),
mavenBundle("org.apache.felix", "org.apache.felix.scr", "2.0.4"),
mavenBundle("org.apache.felix", "org.apache.felix.configadmin", "1.8.10"),
mavenBundle("org.apache.felix", "org.apache.felix.inventory", "1.0.4"),
mavenBundle("org.apache.felix", "org.apache.felix.metatype", "1.1.2"),
// sling
mavenBundle("org.apache.sling", "org.apache.sling.settings", "1.3.8"),
mavenBundle("org.apache.sling", "org.apache.sling.commons.osgi", "2.3.0"),
mavenBundle("org.apache.sling", "org.apache.sling.commons.json", "2.0.16"),
mavenBundle("org.apache.sling", "org.apache.sling.commons.mime", "2.1.8"),
mavenBundle("org.apache.sling", "org.apache.sling.commons.classloader", "1.3.2"),
mavenBundle("org.apache.sling", "org.apache.sling.commons.scheduler", "2.4.14"),
mavenBundle("org.apache.sling", "org.apache.sling.commons.threads", "3.2.4"),
mavenBundle("org.apache.sling", "org.apache.sling.auth.core", "1.3.12"),
mavenBundle("org.apache.sling", "org.apache.sling.discovery.api", "1.0.2"),
mavenBundle("org.apache.sling", "org.apache.sling.discovery.commons", "1.0.12"),
mavenBundle("org.apache.sling", "org.apache.sling.discovery.standalone", "1.0.2"),
mavenBundle("org.apache.sling", "org.apache.sling.api", "2.14.2"),
mavenBundle("org.apache.sling", "org.apache.sling.resourceresolver", "1.4.18"),
mavenBundle("org.apache.sling", "org.apache.sling.adapter", "2.1.2"),
mavenBundle("org.apache.sling", "org.apache.sling.jcr.resource", "2.8.0"),
mavenBundle("org.apache.sling", "org.apache.sling.jcr.classloader", "3.2.2"),
mavenBundle("org.apache.sling", "org.apache.sling.jcr.contentloader", "2.1.8"),
mavenBundle("org.apache.sling", "org.apache.sling.engine", "2.6.2"),
mavenBundle("org.apache.sling", "org.apache.sling.serviceusermapper", "1.2.2"),
mavenBundle("org.apache.sling", "org.apache.sling.jcr.jcr-wrapper", "2.0.0"),
mavenBundle("org.apache.sling", "org.apache.sling.jcr.api", "2.4.0"),
mavenBundle("org.apache.sling", "org.apache.sling.jcr.base", "2.4.0"),
mavenBundle("org.apache.sling", "org.apache.sling.jcr.repoinit", "1.1.0"),
mavenBundle("org.apache.sling", "org.apache.sling.repoinit.parser", "1.1.0"),
mavenBundle("org.apache.sling", "org.apache.sling.provisioning.model", "1.4.2"),
mavenBundle("com.google.guava", "guava", "15.0"),
mavenBundle("org.apache.jackrabbit", "jackrabbit-api", jackrabbitVersion),
mavenBundle("org.apache.jackrabbit", "jackrabbit-jcr-commons", jackrabbitVersion),
mavenBundle("org.apache.jackrabbit", "jackrabbit-spi", jackrabbitVersion),
mavenBundle("org.apache.jackrabbit", "jackrabbit-spi-commons", jackrabbitVersion),
mavenBundle("org.apache.jackrabbit", "jackrabbit-jcr-rmi", jackrabbitVersion),
mavenBundle("org.apache.felix", "org.apache.felix.jaas", "0.0.4"),
mavenBundle("org.apache.jackrabbit", "oak-core", oakVersion),
mavenBundle("org.apache.jackrabbit", "oak-commons", oakVersion),
mavenBundle("org.apache.jackrabbit", "oak-lucene", oakVersion),
mavenBundle("org.apache.jackrabbit", "oak-blob", oakVersion),
mavenBundle("org.apache.jackrabbit", "oak-jcr", oakVersion),
mavenBundle("org.apache.jackrabbit", "oak-segment", oakVersion),
mavenBundle("org.apache.sling", "org.apache.sling.jcr.oak.server", "1.1.0"),
mavenBundle("org.apache.sling", "org.apache.sling.testing.tools", "1.0.6"),
mavenBundle("org.apache.httpcomponents", "httpcore-osgi", "4.1.2"),
mavenBundle("org.apache.httpcomponents", "httpclient-osgi", "4.1.2"),
junitBundles(),
CoreOptions.bundle( bundleFile.toURI().toString() )
);
}
static abstract class Retry {
Retry(int timeoutMsec) {
final long timeout = System.currentTimeMillis() + timeoutMsec;
Throwable lastT = null;
while(System.currentTimeMillis() < timeout) {
try {
lastT = null;
exec();
break;
} catch(Throwable t) {
lastT = t;
}
}
if(lastT != null) {
fail("Failed after " + timeoutMsec + " msec: " + lastT);
}
}
protected abstract void exec() throws Exception;
}
@Before
public void setup() throws RepositoryException {
session = repository.loginAdministrative(null);
final Node root = session.getRootNode();
final Node libs;
if(root.hasNode("libs")) {
libs = root.getNode("libs");
} else {
libs = root.addNode("libs", "nt:unstructured");
}
i18nRoot = libs.addNode("i18n", "nt:unstructured");
deRoot = addLanguageNode(i18nRoot, "de");
frRoot = addLanguageNode(i18nRoot, "fr");
deDeRoot = addLanguageNode(i18nRoot, "de_DE");
enRoot = addLanguageNode(i18nRoot, "en");
session.save();
}
@After
public void cleanup() throws RepositoryException {
i18nRoot.remove();
session.save();
session.logout();
}
private Node addLanguageNode(Node parent, String language) throws RepositoryException {
final Node child = parent.addNode(language, "nt:folder");
child.addMixin("mix:language");
child.setProperty("jcr:language", language);
return child;
}
private void assertMessages(final String key, final String deMessage, final String deDeMessage, final String frMessage) {
new Retry(RETRY_TIMEOUT_MSEC) {
@Override
protected void exec() {
{
final ResourceBundle deDE = resourceBundleProvider.getResourceBundle(Locale.GERMANY); // this is the resource bundle for de_DE
assertNotNull(deDE);
assertEquals(deDeMessage, deDE.getString(key));
}
{
final ResourceBundle de = resourceBundleProvider.getResourceBundle(Locale.GERMAN);
assertNotNull(de);
assertEquals(deMessage, de.getString(key));
}
{
final ResourceBundle fr = resourceBundleProvider.getResourceBundle(Locale.FRENCH);
assertNotNull(fr);
assertEquals(frMessage, fr.getString(key));
}
}
};
}
/**
 * Creates or updates a sling:MessageEntry under {@code rootNode} that maps
 * {@code key} to {@code message}. The caller is responsible for saving the
 * session afterwards.
 */
private void setMessage(final Node rootNode, final String key, final String message) throws RepositoryException {
    final String nodeName = "node_" + key;
    final Node entry = rootNode.hasNode(nodeName)
            ? rootNode.getNode(nodeName)
            : rootNode.addNode(nodeName, "sling:MessageEntry");
    entry.setProperty("sling:key", key);
    entry.setProperty("sling:message", message);
}
/**
 * Verifies that repository changes to the message dictionaries are picked
 * up by the resource bundle provider: new keys, changed values, language
 * fallback (de_DE -> de -> en) and changes to fallback-only keys.
 * Each assertMessages call retries internally until the change is visible.
 */
@Test
public void testChangesDetection() throws RepositoryException {
    // set a key which is only available in the en dictionary
    setMessage(enRoot, MSG_KEY2, "EN_message");
    session.save();
    // since "en" is the fallback for all other resource bundle, the value from "en" must be exposed
    assertMessages(MSG_KEY2, "EN_message", "EN_message", "EN_message");
    setMessage(deRoot, MSG_KEY1, "DE_message");
    setMessage(frRoot, MSG_KEY1, "FR_message");
    session.save();
    // de_DE has no value of its own yet, so it falls back to de
    assertMessages(MSG_KEY1, "DE_message", "DE_message", "FR_message");
    setMessage(deRoot, MSG_KEY1, "DE_changed");
    setMessage(frRoot, MSG_KEY1, "FR_changed");
    session.save();
    assertMessages(MSG_KEY1, "DE_changed", "DE_changed", "FR_changed");
    setMessage(deRoot, MSG_KEY1, "DE_message");
    setMessage(deDeRoot, MSG_KEY1, "DE_DE_message");
    setMessage(frRoot, MSG_KEY1, "FR_message");
    session.save();
    // once de_DE defines the key, the more specific value wins over de
    assertMessages(MSG_KEY1, "DE_message", "DE_DE_message", "FR_message");
    // now change a key which is only available in the "en" dictionary
    setMessage(enRoot, MSG_KEY2, "EN_changed");
    session.save();
    assertMessages(MSG_KEY2, "EN_changed", "EN_changed", "EN_changed");
}
/**
 * Builds the repoinit "references" header value pointing at this test's
 * repoinit script on the classpath.
 *
 * @return a "raw:&lt;url&gt;" reference to /repoinit.txt
 * @throws RuntimeException if the script is missing from the classpath or
 *         its URL cannot be converted to a URI
 */
private String references() {
    // getResource returns null when the resource is absent; fail fast with
    // a clear message instead of an anonymous NullPointerException
    final java.net.URL repoInitUrl = getClass().getResource("/repoinit.txt");
    if (repoInitUrl == null) {
        throw new RuntimeException("repoinit.txt not found on classpath");
    }
    try {
        return String.format("raw:%s", repoInitUrl.toURI().toString());
    } catch (URISyntaxException e) {
        throw new RuntimeException("Failed to compute repoinit references", e);
    }
}
}
| |
package com.lohool.ola.pay.alipay;
/**
 * Minimal Base64 codec (RFC 2045 alphabet) used for signing/verifying
 * Alipay payloads.
 *
 * <p>Deliberately kept as a hand-rolled implementation instead of
 * delegating to {@code java.util.Base64}, because callers rely on its
 * lenient contract: {@code null} in / {@code null} out, and
 * {@code decode} returning {@code null} (never throwing) on malformed
 * input — including inputs whose padding bits are non-zero, which the
 * JDK decoder accepts.</p>
 */
public final class Base64 {

    /** Size of the decode table: covers all 7-bit ASCII code points. */
    private static final int BASELENGTH = 128;
    /** Size of the encode table: one entry per 6-bit value. */
    private static final int LOOKUPLENGTH = 64;
    /** Bits per encoded group of three input bytes. */
    private static final int TWENTYFOURBITGROUP = 24;
    private static final int EIGHTBIT = 8;
    private static final int SIXTEENBIT = 16;
    /** Encoded quadruple length; valid input length must be a multiple of this. */
    private static final int FOURBYTE = 4;
    /** Mask selecting the sign bit of a byte. */
    private static final int SIGN = -128;
    /** Padding character appended when the input length is not a multiple of 3. */
    private static final char PAD = '=';

    /** Maps an ASCII code point to its 6-bit value, or -1 if not base64 data. */
    private static final byte[] base64Alphabet = new byte[BASELENGTH];
    /** Maps a 6-bit value to its base64 character. */
    private static final char[] lookUpBase64Alphabet = new char[LOOKUPLENGTH];

    static {
        for (int i = 0; i < BASELENGTH; ++i) {
            base64Alphabet[i] = -1;
        }
        for (int i = 'Z'; i >= 'A'; i--) {
            base64Alphabet[i] = (byte) (i - 'A');
        }
        for (int i = 'z'; i >= 'a'; i--) {
            base64Alphabet[i] = (byte) (i - 'a' + 26);
        }
        for (int i = '9'; i >= '0'; i--) {
            base64Alphabet[i] = (byte) (i - '0' + 52);
        }
        base64Alphabet['+'] = 62;
        base64Alphabet['/'] = 63;
        for (int i = 0; i <= 25; i++) {
            lookUpBase64Alphabet[i] = (char) ('A' + i);
        }
        for (int i = 26, j = 0; i <= 51; i++, j++) {
            lookUpBase64Alphabet[i] = (char) ('a' + j);
        }
        for (int i = 52, j = 0; i <= 61; i++, j++) {
            lookUpBase64Alphabet[i] = (char) ('0' + j);
        }
        lookUpBase64Alphabet[62] = '+';
        lookUpBase64Alphabet[63] = '/';
    }

    /** Static utility holder; not instantiable. */
    private Base64() {
    }

    /** Returns true for the whitespace characters MIME allows inside base64. */
    private static boolean isWhiteSpace(char octect) {
        return (octect == 0x20 || octect == 0xd || octect == 0xa || octect == 0x9);
    }

    private static boolean isPad(char octect) {
        return (octect == PAD);
    }

    /** Returns true if the character maps to a 6-bit base64 value. */
    private static boolean isData(char octect) {
        return (octect < BASELENGTH && base64Alphabet[octect] != -1);
    }

    /**
     * Encodes binary data into Base64.
     *
     * @param binaryData
     *            Array containing binary data; may be null
     * @return Encoded Base64 string, "" for empty input, or null for null input
     */
    public static String encode(byte[] binaryData) {
        if (binaryData == null) {
            return null;
        }
        int lengthDataBits = binaryData.length * EIGHTBIT;
        if (lengthDataBits == 0) {
            return "";
        }
        int fewerThan24bits = lengthDataBits % TWENTYFOURBITGROUP;
        int numberTriplets = lengthDataBits / TWENTYFOURBITGROUP;
        // one extra quartet holds the final partial group, if any
        int numberQuartet = fewerThan24bits != 0 ? numberTriplets + 1
                : numberTriplets;
        char[] encodedData = new char[numberQuartet * 4];
        byte k = 0, l = 0, b1 = 0, b2 = 0, b3 = 0;
        int encodedIndex = 0;
        int dataIndex = 0;
        for (int i = 0; i < numberTriplets; i++) {
            b1 = binaryData[dataIndex++];
            b2 = binaryData[dataIndex++];
            b3 = binaryData[dataIndex++];
            l = (byte) (b2 & 0x0f);
            k = (byte) (b1 & 0x03);
            // the XOR masks undo the sign extension of >> on negative bytes
            byte val1 = ((b1 & SIGN) == 0) ? (byte) (b1 >> 2)
                    : (byte) ((b1) >> 2 ^ 0xc0);
            byte val2 = ((b2 & SIGN) == 0) ? (byte) (b2 >> 4)
                    : (byte) ((b2) >> 4 ^ 0xf0);
            byte val3 = ((b3 & SIGN) == 0) ? (byte) (b3 >> 6)
                    : (byte) ((b3) >> 6 ^ 0xfc);
            encodedData[encodedIndex++] = lookUpBase64Alphabet[val1];
            encodedData[encodedIndex++] = lookUpBase64Alphabet[val2 | (k << 4)];
            encodedData[encodedIndex++] = lookUpBase64Alphabet[(l << 2) | val3];
            encodedData[encodedIndex++] = lookUpBase64Alphabet[b3 & 0x3f];
        }
        // form integral number of 6-bit groups for the 1- or 2-byte remainder
        if (fewerThan24bits == EIGHTBIT) {
            b1 = binaryData[dataIndex];
            k = (byte) (b1 & 0x03);
            byte val1 = ((b1 & SIGN) == 0) ? (byte) (b1 >> 2)
                    : (byte) ((b1) >> 2 ^ 0xc0);
            encodedData[encodedIndex++] = lookUpBase64Alphabet[val1];
            encodedData[encodedIndex++] = lookUpBase64Alphabet[k << 4];
            encodedData[encodedIndex++] = PAD;
            encodedData[encodedIndex++] = PAD;
        } else if (fewerThan24bits == SIXTEENBIT) {
            b1 = binaryData[dataIndex];
            b2 = binaryData[dataIndex + 1];
            l = (byte) (b2 & 0x0f);
            k = (byte) (b1 & 0x03);
            byte val1 = ((b1 & SIGN) == 0) ? (byte) (b1 >> 2)
                    : (byte) ((b1) >> 2 ^ 0xc0);
            byte val2 = ((b2 & SIGN) == 0) ? (byte) (b2 >> 4)
                    : (byte) ((b2) >> 4 ^ 0xf0);
            encodedData[encodedIndex++] = lookUpBase64Alphabet[val1];
            encodedData[encodedIndex++] = lookUpBase64Alphabet[val2 | (k << 4)];
            encodedData[encodedIndex++] = lookUpBase64Alphabet[l << 2];
            encodedData[encodedIndex++] = PAD;
        }
        return new String(encodedData);
    }

    /**
     * Decodes Base64 data into octets. Embedded whitespace is ignored.
     *
     * @param encoded
     *            string containing Base64 data; may be null
     * @return Array containing decoded data, or null for null/malformed input
     *         (wrong length, invalid characters, or non-zero padding bits)
     */
    public static byte[] decode(String encoded) {
        if (encoded == null) {
            return null;
        }
        char[] base64Data = encoded.toCharArray();
        // remove white spaces
        int len = removeWhiteSpace(base64Data);
        if (len % FOURBYTE != 0) {
            return null;// should be divisible by four
        }
        int numberQuadruple = (len / FOURBYTE);
        if (numberQuadruple == 0) {
            return new byte[0];
        }
        byte b1 = 0, b2 = 0, b3 = 0, b4 = 0;
        char d1 = 0, d2 = 0, d3 = 0, d4 = 0;
        int i = 0;
        int encodedIndex = 0;
        int dataIndex = 0;
        byte[] decodedData = new byte[(numberQuadruple) * 3];
        // all quadruples except the last one must be pure data (no padding)
        for (; i < numberQuadruple - 1; i++) {
            if (!isData((d1 = base64Data[dataIndex++]))
                    || !isData((d2 = base64Data[dataIndex++]))
                    || !isData((d3 = base64Data[dataIndex++]))
                    || !isData((d4 = base64Data[dataIndex++]))) {
                return null;
            }// if found "no data" just return null
            b1 = base64Alphabet[d1];
            b2 = base64Alphabet[d2];
            b3 = base64Alphabet[d3];
            b4 = base64Alphabet[d4];
            decodedData[encodedIndex++] = (byte) (b1 << 2 | b2 >> 4);
            decodedData[encodedIndex++] = (byte) (((b2 & 0xf) << 4) | ((b3 >> 2) & 0xf));
            decodedData[encodedIndex++] = (byte) (b3 << 6 | b4);
        }
        // the last quadruple may end in "==" or "=", shrinking the output
        if (!isData((d1 = base64Data[dataIndex++]))
                || !isData((d2 = base64Data[dataIndex++]))) {
            return null;// if found "no data" just return null
        }
        b1 = base64Alphabet[d1];
        b2 = base64Alphabet[d2];
        d3 = base64Data[dataIndex++];
        d4 = base64Data[dataIndex++];
        if (!isData((d3)) || !isData((d4))) {// Check if they are PAD characters
            if (isPad(d3) && isPad(d4)) {
                if ((b2 & 0xf) != 0)// last 4 bits should be zero
                {
                    return null;
                }
                byte[] tmp = new byte[i * 3 + 1];
                System.arraycopy(decodedData, 0, tmp, 0, i * 3);
                tmp[encodedIndex] = (byte) (b1 << 2 | b2 >> 4);
                return tmp;
            } else if (!isPad(d3) && isPad(d4)) {
                b3 = base64Alphabet[d3];
                if ((b3 & 0x3) != 0)// last 2 bits should be zero
                {
                    return null;
                }
                byte[] tmp = new byte[i * 3 + 2];
                System.arraycopy(decodedData, 0, tmp, 0, i * 3);
                tmp[encodedIndex++] = (byte) (b1 << 2 | b2 >> 4);
                tmp[encodedIndex] = (byte) (((b2 & 0xf) << 4) | ((b3 >> 2) & 0xf));
                return tmp;
            } else {
                // PAD may not be followed by data ("=x" is malformed)
                return null;
            }
        } else { // No PAD e.g 3cQl
            b3 = base64Alphabet[d3];
            b4 = base64Alphabet[d4];
            decodedData[encodedIndex++] = (byte) (b1 << 2 | b2 >> 4);
            decodedData[encodedIndex++] = (byte) (((b2 & 0xf) << 4) | ((b3 >> 2) & 0xf));
            decodedData[encodedIndex++] = (byte) (b3 << 6 | b4);
        }
        return decodedData;
    }

    /**
     * Removes whitespace from MIME-wrapped Base64 data, compacting the array
     * in place.
     *
     * @param data
     *            the char array of base64 data (possibly with whitespace)
     * @return the new logical length after compaction
     */
    private static int removeWhiteSpace(char[] data) {
        if (data == null) {
            return 0;
        }
        // count characters that's not whitespace
        int newSize = 0;
        int len = data.length;
        for (int i = 0; i < len; i++) {
            if (!isWhiteSpace(data[i])) {
                data[newSize++] = data[i];
            }
        }
        return newSize;
    }
}
| |
/**
*
*/
package com.andrew.apolloMod.activities;
import android.app.Activity;
import android.app.SearchManager;
import android.content.*;
import android.content.pm.ActivityInfo;
import android.content.res.Resources;
import android.database.Cursor;
import android.database.DatabaseUtils;
import android.media.AudioManager;
import android.net.Uri;
import android.os.Bundle;
import android.os.IBinder;
import android.provider.BaseColumns;
import android.provider.MediaStore;
import android.provider.MediaStore.Audio;
import android.provider.MediaStore.Audio.ArtistColumns;
import android.support.v4.view.ViewPager;
import android.view.ContextMenu;
import android.view.MenuItem;
import android.view.View;
import android.widget.FrameLayout;
import android.widget.ImageView;
import android.widget.RelativeLayout;
import android.widget.TextView;
import com.andrew.apolloMod.IApolloService;
import com.andrew.apolloMod.R;
import com.andrew.apolloMod.cache.ImageInfo;
import com.andrew.apolloMod.cache.ImageProvider;
import com.andrew.apolloMod.helpers.utils.ApolloUtils;
import com.andrew.apolloMod.helpers.utils.MusicUtils;
import com.andrew.apolloMod.helpers.utils.ThemeUtils;
import com.andrew.apolloMod.ui.adapters.PagerAdapter;
import com.andrew.apolloMod.ui.fragments.list.ArtistAlbumsFragment;
import com.andrew.apolloMod.ui.fragments.list.TracksFragment;
import com.andrew.apolloMod.service.ApolloService;
import com.andrew.apolloMod.service.ServiceToken;
import static com.andrew.apolloMod.Constants.*;
/**
 * @author Andrew Neal
 * @Note This displays specific track or album listings
 *
 * Fixes in this revision: onActivityResult now calls through to super,
 * tolerates a null Cursor from the content resolver and always closes it;
 * getNumAlbums no longer leaks its Cursor; the gallery request code is a
 * proper constant.
 */
public class TracksBrowser extends Activity implements ServiceConnection {

    /** Request code used when launching the gallery image picker. */
    private static final int RESULT_LOAD_IMAGE = 1;

    // Extras describing what is being browsed (artist, album, playlist or genre)
    private Bundle bundle;

    private Intent intent;

    // MIME type of the browsed collection; drives every type-specific branch below
    private String mimeType;

    // Token for the bound ApolloService; released in onStop()
    private ServiceToken mToken;

    private ImageProvider mImageProvider;

    @Override
    protected void onCreate(Bundle icicle) {
        super.onCreate(icicle);
        // Landscape mode on phone isn't ready
        if (!ApolloUtils.isTablet(this))
            setRequestedOrientation(ActivityInfo.SCREEN_ORIENTATION_PORTRAIT);
        // Control Media volume
        setVolumeControlStream(AudioManager.STREAM_MUSIC);
        // Layout
        setContentView(R.layout.track_browser);
        registerForContextMenu(findViewById(R.id.half_artist_image));
        // ImageCache
        mImageProvider = ImageProvider.getInstance( this );
        // Important! Resolves bundle/intent extras and mimeType
        whatBundle(icicle);
        // Update the colorstrip color
        initColorstrip();
        // Update the ActionBar
        initActionBar();
        // Update the half_and_half layout
        initUpperHalf();
        // Important! Builds the fragment pager
        initPager();
    }

    /**
     * Inflates the image-editing context menu appropriate for the browsed
     * collection type (artist, album, playlist or genre).
     */
    @Override
    public void onCreateContextMenu(ContextMenu menu, View v, ContextMenu.ContextMenuInfo menuInfo) {
        if (Audio.Artists.CONTENT_TYPE.equals(mimeType)) {
            menu.setHeaderTitle(R.string.image_edit_artists);
            getMenuInflater().inflate(R.menu.context_artistimage, menu);
        } else if (Audio.Albums.CONTENT_TYPE.equals(mimeType)) {
            menu.setHeaderTitle(R.string.image_edit_albums);
            getMenuInflater().inflate(R.menu.context_albumimage, menu);
        } else if (Audio.Playlists.CONTENT_TYPE.equals(mimeType)) {
            menu.setHeaderTitle(R.string.image_edit_playlist);
            getMenuInflater().inflate(R.menu.context_playlist_genreimage, menu);
        }
        else{
            menu.setHeaderTitle(R.string.image_edit_genre);
            getMenuInflater().inflate(R.menu.context_playlist_genreimage, menu);
        }
    }

    /**
     * Handles the image-editing context menu: pick from gallery, load from
     * file, fetch from Last.fm, or search the web.
     */
    @Override
    public boolean onContextItemSelected(MenuItem item) {
        ImageInfo mInfo = null;
        switch (item.getItemId()) {
            case R.id.image_edit_gallery:
                Intent i = new Intent(Intent.ACTION_PICK,android.provider.MediaStore.Images.Media.EXTERNAL_CONTENT_URI);
                startActivityForResult(i, RESULT_LOAD_IMAGE);
                return true;
            case R.id.image_edit_file:
                mInfo = new ImageInfo();
                mInfo.type = TYPE_ALBUM;
                mInfo.size = SIZE_NORMAL;
                mInfo.source = SRC_FILE;
                mInfo.data = new String[]{ getAlbumId(), getArtist(), getAlbum() };
                mImageProvider.loadImage((ImageView)findViewById(R.id.half_artist_image), mInfo );
                return true;
            case R.id.image_edit_lastfm:
                mInfo = new ImageInfo();
                mInfo.size = SIZE_NORMAL;
                mInfo.source = SRC_LASTFM;
                if (Audio.Artists.CONTENT_TYPE.equals(mimeType)) {
                    mInfo.type = TYPE_ARTIST;
                    mInfo.data = new String[]{ getArtist() };
                } else if (Audio.Albums.CONTENT_TYPE.equals(mimeType)) {
                    mInfo.type = TYPE_ALBUM;
                    mInfo.data = new String[]{ getAlbumId(), getArtist(), getAlbum() };
                }
                mImageProvider.loadImage((ImageView)findViewById(R.id.half_artist_image), mInfo );
                return true;
            case R.id.image_edit_web:
                onSearchWeb();
                return true;
            default:
                return super.onContextItemSelected(item);
        }
    }

    /**
     * Launches a web search for the browsed artist, album, playlist or genre.
     */
    public void onSearchWeb(){
        String query = "";
        if (Audio.Artists.CONTENT_TYPE.equals(mimeType)) {
            query = getArtist();
        } else if (Audio.Albums.CONTENT_TYPE.equals(mimeType)) {
            query = getAlbum() + " " + getArtist();
        } else if (Audio.Playlists.CONTENT_TYPE.equals(mimeType)) {
            query = bundle.getString(PLAYLIST_NAME);
        }
        else{
            Long id = bundle.getLong(BaseColumns._ID);
            query = MusicUtils.parseGenreName(this, MusicUtils.getGenreName(this, id, true));
        }
        final Intent googleSearch = new Intent(Intent.ACTION_WEB_SEARCH);
        googleSearch.putExtra(SearchManager.QUERY, query);
        startActivity(googleSearch);
    }

    /**
     * Receives the image picked from the gallery and applies it to the
     * current artist/album/playlist/genre. Tolerates a null Cursor (the
     * provider may fail to resolve the Uri) and always closes the Cursor.
     */
    @Override
    public void onActivityResult(int requestCode, int resultCode, Intent data)
    {
        super.onActivityResult(requestCode, resultCode, data);
        if (resultCode == Activity.RESULT_OK && requestCode == RESULT_LOAD_IMAGE && data != null)
        {
            Uri selectedImage = data.getData();
            String[] filePathColumn = { MediaStore.Images.Media.DATA };
            Cursor cursor = getContentResolver().query(selectedImage,filePathColumn, null, null, null);
            if (cursor == null) {
                // provider could not resolve the picked image
                return;
            }
            String picturePath;
            try {
                if (!cursor.moveToFirst()) {
                    return;
                }
                int columnIndex = cursor.getColumnIndex(filePathColumn[0]);
                picturePath = cursor.getString(columnIndex);
            } finally {
                cursor.close();
            }
            ImageInfo mInfo = new ImageInfo();
            if (Audio.Artists.CONTENT_TYPE.equals(mimeType)) {
                mInfo.type = TYPE_ARTIST;
                mInfo.data = new String[]{ getArtist(), picturePath };
            } else if (Audio.Albums.CONTENT_TYPE.equals(mimeType)) {
                mInfo.type = TYPE_ALBUM;
                mInfo.data = new String[]{ getAlbumId(), getAlbum(), getArtist(), picturePath };
            } else if (Audio.Playlists.CONTENT_TYPE.equals(mimeType)) {
                mInfo.type = TYPE_PLAYLIST;
                mInfo.data = new String[]{ bundle.getString(PLAYLIST_NAME), picturePath };
            }
            else{
                Long id = bundle.getLong(BaseColumns._ID);
                mInfo.type = TYPE_GENRE;
                mInfo.data = new String[]{ MusicUtils.parseGenreName(this, MusicUtils.getGenreName(this, id, true)), picturePath };
            }
            mInfo.size = SIZE_NORMAL;
            mInfo.source = SRC_GALLERY;
            mImageProvider.loadImage((ImageView)findViewById(R.id.half_artist_image), mInfo );
        }
    }

    @Override
    public void onSaveInstanceState(Bundle outcicle) {
        outcicle.putAll(bundle);
        super.onSaveInstanceState(outcicle);
    }

    @Override
    public void onServiceConnected(ComponentName name, IBinder obj) {
        MusicUtils.mService = IApolloService.Stub.asInterface(obj);
    }

    @Override
    public void onServiceDisconnected(ComponentName name) {
        MusicUtils.mService = null;
    }

    /**
     * Update next BottomActionBar as needed
     */
    private final BroadcastReceiver mMediaStatusReceiver = new BroadcastReceiver() {
        @Override
        public void onReceive(Context context, Intent intent) {
        }
    };

    @Override
    protected void onStart() {
        // Bind to Service
        mToken = MusicUtils.bindToService(this, this);
        IntentFilter filter = new IntentFilter();
        filter.addAction(ApolloService.META_CHANGED);
        registerReceiver(mMediaStatusReceiver, filter);
        setTitle();
        super.onStart();
    }

    @Override
    protected void onStop() {
        // Unbind
        if (MusicUtils.mService != null)
            MusicUtils.unbindFromService(mToken);
        unregisterReceiver(mMediaStatusReceiver);
        super.onStop();
    }

    @Override
    public boolean onOptionsItemSelected(MenuItem item) {
        switch (item.getItemId()) {
            case android.R.id.home:
                super.onBackPressed();
                return true;
            default:
                break;
        }
        return super.onOptionsItemSelected(item);
    }

    /**
     * Resolves the Bundle this activity works with: saved state if present,
     * otherwise the launching intent's extras, otherwise an empty Bundle.
     * Also caches the collection MIME type.
     *
     * @param icicle saved instance state, may be null
     */
    public void whatBundle(Bundle icicle) {
        intent = getIntent();
        bundle = icicle != null ? icicle : intent.getExtras();
        if (bundle == null) {
            bundle = new Bundle();
        }
        if (bundle.getString(INTENT_ACTION) == null) {
            bundle.putString(INTENT_ACTION, intent.getAction());
        }
        if (bundle.getString(MIME_TYPE) == null) {
            bundle.putString(MIME_TYPE, intent.getType());
        }
        mimeType = bundle.getString(MIME_TYPE);
    }

    /**
     * For the theme chooser
     */
    private void initColorstrip() {
        FrameLayout mColorstrip = (FrameLayout)findViewById(R.id.colorstrip);
        mColorstrip.setBackgroundColor(getResources().getColor(R.color.holo_blue_dark));
        ThemeUtils.setBackgroundColor(this, mColorstrip, "colorstrip");
        RelativeLayout mColorstrip2 = (RelativeLayout)findViewById(R.id.bottom_colorstrip);
        mColorstrip2.setBackgroundColor(getResources().getColor(R.color.holo_blue_dark));
        ThemeUtils.setBackgroundColor(this, mColorstrip2, "colorstrip");
    }

    /**
     * Set the ActionBar title
     */
    private void initActionBar() {
        ApolloUtils.showUpTitleOnly(getActionBar());
        // The ActionBar Title and UP ids are hidden.
        int titleId = Resources.getSystem().getIdentifier("action_bar_title", "id", "android");
        int upId = Resources.getSystem().getIdentifier("up", "id", "android");
        TextView actionBarTitle = (TextView)findViewById(titleId);
        ImageView actionBarUp = (ImageView)findViewById(upId);
        // Theme chooser
        ThemeUtils.setActionBarBackground(this, getActionBar(), "action_bar_background");
        ThemeUtils.setTextColor(this, actionBarTitle, "action_bar_title_color");
        ThemeUtils.initThemeChooser(this, actionBarUp, "action_bar_up", THEME_ITEM_BACKGROUND);
    }

    /**
     * Sets up the @half_and_half.xml layout: the header image plus one or
     * two text lines, depending on the browsed collection type.
     */
    private void initUpperHalf() {
        ImageInfo mInfo = new ImageInfo();
        mInfo.source = SRC_FIRST_AVAILABLE;
        mInfo.size = SIZE_NORMAL;
        final ImageView imageView = (ImageView)findViewById(R.id.half_artist_image);
        String lineOne = "";
        String lineTwo = "";
        if (ApolloUtils.isArtist(mimeType)) {
            String mArtist = getArtist();
            mInfo.type = TYPE_ARTIST;
            mInfo.data = new String[]{ mArtist };
            lineOne = mArtist;
            lineTwo = MusicUtils.makeAlbumsLabel(this, Integer.parseInt(getNumAlbums()), 0, false);
        }else if (ApolloUtils.isAlbum(mimeType)) {
            String mAlbum = getAlbum(), mArtist = getArtist();
            mInfo.type = TYPE_ALBUM;
            mInfo.data = new String[]{ getAlbumId(), mAlbum, mArtist };
            lineOne = mAlbum;
            lineTwo = mArtist;
        } else if (Audio.Playlists.CONTENT_TYPE.equals(mimeType)) {
            String plyName = bundle.getString(PLAYLIST_NAME);
            mInfo.type = TYPE_PLAYLIST;
            mInfo.data = new String[]{ plyName };
            lineOne = plyName;
        }
        else{
            String genName = MusicUtils.parseGenreName(this,
                    MusicUtils.getGenreName(this, bundle.getLong(BaseColumns._ID), true));
            mInfo.type = TYPE_GENRE;
            mInfo.data = new String[]{ genName };
            lineOne = genName;
        }
        mImageProvider.loadImage( imageView, mInfo );
        TextView lineOneView = (TextView)findViewById(R.id.half_artist_image_text);
        lineOneView.setText(lineOne);
        TextView lineTwoView = (TextView)findViewById(R.id.half_artist_image_text_line_two);
        lineTwoView.setText(lineTwo);
    }

    /**
     * Initiate ViewPager and PagerAdapter
     */
    private void initPager() {
        // Initiate PagerAdapter
        PagerAdapter mPagerAdapter = new PagerAdapter(getFragmentManager());
        if (ApolloUtils.isArtist(mimeType)) {
            // Show all albums for an artist
            mPagerAdapter.addFragment(new ArtistAlbumsFragment(bundle));
        }
        // Show the tracks for an artist or album
        mPagerAdapter.addFragment(new TracksFragment(bundle));
        // Set up ViewPager
        ViewPager mViewPager = (ViewPager)findViewById(R.id.viewPager);
        mViewPager.setPageMargin(getResources().getInteger(R.integer.viewpager_margin_width));
        mViewPager.setPageMarginDrawable(R.drawable.viewpager_margin);
        mViewPager.setOffscreenPageLimit(mPagerAdapter.getCount());
        mViewPager.setAdapter(mPagerAdapter);
        // Theme chooser
        ThemeUtils.initThemeChooser(this, mViewPager, "viewpager", THEME_ITEM_BACKGROUND);
        ThemeUtils.setMarginDrawable(this, mViewPager, "viewpager_margin");
    }

    /**
     * @return artist name from Bundle, or the app name as fallback
     */
    public String getArtist() {
        if (bundle.getString(ARTIST_KEY) != null)
            return bundle.getString(ARTIST_KEY);
        return getResources().getString(R.string.app_name);
    }

    /**
     * @return album name from Bundle, or the app name as fallback
     */
    public String getAlbum() {
        if (bundle.getString(ALBUM_KEY) != null)
            return bundle.getString(ALBUM_KEY);
        return getResources().getString(R.string.app_name);
    }

    /**
     * @return album id from Bundle, or the app name as fallback
     */
    public String getAlbumId() {
        if (bundle.getString(ALBUM_ID_KEY) != null)
            return bundle.getString(ALBUM_ID_KEY);
        return getResources().getString(R.string.app_name);
    }

    /**
     * @return number of albums from the Bundle if present, otherwise looked
     *         up from the media store; "0" when nothing can be resolved
     */
    public String getNumAlbums() {
        if (bundle.getString(NUMALBUMS) != null)
            return bundle.getString(NUMALBUMS);
        String[] projection = {
                BaseColumns._ID, ArtistColumns.ARTIST, ArtistColumns.NUMBER_OF_ALBUMS
        };
        Uri uri = Audio.Artists.EXTERNAL_CONTENT_URI;
        Long id = ApolloUtils.getArtistId(getArtist(), ARTIST_ID, this);
        Cursor cursor = null;
        try{
            cursor = this.getContentResolver().query(uri, projection, BaseColumns._ID+ "=" + DatabaseUtils.sqlEscapeString(String.valueOf(id)), null, null);
        }
        catch(Exception e){
            e.printStackTrace();
        }
        if(cursor == null)
            return String.valueOf(0);
        // close the cursor on every path (previously leaked)
        try {
            int mArtistNumAlbumsIndex = cursor.getColumnIndexOrThrow(ArtistColumns.NUMBER_OF_ALBUMS);
            if(cursor.getCount()>0){
                cursor.moveToFirst();
                String numAlbums = cursor.getString(mArtistNumAlbumsIndex);
                if(numAlbums != null){
                    return numAlbums;
                }
            }
        } finally {
            cursor.close();
        }
        return String.valueOf(0);
    }

    /**
     * @return genre name from Bundle, or the app name as fallback
     */
    public String getGenre() {
        if (bundle.getString(GENRE_KEY) != null)
            return bundle.getString(GENRE_KEY);
        return getResources().getString(R.string.app_name);
    }

    /**
     * @return playlist name from Bundle, or the app name as fallback
     */
    public String getPlaylist() {
        if (bundle.getString(PLAYLIST_NAME) != null)
            return bundle.getString(PLAYLIST_NAME);
        return getResources().getString(R.string.app_name);
    }

    /**
     * Set the correct title for the browsed collection type.
     */
    private void setTitle() {
        String name;
        long id;
        if (Audio.Playlists.CONTENT_TYPE.equals(mimeType)) {
            id = bundle.getLong(BaseColumns._ID);
            switch ((int)id) {
                case (int)PLAYLIST_QUEUE:
                    setTitle(R.string.nowplaying);
                    return;
                case (int)PLAYLIST_FAVORITES:
                    setTitle(R.string.favorite);
                    return;
                default:
                    if (id < 0) {
                        setTitle(R.string.app_name);
                        return;
                    }
            }
            name = MusicUtils.getPlaylistName(this, id);
        } else if (Audio.Artists.CONTENT_TYPE.equals(mimeType)) {
            id = bundle.getLong(BaseColumns._ID);
            name = getString (R.string.artist_page_title)+MusicUtils.getArtistName(this, id, true);
        } else if (Audio.Albums.CONTENT_TYPE.equals(mimeType)) {
            id = bundle.getLong(BaseColumns._ID);
            name = getString (R.string.album_page_title)+MusicUtils.getAlbumName(this, id, true);
        } else if (Audio.Genres.CONTENT_TYPE.equals(mimeType)) {
            id = bundle.getLong(BaseColumns._ID);
            name = MusicUtils.parseGenreName(this, MusicUtils.getGenreName(this, id, true));
        } else {
            setTitle(R.string.app_name);
            return;
        }
        setTitle(name);
    }
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.guacamole.auth.openid.conf;
import com.google.inject.Inject;
import org.apache.guacamole.GuacamoleException;
import org.apache.guacamole.environment.Environment;
import org.apache.guacamole.properties.IntegerGuacamoleProperty;
import org.apache.guacamole.properties.StringGuacamoleProperty;
/**
* Service for retrieving configuration information regarding the OpenID
* service.
*/
public class ConfigurationService {
    // ---- Defaults used when the corresponding property is absent ----

    /**
     * The default claim type to use to retrieve an authenticated user's
     * username.
     */
    private static final String DEFAULT_USERNAME_CLAIM_TYPE = "email";

    /**
     * The default space-separated list of OpenID scopes to request.
     */
    private static final String DEFAULT_SCOPE = "openid email profile";

    /**
     * The default amount of clock skew tolerated for timestamp comparisons
     * between the Guacamole server and OpenID service clocks, in seconds.
     */
    private static final int DEFAULT_ALLOWED_CLOCK_SKEW = 30;

    /**
     * The default maximum amount of time that an OpenID token should remain
     * valid, in minutes.
     */
    private static final int DEFAULT_MAX_TOKEN_VALIDITY = 300;

    /**
     * The default maximum amount of time that a nonce generated by the
     * Guacamole server should remain valid, in minutes.
     */
    private static final int DEFAULT_MAX_NONCE_VALIDITY = 10;

    // ---- Property definitions; names are the keys in guacamole.properties ----

    /**
     * The authorization endpoint (URI) of the OpenID service.
     */
    private static final StringGuacamoleProperty OPENID_AUTHORIZATION_ENDPOINT =
            new StringGuacamoleProperty() {
        @Override
        public String getName() { return "openid-authorization-endpoint"; }
    };

    /**
     * The endpoint (URI) of the JWKS service which defines how received ID
     * tokens (JWTs) shall be validated.
     */
    private static final StringGuacamoleProperty OPENID_JWKS_ENDPOINT =
            new StringGuacamoleProperty() {
        @Override
        public String getName() { return "openid-jwks-endpoint"; }
    };

    /**
     * The issuer to expect for all received ID tokens.
     */
    private static final StringGuacamoleProperty OPENID_ISSUER =
            new StringGuacamoleProperty() {
        @Override
        public String getName() { return "openid-issuer"; }
    };

    /**
     * The claim type which contains the authenticated user's username within
     * any valid JWT.
     */
    private static final StringGuacamoleProperty OPENID_USERNAME_CLAIM_TYPE =
            new StringGuacamoleProperty() {
        @Override
        public String getName() { return "openid-username-claim-type"; }
    };

    /**
     * The space-separated list of OpenID scopes to request.
     */
    private static final StringGuacamoleProperty OPENID_SCOPE =
            new StringGuacamoleProperty() {
        @Override
        public String getName() { return "openid-scope"; }
    };

    /**
     * The amount of clock skew tolerated for timestamp comparisons between the
     * Guacamole server and OpenID service clocks, in seconds.
     */
    private static final IntegerGuacamoleProperty OPENID_ALLOWED_CLOCK_SKEW =
            new IntegerGuacamoleProperty() {
        @Override
        public String getName() { return "openid-allowed-clock-skew"; }
    };

    /**
     * The maximum amount of time that an OpenID token should remain valid, in
     * minutes.
     */
    private static final IntegerGuacamoleProperty OPENID_MAX_TOKEN_VALIDITY =
            new IntegerGuacamoleProperty() {
        @Override
        public String getName() { return "openid-max-token-validity"; }
    };

    /**
     * The maximum amount of time that a nonce generated by the Guacamole server
     * should remain valid, in minutes. As each OpenID request has a unique
     * nonce value, this imposes an upper limit on the amount of time any
     * particular OpenID request can result in successful authentication within
     * Guacamole.
     */
    private static final IntegerGuacamoleProperty OPENID_MAX_NONCE_VALIDITY =
            new IntegerGuacamoleProperty() {
        @Override
        public String getName() { return "openid-max-nonce-validity"; }
    };

    /**
     * OpenID client ID which should be submitted to the OpenID service when
     * necessary. This value is typically provided by the OpenID service when
     * OpenID credentials are generated for your application.
     */
    private static final StringGuacamoleProperty OPENID_CLIENT_ID =
            new StringGuacamoleProperty() {
        @Override
        public String getName() { return "openid-client-id"; }
    };

    /**
     * The URI that the OpenID service should redirect to after the
     * authentication process is complete. This must be the full URL that a
     * user would enter into their browser to access Guacamole.
     */
    private static final StringGuacamoleProperty OPENID_REDIRECT_URI =
            new StringGuacamoleProperty() {
        @Override
        public String getName() { return "openid-redirect-uri"; }
    };

    /**
     * The Guacamole server environment, which reads guacamole.properties.
     */
    @Inject
    private Environment environment;
/**
* Returns the authorization endpoint (URI) of the OpenID service as
* configured with guacamole.properties.
*
* @return
* The authorization endpoint of the OpenID service, as configured with
* guacamole.properties.
*
* @throws GuacamoleException
* If guacamole.properties cannot be parsed, or if the authorization
* endpoint property is missing.
*/
public String getAuthorizationEndpoint() throws GuacamoleException {
return environment.getRequiredProperty(OPENID_AUTHORIZATION_ENDPOINT);
}
/**
* Returns the OpenID client ID which should be submitted to the OpenID
* service when necessary, as configured with guacamole.properties. This
* value is typically provided by the OpenID service when OpenID credentials
* are generated for your application.
*
* @return
* The client ID to use when communicating with the OpenID service,
* as configured with guacamole.properties.
*
* @throws GuacamoleException
* If guacamole.properties cannot be parsed, or if the client ID
* property is missing.
*/
public String getClientID() throws GuacamoleException {
return environment.getRequiredProperty(OPENID_CLIENT_ID);
}
    /**
     * Returns the URI that the OpenID service should redirect to after
     * the authentication process is complete, as configured with
     * guacamole.properties. This must be the full URL that a user would enter
     * into their browser to access Guacamole.
     *
     * @return
     *     The redirect URI to supply to the OpenID service, as configured
     *     with guacamole.properties.
     *
     * @throws GuacamoleException
     *     If guacamole.properties cannot be parsed, or if the redirect URI
     *     property is missing.
     */
    public String getRedirectURI() throws GuacamoleException {
        return environment.getRequiredProperty(OPENID_REDIRECT_URI);
    }
/**
* Returns the issuer to expect for all received ID tokens, as configured
* with guacamole.properties.
*
* @return
* The issuer to expect for all received ID tokens, as configured with
* guacamole.properties.
*
* @throws GuacamoleException
* If guacamole.properties cannot be parsed, or if the issuer property
* is missing.
*/
public String getIssuer() throws GuacamoleException {
return environment.getRequiredProperty(OPENID_ISSUER);
}
/**
* Returns the endpoint (URI) of the JWKS service which defines how
* received ID tokens (JWTs) shall be validated, as configured with
* guacamole.properties.
*
* @return
* The endpoint (URI) of the JWKS service which defines how received ID
* tokens (JWTs) shall be validated, as configured with
* guacamole.properties.
*
* @throws GuacamoleException
* If guacamole.properties cannot be parsed, or if the JWKS endpoint
* property is missing.
*/
public String getJWKSEndpoint() throws GuacamoleException {
return environment.getRequiredProperty(OPENID_JWKS_ENDPOINT);
}
/**
* Returns the claim type which contains the authenticated user's username
* within any valid JWT, as configured with guacamole.properties. By
* default, this will be "email".
*
* @return
* The claim type which contains the authenticated user's username
* within any valid JWT, as configured with guacamole.properties.
*
* @throws GuacamoleException
* If guacamole.properties cannot be parsed.
*/
public String getUsernameClaimType() throws GuacamoleException {
return environment.getProperty(OPENID_USERNAME_CLAIM_TYPE, DEFAULT_USERNAME_CLAIM_TYPE);
}
/**
* Returns the space-separated list of OpenID scopes to request. By default,
* this will be "openid email profile". The OpenID scopes determine the
* information returned within the OpenID token, and thus affect what
* values can be used as an authenticated user's username.
*
* @return
* The space-separated list of OpenID scopes to request when identifying
* a user.
*
* @throws GuacamoleException
* If guacamole.properties cannot be parsed.
*/
public String getScope() throws GuacamoleException {
return environment.getProperty(OPENID_SCOPE, DEFAULT_SCOPE);
}
/**
* Returns the amount of clock skew tolerated for timestamp comparisons
* between the Guacamole server and OpenID service clocks, in seconds. Too
* much clock skew will affect token expiration calculations, possibly
* allowing old tokens to be used. By default, this will be 30.
*
* @return
* The amount of clock skew tolerated for timestamp comparisons, in
* seconds.
*
* @throws GuacamoleException
* If guacamole.properties cannot be parsed.
*/
public int getAllowedClockSkew() throws GuacamoleException {
return environment.getProperty(OPENID_ALLOWED_CLOCK_SKEW, DEFAULT_ALLOWED_CLOCK_SKEW);
}
/**
* Returns the maximum amount of time that an OpenID token should remain
* valid, in minutes. A token received from an OpenID service which is
* older than this amount of time will be rejected, even if it is otherwise
* valid. By default, this will be 300 (5 hours).
*
* @return
* The maximum amount of time that an OpenID token should remain valid,
* in minutes.
*
* @throws GuacamoleException
* If guacamole.properties cannot be parsed.
*/
public int getMaxTokenValidity() throws GuacamoleException {
return environment.getProperty(OPENID_MAX_TOKEN_VALIDITY, DEFAULT_MAX_TOKEN_VALIDITY);
}
/**
* Returns the maximum amount of time that a nonce generated by the
* Guacamole server should remain valid, in minutes. As each OpenID request
* has a unique nonce value, this imposes an upper limit on the amount of
* time any particular OpenID request can result in successful
* authentication within Guacamole. By default, this will be 10.
*
* @return
* The maximum amount of time that a nonce generated by the Guacamole
* server should remain valid, in minutes.
*
* @throws GuacamoleException
* If guacamole.properties cannot be parsed.
*/
public int getMaxNonceValidity() throws GuacamoleException {
return environment.getProperty(OPENID_MAX_NONCE_VALIDITY, DEFAULT_MAX_NONCE_VALIDITY);
}
}
| |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.junit.Assert.*;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
import org.apache.hadoop.hdfs.server.namenode.AclTestHelpers;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import com.google.common.collect.Lists;
/**
* Tests interaction of ACLs with snapshots.
*/
public class TestAclWithSnapshot {
  // Test users: the ACLs under test grant access to "bruce" and deny "diana".
  private static final UserGroupInformation BRUCE =
    UserGroupInformation.createUserForTesting("bruce", new String[] { });
  private static final UserGroupInformation DIANA =
    UserGroupInformation.createUserForTesting("diana", new String[] { });
  private static MiniDFSCluster cluster;
  private static Configuration conf;
  // FileSystem instances authenticated as each test user, for permission checks.
  private static FileSystem fsAsBruce, fsAsDiana;
  private static DistributedFileSystem hdfs;
  // Incremented in setUp() so each test uses a unique path and snapshot name.
  private static int pathCount = 0;
  private static Path path, snapshotPath;
  private static String snapshotName;
  // Lets tests declare an expected exception type before the triggering call.
  @Rule
  public ExpectedException exception = ExpectedException.none();
  /**
   * Starts a freshly formatted mini-cluster with ACL support enabled, shared
   * by all tests in this class.
   *
   * @throws Exception if cluster startup fails
   */
  @BeforeClass
  public static void init() throws Exception {
    conf = new Configuration();
    // Explicitly enable ACL support on the NameNode; these tests require it.
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
    initCluster(true);
  }
  /**
   * Closes the per-user FileSystem instances and shuts down the cluster.
   * Also invoked from restart() between cluster cycles.
   *
   * @throws Exception if shutdown fails
   */
  @AfterClass
  public static void shutdown() throws Exception {
    // Close client filesystems before stopping the cluster they talk to.
    IOUtils.cleanup(null, hdfs, fsAsBruce, fsAsDiana);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
@Before
public void setUp() {
++pathCount;
path = new Path("/p" + pathCount);
snapshotName = "snapshot" + pathCount;
snapshotPath = new Path(path, new Path(".snapshot", snapshotName));
}
  /**
   * Tests that after a snapshot is taken, changing the ACL of the snapshotted
   * directory root does not alter the ACL seen through the snapshot path, and
   * that this holds across restarts both without and with a new checkpoint.
   *
   * @throws Exception if there is an unexpected error
   */
  @Test
  public void testOriginalAclEnforcedForSnapshotRootAfterChange()
      throws Exception {
    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
    List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, NONE),
      aclEntry(ACCESS, OTHER, NONE));
    hdfs.setAcl(path, aclSpec);
    assertDirPermissionGranted(fsAsBruce, BRUCE, path);
    assertDirPermissionDenied(fsAsDiana, DIANA, path);
    SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
    // Both original and snapshot still have same ACL.
    AclStatus s = hdfs.getAclStatus(path);
    AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, NONE) }, returned);
    assertPermission((short)0750, path);
    s = hdfs.getAclStatus(snapshotPath);
    returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, NONE) }, returned);
    assertPermission((short)0750, snapshotPath);
    assertDirPermissionGranted(fsAsBruce, BRUCE, snapshotPath);
    assertDirPermissionDenied(fsAsDiana, DIANA, snapshotPath);
    // Replace the ACL on the original: now diana is granted, bruce is not.
    aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, READ_EXECUTE),
      aclEntry(ACCESS, USER, "diana", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, NONE),
      aclEntry(ACCESS, OTHER, NONE));
    hdfs.setAcl(path, aclSpec);
    // Original has changed, but snapshot still has old ACL.
    doSnapshotRootChangeAssertions(path, snapshotPath);
    // Re-verify after restart without, then with, a saved checkpoint.
    restart(false);
    doSnapshotRootChangeAssertions(path, snapshotPath);
    restart(true);
    doSnapshotRootChangeAssertions(path, snapshotPath);
  }
  /**
   * Asserts that the original directory reflects the new ACL (diana granted)
   * while the snapshot path retains the ACL in effect at snapshot creation
   * (bruce granted).
   *
   * @param path Path of the original directory
   * @param snapshotPath Path of the directory within the snapshot
   * @throws Exception if there is an unexpected error
   */
  private static void doSnapshotRootChangeAssertions(Path path,
      Path snapshotPath) throws Exception {
    AclStatus s = hdfs.getAclStatus(path);
    AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "diana", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, NONE) }, returned);
    assertPermission((short)0550, path);
    s = hdfs.getAclStatus(snapshotPath);
    returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, NONE) }, returned);
    assertPermission((short)0750, snapshotPath);
    // Access checks mirror the ACL split: snapshot enforces the old ACL.
    assertDirPermissionDenied(fsAsBruce, BRUCE, path);
    assertDirPermissionGranted(fsAsDiana, DIANA, path);
    assertDirPermissionGranted(fsAsBruce, BRUCE, snapshotPath);
    assertDirPermissionDenied(fsAsDiana, DIANA, snapshotPath);
  }
  /**
   * Tests that changing the ACLs of a file and a subdirectory inside a
   * snapshotted directory does not alter the ACLs seen through the snapshot
   * paths, across restarts with and without a new checkpoint.
   *
   * @throws Exception if there is an unexpected error
   */
  @Test
  public void testOriginalAclEnforcedForSnapshotContentsAfterChange()
      throws Exception {
    Path filePath = new Path(path, "file1");
    Path subdirPath = new Path(path, "subdir1");
    Path fileSnapshotPath = new Path(snapshotPath, "file1");
    Path subdirSnapshotPath = new Path(snapshotPath, "subdir1");
    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0777));
    FileSystem.create(hdfs, filePath, FsPermission.createImmutable((short)0600))
      .close();
    FileSystem.mkdirs(hdfs, subdirPath, FsPermission.createImmutable(
      (short)0700));
    List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, READ_EXECUTE),
      aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, NONE),
      aclEntry(ACCESS, OTHER, NONE));
    hdfs.setAcl(filePath, aclSpec);
    hdfs.setAcl(subdirPath, aclSpec);
    assertFilePermissionGranted(fsAsBruce, BRUCE, filePath);
    assertFilePermissionDenied(fsAsDiana, DIANA, filePath);
    assertDirPermissionGranted(fsAsBruce, BRUCE, subdirPath);
    assertDirPermissionDenied(fsAsDiana, DIANA, subdirPath);
    SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
    // Both original and snapshot still have same ACL.
    AclEntry[] expected = new AclEntry[] {
      aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, NONE) };
    AclStatus s = hdfs.getAclStatus(filePath);
    AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(expected, returned);
    assertPermission((short)0550, filePath);
    s = hdfs.getAclStatus(subdirPath);
    returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(expected, returned);
    assertPermission((short)0550, subdirPath);
    s = hdfs.getAclStatus(fileSnapshotPath);
    returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(expected, returned);
    assertPermission((short)0550, fileSnapshotPath);
    assertFilePermissionGranted(fsAsBruce, BRUCE, fileSnapshotPath);
    assertFilePermissionDenied(fsAsDiana, DIANA, fileSnapshotPath);
    s = hdfs.getAclStatus(subdirSnapshotPath);
    returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(expected, returned);
    assertPermission((short)0550, subdirSnapshotPath);
    assertDirPermissionGranted(fsAsBruce, BRUCE, subdirSnapshotPath);
    assertDirPermissionDenied(fsAsDiana, DIANA, subdirSnapshotPath);
    // Replace the ACLs on the originals: now diana is granted, bruce is not.
    aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, READ_EXECUTE),
      aclEntry(ACCESS, USER, "diana", ALL),
      aclEntry(ACCESS, GROUP, NONE),
      aclEntry(ACCESS, OTHER, NONE));
    hdfs.setAcl(filePath, aclSpec);
    hdfs.setAcl(subdirPath, aclSpec);
    // Original has changed, but snapshot still has old ACL.
    doSnapshotContentsChangeAssertions(filePath, fileSnapshotPath, subdirPath,
      subdirSnapshotPath);
    // Re-verify after restart without, then with, a saved checkpoint.
    restart(false);
    doSnapshotContentsChangeAssertions(filePath, fileSnapshotPath, subdirPath,
      subdirSnapshotPath);
    restart(true);
    doSnapshotContentsChangeAssertions(filePath, fileSnapshotPath, subdirPath,
      subdirSnapshotPath);
  }
  /**
   * Asserts that the original file and subdirectory reflect the new ACL
   * (diana granted) while their snapshot copies retain the ACL in effect at
   * snapshot creation (bruce granted).
   *
   * @param filePath Path of the original file
   * @param fileSnapshotPath Path of the file within the snapshot
   * @param subdirPath Path of the original subdirectory
   * @param subdirSnapshotPath Path of the subdirectory within the snapshot
   * @throws Exception if there is an unexpected error
   */
  private static void doSnapshotContentsChangeAssertions(Path filePath,
      Path fileSnapshotPath, Path subdirPath, Path subdirSnapshotPath)
      throws Exception {
    AclEntry[] expected = new AclEntry[] {
      aclEntry(ACCESS, USER, "diana", ALL),
      aclEntry(ACCESS, GROUP, NONE) };
    AclStatus s = hdfs.getAclStatus(filePath);
    AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(expected, returned);
    assertPermission((short)0570, filePath);
    assertFilePermissionDenied(fsAsBruce, BRUCE, filePath);
    assertFilePermissionGranted(fsAsDiana, DIANA, filePath);
    s = hdfs.getAclStatus(subdirPath);
    returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(expected, returned);
    assertPermission((short)0570, subdirPath);
    assertDirPermissionDenied(fsAsBruce, BRUCE, subdirPath);
    assertDirPermissionGranted(fsAsDiana, DIANA, subdirPath);
    // Snapshot copies still hold the pre-change ACL.
    expected = new AclEntry[] {
      aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, NONE) };
    s = hdfs.getAclStatus(fileSnapshotPath);
    returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(expected, returned);
    assertPermission((short)0550, fileSnapshotPath);
    assertFilePermissionGranted(fsAsBruce, BRUCE, fileSnapshotPath);
    assertFilePermissionDenied(fsAsDiana, DIANA, fileSnapshotPath);
    s = hdfs.getAclStatus(subdirSnapshotPath);
    returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(expected, returned);
    assertPermission((short)0550, subdirSnapshotPath);
    assertDirPermissionGranted(fsAsBruce, BRUCE, subdirSnapshotPath);
    assertDirPermissionDenied(fsAsDiana, DIANA, subdirSnapshotPath);
  }
  /**
   * Tests that after a snapshot is taken, removing the ACL of the snapshotted
   * directory root does not remove the ACL seen through the snapshot path,
   * across restarts with and without a new checkpoint.
   *
   * @throws Exception if there is an unexpected error
   */
  @Test
  public void testOriginalAclEnforcedForSnapshotRootAfterRemoval()
      throws Exception {
    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
    List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, NONE),
      aclEntry(ACCESS, OTHER, NONE));
    hdfs.setAcl(path, aclSpec);
    assertDirPermissionGranted(fsAsBruce, BRUCE, path);
    assertDirPermissionDenied(fsAsDiana, DIANA, path);
    SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
    // Both original and snapshot still have same ACL.
    AclStatus s = hdfs.getAclStatus(path);
    AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, NONE) }, returned);
    assertPermission((short)0750, path);
    s = hdfs.getAclStatus(snapshotPath);
    returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, NONE) }, returned);
    assertPermission((short)0750, snapshotPath);
    assertDirPermissionGranted(fsAsBruce, BRUCE, snapshotPath);
    assertDirPermissionDenied(fsAsDiana, DIANA, snapshotPath);
    hdfs.removeAcl(path);
    // Original has changed, but snapshot still has old ACL.
    doSnapshotRootRemovalAssertions(path, snapshotPath);
    // Re-verify after restart without, then with, a saved checkpoint.
    restart(false);
    doSnapshotRootRemovalAssertions(path, snapshotPath);
    restart(true);
    doSnapshotRootRemovalAssertions(path, snapshotPath);
  }
  /**
   * Asserts that the original directory has no ACL entries after removal
   * while the snapshot path retains the ACL in effect at snapshot creation.
   *
   * @param path Path of the original directory
   * @param snapshotPath Path of the directory within the snapshot
   * @throws Exception if there is an unexpected error
   */
  private static void doSnapshotRootRemovalAssertions(Path path,
      Path snapshotPath) throws Exception {
    AclStatus s = hdfs.getAclStatus(path);
    AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(new AclEntry[] { }, returned);
    assertPermission((short)0700, path);
    s = hdfs.getAclStatus(snapshotPath);
    returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, NONE) }, returned);
    assertPermission((short)0750, snapshotPath);
    // With the ACL removed, neither user may list the original directory;
    // the snapshot still grants bruce under the old ACL.
    assertDirPermissionDenied(fsAsBruce, BRUCE, path);
    assertDirPermissionDenied(fsAsDiana, DIANA, path);
    assertDirPermissionGranted(fsAsBruce, BRUCE, snapshotPath);
    assertDirPermissionDenied(fsAsDiana, DIANA, snapshotPath);
  }
  /**
   * Tests that removing the ACLs of a file and a subdirectory inside a
   * snapshotted directory does not remove the ACLs seen through the snapshot
   * paths, across restarts with and without a new checkpoint.
   *
   * @throws Exception if there is an unexpected error
   */
  @Test
  public void testOriginalAclEnforcedForSnapshotContentsAfterRemoval()
      throws Exception {
    Path filePath = new Path(path, "file1");
    Path subdirPath = new Path(path, "subdir1");
    Path fileSnapshotPath = new Path(snapshotPath, "file1");
    Path subdirSnapshotPath = new Path(snapshotPath, "subdir1");
    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0777));
    FileSystem.create(hdfs, filePath, FsPermission.createImmutable((short)0600))
      .close();
    FileSystem.mkdirs(hdfs, subdirPath, FsPermission.createImmutable(
      (short)0700));
    List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, READ_EXECUTE),
      aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, NONE),
      aclEntry(ACCESS, OTHER, NONE));
    hdfs.setAcl(filePath, aclSpec);
    hdfs.setAcl(subdirPath, aclSpec);
    assertFilePermissionGranted(fsAsBruce, BRUCE, filePath);
    assertFilePermissionDenied(fsAsDiana, DIANA, filePath);
    assertDirPermissionGranted(fsAsBruce, BRUCE, subdirPath);
    assertDirPermissionDenied(fsAsDiana, DIANA, subdirPath);
    SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
    // Both original and snapshot still have same ACL.
    AclEntry[] expected = new AclEntry[] {
      aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, NONE) };
    AclStatus s = hdfs.getAclStatus(filePath);
    AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(expected, returned);
    assertPermission((short)0550, filePath);
    s = hdfs.getAclStatus(subdirPath);
    returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(expected, returned);
    assertPermission((short)0550, subdirPath);
    s = hdfs.getAclStatus(fileSnapshotPath);
    returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(expected, returned);
    assertPermission((short)0550, fileSnapshotPath);
    assertFilePermissionGranted(fsAsBruce, BRUCE, fileSnapshotPath);
    assertFilePermissionDenied(fsAsDiana, DIANA, fileSnapshotPath);
    s = hdfs.getAclStatus(subdirSnapshotPath);
    returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(expected, returned);
    assertPermission((short)0550, subdirSnapshotPath);
    assertDirPermissionGranted(fsAsBruce, BRUCE, subdirSnapshotPath);
    assertDirPermissionDenied(fsAsDiana, DIANA, subdirSnapshotPath);
    hdfs.removeAcl(filePath);
    hdfs.removeAcl(subdirPath);
    // Original has changed, but snapshot still has old ACL.
    doSnapshotContentsRemovalAssertions(filePath, fileSnapshotPath, subdirPath,
      subdirSnapshotPath);
    // Re-verify after restart without, then with, a saved checkpoint.
    restart(false);
    doSnapshotContentsRemovalAssertions(filePath, fileSnapshotPath, subdirPath,
      subdirSnapshotPath);
    restart(true);
    doSnapshotContentsRemovalAssertions(filePath, fileSnapshotPath, subdirPath,
      subdirSnapshotPath);
  }
  /**
   * Asserts that the original file and subdirectory have no ACL entries after
   * removal while their snapshot copies retain the ACL in effect at snapshot
   * creation.
   *
   * @param filePath Path of the original file
   * @param fileSnapshotPath Path of the file within the snapshot
   * @param subdirPath Path of the original subdirectory
   * @param subdirSnapshotPath Path of the subdirectory within the snapshot
   * @throws Exception if there is an unexpected error
   */
  private static void doSnapshotContentsRemovalAssertions(Path filePath,
      Path fileSnapshotPath, Path subdirPath, Path subdirSnapshotPath)
      throws Exception {
    AclEntry[] expected = new AclEntry[] { };
    AclStatus s = hdfs.getAclStatus(filePath);
    AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(expected, returned);
    assertPermission((short)0500, filePath);
    assertFilePermissionDenied(fsAsBruce, BRUCE, filePath);
    assertFilePermissionDenied(fsAsDiana, DIANA, filePath);
    s = hdfs.getAclStatus(subdirPath);
    returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(expected, returned);
    assertPermission((short)0500, subdirPath);
    assertDirPermissionDenied(fsAsBruce, BRUCE, subdirPath);
    assertDirPermissionDenied(fsAsDiana, DIANA, subdirPath);
    // Snapshot copies still hold the pre-removal ACL.
    expected = new AclEntry[] {
      aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, NONE) };
    s = hdfs.getAclStatus(fileSnapshotPath);
    returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(expected, returned);
    assertPermission((short)0550, fileSnapshotPath);
    assertFilePermissionGranted(fsAsBruce, BRUCE, fileSnapshotPath);
    assertFilePermissionDenied(fsAsDiana, DIANA, fileSnapshotPath);
    s = hdfs.getAclStatus(subdirSnapshotPath);
    returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(expected, returned);
    assertPermission((short)0550, subdirSnapshotPath);
    assertDirPermissionGranted(fsAsBruce, BRUCE, subdirSnapshotPath);
    assertDirPermissionDenied(fsAsDiana, DIANA, subdirSnapshotPath);
  }
  /**
   * Tests that modifyAclEntries applies on top of the directory's current
   * state rather than the snapshotted state: two successive modifications
   * both appear in the result.
   *
   * @throws Exception if there is an unexpected error
   */
  @Test
  public void testModifyReadsCurrentState() throws Exception {
    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
    SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
    List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, "bruce", ALL));
    hdfs.modifyAclEntries(path, aclSpec);
    aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, "diana", READ_EXECUTE));
    hdfs.modifyAclEntries(path, aclSpec);
    // Both modifications are present; the second did not clobber the first.
    AclEntry[] expected = new AclEntry[] {
      aclEntry(ACCESS, USER, "bruce", ALL),
      aclEntry(ACCESS, USER, "diana", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, NONE) };
    AclStatus s = hdfs.getAclStatus(path);
    AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(expected, returned);
    assertPermission((short)0770, path);
    assertDirPermissionGranted(fsAsBruce, BRUCE, path);
    assertDirPermissionGranted(fsAsDiana, DIANA, path);
  }
  /**
   * Tests that removeAcl applies to the directory's current state rather than
   * the snapshotted state: an entry added after the snapshot is removed.
   *
   * @throws Exception if there is an unexpected error
   */
  @Test
  public void testRemoveReadsCurrentState() throws Exception {
    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
    SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
    List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, "bruce", ALL));
    hdfs.modifyAclEntries(path, aclSpec);
    hdfs.removeAcl(path);
    // The post-snapshot modification is gone; permissions revert to 0700.
    AclEntry[] expected = new AclEntry[] { };
    AclStatus s = hdfs.getAclStatus(path);
    AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(expected, returned);
    assertPermission((short)0700, path);
    assertDirPermissionDenied(fsAsBruce, BRUCE, path);
    assertDirPermissionDenied(fsAsDiana, DIANA, path);
  }
  /**
   * Tests that a directory's default ACL entries are preserved as default
   * entries in the snapshot, and are not copied into the snapshot's access
   * ACL (bruce still has no access permission).
   *
   * @throws Exception if there is an unexpected error
   */
  @Test
  public void testDefaultAclNotCopiedToAccessAclOfNewSnapshot()
      throws Exception {
    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
    List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(DEFAULT, USER, "bruce", READ_EXECUTE));
    hdfs.modifyAclEntries(path, aclSpec);
    SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
    AclStatus s = hdfs.getAclStatus(path);
    AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(new AclEntry[] {
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "bruce", READ_EXECUTE),
      aclEntry(DEFAULT, GROUP, NONE),
      aclEntry(DEFAULT, MASK, READ_EXECUTE),
      aclEntry(DEFAULT, OTHER, NONE) }, returned);
    assertPermission((short)0700, path);
    s = hdfs.getAclStatus(snapshotPath);
    returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(new AclEntry[] {
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "bruce", READ_EXECUTE),
      aclEntry(DEFAULT, GROUP, NONE),
      aclEntry(DEFAULT, MASK, READ_EXECUTE),
      aclEntry(DEFAULT, OTHER, NONE) }, returned);
    assertPermission((short)0700, snapshotPath);
    // Default entries grant nothing by themselves: bruce is still denied.
    assertDirPermissionDenied(fsAsBruce, BRUCE, snapshotPath);
  }
  /**
   * Tests that modifying ACL entries through a read-only snapshot path is
   * rejected with SnapshotAccessControlException.
   *
   * @throws Exception if there is an unexpected error
   */
  @Test
  public void testModifyAclEntriesSnapshotPath() throws Exception {
    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
    SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
    List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(DEFAULT, USER, "bruce", READ_EXECUTE));
    exception.expect(SnapshotAccessControlException.class);
    hdfs.modifyAclEntries(snapshotPath, aclSpec);
  }
  /**
   * Tests that removing ACL entries through a read-only snapshot path is
   * rejected with SnapshotAccessControlException.
   *
   * @throws Exception if there is an unexpected error
   */
  @Test
  public void testRemoveAclEntriesSnapshotPath() throws Exception {
    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
    SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
    List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(DEFAULT, USER, "bruce"));
    exception.expect(SnapshotAccessControlException.class);
    hdfs.removeAclEntries(snapshotPath, aclSpec);
  }
  /**
   * Tests that removing the default ACL through a read-only snapshot path is
   * rejected with SnapshotAccessControlException.
   *
   * @throws Exception if there is an unexpected error
   */
  @Test
  public void testRemoveDefaultAclSnapshotPath() throws Exception {
    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
    SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
    exception.expect(SnapshotAccessControlException.class);
    hdfs.removeDefaultAcl(snapshotPath);
  }
  /**
   * Tests that removing the entire ACL through a read-only snapshot path is
   * rejected with SnapshotAccessControlException.
   *
   * @throws Exception if there is an unexpected error
   */
  @Test
  public void testRemoveAclSnapshotPath() throws Exception {
    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
    SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
    exception.expect(SnapshotAccessControlException.class);
    hdfs.removeAcl(snapshotPath);
  }
  /**
   * Tests that replacing the ACL through a read-only snapshot path is
   * rejected with SnapshotAccessControlException.
   *
   * @throws Exception if there is an unexpected error
   */
  @Test
  public void testSetAclSnapshotPath() throws Exception {
    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
    SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
    List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(DEFAULT, USER, "bruce"));
    exception.expect(SnapshotAccessControlException.class);
    hdfs.setAcl(snapshotPath, aclSpec);
  }
@Test
public void testChangeAclExceedsQuota() throws Exception {
Path filePath = new Path(path, "file1");
Path fileSnapshotPath = new Path(snapshotPath, "file1");
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0755));
hdfs.allowSnapshot(path);
hdfs.setQuota(path, 3, HdfsConstants.QUOTA_DONT_SET);
FileSystem.create(hdfs, filePath, FsPermission.createImmutable((short)0600))
.close();
hdfs.setPermission(filePath, FsPermission.createImmutable((short)0600));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bruce", READ_WRITE));
hdfs.modifyAclEntries(filePath, aclSpec);
hdfs.createSnapshot(path, snapshotName);
AclStatus s = hdfs.getAclStatus(filePath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_WRITE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)0660, filePath);
s = hdfs.getAclStatus(fileSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_WRITE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)0660, filePath);
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bruce", READ));
exception.expect(NSQuotaExceededException.class);
hdfs.modifyAclEntries(filePath, aclSpec);
}
@Test
public void testRemoveAclExceedsQuota() throws Exception {
Path filePath = new Path(path, "file1");
Path fileSnapshotPath = new Path(snapshotPath, "file1");
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0755));
hdfs.allowSnapshot(path);
hdfs.setQuota(path, 3, HdfsConstants.QUOTA_DONT_SET);
FileSystem.create(hdfs, filePath, FsPermission.createImmutable((short)0600))
.close();
hdfs.setPermission(filePath, FsPermission.createImmutable((short)0600));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bruce", READ_WRITE));
hdfs.modifyAclEntries(filePath, aclSpec);
hdfs.createSnapshot(path, snapshotName);
AclStatus s = hdfs.getAclStatus(filePath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_WRITE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)0660, filePath);
s = hdfs.getAclStatus(fileSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_WRITE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)0660, filePath);
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bruce", READ));
exception.expect(NSQuotaExceededException.class);
hdfs.removeAcl(filePath);
}
  /**
   * Tests that querying the ACL of the ".snapshot" pseudo-directory itself
   * succeeds and returns an empty entry list.
   *
   * @throws Exception if there is an unexpected error
   */
  @Test
  public void testGetAclStatusDotSnapshotPath() throws Exception {
    hdfs.mkdirs(path);
    SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
    AclStatus s = hdfs.getAclStatus(new Path(path, ".snapshot"));
    AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(new AclEntry[] { }, returned);
  }
/**
* Asserts that permission is denied to the given fs/user for the given
* directory.
*
* @param fs FileSystem to check
* @param user UserGroupInformation owner of fs
* @param pathToCheck Path directory to check
* @throws Exception if there is an unexpected error
*/
private static void assertDirPermissionDenied(FileSystem fs,
UserGroupInformation user, Path pathToCheck) throws Exception {
try {
fs.listStatus(pathToCheck);
fail("expected AccessControlException for user " + user + ", path = " +
pathToCheck);
} catch (AccessControlException e) {
// expected
}
}
/**
* Asserts that permission is granted to the given fs/user for the given
* directory.
*
* @param fs FileSystem to check
* @param user UserGroupInformation owner of fs
* @param pathToCheck Path directory to check
* @throws Exception if there is an unexpected error
*/
private static void assertDirPermissionGranted(FileSystem fs,
UserGroupInformation user, Path pathToCheck) throws Exception {
try {
fs.listStatus(pathToCheck);
} catch (AccessControlException e) {
fail("expected permission granted for user " + user + ", path = " +
pathToCheck);
}
}
  /**
   * Asserts the value of the FsPermission bits on the inode of the test path.
   *
   * @param perm short expected permission bits
   * @param pathToCheck Path to check
   * @throws Exception thrown if there is an unexpected error
   */
  private static void assertPermission(short perm, Path pathToCheck)
      throws Exception {
    // Delegates to the shared helper, always checking against this class's
    // static hdfs instance.
    AclTestHelpers.assertPermission(hdfs, pathToCheck, perm);
  }
  /**
   * Initialize the cluster, wait for it to become active, and get FileSystem
   * instances for our test users.
   *
   * @param format if true, format the NameNode and DataNodes before starting up
   * @throws Exception if any step fails
   */
  private static void initCluster(boolean format) throws Exception {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format)
      .build();
    cluster.waitActive();
    hdfs = cluster.getFileSystem();
    // Per-user handles used by the permission-granted/denied assertions.
    fsAsBruce = DFSTestUtil.getFileSystemAs(BRUCE, conf);
    fsAsDiana = DFSTestUtil.getFileSystemAs(DIANA, conf);
  }
  /**
   * Restart the cluster, optionally saving a new checkpoint.
   *
   * @param checkpoint boolean true to save a new checkpoint
   * @throws Exception if restart fails
   */
  private static void restart(boolean checkpoint) throws Exception {
    NameNode nameNode = cluster.getNameNode();
    if (checkpoint) {
      // Saving the namespace requires safe mode.
      NameNodeAdapter.enterSafeMode(nameNode, false);
      NameNodeAdapter.saveNamespace(nameNode);
    }
    // Restart without re-formatting so on-disk state is reloaded.
    shutdown();
    initCluster(false);
  }
}
| |
/*
* Copyright (c) 2010-2014 Evolveum
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.evolveum.midpoint.prism.query;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import javax.xml.namespace.QName;
import com.evolveum.midpoint.prism.path.ItemPath;
import com.evolveum.midpoint.util.DebugDumpable;
import com.evolveum.midpoint.util.DebugUtil;
public class ObjectPaging implements DebugDumpable, Serializable {
    // Paging window start; null when unspecified (see createEmptyPaging).
    private Integer offset;
    // Maximum number of results; null when unspecified.
    private Integer maxSize;
    // Ordering instructions; the first entry is the primary ordering.
    private List<ObjectOrdering> ordering = new ArrayList<>();
    // Opaque cookie correlating successive paged searches; see getCookie().
    private String cookie;
    // Constructors are non-public; use the createPaging(...) factory methods.
    protected ObjectPaging() {
    }
    ObjectPaging(Integer offset, Integer maxSize) {
        this.offset = offset;
        this.maxSize = maxSize;
    }
    ObjectPaging(ItemPath orderBy, OrderDirection direction) {
        // Installs the given instruction as the sole (primary) ordering.
        setOrdering(orderBy, direction);
    }
    ObjectPaging(Integer offset, Integer maxSize, ItemPath orderBy, OrderDirection direction) {
        this.offset = offset;
        this.maxSize = maxSize;
        // Installs the given instruction as the sole (primary) ordering.
        setOrdering(orderBy, direction);
    }
    // Creates paging with an offset/size window and no ordering.
    public static ObjectPaging createPaging(Integer offset, Integer maxSize){
        return new ObjectPaging(offset, maxSize);
    }
    // Creates paging with a window and a single ordering given as a QName.
    public static ObjectPaging createPaging(Integer offset, Integer maxSize, QName orderBy, OrderDirection direction){
        return new ObjectPaging(offset, maxSize, new ItemPath(orderBy), direction);
    }
    // Creates paging with a window and a single ordering given as an ItemPath.
    public static ObjectPaging createPaging(Integer offset, Integer maxSize, ItemPath orderBy, OrderDirection direction){
        return new ObjectPaging(offset, maxSize, orderBy, direction);
    }
    // Convenience overload: the ordering item is given by local name + namespace.
    public static ObjectPaging createPaging(Integer offset, Integer maxSize, String orderBy, String namespace, OrderDirection direction){
        return createPaging(offset, maxSize, new QName(namespace, orderBy), direction);
    }
    // Creates paging with ordering only (no offset/size window).
    public static ObjectPaging createPaging(ItemPath orderBy, OrderDirection direction) {
        return new ObjectPaging(orderBy, direction);
    }
    // Creates paging with ordering only, the item given as a QName.
    public static ObjectPaging createPaging(QName orderBy, OrderDirection direction) {
        return new ObjectPaging(new ItemPath(orderBy), direction);
    }
    // Creates paging with no window, no ordering, and no cookie.
    public static ObjectPaging createEmptyPaging(){
        return new ObjectPaging();
    }
// TODO rename to getPrimaryOrderingDirection
public OrderDirection getDirection() {
ObjectOrdering primary = getPrimaryOrdering();
return primary != null ? primary.getDirection() : null;
}
// TODO rename to getPrimaryOrderingPath
public ItemPath getOrderBy() {
ObjectOrdering primary = getPrimaryOrdering();
return primary != null ? primary.getOrderBy() : null;
}
public ObjectOrdering getPrimaryOrdering() {
if (hasOrdering()) {
return ordering.get(0);
} else {
return null;
}
}
    // TODO name?
    // Returns the live list of ordering instructions (not a defensive copy).
    public List<ObjectOrdering> getOrderingInstructions() {
        return ordering;
    }
public boolean hasOrdering() {
return ordering != null && !ordering.isEmpty(); // first is just for sure
}
// Replaces any existing ordering with a single instruction.
public void setOrdering(ItemPath orderBy, OrderDirection direction) {
    this.ordering = new ArrayList<>();
    addOrderingInstruction(orderBy, direction);
}

// Appends a secondary ordering instruction after the existing ones.
public void addOrderingInstruction(ItemPath orderBy, OrderDirection direction) {
    this.ordering.add(new ObjectOrdering(orderBy, direction));
}

public void addOrderingInstruction(QName orderBy, OrderDirection direction) {
    addOrderingInstruction(new ItemPath(orderBy), direction);
}

// Replaces the ordering with the given instructions (defensively copied into a new list).
public void setOrdering(ObjectOrdering... orderings) {
    this.ordering = new ArrayList<>(Arrays.asList(orderings));
}

public void setOrdering(Collection<ObjectOrdering> orderings) {
    this.ordering = new ArrayList<>(orderings);
}
// Number of results to skip; null means "from the beginning".
public Integer getOffset() {
    return offset;
}

public void setOffset(Integer offset) {
    this.offset = offset;
}

// Maximum number of results to return; null means "unlimited".
public Integer getMaxSize() {
    return maxSize;
}

public void setMaxSize(Integer maxSize) {
    this.maxSize = maxSize;
}
/**
 * Returns the paging cookie. The paging cookie is used for optimization of paged searches.
 * The presence of the cookie may allow the data store to correlate queries and associate
 * them with the same server-side context. This may allow the data store to reuse the same
 * pre-computed data. We want this as the sorted and paged searches may be quite expensive.
 * It is expected that the cookie returned from the search will be passed back in the options
 * when the next page of the same search is requested.
 *
 * It is OK to initialize a search without any cookie. If the datastore utilizes a re-usable
 * context it will return a cookie in a search response.
 *
 * @return opaque cookie string, or null if none was set
 */
public String getCookie() {
    return cookie;
}

/**
 * Sets paging cookie. The paging cookie is used for optimization of paged searches.
 * The presence of the cookie may allow the data store to correlate queries and associate
 * them with the same server-side context. This may allow the data store to reuse the same
 * pre-computed data. We want this as the sorted and paged searches may be quite expensive.
 * It is expected that the cookie returned from the search will be passed back in the options
 * when the next page of the same search is requested.
 *
 * It is OK to initialize a search without any cookie. If the datastore utilizes a re-usable
 * context it will return a cookie in a search response.
 *
 * @param cookie opaque cookie string obtained from a previous search response; may be null
 */
public void setCookie(String cookie) {
    this.cookie = cookie;
}
/**
 * Returns a copy of this paging. The ordering list itself is copied so later
 * additions do not leak between instances, but the individual
 * {@code ObjectOrdering} elements are shared (shallow copy).
 */
@Override
public ObjectPaging clone() {
    ObjectPaging clone = new ObjectPaging();
    copyTo(clone);
    return clone;
}

/**
 * Copies this instance's state into {@code clone}; intended for use by
 * subclass {@code clone()} implementations.
 */
protected void copyTo(ObjectPaging clone) {
    clone.offset = this.offset;
    clone.maxSize = this.maxSize;
    // Copy the list container (not its elements) to keep the clones independent.
    clone.ordering = this.ordering != null ? new ArrayList<>(this.ordering) : null;
    clone.cookie = this.cookie;
}
/** Multi-line debug dump at indent level 0. */
@Override
public String debugDump() {
    return debugDump(0);
}

/**
 * Multi-line debug dump; each present attribute goes on its own line,
 * indented one level deeper than {@code indent}.
 */
@Override
public String debugDump(int indent) {
    StringBuilder sb = new StringBuilder("Paging:");
    if (getOffset() != null) {
        sb.append('\n');
        DebugUtil.indentDebugDump(sb, indent + 1);
        sb.append("Offset: ").append(getOffset());
    }
    if (getMaxSize() != null) {
        sb.append('\n');
        DebugUtil.indentDebugDump(sb, indent + 1);
        sb.append("Max size: ").append(getMaxSize());
    }
    if (hasOrdering()) {
        sb.append('\n');
        DebugUtil.indentDebugDump(sb, indent + 1);
        sb.append("Ordering: ").append(ordering);
    }
    if (getCookie() != null) {
        sb.append('\n');
        DebugUtil.indentDebugDump(sb, indent + 1);
        sb.append("Cookie: ").append(getCookie());
    }
    return sb.toString();
}
/**
 * Compact one-line summary: offset (O), max size (M), ordering (ORD) and
 * cookie (C) are included only when present.
 */
@Override
public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append("PAGING: ");
    // Note: the original "this == null" guard was removed — `this` can never
    // be null in Java, so that branch was dead code.
    if (getOffset() != null){
        sb.append("O: ");
        sb.append(getOffset());
        sb.append(",");
    }
    if (getMaxSize() != null){
        sb.append("M: ");
        sb.append(getMaxSize());
        sb.append(",");
    }
    if (hasOrdering()) {
        sb.append("ORD: ");
        sb.append(ordering);
        sb.append(", ");
    }
    if (getCookie() != null) {
        sb.append("C:");
        sb.append(getCookie());
    }
    return sb.toString();
}
/** Exact equality; delegates to {@link #equals(Object, boolean)} with {@code exact = true}. */
@Override
public boolean equals(Object o) {
    return equals(o, true);
}

/**
 * Equality with a configurable strictness for the ordering items:
 * {@code exact} is passed through to {@code ObjectOrdering.equals(oo, exact)}.
 */
public boolean equals(Object o, boolean exact) {
    if (this == o) {
        return true;
    }
    if (o == null || getClass() != o.getClass()) {
        return false;
    }
    ObjectPaging other = (ObjectPaging) o;
    if (offset == null ? other.offset != null : !offset.equals(other.offset)) {
        return false;
    }
    if (maxSize == null ? other.maxSize != null : !maxSize.equals(other.maxSize)) {
        return false;
    }
    // Exactly one of the two ordering lists is null -> not equal.
    if ((ordering == null) != (other.ordering == null)) {
        return false;
    }
    if (ordering != null) {
        if (ordering.size() != other.ordering.size()) {
            return false;
        }
        for (int i = 0; i < ordering.size(); i++) {
            if (!ordering.get(i).equals(other.ordering.get(i), exact)) {
                return false;
            }
        }
    }
    return cookie == null ? other.cookie == null : cookie.equals(other.cookie);
}
// Standard 31-multiplier hash over offset, maxSize, ordering and cookie.
// NOTE(review): ordering contributes via List.hashCode()/ObjectOrdering.hashCode();
// this must stay consistent with ObjectOrdering.equals(oo, true) used in equals() — verify.
@Override
public int hashCode() {
    int result = offset != null ? offset.hashCode() : 0;
    result = 31 * result + (maxSize != null ? maxSize.hashCode() : 0);
    result = 31 * result + (ordering != null ? ordering.hashCode() : 0);
    result = 31 * result + (cookie != null ? cookie.hashCode() : 0);
    return result;
}
}
| |
/*
* Copyright 2016-present Open Networking Laboratory
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onosproject.routing.impl;
import com.google.common.collect.Sets;
import org.easymock.EasyMock;
import org.junit.Before;
import org.junit.Test;
import org.onlab.packet.Ethernet;
import org.onlab.packet.Ip4Prefix;
import org.onlab.packet.IpAddress;
import org.onlab.packet.IpPrefix;
import org.onlab.packet.MacAddress;
import org.onlab.packet.VlanId;
import org.onosproject.TestApplicationId;
import org.onosproject.app.ApplicationService;
import org.onosproject.cfg.ComponentConfigService;
import org.onosproject.core.ApplicationId;
import org.onosproject.core.CoreService;
import org.onosproject.incubator.net.intf.Interface;
import org.onosproject.incubator.net.intf.InterfaceListener;
import org.onosproject.incubator.net.intf.InterfaceService;
import org.onosproject.incubator.net.intf.InterfaceServiceAdapter;
import org.onosproject.incubator.net.routing.ResolvedRoute;
import org.onosproject.incubator.net.routing.RouteEvent;
import org.onosproject.incubator.net.routing.RouteListener;
import org.onosproject.incubator.net.routing.RouteServiceAdapter;
import org.onosproject.net.ConnectPoint;
import org.onosproject.net.DeviceId;
import org.onosproject.net.PortNumber;
import org.onosproject.net.config.NetworkConfigListener;
import org.onosproject.net.config.NetworkConfigRegistry;
import org.onosproject.net.config.NetworkConfigService;
import org.onosproject.net.device.DeviceListener;
import org.onosproject.net.device.DeviceService;
import org.onosproject.net.device.DeviceServiceAdapter;
import org.onosproject.net.flow.DefaultTrafficSelector;
import org.onosproject.net.flow.DefaultTrafficTreatment;
import org.onosproject.net.flow.TrafficSelector;
import org.onosproject.net.flow.TrafficTreatment;
import org.onosproject.net.flowobjective.DefaultForwardingObjective;
import org.onosproject.net.flowobjective.DefaultNextObjective;
import org.onosproject.net.flowobjective.FlowObjectiveService;
import org.onosproject.net.flowobjective.ForwardingObjective;
import org.onosproject.net.flowobjective.NextObjective;
import org.onosproject.net.host.InterfaceIpAddress;
import org.onosproject.routing.RoutingService;
import org.onosproject.routing.config.RouterConfig;
import org.osgi.service.component.ComponentContext;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import static org.easymock.EasyMock.anyObject;
import static org.easymock.EasyMock.anyString;
import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.createNiceMock;
import static org.easymock.EasyMock.eq;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.expectLastCall;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.reset;
import static org.easymock.EasyMock.verify;
/**
* Unit tests for SingleSwitchFibInstaller.
*/
public class SingleSwitchFibInstallerTest {

    // Single switch under test and its two ports.
    private static final DeviceId DEVICE_ID = DeviceId.deviceId("of:0000000000000001");

    private static final ConnectPoint SW1_ETH1 = new ConnectPoint(
            DEVICE_ID, PortNumber.portNumber(1));

    private static final ConnectPoint SW1_ETH2 = new ConnectPoint(
            DEVICE_ID, PortNumber.portNumber(2));

    // Next-objective id handed out by the mocked FlowObjectiveService.
    private static final int NEXT_ID = 11;

    private static final VlanId VLAN1 = VlanId.vlanId((short) 1);
    private static final MacAddress MAC1 = MacAddress.valueOf("00:00:00:00:00:01");
    private static final MacAddress MAC2 = MacAddress.valueOf("00:00:00:00:00:02");

    private static final IpPrefix PREFIX1 = Ip4Prefix.valueOf("1.1.1.0/24");

    // NEXT_HOP1 is reachable via the untagged interface, NEXT_HOP2 via the VLAN interface.
    private static final IpAddress NEXT_HOP1 = IpAddress.valueOf("192.168.10.1");
    private static final IpAddress NEXT_HOP2 = IpAddress.valueOf("192.168.20.1");

    private static final InterfaceIpAddress INTF1 =
            InterfaceIpAddress.valueOf("192.168.10.2/24");
    private static final InterfaceIpAddress INTF2 =
            InterfaceIpAddress.valueOf("192.168.20.2/24");

    private final Set<Interface> interfaces = Sets.newHashSet();

    private InterfaceService interfaceService;
    private NetworkConfigService networkConfigService;
    private NetworkConfigRegistry networkConfigRegistry;
    private FlowObjectiveService flowObjectiveService;
    private ApplicationService applicationService;
    private DeviceService deviceService;

    private static final ApplicationId APPID = TestApplicationId.create("foo");

    // Listeners captured from the component under test so events can be injected directly.
    private RouteListener routeListener;
    private DeviceListener deviceListener;

    private RouterConfig routerConfig;
    private SingleSwitchFibInstaller sSfibInstaller;
    private InterfaceListener interfaceListener;

    @Before
    public void setUp() throws Exception {
        sSfibInstaller = new SingleSwitchFibInstaller();

        sSfibInstaller.componentConfigService = createNiceMock(ComponentConfigService.class);
        ComponentContext mockContext = createNiceMock(ComponentContext.class);

        routerConfig = new TestRouterConfig();
        interfaceService = createMock(InterfaceService.class);

        networkConfigService = createMock(NetworkConfigService.class);
        networkConfigService.addListener(anyObject(NetworkConfigListener.class));
        expectLastCall().anyTimes();
        networkConfigRegistry = createMock(NetworkConfigRegistry.class);
        // flowObjectiveService stays in record mode; each test sets its own expectations.
        flowObjectiveService = createMock(FlowObjectiveService.class);
        applicationService = createNiceMock(ApplicationService.class);
        replay(applicationService);
        deviceService = new TestDeviceService();
        CoreService coreService = createNiceMock(CoreService.class);
        expect(coreService.registerApplication(anyString())).andReturn(APPID).anyTimes();
        replay(coreService);

        sSfibInstaller.networkConfigService = networkConfigService;
        sSfibInstaller.networkConfigRegistry = networkConfigRegistry;
        sSfibInstaller.interfaceService = interfaceService;
        sSfibInstaller.flowObjectiveService = flowObjectiveService;
        sSfibInstaller.applicationService = applicationService;
        sSfibInstaller.coreService = coreService;
        sSfibInstaller.routeService = new TestRouteService();
        sSfibInstaller.deviceService = deviceService;

        setUpNetworkConfigService();
        setUpInterfaceService();
        sSfibInstaller.activate(mockContext);
    }

    /**
     * Sets up InterfaceService.
     */
    private void setUpInterfaceService() {
        interfaceService.addListener(anyObject(InterfaceListener.class));
        expectLastCall().andDelegateTo(new TestInterfaceService());

        // Interface with no VLAN
        Interface sw1Eth1 = new Interface("intf1", SW1_ETH1,
                Collections.singletonList(INTF1), MAC1, VlanId.NONE);
        expect(interfaceService.getMatchingInterface(NEXT_HOP1)).andReturn(sw1Eth1);
        interfaces.add(sw1Eth1);

        // Interface with a VLAN
        Interface sw2Eth1 = new Interface("intf2", SW1_ETH2,
                Collections.singletonList(INTF2), MAC2, VLAN1);
        expect(interfaceService.getMatchingInterface(NEXT_HOP2)).andReturn(sw2Eth1);
        interfaces.add(sw2Eth1);

        expect(interfaceService.getInterfaces()).andReturn(interfaces);

        replay(interfaceService);
    }

    /**
     * Sets up NetworkConfigService.
     */
    private void setUpNetworkConfigService() {
        expect(networkConfigService.getConfig(
                anyObject(ApplicationId.class), eq(RoutingService.ROUTER_CONFIG_CLASS))).
                andReturn(routerConfig);
        replay(networkConfigService);
    }

    /**
     * Sets up FlowObjectiveService.
     */
    private void setUpFlowObjectiveService() {
        expect(flowObjectiveService.allocateNextId()).andReturn(NEXT_ID);
        replay(flowObjectiveService);
    }

    /**
     * Creates a next objective with the given parameters.
     *
     * @param srcMac source MAC address
     * @param dstMac destination MAC address
     * @param port port number
     * @param vlan vlan ID
     * @param add whether to create an add objective or remove objective
     * @return new next objective
     */
    private NextObjective createNextObjective(MacAddress srcMac,
                                              MacAddress dstMac,
                                              PortNumber port,
                                              VlanId vlan,
                                              boolean add) {
        TrafficTreatment.Builder treatment = DefaultTrafficTreatment.builder()
                .setEthSrc(srcMac)
                .setEthDst(dstMac);
        TrafficSelector.Builder metabuilder = null;
        if (!vlan.equals(VlanId.NONE)) {
            // Tagged next hop: push the VLAN in the treatment.
            treatment.pushVlan()
                    .setVlanId(vlan)
                    .setVlanPcp((byte) 0);
        } else {
            // Untagged next hop: match on the internally assigned VLAN via metadata.
            metabuilder = DefaultTrafficSelector.builder();
            metabuilder.matchVlanId(VlanId.vlanId(SingleSwitchFibInstaller.ASSIGNED_VLAN));
        }
        treatment.setOutput(port);
        NextObjective.Builder nextBuilder = DefaultNextObjective.builder()
                .withId(NEXT_ID)
                .addTreatment(treatment.build())
                .withType(NextObjective.Type.SIMPLE)
                .fromApp(APPID);
        if (metabuilder != null) {
            nextBuilder.withMeta(metabuilder.build());
        }
        return add ? nextBuilder.add() : nextBuilder.remove();
    }

    /**
     * Creates a new forwarding objective with the given parameters.
     *
     * @param prefix IP prefix
     * @param add whether to create an add objective or a remove objective
     * @return new forwarding objective
     */
    private ForwardingObjective createForwardingObjective(IpPrefix prefix,
                                                          boolean add) {
        TrafficSelector selector = DefaultTrafficSelector.builder()
                .matchEthType(Ethernet.TYPE_IPV4)
                .matchIPDst(prefix)
                .build();
        // Longer prefixes get higher priority so more-specific routes win.
        int priority = prefix.prefixLength() * 5 + 100;
        ForwardingObjective.Builder fwdBuilder = DefaultForwardingObjective.builder()
                .fromApp(APPID)
                .makePermanent()
                .withSelector(selector)
                .withPriority(priority)
                .withFlag(ForwardingObjective.Flag.SPECIFIC);
        if (add) {
            fwdBuilder.nextStep(NEXT_ID);
        } else {
            fwdBuilder.withTreatment(DefaultTrafficTreatment.builder().build());
        }
        return add ? fwdBuilder.add() : fwdBuilder.remove();
    }

    /**
     * Tests adding a route.
     *
     * We verify that the flowObjectiveService records the correct state and that the
     * correct flow is submitted to the flowObjectiveService.
     */
    @Test
    public void testRouteAdd() {
        ResolvedRoute resolvedRoute = new ResolvedRoute(PREFIX1, NEXT_HOP1, MAC1);

        // Create the next objective
        NextObjective nextObjective = createNextObjective(MAC1, MAC1, SW1_ETH1.port(), VlanId.NONE, true);
        flowObjectiveService.next(DEVICE_ID, nextObjective);

        // Create the flow objective
        ForwardingObjective fwd = createForwardingObjective(PREFIX1, true);
        flowObjectiveService.forward(DEVICE_ID, fwd);
        EasyMock.expectLastCall().once();
        setUpFlowObjectiveService();

        // Send in the add event
        RouteEvent routeEvent = new RouteEvent(RouteEvent.Type.ROUTE_ADDED, resolvedRoute);
        routeListener.event(routeEvent);
        verify(flowObjectiveService);
    }

    /**
     * Tests adding a route to a next hop in a VLAN.
     *
     * We verify that the flowObjectiveService records the correct state and that the
     * correct flow objective is submitted to the flowObjectiveService.
     */
    @Test
    public void testRouteAddWithVlan() {
        ResolvedRoute route = new ResolvedRoute(PREFIX1, NEXT_HOP2, MAC2);

        // Create the next objective
        NextObjective nextObjective = createNextObjective(MAC2, MAC2, SW1_ETH2.port(), VLAN1, true);
        flowObjectiveService.next(DEVICE_ID, nextObjective);

        // Create the flow objective
        ForwardingObjective fwd = createForwardingObjective(PREFIX1, true);
        flowObjectiveService.forward(DEVICE_ID, fwd);
        EasyMock.expectLastCall().once();
        setUpFlowObjectiveService();

        // Send in the add event
        routeListener.event(new RouteEvent(RouteEvent.Type.ROUTE_ADDED, route));

        verify(flowObjectiveService);
    }

    /**
     * Tests updating a route.
     *
     * We verify that the flowObjectiveService records the correct state and that the
     * correct flow is submitted to the flowObjectiveService.
     */
    @Test
    public void testRouteUpdate() {
        // Firstly add a route
        testRouteAdd();
        reset(flowObjectiveService);

        ResolvedRoute route = new ResolvedRoute(PREFIX1, NEXT_HOP2, MAC2);

        // Create the next objective
        NextObjective nextObjective = createNextObjective(MAC2, MAC2, SW1_ETH2.port(), VLAN1, true);
        flowObjectiveService.next(DEVICE_ID, nextObjective);

        // Create the flow objective
        ForwardingObjective fwd = createForwardingObjective(PREFIX1, true);
        flowObjectiveService.forward(DEVICE_ID, fwd);
        EasyMock.expectLastCall().once();
        setUpFlowObjectiveService();

        // Send in the update event
        routeListener.event(new RouteEvent(RouteEvent.Type.ROUTE_UPDATED, route));

        verify(flowObjectiveService);
    }

    /**
     * Tests deleting a route.
     *
     * We verify that the flowObjectiveService records the correct state and that the
     * correct flow is withdrawn from the flowObjectiveService.
     */
    @Test
    public void testRouteDelete() {
        // Firstly add a route
        testRouteAdd();

        // Construct the existing route
        ResolvedRoute route = new ResolvedRoute(PREFIX1, null, null);

        // Create the flow objective
        reset(flowObjectiveService);
        ForwardingObjective fwd = createForwardingObjective(PREFIX1, false);
        flowObjectiveService.forward(DEVICE_ID, fwd);
        replay(flowObjectiveService);

        // Send in the delete event
        routeListener.event(new RouteEvent(RouteEvent.Type.ROUTE_REMOVED, route));

        verify(flowObjectiveService);
    }

    // Captures the InterfaceListener that SingleSwitchFibInstaller registers.
    private class TestInterfaceService extends InterfaceServiceAdapter {
        @Override
        public void addListener(InterfaceListener listener) {
            interfaceListener = listener;
        }
    }

    // Captures the RouteListener so tests can inject RouteEvents directly.
    private class TestRouteService extends RouteServiceAdapter {
        @Override
        public void addListener(RouteListener listener) {
            SingleSwitchFibInstallerTest.this.routeListener = listener;
        }
    }

    // Minimal router config pointing at the two test interfaces.
    private class TestRouterConfig extends RouterConfig {
        @Override
        public List<String> getInterfaces() {
            ArrayList<String> interfaces = new ArrayList<>();
            interfaces.add("of:0000000000000001/1");
            interfaces.add("of:0000000000000001/2");
            return interfaces;
        }

        @Override
        public ConnectPoint getControlPlaneConnectPoint() {
            return SW1_ETH1;
        }

        @Override
        public boolean getOspfEnabled() {
            return true;
        }
    }

    // Reports the device as always available and captures the DeviceListener.
    private class TestDeviceService extends DeviceServiceAdapter {
        @Override
        public boolean isAvailable(DeviceId deviceId) {
            return true;
        }

        @Override
        public void addListener(DeviceListener listener) {
            SingleSwitchFibInstallerTest.this.deviceListener = listener;
        }
    }
}
| |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.Charset;
import java.util.HashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.io.Charsets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.BiMap;
import com.google.common.collect.HashBiMap;
/**
* A simple shell-based implementation of {@link IdMappingServiceProvider}
* Map id to user name or group name. It does update every 15 minutes. Only a
* single instance of this class is expected to be on the server.
*
* The maps are incrementally updated as described below:
* 1. Initialize the maps as empty.
* 2. Incrementally update the maps
* - When ShellBasedIdMapping is requested for user or group name given
* an ID, or for ID given a user or group name, do look up in the map
* first, if it doesn't exist, find the corresponding entry with shell
* command, and insert the entry to the maps.
* - When group ID is requested for a given group name, and if the
* group name is numerical, the full group map is loaded. Because we
* don't have a good way to find the entry for a numerical group name,
* loading the full map helps to get in all entries.
* 3. Periodically refresh the maps for both user and group, e.g,
* do step 1.
* Note: for testing purpose, step 1 may initial the maps with full mapping
* when using constructor
* {@link ShellBasedIdMapping#ShellBasedIdMapping(Configuration, boolean)}.
*/
public class ShellBasedIdMapping implements IdMappingServiceProvider {

    private static final Log LOG =
        LogFactory.getLog(ShellBasedIdMapping.class);

    // Host OS name; used to pick between Linux (getent) and Mac (dscl) commands.
    private final static String OS = System.getProperty("os.name");

    /** Shell commands to get users and groups */
    static final String GET_ALL_USERS_CMD = "getent passwd | cut -d: -f1,3";
    static final String GET_ALL_GROUPS_CMD = "getent group | cut -d: -f1,3";
    static final String MAC_GET_ALL_USERS_CMD = "dscl . -list /Users UniqueID";
    static final String MAC_GET_ALL_GROUPS_CMD = "dscl . -list /Groups PrimaryGroupID";

    // Optional static uid/gid mapping file; parsed lazily into staticMapping.
    private final File staticMappingFile;
    private StaticMapping staticMapping = null;
    // For testing only: when true, full maps are (re)constructed instead of cleared.
    private boolean constructFullMapAtInit = false;

    // Used for parsing the static mapping file.
    private static final Pattern EMPTY_LINE = Pattern.compile("^\\s*$");
    private static final Pattern COMMENT_LINE = Pattern.compile("^\\s*#.*$");
    private static final Pattern MAPPING_LINE =
        Pattern.compile("^(uid|gid)\\s+(\\d+)\\s+(\\d+)\\s*(#.*)?$");

    // Refresh interval in milliseconds (floored to the configured minimum).
    final private long timeout;

    // Maps for id to name map. Guarded by this object monitor lock
    private BiMap<Integer, String> uidNameMap = HashBiMap.create();
    private BiMap<Integer, String> gidNameMap = HashBiMap.create();

    private long lastUpdateTime = 0; // Last time maps were updated
    /**
     * Constructor.
     *
     * @param conf the configuration
     * @param constructFullMapAtInit initialize the maps with full mapping when
     *        true, otherwise initialize the maps to empty. This parameter is
     *        intended for testing only, its default is false.
     * @throws IOException if the initial map update fails
     */
    @VisibleForTesting
    public ShellBasedIdMapping(Configuration conf,
        boolean constructFullMapAtInit) throws IOException {
        this.constructFullMapAtInit = constructFullMapAtInit;

        long updateTime = conf.getLong(
            IdMappingConstant.USERGROUPID_UPDATE_MILLIS_KEY,
            IdMappingConstant.USERGROUPID_UPDATE_MILLIS_DEFAULT);
        // Minimal interval is 1 minute
        if (updateTime < IdMappingConstant.USERGROUPID_UPDATE_MILLIS_MIN) {
            LOG.info("User configured user account update time is less"
                + " than 1 minute. Use 1 minute instead.");
            timeout = IdMappingConstant.USERGROUPID_UPDATE_MILLIS_MIN;
        } else {
            timeout = updateTime;
        }

        String staticFilePath =
            conf.get(IdMappingConstant.STATIC_ID_MAPPING_FILE_KEY,
                IdMappingConstant.STATIC_ID_MAPPING_FILE_DEFAULT);
        staticMappingFile = new File(staticFilePath);

        updateMaps();
    }
    /**
     * Constructor. Initializes user and group maps to empty.
     *
     * @param conf the configuration
     * @throws IOException if the initial map update fails
     */
    public ShellBasedIdMapping(Configuration conf) throws IOException {
        this(conf, false);
    }
    @VisibleForTesting
    public long getTimeout() {
        return timeout;
    }

    @VisibleForTesting
    public BiMap<Integer, String> getUidNameMap() {
        return uidNameMap;
    }

    @VisibleForTesting
    public BiMap<Integer, String> getGidNameMap() {
        return gidNameMap;
    }

    // Empties both maps and resets the expiry clock, so lookups repopulate incrementally.
    @VisibleForTesting
    synchronized public void clearNameMaps() {
        uidNameMap.clear();
        gidNameMap.clear();
        lastUpdateTime = Time.monotonicNow();
    }

    // True when the maps are older than the configured refresh timeout.
    synchronized private boolean isExpired() {
        return Time.monotonicNow() - lastUpdateTime > timeout;
    }
// If can't update the maps, will keep using the old ones
private void checkAndUpdateMaps() {
if (isExpired()) {
LOG.info("Update cache now");
try {
updateMaps();
} catch (IOException e) {
LOG.error("Can't update the maps. Will use the old ones,"
+ " which can potentially cause problem.", e);
}
}
}
    // Explanation appended to every duplicate-entry warning; duplicates matter because
    // HDFS (non-kerberos) identifies users/groups by name alone.
    private static final String DUPLICATE_NAME_ID_DEBUG_INFO =
        "NFS gateway could have problem starting with duplicate name or id on the host system.\n"
        + "This is because HDFS (non-kerberos cluster) uses name as the only way to identify a user or group.\n"
        + "The host system with duplicated user/group name or id might work fine most of the time by itself.\n"
        + "However when NFS gateway talks to HDFS, HDFS accepts only user and group name.\n"
        + "Therefore, same name means the same user or same group. To find the duplicated names/ids, one can do:\n"
        + "<getent passwd | cut -d: -f1,3> and <getent group | cut -d: -f1,3> on Linux systems,\n"
        + "<dscl . -list /Users UniqueID> and <dscl . -list /Groups PrimaryGroupID> on MacOS.";

    // Logs a warning for an entry that collides with one already in the map;
    // the new entry is dropped by the caller.
    private static void reportDuplicateEntry(final String header,
        final Integer key, final String value,
        final Integer ekey, final String evalue) {
        LOG.warn("\n" + header + String.format(
            "new entry (%d, %s), existing entry: (%d, %s).%n%s%n%s",
            key, value, ekey, evalue,
            "The new entry is to be ignored for the following reason.",
            DUPLICATE_NAME_ID_DEBUG_INFO));
    }
/**
* uid and gid are defined as uint32 in linux. Some systems create
* (intended or unintended) <nfsnobody, 4294967294> kind of <name,Id>
* mapping, where 4294967294 is 2**32-2 as unsigned int32. As an example,
* https://bugzilla.redhat.com/show_bug.cgi?id=511876.
* Because user or group id are treated as Integer (signed integer or int32)
* here, the number 4294967294 is out of range. The solution is to convert
* uint32 to int32, so to map the out-of-range ID to the negative side of
* Integer, e.g. 4294967294 maps to -2 and 4294967295 maps to -1.
*/
private static Integer parseId(final String idStr) {
Long longVal = Long.parseLong(idStr);
int intVal = longVal.intValue();
return Integer.valueOf(intVal);
}
    /**
     * Get the list of users or groups returned by the specified command,
     * and save them in the corresponding map.
     *
     * @param map destination id-to-name bimap; entries are added, never removed
     * @param mapName "user" or "group", used only in log/error messages
     * @param command shell pipeline executed via {@code bash -c}, expected to
     *        emit one "name&lt;sep&gt;id" entry per line
     * @param regex separator regex between name and id (":" on Linux, "\\s+" on Mac)
     * @param staticMapping translation of system ids to statically configured ids;
     *        presumably returns the id itself when no static entry exists
     *        (TODO confirm against the StaticMapping implementation — a plain
     *        HashMap would yield a null key here for unmapped ids)
     * @return true if at least one new entry was added to the map
     * @throws IOException if the command fails or a line cannot be parsed
     */
    @VisibleForTesting
    public static boolean updateMapInternal(BiMap<Integer, String> map,
        String mapName, String command, String regex,
        Map<Integer, Integer> staticMapping) throws IOException {
        boolean updated = false;
        BufferedReader br = null;
        try {
            Process process = Runtime.getRuntime().exec(
                new String[] { "bash", "-c", command });
            br = new BufferedReader(
                new InputStreamReader(process.getInputStream(),
                    Charset.defaultCharset()));
            String line = null;
            while ((line = br.readLine()) != null) {
                String[] nameId = line.split(regex);
                if ((nameId == null) || (nameId.length != 2)) {
                    throw new IOException("Can't parse " + mapName + " list entry:" + line);
                }
                LOG.debug("add to " + mapName + "map:" + nameId[0] + " id:" + nameId[1]);
                // HDFS can't differentiate duplicate names with simple authentication
                final Integer key = staticMapping.get(parseId(nameId[1]));
                final String value = nameId[0];
                if (map.containsKey(key)) {
                    final String prevValue = map.get(key);
                    if (value.equals(prevValue)) {
                        // silently ignore equivalent entries
                        continue;
                    }
                    // Same id, different name: keep the existing entry, warn about the new one.
                    reportDuplicateEntry(
                        "Got multiple names associated with the same id: ",
                        key, value, key, prevValue);
                    continue;
                }
                if (map.containsValue(value)) {
                    // Same name, different id: keep the existing entry (BiMap forbids
                    // duplicate values), warn about the new one.
                    final Integer prevKey = map.inverse().get(value);
                    reportDuplicateEntry(
                        "Got multiple ids associated with the same name: ",
                        key, value, prevKey, value);
                    continue;
                }
                map.put(key, value);
                updated = true;
            }
            LOG.debug("Updated " + mapName + " map size: " + map.size());
        } catch (IOException e) {
            LOG.error("Can't update " + mapName + " map");
            throw e;
        } finally {
            if (br != null) {
                try {
                    br.close();
                } catch (IOException e1) {
                    // Close failure is logged but deliberately not rethrown:
                    // the map update itself may have succeeded.
                    LOG.error("Can't close BufferedReader of command result", e1);
                }
            }
        }
        return updated;
    }
private boolean checkSupportedPlatform() {
if (!OS.startsWith("Linux") && !OS.startsWith("Mac")) {
LOG.error("Platform is not supported:" + OS
+ ". Can't update user map and group map and"
+ " 'nobody' will be used for any user and group.");
return false;
}
return true;
}
private static boolean isInteger(final String s) {
try {
Integer.parseInt(s);
} catch(NumberFormatException e) {
return false;
}
// only got here if we didn't return false
return true;
}
    // Loads the static uid/gid mapping file if present; otherwise installs an
    // empty mapping so callers never see a null staticMapping.
    private void initStaticMapping() throws IOException {
        staticMapping = new StaticMapping(
            new HashMap<Integer, Integer>(), new HashMap<Integer, Integer>());
        if (staticMappingFile.exists()) {
            LOG.info("Using '" + staticMappingFile + "' for static UID/GID mapping...");
            staticMapping = parseStaticMap(staticMappingFile);
        } else {
            LOG.info("Not doing static UID/GID mapping because '" + staticMappingFile
                + "' does not exist.");
        }
    }
    /**
     * Reset the maps to empty.
     * For testing code, a full map may be re-constructed here when the object
     * was created with constructFullMapAtInit being set to true.
     *
     * No-op on unsupported platforms.
     */
    synchronized public void updateMaps() throws IOException {
        if (!checkSupportedPlatform()) {
            return;
        }

        if (constructFullMapAtInit) {
            loadFullMaps();
        } else {
            clearNameMaps();
        }
    }
    // Rebuilds the full uid-to-name map from the OS user database; the new map
    // is built aside and swapped in only after the shell command succeeds.
    synchronized private void loadFullUserMap() throws IOException {
        if (staticMapping == null) {
            initStaticMapping();
        }
        BiMap<Integer, String> uMap = HashBiMap.create();
        if (OS.startsWith("Mac")) {
            updateMapInternal(uMap, "user", MAC_GET_ALL_USERS_CMD, "\\s+",
                staticMapping.uidMapping);
        } else {
            updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":",
                staticMapping.uidMapping);
        }
        uidNameMap = uMap;
        lastUpdateTime = Time.monotonicNow();
    }
    // Rebuilds the full gid-to-name map from the OS group database; mirrors
    // loadFullUserMap() but for groups.
    synchronized private void loadFullGroupMap() throws IOException {
        if (staticMapping == null) {
            initStaticMapping();
        }
        BiMap<Integer, String> gMap = HashBiMap.create();

        if (OS.startsWith("Mac")) {
            updateMapInternal(gMap, "group", MAC_GET_ALL_GROUPS_CMD, "\\s+",
                staticMapping.gidMapping);
        } else {
            updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":",
                staticMapping.gidMapping);
        }
        gidNameMap = gMap;
        lastUpdateTime = Time.monotonicNow();
    }
    // Reloads static mapping plus both full user and group maps.
    synchronized private void loadFullMaps() throws IOException {
        initStaticMapping();
        loadFullUserMap();
        loadFullGroupMap();
    }
// search for id with given name, return "<name>:<id>"
// return
// getent group <name> | cut -d: -f1,3
// OR
// id -u <name> | awk '{print "<name>:"$1 }'
//
private String getName2IdCmdLinux(final String name, final boolean isGrp) {
String cmd;
if (isGrp) {
cmd = "getent group " + name + " | cut -d: -f1,3";
} else {
cmd = "id -u " + name + " | awk '{print \"" + name + ":\"$1 }'";
}
return cmd;
}
// search for name with given id, return "<name>:<id>"
private String getId2NameCmdLinux(final int id, final boolean isGrp) {
String cmd = "getent ";
cmd += isGrp? "group " : "passwd ";
cmd += String.valueOf(id) + " | cut -d: -f1,3";
return cmd;
}
    // "dscl . -read /Users/<name> | grep UniqueID" returns "UniqueId: <id>",
    // "dscl . -read /Groups/<name> | grep PrimaryGroupID" returns "PrimaryGoupID: <id>"
    // The following method returns a command that uses awk to process the result,
    // of these commands, and returns "<name> <id>", to simulate one entry returned by
    // MAC_GET_ALL_USERS_CMD or MAC_GET_ALL_GROUPS_CMD.
    // Specifically, this method returns:
    // id -u <name> | awk '{print "<name> "$1 }'
    // OR
    // dscl . -read /Groups/<name> | grep PrimaryGroupID | awk '($1 == "PrimaryGroupID:") { print "<name> " $2 }'
    //
    private String getName2IdCmdMac(final String name, final boolean isGrp) {
        String cmd;
        if (isGrp) {
            cmd = "dscl . -read /Groups/" + name;
            cmd += " | grep PrimaryGroupID | awk '($1 == \"PrimaryGroupID:\") ";
            cmd += "{ print \"" + name + " \" $2 }'";
        } else {
            cmd = "id -u " + name + " | awk '{print \"" + name + " \"$1 }'";
        }
        return cmd;
    }
// Builds a Mac OS shell command that resolves a uid/gid to "<name> <id>".
// "dscl . -search /Users UniqueID <id>" (or /Groups PrimaryGroupID) prints
// the match over three lines; the sed stages below join the lines and strip
// the "UniqueID = (" / "PrimaryGroupID = (" wrapper, the ")" and any quotes
// (negative ids such as nfsnobody are quoted by dscl). The result matches
// one entry of MAC_GET_ALL_USERS_CMD / MAC_GET_ALL_GROUPS_CMD output:
//   dscl . -search /Users UniqueID <id> | sed 'N;s/\n//g;N;s/\n//g'
//     | sed 's/UniqueID = (//g' | sed 's/)//g' | sed 's/"//g'
private String getId2NameCmdMac(final int id, final boolean isGrp) {
  final String searchPath = isGrp ? "Groups PrimaryGroupID " : "Users UniqueID ";
  final String attribute = isGrp ? "PrimaryGroupID" : "UniqueID";
  StringBuilder cmd = new StringBuilder("dscl . -search /");
  cmd.append(searchPath).append(String.valueOf(id));
  cmd.append(" | sed 'N;s/\\n//g;N;s/\\n//g' | sed 's/");
  cmd.append(attribute).append(" = (//g' | sed 's/)//g' | sed 's/\\\"//g'");
  return cmd.toString();
}
/**
 * Incrementally refreshes the id&lt;-&gt;name maps for a single user/group name
 * by shelling out for just that entry, instead of reloading the full map.
 *
 * @param name  user or group name to resolve
 * @param isGrp true to update the group map, false for the user map
 * @throws IOException if the underlying shell command fails
 */
synchronized private void updateMapIncr(final String name,
    final boolean isGrp) throws IOException {
  if (!checkSupportedPlatform()) {
    return;
  }
  // A purely-numeric group "name" cannot be resolved by the targeted
  // group lookup used below; fall back to a full group reload.
  // NOTE(review): presumably this handles groups whose names are numbers
  // -- confirm against the lookup commands.
  if (isInteger(name) && isGrp) {
    loadFullGroupMap();
    return;
  }
  boolean updated = false;
  if (staticMapping == null) {
    initStaticMapping();
  }
  // Field separators differ per platform: Linux getent/cut output is
  // ':'-separated, the Mac awk pipelines emit whitespace-separated fields.
  if (OS.startsWith("Linux")) {
    if (isGrp) {
      updated = updateMapInternal(gidNameMap, "group",
          getName2IdCmdLinux(name, true), ":",
          staticMapping.gidMapping);
    } else {
      updated = updateMapInternal(uidNameMap, "user",
          getName2IdCmdLinux(name, false), ":",
          staticMapping.uidMapping);
    }
  } else {
    // Mac
    if (isGrp) {
      updated = updateMapInternal(gidNameMap, "group",
          getName2IdCmdMac(name, true), "\\s+",
          staticMapping.gidMapping);
    } else {
      updated = updateMapInternal(uidNameMap, "user",
          getName2IdCmdMac(name, false), "\\s+",
          staticMapping.uidMapping);
    }
  }
  // Only bump the refresh timestamp when something actually changed.
  if (updated) {
    lastUpdateTime = Time.monotonicNow();
  }
}
/**
 * Incrementally refreshes the id&lt;-&gt;name maps for a single uid/gid by
 * shelling out for just that entry, instead of reloading the full map.
 *
 * @param id    uid or gid to resolve
 * @param isGrp true to update the group map, false for the user map
 * @throws IOException if the underlying shell command fails
 */
synchronized private void updateMapIncr(final int id,
    final boolean isGrp) throws IOException {
  if (!checkSupportedPlatform()) {
    return;
  }
  boolean updated = false;
  if (staticMapping == null) {
    initStaticMapping();
  }
  // Field separators differ per platform: Linux getent/cut output is
  // ':'-separated, the Mac sed pipelines emit whitespace-separated fields.
  if (OS.startsWith("Linux")) {
    if (isGrp) {
      updated = updateMapInternal(gidNameMap, "group",
          getId2NameCmdLinux(id, true), ":",
          staticMapping.gidMapping);
    } else {
      updated = updateMapInternal(uidNameMap, "user",
          getId2NameCmdLinux(id, false), ":",
          staticMapping.uidMapping);
    }
  } else {
    // Mac
    if (isGrp) {
      updated = updateMapInternal(gidNameMap, "group",
          getId2NameCmdMac(id, true), "\\s+",
          staticMapping.gidMapping);
    } else {
      updated = updateMapInternal(uidNameMap, "user",
          getId2NameCmdMac(id, false), "\\s+",
          staticMapping.uidMapping);
    }
  }
  // Only bump the refresh timestamp when something actually changed.
  if (updated) {
    lastUpdateTime = Time.monotonicNow();
  }
}
/**
 * A HashMap whose {@code get} returns the key itself when no mapping is
 * present, i.e. unmapped keys "pass through" unchanged. Explicit mappings
 * (including ones to null) are returned as stored.
 */
@SuppressWarnings("serial")
static final class PassThroughMap<K> extends HashMap<K, K> {
  public PassThroughMap() {
    this(new HashMap<K, K>());
  }
  public PassThroughMap(Map<K, K> mapping) {
    super();
    putAll(mapping);
  }
  @SuppressWarnings("unchecked")
  @Override
  public K get(Object key) {
    K mapped = super.get(key);
    // Preserve explicit null mappings: only pass the key through when the
    // key is genuinely absent.
    return (mapped != null || super.containsKey(key)) ? mapped : (K) key;
  }
}
/**
 * Holds the static uid and gid mappings parsed from the static mapping
 * file. Ids with no static entry pass through unchanged thanks to
 * {@link PassThroughMap}.
 */
@VisibleForTesting
static final class StaticMapping {
  // Keyed by local uid; value is the corresponding remote uid.
  final Map<Integer, Integer> uidMapping;
  // Keyed by local gid; value is the corresponding remote gid.
  final Map<Integer, Integer> gidMapping;
  public StaticMapping(Map<Integer, Integer> uidMapping,
      Map<Integer, Integer> gidMapping) {
    this.uidMapping = new PassThroughMap<Integer>(uidMapping);
    this.gidMapping = new PassThroughMap<Integer>(gidMapping);
  }
}
/**
 * Parses a static mapping file into a {@link StaticMapping}.
 *
 * Each mapping line has the form "[uid|gid] [remote id] [local id]"; blank
 * lines and '#' comments are ignored, and unparseable lines are logged and
 * skipped. The resulting maps are keyed by local id, mapping to remote id.
 *
 * @param staticMapFile the file to parse
 * @return the parsed static mapping
 * @throws IOException if the file cannot be read
 */
static StaticMapping parseStaticMap(File staticMapFile)
    throws IOException {
  Map<Integer, Integer> uidMapping = new HashMap<Integer, Integer>();
  Map<Integer, Integer> gidMapping = new HashMap<Integer, Integer>();
  // try-with-resources: the previous explicit finally/close could mask a
  // parse-time exception if close() itself threw.
  try (BufferedReader in = new BufferedReader(new InputStreamReader(
      new FileInputStream(staticMapFile), Charsets.UTF_8))) {
    String line;
    while ((line = in.readLine()) != null) {
      // Skip entirely empty and comment lines.
      if (EMPTY_LINE.matcher(line).matches() ||
          COMMENT_LINE.matcher(line).matches()) {
        continue;
      }
      Matcher lineMatcher = MAPPING_LINE.matcher(line);
      if (!lineMatcher.matches()) {
        LOG.warn("Could not parse line '" + line + "'. Lines should be of " +
            "the form '[uid|gid] [remote id] [local id]'. Blank lines and " +
            "everything following a '#' on a line will be ignored.");
        continue;
      }
      // Safe to parse without further checks: the line matched the regex.
      String firstComponent = lineMatcher.group(1);
      int remoteId = Integer.parseInt(lineMatcher.group(2));
      int localId = Integer.parseInt(lineMatcher.group(3));
      if (firstComponent.equals("uid")) {
        uidMapping.put(localId, remoteId);
      } else {
        gidMapping.put(localId, remoteId);
      }
    }
  }
  return new StaticMapping(uidMapping, gidMapping);
}
/**
 * Resolves a user name to its uid, attempting an incremental map refresh
 * when the name is not yet cached.
 *
 * @param user the user name to resolve
 * @return the uid for the given user
 * @throws IOException if the user cannot be resolved even after a refresh
 */
synchronized public int getUid(String user) throws IOException {
  checkAndUpdateMaps();
  Integer id = uidNameMap.inverse().get(user);
  if (id != null) {
    return id.intValue();
  }
  // Unknown name: try a targeted refresh before giving up.
  updateMapIncr(user, false);
  id = uidNameMap.inverse().get(user);
  if (id == null) {
    throw new IOException("User just deleted?:" + user);
  }
  return id.intValue();
}
/**
 * Resolves a group name to its gid, attempting an incremental map refresh
 * when the name is not yet cached.
 *
 * @param group the group name to resolve
 * @return the gid for the given group
 * @throws IOException if the group cannot be resolved even after a refresh
 */
synchronized public int getGid(String group) throws IOException {
  checkAndUpdateMaps();
  Integer id = gidNameMap.inverse().get(group);
  if (id != null) {
    return id.intValue();
  }
  // Unknown name: try a targeted refresh before giving up.
  updateMapIncr(group, true);
  id = gidNameMap.inverse().get(group);
  if (id == null) {
    throw new IOException("No such group:" + group);
  }
  return id.intValue();
}
/**
 * Maps a uid to a user name, attempting an incremental map refresh when the
 * uid is unknown; falls back to {@code unknown} if it still cannot resolve.
 *
 * @param uid     the uid to resolve
 * @param unknown default name returned when the uid cannot be resolved
 * @return the resolved user name, or {@code unknown}
 */
synchronized public String getUserName(int uid, String unknown) {
  checkAndUpdateMaps();
  String uname = uidNameMap.get(uid);
  if (uname == null) {
    try {
      updateMapIncr(uid, false);
    } catch (Exception e) {
      // Fix: this failure was previously swallowed silently; log it so
      // lookup problems are diagnosable. Resolution still proceeds to the
      // 'unknown' fallback below.
      LOG.warn("Can't update map for uid " + uid, e);
    }
    uname = uidNameMap.get(uid);
    if (uname == null) {
      LOG.warn("Can't find user name for uid " + uid
          + ". Use default user name " + unknown);
      uname = unknown;
    }
  }
  return uname;
}
/**
 * Maps a gid to a group name, attempting an incremental map refresh when
 * the gid is unknown; falls back to {@code unknown} if it still cannot
 * resolve.
 *
 * @param gid     the gid to resolve
 * @param unknown default name returned when the gid cannot be resolved
 * @return the resolved group name, or {@code unknown}
 */
synchronized public String getGroupName(int gid, String unknown) {
  checkAndUpdateMaps();
  String gname = gidNameMap.get(gid);
  if (gname == null) {
    try {
      updateMapIncr(gid, true);
    } catch (Exception e) {
      // Fix: this failure was previously swallowed silently; log it so
      // lookup problems are diagnosable. Resolution still proceeds to the
      // 'unknown' fallback below.
      LOG.warn("Can't update map for gid " + gid, e);
    }
    gname = gidNameMap.get(gid);
    if (gname == null) {
      LOG.warn("Can't find group name for gid " + gid
          + ". Use default group name " + unknown);
      gname = unknown;
    }
  }
  return gname;
}
// Resolves a user name to a uid; when the user cannot be mapped, the
// name's String hashcode is used as a stand-in uid (logged at info).
public int getUidAllowingUnknown(String user) {
  checkAndUpdateMaps();
  try {
    return getUid(user);
  } catch (IOException e) {
    final int fallback = user.hashCode();
    LOG.info("Can't map user " + user + ". Use its string hashcode:" + fallback);
    return fallback;
  }
}
// Resolves a group name to a gid; when the group cannot be mapped, the
// name's String hashcode is used as a stand-in gid (logged at info).
public int getGidAllowingUnknown(String group) {
  checkAndUpdateMaps();
  try {
    return getGid(group);
  } catch (IOException e) {
    final int fallback = group.hashCode();
    LOG.info("Can't map group " + group + ". Use its string hashcode:" + fallback);
    return fallback;
  }
}
}
| |
/*
The MIT License (MIT)
Copyright (c) 2014 Marcus Craske <limpygnome@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
----------------------------------------------------------------------------
Version: 1.0
Authors: Marcus Craske <limpygnome@gmail.com>
----------------------------------------------------------------------------
*/
package pals.plugins;
import java.util.Arrays;
import org.joda.time.DateTime;
import pals.base.NodeCore;
import pals.base.Plugin;
import pals.base.Settings;
import pals.base.TemplateManager;
import pals.base.UUID;
import pals.base.Version;
import pals.base.WebManager;
import pals.base.assessment.Assignment;
import pals.base.assessment.AssignmentQuestion;
import pals.base.assessment.InstanceAssignment;
import pals.base.assessment.Module;
import pals.base.assessment.Question;
import pals.base.auth.User;
import pals.base.database.Connector;
import pals.base.utils.JarIO;
import pals.base.utils.Misc;
import pals.base.web.MultipartUrlParser;
import pals.base.web.RemoteRequest;
import pals.base.web.RemoteResponse;
import pals.base.web.WebRequestData;
import pals.base.web.security.CSRF;
import pals.base.web.security.Escaping;
import pals.plugins.web.Captcha;
/**
* The default web-interface for modules.
*/
public class Modules extends Plugin
{
// Methods - Constructors **************************************************
/**
 * Creates a new instance of this plugin; delegates entirely to the base
 * {@link Plugin} constructor.
 */
public Modules(NodeCore core, UUID uuid, JarIO jario, Version version, Settings settings, String jarPath)
{
    super(core, uuid, jario, version, settings, jarPath);
}
// Methods - Event Handlers ************************************************
/**
 * Invoked when the plugin is installed; no installation work is required.
 *
 * @param core The node core.
 * @param conn Database connector.
 * @return Always true (nothing can fail here).
 */
@Override
public boolean eventHandler_pluginInstall(NodeCore core, Connector conn)
{
    return true;
}
/**
 * Invoked when the plugin is uninstalled; no teardown work is required.
 *
 * @param core The node core.
 * @param conn Database connector.
 * @return Always true (nothing can fail here).
 */
@Override
public boolean eventHandler_pluginUninstall(NodeCore core, Connector conn)
{
    return true;
}
/**
 * Invoked when the plugin is loaded; no load-time work is required.
 *
 * @param core The node core.
 * @return Always true (nothing can fail here).
 */
@Override
public boolean eventHandler_pluginLoad(NodeCore core)
{
    return true;
}
/**
 * Invoked when the plugin is unloaded; releases the URLs and templates
 * this plugin registered.
 *
 * @param core The node core.
 */
@Override
public void eventHandler_pluginUnload(NodeCore core)
{
    // Unregister URLs
    core.getWebManager().urlsUnregister(this);
    // Unregister templates
    core.getTemplates().remove(this);
}
/**
 * Registers the URL paths handled by this plugin.
 *
 * @param core The node core.
 * @param web The web-manager to register with.
 * @return True if both paths were registered, false otherwise.
 */
@Override
public boolean eventHandler_registerUrls(NodeCore core, WebManager web)
{
    // Direct return: succeeds only if registration of both paths succeeds.
    return web.urlsRegister(this, new String[]{
        "modules",
        "admin/modules"
    });
}
/**
 * Loads this plugin's templates from its "templates" directory.
 *
 * @param core The node core.
 * @param manager The template manager to load into.
 * @return True if the templates loaded, false otherwise.
 */
@Override
public boolean eventHandler_registerTemplates(NodeCore core, TemplateManager manager)
{
    // Direct return of the load result.
    return manager.load(this, "templates");
}
/**
 * Routes incoming web-requests for this plugin's registered URLs;
 * anonymous requests are never handled.
 *
 * @param data The data for the web-request.
 * @return True = handled, false = not handled.
 */
@Override
public boolean eventHandler_webRequest(WebRequestData data)
{
    // All module pages require an authenticated user.
    if(data.getUser() == null)
        return false;
    MultipartUrlParser mup = new MultipartUrlParser(data);
    String page;
    switch(mup.getPart(0))
    {
        case "modules":
            if(mup.getPart(1) == null)
                // Overview of all modules belonging to a user
                return pageModules(data);
            else
                // Delegate to module controller
                return pageModule(data, mup);
        case "admin":
            page = mup.getPart(1);
            if(page == null)
                return false;
            switch(page)
            {
                case "modules":
                    String p2 = mup.getPart(2);
                    if(p2 == null)
                        // Overview of all modules
                        return pageAdminModules(data);
                    else if(p2.equals("create"))
                        // Create a new module
                        return pageAdminModule_create(data);
                    else
                        // Specific module...
                        return pageAdminModule(data, mup);
            }
            break;
    }
    return false;
}
/**
 * @return The display title of this plugin.
 */
@Override
public String getTitle()
{
    return "PALS [WEB]: Modules";
}
// Methods - Pages - Main **************************************************
/**
 * Renders the overview of all modules the current user belongs to.
 *
 * @param data The data for the web-request.
 * @return True = handled, false = not handled.
 */
private boolean pageModules(WebRequestData data)
{
    // Build the view models for the current user's modules.
    ModelViewModules[] viewModels = ModelViewModules.load(data.getConnector(), data.getUser());
    // Render the page.
    data.setTemplateData("pals_title", "Modules");
    data.setTemplateData("pals_content", "modules/page_modules");
    data.setTemplateData("models", viewModels);
    return true;
}
/**
 * Controller for a single module: URL part 1 is the module id, part 2
 * selects the sub-page. Only users enrolled on the module may view it.
 *
 * @param data The data for the web-request.
 * @param mup The parsed multipart URL.
 * @return True = handled, false = not handled.
 */
private boolean pageModule(WebRequestData data, MultipartUrlParser mup)
{
    User user = data.getUser();
    // Load the module model; reject unknown modules and non-enrolled users.
    Module module = Module.load(data.getConnector(), mup.parseInt(1, -1));
    if(module == null || !module.isEnrolled(data.getConnector(), user))
        return false;
    // Delegate to the requested sub-page.
    String page = mup.getPart(2);
    if(page == null)
        return pageModuleView(data, mup, module, user);
    if(page.equals("history"))
        return pageModuleAssignmentHistory(data, mup, module, user);
    return false;
}
/**
 * Student view of a module: its visible assignments with their summed
 * weight.
 *
 * @param data The data for the web-request.
 * @param mup The parsed multipart URL.
 * @param module The module being viewed.
 * @param user The current user.
 * @return True = handled, false = not handled.
 */
private boolean pageModuleView(WebRequestData data, MultipartUrlParser mup, Module module, User user)
{
    // Fetch the module's assignments.
    Assignment[] assignments = Assignment.load(data.getConnector(), module, true);
    // Build a view model per assignment while summing the weights.
    ModelViewModule[] viewModels = new ModelViewModule[assignments.length];
    int totalWeight = 0;
    for(int i = 0; i < assignments.length; i++)
    {
        totalWeight += assignments[i].getWeight();
        viewModels[i] = new ModelViewModule(data.getConnector(), assignments[i], user);
    }
    // Render the page.
    data.setTemplateData("pals_title", "Module - "+Escaping.htmlEncode(module.getTitle()));
    data.setTemplateData("pals_content", "modules/page_module");
    // -- Fields
    data.setTemplateData("module", module);
    data.setTemplateData("assignments", viewModels);
    data.setTemplateData("total_weight", totalWeight);
    return true;
}
/**
 * Paginated history of the current user's instances of one assignment;
 * URL part 3 is the assignment id, part 4 the page number (default 1).
 *
 * @param data The data for the web-request.
 * @param mup The parsed multipart URL.
 * @param module The module the assignment belongs to.
 * @param user The current user.
 * @return True = handled, false = not handled.
 */
private boolean pageModuleAssignmentHistory(WebRequestData data, MultipartUrlParser mup, Module module, User user)
{
    final int ASSIGNMENTS_PER_PAGE = 10;
    // Load assignment model
    Assignment ass = Assignment.load(data.getConnector(), module, mup.parseInt(3, -1));
    if(ass == null)
        return false;
    // Parse the page being displayed
    int page = mup.parseInt(4, 1);
    // Fetch one row more than a page holds so we can detect a next page.
    InstanceAssignment[] ias = InstanceAssignment.load(data.getConnector(), ass, user, ASSIGNMENTS_PER_PAGE+1, (ASSIGNMENTS_PER_PAGE*page)-ASSIGNMENTS_PER_PAGE);
    // Setup the page
    data.setTemplateData("pals_title", "Module - "+Escaping.htmlEncode(module.getTitle()));
    data.setTemplateData("pals_content", "modules/page_module_assignment_history");
    // -- Fields
    data.setTemplateData("module", module);
    data.setTemplateData("assignment", ass);
    // Trim the extra detection row before display.
    data.setTemplateData("assignments", ias.length > ASSIGNMENTS_PER_PAGE ? Arrays.copyOf(ias, ASSIGNMENTS_PER_PAGE) : ias);
    data.setTemplateData("page", page);
    if(page > 1)
        data.setTemplateData("page_prev", page-1);
    if(page < Integer.MAX_VALUE && ias.length > ASSIGNMENTS_PER_PAGE)
        data.setTemplateData("page_next", page+1);
    return true;
}
// Methods - Pages - Admin *************************************************
/**
 * Admin overview listing every module; restricted to module administrators.
 *
 * @param data The data for the web-request.
 * @return True = handled, false = not handled.
 */
private boolean pageAdminModules(WebRequestData data)
{
    // Only module administrators may view this page.
    if(!data.getUser().getGroup().isAdminModules())
        return false;
    // Render the page.
    data.setTemplateData("pals_title", "Admin - Modules");
    data.setTemplateData("pals_content", "modules/page_admin_modules");
    data.setTemplateData("modules", Module.loadAll(data.getConnector()));
    return true;
}
/**
 * Admin page for creating a new module; handles the creation postback and
 * renders the creation form. Restricted to module administrators.
 *
 * @param data The data for the web-request.
 * @return True = handled, false = not handled.
 */
private boolean pageAdminModule_create(WebRequestData data)
{
    // Check permissions
    User user = data.getUser();
    if(!user.getGroup().isAdminModules())
        return false;
    // Check field data
    RemoteRequest request = data.getRequestData();
    String moduleTitle = request.getField("module_title");
    String csrf = request.getField("csrf");
    if(moduleTitle != null)
    {
        // Check security
        if(!CSRF.isSecure(data, csrf))
            data.setTemplateData("error", "Invalid request; please try again or contact an administrator!");
        else
        {
            Module m = new Module(moduleTitle);
            switch(m.persist(data.getConnector()))
            {
                case Failed:
                    data.setTemplateData("error", "An error occurred; please try again!");
                    break;
                case Failed_title_length:
                    data.setTemplateData("error", "Title must be "+m.getTitleMin()+" to "+m.getTitleMax()+" characters in length!");
                    break;
                case Success:
                    data.getResponseData().setRedirectUrl("/admin/modules/"+m.getModuleID());
                    break;
            }
        }
    }
    // Setup the page
    data.setTemplateData("pals_title", "Admin - Modules - Create");
    data.setTemplateData("pals_content", "modules/page_admin_module_create");
    // -- Set fields
    // Fix: on a plain GET, moduleTitle is null; guard before encoding to
    // avoid a possible NPE in Escaping.htmlEncode (its null behaviour is
    // not visible here -- defensive either way).
    data.setTemplateData("module_title", moduleTitle != null ? Escaping.htmlEncode(moduleTitle) : null);
    data.setTemplateData("csrf", CSRF.set(data));
    return true;
}
/**
 * Admin controller for a specific module; URL part 2 is the module id and
 * part 3 selects the sub-page (assignments, enrollment, edit, delete,
 * marks). Restricted to module administrators.
 *
 * @param data The data for the web-request.
 * @param mup The parsed multipart URL.
 * @return True = handled, false = not handled.
 */
private boolean pageAdminModule(WebRequestData data, MultipartUrlParser mup)
{
    // Check permissions
    User user = data.getUser();
    if(!user.getGroup().isAdminModules())
        return false;
    // Parse the module identifier from the URL
    int moduleid = mup.parseInt(2, -1);
    if(moduleid == -1)
        return false;
    // Load the model for the module
    Module module = Module.load(data.getConnector(), moduleid);
    if(module == null)
        return false;
    // Handle the page
    String page = mup.getPart(3);
    if(page == null)
        return pageAdminModule_view(data, module);
    else
    {
        switch(page)
        {
            case "assignments":
                page = mup.getPart(4);
                if(page == null)
                    return pageAdminModule_assignmentsView(data, module);
                else
                    switch(page)
                    {
                        case "create":
                            return pageAdminModule_assignmentCreate(data, module);
                        default:
                            // Part 4 is an assignment id; delegate further.
                            return pageAdminModule_assignment(data, module, page, mup);
                    }
            case "enrollment":
                return pageAdminModule_enrollment(data, module);
            case "edit":
                return pageAdminModule_edit(data, module);
            case "delete":
                return pageAdminModule_delete(data, module);
            case "marks":
                return pageAdminModule_marks(data, module);
            default:
                return false;
        }
    }
}
/**
 * Admin view of a single module: its active assignments and enrolled users.
 *
 * @param data The data for the web-request.
 * @param module The module being viewed.
 * @return True = handled, false = not handled.
 */
private boolean pageAdminModule_view(WebRequestData data, Module module)
{
    // Fetch the currently-active assignments for the module.
    Assignment[] activeAssignments = Assignment.loadActive(data.getConnector(), module, true);
    // Render the page.
    data.setTemplateData("pals_title", "Admin - Module - "+Escaping.htmlEncode(module.getTitle()));
    data.setTemplateData("pals_content", "modules/page_admin_module");
    data.setTemplateData("module", module);
    data.setTemplateData("module_users", module.usersEnrolled(data.getConnector()));
    data.setTemplateData("assignments", activeAssignments);
    return true;
}
/**
 * Admin page for editing a module's title; handles the edit postback and
 * renders the edit form.
 *
 * NOTE(review): this uses the single-argument CSRF.isSecure(data) overload,
 * unlike sibling pages which pass the csrf field explicitly -- presumably
 * the overload reads a default field; confirm.
 *
 * @param data The data for the web-request.
 * @param module The module being edited.
 * @return True = handled, false = not handled.
 */
private boolean pageAdminModule_edit(WebRequestData data, Module module)
{
    // Check for postback
    RemoteRequest req = data.getRequestData();
    String moduleTitle = req.getField("module_title");
    if(moduleTitle != null)
    {
        // Validate request
        if(!CSRF.isSecure(data))
            data.setTemplateData("error", "Invalid request; please try again or contact an administrator!");
        else
        {
            // Update the model
            module.setTitle(moduleTitle);
            // Attempt to persist
            Module.PersistStatus mps = module.persist(data.getConnector());
            switch(mps)
            {
                case Failed:
                    data.setTemplateData("error", "Failed to update model for an unknown reason!");
                    break;
                case Failed_title_length:
                    data.setTemplateData("error", "Title must be "+module.getTitleMin()+" to "+module.getTitleMax()+" characters in length!");
                    break;
                case Success:
                    data.setTemplateData("success", "Successfully updated.");
                    break;
            }
        }
    }
    // Setup the page
    data.setTemplateData("pals_title", "Admin - Module - "+Escaping.htmlEncode(module.getTitle()) + " - Edit");
    data.setTemplateData("pals_content", "modules/page_admin_module_edit");
    // -- Fields; redisplay the submitted title, else the current title.
    data.setTemplateData("csrf", CSRF.set(data));
    data.setTemplateData("module", module);
    data.setTemplateData("module_title", moduleTitle != null ? moduleTitle : module.getTitle());
    return true;
}
/**
 * Admin page for deleting a module; the postback requires both a valid
 * CSRF token and a correct captcha before the module is removed.
 *
 * @param data The data for the web-request.
 * @param module The module to delete.
 * @return True = handled, false = not handled.
 */
private boolean pageAdminModule_delete(WebRequestData data, Module module)
{
    // Check for postback
    RemoteRequest request = data.getRequestData();
    String deleteModule = request.getField("delete_module");
    String csrf = request.getField("csrf");
    if(deleteModule != null && deleteModule.equals("1"))
    {
        // Validate security
        if(!CSRF.isSecure(data, csrf))
            data.setTemplateData("error", "Invalid request; please try again or contact an administrator!");
        else if(!Captcha.isCaptchaCorrect(data))
            data.setTemplateData("error", "Incorrect captcha verification code!");
        else
        {
            // Delete the module...
            module.delete(data.getConnector());
            // Inform nodes so background cleanup runs for the removed data.
            data.getCore().getRMI().nodesGlobalEventAll("base.cleaner.wake", new Object[]{});
            // Redirect to modules page
            data.getResponseData().setRedirectUrl("/admin/modules");
        }
    }
    // Setup the page
    data.setTemplateData("pals_title", "Admin - Module - "+Escaping.htmlEncode(module.getTitle()) + " - Delete");
    data.setTemplateData("pals_content", "modules/page_admin_module_delete");
    data.setTemplateData("module", module);
    // -- Fields
    data.setTemplateData("csrf", CSRF.set(data));
    return true;
}
/**
 * Admin marks page for a module: shows either a single user's marks (URL
 * part 4 is a user id) or all users' marks, with optional CSV/print views
 * selected by part 4 ("download.csv" / "print").
 *
 * @param data The data for the web-request.
 * @param module The module whose marks are displayed.
 * @return True = handled, false = not handled.
 */
private boolean pageAdminModule_marks(WebRequestData data, Module module)
{
    // Fix: removed an unused RemoteRequest local previously fetched here.
    MultipartUrlParser mup = new MultipartUrlParser(data);
    // Check if we're displaying all marks or for a single user
    int userid;
    if((userid = mup.parseInt(4, -1)) != -1)
    {
        // Fetch mark for user
        User u = User.load(data.getConnector(), userid);
        if(u == null)
            return false;
        ModelAssHighest.ModelModule mm[] = ModelAssHighest.loadModule(data.getConnector(), module, u);
        if(mm.length != 1)
            return false;
        data.setTemplateData("marks", mm[0]);
        data.setTemplateData("pals_content", "modules/page_admin_module_marks_user");
    }
    else
    {
        // Fetch marks for every user of the module.
        data.setTemplateData("marks", ModelAssHighest.loadModule(data.getConnector(), module, null));
        // Check view/download type
        String type = mup.getPart(4);
        if(type == null)
            type = "";
        switch(type)
        {
            case "download.csv":
                data.setTemplateData("pals_page", "modules/page_admin_module_marks_csv");
                break;
            case "print":
                data.setTemplateData("pals_page", "modules/page_admin_module_marks_print");
                break;
            default:
                data.setTemplateData("pals_content", "modules/page_admin_module_marks");
                break;
        }
    }
    // Setup the page
    data.setTemplateData("pals_title", "Admin - Module - "+Escaping.htmlEncode(module.getTitle()) + " - Marks");
    data.setTemplateData("module", module);
    return true;
}
/**
 * Admin page for managing the users enrolled on a module; handles three
 * postbacks (bulk add, remove one, remove all) and renders the enrollment
 * page.
 *
 * @param data The data for the web-request.
 * @param module The module being managed.
 * @return True = handled, false = not handled.
 */
private boolean pageAdminModule_enrollment(WebRequestData data, Module module)
{
    // Check field data
    RemoteRequest request = data.getRequestData();
    String moduleUsersAdd = request.getField("module_users_add");
    String remove = request.getField("remove");
    String removeAll = request.getField("remove_all");
    String csrf = request.getField("csrf");
    // -- Adding users; one username per line of the submitted text.
    if(moduleUsersAdd != null && moduleUsersAdd.length() > 0)
    {
        if(!CSRF.isSecure(data, csrf))
            data.setTemplateData("error", "Invalid request; please try again or contact an administrator!");
        else
        {
            // Iterate each line of the input, load the user and add
            boolean failed = false;
            User user;
            String un;
            for(String line : moduleUsersAdd.replace("\r", "").split("\n"))
            {
                un = line.trim();
                if(un.length() > 0)
                {
                    if((user = User.load(data.getConnector(), un)) != null)
                    {
                        module.usersAdd(data.getConnector(), user);
                    }
                    else
                    {
                        // Abort on the first unknown username; users added
                        // before this point remain enrolled.
                        data.setTemplateData("error", "User '"+un+"' does not exist!");
                        failed = true;
                        break;
                    }
                }
            }
            // End transaction
            if(!failed)
            {
                data.setTemplateData("success", "Successfully enrolled users.");
            }
        }
    }
    // -- Removing a user; the field carries the user's id.
    if(remove != null && remove.length() > 0)
    {
        if(!CSRF.isSecure(data, csrf))
            data.setTemplateData("error2", "Invalid request; try again or contact an administrator!");
        else
        {
            try
            {
                int userid = Integer.parseInt(remove);
                User user = User.load(data.getConnector(), userid);
                if(user == null)
                    data.setTemplateData("error2", "User does not exist!");
                else if(!module.usersRemove(data.getConnector(), user))
                    data.setTemplateData("error2", "Could not remove user, please try again!");
            }
            catch(NumberFormatException ex)
            {
                data.setTemplateData("error2", "Invalid request; try again or contact an administrator!");
            }
        }
    }
    // -- Removing all users
    if(removeAll != null && removeAll.equals("1"))
    {
        if(!CSRF.isSecure(data, csrf))
            data.setTemplateData("error2", "Invalid request; try again or contact an administrator!");
        else
        {
            // Remove all the users for the module
            module.usersRemoveAll(data.getConnector());
            // Redirect back to overview (to hide long url)
            data.getResponseData().setRedirectUrl("/admin/modules/"+module.getModuleID()+"/enrollment");
        }
    }
    // Setup the page
    data.setTemplateData("pals_title", "Admin - Module - "+Escaping.htmlEncode(module.getTitle())+" - Enrollment");
    data.setTemplateData("pals_content", "modules/page_admin_enrollment");
    data.setTemplateData("module", module);
    data.setTemplateData("module_users", module.usersEnrolled(data.getConnector()));
    // -- Fields
    data.setTemplateData("module_users_add", moduleUsersAdd);
    data.setTemplateData("csrf", CSRF.set(data));
    return true;
}
// Methods - Pages - Admin - Assignments ***********************************
/**
 * Admin overview of a module's assignments, including their combined
 * weight.
 *
 * @param data The data for the web-request.
 * @param module The module whose assignments are listed.
 * @return True = handled, false = not handled.
 */
private boolean pageAdminModule_assignmentsView(WebRequestData data, Module module)
{
    // Render the page.
    data.setTemplateData("pals_title", "Admin - Module - "+Escaping.htmlEncode(module.getTitle())+" - Assignments");
    data.setTemplateData("pals_content", "modules/page_admin_module_assignments");
    // Fetch every assignment (active or not) and sum the weights.
    Assignment[] assignments = Assignment.load(data.getConnector(), module, false);
    int totalWeight = 0;
    for(int i = 0; i < assignments.length; i++)
        totalWeight += assignments[i].getWeight();
    // -- Fields
    data.setTemplateData("module", module);
    data.setTemplateData("assignments", assignments);
    data.setTemplateData("total_weight", totalWeight);
    return true;
}
/**
 * Admin page for creating an assignment on a module; handles the creation
 * postback and renders the form.
 *
 * @param data The data for the web-request.
 * @param module The module to create the assignment under.
 * @return True = handled, false = not handled.
 */
private boolean pageAdminModule_assignmentCreate(WebRequestData data, Module module)
{
    // Check for postback
    RemoteRequest req = data.getRequestData();
    String assTitle = req.getField("ass_title");
    String assWeight = req.getField("ass_weight");
    String assMaxAttempts = req.getField("ass_max_attempts");
    String csrf = req.getField("csrf");
    if(assTitle != null && assWeight != null && assMaxAttempts != null)
    {
        // Validate request
        if(!CSRF.isSecure(data, csrf))
            data.setTemplateData("error", "Invalid request; please try again or contact an administrator!");
        else
        {
            // Parse numeric fields; malformed input becomes 0, which
            // persist() rejects with the matching status below.
            int maxAttempts = parseIntFieldOrZero(assMaxAttempts);
            int weight = parseIntFieldOrZero(assWeight);
            // Attempt to persist a new assignment
            Assignment ass = new Assignment(module, assTitle, weight, false, maxAttempts, null, false);
            Assignment.PersistStatus ps = ass.persist(data.getConnector());
            switch(ps)
            {
                case Failed:
                case Invalid_Module:
                case Invalid_Due:
                    data.setTemplateData("error", "An unknown error occurred ('"+ps.name()+"'); please try again or contact an administrator!");
                    break;
                case Invalid_Title:
                    data.setTemplateData("error", "Invalid title, must be "+ass.getTitleMin()+" to "+ass.getTitleMax()+" characters in length!");
                    break;
                case Invalid_Weight:
                    data.setTemplateData("error", "Invalid weight, must be a numeric value and greater than zero!");
                    break;
                case Invalid_MaxAttempts:
                    data.setTemplateData("error", "Invalid max-attempts, must be -1 or greater than zero.");
                    break;
                case Success:
                    data.getResponseData().setRedirectUrl("/admin/modules/"+module.getModuleID()+"/assignments/"+ass.getAssID()+"/questions");
                    break;
            }
        }
    }
    // Setup the page
    data.setTemplateData("pals_title", "Admin - Module - "+Escaping.htmlEncode(module.getTitle())+" - Assignments - Create");
    data.setTemplateData("pals_content", "modules/page_admin_module_assignment_create");
    data.setTemplateData("module", module);
    // -- Fields
    data.setTemplateData("ass_title", assTitle);
    data.setTemplateData("ass_weight", assWeight);
    data.setTemplateData("ass_max_attempts", assMaxAttempts);
    data.setTemplateData("csrf", CSRF.set(data));
    return true;
}
/**
 * Parses an integer form field, treating malformed input as zero.
 */
private static int parseIntFieldOrZero(String value)
{
    try
    {
        return Integer.parseInt(value);
    }
    catch(NumberFormatException ex)
    {
        return 0;
    }
}
/**
 * Admin controller for a specific assignment; URL part 4 is the assignment
 * id and part 5 selects the sub-page (edit, delete, questions, print-offs).
 *
 * NOTE(review): the assId parameter is not used -- the assignment id is
 * re-parsed from URL part 4 below.
 *
 * @param data The data for the web-request.
 * @param module The module the assignment belongs to.
 * @param assId Unused; retained for signature compatibility.
 * @param mup The parsed multipart URL.
 * @return True = handled, false = not handled.
 */
private boolean pageAdminModule_assignment(WebRequestData data, Module module, String assId, MultipartUrlParser mup)
{
    // Parse assignment identifier
    int assid = mup.parseInt(4, -1);
    if(assid == -1)
        return false;
    // Load the assignment model
    Assignment ass = Assignment.load(data.getConnector(), module, assid);
    if(ass == null)
        return false;
    // Delegate the request
    String page = mup.getPart(5);
    if(page == null)
        return pageAdminModule_assignmentView(data, module, ass, mup);
    else
    {
        switch(page)
        {
            case "edit":
                return pageAdminModule_assignmentEdit(data, module, ass);
            case "delete":
                return pageAdminModule_assignmentDelete(data, module, ass);
            case "questions":
            {
                page = mup.getPart(6);
                if(page == null)
                    return pageAdminModule_assignment_questions(data, module, ass);
                else
                {
                    switch(page)
                    {
                        case "add":
                            return pageAdminModule_assignment_questionAdd(data, module, ass);
                        default:
                        {
                            // Assume it's an assignment-question model
                            AssignmentQuestion aq = AssignmentQuestion.load(data.getCore(), data.getConnector(), ass, mup.parseInt(6, -1));
                            if(aq == null)
                                return false;
                            switch(mup.getPart(7))
                            {
                                case "edit":
                                    return pageAdminModule_assignment_questionEdit(data, module, ass, aq);
                                case "remove":
                                    return pageAdminModule_assignment_questionRemove(data, module, ass, aq);
                                default:
                                    return false;
                            }
                        }
                    }
                }
            }
            case "print_off":
            case "print_off.csv":
            case "print_off.print":
                return pageAdminModule_assignmentPrintOff(data, module, ass, page);
        }
    }
    return false;
}
/**
 * Admin view of an assignment: a paginated list of its instances (all
 * users); URL part 5 is the page number (default 1).
 *
 * @param data The data for the web-request.
 * @param module The module the assignment belongs to.
 * @param ass The assignment being viewed.
 * @param mup The parsed multipart URL.
 * @return True = handled, false = not handled.
 */
private boolean pageAdminModule_assignmentView(WebRequestData data, Module module, Assignment ass, MultipartUrlParser mup)
{
    final int ASSIGNMENTS_PER_PAGE = 10;
    // Parse the current page (defaults to 1).
    int page = mup.parseInt(5, 1);
    // Fetch one extra row so we can tell whether a next page exists.
    int offset = (ASSIGNMENTS_PER_PAGE * page) - ASSIGNMENTS_PER_PAGE;
    InstanceAssignment[] instances = InstanceAssignment.load(data.getConnector(), ass, null, ASSIGNMENTS_PER_PAGE + 1, offset);
    boolean hasNext = instances.length > ASSIGNMENTS_PER_PAGE;
    // Render the page.
    data.setTemplateData("pals_title", "Admin - Module - "+Escaping.htmlEncode(module.getTitle())+" - Assignments - " + Escaping.htmlEncode(ass.getTitle()));
    data.setTemplateData("pals_content", "modules/page_admin_module_assignment");
    // -- Fields; trim the extra detection row before display.
    data.setTemplateData("module", module);
    data.setTemplateData("assignment", ass);
    data.setTemplateData("assignments", hasNext ? Arrays.copyOf(instances, ASSIGNMENTS_PER_PAGE) : instances);
    data.setTemplateData("page", page);
    if(page < Integer.MAX_VALUE && hasNext)
        data.setTemplateData("page_next", page+1);
    if(page > 1)
        data.setTemplateData("page_prev", page-1);
    return true;
}
/**
 * Admin print-off of an assignment's highest marks per user, selectable as
 * a normal page, printable page, or CSV download via the page parameter
 * ("print_off", "print_off.print", "print_off.csv").
 *
 * @param data The data for the web-request.
 * @param module The module the assignment belongs to.
 * @param ass The assignment whose marks are shown.
 * @param page The URL part selecting the display type.
 * @return True = handled, false = not handled.
 */
private boolean pageAdminModule_assignmentPrintOff(WebRequestData data, Module module, Assignment ass, String page)
{
    // Fetch the highest marks for each user, or zero if they did not achieve anything
    ModelAssHighest[] models = ModelAssHighest.load(data.getConnector(), ass);
    RemoteResponse resp = data.getResponseData();
    // The title is identical for every display type; set it once.
    // (Fix: removed an unused local previously read from the "type" field.)
    data.setTemplateData("pals_title", "Admin - Module - "+Escaping.htmlEncode(module.getTitle())+" - Assignments - " + Escaping.htmlEncode(ass.getTitle()) + " - Highest Marks");
    // Setup the page based on display type
    switch(page)
    {
        case "print_off.csv":
            data.setTemplateData("pals_page", "modules/page_admin_module_assignment_csv");
            resp.setResponseType("text/csv");
            resp.setHeader("Content-Disposition", "attachment; filename=marks.csv");
            break;
        case "print_off.print":
            data.setTemplateData("pals_page", "modules/page_admin_module_assignment_print");
            break;
        default:
            data.setTemplateData("pals_content", "modules/page_admin_module_assignment_view");
            break;
    }
    // -- Fields
    data.setTemplateData("module", module);
    data.setTemplateData("assignment", ass);
    data.setTemplateData("models", models);
    return true;
}
/**
 * Admin page for deleting an assignment; the postback requires a valid
 * CSRF token and a correct captcha before deletion.
 *
 * @param data The data for the web-request.
 * @param module The module the assignment belongs to.
 * @param ass The assignment to delete.
 * @return True = handled, false = not handled.
 */
private boolean pageAdminModule_assignmentDelete(WebRequestData data, Module module, Assignment ass)
{
    // Check postback
    RemoteRequest req = data.getRequestData();
    String delete = req.getField("delete");
    if(delete != null && delete.equals("1"))
    {
        // Validate request
        if(!CSRF.isSecure(data))
            data.setTemplateData("error", "Invalid request; please try again or contact an administrator!");
        else if(!Captcha.isCaptchaCorrect(data))
            data.setTemplateData("error", "Incorrect captcha verification code!");
        else
        {
            // Attempt to unpersist the model
            if(!ass.delete(data.getConnector()))
                data.setTemplateData("error", "Failed to delete assignment for an unknown reason; please try again or contact an administrator!");
            else
            {
                // Inform nodes so background cleanup runs for the removed data.
                data.getCore().getRMI().nodesGlobalEventAll("base.cleaner.wake", new Object[]{});
                // Redirect
                data.getResponseData().setRedirectUrl("/admin/modules/"+module.getModuleID()+"/assignments");
            }
        }
    }
    // Setup the page
    data.setTemplateData("pals_title", "Admin - Module - "+Escaping.htmlEncode(module.getTitle())+" - Assignments - " + Escaping.htmlEncode(ass.getTitle()) + " - Delete");
    data.setTemplateData("pals_content", "modules/page_admin_module_assignment_delete");
    // -- Fields
    data.setTemplateData("module", module);
    data.setTemplateData("assignment", ass);
    data.setTemplateData("csrf", CSRF.set(data));
    return true;
}
/**
 * Handles the edit page for an assignment; allows an administrator to change
 * the title, weight, active flag, max-attempts and optional due-date. The
 * model is only updated on postback with a valid CSRF token.
 *
 * Fix: the update previously re-parsed the raw weight field with
 * Integer.parseInt even though the value had already been parsed (with a
 * 0 default) just above; that made the 0-default dead code and required an
 * extra NumberFormatException handler. The parsed value is now used and
 * persist() reports invalid weights via its Invalid_Weight status.
 *
 * @param data The data for the current web-request.
 * @param module The module to which the assignment belongs.
 * @param ass The assignment being edited.
 * @return True to indicate the request was handled.
 */
private boolean pageAdminModule_assignmentEdit(WebRequestData data, Module module, Assignment ass)
{
    // Check postback
    RemoteRequest req = data.getRequestData();
    String assTitle = req.getField("ass_title");
    String assWeight = req.getField("ass_weight");
    String assActive = req.getField("ass_active"); // Checkbox; non-null when ticked
    String assMaxAttempts = req.getField("ass_max_attempts");
    // --- Due-date
    String assDue = req.getField("ass_due"); // Checkbox; non-null when a due-date is set
    String assDueDay = req.getField("ass_due_day");
    String assDueMonth = req.getField("ass_due_month");
    String assDueYear = req.getField("ass_due_year");
    String assDueHour = req.getField("ass_due_hour");
    String assDueMinute = req.getField("ass_due_minute");
    int year = -1, month = -1, day = -1, hour = -1, minute = -1;
    boolean invalidDueDate = false;
    if(assTitle != null && assWeight != null && assMaxAttempts != null)
    {
        // Parse field data
        // -- Due-date; NumberFormatException is a subclass of
        //    IllegalArgumentException, so missing/non-numeric components are
        //    caught here too
        DateTime due = null;
        if(assDue != null && assDue.equals("1"))
        {
            try
            {
                year = Integer.parseInt(assDueYear);
                month = Integer.parseInt(assDueMonth);
                day = Integer.parseInt(assDueDay);
                hour = Integer.parseInt(assDueHour);
                minute = Integer.parseInt(assDueMinute);
                due = new DateTime(year, month, day, hour, minute);
                // Reset the assignment's handle in-case the time/date has changed
                ass.setDueHandled(false);
            }
            catch(NullPointerException | IllegalArgumentException ex)
            {
                data.setTemplateData("error", "Invalid due-date!");
                invalidDueDate = true;
            }
        }
        if(!invalidDueDate)
        {
            // -- Max-attempts; non-numeric input falls back to 0, which
            //    persist() is expected to reject (Invalid_MaxAttempts)
            int maxAttempts;
            try
            {
                maxAttempts = Integer.parseInt(assMaxAttempts);
            }
            catch(NumberFormatException ex)
            {
                maxAttempts = 0;
            }
            // -- Weight; non-numeric input falls back to 0, which persist()
            //    is expected to reject (Invalid_Weight)
            int weight;
            try
            {
                weight = Integer.parseInt(assWeight);
            }
            catch(NumberFormatException ex)
            {
                weight = 0;
            }
            // Validate request
            if(!CSRF.isSecure(data))
                data.setTemplateData("error", "Invalid request; please try again or contact an administrator!");
            else
            {
                // Update the model using the already-parsed values
                ass.setWeight(weight);
                ass.setTitle(assTitle);
                ass.setActive(assActive != null);
                ass.setMaxAttempts(maxAttempts);
                ass.setDue(due);
                // Attempt to persist
                Assignment.PersistStatus aps = ass.persist(data.getConnector());
                switch(aps)
                {
                    case Invalid_Module:
                    case Failed:
                        data.setTemplateData("error", "Failed to update assignment for unknown reason ('"+aps.name()+"'); please try again or contact an administrator!");
                        break;
                    case Invalid_Title:
                        data.setTemplateData("error", "Invalid title, must be "+ass.getTitleMin()+" to "+ass.getTitleMax()+" characters in length!");
                        break;
                    case Invalid_Weight:
                        data.setTemplateData("error", "Invalid weight, must be numeric and greater than zero!");
                        break;
                    case Invalid_Due:
                        data.setTemplateData("error", "Invalid due-date, also make sure the time is in the future!");
                        break;
                    case Invalid_MaxAttempts:
                        data.setTemplateData("error", "Invalid max-attempts, must be -1 or greater than zero.");
                        break;
                    case Success:
                        data.setTemplateData("success", "Updated assignment successfully.");
                        break;
                }
            }
        }
    }
    // Setup the page
    data.setTemplateData("pals_title", "Admin - Module - "+Escaping.htmlEncode(module.getTitle())+" - Assignments - " + Escaping.htmlEncode(ass.getTitle()) + " - Edit");
    data.setTemplateData("pals_content", "modules/page_admin_module_assignment_edit");
    // -- Fields; posted values take precedence so the form re-renders user input
    data.setTemplateData("csrf", CSRF.set(data));
    data.setTemplateData("module", module);
    data.setTemplateData("assignment", ass);
    data.setTemplateData("ass_title", assTitle != null ? assTitle : ass.getTitle());
    data.setTemplateData("ass_weight", assWeight != null ? assWeight : String.valueOf(ass.getWeight()));
    if((assTitle == null && ass.isActive()) || assActive != null)
        data.setTemplateData("ass_active", true);
    data.setTemplateData("ass_max_attempts", assMaxAttempts != null ? assMaxAttempts : String.valueOf(ass.getMaxAttempts()));
    // -- -- Due
    if((assTitle != null && assDue != null && assDue.equals("1")) || (assTitle == null && ass.getDue() != null))
        data.setTemplateData("ass_due", true);
    data.setTemplateData("ass_year", DateTime.now().getYear());
    data.setTemplateData("ass_due_year", year != -1 ? year : ass.getDue() != null ? ass.getDue().getYear() : -1);
    data.setTemplateData("ass_due_month", month != -1 ? month : ass.getDue() != null ? ass.getDue().getMonthOfYear() : -1);
    data.setTemplateData("ass_due_day", day != -1 ? day : ass.getDue() != null ? ass.getDue().getDayOfMonth() : -1);
    data.setTemplateData("ass_due_hour", hour != -1 ? hour : ass.getDue() != null ? ass.getDue().getHourOfDay() : -1);
    data.setTemplateData("ass_due_minute", minute != -1 ? minute : ass.getDue() != null ? ass.getDue().getMinuteOfHour() : -1);
    return true;
}
/**
 * Displays the questions attached to an assignment and handles postback
 * actions for moving a question between pages or re-ordering it within a
 * page.
 *
 * @param data The data for the current web-request.
 * @param module The module to which the assignment belongs.
 * @param ass The assignment whose questions are listed.
 * @return True when handled; false for malformed/unknown postback data.
 */
private boolean pageAdminModule_assignment_questions(WebRequestData data, Module module, Assignment ass)
{
    RemoteRequest req = data.getRequestData();
    // Handle postback for bumping a question's page / page-order
    String aqid = req.getField("aqid");
    String action = req.getField("action");
    if(aqid != null && action != null)
    {
        int parsedId;
        try
        {
            parsedId = Integer.parseInt(aqid);
        }
        catch(NumberFormatException ex)
        {
            return false;
        }
        // Load the assignment-question being bumped
        AssignmentQuestion aq = AssignmentQuestion.load(data.getCore(), data.getConnector(), ass, parsedId);
        if(aq == null)
            return false;
        // Apply the requested action
        switch(action)
        {
            case "page_up":
                aq.setPage(aq.getPage()-1);
                break;
            case "page_down":
                aq.setPage(aq.getPage()+1);
                break;
            case "order_up":
                aq.setPageOrder(aq.getPageOrder()-1);
                break;
            case "order_down":
                aq.setPageOrder(aq.getPageOrder()+1);
                break;
            default:
                return false;
        }
        // Persist the change
        AssignmentQuestion.PersistStatus aqps = aq.persist(data.getConnector());
        switch(aqps)
        {
            case Failed:
            case Invalid_Assignment:
            case Invalid_Question:
            case Invalid_Weight:
                data.setTemplateData("error", "Failed to apply action, error '"+(aqps.name())+"'; please try again or contact an administrator!");
                break;
            case Invalid_Page:
            case Invalid_PageOrder:
            case Success:
                // Page/page-order violations are treated like success and
                // simply redirect back to the listing — presumably the model
                // clamps or ignores out-of-range bumps; confirm in persist()
                data.getResponseData().setRedirectUrl("/admin/modules/"+module.getModuleID()+"/assignments/"+ass.getAssID()+"/questions");
                break;
        }
    }
    // Fetch the questions and sum their total weight for display
    AssignmentQuestion[] questions = AssignmentQuestion.loadAll(data.getCore(), data.getConnector(), ass);
    int totalWeight = 0;
    for(AssignmentQuestion q : questions)
        totalWeight += q.getWeight();
    // Page template
    data.setTemplateData("pals_title", "Admin - Module - "+Escaping.htmlEncode(module.getTitle())+" - Assignments - Questions");
    data.setTemplateData("pals_content", "modules/page_admin_assignment_questions");
    // Template fields
    data.setTemplateData("module", module);
    data.setTemplateData("assignment", ass);
    data.setTemplateData("questions", questions);
    data.setTemplateData("total_weight", totalWeight);
    return true;
}
/**
 * Handles adding an existing question to an assignment; redirects to the
 * question-selection page when no (valid) question id is supplied, and
 * persists a new assignment-question on postback of a weight.
 *
 * @param data The data for the current web-request.
 * @param module The module to which the assignment belongs.
 * @param ass The assignment receiving the question.
 * @return True to indicate the request was handled.
 */
private boolean pageAdminModule_assignment_questionAdd(WebRequestData data, Module module, Assignment ass)
{
    RemoteRequest req = data.getRequestData();
    // Resolve the question being added
    Question question = null;
    String qid = req.getField("qid");
    if(qid != null)
    {
        try
        {
            question = Question.load(data.getCore(), data.getConnector(), Integer.parseInt(qid));
        }
        catch(NumberFormatException ex)
        {
            // Deliberately ignored — a malformed id is handled the same as a
            // missing/unloadable question below
        }
    }
    // No usable question: send the admin to the question-selection page
    if(question == null)
    {
        data.getResponseData().setRedirectUrl("/admin/questions?assid="+ass.getAssID());
        return true;
    }
    // Check for postback
    String qWeight = req.getField("q_weight");
    if(qWeight != null)
    {
        if(!CSRF.isSecure(data))
            data.setTemplateData("error", "Invalid request; please try again or contact an administrator!");
        else
        {
            try
            {
                // Build the new assignment-question (page 1, order 1) and persist it
                AssignmentQuestion aq = new AssignmentQuestion(ass, question, Integer.parseInt(qWeight), 1, 1);
                AssignmentQuestion.PersistStatus aqps = aq.persist(data.getConnector());
                switch(aqps)
                {
                    case Failed:
                    case Invalid_Assignment:
                        data.setTemplateData("error", "An unknown error occurred ('"+aqps.name()+"'); please try again or contact an administrator!");
                        break;
                    case Invalid_Question:
                        data.setTemplateData("error", "Invalid question; please try again or select another question!");
                        break;
                    case Invalid_Weight:
                        data.setTemplateData("error", "Invalid weight; must be numeric and greater than zero!");
                        break;
                    case Invalid_Question_Not_Configured:
                        data.setTemplateData("error", "The selected question has not been configured properly; check it has been configured and has criteria!");
                        break;
                    case Success:
                        data.getResponseData().setRedirectUrl("/admin/modules/"+module.getModuleID()+"/assignments/"+ass.getAssID()+"/questions");
                        break;
                }
            }
            catch(NumberFormatException ex)
            {
                data.setTemplateData("error", "Invalid weight; must be numeric and greater than zero!");
            }
        }
    }
    // Page template
    data.setTemplateData("pals_title", "Admin - Module - "+Escaping.htmlEncode(module.getTitle())+" - Assignments - Questions - Add");
    data.setTemplateData("pals_content", "modules/page_admin_assignment_questions_add");
    // Template fields
    data.setTemplateData("csrf", CSRF.set(data));
    data.setTemplateData("weight", qWeight);
    data.setTemplateData("question", question);
    data.setTemplateData("assignment", ass);
    data.setTemplateData("module", module);
    data.setTemplateData("q_weight", qWeight);
    return true;
}
/**
 * Handles editing an assignment-question's weight, page and page-order;
 * the model is only updated when all three fields are posted, the CSRF
 * token is valid and all three values parse as integers.
 *
 * @param data The data for the current web-request.
 * @param module The module to which the assignment belongs.
 * @param ass The assignment owning the question.
 * @param aq The assignment-question being edited.
 * @return True to indicate the request was handled.
 */
private boolean pageAdminModule_assignment_questionEdit(WebRequestData data, Module module, Assignment ass, AssignmentQuestion aq)
{
    RemoteRequest req = data.getRequestData();
    String questionWeight = req.getField("question_weight");
    String questionPage = req.getField("question_page");
    String questionPageOrder = req.getField("question_page_order");
    boolean postback = questionWeight != null && questionPage != null && questionPageOrder != null;
    if(postback)
    {
        if(!CSRF.isSecure(data))
            data.setTemplateData("error", "Invalid request; please try again or contact an administrator!");
        else
        {
            // Parse each field; a null marks a failed parse. When several
            // fields fail, the last error message set wins (as before).
            Integer weight = null, page = null, pageOrder = null;
            try
            {
                weight = Integer.parseInt(questionWeight);
            }
            catch(NumberFormatException ex)
            {
                data.setTemplateData("error", "Invalid weight; must be numeric and greater than zero!");
            }
            try
            {
                page = Integer.parseInt(questionPage);
            }
            catch(NumberFormatException ex)
            {
                data.setTemplateData("error", "Invalid page; must be a numeric value!");
            }
            try
            {
                pageOrder = Integer.parseInt(questionPageOrder);
            }
            catch(NumberFormatException ex)
            {
                data.setTemplateData("error", "Invalid page-order; must be a numeric value!");
            }
            if(weight != null && page != null && pageOrder != null)
            {
                // Update and persist the model
                aq.setWeight(weight);
                aq.setPage(page);
                aq.setPageOrder(pageOrder);
                AssignmentQuestion.PersistStatus aqps = aq.persist(data.getConnector());
                switch(aqps)
                {
                    case Failed:
                    case Invalid_Assignment:
                    case Invalid_Question:
                        data.setTemplateData("error", "An unknown error occurred ('"+aqps.name()+"'); please try again or contact an administrator!");
                        break;
                    case Invalid_Page:
                        data.setTemplateData("error", "Page must be between 1 to "+AssignmentQuestion.PAGE_LIMIT+".");
                        break;
                    case Invalid_PageOrder:
                        data.setTemplateData("error", "Page order must be between 1 to "+AssignmentQuestion.PAGE_ORDER_LIMIT+".");
                        break;
                    case Invalid_Weight:
                        data.setTemplateData("error", "Weight must be greater than zero.");
                        break;
                    case Success:
                        data.setTemplateData("success", "Successfully updated settings.");
                        break;
                }
            }
        }
    }
    // Page template
    data.setTemplateData("pals_title", "Admin - Module - "+Escaping.htmlEncode(module.getTitle())+" - Assignments - Questions - Edit");
    data.setTemplateData("pals_content", "modules/page_admin_assignment_questions_edit");
    // Template fields; posted values take precedence over the model's
    data.setTemplateData("csrf", CSRF.set(data));
    data.setTemplateData("module", module);
    data.setTemplateData("assignment", ass);
    data.setTemplateData("question", aq);
    data.setTemplateData("question_weight", questionWeight != null ? questionWeight : aq.getWeight());
    data.setTemplateData("question_page", questionPage != null ? questionPage : aq.getPage());
    data.setTemplateData("question_page_order", questionPageOrder != null ? questionPageOrder : aq.getPageOrder());
    return true;
}
/**
 * Handles removing a question from an assignment; the model is only
 * unpersisted once the user posts back confirmation with a valid CSRF token.
 *
 * @param data The data for the current web-request.
 * @param module The module to which the assignment belongs.
 * @param ass The assignment owning the question.
 * @param aq The assignment-question being removed.
 * @return True to indicate the request was handled.
 */
private boolean pageAdminModule_assignment_questionRemove(WebRequestData data, Module module, Assignment ass, AssignmentQuestion aq)
{
    RemoteRequest request = data.getRequestData();
    String confirmation = request.getField("delete");
    if("1".equals(confirmation))
    {
        if(!CSRF.isSecure(data))
            data.setTemplateData("error", "Invalid request; please try again or contact an administrator!");
        else if(aq.delete(data.getConnector()))
        {
            // Wake the cleaner on all nodes so dependent data is tidied up
            data.getCore().getRMI().nodesGlobalEventAll("base.cleaner.wake", new Object[]{});
            // Back to the assignment's questions listing
            data.getResponseData().setRedirectUrl("/admin/modules/"+module.getModuleID()+"/assignments/"+ass.getAssID()+"/questions");
        }
        else
            data.setTemplateData("error", "Failed to unpersist model for an unknown reason!");
    }
    // Page template
    data.setTemplateData("pals_title", "Admin - Module - "+Escaping.htmlEncode(module.getTitle())+" - Assignments - Questions - Remove");
    data.setTemplateData("pals_content", "modules/page_admin_assignment_questions_remove");
    // Template fields
    data.setTemplateData("csrf", CSRF.set(data));
    data.setTemplateData("module", module);
    data.setTemplateData("assignment", ass);
    data.setTemplateData("question", aq);
    return true;
}
}
| |
package io.tvcalendar.web.rest;
import com.codahale.metrics.annotation.Timed;
import io.tvcalendar.domain.Authority;
import io.tvcalendar.domain.PersistentToken;
import io.tvcalendar.domain.User;
import io.tvcalendar.repository.PersistentTokenRepository;
import io.tvcalendar.repository.UserRepository;
import io.tvcalendar.security.SecurityUtils;
import io.tvcalendar.service.MailService;
import io.tvcalendar.service.UserService;
import io.tvcalendar.web.rest.dto.KeyAndPasswordDTO;
import io.tvcalendar.web.rest.dto.UserDTO;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.*;
import javax.inject.Inject;
import javax.servlet.http.HttpServletRequest;
import javax.validation.Valid;
import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.util.*;
/**
* REST controller for managing the current user's account.
*/
@RestController
@RequestMapping("/api")
public class AccountResource {
// Logger for request tracing; only used at debug level below.
private final Logger log = LoggerFactory.getLogger(AccountResource.class);
@Inject
private UserRepository userRepository;
@Inject
private UserService userService;
@Inject
private PersistentTokenRepository persistentTokenRepository;
@Inject
private MailService mailService;
/**
 * POST  /register -> register the user.
 *
 * Rejects with 400 (plain-text reason) when the login or e-mail address is
 * already in use; otherwise creates the account and sends an activation
 * e-mail whose link is built from the current request's scheme/host/port.
 *
 * NOTE(review): the e-mail uniqueness check uses the raw posted address
 * while the account is created with the lower-cased address — two
 * registrations differing only in case could presumably both pass the
 * check; verify against the repository/query semantics.
 * NOTE(review): toLowerCase() uses the default locale — confirm whether
 * Locale-independent folding is wanted here.
 */
@RequestMapping(value = "/register",
method = RequestMethod.POST,
produces = MediaType.TEXT_PLAIN_VALUE)
@Timed
public ResponseEntity<?> registerAccount(@Valid @RequestBody UserDTO userDTO, HttpServletRequest request) {
return userRepository.findOneByLogin(userDTO.getLogin())
.map(user -> new ResponseEntity<>("login already in use", HttpStatus.BAD_REQUEST))
.orElseGet(() -> userRepository.findOneByEmail(userDTO.getEmail())
.map(user -> new ResponseEntity<>("e-mail address already in use", HttpStatus.BAD_REQUEST))
.orElseGet(() -> {
User user = userService.createUserInformation(userDTO.getLogin(), userDTO.getPassword(),
userDTO.getFirstName(), userDTO.getLastName(), userDTO.getEmail().toLowerCase(),
userDTO.getLangKey());
String baseUrl = request.getScheme() + // "http"
"://" + // "://"
request.getServerName() + // "myhost"
":" + // ":"
request.getServerPort() + // "80"
request.getContextPath(); // "/myContextPath" or "" if deployed in root context
mailService.sendActivationEmail(user, baseUrl);
return new ResponseEntity<>(HttpStatus.CREATED);
})
);
}
/**
 * GET  /activate -> activate the registered user for the given activation key.
 * Returns 500 when the key does not match any pending registration.
 */
@RequestMapping(value = "/activate",
method = RequestMethod.GET,
produces = MediaType.APPLICATION_JSON_VALUE)
@Timed
public ResponseEntity<String> activateAccount(@RequestParam(value = "key") String key) {
return Optional.ofNullable(userService.activateRegistration(key))
.map(user -> new ResponseEntity<String>(HttpStatus.OK))
.orElse(new ResponseEntity<>(HttpStatus.INTERNAL_SERVER_ERROR));
}
/**
 * GET  /authenticate -> check if the user is authenticated, and return its login.
 * Returns null (empty body) for anonymous requests.
 */
@RequestMapping(value = "/authenticate",
method = RequestMethod.GET,
produces = MediaType.APPLICATION_JSON_VALUE)
@Timed
public String isAuthenticated(HttpServletRequest request) {
log.debug("REST request to check if the current user is authenticated");
return request.getRemoteUser();
}
/**
 * GET  /account -> get the current user (with authorities) as a DTO.
 * Returns 500 when no current user can be resolved.
 */
@RequestMapping(value = "/account",
method = RequestMethod.GET,
produces = MediaType.APPLICATION_JSON_VALUE)
@Timed
public ResponseEntity<UserDTO> getAccount() {
return Optional.ofNullable(userService.getUserWithAuthorities())
.map(user -> new ResponseEntity<>(new UserDTO(user), HttpStatus.OK))
.orElse(new ResponseEntity<>(HttpStatus.INTERNAL_SERVER_ERROR));
}
/**
 * POST  /account -> update the current user's information (first/last name,
 * e-mail, language key). The login and password are not modified here.
 */
@RequestMapping(value = "/account",
method = RequestMethod.POST,
produces = MediaType.APPLICATION_JSON_VALUE)
@Timed
public ResponseEntity<String> saveAccount(@RequestBody UserDTO userDTO) {
return userRepository
.findOneByLogin(SecurityUtils.getCurrentUser().getUsername())
.map(u -> {
userService.updateUserInformation(userDTO.getFirstName(), userDTO.getLastName(), userDTO.getEmail(),
userDTO.getLangKey());
return new ResponseEntity<String>(HttpStatus.OK);
})
.orElseGet(() -> new ResponseEntity<>(HttpStatus.INTERNAL_SERVER_ERROR));
}
/**
 * POST  /change_password -> changes the current user's password.
 *
 * NOTE(review): the current password is not re-verified before the change;
 * the caller is already authenticated, but confirm this is intended.
 */
@RequestMapping(value = "/account/change_password",
method = RequestMethod.POST,
produces = MediaType.APPLICATION_JSON_VALUE)
@Timed
public ResponseEntity<?> changePassword(@RequestBody String password) {
if (!checkPasswordLength(password)) {
return new ResponseEntity<>("Incorrect password", HttpStatus.BAD_REQUEST);
}
userService.changePassword(password);
return new ResponseEntity<>(HttpStatus.OK);
}
/**
 * GET  /account/sessions -> get the current user's open sessions
 * ("remember me" persistent tokens).
 */
@RequestMapping(value = "/account/sessions",
method = RequestMethod.GET,
produces = MediaType.APPLICATION_JSON_VALUE)
@Timed
public ResponseEntity<List<PersistentToken>> getCurrentSessions() {
return userRepository.findOneByLogin(SecurityUtils.getCurrentUser().getUsername())
.map(user -> new ResponseEntity<>(
persistentTokenRepository.findByUser(user),
HttpStatus.OK))
.orElse(new ResponseEntity<>(HttpStatus.INTERNAL_SERVER_ERROR));
}
/**
 * DELETE  /account/sessions?series={series} -> invalidate an existing session.
 *
 * - You can only delete your own sessions, not any other user's session
 * - If you delete one of your existing sessions, and that you are currently logged in on that session, you will
 *   still be able to use that session, until you quit your browser: it does not work in real time (there is
 *   no API for that), it only removes the "remember me" cookie
 * - This is also true if you invalidate your current session: you will still be able to use it until you close
 *   your browser or that the session times out. But automatic login (the "remember me" cookie) will not work
 *   anymore.
 *   There is an API to invalidate the current session, but there is no API to check which session uses which
 *   cookie.
 */
@RequestMapping(value = "/account/sessions/{series}",
method = RequestMethod.DELETE)
@Timed
public void invalidateSession(@PathVariable String series) throws UnsupportedEncodingException {
String decodedSeries = URLDecoder.decode(series, "UTF-8");
// Only delete the token when it belongs to the current user (ownership check)
userRepository.findOneByLogin(SecurityUtils.getCurrentUser().getUsername()).ifPresent(u -> {
persistentTokenRepository.findByUser(u).stream()
.filter(persistentToken -> StringUtils.equals(persistentToken.getSeries(), decodedSeries))
.findAny().ifPresent(t -> persistentTokenRepository.delete(decodedSeries));
});
}
/**
 * POST  /account/reset_password/init -> e-mail a password-reset link to the
 * given address; 400 when the address is not registered.
 */
@RequestMapping(value = "/account/reset_password/init",
method = RequestMethod.POST,
produces = MediaType.TEXT_PLAIN_VALUE)
@Timed
public ResponseEntity<?> requestPasswordReset(@RequestBody String mail, HttpServletRequest request) {
return userService.requestPasswordReset(mail)
.map(user -> {
String baseUrl = request.getScheme() +
"://" +
request.getServerName() +
":" +
request.getServerPort() +
request.getContextPath();
mailService.sendPasswordResetMail(user, baseUrl);
return new ResponseEntity<>("e-mail was sent", HttpStatus.OK);
}).orElse(new ResponseEntity<>("e-mail address not registered", HttpStatus.BAD_REQUEST));
}
/**
 * POST  /account/reset_password/finish -> set a new password using the key
 * from the reset e-mail; validates the password length first.
 */
@RequestMapping(value = "/account/reset_password/finish",
method = RequestMethod.POST,
produces = MediaType.APPLICATION_JSON_VALUE)
@Timed
public ResponseEntity<String> finishPasswordReset(@RequestBody KeyAndPasswordDTO keyAndPassword) {
if (!checkPasswordLength(keyAndPassword.getNewPassword())) {
return new ResponseEntity<>("Incorrect password", HttpStatus.BAD_REQUEST);
}
return userService.completePasswordReset(keyAndPassword.getNewPassword(), keyAndPassword.getKey())
.map(user -> new ResponseEntity<String>(HttpStatus.OK)).orElse(new ResponseEntity<>(HttpStatus.INTERNAL_SERVER_ERROR));
}
// True when the password is non-empty and within the DTO's length bounds.
private boolean checkPasswordLength(String password) {
return (!StringUtils.isEmpty(password) &&
password.length() >= UserDTO.PASSWORD_MIN_LENGTH &&
password.length() <= UserDTO.PASSWORD_MAX_LENGTH);
}
}
| |
/* Copyright (c) 2001-2011, The HSQL Development Group
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the HSQL Development Group nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL HSQL DEVELOPMENT GROUP, HSQLDB.ORG,
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.hsqldb;
import org.hsqldb.HsqlNameManager.SimpleName;
import org.hsqldb.ParserDQL.CompileContext;
import org.hsqldb.RangeVariable.RangeIteratorMain;
import org.hsqldb.index.Index;
import org.hsqldb.lib.HashSet;
import org.hsqldb.lib.HsqlArrayList;
import org.hsqldb.lib.OrderedHashSet;
import org.hsqldb.map.ValuePool;
/**
* Metadata for range joined variables
*
* @author Fred Toussi (fredt@users dot sourceforge.net)
* @version 2.2.9
* @since 1.9.0
*/
/**
 * Metadata for range joined variables.<p>
 *
 * Wraps the range variables of the underlying query specification and maps
 * column lookups across all of the joined ranges; most other behaviour is
 * delegated to {@link RangeVariable}.
 *
 * @author Fred Toussi (fredt@users dot sourceforge.net)
 * @version 2.2.9
 * @since 1.9.0
 */
public class RangeVariableJoined extends RangeVariable {

    /** Base range variables of the wrapped query specification. */
    RangeVariable[] rangeArray;

    public RangeVariableJoined(Table table, SimpleName alias,
                               OrderedHashSet columnList,
                               SimpleName[] columnNameList,
                               CompileContext compileContext) {

        super(table, alias, columnList, columnNameList, compileContext);

        setParameters();
    }

    /**
     * Caches the base range variables and aggregates their join flags.
     */
    private void setParameters() {

        QuerySpecification qs =
            (QuerySpecification) this.rangeTable.getQueryExpression();

        this.rangeArray = qs.rangeVariables;

        // Aggregate the flags over ALL base ranges. The previous version
        // broke out of the loop unconditionally after the first element,
        // so the join type of every range other than rangeArray[0] was
        // ignored.
        for (int i = 0; i < rangeArray.length; i++) {
            if (rangeArray[i].isLeftJoin) {
                hasLeftJoin = true;
            }

            if (rangeArray[i].isRightJoin) {
                hasRightJoin = true;
            }

            if (rangeArray[i].isLateral) {
                hasLateral = true;
            }
        }
    }

    public RangeVariable[] getBaseRangeVariables() {
        return rangeArray;
    }

    // The overrides below delegate to RangeVariable unchanged; they are kept
    // as explicit extension/navigation points.
    public void setRangeTableVariables() {
        super.setRangeTableVariables();
    }

    public void setJoinType(boolean isLeft, boolean isRight) {
        super.setJoinType(isLeft, isRight);
    }

    public void addNamedJoinColumns(OrderedHashSet columns) {
        super.addNamedJoinColumns(columns);
    }

    public void addColumn(int columnIndex) {
        super.addColumn(columnIndex);
    }

    public void addAllColumns() {
        super.addAllColumns();
    }

    public void addNamedJoinColumnExpression(String name, Expression e) {
        super.addNamedJoinColumnExpression(name, e);
    }

    /**
     * Resolves a named-join column expression, falling back to the first
     * base range variable when this range has no match.
     */
    public ExpressionColumn getColumnExpression(String name) {

        ExpressionColumn col = super.getColumnExpression(name);

        if (col == null) {
            col = rangeArray[0].getColumnExpression(name);
        }

        return col;
    }

    public Table getTable() {
        return super.getTable();
    }

    public boolean hasSingleIndexCondition() {
        return super.hasSingleIndexCondition();
    }

    public boolean setDistinctColumnsOnIndex(int[] colMap) {
        return super.setDistinctColumnsOnIndex(colMap);
    }

    /**
     * Used for sort
     */
    public Index getSortIndex() {
        return super.getSortIndex();
    }

    /**
     * Used for sort
     */
    public boolean setSortIndex(Index index, boolean reversed) {
        return super.setSortIndex(index, reversed);
    }

    public boolean reverseOrder() {
        return super.reverseOrder();
    }

    public OrderedHashSet getColumnNames() {
        return super.getColumnNames();
    }

    public OrderedHashSet getUniqueColumnNameSet() {
        return super.getUniqueColumnNameSet();
    }

    /**
     * Maps a (schema, table, column) lookup onto the joined ranges and
     * returns the joined column index, or -1 when not found.
     */
    public int findColumn(String schemaName, String tableName,
                          String columnName) {

        // an aliased join is addressed as a single table
        if (tableAlias != null) {
            return super.findColumn(schemaName, tableName, columnName);
        }

        boolean hasNamed = rangeArray[0].namedJoinColumnExpressions != null;
        int     count    = 0;

        if (hasNamed) {
            count = rangeArray[0].namedJoinColumnExpressions.size();

            if (rangeArray[0].namedJoinColumnExpressions.containsKey(
                    columnName)) {

                // named join columns may not be qualified with a table name
                if (tableName != null) {
                    return -1;
                }

                return super.findColumn(schemaName, tableName, columnName);
            }
        }

        for (int i = 0; i < rangeArray.length; i++) {
            RangeVariable currentRange = rangeArray[i];
            int colIndex = currentRange.findColumn(schemaName, tableName,
                                                   columnName);

            if (colIndex > -1) {
                if (!hasNamed) {
                    return count + colIndex;
                }

                // skip named join columns preceding the match in this range
                for (int j = 0; j < colIndex; j++) {
                    ColumnSchema col = currentRange.rangeTable.getColumn(j);

                    if (!currentRange.namedJoinColumnExpressions.containsKey(
                            col.getNameString())) {
                        count++;
                    }
                }

                return count;
            }

            count += currentRange.rangeTable.getColumnCount();

            if (hasNamed) {
                count -= currentRange.namedJoinColumnExpressions.size();
            }
        }

        return -1;
    }

    public SimpleName getColumnAlias(int i) {
        return super.getColumnAlias(i);
    }

    public boolean hasColumnAlias() {
        return super.hasColumnAlias();
    }

    public SimpleName getTableAlias() {
        return super.getTableAlias();
    }

    /**
     * Resolves a table name to a base range variable; when this range is
     * aliased the lookup is handled by the superclass.
     */
    public RangeVariable getRangeForTableName(String name) {

        if (tableAlias != null) {
            return super.getRangeForTableName(name);
        }

        for (int i = 0; i < rangeArray.length; i++) {
            RangeVariable range = rangeArray[i].getRangeForTableName(name);

            if (range != null) {
                return range;
            }
        }

        return null;
    }

    /**
     * Add all columns to a list of expressions
     */
    public void addTableColumns(HsqlArrayList exprList) {
        super.addTableColumns(exprList);
    }

    /**
     * Add all columns to a list of expressions
     */
    public int addTableColumns(HsqlArrayList exprList, int position,
                               HashSet exclude) {
        return super.addTableColumns(exprList, position, exclude);
    }

    /**
     * Adds the columns of one base range, offset by its position within the
     * joined column space.
     */
    public void addTableColumns(RangeVariable subRange, Expression expression,
                                HashSet exclude) {

        int index = getFirstColumnIndex(subRange);

        addTableColumns(expression, index,
                        subRange.rangeTable.getColumnCount(), exclude);
    }

    /**
     * Returns the joined-column offset of the given base range, or -1 when
     * the range is not part of this join.
     */
    protected int getFirstColumnIndex(RangeVariable subRange) {

        if (subRange == this) {
            return 0;
        }

        int count = 0;

        for (int i = 0; i < rangeArray.length; i++) {
            int index = rangeArray[i].getFirstColumnIndex(subRange);

            if (index == -1) {
                count += rangeArray[i].rangeTable.getColumnCount();
            } else {
                return count + index;
            }
        }

        return -1;
    }

    /**
     * Removes reference to Index to avoid possible memory leaks after alter
     * table or drop index
     */
    public void setForCheckConstraint() {
        super.setForCheckConstraint();
    }

    /**
     * used before condition processing
     */
    public Expression getJoinCondition() {
        return super.getJoinCondition();
    }

    public void addJoinCondition(Expression e) {
        super.addJoinCondition(e);
    }

    public void resetConditions() {
        super.resetConditions();
    }

    public void replaceColumnReference(RangeVariable range,
                                       Expression[] list) {}

    public void replaceRangeVariables(RangeVariable[] ranges,
                                      RangeVariable[] newRanges) {
        super.replaceRangeVariables(ranges, newRanges);
    }

    public void resolveRangeTable(Session session, RangeGroup rangeGroup,
                                  RangeGroup[] rangeGroups) {
        super.resolveRangeTable(session, rangeGroup, rangeGroups);
    }

    /**
     * Retrieves a String representation of this object. <p>
     *
     * The returned String describes this object's table, alias
     * access mode, index, join mode, Start, End and And conditions.
     *
     * @return a String representation of this object
     */
    public String describe(Session session, int blanks) {

        RangeVariableConditions[] conditionsArray = joinConditions;
        StringBuffer              sb;
        String b = ValuePool.spaceString.substring(0, blanks);

        sb = new StringBuffer();

        String temp = "INNER";

        if (isLeftJoin) {
            temp = "LEFT OUTER";

            if (isRightJoin) {
                temp = "FULL";
            }
        } else if (isRightJoin) {
            temp = "RIGHT OUTER";
        }

        sb.append(b).append("join type=").append(temp).append("\n");
        sb.append(b).append("table=").append(rangeTable.getName().name).append(
            "\n");

        if (tableAlias != null) {
            sb.append(b).append("alias=").append(tableAlias.name).append("\n");
        }

        boolean fullScan = !conditionsArray[0].hasIndexCondition();

        sb.append(b).append("access=").append(fullScan ? "FULL SCAN"
                                                       : "INDEX PRED").append(
                                                       "\n");

        for (int i = 0; i < conditionsArray.length; i++) {
            RangeVariableConditions conditions = conditionsArray[i];

            if (i > 0) {
                sb.append(b).append("OR condition = [");
            } else {
                sb.append(b).append("condition = [");
            }

            sb.append(conditions.describe(session, blanks + 2));
            sb.append(b).append("]\n");
        }

        return sb.toString();
    }

    public RangeIteratorMain getIterator(Session session) {
        return super.getIterator(session);
    }
}
| |
/*
* Copyright 2013-2014 must-be.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package consulo.msil.lang.psi.impl;
import com.intellij.lang.ASTNode;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.psi.PsiElement;
import com.intellij.psi.ResolveState;
import com.intellij.psi.scope.PsiScopeProcessor;
import com.intellij.psi.stubs.IStubElementType;
import com.intellij.util.IncorrectOperationException;
import consulo.annotation.access.RequiredReadAction;
import consulo.dotnet.psi.*;
import consulo.dotnet.resolve.DotNetTypeRef;
import consulo.internal.dotnet.msil.decompiler.util.MsilHelper;
import consulo.msil.lang.psi.*;
import consulo.msil.lang.psi.impl.elementType.stub.MsilMethodEntryStub;
import org.jetbrains.annotations.NonNls;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
/**
 * PSI implementation of a MSIL method entry. The element is a thin facade
 * over its child stub elements: return type, modifiers, parameters and
 * attributes are all stored as children and looked up on demand.
 *
 * @author VISTALL
 * @since 21.05.14
 */
public class MsilMethodEntryImpl extends MsilStubElementImpl<MsilMethodEntryStub> implements MsilMethodEntry
{
	public MsilMethodEntryImpl(@Nonnull ASTNode node)
	{
		super(node);
	}

	public MsilMethodEntryImpl(@Nonnull MsilMethodEntryStub stub, @Nonnull IStubElementType nodeType)
	{
		super(stub, nodeType);
	}

	@Override
	public void accept(MsilVisitor visitor)
	{
		visitor.visitMethodEntry(this);
	}

	@RequiredReadAction
	@Nonnull
	@Override
	public DotNetType getReturnType()
	{
		// The return type is the first type-like child of the entry.
		return getFirstStubOrPsiChild(MsilStubTokenSets.TYPE_STUBS, DotNetType.ARRAY_FACTORY);
	}

	@RequiredReadAction
	@Nonnull
	@Override
	public DotNetTypeRef getReturnTypeRef()
	{
		return getReturnType().toTypeRef();
	}

	@Nonnull
	@Override
	public DotNetCodeBodyProxy getCodeBlock()
	{
		// Method bodies are never materialized for decompiled MSIL entries.
		return DotNetCodeBodyProxy.EMPTY;
	}

	@Nullable
	@Override
	public DotNetGenericParameterList getGenericParameterList()
	{
		return getStubOrPsiChild(MsilStubElements.GENERIC_PARAMETER_LIST);
	}

	@Nonnull
	@Override
	public DotNetGenericParameter[] getGenericParameters()
	{
		DotNetGenericParameterList list = getGenericParameterList();
		if(list == null)
		{
			return DotNetGenericParameter.EMPTY_ARRAY;
		}
		return list.getParameters();
	}

	@Override
	public int getGenericParametersCount()
	{
		DotNetGenericParameterList list = getGenericParameterList();
		if(list == null)
		{
			return 0;
		}
		return list.getGenericParametersCount();
	}

	@RequiredReadAction
	@Override
	public boolean hasModifier(@Nonnull DotNetModifier modifier)
	{
		return getModifierList().hasModifier(modifier);
	}

	@RequiredReadAction
	@Nonnull
	@Override
	public DotNetModifierList getModifierList()
	{
		return getRequiredStubOrPsiChild(MsilStubElements.MODIFIER_LIST);
	}

	@Nonnull
	@Override
	public DotNetTypeRef[] getParameterTypeRefs()
	{
		DotNetParameterList list = getParameterList();
		if(list == null)
		{
			return DotNetTypeRef.EMPTY_ARRAY;
		}
		return list.getParameterTypeRefs();
	}

	@Nullable
	@Override
	public DotNetParameterList getParameterList()
	{
		return getRequiredStubOrPsiChild(MsilStubElements.PARAMETER_LIST);
	}

	@Nonnull
	@Override
	public DotNetParameter[] getParameters()
	{
		DotNetParameterList list = getParameterList();
		if(list == null)
		{
			return DotNetParameter.EMPTY_ARRAY;
		}
		return list.getParameters();
	}

	@RequiredReadAction
	@Nullable
	@Override
	public String getPresentableParentQName()
	{
		// The bytecode name is fully qualified; the parent is everything
		// before the last dot.
		return StringUtil.getPackageName(getNameFromBytecode());
	}

	@RequiredReadAction
	@Nullable
	@Override
	public String getPresentableQName()
	{
		return getNameFromBytecode();
	}

	@Nullable
	@Override
	public PsiElement getNameIdentifier()
	{
		return findChildByType(MsilTokenSets.IDENTIFIERS_AND_CTOR);
	}

	@Override
	public String getName()
	{
		String bytecodeName = getNameFromBytecode();
		boolean constructorName = MsilHelper.CONSTRUCTOR_NAME.equals(bytecodeName) ||
				MsilHelper.STATIC_CONSTRUCTOR_NAME.equals(bytecodeName);
		// Constructor markers are kept verbatim; anything else is stripped down
		// to its short (unqualified) name.
		return constructorName ? bytecodeName : StringUtil.getShortName(bytecodeName);
	}

	@Override
	@Nonnull
	public String getNameFromBytecode()
	{
		// Prefer the stub when available to avoid building the PSI tree.
		MsilMethodEntryStub stub = getGreenStub();
		if(stub != null)
		{
			return stub.getNameFromBytecode();
		}
		PsiElement identifier = getNameIdentifier();
		if(identifier == null)
		{
			return "";
		}
		return StringUtil.unquoteString(identifier.getText());
	}

	@RequiredReadAction
	@Nonnull
	@Override
	public MsilCustomAttribute[] getAttributes()
	{
		return getStubOrPsiChildren(MsilStubElements.CUSTOM_ATTRIBUTE, MsilCustomAttribute.ARRAY_FACTORY);
	}

	@RequiredReadAction
	@Nonnull
	@Override
	public MsilCustomAttribute[] getParameterAttributes(int index)
	{
		MsilParameterAttributeList attributeList = findParameterAttributeList(index);
		if(attributeList == null)
		{
			return MsilCustomAttribute.EMPTY_ARRAY;
		}
		return attributeList.getAttributes();
	}

	@RequiredReadAction
	@Nullable
	@Override
	public MsilConstantValue getConstantValue(int index)
	{
		MsilParameterAttributeList attributeList = findParameterAttributeList(index);
		if(attributeList == null)
		{
			return null;
		}
		return attributeList.getValue();
	}

	@Nullable
	@RequiredReadAction
	private MsilParameterAttributeList findParameterAttributeList(int index)
	{
		// Callers use zero-based parameter indices, but the attribute lists in
		// the decompiled file are numbered starting at one.
		int fileIndex = index + 1;
		MsilParameterAttributeList[] lists = getStubOrPsiChildren(MsilStubElements.PARAMETER_ATTRIBUTE_LIST, MsilParameterAttributeList.ARRAY_FACTORY);
		for(MsilParameterAttributeList attributeList : lists)
		{
			if(attributeList.getIndex() == fileIndex)
			{
				return attributeList;
			}
		}
		return null;
	}

	@RequiredReadAction
	@Nonnull
	@Override
	public MsilCustomAttribute[] getGenericParameterAttributes(@Nonnull String name)
	{
		MsilTypeParameterAttributeList[] lists = getStubOrPsiChildren(MsilStubElements.TYPE_PARAMETER_ATTRIBUTE_LIST, MsilTypeParameterAttributeList.ARRAY_FACTORY);
		for(MsilTypeParameterAttributeList attributeList : lists)
		{
			if(name.equals(attributeList.getGenericParameterName()))
			{
				return attributeList.getAttributes();
			}
		}
		return MsilCustomAttribute.EMPTY_ARRAY;
	}

	@Override
	public PsiElement setName(@NonNls @Nonnull String s) throws IncorrectOperationException
	{
		// Renaming decompiled members is not supported.
		return null;
	}

	@Override
	public boolean processDeclarations(@Nonnull PsiScopeProcessor processor, @Nonnull ResolveState state, PsiElement lastParent, @Nonnull PsiElement place)
	{
		// Only the generic parameters are visible from inside the method entry.
		for(DotNetGenericParameter genericParameter : getGenericParameters())
		{
			if(!processor.execute(genericParameter, state))
			{
				return false;
			}
		}
		return true;
	}

	@Nullable
	@Override
	public DotNetType getTypeForImplement()
	{
		return null;
	}

	@Nonnull
	@Override
	public DotNetTypeRef getTypeRefForImplement()
	{
		return DotNetTypeRef.ERROR_TYPE;
	}
}
| |
package com.rs.cache.loaders;
import java.io.IOException;
import java.lang.reflect.Field;
import java.util.Arrays;
import java.util.HashMap;
import java.util.concurrent.ConcurrentHashMap;
import com.rs.cache.Cache;
import com.rs.game.player.content.dungeoneering.DungeonUtils;
import com.rs.io.InputStream;
@SuppressWarnings("unused")
public class ObjectDefinitions {

    /** Decoded definitions, cached by object id. */
    private static final ConcurrentHashMap<Integer, ObjectDefinitions> objectDefinitions = new ConcurrentHashMap<Integer, ObjectDefinitions>();

    private short[] originalColors;
    int[] toObjectIds;
    static int anInt3832;
    int[] anIntArray3833 = null;
    private int anInt3834;
    int anInt3835;
    static int anInt3836;
    private byte aByte3837;
    int anInt3838 = -1;
    boolean aBoolean3839;
    private int anInt3840;
    private int anInt3841;
    static int anInt3842;
    static int anInt3843;
    int anInt3844;
    boolean aBoolean3845;
    static int anInt3846;
    private byte aByte3847;
    private byte aByte3849;
    int anInt3850;
    int anInt3851;
    public boolean secondBool;
    public boolean aBoolean3853;
    int anInt3855;
    public boolean notCliped;
    int anInt3857;
    private byte[] aByteArray3858;
    int[] anIntArray3859;
    int anInt3860;
    String[] options;
    int configFileId;
    private short[] modifiedColors;
    int anInt3865;
    boolean aBoolean3866;
    boolean aBoolean3867;
    public boolean projectileCliped;
    private int[] anIntArray3869;
    boolean aBoolean3870;
    public int sizeY;
    boolean aBoolean3872;
    boolean aBoolean3873;
    public int thirdInt;
    private int anInt3875;
    public int objectAnimation;
    private int anInt3877;
    private int anInt3878;
    public int clipType;
    private int anInt3881;
    private int anInt3882;
    private int anInt3883;
    Object loader;
    private int anInt3889;
    public int sizeX;
    public boolean aBoolean3891;
    int anInt3892;
    public int secondInt;
    boolean aBoolean3894;
    boolean aBoolean3895;
    int anInt3896;
    int configId;
    private byte[] aByteArray3899;
    int anInt3900;
    public String name;
    private int anInt3902;
    int anInt3904;
    int anInt3905;
    boolean aBoolean3906;
    int[] anIntArray3908;
    private byte aByte3912;
    int anInt3913;
    private byte aByte3914;
    private int anInt3915;
    public int[][] modelIds;
    private int anInt3917;
    /** Object animation data, table 1 (decoded by opcode 41). */
    private short[] aShortArray3919;
    /** Object animation data, table 2 (decoded by opcode 41). */
    private short[] aShortArray3920;
    int anInt3921;
    private HashMap<Integer, Object> parameters;
    boolean aBoolean3923;
    boolean aBoolean3924;
    int anInt3925;
    public int id;

    /** Manual smoke test: decodes one definition and dumps a few fields. */
    public static void main(String[] args) throws IOException {
        Cache.init();
        ObjectDefinitions defs = getObjectDefinitions(69828);
        System.out.println(defs.objectAnimation);
        System.out.println(defs.configId);
        System.out.println(defs.configFileId);
        System.out.println(Arrays.toString(defs.toObjectIds));
        System.out.println(defs.toObjectIds.length);
    }

    /** Returns the first right-click option, or "" when absent. */
    public String getFirstOption() {
        if (options == null || options.length < 1)
            return "";
        return options[0];
    }

    /** Returns the second right-click option, or "" when absent. */
    public String getSecondOption() {
        if (options == null || options.length < 2)
            return "";
        return options[1];
    }

    /**
     * Returns the option in the given 1-based slot, or "" when out of range.
     *
     * @param option option slot, 1..options.length
     */
    public String getOption(int option) {
        // option is 1-based; reject all non-positive values (the previous
        // check only caught 0 and would have thrown on negatives).
        if (options == null || option < 1 || option > options.length)
            return "";
        return options[option - 1];
    }

    /** Returns the third right-click option, or "" when absent. */
    public String getThirdOption() {
        if (options == null || options.length < 3)
            return "";
        return options[2];
    }

    /**
     * Checks whether the 0-based option slot {@code i} equals {@code option}.
     * Fix: the bounds check now runs BEFORE the array access; the original
     * order ({@code options[i] == null || options.length <= i}) threw
     * ArrayIndexOutOfBoundsException whenever {@code i >= options.length}.
     */
    public boolean containsOption(int i, String option) {
        if (options == null || i < 0 || i >= options.length || options[i] == null)
            return false;
        return options[i].equals(option);
    }

    /** Case-insensitive test for the presence of an option string. */
    public boolean containsOption(String o) {
        if (options == null)
            return false;
        for (String option : options) {
            if (option == null)
                continue;
            if (option.equalsIgnoreCase(o))
                return true;
        }
        return false;
    }

    /**
     * Decodes a single opcode of the on-disk definition format.
     *
     * The decompiler emitted this as a ~350-line tree of inverted if/else
     * tests; it is flattened here into one switch with one case per opcode.
     * Every case reads exactly the same bytes and writes exactly the same
     * fields as the original nested decoder. Unknown opcodes carry no
     * payload and are ignored, exactly as before.
     */
    private void readValues(InputStream stream, int opcode) {
        switch (opcode) {
        case 1:
        case 5: {
            // Model groups: per group a type byte followed by its model ids.
            int groupCount = stream.readUnsignedByte();
            modelIds = new int[groupCount][];
            aByteArray3899 = new byte[groupCount];
            for (int group = 0; group < groupCount; group++) {
                aByteArray3899[group] = (byte) stream.readByte();
                int modelCount = stream.readUnsignedByte();
                modelIds[group] = new int[modelCount];
                for (int i = 0; i < modelCount; i++)
                    modelIds[group][i] = stream.readBigSmart();
            }
            // Opcode 5 is followed by a second model table that is skipped
            // (the original guarded this on a constant-false flag).
            if (opcode == 5)
                skipReadModelIds(stream);
            break;
        }
        case 2:
            name = stream.readString();
            break;
        case 14:
            sizeX = stream.readUnsignedByte();
            break;
        case 15:
            sizeY = stream.readUnsignedByte();
            break;
        case 17: // not clipped at all
            projectileCliped = false;
            clipType = 0;
            break;
        case 18:
            projectileCliped = false;
            break;
        case 19:
            secondInt = stream.readUnsignedByte();
            break;
        case 21:
            aByte3912 = (byte) 1;
            break;
        case 22:
            aBoolean3867 = true;
            break;
        case 23:
            thirdInt = 1;
            break;
        case 24:
            objectAnimation = stream.readBigSmart();
            break;
        case 27: // clipped (variant of clip types 1 and 2)
            clipType = 1;
            break;
        case 28:
            anInt3892 = stream.readUnsignedByte() << 2;
            break;
        case 29:
            anInt3878 = stream.readByte();
            break;
        case 30:
        case 31:
        case 32:
        case 33:
        case 34:
            options[opcode - 30] = stream.readString();
            break;
        case 39:
            anInt3840 = stream.readByte() * 5;
            break;
        case 40: {
            // Recolour table: pairs of (original, replacement) colours.
            int count = stream.readUnsignedByte();
            originalColors = new short[count];
            modifiedColors = new short[count];
            for (int i = 0; i < count; i++) {
                originalColors[i] = (short) stream.readUnsignedShort();
                modifiedColors[i] = (short) stream.readUnsignedShort();
            }
            break;
        }
        case 41: {
            // Object animation tables (see aShortArray3919/3920).
            int count = stream.readUnsignedByte();
            aShortArray3920 = new short[count];
            aShortArray3919 = new short[count];
            for (int i = 0; i < count; i++) {
                aShortArray3920[i] = (short) stream.readUnsignedShort();
                aShortArray3919[i] = (short) stream.readUnsignedShort();
            }
            break;
        }
        case 42: {
            int count = stream.readUnsignedByte();
            aByteArray3858 = new byte[count];
            for (int i = 0; i < count; i++)
                aByteArray3858[i] = (byte) stream.readByte();
            break;
        }
        case 62:
            aBoolean3839 = true;
            break;
        case 64:
            aBoolean3872 = false;
            break;
        case 65:
            anInt3902 = stream.readUnsignedShort();
            break;
        case 66:
            anInt3841 = stream.readUnsignedShort();
            break;
        case 67:
            anInt3917 = stream.readUnsignedShort();
            break;
        case 69:
            anInt3925 = stream.readUnsignedByte();
            break;
        case 70:
            anInt3883 = stream.readShort() << 2;
            break;
        case 71:
            anInt3889 = stream.readShort() << 2;
            break;
        case 72:
            anInt3915 = stream.readShort() << 2;
            break;
        case 73:
            secondBool = true;
            break;
        case 74:
            notCliped = true;
            break;
        case 75:
            anInt3855 = stream.readUnsignedByte();
            break;
        case 77:
        case 92: {
            // Config-driven transformation: config (varbit/varp) ids plus the
            // list of object ids this object can morph into. Opcode 92
            // additionally carries a trailing default object id.
            configFileId = stream.readUnsignedShort();
            if (configFileId == 65535)
                configFileId = -1;
            configId = stream.readUnsignedShort();
            if (configId == 65535)
                configId = -1;
            int defaultObjectId = -1;
            if (opcode == 92)
                defaultObjectId = stream.readBigSmart();
            int lastIndex = stream.readUnsignedByte();
            toObjectIds = new int[lastIndex + 2];
            for (int i = 0; i <= lastIndex; i++)
                toObjectIds[i] = stream.readBigSmart();
            toObjectIds[lastIndex + 1] = defaultObjectId;
            break;
        }
        case 78:
            anInt3860 = stream.readUnsignedShort();
            anInt3904 = stream.readUnsignedByte();
            break;
        case 79: {
            anInt3900 = stream.readUnsignedShort();
            anInt3905 = stream.readUnsignedShort();
            anInt3904 = stream.readUnsignedByte();
            int count = stream.readUnsignedByte();
            anIntArray3859 = new int[count];
            for (int i = 0; i < count; i++)
                anIntArray3859[i] = stream.readUnsignedShort();
            break;
        }
        case 81:
            aByte3912 = (byte) 2;
            anInt3882 = 256 * stream.readUnsignedByte();
            break;
        case 82:
            aBoolean3891 = true;
            break;
        case 88:
            aBoolean3853 = false;
            break;
        case 89:
            aBoolean3895 = false;
            break;
        case 90:
            aBoolean3870 = true;
            break;
        case 91:
            aBoolean3873 = true;
            break;
        case 93:
            aByte3912 = (byte) 3;
            anInt3882 = stream.readUnsignedShort();
            break;
        case 94:
            aByte3912 = (byte) 4;
            break;
        case 95:
            aByte3912 = (byte) 5;
            anInt3882 = stream.readShort();
            break;
        case 96:
            aBoolean3924 = true;
            break;
        case 97:
            aBoolean3866 = true;
            break;
        case 98:
            aBoolean3923 = true;
            break;
        case 99:
            anInt3857 = stream.readUnsignedByte();
            anInt3835 = stream.readUnsignedShort();
            break;
        case 100:
            anInt3844 = stream.readUnsignedByte();
            anInt3913 = stream.readUnsignedShort();
            break;
        case 101:
            anInt3850 = stream.readUnsignedByte();
            break;
        case 102:
            anInt3838 = stream.readUnsignedShort();
            break;
        case 103:
            thirdInt = 0;
            break;
        case 104:
            anInt3865 = stream.readUnsignedByte();
            break;
        case 105:
            aBoolean3906 = true;
            break;
        case 106: {
            // Weighted animation list: (id, weight) pairs; anInt3881
            // accumulates the total weight.
            int count = stream.readUnsignedByte();
            anIntArray3869 = new int[count];
            anIntArray3833 = new int[count];
            for (int i = 0; i < count; i++) {
                anIntArray3833[i] = stream.readBigSmart();
                int weight = stream.readUnsignedByte();
                anIntArray3869[i] = weight;
                anInt3881 += weight;
            }
            break;
        }
        case 107:
            anInt3851 = stream.readUnsignedShort();
            break;
        case 150:
        case 151:
        case 152:
        case 153:
        case 154:
            options[opcode - 150] = stream.readString();
            break;
        case 160: {
            int count = stream.readUnsignedByte();
            anIntArray3908 = new int[count];
            for (int i = 0; i < count; i++)
                anIntArray3908[i] = stream.readUnsignedShort();
            break;
        }
        case 162:
            aByte3912 = (byte) 3;
            anInt3882 = stream.readInt();
            break;
        case 163:
            aByte3847 = (byte) stream.readByte();
            aByte3849 = (byte) stream.readByte();
            aByte3837 = (byte) stream.readByte();
            aByte3914 = (byte) stream.readByte();
            break;
        case 164:
            anInt3834 = stream.readShort();
            break;
        case 165:
            anInt3875 = stream.readShort();
            break;
        case 166:
            anInt3877 = stream.readShort();
            break;
        case 167:
            anInt3921 = stream.readUnsignedShort();
            break;
        case 168:
            aBoolean3894 = true;
            break;
        case 169:
            aBoolean3845 = true;
            break;
        case 170:
            stream.readUnsignedSmart(); // payload currently unused
            break;
        case 171:
            stream.readUnsignedSmart(); // payload currently unused
            break;
        case 173:
            stream.readUnsignedShort(); // payload currently unused
            stream.readUnsignedShort();
            break;
        case 177: // flag opcode, no payload, currently unused
            break;
        case 178:
            stream.readUnsignedByte(); // payload currently unused
            break;
        case 189: // flag opcode ("bloom"), no payload, currently unused
            break;
        case 249: {
            // Arbitrary key/value parameters: 24-bit int key; the value is a
            // string or an int depending on the leading flag byte.
            int length = stream.readUnsignedByte();
            if (parameters == null)
                parameters = new HashMap<Integer, Object>(length);
            for (int i = 0; i < length; i++) {
                boolean stringValue = stream.readUnsignedByte() == 1;
                int key = stream.read24BitInt();
                if (stringValue)
                    parameters.put(key, stream.readString());
                else
                    parameters.put(key, stream.readInt());
            }
            break;
        }
        default:
            // Unhandled opcodes have no payload; decoding simply continues
            // with the next opcode byte.
            break;
        }
    }

    /** Skips over a model-id table without storing it (see opcode 5). */
    private void skipReadModelIds(InputStream stream) {
        int length = stream.readUnsignedByte();
        for (int index = 0; index < length; index++) {
            stream.skip(1);
            int length2 = stream.readUnsignedByte();
            for (int i = 0; i < length2; i++)
                stream.readBigSmart();
        }
    }

    /** Reads opcodes until the 0 terminator. */
    private void readValueLoop(InputStream stream) {
        for (;;) {
            int opcode = stream.readUnsignedByte();
            if (opcode == 0)
                break;
            readValues(stream, opcode);
        }
    }

    /** Initializes every field to the client's default values. */
    private ObjectDefinitions() {
        anInt3835 = -1;
        anInt3860 = -1;
        configFileId = -1;
        aBoolean3866 = false;
        anInt3851 = -1;
        anInt3865 = 255;
        aBoolean3845 = false;
        aBoolean3867 = false;
        anInt3850 = 0;
        anInt3844 = -1;
        anInt3881 = 0;
        anInt3857 = -1;
        aBoolean3872 = true;
        anInt3882 = -1;
        anInt3834 = 0;
        options = new String[5];
        anInt3875 = 0;
        aBoolean3839 = false;
        anIntArray3869 = null;
        sizeY = 1;
        thirdInt = -1;
        anInt3883 = 0;
        aBoolean3895 = true;
        anInt3840 = 0;
        aBoolean3870 = false;
        anInt3889 = 0;
        aBoolean3853 = true;
        secondBool = false;
        clipType = 2;
        projectileCliped = true;
        notCliped = false;
        anInt3855 = -1;
        anInt3878 = 0;
        anInt3904 = 0;
        sizeX = 1;
        objectAnimation = -1;
        aBoolean3891 = false;
        anInt3905 = 0;
        name = "null";
        anInt3913 = -1;
        aBoolean3906 = false;
        aBoolean3873 = false;
        aByte3914 = (byte) 0;
        anInt3915 = 0;
        anInt3900 = 0;
        secondInt = -1;
        aBoolean3894 = false;
        aByte3912 = (byte) 0;
        anInt3921 = 0;
        anInt3902 = 128;
        configId = -1;
        anInt3877 = 0;
        anInt3925 = 0;
        anInt3892 = 64;
        aBoolean3923 = false;
        aBoolean3924 = false;
        anInt3841 = 128;
        anInt3917 = 128;
    }

    /**
     * Post-decode fixups: derives secondInt and anInt3855 from the decoded
     * data when the file did not set them explicitly.
     */
    final void method3287() {
        if (secondInt == -1) {
            secondInt = 0;
            if (aByteArray3899 != null && aByteArray3899.length == 1
                    && aByteArray3899[0] == 10)
                secondInt = 1;
            // Any defined right-click option also makes the object interactable.
            for (int slot = 0; slot < 5; slot++) {
                if (options[slot] != null) {
                    secondInt = 1;
                    break;
                }
            }
        }
        if (anInt3855 == -1)
            anInt3855 = clipType != 0 ? 1 : 0;
    }

    /**
     * Archive inside index 16 that holds the given object id. The decompiler
     * emitted {@code id >>> -1135990488}; Java masks an int shift distance to
     * its low five bits (JLS 15.19) and {@code -1135990488 & 31 == 8}, so
     * this is simply an unsigned shift by 8 — 256 definitions per archive,
     * with {@code id & 0xff} selecting the file within it.
     */
    private static int getArchiveId(int id) {
        return id >>> 8;
    }

    /**
     * Returns the definition for {@code id}, decoding and caching it on
     * first use. Missing cache data yields a default-initialized definition.
     */
    public static ObjectDefinitions getObjectDefinitions(int id) {
        ObjectDefinitions def = objectDefinitions.get(id);
        if (def == null) {
            def = new ObjectDefinitions();
            def.id = id;
            byte[] data = Cache.STORE.getIndexes()[16].getFile(
                    getArchiveId(id), id & 0xff);
            if (data != null)
                def.readValueLoop(new InputStream(data));
            def.method3287();
            // Server-side overrides: bank booths, counters and dungeoneering
            // doors are always clipped (the two original branches were
            // identical, so they are merged here).
            boolean forceClipped = (def.name != null
                    && (def.name.equalsIgnoreCase("bank booth")
                            || def.name.equalsIgnoreCase("counter")))
                    || DungeonUtils.isDoor(id);
            if (forceClipped) {
                def.notCliped = false;
                def.projectileCliped = true;
                if (def.clipType == 0)
                    def.clipType = 1;
            }
            if (def.notCliped) {
                def.projectileCliped = false;
                def.clipType = 0;
            }
            // Two threads may race to decode the same id; both produce the
            // same result, so the last put harmlessly wins.
            objectDefinitions.put(id, def);
        }
        return def;
    }

    public int getClipType() {
        return clipType;
    }

    public boolean isProjectileCliped() {
        return projectileCliped;
    }

    public int getSizeX() {
        return sizeX;
    }

    public int getSizeY() {
        return sizeY;
    }

    /** Drops every cached definition (e.g. after a cache update). */
    public static void clearObjectDefinitions() {
        objectDefinitions.clear();
    }

    /**
     * Prints all non-static fields of this definition, for debugging.
     */
    public void printFields() {
        for (Field field : getClass().getDeclaredFields()) {
            if ((field.getModifiers() & 8) != 0) { // 8 == Modifier.STATIC
                continue;
            }
            try {
                System.out.println(field.getName() + ": " + getValue(field));
            } catch (Throwable e) {
                e.printStackTrace();
            }
        }
        System.out.println("-- end of " + getClass().getSimpleName() + " fields --");
    }

    /** Formats a field's value, expanding arrays into readable form. */
    private Object getValue(Field field) throws Throwable {
        field.setAccessible(true);
        Class<?> type = field.getType();
        if (type == int[][].class) {
            // deepToString expands the nested arrays instead of printing
            // their object references (Arrays.toString would not).
            return Arrays.deepToString((int[][]) field.get(this));
        } else if (type == int[].class) {
            return Arrays.toString((int[]) field.get(this));
        } else if (type == byte[].class) {
            return Arrays.toString((byte[]) field.get(this));
        } else if (type == short[].class) {
            return Arrays.toString((short[]) field.get(this));
        } else if (type == double[].class) {
            return Arrays.toString((double[]) field.get(this));
        } else if (type == float[].class) {
            return Arrays.toString((float[]) field.get(this));
        } else if (type == Object[].class) {
            return Arrays.toString((Object[]) field.get(this));
        }
        return field.get(this);
    }
}
| |
/*
* Copyright (C) 2011 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.googlecode.eyesfree.textdetect;
import android.os.Environment;
import com.googlecode.leptonica.android.Pix;
import com.googlecode.leptonica.android.Pixa;
/**
* @author alanv@google.com (Alan Viverette)
*/
public class HydrogenTextDetector {
    // Opaque pointer to the native HydrogenTextDetector instance; created in
    // the constructor and released in finalize().
    private final long mNative;

    static {
        // Leptonica is loaded first; the hydrogen library links against it.
        System.loadLibrary("lept");
        System.loadLibrary("hydrogen");
    }

    // Current tuning parameters; pushed to the native side via setParameters().
    private Parameters mParams;

    public HydrogenTextDetector() {
        mNative = nativeConstructor();
        mParams = new Parameters();
        setParameters(mParams);
    }

    public void setSize(int width, int height) {
        // TODO(alanv): Set up native buffers
    }

    @Override
    protected void finalize() throws Throwable {
        // Releases the native peer; mirrors nativeConstructor() in the ctor.
        try {
            nativeDestructor(mNative);
        } finally {
            super.finalize();
        }
    }

    /**
     * Replaces the current parameter set and immediately propagates it to the
     * native detector.
     */
    public void setParameters(Parameters params) {
        mParams = params;
        nativeSetParameters(mNative, mParams);
    }

    public Parameters getParameters() {
        return mParams;
    }

    /**
     * Returns the detected text areas as a Pixa sized to the source image,
     * or null when the native side has no result.
     */
    public Pixa getTextAreas() {
        long nativePixa = nativeGetTextAreas(mNative);
        if (nativePixa == 0) {
            // 0 is the native "no result" sentinel (NULL pointer).
            return null;
        }
        int width = nativeGetSourceWidth(mNative);
        int height = nativeGetSourceHeight(mNative);
        return new Pixa(nativePixa, width, height);
    }

    public float getSkewAngle() {
        return nativeGetSkewAngle(mNative);
    }

    // One confidence value per detected text area.
    public float[] getTextConfs() {
        return nativeGetTextConfs(mNative);
    }

    /**
     * Returns the current source image, or null when none has been set.
     */
    public Pix getSourceImage() {
        long nativePix = nativeGetSourceImage(mNative);
        if (nativePix == 0) {
            return null;
        }
        return new Pix(nativePix);
    }

    /**
     * Sets the text detection source image to be a clone of the supplied source
     * image. The supplied image may be recycled after calling this method.
     *
     * @param pixs The source image on which to perform text detection.
     */
    public void setSourceImage(Pix pixs) {
        // NOTE(review): the native call returns an int status that is
        // ignored here — confirm whether failures should be surfaced.
        nativeSetSourceImage(mNative, pixs.getNativePix());
    }

    public void detectText() {
        nativeDetectText(mNative);
    }

    public void clear() {
        nativeClear(mNative);
    }

    // ******************
    // * PUBLIC CLASSES *
    // ******************

    /**
     * Tuning knobs for the native detector.
     *
     * NOTE(review): field names appear to be read by the native side in
     * nativeSetParameters — confirm against the JNI code before renaming
     * any of them.
     */
    public class Parameters {
        public boolean debug;

        public String out_dir;

        // Edge-based thresholding
        public int edge_tile_x;

        public int edge_tile_y;

        public int edge_thresh;

        public int edge_avg_thresh;

        // Skew angle correction
        public boolean skew_enabled;

        public float skew_min_angle;

        public float skew_sweep_range;

        public float skew_sweep_delta;

        public int skew_sweep_reduction;

        public int skew_search_reduction;

        public float skew_search_min_delta;

        // Singleton filter
        public float single_min_aspect;

        public float single_max_aspect;

        public int single_min_area;

        public float single_min_density;

        // Quick pair filter
        public float pair_h_ratio;

        public float pair_d_ratio;

        public float pair_h_dist_ratio;

        public float pair_v_dist_ratio;

        public float pair_h_shared;

        // Cluster pair filter
        public int cluster_width_spacing;

        public float cluster_shared_edge;

        public float cluster_h_ratio;

        // Finalized cluster filter
        public int cluster_min_blobs;

        public float cluster_min_aspect;

        public float cluster_min_fdr;

        public int cluster_min_edge;

        public int cluster_min_edge_avg;

        // Defaults below match the native detector's expectations.
        public Parameters() {
            debug = false;
            out_dir = Environment.getExternalStorageDirectory().toString();

            // Edge-based thresholding
            edge_tile_x = 32;
            edge_tile_y = 64;
            edge_thresh = 64;
            edge_avg_thresh = 4;

            // Skew angle correction
            skew_enabled = true;
            skew_min_angle = 1.0f;
            skew_sweep_range = 30.0f;
            skew_sweep_delta = 5.0f;
            skew_sweep_reduction = 8;
            skew_search_reduction = 4;
            skew_search_min_delta = 0.01f;

            // Singleton filter
            single_min_aspect = 0.1f;
            single_max_aspect = 4.0f;
            single_min_area = 4;
            single_min_density = 0.2f;

            // Quick pair filter
            pair_h_ratio = 1.0f;
            pair_d_ratio = 1.5f;
            pair_h_dist_ratio = 2.0f;
            pair_v_dist_ratio = 0.25f;
            pair_h_shared = 0.25f;

            // Cluster pair filter
            cluster_width_spacing = 2;
            cluster_shared_edge = 0.5f;
            cluster_h_ratio = 1.0f;

            // Finalized cluster filter
            cluster_min_blobs = 5;
            cluster_min_aspect = 2;
            cluster_min_fdr = 2.5f;
            cluster_min_edge = 32;
            cluster_min_edge_avg = 1;
        }
    }

    // ******************
    // * NATIVE METHODS *
    // ******************

    private static native long nativeConstructor();

    private static native void nativeDestructor(long nativePtr);

    private static native void nativeSetParameters(long nativePtr, Parameters params);

    private static native long nativeGetTextAreas(long nativePtr);

    private static native float nativeGetSkewAngle(long nativePtr);

    private static native int nativeGetSourceWidth(long nativePtr);

    private static native int nativeGetSourceHeight(long nativePtr);

    private static native float[] nativeGetTextConfs(long nativePtr);

    private static native long nativeGetSourceImage(long nativePtr);

    private static native int nativeSetSourceImage(long nativePtr, long nativePix);

    private static native void nativeDetectText(long nativePtr);

    private static native void nativeClear(long nativePtr);
}
| |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.ThreadInterruptedException;
import org.apache.lucene.util.CollectionUtil;
import java.io.IOException;
import java.util.List;
import java.util.ArrayList;
import java.util.Comparator;
/** A {@link MergeScheduler} that runs each merge using a
* separate thread.
*
* <p>Specify the max number of threads that may run at
* once with {@link #setMaxThreadCount}.</p>
*
* <p>Separately specify the maximum number of simultaneous
* merges with {@link #setMaxMergeCount}. If the number of
* merges exceeds the max number of threads then the
* largest merges are paused until one of the smaller
* merges completes.</p>
*
* <p>If more than {@link #getMaxMergeCount} merges are
* requested then this class will forcefully throttle the
* incoming threads by pausing until one or more merges
* complete.</p>
*/
public class ConcurrentMergeScheduler extends MergeScheduler {

  // Base priority merge threads run at; -1 means "not yet initialized"
  // (lazily derived from the first thread that calls merge).
  private int mergeThreadPriority = -1;

  // All spawned merge threads; dead threads are pruned in updateMergeThreads().
  protected List<MergeThread> mergeThreads = new ArrayList<MergeThread>();

  // Max number of merge threads allowed to be running at
  // once.  When there are more merges than this, we
  // forcefully pause the larger ones, letting the smaller
  // ones run, up until maxMergeCount merges at which point
  // we forcefully pause incoming threads (that presumably
  // are the ones causing so much merging).  We dynamically
  // default this from 1 to 3, depending on how many cores
  // you have:
  private int maxThreadCount = Math.max(1, Math.min(3, Runtime.getRuntime().availableProcessors()/2));

  // Max number of merges we accept before forcefully
  // throttling the incoming threads
  private int maxMergeCount = maxThreadCount+2;

  // NOTE(review): dir, writer and mergeThreadCount are assigned outside
  // this excerpt (presumably when merges are scheduled) — confirm in merge().
  protected Directory dir;

  private volatile boolean closed;
  protected IndexWriter writer;
  protected int mergeThreadCount;
  /**
   * Creates a scheduler with the default thread/merge limits. Registration
   * in {@code allInstances} happens only when that test hook is enabled.
   */
  public ConcurrentMergeScheduler() {
    if (allInstances != null) {
      // Only for testing
      addMyself();
    }
  }
/** Sets the max # simultaneous merge threads that should
* be running at once. This must be <= {@link
* #setMaxMergeCount}. */
public void setMaxThreadCount(int count) {
if (count < 1) {
throw new IllegalArgumentException("count should be at least 1");
}
if (count > maxMergeCount) {
throw new IllegalArgumentException("count should be <= maxMergeCount (= " + maxMergeCount + ")");
}
maxThreadCount = count;
}
  /** @see #setMaxThreadCount(int) */
  public int getMaxThreadCount() {
    return maxThreadCount;
  }
/** Sets the max # simultaneous merges that are allowed.
* If a merge is necessary yet we already have this many
* threads running, the incoming thread (that is calling
* add/updateDocument) will block until a merge thread
* has completed. Note that we will only run the
* smallest {@link #setMaxThreadCount} merges at a time. */
public void setMaxMergeCount(int count) {
if (count < 1) {
throw new IllegalArgumentException("count should be at least 1");
}
if (count < maxThreadCount) {
throw new IllegalArgumentException("count should be >= maxThreadCount (= " + maxThreadCount + ")");
}
maxMergeCount = count;
}
  /** See {@link #setMaxMergeCount}. */
  public int getMaxMergeCount() {
    return maxMergeCount;
  }
  /** Return the priority that merge threads run at.  By
   *  default the priority is 1 plus the priority of (ie,
   *  slightly higher priority than) the first thread that
   *  calls merge. */
  public synchronized int getMergeThreadPriority() {
    // Lazily derives the priority from the calling thread on first use.
    initMergeThreadPriority();
    return mergeThreadPriority;
  }
/** Set the base priority that merge threads run at.
* Note that CMS may increase priority of some merge
* threads beyond this base priority. It's best not to
* set this any higher than
* Thread.MAX_PRIORITY-maxThreadCount, so that CMS has
* room to set relative priority among threads. */
public synchronized void setMergeThreadPriority(int pri) {
if (pri > Thread.MAX_PRIORITY || pri < Thread.MIN_PRIORITY)
throw new IllegalArgumentException("priority must be in range " + Thread.MIN_PRIORITY + " .. " + Thread.MAX_PRIORITY + " inclusive");
mergeThreadPriority = pri;
updateMergeThreads();
}
// Larger merges come first
protected static final Comparator<MergeThread> compareByMergeDocCount = new Comparator<MergeThread>() {
public int compare(MergeThread t1, MergeThread t2) {
final MergePolicy.OneMerge m1 = t1.getCurrentMerge();
final MergePolicy.OneMerge m2 = t2.getCurrentMerge();
final int c1 = m1 == null ? Integer.MAX_VALUE : m1.totalDocCount;
final int c2 = m2 == null ? Integer.MAX_VALUE : m2.totalDocCount;
return c2 - c1;
}
};
  /**
   * Called whenever the running merges have changed, to pause & unpause
   * threads. This method sorts the merge threads by their merge size in
   * descending order and then pauses/unpauses threads from first to last --
   * that way, smaller merges are guaranteed to run before larger ones.
   */
  protected synchronized void updateMergeThreads() {

    // Only look at threads that are alive & not in the
    // process of stopping (ie have an active merge):
    final List<MergeThread> activeMerges = new ArrayList<MergeThread>();

    int threadIdx = 0;
    while (threadIdx < mergeThreads.size()) {
      final MergeThread mergeThread = mergeThreads.get(threadIdx);
      if (!mergeThread.isAlive()) {
        // Prune any dead threads; note the index is intentionally not
        // advanced after the remove, so the shifted element is re-checked.
        mergeThreads.remove(threadIdx);
        continue;
      }
      if (mergeThread.getCurrentMerge() != null) {
        activeMerges.add(mergeThread);
      }
      threadIdx++;
    }

    // Sort the merge threads in descending order (largest merge first).
    CollectionUtil.mergeSort(activeMerges, compareByMergeDocCount);

    int pri = mergeThreadPriority;
    final int activeMergeCount = activeMerges.size();
    for (threadIdx=0;threadIdx<activeMergeCount;threadIdx++) {
      final MergeThread mergeThread = activeMerges.get(threadIdx);
      final MergePolicy.OneMerge merge = mergeThread.getCurrentMerge();
      if (merge == null) {
        continue;
      }
      // pause the thread if maxThreadCount is smaller than the number of merge threads.
      final boolean doPause = threadIdx < activeMergeCount - maxThreadCount;
      if (verbose()) {
        if (doPause != merge.getPause()) {
          if (doPause) {
            message("pause thread " + mergeThread.getName());
          } else {
            message("unpause thread " + mergeThread.getName());
          }
        }
      }
      if (doPause != merge.getPause()) {
        merge.setPause(doPause);
      }
      if (!doPause) {
        if (verbose()) {
          message("set priority of merge thread " + mergeThread.getName() + " to " + pri);
        }
        mergeThread.setThreadPriority(pri);
        // Each successively smaller (later) merge gets a slightly higher
        // priority, capped at Thread.MAX_PRIORITY.
        pri = Math.min(Thread.MAX_PRIORITY, 1+pri);
      }
    }
  }
/**
 * Returns true if verbosing is enabled. This method is usually used in
 * conjunction with {@link #message(String)}, like that:
 *
 * <pre>
 * if (verbose()) {
 *   message("your message");
 * }
 * </pre>
 */
protected boolean verbose() {
  // writer is null until merge() has been called at least once.
  return writer != null && writer.verbose();
}

/**
 * Outputs the given message - this method assumes {@link #verbose()} was
 * called and returned true.
 */
protected void message(String message) {
  writer.message("CMS: " + message);
}

/** Lazily initializes the merge-thread priority to one above the calling
 *  thread's priority, clamped to Thread.MAX_PRIORITY. -1 is the
 *  "not yet initialized" sentinel. */
private synchronized void initMergeThreadPriority() {
  if (mergeThreadPriority == -1) {
    // Default to slightly higher priority than our
    // calling thread
    mergeThreadPriority = 1+Thread.currentThread().getPriority();
    if (mergeThreadPriority > Thread.MAX_PRIORITY)
      mergeThreadPriority = Thread.MAX_PRIORITY;
  }
}
/** Closes the scheduler: marks it closed, then waits for all running
 *  merge threads to complete. */
@Override
public void close() {
  closed = true;
  sync();
}

/** Wait for any running merge threads to finish */
public void sync() {
  while (true) {
    MergeThread toSync = null;
    // Pick one live thread while holding the monitor...
    synchronized (this) {
      for (MergeThread t : mergeThreads) {
        if (t.isAlive()) {
          toSync = t;
          break;
        }
      }
    }
    // ...but join() OUTSIDE the monitor, so merge threads can still
    // acquire it (eg in updateMergeThreads) and actually finish.
    if (toSync != null) {
      try {
        toSync.join();
      } catch (InterruptedException ie) {
        throw new ThreadInterruptedException(ie);
      }
    } else {
      // No live threads remained on this pass: done.
      break;
    }
  }
}

/**
 * Returns the number of merge threads that are alive. Note that this number
 * is ≤ {@link #mergeThreads} size.
 */
protected synchronized int mergeThreadCount() {
  int count = 0;
  for (MergeThread mt : mergeThreads) {
    // Only count threads still carrying an unfinished merge.
    if (mt.isAlive() && mt.getCurrentMerge() != null) {
      count++;
    }
  }
  return count;
}
/** Pulls pending merges from the writer's queue and spawns one merge
 *  thread per merge, stalling the calling thread while too many merges
 *  are already running.
 *
 *  @param writer the IndexWriter whose pending merges are executed
 *  @throws IOException if merge initialization fails */
@Override
public void merge(IndexWriter writer) throws IOException {

  assert !Thread.holdsLock(writer);

  this.writer = writer;

  initMergeThreadPriority();

  dir = writer.getDirectory();

  // First, quickly run through the newly proposed merges
  // and add any orthogonal merges (ie a merge not
  // involving segments already pending to be merged) to
  // the queue. If we are way behind on merging, many of
  // these newly proposed merges will likely already be
  // registered.
  if (verbose()) {
    message("now merge");
    message(" index: " + writer.segString());
  }

  // Iterate, pulling from the IndexWriter's queue of
  // pending merges, until it's empty:
  while (true) {

    synchronized(this) {
      long startStallTime = 0;
      // Stall the caller until a merge slot frees up; merge threads
      // notifyAll() on this scheduler when they finish.
      while (mergeThreadCount() >= 1+maxMergeCount) {
        startStallTime = System.currentTimeMillis();
        if (verbose()) {
          message(" too many merges; stalling...");
        }
        try {
          wait();
        } catch (InterruptedException ie) {
          throw new ThreadInterruptedException(ie);
        }
      }

      if (verbose()) {
        if (startStallTime != 0) {
          message(" stalled for " + (System.currentTimeMillis()-startStallTime) + " msec");
        }
      }
    }

    // TODO: we could be careful about which merges to do in
    // the BG (eg maybe the "biggest" ones) vs FG, which
    // merges to do first (the easiest ones?), etc.
    MergePolicy.OneMerge merge = writer.getNextMerge();
    if (merge == null) {
      if (verbose())
        message(" no more merges pending; now return");
      return;
    }

    // We do this w/ the primary thread to keep
    // deterministic assignment of segment names
    writer.mergeInit(merge);

    boolean success = false;
    try {
      synchronized(this) {
        // Fix: message() assumes verbose() was checked first (it
        // dereferences writer unconditionally); guard it like every
        // other call site, which also avoids building the segString
        // when not verbose.
        if (verbose()) {
          message(" consider merge " + merge.segString(dir));
        }

        // OK to spawn a new merge thread to handle this
        // merge:
        final MergeThread merger = getMergeThread(writer, merge);
        mergeThreads.add(merger);
        if (verbose()) {
          message(" launch new thread [" + merger.getName() + "]");
        }

        merger.start();

        // Must call this after starting the thread else
        // the new thread is removed from mergeThreads
        // (since it's not alive yet):
        updateMergeThreads();

        success = true;
      }
    } finally {
      if (!success) {
        // Spawning the thread failed; let the writer release the
        // merge's registered state.
        writer.mergeFinish(merge);
      }
    }
  }
}
/** Does the actual merge, by calling {@link IndexWriter#merge} */
protected void doMerge(MergePolicy.OneMerge merge) throws IOException {
  writer.merge(merge);
}

/** Create and return a new MergeThread */
protected synchronized MergeThread getMergeThread(IndexWriter writer, MergePolicy.OneMerge merge) throws IOException {
  final MergeThread thread = new MergeThread(writer, merge);
  thread.setThreadPriority(mergeThreadPriority);
  // Daemon thread: a stray merge must never block JVM shutdown.
  thread.setDaemon(true);
  // NOTE: mergeThreadCount here is a counter field (thread naming),
  // distinct from the mergeThreadCount() method above.
  thread.setName("Lucene Merge Thread #" + mergeThreadCount++);
  return thread;
}
/** Runs a single merge (the one it was constructed with), then keeps
 *  pulling further pending merges from the writer until none remain. */
protected class MergeThread extends Thread {

  IndexWriter tWriter;
  MergePolicy.OneMerge startMerge;
  MergePolicy.OneMerge runningMerge;
  // Set once in the finally block of run(); volatile so getCurrentMerge()
  // callers see the transition without synchronization on done itself.
  private volatile boolean done;

  public MergeThread(IndexWriter writer, MergePolicy.OneMerge startMerge) throws IOException {
    this.tWriter = writer;
    this.startMerge = startMerge;
  }

  public synchronized void setRunningMerge(MergePolicy.OneMerge merge) {
    runningMerge = merge;
  }

  public synchronized MergePolicy.OneMerge getRunningMerge() {
    return runningMerge;
  }

  /** Returns the merge this thread is (or is about to be) working on,
   *  or null once the thread has finished. */
  public synchronized MergePolicy.OneMerge getCurrentMerge() {
    if (done) {
      return null;
    } else if (runningMerge != null) {
      return runningMerge;
    } else {
      // Thread started but has not yet called setRunningMerge.
      return startMerge;
    }
  }

  public void setThreadPriority(int pri) {
    try {
      setPriority(pri);
    } catch (NullPointerException npe) {
      // Strangely, Sun's JDK 1.5 on Linux sometimes
      // throws NPE out of here...
    } catch (SecurityException se) {
      // Ignore this because we will still run fine with
      // normal thread priority
    }
  }

  @Override
  public void run() {

    // First time through the while loop we do the merge
    // that we were started with:
    MergePolicy.OneMerge merge = this.startMerge;

    try {

      if (verbose())
        message(" merge thread: start");

      while(true) {
        setRunningMerge(merge);
        doMerge(merge);

        // Subsequent times through the loop we do any new
        // merge that writer says is necessary:
        merge = tWriter.getNextMerge();
        if (merge != null) {
          tWriter.mergeInit(merge);
          updateMergeThreads();
          if (verbose())
            message(" merge thread: do another merge " + merge.segString(dir));
        } else {
          break;
        }
      }

      if (verbose())
        message(" merge thread: done");

    } catch (Throwable exc) {

      // Ignore the exception if it was due to abort:
      if (!(exc instanceof MergePolicy.MergeAbortedException)) {
        if (!suppressExceptions) {
          // suppressExceptions is normally only set during
          // testing.
          anyExceptions = true;
          handleMergeException(exc);
        }
      }

    } finally {
      done = true;
      // Wake up any thread stalled in merge() waiting for a slot, and
      // re-balance pause/priority now that this thread is finished.
      synchronized(ConcurrentMergeScheduler.this) {
        updateMergeThreads();
        ConcurrentMergeScheduler.this.notifyAll();
      }
    }
  }

  @Override
  public String toString() {
    MergePolicy.OneMerge merge = getRunningMerge();
    if (merge == null)
      merge = startMerge;
    return "merge thread: " + merge.segString(dir);
  }
}
/** Called when an exception is hit in a background merge
 *  thread. Default behavior: sleep briefly, then rethrow the
 *  exception wrapped in a MergeException. */
protected void handleMergeException(Throwable exc) {
  try {
    // When an exception is hit during merge, IndexWriter
    // removes any partial files and then allows another
    // merge to run. If whatever caused the error is not
    // transient then the exception will keep happening,
    // so, we sleep here to avoid saturating CPU in such
    // cases:
    Thread.sleep(250);
  } catch (InterruptedException ie) {
    throw new ThreadInterruptedException(ie);
  }
  // Preserve the original cause for the caller.
  throw new MergePolicy.MergeException(exc, dir);
}
// Set by merge threads when a non-abort exception was handled; read and
// cleared by the test hooks below (guarded by allInstances' monitor).
static boolean anyExceptions = false;

/** Used for testing */
public static boolean anyUnhandledExceptions() {
  if (allInstances == null) {
    throw new RuntimeException("setTestMode() was not called; often this is because your test case's setUp method fails to call super.setUp in LuceneTestCase");
  }
  synchronized(allInstances) {
    final int count = allInstances.size();
    // Make sure all outstanding threads are done so we see
    // any exceptions they may produce:
    for(int i=0;i<count;i++)
      allInstances.get(i).sync();
    boolean v = anyExceptions;
    anyExceptions = false;
    return v;
  }
}

/** Used for testing */
public static void clearUnhandledExceptions() {
  // Fix: fail with the same descriptive error as anyUnhandledExceptions()
  // instead of an opaque NullPointerException when test mode was never
  // enabled via setTestMode().
  if (allInstances == null) {
    throw new RuntimeException("setTestMode() was not called; often this is because your test case's setUp method fails to call super.setUp in LuceneTestCase");
  }
  synchronized(allInstances) {
    anyExceptions = false;
  }
}
/** Used for testing: registers this scheduler in the global list,
 *  pruning instances that are closed and have no live threads. */
private void addMyself() {
  synchronized(allInstances) {
    final int size = allInstances.size();
    int upto = 0;
    // Compact the list in place, keeping only instances that may still
    // run or spawn merge threads.
    for(int i=0;i<size;i++) {
      final ConcurrentMergeScheduler other = allInstances.get(i);
      if (!(other.closed && 0 == other.mergeThreadCount()))
        // Keep this one for now: it still has threads or
        // may spawn new threads
        allInstances.set(upto++, other);
    }
    allInstances.subList(upto, allInstances.size()).clear();
    allInstances.add(this);
  }
}

// When true (tests only), exceptions in merge threads are silently
// swallowed instead of being recorded and rethrown.
private boolean suppressExceptions;

/** Used for testing */
void setSuppressExceptions() {
  suppressExceptions = true;
}

/** Used for testing */
void clearSuppressExceptions() {
  suppressExceptions = false;
}

/** Used for testing; null until {@link #setTestMode} is called. */
private static List<ConcurrentMergeScheduler> allInstances;

/** @deprecated this test mode code will be removed in a future release */
@Deprecated
public static void setTestMode() {
  allInstances = new ArrayList<ConcurrentMergeScheduler>();
}
}
| |
/*
* Copyright (C) 2015-2016 Lukoh Nam, goForer
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.goforer.base.ui.activity;
import android.app.Activity;
import android.app.ProgressDialog;
import android.content.Context;
import android.os.Bundle;
import android.support.annotation.IdRes;
import android.support.v4.app.Fragment;
import android.support.v4.app.FragmentManager;
import android.support.v4.app.FragmentTransaction;
import android.support.v7.app.ActionBar;
import android.support.v7.app.AppCompatActivity;
import android.util.AttributeSet;
import android.view.MenuItem;
import android.view.View;
import com.goforer.base.model.event.ActivityStackClearEvent;
import com.goforer.beatery.R;
import com.goforer.beatery.model.event.action.LogoutAction;
import com.goforer.beatery.utillity.ConnectionUtils;
import org.greenrobot.eventbus.EventBus;
import org.greenrobot.eventbus.Subscribe;
import org.greenrobot.eventbus.ThreadMode;
import butterknife.ButterKnife;
/**
* Base class for activities that want to use the support-based {@link AppCompatActivity}
*/
public abstract class BaseActivity extends AppCompatActivity {
  private ProgressDialog mProgressDialog;

  private boolean mIsResumed = false;

  // NOTE: a static reference to an Activity is a leak risk; it is
  // cleared in onDestroy below when it still points at this instance.
  public static Activity mCurrentActivity;

  @Override
  protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);

    EventBus.getDefault().register(this);

    // Play the enter transition only on a fresh launch, not on
    // configuration-change re-creation.
    if (savedInstanceState == null) {
      setEffectIn();
    }

    setActionBar();
    setContentView();
    bindViews();
    // Only populate views when the network is reachable.
    if (ConnectionUtils.INSTANCE.isNetworkAvailable(this)) {
      setViews();
    }
  }

  @Override
  public View onCreateView(String name, Context context, AttributeSet attrs) {
    return super.onCreateView(name, context, attrs);
  }

  @Override
  public View onCreateView(View parent, String name, Context context, AttributeSet attrs) {
    return super.onCreateView(parent, name, context, attrs);
  }

  @Override
  protected void onDestroy() {
    // Fix: release references that outlive this Activity instance.
    // 1) A showing ProgressDialog would otherwise leak its window.
    dismissProgress();
    // 2) The static mCurrentActivity would otherwise pin the destroyed
    //    Activity (and its whole view tree) in memory.
    if (mCurrentActivity == this) {
      mCurrentActivity = null;
    }
    EventBus.getDefault().unregister(this);
    super.onDestroy();
  }

  @Override
  protected void onResume() {
    super.onResume();
    mIsResumed = true;
    mCurrentActivity = this;
  }

  @Override
  protected void onPause() {
    super.onPause();
    mIsResumed = false;
  }

  /**
   * Return true if this activity is resumed
   *
   * @return true if this activity is resumed
   */
  public boolean resumed() {
    return mIsResumed;
  }

  /**
   * Initialize the ActionBar and set options into it.
   *
   * @see ActionBar
   */
  protected void setActionBar() {
    ActionBar actionBar = getSupportActionBar();
    if (actionBar != null) {
      actionBar.setDisplayOptions(ActionBar.DISPLAY_HOME_AS_UP | ActionBar.DISPLAY_SHOW_TITLE);
    }
  }

  /**
   * Set the activity content from a layout resource. The resource will be
   * inflated, adding all top-level views to the activity.
   * <p>
   * All activity must implement this method to get the resource inflated like below example:
   *
   * Example :
   * @@Override
   * public void setContentView() {
   *     setContentView(R.layout.activity_gallery);
   * }
   * </p>
   *
   * @see #setContentView(android.view.View, android.view.ViewGroup.LayoutParams)
   */
  protected abstract void setContentView();

  /**
   * Inject annotated fields and methods in the specified target {@link Activity} for field
   * injection. The current content view is used as the view root.
   *
   * @see ButterKnife#bind(Activity target)
   */
  protected void bindViews() {
    ButterKnife.bind(this);
  }

  /**
   * Initialize all views to set into the activity.
   * <p>
   * The activity which has no Fragment must override this method to set all views
   * into the activity.
   * </p>
   */
  protected void setViews() {
  }

  /**
   * Set the effect when the activity is starting.
   *
   * See {@link Activity#overridePendingTransition(int enterAnim, int exitAnim)}.
   */
  protected void setEffectIn() {
    overridePendingTransition(R.anim.slide_in_from_right, R.anim.scale_down_exit);
  }

  /**
   * Set the effect when the activity is closing.
   *
   * See {@link Activity#overridePendingTransition(int enterAnim, int exitAnim)}.
   */
  protected void setEffectOut() {
    overridePendingTransition(R.anim.scale_up_enter, R.anim.slide_out_to_right);
  }

  @Override
  public void finish() {
    super.finish();
    setEffectOut();
  }

  /**
   * Transact an existing fragment that was added to a container.
   *
   * @param cls the component class that is to be used for BaseActivity
   * @param containerViewId Identifier of the container whose fragment(s) are to be replaced.
   * @param args Bundle of arguments to supply to the fragment
   */
  protected void transactFragment(Class<?> cls, @IdRes int containerViewId, Bundle args) {
    transactFragment(cls.getName(), containerViewId, args);
  }

  /**
   * Transact an existing fragment that was added to a container.
   *
   * @param tag Optional tag name for the fragment
   * @param containerViewId Identifier of the container whose fragment(s) are to be replaced.
   * @param args Bundle of arguments to supply to the fragment
   */
  protected void transactFragment(String tag, @IdRes int containerViewId, Bundle args) {
    FragmentManager fragmentManager = getSupportFragmentManager();
    // Reuse an already-added fragment with this tag if one exists.
    Fragment fragment = fragmentManager.findFragmentByTag(tag);
    if (fragment == null) {
      fragment = Fragment.instantiate(this, tag, args);
    }

    FragmentTransaction ft = fragmentManager.beginTransaction();
    ft.replace(containerViewId, fragment, tag);
    ft.commit();
  }

  /**
   * Return previously set Fragment with given the component class.
   *
   * @param cls The previously set the component class that is to be used for BaseActivity.
   *
   * @return The previously set Fragment
   */
  protected Fragment getFragment(Class<?> cls) {
    FragmentManager fragmentManager = getSupportFragmentManager();
    return fragmentManager.findFragmentByTag(cls.getName());
  }

  /**
   * Return previously set Fragment with given the tag.
   *
   * @param tag The previously set the component class tag that is to be used for BaseActivity.
   *
   * @return The previously set Fragment
   */
  protected Fragment getFragment(String tag) {
    FragmentManager fragmentManager = getSupportFragmentManager();
    return fragmentManager.findFragmentByTag(tag);
  }

  @Override
  public boolean onOptionsItemSelected(MenuItem item) {
    switch (item.getItemId()) {
      case android.R.id.home:
        supportFinishAfterTransition();
        return true;
    }

    return super.onOptionsItemSelected(item);
  }

  /**
   * Show a loading progress with given string
   *
   * @param stringResId the string resource ID
   */
  public void showProgress(int stringResId) {
    if (mProgressDialog == null) {
      mProgressDialog = new ProgressDialog(this);
      mProgressDialog.setCancelable(false);
      mProgressDialog.setCanceledOnTouchOutside(false);
    }

    // Non-positive IDs fall back to the generic loading message.
    if (stringResId <= 0) {
      stringResId = R.string.loading;
    }

    mProgressDialog.setMessage(getString(stringResId));
    if (!mProgressDialog.isShowing()) {
      mProgressDialog.show();
    }
  }

  /**
   * Dismiss a loading progress, removing it from the screen. This method can be invoked safely
   * from any thread.
   */
  public void dismissProgress() {
    if (mProgressDialog != null && mProgressDialog.isShowing()) {
      mProgressDialog.dismiss();
    }
  }

  @Subscribe(threadMode = ThreadMode.MAIN)
  public void onEvent(ActivityStackClearEvent event) {
    finish();
  }

  @Subscribe(threadMode = ThreadMode.MAIN)
  public void onAction(LogoutAction action){
    finish();
  }
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.internal.processors.hadoop.impl;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.CountDownLatch;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.ignite.configuration.HadoopConfiguration;
import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.IgniteKernal;
import org.apache.ignite.internal.processors.hadoop.Hadoop;
import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
import org.apache.ignite.internal.processors.hadoop.HadoopJobStatus;
import org.apache.ignite.internal.util.typedef.internal.U;
import static org.apache.ignite.internal.processors.hadoop.impl.HadoopUtils.createJobInfo;
import static org.apache.ignite.internal.processors.hadoop.state.HadoopJobTrackerSelfTestState.combineExecCnt;
import static org.apache.ignite.internal.processors.hadoop.state.HadoopJobTrackerSelfTestState.latch;
import static org.apache.ignite.internal.processors.hadoop.state.HadoopJobTrackerSelfTestState.mapExecCnt;
import static org.apache.ignite.internal.processors.hadoop.state.HadoopJobTrackerSelfTestState.reduceExecCnt;
/**
* Job tracker self test.
*/
public class HadoopJobTrackerSelfTest extends HadoopAbstractSelfTest {
  /** Output path under the IGFS scheme; a numeric suffix per test keeps runs separate. */
  private static final String PATH_OUTPUT = "/test-out";

  /** Test block count parameter name. */
  private static final int BLOCK_CNT = 10;

  /** {@inheritDoc} */
  @Override protected boolean igfsEnabled() {
    return true;
  }

  /** {@inheritDoc} */
  @Override protected void beforeTestsStarted() throws Exception {
    super.beforeTestsStarted();

    startGrids(gridCount());
  }

  /** {@inheritDoc} */
  @Override protected void afterTestsStopped() throws Exception {
    stopAllGrids();

    super.afterTestsStopped();
  }

  /** {@inheritDoc} */
  @Override protected void beforeTest() throws Exception {
    // Fresh latches per test: tasks block on these until the test
    // explicitly releases each phase (map -> combine -> reduce).
    latch.put("mapAwaitLatch", new CountDownLatch(1));
    latch.put("reduceAwaitLatch", new CountDownLatch(1));
    latch.put("combineAwaitLatch", new CountDownLatch(1));
  }

  /** {@inheritDoc} */
  @Override protected void afterTest() throws Exception {
    // Reset shared execution counters so tests don't observe each other.
    mapExecCnt.set(0);
    combineExecCnt.set(0);
    reduceExecCnt.set(0);
  }

  /** {@inheritDoc} */
  @Override public HadoopConfiguration hadoopConfiguration(String gridName) {
    HadoopConfiguration cfg = super.hadoopConfiguration(gridName);

    cfg.setMapReducePlanner(new HadoopTestRoundRobinMrPlanner());

    // TODO: IGNITE-404: Uncomment when fixed.
    //cfg.setExternalExecution(false);

    return cfg;
  }

  /**
   * Submits a map/reduce job without a combiner and verifies task counts
   * and job status transitions as the phase latches are released.
   *
   * @throws Exception If failed.
   */
  public void testSimpleTaskSubmit() throws Exception {
    try {
      UUID globalId = UUID.randomUUID();

      Job job = Job.getInstance();
      setupFileSystems(job.getConfiguration());

      job.setMapperClass(TestMapper.class);
      job.setReducerClass(TestReducer.class);
      job.setInputFormatClass(InFormat.class);

      FileOutputFormat.setOutputPath(job, new Path(igfsScheme() + PATH_OUTPUT + "1"));

      HadoopJobId jobId = new HadoopJobId(globalId, 1);

      grid(0).hadoop().submit(jobId, createJobInfo(job.getConfiguration()));

      // Mappers are blocked on their latch, so the job must not be done yet.
      checkStatus(jobId, false);

      info("Releasing map latch.");

      latch.get("mapAwaitLatch").countDown();

      checkStatus(jobId, false);

      info("Releasing reduce latch.");

      latch.get("reduceAwaitLatch").countDown();

      checkStatus(jobId, true);

      // One map per input block, no combiner, a single reducer.
      assertEquals(10, mapExecCnt.get());
      assertEquals(0, combineExecCnt.get());
      assertEquals(1, reduceExecCnt.get());
    }
    finally {
      // Safety.
      latch.get("mapAwaitLatch").countDown();
      latch.get("combineAwaitLatch").countDown();
      latch.get("reduceAwaitLatch").countDown();
    }
  }

  /**
   * Submits a job with a per-map combiner and verifies the combiner gate:
   * reducers must not start before the combiner latch is released.
   *
   * @throws Exception If failed.
   */
  public void testTaskWithCombinerPerMap() throws Exception {
    try {
      UUID globalId = UUID.randomUUID();

      Job job = Job.getInstance();
      setupFileSystems(job.getConfiguration());

      job.setMapperClass(TestMapper.class);
      job.setReducerClass(TestReducer.class);
      job.setCombinerClass(TestCombiner.class);
      job.setInputFormatClass(InFormat.class);

      FileOutputFormat.setOutputPath(job, new Path(igfsScheme() + PATH_OUTPUT + "2"));

      HadoopJobId jobId = new HadoopJobId(globalId, 1);

      grid(0).hadoop().submit(jobId, createJobInfo(job.getConfiguration()));

      checkStatus(jobId, false);

      info("Releasing map latch.");

      latch.get("mapAwaitLatch").countDown();

      checkStatus(jobId, false);

      // All maps are completed. We have a combiner, so no reducers should be executed
      // before combiner latch is released.

      U.sleep(50);

      assertEquals(0, reduceExecCnt.get());

      info("Releasing combiner latch.");

      latch.get("combineAwaitLatch").countDown();

      checkStatus(jobId, false);

      info("Releasing reduce latch.");

      latch.get("reduceAwaitLatch").countDown();

      checkStatus(jobId, true);

      // One map and one combiner per block, a single reducer.
      assertEquals(10, mapExecCnt.get());
      assertEquals(10, combineExecCnt.get());
      assertEquals(1, reduceExecCnt.get());
    }
    finally {
      // Safety.
      latch.get("mapAwaitLatch").countDown();
      latch.get("combineAwaitLatch").countDown();
      latch.get("reduceAwaitLatch").countDown();
    }
  }

  /**
   * Checks job execution status.
   *
   * @param jobId Job ID.
   * @param complete Completion status.
   * @throws Exception If failed.
   */
  private void checkStatus(HadoopJobId jobId, boolean complete) throws Exception {
    // Every node in the grid must agree on the job's status.
    for (int i = 0; i < gridCount(); i++) {
      IgniteKernal kernal = (IgniteKernal)grid(i);

      Hadoop hadoop = kernal.hadoop();

      HadoopJobStatus stat = hadoop.status(jobId);

      assert stat != null;

      IgniteInternalFuture<?> fut = hadoop.finishFuture(jobId);

      if (!complete)
        assertFalse(fut.isDone());
      else {
        info("Waiting for status future completion on node [idx=" + i + ", nodeId=" +
          kernal.getLocalNodeId() + ']');

        fut.get();
      }
    }
  }

  /**
   * Test input format producing BLOCK_CNT synthetic file splits.
   */
  public static class InFormat extends InputFormat {

    @Override public List<InputSplit> getSplits(JobContext ctx) throws IOException, InterruptedException {
      List<InputSplit> res = new ArrayList<>(BLOCK_CNT);

      for (int i = 0; i < BLOCK_CNT; i++)
        try {
          // Splits point at a fake file; the record reader below never
          // yields records, so the file is never actually read.
          res.add(new FileSplit(new Path(new URI("someFile")), i, i + 1, new String[] {"localhost"}));
        }
        catch (URISyntaxException e) {
          throw new IOException(e);
        }

      return res;
    }

    @Override public RecordReader createRecordReader(InputSplit split, TaskAttemptContext ctx) throws IOException, InterruptedException {
      // An empty reader: each map task runs once with no key/value pairs.
      return new RecordReader() {
        @Override public void initialize(InputSplit split, TaskAttemptContext ctx) {
        }

        @Override public boolean nextKeyValue() {
          return false;
        }

        @Override public Object getCurrentKey() {
          return null;
        }

        @Override public Object getCurrentValue() {
          return null;
        }

        @Override public float getProgress() {
          return 0;
        }

        @Override public void close() {
        }
      };
    }
  }

  /**
   * Test mapper: blocks on the map latch, then bumps the map counter.
   */
  private static class TestMapper extends Mapper {
    @Override public void run(Context ctx) throws IOException, InterruptedException {
      System.out.println("Running task: " + ctx.getTaskAttemptID().getTaskID().getId());

      latch.get("mapAwaitLatch").await();

      mapExecCnt.incrementAndGet();

      System.out.println("Completed task: " + ctx.getTaskAttemptID().getTaskID().getId());
    }
  }

  /**
   * Test reducer: blocks on the reduce latch, then bumps the reduce counter.
   */
  private static class TestReducer extends Reducer {
    @Override public void run(Context ctx) throws IOException, InterruptedException {
      System.out.println("Running task: " + ctx.getTaskAttemptID().getTaskID().getId());

      latch.get("reduceAwaitLatch").await();

      reduceExecCnt.incrementAndGet();

      System.out.println("Completed task: " + ctx.getTaskAttemptID().getTaskID().getId());
    }
  }

  /**
   * Test combiner: blocks on the combine latch, then bumps the combine counter.
   */
  private static class TestCombiner extends Reducer {
    @Override public void run(Context ctx) throws IOException, InterruptedException {
      System.out.println("Running task: " + ctx.getTaskAttemptID().getTaskID().getId());

      latch.get("combineAwaitLatch").await();

      combineExecCnt.incrementAndGet();

      System.out.println("Completed task: " + ctx.getTaskAttemptID().getTaskID().getId());
    }
  }
}
| |
// Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.devtools.build.lib.packages;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.devtools.build.lib.events.Location;
import com.google.devtools.build.lib.events.StoredEventHandler;
import com.google.devtools.build.lib.packages.PackageIdentifier.RepositoryName;
import com.google.devtools.build.lib.packages.RuleFactory.InvalidRuleException;
import com.google.devtools.build.lib.syntax.EvalException;
import com.google.devtools.build.lib.syntax.FuncallExpression;
import com.google.devtools.build.lib.syntax.Label;
import com.google.devtools.build.lib.syntax.Label.SyntaxException;
import com.google.devtools.build.lib.vfs.Path;
import java.io.Serializable;
import java.util.Map;
import java.util.Map.Entry;
/**
* This creates the //external package, where targets not homed in this repository can be bound.
*/
public class ExternalPackage extends Package {
public static final String NAME = "external";
public static final PackageIdentifier PACKAGE_IDENTIFIER =
PackageIdentifier.createInDefaultRepo(NAME);
private Map<Label, Binding> bindMap;
private Map<RepositoryName, Rule> repositoryMap;
ExternalPackage() {
  // //external always lives in the default repository.
  super(PACKAGE_IDENTIFIER);
}

/**
 * Returns a description of the repository with the given name, or null if there's no such
 * repository.
 */
public Rule getRepositoryInfo(RepositoryName repositoryName) {
  return repositoryMap.get(repositoryName);
}
/**
 * If the given label is bound, returns the (fully resolved) label it is bound to. Otherwise,
 * returns null.
 *
 * @param label the //external label to look up
 * @return the bound target label, or null if the label is not bound
 */
public Label getActualLabel(Label label) {
  // Single lookup instead of containsKey + get: bindMap is an
  // ImmutableMap (no null values), so a null result means "not bound".
  Binding binding = bindMap.get(label);
  return binding == null ? null : binding.getActual();
}
/**
 * Checks if the given package is //external.
 *
 * @param pkg the package to test; may be null
 * @return true iff pkg is non-null and named "external"
 */
public static boolean isExternal(Package pkg) {
  // Guard-clause form: a null package is never //external.
  if (pkg == null) {
    return false;
  }
  return pkg.getName().equals(NAME);
}
/**
 * Holder for a binding's actual label and location.
 */
public static class Binding implements Serializable {

  // The fully-resolved target this binding points at.
  private final Label actual;
  // Where in the WORKSPACE file this binding was declared (for errors).
  private final Location location;

  public Binding(Label actual, Location location) {
    this.actual = actual;
    this.location = location;
  }

  public Label getActual() {
    return actual;
  }

  public Location getLocation() {
    return location;
  }

  /**
   * Checks if the label is bound, i.e., starts with {@code //external:}.
   */
  public static boolean isBoundLabel(Label label) {
    return label.getPackageName().equals(NAME);
  }
}
/**
* Given a workspace file path, creates an ExternalPackage.
*/
public static class Builder
extends Package.Builder {
private Map<Label, Binding> bindMap = Maps.newLinkedHashMap();
private Map<RepositoryName, Rule> repositoryMap = Maps.newLinkedHashMap();
public Builder(Path workspacePath) {
  super(new ExternalPackage());
  setFilename(workspacePath);
  setMakeEnv(new MakeEnvironment.Builder());
}

/** Returns the package under construction, typed as ExternalPackage. */
protected ExternalPackage externalPackage() {
  return (ExternalPackage) pkg;
}

@Override
public ExternalPackage build() {
  // Register all collected repository rules on the package first.
  for (Rule rule : repositoryMap.values()) {
    try {
      addRule(rule);
    } catch (NameConflictException e) {
      // Repository names are map keys, so duplicates are impossible here.
      throw new IllegalStateException("Got a name conflict for " + rule
          + ", which can't happen: " + e.getMessage());
    }
  }
  // Freeze the builder's mutable maps into the package.
  externalPackage().bindMap = ImmutableMap.copyOf(bindMap);
  externalPackage().repositoryMap = ImmutableMap.copyOf(repositoryMap);

  Package base = super.build();
  return (ExternalPackage) base;
}
/**
 * Sets the name for this repository.
 */
@Override
public Builder setWorkspaceName(String workspaceName) {
  pkg.workspaceName = workspaceName;
  return this;
}

/** Records a binding from the given //external label to its target. */
public void addBinding(Label label, Binding binding) {
  bindMap.put(label, binding);
}

/**
 * Resolves all recorded bindings to concrete labels, then creates one
 * bind rule per entry.
 *
 * @param ruleClass the rule class used to instantiate each bind rule
 * @throws NoSuchBindingException if a binding chain is broken or cyclic
 * @throws EvalException if a bind rule cannot be created
 */
public void resolveBindTargets(RuleClass ruleClass)
    throws EvalException, NoSuchBindingException {
  // First pass: follow binding chains so every entry points at a
  // non-//external label.
  for (Entry<Label, Binding> entry : bindMap.entrySet()) {
    resolveLabel(entry.getKey(), entry.getValue());
  }
  // Second pass: materialize a rule for each resolved binding.
  for (Entry<Label, Binding> entry : bindMap.entrySet()) {
    try {
      addRule(ruleClass, entry);
    } catch (NameConflictException | InvalidRuleException e) {
      throw new EvalException(entry.getValue().location, e.getMessage());
    }
  }
}
// Uses tortoise and the hare algorithm to detect cycles: the hare
// follows the binding chain one hop per iteration while the tortoise
// follows it every other iteration; if the chain is cyclic they must
// eventually land on the same label.
// NOTE(review): the cycle check compares labels with ==, which relies on
// the same Label instances recurring through bindMap -- confirm labels
// are interned/reused before changing this.
private void resolveLabel(final Label virtual, Binding binding)
    throws NoSuchBindingException {
  Label actual = binding.getActual();
  Label tortoise = virtual;
  Label hare = actual;
  boolean moveTortoise = true;
  // Keep chasing while the target is itself a bound (//external) label.
  while (Binding.isBoundLabel(actual)) {
    if (tortoise == hare) {
      throw new NoSuchBindingException("cycle detected resolving " + virtual + " binding",
          binding.getLocation());
    }

    Label previous = actual; // For the exception.
    Binding oldBinding = binding;
    binding = bindMap.get(actual);
    if (binding == null) {
      // Chain is broken: the intermediate label was never bound.
      throw new NoSuchBindingException("no binding found for target " + previous + " (via "
          + virtual + ")", oldBinding.getLocation());
    }
    actual = binding.getActual();
    hare = actual;
    // The tortoise advances only on every second iteration.
    moveTortoise = !moveTortoise;
    if (moveTortoise) {
      tortoise = bindMap.get(tortoise).getActual();
    }
  }
  // Collapse the chain: the virtual label now maps to the final binding.
  bindMap.put(virtual, binding);
}
/**
 * Creates a bind rule of the given class for one virtual-to-actual entry and
 * adds it to this package, making it publicly visible.
 */
private void addRule(RuleClass klass, Map.Entry<Label, Binding> bindingEntry)
throws InvalidRuleException, NameConflictException {
Label virtual = bindingEntry.getKey();
Label actual = bindingEntry.getValue().actual;
Location location = bindingEntry.getValue().location;
Map<String, Object> attributes = Maps.newHashMap();
// Bound rules don't have a name field, but this works because we don't want more than one
// with the same virtual name.
attributes.put("name", virtual.getName());
attributes.put("actual", actual);
// NOTE(review): events collected by this handler are discarded here, unlike in
// createAndAddRepositoryRule() which forwards them via addEvents() — confirm intended.
StoredEventHandler handler = new StoredEventHandler();
Rule rule = RuleFactory.createAndAddRule(this, klass, attributes, handler, null, location);
rule.setVisibility(ConstantRuleVisibility.PUBLIC);
}
/**
 * Creates a repository rule from a WORKSPACE call and records it in the map of
 * repositories. Overwrites rules that are already there, to allow "later"
 * WORKSPACE files to overwrite "earlier" ones. Any external bindings exported
 * by the rule class are also registered under {@code //external}.
 *
 * @param ruleClass the class of repository rule to instantiate
 * @param kwargs the attribute values from the WORKSPACE call
 * @param ast the function call expression, used for error locations
 * @return this builder, for chaining
 */
public Builder createAndAddRepositoryRule(RuleClass ruleClass, Map<String, Object> kwargs,
FuncallExpression ast)
throws InvalidRuleException, NameConflictException, SyntaxException {
StoredEventHandler eventHandler = new StoredEventHandler();
Rule tempRule = RuleFactory.createRule(this, ruleClass, kwargs, eventHandler, ast,
ast.getLocation());
// Forward any warnings/errors produced while creating the rule.
addEvents(eventHandler.getEvents());
repositoryMap.put(RepositoryName.create("@" + tempRule.getName()), tempRule);
// Register the rule's implicit bindings as //external targets.
for (Map.Entry<String, Label> entry :
ruleClass.getExternalBindingsFunction().apply(tempRule).entrySet()) {
Label nameLabel = Label.parseAbsolute("//external:" + entry.getKey());
addBinding(nameLabel, new Binding(entry.getValue(), tempRule.getLocation()));
}
return this;
}
/**
 * This is used when a binding is invalid, either because one of the targets is malformed,
 * refers to a package that does not exist, or creates a circular dependency.
 */
public class NoSuchBindingException extends Exception {
private static final long serialVersionUID = 1L;
// Where the offending bind() appeared; final because the exception is immutable.
private final Location location;
public NoSuchBindingException(String message, Location location) {
super(message);
this.location = location;
}
/** Returns the source location of the binding that caused this failure. */
public Location getLocation() {
return location;
}
}
}
}
| |
/*
* $Id$
*
* Copyright (c) 2014, Simsilica, LLC
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.nx.util.jme3.lemur.panel;
import com.google.common.base.Objects;
import com.jme3.font.BitmapFont;
import com.jme3.font.BitmapText;
import com.jme3.input.KeyInput;
import com.jme3.math.Vector3f;
import com.nx.util.jme3.lemur.ChatHistory;
import com.nx.util.jme3.lemur.ConsoleCommand;
import com.nx.util.jme3.lemur.CustomGridPanel;
import com.simsilica.lemur.Axis;
import com.simsilica.lemur.Command;
import com.simsilica.lemur.DefaultRangedValueModel;
import com.simsilica.lemur.GridPanel;
import com.simsilica.lemur.GuiGlobals;
import com.simsilica.lemur.ListBox;
import com.simsilica.lemur.Panel;
import com.simsilica.lemur.RangedValueModel;
import com.simsilica.lemur.Slider;
import com.simsilica.lemur.TextField;
import com.simsilica.lemur.component.BorderLayout;
import com.simsilica.lemur.component.TextEntryComponent;
import com.simsilica.lemur.core.GuiControl;
import com.simsilica.lemur.core.VersionedList;
import com.simsilica.lemur.core.VersionedReference;
import com.simsilica.lemur.event.KeyAction;
import com.simsilica.lemur.event.KeyActionListener;
import com.simsilica.lemur.grid.GridModel;
import com.simsilica.lemur.input.AnalogFunctionListener;
import com.simsilica.lemur.input.FunctionId;
import com.simsilica.lemur.list.CellRenderer;
import com.simsilica.lemur.list.DefaultCellRenderer;
import com.simsilica.lemur.style.Attributes;
import com.simsilica.lemur.style.ElementId;
import com.simsilica.lemur.style.StyleAttribute;
import com.simsilica.lemur.style.StyleDefaults;
import com.simsilica.lemur.style.Styles;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Arrays;
import java.util.EnumMap;
import java.util.List;
import java.util.Map;
/**
 * A Lemur console panel: a scrolling, versioned list of messages rendered in a
 * grid, a slider for scrolling, and a text entry field at the bottom that
 * supports command parsing ("/"-prefixed input), input history (up/down keys)
 * and mouse-wheel scrolling.
 *
 * @author NemesisMate based on Paul Speed's #ListBox
 *
 * @see ListBox
 */
public class LemurConsole<T> extends Panel {
private static final Logger log = LoggerFactory.getLogger(LemurConsole.class);
/** Classifies a message so the matching callback (if any) can be invoked. */
public enum MessageType {
DEFAULT, CONSOLE, COMMAND;
}
// Per-type submission callbacks; lazily created in setCallback().
Map<MessageType, Command> messageCallbacks;
public static final String ELEMENT_ID = "list";
public static final String CONTAINER_ID = "container";
public static final String ITEMS_ID = "items";
public static final String SLIDER_ID = "slider";
// public static final String SELECTOR_ID = "selector";
private BorderLayout layout;
// The message list and a versioned reference used to detect external changes.
private VersionedList<T> model;
private VersionedReference<List<T>> modelRef;
private CellRenderer<T> cellRenderer;
// private VersionedReference<Set<Integer>> selectionRef;
private CustomGridPanel grid;
private Slider slider;
TextField textField;
// private Node selectorArea;
// private Panel selector;
// private Vector3f selectorAreaOrigin = new Vector3f();
// private Vector3f selectorAreaSize = new Vector3f();
private RangedValueModel baseIndex; // upside down actually
private VersionedReference<Double> indexRef;
// Highest scroll index: model size minus visible rows (see resetModelRange()).
private int maxIndex;
// Behavior toggles; all default to the most permissive/convenient setting.
private boolean preserveOnExit = true;
private boolean allowVoidSubmission = true;
private boolean stickToBottom = true;
private boolean commandsToHistory = true;
// Lemur inline color escape codes prepended to prefix/command text.
private String prefixColorCode = "\\#c0ffee#";
private String commandColorCode = "\\#25A6E5#";
private String consoleDefaultPrefix = "console"; // If setterAdded, recall in it to: noPrefixFillCheck();
private String prefixSeparator = "$ "; // If setterAdded, recall in it to: noPrefixFillCheck();
private String commandPrefix = "/";
// Whitespace padding matching the prefix width, used to align unprefixed lines.
private String noPrefixFill; // = noPrefixFillCheck() = consoleDefaultPrefix + prefixSeparator;
private ChatHistory chatHistory = new ChatHistory();
// Command<ConsoleCommand> commandCallback;
// Extra width added when deciding whether a message must be wrapped.
private float endMargin;
public LemurConsole() {
this(true, new VersionedList<T>(), null,
new ElementId(ELEMENT_ID), null);
}
public LemurConsole(VersionedList<T> model ) {
this(true, model, null,
new ElementId(ELEMENT_ID), null);
}
public LemurConsole(VersionedList<T> model, CellRenderer<T> renderer, String style ) {
this(true, model, renderer, new ElementId(ELEMENT_ID), style);
}
public LemurConsole(VersionedList<T> model, String style ) {
this(true, model, null, new ElementId(ELEMENT_ID), style);
}
public LemurConsole(VersionedList<T> model, ElementId elementId, String style ) {
this(true, model, null, elementId, style);
}
public LemurConsole(VersionedList<T> model, CellRenderer<T> renderer, ElementId elementId, String style ) {
this(true, model, renderer, elementId, style);
}
/**
 * Main constructor: builds the grid, slider and text field, wires key and
 * mouse-wheel listeners, and installs the model.
 */
protected LemurConsole(boolean applyStyles, VersionedList<T> model, CellRenderer<T> cellRenderer,
ElementId elementId, String style ) {
super(false, elementId.child(CONTAINER_ID), style);
if( cellRenderer == null ) {
// Create a default one
cellRenderer = new DefaultCellRenderer(elementId.child("item"), style);
}
this.cellRenderer = cellRenderer;
this.layout = new BorderLayout();
getControl(GuiControl.class).setLayout(layout);
// Single-column grid that displays the message list.
grid = new CustomGridPanel(new GridModelDelegate(), elementId.child(ITEMS_ID), style);
grid.setVisibleColumns(1);
// grid.getControl(GuiControl.class).addListener(new GridListener());
layout.addChild(grid, BorderLayout.Position.Center);
baseIndex = new DefaultRangedValueModel();
indexRef = baseIndex.createReference();
slider = new Slider(baseIndex, Axis.Y, elementId.child(SLIDER_ID), style);
layout.addChild(slider, BorderLayout.Position.East);
if( applyStyles ) {
Styles styles = GuiGlobals.getInstance().getStyles();
styles.applyStyles(this, getElementId(), style);
}
setName("Console");
textField = new TextField("", style);
// Fired on focus-exit style keys; either keeps or clears the pending input.
KeyActionListener submissionListener = new KeyActionListener() {
@Override
public void keyAction(TextEntryComponent arg0, KeyAction arg1) {
if(preserveOnExit) {
String text = arg0.getText();
// NOTE(review): substring(0, length) is a no-op, so the text is
// re-set unchanged — confirm whether a truncation was intended here.
arg0.setText(text.substring(0, text.length()));
} else arg0.setText("");
}
};
// NOTE(review): key code 0x00 looks like a placeholder — confirm which key
// this is meant to bind.
textField.getActionMap().put(new KeyAction(0x00), submissionListener);
textField.getActionMap().put(new KeyAction(KeyInput.KEY_NUMPADENTER), submissionListener);
// Enter submits the current line: classify it, record history, echo it to
// the console and invoke the registered callback.
textField.getActionMap().put(new KeyAction(KeyInput.KEY_RETURN), new KeyActionListener() {
@Override
public void keyAction(TextEntryComponent arg0, KeyAction arg1) {
String text = textField.getText();
String trimmedText;
if(!allowVoidSubmission) {
if(text.length() == 0) return;
else {
trimmedText = text.trim();
if(trimmedText.length() == 0) return;
}
} else trimmedText = text.trim();
// if(!allowVoidSubmission && (text.length() == 0 || text.trim().length() == 0)) return;
final MessageType messageType;
final Object callbackExec;
// NOTE(review): command detection hard-codes '/' while the stripping below
// uses the configurable commandPrefix field — these disagree if
// setCommandPrefix() was called; confirm intended behavior.
if(trimmedText.length() > 1 && trimmedText.charAt(0) == '/') {
String[] split = trimmedText.split(" ", 2);
String[] args = split.length > 1 ? split[1].split(" ") : new String[0];
messageType = MessageType.COMMAND;
callbackExec = new ConsoleCommand(split[0].replaceFirst(commandPrefix, ""), args);
// sendConsoleMessage(trimmedText, MessageType.COMMAND);
// LemurGuiModule.getInstance().getCommandManager().execute(commandSender, split[0].replaceFirst("/", ""), args);
if(commandsToHistory) {
chatHistory.addToHistory(trimmedText);
}
// model.add((T) (consoleDefaultPrefix + prefixSeparator + trimmedText));
} else {
messageType = MessageType.CONSOLE;
callbackExec = trimmedText;
chatHistory.addToHistory(trimmedText);
// sendConsoleMessage(trimmedText, MessageType.CONSOLE);
// model.add((T) (consoleDefaultPrefix + prefixSeparator + trimmedText));
// sendMessage(trimmedText);
}
sendConsoleMessage(trimmedText, messageType);
Command callback = getCallback(messageType);
if (callback != null) {
callback.execute(callbackExec);
} else {
if(log.isWarnEnabled()) {
log.warn("No callback specified for console messages of type: {}.", messageType);
}
}
// model.add((T) (consoleDefaultPrefix + prefixSeparator + trimmedText));
textField.setText("");
// scrollToBottom();
}
});
layout.addChild(BorderLayout.Position.South, textField);
setPreferredSize(new Vector3f(1000, 100, 0));
// Up/down arrows navigate the input history.
textField.getActionMap().put(new KeyAction(KeyInput.KEY_UP), new KeyActionListener() {
@Override
public void keyAction(TextEntryComponent arg0, KeyAction arg1) {
log.debug("History up");
textField.setText(chatHistory.moveUp(textField.getText()));
}
});
textField.getActionMap().put(new KeyAction(KeyInput.KEY_DOWN), new KeyActionListener() {
@Override
public void keyAction(TextEntryComponent arg0, KeyAction arg1) {
log.debug("History down");
textField.setText(chatHistory.moveDown());
}
});
// CursorEventControl.addListenersToSpatial(this, new DefaultCursorListener() {
// @Override
// public void cursorMoved(CursorMotionEvent event, Spatial target, Spatial capture) {
// System.out.println("SCROLLING DELTA: " + event.getScrollDelta() + ", value: " + event.getScrollValue());
// if(event.getScrollDelta() != 0) {
// if( event.getScrollDelta() > 0 ) {
// scroll(Math.max(1, event.getScrollDelta() / 120));
// } else {
// scroll(Math.min(-1, event.getScrollDelta() / 120));
// }
// }
// }
// });
//TODO: replace by a grid-hovered listener. This would allow a second listener on the input textfield to scroll through the history.
// NOTE(review): this global mouse-wheel mapping is never unregistered, so
// every console instance leaks a listener into the InputMapper — confirm
// whether cleanup on detach is needed.
FunctionId f = new FunctionId("bleebleblee");
GuiGlobals.getInstance().getInputMapper().map(f, com.simsilica.lemur.input.Axis.MOUSE_WHEEL);
GuiGlobals.getInstance().getInputMapper().addAnalogListener(new AnalogFunctionListener() {
@Override
public void valueActive(FunctionId func, double value, double tpf) {
scroll(value);
}
}, f);
// grid.get
// Need a spacer so that the 'selector' panel doesn't think
// it's being managed by this panel.
// Have to set this up after applying styles so that the default
// styles are properly initialized the first time.
// selectorArea = new Node("selectorArea");
// attachChild(selectorArea);
// selector = new Panel(elementId.child(SELECTOR_ID), style);
noPrefixFillCheck();
setModel(model);
resetModelRange();
}
// Returns the callback registered for the given type, or null if none.
private Command getCallback(MessageType type) {
if(messageCallbacks != null) {
return messageCallbacks.get(type);
}
return null;
}
/** Whether pressing Enter on empty/whitespace-only input still submits. */
public void setAllowVoidSubmission(boolean allowVoidSubmition) {
this.allowVoidSubmission = allowVoidSubmition;
}
// NOTE(review): per the field comments, these two setters should re-run
// noPrefixFillCheck() so padding stays in sync — confirm.
public void setConsoleDefaultPrefix(String consoleDefaultPrefix) {
this.consoleDefaultPrefix = consoleDefaultPrefix;
}
public void setPrefixSeparator(String prefixSeparator) {
this.prefixSeparator = prefixSeparator;
}
/** Whether the view stays pinned to the newest message when items are added. */
public void setStickToBottom(boolean stickToBottom) {
this.stickToBottom = stickToBottom;
}
/** Whether the text field keeps its content when focus is lost. */
public void setPreserveOnExit(boolean preserveOnExit) {
this.preserveOnExit = preserveOnExit;
}
public void setCommandPrefix(String commandPrefix) {
this.commandPrefix = commandPrefix;
}
// public void setCommandSender(CommandSender commandSender) {
// this.commandSender = commandSender;
// }
/** Registers the callback invoked when a message of the given type is submitted. */
public void setCallback(MessageType type, Command callbackCommand) {
if(messageCallbacks == null) {
messageCallbacks = new EnumMap<MessageType, Command>(MessageType.class);
}
messageCallbacks.put(type, callbackCommand);
}
/** @deprecated use {@link #setCallback} with {@link MessageType#COMMAND}. */
@Deprecated
public void setCommandCallback(Command<ConsoleCommand> commandCallback) {
// this.commandCallback = commandCallback;
setCallback(MessageType.COMMAND, commandCallback);
}
//TODO: make an enum to say which kind of message to send, instead of a method for every one of it.
/**
 * Formats a message per its type and appends it to the console model,
 * wrapping long lines to the console width.
 */
public void sendConsoleMessage(String message, MessageType type) {
switch (type) {
case COMMAND:
// Intentional fall-through: commands get the command color applied and
// are then rendered exactly like CONSOLE messages.
message = commandColorCode + message;
case CONSOLE:
messageToConsole(prefixColorCode + consoleDefaultPrefix + prefixSeparator + message);
break;
case DEFAULT:
if(message == null) {
return;
}
// Multi-line messages are split and each line padded to align with the prefix.
if(message.contains("\n")) {
String[] lines = message.split("\n");
// int length = lines.length;
//
// model.add((T) (consoleDefaultPrefix + prefixSeparator + lines[0]));
//
// // In the case that the \n is at the end, there would be only one element;
// if(length > 1) {
// for(int i = 1; i < length; i++) {
// model.add((T) lines[i]);
// }
// }
// model.addAll((Collection<T>) Arrays.asList(lines));
// In the case that the \n is at the end, there would be only one element;
if(noPrefixFill.isEmpty()) {
for (String line : lines) {
messageToConsole(line);
}
} else {
for(String line : lines) {
messageToConsole(noPrefixFill + line);
}
}
return;
}
messageToConsole(noPrefixFill.isEmpty() ? message : noPrefixFill + message);
break;
}
}
// Appends a fully formatted line to the model, wrapping it to the grid width
// when a usable width is known; otherwise adds it unwrapped.
private void messageToConsole(String finalMessage) {
BitmapFont font = GuiGlobals.getInstance().getStyles().getSelector(getStyle()).get("font", BitmapFont.class);
float widthLimit = grid.getSize().getX() + endMargin;
if(widthLimit == endMargin) {
// Actual size is zero (not laid out yet); fall back to the preferred size.
widthLimit = grid.getPreferredSize().getX();
if(widthLimit == endMargin) {
log.trace("The console hasn't got any size, so no wrap can be performed.");
model.add((T) (finalMessage));
return;
}
}
float currentWidth = font.getLineWidth(finalMessage);
if(currentWidth > widthLimit) {
String[] messages = getWrap(finalMessage, currentWidth, widthLimit);
for(String m : messages) {
model.add((T) m);
}
} else {
model.add((T) (finalMessage));
}
}
/** Returns the rendered height of one line of text in the console's font. */
public float getLineHeight() {
BitmapFont font = GuiGlobals.getInstance().getStyles().getSelector(getStyle()).get("font", BitmapFont.class);
BitmapText text = new BitmapText(font);
text.setText("ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789()[]:;\"");
return text.getHeight();
}
// Easy wrap - not word aware and no line adjusting aware (this... is not well done at all - it treats all letters as if they occupied the same width xD)
// Splits the message into roughly equal character chunks; the last chunk
// takes any remainder.
private String[] getWrap(String finalMessage, float currentWidth, float widthLimit) {
int finalLength = finalMessage.length();
int wrapAmount = (int) (currentWidth / widthLimit) + 1;
int messagesLength = finalLength / wrapAmount;
String[] messages = new String[wrapAmount];
int startIndex;
int i = 0;
for(; i < (wrapAmount - 1); i++) {
startIndex = i * messagesLength;
int endIndex = startIndex + messagesLength;
messages[i] = finalMessage.substring(startIndex, endIndex);
}
startIndex = i * messagesLength;
messages[i] = finalMessage.substring(startIndex, finalLength);
log.trace("Wrapped: {}, into: {}.", finalMessage, Arrays.toString(messages));
for(String message : messages) {
log.trace("Wr: {}", message);
}
return messages;
}
/** @deprecated use {@link #sendConsoleMessage} with an explicit type. */
@Deprecated
public void sendMessage(String message) {
// chatHistory.addToHistory(message);
sendConsoleMessage(message, MessageType.DEFAULT);
}
/** Scrolls by the given amount; positive values move away from the bottom. */
public void scroll(double amount) {
baseIndex.setValue(baseIndex.getValue() + amount);
}
// The slider model is inverted, so 0 is the bottom of the list.
public void scrollToBottom() {
baseIndex.setValue(0);
}
/** Gives keyboard focus to the input text field. */
public void setFocus() {
GuiGlobals.getInstance().requestFocus(textField);
}
@StyleDefaults(ELEMENT_ID)
public static void initializeDefaultStyles( Styles styles, Attributes attrs ) {
// ElementId parent = new ElementId(ELEMENT_ID);
//QuadBackgroundComponent quad = new QuadBackgroundComponent(new ColorRGBA(0.5f, 0.5f, 0.5f, 1));
// QuadBackgroundComponent quad = new QuadBackgroundComponent(new ColorRGBA(0.8f, 0.9f, 0.1f, 1));
// quad.getMaterial().getMaterial().getAdditionalRenderState().setBlendMode(BlendMode.Exclusion);
// styles.getSelector(parent.child(SELECTOR_ID), null).set("background", quad, false);
}
/**
 * Per-frame update: reacts to model changes (resizing the scroll range) and
 * slider movement (repositioning the visible grid window).
 */
@Override
public void updateLogicalState( float tpf ) {
super.updateLogicalState(tpf);
if( modelRef.update() ) {
resetModelRange();
}
boolean indexUpdate = indexRef.update();
// boolean selectionUpdate = selectionRef.update();
if( indexUpdate ) {
// System.out.println("2: MIN: " + baseIndex.getMinimum() + " VALUE: " + baseIndex.getValue() + " MAX: " + baseIndex.getMaximum());
// System.out.println("MAX: " + maxIndex + "BASE: " + baseIndex.getValue());
// Invert the slider value: slider 0 corresponds to the last rows.
int index = (int)(maxIndex - baseIndex.getValue());
grid.setRow(index);
// grid.setRow(maxIndex);
}
// if( selectionUpdate || indexUpdate ) {
// refreshSelector();
// }
}
// protected void gridResized( Vector3f pos, Vector3f size ) {
// if( pos.equals(selectorAreaOrigin) && size.equals(selectorAreaSize) ) {
// return;
// }
//
// selectorAreaOrigin.set(pos);
// selectorAreaSize.set(size);
// }
/**
 * Installs a new message model (a default empty one if {@code null}) and
 * resets the grid and scroll range accordingly.
 */
public void setModel( VersionedList<T> model ) {
if( this.model == model && model != null ) {
return;
}
if( this.model != null ) {
// Clean up the old one
// detachItemListeners();
}
if( model == null ) {
// Easier to create a default one than to handle a null model
// everywhere
model = new VersionedList<T>();
}
this.model = model;
this.modelRef = model.createReference();
grid.setLocation(0,0);
grid.setModel(new GridModelDelegate()); // need a new one for a new version
resetModelRange();
baseIndex.setValue(maxIndex);
}
public VersionedList<T> getModel() {
return model;
}
public Slider getSlider() {
return slider;
}
public GridPanel getGridPanel() {
return grid;
}
/** Sets how many message rows are visible at once. */
@StyleAttribute(value="visibleItems", lookupDefault=false)
public void setVisibleItems( int count ) {
grid.setVisibleRows(count);
resetModelRange();
}
public int getVisibleItems() {
return grid.getVisibleRows();
}
@StyleAttribute(value="cellRenderer", lookupDefault=false)
public void setCellRenderer( CellRenderer renderer ) {
if( Objects.equal(this.cellRenderer, renderer) ) {
return;
}
this.cellRenderer = renderer;
grid.refreshGrid(); // cheating
}
public CellRenderer getCellRenderer() {
return cellRenderer;
}
public void setAlpha( float alpha, boolean recursive ) {
super.setAlpha(alpha, recursive);
// Catch some of our intermediaries
// setChildAlpha(selector, alpha);
}
/**
 * Recomputes the scroll range from the model size and visible rows. Keeps the
 * current offset stable unless stickToBottom is set, in which case it pins
 * the view to the newest messages.
 */
protected void resetModelRange() {
int count = model == null ? 0 : model.size();
int visible = grid.getVisibleRows();
maxIndex = Math.max(0, count - visible);
// Because the slider is upside down, we have to
// do some math if we want our base not to move as
// items are added to the list after us
double val = baseIndex.getMaximum() - baseIndex.getValue();
baseIndex.setMinimum(0);
baseIndex.setMaximum(maxIndex);
if(stickToBottom) {
baseIndex.setValue(0);
}
else {
baseIndex.setValue(maxIndex - val);
}
}
// Delegates cell construction to the configured CellRenderer, reusing the
// existing panel when possible.
protected Panel getListCell( int row, int col, Panel existing ) {
T value = model.get(row);
Panel cell = cellRenderer.getView(value, false, existing);
// if( cell != existing ) {
// // Transfer the click listener
// CursorEventControl.addListenersToSpatial(cell, clickListener);
// CursorEventControl.removeListenersFromSpatial(existing, clickListener);
// }
return cell;
}
@Override
public String toString() {
return getClass().getName() + "[elementId=" + getElementId() + "]";
}
// private class GridListener extends AbstractGuiControlListener {
// public void reshape( GuiControl source, Vector3f pos, Vector3f size ) {
// gridResized(pos, size);
// }
// }
/**
 * Read-only, single-column GridModel view over the console's message list,
 * used to drive the grid panel.
 */
protected class GridModelDelegate implements GridModel<Panel> {
@Override
public int getRowCount() {
if( model == null ) {
return 0;
}
return model.size();
}
@Override
public int getColumnCount() {
return 1;
}
@Override
public Panel getCell( int row, int col, Panel existing ) {
return getListCell(row, col, existing);
}
@Override
public void setCell( int row, int col, Panel value ) {
throw new UnsupportedOperationException("ListModel is read only.");
}
@Override
public long getVersion() {
return model == null ? 0 : model.getVersion();
}
@Override
public GridModel<Panel> getObject() {
return this;
}
@Override
public VersionedReference<GridModel<Panel>> createReference() {
return new VersionedReference<GridModel<Panel>>(this);
}
}
// Rebuilds noPrefixFill: a run of spaces as long as prefix + separator, used
// to left-pad lines that carry no prefix so columns line up.
private void noPrefixFillCheck() {
int amount = 0;
if(consoleDefaultPrefix != null) {
amount += consoleDefaultPrefix.length();
}
if(prefixSeparator != null) {
amount += prefixSeparator.length();
}
String newFill = "";
for(int i = 0; i < amount; i++) {
newFill += " ";
}
noPrefixFill = newFill;
}
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.artemis.core.journal.impl;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.activemq.artemis.api.core.ActiveMQExceptionType;
import org.apache.activemq.artemis.core.journal.impl.dataformat.JournalInternalRecord;
/**
 * Tracks the state of one journal transaction: the positive (add/update) and
 * negative (delete) record updates it has made, the set of journal files it
 * touches (which must not be reclaimed while the transaction is pending), and
 * per-file completion callbacks. Commit/rollback either applies the updates to
 * the journal's record map or forwards them to the compactor when compaction
 * is in progress.
 */
public class JournalTransaction {
// Provider of the record map and the (possibly null) compactor; replaceable.
private JournalRecordProvider journal;
// Positive updates (adds/updates) and negative updates (deletes); lazily created.
private List<JournalUpdate> pos;
private List<JournalUpdate> neg;
private final long id;
// All the files this transaction is touching on.
// We can't have those files being reclaimed if there is a pending transaction
private Set<JournalFile> pendingFiles;
private TransactionCallback currentCallback;
private boolean compacting = false;
// One callback per journal file the transaction has written to; lazily created.
private Map<JournalFile, TransactionCallback> callbackList;
// Tracks which file the shared counter below currently refers to.
private JournalFile lastFile = null;
// Number of records written for lastFile; reset whenever the file changes.
private final AtomicInteger counter = new AtomicInteger();
public JournalTransaction(final long id, final JournalRecordProvider journal) {
this.id = id;
this.journal = journal;
}
/** Swaps the record provider, e.g. when compaction replaces the journal view. */
public void replaceRecordProvider(final JournalRecordProvider provider) {
journal = provider;
}
/**
 * @return the id
 */
public long getId() {
return id;
}
/** Number of records this transaction has written to {@code file} so far. */
public int getCounter(final JournalFile file) {
return internalgetCounter(file).intValue();
}
public void incCounter(final JournalFile file) {
internalgetCounter(file).incrementAndGet();
}
/** Returns the record ids of all positive updates (empty array if none). */
public long[] getPositiveArray() {
if (pos == null) {
return new long[0];
}
else {
int i = 0;
long[] ids = new long[pos.size()];
for (JournalUpdate el : pos) {
ids[i++] = el.getId();
}
return ids;
}
}
public void setCompacting() {
compacting = true;
// Everything is cleared on the transaction...
// since we are compacting, everything is at the compactor's level
clear();
}
/**
 * This is used to merge transactions from compacting
 */
public void merge(final JournalTransaction other) {
if (other.pos != null) {
if (pos == null) {
pos = new ArrayList<JournalUpdate>();
}
pos.addAll(other.pos);
}
if (other.neg != null) {
if (neg == null) {
neg = new ArrayList<JournalUpdate>();
}
neg.addAll(other.neg);
}
if (other.pendingFiles != null) {
if (pendingFiles == null) {
pendingFiles = new HashSet<JournalFile>();
}
pendingFiles.addAll(other.pendingFiles);
}
compacting = false;
}
/**
 * Resets all per-transaction state (files, callbacks, updates, counter).
 */
public void clear() {
// / Compacting is recreating all the previous files and everything
// / so we just clear the list of previous files, previous pos and previous adds
// / The transaction may be working at the top from now
if (pendingFiles != null) {
pendingFiles.clear();
}
if (callbackList != null) {
callbackList.clear();
}
if (pos != null) {
pos.clear();
}
if (neg != null) {
neg.clear();
}
counter.set(0);
lastFile = null;
currentCallback = null;
}
/**
 * Stamps {@code data} with the number of records written to the current file.
 *
 * @param currentFile
 * @param data
 */
public void fillNumberOfRecords(final JournalFile currentFile, final JournalInternalRecord data) {
data.setNumberOfRecords(getCounter(currentFile));
}
/**
 * Returns (creating if needed) the completion callback for {@code file},
 * incrementing its pending count. Rethrows any error already recorded on it.
 */
public TransactionCallback getCallback(final JournalFile file) throws Exception {
if (callbackList == null) {
callbackList = new HashMap<JournalFile, TransactionCallback>();
}
currentCallback = callbackList.get(file);
if (currentCallback == null) {
currentCallback = new TransactionCallback();
callbackList.put(file, currentCallback);
}
// A previous operation on this file failed; surface that error now.
if (currentCallback.getErrorMessage() != null) {
throw ActiveMQExceptionType.createException(currentCallback.getErrorCode(), currentCallback.getErrorMessage());
}
currentCallback.countUp();
return currentCallback;
}
/** Records an add/update of record {@code id} of {@code size} bytes in {@code file}. */
public void addPositive(final JournalFile file, final long id, final int size) {
incCounter(file);
addFile(file);
if (pos == null) {
pos = new ArrayList<JournalUpdate>();
}
pos.add(new JournalUpdate(file, id, size));
}
/** Records a delete of record {@code id} in {@code file}. */
public void addNegative(final JournalFile file, final long id) {
incCounter(file);
addFile(file);
if (neg == null) {
neg = new ArrayList<JournalUpdate>();
}
neg.add(new JournalUpdate(file, id, 0));
}
/**
 * The caller of this method needs to guarantee appendLock.lock at the journal. (unless this is being called from load what is a single thread process).
 */
public void commit(final JournalFile file) {
JournalCompactor compactor = journal.getCompactor();
if (compacting) {
// We were cleared into the compactor; let it replay the commit.
compactor.addCommandCommit(this, file);
}
else {
if (pos != null) {
for (JournalUpdate trUpdate : pos) {
JournalRecord posFiles = journal.getRecords().get(trUpdate.id);
if (compactor != null && compactor.lookupRecord(trUpdate.id)) {
// This is a case where the transaction was opened after compacting was started,
// but the commit arrived while compacting was working
// We need to cache the counter update, so compacting will take the correct files when it is done
compactor.addCommandUpdate(trUpdate.id, trUpdate.file, trUpdate.size);
}
else if (posFiles == null) {
posFiles = new JournalRecord(trUpdate.file, trUpdate.size);
journal.getRecords().put(trUpdate.id, posFiles);
}
else {
posFiles.addUpdateFile(trUpdate.file, trUpdate.size);
}
}
}
if (neg != null) {
for (JournalUpdate trDelete : neg) {
if (compactor != null) {
compactor.addCommandDelete(trDelete.id, trDelete.file);
}
else {
JournalRecord posFiles = journal.getRecords().remove(trDelete.id);
if (posFiles != null) {
posFiles.delete(trDelete.file);
}
}
}
}
// Now add negs for the pos we added in each file in which there were
// transactional operations
// NOTE(review): pendingFiles is only created by addFile(); committing a
// transaction that never touched a file would NPE here — presumably callers
// guarantee at least one operation. Confirm.
for (JournalFile jf : pendingFiles) {
file.incNegCount(jf);
}
}
}
/** Blocks until every per-file callback has completed. */
public void waitCallbacks() throws InterruptedException {
if (callbackList != null) {
for (TransactionCallback callback : callbackList.values()) {
callback.waitCompletion();
}
}
}
/**
 * Wait completion at the latest file only
 */
public void waitCompletion() throws Exception {
if (currentCallback != null) {
currentCallback.waitCompletion();
}
}
/**
 * The caller of this method needs to guarantee appendLock.lock before calling this method if being used outside of the lock context.
 * or else potFilesMap could be affected
 */
public void rollback(final JournalFile file) {
JournalCompactor compactor = journal.getCompactor();
if (compacting && compactor != null) {
compactor.addCommandRollback(this, file);
}
else {
// Now add negs for the pos we added in each file in which there were
// transactional operations
// Note that we do this on rollback as we do on commit, since we need
// to ensure the file containing
// the rollback record doesn't get deleted before the files with the
// transactional operations are deleted
// Otherwise we may run into problems especially with XA where we are
// just left with a prepare when the tx
// has actually been rolled back
for (JournalFile jf : pendingFiles) {
file.incNegCount(jf);
}
}
}
/**
 * The caller of this method needs to guarantee appendLock.lock before calling this method if being used outside of the lock context.
 * or else potFilesMap could be affected
 */
public void prepare(final JournalFile file) {
// We don't want the prepare record getting deleted before time
addFile(file);
}
/**
 * Used by load, when the transaction was not loaded correctly
 */
public void forget() {
// The transaction was not committed or rolled back in the file, so we
// reverse any pos counts we added
for (JournalFile jf : pendingFiles) {
jf.decPosCount();
}
}
@Override
public String toString() {
return "JournalTransaction(" + id + ")";
}
// Returns the shared counter, resetting it to zero whenever the file being
// written changes. Only one file's count is tracked at a time (the current
// write file), keyed by identity via lastFile.
private AtomicInteger internalgetCounter(final JournalFile file) {
if (lastFile != file) {
lastFile = file;
counter.set(0);
}
return counter;
}
// Registers the file as touched by this transaction, pinning it (via a pos
// count) so it cannot be reclaimed before commit/rollback is written.
private void addFile(final JournalFile file) {
if (pendingFiles == null) {
pendingFiles = new HashSet<JournalFile>();
}
if (!pendingFiles.contains(file)) {
pendingFiles.add(file);
// We add a pos for the transaction itself in the file - this
// prevents any transactional operations
// being deleted before a commit or rollback is written
file.incPosCount();
}
}
/** One add/update/delete performed by the transaction against a file. */
private static class JournalUpdate {
private final JournalFile file;
long id;
int size;
/**
 * @param file
 * @param id
 * @param size record size in bytes; 0 for deletes
 */
private JournalUpdate(final JournalFile file, final long id, final int size) {
super();
this.file = file;
this.id = id;
this.size = size;
}
/**
 * @return the id
 */
public long getId() {
return id;
}
}
}
| |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.sling.event.impl.jobs.queues;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Deactivate;
import org.apache.felix.scr.annotations.Properties;
import org.apache.felix.scr.annotations.Property;
import org.apache.felix.scr.annotations.Reference;
import org.apache.felix.scr.annotations.Service;
import org.apache.sling.api.resource.Resource;
import org.apache.sling.api.resource.ResourceResolver;
import org.apache.sling.commons.scheduler.Scheduler;
import org.apache.sling.commons.threads.ThreadPool;
import org.apache.sling.commons.threads.ThreadPoolManager;
import org.apache.sling.event.impl.EventingThreadPool;
import org.apache.sling.event.impl.jobs.JobConsumerManager;
import org.apache.sling.event.impl.jobs.JobHandler;
import org.apache.sling.event.impl.jobs.JobImpl;
import org.apache.sling.event.impl.jobs.config.ConfigurationChangeListener;
import org.apache.sling.event.impl.jobs.config.InternalQueueConfiguration;
import org.apache.sling.event.impl.jobs.config.JobManagerConfiguration;
import org.apache.sling.event.impl.jobs.config.QueueConfigurationManager;
import org.apache.sling.event.impl.jobs.config.QueueConfigurationManager.QueueInfo;
import org.apache.sling.event.impl.jobs.jmx.QueueStatusEvent;
import org.apache.sling.event.impl.jobs.jmx.QueuesMBeanImpl;
import org.apache.sling.event.impl.jobs.stats.StatisticsManager;
import org.apache.sling.event.impl.support.Environment;
import org.apache.sling.event.impl.support.ResourceHelper;
import org.apache.sling.event.jobs.Job;
import org.apache.sling.event.jobs.NotificationConstants;
import org.apache.sling.event.jobs.Queue;
import org.apache.sling.event.jobs.jmx.QueuesMBean;
import org.osgi.service.event.Event;
import org.osgi.service.event.EventAdmin;
import org.osgi.service.event.EventConstants;
import org.osgi.service.event.EventHandler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Implementation of the queue manager.
 *
 * Tracks all active job queues, (re)starts them when topics appear or the
 * configuration changes, and periodically closes idle queues. Registered as
 * a scheduled {@link Runnable} (maintenance), an {@link EventHandler}
 * (job-added notifications) and a {@link ConfigurationChangeListener}.
 */
@Component(immediate=true)
@Service(value={Runnable.class, QueueManager.class, EventHandler.class})
@Properties({
    @Property(name=Scheduler.PROPERTY_SCHEDULER_PERIOD, longValue=60),
    @Property(name=Scheduler.PROPERTY_SCHEDULER_CONCURRENT, boolValue=false),
    @Property(name=EventConstants.EVENT_TOPIC, value=NotificationConstants.TOPIC_JOB_ADDED)
})
public class QueueManager
    implements Runnable, EventHandler, ConfigurationChangeListener {

    /** Default logger. */
    private final Logger logger = LoggerFactory.getLogger(this.getClass());

    @Reference
    private EventAdmin eventAdmin;

    @Reference
    private Scheduler scheduler;

    @Reference
    private JobConsumerManager jobConsumerManager;

    @Reference
    private QueuesMBean queuesMBean;

    @Reference
    private ThreadPoolManager threadPoolManager;

    /**
     * Our thread pool.
     */
    @Reference(referenceInterface=EventingThreadPool.class)
    private ThreadPool threadPool;

    /** The job manager configuration. */
    @Reference
    private JobManagerConfiguration configuration;

    @Reference
    private StatisticsManager statisticsManager;

    /** Lock object for the queues map - we don't want to sync directly on the concurrent map. */
    private final Object queuesLock = new Object();

    /** All active queues. */
    private final Map<String, JobQueueImpl> queues = new ConcurrentHashMap<String, JobQueueImpl>();

    /** We count the scheduler runs. Only mutated from the (non-concurrent) scheduler thread. */
    private volatile long schedulerRuns;

    /** Flag whether the manager is active or suspended. */
    private final AtomicBoolean isActive = new AtomicBoolean(false);

    /** The queue services. */
    private volatile QueueServices queueServices;

    /**
     * Activate this component.
     * Wires all referenced services into a single QueueServices holder that
     * is handed to every queue, then registers for configuration changes.
     * @param props Configuration properties
     */
    @Activate
    protected void activate(final Map<String, Object> props) {
        logger.info("Apache Sling Queue Manager started on instance {}", Environment.APPLICATION_ID);
        this.queueServices = new QueueServices();
        queueServices.configuration = this.configuration;
        queueServices.eventAdmin = this.eventAdmin;
        queueServices.jobConsumerManager = this.jobConsumerManager;
        queueServices.scheduler = this.scheduler;
        queueServices.threadPoolManager = this.threadPoolManager;
        queueServices.statisticsManager = statisticsManager;
        queueServices.eventingThreadPool = this.threadPool;
        this.configuration.addListener(this);
    }

    /**
     * Deactivate this component.
     * Closes all queues and notifies the MBean about each removal.
     */
    @Deactivate
    protected void deactivate() {
        logger.debug("Apache Sling Queue Manager stopping on instance {}", Environment.APPLICATION_ID);
        this.configuration.removeListener(this);
        final Iterator<JobQueueImpl> i = this.queues.values().iterator();
        while ( i.hasNext() ) {
            final JobQueueImpl jbq = i.next();
            jbq.close();
            // update mbeans
            ((QueuesMBeanImpl)queuesMBean).sendEvent(new QueueStatusEvent(null, jbq));
        }
        this.queues.clear();
        this.queueServices = null;
        logger.info("Apache Sling Queue Manager stopped on instance {}", Environment.APPLICATION_ID);
    }

    /**
     * Invoked from {@link #run()} on every scheduler period.
     * Maintains active queues, rescans topics every third run, and every
     * fifth run removes queues that report themselves closable (idle).
     */
    private void maintain() {
        this.schedulerRuns++;
        logger.debug("Queue manager maintenance: Starting #{}", this.schedulerRuns);
        // queue maintenance
        if ( this.isActive.get() ) {
            for(final JobQueueImpl jbq : this.queues.values() ) {
                jbq.maintain();
            }
        }
        // full topic scan is done every third run
        if ( schedulerRuns % 3 == 0 && this.isActive.get() ) {
            this.fullTopicScan();
        }
        // we only do a full clean up on every fifth run
        final boolean doFullCleanUp = (schedulerRuns % 5 == 0);
        if ( doFullCleanUp ) {
            // check for idle queue
            logger.debug("Checking for idle queues...");
            // we synchronize to avoid creating a queue which is about to be removed during cleanup
            synchronized ( queuesLock ) {
                final Iterator<Map.Entry<String, JobQueueImpl>> i = this.queues.entrySet().iterator();
                while ( i.hasNext() ) {
                    final Map.Entry<String, JobQueueImpl> current = i.next();
                    final JobQueueImpl jbq = current.getValue();
                    if ( jbq.tryToClose() ) {
                        logger.debug("Removing idle job queue {}", jbq);
                        // remove
                        i.remove();
                        // update mbeans
                        ((QueuesMBeanImpl)queuesMBean).sendEvent(new QueueStatusEvent(null, jbq));
                    }
                }
            }
        }
        logger.debug("Queue manager maintenance: Finished #{}", this.schedulerRuns);
    }

    /**
     * Start a new queue
     * This method first searches the corresponding queue - if such a queue
     * does not exist yet, it is created and started.
     *
     * @param queueInfo The queue info
     * @param topics The topics
     */
    private void start(final QueueInfo queueInfo,
                       final Set<String> topics) {
        final InternalQueueConfiguration config = queueInfo.queueConfiguration;
        // get or create queue
        boolean isNewQueue = false;
        JobQueueImpl queue = null;
        // we synchronize to avoid creating a queue which is about to be removed during cleanup
        synchronized ( queuesLock ) {
            queue = this.queues.get(queueInfo.queueName);
            // check for reconfiguration, we really do an identity check here(!)
            if ( queue != null && queue.getConfiguration() != config ) {
                this.outdateQueue(queue);
                // we use a new queue with the configuration
                queue = null;
            }
            if ( queue == null ) {
                queue = JobQueueImpl.createQueue(queueInfo.queueName, config, queueServices, topics);
                // on startup the queue might be empty and we get null back from createQueue
                if ( queue != null ) {
                    isNewQueue = true;
                    queues.put(queueInfo.queueName, queue);
                    ((QueuesMBeanImpl)queuesMBean).sendEvent(new QueueStatusEvent(queue, null));
                }
            }
        }
        if ( queue != null ) {
            if ( !isNewQueue ) {
                queue.wakeUpQueue(topics);
            }
            queue.startJobs();
        }
    }

    /**
     * This method is invoked periodically by the scheduler.
     * In the default configuration every minute
     * @see java.lang.Runnable#run()
     */
    @Override
    public void run() {
        this.maintain();
    }

    /**
     * Take a queue out of service: remove it under its current name, then
     * either close it or (if it still has work) rename and re-register it
     * so a fresh queue can take over the original name.
     */
    private void outdateQueue(final JobQueueImpl queue) {
        // remove the queue with the old name
        // check for main queue
        final String oldName = ResourceHelper.filterQueueName(queue.getName());
        this.queues.remove(oldName);
        // check if we can close or have to rename
        if ( queue.tryToClose() ) {
            // copy statistics
            // update mbeans
            ((QueuesMBeanImpl)queuesMBean).sendEvent(new QueueStatusEvent(null, queue));
        } else {
            queue.outdate();
            // readd with new name, appending $<index> until the name is free
            String newName = ResourceHelper.filterName(queue.getName());
            int index = 0;
            while ( this.queues.containsKey(newName) ) {
                newName = ResourceHelper.filterName(queue.getName()) + '$' + String.valueOf(index++);
            }
            this.queues.put(newName, queue);
            // update mbeans
            ((QueuesMBeanImpl)queuesMBean).sendEvent(new QueueStatusEvent(queue, queue));
        }
    }

    /**
     * Outdate all queues.
     */
    private void restart() {
        // let's rename/close all queues and clear them
        synchronized ( queuesLock ) {
            final List<JobQueueImpl> queues = new ArrayList<JobQueueImpl>(this.queues.values());
            for(final JobQueueImpl queue : queues ) {
                this.outdateQueue(queue);
            }
        }
        // check if we're still active
        final JobManagerConfiguration config = this.configuration;
        if ( config != null ) {
            // FIX: use the local snapshot consistently - the previous code
            // dereferenced this.configuration again after the null check,
            // which could NPE if the reference was unbound concurrently.
            final List<Job> rescheduleList = config.clearJobRetryList();
            for(final Job j : rescheduleList) {
                final JobHandler jh = new JobHandler((JobImpl)j, null, config);
                jh.reschedule();
            }
        }
    }

    /**
     * @param name The queue name
     * @return The queue or {@code null}.
     * @see org.apache.sling.event.jobs.JobManager#getQueue(java.lang.String)
     */
    public Queue getQueue(final String name) {
        return this.queues.get(name);
    }

    /**
     * @return An iterable over the available queues; each call to
     *         {@code iterator()} yields a fresh, read-only iterator.
     * @see org.apache.sling.event.jobs.JobManager#getQueues()
     */
    public Iterable<Queue> getQueues() {
        return new Iterable<Queue>() {
            @Override
            public Iterator<Queue> iterator() {
                // FIX: create a new iterator per call. The previous
                // implementation captured a single iterator outside this
                // method, so a second iteration of the Iterable returned an
                // already-exhausted iterator, violating the Iterable contract.
                final Iterator<JobQueueImpl> jqI = queues.values().iterator();
                return new Iterator<Queue>() {
                    @Override
                    public boolean hasNext() {
                        return jqI.hasNext();
                    }
                    @Override
                    public Queue next() {
                        return jqI.next();
                    }
                    @Override
                    public void remove() {
                        throw new UnsupportedOperationException();
                    }
                };
            }
        };
    }

    /**
     * This method is called whenever the topology or queue configurations change.
     * @param active Whether the job handling is active atm.
     */
    @Override
    public void configurationChanged(final boolean active) {
        // are we still active?
        if ( this.configuration != null ) {
            logger.debug("Topology changed {}", active);
            this.isActive.set(active);
            if ( active ) {
                fullTopicScan();
            } else {
                this.restart();
            }
        }
    }

    /**
     * Scans the repository for all topics and starts/wakes a queue for each
     * mapped queue configuration.
     */
    private void fullTopicScan() {
        logger.debug("Scanning repository for existing topics...");
        final Set<String> topics = this.scanTopics();
        final Map<QueueInfo, Set<String>> mapping = this.updateTopicMapping(topics);
        // start queues
        for(final Map.Entry<QueueInfo, Set<String>> entry : mapping.entrySet() ) {
            this.start(entry.getKey(), entry.getValue());
        }
    }

    /**
     * Scan the resource tree for topics.
     * Child resource names use '.' where the topic uses '/', hence the replace.
     */
    private Set<String> scanTopics() {
        final Set<String> topics = new HashSet<String>();
        final ResourceResolver resolver = this.configuration.createResourceResolver();
        try {
            final Resource baseResource = resolver.getResource(this.configuration.getLocalJobsPath());
            // sanity check - should never be null
            if ( baseResource != null ) {
                final Iterator<Resource> topicIter = baseResource.listChildren();
                while ( topicIter.hasNext() ) {
                    final Resource topicResource = topicIter.next();
                    final String topic = topicResource.getName().replace('.', '/');
                    logger.debug("Found topic {}", topic);
                    topics.add(topic);
                }
            }
        } finally {
            resolver.close();
        }
        return topics;
    }

    /**
     * Reacts to job-added notifications by starting/waking the queue
     * responsible for the job's topic.
     * @see org.osgi.service.event.EventHandler#handleEvent(org.osgi.service.event.Event)
     */
    @Override
    public void handleEvent(final Event event) {
        final String topic = (String)event.getProperty(NotificationConstants.NOTIFICATION_PROPERTY_JOB_TOPIC);
        if ( this.isActive.get() && topic != null ) {
            final QueueInfo info = this.configuration.getQueueConfigurationManager().getQueueInfo(topic);
            this.start(info, Collections.singleton(topic));
        }
    }

    /**
     * Get the latest mapping from queue name to topics
     */
    private Map<QueueInfo, Set<String>> updateTopicMapping(final Set<String> topics) {
        final Map<QueueInfo, Set<String>> mapping = new HashMap<QueueConfigurationManager.QueueInfo, Set<String>>();
        for(final String topic : topics) {
            final QueueInfo queueInfo = this.configuration.getQueueConfigurationManager().getQueueInfo(topic);
            Set<String> queueTopics = mapping.get(queueInfo);
            if ( queueTopics == null ) {
                queueTopics = new HashSet<String>();
                mapping.put(queueInfo, queueTopics);
            }
            queueTopics.add(topic);
        }
        this.logger.debug("Established new topic mapping: {}", mapping);
        return mapping;
    }

    /** Bind method for the eventing thread pool reference. */
    protected void bindThreadPool(final EventingThreadPool etp) {
        this.threadPool = etp;
    }

    /** Unbind method for the eventing thread pool reference. */
    protected void unbindThreadPool(final EventingThreadPool etp) {
        if ( this.threadPool == etp ) {
            this.threadPool = null;
        }
    }
}
| |
package automenta.vivisect.swing.property;
import java.awt.Component;
import java.awt.Dialog;
import java.awt.Dialog.ModalityType;
import java.awt.Frame;
import java.awt.Window;
import java.beans.PropertyChangeEvent;
import java.beans.PropertyChangeListener;
import java.beans.PropertyEditor;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.StringReader;
import java.io.StringWriter;
import java.lang.reflect.Field;
import java.util.Collection;
import java.util.Date;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.swing.JButton;
import javax.swing.JLayeredPane;
import javax.swing.JPanel;
import automenta.vivisect.swing.property.propertysheet.PropertySheet;
import automenta.vivisect.swing.property.propertysheet.PropertySheetDialog;
import automenta.vivisect.swing.property.propertysheet.PropertySheetPanel;
import automenta.vivisect.swing.property.swing.BannerPanel;
/**
 * Reflection-based helpers that expose {@code @Property}-annotated fields of
 * arbitrary objects (or classes, for static fields) as editable property
 * sheets, and load/save them from {@link Properties} text or files.
 */
public class PropertyUtils {

    /**
     * Builds a SerializableProperty for a field annotated with {@code @Property}.
     *
     * @param obj the instance (or Class) the field value is read from
     * @param f the reflected field
     * @param forEdit currently unused; kept for signature compatibility
     * @return the property, or {@code null} when the field is not annotated
     */
    private static SerializableProperty createProperty(Object obj, Field f,
            boolean forEdit) {
        Property a = f.getAnnotation(Property.class);
        if (a != null) {
            f.setAccessible(true);
            String name = f.getName();
            String displayName = a.name();
            String desc = a.description();
            Class<? extends PropertyEditor> editClass = null;
            String category = a.category();
            // Fall back to the field name / display name when the annotation
            // leaves name or description empty.
            if (a.name().length() == 0)
                displayName = f.getName();
            if (a.description().length() == 0) {
                desc = displayName;
            }
            if (a.editorClass() != PropertyEditor.class) {
                editClass = a.editorClass();
            }
            if (category == null || category.length() == 0) {
                category = "Base";
            }
            Object o = null;
            // Try a static read first (covers the "obj is a Class" case),
            // then fall back to an instance read; failures are intentionally
            // ignored and leave the value null.
            try {
                o = f.get(null);
            } catch (Exception e) {
                // nothing
            }
            if (o == null) {
                try {
                    o = f.get(obj);
                } catch (Exception e) {
                    // nothing
                }
            }
            SerializableProperty pp = new SerializableProperty(name,
                    f.getType(), o);
            pp.setShortDescription(desc);
            pp.setEditable(a.editable());
            pp.setDisplayName(displayName);
            pp.setEditor(editClass);
            if (category != null && category.length() > 0) {
                pp.setCategory(category);
            }
            return pp;
        }
        return null;
    }

    /**
     * Collects all fields (public and declared, up the whole superclass
     * chain, excluding Object) of the given instance or class.
     */
    private static Field[] getFields(Object o) {
        Class<?> c;
        if (o instanceof Class<?>)
            c = (Class<?>) o;
        else
            c = o.getClass();
        HashSet<Field> fields = new HashSet<>();
        while (c != Object.class) {
            for (Field f : c.getFields())
                fields.add(f);
            for (Field f : c.getDeclaredFields()) {
                f.setAccessible(true);
                fields.add(f);
            }
            c = c.getSuperclass();
        }
        return fields.toArray(new Field[0]);
    }

    /**
     * Maps field name to SerializableProperty for every annotated field of
     * {@code obj}.
     */
    public static LinkedHashMap<String, SerializableProperty> getProperties(
            Object obj, boolean editable) {
        LinkedHashMap<String, SerializableProperty> props = new LinkedHashMap<String, SerializableProperty>();
        for (Field f : getFields(obj)) {
            SerializableProperty pp = createProperty(obj, f, editable);
            if (pp != null)
                props.put(f.getName(), pp);
        }
        return props;
    }

    /**
     * Applies the given properties to {@code obj}, firing change events.
     */
    public static void setProperties(Object obj,
            LinkedHashMap<String, SerializableProperty> props) {
        setProperties(obj, props, true);
    }

    /**
     * Applies the given properties to the annotated fields of {@code obj}.
     * Values that cannot be assigned directly are coerced through the numeric
     * primitive setters. When {@code triggerEvents} is set and {@code obj}
     * implements PropertyChangeListener, a change event is fired per field
     * whose value actually changed.
     */
    public static void setProperties(Object obj,
            LinkedHashMap<String, SerializableProperty> props,
            boolean triggerEvents) {
        Class<? extends Object> providerClass = obj instanceof Class<?> ? (Class<?>) obj
                : obj.getClass();
        String name;
        SerializableProperty property;
        Object propertyValue;
        for (Field f : getFields(providerClass)) {
            Property a = f.getAnnotation(Property.class);
            if (a == null)
                continue;
            name = f.getName();
            property = props.get(name);
            if (property == null) {
                Logger.getGlobal().log(Level.WARNING,
                        "Property " + name + " will not be saved.");
                continue;
            }
            try {
                propertyValue = property.getValue();
                Object oldValue = f.get(obj);
                try {
                    f.set(obj, propertyValue);
                } catch (Exception e) {
                    // Direct assignment failed (e.g. editor returned a Double
                    // for an int field) - coerce via the primitive setters.
                    switch (f.getGenericType().toString()) {
                    case "int":
                    case "Integer":
                        f.setInt(obj, (int) Double.parseDouble(propertyValue
                                .toString()));
                        propertyValue = (int) Double.parseDouble(propertyValue
                                .toString());
                        break;
                    case "long":
                    case "Long":
                        f.setLong(obj, (long) Double.parseDouble(propertyValue
                                .toString()));
                        propertyValue = (long) Double.parseDouble(propertyValue
                                .toString());
                        break;
                    case "short":
                    case "Short":
                        f.setShort(obj, (short) Double
                                .parseDouble(propertyValue.toString()));
                        propertyValue = (short) Double
                                .parseDouble(propertyValue.toString());
                        break;
                    case "byte":
                    case "Byte":
                        f.setByte(obj, (byte) Double.parseDouble(propertyValue
                                .toString()));
                        propertyValue = (byte) Double.parseDouble(propertyValue
                                .toString());
                        break;
                    case "float":
                    case "Float":
                        f.setFloat(obj, (float) Double
                                .parseDouble(propertyValue.toString()));
                        propertyValue = (float) Double
                                .parseDouble(propertyValue.toString());
                        break;
                    case "double":
                    case "Double":
                        f.setDouble(obj,
                                Double.parseDouble(propertyValue.toString()));
                        // FIX: reassign like every other numeric branch so the
                        // fired event carries the coerced value.
                        propertyValue = Double.parseDouble(propertyValue
                                .toString());
                        break;
                    default:
                        break;
                    }
                }
                // FIX: null-safe comparison - the old code NPE'd on
                // oldValue.equals(...) when the previous field value was null,
                // silently skipping the change event.
                if (triggerEvents
                        && !java.util.Objects.equals(oldValue, propertyValue)
                        && obj instanceof PropertyChangeListener)
                    ((PropertyChangeListener) obj)
                            .propertyChange(new PropertyChangeEvent(
                                    PropertyUtils.class, f.getName(), oldValue,
                                    propertyValue));
            } catch (Exception e) {
                e.printStackTrace();
                continue;
            }
        }
    }

    /**
     * Saves the annotated properties of {@code obj} to a properties file.
     */
    public static void saveProperties(Object obj, File f) throws IOException {
        LinkedHashMap<String, SerializableProperty> props = getProperties(obj,
                true);
        Properties p = new Properties();
        for (SerializableProperty prop : props.values())
            p.setProperty(prop.getName(), prop.toString());
        // FIX: try-with-resources - the FileWriter was previously never closed.
        try (FileWriter writer = new FileWriter(f)) {
            p.store(writer, "Properties saved on " + new Date());
        }
    }

    /**
     * Serializes the annotated properties of {@code obj} to a properties-format
     * string (with the date comment stripped).
     */
    public static String saveProperties(Object obj) throws IOException {
        LinkedHashMap<String, SerializableProperty> props = getProperties(obj,
                true);
        Properties p = new Properties();
        for (SerializableProperty prop : props.values())
            p.setProperty(prop.getName(), prop.toString());
        StringWriter writer = new StringWriter();
        p.store(writer, null);
        return writer.toString().replaceAll("^\\#.*", "").trim()+"\n";
    }

    /**
     * Applies values from a java.util.Properties instance; keys without a
     * matching annotated field are ignored.
     */
    public static void setProperties(Object obj, Properties p,
            boolean triggerEvents) {
        LinkedHashMap<String, SerializableProperty> props = getProperties(obj,
                true);
        for (Entry<Object, Object> entry : p.entrySet()) {
            if (props.containsKey(entry.getKey())) {
                SerializableProperty sp = props.get(entry.getKey());
                sp.fromString("" + entry.getValue());
            }
        }
        setProperties(obj, props, triggerEvents);
    }

    /**
     * Loads properties from a properties-format string.
     */
    public static void loadProperties(Object obj, String properties,
            boolean triggerEvents) throws IOException {
        Properties p = new Properties();
        StringReader reader = new StringReader(properties);
        p.load(reader);
        setProperties(obj, p, triggerEvents);
    }

    /**
     * Loads properties from a file, firing change events.
     */
    public static void loadProperties(Object obj, File f) throws IOException {
        loadProperties(obj, f, true);
    }

    /**
     * Loads properties from a file.
     */
    public static void loadProperties(Object obj, File f, boolean triggerEvents)
            throws IOException {
        Properties p = new Properties();
        // FIX: try-with-resources - the FileReader was previously never closed.
        try (FileReader reader = new FileReader(f)) {
            p.load(reader);
        }
        setProperties(obj, p, triggerEvents);
    }

    /**
     * Builds a categorized property sheet panel for {@code obj}.
     */
    public static PropertySheetPanel getPropsPanel(Object obj, boolean editable) {
        PropertySheetPanel psp = new PropertySheetPanel();
        psp.setMode(PropertySheet.VIEW_AS_CATEGORIES);
        psp.setToolBarVisible(false);
        psp.setEnabled(true);
        psp.setSortingCategories(true);
        psp.setDescriptionVisible(true);
        Collection<SerializableProperty> props = getProperties(obj, editable)
                .values();
        for (SerializableProperty p : props) {
            // A property is editable only when both the sheet and the
            // property itself allow it.
            p.setEditable(editable && p.isEditable());
            psp.addProperty(p);
        }
        return psp;
    }

    /**
     * Shows a modal property dialog for {@code obj} and, unless cancelled,
     * writes the edited values back.
     */
    public static void editProperties(Window parent, Object obj,
            boolean editable) {
        final PropertySheetPanel psp = getPropsPanel(obj, editable);
        final PropertySheetDialog propertySheetDialog = createWindow(parent,
                editable, psp, "Properties of "
                        + obj.getClass().getSimpleName());
        if (!propertySheetDialog.ask()) {
            // cancelled
            return;
        }
        LinkedHashMap<String, SerializableProperty> newProps = new LinkedHashMap<>();
        for (automenta.vivisect.swing.property.propertysheet.Property p : psp.getProperties())
            newProps.put(p.getName(), new SerializableProperty(p));
        setProperties(obj, newProps, true);
    }

    /**
     * Creates (but does not show) the property sheet dialog, parented to a
     * Dialog or Frame when possible, and relabels the built-in buttons by
     * walking the component tree (the dialog offers no direct API for this).
     */
    public static PropertySheetDialog createWindow(Window parent,
            boolean editable, final PropertySheetPanel psp, String title) {
        final PropertySheetDialog propertySheetDialog;
        if (parent instanceof Dialog) {
            Dialog pDialog = (Dialog) parent;
            propertySheetDialog = new PropertySheetDialog(pDialog);
        } else if (parent instanceof Frame) {
            Frame pFrame = (Frame) parent;
            propertySheetDialog = new PropertySheetDialog(pFrame);
        } else {
            propertySheetDialog = new PropertySheetDialog() {
                private static final long serialVersionUID = 1L;
                @Override
                public void ok() {
                    // Commit any in-progress cell edit before closing,
                    // otherwise the last edited value would be lost.
                    if (psp.getTable().getEditorComponent() != null)
                        psp.getTable().commitEditing();
                    super.ok();
                };
            };
        }
        if (editable) {
            propertySheetDialog
                    .setDialogMode(PropertySheetDialog.OK_CANCEL_DIALOG);
        } else {
            propertySheetDialog.setDialogMode(PropertySheetDialog.CLOSE_DIALOG);
        }
        // Walk the dialog's component tree to find the two buttons and set
        // their labels; sb counts down as buttons are relabeled.
        int sb = 1;
        for (Component compL0 : propertySheetDialog.getRootPane()
                .getComponents()) {
            if (compL0 instanceof JLayeredPane) {
                for (Component compL01 : ((JLayeredPane) compL0)
                        .getComponents()) {
                    if (!(compL01 instanceof JPanel))
                        continue;
                    for (Component compL1 : ((JPanel) compL01).getComponents()) {
                        if (compL1 instanceof BannerPanel)
                            continue;
                        if (compL1 instanceof JPanel) {
                            for (Component compL2 : ((JPanel) compL1)
                                    .getComponents()) {
                                for (Component compL3 : ((JPanel) compL2)
                                        .getComponents()) {
                                    if (compL3 instanceof JButton) {
                                        if (propertySheetDialog.getDialogMode() == PropertySheetDialog.OK_CANCEL_DIALOG
                                                && sb == 1) {
                                            ((JButton) compL3).setText("OK");
                                            sb--;
                                        } else if (propertySheetDialog
                                                .getDialogMode() == PropertySheetDialog.CLOSE_DIALOG
                                                || sb == 0) {
                                            ((JButton) compL3)
                                                    .setText(propertySheetDialog
                                                            .getDialogMode() == PropertySheetDialog.CLOSE_DIALOG ? "Close"
                                                            : "Cancel");
                                            sb--;
                                            break;
                                        }
                                        if (sb < 0)
                                            break;
                                    }
                                }
                                if (sb < 0)
                                    break;
                            }
                        }
                    }
                }
            }
        }
        if (title != null) {
            propertySheetDialog.getBanner().setTitle(title);
            propertySheetDialog.setTitle(title);
        }
        // propertySheetDialog.setIconImage(ImageUtils.getImage("images/menus/settings.png"));
        // propertySheetDialog.getBanner().setIcon(ImageUtils.getIcon("images/settings.png"));
        propertySheetDialog.getContentPane().add(psp);
        propertySheetDialog.pack();
        propertySheetDialog.setLocationRelativeTo(null);
        propertySheetDialog.setModalityType(ModalityType.DOCUMENT_MODAL);
        return propertySheetDialog;
    }
}
| |
/*
* Copyright (c) 2010-2020. Axon Framework
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.axonframework.commandhandling.gateway;
import org.axonframework.commandhandling.CommandBus;
import org.axonframework.commandhandling.CommandCallback;
import org.axonframework.commandhandling.CommandExecutionException;
import org.axonframework.commandhandling.CommandMessage;
import org.axonframework.commandhandling.CommandResultMessage;
import org.axonframework.commandhandling.GenericCommandMessage;
import org.axonframework.commandhandling.GenericCommandResultMessage;
import org.axonframework.messaging.MessageDispatchInterceptor;
import org.axonframework.messaging.unitofwork.CurrentUnitOfWork;
import org.axonframework.messaging.unitofwork.DefaultUnitOfWork;
import org.axonframework.messaging.unitofwork.UnitOfWork;
import org.axonframework.utils.MockException;
import org.junit.jupiter.api.*;
import org.mockito.*;
import org.mockito.invocation.*;
import org.mockito.stubbing.*;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicReference;
import static org.axonframework.commandhandling.GenericCommandResultMessage.asCommandResultMessage;
import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.Mockito.*;
/**
* Test class validating the {@link DefaultCommandGateway}.
*
* @author Allard Buijze
* @author Nakul Mishra
*/
class DefaultCommandGatewayTest {
// Gateway under test, assembled in setUp() from the mocks below.
private DefaultCommandGateway testSubject;
// Mocked command bus; each test stubs dispatch() to drive the scenario.
private CommandBus mockCommandBus;
// Mocked retry scheduler used by the "command is retried" tests.
private RetryScheduler mockRetryScheduler;
// Mocked dispatch interceptor, stubbed as a pass-through in setUp().
private MessageDispatchInterceptor<CommandMessage<?>> mockCommandMessageTransformer;
@SuppressWarnings("unchecked")
@BeforeEach
void setUp() {
    // Fresh mocks per test. The interceptor is stubbed as a pass-through so
    // every dispatched message reaches the bus unchanged, while still letting
    // tests verify that it was consulted.
    mockCommandBus = mock(CommandBus.class);
    mockRetryScheduler = mock(RetryScheduler.class);
    mockCommandMessageTransformer = mock(MessageDispatchInterceptor.class);
    when(mockCommandMessageTransformer.handle(isA(CommandMessage.class)))
            .thenAnswer(invocation -> invocation.getArguments()[0]);
    testSubject = DefaultCommandGateway.builder()
                                       .commandBus(mockCommandBus)
                                       .retryScheduler(mockRetryScheduler)
                                       .dispatchInterceptors(mockCommandMessageTransformer)
                                       .build();
}
@SuppressWarnings({"unchecked"})
@Test
void testSendWithCallbackCommandIsRetried() {
    // Every dispatch fails with a nested RuntimeException, forcing the
    // gateway to consult the retry scheduler after each attempt.
    doAnswer(invocation -> {
        ((CommandCallback<Object, Object>) invocation.getArguments()[1])
                .onResult((CommandMessage<Object>) invocation.getArguments()[0],
                          asCommandResultMessage(new RuntimeException(new RuntimeException())));
        return null;
    }).when(mockCommandBus).dispatch(isA(CommandMessage.class), isA(CommandCallback.class));
    // First retry request reschedules immediately, second declines, so
    // exactly two dispatch attempts happen in total.
    when(mockRetryScheduler.scheduleRetry(isA(CommandMessage.class), isA(RuntimeException.class), isA(List.class),
                                          isA(Runnable.class)))
            .thenAnswer(new RescheduleCommand())
            .thenReturn(false);
    //noinspection rawtypes
    final AtomicReference<CommandResultMessage> actualResult = new AtomicReference<>();
    testSubject.send("Command",
                     (CommandCallback<Object, Object>) (commandMessage, commandResultMessage) -> actualResult
                             .set(commandResultMessage));
    verify(mockCommandMessageTransformer).handle(isA(CommandMessage.class));
    //noinspection rawtypes
    ArgumentCaptor<List> captor = ArgumentCaptor.forClass(List.class);
    verify(mockRetryScheduler, times(2)).scheduleRetry(isA(CommandMessage.class), isA(RuntimeException.class),
                                                       captor.capture(), isA(Runnable.class));
    verify(mockCommandBus, times(2)).dispatch(isA(CommandMessage.class), isA(CommandCallback.class));
    // The callback receives the final, exceptional result, and the captured
    // failure history grows by one entry per attempt.
    assertTrue(actualResult.get().isExceptional());
    assertTrue(actualResult.get().exceptionResult() instanceof RuntimeException);
    assertEquals(1, captor.getAllValues().get(0).size());
    assertEquals(2, captor.getValue().size());
    assertEquals(2, ((Class<? extends Throwable>[]) captor.getValue().get(0)).length);
}
@SuppressWarnings({"unchecked"})
@Test
void testSendWithoutCallbackCommandIsRetried() {
    // Every dispatch fails; the retry scheduler reschedules once, then gives
    // up - the future returned by send() must complete exceptionally.
    doAnswer(invocation -> {
        ((CommandCallback<Object, Object>) invocation.getArguments()[1]).onResult(
                (CommandMessage<Object>) invocation.getArguments()[0],
                asCommandResultMessage(new RuntimeException(new RuntimeException()))
        );
        return null;
    }).when(mockCommandBus).dispatch(isA(CommandMessage.class), isA(CommandCallback.class));
    when(mockRetryScheduler.scheduleRetry(isA(CommandMessage.class), isA(RuntimeException.class), isA(List.class),
                                          isA(Runnable.class)))
            .thenAnswer(new RescheduleCommand())
            .thenReturn(false);
    CompletableFuture<?> future = testSubject.send("Command");
    verify(mockCommandMessageTransformer).handle(isA(CommandMessage.class));
    //noinspection rawtypes
    ArgumentCaptor<List> captor = ArgumentCaptor.forClass(List.class);
    verify(mockRetryScheduler, times(2)).scheduleRetry(isA(CommandMessage.class), isA(RuntimeException.class),
                                                       captor.capture(), isA(Runnable.class));
    verify(mockCommandBus, times(2)).dispatch(isA(CommandMessage.class), isA(CommandCallback.class));
    // Failure history grows per attempt; the future ends exceptionally.
    assertEquals(1, captor.getAllValues().get(0).size());
    assertEquals(2, captor.getValue().size());
    assertEquals(2, ((Class<? extends Throwable>[]) captor.getValue().get(0)).length);
    assertTrue(future.isDone());
    assertTrue(future.isCompletedExceptionally());
}
@SuppressWarnings({"unchecked"})
@Test
void testSendWithoutCallback() throws ExecutionException, InterruptedException {
    // Stub the bus to complete every dispatched command with "returnValue".
    doAnswer(invocation -> {
        CommandMessage<Object> message =
                (CommandMessage<Object>) invocation.getArguments()[0];
        CommandCallback<Object, Object> callback =
                (CommandCallback<Object, Object>) invocation.getArguments()[1];
        callback.onResult(message, asCommandResultMessage("returnValue"));
        return null;
    }).when(mockCommandBus).dispatch(isA(CommandMessage.class), isA(CommandCallback.class));
    // The returned future must already be completed with the bus result.
    CompletableFuture<?> result = testSubject.send("Command");
    assertTrue(result.isDone());
    assertEquals("returnValue", result.get());
}
@SuppressWarnings({"unchecked"})
@Test
void testSendAndWaitCommandIsRetried() {
    // Every dispatch fails with the same exception instance so the thrown
    // exception can be identity-checked below.
    final RuntimeException failure = new RuntimeException(new RuntimeException());
    doAnswer(invocation -> {
        ((CommandCallback<Object, Object>) invocation.getArguments()[1]).onResult(
                (CommandMessage<Object>) invocation.getArguments()[0], asCommandResultMessage(failure)
        );
        return null;
    }).when(mockCommandBus).dispatch(isA(CommandMessage.class), isA(CommandCallback.class));
    // One reschedule, then the scheduler declines -> two attempts total.
    when(mockRetryScheduler.scheduleRetry(isA(CommandMessage.class), isA(RuntimeException.class), isA(List.class),
                                          isA(Runnable.class)))
            .thenAnswer(new RescheduleCommand())
            .thenReturn(false);
    try {
        testSubject.sendAndWait("Command");
    } catch (RuntimeException rte) {
        // The original failure instance must surface to the caller.
        assertSame(failure, rte);
    }
    verify(mockCommandMessageTransformer).handle(isA(CommandMessage.class));
    //noinspection rawtypes
    ArgumentCaptor<List> captor = ArgumentCaptor.forClass(List.class);
    verify(mockRetryScheduler, times(2)).scheduleRetry(isA(CommandMessage.class), isA(RuntimeException.class),
                                                       captor.capture(), isA(Runnable.class));
    verify(mockCommandBus, times(2)).dispatch(isA(CommandMessage.class), isA(CommandCallback.class));
    assertEquals(1, captor.getAllValues().get(0).size());
    assertEquals(2, captor.getValue().size());
    assertEquals(2, ((Class<? extends Throwable>[]) captor.getValue().get(0)).length);
}
@SuppressWarnings({"unchecked"})
@Test
void testSendAndWaitWithTimeoutCommandIsRetried() {
    // Same retry scenario as above, but through the timed sendAndWait variant.
    final RuntimeException failure = new RuntimeException(new RuntimeException());
    doAnswer(invocation -> {
        ((CommandCallback<Object, Object>) invocation.getArguments()[1]).onResult(
                (CommandMessage<Object>) invocation.getArguments()[0], asCommandResultMessage(failure)
        );
        return null;
    }).when(mockCommandBus).dispatch(isA(CommandMessage.class), isA(CommandCallback.class));
    when(mockRetryScheduler.scheduleRetry(isA(CommandMessage.class), isA(RuntimeException.class), isA(List.class),
                                          isA(Runnable.class)))
            .thenAnswer(new RescheduleCommand())
            .thenReturn(false);
    try {
        testSubject.sendAndWait("Command", 1, TimeUnit.SECONDS);
    } catch (RuntimeException rte) {
        // The original failure instance must surface to the caller.
        assertSame(failure, rte);
    }
    verify(mockCommandMessageTransformer).handle(isA(CommandMessage.class));
    //noinspection rawtypes
    ArgumentCaptor<List> captor = ArgumentCaptor.forClass(List.class);
    verify(mockRetryScheduler, times(2)).scheduleRetry(isA(CommandMessage.class), isA(RuntimeException.class),
                                                       captor.capture(), isA(Runnable.class));
    verify(mockCommandBus, times(2)).dispatch(isA(CommandMessage.class), isA(CommandCallback.class));
    assertEquals(1, captor.getAllValues().get(0).size());
    assertEquals(2, captor.getValue().size());
    assertEquals(2, ((Class<? extends Throwable>[]) captor.getValue().get(0)).length);
}
/**
 * Verifies that the no-timeout sendAndWait returns null (rather than throwing) when the
 * waiting thread is interrupted before any result arrives, and that the interrupt flag is
 * left set for the caller to observe.
 */
@SuppressWarnings("unchecked")
@Test
void testSendAndWaitNullOnInterrupt() {
// The command bus never completes the callback; it only interrupts the calling thread,
// which aborts the blocking wait inside sendAndWait.
doAnswer(invocation -> {
Thread.currentThread().interrupt();
return null;
}).when(mockCommandBus).dispatch(isA(CommandMessage.class), isA(CommandCallback.class));
assertNull(testSubject.sendAndWait("Hello"));
// NOTE: Thread.interrupted() both checks and clears the flag, so the test thread is clean afterwards.
assertTrue(Thread.interrupted(), "Interrupt flag should be set on thread");
verify(mockCommandBus).dispatch(isA(CommandMessage.class), isA(CommandCallback.class));
}
/**
 * Verifies that sendAndWait(payload, timeout, unit) fails with a
 * {@link CommandExecutionException} caused by an {@link InterruptedException} when the waiting
 * thread is interrupted before a result arrives, and that the interrupt flag is left set.
 */
@SuppressWarnings("unchecked")
@Test
void testSendAndWaitWithTimeoutNullOnInterrupt() {
    // The command bus never completes the callback; it only interrupts the calling thread,
    // which aborts the blocking wait inside sendAndWait.
    doAnswer(invocation -> {
        Thread.currentThread().interrupt();
        return null;
    }).when(mockCommandBus).dispatch(isA(CommandMessage.class), isA(CommandCallback.class));
    try {
        // A single invocation suffices. The duplicated call that used to follow this line was
        // unreachable (this call always throws) and, had it ever executed, would have broken
        // the implicit times(1) dispatch verification below.
        testSubject.sendAndWait("Hello", 60, TimeUnit.SECONDS);
        fail("Expected interrupted exception");
    } catch (CommandExecutionException e) {
        assertTrue(e.getCause() instanceof InterruptedException);
    }
    // Thread.interrupted() both checks and clears the flag, leaving the test thread clean.
    assertTrue(Thread.interrupted(), "Interrupt flag should be set on thread");
    verify(mockCommandBus).dispatch(isA(CommandMessage.class), isA(CommandCallback.class));
}
/**
 * Verifies that sendAndWait(payload, timeout, unit) fails with a
 * {@link CommandExecutionException} caused by a {@link TimeoutException} when no result
 * arrives within the timeout. The command-bus mock never invokes the callback here, so the
 * 10 ms wait always expires.
 */
@SuppressWarnings("unchecked")
@Test
void testSendAndWaitWithTimeoutNullOnTimeout() {
    try {
        assertNull(testSubject.sendAndWait("Hello", 10, TimeUnit.MILLISECONDS));
        // Corrected failure message: reaching this line means the call did NOT time out.
        // The previous text ("Expected interrupted exception") was copy-pasted from the
        // interrupt test and misreported what this test asserts.
        fail("Expected timeout exception");
    } catch (CommandExecutionException e) {
        assertTrue(e.getCause() instanceof TimeoutException);
    }
    verify(mockCommandBus).dispatch(isA(CommandMessage.class), isA(CommandCallback.class));
}
/**
 * Verifies that correlation data registered on the active unit of work is attached as
 * meta-data to a command sent as a plain payload object.
 */
@SuppressWarnings("unchecked")
@Test
void testCorrelationDataIsAttachedToCommandAsObject() {
UnitOfWork<CommandMessage<?>> unitOfWork = DefaultUnitOfWork.startAndGet(null);
unitOfWork.registerCorrelationDataProvider(message -> Collections.singletonMap("correlationId", "test"));
testSubject.send("Hello");
// The dispatched message must carry the correlation entry provided above.
verify(mockCommandBus).dispatch(argThat(x -> "test".equals(x.getMetaData().get("correlationId"))),
isA(CommandCallback.class));
// Clean up so the started unit of work does not leak into other tests.
CurrentUnitOfWork.clear(unitOfWork);
}
/**
 * Verifies correlation-data attachment when sending a pre-built message: correlation entries
 * are merged into the message's meta-data, but meta-data already present on the message
 * ("header" -> "value") takes precedence over the correlation provider's value
 * ("header" -> "someValue").
 */
@SuppressWarnings("unchecked")
@Test
void testCorrelationDataIsAttachedToCommandAsMessage() {
final Map<String, String> data = new HashMap<>();
data.put("correlationId", "test");
data.put("header", "someValue");
UnitOfWork<CommandMessage<?>> unitOfWork = DefaultUnitOfWork.startAndGet(null);
unitOfWork.registerCorrelationDataProvider(message -> data);
testSubject.send(new GenericCommandMessage<>("Hello", Collections.singletonMap("header", "value")));
// Expect the merged meta-data: correlationId from the provider, header from the message.
verify(mockCommandBus).dispatch(argThat(x -> "test".equals(x.getMetaData().get("correlationId"))
&& "value".equals(x.getMetaData().get("header"))), isA(CommandCallback.class));
// Clean up so the started unit of work does not leak into other tests.
CurrentUnitOfWork.clear(unitOfWork);
}
/**
 * Verifies that an exception thrown while extracting the result payload (e.g. a
 * deserialization problem surfacing from getPayload()) is reported through the returned
 * CompletableFuture as an exceptional completion rather than being swallowed.
 */
@Test
void testPayloadExtractionProblemsReportedInException() throws ExecutionException, InterruptedException {
doAnswer(i -> {
CommandCallback<String, String> callback = i.getArgument(1);
// The result message itself is "successful", but reading its payload throws.
callback.onResult(i.getArgument(0), new GenericCommandResultMessage<String>("result") {
private static final long serialVersionUID = -5443344481326465863L;
@Override
public String getPayload() {
throw new MockException("Faking serialization problem");
}
});
return null;
}).when(mockCommandBus).dispatch(any(), any());
CompletableFuture<String> actual = testSubject.send("command");
// The future must already be completed - exceptionally - with the extraction failure.
assertTrue(actual.isDone());
assertTrue(actual.isCompletedExceptionally());
assertEquals("Faking serialization problem", actual.exceptionally(Throwable::getMessage).get());
}
/**
 * Mockito {@link Answer} simulating a retry scheduler that accepts the retry: it runs the
 * retry task (the fourth argument of scheduleRetry) synchronously and reports {@code true}
 * to indicate the retry was scheduled.
 */
private static class RescheduleCommand implements Answer<Boolean> {
    @Override
    public Boolean answer(InvocationOnMock invocation) {
        Runnable retryTask = invocation.getArgument(3);
        retryTask.run();
        return Boolean.TRUE;
    }
}
}
| |
package com.bandtest.mainact;
import java.io.BufferedReader;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import android.accounts.Account;
import android.accounts.AccountManager;
import android.app.Activity;
import android.content.Intent;
import android.content.IntentSender.SendIntentException;
import android.os.Bundle;
import android.util.Log;
import android.widget.ProgressBar;
import android.widget.Toast;
import android.content.IntentSender;
import com.google.android.gms.common.ConnectionResult;
import com.google.android.gms.common.GooglePlayServicesUtil;
import com.google.android.gms.common.api.GoogleApiClient;
import com.google.android.gms.common.api.GoogleApiClient.ConnectionCallbacks;
import com.google.android.gms.common.api.GoogleApiClient.OnConnectionFailedListener;
import com.google.android.gms.common.api.ResultCallback;
import com.google.android.gms.common.api.Scope;
import com.google.android.gms.drive.Drive;
import com.google.android.gms.drive.DriveApi.MetadataBufferResult;
import com.google.android.gms.drive.DriveFile;
import com.google.android.gms.drive.DriveFile.DownloadProgressListener;
import com.google.android.gms.drive.DriveFolder;
import com.google.android.gms.drive.DriveId;
import com.google.android.gms.drive.Metadata;
import com.google.android.gms.drive.MetadataBuffer;
import com.google.android.gms.drive.OpenFileActivityBuilder;
import com.google.android.gms.drive.query.Filters;
import com.google.android.gms.drive.query.Query;
import com.google.android.gms.drive.query.SearchableField;
import com.google.android.gms.drive.DriveApi.ContentsResult;
import com.google.android.gms.drive.MetadataChangeSet;
import com.google.api.services.drive.DriveScopes;
/**
 * Experimental/demo activity integrating with Google Drive through the Google Play Services
 * Drive API. It connects a {@link GoogleApiClient} on resume, lets the user pick a "master"
 * file via the Drive open-file activity, persists the selected file's {@link DriveId} to a
 * private local file ("bdu_master_dot_fileid") on pause, and re-reads that file's contents
 * on subsequent connections. Several alternative code paths (queries, save-to-Drive,
 * onStart/onStop management) are kept commented out for experimentation.
 */
public class Main extends Activity implements ConnectionCallbacks, OnConnectionFailedListener, ResultCallback<MetadataBufferResult> {
// Play Services client for the Drive API; (re)created and connected in onResume(),
// disconnected in onPause().
GoogleApiClient mGoogleApiClient;
// Google account used for Drive access; restored from saved state, the launching intent,
// or the first installed "com.google" account, in that order (see onCreate()).
protected String mAccountName;
protected static final String EXTRA_ACCOUNT_NAME = "account_name";
protected static final int REQUEST_CODE_RESOLUTION = 1;
protected static final int NEXT_AVAILABLE_REQUEST_CODE = 2;
protected static final int REQUEST_CODE_CREATOR = 3;
protected static final int REQUEST_CODE_OPENER = 4;
// Pre-built Drive queries; only assigned inside commented-out experimental code in onCreate().
private static Query shared;
private static Query title;
private static Query notShared;
private static Query token;
// Paging state for query results; currently only written by commented-out code.
private boolean hasFiles;
private String mNextPageToken;
/**
* Progress bar to show the current download progress of the file.
*/
private ProgressBar mProgressBar;
/**
* File that is selected with the open file activity.
*/
private DriveId mSelectedFileDriveId;
/**
* Resolves the Google account name (saved state, launch intent, then first installed
* account) and resets the read-flag so the master file is (re)loaded on resume.
* Note: the GoogleApiClient is intentionally NOT built here; onResume() does that.
*/
@Override
public void onCreate(Bundle savedInstanceState) {
Log.d("Testing", "In the onCreate");
super.onCreate( savedInstanceState );
/*
shared = new Query.Builder().addFilter(Filters.sharedWithMe()).build();
title = new Query.Builder().addFilter(Filters.eq(SearchableField.TITLE, "Carbon Scoop")).build();
notShared = new Query.Builder().addFilter(Filters.eq(SearchableField.TITLE, "*")).build();
token = new Query.Builder().setPageToken(mNextPageToken).build();
hasFiles = true;
mGoogleApiClient = new GoogleApiClient.Builder( this )
.addApi( Drive.API )
.addScope(Drive.SCOPE_FILE)
// .addScope(new Scope(DriveScopes.DRIVE))
.addConnectionCallbacks( this )
.addOnConnectionFailedListener( this )
.build();
*/
if (savedInstanceState != null) {
mAccountName = savedInstanceState.getString(EXTRA_ACCOUNT_NAME);
}
if (mAccountName == null) {
mAccountName = getIntent().getStringExtra(EXTRA_ACCOUNT_NAME);
}
if (mAccountName == null) {
// Fall back to the device's first Google account; bail out if none exists.
Account[] accounts = AccountManager.get(this).getAccountsByType("com.google");
if (accounts.length == 0) {
Log.d("testing", "Must have a Google account installed");
return;
}
mAccountName = accounts[0].name;
}
// Force a (re)load of the master-file contents the next time we connect.
readMasterDotFile = false;
Log.d("testing", mAccountName);
Log.d("testing", "Out of onCreate");
}
/**
* Saves the activity state.
*/
@Override
protected void onSaveInstanceState(Bundle outState) {
super.onSaveInstanceState(outState);
outState.putString(EXTRA_ACCOUNT_NAME, mAccountName);
}
/**
* Called when activity gets visible. A connection to Drive services need to
* be initiated as soon as the activity is visible. Registers
* {@code ConnectionCallbacks} and {@code OnConnectionFailedListener} on the
* activities itself. Also restores the previously selected file's DriveId
* from local storage, if one was persisted by onPause().
*/
@Override
protected void onResume() {
Log.d("Testing", "onResume()");
super.onResume();
if (mAccountName == null) {
return;
}
Log.d("Testing", "onResume() - 2");
if (mGoogleApiClient == null) {
// permissions to query available accounts.
mGoogleApiClient = new GoogleApiClient.Builder(this)
.addApi(Drive.API)
.addScope(Drive.SCOPE_FILE)
.addConnectionCallbacks(this)
.addOnConnectionFailedListener(this).build();
}
// If the master file was already read this session, skip the local-file restore.
if( readMasterDotFile == true )
{
return;
}
try
{
// Restore the persisted DriveId of the previously selected master file.
InputStream in = openFileInput("bdu_master_dot_fileid");
if( in != null )
{
InputStreamReader tmp = new InputStreamReader(in);
BufferedReader reader = new BufferedReader(tmp);
String str;
StringBuilder buf = new StringBuilder();
while( (str = reader.readLine()) != null )
{
buf.append(str+"\n");
}
in.close();
if( buf.length() > 0 )
{
mSelectedFileDriveId = DriveId.decodeFromString( buf.toString() );
Log.i("testing", "Using previously selected file: " + mSelectedFileDriveId.encodeToString() );
}
}
}
catch( java.io.FileNotFoundException e ){
// The file hasn't been created yet. This is not an error and will just cause the
// file selection dialog to pop up later.
}
catch( Throwable t)
{
Toast.makeText( this, "Exception: " + t.toString(), Toast.LENGTH_SHORT ).show();
Log.i("testing","fileid-load", t);
}
mGoogleApiClient.connect();
}
/**
* Called when activity gets invisible. Connection to Drive service needs to
* be disconnected as soon as an activity is invisible. Also persists the
* currently selected file's DriveId so onResume() can restore it later.
*/
@Override
protected void onPause() {
Log.d("Testing", "onPause()");
if (mGoogleApiClient != null) {
mGoogleApiClient.disconnect();
}
super.onPause();
Log.d("Testing", "onPause(): " + mSelectedFileDriveId );
if( mSelectedFileDriveId != null )
{
try
{
// MODE 0 == Context.MODE_PRIVATE; the id is stored one-per-line (single line).
OutputStreamWriter out = new OutputStreamWriter( openFileOutput("bdu_master_dot_fileid", 0 ) );
out.write( mSelectedFileDriveId.encodeToString() + "\n" );
out.close();
Log.i("testing","fileid-save: " + mSelectedFileDriveId.encodeToString() );
}
catch( Throwable t )
{
Toast.makeText( this, "Exception: " + t.toString(), Toast.LENGTH_SHORT ).show();
Log.i("testing","fileid-save-exception", t);
}
}
}
/**
* Lists the children of the Drive root folder, reporting results to
* {@link #rootFolderCallback}. Earlier query-based variants are kept commented out.
*/
public void search() {
Log.d("Testing", "search()");
/*
//Drive.DriveApi.query(mGoogleApiClient, shared).setResultCallback(this);
//Drive.DriveApi.query(mGoogleApiClient, all).setResultCallback(this);
Drive.DriveApi.query(mGoogleApiClient, notShared).setResultCallback(this);
Log.d("Testing", "done with the search()");
*/
//if (!hasFiles) {
// return;
//}
// retrieve the results for the next page.
//Query query = new Query.Builder().build();
//Drive.DriveApi.query(mGoogleApiClient, query).setResultCallback(this);
Log.d( "testing", "mGoogleApiClient.connected:" + mGoogleApiClient.isConnected() );
DriveFolder rootFolder = Drive.DriveApi.getRootFolder(mGoogleApiClient);
rootFolder.listChildren( mGoogleApiClient ).setResultCallback( rootFolderCallback );
}
// Logs each entry (name and file/folder kind) of the root-folder listing started by search().
final private ResultCallback<MetadataBufferResult> rootFolderCallback =
new ResultCallback<MetadataBufferResult>() {
@Override
public void onResult(MetadataBufferResult metadataBufferResult) {
Log.d("testing", "got root folder");
MetadataBuffer buffer = metadataBufferResult.getMetadataBuffer();
Log.d("testing", "Buffer count " + buffer.getCount());
for(Metadata m : buffer){
Log.d("testing", "Metadata name " + m.getTitle() + "(" + (m.isFolder() ? "folder" : "file") + ")");
/*
if (m.isFolder() && m.getTitle().equals("Neewie"))
Drive.DriveApi.getFolder(mApiClient, m.getDriveId())
.listChildren(mApiClient)
.setResultCallback(fileCallback);
*/
}
}
};
// True once the master file's contents have been requested this session (set in open()),
// preventing onResume() from re-reading the persisted DriveId.
private boolean readMasterDotFile;
/*
@Override
protected void onStart() {
Log.d("Testing", "onStart()");
Log.d("Testing", mGoogleApiClient.toString());
super.onStart();
mGoogleApiClient.connect();
}
*/
/*
@Override
protected void onStop() {
Log.d("Testing", "onStop()");
Log.d("Testing", mGoogleApiClient.toString());
super.onStart();
mGoogleApiClient.disconnect();
}
*/
/**
* Connection established: open the previously selected file if one is known,
* otherwise launch the Drive open-file picker so the user can choose one.
*/
@Override
public void onConnected( Bundle connectionHint ) {
Log.d("testing", "Connected YAYAYAYAYAY!");
//saveFileToDrive();
//search();
// If there is a selected file, open its contents.
if( mSelectedFileDriveId != null )
{
open();
return;
}
// NOTE(review): "txt/plain" looks like a typo for "text/plain" (which is also listed) -
// confirm whether the extra entry is intentional before removing it.
IntentSender intentSender = Drive.DriveApi
.newOpenFileActivityBuilder()
.setMimeType(new String[] { "txt/plain", "text/plain", "text/html", "application/xml" })
.build(mGoogleApiClient);
try {
startIntentSenderForResult( intentSender, REQUEST_CODE_OPENER, null, 0, 0, 0 );
} catch (SendIntentException e) {
Log.w("testing", "Unable to send intent", e);
}
}
@Override
public void onConnectionSuspended( int cause ) {
Log.d("testing", "Connection suspended");
}
/**
* Connection failed: either show the Play Services error dialog (no resolution
* available) or start the provided resolution flow, whose outcome returns via
* onActivityResult() with REQUEST_CODE_RESOLUTION.
*/
@Override
public void onConnectionFailed( ConnectionResult result ) {
Log.d("Testing", "onConnectionFailed()");
if (!result.hasResolution()) {
Log.d("Testing", "onConnectionFailed() - no resolution");
Log.d("Testing", result.toString());
Log.d("Testing", GooglePlayServicesUtil.getErrorString(result.getErrorCode()));
GooglePlayServicesUtil.getErrorDialog(result.getErrorCode(), this, 0).show();
return;
}
try {
result.startResolutionForResult(this, REQUEST_CODE_RESOLUTION);
} catch (SendIntentException e) {
Log.d("testing", "Exception while starting resolution activity", e);
}
}
/**
* Handles results of the three sub-flows: connection-failure resolution (reconnect),
* the file-creation activity (log only), and the file-open picker (capture the
* selected DriveId for open()/onPause() persistence).
*/
@Override
protected void onActivityResult(final int requestCode, final int resultCode, final Intent data) {
Log.d("Testing", "onActivityResult()");
super.onActivityResult(requestCode, resultCode, data);
if (requestCode == REQUEST_CODE_RESOLUTION && resultCode == RESULT_OK) {
mGoogleApiClient.connect();
}
else if( requestCode == REQUEST_CODE_CREATOR )
{
// Called after a file is saved to Drive.
if (resultCode == RESULT_OK)
{
Log.i("testing", "File created.");
//search();
}
}
else if( requestCode == REQUEST_CODE_OPENER )
{
Log.i("testing", "File opener: " + resultCode);
// Called after a file is saved to Drive.
if (resultCode == RESULT_OK)
{
Log.i("testing", "File opener.");
mSelectedFileDriveId = (DriveId) data.getParcelableExtra( OpenFileActivityBuilder.EXTRA_RESPONSE_DRIVE_ID );
Log.i("testing", "selectedFileID: " + mSelectedFileDriveId.encodeToString());
//search();
}
}
}
/**
* Creates new Drive contents containing a small fixed XML payload and launches the
* create-file activity (result delivered with REQUEST_CODE_CREATOR). Currently only
* invoked from commented-out code in onConnected().
*/
private void saveFileToDrive() {
// Start by creating a new contents, and setting a callback.
Log.d("testing", "Creating new contents.");
Drive.DriveApi.newContents(mGoogleApiClient).setResultCallback(new ResultCallback<ContentsResult>() {
@Override
public void onResult(ContentsResult result) {
// If the operation was not successful, we cannot do anything
// and must
// fail.
if (!result.getStatus().isSuccess()) {
Log.i("testing", "Failed to create new contents.");
return;
}
// Otherwise, we can write our data to the new contents.
Log.d("testing", "New contents created.");
// Get an output stream for the contents.
OutputStream outputStream = result.getContents().getOutputStream();
// Write the bitmap data from it.
try {
outputStream.write( "<test-xml><child>bob</child><child>ted</child></test-xml>".getBytes() );
} catch (IOException e1) {
Log.i("testing", "Unable to write file contents.");
}
// Create the initial metadata - MIME type and title.
// Note that the user will be able to change the title later.
MetadataChangeSet metadataChangeSet = new MetadataChangeSet.Builder().setMimeType("application/xml").setTitle("test.xml").build();
// Create an intent for the file chooser, and start it.
IntentSender intentSender = Drive.DriveApi
.newCreateFileActivityBuilder()
.setInitialMetadata(metadataChangeSet)
.setInitialContents(result.getContents())
.build(mGoogleApiClient);
try {
startIntentSenderForResult( intentSender, REQUEST_CODE_CREATOR, null, 0, 0, 0 );
} catch (SendIntentException e) {
Log.i("testing", "Failed to launch file chooser.");
}
}
});
}
/**
* Diagnostic dump of a Drive query result (this activity is itself a
* ResultCallback&lt;MetadataBufferResult&gt; for the commented-out query paths).
*/
@Override
public void onResult(MetadataBufferResult result) {
if(!result.getStatus().isSuccess()){
Toast.makeText(this, "Didn't work", Toast.LENGTH_SHORT).show();
return;
}
Log.d( "testing", "MetaDataBuffer" + result.getMetadataBuffer() );
Log.d( "testing", "Page Token: " + result.getMetadataBuffer().getNextPageToken());
//mNextPageToken = result.getMetadataBuffer().getNextPageToken();
//hasFiles = mNextPageToken != null;
Log.d("testing", "Contents: " + result.getMetadataBuffer().describeContents());
Log.d("testing", "Retrieved file count: " + result.getMetadataBuffer().getCount());
Log.d("testing", "Retrieved is sucess: " + result.getStatus() );
Log.d("testing", "Retrieved is sucess: " + result.getStatus().getStatusCode() );
Log.d("testing", "Tings " + result.getMetadataBuffer().getMetadata() );
Log.d("testing", "PageToken " + result.getMetadataBuffer().getNextPageToken() );
}
/**
* Opens the selected master file read-only, logging download progress, and marks
* readMasterDotFile so onResume() skips the local-state restore from now on.
* Contents are delivered to {@link #contentsCallback}.
*/
private void open() {
// Reset progress dialog back to zero as we're
// initiating an opening request.
//mProgressBar.setProgress(0);
DownloadProgressListener listener = new DownloadProgressListener() {
@Override
public void onProgress(long bytesDownloaded, long bytesExpected) {
// Update progress dialog with the latest progress.
int progress = (int)(bytesDownloaded*100/bytesExpected);
Log.d("testing", String.format("Loading progress: %d percent", progress));
//mProgressBar.setProgress(progress);
}
};
Drive.DriveApi.getFile( mGoogleApiClient, mSelectedFileDriveId )
.openContents( mGoogleApiClient, DriveFile.MODE_READ_ONLY, listener)
.setResultCallback(contentsCallback);
readMasterDotFile = true;
}
// Reads the opened file's entire contents into memory and logs them.
// NOTE(review): readLine() strips line terminators, so the logged content has
// newlines removed - confirm whether that is acceptable for this file format.
private ResultCallback<ContentsResult> contentsCallback = new ResultCallback<ContentsResult>() {
@Override
public void onResult(ContentsResult result) {
if (!result.getStatus().isSuccess()) {
Log.i("testing", "Error while opening the file contents");
return;
}
Log.i("testing", "File contents opened");
try{
InputStream inStream = result.getContents().getInputStream();
BufferedReader reader = new BufferedReader(new InputStreamReader(inStream));
StringBuilder out = new StringBuilder();
String line;
while ((line = reader.readLine()) != null) {
out.append(line);
}
reader.close();
Log.i("testing", "Content: " + out.toString() );
}
catch( IOException ioe )
{
Log.i("testing", "IOException: " + ioe.getMessage() );
}
}
};
}
| |
/*
* Copyright 2000-2016 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.ui;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.util.ScalableIcon;
import com.intellij.util.ArrayUtil;
import com.intellij.util.ui.JBUI;
import org.intellij.lang.annotations.MagicConstant;
import org.jetbrains.annotations.NotNull;
import javax.swing.*;
import java.awt.*;
import java.util.Arrays;
/**
 * An icon composed of several layered icons painted on top of each other. Each layer has an
 * optional horizontal/vertical shift and can be individually disabled. The overall size is
 * the bounding box of all layers (see {@link #updateSize()}); shifts are JBUI-scaled.
 * Scaled variants of the layers are cached lazily in {@link #myScaledIcons()}.
 */
public class LayeredIcon extends JBUI.AuxScalableJBIcon {
private static final Logger LOG = Logger.getInstance("#com.intellij.ui.LayeredIcon");
// The layers, bottom (index 0) to top. Entries may be null (empty layer).
private final Icon[] myIcons;
// Lazily computed scaled variants of myIcons; null means "needs recompute".
// May alias myIcons itself when no scaling is needed (see myScaledIcons()).
private Icon[] myScaledIcons;
// Per-layer disabled flags; a disabled layer is skipped when painting.
private final boolean[] myDisabledLayers;
// Per-layer horizontal/vertical shifts, in unscaled units.
private final int[] myHShifts;
private final int[] myVShifts;
// Global offset applied to all layers so that negatively shifted layers stay visible;
// recomputed in updateSize() (only when there is more than one layer).
private int myXShift;
private int myYShift;
// Cached bounding-box size of all layers, in unscaled units.
private int myWidth;
private int myHeight;
/**
* Creates an icon with {@code layerCount} empty layers to be filled via {@link #setIcon}.
*/
public LayeredIcon(int layerCount) {
myIcons = new Icon[layerCount];
myDisabledLayers = new boolean[layerCount];
myHShifts = new int[layerCount];
myVShifts = new int[layerCount];
}
/**
* Creates an icon layering the given icons in order, all with zero shift.
*/
public LayeredIcon(@NotNull Icon... icons) {
this(icons.length);
for (int i = 0; i < icons.length; i++) {
setIcon(icons[i], i);
}
}
/**
* Copy constructor used by {@link #copy()}; shares no mutable arrays with the original.
*/
protected LayeredIcon(LayeredIcon icon) {
super(icon);
myIcons = ArrayUtil.copyOf(icon.myIcons);
myScaledIcons = null;
myDisabledLayers = ArrayUtil.copyOf(icon.myDisabledLayers);
myHShifts = ArrayUtil.copyOf(icon.myHShifts);
myVShifts = ArrayUtil.copyOf(icon.myVShifts);
myXShift = icon.myXShift;
myYShift = icon.myYShift;
myWidth = icon.myWidth;
myHeight = icon.myHeight;
}
@NotNull
@Override
protected LayeredIcon copy() {
return new LayeredIcon(this);
}
/**
* Returns the layers to paint, scaled by the current scale factor.
* Falls back to the raw layer array (shared, not copied) when the scale is 1 or when any
* non-null layer does not implement ScalableIcon, i.e. cannot be scaled individually.
* The result is cached until the layers change (setIcon resets the cache).
*/
@NotNull
private Icon[] myScaledIcons() {
if (myScaledIcons != null) {
return myScaledIcons;
}
if (getScale() == 1f) {
return myScaledIcons = myIcons;
}
for (Icon icon : myIcons) {
if (icon != null && !(icon instanceof ScalableIcon)) {
return myScaledIcons = myIcons;
}
}
myScaledIcons = new Icon[myIcons.length];
for (int i = 0; i < myIcons.length; i++) {
if (myIcons[i] != null) {
myScaledIcons[i] = ((ScalableIcon)myIcons[i]).scale(getScale());
}
}
return myScaledIcons;
}
@Override
public LayeredIcon withJBUIPreScaled(boolean preScaled) {
super.withJBUIPreScaled(preScaled);
updateSize();
return this;
}
/**
* Equality is based on geometry (size, shifts) and the layer arrays.
* NOTE: the per-layer disabled flags (myDisabledLayers) are NOT part of equality.
*/
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof LayeredIcon)) return false;
if (!super.equals(o)) return false;
final LayeredIcon icon = (LayeredIcon)o;
if (myHeight != icon.myHeight) return false;
if (myWidth != icon.myWidth) return false;
if (myXShift != icon.myXShift) return false;
if (myYShift != icon.myYShift) return false;
if (!Arrays.equals(myHShifts, icon.myHShifts)) return false;
if (!Arrays.equals(myIcons, icon.myIcons)) return false;
if (!Arrays.equals(myVShifts, icon.myVShifts)) return false;
return true;
}
// Constant hash: satisfies the equals/hashCode contract (equal objects get equal hashes)
// but makes every instance collide in hash-based collections.
@Override
public int hashCode() {
return 0;
}
/**
* Sets the icon for the given layer with zero shift.
*/
public void setIcon(Icon icon, int layer) {
setIcon(icon, layer, 0, 0);
}
public Icon getIcon(int layer) {
return myIcons[layer];
}
// NOTE: returns the internal array (not a copy); callers must not mutate it.
@NotNull
public Icon[] getAllLayers() {
return myIcons;
}
/**
* Sets the icon for the given layer with the given unscaled shifts, guards against
* cyclic containment, invalidates the scaled-icon cache, and recomputes the size.
*/
public void setIcon(Icon icon, int layer, int hShift, int vShift) {
if (icon instanceof LayeredIcon) {
((LayeredIcon)icon).checkIHaventIconInsideMe(this);
}
myIcons[layer] = icon;
myScaledIcons = null;
myHShifts[layer] = hShift;
myVShifts[layer] = vShift;
updateSize();
}
/**
*
* @param constraint is expected to be one of compass-directions or CENTER
*/
public void setIcon(Icon icon, int layer, @MagicConstant(valuesFromClass = SwingConstants.class) int constraint) {
int width = getIconWidth();
int height = getIconHeight();
int w = icon.getIconWidth();
int h = icon.getIconHeight();
// Degenerate size: no meaningful anchoring possible, place at origin.
if (width <= 1 || height <= 1) {
setIcon(icon, layer);
return;
}
int x;
int y;
switch (constraint) {
case SwingConstants.CENTER:
x = (width - w) / 2;
y = (height - h) /2;
break;
case SwingConstants.NORTH:
x = (width - w) / 2;
y = 0;
break;
case SwingConstants.NORTH_EAST:
x = width - w;
y = 0;
break;
case SwingConstants.EAST:
x = width - w;
y = (height - h) / 2;
break;
case SwingConstants.SOUTH_EAST:
x = width - w;
y = height - h;
break;
case SwingConstants.SOUTH:
x = (width - w) / 2;
y = height - h;
break;
case SwingConstants.SOUTH_WEST:
x = 0;
y = height - h;
break;
case SwingConstants.WEST:
x = 0;
y = (height - h) / 2;
break;
case SwingConstants.NORTH_WEST:
x = 0;
y = 0;
break;
default:
throw new IllegalArgumentException(
"The constraint should be one of SwingConstants' compass-directions [1..8] or CENTER [0], actual value is " + constraint);
}
setIcon(icon, layer, x, y);
}
// Recursively asserts that 'icon' is not contained anywhere inside this layered icon,
// preventing cycles that would cause infinite recursion during painting/sizing.
private void checkIHaventIconInsideMe(Icon icon) {
LOG.assertTrue(icon != this);
for (Icon child : myIcons) {
if (child instanceof LayeredIcon) ((LayeredIcon)child).checkIHaventIconInsideMe(icon);
}
}
/**
* Paints all enabled layers bottom-to-top at their (scaled) shifted positions.
*/
@Override
public void paintIcon(Component c, Graphics g, int x, int y) {
// Recompute cached size if the JBUI scale changed since the last paint.
if (updateJBUIScale()) updateSize();
Icon[] icons = myScaledIcons();
for (int i = 0; i < icons.length; i++) {
Icon icon = icons[i];
if (icon == null || myDisabledLayers[i]) continue;
int xOffset = x + scaleVal(myXShift + myHShifts(i), Scale.INSTANCE);
int yOffset = y + scaleVal(myYShift + myVShifts(i), Scale.INSTANCE);
icon.paintIcon(c, g, xOffset, yOffset);
}
}
public boolean isLayerEnabled(int layer) {
return !myDisabledLayers[layer];
}
public void setLayerEnabled(int layer, boolean enabled) {
myDisabledLayers[layer] = !enabled;
}
@Override
public int getIconWidth() {
// Recompute when the cached size looks uninitialized (<= 1) or the scale changed.
if (myWidth <= 1 || updateJBUIScale()) {
updateSize();
}
return scaleVal(myWidth, Scale.INSTANCE);
}
@Override
public int getIconHeight() {
// Recompute when the cached size looks uninitialized (<= 1) or the scale changed.
if (myHeight <= 1 || updateJBUIScale()) {
updateSize();
}
return scaleVal(myHeight, Scale.INSTANCE);
}
// JBUI-scaled accessors for the per-layer shifts.
private int myHShifts(int i) {
return scaleVal(myHShifts[i], Scale.JBUI);
}
private int myVShifts(int i) {
return scaleVal(myVShifts[i], Scale.JBUI);
}
/**
* Recomputes the bounding box of all non-null layers (taking their scaled shifts into
* account) into myWidth/myHeight. With more than one layer, also sets the global
* myXShift/myYShift so that the layer with the most negative shift lands at the origin.
* No-op while all layers are still null.
*/
protected void updateSize() {
int minX = Integer.MAX_VALUE;
int maxX = Integer.MIN_VALUE;
int minY = Integer.MAX_VALUE;
int maxY = Integer.MIN_VALUE;
boolean hasNotNullIcons = false;
for (int i = 0; i < myIcons.length; i++) {
Icon icon = myIcons[i];
if (icon == null) continue;
hasNotNullIcons = true;
int hShift = myHShifts(i);
int vShift = myVShifts(i);
minX = Math.min(minX, hShift);
maxX = Math.max(maxX, hShift + icon.getIconWidth());
minY = Math.min(minY, vShift);
maxY = Math.max(maxY, vShift + icon.getIconHeight());
}
if (!hasNotNullIcons) return;
myWidth = maxX - minX;
myHeight = maxY - minY;
if (myIcons.length > 1) {
myXShift = -minX;
myYShift = -minY;
}
}
/**
* Convenience factory: layers {@code foregroundIcon} on top of {@code backgroundIcon}.
*/
public static Icon create(final Icon backgroundIcon, final Icon foregroundIcon) {
final LayeredIcon layeredIcon = new LayeredIcon(2);
layeredIcon.setIcon(backgroundIcon, 0);
layeredIcon.setIcon(foregroundIcon, 1);
return layeredIcon;
}
@Override
public String toString() {
return "Layered icon. myIcons=" + Arrays.asList(myIcons);
}
}
| |
/**
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for
* license information.
*/
package com.microsoft.azure.management.network;
import java.util.Map;
import com.microsoft.azure.management.apigeneration.Fluent;
import com.microsoft.azure.management.apigeneration.Method;
import com.microsoft.azure.management.network.implementation.ApplicationGatewayInner;
import com.microsoft.azure.management.network.implementation.NetworkManager;
import com.microsoft.azure.management.network.model.HasPrivateIpAddress;
import com.microsoft.azure.management.network.model.HasPublicIpAddress;
import com.microsoft.azure.management.resources.fluentcore.arm.models.GroupableResource;
import com.microsoft.azure.management.resources.fluentcore.arm.models.HasSubnet;
import com.microsoft.azure.management.resources.fluentcore.arm.models.Resource;
import com.microsoft.azure.management.resources.fluentcore.model.Appliable;
import com.microsoft.azure.management.resources.fluentcore.model.Creatable;
import com.microsoft.azure.management.resources.fluentcore.model.Refreshable;
import com.microsoft.azure.management.resources.fluentcore.model.Updatable;
import com.microsoft.azure.management.resources.fluentcore.model.Wrapper;
/**
* Entry point for application gateway management API in Azure.
*/
@Fluent
public interface ApplicationGateway extends
GroupableResource<NetworkManager>,
Refreshable<ApplicationGateway>,
Wrapper<ApplicationGatewayInner>,
Updatable<ApplicationGateway.Update>,
HasSubnet,
HasPrivateIpAddress {
// Getters
/**
* @return true if the application gateway has at least one internally load balanced frontend accessible within the virtual network
*/
boolean isPrivate();
/**
* @return true if the application gateway has at least one Internet-facing frontend
*/
boolean isPublic();
/**
* @return the frontend IP configuration associated with a public IP address, if any, that frontend listeners and request routing rules can reference implicitly
*/
ApplicationGatewayFrontend defaultPublicFrontend();
/**
* @return the frontend IP configuration associated with a private IP address, if any, that frontend listeners and request routing rules can reference implicitly
*/
ApplicationGatewayFrontend defaultPrivateFrontend();
/**
* @return the SKU of this application gateway
*/
ApplicationGatewaySku sku();
/**
* @return number of instances
*/
int instanceCount();
/**
* @return the size of the application gateway
*/
ApplicationGatewaySkuName size();
/**
* @return the tier of the application gateway
*/
ApplicationGatewayTier tier();
/**
* @return the operational state of the application gateway
*/
ApplicationGatewayOperationalState operationalState();
/**
* @return the SSL policy for the application gateway
*/
ApplicationGatewaySslPolicy sslPolicy();
/**
* @return IP configurations of this application gateway, indexed by name
*/
Map<String, ApplicationGatewayIpConfiguration> ipConfigurations();
/**
* @return backend address pools of this application gateway, indexed by name
*/
Map<String, ApplicationGatewayBackend> backends();
/**
* @return the IP configuration named "default" if it exists, or the one existing IP configuration if only one exists, else null
*/
ApplicationGatewayIpConfiguration defaultIpConfiguration();
/**
* @return frontend IP configurations, indexed by name
*/
Map<String, ApplicationGatewayFrontend> frontends();
/**
* @return frontend IP configurations with a public IP address, indexed by name
*/
Map<String, ApplicationGatewayFrontend> publicFrontends();
/**
* @return frontend IP configurations with a private IP address on a subnet, indexed by name
*/
Map<String, ApplicationGatewayFrontend> privateFrontends();
/**
* @return named frontend ports of this application gateway, indexed by name
*/
Map<String, Integer> frontendPorts();
/**
* @return backend HTTP configurations of this application gateway, indexed by name
*/
Map<String, ApplicationGatewayBackendHttpConfiguration> backendHttpConfigurations();
/**
* @return SSL certificates, indexed by name
*/
Map<String, ApplicationGatewaySslCertificate> sslCertificates();
/**
* @return Frontend listeners, indexed by name
*/
Map<String, ApplicationGatewayListener> listeners();
/**
* @return request routing rules, indexed by name
*/
Map<String, ApplicationGatewayRequestRoutingRule> requestRoutingRules();
/**
* Returns the name of the existing port, if any, that is associated with the specified port number.
* @param portNumber a port number
* @return the existing port name for that port number, or null if none found
*/
String frontendPortNameFromNumber(int portNumber);
/**
* Finds a frontend listener associated with the specified frontend port number, if any.
* @param portNumber a used port number
* @return a frontend listener, or null if none found
*/
ApplicationGatewayListener listenerByPortNumber(int portNumber);
/**
* The entirety of the application gateway definition.
*/
interface Definition extends
DefinitionStages.Blank,
DefinitionStages.WithGroup,
DefinitionStages.WithCreate,
DefinitionStages.WithRequestRoutingRule,
DefinitionStages.WithRequestRoutingRuleOrCreate {
}
/**
 * Grouping of application gateway definition stages.
 */
interface DefinitionStages {
/**
 * The first stage of an application gateway definition.
 */
interface Blank
extends GroupableResource.DefinitionWithRegion<WithGroup> {
}
/**
 * The stage of an application gateway definition allowing to specify the resource group.
 */
interface WithGroup
extends GroupableResource.DefinitionStages.WithGroup<WithRequestRoutingRule> {
}
/**
 * The stage of an application gateway definition allowing to add a new Internet-facing frontend with a public IP address.
 */
interface WithPublicIpAddress extends HasPublicIpAddress.DefinitionStages.WithPublicIpAddressNoDnsLabel<WithCreate> {
}
/**
 * The stage of an application gateway definition allowing to define one or more public, or Internet-facing, frontends.
 */
interface WithPublicFrontend extends WithPublicIpAddress {
/**
 * Specifies that the application gateway should not be Internet-facing.
 * @return the next stage of the definition
 */
@Method
WithCreate withoutPublicFrontend();
}
/**
 * The stage of an internal application gateway definition allowing to make the application gateway accessible to its
 * virtual network.
 */
interface WithPrivateFrontend {
/**
 * Enables a private (internal) default frontend in the subnet containing the application gateway.
 * <p>
 * A frontend with the name "default" will be created if needed.
 * @return the next stage of the definition
 */
@Method
WithCreate withPrivateFrontend();
/**
 * Specifies that no private (internal) frontend should be enabled.
 * @return the next stage of the definition
 */
@Method
WithCreate withoutPrivateFrontend();
}
/**
 * The stage of an application gateway definition allowing to add a listener.
 */
interface WithListener {
/**
 * Begins the definition of a new application gateway listener to be attached to the gateway.
 * @param name a unique name for the listener
 * @return the first stage of the listener definition
 */
ApplicationGatewayListener.DefinitionStages.Blank<WithCreate> defineListener(String name);
}
/**
 * The stage of an application gateway definition allowing to add a frontend port.
 */
interface WithFrontendPort {
/**
 * Creates a frontend port with an auto-generated name and the specified port number, unless one already exists.
 * @param portNumber a port number
 * @return the next stage of the definition
 */
WithCreate withFrontendPort(int portNumber);
/**
 * Creates a frontend port with the specified name and port number, unless a port matching this name and/or number already exists.
 * @param portNumber a port number
 * @param name the name to assign to the port
 * @return the next stage of the definition, or null if a port matching either the name or the number, but not both, already exists.
 */
WithCreate withFrontendPort(int portNumber, String name);
}
/**
 * The stage of an application gateway definition allowing to add an SSL certificate to be used by HTTPS listeners.
 */
interface WithSslCert {
/**
 * Begins the definition of a new application gateway SSL certificate to be attached to the gateway for use in HTTPS listeners.
 * @param name a unique name for the certificate
 * @return the first stage of the certificate definition
 */
ApplicationGatewaySslCertificate.DefinitionStages.Blank<WithCreate> defineSslCertificate(String name);
}
/**
 * The stage of an application gateway definition allowing to add a backend.
 */
interface WithBackend {
/**
 * Begins the definition of a new application gateway backend to be attached to the gateway.
 * @param name a unique name for the backend
 * @return the first stage of the backend definition
 */
ApplicationGatewayBackend.DefinitionStages.Blank<WithCreate> defineBackend(String name);
}
/**
 * The stage of an application gateway definition allowing to add a backend HTTP configuration.
 */
interface WithBackendHttpConfig {
/**
 * Begins the definition of a new application gateway backend HTTP configuration to be attached to the gateway.
 * @param name a unique name for the backend HTTP configuration
 * @return the first stage of the backend HTTP configuration definition
 */
ApplicationGatewayBackendHttpConfiguration.DefinitionStages.Blank<WithCreate> defineBackendHttpConfiguration(String name);
}
/**
 * The stage of an application gateway definition allowing to add a request routing rule.
 */
interface WithRequestRoutingRule {
/**
 * Begins the definition of a request routing rule for this application gateway.
 * @param name a unique name for the request routing rule
 * @return the first stage of the request routing rule
 */
ApplicationGatewayRequestRoutingRule.DefinitionStages.Blank<WithRequestRoutingRuleOrCreate> defineRequestRoutingRule(String name);
}
/**
 * The stage of an application gateway definition allowing to continue adding more request routing rules,
 * or start specifying optional settings, or create the application gateway.
 */
interface WithRequestRoutingRuleOrCreate extends WithRequestRoutingRule, WithCreate {
}
/**
 * The stage of an application gateway definition allowing to specify the size.
 */
interface WithSize {
/**
 * Specifies the size of the application gateway to create within the context of the selected tier.
 * <p>
 * By default, the smallest size is used.
 * @param size an application gateway SKU name
 * @return the next stage of the definition
 */
/*
 * The API refers to this as the "SKU"/"SkuName", the docs refer to this as the "size" (and docs call Standard vs WAF as the "SKU"),
 * while the portal refers to this as the "SKU size"... The documentation naming sounds the most correct, so following that here.
 */
WithCreate withSize(ApplicationGatewaySkuName size);
}
/**
 * The stage of an application gateway definition allowing to specify the capacity (number of instances) of the application gateway.
 */
interface WithInstanceCount {
/**
 * Specifies the capacity (number of instances) for the application gateway.
 * <p>
 * By default, 1 instance is used.
 * @param instanceCount the capacity as a number between 1 and 10 but also based on the limits imposed by the selected application gateway size
 * @return the next stage of the definition
 */
/*
 * The API refers to this as "Capacity", but the portal and the docs refer to this as "instance count", so using that naming here
 */
WithCreate withInstanceCount(int instanceCount);
}
/**
 * The stage of an application gateway definition allowing to specify the subnet the app gateway is getting
 * its private IP address from.
 */
interface WithExistingSubnet extends HasSubnet.DefinitionStages.WithSubnet<WithCreate> {
/**
 * Specifies the subnet the application gateway gets its private IP address from.
 * <p>
 * This will create a new IP configuration, if it does not already exist.
 * <p>
 * Private (internal) frontends, if any have been enabled, will be configured to use this subnet as well.
 * @param subnet an existing subnet
 * @return the next stage of the definition
 */
WithCreate withExistingSubnet(Subnet subnet);
/**
 * Specifies the subnet the application gateway gets its private IP address from.
 * <p>
 * This will create a new IP configuration, if it does not already exist.
 * <p>
 * Private (internal) frontends, if any have been enabled, will be configured to use this subnet as well.
 * @param network the virtual network the subnet is part of
 * @param subnetName the name of a subnet within the selected network
 * @return the next stage of the definition
 */
WithCreate withExistingSubnet(Network network, String subnetName);
}
/**
 * The stage of an application gateway definition allowing to specify the default IP address the app gateway will be internally available at,
 * if the default private frontend has been enabled.
 */
interface WithPrivateIpAddress extends HasPrivateIpAddress.DefinitionStages.WithPrivateIpAddress<WithCreate> {
}
/**
 * The stage of an application gateway definition containing all the required inputs for
 * the resource to be created (via {@link WithCreate#create()}), but also allowing
 * for any other optional settings to be specified.
 */
interface WithCreate extends
Creatable<ApplicationGateway>,
Resource.DefinitionWithTags<WithCreate>,
WithSize,
WithInstanceCount,
WithSslCert,
WithFrontendPort,
WithListener,
WithBackendHttpConfig,
WithBackend,
WithExistingSubnet,
WithPrivateIpAddress,
WithPrivateFrontend,
WithPublicFrontend,
WithPublicIpAddress {
}
}
/**
 * Grouping of application gateway update stages.
 */
interface UpdateStages {
/**
 * The stage of an internal application gateway update allowing to make the application gateway accessible to its
 * virtual network.
 */
interface WithPrivateFrontend {
/**
 * Enables a private (internal) default frontend in the subnet containing the application gateway.
 * <p>
 * A frontend with the name "default" will be created if needed.
 * @return the next stage of the update
 */
@Method
Update withPrivateFrontend();
/**
 * Specifies that no private, or internal, frontend should be enabled.
 * @return the next stage of the update
 */
@Method
Update withoutPrivateFrontend();
}
/**
 * The stage of an application gateway update allowing to specify the subnet the app gateway is getting
 * its private IP address from.
 */
interface WithExistingSubnet extends HasSubnet.UpdateStages.WithSubnet<Update> {
/**
 * Specifies the subnet the application gateway gets its private IP address from.
 * <p>
 * This will create a new IP configuration, if it does not already exist.
 * <p>
 * Private (internal) frontends, if any have been enabled, will be configured to use this subnet as well.
 * @param subnet an existing subnet
 * @return the next stage of the update
 */
Update withExistingSubnet(Subnet subnet);
/**
 * Specifies the subnet the application gateway gets its private IP address from.
 * <p>
 * This will create a new IP configuration, if it does not already exist.
 * <p>
 * Private (internal) frontends, if any have been enabled, will be configured to use this subnet as well.
 * @param network the virtual network the subnet is part of
 * @param subnetName the name of a subnet within the selected network
 * @return the next stage of the update
 */
Update withExistingSubnet(Network network, String subnetName);
}
/**
 * The stage of an application gateway update allowing to modify IP configurations.
 */
interface WithIpConfig {
/**
 * Removes the specified IP configuration.
 * <p>
 * Note that removing an IP configuration referenced by other settings may break the application gateway.
 * Also, there must be at least one IP configuration for the application gateway to function.
 * @param ipConfigurationName the name of the IP configuration to remove
 * @return the next stage of the update
 */
Update withoutIpConfiguration(String ipConfigurationName);
/**
 * Begins the update of an existing IP configuration.
 * @param ipConfigurationName the name of an existing IP configuration
 * @return the first stage of an IP configuration update
 */
ApplicationGatewayIpConfiguration.Update updateIpConfiguration(String ipConfigurationName);
/**
 * Begins the update of the default IP configuration i.e. the only one IP configuration that exists, assuming only one exists.
 * @return the first stage of an IP configuration update.
 */
@Method
ApplicationGatewayIpConfiguration.Update updateDefaultIpConfiguration();
/**
 * Begins the definition of the default IP configuration.
 * <p>
 * If a default IP configuration already exists, this is equivalent to {@code updateDefaultIpConfiguration()}.
 * @return the first stage of an IP configuration update
 */
@Method
ApplicationGatewayIpConfiguration.UpdateDefinitionStages.Blank<Update> defineDefaultIpConfiguration();
}
/**
 * The stage of an application gateway update allowing to modify frontend ports.
 */
interface WithFrontendPort {
/**
 * Creates a frontend port with an auto-generated name and the specified port number, unless one already exists.
 * @param portNumber a port number
 * @return the next stage of the update
 */
Update withFrontendPort(int portNumber);
/**
 * Creates a frontend port with the specified name and port number, unless a port matching this name and/or number already exists.
 * @param portNumber a port number
 * @param name the name to assign to the port
 * @return the next stage of the update, or null if a port matching either the name or the number, but not both, already exists.
 */
Update withFrontendPort(int portNumber, String name);
/**
 * Removes the specified frontend port.
 * <p>
 * Note that removing a frontend port referenced by other settings may break the application gateway.
 * @param name the name of the frontend port to remove
 * @return the next stage of the update
 */
Update withoutFrontendPort(String name);
/**
 * Removes the specified frontend port.
 * <p>
 * Note that removing a frontend port referenced by other settings may break the application gateway.
 * @param portNumber the port number of the frontend port to remove
 * @return the next stage of the update
 */
Update withoutFrontendPort(int portNumber);
}
/**
 * The stage of an application gateway update allowing to specify a public IP address for the public frontend.
 */
interface WithPublicIpAddress extends HasPublicIpAddress.UpdateStages.WithPublicIpAddressNoDnsLabel<Update> {
}
/**
 * The stage of an application gateway update allowing to modify frontend IP configurations.
 */
interface WithFrontend {
/**
 * Removes the specified frontend IP configuration.
 * <p>
 * Note that removing a frontend referenced by other settings may break the application gateway.
 * @param frontendName the name of the frontend IP configuration to remove
 * @return the next stage of the update
 */
Update withoutFrontend(String frontendName);
/**
 * Begins the update of an existing frontend IP configuration.
 * @param frontendName the name of an existing frontend IP configuration
 * @return the first stage of the frontend IP configuration update
 */
ApplicationGatewayFrontend.Update updateFrontend(String frontendName);
/**
 * Specifies that the application gateway should not be Internet-facing.
 * <p>
 * Note that if there are any other settings referencing the public frontend, removing it may break the application gateway.
 * @return the next stage of the update
 */
@Method
Update withoutPublicFrontend();
/**
 * Specifies that the application gateway should not be private, i.e. its endpoints should not be internally accessible
 * from within the virtual network.
 * <p>
 * Note that if there are any other settings referencing the private frontend, removing it may break the application gateway.
 * @return the next stage of the update
 */
@Method
Update withoutPrivateFrontend();
/**
 * Begins the update of the public frontend IP configuration, if it exists.
 * @return the first stage of a frontend update or null if no public frontend exists
 */
@Method
ApplicationGatewayFrontend.Update updatePublicFrontend();
/**
 * Begins the update of the private frontend IP configuration, if it exists.
 * @return the first stage of a frontend update or null if no private frontend exists
 */
/* TODO: Nothing to update in the private frontend today - changing Subnet and/or private IP not supported
 * @Method
 * ApplicationGatewayFrontend.Update updatePrivateFrontend();
 */
/**
 * Begins the definition of the default public frontend IP configuration, creating one if it does not already exist.
 * @return the first stage of a frontend definition
 */
@Method
ApplicationGatewayFrontend.UpdateDefinitionStages.Blank<Update> definePublicFrontend();
/**
 * Begins the definition of the default private frontend IP configuration, creating one if it does not already exist.
 * @return the first stage of a frontend definition
 */
@Method
ApplicationGatewayFrontend.UpdateDefinitionStages.Blank<Update> definePrivateFrontend();
}
/**
 * The stage of an application gateway update allowing to modify backends.
 */
interface WithBackend {
/**
 * Begins the definition of a new application gateway backend to be attached to the gateway.
 * @param name a unique name for the backend
 * @return the first stage of the backend definition
 */
ApplicationGatewayBackend.UpdateDefinitionStages.Blank<Update> defineBackend(String name);
/**
 * Ensures the specified fully qualified domain name (FQDN) is not associated with any backend.
 * @param fqdn a fully qualified domain name (FQDN)
 * @return the next stage of the update
 */
Update withoutBackendFqdn(String fqdn);
/**
 * Ensures the specified IP address is not associated with any backend.
 * @param ipAddress an IP address
 * @return the next stage of the update
 */
Update withoutBackendIpAddress(String ipAddress);
/**
 * Removes the specified backend.
 * <p>
 * Note that removing a backend referenced by other settings may break the application gateway.
 * @param backendName the name of an existing backend on this application gateway
 * @return the next stage of the update
 */
Update withoutBackend(String backendName);
/**
 * Begins the update of an existing backend on this application gateway.
 * @param name the name of the backend
 * @return the first stage of an update of the backend
 */
ApplicationGatewayBackend.Update updateBackend(String name);
}
/**
 * The stage of an application gateway update allowing to specify the size.
 */
interface WithSize {
/**
 * Specifies the size of the application gateway to use within the context of the selected tier.
 * @param size an application gateway size name
 * @return the next stage of the update
 */
Update withSize(ApplicationGatewaySkuName size);
}
/**
 * The stage of an application gateway update allowing to specify the capacity (number of instances) of
 * the application gateway.
 */
interface WithInstanceCount {
/**
 * Specifies the capacity (number of instances) for the application gateway.
 * @param instanceCount the capacity as a number between 1 and 10 but also based on the limits imposed by the selected application gateway size
 * @return the next stage of the update
 */
Update withInstanceCount(int instanceCount);
}
/**
 * The stage of an application gateway update allowing to modify SSL certificates.
 */
interface WithSslCert {
/**
 * Begins the definition of a new application gateway SSL certificate to be attached to the gateway for use in frontend HTTPS listeners.
 * @param name a unique name for the certificate
 * @return the first stage of the certificate definition
 */
ApplicationGatewaySslCertificate.UpdateDefinitionStages.Blank<Update> defineSslCertificate(String name);
/**
 * Removes the specified SSL certificate from the application gateway.
 * <p>
 * Note that removing a certificate referenced by other settings may break the application gateway.
 * @param name the name of the certificate to remove
 * @return the next stage of the update
 */
Update withoutCertificate(String name);
}
/**
 * The stage of an application gateway update allowing to modify frontend listeners.
 */
interface WithListener {
/**
 * Begins the definition of a new application gateway listener to be attached to the gateway.
 * @param name a unique name for the listener
 * @return the first stage of the listener definition
 */
ApplicationGatewayListener.UpdateDefinitionStages.Blank<Update> defineListener(String name);
/**
 * Removes a frontend listener from the application gateway.
 * <p>
 * Note that removing a listener referenced by other settings may break the application gateway.
 * @param name the name of the listener to remove
 * @return the next stage of the update
 */
Update withoutListener(String name);
/**
 * Begins the update of a listener.
 * @param name the name of an existing listener to update
 * @return the next stage of the update or null if the requested listener does not exist
 */
ApplicationGatewayListener.Update updateListener(String name);
}
/**
 * The stage of an application gateway update allowing to modify backend HTTP configurations.
 */
interface WithBackendHttpConfig {
/**
 * Begins the definition of a new application gateway backend HTTP configuration to be attached to the gateway.
 * @param name a unique name for the backend HTTP configuration
 * @return the first stage of the backend HTTP configuration definition
 */
ApplicationGatewayBackendHttpConfiguration.UpdateDefinitionStages.Blank<Update> defineBackendHttpConfiguration(String name);
/**
 * Removes the specified backend HTTP configuration from this application gateway.
 * <p>
 * Note that removing a backend HTTP configuration referenced by other settings may break the application gateway.
 * @param name the name of an existing backend HTTP configuration on this application gateway
 * @return the next stage of the update
 */
Update withoutBackendHttpConfiguration(String name);
/**
 * Begins the update of a backend HTTP configuration.
 * @param name the name of an existing backend HTTP configuration on this application gateway
 * @return the next stage of the update
 */
ApplicationGatewayBackendHttpConfiguration.Update updateBackendHttpConfiguration(String name);
}
/**
 * The stage of an application gateway update allowing to modify request routing rules.
 */
interface WithRequestRoutingRule {
/**
 * Begins the definition of a request routing rule for this application gateway.
 * @param name a unique name for the request routing rule
 * @return the first stage of the request routing rule
 */
ApplicationGatewayRequestRoutingRule.UpdateDefinitionStages.Blank<Update> defineRequestRoutingRule(String name);
/**
 * Removes a request routing rule from the application gateway.
 * @param name the name of the request routing rule to remove
 * @return the next stage of the update
 */
Update withoutRequestRoutingRule(String name);
/**
 * Begins the update of a request routing rule.
 * @param name the name of an existing request routing rule
 * @return the first stage of a request routing rule update or null if the requested rule does not exist
 */
ApplicationGatewayRequestRoutingRule.Update updateRequestRoutingRule(String name);
}
}
/**
 * The template for an application gateway update operation, containing all the settings that
 * can be modified, aggregated from the individual {@link UpdateStages} interfaces.
 * <p>
 * Call {@code apply()} to apply the changes to the resource in Azure.
 */
interface Update extends
Appliable<ApplicationGateway>,
Resource.UpdateWithTags<Update>,
UpdateStages.WithSize,
UpdateStages.WithInstanceCount,
UpdateStages.WithBackend,
UpdateStages.WithBackendHttpConfig,
UpdateStages.WithIpConfig,
UpdateStages.WithFrontend,
UpdateStages.WithPublicIpAddress,
UpdateStages.WithFrontendPort,
UpdateStages.WithSslCert,
UpdateStages.WithListener,
UpdateStages.WithRequestRoutingRule,
UpdateStages.WithExistingSubnet {
}
}
| |
/*
* Copyright 2001,2004 The Apache Software Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.axis.encoding.ser;
import java.lang.reflect.Array;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.StringTokenizer;
import javax.xml.namespace.QName;
import org.apache.axis.description.TypeDesc;
import org.apache.axis.encoding.DeserializationContext;
import org.apache.axis.encoding.Deserializer;
import org.apache.axis.encoding.SimpleType;
import org.apache.axis.encoding.TypeMapping;
import org.apache.axis.message.SOAPHandler;
import org.apache.axis.utils.BeanPropertyDescriptor;
import org.apache.axis.utils.Messages;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
/**
* Deserializer for
* <xsd:simpleType ...>
* <xsd:list itemType="...">
* </xsd:simpleType>
* based on SimpleDeserializer
*
* @author Ias (iasandcb@tmax.co.kr)
*/
public class SimpleListDeserializer extends SimpleDeserializer {
// Accumulates character data for the current element until onEndElement is called.
StringBuffer val = new StringBuffer();
// Single-String-argument constructor for javaType, supplied by the factory via setConstructor().
private Constructor constructor = null;
// Field name -> BeanPropertyDescriptor, used to map XML attributes to bean properties.
// NOTE(review): this field is never assigned in this class, and being private it cannot be
// populated by the superclass; onStartElement/setSimpleTypeAttributes would throw a
// NullPointerException if an attribute matches the type metadata -- confirm against
// SimpleDeserializer before relying on attribute handling.
private Map propertyMap = null;
// Field name -> deserialized attribute value, stashed in onStartElement and applied
// to the bean by setSimpleTypeAttributes() after the value has been constructed.
private HashMap attributeMap = null;
// Captured in onStartElement; used by makeUnitValue() to resolve QName prefixes.
private DeserializationContext context = null;
public QName xmlType;
// The item (component) type of the list; each whitespace-separated token becomes one element.
public Class javaType;
private TypeDesc typeDesc = null;
// NOTE(review): the two cache fields below are not read anywhere in this class.
protected SimpleListDeserializer cacheStringDSer = null;
protected QName cacheXMLType = null;
/**
 * The Deserializer is constructed with the xmlType and
 * javaType (which could be a java primitive like int.class)
 */
public SimpleListDeserializer(Class javaType, QName xmlType) {
super (javaType, xmlType);
this.xmlType = xmlType;
this.javaType = javaType;
}
/**
 * Constructs the deserializer with additional type metadata used to map
 * XML attributes onto bean properties.
 */
public SimpleListDeserializer(Class javaType, QName xmlType, TypeDesc typeDesc) {
super (javaType, xmlType, typeDesc);
this.xmlType = xmlType;
this.javaType = javaType;
this.typeDesc = typeDesc;
}
/**
 * Reset deserializer for re-use
 */
public void reset() {
val.setLength(0); // Reset string buffer back to zero
attributeMap = null; // Remove attribute map
isNil = false; // Don't know if nil
isEnded = false; // Indicate the end of element not yet called
}
/**
 * The Factory calls setConstructor.
 */
public void setConstructor(Constructor c)
{
constructor = c;
}
/**
 * There should not be nested elements, so throw an exception if this occurs.
 */
public SOAPHandler onStartChild(String namespace,
String localName,
String prefix,
Attributes attributes,
DeserializationContext context)
throws SAXException
{
throw new SAXException(
Messages.getMessage("cantHandle00", "SimpleDeserializer"));
}
/**
 * Append any characters received to the value. This method is defined
 * by Deserializer.
 */
public void characters(char [] chars, int start, int end)
throws SAXException
{
val.append(chars, start, end);
}
/**
 * Converts the accumulated character data into the list value via
 * {@link #makeValue(String)}, then applies any attribute values stashed
 * by onStartElement. Sets {@code value} to null when the element was nil.
 */
public void onEndElement(String namespace, String localName,
DeserializationContext context)
throws SAXException
{
if (isNil || val == null) {
value = null;
return;
}
try {
value = makeValue(val.toString());
} catch (InvocationTargetException ite) {
// Unwrap the reflection wrapper so the real cause reaches the caller.
Throwable realException = ite.getTargetException();
if (realException instanceof Exception)
throw new SAXException((Exception)realException);
else
throw new SAXException(ite.getMessage());
} catch (Exception e) {
throw new SAXException(e);
}
// If this is a SimpleType, set attributes we have stashed away
setSimpleTypeAttributes();
}
/**
 * Convert the string that has been accumulated into an Object. Subclasses
 * may override this.
 * @param source the serialized value to be deserialized
 * @throws Exception any exception thrown by this method will be wrapped
 */
public Object makeValue(String source) throws Exception
{
// According to XML Schema Spec Part 0: Primer 2.3.1 - white space delimiter
StringTokenizer tokenizer = new StringTokenizer(source.trim());
int length = tokenizer.countTokens();
// Result is an array of javaType with one slot per whitespace-separated token.
Object list = Array.newInstance(javaType, length);
for (int i = 0; i < length; i++) {
String token = tokenizer.nextToken();
Array.set(list, i, makeUnitValue(token));
}
return list;
}
// Converts a single list token into an instance of javaType, special-casing
// booleans, float/double (NaN/INF), and QNames before falling back to the
// single-String constructor supplied by the factory.
private Object makeUnitValue(String source) throws Exception
{
// If the javaType is a boolean, accept a number of different sources
if (javaType == boolean.class || javaType == Boolean.class) {
// This is a pretty lame test, but it is what the previous code did.
switch (source.charAt(0)) {
case '0': case 'f': case 'F':
return Boolean.FALSE;
case '1': case 't': case 'T':
return Boolean.TRUE;
default:
throw new NumberFormatException(
Messages.getMessage("badBool00"));
}
}
// If expecting a Float or a Double, need to accept some special cases.
if (javaType == float.class ||
javaType == java.lang.Float.class) {
if (source.equals("NaN")) {
return new Float(Float.NaN);
} else if (source.equals("INF")) {
return new Float(Float.POSITIVE_INFINITY);
} else if (source.equals("-INF")) {
return new Float(Float.NEGATIVE_INFINITY);
}
}
if (javaType == double.class ||
javaType == java.lang.Double.class) {
if (source.equals("NaN")) {
return new Double(Double.NaN);
} else if (source.equals("INF")) {
return new Double(Double.POSITIVE_INFINITY);
} else if (source.equals("-INF")) {
return new Double(Double.NEGATIVE_INFINITY);
}
}
if (javaType == QName.class) {
// Resolve the prefix (if any) against the namespace mappings captured
// in onStartElement. NOTE(review): context is null if onStartElement
// was never invoked -- presumably guaranteed by the SAX flow; confirm.
int colon = source.lastIndexOf(":");
String namespace = colon < 0 ? "" :
context.getNamespaceURI(source.substring(0, colon));
String localPart = colon < 0 ? source :
source.substring(colon + 1);
return new QName(namespace, localPart);
}
return constructor.newInstance(new Object [] { source });
}
/**
 * Set the bean properties that correspond to element attributes.
 *
 * This method is invoked after startElement when the element requires
 * deserialization (i.e. the element is not an href and the value is not nil.)
 * @param namespace is the namespace of the element
 * @param localName is the name of the element
 * @param prefix is the prefix of the element
 * @param attributes are the attributes on the element...used to get the type
 * @param context is the DeserializationContext
 */
public void onStartElement(String namespace, String localName,
String prefix, Attributes attributes,
DeserializationContext context)
throws SAXException
{
this.context = context;
// If we have no metadata, we have no attributes. Q.E.D.
if (typeDesc == null)
return;
// loop through the attributes and set bean properties that
// correspond to attributes
for (int i=0; i < attributes.getLength(); i++) {
QName attrQName = new QName(attributes.getURI(i),
attributes.getLocalName(i));
String fieldName = typeDesc.getFieldNameForAttribute(attrQName);
if (fieldName == null)
continue;
// look for the attribute property
BeanPropertyDescriptor bpd =
(BeanPropertyDescriptor) propertyMap.get(fieldName);
if (bpd != null) {
if (!bpd.isWriteable() || bpd.isIndexed() ) continue ;
// determine the QName for this child element
TypeMapping tm = context.getTypeMapping();
Class type = bpd.getType();
QName qn = tm.getTypeQName(type);
if (qn == null)
throw new SAXException(
Messages.getMessage("unregistered00", type.toString()));
// get the deserializer
Deserializer dSer = context.getDeserializerForType(qn);
if (dSer == null)
throw new SAXException(
Messages.getMessage("noDeser00", type.toString()));
if (! (dSer instanceof SimpleListDeserializer))
throw new SAXException(
Messages.getMessage("AttrNotSimpleType00",
bpd.getName(),
type.toString()));
// Success!  Create an object from the string and save
// it in our attribute map for later.
if (attributeMap == null) {
attributeMap = new HashMap();
}
try {
Object val = ((SimpleListDeserializer)dSer).
makeValue(attributes.getValue(i));
attributeMap.put(fieldName, val);
} catch (Exception e) {
throw new SAXException(e);
}
} // if
} // attribute loop
} // onStartElement
/**
 * Process any attributes we may have encountered (in onStartElement)
 */
private void setSimpleTypeAttributes() throws SAXException {
// if this isn't a simpleType bean, wont have attributes
if (! SimpleType.class.isAssignableFrom(javaType) ||
attributeMap == null)
return;
// loop through map
Set entries = attributeMap.entrySet();
for (Iterator iterator = entries.iterator(); iterator.hasNext();) {
Map.Entry entry = (Map.Entry) iterator.next();
String name = (String) entry.getKey();
Object val = entry.getValue();
BeanPropertyDescriptor bpd =
(BeanPropertyDescriptor) propertyMap.get(name);
if (!bpd.isWriteable() || bpd.isIndexed()) continue;
try {
bpd.set(value, val );
} catch (Exception e) {
throw new SAXException(e);
}
}
}
}
| |
/*
* Copyright (C) 2013 Brett Wooldridge
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.zaxxer.hikari;
import java.io.Closeable;
import java.io.PrintWriter;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;
import java.util.concurrent.atomic.AtomicBoolean;
import javax.sql.DataSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.zaxxer.hikari.metrics.MetricsTrackerFactory;
import com.zaxxer.hikari.pool.HikariPool;
import com.zaxxer.hikari.pool.HikariPool.PoolInitializationException;
/**
* The HikariCP pooled DataSource.
*
* @author Brett Wooldridge
*/
public class HikariDataSource extends HikariConfig implements DataSource, Closeable
{
private static final Logger LOGGER = LoggerFactory.getLogger(HikariDataSource.class);
// Set once on shutdown; presumably read by isClosed()/close() -- not visible in this chunk, confirm.
private final AtomicBoolean isShutdown = new AtomicBoolean();
// Non-null only when constructed from a HikariConfig; lets getConnection() bypass
// the volatile read and double-checked locking of the lazy-initialization path.
private final HikariPool fastPathPool;
// Lazily created on first getConnection() when the default constructor was used;
// volatile for safe publication under double-checked locking.
private volatile HikariPool pool;
/**
 * Default constructor.  Setters should be used to configure the pool.  Using
 * this constructor vs. {@link #HikariDataSource(HikariConfig)} will
 * result in {@link #getConnection()} performance that is slightly lower
 * due to lazy initialization checks.
 */
public HikariDataSource()
{
super();
// No eager pool; getConnection() will lazily create it on first use.
fastPathPool = null;
}
/**
 * Construct a HikariDataSource with the specified configuration.  The pool is
 * started eagerly, so configuration errors surface here rather than on first
 * {@link #getConnection()}.
 *
 * @param configuration a HikariConfig instance
 */
public HikariDataSource(HikariConfig configuration)
{
configuration.validate();
configuration.copyState(this);
LOGGER.info("{} - Starting...", configuration.getPoolName());
// Assign both fields so getConnection() takes the lock-free fast path.
pool = fastPathPool = new HikariPool(this);
LOGGER.info("{} - Start completed.", configuration.getPoolName());
}
/** {@inheritDoc} */
@Override
public Connection getConnection() throws SQLException
{
if (isClosed()) {
throw new SQLException("HikariDataSource " + this + " has been closed.");
}
// Fast path: the pool was created eagerly by HikariDataSource(HikariConfig).
if (fastPathPool != null) {
return fastPathPool.getConnection();
}
// Lazy path for the default constructor: create the pool on first use.
// See http://en.wikipedia.org/wiki/Double-checked_locking#Usage_in_Java
HikariPool result = pool;
if (result == null) {
synchronized (this) {
result = pool;
if (result == null) {
validate();
LOGGER.info("{} - Starting...", getPoolName());
try {
pool = result = new HikariPool(this);
}
catch (PoolInitializationException pie) {
// Unwrap a SQLException cause so callers see the standard JDBC type.
if (pie.getCause() instanceof SQLException) {
throw (SQLException) pie.getCause();
}
else {
throw pie;
}
}
LOGGER.info("{} - Start completed.", getPoolName());
}
}
}
return result.getConnection();
}
/** {@inheritDoc} */
@Override
public Connection getConnection(String username, String password) throws SQLException
{
throw new SQLFeatureNotSupportedException();
}
/** {@inheritDoc} */
@Override
public PrintWriter getLogWriter() throws SQLException
{
HikariPool p = pool;
return (p != null ? p.getUnwrappedDataSource().getLogWriter() : null);
}
/** {@inheritDoc} */
@Override
public void setLogWriter(PrintWriter out) throws SQLException
{
HikariPool p = pool;
if (p != null) {
p.getUnwrappedDataSource().setLogWriter(out);
}
}
/** {@inheritDoc} */
@Override
public void setLoginTimeout(int seconds) throws SQLException
{
HikariPool p = pool;
if (p != null) {
p.getUnwrappedDataSource().setLoginTimeout(seconds);
}
}
/** {@inheritDoc} */
@Override
public int getLoginTimeout() throws SQLException
{
HikariPool p = pool;
return (p != null ? p.getUnwrappedDataSource().getLoginTimeout() : 0);
}
/** {@inheritDoc} */
@Override
public java.util.logging.Logger getParentLogger() throws SQLFeatureNotSupportedException
{
throw new SQLFeatureNotSupportedException();
}
/** {@inheritDoc} */
@Override
@SuppressWarnings("unchecked")
public <T> T unwrap(Class<T> iface) throws SQLException
{
if (iface.isInstance(this)) {
return (T) this;
}
HikariPool p = pool;
if (p != null) {
final DataSource unwrappedDataSource = p.getUnwrappedDataSource();
if (iface.isInstance(unwrappedDataSource)) {
return (T) unwrappedDataSource;
}
if (unwrappedDataSource != null) {
return unwrappedDataSource.unwrap(iface);
}
}
throw new SQLException("Wrapped DataSource is not an instance of " + iface);
}
/** {@inheritDoc} */
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException
{
if (iface.isInstance(this)) {
return true;
}
HikariPool p = pool;
if (p != null) {
final DataSource unwrappedDataSource = p.getUnwrappedDataSource();
if (iface.isInstance(unwrappedDataSource)) {
return true;
}
if (unwrappedDataSource != null) {
return unwrappedDataSource.isWrapperFor(iface);
}
}
return false;
}
/** {@inheritDoc} */
@Override
public void setMetricRegistry(Object metricRegistry)
{
boolean isAlreadySet = getMetricRegistry() != null;
super.setMetricRegistry(metricRegistry);
HikariPool p = pool;
if (p != null) {
if (isAlreadySet) {
throw new IllegalStateException("MetricRegistry can only be set one time");
}
else {
p.setMetricRegistry(super.getMetricRegistry());
}
}
}
/** {@inheritDoc} */
@Override
public void setMetricsTrackerFactory(MetricsTrackerFactory metricsTrackerFactory)
{
boolean isAlreadySet = getMetricsTrackerFactory() != null;
super.setMetricsTrackerFactory(metricsTrackerFactory);
HikariPool p = pool;
if (p != null) {
if (isAlreadySet) {
throw new IllegalStateException("MetricsTrackerFactory can only be set one time");
}
else {
p.setMetricsTrackerFactory(super.getMetricsTrackerFactory());
}
}
}
/** {@inheritDoc} */
@Override
public void setHealthCheckRegistry(Object healthCheckRegistry)
{
boolean isAlreadySet = getHealthCheckRegistry() != null;
super.setHealthCheckRegistry(healthCheckRegistry);
HikariPool p = pool;
if (p != null) {
if (isAlreadySet) {
throw new IllegalStateException("HealthCheckRegistry can only be set one time");
}
else {
p.setHealthCheckRegistry(super.getHealthCheckRegistry());
}
}
}
/**
* Get the {@code HikariPoolMXBean} for this HikariDataSource instance. If this method is called on
* a {@code HikariDataSource} that has been constructed without a {@code HikariConfig} instance,
* and before an initial call to {@code #getConnection()}, the return value will be {@code null}.
*
* @return the {@code HikariPoolMXBean} instance, or {@code null}.
*/
public HikariPoolMXBean getHikariPoolMXBean()
{
return pool;
}
/**
* Get the {@code HikariConfigMXBean} for this HikariDataSource instance.
*
* @return the {@code HikariConfigMXBean} instance.
*/
public HikariConfigMXBean getHikariConfigMXBean()
{
return this;
}
/**
* Evict a connection from the pool. If the connection has already been closed (returned to the pool)
* this may result in a "soft" eviction; the connection will be evicted sometime in the future if it is
* currently in use. If the connection has not been closed, the eviction is immediate.
*
* @param connection the connection to evict from the pool
*/
public void evictConnection(Connection connection)
{
HikariPool p;
if (!isClosed() && (p = pool) != null && connection.getClass().getName().startsWith("com.zaxxer.hikari")) {
p.evictConnection(connection);
}
}
/**
* Suspend allocation of connections from the pool. All callers to <code>getConnection()</code>
* will block indefinitely until <code>resumePool()</code> is called.
*
* @deprecated Call the {@code HikariPoolMXBean#suspendPool()} method on the {@code HikariPoolMXBean}
* obtained by {@code #getHikariPoolMXBean()} or JMX lookup.
*/
@Deprecated
public void suspendPool()
{
HikariPool p;
if (!isClosed() && (p = pool) != null) {
p.suspendPool();
}
}
/**
* Resume allocation of connections from the pool.
*
* @deprecated Call the {@code HikariPoolMXBean#resumePool()} method on the {@code HikariPoolMXBean}
* obtained by {@code #getHikariPoolMXBean()} or JMX lookup.
*/
@Deprecated
public void resumePool()
{
HikariPool p;
if (!isClosed() && (p = pool) != null) {
p.resumePool();
}
}
/**
* Shutdown the DataSource and its associated pool.
*/
@Override
public void close()
{
if (isShutdown.getAndSet(true)) {
return;
}
HikariPool p = pool;
if (p != null) {
try {
LOGGER.info("{} - Shutdown initiated...", getPoolName());
p.shutdown();
LOGGER.info("{} - Shutdown completed.", getPoolName());
}
catch (InterruptedException e) {
LOGGER.warn("{} - Interrupted during closing", getPoolName(), e);
Thread.currentThread().interrupt();
}
}
}
/**
* Determine whether the HikariDataSource has been closed.
*
* @return true if the HikariDataSource has been closed, false otherwise
*/
public boolean isClosed()
{
return isShutdown.get();
}
/**
* Shutdown the DataSource and its associated pool.
*
* @deprecated This method has been deprecated, please use {@link #close()} instead
*/
@Deprecated
public void shutdown()
{
LOGGER.warn("The shutdown() method has been deprecated, please use the close() method instead");
close();
}
/** {@inheritDoc} */
@Override
public String toString()
{
return "HikariDataSource (" + pool + ")";
}
}
| |
package com.controlgroup.coffeesystem.zelda.client;
import com.google.gwt.canvas.dom.client.Context2d;
import com.google.gwt.canvas.dom.client.CssColor;
import com.google.gwt.dom.client.CanvasElement;
import com.google.gwt.dom.client.ImageElement;
import com.google.gwt.user.client.Window;
/**
 * Canvas rendering helpers for the Zelda-themed coffee status display:
 * background tiles, sprites, the NES bitmap font, and coffee-cup steam.
 *
 * Created by timmattison on 1/20/15.
 */
public class Drawing {
    // Size of one background grid tile, taken from the cave wall sprite.
    public static final int gridElementWidth = Graphics.caveWallImageElement.getWidth();
    public static final int gridElementHeight = Graphics.caveWallImageElement.getHeight();
    public static final int linkPadding = 40;
    public static final int linkWidth = 48;
    public static final int linkHeight = 48;

    // Glyph size and the gap between adjacent glyphs on the NES font sprite sheet
    // (so the glyph pitch is letterWidth + spriteSheetHorizontalLetterSpacing).
    private static final int letterWidth = 21;
    private static final int letterHeight = 21;
    private static final int spriteSheetHorizontalLetterSpacing = 27;
    private static final int spriteSheetVerticalLetterSpacing = 27;
    // Number of glyphs per row on the character sprite sheet.
    private static final int CHARACTER_SPRITE_SHEET_HORIZONTAL_COUNT = 16;
    private static final CssColor steamColor = CssColor.make(255, 255, 255);

    /**
     * 0 == Open floor (not drawn; presumably walkable -- confirm against movement code)
     * 1 == Wall
     * 2 == Where Link starts out
     * 3 == Blank, but a place where Link can't walk
     */
    public static final int[][] caveWalls = {
            {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
            {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
            {1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1},
            {1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1},
            {1, 1, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 1, 1},
            {1, 1, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 1, 1},
            {1, 1, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 1, 1},
            {1, 1, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 1, 1},
            {1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1},
            {1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 1, 1, 1, 1, 1, 1},
            {1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1},
    };

    public static final int HUD_VERTICAL_SIZE = 48 * 3;

    // Lazily-created per-cup steam animations; see drawCoffeeCups().
    private static Steam[] steam = null;

    // Steam output drops one step for every this-many minutes of coffee age.
    private static final long MINUTES_PER_REDUCTION = 15;

    /** Fills the entire canvas backing {@code context} with {@code redrawColor}. */
    public static void clear(CssColor redrawColor, Context2d context) {
        CanvasElement canvasElement = context.getCanvas();
        int width = canvasElement.getWidth();
        int height = canvasElement.getHeight();

        context.setFillStyle(redrawColor);
        context.fillRect(0, 0, width, height);
    }

    /** Draws the two torch fires, alternating sprite frames every other frame. */
    public static void drawFire(int frameCounter, Context2d context) {
        // Alternate between the fire images on each frame
        ImageElement fireImageElement = Graphics.fire0ImageElement;

        if (frameCounter % 2 == 0) {
            fireImageElement = Graphics.fire1ImageElement;
        }

        // Draw the fire on the left
        context.drawImage(fireImageElement, Constants.LEFT_FIRE_X, Constants.LEFT_FIRE_Y, fireImageElement.getWidth(), fireImageElement.getHeight());

        // Draw the fire on the right
        context.drawImage(fireImageElement, Constants.RIGHT_FIRE_X, Constants.RIGHT_FIRE_Y, fireImageElement.getWidth(), fireImageElement.getHeight());
    }

    /**
     * Draws one wall tile for every cell of {@link #caveWalls} whose value is 1,
     * offsetting the whole grid by (xOffset, yOffset) pixels.
     */
    public static void drawCaveWalls(Context2d context, int xOffset, int yOffset) {
        int y = 0;

        for (int[] rowData : caveWalls) {
            int x = 0;

            for (int columnData : rowData) {
                if (columnData == 1) {
                    drawCaveWall(context, xOffset + (x * Graphics.caveWallImageElement.getWidth()), yOffset + (y * Graphics.caveWallImageElement.getHeight()));
                }

                x++;
            }

            y++;
        }
    }

    /** Draws a single cave wall tile at the given pixel position. */
    private static void drawCaveWall(Context2d context, int destinationX, int destinationY) {
        context.drawImage(Graphics.caveWallImageElement, destinationX, destinationY, Graphics.caveWallImageElement.getWidth(), Graphics.caveWallImageElement.getHeight());
    }

    /** Draws the old man sprite at his fixed position. */
    public static void drawOldGuy(CssColor redrawColor, Context2d context) {
        context.setFillStyle(redrawColor);
        context.drawImage(Graphics.oldManImageElement, Constants.OLD_GUY_STATIC_X, Constants.OLD_GUY_STATIC_Y, Constants.OLD_GUY_STATIC_WIDTH, Constants.OLD_GUY_STATIC_HEIGHT);
    }

    /**
     * Draws up to {@link Constants#MAX_CUPS_TO_DRAW} coffee cups, centered around
     * {@link Constants#COFFEE_CUPS_X_CENTER}, each with an animated steam plume
     * whose intensity decays with the coffee's age.
     *
     * @param count              number of cups to draw (clamped to the maximum)
     * @param coffeeAgeInMinutes age of the coffee, used to attenuate the steam
     */
    public static void drawCoffeeCups(Context2d context, int count, long coffeeAgeInMinutes) {
        // TODO - Make the coffee cups dance

        // Lazily create one steam animation per possible cup.
        if (steam == null) {
            steam = new Steam[Constants.MAX_CUPS_TO_DRAW];

            for (int loop = 0; loop < Constants.MAX_CUPS_TO_DRAW; loop++) {
                steam[loop] = new BasicSteam();
            }
        }

        if (count > Constants.MAX_CUPS_TO_DRAW) {
            count = Constants.MAX_CUPS_TO_DRAW;
        }

        int coffeeCupWidth = Graphics.coffeeCupImageElement.getWidth();
        int coffeeCupHeight = Graphics.coffeeCupImageElement.getHeight();

        // Shift the row of cups so the group stays centered regardless of count.
        int offset = (count * (coffeeCupWidth + Constants.COFFEE_CUPS_BUFFER)) / 2;

        for (int loop = 0; loop < count; loop++) {
            int center = Constants.COFFEE_CUPS_X_CENTER - (coffeeCupWidth * loop) - (Constants.COFFEE_CUPS_BUFFER * loop) + offset;
            context.drawImage(Graphics.coffeeCupImageElement, center, Constants.COFFEE_CUPS_Y, coffeeCupWidth, coffeeCupHeight);
            drawSteam(context, steam[loop], center + 8, Constants.COFFEE_CUPS_Y - 14, coffeeAgeInMinutes);
        }
    }

    /**
     * Draws a single character from the NES font sprite sheet.
     * <p>
     * The sheet is laid out as a grid: row 0 holds the digits, rows 0-2 hold
     * A-Z continuing after the digits, and row 2 holds punctuation.  Unknown
     * characters trigger an alert and are skipped.
     */
    public static void drawLetter(Context2d context, char letter, int destinationX, int destinationY) {
        // sourceX/sourceY are GRID coordinates into the sprite sheet here; both
        // are converted to pixel offsets in one place, just before drawing.
        int sourceX = -1;
        int sourceY = -1;

        if ((letter >= '0') && (letter <= '9')) {
            sourceY = 0;
            // BUG FIX: this value was previously pre-multiplied by the glyph
            // pitch and then scaled AGAIN below, so digits sampled the wrong
            // sprite-sheet location.  Store the bare column index like the
            // other branches do.
            sourceX = letter - '0';
        } else if ((letter >= 'A') && (letter <= 'Z')) {
            int letterIndex = letter - 'A';

            // Move past the numbers
            letterIndex += 10;

            sourceX = letterIndex % CHARACTER_SPRITE_SHEET_HORIZONTAL_COUNT;
            sourceY = letterIndex / CHARACTER_SPRITE_SHEET_HORIZONTAL_COUNT;
        } else {
            // Punctuation lives on row 2 of the sheet.
            sourceY = 2;

            if (letter == ',') {
                sourceX = 4;
            } else if (letter == '!') {
                sourceX = 5;
            } else if (letter == '\'') {
                sourceX = 6;
            } else if (letter == '&') {
                sourceX = 7;
            } else if (letter == '.') {
                sourceX = 8;
            } else if (letter == '"') {
                sourceX = 9;
            } else if (letter == '?') {
                sourceX = 10;
            } else if (letter == '-') {
                sourceX = 11;
            } else if (letter == ' ') {
                sourceX = 15;
            } else {
                // Can't do anything with this character
                Window.alert("Cannot process letter: [" + letter + "]");
                return;
            }
        }

        // Convert grid coordinates to pixel offsets on the sprite sheet.
        sourceX = (letterWidth + spriteSheetHorizontalLetterSpacing) * sourceX;
        sourceY = (letterHeight + spriteSheetVerticalLetterSpacing) * sourceY;

        context.drawImage(Graphics.nesFontImageElement, sourceX, sourceY, letterWidth, letterHeight, destinationX, destinationY, letterWidth, letterHeight);
    }

    /**
     * Draws {@code string} (upper-cased) one glyph at a time, advancing by
     * glyph width plus {@code characterSpacing} pixels per character.
     */
    public static void drawString(Context2d context, String string, int destinationX, int destinationY, int characterSpacing) {
        string = string.toUpperCase();

        int currentX = destinationX;

        for (char letter : string.toCharArray()) {
            drawLetter(context, letter, currentX, destinationY);
            currentX += (letterWidth + characterSpacing);
        }
    }

    /** Clears {@code destination} to {@code redrawColor}, then blits {@code source} onto it. */
    public static void copyBufferOverOtherBuffer(CssColor redrawColor, Context2d destination, Context2d source) {
        CanvasElement canvasElement = destination.getCanvas();
        int width = canvasElement.getWidth();
        int height = canvasElement.getHeight();

        destination.setFillStyle(redrawColor);
        destination.fillRect(0, 0, width, height);
        destination.drawImage(source.getCanvas(), 0, 0, width, height);
    }

    /** Draws the background image scaled to fill the whole canvas. */
    public static void drawBackground(Context2d context) {
        // Draw the background
        context.drawImage(Graphics.backgroundImageElement, 0, 0, context.getCanvas().getWidth(), context.getCanvas().getHeight());
    }

    /**
     * Draws the HUD life meter: the first heart flickers between half and empty
     * while alive, and the remaining hearts are always empty.
     */
    public static void drawLifeMeter(int frameCounter, Context2d context, boolean alive) {
        // Alternate between half heart and empty heart on each frame:
        ImageElement heartImageElement = Graphics.halfHeartImageElement;

        if (!alive || (frameCounter % 2 == 0)) {
            heartImageElement = Graphics.emptyHeartImageElement;
        }

        // Draw the first heart
        context.drawImage(heartImageElement, Constants.FIRST_HEART_X, Constants.FIRST_HEART_Y, heartImageElement.getWidth(), heartImageElement.getHeight());

        // Draw the rest of the hearts
        for (int loop = 1; loop < Constants.HEARTS_TO_DRAW; loop++) {
            context.drawImage(Graphics.emptyHeartImageElement,
                    Constants.FIRST_HEART_X + (Constants.HEART_BUFFER * loop) + (Graphics.emptyHeartImageElement.getWidth() * loop),
                    Constants.FIRST_HEART_Y, Graphics.emptyHeartImageElement.getWidth(), Graphics.emptyHeartImageElement.getHeight());
        }
    }

    /** Draws a Link sprite at the given destination. */
    public static void drawLink(Context2d context, ImageElement linkImageElement, int destinationX, int destinationY) {
        // Width/height arguments were previously passed in (height, width) order;
        // harmless while both constants are 48, but corrected here for clarity.
        context.drawImage(linkImageElement, 0, 0, linkWidth, linkHeight, destinationX, destinationY, linkWidth, linkHeight);
    }

    /**
     * Advances and renders one steam plume as individual 1x1 white pixels.
     *
     * @param coffeeAgeInMinutes used to compute the attenuation ("limiter")
     *                           passed to the steam animation
     */
    public static void drawSteam(Context2d context, Steam steam, int destinationX, int destinationY, long coffeeAgeInMinutes) {
        context.setFillStyle(steamColor);

        // Calculate the "limiter" which is how much we'll reduce the steam based on the coffee's age in minutes
        long limiter = coffeeAgeInMinutes / MINUTES_PER_REDUCTION;

        boolean[][] currentSteam = steam.next(limiter);

        for (int x = 0; x < currentSteam.length; x++) {
            for (int y = 0; y < currentSteam[x].length; y++) {
                if (currentSteam[x][y]) {
                    context.fillRect(destinationX + x, destinationY + y, 1, 1);
                }
            }
        }
    }
}
| |
/*
* Copyright 2015 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.cloud.storage;
import static org.easymock.EasyMock.capture;
import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.createStrictMock;
import static org.easymock.EasyMock.eq;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import com.google.cloud.ReadChannel;
import com.google.cloud.storage.Acl.Project;
import com.google.cloud.storage.Acl.Project.ProjectRole;
import com.google.cloud.storage.Acl.Role;
import com.google.cloud.storage.Acl.User;
import com.google.cloud.storage.Blob.BlobSourceOption;
import com.google.cloud.storage.Storage.BlobWriteOption;
import com.google.cloud.storage.Storage.CopyRequest;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.io.BaseEncoding;
import org.easymock.Capture;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.net.URL;
import java.security.Key;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import javax.crypto.spec.SecretKeySpec;
public class BlobTest {

  // Fixture ACLs, used both as blob metadata and as arguments in the ACL tests.
  private static final Acl ACL = Acl.of(User.ofAllAuthenticatedUsers(), Role.OWNER);
  private static final Acl OTHER_ACL = Acl.of(new Project(ProjectRole.OWNERS, "p"), Role.READER);
  private static final List<Acl> ACLS = ImmutableList.of(ACL, OTHER_ACL);

  // Representative values for every BlobInfo property exercised below.
  private static final Integer COMPONENT_COUNT = 2;
  private static final String CONTENT_TYPE = "text/html";
  private static final String CACHE_CONTROL = "cache";
  private static final String CONTENT_DISPOSITION = "content-disposition";
  private static final String CONTENT_ENCODING = "UTF-8";
  private static final String CONTENT_LANGUAGE = "En";
  private static final String CRC32 = "0xFF00";
  private static final Long DELETE_TIME = System.currentTimeMillis();
  private static final String ETAG = "0xFF00";
  private static final Long GENERATION = 1L;
  private static final String GENERATED_ID = "B/N:1";
  private static final String MD5 = "0xFF00";
  private static final String MEDIA_LINK = "http://media/b/n";
  private static final Map<String, String> METADATA = ImmutableMap.of("n1", "v1", "n2", "v2");
  private static final Long META_GENERATION = 10L;
  private static final User OWNER = new User("user@gmail.com");
  private static final String SELF_LINK = "http://storage/b/n";
  private static final Long SIZE = 1024L;
  // Times are ordered create < update < delete by construction.
  private static final Long UPDATE_TIME = DELETE_TIME - 1L;
  private static final Long CREATE_TIME = UPDATE_TIME - 1L;
  private static final String ENCRYPTION_ALGORITHM = "AES256";
  private static final String KEY_SHA256 = "keySha";
  private static final BlobInfo.CustomerEncryption CUSTOMER_ENCRYPTION =
      new BlobInfo.CustomerEncryption(ENCRYPTION_ALGORITHM, KEY_SHA256);

  // FULL_BLOB_INFO populates every field; BLOB_INFO is the minimal fixture most
  // tests use; DIRECTORY_INFO models a zero-size "directory" placeholder object.
  private static final BlobInfo FULL_BLOB_INFO = BlobInfo.newBuilder("b", "n", GENERATION)
      .setAcl(ACLS)
      .setComponentCount(COMPONENT_COUNT)
      .setContentType(CONTENT_TYPE)
      .setCacheControl(CACHE_CONTROL)
      .setContentDisposition(CONTENT_DISPOSITION)
      .setContentEncoding(CONTENT_ENCODING)
      .setContentLanguage(CONTENT_LANGUAGE)
      .setCrc32c(CRC32)
      .setDeleteTime(DELETE_TIME)
      .setEtag(ETAG)
      .setGeneratedId(GENERATED_ID)
      .setMd5(MD5)
      .setMediaLink(MEDIA_LINK)
      .setMetadata(METADATA)
      .setMetageneration(META_GENERATION)
      .setOwner(OWNER)
      .setSelfLink(SELF_LINK)
      .setSize(SIZE)
      .setUpdateTime(UPDATE_TIME)
      .setCreateTime(CREATE_TIME)
      .setCustomerEncryption(CUSTOMER_ENCRYPTION)
      .build();
  private static final BlobInfo BLOB_INFO = BlobInfo.newBuilder("b", "n")
      .setMetageneration(42L)
      .build();
  private static final BlobInfo DIRECTORY_INFO = BlobInfo.newBuilder("b", "n/")
      .setSize(0L)
      .setIsDirectory(true)
      .build();

  // AES256 customer-supplied encryption key, in base64 and javax.crypto.Key form.
  private static final String BASE64_KEY = "JVzfVl8NLD9FjedFuStegjRfES5ll5zc59CIXw572OA=";
  private static final Key KEY =
      new SecretKeySpec(BaseEncoding.base64().decode(BASE64_KEY), "AES256");

  private Storage storage;      // strict mock: expectations checked in order, verified in tearDown
  private Blob blob;            // instance under test, backed by the strict mock
  private Blob expectedBlob;    // expected result, backed by the relaxed options-only mock
  private Storage serviceMockReturnsOptions = createMock(Storage.class);
  private StorageOptions mockOptions = createMock(StorageOptions.class);
  @Before
  public void setUp() {
    // Strict mock: recorded expectations must be satisfied in exact order.
    storage = createStrictMock(Storage.class);
  }

  @After
  public void tearDown() throws Exception {
    // Every test must have replayed the mock and consumed all recorded calls.
    verify(storage);
  }

  // Builds expectedBlob against the relaxed mock, which only answers getOptions()
  // exactly optionsCalls times (Blob construction/comparison triggers those calls).
  private void initializeExpectedBlob(int optionsCalls) {
    expect(serviceMockReturnsOptions.getOptions()).andReturn(mockOptions).times(optionsCalls);
    replay(serviceMockReturnsOptions);
    expectedBlob = new Blob(serviceMockReturnsOptions, new BlobInfo.BuilderImpl(BLOB_INFO));
  }

  // Builds the Blob under test against the strict storage mock; call only after
  // all expectations on 'storage' have been recorded and replayed.
  private void initializeBlob() {
    blob = new Blob(storage, new BlobInfo.BuilderImpl(BLOB_INFO));
  }
  // exists() must pass a fields()-restricted get and report presence.
  @Test
  public void testExists_True() throws Exception {
    initializeExpectedBlob(1);
    Storage.BlobGetOption[] expectedOptions = {Storage.BlobGetOption.fields()};
    expect(storage.getOptions()).andReturn(mockOptions);
    expect(storage.get(expectedBlob.getBlobId(), expectedOptions)).andReturn(expectedBlob);
    replay(storage);
    initializeBlob();
    assertTrue(blob.exists());
  }

  // A null result from storage.get() means the blob does not exist.
  @Test
  public void testExists_False() throws Exception {
    Storage.BlobGetOption[] expectedOptions = {Storage.BlobGetOption.fields()};
    expect(storage.getOptions()).andReturn(null);
    expect(storage.get(BLOB_INFO.getBlobId(), expectedOptions)).andReturn(null);
    replay(storage);
    initializeBlob();
    assertFalse(blob.exists());
  }

  // getContent() delegates to storage.readAllBytes().
  @Test
  public void testContent() throws Exception {
    initializeExpectedBlob(2);
    byte[] content = {1, 2};
    expect(storage.getOptions()).andReturn(mockOptions);
    expect(storage.readAllBytes(BLOB_INFO.getBlobId())).andReturn(content);
    replay(storage);
    initializeBlob();
    assertArrayEquals(content, blob.getContent());
  }

  // Both the base64-String and Key overloads must translate to the same
  // Storage.BlobSourceOption.decryptionKey(BASE64_KEY) call (hence times(2)).
  @Test
  public void testContentWithDecryptionKey() throws Exception {
    initializeExpectedBlob(2);
    byte[] content = {1, 2};
    expect(storage.getOptions()).andReturn(mockOptions);
    expect(storage.readAllBytes(BLOB_INFO.getBlobId(),
        Storage.BlobSourceOption.decryptionKey(BASE64_KEY)))
        .andReturn(content).times(2);
    replay(storage);
    initializeBlob();
    assertArrayEquals(content, blob.getContent(BlobSourceOption.decryptionKey(BASE64_KEY)));
    assertArrayEquals(content, blob.getContent(BlobSourceOption.decryptionKey(KEY)));
  }
  // reload() fetches a fresh Blob via storage.get() with no options.
  @Test
  public void testReload() throws Exception {
    initializeExpectedBlob(2);
    Blob expectedReloadedBlob = expectedBlob.toBuilder().setCacheControl("c").build();
    expect(storage.getOptions()).andReturn(mockOptions);
    expect(storage.get(BLOB_INFO.getBlobId(), new Storage.BlobGetOption[0]))
        .andReturn(expectedReloadedBlob);
    replay(storage);
    initializeBlob();
    Blob updatedBlob = blob.reload();
    assertEquals(expectedReloadedBlob, updatedBlob);
  }

  // reload() propagates a null get() result (blob deleted concurrently).
  @Test
  public void testReloadNull() throws Exception {
    initializeExpectedBlob(1);
    expect(storage.getOptions()).andReturn(mockOptions);
    expect(storage.get(BLOB_INFO.getBlobId(), new Storage.BlobGetOption[0])).andReturn(null);
    replay(storage);
    initializeBlob();
    Blob reloadedBlob = blob.reload();
    assertNull(reloadedBlob);
  }

  // Blob-level metagenerationMatch() must convert to the Storage-level option
  // carrying the blob's own metageneration (42, from BLOB_INFO).
  @Test
  public void testReloadWithOptions() throws Exception {
    initializeExpectedBlob(2);
    Blob expectedReloadedBlob = expectedBlob.toBuilder().setCacheControl("c").build();
    Storage.BlobGetOption[] options = {Storage.BlobGetOption.metagenerationMatch(42L)};
    expect(storage.getOptions()).andReturn(mockOptions);
    expect(storage.get(BLOB_INFO.getBlobId(), options)).andReturn(expectedReloadedBlob);
    replay(storage);
    initializeBlob();
    Blob updatedBlob = blob.reload(BlobSourceOption.metagenerationMatch());
    assertEquals(expectedReloadedBlob, updatedBlob);
  }

  // update() sends the blob's current state through storage.update().
  @Test
  public void testUpdate() throws Exception {
    initializeExpectedBlob(2);
    Blob expectedUpdatedBlob = expectedBlob.toBuilder().setCacheControl("c").build();
    expect(storage.getOptions()).andReturn(mockOptions).times(2);
    expect(storage.update(eq(expectedUpdatedBlob), new Storage.BlobTargetOption[0]))
        .andReturn(expectedUpdatedBlob);
    replay(storage);
    initializeBlob();
    Blob updatedBlob = new Blob(storage, new BlobInfo.BuilderImpl(expectedUpdatedBlob));
    Blob actualUpdatedBlob = updatedBlob.update();
    assertEquals(expectedUpdatedBlob, actualUpdatedBlob);
  }

  // delete() delegates to storage.delete() and returns its boolean result.
  @Test
  public void testDelete() throws Exception {
    initializeExpectedBlob(2);
    expect(storage.getOptions()).andReturn(mockOptions);
    expect(storage.delete(BLOB_INFO.getBlobId(), new Storage.BlobSourceOption[0])).andReturn(true);
    replay(storage);
    initializeBlob();
    assertTrue(blob.delete());
  }
  // copyTo(bucket): same object name, new bucket; the captured CopyRequest is
  // inspected for source, target, overrideInfo=false, and empty option lists.
  @Test
  public void testCopyToBucket() throws Exception {
    initializeExpectedBlob(2);
    BlobInfo target = BlobInfo.newBuilder(BlobId.of("bt", "n")).build();
    CopyWriter copyWriter = createMock(CopyWriter.class);
    Capture<CopyRequest> capturedCopyRequest = Capture.newInstance();
    expect(storage.getOptions()).andReturn(mockOptions);
    expect(storage.copy(capture(capturedCopyRequest))).andReturn(copyWriter);
    replay(storage);
    initializeBlob();
    CopyWriter returnedCopyWriter = blob.copyTo("bt");
    assertEquals(copyWriter, returnedCopyWriter);
    assertEquals(capturedCopyRequest.getValue().getSource(), blob.getBlobId());
    assertEquals(capturedCopyRequest.getValue().getTarget(), target);
    assertFalse(capturedCopyRequest.getValue().overrideInfo());
    assertTrue(capturedCopyRequest.getValue().getSourceOptions().isEmpty());
    assertTrue(capturedCopyRequest.getValue().getTargetOptions().isEmpty());
  }

  // copyTo(bucket, name): both bucket and object name change.
  @Test
  public void testCopyTo() throws Exception {
    initializeExpectedBlob(2);
    BlobInfo target = BlobInfo.newBuilder(BlobId.of("bt", "nt")).build();
    CopyWriter copyWriter = createMock(CopyWriter.class);
    Capture<CopyRequest> capturedCopyRequest = Capture.newInstance();
    expect(storage.getOptions()).andReturn(mockOptions);
    expect(storage.copy(capture(capturedCopyRequest))).andReturn(copyWriter);
    replay(storage);
    initializeBlob();
    CopyWriter returnedCopyWriter = blob.copyTo("bt", "nt");
    assertEquals(copyWriter, returnedCopyWriter);
    assertEquals(capturedCopyRequest.getValue().getSource(), blob.getBlobId());
    assertEquals(capturedCopyRequest.getValue().getTarget(), target);
    assertFalse(capturedCopyRequest.getValue().overrideInfo());
    assertTrue(capturedCopyRequest.getValue().getSourceOptions().isEmpty());
    assertTrue(capturedCopyRequest.getValue().getTargetOptions().isEmpty());
  }

  // copyTo(BlobId): target specified directly as a BlobId.
  @Test
  public void testCopyToBlobId() throws Exception {
    initializeExpectedBlob(2);
    BlobInfo target = BlobInfo.newBuilder(BlobId.of("bt", "nt")).build();
    BlobId targetId = BlobId.of("bt", "nt");
    CopyWriter copyWriter = createMock(CopyWriter.class);
    Capture<CopyRequest> capturedCopyRequest = Capture.newInstance();
    expect(storage.getOptions()).andReturn(mockOptions);
    expect(storage.copy(capture(capturedCopyRequest))).andReturn(copyWriter);
    replay(storage);
    initializeBlob();
    CopyWriter returnedCopyWriter = blob.copyTo(targetId);
    assertEquals(copyWriter, returnedCopyWriter);
    assertEquals(capturedCopyRequest.getValue().getSource(), blob.getBlobId());
    assertEquals(capturedCopyRequest.getValue().getTarget(), target);
    assertFalse(capturedCopyRequest.getValue().overrideInfo());
    assertTrue(capturedCopyRequest.getValue().getSourceOptions().isEmpty());
    assertTrue(capturedCopyRequest.getValue().getTargetOptions().isEmpty());
  }
  // reader() returns the channel produced by storage.reader().
  @Test
  public void testReader() throws Exception {
    initializeExpectedBlob(2);
    ReadChannel channel = createMock(ReadChannel.class);
    expect(storage.getOptions()).andReturn(mockOptions);
    expect(storage.reader(BLOB_INFO.getBlobId())).andReturn(channel);
    replay(storage);
    initializeBlob();
    assertSame(channel, blob.reader());
  }

  // Both decryption-key overloads (base64 String and Key) must map to the same
  // Storage-level option (hence times(2)).
  @Test
  public void testReaderWithDecryptionKey() throws Exception {
    initializeExpectedBlob(2);
    ReadChannel channel = createMock(ReadChannel.class);
    expect(storage.getOptions()).andReturn(mockOptions);
    expect(storage.reader(BLOB_INFO.getBlobId(),
        Storage.BlobSourceOption.decryptionKey(BASE64_KEY)))
        .andReturn(channel).times(2);
    replay(storage);
    initializeBlob();
    assertSame(channel, blob.reader(BlobSourceOption.decryptionKey(BASE64_KEY)));
    assertSame(channel, blob.reader(BlobSourceOption.decryptionKey(KEY)));
  }

  // writer() returns the channel produced by storage.writer().
  @Test
  public void testWriter() throws Exception {
    initializeExpectedBlob(2);
    BlobWriteChannel channel = createMock(BlobWriteChannel.class);
    expect(storage.getOptions()).andReturn(mockOptions);
    expect(storage.writer(eq(expectedBlob))).andReturn(channel);
    replay(storage);
    initializeBlob();
    assertSame(channel, blob.writer());
  }

  // Both encryption-key overloads must map to the same BlobWriteOption.
  @Test
  public void testWriterWithEncryptionKey() throws Exception {
    initializeExpectedBlob(2);
    BlobWriteChannel channel = createMock(BlobWriteChannel.class);
    expect(storage.getOptions()).andReturn(mockOptions);
    expect(storage.writer(eq(expectedBlob), eq(BlobWriteOption.encryptionKey(BASE64_KEY))))
        .andReturn(channel).times(2);
    replay(storage);
    initializeBlob();
    assertSame(channel, blob.writer(BlobWriteOption.encryptionKey(BASE64_KEY)));
    assertSame(channel, blob.writer(BlobWriteOption.encryptionKey(KEY)));
  }

  // signUrl() forwards the duration/unit pair to storage.signUrl().
  @Test
  public void testSignUrl() throws Exception {
    initializeExpectedBlob(2);
    URL url = new URL("http://localhost:123/bla");
    expect(storage.getOptions()).andReturn(mockOptions);
    expect(storage.signUrl(expectedBlob, 100, TimeUnit.SECONDS)).andReturn(url);
    replay(storage);
    initializeBlob();
    assertEquals(url, blob.signUrl(100, TimeUnit.SECONDS));
  }
  // getAcl(entity) delegates to storage.getAcl() with the blob's id.
  @Test
  public void testGetAcl() throws Exception {
    initializeExpectedBlob(1);
    expect(storage.getOptions()).andReturn(mockOptions);
    expect(storage.getAcl(BLOB_INFO.getBlobId(), User.ofAllAuthenticatedUsers())).andReturn(ACL);
    replay(storage);
    initializeBlob();
    assertEquals(ACL, blob.getAcl(User.ofAllAuthenticatedUsers()));
  }

  // deleteAcl(entity) delegates to storage.deleteAcl() and returns its result.
  @Test
  public void testDeleteAcl() throws Exception {
    initializeExpectedBlob(1);
    expect(storage.getOptions()).andReturn(mockOptions);
    expect(storage.deleteAcl(BLOB_INFO.getBlobId(),
        User.ofAllAuthenticatedUsers())).andReturn(true);
    replay(storage);
    initializeBlob();
    assertTrue(blob.deleteAcl(User.ofAllAuthenticatedUsers()));
  }

  // createAcl(acl) returns the service's version of the ACL (with etag/id set).
  @Test
  public void testCreateAcl() throws Exception {
    initializeExpectedBlob(1);
    expect(storage.getOptions()).andReturn(mockOptions);
    Acl returnedAcl = ACL.toBuilder().setEtag("ETAG").setId("ID").build();
    expect(storage.createAcl(BLOB_INFO.getBlobId(), ACL)).andReturn(returnedAcl);
    replay(storage);
    initializeBlob();
    assertEquals(returnedAcl, blob.createAcl(ACL));
  }

  // updateAcl(acl) returns the service's version of the ACL (with etag/id set).
  @Test
  public void testUpdateAcl() throws Exception {
    initializeExpectedBlob(1);
    expect(storage.getOptions()).andReturn(mockOptions);
    Acl returnedAcl = ACL.toBuilder().setEtag("ETAG").setId("ID").build();
    expect(storage.updateAcl(BLOB_INFO.getBlobId(), ACL)).andReturn(returnedAcl);
    replay(storage);
    initializeBlob();
    assertEquals(returnedAcl, blob.updateAcl(ACL));
  }

  // listAcls() delegates to storage.listAcls() for the blob's id.
  @Test
  public void testListAcls() throws Exception {
    initializeExpectedBlob(1);
    expect(storage.getOptions()).andReturn(mockOptions);
    expect(storage.listAcls(BLOB_INFO.getBlobId())).andReturn(ACLS);
    replay(storage);
    initializeBlob();
    assertEquals(ACLS, blob.listAcls());
  }

  // toBuilder().build() must round-trip full, minimal, and directory blobs
  // (each construction/equality check consumes getOptions() calls: 6 total).
  @Test
  public void testToBuilder() {
    expect(storage.getOptions()).andReturn(mockOptions).times(6);
    replay(storage);
    Blob fullBlob = new Blob(storage, new BlobInfo.BuilderImpl(FULL_BLOB_INFO));
    assertEquals(fullBlob, fullBlob.toBuilder().build());
    Blob simpleBlob = new Blob(storage, new BlobInfo.BuilderImpl(BLOB_INFO));
    assertEquals(simpleBlob, simpleBlob.toBuilder().build());
    Blob directory = new Blob(storage, new BlobInfo.BuilderImpl(DIRECTORY_INFO));
    assertEquals(directory, directory.toBuilder().build());
  }
  @Test
  public void testBuilder() {
    // Exercises every setter on Blob.Builder and verifies each matching getter,
    // then rebuilds a directory blob and checks all unset fields read back null.
    initializeExpectedBlob(4);
    expect(storage.getOptions()).andReturn(mockOptions).times(6);
    replay(storage);
    // Full round-trip: set every field on a plain blob.
    Blob.Builder builder = new Blob.Builder(new Blob(storage, new BlobInfo.BuilderImpl(BLOB_INFO)));
    Blob blob = builder.setAcl(ACLS)
        .setComponentCount(COMPONENT_COUNT)
        .setContentType(CONTENT_TYPE)
        .setCacheControl(CACHE_CONTROL)
        .setContentDisposition(CONTENT_DISPOSITION)
        .setContentEncoding(CONTENT_ENCODING)
        .setContentLanguage(CONTENT_LANGUAGE)
        .setCrc32c(CRC32)
        .setCreateTime(CREATE_TIME)
        .setCustomerEncryption(CUSTOMER_ENCRYPTION)
        .setDeleteTime(DELETE_TIME)
        .setEtag(ETAG)
        .setGeneratedId(GENERATED_ID)
        .setMd5(MD5)
        .setMediaLink(MEDIA_LINK)
        .setMetadata(METADATA)
        .setMetageneration(META_GENERATION)
        .setOwner(OWNER)
        .setSelfLink(SELF_LINK)
        .setSize(SIZE)
        .setUpdateTime(UPDATE_TIME)
        .build();
    assertEquals("b", blob.getBucket());
    assertEquals("n", blob.getName());
    assertEquals(ACLS, blob.getAcl());
    assertEquals(COMPONENT_COUNT, blob.getComponentCount());
    assertEquals(CONTENT_TYPE, blob.getContentType());
    assertEquals(CACHE_CONTROL, blob.getCacheControl());
    assertEquals(CONTENT_DISPOSITION, blob.getContentDisposition());
    assertEquals(CONTENT_ENCODING, blob.getContentEncoding());
    assertEquals(CONTENT_LANGUAGE, blob.getContentLanguage());
    assertEquals(CRC32, blob.getCrc32c());
    assertEquals(CREATE_TIME, blob.getCreateTime());
    assertEquals(CUSTOMER_ENCRYPTION, blob.getCustomerEncryption());
    assertEquals(DELETE_TIME, blob.getDeleteTime());
    assertEquals(ETAG, blob.getEtag());
    assertEquals(GENERATED_ID, blob.getGeneratedId());
    assertEquals(MD5, blob.getMd5());
    assertEquals(MEDIA_LINK, blob.getMediaLink());
    assertEquals(METADATA, blob.getMetadata());
    assertEquals(META_GENERATION, blob.getMetageneration());
    assertEquals(OWNER, blob.getOwner());
    assertEquals(SELF_LINK, blob.getSelfLink());
    assertEquals(SIZE, blob.getSize());
    assertEquals(UPDATE_TIME, blob.getUpdateTime());
    assertEquals(storage.getOptions(), blob.getStorage().getOptions());
    assertFalse(blob.isDirectory());
    // Directory case: only blobId/isDirectory/size are set; everything else must be null.
    builder = new Blob.Builder(new Blob(storage, new BlobInfo.BuilderImpl(DIRECTORY_INFO)));
    blob = builder.setBlobId(BlobId.of("b", "n/"))
        .setIsDirectory(true)
        .setSize(0L)
        .build();
    assertEquals("b", blob.getBucket());
    assertEquals("n/", blob.getName());
    assertNull(blob.getAcl());
    assertNull(blob.getComponentCount());
    assertNull(blob.getContentType());
    assertNull(blob.getCacheControl());
    assertNull(blob.getContentDisposition());
    assertNull(blob.getContentEncoding());
    assertNull(blob.getContentLanguage());
    assertNull(blob.getCrc32c());
    assertNull(blob.getCreateTime());
    assertNull(blob.getCustomerEncryption());
    assertNull(blob.getDeleteTime());
    assertNull(blob.getEtag());
    assertNull(blob.getGeneratedId());
    assertNull(blob.getMd5());
    assertNull(blob.getMediaLink());
    assertNull(blob.getMetadata());
    assertNull(blob.getMetageneration());
    assertNull(blob.getOwner());
    assertNull(blob.getSelfLink());
    assertEquals(0L, (long) blob.getSize());
    assertNull(blob.getUpdateTime());
    assertTrue(blob.isDirectory());
  }
}
| |
package com.game.dhanraj.myownalexa;
import android.annotation.SuppressLint;
import android.content.DialogInterface;
import android.content.Intent;
import android.content.SharedPreferences;
import android.support.v4.content.ContextCompat;
import android.support.v7.app.AlertDialog;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.support.v7.widget.CardView;
import android.support.v7.widget.Toolbar;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.widget.TextView;
import com.game.dhanraj.myownalexa.sharedpref.Util;
import com.mikepenz.iconics.view.IconicsImageView;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import static com.game.dhanraj.myownalexa.Constants.BASE_THEME;
import static com.game.dhanraj.myownalexa.Constants.BASE_THEME_INTEGER;
import static com.game.dhanraj.myownalexa.Constants.DARK_THEME;
import static com.game.dhanraj.myownalexa.Constants.LIGHT_THEME;
/**
 * Settings screen: lets the user open the ringtone list and switch between the
 * light and dark base theme. The chosen theme is kept in SharedPreferences under
 * BASE_THEME (background color int) and BASE_THEME_INTEGER (LIGHT_THEME/DARK_THEME).
 */
public class SettingsActivity extends AppCompatActivity {

    private Toolbar toolbar;
    private SharedPreferences sharedPreferences;
    // Current theme background color (ARGB int) and its LIGHT_THEME/DARK_THEME id.
    public int theme, theme_integer;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_settings);

        toolbar = (Toolbar) findViewById(R.id.toolbar);
        setSupportActionBar(toolbar);
        getSupportActionBar().setTitle("Settings");
        getSupportActionBar().setDisplayHomeAsUpEnabled(true);
        getSupportActionBar().setHomeButtonEnabled(true);

        // Restore the persisted theme; defaults to the light background color
        // (default integer 1 — presumably LIGHT_THEME; confirm against Constants).
        sharedPreferences = Util.getPrefernces(SettingsActivity.this);
        theme = sharedPreferences.getInt(BASE_THEME,
                ContextCompat.getColor(SettingsActivity.this, R.color.light_background));
        theme_integer = sharedPreferences.getInt(BASE_THEME_INTEGER, 1);
        findViewById(R.id.settings_layout).setBackgroundColor(theme);

        findViewById(R.id.ll_alarm).setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                startActivity(new Intent(SettingsActivity.this, RingtonesActivity.class));
            }
        });
        findViewById(R.id.ll_theme).setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                baseThemeDialog();
            }
        });
    }

    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        // No menu items; returning true keeps the (empty) menu displayed.
        return true;
    }

    @Override
    public boolean onOptionsItemSelected(MenuItem item) {
        switch (item.getItemId()) {
            case android.R.id.home:
                // Toolbar "up" arrow behaves like the back button.
                onBackPressed();
                return true;
            default:
                return super.onOptionsItemSelected(item);
        }
    }

    @Override
    public void onBackPressed() {
        super.onBackPressed();
        finish();
    }

    /**
     * Shows the light/dark base-theme chooser dialog. Tapping a row updates the
     * in-memory selection immediately; pressing OK persists it to SharedPreferences
     * and re-applies the background, while Cancel leaves preferences untouched.
     */
    private void baseThemeDialog() {
        final AlertDialog.Builder dialogBuilder =
                new AlertDialog.Builder(SettingsActivity.this, getDialogStyle());
        final View dialogLayout = getLayoutInflater().inflate(R.layout.dialog_basic_theme, null);
        final TextView dialogTitle = (TextView) dialogLayout.findViewById(R.id.basic_theme_title);
        final CardView dialogCardView = (CardView) dialogLayout.findViewById(R.id.basic_theme_card);
        final IconicsImageView themeIconWhite = (IconicsImageView) dialogLayout.findViewById(R.id.white_basic_theme_icon);
        final IconicsImageView themeIconDark = (IconicsImageView) dialogLayout.findViewById(R.id.dark_basic_theme_icon);
        final IconicsImageView whiteSelect = (IconicsImageView) dialogLayout.findViewById(R.id.white_basic_theme_select);
        final IconicsImageView darkSelect = (IconicsImageView) dialogLayout.findViewById(R.id.dark_basic_theme_select);

        themeIconWhite.setIcon("gmd-invert-colors");
        themeIconDark.setIcon("gmd-invert-colors");
        whiteSelect.setIcon("gmd-done");
        darkSelect.setIcon("gmd-done");

        // Show the check mark next to the currently active theme.
        switch (getBaseThemeInteger()) {
            case LIGHT_THEME:
                whiteSelect.setVisibility(View.VISIBLE);
                darkSelect.setVisibility(View.GONE);
                break;
            case DARK_THEME:
                whiteSelect.setVisibility(View.GONE);
                darkSelect.setVisibility(View.VISIBLE);
                break;
        }

        // BUG FIX: setCardBackgroundColor() expects a resolved @ColorInt; the old
        // code passed the raw resource id (masked by @SuppressLint("ResourceAsColor")).
        dialogCardView.setCardBackgroundColor(
                ContextCompat.getColor(SettingsActivity.this, R.color.cardview_light_background));

        dialogLayout.findViewById(R.id.ll_white_basic_theme).setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                whiteSelect.setVisibility(View.VISIBLE);
                darkSelect.setVisibility(View.GONE);
                setBaseTheme(LIGHT_THEME, false);
            }
        });
        dialogLayout.findViewById(R.id.ll_dark_basic_theme).setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                whiteSelect.setVisibility(View.GONE);
                darkSelect.setVisibility(View.VISIBLE);
                setBaseTheme(DARK_THEME, false);
            }
        });

        // setView() was previously called twice with the same layout; once suffices.
        dialogBuilder.setView(dialogLayout);
        dialogBuilder.setPositiveButton(getString(R.string.ok_action).toUpperCase(),
                new DialogInterface.OnClickListener() {
                    @Override
                    public void onClick(DialogInterface dialog, int which) {
                        // Persist the chosen theme and re-apply it to this screen.
                        SharedPreferences.Editor preferences = sharedPreferences.edit();
                        preferences.putInt(BASE_THEME, getBaseTheme());
                        preferences.putInt(BASE_THEME_INTEGER, getBaseThemeInteger());
                        preferences.apply();
                        findViewById(R.id.settings_layout).setBackgroundColor(theme);
                    }
                });
        dialogBuilder.setNegativeButton(getString(R.string.cancel).toUpperCase(),
                new DialogInterface.OnClickListener() {
                    @Override
                    public void onClick(DialogInterface dialog, int which) {
                        // Cancel: keep in-memory selection un-persisted.
                    }
                });
        dialogBuilder.create().show();
    }

    /** @return the LIGHT_THEME/DARK_THEME id currently selected. */
    private int getBaseThemeInteger() {
        return theme_integer;
    }

    /** @return the background color (ARGB int) currently selected. */
    private int getBaseTheme() {
        return theme;
    }

    /** Maps the current theme color to the matching AlertDialog style resource. */
    private int getDialogStyle() {
        switch (getBaseTheme()) {
            case R.color.dark_background:
                return R.style.AlertDialog_Dark;
            case R.color.light_background:
            default:
                return R.style.AlertDialog_Light;
        }
    }

    /**
     * Updates the in-memory theme fields from a LIGHT_THEME/DARK_THEME id.
     *
     * @param theme     LIGHT_THEME or DARK_THEME
     * @param permanent unused; persistence happens in the dialog's OK handler
     */
    private void setBaseTheme(int theme, boolean permanent) {
        if (permanent) {
            // TODO: implement permanent (immediate) persistence.
        } else {
            switch (theme) {
                case LIGHT_THEME:
                    this.theme = ContextCompat.getColor(SettingsActivity.this, R.color.light_background);
                    theme_integer = LIGHT_THEME;
                    break;
                case DARK_THEME:
                    this.theme = ContextCompat.getColor(SettingsActivity.this, R.color.dark_background);
                    theme_integer = DARK_THEME;
                    break;
            }
        }
    }
}
| |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.apex.malhar.lib.io.fs;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Queue;
import javax.validation.constraints.NotNull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.apex.malhar.lib.io.block.BlockWriter;
import org.apache.apex.malhar.lib.io.fs.Synchronizer.StitchBlock;
import org.apache.apex.malhar.lib.io.fs.Synchronizer.StitchedFileMetaData;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import com.google.common.collect.Queues;
import com.datatorrent.api.Context;
import com.datatorrent.api.Context.DAGContext;
import com.datatorrent.api.DefaultOutputPort;
import com.datatorrent.api.annotation.OutputPortFieldAnnotation;
/**
* This is generic File Stitcher which can be used to merge data from one or
* more files into single stitched file. StitchedFileMetaData defines
* constituents of the stitched file.
*
* This class uses Reconciler to
*
* @since 3.4.0
*/
/**
 * This is generic File Stitcher which can be used to merge data from one or
 * more files into single stitched file. StitchedFileMetaData defines
 * constituents of the stitched file.
 *
 * This class uses Reconciler to write output files once their constituent
 * blocks are committed.
 *
 * @since 3.4.0
 */
public class FileStitcher<T extends StitchedFileMetaData> extends AbstractReconciler<T, T>
{
  /**
   * Filesystem on which application is running; used to read block files.
   */
  protected transient FileSystem appFS;

  /**
   * Destination file system; stitched output files are written here.
   */
  protected transient FileSystem outputFS;

  /**
   * Path for destination directory
   */
  @NotNull
  protected String filePath;

  /**
   * Path for blocks directory; derived from APPLICATION_PATH in setup().
   */
  protected transient String blocksDirectoryPath;

  /**
   * Directory under application directory where blocks gets stored
   */
  private String blocksDirectory = BlockWriter.DEFAULT_BLOCKS_DIR;

  /**
   * Suffix for in-progress (temporary) output files. NOTE(review): "EXTENTION"
   * is a typo for "EXTENSION", but the constant is protected (visible to
   * subclasses) so the name is kept for compatibility.
   */
  protected static final String PART_FILE_EXTENTION = "._COPYING_";

  /**
   * Queue maintaining successful files
   */
  protected Queue<T> successfulFiles = Queues.newLinkedBlockingQueue();

  /**
   * Queue maintaining skipped files
   */
  protected Queue<T> skippedFiles = Queues.newLinkedBlockingQueue();

  /**
   * Queue maintaining failed files
   */
  protected Queue<T> failedFiles = Queues.newLinkedBlockingQueue();

  /**
   * Output port for emitting completed stitched files metadata
   */
  @OutputPortFieldAnnotation(optional = true)
  public final transient DefaultOutputPort<T> completedFilesMetaOutput = new DefaultOutputPort<T>();

  /**
   * Whether checksums should be written on the destination file system.
   */
  private boolean writeChecksum = true;

  /**
   * Temporary output path of the file currently being stitched; assigned in
   * mergeBlocks() and consumed by writeTempOutputFile()/moveToFinalFile().
   */
  protected transient Path tempOutFilePath;

  /**
   * Initializes both file systems. The output FS is opened first; if the
   * application FS fails to open, the output FS is closed again so no handle
   * leaks.
   */
  @Override
  public void setup(Context.OperatorContext context)
  {
    blocksDirectoryPath = context.getValue(DAGContext.APPLICATION_PATH) + Path.SEPARATOR + blocksDirectory;

    try {
      outputFS = getOutputFSInstance();
      outputFS.setWriteChecksum(writeChecksum);
    } catch (IOException ex) {
      throw new RuntimeException("Exception in getting output file system.", ex);
    }
    try {
      appFS = getAppFSInstance();
    } catch (IOException ex) {
      // Don't leak the already-opened output file system on failure.
      try {
        outputFS.close();
      } catch (IOException e) {
        throw new RuntimeException("Exception in closing output file system.", e);
      }
      throw new RuntimeException("Exception in getting application file system.", ex);
    }

    super.setup(context); // Calling it at the end as the reconciler thread uses resources allocated above.
  }

  /*
   * Calls super.endWindow() and sets counters
   * @see com.datatorrent.api.BaseOperator#endWindow()
   */
  @Override
  public void endWindow()
  {
    T stitchedFileMetaData;
    int size = doneTuples.size();
    for (int i = 0; i < size; i++) {
      stitchedFileMetaData = doneTuples.peek();
      // If a tuple is present in doneTuples, it has to be also present in successful/failed/skipped
      // as processCommittedData adds tuple in successful/failed/skipped
      // and then reconciler thread add that in doneTuples.
      // Queue.remove(Object) returns whether the element was present, so a
      // separate contains() pass (previously done here) is unnecessary.
      if (successfulFiles.remove(stitchedFileMetaData)) {
        LOG.debug("File copy successful: {}", stitchedFileMetaData.getStitchedFileRelativePath());
      } else if (skippedFiles.remove(stitchedFileMetaData)) {
        LOG.debug("File copy skipped: {}", stitchedFileMetaData.getStitchedFileRelativePath());
      } else if (failedFiles.remove(stitchedFileMetaData)) {
        LOG.debug("File copy failed: {}", stitchedFileMetaData.getStitchedFileRelativePath());
      } else {
        throw new RuntimeException("Tuple present in doneTuples but not in sucessful /skipped/ failed files: "
            + stitchedFileMetaData.getStitchedFileRelativePath());
      }
      completedFilesMetaOutput.emit(stitchedFileMetaData);
      committedTuples.remove(stitchedFileMetaData);
      doneTuples.poll();
    }
  }

  /**
   *
   * @return Application FileSystem instance
   * @throws IOException
   */
  protected FileSystem getAppFSInstance() throws IOException
  {
    return FileSystem.newInstance((new Path(blocksDirectoryPath)).toUri(), new Configuration());
  }

  /**
   *
   * @return Destination FileSystem instance
   * @throws IOException
   */
  protected FileSystem getOutputFSInstance() throws IOException
  {
    return FileSystem.newInstance((new Path(filePath)).toUri(), new Configuration());
  }

  /**
   * Closes both file systems. Both close() attempts always run; if either
   * fails, the first failure is rethrown as the cause (previously the cause
   * was discarded and a bare RuntimeException thrown).
   */
  @Override
  public void teardown()
  {
    super.teardown();

    IOException firstFailure = null;
    try {
      if (appFS != null) {
        appFS.close();
        appFS = null;
      }
    } catch (IOException e) {
      firstFailure = e;
    }
    try {
      if (outputFS != null) {
        outputFS.close();
        outputFS = null;
      }
    } catch (IOException e) {
      if (firstFailure == null) {
        firstFailure = e;
      }
    }
    if (firstFailure != null) {
      throw new RuntimeException("Exception while closing file systems.", firstFailure);
    }
  }

  /**
   * Enques incoming data for for processing
   */
  @Override
  protected void processTuple(T stitchedFileMetaData)
  {
    LOG.debug("stitchedFileMetaData: {}", stitchedFileMetaData);
    enqueueForProcessing(stitchedFileMetaData);
  }

  /**
   * Stitches the output file when all blocks for that file are commited
   */
  @Override
  protected void processCommittedData(T stitchedFileMetaData)
  {
    try {
      mergeOutputFile(stitchedFileMetaData);
    } catch (IOException e) {
      throw new RuntimeException("Unable to merge file: " + stitchedFileMetaData.getStitchedFileRelativePath(), e);
    }
  }

  /**
   * Read data from block files and write to output file. Information about
   * which block files should be read is specified in outFileMetadata
   *
   * @param stitchedFileMetaData
   * @throws IOException
   */
  protected void mergeOutputFile(T stitchedFileMetaData) throws IOException
  {
    mergeBlocks(stitchedFileMetaData);
    successfulFiles.add(stitchedFileMetaData);
    LOG.debug("Completed processing file: {} ", stitchedFileMetaData.getStitchedFileRelativePath());
  }

  /**
   * Deletes stale temporary ("vagrant") part files for the destination, writes
   * all blocks to a fresh temp file and renames it to the final destination.
   *
   * @param stitchedFileMetaData
   * @throws IOException
   */
  protected void mergeBlocks(T stitchedFileMetaData) throws IOException
  {
    //when writing to tmp files there can be vagrant tmp files which we have to clean
    final Path dst = new Path(filePath, stitchedFileMetaData.getStitchedFileRelativePath());
    PathFilter tempFileFilter = new PathFilter()
    {
      @Override
      public boolean accept(Path path)
      {
        return path.getName().startsWith(dst.getName()) && path.getName().endsWith(PART_FILE_EXTENTION);
      }
    };
    if (outputFS.exists(dst.getParent())) {
      FileStatus[] statuses = outputFS.listStatus(dst.getParent(), tempFileFilter);
      for (FileStatus status : statuses) {
        String statusName = status.getPath().getName();
        LOG.debug("deleting vagrant file {}", statusName);
        outputFS.delete(status.getPath(), true);
      }
    }
    // Timestamp suffix keeps concurrent/retried attempts from colliding.
    tempOutFilePath = new Path(filePath,
        stitchedFileMetaData.getStitchedFileRelativePath() + '.' + System.currentTimeMillis() + PART_FILE_EXTENTION);
    try {
      writeTempOutputFile(stitchedFileMetaData);
      moveToFinalFile(stitchedFileMetaData);
    } catch (BlockNotFoundException e) {
      LOG.warn("Block file {} not found. Assuming recovery mode for file {}. ", e.getBlockPath(),
          stitchedFileMetaData.getStitchedFileRelativePath());
      //Remove temp output file
      outputFS.delete(tempOutFilePath, false);
    }
  }

  /**
   * Writing all Stitch blocks to temporary file.
   *
   * NOTE(review): the returned stream has already been closed in the finally
   * block; callers must not write to it.
   *
   * @param stitchedFileMetaData
   * @throws IOException
   * @throws BlockNotFoundException
   */
  protected OutputStream writeTempOutputFile(T stitchedFileMetaData) throws IOException, BlockNotFoundException
  {
    OutputStream outputStream = getOutputStream(tempOutFilePath);
    try {
      for (StitchBlock outputBlock : stitchedFileMetaData.getStitchBlocksList()) {
        outputBlock.writeTo(appFS, blocksDirectoryPath, outputStream);
      }
    } finally {
      outputStream.close();
    }
    return outputStream;
  }

  /**
   * @param partFilePath temporary part-file path
   * @return stream on the destination file system (overwrites if present)
   */
  protected OutputStream getOutputStream(Path partFilePath) throws IOException
  {
    return outputFS.create(partFilePath);
  }

  /**
   * Moving temp output file to final file
   *
   * @param stitchedFileMetaData
   * @throws IOException
   */
  protected void moveToFinalFile(T stitchedFileMetaData) throws IOException
  {
    Path destination = new Path(filePath, stitchedFileMetaData.getStitchedFileRelativePath());
    moveToFinalFile(tempOutFilePath, destination);
  }

  /**
   * Moving temp output file to final file
   *
   * @param tempOutFilePath
   *          Temporary output file
   * @param destination
   *          Destination directory path
   * @throws IOException
   */
  protected void moveToFinalFile(Path tempOutFilePath, Path destination) throws IOException
  {
    Path src = Path.getPathWithoutSchemeAndAuthority(tempOutFilePath);
    Path dst = Path.getPathWithoutSchemeAndAuthority(destination);

    boolean moveSuccessful = false;
    if (!outputFS.exists(dst.getParent())) {
      outputFS.mkdirs(dst.getParent());
    }
    // Replace any pre-existing destination so rename() can succeed.
    if (outputFS.exists(dst)) {
      outputFS.delete(dst, false);
    }
    moveSuccessful = outputFS.rename(src, dst);

    if (moveSuccessful) {
      LOG.debug("File {} moved successfully to destination folder.", dst);
    } else {
      throw new RuntimeException("Unable to move file from " + src + " to " + dst);
    }
  }

  /**
   * Directory under application directory where blocks gets stored
   * @return blocks directory
   */
  public String getBlocksDirectory()
  {
    return blocksDirectory;
  }

  /**
   * Directory under application directory where blocks gets stored
   * @param blocksDirectory blocks directory
   */
  public void setBlocksDirectory(String blocksDirectory)
  {
    this.blocksDirectory = blocksDirectory;
  }

  /**
   * Path for destination directory
   * @return path for destination directory
   */
  public String getFilePath()
  {
    return filePath;
  }

  /**
   * Path for destination directory
   * @param filePath path for destination directory
   */
  public void setFilePath(String filePath)
  {
    this.filePath = filePath;
  }

  /**
   * Flag to control writing checksum
   * @return write checksum flag status
   */
  public boolean isWriteChecksum()
  {
    return writeChecksum;
  }

  /**
   * Flag to control writing checksum
   * @param writeChecksum write checksum flag status
   */
  public void setWriteChecksum(boolean writeChecksum)
  {
    this.writeChecksum = writeChecksum;
  }

  protected static final Logger LOG = LoggerFactory.getLogger(FileStitcher.class);

  /**
   * Defining new type of exception for missing block. Currently, methods
   * catching this exception assumes that block is missing because of explicit
   * deletion by File output module (for completed files)
   *
   */
  public static class BlockNotFoundException extends Exception
  {
    private static final long serialVersionUID = -7409415466834194798L;

    // Path of the missing block file.
    Path blockPath;

    /**
     * @param blockPath
     */
    public BlockNotFoundException(Path blockPath)
    {
      super();
      this.blockPath = blockPath;
    }

    /**
     * @return the blockPath
     */
    public Path getBlockPath()
    {
      return blockPath;
    }
  }
}
| |
package com.SixClawWorm.application;
import android.app.*;
import android.util.*;
import android.widget.*;
import android.content.*;
import android.bluetooth.*;
import android.os.*;
import com.SixClawWorm.utils.*;
import android.view.*;
public class BluetoothChat extends Activity
{
private static final boolean D = true;
public static final String DEVICE_NAME = "device_name";
public static final int MESSAGE_DEVICE_NAME = 4;
public static final int MESSAGE_READ = 2;
public static final int MESSAGE_STATE_CHANGE = 1;
public static final int MESSAGE_TOAST = 5;
public static final int MESSAGE_WRITE = 3;
private static final int REQUEST_CONNECT_DEVICE_INSECURE = 2;
private static final int REQUEST_CONNECT_DEVICE_SECURE = 1;
private static final int REQUEST_ENABLE_BT = 3;
private static final String TAG = "BluetoothChat";
public static final String TOAST = "toast";
private static BluetoothAdapter mBluetoothAdapter;
public static BluetoothChatService mChatService;
private Button Mlayer;
private Button StartBtn;
private String mConnectedDeviceName;
private ArrayAdapter<String> mConversationArrayAdapter;
private ListView mConversationView;
private final Handler mHandler;
private EditText mOutEditText;
private StringBuffer mOutStringBuffer;
private Button mSendButton;
private TextView mTitle;
private TextView$OnEditorActionListener mWriteListener;
private long maxMemory;
private Button rssiNum;
static {
BluetoothChat.mBluetoothAdapter = null;
BluetoothChat.mChatService = null;
}
public BluetoothChat() {
super();
this.mConnectedDeviceName = null;
this.mWriteListener = (TextView$OnEditorActionListener)new TextView$OnEditorActionListener() {
public boolean onEditorAction(final TextView textView, final int n, final KeyEvent keyEvent) {
if (n == 0 && keyEvent.getAction() == 1) {
textView.getText().toString();
}
Log.i("BluetoothChat", "END onEditorAction");
return true;
}
};
this.mHandler = new Handler() {
public void handleMessage(final Message message) {
Label_0040: {
switch (message.what) {
case 1: {
Log.i("BluetoothChat", "MESSAGE_STATE_CHANGE: " + message.arg1);
switch (message.arg1) {
case 1:
case 2: {
break Label_0040;
}
default: {
return;
}
case 0: {
ToastHint.show((Context)BluetoothChat.this, "The connection fails");
return;
}
case 3: {
ProgressDialogHint.Dismiss();
ToastHint.show((Context)BluetoothChat.this, "The connection is successful");
Param.ConntectSucceed = true;
return;
}
}
break;
}
case 3: {
final byte[] array = (byte[])message.obj;
return;
}
case 2: {
final byte[] array2 = (byte[])message.obj;
return;
}
case 4: {
BluetoothChat.access$0(BluetoothChat.this, message.getData().getString("device_name"));
Toast.makeText(BluetoothChat.this.getApplicationContext(), (CharSequence)("Connected to " + BluetoothChat.this.mConnectedDeviceName), 0).show();
return;
}
case 5: {
Toast.makeText(BluetoothChat.this.getApplicationContext(), (CharSequence)message.getData().getString("toast"), 0).show();
return;
}
}
}
}
};
}
static /* synthetic */ void access$0(final BluetoothChat bluetoothChat, final String mConnectedDeviceName) {
bluetoothChat.mConnectedDeviceName = mConnectedDeviceName;
}
public static void connectDevice(final Intent intent, final boolean b) {
final BluetoothDevice remoteDevice = BluetoothChat.mBluetoothAdapter.getRemoteDevice(intent.getExtras().getString(DeviceListActivity.EXTRA_DEVICE_ADDRESS));
if (remoteDevice != null && BluetoothChat.mChatService != null) {
BluetoothChat.mChatService.connect(remoteDevice, b);
}
}
private void ensureDiscoverable() {
Log.d("BluetoothChat", "ensure discoverable");
if (BluetoothChat.mBluetoothAdapter.getScanMode() != 23) {
final Intent intent = new Intent("android.bluetooth.adapter.action.REQUEST_DISCOVERABLE");
intent.putExtra("android.bluetooth.adapter.extra.DISCOVERABLE_DURATION", 300);
this.startActivity(intent);
}
}
private void sendMessage(final byte[] array) {
if (BluetoothChat.mChatService.getState() != 3) {
Toast.makeText((Context)this, 2130968578, 0).show();
}
else if (array.length > 0) {
BluetoothChat.mChatService.write(array);
this.mOutStringBuffer.setLength(0);
}
}
private void setupChat() {
Log.d("BluetoothChat", "setupChat()");
BluetoothChat.mChatService = new BluetoothChatService((Context)this, this.mHandler);
this.mOutStringBuffer = new StringBuffer("");
}
private byte[] toStringHex(final String s) {
final byte[] array = new byte[s.length() / 2];
int i = 0;
while (i < array.length) {
final int n = i * 2;
final int n2 = 2 + i * 2;
try {
array[i] = (byte)(0xFF & Integer.parseInt(s.substring(n, n2), 16));
return array;
}
catch (Exception ex) {
ex.printStackTrace();
++i;
}
}
return array;
}
public void onActivityResult(final int n, final int n2, final Intent intent) {
Log.d("BluetoothChat", "onActivityResult " + n2);
switch (n) {
case 1: {
if (n2 == -1) {
connectDevice(intent, true);
return;
}
break;
}
case 2: {
if (n2 == -1) {
connectDevice(intent, false);
return;
}
break;
}
case 3: {
if (n2 == -1) {
this.setupChat();
return;
}
Log.d("BluetoothChat", "BT not enabled");
Toast.makeText((Context)this, 2130968579, 0).show();
this.finish();
}
}
}
public void onCreate(final Bundle bundle) {
super.onCreate(bundle);
this.setContentView(2130903041);
ExitApplication.getInstance().addActivity(this);
(this.Mlayer = (Button)this.findViewById(2131099649)).setOnClickListener((View$OnClickListener)new View$OnClickListener() {
public void onClick(final View view) {
BluetoothChat.this.startActivity(new Intent((Context)BluetoothChat.this, (Class)inTrodutionActivity.class));
}
});
BluetoothChat.mBluetoothAdapter = BluetoothAdapter.getDefaultAdapter();
if (BluetoothChat.mBluetoothAdapter == null) {
Toast.makeText((Context)this, (CharSequence)"Bluetooth is not available", 1).show();
this.finish();
return;
}
(this.StartBtn = (Button)this.findViewById(2131099655)).setOnClickListener((View$OnClickListener)new View$OnClickListener() {
public void onClick(final View view) {
BluetoothChat.this.startActivity(new Intent((Context)BluetoothChat.this, (Class)MenuActivity.class));
}
});
}
public boolean onCreateOptionsMenu(final Menu menu) {
this.getMenuInflater().inflate(2131034112, menu);
return true;
}
public void onDestroy() {
super.onDestroy();
if (BluetoothChat.mChatService != null) {
BluetoothChat.mChatService.stop();
}
Log.e("BluetoothChat", "--- ON DESTROY ---");
}
public boolean onKeyDown(final int n, final KeyEvent keyEvent) {
if (n == 4) {
ExitApplication.getInstance().exit((Context)this);
}
return super.onKeyDown(n, keyEvent);
}
public boolean onOptionsItemSelected(final MenuItem menuItem) {
switch (menuItem.getItemId()) {
default: {
return false;
}
case 2131099672: {
this.startActivityForResult(new Intent((Context)this, (Class)DeviceListActivity.class), 2);
return true;
}
case 2131099673: {
this.ensureDiscoverable();
return true;
}
}
}
public void onPause() {
synchronized (this) {
super.onPause();
Log.e("BluetoothChat", "- ON PAUSE -");
}
}
public void onResume() {
synchronized (this) {
super.onResume();
Log.e("BluetoothChat", "+ ON RESUME +");
if (BluetoothChat.mChatService != null) {
if (BluetoothChat.mChatService.getState() == 0) {
BluetoothChat.mChatService.start();
}
else {
BluetoothChat.mChatService.getState();
}
Param.ChatService = BluetoothChat.mChatService;
}
}
}
public void onStart() {
super.onStart();
if (!BluetoothChat.mBluetoothAdapter.isEnabled()) {
this.startActivityForResult(new Intent("android.bluetooth.adapter.action.REQUEST_ENABLE"), 3);
}
else if (BluetoothChat.mChatService == null) {
this.setupChat();
}
}
public void onStop() {
super.onStop();
Log.e("BluetoothChat", "-- ON STOP --");
}
}
| |
//////////////////////////////////////////////////////////////////////////////////////////
//
// Implementation of the Blueprints Interface for ArangoDB by triAGENS GmbH Cologne.
//
// Copyright triAGENS GmbH Cologne.
//
//////////////////////////////////////////////////////////////////////////////////////////
package com.arangodb.blueprints;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.Vector;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import com.arangodb.ArangoException;
import com.arangodb.blueprints.client.ArangoDBConfiguration;
import com.arangodb.blueprints.client.ArangoDBException;
import com.arangodb.blueprints.client.ArangoDBIndex;
import com.arangodb.blueprints.client.ArangoDBSimpleGraph;
import com.arangodb.blueprints.client.ArangoDBSimpleGraphClient;
import com.arangodb.blueprints.utils.ArangoDBUtil;
import com.arangodb.entity.EdgeDefinitionEntity;
import com.arangodb.entity.GraphEntity;
import com.arangodb.entity.IndexType;
import com.tinkerpop.blueprints.Edge;
import com.tinkerpop.blueprints.Element;
import com.tinkerpop.blueprints.Features;
import com.tinkerpop.blueprints.Graph;
import com.tinkerpop.blueprints.GraphQuery;
import com.tinkerpop.blueprints.KeyIndexableGraph;
import com.tinkerpop.blueprints.MetaGraph;
import com.tinkerpop.blueprints.Parameter;
import com.tinkerpop.blueprints.Vertex;
import com.tinkerpop.blueprints.util.ExceptionFactory;
import com.tinkerpop.blueprints.util.StringFactory;
/**
* The ArangoDB graph class
*
* @author Achim Brandt (http://www.triagens.de)
* @author Johannes Gocke (http://www.triagens.de)
* @author Guido Schwab (http://www.triagens.de)
*/
public class ArangoDBGraph implements Graph, MetaGraph<ArangoDBSimpleGraph>, KeyIndexableGraph {
	// Blueprints feature matrix advertised by this ArangoDB adapter; queried by
	// TinkerPop tooling and the test suite via getFeatures().
	private static final Features FEATURES = new Features();

	static {
		FEATURES.supportsDuplicateEdges = true;
		FEATURES.supportsSelfLoops = true;
		FEATURES.isPersistent = true;
		FEATURES.supportsVertexIteration = true;
		FEATURES.supportsEdgeIteration = true;
		FEATURES.supportsVertexIndex = false;
		FEATURES.supportsEdgeIndex = false;
		FEATURES.ignoresSuppliedIds = false;
		FEATURES.supportsTransactions = false;
		// Key indices are supported, generic manual indices are not.
		FEATURES.supportsEdgeKeyIndex = true;
		FEATURES.supportsVertexKeyIndex = true;
		FEATURES.supportsKeyIndices = true;
		FEATURES.isWrapper = true;
		FEATURES.supportsIndices = false;
		FEATURES.supportsEdgeRetrieval = true;
		FEATURES.supportsVertexProperties = true;
		FEATURES.supportsEdgeProperties = true;

		// Property-type flags. NOTE(review): the link below references the
		// OrientDB wiki and appears to have been inherited from the OrientDB
		// Blueprints adapter:
		// http://code.google.com/p/orient/wiki/Types
		FEATURES.supportsSerializableObjectProperty = true;
		FEATURES.supportsBooleanProperty = true;
		FEATURES.supportsDoubleProperty = true;
		FEATURES.supportsFloatProperty = true;
		FEATURES.supportsIntegerProperty = true;
		FEATURES.supportsPrimitiveArrayProperty = true;
		FEATURES.supportsUniformListProperty = true;
		FEATURES.supportsMixedListProperty = true;
		FEATURES.supportsLongProperty = true;
		FEATURES.supportsMapProperty = true;
		FEATURES.supportsStringProperty = true;

		FEATURES.supportsThreadedTransactions = false;
		FEATURES.supportsThreadIsolatedTransactions = false;
	}
	/**
	 * ArangoDBSimpleGraph; the underlying graph handle returned by getRawGraph().
	 */
	private ArangoDBSimpleGraph simpleGraph = null;

	/**
	 * A ArangoDBSimpleGraphClient to handle the connection to the Database
	 */
	private ArangoDBSimpleGraphClient client = null;

	/**
	 * Creates a Graph (simple configuration)
	 *
	 * @param host
	 *            the ArangoDB host name
	 * @param port
	 *            the ArangoDB port
	 * @param name
	 *            the name of the graph
	 * @param verticesCollectionName
	 *            the name of the vertices collection
	 * @param edgesCollectionName
	 *            the name of the edges collection
	 *
	 * @throws ArangoDBGraphException
	 *             if the graph could not be created
	 */
	public ArangoDBGraph(String host, int port, String name, String verticesCollectionName, String edgesCollectionName)
			throws ArangoDBGraphException {
		// Delegates to the full constructor with a configuration built from host/port.
		this(new ArangoDBConfiguration(host, port), name, verticesCollectionName, edgesCollectionName);
	}
/**
* Creates a Graph
*
* @param configuration
* an ArangoDB configuration object
* @param name
* the name of the graph
* @param verticesCollectionName
* the name of the vertices collection
* @param edgesCollectionName
* the name of the edges collection
*
* @throws ArangoDBGraphException
* if the graph could not be created
*/
public ArangoDBGraph(ArangoDBConfiguration configuration, String name, String verticesCollectionName,
String edgesCollectionName) throws ArangoDBGraphException {
if (StringUtils.isBlank(name)) {
throw new ArangoDBGraphException("graph name must not be null.");
}
if (StringUtils.isBlank(verticesCollectionName)) {
throw new ArangoDBGraphException("vertex collection name must not be null.");
}
if (StringUtils.isBlank(edgesCollectionName)) {
throw new ArangoDBGraphException("edge collection name must not be null.");
}
client = new ArangoDBSimpleGraphClient(configuration);
try {
GraphEntity graph = client.getGraph(name);
if (graph != null) {
boolean error = false;
List<EdgeDefinitionEntity> edgeDefinitions = graph.getEdgeDefinitions();
if (edgeDefinitions.size() != 1 || CollectionUtils.isNotEmpty(graph.getOrphanCollections())) {
error = true;
} else {
EdgeDefinitionEntity edgeDefinitionEntity = edgeDefinitions.get(0);
if (!edgesCollectionName.equals(edgeDefinitionEntity.getCollection())
|| edgeDefinitionEntity.getFrom().size() != 1 || edgeDefinitionEntity.getTo().size() != 1
|| !verticesCollectionName.equals(edgeDefinitionEntity.getFrom().get(0))
|| !verticesCollectionName.equals(edgeDefinitionEntity.getTo().get(0))) {
error = true;
}
}
if (error) {
throw new ArangoDBGraphException("Graph with that name already exists but with other settings");
}
simpleGraph = new ArangoDBSimpleGraph(graph, verticesCollectionName, edgesCollectionName);
}
} catch (ArangoException e1) {
}
if (simpleGraph == null) {
try {
simpleGraph = this.client.createGraph(name, verticesCollectionName, edgesCollectionName);
} catch (ArangoException e2) {
throw new ArangoDBGraphException(e2);
}
}
}
public Features getFeatures() {
return FEATURES;
}
public void shutdown() {
client.shutdown();
}
public Vertex addVertex(Object id) {
return ArangoDBVertex.create(this, id);
}
public Vertex getVertex(Object id) {
return ArangoDBVertex.load(this, id);
}
public void removeVertex(Vertex vertex) {
if (vertex.getClass().equals(ArangoDBVertex.class)) {
ArangoDBVertex v = (ArangoDBVertex) vertex;
v.remove();
}
}
public Iterable<Vertex> getVertices() {
ArangoDBGraphQuery q = new ArangoDBGraphQuery(this);
return q.vertices();
}
public Iterable<Vertex> getVertices(String key, Object value) {
ArangoDBGraphQuery q = new ArangoDBGraphQuery(this);
q.has(key, value);
return q.vertices();
}
public Edge addEdge(Object id, Vertex outVertex, Vertex inVertex, String label) {
if (label == null) {
throw ExceptionFactory.edgeLabelCanNotBeNull();
}
return ArangoDBEdge.create(this, id, outVertex, inVertex, label);
}
public Edge getEdge(Object id) {
return ArangoDBEdge.load(this, id);
}
public void removeEdge(Edge edge) {
if (edge.getClass().equals(ArangoDBEdge.class)) {
ArangoDBEdge e = (ArangoDBEdge) edge;
e.remove();
}
}
public Iterable<Edge> getEdges() {
ArangoDBGraphQuery q = new ArangoDBGraphQuery(this);
return q.edges();
}
public Iterable<Edge> getEdges(String key, Object value) {
ArangoDBGraphQuery q = new ArangoDBGraphQuery(this);
q.has(key, value);
return q.edges();
}
public ArangoDBSimpleGraph getRawGraph() {
return simpleGraph;
}
public String toString() {
return StringFactory.graphString(this, this.simpleGraph.toString());
}
public <T extends Element> void dropKeyIndex(String key, Class<T> elementClass) {
List<ArangoDBIndex> indices = null;
try {
if (elementClass.isAssignableFrom(Vertex.class)) {
indices = client.getVertexIndices(simpleGraph);
} else if (elementClass.isAssignableFrom(Edge.class)) {
indices = client.getEdgeIndices(simpleGraph);
}
} catch (ArangoDBException e) {
}
String n = ArangoDBUtil.normalizeKey(key);
if (indices != null) {
for (ArangoDBIndex i : indices) {
if (i.getFields().size() == 1) {
String field = i.getFields().get(0);
if (field.equals(n)) {
try {
client.deleteIndex(i.getId());
} catch (ArangoDBException e) {
}
}
}
}
}
}
@SuppressWarnings("rawtypes")
public <T extends Element> void createKeyIndex(String key, Class<T> elementClass, Parameter... indexParameters) {
IndexType type = IndexType.SKIPLIST;
boolean unique = false;
Vector<String> fields = new Vector<String>();
String n = ArangoDBUtil.normalizeKey(key);
fields.add(n);
for (Parameter p : indexParameters) {
if (p.getKey().equals("type")) {
type = object2IndexType(p.getValue());
}
if (p.getKey().equals("unique")) {
unique = (Boolean) p.getValue();
}
}
try {
if (elementClass.isAssignableFrom(Vertex.class)) {
getClient().createVertexIndex(simpleGraph, type, unique, fields);
} else if (elementClass.isAssignableFrom(Edge.class)) {
getClient().createEdgeIndex(simpleGraph, type, unique, fields);
}
} catch (ArangoDBException e) {
}
}
private IndexType object2IndexType(Object obj) {
if (obj instanceof IndexType) {
return (IndexType) obj;
}
if (obj != null) {
String str = obj.toString();
for (IndexType indexType : IndexType.values()) {
if (indexType.toString().equalsIgnoreCase(str)) {
return indexType;
}
}
}
return IndexType.SKIPLIST;
}
public <T extends Element> Set<String> getIndexedKeys(Class<T> elementClass) {
HashSet<String> result = new HashSet<String>();
List<ArangoDBIndex> indices = null;
try {
if (elementClass.isAssignableFrom(Vertex.class)) {
indices = client.getVertexIndices(simpleGraph);
} else if (elementClass.isAssignableFrom(Edge.class)) {
indices = client.getEdgeIndices(simpleGraph);
}
for (ArangoDBIndex i : indices) {
if (i.getFields().size() == 1) {
String key = i.getFields().get(0);
// ignore system index
if (key.charAt(0) != '_') {
result.add(ArangoDBUtil.denormalizeKey(key));
}
}
}
} catch (ArangoDBException e) {
}
return result;
}
public GraphQuery query() {
return new ArangoDBGraphQuery(this);
}
/**
* Returns the ArangoDBSimpleGraphClient object
*
* @return the ArangoDBSimpleGraphClient object
*/
public ArangoDBSimpleGraphClient getClient() {
return client;
}
/**
* Returns the identifier of the graph
*
* @return the identifier of the graph
*/
public String getId() {
return simpleGraph.getGraphEntity().getDocumentKey();
}
}
| |
// Copyright (C) 2013 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.gerrit.server.notedb;
import static com.google.gerrit.server.notedb.ChangeNoteUtil.FOOTER_LABEL;
import static com.google.gerrit.server.notedb.ChangeNoteUtil.FOOTER_PATCH_SET;
import static com.google.gerrit.server.notedb.ChangeNoteUtil.FOOTER_STATUS;
import static com.google.gerrit.server.notedb.ChangeNoteUtil.FOOTER_SUBMITTED_WITH;
import static com.google.gerrit.server.notedb.ChangeNoteUtil.GERRIT_PLACEHOLDER_HOST;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Enums;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Supplier;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.ComparisonChain;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableListMultimap;
import com.google.common.collect.ImmutableSetMultimap;
import com.google.common.collect.LinkedListMultimap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Multimap;
import com.google.common.collect.Ordering;
import com.google.common.collect.Table;
import com.google.common.collect.Tables;
import com.google.common.primitives.Ints;
import com.google.gerrit.common.data.SubmitRecord;
import com.google.gerrit.reviewdb.client.Account;
import com.google.gerrit.reviewdb.client.Change;
import com.google.gerrit.reviewdb.client.ChangeMessage;
import com.google.gerrit.reviewdb.client.PatchLineComment;
import com.google.gerrit.reviewdb.client.PatchSet;
import com.google.gerrit.reviewdb.client.PatchLineComment.Status;
import com.google.gerrit.reviewdb.client.PatchSet.Id;
import com.google.gerrit.reviewdb.client.PatchSetApproval;
import com.google.gerrit.reviewdb.client.PatchSetApproval.LabelId;
import com.google.gerrit.reviewdb.client.Project;
import com.google.gerrit.server.git.GitRepositoryManager;
import com.google.gerrit.server.util.LabelVote;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import org.eclipse.jgit.errors.ConfigInvalidException;
import org.eclipse.jgit.errors.RepositoryNotFoundException;
import org.eclipse.jgit.lib.CommitBuilder;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.PersonIdent;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.notes.NoteMap;
import org.eclipse.jgit.revwalk.FooterKey;
import org.eclipse.jgit.revwalk.RevCommit;
import org.eclipse.jgit.revwalk.RevWalk;
import org.eclipse.jgit.util.RawParseUtils;
import java.io.IOException;
import java.nio.charset.Charset;
import java.sql.Timestamp;
import java.text.ParseException;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
/** View of a single {@link Change} based on the log of its notes branch. */
/** View of a single {@link Change} based on the log of its notes branch. */
public class ChangeNotes extends AbstractChangeNotes<ChangeNotes> {
  /** Orders approvals chronologically by the time they were granted. */
  private static final Ordering<PatchSetApproval> PSA_BY_TIME =
      Ordering.natural().onResultOf(
          new Function<PatchSetApproval, Timestamp>() {
            @Override
            public Timestamp apply(PatchSetApproval input) {
              return input.getGranted();
            }
          });

  /** Orders change messages chronologically by the time they were written. */
  public static final Ordering<ChangeMessage> MESSAGE_BY_TIME =
      Ordering.natural().onResultOf(
          new Function<ChangeMessage, Timestamp>() {
            @Override
            public Timestamp apply(ChangeMessage input) {
              return input.getWrittenOn();
            }
          });

  // Orders inline comments by file name, then line number, then write time.
  // NOTE(review): public static and non-final — looks like it should be final
  // (and UPPER_SNAKE_CASE); confirm no caller reassigns it before changing.
  public static Comparator<PatchLineComment> PatchLineCommentComparator =
      new Comparator<PatchLineComment>() {
        public int compare(PatchLineComment c1, PatchLineComment c2) {
          String filename1 = c1.getKey().getParentKey().get();
          String filename2 = c2.getKey().getParentKey().get();
          return ComparisonChain.start()
              .compare(filename1, filename2)
              .compare(c1.getLine(), c2.getLine())
              .compare(c1.getWrittenOn(), c2.getWrittenOn())
              .result();
        }
      };

  /** Builds a ConfigInvalidException whose message is prefixed with the change ID. */
  public static ConfigInvalidException parseException(Change.Id changeId,
      String fmt, Object... args) {
    return new ConfigInvalidException("Change " + changeId + ": "
        + String.format(fmt, args));
  }

  /**
   * Extracts an account ID from an identity of the form {@code <id>@<host>},
   * where the host must be the Gerrit placeholder host used in notes commits.
   *
   * @throws ConfigInvalidException if the email does not match that form.
   */
  public static Account.Id parseIdent(PersonIdent ident, Change.Id changeId)
      throws ConfigInvalidException {
    String email = ident.getEmailAddress();
    int at = email.indexOf('@');
    if (at >= 0) {
      String host = email.substring(at + 1, email.length());
      Integer id = Ints.tryParse(email.substring(0, at));
      if (id != null && host.equals(GERRIT_PLACEHOLDER_HOST)) {
        return new Account.Id(id);
      }
    }
    throw parseException(changeId, "invalid identity, expected <id>@%s: %s",
        GERRIT_PLACEHOLDER_HOST, email);
  }

  /** Injectable factory for creating {@link ChangeNotes} instances. */
  @Singleton
  public static class Factory {
    private final GitRepositoryManager repoManager;

    @VisibleForTesting
    @Inject
    public Factory(GitRepositoryManager repoManager) {
      this.repoManager = repoManager;
    }

    public ChangeNotes create(Change change) {
      return new ChangeNotes(repoManager, change);
    }
  }

  /**
   * Stateful parser that walks the notes branch from its tip and accumulates
   * approvals, reviewers, submit records, change messages and inline comments.
   * One instance parses one change and is then discarded.
   */
  private static class Parser {
    private final Change.Id changeId;
    // Tip commit of the change's notes ref; parsing starts here.
    private final ObjectId tip;
    private final RevWalk walk;
    private final Repository repo;
    // Per patch set: a (account, label) -> approval table. An absent Optional
    // records an explicit label removal ("-Label" footer).
    private final Map<PatchSet.Id,
        Table<Account.Id, String, Optional<PatchSetApproval>>> approvals;
    private final Map<Account.Id, ReviewerState> reviewers;
    private final List<SubmitRecord> submitRecords;
    private final Multimap<PatchSet.Id, ChangeMessage> changeMessages;
    private final Multimap<Id, PatchLineComment> commentsForPs;
    private final Multimap<PatchSet.Id, PatchLineComment> commentsForBase;
    private NoteMap commentNoteMap;
    private Change.Status status;

    private Parser(Change change, ObjectId tip, RevWalk walk,
        GitRepositoryManager repoManager) throws RepositoryNotFoundException,
        IOException {
      this.changeId = change.getId();
      this.tip = tip;
      this.walk = walk;
      // NOTE(review): the opened repository is never closed by this class —
      // confirm whether the caller is expected to release it.
      this.repo = repoManager.openRepository(getProjectName(change));
      approvals = Maps.newHashMap();
      reviewers = Maps.newLinkedHashMap();
      submitRecords = Lists.newArrayListWithExpectedSize(1);
      changeMessages = LinkedListMultimap.create();
      commentsForPs = ArrayListMultimap.create();
      commentsForBase = ArrayListMultimap.create();
    }

    /** Walks every commit reachable from the tip, then parses comments and prunes reviewers. */
    private void parseAll() throws ConfigInvalidException, IOException, ParseException {
      walk.markStart(walk.parseCommit(tip));
      for (RevCommit commit : walk) {
        parse(commit);
      }
      parseComments();
      pruneReviewers();
    }

    /**
     * Flattens the per-patch-set approval tables into a multimap keyed by
     * patch set ID, dropping explicit removals (absent Optionals) and sorting
     * each patch set's approvals by grant time.
     */
    private ImmutableListMultimap<PatchSet.Id, PatchSetApproval>
        buildApprovals() {
      Multimap<PatchSet.Id, PatchSetApproval> result =
          ArrayListMultimap.create(approvals.keySet().size(), 3);
      for (Table<?, ?, Optional<PatchSetApproval>> curr
          : approvals.values()) {
        for (PatchSetApproval psa : Optional.presentInstances(curr.values())) {
          result.put(psa.getPatchSetId(), psa);
        }
      }
      for (Collection<PatchSetApproval> v : result.asMap().values()) {
        Collections.sort((List<PatchSetApproval>) v, PSA_BY_TIME);
      }
      return ImmutableListMultimap.copyOf(result);
    }

    /** Sorts each patch set's change messages chronologically and freezes the result. */
    private ImmutableListMultimap<PatchSet.Id, ChangeMessage> buildMessages() {
      for (Collection<ChangeMessage> v : changeMessages.asMap().values()) {
        Collections.sort((List<ChangeMessage>) v, MESSAGE_BY_TIME);
      }
      return ImmutableListMultimap.copyOf(changeMessages);
    }

    /**
     * Parses one notes commit: status (first one seen wins), patch set ID,
     * author, change message, submit records, label votes and reviewer
     * footers.
     */
    private void parse(RevCommit commit) throws ConfigInvalidException, IOException {
      if (status == null) {
        status = parseStatus(commit);
      }
      PatchSet.Id psId = parsePatchSetId(commit);
      Account.Id accountId = parseIdent(commit);
      parseChangeMessage(psId, accountId, commit);
      if (submitRecords.isEmpty()) {
        // Only parse the most recent set of submit records; any older ones are
        // still there, but not currently used.
        parseSubmitRecords(commit.getFooterLines(FOOTER_SUBMITTED_WITH));
      }
      for (String line : commit.getFooterLines(FOOTER_LABEL)) {
        parseApproval(psId, accountId, commit, line);
      }
      for (ReviewerState state : ReviewerState.values()) {
        for (String line : commit.getFooterLines(state.getFooterKey())) {
          parseReviewer(state, line);
        }
      }
    }

    /**
     * Reads the optional Status footer. Returns null when absent; throws when
     * duplicated or not a valid {@link Change.Status} name.
     */
    private Change.Status parseStatus(RevCommit commit)
        throws ConfigInvalidException {
      List<String> statusLines = commit.getFooterLines(FOOTER_STATUS);
      if (statusLines.isEmpty()) {
        return null;
      } else if (statusLines.size() > 1) {
        throw expectedOneFooter(FOOTER_STATUS, statusLines);
      }
      Optional<Change.Status> status = Enums.getIfPresent(
          Change.Status.class, statusLines.get(0).toUpperCase());
      if (!status.isPresent()) {
        throw invalidFooter(FOOTER_STATUS, statusLines.get(0));
      }
      return status.get();
    }

    /** Reads the mandatory, unique Patch-set footer as an integer patch set number. */
    private PatchSet.Id parsePatchSetId(RevCommit commit)
        throws ConfigInvalidException {
      List<String> psIdLines = commit.getFooterLines(FOOTER_PATCH_SET);
      if (psIdLines.size() != 1) {
        throw expectedOneFooter(FOOTER_PATCH_SET, psIdLines);
      }
      Integer psId = Ints.tryParse(psIdLines.get(0));
      if (psId == null) {
        throw invalidFooter(FOOTER_PATCH_SET, psIdLines.get(0));
      }
      return new PatchSet.Id(changeId, psId);
    }

    /**
     * Extracts the change message from the raw commit buffer: the text between
     * the end of the subject paragraph and the start of the trailing (footer)
     * paragraph. Commits without such a middle section contribute no message.
     * Works on raw bytes and handles both LF and CRLF paragraph separators.
     */
    private void parseChangeMessage(PatchSet.Id psId, Account.Id accountId,
        RevCommit commit) {
      byte[] raw = commit.getRawBuffer();
      int size = raw.length;
      Charset enc = RawParseUtils.parseEncoding(raw);
      int subjectStart = RawParseUtils.commitMessage(raw, 0);
      if (subjectStart < 0 || subjectStart >= size) {
        return;
      }
      int subjectEnd = RawParseUtils.endOfParagraph(raw, subjectStart);
      if (subjectEnd == size) {
        return;
      }
      int changeMessageStart;
      if (raw[subjectEnd] == '\n') {
        changeMessageStart = subjectEnd + 2; //\n\n ends paragraph
      } else if (raw[subjectEnd] == '\r') {
        changeMessageStart = subjectEnd + 4; //\r\n\r\n ends paragraph
      } else {
        return;
      }
      // Scan backwards from the end of the buffer to find the blank line
      // preceding the footer paragraph; the message ends just before it.
      int ptr = size - 1;
      int changeMessageEnd = -1;
      while(ptr > changeMessageStart) {
        ptr = RawParseUtils.prevLF(raw, ptr, '\r');
        if (ptr == -1) {
          break;
        }
        if (raw[ptr] == '\n') {
          changeMessageEnd = ptr - 1;
          break;
        } else if (raw[ptr] == '\r') {
          changeMessageEnd = ptr - 3;
          break;
        }
      }
      if (ptr <= changeMessageStart) {
        return;
      }
      String changeMsgString = RawParseUtils.decode(enc, raw,
          changeMessageStart, changeMessageEnd + 1);
      ChangeMessage changeMessage = new ChangeMessage(
          new ChangeMessage.Key(psId.getParentKey(), commit.name()),
          accountId,
          new Timestamp(commit.getCommitterIdent().getWhen().getTime()),
          psId);
      changeMessage.setMessage(changeMsgString);
      changeMessages.put(psId, changeMessage);
    }

    /** Loads published inline comments from the notes ref into the base/ps multimaps. */
    private void parseComments()
        throws IOException, ConfigInvalidException, ParseException {
      commentNoteMap = CommentsInNotesUtil.parseCommentsFromNotes(repo,
          ChangeNoteUtil.changeRefName(changeId), walk, changeId,
          commentsForBase, commentsForPs, Status.PUBLISHED);
    }

    /**
     * Parses one Label footer line. A leading '-' records a vote removal
     * (absent Optional); otherwise the line is a "Label=value" vote. Because
     * only the first entry per (account, label) is stored, entries from
     * earlier-visited commits take precedence over later-visited ones.
     */
    private void parseApproval(PatchSet.Id psId, Account.Id accountId,
        RevCommit commit, String line) throws ConfigInvalidException {
      Table<Account.Id, String, Optional<PatchSetApproval>> curr =
          approvals.get(psId);
      if (curr == null) {
        // Lazily create the per-patch-set table; row maps preserve insertion
        // order via LinkedHashMap.
        curr = Tables.newCustomTable(
            Maps.<Account.Id, Map<String, Optional<PatchSetApproval>>>
                newHashMapWithExpectedSize(2),
            new Supplier<Map<String, Optional<PatchSetApproval>>>() {
              @Override
              public Map<String, Optional<PatchSetApproval>> get() {
                return Maps.newLinkedHashMap();
              }
            });
        approvals.put(psId, curr);
      }
      if (line.startsWith("-")) {
        String label = line.substring(1);
        if (!curr.contains(accountId, label)) {
          curr.put(accountId, label, Optional.<PatchSetApproval> absent());
        }
      } else {
        LabelVote l;
        try {
          l = LabelVote.parseWithEquals(line);
        } catch (IllegalArgumentException e) {
          // Preserve the original parse failure as the cause.
          ConfigInvalidException pe =
              parseException("invalid %s: %s", FOOTER_LABEL, line);
          pe.initCause(e);
          throw pe;
        }
        if (!curr.contains(accountId, l.getLabel())) {
          curr.put(accountId, l.getLabel(), Optional.of(new PatchSetApproval(
              new PatchSetApproval.Key(
                  psId,
                  accountId,
                  new LabelId(l.getLabel())),
              l.getValue(),
              new Timestamp(commit.getCommitterIdent().getWhen().getTime()))));
        }
      }
    }

    /**
     * Parses Submitted-with footer lines. A line without ": " starts a new
     * record ("STATUS" optionally followed by an error message); a line with
     * ": " adds a label to the current record, either "STATUS: Label" or
     * "STATUS: Label: <applied-by ident>".
     */
    private void parseSubmitRecords(List<String> lines)
        throws ConfigInvalidException {
      SubmitRecord rec = null;
      for (String line : lines) {
        int c = line.indexOf(": ");
        if (c < 0) {
          rec = new SubmitRecord();
          submitRecords.add(rec);
          int s = line.indexOf(' ');
          String statusStr = s >= 0 ? line.substring(0, s) : line;
          Optional<SubmitRecord.Status> status =
              Enums.getIfPresent(SubmitRecord.Status.class, statusStr);
          checkFooter(status.isPresent(), FOOTER_SUBMITTED_WITH, line);
          rec.status = status.get();
          if (s >= 0) {
            rec.errorMessage = line.substring(s);
          }
        } else {
          // A label line must follow a record line.
          checkFooter(rec != null, FOOTER_SUBMITTED_WITH, line);
          SubmitRecord.Label label = new SubmitRecord.Label();
          if (rec.labels == null) {
            rec.labels = Lists.newArrayList();
          }
          rec.labels.add(label);
          Optional<SubmitRecord.Label.Status> status = Enums.getIfPresent(
              SubmitRecord.Label.Status.class, line.substring(0, c));
          checkFooter(status.isPresent(), FOOTER_SUBMITTED_WITH, line);
          label.status = status.get();
          int c2 = line.indexOf(": ", c + 2);
          if (c2 >= 0) {
            label.label = line.substring(c + 2, c2);
            PersonIdent ident =
                RawParseUtils.parsePersonIdent(line.substring(c2 + 2));
            checkFooter(ident != null, FOOTER_SUBMITTED_WITH, line);
            label.appliedBy = parseIdent(ident);
          } else {
            label.label = line.substring(c + 2);
          }
        }
      }
    }

    /** Extracts the account ID encoded in the commit's author identity. */
    private Account.Id parseIdent(RevCommit commit)
        throws ConfigInvalidException {
      return parseIdent(commit.getAuthorIdent());
    }

    // Same contract as the static ChangeNotes.parseIdent, but reports errors
    // through this parser's change-ID-prefixed exception helper.
    private Account.Id parseIdent(PersonIdent ident)
        throws ConfigInvalidException {
      String email = ident.getEmailAddress();
      int at = email.indexOf('@');
      if (at >= 0) {
        String host = email.substring(at + 1, email.length());
        Integer id = Ints.tryParse(email.substring(0, at));
        if (id != null && host.equals(GERRIT_PLACEHOLDER_HOST)) {
          return new Account.Id(id);
        }
      }
      throw parseException("invalid identity, expected <id>@%s: %s",
          GERRIT_PLACEHOLDER_HOST, email);
    }

    /**
     * Parses one reviewer footer line (a person ident). Only the first state
     * seen per account is kept.
     */
    private void parseReviewer(ReviewerState state, String line)
        throws ConfigInvalidException {
      PersonIdent ident = RawParseUtils.parsePersonIdent(line);
      if (ident == null) {
        throw invalidFooter(state.getFooterKey(), line);
      }
      Account.Id accountId = parseIdent(ident);
      if (!reviewers.containsKey(accountId)) {
        reviewers.put(accountId, state);
      }
    }

    /** Drops reviewers whose final state is REMOVED, along with all their approvals. */
    private void pruneReviewers() {
      Iterator<Map.Entry<Account.Id, ReviewerState>> rit =
          reviewers.entrySet().iterator();
      while (rit.hasNext()) {
        Map.Entry<Account.Id, ReviewerState> e = rit.next();
        if (e.getValue() == ReviewerState.REMOVED) {
          rit.remove();
          for (Table<Account.Id, ?, ?> curr : approvals.values()) {
            curr.rowKeySet().remove(e.getKey());
          }
        }
      }
    }

    private ConfigInvalidException expectedOneFooter(FooterKey footer,
        List<String> actual) {
      return parseException("missing or multiple %s: %s",
          footer.getName(), actual);
    }

    private ConfigInvalidException invalidFooter(FooterKey footer,
        String actual) {
      return parseException("invalid %s: %s", footer.getName(), actual);
    }

    // Throws an invalid-footer exception when the given condition is false.
    private void checkFooter(boolean expr, FooterKey footer, String actual)
        throws ConfigInvalidException {
      if (!expr) {
        throw invalidFooter(footer, actual);
      }
    }

    private ConfigInvalidException parseException(String fmt, Object... args) {
      return ChangeNotes.parseException(changeId, fmt, args);
    }
  }

  // Parsed, immutable views of the notes branch; populated by onLoad() or
  // loadDefaults().
  private ImmutableListMultimap<PatchSet.Id, PatchSetApproval> approvals;
  private ImmutableSetMultimap<ReviewerState, Account.Id> reviewers;
  private ImmutableList<SubmitRecord> submitRecords;
  private ImmutableListMultimap<PatchSet.Id, ChangeMessage> changeMessages;
  private ImmutableListMultimap<PatchSet.Id, PatchLineComment> commentsForBase;
  private ImmutableListMultimap<PatchSet.Id, PatchLineComment> commentsForPS;
  // Note map of the inline-comment notes; package-private for collaborators.
  NoteMap noteMap;

  @VisibleForTesting
  public ChangeNotes(GitRepositoryManager repoManager, Change change) {
    super(repoManager, change);
  }

  /** @return approvals by patch set, sorted by grant time. */
  public ImmutableListMultimap<PatchSet.Id, PatchSetApproval> getApprovals() {
    return approvals;
  }

  /** @return reviewer account IDs grouped by their reviewer state. */
  public ImmutableSetMultimap<ReviewerState, Account.Id> getReviewers() {
    return reviewers;
  }

  /**
   * @return submit records stored during the most recent submit; only for
   *     changes that were actually submitted.
   */
  public ImmutableList<SubmitRecord> getSubmitRecords() {
    return submitRecords;
  }

  /** @return change messages by patch set, in chronological order. */
  public ImmutableListMultimap<PatchSet.Id, ChangeMessage> getChangeMessages() {
    return changeMessages;
  }

  /** @return inline comments on each patchset's base (side == 0). */
  public ImmutableListMultimap<PatchSet.Id, PatchLineComment>
      getBaseComments() {
    return commentsForBase;
  }

  /** @return inline comments on each patchset (side == 1). */
  public ImmutableListMultimap<PatchSet.Id, PatchLineComment>
      getPatchSetComments() {
    return commentsForPS;
  }

  /** @return the NoteMap */
  NoteMap getNoteMap() {
    return noteMap;
  }

  @Override
  protected String getRefName() {
    return ChangeNoteUtil.changeRefName(getChangeId());
  }

  /**
   * Loads and parses the notes branch, publishing the parser's results into
   * this instance's immutable fields. Falls back to empty defaults when the
   * notes ref does not exist yet.
   */
  @Override
  protected void onLoad() throws IOException, ConfigInvalidException {
    ObjectId rev = getRevision();
    if (rev == null) {
      loadDefaults();
      return;
    }
    RevWalk walk = new RevWalk(reader);
    try {
      Change change = getChange();
      Parser parser = new Parser(change, rev, walk, repoManager);
      parser.parseAll();
      if (parser.status != null) {
        change.setStatus(parser.status);
      }
      approvals = parser.buildApprovals();
      changeMessages = parser.buildMessages();
      commentsForBase = ImmutableListMultimap.copyOf(parser.commentsForBase);
      commentsForPS = ImmutableListMultimap.copyOf(parser.commentsForPs);
      noteMap = parser.commentNoteMap;
      // Invert the parser's account -> state map into state -> accounts.
      ImmutableSetMultimap.Builder<ReviewerState, Account.Id> reviewers =
          ImmutableSetMultimap.builder();
      for (Map.Entry<Account.Id, ReviewerState> e
          : parser.reviewers.entrySet()) {
        reviewers.put(e.getValue(), e.getKey());
      }
      this.reviewers = reviewers.build();
      submitRecords = ImmutableList.copyOf(parser.submitRecords);
    } catch (ParseException e1) {
      // TODO(yyonas): figure out how to handle this exception
      throw new IOException(e1);
    } finally {
      walk.release();
    }
  }

  /** Initializes all fields to empty collections for changes without a notes ref. */
  private void loadDefaults() {
    approvals = ImmutableListMultimap.of();
    reviewers = ImmutableSetMultimap.of();
    submitRecords = ImmutableList.of();
    changeMessages = ImmutableListMultimap.of();
    commentsForBase = ImmutableListMultimap.of();
    commentsForPS = ImmutableListMultimap.of();
  }

  // This view is read-only; writing notes is handled elsewhere.
  @Override
  protected boolean onSave(CommitBuilder commit) {
    throw new UnsupportedOperationException(
        getClass().getSimpleName() + " is read-only");
  }

  private static Project.NameKey getProjectName(Change change) {
    return change.getProject();
  }

  @Override
  protected Project.NameKey getProjectName() {
    return getProjectName(getChange());
  }
}
| |
/*
* Copyright (C) 2014 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okhttp3.ws;
import java.io.IOException;
import java.net.ProtocolException;
import java.util.Random;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import javax.net.ssl.SSLContext;
import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.RequestBody;
import okhttp3.Response;
import okhttp3.ResponseBody;
import okhttp3.internal.SslContextBuilder;
import okhttp3.mockwebserver.MockResponse;
import okhttp3.mockwebserver.MockWebServer;
import okhttp3.testing.RecordingHostnameVerifier;
import okio.Buffer;
import org.junit.After;
import org.junit.Rule;
import org.junit.Test;
import static okhttp3.ws.WebSocket.TEXT;
public final class WebSocketCallTest {
@Rule public final MockWebServer server = new MockWebServer();
private final SSLContext sslContext = SslContextBuilder.localhost();
private final WebSocketRecorder listener = new WebSocketRecorder();
private final Random random = new Random(0);
private OkHttpClient client = new OkHttpClient();
@After public void tearDown() {
listener.assertExhausted();
}
@Test public void clientPingPong() throws IOException {
WebSocketListener serverListener = new EmptyWebSocketListener();
server.enqueue(new MockResponse().withWebSocketUpgrade(serverListener));
WebSocket webSocket = awaitWebSocket();
webSocket.sendPing(new Buffer().writeUtf8("Hello, WebSockets!"));
listener.assertPong(new Buffer().writeUtf8("Hello, WebSockets!"));
}
@Test public void clientMessage() throws IOException {
WebSocketRecorder serverListener = new WebSocketRecorder();
server.enqueue(new MockResponse().withWebSocketUpgrade(serverListener));
WebSocket webSocket = awaitWebSocket();
webSocket.sendMessage(RequestBody.create(TEXT, "Hello, WebSockets!"));
serverListener.assertTextMessage("Hello, WebSockets!");
}
@Test public void serverMessage() throws IOException {
WebSocketListener serverListener = new EmptyWebSocketListener() {
@Override public void onOpen(final WebSocket webSocket, Response response) {
new Thread() {
@Override public void run() {
try {
webSocket.sendMessage(RequestBody.create(TEXT, "Hello, WebSockets!"));
} catch (IOException e) {
throw new AssertionError(e);
}
}
}.start();
}
};
server.enqueue(new MockResponse().withWebSocketUpgrade(serverListener));
awaitWebSocket();
listener.assertTextMessage("Hello, WebSockets!");
}
@Test public void okButNotOk() {
server.enqueue(new MockResponse());
awaitWebSocket();
listener.assertFailure(ProtocolException.class, "Expected HTTP 101 response but was '200 OK'");
}
@Test public void notFound() {
server.enqueue(new MockResponse().setStatus("HTTP/1.1 404 Not Found"));
awaitWebSocket();
listener.assertFailure(ProtocolException.class,
"Expected HTTP 101 response but was '404 Not Found'");
}
@Test public void missingConnectionHeader() {
server.enqueue(new MockResponse()
.setResponseCode(101)
.setHeader("Upgrade", "websocket")
.setHeader("Sec-WebSocket-Accept", "ujmZX4KXZqjwy6vi1aQFH5p4Ygk="));
awaitWebSocket();
listener.assertFailure(ProtocolException.class,
"Expected 'Connection' header value 'Upgrade' but was 'null'");
}
@Test public void wrongConnectionHeader() {
server.enqueue(new MockResponse()
.setResponseCode(101)
.setHeader("Upgrade", "websocket")
.setHeader("Connection", "Downgrade")
.setHeader("Sec-WebSocket-Accept", "ujmZX4KXZqjwy6vi1aQFH5p4Ygk="));
awaitWebSocket();
listener.assertFailure(ProtocolException.class,
"Expected 'Connection' header value 'Upgrade' but was 'Downgrade'");
}
@Test public void missingUpgradeHeader() {
server.enqueue(new MockResponse()
.setResponseCode(101)
.setHeader("Connection", "Upgrade")
.setHeader("Sec-WebSocket-Accept", "ujmZX4KXZqjwy6vi1aQFH5p4Ygk="));
awaitWebSocket();
listener.assertFailure(ProtocolException.class,
"Expected 'Upgrade' header value 'websocket' but was 'null'");
}
@Test public void wrongUpgradeHeader() {
server.enqueue(new MockResponse()
.setResponseCode(101)
.setHeader("Connection", "Upgrade")
.setHeader("Upgrade", "Pepsi")
.setHeader("Sec-WebSocket-Accept", "ujmZX4KXZqjwy6vi1aQFH5p4Ygk="));
awaitWebSocket();
listener.assertFailure(ProtocolException.class,
"Expected 'Upgrade' header value 'websocket' but was 'Pepsi'");
}
@Test public void missingMagicHeader() {
server.enqueue(new MockResponse()
.setResponseCode(101)
.setHeader("Connection", "Upgrade")
.setHeader("Upgrade", "websocket"));
awaitWebSocket();
listener.assertFailure(ProtocolException.class,
"Expected 'Sec-WebSocket-Accept' header value 'ujmZX4KXZqjwy6vi1aQFH5p4Ygk=' but was 'null'");
}
@Test public void wrongMagicHeader() {
server.enqueue(new MockResponse()
.setResponseCode(101)
.setHeader("Connection", "Upgrade")
.setHeader("Upgrade", "websocket")
.setHeader("Sec-WebSocket-Accept", "magic"));
awaitWebSocket();
listener.assertFailure(ProtocolException.class,
"Expected 'Sec-WebSocket-Accept' header value 'ujmZX4KXZqjwy6vi1aQFH5p4Ygk=' but was 'magic'");
}
@Test public void wsScheme() throws IOException {
websocketScheme("ws");
}
@Test public void wsUppercaseScheme() throws IOException {
websocketScheme("WS");
}
@Test public void wssScheme() throws IOException {
server.useHttps(sslContext.getSocketFactory(), false);
client = client.newBuilder()
.sslSocketFactory(sslContext.getSocketFactory())
.hostnameVerifier(new RecordingHostnameVerifier())
.build();
websocketScheme("wss");
}
@Test public void httpsScheme() throws IOException {
server.useHttps(sslContext.getSocketFactory(), false);
client = client.newBuilder()
.sslSocketFactory(sslContext.getSocketFactory())
.hostnameVerifier(new RecordingHostnameVerifier())
.build();
websocketScheme("https");
}
/**
 * Connects to the mock server using the given URL scheme and round-trips one
 * text message to prove the socket is usable.
 */
private void websocketScheme(String scheme) throws IOException {
  WebSocketRecorder recorder = new WebSocketRecorder();
  server.enqueue(new MockResponse().withWebSocketUpgrade(recorder));

  String url = scheme + "://" + server.getHostName() + ":" + server.getPort() + "/";
  Request request = new Request.Builder()
      .url(url)
      .build();

  WebSocket socket = awaitWebSocket(request);
  socket.sendMessage(RequestBody.create(TEXT, "abc"));
  recorder.assertTextMessage("abc");
}
/** Opens a web socket to the mock server's root path with a default GET request. */
private WebSocket awaitWebSocket() {
  Request defaultRequest = new Request.Builder()
      .get()
      .url(server.url("/"))
      .build();
  return awaitWebSocket(defaultRequest);
}
/**
 * Enqueues {@code request} as a web socket call and blocks until the handshake
 * completes one way or the other.
 *
 * <p>All post-handshake callbacks are forwarded to the test's recording
 * {@code listener}; {@code onOpen} releases the latch with the connected
 * socket, {@code onFailure} releases it with nothing.
 *
 * @return the connected socket, or {@code null} if the handshake failed
 *         (tests then assert the failure via {@code listener}).
 */
private WebSocket awaitWebSocket(Request request) {
  WebSocketCall call = new WebSocketCall(client, request, random);

  final AtomicReference<WebSocket> webSocketRef = new AtomicReference<>();
  final CountDownLatch latch = new CountDownLatch(1);

  call.enqueue(new WebSocketListener() {
    @Override public void onOpen(WebSocket webSocket, Response response) {
      webSocketRef.set(webSocket);
      latch.countDown();
    }

    @Override public void onMessage(ResponseBody message) throws IOException {
      listener.onMessage(message);
    }

    @Override public void onPong(Buffer payload) {
      listener.onPong(payload);
    }

    @Override public void onClose(int code, String reason) {
      listener.onClose(code, reason);
    }

    @Override public void onFailure(IOException e, Response response) {
      // The response is deliberately not forwarded; tests assert only on the
      // exception type and message.
      listener.onFailure(e, null);
      latch.countDown();
    }
  });

  try {
    if (!latch.await(10, TimeUnit.SECONDS)) {
      throw new AssertionError("Timed out.");
    }
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt(); // restore the interrupt flag before failing
    throw new AssertionError(e);
  }
  return webSocketRef.get();
}
/**
 * A WebSocketListener whose callbacks all do nothing — a convenience
 * implementation for call sites that don't care about socket events.
 */
private static class EmptyWebSocketListener implements WebSocketListener {
@Override public void onOpen(WebSocket webSocket, Response response) {
}
@Override public void onMessage(ResponseBody message) throws IOException {
}
@Override public void onPong(Buffer payload) {
}
@Override public void onClose(int code, String reason) {
}
@Override public void onFailure(IOException e, Response response) {
}
}
}
| |
package nl.surfsara.warcexamples.datascience;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.io.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.ReduceContext;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.task.annotation.Checkpointable;
import java.io.IOException;
import java.util.*;
import java.util.Iterator;
import java.util.Map.Entry;
/**
* Created by naward on 11-7-15.
*/
//Reducer works with keys and values in textual format:
//public class Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT> {
/**
 * Reducer shared by the data-science word-count / link-graph jobs.
 *
 * <p>The same mapper feeds several jobs; {@link #REDUCER_USED} selects which
 * strategy this reducer applies:
 * <ul>
 *   <li>0 — identity: the first run is a job with no real reduction, values
 *       are passed straight through;</li>
 *   <li>1 — merge the per-page word-count maps of a domain and average them;</li>
 *   <li>2 — count outbound links from a domain to each target domain.</li>
 * </ul>
 */
public class WordCountReducer extends Reducer<Text, Text, Text, Text> {

    // Strategy selector; see the class comment. Kept as a constant so the
    // unused branches still compile and can be re-enabled by editing one line.
    private static final int REDUCER_USED = 2;

    // At most this many (word, count) pairs are emitted per domain for the
    // visualisation.
    private static final int MAX_WORDS_PER_DOMAIN = 100;

    // HTML tag/attribute names plus common web artifacts ("http", "nbsp", ...)
    // excluded from word counts. Excluding them may introduce small errors in
    // the counts, which is acceptable here. Held in a static HashSet so lookups
    // are O(1) (the original rebuilt a ~250-entry List per reduce() call and
    // scanned it per word). All entries are lower case because lookups compare
    // against key.toLowerCase(); the original "!DOCTYPE" entry could never
    // match and is stored as "!doctype" here.
    private static final Set<String> HTML_TAGS = new HashSet<String>(Arrays.asList(
        "!doctype", "a", "abbr", "address", "area", "article", "aside", "audio",
        "b", "base", "bdi", "bdo", "blockquote", "body", "br", "button",
        "canvas", "caption", "cite", "code", "col", "colgroup", "data", "datalist",
        "dd", "del", "details", "dfn", "dialog", "div", "dl", "dt",
        "em", "embed", "fieldset", "figcaption", "figure", "footer", "form",
        "h1", "h2", "h3", "h4", "h5", "h6", "head", "header", "hgroup", "hr", "html",
        "i", "iframe", "img", "input", "ins", "kbd", "keygen", "label", "legend",
        "li", "link", "main", "map", "mark", "menu", "menuitem", "meta", "meter",
        "nav", "noscript", "object", "ol", "optgroup", "option", "output",
        "p", "param", "pre", "progress", "q", "rb", "rp", "rt", "rtc", "ruby",
        "s", "samp", "script", "section", "select", "small", "source", "span",
        "strong", "style", "sub", "summary", "sup",
        "table", "tbody", "td", "template", "textarea", "tfoot", "th", "thead",
        "time", "title", "tr", "track", "u", "ul", "var", "video", "wbr",
        // attribute names and other web artifacts
        "hidden", "high", "href", "hreflang", "http-equiv", "icon", "id", "ismap",
        "itemprop", "keytype", "kind", "lang", "language", "list", "loop", "low",
        "manifest", "max", "maxlength", "media", "method", "min", "multiple",
        "email", "file", "name", "novalidate", "open", "optimum",
        "pattern", "ping", "placeholder", "poster", "preload", "pubdate",
        "radiogroup", "readonly", "rel", "required", "reversed", "rows", "rowspan",
        "sandbox", "spellcheck", "scope", "scoped", "seamless", "selected",
        "shape", "size", "type", "text", "password", "sizes", "src", "srcdoc",
        "srclang", "srcset", "start", "step", "tabindex", "target", "usemap",
        "value", "width", "wrap", "border", "buffered", "challenge", "charset",
        "checked", "class", "codebase", "color", "cols", "colspan", "content",
        "contenteditable", "contextmenu", "controls", "coords", "datetime",
        "default", "defer", "dir", "dirname", "disabled", "download", "draggable",
        "dropzone", "enctype", "for", "formaction", "headers", "height",
        "accept", "accept-charset", "accesskey", "action", "align", "alt",
        "async", "autocomplete", "autofocus", "autoplay", "autosave", "bgcolor",
        "background-color", "rgba", "rgb", "nbsp", "com", "http", "www", "px",
        "tag", "item", "amp", "display", "block", "https", "this"
    ));

    /**
     * Reduces all mapper values for one domain according to {@link #REDUCER_USED}.
     *
     * @param key     the domain (source URL) this group belongs to
     * @param values  mixed mapper output: serialized word-count maps
     *                ("Count:{...}") and/or outbound-link target strings
     * @param context MapReduce output sink
     */
    @Override
    public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        switch (REDUCER_USED) {
            case 0:
                // Identity: pass every mapped value straight through.
                for (Text value : values) {
                    context.write(key, value);
                }
                break;
            case 1:
                reduceWordCounts(key, values, context);
                break;
            case 2:
                reduceOutboundLinks(key, values, context);
                break;
            default:
                // Unknown strategy: emit nothing, matching the original switch.
                break;
        }
    }

    /**
     * Merges all per-page word-count maps of a domain, filters out HTML noise,
     * averages the totals over the number of pages merged, and writes the top
     * {@link #MAX_WORDS_PER_DOMAIN} words as a JS-style object for the
     * visualisation.
     */
    private void reduceWordCounts(Text key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        // Pages merged so far; used to average out samples of different sizes.
        int pageCount = 0;
        HashMap<String, Integer> domainWordCount = new HashMap<String, Integer>();

        for (Text value : values) {
            // Only "Count:{...}" values are serialized word-count maps; link
            // values share the stream and are handled by the link reducer.
            if (!value.toString().startsWith("Count:{")) {
                continue;
            }
            HashMap<String, Integer> pageCounts = WordCountMapper.parseToMap(value.toString());
            // BUG FIX: the original never incremented its counter, so the
            // averaging step below was dead code and raw sums were emitted.
            pageCount++;

            // Fold this page's counts into the domain totals.
            for (Map.Entry<String, Integer> e : pageCounts.entrySet()) {
                if (HTML_TAGS.contains(e.getKey().toLowerCase())) {
                    continue; // skip HTML markup tokens
                }
                Integer previous = domainWordCount.get(e.getKey());
                // null-safe accumulate (a null here caused an NPE historically)
                domainWordCount.put(e.getKey(), (previous == null ? 0 : previous) + e.getValue());
            }
        }

        // Average the totals: samples come in different sizes, so normalize by
        // the number of pages merged (integer division, as before).
        if (pageCount > 0) {
            for (String word : domainWordCount.keySet()) {
                domainWordCount.put(word, domainWordCount.get(word) / pageCount);
            }
        }

        // Sort descending by count so we can keep only the top words.
        Map<String, Integer> sorted = sortByComparator(domainWordCount);

        // Output tailored for the visualisation (not strict JSON, by design):
        // {url:"<domain>", words:[{w:"<word>", c:<count>},...]}
        StringBuilder sb = new StringBuilder();
        sb.append("{url:\"").append(key).append("\", words:[");
        int emitted = 0;
        for (Map.Entry<String, Integer> e : sorted.entrySet()) {
            if (emitted++ >= MAX_WORDS_PER_DOMAIN) {
                break; // the original kept iterating without emitting; same output
            }
            sb.append("{w:\"").append(e.getKey()).append("\", c:").append(e.getValue()).append("},");
        }
        sb.append("]}");
        context.write(key, new Text(sb.toString()));
    }

    /**
     * Counts how many links point from the {@code key} domain to each distinct
     * target domain and writes one JS-style edge object per target.
     */
    private void reduceOutboundLinks(Text key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        // target domain -> number of links from `key` to it
        HashMap<String, Integer> outboundCounts = new HashMap<String, Integer>();
        for (Text value : values) {
            String target = value.toString();
            // Word-count maps share the value stream; skip them here.
            if (target.startsWith("Count:{")) {
                continue;
            }
            Integer previous = outboundCounts.get(target);
            outboundCounts.put(target, previous == null ? 1 : previous + 1);
        }

        // Output tailored for the visualisation: one edge object per target.
        for (Map.Entry<String, Integer> e : outboundCounts.entrySet()) {
            String edge = "{srce: \"" + key + "\", dest:\"" + e.getKey() + "\", count:" + e.getValue() + "},";
            context.write(key, new Text(edge));
        }
    }

    /**
     * Returns a copy of {@code unsortMap} ordered by descending value; the
     * returned LinkedHashMap's iteration order is the sort order.
     */
    public static Map<String, Integer> sortByComparator(Map<String, Integer> unsortMap) {
        List<Entry<String, Integer>> entries = new LinkedList<Entry<String, Integer>>(unsortMap.entrySet());
        Collections.sort(entries, new Comparator<Entry<String, Integer>>() {
            public int compare(Entry<String, Integer> o1, Entry<String, Integer> o2) {
                return o2.getValue().compareTo(o1.getValue()); // descending
            }
        });
        Map<String, Integer> sortedMap = new LinkedHashMap<String, Integer>();
        for (Entry<String, Integer> entry : entries) {
            sortedMap.put(entry.getKey(), entry.getValue());
        }
        return sortedMap;
    }
}
| |
/*******************************************************************************
* Copyright (c) 2012 Neil Bartlett.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Neil Bartlett - initial API and implementation
******************************************************************************/
package org.example.tests;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;
import java.io.File;
import java.io.IOException;
import java.io.StringWriter;
import java.net.InetAddress;
import java.net.URL;
import java.util.Dictionary;
import java.util.Hashtable;
import java.util.Properties;
import javax.net.ssl.HttpsURLConnection;
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManager;
import javax.servlet.Servlet;
import org.bndtools.service.endpoint.Endpoint;
import org.example.tests.api.MyRunnable;
import org.osgi.framework.Bundle;
import org.osgi.framework.BundleContext;
import org.osgi.framework.FrameworkUtil;
import org.osgi.framework.ServiceReference;
import org.osgi.framework.ServiceRegistration;
import org.restlet.data.MediaType;
import org.restlet.resource.ClientResource;
import org.restlet.resource.ResourceException;
import aQute.lib.io.IO;
/**
 * Integration tests for the REST adapter. Resources are either registered as
 * OSGi services or installed as example bundles; each test verifies the
 * advertised Servlet/Endpoint services and the actual HTTP(S) behaviour.
 */
public class RestAdapterTest extends AbstractDelayedTestCase {

    private static final int PORT1 = 18080;      // plain-HTTP listener
    private static final int HTTPS_PORT = 18443; // confidential (TLS) listener

    private final BundleContext context = FrameworkUtil.getBundle(this.getClass()).getBundleContext();

    // Resolved once so every test targets the same local interface.
    private final String localhost;
    private final String address1; // "host:port" of the HTTP listener

    public RestAdapterTest() throws Exception {
        localhost = InetAddress.getLocalHost().getHostAddress();
        address1 = localhost + ":" + PORT1;
    }

    /** With nothing registered there are no Endpoint services and HTTP 404s. */
    public void testNothingRegisteredAtFirst() throws Exception {
        ServiceReference[] refs = context.getAllServiceReferences(Endpoint.class.getName(), null);
        assertNull(refs);

        ClientResource resource = new ClientResource("http://" + address1 + "/");
        resource.setRetryOnError(false);
        try {
            resource.get().write(new StringWriter());
            fail("Should throw ResourceException");
        } catch (ResourceException e) {
            assertEquals(404, e.getStatus().getCode());
        }
    }

    /**
     * A singleton resource service must be published as a Servlet and an
     * Endpoint (with its custom properties propagated), be reachable over
     * HTTP, and disappear completely when unregistered.
     */
    public void testSimpleSingleton() throws Exception {
        // Register the singleton service
        Dictionary<String, Object> svcProps = new Hashtable<String, Object>();
        svcProps.put("osgi.rest.alias", "/test1");
        svcProps.put("foo", "bar");
        ServiceRegistration svcReg = context.registerService(Object.class.getName(), new SingletonServiceResource1(), svcProps);

        // Check for advertised Servlet service
        ServiceReference[] refs = context.getAllServiceReferences(Servlet.class.getName(), null);
        assertNotNull(refs);
        assertEquals(1, refs.length);
        assertEquals("/test1", refs[0].getProperty("bndtools.rt.http.alias"));
        assertEquals("bar", refs[0].getProperty("foo"));

        // Check for advertised Endpoint service
        ServiceReference[] endpointRefs = context.getAllServiceReferences(Endpoint.class.getName(), null);
        assertNotNull(endpointRefs);
        assertEquals(1, endpointRefs.length);
        assertEquals("*", endpointRefs[0].getProperty("service.exported.interfaces"));
        assertEquals("bar", endpointRefs[0].getProperty("foo"));

        // Connect by HTTP
        ClientResource resource = new ClientResource("http://" + address1 + "/test1/foo");
        resource.setRetryOnError(false);
        StringWriter writer = new StringWriter();
        resource.get(MediaType.TEXT_PLAIN).write(writer);
        assertEquals("Hello World", writer.toString());

        // Unregister
        svcReg.unregister();

        // Check both advertised services are gone
        refs = context.getAllServiceReferences(Servlet.class.getName(), null);
        assertNull(refs);
        endpointRefs = context.getAllServiceReferences(Endpoint.class.getName(), null);
        assertNull(endpointRefs);

        // Check the resource is no longer reachable
        resource = new ClientResource("http://" + address1 + "/test1/foo");
        resource.setRetryOnError(false);
        try {
            resource.get().write(new StringWriter());
            fail("Should throw ResourceException");
        } catch (ResourceException e) {
            assertEquals(404, e.getStatus().getCode());
        }
    }

    /**
     * A resource registered with a confidential filter must be served on the
     * HTTPS listener. The test trusts any certificate/hostname since the
     * server uses a self-signed test certificate.
     */
    public void testSecuredSingleton() throws Exception {
        // Register the singleton service
        Dictionary<String, Object> svcProps = new Hashtable<String, Object>();
        svcProps.put("osgi.rest.alias", "/test2");
        svcProps.put("filter", "(confidential=true)");
        ServiceRegistration svcReg = context.registerService(Object.class.getName(), new SingletonServiceResource1(), svcProps);

        // Check for advertised Servlet service
        ServiceReference[] refs = context.getAllServiceReferences(Servlet.class.getName(), null);
        assertNotNull(refs);
        assertEquals(1, refs.length);
        assertEquals("/test2", refs[0].getProperty("bndtools.rt.http.alias"));
        assertEquals("(confidential=true)", refs[0].getProperty("filter"));

        // Check for advertised Endpoint service
        ServiceReference[] endpointRefs = context.getAllServiceReferences(Endpoint.class.getName(), null);
        assertNotNull(endpointRefs);
        assertEquals(1, endpointRefs.length);
        assertEquals("*", endpointRefs[0].getProperty("service.exported.interfaces"));

        // Connect by HTTPS, trusting the self-signed test certificate
        SSLContext sslContext = SSLContext.getInstance("TLS");
        sslContext.init(null, new TrustManager[] { new NoopTrustManager() }, null);
        HttpsURLConnection connection = (HttpsURLConnection) new URL("https://" + localhost + ":" + HTTPS_PORT + "/test2/foo").openConnection();
        connection.setSSLSocketFactory(sslContext.getSocketFactory());
        connection.setHostnameVerifier(new NoopHostnameVerifier());
        connection.setRequestProperty("Accept", "text/plain");
        String output = IO.collect(connection.getInputStream());
        assertEquals("Hello World", output);

        // Clean up
        svcReg.unregister();
    }

    /** Installs the bundle at {@code file} into the framework and starts it. */
    private Bundle installAndStart(File file) throws Exception {
        // File.toURL() is deprecated because it does not escape special
        // characters; go through toURI() to build a well-formed URL.
        Bundle bundle = context.installBundle(file.getAbsoluteFile().toURI().toURL().toString());
        bundle.start();
        return bundle;
    }

    /** A resource class in an installed bundle is published and reachable. */
    public void testSimpleClassResource() throws Exception {
        // Install & start bundle
        Bundle exampleBundle = installAndStart(new File("generated/org.bndtools.rt.rest.test.example1.jar"));

        // Check for the servlet service
        ServiceReference[] refs = context.getAllServiceReferences(Servlet.class.getName(), null);
        assertNotNull(refs);
        assertEquals(1, refs.length);
        assertEquals("/example1", refs[0].getProperty("bndtools.rt.http.alias"));

        // Check for advertised Endpoint service
        ServiceReference[] endpointRefs = context.getAllServiceReferences(Endpoint.class.getName(), null);
        assertNotNull(endpointRefs);
        assertEquals(1, endpointRefs.length);
        assertEquals("*", endpointRefs[0].getProperty("service.exported.interfaces"));

        // Connect by HTTP
        ClientResource resource = new ClientResource("http://" + address1 + "/example1/foo1");
        resource.setRetryOnError(false);
        StringWriter output = new StringWriter();
        resource.get(MediaType.TEXT_PLAIN).write(output);
        assertEquals("This is an easy resource (as plain text)", output.toString());

        // Uninstall bundle
        exampleBundle.uninstall();
    }

    /** A bundle that declares no alias is mounted at the root alias "/". */
    public void testClassResourceDefaultAlias() throws Exception {
        Bundle exampleBundle = installAndStart(new File("generated/org.bndtools.rt.rest.test.example2.jar"));

        // Check for the servlet service
        ServiceReference[] refs = context.getAllServiceReferences(Servlet.class.getName(), null);
        assertNotNull(refs);
        assertEquals(1, refs.length);
        assertEquals("/", refs[0].getProperty("bndtools.rt.http.alias"));

        // Check for advertised Endpoint service
        ServiceReference[] endpointRefs = context.getAllServiceReferences(Endpoint.class.getName(), null);
        assertNotNull(endpointRefs);
        assertEquals(1, endpointRefs.length);
        assertEquals("*", endpointRefs[0].getProperty("service.exported.interfaces"));

        // Connect by HTTP
        ClientResource resource = new ClientResource("http://" + address1 + "/foo1");
        resource.setRetryOnError(false);
        StringWriter output = new StringWriter();
        resource.get(MediaType.TEXT_PLAIN).write(output);
        assertEquals("This is an easy resource (as plain text)", output.toString());

        // Clean up
        exampleBundle.uninstall();
    }

    /**
     * A resource with an unsatisfied mandatory injection must answer 503 and
     * report the missing service type in a plain-text body.
     */
    public void testClassInjectionMissingMandatoryRef() throws Exception {
        Bundle exampleBundle = installAndStart(new File("generated/org.bndtools.rt.rest.test.example1.jar"));

        ClientResource resource = new ClientResource("http://" + address1 + "/example1/foo2");
        resource.setRetryOnError(false);
        try {
            resource.get(MediaType.TEXT_PLAIN);
            fail("Should fail with ResourceException");
        } catch (ResourceException e) {
            // expected: 503 with the missing service's class name as the body
            assertEquals(503, e.getStatus().getCode());
            assertEquals(MediaType.TEXT_PLAIN, resource.getResponseEntity().getMediaType());
            StringWriter output = new StringWriter();
            resource.getResponseEntity().write(output);
            assertEquals(MyRunnable.class.getName(), output.toString());
        }
        exampleBundle.uninstall();
    }

    /** Once the mandatory service exists, the resource works and uses it. */
    public void testClassInjectionSatisfiedMandatoryRef() throws Exception {
        Bundle exampleBundle = installAndStart(new File("generated/org.bndtools.rt.rest.test.example1.jar"));

        MyRunnable mockRunnable = mock(MyRunnable.class);
        ServiceRegistration svcReg = context.registerService(MyRunnable.class.getName(), mockRunnable, null);

        ClientResource resource = new ClientResource("http://" + address1 + "/example1/foo2");
        resource.setRetryOnError(false);
        StringWriter output = new StringWriter();
        resource.get(MediaType.TEXT_PLAIN).write(output);
        assertEquals("This is an easy resource (as plain text)", output.toString());

        // The injected service must have been invoked exactly once.
        verify(mockRunnable).run();
        verifyNoMoreInteractions(mockRunnable);

        svcReg.unregister();
        exampleBundle.uninstall();
    }

    /** An optional injection that is unsatisfied injects null ("NULL" body). */
    public void testClassInjectionUnsatisfiedOptionalRef() throws Exception {
        Bundle exampleBundle = installAndStart(new File("generated/org.bndtools.rt.rest.test.example1.jar"));

        ClientResource resource = new ClientResource("http://" + address1 + "/example1/foo3");
        resource.setRetryOnError(false);
        StringWriter output = new StringWriter();
        resource.get(MediaType.TEXT_PLAIN).write(output);
        assertEquals("NULL", output.toString());

        exampleBundle.uninstall();
    }

    /** An optional injection that is satisfied injects the service. */
    public void testClassInjectionSatisfiedOptionalRef() throws Exception {
        Bundle exampleBundle = installAndStart(new File("generated/org.bndtools.rt.rest.test.example1.jar"));

        MyRunnable mockRunnable = mock(MyRunnable.class);
        ServiceRegistration svcReg = context.registerService(MyRunnable.class.getName(), mockRunnable, null);

        ClientResource resource = new ClientResource("http://" + address1 + "/example1/foo3");
        resource.setRetryOnError(false);
        StringWriter output = new StringWriter();
        resource.get(MediaType.TEXT_PLAIN).write(output);
        assertEquals("NOT NULL", output.toString());

        svcReg.unregister();
        exampleBundle.uninstall();
    }

    /** Same as the unsatisfied-optional case, using the alternate-order resource. */
    public void testClassInjectionUnsatisfiedOptionalRefAlternateOrder() throws Exception {
        Bundle exampleBundle = installAndStart(new File("generated/org.bndtools.rt.rest.test.example1.jar"));

        ClientResource resource = new ClientResource("http://" + address1 + "/example1/foo4");
        resource.setRetryOnError(false);
        StringWriter output = new StringWriter();
        resource.get(MediaType.TEXT_PLAIN).write(output);
        assertEquals("NULL", output.toString());

        exampleBundle.uninstall();
    }

    /** Same as the satisfied-optional case, using the alternate-order resource. */
    public void testClassInjectionSatisfiedOptionalRefAlternateOrder() throws Exception {
        Bundle exampleBundle = installAndStart(new File("generated/org.bndtools.rt.rest.test.example1.jar"));

        MyRunnable mockRunnable = mock(MyRunnable.class);
        ServiceRegistration svcReg = context.registerService(MyRunnable.class.getName(), mockRunnable, null);

        ClientResource resource = new ClientResource("http://" + address1 + "/example1/foo4");
        resource.setRetryOnError(false);
        StringWriter output = new StringWriter();
        resource.get(MediaType.TEXT_PLAIN).write(output);
        assertEquals("NOT NULL", output.toString());

        svcReg.unregister();
        exampleBundle.uninstall();
    }

    /** A filtered injection ignores services that don't match the target filter. */
    public void testClassInjectionUnsatisfiedFilterRef() throws Exception {
        Bundle exampleBundle = installAndStart(new File("generated/org.bndtools.rt.rest.test.example1.jar"));

        // Registered without the property the resource's filter requires.
        MyRunnable mockRunnable = mock(MyRunnable.class);
        ServiceRegistration svcReg = context.registerService(MyRunnable.class.getName(), mockRunnable, null);

        ClientResource resource = new ClientResource("http://" + address1 + "/example1/foo5");
        resource.setRetryOnError(false);
        StringWriter output = new StringWriter();
        resource.get(MediaType.TEXT_PLAIN).write(output);
        assertEquals("NULL", output.toString());

        svcReg.unregister();
        exampleBundle.uninstall();
    }

    /** A filtered injection picks up a service carrying the matching property. */
    public void testClassInjectionSatisfiedFilterRef() throws Exception {
        Bundle exampleBundle = installAndStart(new File("generated/org.bndtools.rt.rest.test.example1.jar"));

        MyRunnable mockRunnable = mock(MyRunnable.class);
        // Hashtable<String, Object> for consistency with the other tests
        // (the original used java.util.Properties, a raw Hashtable<Object,Object>).
        Dictionary<String, Object> props = new Hashtable<String, Object>();
        props.put("foo", "bar");
        ServiceRegistration svcReg = context.registerService(MyRunnable.class.getName(), mockRunnable, props);

        ClientResource resource = new ClientResource("http://" + address1 + "/example1/foo5");
        resource.setRetryOnError(false);
        StringWriter output = new StringWriter();
        resource.get(MediaType.TEXT_PLAIN).write(output);
        assertEquals("NOT NULL", output.toString());

        svcReg.unregister();
        exampleBundle.uninstall();
    }

    /** A collection injection receives every matching service ("2"). */
    public void testClassInjectionCollection() throws Exception {
        Bundle exampleBundle = installAndStart(new File("generated/org.bndtools.rt.rest.test.example1.jar"));

        MyRunnable mockRunnable1 = mock(MyRunnable.class);
        MyRunnable mockRunnable2 = mock(MyRunnable.class);
        ServiceRegistration svcReg1 = context.registerService(MyRunnable.class.getName(), mockRunnable1, null);
        ServiceRegistration svcReg2 = context.registerService(MyRunnable.class.getName(), mockRunnable2, null);

        ClientResource resource = new ClientResource("http://" + address1 + "/example1/foo6");
        resource.setRetryOnError(false);
        StringWriter output = new StringWriter();
        resource.get(MediaType.TEXT_PLAIN).write(output);
        assertEquals("2", output.toString());

        svcReg1.unregister();
        svcReg2.unregister();
        exampleBundle.uninstall();
    }

    // Disabled (X-prefixed): mandatory collection with no services should 503.
    public void XtestClassInjectionCollectionUnsatisfied() throws Exception {
        Bundle exampleBundle = installAndStart(new File("generated/org.bndtools.rt.rest.test.example1.jar"));

        ClientResource resource = new ClientResource("http://" + address1 + "/example1/foo6");
        resource.setRetryOnError(false);
        try {
            resource.get(MediaType.TEXT_PLAIN);
            fail("Should fail with ResourceException");
        } catch (ResourceException e) {
            // expected: 503 with the missing service's class name as the body
            assertEquals(503, e.getStatus().getCode());
            assertEquals(MediaType.TEXT_PLAIN, resource.getResponseEntity().getMediaType());
            StringWriter output = new StringWriter();
            resource.getResponseEntity().write(output);
            assertEquals(MyRunnable.class.getName(), output.toString());
        }
        exampleBundle.uninstall();
    }

    /** An optional collection injection receives every matching service ("2"). */
    public void testClassInjectionCollectionOptionalSatisfied() throws Exception {
        Bundle exampleBundle = installAndStart(new File("generated/org.bndtools.rt.rest.test.example1.jar"));

        MyRunnable mockRunnable1 = mock(MyRunnable.class);
        MyRunnable mockRunnable2 = mock(MyRunnable.class);
        ServiceRegistration svcReg1 = context.registerService(MyRunnable.class.getName(), mockRunnable1, null);
        ServiceRegistration svcReg2 = context.registerService(MyRunnable.class.getName(), mockRunnable2, null);

        ClientResource resource = new ClientResource("http://" + address1 + "/example1/foo7");
        resource.setRetryOnError(false);
        StringWriter output = new StringWriter();
        resource.get(MediaType.TEXT_PLAIN).write(output);
        assertEquals("2", output.toString());

        svcReg1.unregister();
        svcReg2.unregister();
        exampleBundle.uninstall();
    }

    /** An optional collection injection with no services is empty ("0"), not an error. */
    public void testClassInjectionCollectionOptionalUnsatisfied() throws Exception {
        Bundle exampleBundle = installAndStart(new File("generated/org.bndtools.rt.rest.test.example1.jar"));

        ClientResource resource = new ClientResource("http://" + address1 + "/example1/foo7");
        resource.setRetryOnError(false);
        StringWriter output = new StringWriter();
        resource.get(MediaType.TEXT_PLAIN).write(output);
        assertEquals("0", output.toString());

        exampleBundle.uninstall();
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.