repo_name stringlengths 5 108 | path stringlengths 6 333 | size stringlengths 1 6 | content stringlengths 4 977k | license stringclasses 15 values |
|---|---|---|---|---|
hpehl/hal.next | app/src/test/resources/org/jboss/hal/processor/mbui/table/Mbui_CustomActionView.java | 2940 | package org.jboss.hal.processor.mbui.table;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.annotation.Generated;
import javax.inject.Inject;
import com.google.gwt.safehtml.shared.SafeHtmlUtils;
import elemental2.dom.HTMLElement;
import org.jboss.gwt.elemento.core.builder.ElementsBuilder;
import org.jboss.gwt.elemento.core.Elements;
import org.jboss.hal.ballroom.form.Form;
import org.jboss.hal.ballroom.table.Scope;
import org.jboss.hal.ballroom.ExpressionUtil;
import org.jboss.hal.ballroom.LayoutBuilder;
import org.jboss.hal.ballroom.autocomplete.ReadChildrenAutoComplete;
import org.jboss.hal.core.mbui.dialog.AddResourceDialog;
import org.jboss.hal.core.mbui.form.GroupedForm;
import org.jboss.hal.core.mbui.form.ModelNodeForm;
import org.jboss.hal.core.mbui.table.ModelNodeTable;
import org.jboss.hal.core.mbui.MbuiContext;
import org.jboss.hal.dmr.Operation;
import org.jboss.hal.dmr.ResourceAddress;
import org.jboss.hal.meta.AddressTemplate;
import org.jboss.hal.meta.Metadata;
import org.jboss.hal.meta.security.Constraint;
import org.jboss.hal.resources.Ids;
import org.jboss.hal.spi.Message;
import org.jboss.hal.spi.MessageEvent;
import static java.util.Arrays.asList;
import static org.jboss.gwt.elemento.core.Elements.*;
import static org.jboss.hal.ballroom.LayoutBuilder.column;
import static org.jboss.hal.ballroom.LayoutBuilder.row;
import static org.jboss.hal.dmr.ModelDescriptionConstants.ADD;
import static org.jboss.hal.dmr.ModelDescriptionConstants.READ_RESOURCE_OPERATION;
/*
* WARNING! This class is generated. Do not modify.
*/
@Generated("org.jboss.hal.processor.mbui.MbuiViewProcessor")
public final class Mbui_CustomActionView extends CustomActionView {
private final Metadata metadata0;
private final Map<String, HTMLElement> expressionElements;
@Inject
@SuppressWarnings("unchecked")
public Mbui_CustomActionView(MbuiContext mbuiContext) {
super(mbuiContext);
AddressTemplate metadata0Template = AddressTemplate.of("/subsystem=foo");
this.metadata0 = mbuiContext.metadataRegistry().lookup(metadata0Template);
this.expressionElements = new HashMap<>();
table = new ModelNodeTable.Builder<org.jboss.hal.dmr.NamedNode>("table", metadata0)
.button("Foo", table -> presenter.reload())
.columns("name")
.build();
HTMLElement html0;
HTMLElement root = row()
.add(column()
.add(html0 = div()
.innerHtml(SafeHtmlUtils.fromSafeConstant("<h1>Table</h1>"))
.element())
.add(table)
)
.element();
expressionElements.put("html0", html0);
registerAttachable(table);
initElement(root);
}
@Override
public void attach() {
super.attach();
}
}
| apache-2.0 |
wangshijun101/JavaSenior | JavaLaoA/src/main/java/com/flying/promotion/atomic/array/AtomicIntegerArrayTest.java | 2594 | package com.flying.promotion.atomic.array;
import java.util.concurrent.atomic.AtomicIntegerArray;
/**
* ������˵����
* 10���̲߳����ҵ�
* @author zhongyin.xy
*
*/
public class AtomicIntegerArrayTest {
/**
* �����ķ����б�
* @see AtomicIntegerArray#addAndGet(int, int) ִ�мӷ�����һ������Ϊ������±꣬�ڶ�������Ϊ���ӵ��������������Ӻ�Ľ��
* @see AtomicIntegerArray#compareAndSet(int, int, int) �Ա��ģ�����1�������±꣬����2��ԭʼֵ������3����Ŀ��ֵ���ijɹ�����true����false
* @see AtomicIntegerArray#decrementAndGet(int) ����Ϊ�����±꣬�������Ӧ���ּ���1�����ؼ��ٺ������
* @see AtomicIntegerArray#incrementAndGet(int) ����Ϊ�����±꣬�������Ӧ��������1���������Ӻ������
*
* @see AtomicIntegerArray#getAndAdd(int, int) ��addAndGet���ƣ������Ƿ���ֵ�DZ仯ǰ������
* @see AtomicIntegerArray#getAndDecrement(int) ��decrementAndGet���ƣ������Ƿ��ر仯ǰ������
* @see AtomicIntegerArray#getAndIncrement(int) ��incrementAndGet���ƣ������Ƿ��ر仯ǰ������
* @see AtomicIntegerArray#getAndSet(int, int) ����Ӧ�±����������Ϊָ��ֵ���ڶ�������Ϊ���õ�ֵ�������DZ仯ǰ������
*/
private final static AtomicIntegerArray ATOMIC_INTEGER_ARRAY = new AtomicIntegerArray(10);
public static void main(String []args) throws InterruptedException {
Thread []threads = new Thread[100];
for(int i = 0 ; i < 100 ; i++) {
final int index = i % 10;
final int threadNum = i;
threads[i] = new Thread() {
public void run() {
int result = ATOMIC_INTEGER_ARRAY.addAndGet(index, index + 1);
System.out.println("�̱߳��Ϊ��" + threadNum + " , ��Ӧ��ԭʼֵΪ��" + (index + 1) + "�����Ӻ�Ľ��Ϊ��" + result);
}
};
threads[i].start();
}
for(Thread thread : threads) {
thread.join();
}
System.out.println("=========================>\nִ���Ѿ���ɣ�����б�");
for(int i = 0 ; i < ATOMIC_INTEGER_ARRAY.length() ; i++) {
System.out.println(ATOMIC_INTEGER_ARRAY.get(i));
}
}
}
| apache-2.0 |
horsy/SchoolManage | src/com/school/view/web/DelStudent.java | 2167 | package com.school.view.web;
import java.util.ArrayList;
import java.util.List;
import javax.servlet.http.HttpServletResponse;
import org.apache.struts2.ServletActionContext;
import org.springframework.beans.factory.annotation.Autowired;
import com.opensymphony.xwork2.ActionSupport;
import com.school.publicSomething.PublicSTH;
import com.school.service.ServiceDao;
public class DelStudent extends ActionSupport {
/**
*
*/
private static final long serialVersionUID = 6944211538404971343L;
private String id;
@Autowired
private ServiceDao serviceDao;
public void setId(String id) {
this.id = id;
}
private String classId;
public void setClassId(String classId) {
this.classId = classId;
}
@Override
public String execute() throws Exception {
// TODO Auto-generated method stub
String info = "";
String success = "删除学生成功\\n";
if (id != null && id.length() > 0) {
if (id.indexOf(";") > -1 || id.indexOf("'") > -1) {
info = "参数错误";
} else {
String[] idArray = id.split("_obj");
List<Integer> idList = new ArrayList<Integer>();
for (String i : idArray) {
if (PublicSTH.isNumber(i)) {
idList.add(Integer.parseInt(i));
}
}
Integer[] idA = new Integer[idList.size()];
for (int i = 0; i < idList.size(); i++) {
idA[i] = idList.get(i);
}
serviceDao.delStudentByIdArray(idA);
info = success;
}
} else {
info = "请选择要删除的学生\\n";
}
HttpServletResponse response = ServletActionContext.getResponse();
response.setContentType("text/html;charset=UTF-8");
response.getWriter().append("<script type='text/javascript'>");
if (!"".equals(info)) {
response.getWriter().append("alert('" + info + "');");
}
if (info.equals(success)) {
response.getWriter().append(
"parent.showStudentList('" + classId + "');");
// response.getWriter().append("parent.dialog_close('bgDiv_2');");
} else {
}
response.getWriter().append("</script>");
response.getWriter().flush();
response.getWriter().close();
return null;
}
}
| apache-2.0 |
chacha1337/SmartCityUniversalMarket | SmartCityUniversalMarket/app/src/main/java/com/l/smartcityuniversalmarket/loader/LoadAdapter.java | 1599 | package com.l.smartcityuniversalmarket.loader;
import android.app.Activity;
import android.content.Context;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.BaseAdapter;
import android.widget.ImageView;
import android.widget.TextView;
import com.l.smartcityuniversalmarket.R;
public class LoadAdapter extends BaseAdapter {
private Activity activity;
private String[] data;
private static LayoutInflater inflater = null;
private ImageLoader imageLoader;
public LoadAdapter(Activity activity, String[] d) {
this.activity = activity;
data = d;
inflater = (LayoutInflater) activity.getSystemService(Context.LAYOUT_INFLATER_SERVICE);
imageLoader = new ImageLoader(activity.getApplicationContext());
}
public View getView(int position, View convertView, ViewGroup parent) {
View view = convertView;
if (convertView == null)
view = inflater.inflate(R.layout.item, null);
TextView text = (TextView) view.findViewById(R.id.text);
ImageView image = (ImageView) view.findViewById(R.id.image);
String str = "item" + position;
text.setText(str);
// imageLoader.displayImage(data[position], image);
return view;
}
public int getCount() {
return data.length;
}
public Object getItem(int position) {
return position;
}
public long getItemId(int position) {
return position;
}
public ImageLoader getImageLoader(){
return imageLoader;
}
}
| apache-2.0 |
NJUJYB/disYarn | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java | 26636 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.client.api.impl;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.UndeclaredThrowableException;
import java.net.ConnectException;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.SocketTimeoutException;
import java.net.URI;
import java.net.URL;
import java.net.URLConnection;
import java.security.GeneralSecurityException;
import java.security.PrivilegedExceptionAction;
import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.HttpsURLConnection;
import javax.net.ssl.SSLSocketFactory;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
import org.apache.hadoop.security.ssl.SSLFactory;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;
import org.apache.hadoop.security.token.delegation.web.PseudoDelegationTokenAuthenticator;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain;
import org.apache.hadoop.yarn.api.records.timeline.TimelineDomains;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
import org.apache.hadoop.yarn.client.api.TimelineClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
import org.codehaus.jackson.map.ObjectMapper;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.ClientHandlerException;
import com.sun.jersey.api.client.ClientRequest;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.config.ClientConfig;
import com.sun.jersey.api.client.config.DefaultClientConfig;
import com.sun.jersey.api.client.filter.ClientFilter;
import com.sun.jersey.client.urlconnection.HttpURLConnectionFactory;
import com.sun.jersey.client.urlconnection.URLConnectionClientHandler;
@Private
@Evolving
public class TimelineClientImpl extends TimelineClient {
private static final Log LOG = LogFactory.getLog(TimelineClientImpl.class);
private static final String RESOURCE_URI_STR = "/ws/v1/timeline/";
private static final Joiner JOINER = Joiner.on("");
public final static int DEFAULT_SOCKET_TIMEOUT = 1 * 60 * 1000; // 1 minute
private static Options opts;
private static final String ENTITY_DATA_TYPE = "entity";
private static final String DOMAIN_DATA_TYPE = "domain";
static {
opts = new Options();
opts.addOption("put", true, "Put the timeline entities/domain in a JSON file");
opts.getOption("put").setArgName("Path to the JSON file");
opts.addOption(ENTITY_DATA_TYPE, false, "Specify the JSON file contains the entities");
opts.addOption(DOMAIN_DATA_TYPE, false, "Specify the JSON file contains the domain");
opts.addOption("help", false, "Print usage");
}
private Client client;
private ConnectionConfigurator connConfigurator;
private DelegationTokenAuthenticator authenticator;
private DelegationTokenAuthenticatedURL.Token token;
private URI resURI;
private UserGroupInformation authUgi;
private String doAsUser;
private Configuration configuration;
private float timelineServiceVersion;
private TimelineWriter timelineWriter;
@Private
@VisibleForTesting
TimelineClientConnectionRetry connectionRetry;
// Abstract class for an operation that should be retried by timeline client
@Private
@VisibleForTesting
public static abstract class TimelineClientRetryOp {
// The operation that should be retried
public abstract Object run() throws IOException;
// The method to indicate if we should retry given the incoming exception
public abstract boolean shouldRetryOn(Exception e);
}
// Class to handle retry
// Outside this class, only visible to tests
@Private
@VisibleForTesting
static class TimelineClientConnectionRetry {
// maxRetries < 0 means keep trying
@Private
@VisibleForTesting
public int maxRetries;
@Private
@VisibleForTesting
public long retryInterval;
// Indicates if retries happened last time. Only tests should read it.
// In unit tests, retryOn() calls should _not_ be concurrent.
private boolean retried = false;
@Private
@VisibleForTesting
boolean getRetired() {
return retried;
}
// Constructor with default retry settings
public TimelineClientConnectionRetry(Configuration conf) {
Preconditions.checkArgument(conf.getInt(
YarnConfiguration.TIMELINE_SERVICE_CLIENT_MAX_RETRIES,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_CLIENT_MAX_RETRIES) >= -1,
"%s property value should be greater than or equal to -1",
YarnConfiguration.TIMELINE_SERVICE_CLIENT_MAX_RETRIES);
Preconditions
.checkArgument(
conf.getLong(
YarnConfiguration.TIMELINE_SERVICE_CLIENT_RETRY_INTERVAL_MS,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_CLIENT_RETRY_INTERVAL_MS) > 0,
"%s property value should be greater than zero",
YarnConfiguration.TIMELINE_SERVICE_CLIENT_RETRY_INTERVAL_MS);
maxRetries = conf.getInt(
YarnConfiguration.TIMELINE_SERVICE_CLIENT_MAX_RETRIES,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_CLIENT_MAX_RETRIES);
retryInterval = conf.getLong(
YarnConfiguration.TIMELINE_SERVICE_CLIENT_RETRY_INTERVAL_MS,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_CLIENT_RETRY_INTERVAL_MS);
}
public Object retryOn(TimelineClientRetryOp op)
throws RuntimeException, IOException {
int leftRetries = maxRetries;
retried = false;
// keep trying
while (true) {
try {
// try perform the op, if fail, keep retrying
return op.run();
} catch (IOException | RuntimeException e) {
// break if there's no retries left
if (leftRetries == 0) {
break;
}
if (op.shouldRetryOn(e)) {
logException(e, leftRetries);
} else {
throw e;
}
}
if (leftRetries > 0) {
leftRetries--;
}
retried = true;
try {
// sleep for the given time interval
Thread.sleep(retryInterval);
} catch (InterruptedException ie) {
LOG.warn("Client retry sleep interrupted! ");
}
}
throw new RuntimeException("Failed to connect to timeline server. "
+ "Connection retries limit exceeded. "
+ "The posted timeline event may be missing");
};
private void logException(Exception e, int leftRetries) {
if (leftRetries > 0) {
LOG.info("Exception caught by TimelineClientConnectionRetry,"
+ " will try " + leftRetries + " more time(s).\nMessage: "
+ e.getMessage());
} else {
// note that maxRetries may be -1 at the very beginning
LOG.info("ConnectionException caught by TimelineClientConnectionRetry,"
+ " will keep retrying.\nMessage: "
+ e.getMessage());
}
}
}
private class TimelineJerseyRetryFilter extends ClientFilter {
@Override
public ClientResponse handle(final ClientRequest cr)
throws ClientHandlerException {
// Set up the retry operation
TimelineClientRetryOp jerseyRetryOp = new TimelineClientRetryOp() {
@Override
public Object run() {
// Try pass the request, if fail, keep retrying
return getNext().handle(cr);
}
@Override
public boolean shouldRetryOn(Exception e) {
// Only retry on connection exceptions
return (e instanceof ClientHandlerException)
&& (e.getCause() instanceof ConnectException);
}
};
try {
return (ClientResponse) connectionRetry.retryOn(jerseyRetryOp);
} catch (IOException e) {
throw new ClientHandlerException("Jersey retry failed!\nMessage: "
+ e.getMessage());
}
}
}
public TimelineClientImpl() {
super(TimelineClientImpl.class.getName());
}
protected void serviceInit(Configuration conf) throws Exception {
this.configuration = conf;
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
UserGroupInformation realUgi = ugi.getRealUser();
if (realUgi != null) {
authUgi = realUgi;
doAsUser = ugi.getShortUserName();
} else {
authUgi = ugi;
doAsUser = null;
}
ClientConfig cc = new DefaultClientConfig();
cc.getClasses().add(YarnJacksonJaxbJsonProvider.class);
connConfigurator = newConnConfigurator(conf);
if (UserGroupInformation.isSecurityEnabled()) {
authenticator = new KerberosDelegationTokenAuthenticator();
} else {
authenticator = new PseudoDelegationTokenAuthenticator();
}
authenticator.setConnectionConfigurator(connConfigurator);
token = new DelegationTokenAuthenticatedURL.Token();
connectionRetry = new TimelineClientConnectionRetry(conf);
client = new Client(new URLConnectionClientHandler(
new TimelineURLConnectionFactory()), cc);
TimelineJerseyRetryFilter retryFilter = new TimelineJerseyRetryFilter();
client.addFilter(retryFilter);
if (YarnConfiguration.useHttps(conf)) {
resURI = URI
.create(JOINER.join("https://", conf.get(
YarnConfiguration.TIMELINE_SERVICE_WEBAPP_HTTPS_ADDRESS,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_WEBAPP_HTTPS_ADDRESS),
RESOURCE_URI_STR));
} else {
resURI = URI.create(JOINER.join("http://", conf.get(
YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_WEBAPP_ADDRESS),
RESOURCE_URI_STR));
}
LOG.info("Timeline service address: " + resURI);
timelineServiceVersion =
conf.getFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_VERSION);
super.serviceInit(conf);
}
@Override
protected void serviceStart() throws Exception {
timelineWriter = createTimelineWriter(
configuration, authUgi, client, resURI);
}
protected TimelineWriter createTimelineWriter(Configuration conf,
UserGroupInformation ugi, Client webClient, URI uri)
throws IOException {
if (Float.compare(this.timelineServiceVersion, 1.5f) == 0) {
return new FileSystemTimelineWriter(
conf, ugi, webClient, uri);
} else {
return new DirectTimelineWriter(ugi, webClient, uri);
}
}
@Override
protected void serviceStop() throws Exception {
if (this.timelineWriter != null) {
this.timelineWriter.close();
}
super.serviceStop();
}
@Override
public void flush() throws IOException {
if (timelineWriter != null) {
timelineWriter.flush();
}
}
@Override
public TimelinePutResponse putEntities(
TimelineEntity... entities) throws IOException, YarnException {
return timelineWriter.putEntities(entities);
}
@Override
public void putDomain(TimelineDomain domain) throws IOException,
YarnException {
timelineWriter.putDomain(domain);
}
@SuppressWarnings("unchecked")
@Override
public Token<TimelineDelegationTokenIdentifier> getDelegationToken(
final String renewer) throws IOException, YarnException {
PrivilegedExceptionAction<Token<TimelineDelegationTokenIdentifier>> getDTAction =
new PrivilegedExceptionAction<Token<TimelineDelegationTokenIdentifier>>() {
@Override
public Token<TimelineDelegationTokenIdentifier> run()
throws Exception {
DelegationTokenAuthenticatedURL authUrl =
new DelegationTokenAuthenticatedURL(authenticator,
connConfigurator);
return (Token) authUrl.getDelegationToken(
resURI.toURL(), token, renewer, doAsUser);
}
};
return (Token<TimelineDelegationTokenIdentifier>) operateDelegationToken(getDTAction);
}
@SuppressWarnings("unchecked")
@Override
public long renewDelegationToken(
final Token<TimelineDelegationTokenIdentifier> timelineDT)
throws IOException, YarnException {
final boolean isTokenServiceAddrEmpty =
timelineDT.getService().toString().isEmpty();
final String scheme = isTokenServiceAddrEmpty ? null
: (YarnConfiguration.useHttps(this.getConfig()) ? "https" : "http");
final InetSocketAddress address = isTokenServiceAddrEmpty ? null
: SecurityUtil.getTokenServiceAddr(timelineDT);
PrivilegedExceptionAction<Long> renewDTAction =
new PrivilegedExceptionAction<Long>() {
@Override
public Long run() throws Exception {
// If the timeline DT to renew is different than cached, replace it.
// Token to set every time for retry, because when exception happens,
// DelegationTokenAuthenticatedURL will reset it to null;
if (!timelineDT.equals(token.getDelegationToken())) {
token.setDelegationToken((Token) timelineDT);
}
DelegationTokenAuthenticatedURL authUrl =
new DelegationTokenAuthenticatedURL(authenticator,
connConfigurator);
// If the token service address is not available, fall back to use
// the configured service address.
final URI serviceURI = isTokenServiceAddrEmpty ? resURI
: new URI(scheme, null, address.getHostName(),
address.getPort(), RESOURCE_URI_STR, null, null);
return authUrl
.renewDelegationToken(serviceURI.toURL(), token, doAsUser);
}
};
return (Long) operateDelegationToken(renewDTAction);
}
@SuppressWarnings("unchecked")
@Override
public void cancelDelegationToken(
final Token<TimelineDelegationTokenIdentifier> timelineDT)
throws IOException, YarnException {
final boolean isTokenServiceAddrEmpty =
timelineDT.getService().toString().isEmpty();
final String scheme = isTokenServiceAddrEmpty ? null
: (YarnConfiguration.useHttps(this.getConfig()) ? "https" : "http");
final InetSocketAddress address = isTokenServiceAddrEmpty ? null
: SecurityUtil.getTokenServiceAddr(timelineDT);
PrivilegedExceptionAction<Void> cancelDTAction =
new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
// If the timeline DT to cancel is different than cached, replace it.
// Token to set every time for retry, because when exception happens,
// DelegationTokenAuthenticatedURL will reset it to null;
if (!timelineDT.equals(token.getDelegationToken())) {
token.setDelegationToken((Token) timelineDT);
}
DelegationTokenAuthenticatedURL authUrl =
new DelegationTokenAuthenticatedURL(authenticator,
connConfigurator);
// If the token service address is not available, fall back to use
// the configured service address.
final URI serviceURI = isTokenServiceAddrEmpty ? resURI
: new URI(scheme, null, address.getHostName(),
address.getPort(), RESOURCE_URI_STR, null, null);
authUrl.cancelDelegationToken(serviceURI.toURL(), token, doAsUser);
return null;
}
};
operateDelegationToken(cancelDTAction);
}
@Override
public String toString() {
return super.toString() + " with timeline server " + resURI
+ " and writer " + timelineWriter;
}
private Object operateDelegationToken(
final PrivilegedExceptionAction<?> action)
throws IOException, YarnException {
// Set up the retry operation
TimelineClientRetryOp tokenRetryOp =
createTimelineClientRetryOpForOperateDelegationToken(action);
return connectionRetry.retryOn(tokenRetryOp);
}
private class TimelineURLConnectionFactory
implements HttpURLConnectionFactory {
@Override
public HttpURLConnection getHttpURLConnection(final URL url) throws IOException {
authUgi.checkTGTAndReloginFromKeytab();
try {
return new DelegationTokenAuthenticatedURL(
authenticator, connConfigurator).openConnection(url, token,
doAsUser);
} catch (UndeclaredThrowableException e) {
throw new IOException(e.getCause());
} catch (AuthenticationException ae) {
throw new IOException(ae);
}
}
}
private static ConnectionConfigurator newConnConfigurator(Configuration conf) {
try {
return newSslConnConfigurator(DEFAULT_SOCKET_TIMEOUT, conf);
} catch (Exception e) {
LOG.debug("Cannot load customized ssl related configuration. " +
"Fallback to system-generic settings.", e);
return DEFAULT_TIMEOUT_CONN_CONFIGURATOR;
}
}
private static final ConnectionConfigurator DEFAULT_TIMEOUT_CONN_CONFIGURATOR =
new ConnectionConfigurator() {
@Override
public HttpURLConnection configure(HttpURLConnection conn)
throws IOException {
setTimeouts(conn, DEFAULT_SOCKET_TIMEOUT);
return conn;
}
};
private static ConnectionConfigurator newSslConnConfigurator(final int timeout,
Configuration conf) throws IOException, GeneralSecurityException {
final SSLFactory factory;
final SSLSocketFactory sf;
final HostnameVerifier hv;
factory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
factory.init();
sf = factory.createSSLSocketFactory();
hv = factory.getHostnameVerifier();
return new ConnectionConfigurator() {
@Override
public HttpURLConnection configure(HttpURLConnection conn)
throws IOException {
if (conn instanceof HttpsURLConnection) {
HttpsURLConnection c = (HttpsURLConnection) conn;
c.setSSLSocketFactory(sf);
c.setHostnameVerifier(hv);
}
setTimeouts(conn, timeout);
return conn;
}
};
}
private static void setTimeouts(URLConnection connection, int socketTimeout) {
connection.setConnectTimeout(socketTimeout);
connection.setReadTimeout(socketTimeout);
}
public static void main(String[] argv) throws Exception {
CommandLine cliParser = new GnuParser().parse(opts, argv);
if (cliParser.hasOption("put")) {
String path = cliParser.getOptionValue("put");
if (path != null && path.length() > 0) {
if (cliParser.hasOption(ENTITY_DATA_TYPE)) {
putTimelineDataInJSONFile(path, ENTITY_DATA_TYPE);
return;
} else if (cliParser.hasOption(DOMAIN_DATA_TYPE)) {
putTimelineDataInJSONFile(path, DOMAIN_DATA_TYPE);
return;
}
}
}
printUsage();
}
/**
* Put timeline data in a JSON file via command line.
*
* @param path
* path to the timeline data JSON file
* @param type
* the type of the timeline data in the JSON file
*/
private static void putTimelineDataInJSONFile(String path, String type) {
File jsonFile = new File(path);
if (!jsonFile.exists()) {
LOG.error("File [" + jsonFile.getAbsolutePath() + "] doesn't exist");
return;
}
ObjectMapper mapper = new ObjectMapper();
YarnJacksonJaxbJsonProvider.configObjectMapper(mapper);
TimelineEntities entities = null;
TimelineDomains domains = null;
try {
if (type.equals(ENTITY_DATA_TYPE)) {
entities = mapper.readValue(jsonFile, TimelineEntities.class);
} else if (type.equals(DOMAIN_DATA_TYPE)){
domains = mapper.readValue(jsonFile, TimelineDomains.class);
}
} catch (Exception e) {
LOG.error("Error when reading " + e.getMessage());
e.printStackTrace(System.err);
return;
}
Configuration conf = new YarnConfiguration();
TimelineClient client = TimelineClient.createTimelineClient();
client.init(conf);
client.start();
try {
if (UserGroupInformation.isSecurityEnabled()
&& conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, false)) {
Token<TimelineDelegationTokenIdentifier> token =
client.getDelegationToken(
UserGroupInformation.getCurrentUser().getUserName());
UserGroupInformation.getCurrentUser().addToken(token);
}
if (type.equals(ENTITY_DATA_TYPE)) {
TimelinePutResponse response = client.putEntities(
entities.getEntities().toArray(
new TimelineEntity[entities.getEntities().size()]));
if (response.getErrors().size() == 0) {
LOG.info("Timeline entities are successfully put");
} else {
for (TimelinePutResponse.TimelinePutError error : response.getErrors()) {
LOG.error("TimelineEntity [" + error.getEntityType() + ":" +
error.getEntityId() + "] is not successfully put. Error code: " +
error.getErrorCode());
}
}
} else if (type.equals(DOMAIN_DATA_TYPE)) {
boolean hasError = false;
for (TimelineDomain domain : domains.getDomains()) {
try {
client.putDomain(domain);
} catch (Exception e) {
LOG.error("Error when putting domain " + domain.getId(), e);
hasError = true;
}
}
if (!hasError) {
LOG.info("Timeline domains are successfully put");
}
}
} catch(RuntimeException e) {
LOG.error("Error when putting the timeline data", e);
} catch (Exception e) {
LOG.error("Error when putting the timeline data", e);
} finally {
client.stop();
}
}
/**
* Helper function to print out usage
*/
private static void printUsage() {
new HelpFormatter().printHelp("TimelineClient", opts);
}
@VisibleForTesting
@Private
public UserGroupInformation getUgi() {
return authUgi;
}
@Override
public TimelinePutResponse putEntities(ApplicationAttemptId appAttemptId,
TimelineEntityGroupId groupId, TimelineEntity... entities)
throws IOException, YarnException {
if (Float.compare(this.timelineServiceVersion, 1.5f) != 0) {
throw new YarnException(
"This API is not supported under current Timeline Service Version: "
+ timelineServiceVersion);
}
return timelineWriter.putEntities(appAttemptId, groupId, entities);
}
@Override
public void putDomain(ApplicationAttemptId appAttemptId,
TimelineDomain domain) throws IOException, YarnException {
if (Float.compare(this.timelineServiceVersion, 1.5f) != 0) {
throw new YarnException(
"This API is not supported under current Timeline Service Version: "
+ timelineServiceVersion);
}
timelineWriter.putDomain(appAttemptId, domain);
}
@Private
@VisibleForTesting
public void setTimelineWriter(TimelineWriter writer) {
this.timelineWriter = writer;
}
@Private
@VisibleForTesting
public TimelineClientRetryOp
createTimelineClientRetryOpForOperateDelegationToken(
final PrivilegedExceptionAction<?> action) throws IOException {
return new TimelineClientRetryOpForOperateDelegationToken(
this.authUgi, action);
}
@Private
@VisibleForTesting
public class TimelineClientRetryOpForOperateDelegationToken
extends TimelineClientRetryOp {
private final UserGroupInformation authUgi;
private final PrivilegedExceptionAction<?> action;
public TimelineClientRetryOpForOperateDelegationToken(
UserGroupInformation authUgi, PrivilegedExceptionAction<?> action) {
this.authUgi = authUgi;
this.action = action;
}
@Override
public Object run() throws IOException {
// Try pass the request, if fail, keep retrying
authUgi.checkTGTAndReloginFromKeytab();
try {
return authUgi.doAs(action);
} catch (UndeclaredThrowableException e) {
throw new IOException(e.getCause());
} catch (InterruptedException e) {
throw new IOException(e);
}
}
@Override
public boolean shouldRetryOn(Exception e) {
// retry on connection exceptions
// and SocketTimeoutException
return (e instanceof ConnectException
|| e instanceof SocketTimeoutException);
}
}
}
| apache-2.0 |
compomics/mascotdatfile | src/main/java/com/compomics/mascotdatfile/util/exception/MascotDatfileException.java | 591 | package com.compomics.mascotdatfile.util.exception;
/**
* Created by IntelliJ IDEA.
* User: kennyhelsens
* Date: Oct 12, 2010
* Time: 2:41:39 PM
* To change this template use File | Settings | File Templates.
*/
public class MascotDatfileException extends RuntimeException {

    /**
     * Creates an exception signalling a broken assumption within MascotDatfile.
     *
     * @param message detail message describing the error
     */
    public MascotDatfileException(String message) {
        super(message);
    }

    /**
     * Creates an exception signalling a broken assumption within MascotDatfile,
     * preserving the underlying cause for diagnostics.
     *
     * @param message detail message describing the error
     * @param cause the underlying cause of this exception
     */
    public MascotDatfileException(String message, Throwable cause) {
        super(message, cause);
    }
}
| apache-2.0 |
Assumeru/Imperator4J | src/main/java/com/ee/imperator/template/thymeleaf/ThymeleafTemplate.java | 1076 | package com.ee.imperator.template.thymeleaf;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.nio.charset.StandardCharsets;

import org.thymeleaf.TemplateEngine;
import org.thymeleaf.context.AbstractContext;

import com.ee.imperator.template.Template;
/**
 * {@link Template} implementation backed by a Thymeleaf {@link TemplateEngine}.
 * Instances are cheap wrappers binding a template name, an engine and a
 * variable context together.
 */
public class ThymeleafTemplate implements Template {

    /** Name/identifier of the Thymeleaf template to render. */
    private final String template;
    /** Engine used to process the template. */
    private final TemplateEngine engine;
    /** Context holding the variables visible to the template. */
    private final AbstractContext context;

    public ThymeleafTemplate(String template, TemplateEngine engine, AbstractContext context) {
        this.template = template;
        this.engine = engine;
        this.context = context;
    }

    /**
     * Binds a variable into the template context.
     *
     * @param key variable name as referenced inside the template
     * @param value variable value
     * @return this template, for chaining
     */
    @Override
    public Template setVariable(String key, Object value) {
        context.setVariable(key, value);
        return this;
    }

    /**
     * Renders the template to a string.
     *
     * @return the processed template output
     */
    @Override
    public String process() {
        return engine.process(template, context);
    }

    /**
     * Renders the template to the given stream.
     *
     * @param output stream to write the rendered output to; remains open,
     *            the caller owns and closes it
     * @throws IOException if flushing the rendered output fails
     */
    @Override
    public void process(OutputStream output) throws IOException {
        // Use an explicit charset: the previous platform-default encoding made
        // the rendered bytes differ between environments.
        final Writer writer = new OutputStreamWriter(output, StandardCharsets.UTF_8);
        engine.process(template, context, writer);
        // Flush but do not close - the caller owns the underlying stream.
        writer.flush();
    }
}
| apache-2.0 |
pon-prisma/PrismaDemo | BusinessLayer/src/main/java/it/prisma/businesslayer/bizws/config/security/package-info.java | 96 | /**
*
*/
/**
* @author l.biava
*
*/
package it.prisma.businesslayer.bizws.config.security; | apache-2.0 |
selecterskyandroid/framework | src/main/java/net/tsz/afinal/FinalActivity.java | 3721 | /**
* Copyright (c) 2012-2013, Michael Yang 杨福海 (www.yangfuhai.com).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.tsz.afinal;
import android.app.Activity;
import android.text.TextUtils;
import android.view.View;
import android.view.ViewGroup.LayoutParams;
import android.widget.AbsListView;
import net.tsz.afinal.annotation.view.EventListener;
import net.tsz.afinal.annotation.view.Select;
import net.tsz.afinal.annotation.view.ViewInject;
import java.lang.reflect.Field;
/**
 * Base activity that performs annotation-driven view injection: after any
 * {@code setContentView} call, every field of the concrete activity annotated
 * with {@link ViewInject} is resolved via {@code findViewById} and its
 * configured event listeners are wired up reflectively.
 */
public abstract class FinalActivity extends Activity {

    public void setContentView(int layoutResID) {
        super.setContentView(layoutResID);
        initView();
    }

    public void setContentView(View view, LayoutParams params) {
        super.setContentView(view, params);
        initView();
    }

    public void setContentView(View view) {
        super.setContentView(view);
        initView();
    }

    /**
     * Scans the declared fields of the concrete activity class, assigns views
     * to fields annotated with {@link ViewInject} and binds the configured
     * click/long-click/item listeners. Injection is best-effort per field:
     * a failure on one field is logged and the remaining fields are processed.
     */
    private void initView() {
        // getDeclaredFields() never returns null, so no extra guard is needed.
        for (Field field : getClass().getDeclaredFields()) {
            try {
                field.setAccessible(true);
                if (field.get(this) != null) {
                    // Already assigned (e.g. injected manually); leave untouched.
                    continue;
                }
                ViewInject viewInject = field.getAnnotation(ViewInject.class);
                if (viewInject == null) {
                    continue;
                }
                field.set(this, findViewById(viewInject.id()));
                setListener(field, viewInject.click(), Method.Click);
                setListener(field, viewInject.longClick(), Method.LongClick);
                setListener(field, viewInject.itemClick(), Method.ItemClick);
                setListener(field, viewInject.itemLongClick(), Method.itemLongClick);
                Select select = viewInject.select();
                if (!TextUtils.isEmpty(select.selected())) {
                    setViewSelectListener(field, select.selected(), select.noSelected());
                }
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }

    /**
     * Wires an item-selection listener onto the injected view.
     * Bug fix: the previous check was {@code instanceof View} followed by an
     * unconditional cast to {@link AbsListView}, which threw a
     * ClassCastException for plain views carrying a {@code select()} binding.
     */
    private void setViewSelectListener(Field field, String select, String noSelect) throws Exception {
        Object obj = field.get(this);
        if (obj instanceof AbsListView) {
            ((AbsListView) obj).setOnItemSelectedListener(
                    new EventListener(this).select(select).noSelect(noSelect));
        }
    }

    /**
     * Binds a single reflective event listener of the given kind to the
     * injected view, if a handler method name was configured.
     */
    private void setListener(Field field, String methodName, Method method) throws Exception {
        if (methodName == null || methodName.trim().length() == 0) {
            return;
        }
        Object obj = field.get(this);
        switch (method) {
            case Click:
                if (obj instanceof View) {
                    ((View) obj).setOnClickListener(new EventListener(this).click(methodName));
                }
                break;
            case ItemClick:
                if (obj instanceof AbsListView) {
                    ((AbsListView) obj).setOnItemClickListener(new EventListener(this).itemClick(methodName));
                }
                break;
            case LongClick:
                if (obj instanceof View) {
                    ((View) obj).setOnLongClickListener(new EventListener(this).longClick(methodName));
                }
                break;
            case itemLongClick:
                if (obj instanceof AbsListView) {
                    ((AbsListView) obj).setOnItemLongClickListener(new EventListener(this).itemLongClick(methodName));
                }
                break;
            default:
                break;
        }
    }

    /** Event-binding kinds supported by {@link ViewInject}. */
    public enum Method {
        Click, LongClick, ItemClick, itemLongClick
    }
}
| apache-2.0 |
dbflute-test/dbflute-test-active-hangar | src/main/java/org/docksidestage/hangar/dbflute/cbean/bs/BsWhiteDeprecatedSelectByPkuqCB.java | 25925 | package org.docksidestage.hangar.dbflute.cbean.bs;
import org.dbflute.cbean.AbstractConditionBean;
import org.dbflute.cbean.ConditionBean;
import org.dbflute.cbean.ConditionQuery;
import org.dbflute.cbean.chelper.*;
import org.dbflute.cbean.coption.*;
import org.dbflute.cbean.dream.*;
import org.dbflute.cbean.sqlclause.SqlClause;
import org.dbflute.cbean.sqlclause.SqlClauseCreator;
import org.dbflute.cbean.scoping.*;
import org.dbflute.dbmeta.DBMetaProvider;
import org.dbflute.twowaysql.factory.SqlAnalyzerFactory;
import org.dbflute.twowaysql.style.BoundDateDisplayTimeZoneProvider;
import org.docksidestage.hangar.dbflute.allcommon.DBFluteConfig;
import org.docksidestage.hangar.dbflute.allcommon.DBMetaInstanceHandler;
import org.docksidestage.hangar.dbflute.allcommon.ImplementedInvokerAssistant;
import org.docksidestage.hangar.dbflute.allcommon.ImplementedSqlClauseCreator;
import org.docksidestage.hangar.dbflute.cbean.*;
import org.docksidestage.hangar.dbflute.cbean.cq.*;
/**
* The base condition-bean of WHITE_DEPRECATED_SELECT_BY_PKUQ.
* @author DBFlute(AutoGenerator)
*/
public class BsWhiteDeprecatedSelectByPkuqCB extends AbstractConditionBean {

    // NOTE(review): this class is generated by the DBFlute AutoGenerator (see the
    // class javadoc). Do not hand-edit; regenerate from the table meta data instead.
    // The comments added below are review annotations only - all code is unchanged.

    // ===================================================================================
    // Attribute
    // =========
    // Lazily created by doGetConditionQuery(); holds the root condition-query.
    protected WhiteDeprecatedSelectByPkuqCQ _conditionQuery;

    // ===================================================================================
    // Constructor
    // ===========
    // Applies the project-wide DBFluteConfig defaults to this condition-bean instance.
    public BsWhiteDeprecatedSelectByPkuqCB() {
        if (DBFluteConfig.getInstance().isPagingCountLater()) {
            enablePagingCountLater();
        }
        if (DBFluteConfig.getInstance().isPagingCountLeastJoin()) {
            enablePagingCountLeastJoin();
        }
        if (DBFluteConfig.getInstance().isNonSpecifiedColumnAccessAllowed()) {
            enableNonSpecifiedColumnAccess();
        }
        if (DBFluteConfig.getInstance().isSpecifyColumnRequired()) {
            enableSpecifyColumnRequired();
        }
        xsetSpecifyColumnRequiredExceptDeterminer(DBFluteConfig.getInstance().getSpecifyColumnRequiredExceptDeterminer());
        if (DBFluteConfig.getInstance().isSpecifyColumnRequiredWarningOnly()) {
            xenableSpecifyColumnRequiredWarningOnly();
        }
        if (DBFluteConfig.getInstance().isQueryUpdateCountPreCheck()) {
            enableQueryUpdateCountPreCheck();
        }
    }

    // ===================================================================================
    // SqlClause
    // =========
    @Override
    protected SqlClause createSqlClause() {
        SqlClauseCreator creator = DBFluteConfig.getInstance().getSqlClauseCreator();
        if (creator != null) {
            return creator.createSqlClause(this);
        }
        return new ImplementedSqlClauseCreator().createSqlClause(this); // as default
    }

    // ===================================================================================
    // DB Meta
    // =======
    @Override
    protected DBMetaProvider getDBMetaProvider() {
        return DBMetaInstanceHandler.getProvider(); // as default
    }

    public String asTableDbName() {
        return "WHITE_DEPRECATED_SELECT_BY_PKUQ";
    }

    // ===================================================================================
    // PrimaryKey Handling
    // ===================
    /**
     * Accept the query condition of primary key as equal.
     * @param selectByPkuqId : PK, NotNull, DECIMAL(16). (NotNull)
     * @return this. (NotNull)
     */
    public WhiteDeprecatedSelectByPkuqCB acceptPK(Long selectByPkuqId) {
        assertObjectNotNull("selectByPkuqId", selectByPkuqId);
        BsWhiteDeprecatedSelectByPkuqCB cb = this;
        cb.query().setSelectByPkuqId_Equal(selectByPkuqId);
        return (WhiteDeprecatedSelectByPkuqCB)this;
    }

    /**
     * Accept the query condition of unique key as equal.
     * @param selectByPkuqCode : UQ, NotNull, VARCHAR(16). (NotNull)
     * @return this. (NotNull)
     */
    public WhiteDeprecatedSelectByPkuqCB acceptUniqueOf(String selectByPkuqCode) {
        assertObjectNotNull("selectByPkuqCode", selectByPkuqCode);
        BsWhiteDeprecatedSelectByPkuqCB cb = this;
        cb.query().setSelectByPkuqCode_Equal(selectByPkuqCode);
        return (WhiteDeprecatedSelectByPkuqCB)this;
    }

    public ConditionBean addOrderBy_PK_Asc() {
        query().addOrderBy_SelectByPkuqId_Asc();
        return this;
    }

    public ConditionBean addOrderBy_PK_Desc() {
        query().addOrderBy_SelectByPkuqId_Desc();
        return this;
    }

    // ===================================================================================
    // Query
    // =====
    /**
     * Prepare for various queries. <br>
     * Examples of main functions are following:
     * <pre>
     * <span style="color: #3F7E5E">// Basic Queries</span>
     * cb.query().setMemberId_Equal(value); <span style="color: #3F7E5E">// =</span>
     * cb.query().setMemberId_NotEqual(value); <span style="color: #3F7E5E">// !=</span>
     * cb.query().setMemberId_GreaterThan(value); <span style="color: #3F7E5E">// ></span>
     * cb.query().setMemberId_LessThan(value); <span style="color: #3F7E5E">// <</span>
     * cb.query().setMemberId_GreaterEqual(value); <span style="color: #3F7E5E">// >=</span>
     * cb.query().setMemberId_LessEqual(value); <span style="color: #3F7E5E">// <=</span>
     * cb.query().setMemberName_InScope(valueList); <span style="color: #3F7E5E">// in ('a', 'b')</span>
     * cb.query().setMemberName_NotInScope(valueList); <span style="color: #3F7E5E">// not in ('a', 'b')</span>
     * <span style="color: #3F7E5E">// LikeSearch with various options: (versatile)</span>
     * <span style="color: #3F7E5E">// {like ... [options]}</span>
     * cb.query().setMemberName_LikeSearch(value, option);
     * cb.query().setMemberName_NotLikeSearch(value, option); <span style="color: #3F7E5E">// not like ...</span>
     * <span style="color: #3F7E5E">// FromTo with various options: (versatile)</span>
     * <span style="color: #3F7E5E">// {(default) fromDatetime <= BIRTHDATE <= toDatetime}</span>
     * cb.query().setBirthdate_FromTo(fromDatetime, toDatetime, option);
     * <span style="color: #3F7E5E">// DateFromTo: (Date means yyyy/MM/dd)</span>
     * <span style="color: #3F7E5E">// {fromDate <= BIRTHDATE < toDate + 1 day}</span>
     * cb.query().setBirthdate_IsNull(); <span style="color: #3F7E5E">// is null</span>
     * cb.query().setBirthdate_IsNotNull(); <span style="color: #3F7E5E">// is not null</span>
     *
     * <span style="color: #3F7E5E">// ExistsReferrer: (correlated sub-query)</span>
     * <span style="color: #3F7E5E">// {where exists (select PURCHASE_ID from PURCHASE where ...)}</span>
     * cb.query().existsPurchase(purchaseCB <span style="color: #90226C; font-weight: bold"><span style="font-size: 120%">-</span>></span> {
     * purchaseCB.query().set... <span style="color: #3F7E5E">// referrer sub-query condition</span>
     * });
     * cb.query().notExistsPurchase...
     *
     * <span style="color: #3F7E5E">// (Query)DerivedReferrer: (correlated sub-query)</span>
     * cb.query().derivedPurchaseList().max(purchaseCB <span style="color: #90226C; font-weight: bold"><span style="font-size: 120%">-</span>></span> {
     * purchaseCB.specify().columnPurchasePrice(); <span style="color: #3F7E5E">// derived column for function</span>
     * purchaseCB.query().set... <span style="color: #3F7E5E">// referrer sub-query condition</span>
     * }).greaterEqual(value);
     *
     * <span style="color: #3F7E5E">// ScalarCondition: (self-table sub-query)</span>
     * cb.query().scalar_Equal().max(scalarCB <span style="color: #90226C; font-weight: bold"><span style="font-size: 120%">-</span>></span> {
     * scalarCB.specify().columnBirthdate(); <span style="color: #3F7E5E">// derived column for function</span>
     * scalarCB.query().set... <span style="color: #3F7E5E">// scalar sub-query condition</span>
     * });
     *
     * <span style="color: #3F7E5E">// OrderBy</span>
     * cb.query().addOrderBy_MemberName_Asc();
     * cb.query().addOrderBy_MemberName_Desc().withManualOrder(option);
     * cb.query().addOrderBy_MemberName_Desc().withNullsFirst();
     * cb.query().addOrderBy_MemberName_Desc().withNullsLast();
     * cb.query().addSpecifiedDerivedOrderBy_Desc(aliasName);
     *
     * <span style="color: #3F7E5E">// Query(Relation)</span>
     * cb.query().queryMemberStatus()...;
     * cb.query().queryMemberAddressAsValid(targetDate)...;
     * </pre>
     * @return The instance of condition-query for base-point table to set up query. (NotNull)
     */
    public WhiteDeprecatedSelectByPkuqCQ query() {
        assertQueryPurpose(); // assert only when user-public query
        return doGetConditionQuery();
    }

    public WhiteDeprecatedSelectByPkuqCQ xdfgetConditionQuery() { // public for parameter comment and internal
        return doGetConditionQuery();
    }

    protected WhiteDeprecatedSelectByPkuqCQ doGetConditionQuery() {
        if (_conditionQuery == null) {
            _conditionQuery = createLocalCQ();
        }
        return _conditionQuery;
    }

    protected WhiteDeprecatedSelectByPkuqCQ createLocalCQ() {
        return xcreateCQ(null, getSqlClause(), getSqlClause().getBasePointAliasName(), 0);
    }

    protected WhiteDeprecatedSelectByPkuqCQ xcreateCQ(ConditionQuery childQuery, SqlClause sqlClause, String aliasName, int nestLevel) {
        WhiteDeprecatedSelectByPkuqCQ cq = xnewCQ(childQuery, sqlClause, aliasName, nestLevel);
        cq.xsetBaseCB(this);
        return cq;
    }

    protected WhiteDeprecatedSelectByPkuqCQ xnewCQ(ConditionQuery childQuery, SqlClause sqlClause, String aliasName, int nestLevel) {
        return new WhiteDeprecatedSelectByPkuqCQ(childQuery, sqlClause, aliasName, nestLevel);
    }

    /**
     * {@inheritDoc}
     */
    public ConditionQuery localCQ() {
        return doGetConditionQuery();
    }

    // ===================================================================================
    // Union
    // =====
    /**
     * Set up 'union' for base-point table. <br>
     * You don't need to call SetupSelect in union-query,
     * because it inherits calls before. (Don't call SetupSelect after here)
     * <pre>
     * cb.query().<span style="color: #CC4747">union</span>(<span style="color: #553000">unionCB</span> <span style="color: #90226C; font-weight: bold"><span style="font-size: 120%">-</span>></span> {
     * <span style="color: #553000">unionCB</span>.query().set...
     * });
     * </pre>
     * @param unionCBLambda The callback for query of 'union'. (NotNull)
     */
    public void union(UnionQuery<WhiteDeprecatedSelectByPkuqCB> unionCBLambda) {
        final WhiteDeprecatedSelectByPkuqCB cb = new WhiteDeprecatedSelectByPkuqCB(); cb.xsetupForUnion(this); xsyncUQ(cb);
        try { lock(); unionCBLambda.query(cb); } finally { unlock(); } xsaveUCB(cb);
        final WhiteDeprecatedSelectByPkuqCQ cq = cb.query(); query().xsetUnionQuery(cq);
    }

    /**
     * Set up 'union all' for base-point table. <br>
     * You don't need to call SetupSelect in union-query,
     * because it inherits calls before. (Don't call SetupSelect after here)
     * <pre>
     * cb.query().<span style="color: #CC4747">unionAll</span>(<span style="color: #553000">unionCB</span> <span style="color: #90226C; font-weight: bold"><span style="font-size: 120%">-</span>></span> {
     * <span style="color: #553000">unionCB</span>.query().set...
     * });
     * </pre>
     * @param unionCBLambda The callback for query of 'union all'. (NotNull)
     */
    public void unionAll(UnionQuery<WhiteDeprecatedSelectByPkuqCB> unionCBLambda) {
        final WhiteDeprecatedSelectByPkuqCB cb = new WhiteDeprecatedSelectByPkuqCB(); cb.xsetupForUnion(this); xsyncUQ(cb);
        try { lock(); unionCBLambda.query(cb); } finally { unlock(); } xsaveUCB(cb);
        final WhiteDeprecatedSelectByPkuqCQ cq = cb.query(); query().xsetUnionAllQuery(cq);
    }

    // ===================================================================================
    // SetupSelect
    // ===========
    // (no foreign tables to set up for this table)

    // [DBFlute-0.7.4]
    // ===================================================================================
    // Specify
    // =======
    protected HpSpecification _specification;

    /**
     * Prepare for SpecifyColumn, (Specify)DerivedReferrer. <br>
     * This method should be called after SetupSelect.
     * <pre>
     * <span style="color: #0000C0">memberBhv</span>.selectEntity(<span style="color: #553000">cb</span> <span style="color: #90226C; font-weight: bold"><span style="font-size: 120%">-</span>></span> {
     * <span style="color: #553000">cb</span>.setupSelect_MemberStatus(); <span style="color: #3F7E5E">// should be called before specify()</span>
     * <span style="color: #553000">cb</span>.specify().columnMemberName();
     * <span style="color: #553000">cb</span>.specify().specifyMemberStatus().columnMemberStatusName();
     * <span style="color: #553000">cb</span>.specify().derivedPurchaseList().max(<span style="color: #553000">purchaseCB</span> <span style="color: #90226C; font-weight: bold"><span style="font-size: 120%">-</span>></span> {
     * <span style="color: #553000">purchaseCB</span>.specify().columnPurchaseDatetime();
     * <span style="color: #553000">purchaseCB</span>.query().set...
     * }, aliasName);
     * }).alwaysPresent(<span style="color: #553000">member</span> <span style="color: #90226C; font-weight: bold"><span style="font-size: 120%">-</span>></span> {
     * ...
     * });
     * </pre>
     * @return The instance of specification. (NotNull)
     */
    public HpSpecification specify() {
        assertSpecifyPurpose();
        if (_specification == null) { _specification = new HpSpecification(this
            , xcreateSpQyCall(() -> true, () -> xdfgetConditionQuery())
            , _purpose, getDBMetaProvider(), xcSDRFnFc()); }
        return _specification;
    }

    public HpColumnSpHandler localSp() {
        return specify();
    }

    public boolean hasSpecifiedLocalColumn() {
        return _specification != null && _specification.hasSpecifiedColumn();
    }

    public static class HpSpecification extends HpAbstractSpecification<WhiteDeprecatedSelectByPkuqCQ> {
        public HpSpecification(ConditionBean baseCB, HpSpQyCall<WhiteDeprecatedSelectByPkuqCQ> qyCall
            , HpCBPurpose purpose, DBMetaProvider dbmetaProvider
            , HpSDRFunctionFactory sdrFuncFactory)
        { super(baseCB, qyCall, purpose, dbmetaProvider, sdrFuncFactory); }

        /**
         * SELECT_BY_PKUQ_ID: {PK, NotNull, DECIMAL(16)}
         * @return The information object of specified column. (NotNull)
         */
        public SpecifiedColumn columnSelectByPkuqId() { return doColumn("SELECT_BY_PKUQ_ID"); }

        /**
         * SELECT_BY_PKUQ_NAME: {NotNull, VARCHAR(200)}
         * @return The information object of specified column. (NotNull)
         */
        public SpecifiedColumn columnSelectByPkuqName() { return doColumn("SELECT_BY_PKUQ_NAME"); }

        /**
         * SELECT_BY_PKUQ_CODE: {UQ, NotNull, VARCHAR(16)}
         * @return The information object of specified column. (NotNull)
         */
        public SpecifiedColumn columnSelectByPkuqCode() { return doColumn("SELECT_BY_PKUQ_CODE"); }

        public void everyColumn() { doEveryColumn(); }
        public void exceptRecordMetaColumn() { doExceptRecordMetaColumn(); }

        @Override
        protected void doSpecifyRequiredColumn() {
            columnSelectByPkuqId(); // PK
        }

        @Override
        protected String getTableDbName() { return "WHITE_DEPRECATED_SELECT_BY_PKUQ"; }

        /**
         * Prepare for (Specify)MyselfDerived (SubQuery).
         * @return The object to set up a function for myself table. (NotNull)
         */
        public HpSDRFunction<WhiteDeprecatedSelectByPkuqCB, WhiteDeprecatedSelectByPkuqCQ> myselfDerived() {
            assertDerived("myselfDerived"); if (xhasSyncQyCall()) { xsyncQyCall().qy(); } // for sync (for example, this in ColumnQuery)
            return cHSDRF(_baseCB, _qyCall.qy(), (String fn, SubQuery<WhiteDeprecatedSelectByPkuqCB> sq, WhiteDeprecatedSelectByPkuqCQ cq, String al, DerivedReferrerOption op)
                -> cq.xsmyselfDerive(fn, sq, al, op), _dbmetaProvider);
        }
    }

    // ===================================================================================
    // Dream Cruise
    // ============
    /**
     * Welcome to the Dream Cruise for condition-bean deep world. <br>
     * This is very specialty so you can get the frontier spirit. Bon voyage!
     * @return The condition-bean for dream cruise, which is linked to main condition-bean.
     */
    public WhiteDeprecatedSelectByPkuqCB dreamCruiseCB() {
        WhiteDeprecatedSelectByPkuqCB cb = new WhiteDeprecatedSelectByPkuqCB();
        cb.xsetupForDreamCruise((WhiteDeprecatedSelectByPkuqCB) this);
        return cb;
    }

    protected ConditionBean xdoCreateDreamCruiseCB() {
        return dreamCruiseCB();
    }

    // [DBFlute-0.9.5.3]
    // ===================================================================================
    // Column Query
    // ============
    /**
     * Set up column-query. {column1 = column2}
     * <pre>
     * <span style="color: #3F7E5E">// where FOO < BAR</span>
     * cb.<span style="color: #CC4747">columnQuery</span>(<span style="color: #553000">colCB</span> <span style="color: #90226C; font-weight: bold"><span style="font-size: 120%">-</span>></span> {
     * <span style="color: #553000">colCB</span>.specify().<span style="color: #CC4747">columnFoo()</span>; <span style="color: #3F7E5E">// left column</span>
     * }).lessThan(<span style="color: #553000">colCB</span> <span style="color: #90226C; font-weight: bold"><span style="font-size: 120%">-</span>></span> {
     * <span style="color: #553000">colCB</span>.specify().<span style="color: #CC4747">columnBar()</span>; <span style="color: #3F7E5E">// right column</span>
     * }); <span style="color: #3F7E5E">// you can calculate for right column like '}).plus(3);'</span>
     * </pre>
     * @param colCBLambda The callback for specify-query of left column. (NotNull)
     * @return The object for setting up operand and right column. (NotNull)
     */
    public HpColQyOperand<WhiteDeprecatedSelectByPkuqCB> columnQuery(final SpecifyQuery<WhiteDeprecatedSelectByPkuqCB> colCBLambda) {
        return xcreateColQyOperand((rightSp, operand) -> {
            return xcolqy(xcreateColumnQueryCB(), xcreateColumnQueryCB(), colCBLambda, rightSp, operand);
        });
    }

    protected WhiteDeprecatedSelectByPkuqCB xcreateColumnQueryCB() {
        WhiteDeprecatedSelectByPkuqCB cb = new WhiteDeprecatedSelectByPkuqCB();
        cb.xsetupForColumnQuery((WhiteDeprecatedSelectByPkuqCB)this);
        return cb;
    }

    // [DBFlute-0.9.6.3]
    // ===================================================================================
    // OrScope Query
    // =============
    /**
     * Set up the query for or-scope. <br>
     * (Same-column-and-same-condition-key conditions are allowed in or-scope)
     * <pre>
     * <span style="color: #3F7E5E">// where (FOO = '...' or BAR = '...')</span>
     * cb.<span style="color: #CC4747">orScopeQuery</span>(<span style="color: #553000">orCB</span> <span style="color: #90226C; font-weight: bold"><span style="font-size: 120%">-</span>></span> {
     * <span style="color: #553000">orCB</span>.query().setFoo...
     * <span style="color: #553000">orCB</span>.query().setBar...
     * });
     * </pre>
     * @param orCBLambda The callback for query of or-condition. (NotNull)
     */
    public void orScopeQuery(OrQuery<WhiteDeprecatedSelectByPkuqCB> orCBLambda) {
        xorSQ((WhiteDeprecatedSelectByPkuqCB)this, orCBLambda);
    }

    /**
     * Set up the and-part of or-scope. <br>
     * (However nested or-scope query and as-or-split of like-search in and-part are unsupported)
     * <pre>
     * <span style="color: #3F7E5E">// where (FOO = '...' or (BAR = '...' and QUX = '...'))</span>
     * cb.<span style="color: #994747">orScopeQuery</span>(<span style="color: #553000">orCB</span> <span style="color: #90226C; font-weight: bold"><span style="font-size: 120%">-</span>></span> {
     * <span style="color: #553000">orCB</span>.query().setFoo...
     * <span style="color: #553000">orCB</span>.<span style="color: #CC4747">orScopeQueryAndPart</span>(<span style="color: #553000">andCB</span> <span style="color: #90226C; font-weight: bold"><span style="font-size: 120%">-</span>></span> {
     * <span style="color: #553000">andCB</span>.query().setBar...
     * <span style="color: #553000">andCB</span>.query().setQux...
     * });
     * });
     * </pre>
     * @param andCBLambda The callback for query of and-condition. (NotNull)
     */
    public void orScopeQueryAndPart(AndQuery<WhiteDeprecatedSelectByPkuqCB> andCBLambda) {
        xorSQAP((WhiteDeprecatedSelectByPkuqCB)this, andCBLambda);
    }

    // ===================================================================================
    // DisplaySQL
    // ==========
    @Override
    protected SqlAnalyzerFactory getSqlAnalyzerFactory()
    { return new ImplementedInvokerAssistant().assistSqlAnalyzerFactory(); }

    @Override
    protected String getConfiguredLogDatePattern() { return DBFluteConfig.getInstance().getLogDatePattern(); }

    @Override
    protected String getConfiguredLogTimestampPattern() { return DBFluteConfig.getInstance().getLogTimestampPattern(); }

    @Override
    protected String getConfiguredLogTimePattern() { return DBFluteConfig.getInstance().getLogTimePattern(); }

    @Override
    protected BoundDateDisplayTimeZoneProvider getConfiguredLogTimeZoneProvider() { return DBFluteConfig.getInstance().getLogTimeZoneProvider(); }

    // ===================================================================================
    // Meta Handling
    // =============
    public boolean hasUnionQueryOrUnionAllQuery() {
        return query().hasUnionQueryOrUnionAllQuery();
    }

    // ===================================================================================
    // Purpose Type
    // ============
    @Override
    protected void xprepareSyncQyCall(ConditionBean mainCB) {
        final WhiteDeprecatedSelectByPkuqCB cb;
        if (mainCB != null) {
            cb = (WhiteDeprecatedSelectByPkuqCB)mainCB;
        } else {
            cb = new WhiteDeprecatedSelectByPkuqCB();
        }
        specify().xsetSyncQyCall(xcreateSpQyCall(() -> true, () -> cb.query()));
    }

    // ===================================================================================
    // Internal
    // ========
    // very internal (for suppressing warn about 'Not Use Import')
    protected String xgetConditionBeanClassNameInternally() { return WhiteDeprecatedSelectByPkuqCB.class.getName(); }
    protected String xgetConditionQueryClassNameInternally() { return WhiteDeprecatedSelectByPkuqCQ.class.getName(); }
    protected String xgetSubQueryClassNameInternally() { return SubQuery.class.getName(); }
    protected String xgetConditionOptionClassNameInternally() { return ConditionOption.class.getName(); }
}
| apache-2.0 |
AFaust/simple-content-stores | src/test/java/de/acosix/alfresco/simplecontentstores/repo/integration/TransformingFacadeStoresTest.java | 27365 | /*
* Copyright 2017 - 2021 Acosix GmbH
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.acosix.alfresco.simplecontentstores.repo.integration;
import com.thedeanda.lorem.LoremIpsum;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.SecureRandom;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.UUID;
import javax.ws.rs.NotFoundException;
import org.apache.commons.io.IOUtils;
import org.jboss.resteasy.client.jaxrs.ResteasyClient;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.FixMethodOrder;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.junit.runners.MethodSorters;
import de.acosix.alfresco.rest.client.api.NodesV1;
import de.acosix.alfresco.rest.client.api.PeopleV1;
import de.acosix.alfresco.rest.client.model.nodes.NodeCreationRequestEntity;
import de.acosix.alfresco.rest.client.model.nodes.NodeResponseEntity;
import de.acosix.alfresco.rest.client.model.people.PersonRequestEntity;
/**
* The tests in this class are meant to cover any of the generic content store facades that apply transformative operations on content
* before it is stored via actual, backing content stores.
*
* @author Axel Faust
*/
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
public class TransformingFacadeStoresTest extends AbstractStoresTest
{
private static ResteasyClient client;
private static final String testUser = "test";
private static final String testUserPassword = "test";
@Rule
public ExpectedException expectedException = ExpectedException.none();
/**
 * Initialises the shared REST client and provisions the test user used by
 * all tests in this class, creating it only if it does not exist yet.
 */
@BeforeClass
public static void setup()
{
    client = setupResteasyClient();

    // Admin credentials are only needed for this one-off provisioning step.
    final String ticket = obtainTicket(client, baseUrl, "admin", "admin");
    final PeopleV1 people = createAPI(client, baseUrl, PeopleV1.class, ticket);

    try
    {
        people.getPerson(testUser);
    }
    catch (final NotFoundException nfe)
    {
        // Person does not exist yet - create it with minimal default details.
        final PersonRequestEntity personToCreate = new PersonRequestEntity();
        personToCreate.setEmail(testUser + "@example.com");
        personToCreate.setFirstName("Test");
        personToCreate.setLastName("Guy");
        personToCreate.setId(testUser);
        personToCreate.setPassword(testUserPassword);
        people.createPerson(personToCreate);
    }
}
/**
 * Verifies the compressing file facade store: content uploaded with a
 * compressible mimetype (generic text/* pattern and explicit
 * application/json) must be stored smaller than its source bytes, content
 * with a non-compressible mimetype (PDF) must be stored verbatim, and
 * retrieval must always yield the original bytes.
 *
 * @throws IOException if reading test resources or inspecting store files fails
 */
@Test
public void compressingFacadeStore() throws IOException
{
    // need to record pre-existing files to exclude in verification
    final Collection<Path> exclusions = listFilesInAlfData("compressingFileFacadeStore");

    final String ticket = obtainTicket(client, baseUrl, testUser, testUserPassword);
    final NodesV1 nodes = createAPI(client, baseUrl, NodesV1.class, ticket);

    final String documentLibraryNodeId = getOrCreateSiteAndDocumentLibrary(client, baseUrl, ticket, "compressing-file-facade",
            "Compressing File Facade Site");

    final NodeCreationRequestEntity createRequest = new NodeCreationRequestEntity();
    createRequest.setName(UUID.randomUUID().toString());
    createRequest.setNodeType("cm:content");

    NodeResponseEntity createdNode = nodes.createNode(documentLibraryNodeId, createRequest);

    // tests generic text/* pattern for compressible mimetypes
    byte[] contentBytes = LoremIpsum.getInstance().getParagraphs(4, 20).getBytes(StandardCharsets.UTF_8);
    nodes.setContent(createdNode.getId(), new ByteArrayInputStream(contentBytes), "text/plain");

    // stored file must be smaller than the upload, i.e. actually compressed
    Path lastModifiedFileInContent = findLastModifiedFileInAlfData("compressingFileFacadeStore", exclusions);
    Assert.assertNotNull(lastModifiedFileInContent);
    Assert.assertTrue(contentBytes.length > Files.size(lastModifiedFileInContent));
    // retrieval must transparently decompress back to the original bytes
    Assert.assertTrue(contentMatches(contentBytes, nodes.getContent(createdNode.getId())));

    // tests/verifies generic text/* pattern for compressible mimetypes
    createRequest.setName(UUID.randomUUID().toString());
    createdNode = nodes.createNode(documentLibraryNodeId, createRequest);

    contentBytes = LoremIpsum.getInstance().getHtmlParagraphs(4, 20).getBytes(StandardCharsets.UTF_8);
    nodes.setContent(createdNode.getId(), new ByteArrayInputStream(contentBytes), "text/html");

    // each previously found store file is excluded so the next lookup only
    // sees the file written for the latest upload
    exclusions.add(lastModifiedFileInContent);
    lastModifiedFileInContent = findLastModifiedFileInAlfData("compressingFileFacadeStore", exclusions);
    Assert.assertNotNull(lastModifiedFileInContent);
    Assert.assertTrue(contentBytes.length > Files.size(lastModifiedFileInContent));
    Assert.assertTrue(contentMatches(contentBytes, nodes.getContent(createdNode.getId())));

    // tests explicit application/json pattern for compressible mimetypes
    createRequest.setName(UUID.randomUUID().toString());
    createdNode = nodes.createNode(documentLibraryNodeId, createRequest);

    try (InputStream is = this.getClass().getClassLoader().getResourceAsStream("testContentFiles/random.json"))
    {
        final ByteArrayOutputStream baos = new ByteArrayOutputStream(512);
        IOUtils.copy(is, baos);
        contentBytes = baos.toByteArray();
    }
    nodes.setContent(createdNode.getId(), new ByteArrayInputStream(contentBytes), "application/json");

    exclusions.add(lastModifiedFileInContent);
    lastModifiedFileInContent = findLastModifiedFileInAlfData("compressingFileFacadeStore", exclusions);
    Assert.assertNotNull(lastModifiedFileInContent);
    Assert.assertTrue(contentBytes.length > Files.size(lastModifiedFileInContent));
    Assert.assertTrue(contentMatches(contentBytes, nodes.getContent(createdNode.getId())));

    // test non-compressible file type
    createRequest.setName(UUID.randomUUID().toString());
    createdNode = nodes.createNode(documentLibraryNodeId, createRequest);

    try (InputStream is = this.getClass().getClassLoader().getResourceAsStream("testContentFiles/sample.pdf"))
    {
        final ByteArrayOutputStream baos = new ByteArrayOutputStream(512);
        IOUtils.copy(is, baos);
        contentBytes = baos.toByteArray();
    }
    nodes.setContent(createdNode.getId(), new ByteArrayInputStream(contentBytes), "application/pdf");

    // non-compressible content must be stored verbatim: identical size and bytes
    exclusions.add(lastModifiedFileInContent);
    lastModifiedFileInContent = findLastModifiedFileInAlfData("compressingFileFacadeStore", exclusions);
    Assert.assertNotNull(lastModifiedFileInContent);
    Assert.assertEquals(contentBytes.length, Files.size(lastModifiedFileInContent));
    Assert.assertTrue(contentMatches(contentBytes, lastModifiedFileInContent));
    Assert.assertTrue(contentMatches(contentBytes, nodes.getContent(createdNode.getId())));
}
/**
 * Verifies the deduplicating file facade store: byte-identical content stored for a
 * second node must NOT create a new physical file, while even a single-byte change
 * must result in a new file being written.
 */
@Test
public void deduplicatingFacadeStore() throws IOException
{
    // need to record pre-existing files to exclude in verification
    final Collection<Path> exclusions = listFilesInAlfData("deduplicatingFileFacadeStore");

    final String ticket = obtainTicket(client, baseUrl, testUser, testUserPassword);
    final NodesV1 nodes = createAPI(client, baseUrl, NodesV1.class, ticket);

    final String documentLibraryNodeId = getOrCreateSiteAndDocumentLibrary(client, baseUrl, ticket, "deduplicating-file-facade",
            "Deduplicating File Facade Site");

    // create two empty nodes which will both receive the same content below
    final NodeCreationRequestEntity createRequest = new NodeCreationRequestEntity();
    createRequest.setName(UUID.randomUUID().toString());
    createRequest.setNodeType("cm:content");
    final NodeResponseEntity createdNode1 = nodes.createNode(documentLibraryNodeId, createRequest);

    createRequest.setName(UUID.randomUUID().toString());
    createRequest.setNodeType("cm:content");
    final NodeResponseEntity createdNode2 = nodes.createNode(documentLibraryNodeId, createRequest);

    final byte[] contentBytes = LoremIpsum.getInstance().getParagraphs(4, 20).getBytes(StandardCharsets.UTF_8);
    nodes.setContent(createdNode1.getId(), new ByteArrayInputStream(contentBytes), "text/plain");

    // first write must produce a physical file holding the content unchanged
    Path lastModifiedFileInContent = findLastModifiedFileInAlfData("deduplicatingFileFacadeStore", exclusions);
    Assert.assertNotNull(lastModifiedFileInContent);
    Assert.assertEquals(contentBytes.length, Files.size(lastModifiedFileInContent));
    Assert.assertTrue(contentMatches(contentBytes, lastModifiedFileInContent));
    Assert.assertTrue(contentMatches(contentBytes, nodes.getContent(createdNode1.getId())));

    // test no new content file is created when exact same content is stored for another node
    nodes.setContent(createdNode2.getId(), new ByteArrayInputStream(contentBytes), "text/html");
    exclusions.add(lastModifiedFileInContent);
    lastModifiedFileInContent = findLastModifiedFileInAlfData("deduplicatingFileFacadeStore", exclusions);
    Assert.assertNull(lastModifiedFileInContent);
    Assert.assertTrue(contentMatches(contentBytes, nodes.getContent(createdNode2.getId())));

    // test new content file is written for a minimal change in content
    final int idxToChange = new SecureRandom().nextInt(contentBytes.length);
    contentBytes[idxToChange] = (byte) (contentBytes[idxToChange] + 1);
    nodes.setContent(createdNode2.getId(), new ByteArrayInputStream(contentBytes), "text/plain");
    lastModifiedFileInContent = findLastModifiedFileInAlfData("deduplicatingFileFacadeStore", exclusions);
    Assert.assertNotNull(lastModifiedFileInContent);
    Assert.assertEquals(contentBytes.length, Files.size(lastModifiedFileInContent));
    Assert.assertTrue(contentMatches(contentBytes, lastModifiedFileInContent));
    Assert.assertTrue(contentMatches(contentBytes, nodes.getContent(createdNode2.getId())));
}
/**
 * Verifies the expected initial master-key state as reported by the command console
 * plugin: three active keys and an empty ("No keys found") inactive listing.
 */
@Test
// we need to order encryption tests as key management / state is global and some tests can impact others
// would be simpler if we were to re-create Alfresco from scratch for every test, but that would be excessive
// order is based on method name (ascending)
public void encryption0WithExpectedMasterKeys()
{
    final SimpleContentStoresCommandPlugin commandConsolePlugin = createAPI(client, baseUrl, SimpleContentStoresCommandPlugin.class,
            "admin", "admin");

    // key listings come back as preformatted console output, one key per line
    final List<String> activeKeyList = commandConsolePlugin.listEncryptionKeys(CommandConsolePluginRequest.from("active"))
            .getPreformattedOutputLines();
    final List<String> inactiveKeyList = commandConsolePlugin.listEncryptionKeys(CommandConsolePluginRequest.from("inactive"))
            .getPreformattedOutputLines();

    // all three configured master keys must be reported as active
    Assert.assertEquals(3, activeKeyList.size());
    Assert.assertEquals(1, inactiveKeyList.size());
    Assert.assertTrue(activeKeyList.stream().anyMatch(line -> line.startsWith("scs-effs:effs")));
    Assert.assertTrue(activeKeyList.stream().anyMatch(line -> line.startsWith("scs-aesks:effs-aes ")));
    Assert.assertTrue(activeKeyList.stream().anyMatch(line -> line.startsWith("scs-aesks:effs-aes2")));
    // an empty listing is reported as a single informational line
    Assert.assertTrue(inactiveKeyList.contains("No keys found"));
}
/**
 * Verifies that all three configured master keys are actually used to protect symmetric
 * content-encryption keys: after creating 30 contents, the per-master-key counts must
 * sum to 30 and every configured key must appear at least once.
 */
@Test
// we need to order encryption tests as key management / state is global and some tests can impact others
// would be simpler if we were to re-create Alfresco from scratch for every test, but that would be excessive
// order is based on method name (ascending)
public void encryption1WithDifferentMasterKeys()
{
    final String ticket = obtainTicket(client, baseUrl, testUser, testUserPassword);
    final NodesV1 nodes = createAPI(client, baseUrl, NodesV1.class, ticket);
    final SimpleContentStoresCommandPlugin commandConsolePlugin = createAPI(client, baseUrl, SimpleContentStoresCommandPlugin.class,
            "admin", "admin");

    // there should not be any encrypted data yet
    final List<String> initialCounts = commandConsolePlugin.countEncryptedSymmetricKeys(CommandConsolePluginRequest.from())
            .getPreformattedOutputLines();
    Assert.assertEquals(1, initialCounts.size());
    Assert.assertTrue(initialCounts.contains("No symmetric keys found"));

    final String documentLibraryNodeId = getOrCreateSiteAndDocumentLibrary(client, baseUrl, ticket, "encrypting-file-facade",
            "Encrypting File Facade Site");
    this.createRandomContents(30, nodes, documentLibraryNodeId);

    // master key usage is random, but with three configured keys and 30 contents created, each key should be used at least once
    final List<String> updatedCounts = commandConsolePlugin.countEncryptedSymmetricKeys(CommandConsolePluginRequest.from())
            .getPreformattedOutputLines();
    Assert.assertEquals(3, updatedCounts.size());

    int combinedCount = 0;
    final List<String> keys = new ArrayList<>(3);
    for (final String countLine : updatedCounts)
    {
        // parsed as "<count> ... by <keyName>": count before the first space, key after " by "
        final String count = countLine.substring(0, countLine.indexOf(' '));
        combinedCount += Integer.parseInt(count);
        final String key = countLine.substring(countLine.lastIndexOf(" by ") + 4);
        keys.add(key);
    }

    Assert.assertEquals(30, combinedCount);
    Assert.assertTrue(keys.contains("scs-effs:effs"));
    Assert.assertTrue(keys.contains("scs-aesks:effs-aes"));
    Assert.assertTrue(keys.contains("scs-aesks:effs-aes2"));
}
/**
 * Verifies that each content gets its own symmetric key: storing the exact same source
 * bytes for two different nodes must produce two different encrypted files, which are
 * nevertheless of identical size.
 */
@Test
// we need to order encryption tests as key management / state is global and some tests can impact others
// would be simpler if we were to re-create Alfresco from scratch for every test, but that would be excessive
// order is based on method name (ascending)
public void encryption2WithDifferentSymmetricKeys() throws IOException
{
    // need to record pre-existing files to exclude in verification
    final Collection<Path> exclusions = listFilesInAlfData("encryptingFileFacadeStore");

    final String ticket = obtainTicket(client, baseUrl, testUser, testUserPassword);
    final NodesV1 nodes = createAPI(client, baseUrl, NodesV1.class, ticket);

    final String documentLibraryNodeId = getOrCreateSiteAndDocumentLibrary(client, baseUrl, ticket, "encrypting-file-facade",
            "Encrypting File Facade Site");

    // two empty nodes which will both receive the same content below
    final NodeCreationRequestEntity createRequest = new NodeCreationRequestEntity();
    createRequest.setName(UUID.randomUUID().toString());
    createRequest.setNodeType("cm:content");
    final NodeResponseEntity createdNode1 = nodes.createNode(documentLibraryNodeId, createRequest);

    createRequest.setName(UUID.randomUUID().toString());
    createRequest.setNodeType("cm:content");
    final NodeResponseEntity createdNode2 = nodes.createNode(documentLibraryNodeId, createRequest);

    final byte[] contentBytes = LoremIpsum.getInstance().getParagraphs(4, 20).getBytes(StandardCharsets.UTF_8);
    nodes.setContent(createdNode1.getId(), new ByteArrayInputStream(contentBytes), "text/plain");

    // stored file must be encrypted (size differs from plaintext) while the node content reads back unchanged
    final Path lastModifiedFileInContent1 = findLastModifiedFileInAlfData("encryptingFileFacadeStore", exclusions);
    Assert.assertNotNull(lastModifiedFileInContent1);
    Assert.assertNotEquals(contentBytes.length, Files.size(lastModifiedFileInContent1));
    Assert.assertTrue(contentMatches(contentBytes, nodes.getContent(createdNode1.getId())));

    // test new content file with different encryption result (due to different symmetric key per content) is created when exact same
    // content is stored for another node
    nodes.setContent(createdNode2.getId(), new ByteArrayInputStream(contentBytes), "text/plain");
    exclusions.add(lastModifiedFileInContent1);
    final Path lastModifiedFileInContent2 = findLastModifiedFileInAlfData("encryptingFileFacadeStore", exclusions);
    Assert.assertNotNull(lastModifiedFileInContent2);
    Assert.assertNotEquals(contentBytes.length, Files.size(lastModifiedFileInContent2));
    Assert.assertTrue(contentMatches(contentBytes, nodes.getContent(createdNode2.getId())));
    Assert.assertFalse(contentMatches(lastModifiedFileInContent1, lastModifiedFileInContent2));
    // overall length after encrypting must still be the same due to same algorithm / block size
    Assert.assertEquals(Files.size(lastModifiedFileInContent1), Files.size(lastModifiedFileInContent2));
}
/**
 * Verifies that disabling a master key stops it from being used for new content (its
 * per-key count stays constant while disabled) and that re-enabling it makes it usable
 * again (the count changes afterwards).
 */
@Test
// we need to order encryption tests as key management / state is global and some tests can impact others
// would be simpler if we were to re-create Alfresco from scratch for every test, but that would be excessive
// order is based on method name (ascending)
public void encryption3WithDisabledKey()
{
    final String ticket = obtainTicket(client, baseUrl, testUser, testUserPassword);
    final NodesV1 nodes = createAPI(client, baseUrl, NodesV1.class, ticket);
    final SimpleContentStoresCommandPlugin commandConsolePlugin = createAPI(client, baseUrl, SimpleContentStoresCommandPlugin.class,
            "admin", "admin");

    // initial state: all three master keys active, none inactive
    List<String> activeKeyList = commandConsolePlugin.listEncryptionKeys(CommandConsolePluginRequest.from("active"))
            .getPreformattedOutputLines();
    List<String> inactiveKeyList = commandConsolePlugin.listEncryptionKeys(CommandConsolePluginRequest.from("inactive"))
            .getPreformattedOutputLines();
    Assert.assertEquals(3, activeKeyList.size());
    Assert.assertEquals(1, inactiveKeyList.size());
    Assert.assertTrue(activeKeyList.stream().anyMatch(line -> line.startsWith("scs-effs:effs")));
    Assert.assertTrue(activeKeyList.stream().anyMatch(line -> line.startsWith("scs-aesks:effs-aes ")));
    Assert.assertTrue(activeKeyList.stream().anyMatch(line -> line.startsWith("scs-aesks:effs-aes2")));
    Assert.assertTrue(inactiveKeyList.contains("No keys found"));

    final String documentLibraryNodeId = getOrCreateSiteAndDocumentLibrary(client, baseUrl, ticket, "encrypting-file-facade",
            "Encrypting File Facade Site");
    this.createRandomContents(30, nodes, documentLibraryNodeId);

    // record how many symmetric keys are currently protected by effs-aes2 as a reference value
    final List<String> referenceCounts = commandConsolePlugin
            .countEncryptedSymmetricKeys(CommandConsolePluginRequest.from("scs-aesks:effs-aes2")).getPreformattedOutputLines();
    final String referenceCountLine = referenceCounts.get(0);
    Assert.assertNotEquals("No symmetric keys found", referenceCountLine);
    final int referenceCount = Integer.parseInt(referenceCountLine.substring(0, referenceCountLine.indexOf(' ')));

    // disable the key and verify it moved from the active to the inactive listing
    commandConsolePlugin.disableEncryptionKey(CommandConsolePluginRequest.from("scs-aesks:effs-aes2"));
    activeKeyList = commandConsolePlugin.listEncryptionKeys(CommandConsolePluginRequest.from("active")).getPreformattedOutputLines();
    inactiveKeyList = commandConsolePlugin.listEncryptionKeys(CommandConsolePluginRequest.from("inactive"))
            .getPreformattedOutputLines();
    Assert.assertEquals(2, activeKeyList.size());
    Assert.assertEquals(1, inactiveKeyList.size());
    Assert.assertTrue(activeKeyList.stream().anyMatch(line -> line.startsWith("scs-effs:effs")));
    Assert.assertTrue(activeKeyList.stream().anyMatch(line -> line.startsWith("scs-aesks:effs-aes ")));
    Assert.assertFalse(activeKeyList.stream().anyMatch(line -> line.startsWith("scs-aesks:effs-aes2")));
    Assert.assertTrue(inactiveKeyList.stream().anyMatch(line -> line.startsWith("scs-aesks:effs-aes2")));

    // content created while the key is disabled must not use it: the count stays the same
    this.createRandomContents(30, nodes, documentLibraryNodeId);
    final List<String> updatedCounts = commandConsolePlugin
            .countEncryptedSymmetricKeys(CommandConsolePluginRequest.from("scs-aesks:effs-aes2")).getPreformattedOutputLines();
    final String updatedCountLine = updatedCounts.get(0);
    final int updatedCount = Integer.parseInt(updatedCountLine.substring(0, updatedCountLine.indexOf(' ')));
    Assert.assertEquals(referenceCount, updatedCount);

    // re-enable the key and verify the original active/inactive state is restored
    commandConsolePlugin.enableEncryptionKey(CommandConsolePluginRequest.from("scs-aesks:effs-aes2"));
    activeKeyList = commandConsolePlugin.listEncryptionKeys(CommandConsolePluginRequest.from("active")).getPreformattedOutputLines();
    inactiveKeyList = commandConsolePlugin.listEncryptionKeys(CommandConsolePluginRequest.from("inactive"))
            .getPreformattedOutputLines();
    Assert.assertEquals(3, activeKeyList.size());
    Assert.assertEquals(1, inactiveKeyList.size());
    Assert.assertTrue(activeKeyList.stream().anyMatch(line -> line.startsWith("scs-effs:effs")));
    Assert.assertTrue(activeKeyList.stream().anyMatch(line -> line.startsWith("scs-aesks:effs-aes ")));
    Assert.assertTrue(activeKeyList.stream().anyMatch(line -> line.startsWith("scs-aesks:effs-aes2")));
    Assert.assertTrue(inactiveKeyList.contains("No keys found"));

    // with the key active again, new content may use it: the count must change
    this.createRandomContents(30, nodes, documentLibraryNodeId);
    final List<String> finalCounts = commandConsolePlugin
            .countEncryptedSymmetricKeys(CommandConsolePluginRequest.from("scs-aesks:effs-aes2")).getPreformattedOutputLines();
    final String finalCountLine = finalCounts.get(0);
    final int finalCount = Integer.parseInt(finalCountLine.substring(0, finalCountLine.indexOf(' ')));
    Assert.assertNotEquals(referenceCount, finalCount);
}
/**
 * Verifies re-encryption: a disabled master key becomes eligible for re-encryption, and
 * after re-encrypting, no symmetric keys remain protected by it.
 */
@Test
// we need to order encryption tests as key management / state is global and some tests can impact others
// would be simpler if we were to re-create Alfresco from scratch for every test, but that would be excessive
// order is based on method name (ascending)
public void encryption4WithReEncryption()
{
    final String ticket = obtainTicket(client, baseUrl, testUser, testUserPassword);
    final NodesV1 nodes = createAPI(client, baseUrl, NodesV1.class, ticket);
    final SimpleContentStoresCommandPlugin commandConsolePlugin = createAPI(client, baseUrl, SimpleContentStoresCommandPlugin.class,
            "admin", "admin");

    // initial state: three active keys, none inactive, none eligible for re-encryption
    List<String> activeKeyList = commandConsolePlugin.listEncryptionKeys(CommandConsolePluginRequest.from("active"))
            .getPreformattedOutputLines();
    List<String> inactiveKeyList = commandConsolePlugin.listEncryptionKeys(CommandConsolePluginRequest.from("inactive"))
            .getPreformattedOutputLines();
    List<String> eligibleKeyList = commandConsolePlugin.listEncryptionKeysEligibleForReEncryption(CommandConsolePluginRequest.from())
            .getPreformattedOutputLines();
    Assert.assertEquals(3, activeKeyList.size());
    Assert.assertEquals(1, inactiveKeyList.size());
    Assert.assertEquals(1, eligibleKeyList.size());
    Assert.assertTrue(activeKeyList.stream().anyMatch(line -> line.startsWith("scs-effs:effs")));
    Assert.assertTrue(activeKeyList.stream().anyMatch(line -> line.startsWith("scs-aesks:effs-aes ")));
    Assert.assertTrue(activeKeyList.stream().anyMatch(line -> line.startsWith("scs-aesks:effs-aes2")));
    Assert.assertTrue(inactiveKeyList.contains("No keys found"));
    Assert.assertTrue(eligibleKeyList.contains("No keys found"));

    final String documentLibraryNodeId = getOrCreateSiteAndDocumentLibrary(client, baseUrl, ticket, "encrypting-file-facade",
            "Encrypting File Facade Site");
    this.createRandomContents(30, nodes, documentLibraryNodeId);

    // ensure some symmetric keys are protected by effs-aes2 before disabling it
    final List<String> referenceCounts = commandConsolePlugin
            .countEncryptedSymmetricKeys(CommandConsolePluginRequest.from("scs-aesks:effs-aes2")).getPreformattedOutputLines();
    final String referenceCountLine = referenceCounts.get(0);
    Assert.assertNotEquals("No symmetric keys found", referenceCountLine);

    // disabling the key must make it show up as inactive AND eligible for re-encryption
    commandConsolePlugin.disableEncryptionKey(CommandConsolePluginRequest.from("scs-aesks:effs-aes2"));
    activeKeyList = commandConsolePlugin.listEncryptionKeys(CommandConsolePluginRequest.from("active")).getPreformattedOutputLines();
    inactiveKeyList = commandConsolePlugin.listEncryptionKeys(CommandConsolePluginRequest.from("inactive"))
            .getPreformattedOutputLines();
    eligibleKeyList = commandConsolePlugin.listEncryptionKeysEligibleForReEncryption(CommandConsolePluginRequest.from())
            .getPreformattedOutputLines();
    Assert.assertEquals(2, activeKeyList.size());
    Assert.assertEquals(1, inactiveKeyList.size());
    Assert.assertEquals(1, eligibleKeyList.size());
    Assert.assertTrue(activeKeyList.stream().anyMatch(line -> line.startsWith("scs-effs:effs ")));
    Assert.assertTrue(activeKeyList.stream().anyMatch(line -> line.startsWith("scs-aesks:effs-aes ")));
    Assert.assertFalse(activeKeyList.stream().anyMatch(line -> line.startsWith("scs-aesks:effs-aes2 ")));
    Assert.assertTrue(inactiveKeyList.stream().anyMatch(line -> line.startsWith("scs-aesks:effs-aes2 ")));
    Assert.assertTrue(eligibleKeyList.stream().anyMatch(line -> line.startsWith("scs-aesks:effs-aes2 ")));

    // after re-encryption, no symmetric key may still be protected by the disabled key
    commandConsolePlugin.reEncryptSymmetricKeys(CommandConsolePluginRequest.from("scs-aesks:effs-aes2"));
    final List<String> updatedCounts = commandConsolePlugin
            .countEncryptedSymmetricKeys(CommandConsolePluginRequest.from("scs-aesks:effs-aes2")).getPreformattedOutputLines();
    final String updatedCountLine = updatedCounts.get(0);
    Assert.assertEquals("No symmetric keys found", updatedCountLine);
}
/**
 * Creates the given number of {@code cm:content} nodes with random UUID names and
 * lorem-ipsum plain-text content inside the specified document library.
 *
 * @param maxCount              number of nodes to create
 * @param nodes                 the nodes API used to create nodes and set their content
 * @param documentLibraryNodeId ID of the document library to create the nodes in
 */
protected void createRandomContents(final int maxCount, final NodesV1 nodes, final String documentLibraryNodeId)
{
    int remaining = maxCount;
    while (remaining-- > 0)
    {
        final NodeCreationRequestEntity request = new NodeCreationRequestEntity();
        request.setName(UUID.randomUUID().toString());
        request.setNodeType("cm:content");
        final NodeResponseEntity node = nodes.createNode(documentLibraryNodeId, request);

        final byte[] bytes = LoremIpsum.getInstance().getParagraphs(4, 20).getBytes(StandardCharsets.UTF_8);
        nodes.setContent(node.getId(), new ByteArrayInputStream(bytes), "text/plain");
    }
}
}
| apache-2.0 |
kevinxucs/purdue-course-parser | src/net/kevxu/purdueassist/course/shared/RequestNotFinishedException.java | 480 | package net.kevxu.purdueassist.course.shared;
/**
 * Signals that a request is still being processed and therefore cannot be
 * acted upon yet. Mirrors the four standard {@link Exception} constructors.
 */
public class RequestNotFinishedException extends Exception {

    /** Serialization identifier; kept stable across releases. */
    private static final long serialVersionUID = -2042384911352208656L;

    /** Creates an exception without a detail message or cause. */
    public RequestNotFinishedException() {
        super();
    }

    /**
     * Creates an exception with a detail message.
     *
     * @param message the detail message
     */
    public RequestNotFinishedException(String message) {
        super(message);
    }

    /**
     * Creates an exception with a detail message and an underlying cause.
     *
     * @param message the detail message
     * @param cause   the underlying cause
     */
    public RequestNotFinishedException(String message, Throwable cause) {
        super(message, cause);
    }

    /**
     * Creates an exception wrapping an underlying cause.
     *
     * @param cause the underlying cause
     */
    public RequestNotFinishedException(Throwable cause) {
        super(cause);
    }
}
| apache-2.0 |
jexp/idea2 | platform/platform-impl/src/com/intellij/ide/dnd/DnDEnabler.java | 8432 | /*
* Copyright 2000-2009 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.ide.dnd;
import com.intellij.ide.ui.LafManager;
import com.intellij.ide.ui.LafManagerListener;
import com.intellij.openapi.Disposable;
import com.intellij.openapi.util.Disposer;
import com.intellij.util.ui.AwtVisitor;
import com.intellij.util.ui.update.Activatable;
import com.intellij.util.ui.update.UiNotifyConnector;
import org.jetbrains.annotations.NonNls;
import org.jetbrains.annotations.NotNull;
import javax.swing.*;
import java.awt.*;
import java.awt.dnd.MouseDragGestureRecognizer;
import java.awt.event.AWTEventListener;
import java.awt.event.MouseEvent;
import java.awt.event.MouseListener;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EventListener;
import java.util.List;
/**
* Utility tool to patch DnD listeners to enable multiple selection when original dnd is switched off
*
* @author spleaner
*/
public class DnDEnabler implements Activatable, Disposable {
  // Key under which an installed enabler instance can be stored/looked up as a client property.
  @NonNls public static final String KEY = "DragAndDropMultipleSelectionEnabler";

  // Global AWT listener intercepting mouse events targeted at the DnD source component.
  private final AWTEventListener myAwtListener = new MyAwtListener();
  // Mouse listeners originally registered on the component hierarchy; detached in
  // onSetUI() and re-dispatched manually from MyAwtListener.
  private List<EventListener> myMouseListeners;
  //private List<MouseListener> myMousePreprocessors = new ArrayList<MouseListener>();
  private final DnDAware myDnDSource;
  // The Swing drag gesture recognizer that was removed from the component; kept so drag
  // gestures can still be forwarded to it after the regular listeners had their turn.
  private MouseListener myOriginalDragGestureRecognizer;
  // Re-collects listeners whenever the look and feel changes (LaF switches replace UI delegates).
  private LafManagerListener myLafManagerListener = new LafManagerListener() {
    public void lookAndFeelChanged(LafManager source) {
      // todo[spleaner]: does default listeners are recreated onSetUI() and what 'bout custom listeners??
      onSetUI();
    }
  };
  // ToolTipManager's mouse listeners, captured in readTooltipListeners(); skipped during
  // manual dispatch when the component currently has no tooltip text.
  private MouseListener myTooltipListener1;
  private MouseListener myTooltipListener2;

  public DnDEnabler(@NotNull final DnDAware source, Disposable parent) {
    myDnDSource = source;
    final Component component = source.getComponent();
    LafManager.getInstance().addLafManagerListener(myLafManagerListener);
    // UiNotifyConnector drives showNotify()/hideNotify() based on component visibility
    final UiNotifyConnector connector = new UiNotifyConnector(component, this);// todo: disposable???
    Disposer.register(this, connector);
    Disposer.register(parent, this);
    onSetUI();
  }

  // NOTE(review): the global AWT listener is only removed in hideNotify(); if this is
  // disposed while the component is still showing it may stay registered — confirm.
  public void dispose() {
    LafManager.getInstance().removeLafManagerListener(myLafManagerListener);
    myOriginalDragGestureRecognizer = null;
  }

  // Component became visible: (re-)register the global AWT mouse listener.
  // The remove-before-add guards against double registration.
  public void showNotify() {
    Toolkit.getDefaultToolkit().removeAWTEventListener(myAwtListener);
    Toolkit.getDefaultToolkit().addAWTEventListener(myAwtListener, MouseEvent.MOUSE_EVENT_MASK);
  }

  // Component became invisible: stop intercepting AWT mouse events.
  public void hideNotify() {
    Toolkit.getDefaultToolkit().removeAWTEventListener(myAwtListener);
  }

  // Walks the component hierarchy, records every registered MouseListener and detaches
  // it from its component. A MouseDragGestureRecognizer is remembered separately (not
  // in myMouseListeners) so it can be invoked after the regular listeners.
  public void onSetUI() {
    myMouseListeners = new ArrayList<EventListener>();
    new AwtVisitor(myDnDSource.getComponent()) {
      public boolean visit(Component component) {
        EventListener[] mouseListeners = component.getListeners(MouseListener.class);
        if (mouseListeners.length > 0) {
          myMouseListeners.addAll(Arrays.asList(mouseListeners));
          for (EventListener each : mouseListeners) {
            if (each instanceof MouseDragGestureRecognizer) {
              myOriginalDragGestureRecognizer = (MouseListener)each;
              myMouseListeners.remove(each);
            }
            component.removeMouseListener((MouseListener)each);
          }
        }
        // returning false keeps the visitor walking the whole hierarchy
        return false;
      }
    };
    readTooltipListeners();
  }

  // Captures ToolTipManager's mouse listeners: the shared manager itself plus its private
  // "moveBeforeEnterListener" field, read via reflection. Best effort — reflection
  // failures are deliberately ignored (field may not exist on all JDKs).
  private void readTooltipListeners() {
    final ToolTipManager manager = ToolTipManager.sharedInstance();
    myTooltipListener1 = manager;
    try {
      //todo kirillk to detach mouseMotion listeners as well
      final Field moveBefore = manager.getClass().getDeclaredField("moveBeforeEnterListener");
      if (!MouseListener.class.isAssignableFrom(moveBefore.getType())) return;
      moveBefore.setAccessible(true);
      myTooltipListener2 = (MouseListener)moveBefore.get(manager);
    }
    catch (Exception ignored) {
    }
  }

  // Routes a mouse event to the matching MouseListener callback based on the event id.
  // MOUSE_DRAGGED/MOUSE_MOVED are MouseMotionListener events and are intentionally not handled here.
  private static void dispatchMouseEvent(MouseListener listener, MouseEvent e) {
    if (listener != null) {
      int id = e.getID();
      switch (id) {
        case MouseEvent.MOUSE_PRESSED:
          listener.mousePressed(e);
          break;
        case MouseEvent.MOUSE_RELEASED:
          listener.mouseReleased(e);
          break;
        case MouseEvent.MOUSE_CLICKED:
          listener.mouseClicked(e);
          break;
        case MouseEvent.MOUSE_EXITED:
          listener.mouseExited(e);
          break;
        case MouseEvent.MOUSE_ENTERED:
          listener.mouseEntered(e);
          break;
      }
    }
  }

  // True when this is a MOUSE_PRESSED that qualifies as a click on the current selection.
  @SuppressWarnings({"SimplifiableIfStatement"})
  public boolean isPressedToSelection(MouseEvent e) {
    if (MouseEvent.MOUSE_PRESSED != e.getID()) return false;
    return isToSelection(e);
  }

  // A plain single button-1 event whose point lies over the current selection.
  private boolean isToSelection(final MouseEvent e) {
    if (!isPureButton1Event(e)) return false;
    return e.getClickCount() == 1 && myDnDSource.isOverSelection(e.getPoint());
  }

  // True when a popup trigger happens over the current selection.
  @SuppressWarnings({"SimplifiableIfStatement"})
  public boolean isPopupToSelection(MouseEvent e) {
    return e.isPopupTrigger() && myDnDSource.isOverSelection(e.getPoint());
  }

  // True when no modifiers/buttons besides button 1 are involved in the event.
  private static boolean isPureButton1Event(MouseEvent event) {
    int button1 = MouseEvent.BUTTON1_MASK | MouseEvent.BUTTON1_DOWN_MASK;
    return (event.getModifiersEx() | button1) == button1;
  }

  // Intercepts mouse events for the source component and re-dispatches them to the
  // previously detached listeners, suppressing the ones that would break multi-selection.
  private class MyAwtListener implements AWTEventListener {
    public void eventDispatched(AWTEvent event) {
      if (event instanceof MouseEvent) {
        MouseEvent e = (MouseEvent)event;
        Component comp = myDnDSource.getComponent();
        // only events targeted directly at the DnD source component are handled
        if (e.getComponent() != comp) return;
        //for (MouseListener each : myMousePreprocessors) {
        //  dispatchMouseEvent(each, e);
        //  if (e.isConsumed()) return;
        //}
        // always true after the guard above; kept as in the original control flow
        if (e.getComponent() == comp) {
          // tooltip listeners only need to run when the component actually has tooltip text
          boolean shouldProcessTooltipManager = true;
          if (e.getComponent() instanceof JComponent) {
            final JComponent c = (JComponent)e.getComponent();
            if (c.getToolTipText() == null) {
              shouldProcessTooltipManager = false;
            }
          }
          if (isPressedToSelection(e)) {
            // press on an existing selection: swallow it (the detached listeners never see it)
            // and only transfer focus — presumably to keep the multi-selection intact for a drag
            if (myDnDSource.getComponent().isFocusable()) {
              myDnDSource.getComponent().requestFocus();
            }
          }
          else {
            final boolean popupToSelection = isPopupToSelection(e);
            if (!e.isConsumed()) {
              assert e.getComponent() != null : "component is null! IDEADEV-6339";
              // dispatch manually to the detached listeners, stopping once the event is consumed
              final EventListener[] eventListeners = myMouseListeners.toArray(new EventListener[myMouseListeners.size()]);
              for (EventListener each : eventListeners) {
                if (!shouldProcessTooltipManager) {
                  if (each == myTooltipListener1 || each == myTooltipListener2) continue;
                }
                if (popupToSelection) {
                  // skip the tree UI's internal drag-fix handler on popup-over-selection —
                  // presumably so the popup does not disturb the selection; TODO confirm
                  if (each != null && each.getClass().getName().indexOf("BasicTreeUI$DragFixHandler") >= 0) continue;
                }
                if (isToSelection(e) && e.getID() == MouseEvent.MOUSE_RELEASED) {
                  myDnDSource.dropSelectionButUnderPoint(e.getPoint());
                }
                dispatchMouseEvent((MouseListener)each, e);
                if (e.isConsumed()) break;
              }
              if (shouldProcessTooltipManager) {
                ((JComponent)e.getComponent()).setToolTipText(null);
              }
            }
          }
          // finally forward to the original drag gesture recognizer so native DnD still works
          if (myOriginalDragGestureRecognizer != null && !shouldIgnore(e, comp)) {
            dispatchMouseEvent(myOriginalDragGestureRecognizer, e);
          }
        }
      }
    }
  }

  // Drag gestures are ignored for null/disabled components and non-left-button events.
  private static boolean shouldIgnore(MouseEvent event, Component c) {
    return c == null || !c.isEnabled()
           || !SwingUtilities.isLeftMouseButton(event);
  }
}
| apache-2.0 |
jydimir/Music-Player | app/src/main/java/ua/edu/cdu/fotius/lisun/musicplayer/cab_menu/AlbumPlay.java | 1041 | package ua.edu.cdu.fotius.lisun.musicplayer.cab_menu;
import android.app.Fragment;
import ua.edu.cdu.fotius.lisun.musicplayer.service.MediaPlaybackServiceWrapper;
import ua.edu.cdu.fotius.lisun.musicplayer.async_tasks.AlbumTracksQueryAsyncTask;
import ua.edu.cdu.fotius.lisun.musicplayer.async_tasks.BaseTracksQueryAsyncTask;
/**
 * CAB action that plays all tracks of the selected albums: it first resolves the
 * album IDs to track IDs via an async query and then delegates playback of the
 * resolved tracks to the base {@code Play} action.
 */
public class AlbumPlay extends Play implements BaseTracksQueryAsyncTask.Callbacks {

    /** ID of the artist whose album tracks are queried; set once, never changed. */
    private final long mArtistID;

    /**
     * @param fragment       host fragment, also handed to the query task
     * @param serviceWrapper playback service wrapper passed to the base action
     * @param artistID       ID of the artist whose album tracks should be played
     */
    public AlbumPlay(Fragment fragment, MediaPlaybackServiceWrapper serviceWrapper, long artistID) {
        super(fragment, serviceWrapper);
        mArtistID = artistID;
    }

    /**
     * Starts the asynchronous album-to-tracks query; playback is triggered from
     * {@link #onQueryCompleted(long[])} once the query finishes.
     *
     * @param albumsID IDs of the albums whose tracks should be played
     */
    @Override
    public void execute(long[] albumsID) {
        final BaseTracksQueryAsyncTask query =
                new AlbumTracksQueryAsyncTask(mFragment, this, mArtistID);
        query.execute(albumsID);
    }

    @Override
    public void onQueryStart() {
        // intentionally empty: no progress UI is shown while the query runs
    }

    /**
     * Plays the resolved tracks. A {@code null} result (failed/empty query) is
     * deliberately ignored so no playback is started.
     */
    @Override
    public void onQueryCompleted(long[] trackIds) {
        if (trackIds != null) {
            super.execute(trackIds);
        }
    }
}
| apache-2.0 |
consulo/consulo | modules/base/editor-ui-ex/src/main/java/com/intellij/openapi/editor/highlighter/EditorHighlighterFactoryImpl.java | 5244 | /*
* Copyright 2000-2015 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.openapi.editor.highlighter;
import com.intellij.lang.Language;
import consulo.logging.Logger;
import com.intellij.openapi.editor.colors.EditorColorsManager;
import com.intellij.openapi.editor.colors.EditorColorsScheme;
import com.intellij.openapi.editor.ex.util.LexerEditorHighlighter;
import com.intellij.openapi.fileTypes.*;
import com.intellij.openapi.progress.ProcessCanceledException;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.psi.LanguageSubstitutors;
import com.intellij.testFramework.LightVirtualFile;
import javax.annotation.Nonnull;
import jakarta.inject.Singleton;
/**
* @author yole
*/
@Singleton
public class EditorHighlighterFactoryImpl extends EditorHighlighterFactory {
private static final Logger LOG = Logger.getInstance(EditorHighlighterFactoryImpl.class);
@Nonnull
@Override
public EditorHighlighter createEditorHighlighter(SyntaxHighlighter highlighter, @Nonnull final EditorColorsScheme colors) {
if (highlighter == null) highlighter = new PlainSyntaxHighlighter();
return new LexerEditorHighlighter(highlighter, colors);
}
@Nonnull
@Override
public EditorHighlighter createEditorHighlighter(@Nonnull final FileType fileType, @Nonnull final EditorColorsScheme settings, final Project project) {
if (fileType instanceof LanguageFileType) {
return FileTypeEditorHighlighterProviders.INSTANCE.forFileType(fileType).getEditorHighlighter(project, fileType, null, settings);
}
SyntaxHighlighter highlighter = SyntaxHighlighterFactory.getSyntaxHighlighter(fileType, project, null);
return createEditorHighlighter(highlighter, settings);
}
@Nonnull
@Override
public EditorHighlighter createEditorHighlighter(final Project project, @Nonnull final FileType fileType) {
return createEditorHighlighter(fileType, EditorColorsManager.getInstance().getGlobalScheme(), project);
}
@Nonnull
@Override
public EditorHighlighter createEditorHighlighter(@Nonnull VirtualFile vFile, @Nonnull EditorColorsScheme settings, @javax.annotation.Nullable Project project) {
FileType fileType = vFile.getFileType();
if (fileType instanceof LanguageFileType) {
LanguageFileType substFileType = substituteFileType(((LanguageFileType)fileType).getLanguage(), vFile, project);
if (substFileType != null) {
EditorHighlighterProvider provider = FileTypeEditorHighlighterProviders.INSTANCE.forFileType(substFileType);
EditorHighlighter editorHighlighter = provider.getEditorHighlighter(project, fileType, vFile, settings);
boolean isPlain = editorHighlighter.getClass() == LexerEditorHighlighter.class && ((LexerEditorHighlighter)editorHighlighter).isPlain();
if (!isPlain) {
return editorHighlighter;
}
}
try {
return FileTypeEditorHighlighterProviders.INSTANCE.forFileType(fileType).getEditorHighlighter(project, fileType, vFile, settings);
}
catch (ProcessCanceledException e) {
throw e;
}
catch (Exception e) {
LOG.error(e);
}
}
SyntaxHighlighter highlighter = SyntaxHighlighterFactory.getSyntaxHighlighter(fileType, project, vFile);
return createEditorHighlighter(highlighter, settings);
}
@javax.annotation.Nullable
private static LanguageFileType substituteFileType(Language language, VirtualFile vFile, Project project) {
LanguageFileType fileType = null;
if (vFile != null && project != null) {
Language substLanguage = LanguageSubstitutors.INSTANCE.substituteLanguage(language, vFile, project);
if (substLanguage != language) {
fileType = substLanguage.getAssociatedFileType();
}
}
return fileType;
}
@Nonnull
@Override
public EditorHighlighter createEditorHighlighter(final Project project, @Nonnull final VirtualFile file) {
return createEditorHighlighter(file, EditorColorsManager.getInstance().getGlobalScheme(), project);
}
  /**
   * Creates a highlighter for a file identified only by name, using the current
   * global color scheme. Delegates to the name-based overload below.
   */
  @Nonnull
  @Override
  public EditorHighlighter createEditorHighlighter(final Project project, @Nonnull final String fileName) {
    return createEditorHighlighter(EditorColorsManager.getInstance().getGlobalScheme(), fileName, project);
  }
  /**
   * Creates a highlighter for a file identified only by name under the given scheme.
   */
  @Nonnull
  @Override
  public EditorHighlighter createEditorHighlighter(@Nonnull final EditorColorsScheme settings,
                                                   @Nonnull final String fileName,
                                                   @javax.annotation.Nullable final Project project) {
    // A light in-memory file lets the name-based lookup reuse the file-based code path.
    return createEditorHighlighter(new LightVirtualFile(fileName), settings, project);
  }
}
| apache-2.0 |
lbenda/Coursing | client/src/main/java/cz/lbenda/coursing/client/gui/action/DogAddAction.java | 1656 | /*
* Copyright 2014 Lukas Benda <lbenda at lbenda.cz>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cz.lbenda.coursing.client.gui.action;
import cz.lbenda.coursing.client.ClientServiceLocator;
import cz.lbenda.coursing.client.gui.DogForm;
import cz.lbenda.coursing.service.DogService;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import org.openide.awt.ActionID;
import org.openide.awt.ActionReference;
import org.openide.awt.ActionRegistration;
import org.openide.util.NbBundle.Messages;
@ActionID(
category = "Coursing/Dog",
id = "cz.lbenda.coursing.client.gui.action.DogAddAction"
)
@ActionRegistration(
iconBase = "cz/lbenda/coursing/client/icon/dog-add.png",
displayName = "#CTL_DogAddAction"
)
@ActionReference(path = "Toolbars/Coursing", position = 300)
@Messages("CTL_DogAddAction=Add new DOG")
public final class DogAddAction implements ActionListener {
    @Override
    public void actionPerformed(ActionEvent ev) {
        // Look up the dog service and open the edit dialog on a freshly created dog.
        final DogService service = ClientServiceLocator.getInstance().getBean(DogService.class);
        DogForm.showDialog(service.createNew());
    }
}
| apache-2.0 |
Maximuspayne/NavyCraft2-Lite | src/com/maximuspayne/aimcannon/Torpedo.java | 508 | package com.maximuspayne.aimcannon;
import org.bukkit.block.Block;
import org.bukkit.block.BlockFace;
/** Mutable state holder for a single in-flight torpedo. */
public class Torpedo {
    /** Block acting as the torpedo's warhead. */
    Block warhead;
    /** Current compass heading of travel. */
    BlockFace hdg;
    /** Running depth, in blocks. */
    int torpDepth;
    /** Current rudder deflection and its commanded setting. */
    int rudder = 0;
    int rudderSetting = 0;
    /** Progress through an in-flight turn; -1 means not turning. */
    int turnProgress = -1;
    /** Target heading set for the torpedo; -1 means none. */
    int torpSetHeading = -1;
    boolean doubleTurn = false;
    /** Launch tube this torpedo came from. */
    int tubeNum = 0;
    boolean active = false;
    boolean auto = true;
    boolean dead = false;

    public Torpedo(Block warheadBlock, BlockFace heading, int depth) {
        this.warhead = warheadBlock;
        this.hdg = heading;
        this.torpDepth = depth;
    }
}
| apache-2.0 |
zouzhberk/ambaridemo | demo-server/src/main/java/org/apache/ambari/server/state/State.java | 6782 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ambari.server.state;
import org.apache.ambari.server.AmbariException;
public enum State {
  /**
   * Initial/Clean state.
   */
  INIT,
  /**
   * In the process of installing.
   */
  INSTALLING,
  /**
   * Install failed.
   */
  INSTALL_FAILED,
  /**
   * State when install completed successfully.
   */
  INSTALLED,
  /**
   * In the process of starting.
   */
  STARTING,
  /**
   * State when start completed successfully.
   */
  STARTED,
  /**
   * In the process of stopping.
   */
  STOPPING,
  /**
   * In the process of uninstalling.
   */
  UNINSTALLING,
  /**
   * State when uninstall completed successfully.
   */
  UNINSTALLED,
  /**
   * In the process of wiping out the install.
   */
  WIPING_OUT,
  /**
   * In the process of upgrading the deployed bits.
   */
  UPGRADING,
  /**
   * Disabled master's backup state
   */
  DISABLED,
  /**
   * State could not be determined.
   */
  UNKNOWN;
  /**
   * Indicates whether or not it is a valid desired state.
   *
   * @return true if this is a valid desired state.
   */
  public boolean isValidDesiredState() {
    // Only stable (non-transitional) states may be requested as a target.
    switch (this) {
      case INIT:
      case INSTALLED:
      case STARTED:
      case UNINSTALLED:
      case DISABLED:
        return true;
      default:
        return false;
    }
  }
  /**
   * Indicates whether or not it is a valid state for the client component.
   *
   * @return true if this is a valid state for a client component.
   */
  public boolean isValidClientComponentState() {
    // Client components are never "running", so start/stop related states are invalid.
    switch (this) {
      case STARTING:
      case STARTED:
      case STOPPING:
        return false;
      default:
        return true;
    }
  }
  /**
   * Indicates whether or not the resource with this state can be removed.
   *
   * @return true if this is a removable state
   */
  public boolean isRemovableState() {
    switch (this) {
      case INIT:
      case INSTALLING:
      case INSTALLED:
      case INSTALL_FAILED:
      case UNINSTALLED:
      case UNKNOWN:
      case DISABLED:
        return true;
      default:
        return false;
    }
  }
  /**
   * Utility method to determine whether or not a valid transition can be made from the given states.
   *
   * @param startState the starting state
   * @param desiredState the desired state
   *
   * @return true iff a valid transition can be made from the starting state to the desired state
   */
  public static boolean isValidStateTransition(State startState, State desiredState) {
    switch(desiredState) {
      case INSTALLED:
        if (startState == State.INIT
            || startState == State.UNINSTALLED
            || startState == State.INSTALLED
            || startState == State.INSTALLING
            || startState == State.STARTED
            || startState == State.INSTALL_FAILED
            || startState == State.UPGRADING
            || startState == State.STOPPING
            || startState == State.UNKNOWN
            || startState == State.DISABLED) {
          return true;
        }
        break;
      case STARTED:
        if (startState == State.INSTALLED
            || startState == State.STARTING
            || startState == State.STARTED) {
          return true;
        }
        break;
      case UNINSTALLED:
        if (startState == State.INSTALLED
            || startState == State.UNINSTALLED
            || startState == State.UNINSTALLING) {
          return true;
        }
        // NOTE(review): no break here — execution falls through into the INIT (and then
        // DISABLED) checks below, so start states such as INIT or WIPING_OUT are also
        // accepted for desiredState UNINSTALLED. Confirm this fall-through is intentional.
      case INIT:
        if (startState == State.UNINSTALLED
            || startState == State.INIT
            || startState == State.WIPING_OUT) {
          return true;
        }
        // NOTE(review): falls through into the DISABLED case below — confirm intentional.
      case DISABLED:
        if (startState == State.INSTALLED
            || startState == State.INSTALL_FAILED
            || startState == State.UNKNOWN) {
          return true;
        }
    }
    return false;
  }
  /**
   * Utility method to determine whether or not the given desired state is valid for the given starting state.
   *
   * @param startState the starting state
   * @param desiredState the desired state
   *
   * @return true iff the given desired state is valid for the given starting state
   */
  public static boolean isValidDesiredStateTransition(State startState, State desiredState) {
    switch(desiredState) {
      case INSTALLED:
        if (startState == State.INIT
            || startState == State.UNINSTALLED
            || startState == State.INSTALLED
            || startState == State.STARTED
            || startState == State.STOPPING) {
          return true;
        }
        break;
      case STARTED:
        if (startState == State.INSTALLED
            || startState == State.STARTED) {
          return true;
        }
        break;
    }
    return false;
  }
  /**
   * Determine whether or not it is safe to update the configuration of the given service
   * component host for the given states.
   *
   * @param serviceComponentHost the service component host
   * @param currentState the current state
   * @param desiredState the desired state
   *
   * @throws AmbariException if the changing of configuration is not supported
   */
  public static void checkUpdateConfiguration(
      ServiceComponentHost serviceComponentHost,
      State currentState, State desiredState)
      throws AmbariException {
    // Config changes are only allowed when the target state is a stable one
    // (INIT, INSTALLED or STARTED); transitional targets are rejected.
    if (desiredState != null) {
      if (!(desiredState == State.INIT
          || desiredState == State.INSTALLED
          || desiredState == State.STARTED)) {
        throw new AmbariException("Changing of configs not supported"
            + " for this transition"
            + ", clusterName=" + serviceComponentHost.getClusterName()
            + ", serviceName=" + serviceComponentHost.getServiceName()
            + ", componentName=" + serviceComponentHost.getServiceComponentName()
            + ", hostname=" + serviceComponentHost.getHostName()
            + ", currentState=" + currentState
            + ", newDesiredState=" + desiredState);
      }
    }
  }
}
| apache-2.0 |
Wenpei/incubator-systemml | src/main/java/org/apache/sysml/yarn/ropt/ResourceOptimizerCPMigration.java | 10018 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.sysml.yarn.ropt;
/**
 * NOTE(review): the entire implementation of this class is commented out (see the
 * "FIXME MB" marker below); the class currently contributes no runtime behavior and
 * appears to be kept only as a reference for a future CP-migration based resource
 * optimizer. Confirm whether it should be revived or removed.
 */
public class ResourceOptimizerCPMigration
{
	/*// FIXME MB
	private static final Log LOG = LogFactory.getLog(CPMigrationOptimizer.class.getName());
	public static void initResumeInfoFromFile(String file, ExecutionContext ec) throws IOException, DMLRuntimeException {
		DMLScript.resumeSbIdRStack.clear();
		DMLScript.resumeFuncVarRStack.clear();
		DMLScript.resumeLoopAndFuncEntryVarRStack.clear();
		if (file == null)
			return;
		FileSystem fs = FileSystem.get(new YarnConfiguration());
		Path path = new Path(file);
		if (!fs.exists(path))
			throw new IOException("File " + file + " does not exist");
		//BufferedReader reader = new BufferedReader(new InputStreamReader(fs.open(path)));
		FSDataInputStream reader = fs.open(path);
		DMLScript.predecessorAppIdStr = reader.readUTF().trim();
		DMLScript.resumeSbIdRStack = DMLScript.deserializeToReverseStack(reader.readUTF().trim());
		// Read and invert the function symbol table stack
		Stack<LocalVariableMap> tmpStack = new Stack<LocalVariableMap>();
		int n = Integer.parseInt(reader.readUTF().trim());
		for (int i = 0; i < n; i++)
			tmpStack.push(LocalVariableMap.deserialize(reader.readUTF().trim()));
		for (int i = 0; i < n; i++)
			DMLScript.resumeFuncVarRStack.push(tmpStack.pop());
		// Read and invert the loop and func entry symbol table stack
		tmpStack.clear();
		n = Integer.parseInt(reader.readUTF().trim());
		for (int i = 0; i < n; i++)
			tmpStack.push(LocalVariableMap.deserialize(reader.readUTF().trim()));
		for (int i = 0; i < n; i++)
			DMLScript.resumeLoopAndFuncEntryVarRStack.push(tmpStack.pop());
		reader.close();
		// Append this CP's AppId to the predecessor's resume file
		FSDataOutputStream fout = fs.append(path);
		fout.writeUTF(MyApplicationMaster.appId + "\n");
		fout.close();
		// Log the resume info
		StringBuilder sb = new StringBuilder();
		sb.append("Predecessor is " + DMLScript.predecessorAppIdStr + "\n");
		sb.append("Inverse sbID stack: ");
		for (Long l : DMLScript.resumeSbIdRStack)
			sb.append(l + ", ");
		sb.append("\n" + DMLScript.resumeFuncVarRStack.size() + " inverse function symbol table stack:\n");
		for (LocalVariableMap symbolTable : DMLScript.resumeFuncVarRStack)
			sb.append("\t" + symbolTable.serialize() + "\n");
		sb.append(DMLScript.resumeLoopAndFuncEntryVarRStack.size() + " inverse loop and func entry symbol table stack:\n");
		for (LocalVariableMap symbolTable : DMLScript.resumeLoopAndFuncEntryVarRStack)
			sb.append("\t" + symbolTable.serialize() + "\n");
		LOG.info(sb.toString());
		// Recover the most outer layer of symbol table before execution starts
		LocalVariableMap varMap = DMLScript.resumeFuncVarRStack.pop();
		DMLScript.lastPopedFuncVarStr = varMap.serialize();	// For later correctness check
		ec.setVariables(varMap);
	}
	// Try to migrate to a new CP, return false if failed
	public static boolean migrateCP(LocalVariableMap currentSymbolTable) throws DMLRuntimeException {
		DMLScript.execFuncVarStack.push(currentSymbolTable);
		long start = System.currentTimeMillis();
		for (LocalVariableMap symbolTable : DMLScript.execFuncVarStack) {
			for (String var : symbolTable.keySet()) {
				Data data = symbolTable.get(var);
				if ( data.getDataType() == DataType.MATRIX ) {
					long time = System.currentTimeMillis();
					MatrixObject matrix = (MatrixObject) data;
					matrix.exportData();
					time = System.currentTimeMillis() - time;
					LOG.info("Exporting " + var + " took " + time + "ms");
				}
			}
		}
		start = System.currentTimeMillis() - start;
		LOG.info("Exporting data to hdfs took " + start + "ms");
		DMLConfig config = ConfigurationManager.getConfig();
		String hdfsWorkingDir = MyYarnClient.getHDFSWorkingDir(config, MyApplicationMaster.appId);
		try {
			FileSystem fs = FileSystem.get(MyApplicationMaster.conf);
			Path resume_file = new Path(hdfsWorkingDir, DMLScript.CP_RESUME_HDFS_FILE);
			if (fs.exists(resume_file))
				throw new IOException("File " + resume_file + " already exists?");
			FSDataOutputStream fout = fs.create(resume_file);
			fout.writeUTF(MyApplicationMaster.appId + "\n");
			String tmp = DMLScript.serializeExecSbIdStack(DMLScript.execSbIdStack);
			fout.writeUTF(tmp + "\n");
			LOG.info("SbId Stack: " + tmp);
			fout.writeUTF(DMLScript.execFuncVarStack.size() + "\n");
			LOG.info(DMLScript.execFuncVarStack.size() + " func symbol tables");
			for (LocalVariableMap symbolTable : DMLScript.execFuncVarStack) {
				tmp = symbolTable.serialize();
				fout.writeUTF(symbolTable.serialize() + "\n");
				LOG.info("\t" + tmp);
			}
			fout.writeUTF(DMLScript.execLoopAndFuncEntryVarStack.size() + "\n");
			LOG.info(DMLScript.execLoopAndFuncEntryVarStack.size() + " loop and func entry symbol tables");
			for (LocalVariableMap symbolTable : DMLScript.execLoopAndFuncEntryVarStack) {
				tmp = symbolTable.serialize();
				fout.writeUTF(symbolTable.serialize() + "\n");
				LOG.info("\t" + tmp);
			}
			fout.close();
			// If specified, run the new CP within this old CP automatically
			if (DMLScript.newCpBudgetByte != -1) {
				YarnApplicationState state;
				MyYarnClient client = new MyYarnClient();
				state = client.runResumeCP(MyApplicationMaster.fullArgs, DMLScript.newCpBudgetByte, hdfsWorkingDir);
				if (state != YarnApplicationState.FINISHED)
					throw new DMLRuntimeException("Resuming CP " + state);
			}
		} catch (Exception e) {
			throw new DMLRuntimeException(e);
		}
		// Make this CP stop gracefully
		DMLScript.migratedAndStop = true;
		LOG.info("CP migration done, exiting gracefully");
		return true;
	}
	// A complete full copy of the runtime plan for recompile and costing
	public ArrayList<ProgramBlock> copiedProgramBlocks;
	Program prog;
	public HashMap<Long, ProgramBlock> sbIdMap;	// Pointer to all reachable program blocks
	public CPMigrationOptimizer(Program rtprog) throws DMLRuntimeException, HopsException {
		prog = rtprog;
		copiedProgramBlocks = ProgramConverter.rcreateDeepCopyProgramBlocks(prog._programBlocks, 1, -1, new HashSet<String>(), false);
		// Temporarily disable dynamic recompile to clear all flags
		boolean flag = OptimizerUtils.ALLOW_DYN_RECOMPILATION;
		OptimizerUtils.ALLOW_DYN_RECOMPILATION = false;
		sbIdMap = new HashMap<Long, ProgramBlock> ();
		for (ProgramBlock pb : copiedProgramBlocks)
			initTraverse(pb);
		OptimizerUtils.ALLOW_DYN_RECOMPILATION = flag;
	}
	public void recompile() throws HopsException {
		resetAllFlags();
	}
	public boolean shouldMigrate(Stack<Long> execSbIdStack, Stack<LocalVariableMap> execLoopAndFuncEntryVarStack)
			throws HopsException {
		int i = 0, j = 0;
		int base = -1;	// Index of base function, -1 for main
		LocalVariableMap baseVar = new LocalVariableMap();
		int loop = -1;	// Index of the outer most loop
		HashMap<Long, LocalVariableMap> loopEntryVarMap = new HashMap<Long, LocalVariableMap>();
		for (Long sbId : execSbIdStack) {
			ProgramBlock pb = sbIdMap.get(sbId);
			if (pb instanceof FunctionProgramBlock) {
				base = i;
				baseVar = execLoopAndFuncEntryVarStack.get(j);
				loop = -1;
				loopEntryVarMap.clear();
				j++;
			} else if (pb instanceof WhileProgramBlock || pb instanceof ForProgramBlock) {
				if (loop == -1)
					loop = i;
				loopEntryVarMap.put(sbId, execLoopAndFuncEntryVarStack.get(j));
				j++;
			}
			i++;
		}
		// To be continued !!!!!
		return false;
	}
	public void resetAllFlags() throws HopsException {
		boolean flag = OptimizerUtils.ALLOW_DYN_RECOMPILATION;
		OptimizerUtils.ALLOW_DYN_RECOMPILATION = false;
		for (Map.Entry<Long, ProgramBlock> entry : sbIdMap.entrySet())
			entry.getValue().getStatementBlock().updateRecompilationFlag();
		OptimizerUtils.ALLOW_DYN_RECOMPILATION = flag;
	}
	public void initTraverse(ProgramBlock pb) throws HopsException, DMLRuntimeException {
		long sbId = pb.getStatementBlock().getID();
		if (sbIdMap.containsKey(sbId))
			return;
		sbIdMap.put(sbId, pb);
		if (pb instanceof WhileProgramBlock) {
			WhileProgramBlock tmp = (WhileProgramBlock)pb;
			for (ProgramBlock pb2 : tmp.getChildBlocks())
				initTraverse(pb2);
		} else if (pb instanceof IfProgramBlock) {
			IfProgramBlock tmp = (IfProgramBlock)pb;
			for (ProgramBlock pb2 : tmp.getChildBlocksIfBody())
				initTraverse(pb2);
			for (ProgramBlock pb2 : tmp.getChildBlocksElseBody())
				initTraverse(pb2);
		} else if (pb instanceof ForProgramBlock) {
			ForProgramBlock tmp = (ForProgramBlock)pb;
			for (ProgramBlock pb2 : tmp.getChildBlocks())
				initTraverse(pb2);
		} else if (pb instanceof FunctionProgramBlock && !(pb instanceof ExternalFunctionProgramBlock)) {
			FunctionProgramBlock tmp = (FunctionProgramBlock) pb;
			for (ProgramBlock pb2 : tmp.getChildBlocks())
				initTraverse(pb2);
		} else {
			// Clear the flag on leaf blocks
			pb.getStatementBlock().updateRecompilationFlag();
			for (Instruction inst : pb.getInstructions()) {
				if (inst instanceof FunctionCallCPInstruction) {
					FunctionCallCPInstruction finst = (FunctionCallCPInstruction)inst;
					initTraverse(prog.getFunctionProgramBlock(finst.getNamespace(), finst.getFunctionName()));
				}
			}
		}
	}
	*/
}
| apache-2.0 |
KarloKnezevic/Ferko | src/java/hr/fer/zemris/jcms/beans/cached/STEScore.java | 3510 | package hr.fer.zemris.jcms.beans.cached;
import hr.fer.zemris.jcms.model.AssessmentScore;
import hr.fer.zemris.jcms.service.assessments.AssessmentStatus;
/**
 * Score-table entry wrapping a single {@link AssessmentScore}: raw/normalized score,
 * pass/fail status, rank, and the corresponding "effective" values.
 */
public class STEScore extends ScoreTableEntry {
	private static final long serialVersionUID = 1L;
	/**
	 * Sentinel rank meaning "no rank assigned"; ranks at or above this value are
	 * rendered as an empty string. (Was a repeated magic number 30000.)
	 */
	private static final short NO_RANK = 30000;
	private Long assignerID;
	private double score;
	private double rawScore;
	private boolean error;
	private boolean present;
	private AssessmentStatus status;
	private short rank;
	private AssessmentStatus effectiveStatus;
	private double effectiveScore;
	private boolean effectivePresent;
	private short effectiveRank;
	/** Field-by-field constructor. */
	public STEScore(Long id, boolean present, double score, double rawScore,
			AssessmentStatus status, short rank, Long assignerID, boolean error,
			AssessmentStatus effectiveStatus, double effectiveScore, boolean effectivePresent, short effectiveRank) {
		super(id);
		this.present = present;
		this.score = score;
		this.rawScore = rawScore;
		this.status = status;
		this.rank = rank;
		this.assignerID = assignerID;
		this.error = error;
		this.effectivePresent = effectivePresent;
		this.effectiveScore = effectiveScore;
		this.effectiveStatus = effectiveStatus;
		this.effectiveRank = effectiveRank;
	}
	/**
	 * Builds an entry from an {@link AssessmentScore}; a {@code null} score yields a
	 * "not present, failed, unranked" entry.
	 */
	public STEScore(AssessmentScore as) {
		super(as!=null ? as.getId() : null);
		if(as!=null) {
			this.present = as.getPresent();
			this.score = as.getScore();
			this.rawScore = as.getRawScore();
			this.status = as.getStatus();
			this.rank = as.getRank();
			this.assignerID = as.getAssigner()!=null ? as.getAssigner().getId() : null;
			this.error = as.isError();
			this.effectivePresent = as.getEffectivePresent();
			this.effectiveScore = as.getEffectiveScore();
			this.effectiveStatus = as.getEffectiveStatus();
			this.effectiveRank = as.getEffectiveRank();
		} else {
			this.present = false;
			this.score = 0;
			this.rawScore = 0;
			this.status = AssessmentStatus.FAILED;
			this.rank = NO_RANK;
			this.assignerID = null;
			this.error = false;
			this.effectivePresent = false;
			this.effectiveScore = 0;
			this.effectiveStatus = AssessmentStatus.FAILED;
			this.effectiveRank = NO_RANK;
		}
	}
	public Long getAssignerID() {
		return assignerID;
	}
	// NOTE(review): String.format uses the default locale, so the decimal separator
	// may be a comma in some locales — confirm that is acceptable for display.
	public String getScoreAsString() {
		return String.format("%1$.2f", score);
	}
	public String getEffectiveScoreAsString() {
		return String.format("%1$.2f", effectiveScore);
	}
	public double getScore() {
		return score;
	}
	public String getRawScoreAsString() {
		return String.format("%1$.2f", rawScore);
	}
	public double getRawScore() {
		return rawScore;
	}
	public boolean isError() {
		return error;
	}
	public boolean isPresent() {
		return present;
	}
	public AssessmentStatus getStatus() {
		return status;
	}
	public short getRank() {
		return rank;
	}
	/** Returns the rank, or an empty string when no rank is assigned. */
	public String getRankAsString() {
		if(rank>=NO_RANK) return "";
		return String.valueOf(rank);
	}
	/** Returns the effective rank, or an empty string when no rank is assigned. */
	public String getEffectiveRankAsString() {
		if(effectiveRank>=NO_RANK) return "";
		return String.valueOf(effectiveRank);
	}
	@Override
	public String toString() {
		if(error) return "*";
		if(!effectivePresent) return "";
		return effectiveScore + "|" + effectiveStatus + "("+(effectiveRank>=NO_RANK ? "" : String.valueOf(effectiveRank))+")";
	}
	@Override
	public byte getType() {
		return (byte)1;
	}
	public AssessmentStatus getEffectiveStatus() {
		return effectiveStatus;
	}
	public double getEffectiveScore() {
		return effectiveScore;
	}
	public boolean getEffectivePresent() {
		return effectivePresent;
	}
	public short getEffectiveRank() {
		return effectiveRank;
	}
}
| apache-2.0 |
xuegongzi/rabbitframework | rabbitframework-security-pom/rabbitframework-security/src/main/java/com/rabbitframework/security/web/filter/authz/UriPermissionsFilter.java | 940 | package com.rabbitframework.security.web.filter.authz;
import com.rabbitframework.security.web.filter.authz.PermissionsAuthorizationFilter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import java.io.IOException;
/**
 * Authorization filter that checks access permissions using the request URI
 * (relative to the web application) as the permission string.
 *
 * @author: justin
 * @date: 2018-04-21 下午11:46
 */
public class UriPermissionsFilter extends PermissionsAuthorizationFilter {
    private static final Logger logger = LoggerFactory.getLogger(UriPermissionsFilter.class);

    @Override
    public boolean isAccessAllowed(ServletRequest request, ServletResponse response, Object mappedValue)
            throws IOException {
        // The permission to check is the request path itself, relative to the app context.
        final String requestUri = getPathWithinApplication(request);
        if (logger.isDebugEnabled()) {
            logger.debug("requestUrl:" + requestUri);
        }
        return super.isAccessAllowed(request, response, new String[] { requestUri });
    }
}
| apache-2.0 |
Catherine22/DesignPattern | src/com/catherine/singleton/LazyInitializingSingleton.java | 1087 | package com.catherine.singleton;
/**
* Created by Catherine on 2016/10/4.
* Soft-World Inc.
* catherine919@soft-world.com.tw
*/
/**
 * Singleton pattern: guarantees that exactly one instance of this class exists in
 * the whole application.
 * <p>
 * Lazy-initialization variant (rather than this, prefer the static-inner-class
 * holder {@link BillPughSingleton}).
 * <p>
 * Under multiple threads the singleton may be created more than once. Class loading
 * is fast, but fetching the instance at runtime is comparatively slow, and it is not
 * thread-safe. Lazy initialization is a good choice when the singleton is used
 * rarely but is complex to build and expensive to load and initialize.
 */
public class LazyInitializingSingleton {
    // Single shared instance; created lazily on first use.
    private static LazyInitializingSingleton instance = null;
    /** Private constructor prevents instantiation from outside. */
    private LazyInitializingSingleton() {
    }
    /**
     * Returns the shared instance, creating it on first call.
     * Not thread-safe by design (see class comment): two threads racing through the
     * null check may each create an instance.
     */
    public static LazyInitializingSingleton getInstance() {
        if (instance == null)
            instance = new LazyInitializingSingleton();
        return instance;
    }
    /** Prints an identifying message for demo purposes. */
    public void print() {
        System.out.println("Singleton:LazyInitializingSingleton");
    }
}
| apache-2.0 |
blangel/ply | src/main/java/net/ocheyedan/ply/cmd/build/ShellScript.java | 2393 | package net.ocheyedan.ply.cmd.build;
import net.ocheyedan.ply.exec.Execution;
import net.ocheyedan.ply.props.Context;
import net.ocheyedan.ply.props.Filter;
import net.ocheyedan.ply.props.Props;
import net.ocheyedan.ply.props.Scope;
import java.util.ArrayList;
import java.util.List;
/**
* User: blangel
* Date: 1/15/12
* Time: 10:50 AM
*
* Extends {@link Script} to allow for shell invocations. Shell invocation within ply are denoted by surrounding
* the script with '`' (tick marks).
*/
public final class ShellScript extends Script {

    ShellScript(Script script) {
        this(script.name, script.scope, script.arguments, script.unparsedName);
    }

    private ShellScript(String name, Scope scope, List<String> arguments, String unparsedName) {
        super(name, scope, arguments, unparsedName);
    }

    /** Resolves alias/property references in the script name and returns the filtered copy. */
    @Override Script filter() {
        String filteredName = Filter.filter(name, Context.named("alias"),
                String.valueOf(System.identityHashCode(this)), Props.get(scope));
        return new ShellScript(filteredName, scope, filterArguments(), unparsedName);
    }

    /**
     * Strips the surrounding '`' tick marks and builds a single-argument execution.
     * The script's arguments are folded (space delimited) into the command string
     * handed to the shell; they are arguments to the resolved shell (i.e., bash),
     * not separate argv entries for the underlying script (i.e., ls).
     * @param overriddenExecutionName used as the resulting {@link Execution}'s name.
     * @return a single shell-script execution
     */
    @Override protected List<Execution> convert(String overriddenExecutionName) {
        // Drop the leading/trailing '`' and un-escape any embedded "\`" sequences.
        String command = name.substring(1, name.length() - 1).replace("\\`", "`");
        StringBuilder buffer = new StringBuilder(command);
        for (String argument : arguments) {
            buffer.append(' ').append(argument);
        }
        List<Execution> executions = new ArrayList<Execution>(1);
        executions.add(new Execution(overriddenExecutionName, this, new String[] { buffer.toString() }));
        return executions;
    }
}
| apache-2.0 |
multi-os-engine/moe-core | moe.apple/moe.platform.ios/src/main/java/apple/gamecontroller/GCColor.java | 5487 | package apple.gamecontroller;
import apple.NSObject;
import apple.foundation.NSArray;
import apple.foundation.NSCoder;
import apple.foundation.NSMethodSignature;
import apple.foundation.NSSet;
import apple.foundation.protocol.NSCopying;
import apple.foundation.protocol.NSSecureCoding;
import org.moe.natj.c.ann.FunctionPtr;
import org.moe.natj.general.NatJ;
import org.moe.natj.general.Pointer;
import org.moe.natj.general.ann.Generated;
import org.moe.natj.general.ann.Library;
import org.moe.natj.general.ann.Mapped;
import org.moe.natj.general.ann.MappedReturn;
import org.moe.natj.general.ann.NInt;
import org.moe.natj.general.ann.NUInt;
import org.moe.natj.general.ann.Owned;
import org.moe.natj.general.ann.Runtime;
import org.moe.natj.general.ptr.VoidPtr;
import org.moe.natj.objc.Class;
import org.moe.natj.objc.ObjCRuntime;
import org.moe.natj.objc.SEL;
import org.moe.natj.objc.ann.ObjCClassBinding;
import org.moe.natj.objc.ann.ProtocolClassMethod;
import org.moe.natj.objc.ann.Selector;
import org.moe.natj.objc.map.ObjCObjectMapper;
/**
* Represents a color used by a GCDeviceLight.
*
* @see GCDeviceLight
*/
@Generated
@Library("GameController")
@Runtime(ObjCRuntime.class)
@ObjCClassBinding
// NOTE(review): this is a generated Objective-C binding (MOE/NatJ). Prefer
// regenerating the bindings over hand-editing this class.
public class GCColor extends NSObject implements NSCopying, NSSecureCoding {
    static {
        // Registers this class's native bindings with the NatJ runtime.
        NatJ.register();
    }

    @Generated
    protected GCColor(Pointer peer) {
        super(peer);
    }

    @Generated
    @Selector("accessInstanceVariablesDirectly")
    public static native boolean accessInstanceVariablesDirectly();

    @Generated
    @Owned
    @Selector("alloc")
    public static native GCColor alloc();

    @Owned
    @Generated
    @Selector("allocWithZone:")
    public static native GCColor allocWithZone(VoidPtr zone);

    @Generated
    @Selector("automaticallyNotifiesObserversForKey:")
    public static native boolean automaticallyNotifiesObserversForKey(String key);

    // Blue color component.
    @Generated
    @Selector("blue")
    public native float blue();

    @Generated
    @Selector("cancelPreviousPerformRequestsWithTarget:")
    public static native void cancelPreviousPerformRequestsWithTarget(@Mapped(ObjCObjectMapper.class) Object aTarget);

    @Generated
    @Selector("cancelPreviousPerformRequestsWithTarget:selector:object:")
    public static native void cancelPreviousPerformRequestsWithTargetSelectorObject(
            @Mapped(ObjCObjectMapper.class) Object aTarget, SEL aSelector,
            @Mapped(ObjCObjectMapper.class) Object anArgument);

    @Generated
    @Selector("classFallbacksForKeyedArchiver")
    public static native NSArray<String> classFallbacksForKeyedArchiver();

    @Generated
    @Selector("classForKeyedUnarchiver")
    public static native Class classForKeyedUnarchiver();

    @Generated
    @Owned
    @Selector("copyWithZone:")
    @MappedReturn(ObjCObjectMapper.class)
    public native Object copyWithZone(VoidPtr zone);

    @Generated
    @Selector("debugDescription")
    public static native String debugDescription_static();

    @Generated
    @Selector("description")
    public static native String description_static();

    @Generated
    @Selector("encodeWithCoder:")
    public native void encodeWithCoder(NSCoder coder);

    // Green color component.
    @Generated
    @Selector("green")
    public native float green();

    @Generated
    @Selector("hash")
    @NUInt
    public static native long hash_static();

    @Generated
    @Selector("init")
    public native GCColor init();

    @Generated
    @Selector("initWithCoder:")
    public native GCColor initWithCoder(NSCoder coder);

    // Initializes the color from its red/green/blue components.
    @Generated
    @Selector("initWithRed:green:blue:")
    public native GCColor initWithRedGreenBlue(float red, float green, float blue);

    @Generated
    @Selector("instanceMethodForSelector:")
    @FunctionPtr(name = "call_instanceMethodForSelector_ret")
    public static native NSObject.Function_instanceMethodForSelector_ret instanceMethodForSelector(SEL aSelector);

    @Generated
    @Selector("instanceMethodSignatureForSelector:")
    public static native NSMethodSignature instanceMethodSignatureForSelector(SEL aSelector);

    @Generated
    @Selector("instancesRespondToSelector:")
    public static native boolean instancesRespondToSelector(SEL aSelector);

    @Generated
    @Selector("isSubclassOfClass:")
    public static native boolean isSubclassOfClass(Class aClass);

    @Generated
    @Selector("keyPathsForValuesAffectingValueForKey:")
    public static native NSSet<String> keyPathsForValuesAffectingValueForKey(String key);

    @Generated
    @Owned
    @Selector("new")
    public static native GCColor new_objc();

    // Red color component.
    @Generated
    @Selector("red")
    public native float red();

    @Generated
    @Selector("resolveClassMethod:")
    public static native boolean resolveClassMethod(SEL sel);

    @Generated
    @Selector("resolveInstanceMethod:")
    public static native boolean resolveInstanceMethod(SEL sel);

    @Generated
    @Selector("setVersion:")
    public static native void setVersion_static(@NInt long aVersion);

    @Generated
    @Selector("superclass")
    public static native Class superclass_static();

    @Generated
    @Selector("supportsSecureCoding")
    public static native boolean supportsSecureCoding();

    // Instance-level protocol hook that delegates to the static native method above.
    @Generated
    @ProtocolClassMethod("supportsSecureCoding")
    public boolean _supportsSecureCoding() {
        return supportsSecureCoding();
    }

    @Generated
    @Selector("version")
    @NInt
    public static native long version_static();
}
| apache-2.0 |
marcocor/smaph | src/main/java/it/unipi/di/acube/smaph/learn/models/linkback/bindingRegressor/RankLibBindingRegressor.java | 1039 | package it.unipi.di.acube.smaph.learn.models.linkback.bindingRegressor;
import java.io.IOException;
import java.net.URL;
import java.util.HashSet;
import java.util.List;
import it.unipi.di.acube.batframework.data.Annotation;
import it.unipi.di.acube.smaph.learn.featurePacks.FeaturePack;
import it.unipi.di.acube.smaph.learn.models.RankLibModel;
import it.unipi.di.acube.smaph.learn.normalizer.FeatureNormalizer;
public class RankLibBindingRegressor extends RankLibModel<HashSet<Annotation>> implements BindingRegressor {

    private RankLibBindingRegressor(URL modelUrl) throws IOException {
        super(modelUrl);
    }

    /** Factory method mirroring the private constructor. */
    public static RankLibBindingRegressor fromUrl(URL modelUrl) throws IOException {
        return new RankLibBindingRegressor(modelUrl);
    }

    /** Scores each feature pack independently with the underlying RankLib model. */
    @Override
    public double[] getScores(List<FeaturePack<HashSet<Annotation>>> features, FeatureNormalizer fn) {
        double[] scores = new double[features.size()];
        int next = 0;
        for (FeaturePack<HashSet<Annotation>> pack : features) {
            scores[next++] = super.predictScore(pack, fn);
        }
        return scores;
    }
}
| apache-2.0 |
stephraleigh/flowable-engine | modules/flowable-engine/src/main/java/org/flowable/engine/impl/history/async/json/transformer/ActivityEndHistoryJsonTransformer.java | 2651 | /* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.flowable.engine.impl.history.async.json.transformer;
import java.util.Date;
import org.flowable.engine.common.impl.interceptor.CommandContext;
import org.flowable.engine.delegate.event.FlowableEngineEventType;
import org.flowable.engine.delegate.event.impl.FlowableEventBuilder;
import org.flowable.engine.impl.history.async.HistoryJsonConstants;
import org.flowable.engine.impl.persistence.entity.HistoricActivityInstanceEntity;
import org.flowable.engine.impl.persistence.entity.HistoryJobEntity;
import com.fasterxml.jackson.databind.node.ObjectNode;
public class ActivityEndHistoryJsonTransformer extends AbstractNeedsUnfinishedHistoricActivityHistoryJsonTransformer {

    /** This transformer handles async-history records of the activity-end type. */
    @Override
    public String getType() {
        return HistoryJsonConstants.TYPE_ACTIVITY_END;
    }

    /**
     * Closes the matching unfinished historic activity instance: sets its end time,
     * delete reason and duration, then fires a HISTORIC_ACTIVITY_INSTANCE_ENDED event.
     * Does nothing when no unfinished instance is found for the execution/activity pair.
     */
    @Override
    public void transformJson(HistoryJobEntity job, ObjectNode historicalData, CommandContext commandContext) {
        String executionId = getStringFromJson(historicalData, HistoryJsonConstants.EXECUTION_ID);
        String activityId = getStringFromJson(historicalData, HistoryJsonConstants.ACTIVITY_ID);
        HistoricActivityInstanceEntity activityInstance = findUnfinishedHistoricActivityInstance(commandContext, executionId, activityId);
        if (activityInstance == null) {
            return; // nothing to close for this record
        }
        Date endTime = getDateFromJson(historicalData, HistoryJsonConstants.END_TIME);
        activityInstance.setEndTime(endTime);
        activityInstance.setDeleteReason(getStringFromJson(historicalData, HistoryJsonConstants.DELETE_REASON));
        Date startTime = activityInstance.getStartTime();
        // duration is only computable when both timestamps are present
        if (startTime != null && endTime != null) {
            activityInstance.setDurationInMillis(endTime.getTime() - startTime.getTime());
        }
        dispatchEvent(commandContext, FlowableEventBuilder.createEntityEvent(
                FlowableEngineEventType.HISTORIC_ACTIVITY_INSTANCE_ENDED, activityInstance));
    }
}
| apache-2.0 |
johngmyers/airlift | stats/src/main/java/io/airlift/stats/DecayCounter.java | 5261 | package io.airlift.stats;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.base.Ticker;
import org.weakref.jmx.Managed;
import java.util.concurrent.TimeUnit;
import static com.google.common.base.MoreObjects.toStringHelper;
import static com.google.common.base.Preconditions.checkArgument;
import static java.util.Objects.requireNonNull;
/*
* A counter that decays exponentially. Values are weighted according to the formula
* w(t, α) = e^(-α * t), where α is the decay factor and t is the age in seconds
*
* The implementation is based on the ideas from
* http://www.research.att.com/people/Cormode_Graham/library/publications/CormodeShkapenyukSrivastavaXu09.pdf
* to not have to rely on a timer that decays the value periodically
*/
public class DecayCounter
{
    // needs to be such that Math.exp(alpha * seconds) does not grow too big
    static final long RESCALE_THRESHOLD_SECONDS = 50;

    // decay factor per second; larger alpha forgets history faster
    private final double alpha;

    // time source; injectable so tests can control the clock
    private final Ticker ticker;

    // reference point (in ticker-seconds) that 'count' is expressed relative to
    private long landmarkInSeconds;

    // decayed count, scaled to the current landmark (see rescaleToNewLandmark)
    private double count;

    /**
     * Creates a counter using the system ticker as its time source.
     *
     * @param alpha decay factor per second
     */
    public DecayCounter(double alpha)
    {
        this(alpha, Ticker.systemTicker());
    }

    /**
     * Creates a counter with an explicit time source (mainly for tests).
     *
     * @param alpha decay factor per second
     * @param ticker time source used to age the count
     */
    public DecayCounter(double alpha, Ticker ticker)
    {
        this.alpha = alpha;
        this.ticker = ticker;
        landmarkInSeconds = getTickInSeconds();
    }

    /**
     * Adds {@code value} at the current time. The value is weighted relative to the
     * landmark so that, when read back later, older contributions have decayed.
     * The landmark is periodically advanced to keep the exponent numerically safe.
     */
    public synchronized void add(long value)
    {
        long nowInSeconds = getTickInSeconds();
        if (nowInSeconds - landmarkInSeconds >= RESCALE_THRESHOLD_SECONDS) {
            rescaleToNewLandmark(nowInSeconds);
        }
        count += value * weight(nowInSeconds, landmarkInSeconds);
    }

    /**
     * Folds another counter's decayed count into this one. Both counters must share
     * the same alpha; the one with the older landmark is rescaled before summing.
     *
     * <p>NOTE(review): this method holds the lock on {@code this} (synchronized
     * method) and then locks {@code decayCounter} in caller-dependent order, so two
     * threads running {@code a.merge(b)} and {@code b.merge(a)} concurrently could
     * deadlock — confirm callers never merge in both directions.
     *
     * @throws IllegalArgumentException if the two counters have different alphas
     */
    public synchronized void merge(DecayCounter decayCounter)
    {
        requireNonNull(decayCounter, "decayCounter is null");
        checkArgument(decayCounter.alpha == alpha, "Expected decayCounter to have alpha %s, but was %s", alpha, decayCounter.alpha);
        synchronized (decayCounter) {
            // if the landmark of this counter is behind the other counter's
            if (landmarkInSeconds < decayCounter.landmarkInSeconds) {
                // rescale this counter to the other counter's landmark, and add
                rescaleToNewLandmark(decayCounter.landmarkInSeconds);
                count += decayCounter.count;
            }
            else {
                // rescale the other counter's value to our landmark and add
                double otherRescaledCount = decayCounter.count / weight(landmarkInSeconds, decayCounter.landmarkInSeconds);
                count += otherRescaledCount;
            }
        }
    }

    private void rescaleToNewLandmark(long newLandMarkInSeconds)
    {
        // rescale the count based on a new landmark to avoid numerical overflow issues
        count = count / weight(newLandMarkInSeconds, landmarkInSeconds);
        landmarkInSeconds = newLandMarkInSeconds;
    }

    /** Resets the counter to zero as of the current time. */
    @Managed
    public synchronized void reset()
    {
        landmarkInSeconds = getTickInSeconds();
        count = 0;
    }

    /**
     * Copies another counter's raw state (landmark and count) into this one.
     * This is a hack to work around limitations in Jmxutils.
     */
    @Deprecated
    public synchronized void resetTo(DecayCounter counter)
    {
        synchronized (counter) {
            landmarkInSeconds = counter.landmarkInSeconds;
            count = counter.count;
        }
    }

    /** Returns the decayed count as of now (internal value re-weighted to the present). */
    @Managed
    public synchronized double getCount()
    {
        long nowInSeconds = getTickInSeconds();
        return count / weight(nowInSeconds, landmarkInSeconds);
    }

    /** Returns the decayed count per second. */
    @Managed
    public synchronized double getRate()
    {
        // The total time covered by this counter is equivalent to the integral of the weight function from 0 to Infinity,
        // which equals 1/alpha. The count per unit time is, therefore, count / (1/alpha)
        return getCount() * alpha;
    }

    // weight of a contribution made at 'landmarkInSeconds' when observed at 'timestampInSeconds'
    private double weight(long timestampInSeconds, long landmarkInSeconds)
    {
        return Math.exp(alpha * (timestampInSeconds - landmarkInSeconds));
    }

    // current ticker reading converted from nanoseconds to whole seconds
    private long getTickInSeconds()
    {
        return TimeUnit.NANOSECONDS.toSeconds(ticker.read());
    }

    /** Captures an immutable (count, rate) snapshot as of now. */
    public DecayCounterSnapshot snapshot()
    {
        return new DecayCounterSnapshot(getCount(), getRate());
    }

    @Override
    public String toString()
    {
        return toStringHelper(this)
                .add("count", getCount())
                .add("rate", getRate())
                .toString();
    }

    public double getAlpha()
    {
        return alpha;
    }

    /** Immutable, JSON-serializable view of a counter at a single point in time. */
    public static class DecayCounterSnapshot
    {
        private final double count;
        private final double rate;

        @JsonCreator
        public DecayCounterSnapshot(@JsonProperty("count") double count, @JsonProperty("rate") double rate)
        {
            this.count = count;
            this.rate = rate;
        }

        @JsonProperty
        public double getCount()
        {
            return count;
        }

        @JsonProperty
        public double getRate()
        {
            return rate;
        }

        @Override
        public String toString()
        {
            return toStringHelper(this)
                    .add("count", count)
                    .add("rate", rate)
                    .toString();
        }
    }
}
| apache-2.0 |
qyj415/openfire | work/plugins-dev/kraken/target/jspc/java/org/jivesoftware/openfire/plugin/kraken/kraken_002dsettings_jsp.java | 19985 | package org.jivesoftware.openfire.plugin.kraken;
import javax.servlet.*;
import javax.servlet.http.*;
import javax.servlet.jsp.*;
import java.util.ArrayList;
import org.apache.log4j.Logger;
import java.util.Collection;
import java.util.Iterator;
import org.dom4j.Element;
import org.dom4j.Attribute;
import org.jivesoftware.util.JiveGlobals;
import org.jivesoftware.util.LocaleUtils;
import org.dom4j.Document;
import net.sf.kraken.KrakenPlugin;
import org.jivesoftware.openfire.XMPPServer;
/**
 * Servlet generated by the Jasper JSP compiler (it extends
 * {@code org.apache.jasper.runtime.HttpJspBase}) from the Kraken plugin's
 * "kraken-settings" admin console page. Generated code: do not edit by hand;
 * change the source JSP and recompile instead.
 */
public final class kraken_002dsettings_jsp extends org.apache.jasper.runtime.HttpJspBase
    implements org.apache.jasper.runtime.JspSourceDependent {

  // Page dependencies tracked by Jasper (never assigned here; may stay null).
  private static java.util.List _jspx_dependants;

  // Pooled handler for the <fmt:message key="..."/> tags used on this page.
  private org.apache.jasper.runtime.TagHandlerPool _jspx_tagPool_fmt_message_key_nobody;

  public Object getDependants() {
    return _jspx_dependants;
  }

  public void _jspInit() {
    _jspx_tagPool_fmt_message_key_nobody = org.apache.jasper.runtime.TagHandlerPool.getTagHandlerPool(getServletConfig());
  }

  public void _jspDestroy() {
    _jspx_tagPool_fmt_message_key_nobody.release();
  }

  /**
   * Renders the gateway settings page: reads the plugin's options config
   * (an XML document), emits an HTML form for each option, and wires the
   * form to DWR-based save/cancel JavaScript handlers.
   */
  public void _jspService(HttpServletRequest request, HttpServletResponse response)
        throws java.io.IOException, ServletException {

    JspFactory _jspxFactory = null;
    PageContext pageContext = null;
    HttpSession session = null;
    ServletContext application = null;
    ServletConfig config = null;
    JspWriter out = null;
    Object page = this;
    JspWriter _jspx_out = null;
    PageContext _jspx_page_context = null;

    try {
      _jspxFactory = JspFactory.getDefaultFactory();
      response.setContentType("text/html;charset=UTF-8");
      pageContext = _jspxFactory.getPageContext(this, request, response,
          "error.jsp", true, 8192, true);
      _jspx_page_context = pageContext;
      application = pageContext.getServletContext();
      config = pageContext.getServletConfig();
      session = pageContext.getSession();
      out = pageContext.getOut();
      _jspx_out = out;

      out.write("\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n");

      final KrakenPlugin plugin =
          (KrakenPlugin) XMPPServer.getInstance().getPluginManager().getPlugin("kraken");
      // Variable names of all options found in the config; also referenced by
      // the generated JavaScript below.
      final ArrayList<String> optionTypes = new ArrayList<String>();

      // Local helper that walks the plugin's options-config XML and renders it
      // as HTML form controls ("text" inputs and "toggle" checkboxes, the
      // latter optionally with nested child items).
      class GatewaySettings {
        Logger Log = Logger.getLogger(GatewaySettings.class);
        JspWriter out = null;
        Integer jsID = 0; // Javascript incrementable id

        GatewaySettings(JspWriter out) {
          this.out = out;
          pollConfigOptions();
        }

        // Joins a collection's elements into a single delimited string.
        public String join(Collection s, String delimiter) {
          // Borrowed from http://www.bigbold.com/snippets/posts/show/91
          StringBuffer buffer = new StringBuffer();
          Iterator iter = s.iterator();
          while (iter.hasNext()) {
            buffer.append(iter.next());
            if (iter.hasNext()) {
              buffer.append(delimiter);
            }
          }
          return buffer.toString();
        }

        // Renders one config node (and, for toggles, its children) as HTML.
        void printConfigNode(Element node) {
          try {
            Attribute type = node.attribute("type");
            if (type.getText().equals("text")) {
              // Required fields
              Attribute desckey = node.attribute("desckey");
              Attribute var = node.attribute("var");
              Attribute sysprop = node.attribute("sysprop");
              // Optional fields
              Attribute def = node.attribute("default");
              Attribute size = node.attribute("size");
              Attribute maxlen = node.attribute("maxlength");
              if (desckey == null || var == null || sysprop == null) {
                Log.error("Missing variable from options config.");
                return;
              }
              String defStr = "";
              if (def != null) {
                defStr = def.getText();
              }
              String descStr = LocaleUtils.getLocalizedString(desckey.getText(), "kraken");
              String setting = JiveGlobals.getProperty(sysprop.getText(), defStr);
              String inputId = var.getText();
              out.println("<tr valign='middle'>");
              out.println("<td align='right' width='20%'><label for='" + inputId + "'>" +
                  descStr + "</label>:</td>");
              // NOTE(review): the second ternary guards on 'size' but reads
              // 'maxlen' — a node with size but no maxlength would NPE here.
              // Confirm against the source JSP before relying on it.
              out.print("<td><input type='text' id='" + inputId + "' name='" + inputId + "'" +
                  (size != null ? " size='" + size.getText() + "'" : "") +
                  (size != null ? " maxlength='" + maxlen.getText() + "'" : "") +
                  " value='" + setting + "'");
              out.println(" /></td>");
              out.println("</tr>");
            } else if (type.getText().equals("toggle")) {
              // Required fields
              Attribute desckey = node.attribute("desckey");
              Attribute var = node.attribute("var");
              Attribute sysprop = node.attribute("sysprop");
              // Optional fields
              Attribute def = node.attribute("default");
              Attribute alert = node.attribute("alert");
              if (desckey == null || var == null || sysprop == null) {
                Log.error("Missing variable from options config.");
                return;
              }
              boolean defBool = false;
              if (def != null && (def.getText().equals("1") || def.getText().equals("true") ||
                  def.getText().equals("enabled") || def.getText().equals("yes"))) {
                defBool = true;
              }
              String descStr = LocaleUtils.getLocalizedString(desckey.getText(), "kraken");
              String alertStr = null;
              if (alert != null && alert.getText() != null && alert.getText().length() > 0) {
                alertStr = LocaleUtils.getLocalizedString(alert.getText(), "kraken");
              }
              boolean setting = JiveGlobals.getBooleanProperty(sysprop.getText(), defBool);
              String jsStr = (++jsID).toString();
              String checkId = var.getText();
              boolean hasChildren = node.elements("item").size() > 0;
              out.println("<tr valign='top'>");
              out.print("<td align='right' width='20%'><input type='checkbox' id='" +
                  checkId + "' name='" + checkId + "' value='true' " +
                  (setting ? " checked='checked'" : ""));
              if (hasChildren) {
                // Toggling the checkbox shows/hides the nested child table.
                out.print(" onClick='elem = document.getElementById(\"" + jsStr +
                    "\"); if (elem) { if (this.checked) { elem.style.display=\"table\"} else { elem.style.display=\"none\"} }'");
              }
              if (alertStr != null) {
                // Optional confirmation dialog before enabling the option.
                out.print(" onClick='elem = document.getElementById(\"" + checkId +
                    "\"); if (elem) { if (this.checked) { return confirm(\""+alertStr+"\") } else { return true; } } else { return true; }'");
              }
              out.println("/></td>");
              out.print("<td><label for='" + checkId + "'>" + descStr + "</label>");
              if (hasChildren) {
                out.println("<table id='" + jsStr + "' width='100%' style='display: " +
                    (setting ? "table" : "none") + "'>");
                for (Object itemObj : node.elements("item")) {
                  Element item = (Element) itemObj;
                  printConfigNode(item);
                }
                out.println("</table>");
              }
              out.println("</td>");
              out.println("</tr>");
            }
          }
          catch (Exception e) {
            // Rendering one node must not abort the whole page; log and move on.
            Log.error("Error printing config node:", e);
          }
        }

        // Collects all option variable names from the config's main panel.
        void pollConfigOptions() {
          Document optConfig = plugin.getOptionsConfig();
          Element mainPanel = optConfig.getRootElement().element("mainpanel");
          if (mainPanel != null && mainPanel.nodeCount() > 0) {
            for (Object nodeObj : mainPanel.elements("item")) {
              Element node = (Element) nodeObj;
              getConfigOptions(node);
            }
          }
        }

        // Records the 'var' name of a node (recursing into toggle children)
        // so the generated JavaScript can read each form control's value.
        void getConfigOptions(Element node) {
          try {
            Attribute type = node.attribute("type");
            if (type.getText().equals("text")) {
              // Required fields
              Attribute desckey = node.attribute("desckey");
              Attribute var = node.attribute("var");
              Attribute sysprop = node.attribute("sysprop");
              if (desckey == null || var == null || sysprop == null) {
                Log.error("Missing variable from options config.");
                return;
              }
              // Store a copy of the node variable for later use.
              if (!optionTypes.contains(var.getText())) {
                optionTypes.add(var.getText());
              }
            } else if (type.getText().equals("toggle")) {
              // Required fields
              Attribute desckey = node.attribute("desckey");
              Attribute var = node.attribute("var");
              Attribute sysprop = node.attribute("sysprop");
              if (desckey == null || var == null || sysprop == null) {
                Log.error("Missing variable from options config.");
                return;
              }
              // Store a copy of the node variable for later use.
              if (!optionTypes.contains(var.getText())) {
                optionTypes.add(var.getText());
              }
              for (Object itemObj : node.elements("item")) {
                Element item = (Element) itemObj;
                getConfigOptions(item);
              }
            }
          }
          catch (Exception e) {
            // Same best-effort policy as printConfigNode: log and continue.
            Log.error("Error reading config node:", e);
          }
        }

        // Emits the complete settings <form>, including save/cancel buttons.
        void printSettingsDialog() {
          try {
            Document optConfig = plugin.getOptionsConfig();
            Element mainPanel = optConfig.getRootElement().element("mainpanel");
            out.write("\n <!-- Options Window -->\n <div>\n <form id=\"jiveoptionsform\" action=\"\" onSubmit=\"return false\">\n <table border=\"0\" cellpadding=\"0\" cellspacing=\"0\" width=\"100%\">\n <tr>\n <td align=\"left\">\n");
            if (mainPanel != null && mainPanel.nodeCount() > 0) {
              out.println("<table border='0' cellpadding='1' cellspacing='2'>");
              for (Object nodeObj : mainPanel.elements("item")) {
                Element node = (Element)nodeObj;
                printConfigNode(node);
              }
              out.println("</table>");
            }
            else {
              out.println(" ");
            }
            out.write("\n </td>\n </tr>\n </table>\n\n <span id=\"optionsresults\" class=\"saveResultsMsg\"></span>\n <input type=\"submit\" name=\"submit\" value=\"");
            out.print( LocaleUtils.getLocalizedString("gateway.web.settings.saveoptions", "kraken") );
            out.write("\" onclick=\"saveOptions(); return false\" class=\"jive-formButton\">\n <input type=\"reset\" name=\"cancel\" value=\"");
            out.print( LocaleUtils.getLocalizedString("gateway.web.settings.cancelchanges", "kraken") );
            out.write("\" onclick=\"cancelOptions(); return true\" class=\"jive-formButton\">\n </form>\n </div>\n\n\n");
          }
          catch (Exception e) {
            // Page body stays partially rendered on failure; just log it.
            Log.error("Error printing settings section:", e);
          }
        }
      }

      GatewaySettings settings = new GatewaySettings(out);

      // Static page head, styles, and DWR script includes.
      out.write("\n\n<html>\n\n<head>\n<title>");
      if (_jspx_meth_fmt_message_0(_jspx_page_context))
        return;
      out.write("</title>\n<meta name=\"pageID\" content=\"kraken-settings\">\n<style type=\"text/css\">\n<!--\t@import url(\"style/kraken.css\"); -->\n</style>\n<script src=\"dwr/engine.js\" type=\"text/javascript\"></script>\n<script src=\"dwr/util.js\" type=\"text/javascript\"></script>\n<script src=\"dwr/interface/ConfigManager.js\" type=\"text/javascript\"></script>\n<script type=\"text/javascript\" >\n    DWREngine.setErrorHandler(handleError);\n    window.onerror = handleError;\n\n    function handleError(error) {\n        // swallow errors\n    }\n\n    var optionTypes = new Array(\n");

      // Emit the collected option names as a JavaScript array literal.
      Boolean first = true;
      for (String var : optionTypes) {
        if (!first) {
          out.println(",");
        }
        out.print("        \""+var+"\"");
        if (first) {
          first = false;
        }
      }

      // Client-side save/cancel/keepalive handlers (sent verbatim to the browser).
      out.write("\n    );\n\n    function saveOptions(transportID) {\n        var globalSettings = new Object();\n        for (var x in optionTypes) {\n            var optType = optionTypes[x];\n            var optionId = transportID+optType;\n            var testoption = document.getElementById(optionId);\n            if (testoption != null) {\n                transportSettings[optType] = DWRUtil.getValue(optionId);\n            }\n        }\n        ConfigManager.saveSettings(globalSettings);\n        document.getElementById(\"setStatusMsg\").style.display = \"\";\n        document.getElementById(\"setStatusMsg\").innerHTML = \"<span class='successresults'><img src='images/success-16x16.gif' align='absmiddle' />");
      if (_jspx_meth_fmt_message_1(_jspx_page_context))
        return;
      out.write("</span>\";\n        setTimeout(\"to_saveOptions()\", 5000);\n    }\n\n    function cancelOptions(transportID) {\n        document.getElementById(\"setStatusMsg\").style.display = \"\";\n        document.getElementById(\"setStatusMsg\").innerHTML = \"<span class='warningresults'><img src='images/warning-16x16.gif' align='absmiddle' />");
      if (_jspx_meth_fmt_message_2(_jspx_page_context))
        return;
      out.write("</span>\";\n        setTimeout(\"to_saveOptions()\", 5000);\n    }\n\n    function to_saveOptions() {\n        Effect.Fade(\"setStatusMsg\");\n    }\n\n    function pingSession() {\n        ConnectionTester.pingSession();\n        setTimeout(\"pingSession()\", 60000); // Every minute\n    }\n\n    setTimeout(\"pingSession()\", 60000); // One minute after first load\n</script>\n</head>\n\n<body>\n<p>");
      if (_jspx_meth_fmt_message_3(_jspx_page_context))
        return;
      out.write("</p>\n\n<div id=\"setStatusMsg\" style=\"display: none\"></div>\n\n<form action=\"\" name=\"gatewayForm\">\n\n");

      settings.printSettingsDialog();

      out.write("\n\n</form>\n\n<br clear=\"all\" />\n</body>\n\n</html>\n");
    } catch (Throwable t) {
      // Standard Jasper error handling: clear the buffer and delegate to the
      // page's error handler, except for deliberate SkipPageExceptions.
      if (!(t instanceof SkipPageException)){
        out = _jspx_out;
        if (out != null && out.getBufferSize() != 0)
          out.clearBuffer();
        if (_jspx_page_context != null) _jspx_page_context.handlePageException(t);
      }
    } finally {
      if (_jspxFactory != null) _jspxFactory.releasePageContext(_jspx_page_context);
    }
  }

  // Renders <fmt:message key="gateway.web.settings.title"/> via the tag pool.
  private boolean _jspx_meth_fmt_message_0(PageContext _jspx_page_context)
          throws Throwable {
    PageContext pageContext = _jspx_page_context;
    JspWriter out = _jspx_page_context.getOut();
    //  fmt:message
    org.apache.taglibs.standard.tag.rt.fmt.MessageTag _jspx_th_fmt_message_0 = (org.apache.taglibs.standard.tag.rt.fmt.MessageTag) _jspx_tagPool_fmt_message_key_nobody.get(org.apache.taglibs.standard.tag.rt.fmt.MessageTag.class);
    _jspx_th_fmt_message_0.setPageContext(_jspx_page_context);
    _jspx_th_fmt_message_0.setParent(null);
    _jspx_th_fmt_message_0.setKey("gateway.web.settings.title");
    int _jspx_eval_fmt_message_0 = _jspx_th_fmt_message_0.doStartTag();
    if (_jspx_th_fmt_message_0.doEndTag() == javax.servlet.jsp.tagext.Tag.SKIP_PAGE) {
      _jspx_tagPool_fmt_message_key_nobody.reuse(_jspx_th_fmt_message_0);
      return true;
    }
    _jspx_tagPool_fmt_message_key_nobody.reuse(_jspx_th_fmt_message_0);
    return false;
  }

  // Renders <fmt:message key="gateway.web.settings.settingssaved"/>.
  private boolean _jspx_meth_fmt_message_1(PageContext _jspx_page_context)
          throws Throwable {
    PageContext pageContext = _jspx_page_context;
    JspWriter out = _jspx_page_context.getOut();
    //  fmt:message
    org.apache.taglibs.standard.tag.rt.fmt.MessageTag _jspx_th_fmt_message_1 = (org.apache.taglibs.standard.tag.rt.fmt.MessageTag) _jspx_tagPool_fmt_message_key_nobody.get(org.apache.taglibs.standard.tag.rt.fmt.MessageTag.class);
    _jspx_th_fmt_message_1.setPageContext(_jspx_page_context);
    _jspx_th_fmt_message_1.setParent(null);
    _jspx_th_fmt_message_1.setKey("gateway.web.settings.settingssaved");
    int _jspx_eval_fmt_message_1 = _jspx_th_fmt_message_1.doStartTag();
    if (_jspx_th_fmt_message_1.doEndTag() == javax.servlet.jsp.tagext.Tag.SKIP_PAGE) {
      _jspx_tagPool_fmt_message_key_nobody.reuse(_jspx_th_fmt_message_1);
      return true;
    }
    _jspx_tagPool_fmt_message_key_nobody.reuse(_jspx_th_fmt_message_1);
    return false;
  }

  // Renders <fmt:message key="gateway.web.settings.cancelledchanges"/>.
  private boolean _jspx_meth_fmt_message_2(PageContext _jspx_page_context)
          throws Throwable {
    PageContext pageContext = _jspx_page_context;
    JspWriter out = _jspx_page_context.getOut();
    //  fmt:message
    org.apache.taglibs.standard.tag.rt.fmt.MessageTag _jspx_th_fmt_message_2 = (org.apache.taglibs.standard.tag.rt.fmt.MessageTag) _jspx_tagPool_fmt_message_key_nobody.get(org.apache.taglibs.standard.tag.rt.fmt.MessageTag.class);
    _jspx_th_fmt_message_2.setPageContext(_jspx_page_context);
    _jspx_th_fmt_message_2.setParent(null);
    _jspx_th_fmt_message_2.setKey("gateway.web.settings.cancelledchanges");
    int _jspx_eval_fmt_message_2 = _jspx_th_fmt_message_2.doStartTag();
    if (_jspx_th_fmt_message_2.doEndTag() == javax.servlet.jsp.tagext.Tag.SKIP_PAGE) {
      _jspx_tagPool_fmt_message_key_nobody.reuse(_jspx_th_fmt_message_2);
      return true;
    }
    _jspx_tagPool_fmt_message_key_nobody.reuse(_jspx_th_fmt_message_2);
    return false;
  }

  // Renders <fmt:message key="gateway.web.settings.instructions"/>.
  private boolean _jspx_meth_fmt_message_3(PageContext _jspx_page_context)
          throws Throwable {
    PageContext pageContext = _jspx_page_context;
    JspWriter out = _jspx_page_context.getOut();
    //  fmt:message
    org.apache.taglibs.standard.tag.rt.fmt.MessageTag _jspx_th_fmt_message_3 = (org.apache.taglibs.standard.tag.rt.fmt.MessageTag) _jspx_tagPool_fmt_message_key_nobody.get(org.apache.taglibs.standard.tag.rt.fmt.MessageTag.class);
    _jspx_th_fmt_message_3.setPageContext(_jspx_page_context);
    _jspx_th_fmt_message_3.setParent(null);
    _jspx_th_fmt_message_3.setKey("gateway.web.settings.instructions");
    int _jspx_eval_fmt_message_3 = _jspx_th_fmt_message_3.doStartTag();
    if (_jspx_th_fmt_message_3.doEndTag() == javax.servlet.jsp.tagext.Tag.SKIP_PAGE) {
      _jspx_tagPool_fmt_message_key_nobody.reuse(_jspx_th_fmt_message_3);
      return true;
    }
    _jspx_tagPool_fmt_message_key_nobody.reuse(_jspx_th_fmt_message_3);
    return false;
  }
}
| apache-2.0 |
mtransitapps/commons-android | src/main/java/org/mtransit/android/commons/ToastUtils.java | 6095 | package org.mtransit.android.commons;
import android.app.Activity;
import android.content.Context;
import android.graphics.Color;
import android.os.Build;
import android.view.Gravity;
import android.view.View;
import android.view.WindowManager;
import android.widget.PopupWindow;
import android.widget.TextView;
import android.widget.Toast;
import androidx.annotation.DrawableRes;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.annotation.StringRes;
import androidx.core.content.res.ResourcesCompat;
@SuppressWarnings({"SameParameterValue", "WeakerAccess", "unused"})
@SuppressWarnings({"SameParameterValue", "WeakerAccess", "unused"})
public final class ToastUtils implements MTLog.Loggable {

	private static final String LOG_TAG = ToastUtils.class.getSimpleName();

	// default margin around touchable toasts, in dp
	private static final int TOAST_MARGIN_IN_DP = 10;
	// bottom navigation height the toast must clear, in dp
	private static final int NAVIGATION_HEIGHT_IN_DP = 48;

	@NonNull
	@Override
	public String getLogTag() {
		return LOG_TAG;
	}

	private ToastUtils() {
		// utility class: no instances
	}

	/** Shows a short, screen-centered toast for {@code resId} (no-op context => no-op). */
	public static void makeTextAndShowCentered(@Nullable Context context, @StringRes int resId) {
		makeTextAndShowCentered(context, resId, Toast.LENGTH_SHORT);
	}

	/** Shows a screen-centered toast for {@code resId} with the given duration. */
	public static void makeTextAndShowCentered(@Nullable Context context, @StringRes int resId, int duration) {
		if (context == null) {
			return;
		}
		Toast toast = Toast.makeText(context, resId, duration);
		setGravityTextCenter(toast);
		toast.show();
	}

	/** Shows a short, screen-centered toast for {@code text}. */
	public static void makeTextAndShowCentered(@Nullable Context context, @NonNull CharSequence text) {
		makeTextAndShowCentered(context, text, Toast.LENGTH_SHORT);
	}

	/** Shows a screen-centered toast for {@code text} with the given duration. */
	public static void makeTextAndShowCentered(@Nullable Context context, @NonNull CharSequence text, int duration) {
		if (context == null) {
			return;
		}
		Toast toast = Toast.makeText(context, text, duration);
		setGravityTextCenter(toast);
		toast.show();
	}

	/**
	 * Android SDK:
	 * <p><strong>Warning:</strong> Starting from Android {@link Build.VERSION_CODES#R}, for apps
	 * targeting API level {@link Build.VERSION_CODES#R} or higher, this method is a no-op when
	 * called on text toasts.
	 */
	private static void setGravityTextCenter(Toast toast) {
		if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.R) {
			return; // setGravity() would be ignored anyway (see warning above)
		}
		toast.setGravity(Gravity.CENTER, 0, 0);
	}

	/** Shows a short toast for {@code resId} at the default position. */
	public static void makeTextAndShow(@Nullable Context context, @StringRes int resId) {
		makeTextAndShow(context, resId, Toast.LENGTH_SHORT);
	}

	/** Shows a toast for {@code resId} with the given duration. */
	public static void makeTextAndShow(@Nullable Context context, @StringRes int resId, int duration) {
		if (context == null) {
			return;
		}
		Toast.makeText(context, resId, duration).show();
	}

	/** Shows a short toast for {@code text}. */
	public static void makeTextAndShow(@Nullable Context context, @NonNull CharSequence text) {
		makeTextAndShow(context, text, Toast.LENGTH_SHORT);
	}

	// now @Nullable + guarded for consistency w/ every other show helper in this class
	/** Shows a toast for {@code text} with the given duration. */
	public static void makeTextAndShow(@Nullable Context context, @NonNull CharSequence text, int duration) {
		if (context == null) {
			return;
		}
		Toast.makeText(context, text, duration).show();
	}

	/**
	 * Shows a touchable toast above the navigation bar, reserving extra space
	 * for an ad banner.
	 *
	 * @return true if the toast was scheduled to be shown.
	 */
	public static boolean showTouchableToast(@Nullable Activity activity, @Nullable PopupWindow touchableToast, @Nullable View parent) {
		int additionalBottomMarginInDp = 90; // smart ad banner max height
		return showTouchableToast(activity, touchableToast, parent, additionalBottomMarginInDp);
	}

	/** Same as above with an explicit additional bottom margin in dp. */
	public static boolean showTouchableToast(@Nullable Activity activity, @Nullable PopupWindow touchableToast, @Nullable View parent, int additionalBottomMarginInDp) {
		if (activity == null || touchableToast == null || parent == null) {
			return false; // guard BEFORE dp->px conversion, which needs a non-null context
		}
		return showTouchableToastPx(activity, touchableToast, parent,
				(int) ResourceUtils.convertDPtoPX(activity, additionalBottomMarginInDp) // additional bottom margin
		);
	}

	/** Same as above with the additional bottom margin already in px. */
	public static boolean showTouchableToastPx(@Nullable Activity activity, @Nullable PopupWindow touchableToast, @Nullable View parent, int additionalBottomMarginInPx) {
		if (activity == null || touchableToast == null || parent == null) {
			return false; // guard BEFORE dp->px conversion, which needs a non-null context
		}
		return showTouchableToastPx(activity, touchableToast, parent,
				(int) ResourceUtils.convertDPtoPX(activity, NAVIGATION_HEIGHT_IN_DP + TOAST_MARGIN_IN_DP) + additionalBottomMarginInPx, // bottom
				(int) ResourceUtils.convertDPtoPX(activity, TOAST_MARGIN_IN_DP) // left
		);
	}

	/** Shows a touchable toast at explicit bottom/left margins given in dp. */
	public static boolean showTouchableToast(@Nullable Activity activity, @Nullable PopupWindow touchableToast, @Nullable View parent, int bottomMarginInDp, int leftMarginInDp) {
		if (activity == null || touchableToast == null || parent == null) {
			return false;
		}
		int bottomMarginInPx = (int) ResourceUtils.convertDPtoPX(activity, bottomMarginInDp);
		int leftMarginInPx = (int) ResourceUtils.convertDPtoPX(activity, leftMarginInDp);
		return showTouchableToastPx(activity, touchableToast, parent, bottomMarginInPx, leftMarginInPx);
	}

	/**
	 * Shows a touchable toast at explicit bottom/left margins given in px.
	 * The show is posted on {@code parent}'s message queue and skipped if the
	 * activity is finishing by the time it runs.
	 */
	public static boolean showTouchableToastPx(@Nullable Activity activity, @Nullable PopupWindow touchableToast, @Nullable View parent, int bottomMarginInPx, int leftMarginInPx) {
		if (activity == null || touchableToast == null || parent == null) {
			return false;
		}
		parent.post(() -> {
					if (activity.isFinishing()) {
						return; // window is going away; showing would crash or leak
					}
					// NOTE(review): Gravity.LEFT (not START) is intentional here? confirm RTL behavior
					touchableToast.showAtLocation(
							parent,
							Gravity.LEFT | Gravity.BOTTOM,
							leftMarginInPx,
							bottomMarginInPx
					);
				}
		);
		return true;
	}

	/** Builds a touchable toast with the default Android toast background. */
	@Nullable
	public static PopupWindow getNewTouchableToast(@Nullable Context context, @StringRes int textResId) {
		return getNewTouchableToast(context, android.R.drawable.toast_frame, textResId);
	}

	/**
	 * Builds a touchable {@link PopupWindow} styled like a toast.
	 *
	 * @return the popup, or null when {@code context} is null or inflation fails.
	 */
	@Nullable
	public static PopupWindow getNewTouchableToast(@Nullable Context context, @DrawableRes int toastResId, @StringRes int textResId) {
		if (context == null) {
			return null;
		}
		try {
			TextView contentView = new TextView(context);
			contentView.setText(textResId);
			contentView.setTextColor(Color.WHITE);
			PopupWindow newTouchableToast = new PopupWindow(WindowManager.LayoutParams.WRAP_CONTENT, WindowManager.LayoutParams.WRAP_CONTENT);
			newTouchableToast.setContentView(contentView);
			newTouchableToast.setTouchable(true);
			newTouchableToast.setBackgroundDrawable(ResourcesCompat.getDrawable(context.getResources(), toastResId, context.getTheme()));
			return newTouchableToast;
		} catch (Exception e) {
			// defensive: a broken drawable/resource must not crash the caller
			MTLog.w(LOG_TAG, e, "Error while creating touchable toast!");
			return null;
		}
	}
}
| apache-2.0 |
spring-cloud/spring-cloud-stream-app-starters | app-starters-test-support/src/main/java/org/springframework/cloud/stream/app/test/sftp/SftpTestSupport.java | 2783 | /*
* Copyright 2015-2016 the original author or authors.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.app.test.sftp;
import java.io.File;
import java.util.Arrays;
import org.apache.sshd.SshServer;
import org.apache.sshd.common.NamedFactory;
import org.apache.sshd.common.file.FileSystemView;
import org.apache.sshd.common.file.nativefs.NativeFileSystemFactory;
import org.apache.sshd.common.file.nativefs.NativeFileSystemView;
import org.apache.sshd.server.Command;
import org.apache.sshd.server.PasswordAuthenticator;
import org.apache.sshd.server.keyprovider.SimpleGeneratorHostKeyProvider;
import org.apache.sshd.server.sftp.SftpSubsystem;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.springframework.cloud.stream.app.test.file.remote.RemoteFileTestSupport;
/**
* Provides an embedded SFTP Server for test cases.
*
* @author David Turanski
* @author Gary Russell
*/
/**
 * Provides an embedded SFTP Server for test cases.
 *
 * <p>The server accepts any credentials and serves the shared remote temporary
 * folder as the session root. It is started once per test class and stopped
 * (with host-key cleanup) afterwards.
 *
 * @author David Turanski
 * @author Gary Russell
 */
public class SftpTestSupport extends RemoteFileTestSupport {

	private static SshServer server;

	@Override
	public String prefix() {
		return "sftp";
	}

	@BeforeClass
	public static void createServer() throws Exception {
		server = SshServer.setUpDefaultServer();
		// accept any username/password: this server exists only for tests
		server.setPasswordAuthenticator(new PasswordAuthenticator() {

			@Override
			public boolean authenticate(String username, String password,
					org.apache.sshd.server.session.ServerSession session) {
				return true;
			}

		});
		server.setPort(port);
		// generates a throwaway host key, persisted to ./hostkey.ser
		server.setKeyPairProvider(new SimpleGeneratorHostKeyProvider("hostkey.ser"));
		SftpSubsystem.Factory sftp = new SftpSubsystem.Factory();
		server.setSubsystemFactories(Arrays.<NamedFactory<Command>>asList(sftp));
		// root every SFTP session at the shared remote temporary folder
		server.setFileSystemFactory(new NativeFileSystemFactory() {

			@Override
			public FileSystemView createFileSystemView(org.apache.sshd.common.Session session) {
				return new NativeFileSystemView(session.getUsername(), false) {

					@Override
					public String getVirtualUserDir() {
						return remoteTemporaryFolder.getRoot().getAbsolutePath();
					}
				};
			}

		});
		server.start();
	}

	@AfterClass
	public static void stopServer() throws Exception {
		// createServer() may have failed before assigning 'server'; don't NPE here
		// and mask the original failure
		if (server != null) {
			server.stop();
			server = null;
		}
		File hostkey = new File("hostkey.ser");
		if (hostkey.exists() && !hostkey.delete()) {
			// best effort only: a leftover generated host key is harmless between runs
		}
	}

}
| apache-2.0 |
indeedeng/util | io/src/test/java/com/indeed/util/io/BufferedFileDataInputOutputStreamTest.java | 4068 | package com.indeed.util.io;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import java.io.File;
import java.io.IOException;
/** @author kenh */
public class BufferedFileDataInputOutputStreamTest {
    @Rule public TemporaryFolder tempDir = new TemporaryFolder();

    /** Writes one value of every primitive type supported by the output stream. */
    private static void writeTestData(final BufferedFileDataOutputStream outputStream)
            throws IOException {
        outputStream.writeInt(1);
        outputStream.writeInt(2);
        outputStream.writeInt(3);
        outputStream.writeInt(Integer.MAX_VALUE);
        outputStream.writeChar('0');
        outputStream.writeChars("abc");
        outputStream.writeFloat(1.2f);
        outputStream.writeDouble(Double.MAX_VALUE);
        outputStream.writeBoolean(true);
        outputStream.writeBoolean(false);
        outputStream.writeShort(5);
        outputStream.writeLong(Long.MAX_VALUE);
    }

    /** Reads back, in order, the values written by {@link #writeTestData} and verifies them. */
    private static void verifyTestData(final BufferedFileDataInputStream inputStream)
            throws IOException {
        Assert.assertEquals(1, inputStream.readInt());
        Assert.assertEquals(2, inputStream.readInt());
        Assert.assertEquals(3, inputStream.readInt());
        Assert.assertEquals(Integer.MAX_VALUE, inputStream.readInt());
        Assert.assertEquals('0', inputStream.readChar());
        Assert.assertEquals('a', inputStream.readChar());
        Assert.assertEquals('b', inputStream.readChar());
        Assert.assertEquals('c', inputStream.readChar());
        Assert.assertEquals(1.2f, inputStream.readFloat(), 1e-9);
        Assert.assertEquals(Double.MAX_VALUE, inputStream.readDouble(), 1e-9);
        Assert.assertTrue(inputStream.readBoolean());
        Assert.assertFalse(inputStream.readBoolean());
        Assert.assertEquals(5, inputStream.readShort());
        Assert.assertEquals(Long.MAX_VALUE, inputStream.readLong());
    }

    /** Round-trips primitives through the {@link File}-based constructors. */
    @Test
    public void testFileInputOutput() throws IOException {
        final File somefile = tempDir.newFile("somefile");
        try (final BufferedFileDataOutputStream outputStream =
                new BufferedFileDataOutputStream(somefile)) {
            writeTestData(outputStream);
        }

        try (final BufferedFileDataInputStream inputStream =
                new BufferedFileDataInputStream(somefile)) {
            verifyTestData(inputStream);
        }
    }

    /** Round-trips primitives through the {@link java.nio.file.Path}-based constructors. */
    @Test
    public void testPathInputOutput() throws IOException {
        final File somefile = tempDir.newFile("somefile");
        try (final BufferedFileDataOutputStream outputStream =
                new BufferedFileDataOutputStream(somefile.toPath())) {
            writeTestData(outputStream);
        }

        try (final BufferedFileDataInputStream inputStream =
                new BufferedFileDataInputStream(somefile.toPath())) {
            verifyTestData(inputStream);
        }
    }
}
| apache-2.0 |
vauvenal5/pieShare | pieShareApp/src/main/java/org/pieShare/pieShareApp/model/message/api/IMetaMessage.java | 515 | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package org.pieShare.pieShareApp.model.message.api;
import org.pieShare.pieShareApp.model.pieFile.FileMeta;
/**
*
* @author Svetoslav
*/
public interface IMetaMessage extends IFileMessageBase {

    /**
     * @return the {@link FileMeta} describing the file this message refers to
     */
    FileMeta getFileMeta();

    /**
     * @param fileMeta the {@link FileMeta} describing the file this message refers to
     */
    void setFileMeta(FileMeta fileMeta);

    /**
     * @return the raw meta-information bytes carried by this message
     *         (format defined by the producer — not visible here)
     */
    byte[] getMetaInfo();

    /**
     * @param metaInfo the raw meta-information bytes to carry in this message
     */
    void setMetaInfo(byte[] metaInfo);
}
| apache-2.0 |
eFaps/eFapsApp-Payroll | src/main/efaps/ESJP/org/efaps/esjp/payroll/basis/BasisAttribute_Base.java | 9497 | /*
* Copyright 2003 - 2015 The eFaps Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.efaps.esjp.payroll.basis;
import java.math.BigDecimal;
import java.util.List;
import org.efaps.admin.datamodel.Attribute;
import org.efaps.admin.datamodel.IEnum;
import org.efaps.admin.datamodel.IJaxb;
import org.efaps.admin.datamodel.ui.UIValue;
import org.efaps.admin.dbproperty.DBProperties;
import org.efaps.admin.event.Parameter;
import org.efaps.admin.program.esjp.EFapsApplication;
import org.efaps.admin.program.esjp.EFapsUUID;
import org.efaps.admin.ui.AbstractUserInterfaceObject.TargetMode;
import org.efaps.ci.CIAttribute;
import org.efaps.db.Instance;
import org.efaps.db.PrintQuery;
import org.efaps.db.SelectBuilder;
import org.efaps.esjp.ci.CIHumanResource;
import org.efaps.esjp.ci.CIPayroll;
import org.efaps.esjp.payroll.basis.xml.AbstractValue;
import org.efaps.esjp.payroll.basis.xml.DateValue;
import org.efaps.esjp.payroll.basis.xml.DecimalValue;
import org.efaps.esjp.payroll.basis.xml.StringValue;
import org.efaps.esjp.payroll.basis.xml.ValueList;
import org.efaps.esjp.ui.html.Table;
import org.efaps.ui.wicket.util.EnumUtil;
import org.efaps.util.EFapsException;
import org.efaps.util.cache.CacheReloadException;
import org.joda.time.DateTime;
/**
 * JAXB based attribute type that stores a snapshot of employee related
 * values (remuneration, pension regime, start date, periodicity, employ,
 * activation) as a {@link ValueList} and renders it as an HTML table.
 *
 * @author The eFaps Team
 */
@EFapsUUID("396cf26e-df12-4429-95e6-6d0b25bb81e3")
@EFapsApplication("eFapsApp-Payroll")
public abstract class BasisAttribute_Base
    implements IJaxb
{
    /**
     * @return the JAXB classes this attribute (de)serializes: the value list
     *         container plus the concrete value types it may contain
     */
    @Override
    public Class<?>[] getClasses()
    {
        return new Class<?>[] { ValueList.class, StringValue.class, DateValue.class, DecimalValue.class };
    }

    /**
     * Renders the stored {@link ValueList} as a read-only HTML table with one
     * row per value: the attribute's translated label and the value's HTML.
     *
     * @param _mode target mode of the calling UI (not evaluated here)
     * @param _value UI value whose database value is expected to be a
     *            {@link ValueList}; the list itself may be <code>null</code>
     * @return HTML snippet; empty string when no value list is present
     */
    @Override
    public String getUISnipplet(final TargetMode _mode,
                                final UIValue _value)
    {
        final ValueList val = (ValueList) _value.getDbValue();
        final StringBuilder ret = new StringBuilder();
        if (val != null) {
            final Table table = new Table();
            for (final AbstractValue<?> value : val.getValues()) {
                String label;
                try {
                    final Attribute attr = Attribute.get(value.getAttribute());
                    label = attr == null ? "" : DBProperties.getProperty(attr.getLabelKey());
                } catch (final CacheReloadException e) {
                    // Attribute lookup failed (cache could not be reloaded);
                    // fall back to the raw attribute key so the value is still
                    // rendered instead of being silently dropped.
                    label = value.getAttribute();
                }
                table.addRow().addColumn(label).addColumn(value.getHtml());
            }
            ret.append(table.toHtml());
        }
        return ret.toString();
    }

    /**
     * Builds the value list snapshot for a given instance. Accepts either an
     * employee instance directly or a payroll document (from which the linked
     * employee is resolved). Only non-null / non-empty values are added.
     *
     * @param _parameter parameter as passed by the eFaps API
     * @param _inst instance of an employee or a payroll document
     * @return value list with the employee's current TR classification values;
     *         empty when no valid employee instance could be resolved
     * @throws EFapsException on error
     */
    protected static ValueList getValueList4Inst(final Parameter _parameter,
                                                 final Instance _inst)
        throws EFapsException
    {
        final ValueList ret = new ValueList();
        Instance emplInst = null;
        if (_inst.getType().isKindOf(CIHumanResource.EmployeeAbstract)) {
            emplInst = _inst;
        } else if (_inst.getType().isKindOf(CIPayroll.DocumentAbstract)) {
            // resolve the employee linked from the payroll document
            final PrintQuery print = new PrintQuery(_inst);
            final SelectBuilder selEmplInst = SelectBuilder.get().linkto(
                            CIPayroll.DocumentAbstract.EmployeeAbstractLink).instance();
            print.addSelect(selEmplInst);
            print.execute();
            emplInst = print.getSelect(selEmplInst);
        }
        if (emplInst != null && emplInst.isValid()) {
            final PrintQuery print = new PrintQuery(emplInst);
            final SelectBuilder selEmplRem = SelectBuilder.get().clazz(CIHumanResource.ClassTR_Labor).attribute(
                            CIHumanResource.ClassTR_Labor.Remuneration);
            final SelectBuilder selEmplPR = SelectBuilder.get().clazz(CIHumanResource.ClassTR_Health).linkto(
                            CIHumanResource.ClassTR_Health.PensionRegimeLink).attribute(
                            CIHumanResource.AttributeDefinitionPensionRegime.Value);
            final SelectBuilder selEmplPRT = SelectBuilder.get().clazz(CIHumanResource.ClassTR_Health).linkto(
                            CIHumanResource.ClassTR_Health.PensionRegimeTypeLink).attribute(
                            CIHumanResource.AttributeDefinitionPensionRegimeType.Value);
            final SelectBuilder selEmplST = SelectBuilder.get().clazz(CIHumanResource.ClassTR).attribute(
                            CIHumanResource.ClassTR.StartDate);
            final SelectBuilder selPeri = SelectBuilder.get().clazz(CIHumanResource.ClassTR_Labor).linkto(
                            CIHumanResource.ClassTR_Labor.PeriodicityLink).attribute(
                            CIHumanResource.AttributeDefinitionPeriodicity.Value);
            final SelectBuilder selEmplEmpl = SelectBuilder.get().linkto(CIHumanResource.Employee.EmployLink).attribute(
                            CIHumanResource.AttributeDefinitionEmploy.Value);
            final SelectBuilder selEmplAct = SelectBuilder.get().attribute(CIHumanResource.Employee.Activation);
            print.addSelect(selEmplRem, selEmplPR, selEmplPRT, selEmplST, selPeri, selEmplEmpl, selEmplAct);
            print.executeWithoutAccessCheck();
            final BigDecimal emplRem = print.getSelect(selEmplRem);
            if (emplRem != null) {
                ret.getValues().add(new DecimalValue().setObject(emplRem).setAttribute(
                                CIHumanResource.ClassTR_Labor.Remuneration));
            }
            final String emplPR = print.getSelect(selEmplPR);
            if (emplPR != null && !emplPR.isEmpty()) {
                ret.getValues().add(new StringValue().setObject(emplPR).setAttribute(
                                CIHumanResource.ClassTR_Health.PensionRegimeLink));
            }
            final String emplPRT = print.getSelect(selEmplPRT);
            if (emplPRT != null && !emplPRT.isEmpty()) {
                ret.getValues().add(new StringValue().setObject(emplPRT).setAttribute(
                                CIHumanResource.ClassTR_Health.PensionRegimeTypeLink));
            }
            final DateTime emplST = print.getSelect(selEmplST);
            if (emplST != null) {
                ret.getValues().add(new DateValue().setObject(emplST).setAttribute(CIHumanResource.ClassTR.StartDate));
            }
            final String peri = print.getSelect(selPeri);
            if (peri != null && !peri.isEmpty()) {
                ret.getValues().add(new StringValue().setObject(peri).setAttribute(
                                CIHumanResource.ClassTR_Labor.PeriodicityLink));
            }
            final String emplEmpl = print.getSelect(selEmplEmpl);
            if (emplEmpl != null && !emplEmpl.isEmpty()) {
                ret.getValues().add(new StringValue().setObject(emplEmpl).setAttribute(
                                CIHumanResource.EmployeeAbstract.EmployLink));
            }
            final Object emplAct = print.getSelect(selEmplAct);
            // instanceof is false for null, so no extra null check is needed
            if (emplAct instanceof List) {
                // activation is a list of enums; render it as a comma separated string
                final StringBuilder bldr = new StringBuilder();
                boolean first = true;
                for (final Object obj : (List<?>) emplAct) {
                    if (first) {
                        first = false;
                    } else {
                        bldr.append(", ");
                    }
                    bldr.append(EnumUtil.getUILabel((IEnum) obj));
                }
                ret.getValues().add(new StringValue().setObject(bldr.toString()).setAttribute(
                                CIHumanResource.Employee.Activation));
            }
        }
        return ret;
    }

    /**
     * Looks up the object stored for the given CI attribute inside a
     * {@link ValueList}, falling back to an alternative value when the list
     * is <code>null</code>, empty or holds no entry for the attribute.
     *
     * @param _parameter parameter as passed by the eFaps API (not evaluated)
     * @param _valueList value list to search, may be <code>null</code>
     * @param _attribute CI attribute whose stored value is wanted
     * @param _aternativeValue fallback value
     * @return stored object value or the fallback
     */
    protected static Object getObjectValue(final Parameter _parameter,
                                           final ValueList _valueList,
                                           final CIAttribute _attribute,
                                           final Object _aternativeValue)
    {
        Object ret = _aternativeValue;
        if (_valueList != null && !_valueList.getValues().isEmpty()) {
            final String attr = _attribute.ciType.getType().getAttribute(_attribute.name).getKey();
            for (final AbstractValue<?> value : _valueList.getValues()) {
                if (value.getAttribute().equals(attr)) {
                    ret = value.getObject();
                    break;
                }
            }
        }
        return ret;
    }
}
| apache-2.0 |
jonvestal/open-kilda | src-java/stats-topology/stats-storm-topology/src/main/java/org/openkilda/wfm/topology/stats/metrics/TableStatsMetricGenBolt.java | 2168 | /* Copyright 2019 Telstra Open Source
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.openkilda.wfm.topology.stats.metrics;
import static org.openkilda.wfm.topology.stats.StatsTopology.STATS_FIELD;
import org.openkilda.messaging.info.stats.SwitchTableStatsData;
import org.openkilda.messaging.info.stats.TableStatsEntry;
import org.openkilda.model.SwitchId;
import com.google.common.collect.ImmutableMap;
import org.apache.storm.tuple.Tuple;
import java.util.Map;
public class TableStatsMetricGenBolt extends MetricGenBolt {
public TableStatsMetricGenBolt(String metricPrefix) {
super(metricPrefix);
}
@Override
protected void handleInput(Tuple input) throws Exception {
SwitchTableStatsData statsData = pullValue(input, STATS_FIELD, SwitchTableStatsData.class);
for (TableStatsEntry entry : statsData.getTableStatsEntries()) {
emit(statsData.getSwitchId(), entry);
}
}
private void emit(SwitchId switchId, TableStatsEntry entry) {
long timestamp = getCommandContext().getCreateTime();
Map<String, String> tags = ImmutableMap.of(
"switchid", switchId.toOtsdFormat(),
"tableid", String.valueOf(entry.getTableId())
);
emitMetric("switch.table.active", timestamp, entry.getActiveEntries(), tags);
emitMetric("switch.table.lookup", timestamp, entry.getLookupCount(), tags);
emitMetric("switch.table.matched", timestamp, entry.getMatchedCount(), tags);
emitMetric("switch.table.missed", timestamp, entry.getLookupCount() - entry.getMatchedCount(), tags);
}
}
| apache-2.0 |
sankha93/selenium | java/server/src/org/openqa/grid/selenium/GridLauncherV3.java | 11755 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.grid.selenium;
import com.google.common.collect.ImmutableMap;
import com.beust.jcommander.JCommander;
import org.openqa.grid.common.GridRole;
import org.openqa.grid.common.RegistrationRequest;
import org.openqa.grid.internal.utils.SelfRegisteringRemote;
import org.openqa.grid.internal.utils.configuration.CoreRunnerConfiguration;
import org.openqa.grid.internal.utils.configuration.GridHubConfiguration;
import org.openqa.grid.internal.utils.configuration.GridNodeConfiguration;
import org.openqa.grid.internal.utils.configuration.StandaloneConfiguration;
import org.openqa.grid.shared.CliUtils;
import org.openqa.grid.web.Hub;
import org.openqa.selenium.internal.BuildInfo;
import org.openqa.selenium.remote.server.SeleniumServer;
import org.openqa.selenium.remote.server.log.LoggingOptions;
import org.openqa.selenium.remote.server.log.TerseFormatter;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.Method;
import java.util.function.Supplier;
import java.util.logging.ConsoleHandler;
import java.util.logging.FileHandler;
import java.util.logging.Handler;
import java.util.logging.Level;
import java.util.logging.Logger;
public class GridLauncherV3 {
private static final Logger log = Logger.getLogger(GridLauncherV3.class.getName());
private static final String CORE_RUNNER_CLASS =
"org.openqa.selenium.server.htmlrunner.HTMLLauncher";
private static abstract class GridItemLauncher {
protected StandaloneConfiguration configuration;
protected boolean helpRequested;
abstract void setConfiguration(String[] args);
abstract void launch() throws Exception;
void printUsage() {
new JCommander(configuration).usage();
}
}
private static ImmutableMap<String, Supplier<GridItemLauncher>> LAUNCHERS = buildLaunchers();
public static void main(String[] args) throws Exception {
GridItemLauncher launcher = buildLauncher(args);
if (launcher == null) {
return;
}
if (launcher.helpRequested) {
launcher.printUsage();
return;
}
configureLogging(launcher.configuration);
BuildInfo buildInfo = new BuildInfo();
log.info(String.format(
"Selenium build info: version: '%s', revision: '%s'",
buildInfo.getReleaseLabel(),
buildInfo.getBuildRevision()));
try {
launcher.launch();
} catch (Exception e) {
launcher.printUsage();
e.printStackTrace();
}
}
/**
* From the {@code args}, builds a new {@link GridItemLauncher} and populates it properly.
*
* @return null if no role is found, or a properly populated {@link GridItemLauncher}.
*/
private static GridItemLauncher buildLauncher(String[] args) {
String role = "standalone";
for (int i = 0; i < args.length; i++) {
if (args[i].equals("-htmlSuite")) {
Supplier<GridItemLauncher> launcherSupplier = LAUNCHERS.get("corerunner");
if (launcherSupplier == null) {
System.err.println(
"Unable to find the HTML runner. This is normally because you have not downloaded " +
"or made available the 'selenium-leg-rc' jar on the CLASSPATH. Your test will " +
"not be run.");
return null;
}
GridItemLauncher launcher = launcherSupplier.get();
launcher.setConfiguration(args);
return launcher;
}
if (args[i].startsWith("-role=")) {
role = args[i].substring("-role=".length());
} else if (args[i].equals("-role")) {
i++; // Increment, because we're going to need this.
if (i < args.length) {
role = args[i];
} else {
role = null; // Will cause us to print the usage information.
}
}
}
GridRole gridRole = GridRole.get(role);
if (gridRole == null) {
printInfoAboutRoles(role);
return null;
}
Supplier<GridItemLauncher> supplier = LAUNCHERS.get(gridRole.toString());
if (supplier == null) {
System.err.println("Unknown role: " + gridRole);
return null;
}
GridItemLauncher toReturn = supplier.get();
toReturn.setConfiguration(args);
return toReturn;
}
private static void printInfoAboutRoles(String roleCommandLineArg) {
if (roleCommandLineArg != null) {
CliUtils.printWrappedLine(
"",
"Error: the role '" + roleCommandLineArg + "' does not match a recognized server role: node/hub/standalone\n");
} else {
CliUtils.printWrappedLine(
"",
"Error: -role option needs to be followed by the value that defines role of this component in the grid\n");
}
System.out.println(
"Selenium server can run in one of the following roles:\n" +
" hub as a hub of a Selenium grid\n" +
" node as a node of a Selenium grid\n" +
" standalone as a standalone server not being a part of a grid\n" +
"\n" +
"If -role option is omitted the server runs standalone\n");
CliUtils.printWrappedLine(
"",
"To get help on the options available for a specific role run the server"
+ " with -help option and the corresponding -role option value");
}
private static void configureLogging(StandaloneConfiguration configuration) {
Level logLevel =
configuration.debug
? Level.FINE
: LoggingOptions.getDefaultLogLevel();
if (logLevel == null) {
logLevel = Level.INFO;
}
Logger.getLogger("").setLevel(logLevel);
Logger.getLogger("org.openqa.jetty").setLevel(Level.WARNING);
String logFilename =
configuration.log != null
? configuration.log
: LoggingOptions.getDefaultLogOutFile();
if (logFilename != null) {
for (Handler handler : Logger.getLogger("").getHandlers()) {
if (handler instanceof ConsoleHandler) {
Logger.getLogger("").removeHandler(handler);
}
}
try {
Handler logFile = new FileHandler(new File(logFilename).getAbsolutePath(), true);
logFile.setFormatter(new TerseFormatter(true));
logFile.setLevel(logLevel);
Logger.getLogger("").addHandler(logFile);
} catch (IOException e) {
throw new RuntimeException(e);
}
} else {
for (Handler handler : Logger.getLogger("").getHandlers()) {
if (handler instanceof ConsoleHandler) {
handler.setLevel(logLevel);
handler.setFormatter(new TerseFormatter(configuration.logLongForm));
}
}
}
}
private static ImmutableMap<String, Supplier<GridItemLauncher>> buildLaunchers() {
ImmutableMap.Builder<String, Supplier<GridItemLauncher>> launchers =
ImmutableMap.<String, Supplier<GridItemLauncher>>builder()
.put(GridRole.NOT_GRID.toString(), () -> new GridItemLauncher() {
public void setConfiguration(String[] args) {
configuration = new StandaloneConfiguration();
new JCommander(configuration, args);
helpRequested = configuration.help;
}
public void launch() throws Exception {
log.info("Launching a standalone Selenium Server");
SeleniumServer server = new SeleniumServer(configuration);
server.boot();
log.info("Selenium Server is up and running");
}
})
.put(GridRole.HUB.toString(), () -> new GridItemLauncher() {
public void setConfiguration(String[] args) {
GridHubConfiguration pending = new GridHubConfiguration();
new JCommander(pending, args);
configuration = pending;
//re-parse the args using any -hubConfig specified to init
if (pending.hubConfig != null) {
configuration = GridHubConfiguration.loadFromJSON(pending.hubConfig);
new JCommander(configuration, args); //args take precedence
}
helpRequested = configuration.help;
}
public void launch() throws Exception {
log.info("Launching Selenium Grid hub");
Hub h = new Hub((GridHubConfiguration) configuration);
h.start();
log.info("Nodes should register to " + h.getRegistrationURL());
log.info("Selenium Grid hub is up and running");
}
})
.put(GridRole.NODE.toString(), () -> new GridItemLauncher() {
public void setConfiguration(String[] args) {
GridNodeConfiguration pending = new GridNodeConfiguration();
new JCommander(pending, args);
configuration = pending;
//re-parse the args using any -nodeConfig specified to init
if (pending.nodeConfigFile != null) {
configuration = GridNodeConfiguration.loadFromJSON(pending.nodeConfigFile);
new JCommander(configuration, args); //args take precedence
}
helpRequested = configuration.help;
if (configuration.port == null) {
configuration.port = 5555;
}
}
public void launch() throws Exception {
log.info("Launching a Selenium Grid node");
RegistrationRequest
c =
RegistrationRequest.build((GridNodeConfiguration) configuration);
SelfRegisteringRemote remote = new SelfRegisteringRemote(c);
remote.setRemoteServer(new SeleniumServer(configuration));
remote.startRemoteServer();
log.info("Selenium Grid node is up and ready to register to the hub");
remote.startRegistrationProcess();
}
});
try {
Class.forName(CORE_RUNNER_CLASS, false, GridLauncherV3.class.getClassLoader());
launchers.put("corerunner", () -> new GridItemLauncher() {
@Override
void setConfiguration(String[] args) {
configuration = new CoreRunnerConfiguration();
new JCommander(configuration, args);
helpRequested = configuration.help;
}
@Override
void launch() throws Exception {
Class<?> coreRunnerClass = Class.forName(CORE_RUNNER_CLASS);
Object coreRunner = coreRunnerClass.newInstance();
Method mainInt = coreRunnerClass.getMethod("mainInt", String[].class);
CoreRunnerConfiguration runnerConfig = (CoreRunnerConfiguration) this.configuration;
String[] args = new String[] {
/* Results file */ runnerConfig.htmlSuite.get(3),
/* suite */ runnerConfig.htmlSuite.get(2),
/* start url */ runnerConfig.htmlSuite.get(1),
/* multi window */ "true",
/* browser string */ runnerConfig.htmlSuite.get(0),
};
Integer result = (Integer) mainInt.invoke(coreRunner, (Object) args);
System.exit(result);
}
});
} catch (ReflectiveOperationException e) {
// Do nothing. It's fine.
}
return launchers.build();
}
}
| apache-2.0 |
DaanHoogland/cloudstack | server/src/test/java/com/cloud/vpc/MockConfigurationManagerImpl.java | 23400 | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.vpc;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.cloudstack.api.command.admin.config.UpdateCfgCmd;
import org.apache.cloudstack.api.command.admin.network.CreateManagementNetworkIpRangeCmd;
import org.apache.cloudstack.api.command.admin.network.CreateNetworkOfferingCmd;
import org.apache.cloudstack.api.command.admin.network.DeleteManagementNetworkIpRangeCmd;
import org.apache.cloudstack.api.command.admin.network.DeleteNetworkOfferingCmd;
import org.apache.cloudstack.api.command.admin.network.UpdateNetworkOfferingCmd;
import org.apache.cloudstack.api.command.admin.offering.CreateDiskOfferingCmd;
import org.apache.cloudstack.api.command.admin.offering.CreateServiceOfferingCmd;
import org.apache.cloudstack.api.command.admin.offering.DeleteDiskOfferingCmd;
import org.apache.cloudstack.api.command.admin.offering.DeleteServiceOfferingCmd;
import org.apache.cloudstack.api.command.admin.offering.UpdateDiskOfferingCmd;
import org.apache.cloudstack.api.command.admin.offering.UpdateServiceOfferingCmd;
import org.apache.cloudstack.api.command.admin.pod.DeletePodCmd;
import org.apache.cloudstack.api.command.admin.pod.UpdatePodCmd;
import org.apache.cloudstack.api.command.admin.region.CreatePortableIpRangeCmd;
import org.apache.cloudstack.api.command.admin.region.DeletePortableIpRangeCmd;
import org.apache.cloudstack.api.command.admin.region.ListPortableIpRangesCmd;
import org.apache.cloudstack.api.command.admin.vlan.CreateVlanIpRangeCmd;
import org.apache.cloudstack.api.command.admin.vlan.DedicatePublicIpRangeCmd;
import org.apache.cloudstack.api.command.admin.vlan.DeleteVlanIpRangeCmd;
import org.apache.cloudstack.api.command.admin.vlan.ReleasePublicIpRangeCmd;
import org.apache.cloudstack.api.command.admin.zone.CreateZoneCmd;
import org.apache.cloudstack.api.command.admin.zone.DeleteZoneCmd;
import org.apache.cloudstack.api.command.admin.zone.UpdateZoneCmd;
import org.apache.cloudstack.api.command.user.network.ListNetworkOfferingsCmd;
import org.apache.cloudstack.config.Configuration;
import org.apache.cloudstack.region.PortableIp;
import org.apache.cloudstack.region.PortableIpRange;
import org.springframework.stereotype.Component;
import com.cloud.configuration.ConfigurationManager;
import com.cloud.configuration.ConfigurationService;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.DataCenter;
import com.cloud.dc.DataCenter.NetworkType;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.HostPodVO;
import com.cloud.dc.Pod;
import com.cloud.dc.Vlan;
import com.cloud.domain.Domain;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.network.Network.Capability;
import com.cloud.network.Network.GuestType;
import com.cloud.network.Network.Provider;
import com.cloud.network.Network.Service;
import com.cloud.network.Networks.TrafficType;
import com.cloud.offering.DiskOffering;
import com.cloud.offering.NetworkOffering;
import com.cloud.offering.NetworkOffering.Availability;
import com.cloud.offering.ServiceOffering;
import com.cloud.offerings.NetworkOfferingVO;
import com.cloud.offerings.dao.NetworkOfferingDaoImpl;
import com.cloud.org.Grouping.AllocationState;
import com.cloud.user.Account;
import com.cloud.utils.Pair;
import com.cloud.utils.component.ManagerBase;
@Component
public class MockConfigurationManagerImpl extends ManagerBase implements ConfigurationManager, ConfigurationService {
@Inject
NetworkOfferingDaoImpl _ntwkOffDao;
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationService#updateConfiguration(org.apache.cloudstack.api.commands.UpdateCfgCmd)
*/
    @Override
    public Configuration updateConfiguration(UpdateCfgCmd cmd) {
        // Mock stub: configuration updates are not exercised by the VPC tests.
        return null;
    }
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationService#createServiceOffering(org.apache.cloudstack.api.commands.CreateServiceOfferingCmd)
*/
    @Override
    public ServiceOffering createServiceOffering(CreateServiceOfferingCmd cmd) {
        // Mock stub: service offering creation is not exercised by these tests.
        return null;
    }
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationService#updateServiceOffering(org.apache.cloudstack.api.commands.UpdateServiceOfferingCmd)
*/
    @Override
    public ServiceOffering updateServiceOffering(UpdateServiceOfferingCmd cmd) {
        // Mock stub: service offering updates are not exercised by these tests.
        return null;
    }
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationService#deleteServiceOffering(org.apache.cloudstack.api.commands.DeleteServiceOfferingCmd)
*/
    @Override
    public boolean deleteServiceOffering(DeleteServiceOfferingCmd cmd) {
        // Mock stub: always reports failure; deletion is not exercised here.
        return false;
    }
    @Override
    public List<Long> getServiceOfferingDomains(Long serviceOfferingId) {
        // Mock stub: domain lookup is not exercised by these tests.
        return null;
    }
    @Override
    public List<Long> getServiceOfferingZones(Long serviceOfferingId) {
        // Mock stub: zone lookup is not exercised by these tests.
        return null;
    }
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationService#updateDiskOffering(org.apache.cloudstack.api.commands.UpdateDiskOfferingCmd)
*/
    @Override
    public DiskOffering updateDiskOffering(UpdateDiskOfferingCmd cmd) {
        // Mock stub: disk offering updates are not exercised by these tests.
        return null;
    }
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationService#deleteDiskOffering(org.apache.cloudstack.api.commands.DeleteDiskOfferingCmd)
*/
    @Override
    public boolean deleteDiskOffering(DeleteDiskOfferingCmd cmd) {
        // Mock stub: always reports failure; deletion is not exercised here.
        return false;
    }
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationService#createDiskOffering(org.apache.cloudstack.api.commands.CreateDiskOfferingCmd)
*/
    @Override
    public DiskOffering createDiskOffering(CreateDiskOfferingCmd cmd) {
        // Mock stub: disk offering creation is not exercised by these tests.
        return null;
    }
    @Override
    public List<Long> getDiskOfferingDomains(Long diskOfferingId) {
        // Mock stub: domain lookup is not exercised by these tests.
        return null;
    }
    @Override
    public List<Long> getDiskOfferingZones(Long diskOfferingId) {
        // Mock stub: zone lookup is not exercised by these tests.
        return null;
    }
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationService#createPod(long, java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.String)
*/
    @Override
    public Pod createPod(long zoneId, String name, String startIp, String endIp, String gateway, String netmask, String allocationState) {
        // Mock stub: pod creation is not exercised by these tests.
        return null;
    }
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationService#createPodIpRange(org.apache.cloudstack.api.command.admin.network.CreateManagementNetworkIpRangeCmd)
*/
    @Override
    public Pod createPodIpRange(CreateManagementNetworkIpRangeCmd cmd) {
        // Mock stub: management IP range creation is not exercised by these tests.
        return null;
    }
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationService#deletePodIpRange(org.apache.cloudstack.api.command.admin.network.DeleteManagementNetworkIpRangeCmd)
*/
    @Override
    public void deletePodIpRange(DeleteManagementNetworkIpRangeCmd cmd) throws ResourceUnavailableException, ConcurrentOperationException {
        // Mock stub: intentionally does nothing.
        return;
    }
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationService#editPod(org.apache.cloudstack.api.commands.UpdatePodCmd)
*/
    @Override
    public Pod editPod(UpdatePodCmd cmd) {
        // Mock stub: pod editing is not exercised by these tests.
        return null;
    }
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationService#deletePod(org.apache.cloudstack.api.commands.DeletePodCmd)
*/
    @Override
    public boolean deletePod(DeletePodCmd cmd) {
        // Mock stub: always reports failure; deletion is not exercised here.
        return false;
    }
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationService#createZone(org.apache.cloudstack.api.commands.CreateZoneCmd)
*/
    @Override
    public DataCenter createZone(CreateZoneCmd cmd) {
        // Mock stub: zone creation is not exercised by these tests.
        return null;
    }
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationService#editZone(org.apache.cloudstack.api.commands.UpdateZoneCmd)
*/
    @Override
    public DataCenter editZone(UpdateZoneCmd cmd) {
        // Mock stub: zone editing is not exercised by these tests.
        return null;
    }
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationService#deleteZone(org.apache.cloudstack.api.commands.DeleteZoneCmd)
*/
    @Override
    public boolean deleteZone(DeleteZoneCmd cmd) {
        // Mock stub: always reports failure; deletion is not exercised here.
        return false;
    }
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationService#createVlanAndPublicIpRange(org.apache.cloudstack.api.commands.CreateVlanIpRangeCmd)
*/
    @Override
    public Vlan createVlanAndPublicIpRange(CreateVlanIpRangeCmd cmd) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException,
        ResourceAllocationException {
        // Mock stub: VLAN/public IP range creation is not exercised by these tests.
        return null;
    }
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationService#markDefaultZone(java.lang.String, long, long)
*/
    @Override
    public Account markDefaultZone(String accountName, long domainId, long defaultZoneId) {
        // Mock stub: default zone marking is not exercised by these tests.
        return null;
    }
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationService#deleteVlanIpRange(org.apache.cloudstack.api.commands.DeleteVlanIpRangeCmd)
*/
    @Override
    public boolean deleteVlanIpRange(DeleteVlanIpRangeCmd cmd) {
        // Mock stub: always reports failure; deletion is not exercised here.
        return false;
    }
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationService#createNetworkOffering(org.apache.cloudstack.api.commands.CreateNetworkOfferingCmd)
*/
@Override
public NetworkOffering createNetworkOffering(CreateNetworkOfferingCmd cmd) {
// TODO Auto-generated method stub
return null;
}
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationService#updateNetworkOffering(org.apache.cloudstack.api.commands.UpdateNetworkOfferingCmd)
*/
@Override
public NetworkOffering updateNetworkOffering(UpdateNetworkOfferingCmd cmd) {
// TODO Auto-generated method stub
return null;
}
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationService#searchForNetworkOfferings(org.apache.cloudstack.api.commands.ListNetworkOfferingsCmd)
*/
@Override
public Pair<List<? extends NetworkOffering>, Integer> searchForNetworkOfferings(ListNetworkOfferingsCmd cmd) {
// TODO Auto-generated method stub
return null;
}
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationService#deleteNetworkOffering(org.apache.cloudstack.api.commands.DeleteNetworkOfferingCmd)
*/
@Override
public boolean deleteNetworkOffering(DeleteNetworkOfferingCmd cmd) {
// TODO Auto-generated method stub
return false;
}
@Override
public List<Long> getNetworkOfferingDomains(Long networkOfferingId) {
// TODO Auto-generated method stub
return null;
}
@Override
public List<Long> getNetworkOfferingZones(Long networkOfferingId) {
// TODO Auto-generated method stub
return null;
}
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationService#getNetworkOfferingNetworkRate(long)
*/
@Override
public Integer getNetworkOfferingNetworkRate(long networkOfferingId, Long dataCenterId) {
// TODO Auto-generated method stub
return null;
}
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationService#getVlanAccount(long)
*/
@Override
public Account getVlanAccount(long vlanId) {
// TODO Auto-generated method stub
return null;
}
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationService#listNetworkOfferings(com.cloud.network.Networks.TrafficType, boolean)
*/
@Override
public List<? extends NetworkOffering> listNetworkOfferings(TrafficType trafficType, boolean systemOnly) {
// TODO Auto-generated method stub
return null;
}
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationService#getDefaultPageSize()
*/
@Override
public Long getDefaultPageSize() {
return 500L;
}
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationService#getServiceOfferingNetworkRate(long)
*/
@Override
public Integer getServiceOfferingNetworkRate(long serviceOfferingId, Long dataCenterId) {
// TODO Auto-generated method stub
return null;
}
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationService#isOfferingForVpc(com.cloud.offering.NetworkOffering)
*/
@Override
public boolean isOfferingForVpc(NetworkOffering offering) {
// TODO Auto-generated method stub
return false;
}
@Override
public PortableIpRange createPortableIpRange(CreatePortableIpRangeCmd cmd) throws ConcurrentOperationException {
return null;// TODO Auto-generated method stub
}
@Override
public boolean deletePortableIpRange(DeletePortableIpRangeCmd cmd) {
return false;// TODO Auto-generated method stub
}
@Override
public List<? extends PortableIpRange> listPortableIpRanges(ListPortableIpRangesCmd cmd) {
return null;// TODO Auto-generated method stub
}
@Override
public List<? extends PortableIp> listPortableIps(long id) {
return null;// TODO Auto-generated method stub
}
/* (non-Javadoc)
* @see com.cloud.utils.component.Manager#configure(java.lang.String, java.util.Map)
*/
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
// TODO Auto-generated method stub
return true;
}
/* (non-Javadoc)
* @see com.cloud.utils.component.Manager#start()
*/
@Override
public boolean start() {
// TODO Auto-generated method stub
return true;
}
/* (non-Javadoc)
* @see com.cloud.utils.component.Manager#stop()
*/
@Override
public boolean stop() {
// TODO Auto-generated method stub
return true;
}
/* (non-Javadoc)
* @see com.cloud.utils.component.Manager#getName()
*/
@Override
public String getName() {
// TODO Auto-generated method stub
return null;
}
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationManager#updateConfiguration(long, java.lang.String, java.lang.String, java.lang.String)
*/
@Override
public String updateConfiguration(long userId, String name, String category, String value, String scope, Long resourceId) {
// TODO Auto-generated method stub
return null;
}
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationManager#createPod(long, java.lang.String, long, java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.String, boolean)
*/
@Override
public HostPodVO createPod(long userId, String podName, long zoneId, String gateway, String cidr, String startIp, String endIp, String allocationState,
boolean skipGatewayOverlapCheck) {
// TODO Auto-generated method stub
return null;
}
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationManager#deleteVlanAndPublicIpRange(long, long, com.cloud.user.Account)
*/
@Override
public boolean deleteVlanAndPublicIpRange(long userId, long vlanDbId, Account caller) {
// TODO Auto-generated method stub
return false;
}
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationManager#checkZoneAccess(com.cloud.user.Account, com.cloud.dc.DataCenter)
*/
@Override
public void checkZoneAccess(Account caller, DataCenter zone) {
// TODO Auto-generated method stub
}
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationManager#checkDiskOfferingAccess(com.cloud.user.Account, com.cloud.offering.DiskOffering)
*/
@Override
public void checkDiskOfferingAccess(Account caller, DiskOffering dof, DataCenter zone) {
// TODO Auto-generated method stub
}
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationManager#createNetworkOffering(java.lang.String, java.lang.String, com.cloud.network.Networks.TrafficType, java.lang.String, boolean, com.cloud.offering.NetworkOffering.Availability, java.lang.Integer, java.util.Map, boolean, com.cloud.network.Network.GuestType, boolean, java.lang.Long, boolean, java.util.Map, boolean)
*/
@Override
public NetworkOfferingVO createNetworkOffering(String name, String displayText, TrafficType trafficType, String tags, boolean specifyVlan, Availability availability,
Integer networkRate, Map<Service, Set<Provider>> serviceProviderMap, boolean isDefault, GuestType type, boolean systemOnly, Long serviceOfferingId,
boolean conserveMode, Map<Service, Map<Capability, String>> serviceCapabilityMap, boolean specifyIpRanges, boolean isPersistent,
Map<NetworkOffering.Detail, String> details, boolean egressDefaultPolicy, Integer maxconn, boolean enableKeepAlive, Boolean forVpc, List<Long> domainIds, List<Long> zoneIds) {
// TODO Auto-generated method stub
return null;
}
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationManager#createVlanAndPublicIpRange(long, long, long, boolean, java.lang.Long, java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.String, com.cloud.user.Account)
*/
@Override
public Vlan createVlanAndPublicIpRange(long zoneId, long networkId, long physicalNetworkId, boolean forVirtualNetwork, boolean forSystemVms, Long podId, String startIP, String endIP,
String vlanGateway, String vlanNetmask, String vlanId, boolean bypassVlanOverlapCheck, Domain domain, Account vlanOwner, String startIPv6, String endIPv6, String vlanGatewayv6, String vlanCidrv6)
throws InsufficientCapacityException, ConcurrentOperationException, InvalidParameterValueException {
// TODO Auto-generated method stub
return null;
}
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationManager#createDefaultSystemNetworks(long)
*/
@Override
public void createDefaultSystemNetworks(long zoneId) throws ConcurrentOperationException {
// TODO Auto-generated method stub
}
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationManager#releaseDomainSpecificVirtualRanges(long)
*/
@Override
public boolean releaseDomainSpecificVirtualRanges(long domainId) {
// TODO Auto-generated method stub
return false;
}
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationManager#releaseAccountSpecificVirtualRanges(long)
*/
@Override
public boolean releaseAccountSpecificVirtualRanges(long accountId) {
// TODO Auto-generated method stub
return false;
}
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationManager#editPod(long, java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.String)
*/
@Override
public Pod editPod(long id, String name, String startIp, String endIp, String gateway, String netmask, String allocationStateStr) {
// TODO Auto-generated method stub
return null;
}
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationManager#checkPodCidrSubnets(long, java.lang.Long, java.lang.String)
*/
@Override
public void checkPodCidrSubnets(long zoneId, Long podIdToBeSkipped, String cidr) {
// TODO Auto-generated method stub
}
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationManager#findPodAllocationState(com.cloud.dc.HostPodVO)
*/
@Override
public AllocationState findPodAllocationState(HostPodVO pod) {
// TODO Auto-generated method stub
return null;
}
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationManager#findClusterAllocationState(com.cloud.dc.ClusterVO)
*/
@Override
public AllocationState findClusterAllocationState(ClusterVO cluster) {
// TODO Auto-generated method stub
return null;
}
/* (non-Javadoc)
* @see com.cloud.configuration.ConfigurationManager#createZone(long, java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.Long, com.cloud.dc.DataCenter.NetworkType, java.lang.String, java.lang.String, boolean, boolean)
*/
@Override
public DataCenterVO createZone(long userId, String zoneName, String dns1, String dns2, String internalDns1, String internalDns2, String guestCidr, String domain,
Long domainId, NetworkType zoneType, String allocationState, String networkDomain, boolean isSecurityGroupEnabled, boolean isLocalStorageEnabled, String ip6Dns1,
String ip6Dns2) {
// TODO Auto-generated method stub
return null;
}
@Override
public Vlan dedicatePublicIpRange(DedicatePublicIpRangeCmd cmd) throws ResourceAllocationException {
// TODO Auto-generated method stub
return null;
}
@Override
public boolean releasePublicIpRange(ReleasePublicIpRangeCmd cmd) {
// TODO Auto-generated method stub
return false;
}
@Override
public Domain getVlanDomain(long vlanId) {
// TODO Auto-generated method stub
return null;
}
}
| apache-2.0 |
nicolashernandez/dev-star | uima-star/uima-connectors/src/main/java/org/apache/uima/SentenceAnnotation.java | 3786 |
/* First created by JCasGen Sun Nov 28 23:36:26 CET 2010 */
package org.apache.uima;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.jcas.cas.TOP_Type;
import org.apache.uima.jcas.cas.FSArray;
import org.apache.uima.jcas.tcas.Annotation;
/**
* Updated by JCasGen Fri Jun 07 17:39:54 CEST 2013
* XML source: /media/ext4/workspace/uima-connectors/desc/connectors/wstspl/AdhocWSTSPL2CASAE.xml
* @generated */
public class SentenceAnnotation extends Annotation {
/** @generated
* @ordered
*/
public final static int typeIndexID = JCasRegistry.register(SentenceAnnotation.class);
/** @generated
* @ordered
*/
public final static int type = typeIndexID;
/** @generated */
public int getTypeIndexID() {return typeIndexID;}
/** Never called. Disable default constructor
* @generated */
protected SentenceAnnotation() {/* intentionally empty block */}
/** Internal - constructor used by generator
* @generated */
public SentenceAnnotation(int addr, TOP_Type type) {
super(addr, type);
readObject();
}
/** @generated */
public SentenceAnnotation(JCas jcas) {
super(jcas);
readObject();
}
/** @generated */
public SentenceAnnotation(JCas jcas, int begin, int end) {
super(jcas);
setBegin(begin);
setEnd(end);
readObject();
}
/** <!-- begin-user-doc -->
* Write your own initialization here
* <!-- end-user-doc -->
@generated modifiable */
private void readObject() {}
//*--------------*
//* Feature: tokenList
/** getter for tokenList - gets
* @generated */
public FSArray getTokenList() {
if (SentenceAnnotation_Type.featOkTst && ((SentenceAnnotation_Type)jcasType).casFeat_tokenList == null)
jcasType.jcas.throwFeatMissing("tokenList", "org.apache.uima.SentenceAnnotation");
return (FSArray)(jcasType.ll_cas.ll_getFSForRef(jcasType.ll_cas.ll_getRefValue(addr, ((SentenceAnnotation_Type)jcasType).casFeatCode_tokenList)));}
/** setter for tokenList - sets
* @generated */
public void setTokenList(FSArray v) {
if (SentenceAnnotation_Type.featOkTst && ((SentenceAnnotation_Type)jcasType).casFeat_tokenList == null)
jcasType.jcas.throwFeatMissing("tokenList", "org.apache.uima.SentenceAnnotation");
jcasType.ll_cas.ll_setRefValue(addr, ((SentenceAnnotation_Type)jcasType).casFeatCode_tokenList, jcasType.ll_cas.ll_getFSRef(v));}
/** indexed getter for tokenList - gets an indexed value -
* @generated */
public Annotation getTokenList(int i) {
if (SentenceAnnotation_Type.featOkTst && ((SentenceAnnotation_Type)jcasType).casFeat_tokenList == null)
jcasType.jcas.throwFeatMissing("tokenList", "org.apache.uima.SentenceAnnotation");
jcasType.jcas.checkArrayBounds(jcasType.ll_cas.ll_getRefValue(addr, ((SentenceAnnotation_Type)jcasType).casFeatCode_tokenList), i);
return (Annotation)(jcasType.ll_cas.ll_getFSForRef(jcasType.ll_cas.ll_getRefArrayValue(jcasType.ll_cas.ll_getRefValue(addr, ((SentenceAnnotation_Type)jcasType).casFeatCode_tokenList), i)));}
/** indexed setter for tokenList - sets an indexed value -
* @generated */
public void setTokenList(int i, Annotation v) {
if (SentenceAnnotation_Type.featOkTst && ((SentenceAnnotation_Type)jcasType).casFeat_tokenList == null)
jcasType.jcas.throwFeatMissing("tokenList", "org.apache.uima.SentenceAnnotation");
jcasType.jcas.checkArrayBounds(jcasType.ll_cas.ll_getRefValue(addr, ((SentenceAnnotation_Type)jcasType).casFeatCode_tokenList), i);
jcasType.ll_cas.ll_setRefArrayValue(jcasType.ll_cas.ll_getRefValue(addr, ((SentenceAnnotation_Type)jcasType).casFeatCode_tokenList), i, jcasType.ll_cas.ll_getFSRef(v));}
}
| apache-2.0 |
IHTSDO/snow-owl | snomed/com.b2international.snowowl.snomed.scg/src-gen/com/b2international/snowowl/dsl/parser/antlr/SCGAntlrTokenFileProvider.java | 1077 | /*
* Copyright 2011-2015 B2i Healthcare Pte Ltd, http://b2i.sg
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.b2international.snowowl.dsl.parser.antlr;
import java.io.InputStream;
import org.eclipse.xtext.parser.antlr.IAntlrTokenFileProvider;
public class SCGAntlrTokenFileProvider implements IAntlrTokenFileProvider {
public InputStream getAntlrTokenFile() {
ClassLoader classLoader = getClass().getClassLoader();
return classLoader.getResourceAsStream("com/b2international/snowowl/dsl/parser/antlr/internal/InternalSCG.tokens");
}
} | apache-2.0 |
jentfoo/aws-sdk-java | aws-java-sdk-rekognition/src/main/java/com/amazonaws/services/rekognition/model/DescribeCollectionRequest.java | 3496 | /*
* Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.rekognition.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.AmazonWebServiceRequest;
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class DescribeCollectionRequest extends com.amazonaws.AmazonWebServiceRequest implements Serializable, Cloneable {
/**
* <p>
* The ID of the collection to describe.
* </p>
*/
private String collectionId;
/**
* <p>
* The ID of the collection to describe.
* </p>
*
* @param collectionId
* The ID of the collection to describe.
*/
public void setCollectionId(String collectionId) {
this.collectionId = collectionId;
}
/**
* <p>
* The ID of the collection to describe.
* </p>
*
* @return The ID of the collection to describe.
*/
public String getCollectionId() {
return this.collectionId;
}
/**
* <p>
* The ID of the collection to describe.
* </p>
*
* @param collectionId
* The ID of the collection to describe.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public DescribeCollectionRequest withCollectionId(String collectionId) {
setCollectionId(collectionId);
return this;
}
/**
* Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
* redacted from this string using a placeholder value.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
if (getCollectionId() != null)
sb.append("CollectionId: ").append(getCollectionId());
sb.append("}");
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj instanceof DescribeCollectionRequest == false)
return false;
DescribeCollectionRequest other = (DescribeCollectionRequest) obj;
if (other.getCollectionId() == null ^ this.getCollectionId() == null)
return false;
if (other.getCollectionId() != null && other.getCollectionId().equals(this.getCollectionId()) == false)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getCollectionId() == null) ? 0 : getCollectionId().hashCode());
return hashCode;
}
@Override
public DescribeCollectionRequest clone() {
return (DescribeCollectionRequest) super.clone();
}
}
| apache-2.0 |
Qatja/processing | src/se/goransson/qatja/Connection.java | 1157 | package se.goransson.qatja;
/*
* Copyright (C) 2012 Andreas Goransson, David Cuartielles
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
/**
* Connection class, contains all streams for a network connection.
*
* @author andreas
*
*/
public abstract class Connection {
public static final int STATUS_CLOSED = 1;
public static final int STATUS_OPENED = 0;
public abstract InputStream getInputStream() throws IOException;
public abstract OutputStream getOutputStream() throws IOException;
public abstract void close() throws IOException;
}
| apache-2.0 |
benjchristensen/RxJava | src/main/java/io/reactivex/internal/schedulers/ScheduledRunnable.java | 3768 | /**
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is
* distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See
* the License for the specific language governing permissions and limitations under the License.
*/
package io.reactivex.internal.schedulers;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicReferenceArray;
import io.reactivex.disposables.Disposable;
import io.reactivex.internal.disposables.DisposableContainer;
import io.reactivex.plugins.RxJavaPlugins;
public final class ScheduledRunnable extends AtomicReferenceArray<Object>
implements Runnable, Callable<Object>, Disposable {
private static final long serialVersionUID = -6120223772001106981L;
final Runnable actual;
static final Object DISPOSED = new Object();
static final Object DONE = new Object();
static final int PARENT_INDEX = 0;
static final int FUTURE_INDEX = 1;
/**
* Creates a ScheduledRunnable by wrapping the given action and setting
* up the optional parent.
* @param actual the runnable to wrap, not-null (not verified)
* @param parent the parent tracking container or null if none
*/
public ScheduledRunnable(Runnable actual, DisposableContainer parent) {
super(2);
this.actual = actual;
this.lazySet(0, parent);
}
@Override
public Object call() {
// Being Callable saves an allocation in ThreadPoolExecutor
run();
return null;
}
@Override
public void run() {
try {
try {
actual.run();
} catch (Throwable e) {
// Exceptions.throwIfFatal(e); nowhere to go
RxJavaPlugins.onError(e);
}
} finally {
Object o = get(PARENT_INDEX);
if (o != DISPOSED && o != null && compareAndSet(PARENT_INDEX, o, DONE)) {
((DisposableContainer)o).delete(this);
}
for (;;) {
o = get(FUTURE_INDEX);
if (o == DISPOSED || compareAndSet(FUTURE_INDEX, o, DONE)) {
break;
}
}
}
}
public void setFuture(Future<?> f) {
for (;;) {
Object o = get(FUTURE_INDEX);
if (o == DONE) {
return;
}
if (o == DISPOSED) {
f.cancel(true);
return;
}
if (compareAndSet(FUTURE_INDEX, o, f)) {
return;
}
}
}
@Override
public void dispose() {
for (;;) {
Object o = get(FUTURE_INDEX);
if (o == DONE || o == DISPOSED) {
break;
}
if (compareAndSet(FUTURE_INDEX, o, DISPOSED)) {
if (o != null) {
((Future<?>)o).cancel(true);
}
break;
}
}
for (;;) {
Object o = get(PARENT_INDEX);
if (o == DONE || o == DISPOSED || o == null) {
return;
}
if (compareAndSet(PARENT_INDEX, o, DISPOSED)) {
((DisposableContainer)o).delete(this);
return;
}
}
}
@Override
public boolean isDisposed() {
Object o = get(FUTURE_INDEX);
return o == DISPOSED || o == DONE;
}
}
| apache-2.0 |
aws/aws-sdk-java | aws-java-sdk-waf/src/main/java/com/amazonaws/services/waf/model/waf_regional/transform/CreateRegexPatternSetResultJsonUnmarshaller.java | 3186 | /*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.waf.model.waf_regional.transform;
import java.math.*;
import javax.annotation.Generated;
import com.amazonaws.services.waf.model.*;
import com.amazonaws.transform.SimpleTypeJsonUnmarshallers.*;
import com.amazonaws.transform.*;
import com.fasterxml.jackson.core.JsonToken;
import static com.fasterxml.jackson.core.JsonToken.*;
/**
* CreateRegexPatternSetResult JSON Unmarshaller
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class CreateRegexPatternSetResultJsonUnmarshaller implements Unmarshaller<CreateRegexPatternSetResult, JsonUnmarshallerContext> {
public CreateRegexPatternSetResult unmarshall(JsonUnmarshallerContext context) throws Exception {
CreateRegexPatternSetResult createRegexPatternSetResult = new CreateRegexPatternSetResult();
int originalDepth = context.getCurrentDepth();
String currentParentElement = context.getCurrentParentElement();
int targetDepth = originalDepth + 1;
JsonToken token = context.getCurrentToken();
if (token == null)
token = context.nextToken();
if (token == VALUE_NULL) {
return createRegexPatternSetResult;
}
while (true) {
if (token == null)
break;
if (token == FIELD_NAME || token == START_OBJECT) {
if (context.testExpression("RegexPatternSet", targetDepth)) {
context.nextToken();
createRegexPatternSetResult.setRegexPatternSet(RegexPatternSetJsonUnmarshaller.getInstance().unmarshall(context));
}
if (context.testExpression("ChangeToken", targetDepth)) {
context.nextToken();
createRegexPatternSetResult.setChangeToken(context.getUnmarshaller(String.class).unmarshall(context));
}
} else if (token == END_ARRAY || token == END_OBJECT) {
if (context.getLastParsedParentElement() == null || context.getLastParsedParentElement().equals(currentParentElement)) {
if (context.getCurrentDepth() <= originalDepth)
break;
}
}
token = context.nextToken();
}
return createRegexPatternSetResult;
}
private static CreateRegexPatternSetResultJsonUnmarshaller instance;
public static CreateRegexPatternSetResultJsonUnmarshaller getInstance() {
if (instance == null)
instance = new CreateRegexPatternSetResultJsonUnmarshaller();
return instance;
}
}
| apache-2.0 |
BigFootprint/AndroidAnnotation | AndroidAnnotation/src/com/footprint/annotation/ViewAnno.java | 312 | package com.footprint.annotation;
import java.lang.annotation.*;
/**
* provide annotation for view field
*/
@Target(ElementType.FIELD)
@Retention(RetentionPolicy.RUNTIME)
public @interface ViewAnno {
/** the id for view */
public int id() default -1;
public String click() default "";
}
| apache-2.0 |
asakusafw/asakusafw | testing-project/asakusa-test-data-generator/src/main/java/com/asakusafw/testdata/generator/TemplateGenerator.java | 1274 | /**
* Copyright 2011-2021 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.testdata.generator;
import java.io.IOException;
import com.asakusafw.dmdl.semantics.ModelDeclaration;
/**
* Abstract interface of test template generators.
* @since 0.2.0
*/
public interface TemplateGenerator {
/**
* Generates a test template for the specified model.
* @param model the target model
* @throws IOException if failed to generate a workbook
* @throws IllegalArgumentException if some parameters were {@code null}
*/
void generate(ModelDeclaration model) throws IOException;
/**
* Returns the title of this generator.
* @return the title
*/
String getTitle();
}
| apache-2.0 |
naipath/cyNeo4j | src/main/java/nl/maastrichtuniversity/networklibrary/cyneo4j/internal/CyActivator.java | 4067 | package nl.maastrichtuniversity.networklibrary.cyneo4j.internal;
import nl.maastrichtuniversity.networklibrary.cyneo4j.internal.configuration.AppConfiguration;
import nl.maastrichtuniversity.networklibrary.cyneo4j.internal.neo4j.Neo4jClient;
import nl.maastrichtuniversity.networklibrary.cyneo4j.internal.ui.CommandMenuAction;
import nl.maastrichtuniversity.networklibrary.cyneo4j.internal.ui.connect.ConnectInstanceMenuAction;
import nl.maastrichtuniversity.networklibrary.cyneo4j.internal.ui.exportnetwork.ExportNetworkMenuAction;
import nl.maastrichtuniversity.networklibrary.cyneo4j.internal.ui.importgraph.CypherQueryMenuAction;
import nl.maastrichtuniversity.networklibrary.cyneo4j.internal.ui.importgraph.querytemplate.QueryTemplateMenuAction;
import org.cytoscape.application.CyApplicationManager;
import org.cytoscape.application.swing.CySwingApplication;
import org.cytoscape.event.CyEventHelper;
import org.cytoscape.model.CyNetworkFactory;
import org.cytoscape.model.CyNetworkManager;
import org.cytoscape.service.util.AbstractCyActivator;
import org.cytoscape.view.layout.CyLayoutAlgorithmManager;
import org.cytoscape.view.model.CyNetworkViewFactory;
import org.cytoscape.view.model.CyNetworkViewManager;
import org.cytoscape.view.vizmap.VisualMappingManager;
import org.cytoscape.view.vizmap.VisualStyleFactory;
import org.cytoscape.work.swing.DialogTaskManager;
import org.osgi.framework.BundleContext;
import java.util.Properties;
public class CyActivator extends AbstractCyActivator {
private AppConfiguration appConfiguration = new AppConfiguration();
@Override
public void start(BundleContext context) throws Exception {
appConfiguration.load();
Services services = createServices(context);
ConnectInstanceMenuAction connectAction = ConnectInstanceMenuAction.create(services);
CypherQueryMenuAction cypherQueryMenuAction = CypherQueryMenuAction.create(services);
QueryTemplateMenuAction queryTemplateMenuAction = QueryTemplateMenuAction.create(services);
ExportNetworkMenuAction exportNetworkToNeo4jMenuAction = ExportNetworkMenuAction.create(services);
CommandMenuAction ImportGraphMenuAction = CommandMenuAction.create("Import all from Neo4j",services, () -> services.getCommandFactory().createImportGraphTask());
registerAllServices(context, connectAction, new Properties());
registerAllServices(context, cypherQueryMenuAction, new Properties());
registerAllServices(context, queryTemplateMenuAction, new Properties() );
registerAllServices(context, ImportGraphMenuAction, new Properties());
registerAllServices(context, exportNetworkToNeo4jMenuAction, new Properties());
}
private Services createServices(BundleContext context) {
Services services = new Services();
services.setAppConfiguration(appConfiguration);
services.setCySwingApplication(getService(context, CySwingApplication.class));
services.setCyApplicationManager(getService(context, CyApplicationManager.class));
services.setCyNetworkFactory(getService(context, CyNetworkFactory.class));
services.setCyNetworkManager(getService(context, CyNetworkManager.class));
services.setCyNetworkViewManager(getService(context, CyNetworkViewManager.class));
services.setDialogTaskManager(getService(context, DialogTaskManager.class));
services.setCyNetworkViewFactory(getService(context, CyNetworkViewFactory.class));
services.setCyLayoutAlgorithmManager(getService(context, CyLayoutAlgorithmManager.class));
services.setVisualMappingManager(getService(context, VisualMappingManager.class));
services.setCyEventHelper(getService(context, CyEventHelper.class));
services.setVisualStyleFactory(getService(context, VisualStyleFactory.class));
services.setNeo4jClient(new Neo4jClient());
services.setCommandFactory(CommandFactory.create(services));
services.setCommandRunner(CommandRunner.create(services));
return services;
}
}
| apache-2.0 |
fabioCollini/LifeCycleBinder | lifecyclebinder-demo-fragments/src/main/java/it/codingjam/lifecyclebinder/demo/ActivityLogger.java | 1915 | /*
* Copyright 2016 Fabio Collini.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package it.codingjam.lifecyclebinder.demo;
import android.content.Intent;
import android.os.Bundle;
import android.support.v7.app.AppCompatActivity;
import android.util.Log;
import it.codingjam.lifecyclebinder.BindLifeCycle;
import it.codingjam.lifecyclebinder.DefaultLifeCycleAware;
/**
 * Lifecycle-aware component that writes one info-level log line for every
 * activity lifecycle callback it receives.
 */
@BindLifeCycle
public class ActivityLogger extends DefaultLifeCycleAware<AppCompatActivity> {

    private static final String TAG = "ACTIVITY_LOG";

    /** Emits a single lifecycle entry: the phase prefix followed by the activity. */
    private void logEvent(String phasePrefix, AppCompatActivity activity) {
        Log.i(TAG, phasePrefix + activity);
    }

    @Override
    public void onCreate(AppCompatActivity activity, Bundle savedInstanceState, Intent intent, Bundle arguments) {
        logEvent("Creating activity:", activity);
    }

    @Override
    public void onStart(AppCompatActivity activity) {
        logEvent("Starting activity:", activity);
    }

    @Override
    public void onResume(AppCompatActivity activity) {
        logEvent("Resuming activity:", activity);
    }

    @Override
    public void onPause(AppCompatActivity activity) {
        logEvent("Pausing activity:", activity);
    }

    @Override
    public void onStop(AppCompatActivity activity) {
        logEvent("Stopping activity:", activity);
    }

    @Override
    public void onDestroy(AppCompatActivity activity, boolean changingConfigurations) {
        logEvent("Destroying activity:", activity);
    }
}
| apache-2.0 |
rayrelay/devnote | devnote-example/src/main/java/com/jgraph/layout/JGraphExampleGraph.java | 6543 | /*
* $Id: JGraphExampleGraph.java,v 1.7 2008/02/25 09:22:27 david Exp $
* Copyright (c) 2001-2005, Gaudenz Alder
*
* All rights reserved.
*
* This file is licensed under the JGraph software license, a copy of which
* will have been provided to you in the file LICENSE at the root of your
* installation directory. If you are unable to locate this file please
* contact JGraph sales for another copy.
*/
package com.jgraph.layout;
import java.awt.event.ActionEvent;
import java.awt.geom.Rectangle2D;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import javax.swing.AbstractAction;
import javax.swing.Timer;
import org.jgraph.example.GraphEd.MyGraph;
import org.jgraph.graph.CellView;
import org.jgraph.graph.DefaultGraphModel;
import org.jgraph.graph.GraphConstants;
import org.jgraph.graph.GraphLayoutCache;
import org.jgraph.graph.GraphModel;
/**
 * A graph that can animate changes (morph): cell bounds are interpolated from
 * their current position to a target position over a fixed number of timer steps.
 */
public class JGraphExampleGraph extends MyGraph {

    /**
     * Specifies the delay between morphing steps (milliseconds)
     */
    protected int delay = 30;

    /**
     * Specified the number of steps in the morphing process
     */
    protected int steps = 10;

    /**
     * Specified the current morhing step
     */
    protected int step = 0;

    /**
     * Stores the previous bounds of morphed cells (cell -> Rectangle2D)
     */
    protected Map oldBounds = new Hashtable();

    /**
     * Stores the future bounds of morphed cells (cell -> Rectangle2D)
     */
    protected Map newBounds = new Hashtable();

    /**
     * Stores the previous collective bounds of the morphed cells
     */
    protected Rectangle2D oldClipBounds;

    /**
     * Stores the new collective bounds of the morphed cells
     */
    protected Rectangle2D newClipBounds;

    /**
     * Constructs an example graph for the specified graph model.
     *
     * @param model the model to display
     */
    public JGraphExampleGraph(GraphModel model) {
        super(model);
        setGraphLayoutCache(new JGraphExampleLayoutCache(this));
    }

    /**
     * Constructs an example graph for the specified model and layout cache.
     */
    public JGraphExampleGraph(GraphModel model, GraphLayoutCache cache) {
        super(model, cache);
    }

    /**
     * Animates the cells in {@code nestedMap} from their current bounds to the
     * bounds contained in the map, then applies the map via the layout cache so
     * the change is recorded once (single undoable edit). Cells in
     * {@code nomorph} jump straight to their final bounds.
     */
    public void morph(final Map nestedMap, Set nomorph) {
        Set parents = initMorphing(nestedMap, nomorph);
        if (!newBounds.isEmpty()) {
            final Object[] cells = parents.toArray();
            // Edges attached to the moving cells must be refreshed every step.
            Object[] edges = DefaultGraphModel.getEdges(getModel(), cells)
                    .toArray();
            final CellView[] edgeViews = getGraphLayoutCache()
                    .getMapping(edges);
            // Execute the morphing. This spawns a timer
            // to not block the dispatcher thread (repaint)
            Timer timer = new Timer(delay, new AbstractAction() {
                public void actionPerformed(ActionEvent e) {
                    if (step >= steps) {
                        // Animation done: stop the timer, restore original bounds,
                        // then apply the real edit (keeps command history correct).
                        Timer timer = (Timer) e.getSource();
                        timer.stop();
                        restore();
                        getGraphLayoutCache().edit(nestedMap, null, null, null);
                    } else {
                        step++;
                        Iterator it = newBounds.keySet().iterator();
                        while (it.hasNext()) {
                            morphCell(it.next(), step);
                        }
                        getGraphLayoutCache().refresh(edgeViews, false);
                        // Mark both the old and new extents dirty so the repaint
                        // covers the full swept area of this step.
                        addOffscreenDirty(oldClipBounds);
                        addOffscreenDirty(newClipBounds);
                        oldClipBounds = newClipBounds = null;
                        repaint();
                    }
                }
            });
            timer.start();
        } else {
            // Nothing moves; apply the edit immediately.
            getGraphLayoutCache().edit(nestedMap, null, null, null);
        }
    }

    /**
     * Initial step of the morphing process. Analyses the arguments and prepares
     * internal datastructures for the morphing.
     *
     * @return Returns the set of all cells and ancestors to determine the dirty
     *         region and connected edges.
     */
    protected Set initMorphing(Map nestedMap, Set nomorph) {
        oldBounds.clear();
        newBounds.clear();
        step = 0;
        Iterator it = nestedMap.entrySet().iterator();
        while (it.hasNext()) {
            Map.Entry entry = (Map.Entry) it.next();
            Object cell = entry.getKey();
            Map attrs = (Map) entry.getValue();
            Rectangle2D rect = GraphConstants.getBounds(attrs);
            if (rect != null) {
                Rectangle2D old = getCellBounds(cell);
                // Only animate cells whose bounds actually change; old and new
                // bounds are always inserted pairwise (see morphCell).
                if (old != null && !old.equals(rect)) {
                    newBounds.put(cell, rect);
                    oldBounds.put(cell, old.clone());
                }
            }
        }
        // Make sure the cells in nomorph are at their future
        // locations and fetches the set of all parents.
        HashSet parents = new HashSet();
        it = oldBounds.keySet().iterator();
        while (it.hasNext()) {
            Object cell = it.next();
            Object parent = getModel().getParent(cell);
            if (nomorph != null && nomorph.contains(cell)) {
                // Excluded from animation: snap directly to the target bounds.
                Rectangle2D rect = (Rectangle2D) newBounds.remove(cell);
                setCellBounds(cell, rect);
            }
            while (parent != null) {
                parents.add(parent);
                parent = getModel().getParent(parent);
            }
        }
        parents.addAll(oldBounds.keySet());
        return parents;
    }

    /**
     * Restore the old bounds values for all cells. (This is required at the end
     * of the morphing animation and before calling the edit method for the
     * command history to work correctly.)
     *
     */
    protected void restore() {
        Iterator it = oldBounds.entrySet().iterator();
        while (it.hasNext()) {
            Map.Entry entry = (Map.Entry) it.next();
            setCellBounds(entry.getKey(), (Rectangle2D) entry.getValue());
        }
    }

    /**
     * Performs the morph positionon a cell for a particular step
     *
     * @param cell
     *            the cell being morphed
     * @param step
     *            the number step into morph process
     */
    protected void morphCell(Object cell, int step) {
        Rectangle2D old = (Rectangle2D) oldBounds.get(cell);
        Rectangle2D rect = (Rectangle2D) newBounds.get(cell);
        // Add to total clip bounds
        if (old != null) {
            if (oldClipBounds == null) {
                oldClipBounds = (Rectangle2D) old.clone();
            } else {
                oldClipBounds.add(old);
            }
        }
        if (rect != null) {
            if (newClipBounds == null) {
                newClipBounds = (Rectangle2D) rect.clone();
            } else {
                newClipBounds.add(rect);
            }
        }
        // NOTE(review): the interpolation below dereferences both old and rect;
        // initMorphing inserts them pairwise for every key of newBounds, so this
        // holds for current callers — confirm if callers change.
        double dx = (rect.getX() - old.getX()) * step / steps;
        double dy = (rect.getY() - old.getY()) * step / steps;
        Rectangle2D pos = new Rectangle2D.Double(old.getX() + dx, old.getY()
                + dy, old.getWidth(), old.getHeight());
        setCellBounds(cell, pos);
    }

    /**
     * Set the new cell bounds
     *
     * @param cell
     *            the cell whose bounds to set
     * @param bounds
     *            the new bounds of the cell
     */
    protected void setCellBounds(Object cell, Rectangle2D bounds) {
        Rectangle2D rect = getCellBounds(cell);
        if (rect != null && bounds != null) {
            // Mutate the live bounds in place, then refresh the mapped view so
            // the change becomes visible without a model edit.
            rect.setFrame(bounds.getX(), bounds.getY(), bounds.getWidth(),
                    bounds.getHeight());
            CellView view = getGraphLayoutCache().getMapping(cell, false);
            if (view != null)
                view.update(getGraphLayoutCache());
        }
    }
}
| apache-2.0 |
thinkbigthings/tic-tac-toe | src/main/java/org/thinkbigthings/tictactoe/Game.java | 3786 | package org.thinkbigthings.tictactoe;
import org.thinkbigthings.tictactoe.player.HumanPlayer;
import org.thinkbigthings.tictactoe.player.PerfectPlayer;
import org.thinkbigthings.tictactoe.player.Player;
import org.thinkbigthings.tictactoe.player.RandomPlayer;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.Optional;
/**
 * Console tic-tac-toe game. The two players and the board size come from a
 * {@link GameConfig}; turns alternate (player one first) until no move remains,
 * printing the board between moves.
 */
public class Game {

    private final Player p1;
    private final Player p2;
    private final int boardSize;

    /**
     * Creates a game whose players (token + identity string) and board size are
     * taken from the supplied configuration.
     */
    public Game(GameConfig config) {
        p1 = createPlayer(new PlayerToken(config.getTokenPlayer1()), config.getIdentityPlayer1());
        p2 = createPlayer(new PlayerToken(config.getTokenPlayer2()), config.getIdentityPlayer2());
        boardSize = config.getBoardSize();
    }

    /**
     * Maps an identity string to a concrete {@link Player} implementation.
     *
     * @param symbol   token the player places on the board
     * @param identity one of "human", "computer" or "ai"
     * @return the corresponding player
     * @throws IllegalArgumentException if the identity is not recognized
     */
    protected Player createPlayer(PlayerToken symbol, String identity) {
        // switch-on-String replaces the original equals() chain; behavior is
        // unchanged (including the NPE on a null identity).
        switch (identity) {
            case "human":
                // Human moves are read from stdin.
                return new HumanPlayer(symbol, new BufferedReader(new InputStreamReader(System.in)));
            case "computer":
                return new RandomPlayer(symbol);
            case "ai":
                return new PerfectPlayer(symbol);
            default:
                throw new IllegalArgumentException("can't determine player identity from " + identity);
        }
    }

    /**
     * Plays a full game to completion.
     *
     * @return the winning player's token, or empty for a draw
     */
    public Optional<PlayerToken> play() {
        Player currentPlayer = p1;
        Board currentBoard = new Board(boardSize);

        // TODO gradle wrapper jar isn't being checked in because of gitignore on jars
        //      should check in the wrapper jar but not necessarily gradle proper
        // for 0.4.1
        // TODO break build on decreasing code coverage
        //      https://github.com/gradle/gradle/issues/824
        //      http://stackoverflow.com/questions/35540823/minimum-code-coverage-threshold-in-jacoco-gradle
        // TODO connect to mbeans to monitor cpu and boot endpoints, may be some properties to set too
        //      Although we don't have any health endpoints yet
        //      http://stackoverflow.com/que=stions/30069643/remote-monitoring-with-visualvm-and-jmx
        //      http://docs.spring.io/spring-boot/docs/current-SNAPSHOT/reference/htmlsingle/#boot-features-jmx
        //      try setting own mbeans?
        //      Logs: o.s.j.e.a.AnnotationMBeanExporter : Registering beans for JMX exposure on startup
        // TODO Use lambdas / async logs
        // for 0.5.0
        // TODO handle TODO's elsewhere in the code or move to this list
        // TODO experiment with hot reloading
        // for 1.0
        // TODO improve algorithm for the AI
        //      account for number of ways to win besides just nearest win, account for blocking opponent imminent win
        // TODO keep track of score between multiple games
        // for 2.x
        // TODO write a clickable interface with a web page
        // TODO generalize to Gomoku (https://en.wikipedia.org/wiki/Tic-tac-toe)
        // TODO experiment with board sizes besides 3
        // TODO use websockets

        System.out.println(currentBoard);

        boolean inProgress = true;
        while (inProgress) {
            System.out.print("Player " + currentPlayer.getPlaySymbol() + ": Enter your move: ");
            currentBoard = currentPlayer.getNextMove(currentBoard);
            // Alternate turns between the two players.
            currentPlayer = (currentPlayer == p1) ? p2 : p1;
            System.out.println();
            System.out.println(currentBoard);
            inProgress = currentBoard.isMoveAvailable();
        }

        // Declared at first use (was a separate pre-declaration).
        Optional<PlayerToken> winner = currentBoard.getWinner();
        if (winner.isPresent()) {
            System.out.println("PLAYER " + winner.get() + " WINS!!!");
        } else {
            System.out.println("ITS A DRAW");
        }
        System.out.println();

        return winner;
    }
}
| apache-2.0 |
zsigmond-czine-everit/jdbc-dsf-ecm | component/src/main/java/org/everit/persistence/jdbc/dsf/ecm/internal/XADataSourceAttributePriority.java | 1160 | /*
* Copyright (C) 2011 Everit Kft. (http://www.everit.org)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.everit.persistence.jdbc.dsf.ecm.internal;
/**
 * Constants of priority.
 *
 * <p>Each constant pairs a numeric ordering value with one JDBC pool attribute;
 * presumably these order the attribute definitions of the XADataSource ECM
 * component (TODO confirm against the component class that references them).
 */
public final class XADataSourceAttributePriority {

    public static final int P11_JDBC_MAX_IDLE_TIME = 11;

    public static final int P12_JDBC_MAX_STATEMENTS = 12;

    public static final int P13_JDBC_PROPERTY_CYCLE = 13;

    public static final int P14_JDBC_INITIAL_POOL_SIZE = 14;

    public static final int P15_JDBC_MIN_POOL_SIZE = 15;

    public static final int P16_JDBC_MAX_POOL_SIZE = 16;

    // Constants holder; never instantiated.
    private XADataSourceAttributePriority() {
    }
}
| apache-2.0 |
tectronics/hyracks | hyracks/hyracks-api/src/main/java/org/apache/hyracks/api/constraints/PartitionConstraintHelper.java | 2175 | /*
* Copyright 2009-2013 by The Regents of the University of California
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* you may obtain a copy of the License from
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hyracks.api.constraints;
import org.apache.hyracks.api.constraints.expressions.ConstantExpression;
import org.apache.hyracks.api.constraints.expressions.PartitionCountExpression;
import org.apache.hyracks.api.constraints.expressions.PartitionLocationExpression;
import org.apache.hyracks.api.dataflow.IOperatorDescriptor;
import org.apache.hyracks.api.job.JobSpecification;
/**
 * Static helpers for attaching partition constraints (count, per-partition
 * location choice, absolute location) to an operator of a
 * {@link JobSpecification}.
 */
public class PartitionConstraintHelper {

    /** Utility class; not meant to be instantiated. */
    private PartitionConstraintHelper() {
    }

    /** Constrains {@code op} to run with exactly {@code count} partitions. */
    public static void addPartitionCountConstraint(JobSpecification spec, IOperatorDescriptor op, int count) {
        spec.addUserConstraint(new Constraint(new PartitionCountExpression(op.getOperatorId()), new ConstantExpression(
                count)));
    }

    /**
     * Constrains partition i of {@code op} to one of the node choices in
     * {@code choices[i]}; the partition count is fixed to {@code choices.length}.
     */
    public static void addLocationChoiceConstraint(JobSpecification spec, IOperatorDescriptor op, String[][] choices) {
        addPartitionCountConstraint(spec, op, choices.length);
        for (int i = 0; i < choices.length; ++i) {
            spec.addUserConstraint(new Constraint(new PartitionLocationExpression(op.getOperatorId(), i),
                    new ConstantExpression(choices[i])));
        }
    }

    /**
     * Pins partition i of {@code op} to {@code locations[i]}; the partition
     * count is fixed to {@code locations.length}.
     */
    public static void addAbsoluteLocationConstraint(JobSpecification spec, IOperatorDescriptor op, String... locations) {
        addPartitionCountConstraint(spec, op, locations.length);
        for (int i = 0; i < locations.length; ++i) {
            spec.addUserConstraint(new Constraint(new PartitionLocationExpression(op.getOperatorId(), i),
                    new ConstantExpression(locations[i])));
        }
    }
} | apache-2.0 |
NLPchina/elasticsearch-sql | src/main/java/org/nlpcn/es4sql/domain/Select.java | 4418 | package org.nlpcn.es4sql.domain;
import org.elasticsearch.search.sort.ScriptSortBuilder;
import org.nlpcn.es4sql.parse.SubQueryExpression;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
 * Converts a SQL statement into a {@code Select} object model.
 *
 * @author ansj
 */
public class Select extends Query {

    // Default row count applied by the constructor when no LIMIT is given.
    // NOTE(review): public mutable static — writable by any caller; confirm before tightening.
    public static int DEFAULT_ROWCOUNT = 1000;

    // Using these functions will cause the query to execute as an aggregation.
    // (Includes support for moving average and rolling standard deviation.)
    private final List<String> aggsFunctions = Arrays.asList("SUM", "MAX", "MIN", "AVG", "TOPHITS", "COUNT", "STATS","EXTENDED_STATS","PERCENTILES","SCRIPTED_METRIC", "PERCENTILE_RANKS", "MOVINGAVG", "ROLLINGSTD");

    // Selected fields (plain columns and method/aggregation fields).
    private List<Field> fields = new ArrayList<>();
    // GROUP BY items; each inner list is one grouping level.
    private List<List<Field>> groupBys = new ArrayList<>();
    // ORDER BY items, in declaration order.
    private List<Order> orderBys = new ArrayList<>();
    // True once any sub-query expression was found in the WHERE tree.
    private boolean containsSubQueries;
    private List<SubQueryExpression> subQueries;
    // Forces query (scoring) mode, e.g. when sorting on _score.
    public boolean isQuery = false;
    private boolean selectAll = false;

    // Added by xzb: supports the SQL HAVING clause to filter aggregation results, e.g.
    // select count(age) as ageCnt, avg(age) as ageAvg from bank group by gender having ageAvg > 4.5 and ageCnt > 5 order by ageCnt asc
    private String having;

    // True when the query must run as an aggregation (GROUP BY or aggregation function present).
    public boolean isAgg = false;

    public Select() {
        setRowCount(DEFAULT_ROWCOUNT);
    }

    public List<Field> getFields() {
        return fields;
    }

    /** Adds a single-field GROUP BY level (wraps it in a one-element list). */
    public void addGroupBy(Field field) {
        List<Field> wrapper = new ArrayList<>();
        wrapper.add(field);
        addGroupBy(wrapper);
    }

    public String getHaving() {
        return having;
    }

    public void setHaving(String having) {
        this.having = having;
    }

    /** Adds one GROUP BY level; any grouping turns the query into an aggregation. */
    public void addGroupBy(List<Field> fields) {
        isAgg = true;
        this.groupBys.add(fields);
    }

    public List<List<Field>> getGroupBys() {
        return groupBys;
    }

    public List<Order> getOrderBys() {
        return orderBys;
    }

    /**
     * Adds an ORDER BY entry. {@code _score} may be used directly in the ORDER
     * BY clause to sort by relevance, e.g. {@code select * from tbl order by _score asc}.
     */
    public void addOrderBy(String nestedPath, String name, String type, ScriptSortBuilder.ScriptSortType scriptSortType, Object missing, String unmappedType, String numericType, String format) {
        if ("_score".equals(name)) { // sorting on _score requires scoring, so force query mode
            isQuery = true;
        }
        Order order = new Order(nestedPath, name, type);
        order.setScriptSortType(scriptSortType);
        order.setMissing(missing);
        order.setUnmappedType(unmappedType);
        order.setNumericType(numericType);
        order.setFormat(format);
        this.orderBys.add(order);
    }

    /** Adds a selected field; "*" marks select-all, aggregation functions flip isAgg. */
    public void addField(Field field) {
        if (field == null ) {
            return;
        }
        if(field.getName().equals("*")){
            this.selectAll = true;
        }
        if(field instanceof MethodField && aggsFunctions.contains(field.getName().toUpperCase())) {
            isAgg = true;
        }
        fields.add(field);
    }

    /** Walks the WHERE tree and collects every sub-query expression. */
    public void fillSubQueries() {
        subQueries = new ArrayList<>();
        Where where = this.getWhere();
        fillSubQueriesFromWhereRecursive(where);
    }

    private void fillSubQueriesFromWhereRecursive(Where where) {
        if(where == null) return;
        if(where instanceof Condition){
            Condition condition = (Condition) where;
            if ( condition.getValue() instanceof SubQueryExpression){
                this.subQueries.add((SubQueryExpression) condition.getValue());
                this.containsSubQueries = true;
            }
            // A condition value may also be an array containing sub-queries (e.g. IN (...)).
            if(condition.getValue() instanceof Object[]){
                for(Object o : (Object[]) condition.getValue()){
                    if ( o instanceof SubQueryExpression){
                        this.subQueries.add((SubQueryExpression) o);
                        this.containsSubQueries = true;
                    }
                }
            }
        }
        else {
            // Compound WHERE: recurse into each child clause.
            for(Where innerWhere : where.getWheres())
                fillSubQueriesFromWhereRecursive(innerWhere);
        }
    }

    public boolean containsSubQueries() {
        return containsSubQueries;
    }

    public List<SubQueryExpression> getSubQueries() {
        return subQueries;
    }

    public boolean isOrderdSelect(){
        return this.getOrderBys()!=null && this.getOrderBys().size() >0 ;
    }

    public boolean isSelectAll() {
        return selectAll;
    }

    public void setFields(List<Field> fields) {
        this.fields = fields;
    }
}
| apache-2.0 |
piotr-j/VisEditor | ui/src/test/java/com/kotcrab/vis/ui/test/manual/TestListView.java | 4142 | /*
* Copyright 2014-2016 See AUTHORS file.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.kotcrab.vis.ui.test.manual;
import com.badlogic.gdx.graphics.Color;
import com.badlogic.gdx.scenes.scene2d.Actor;
import com.badlogic.gdx.scenes.scene2d.utils.ChangeListener;
import com.badlogic.gdx.scenes.scene2d.utils.Drawable;
import com.badlogic.gdx.utils.Array;
import com.kotcrab.vis.ui.VisUI;
import com.kotcrab.vis.ui.util.TableUtils;
import com.kotcrab.vis.ui.util.adapter.ArrayAdapter;
import com.kotcrab.vis.ui.util.form.SimpleFormValidator;
import com.kotcrab.vis.ui.widget.*;
import com.kotcrab.vis.ui.widget.ListView.ItemClickListener;
import com.kotcrab.vis.ui.widget.ListView.UpdatePolicy;
import java.util.Comparator;
/** @author Kotcrab */
public class TestListView extends VisWindow {
    /** Builds the demo window: an add-item input row above a ListView of models. */
    public TestListView () {
        super("listview");
        TableUtils.setSpacingDefaults(this);
        columnDefaults(0).left();

        addCloseButton();
        closeOnEscape();

        // Seed data: three entries per index, each with a distinct color.
        Array<Model> array = new Array<Model>();
        for (int i = 1; i <= 3; i++) {
            array.add(new Model("Windows" + i, VisUI.getSkin().getColor("vis-red")));
            array.add(new Model("Linux" + i, Color.GREEN));
            array.add(new Model("OSX" + i, Color.WHITE));
        }

        final TestAdapter adapter = new TestAdapter(array);
        ListView<Model> view = new ListView<Model>(adapter);
        // ON_DRAW: the view rebuilds itself during rendering, so adapter changes
        // become visible without explicit invalidation calls.
        view.setUpdatePolicy(UpdatePolicy.ON_DRAW);

        VisTable footerTable = new VisTable();
        footerTable.addSeparator();
        footerTable.add("Table Footer");
        view.setFooter(footerTable);

        final VisValidatableTextField nameField = new VisValidatableTextField();
        VisTextButton addButton = new VisTextButton("Add");
        // Keeps the Add button disabled while the name field is empty.
        SimpleFormValidator validator = new SimpleFormValidator(addButton);
        validator.notEmpty(nameField, "");

        add(new VisLabel("New Name:"));
        add(nameField);
        add(addButton);
        row();
        add(view.getMainTable()).colspan(3).grow();

        addButton.addListener(new ChangeListener() {
            @Override
            public void changed (ChangeEvent event, Actor actor) {
                //by changing array using adapter view will be invalidated automatically
                adapter.add(new Model(nameField.getText(), Color.GRAY));
                nameField.setText("");
            }
        });

        view.setItemClickListener(new ItemClickListener<Model>() {
            @Override
            public void clicked (Model item) {
                System.out.println("Clicked: " + item.name);
            }
        });

        setSize(300, 300);
        setPosition(458, 245);
    }

    /** Simple list item: display name plus label color. */
    private static class Model {
        public String name;
        public Color color;

        public Model (String name, Color color) {
            this.name = name;
            this.color = color;
        }
    }

    /** Adapter rendering each Model as a colored label row, sorted case-insensitively by name. */
    private static class TestAdapter extends ArrayAdapter<Model, VisTable> {
        private final Drawable bg = VisUI.getSkin().getDrawable("window-bg");
        private final Drawable selection = VisUI.getSkin().getDrawable("list-selection");

        public TestAdapter (Array<Model> array) {
            super(array);
            setSelectionMode(SelectionMode.SINGLE);

            // NOTE(review): toLowerCase() without an explicit Locale — locale-sensitive.
            setItemsSorter(new Comparator<Model>() {
                @Override
                public int compare (Model o1, Model o2) {
                    return o1.name.toLowerCase().compareTo(o2.name.toLowerCase());
                }
            });
        }

        @Override
        protected VisTable createView (Model item) {
            VisLabel label = new VisLabel(item.name);
            label.setColor(item.color);

            VisTable table = new VisTable();
            table.left();
            table.add(label);
            return table;
        }

        @Override
        protected void updateView (VisTable view, Model item) {
            super.updateView(view, item);
        }

        @Override
        protected void selectView (VisTable view) {
            view.setBackground(selection);
        }

        @Override
        protected void deselectView (VisTable view) {
            view.setBackground(bg);
        }
    }
}
| apache-2.0 |
GeLiXin/hadoop | hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java | 22428 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.container.ozoneimpl;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.container.ContainerTestHelper;
import org.apache.hadoop.hdds.scm.TestUtils;
import org.apache.hadoop.hdds.scm.XceiverClient;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.junit.rules.Timeout;
import java.util.*;
import java.util.concurrent.CompletableFuture;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
/**
* Tests ozone containers.
*/
public class TestOzoneContainer {
/**
* Set the timeout for every test.
*/
@Rule
public Timeout testTimeout = new Timeout(300000);
@Rule
public TemporaryFolder tempFolder = new TemporaryFolder();
/**
 * Starts an OzoneContainer by hand (not via the datanode) against a
 * single-node pipeline and verifies that a container can be created
 * through an XceiverClient connection.
 */
@Test
public void testCreateOzoneContainer() throws Exception {
  long containerID = ContainerTestHelper.getTestContainerID();
  OzoneConfiguration conf = newOzoneConfiguration();
  OzoneContainer container = null;
  MiniOzoneCluster cluster = null;
  try {
    cluster = MiniOzoneCluster.newBuilder(conf).build();
    cluster.waitForClusterToBeReady();
    // We don't start Ozone Container via data node, we will do it
    // independently in our test path.
    Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline();
    conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.getRoot().getPath());
    // Pin the IPC port to the pipeline leader's standalone port so the
    // client below can reach the manually started container.
    conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, pipeline.getLeader()
        .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
    conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false);
    container = new OzoneContainer(TestUtils.randomDatanodeDetails(),
        conf, null);
    //Setting scmId, as we start manually ozone container.
    container.getDispatcher().setScmId(UUID.randomUUID().toString());
    container.start();
    XceiverClient client = new XceiverClient(pipeline, conf);
    client.connect();
    createContainerForTesting(client, containerID);
  } finally {
    if (container != null) {
      container.stop();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
/**
 * Creates a fresh OzoneConfiguration with local storage rooted under this
 * test class's directory.
 */
static OzoneConfiguration newOzoneConfiguration() {
  final OzoneConfiguration conf = new OzoneConfiguration();
  ContainerTestHelper.setOzoneLocalStorageRoot(
      TestOzoneContainer.class, conf);
  return conf;
}
/**
 * Runs the full container protocol exercise against an OzoneContainer that
 * was started by the datanode itself (contrast with testCreateOzoneContainer).
 */
@Test
public void testOzoneContainerViaDataNode() throws Exception {
  MiniOzoneCluster cluster = null;
  try {
    long containerID =
        ContainerTestHelper.getTestContainerID();
    OzoneConfiguration conf = newOzoneConfiguration();

    // Start ozone container Via Datanode create.
    Pipeline pipeline =
        ContainerTestHelper.createSingleNodePipeline();
    conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
        pipeline.getLeader()
            .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());

    cluster = MiniOzoneCluster.newBuilder(conf)
        .setRandomContainerPort(false)
        .build();
    cluster.waitForClusterToBeReady();

    // This client talks to ozone container via datanode.
    XceiverClient client = new XceiverClient(pipeline, conf);

    runTestOzoneContainerViaDataNode(containerID, client);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
/**
 * Exercises the container protocol end-to-end through {@code client}: create
 * container, write/read chunk, put/get/delete key, delete chunk, then update
 * an existing and a non-existing container.
 *
 * <p>Bug fix: the assertion after the first update-container call previously
 * checked the stale {@code response} left over from the delete-chunk request
 * instead of {@code updateResponse1}, so a failed container update passed
 * silently.
 */
static void runTestOzoneContainerViaDataNode(
    long testContainerID, XceiverClientSpi client) throws Exception {
  ContainerProtos.ContainerCommandRequestProto
      request, writeChunkRequest, putKeyRequest,
      updateRequest1, updateRequest2;
  ContainerProtos.ContainerCommandResponseProto response,
      updateResponse1, updateResponse2;
  try {
    client.connect();

    Pipeline pipeline = client.getPipeline();
    createContainerForTesting(client, testContainerID);
    writeChunkRequest = writeChunkForContainer(client, testContainerID, 1024);

    // Read Chunk
    request = ContainerTestHelper.getReadChunkRequest(pipeline,
        writeChunkRequest.getWriteChunk());
    response = client.sendCommand(request);
    Assert.assertNotNull(response);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
    Assert.assertEquals(request.getTraceID(), response.getTraceID());

    // Put Key
    putKeyRequest = ContainerTestHelper.getPutKeyRequest(pipeline,
        writeChunkRequest.getWriteChunk());
    response = client.sendCommand(putKeyRequest);
    Assert.assertNotNull(response);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
    Assert.assertEquals(putKeyRequest.getTraceID(), response.getTraceID());

    // Get Key
    request = ContainerTestHelper.getKeyRequest(pipeline,
        putKeyRequest.getPutKey());
    response = client.sendCommand(request);
    int chunksCount = putKeyRequest.getPutKey().getKeyData().getChunksCount();
    ContainerTestHelper.verifyGetKey(request, response, chunksCount);

    // Delete Key
    request = ContainerTestHelper.getDeleteKeyRequest(pipeline,
        putKeyRequest.getPutKey());
    response = client.sendCommand(request);
    Assert.assertNotNull(response);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
    Assert.assertEquals(request.getTraceID(), response.getTraceID());

    // Delete Chunk
    request = ContainerTestHelper.getDeleteChunkRequest(pipeline,
        writeChunkRequest.getWriteChunk());
    response = client.sendCommand(request);
    Assert.assertNotNull(response);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
    Assert.assertEquals(request.getTraceID(), response.getTraceID());

    // Update an existing container
    Map<String, String> containerUpdate = new HashMap<String, String>();
    containerUpdate.put("container_updated_key", "container_updated_value");
    updateRequest1 = ContainerTestHelper.getUpdateContainerRequest(
        testContainerID, containerUpdate);
    updateResponse1 = client.sendCommand(updateRequest1);
    Assert.assertNotNull(updateResponse1);
    // FIX: assert on updateResponse1, not the stale 'response' from above.
    Assert.assertEquals(ContainerProtos.Result.SUCCESS,
        updateResponse1.getResult());

    // Update a non-existing container must report CONTAINER_NOT_FOUND.
    long nonExistingContinerID =
        ContainerTestHelper.getTestContainerID();
    updateRequest2 = ContainerTestHelper.getUpdateContainerRequest(
        nonExistingContinerID, containerUpdate);
    updateResponse2 = client.sendCommand(updateRequest2);
    Assert.assertEquals(ContainerProtos.Result.CONTAINER_NOT_FOUND,
        updateResponse2.getResult());
  } finally {
    if (client != null) {
      client.close();
    }
  }
}
/**
 * Boots a mini cluster and verifies the small-file write/read round trip
 * (see runTestBothGetandPutSmallFile for the actual protocol exchange).
 */
@Test
public void testBothGetandPutSmallFile() throws Exception {
  MiniOzoneCluster cluster = null;
  XceiverClient client = null;
  try {
    OzoneConfiguration conf = newOzoneConfiguration();

    client = createClientForTesting(conf);
    cluster = MiniOzoneCluster.newBuilder(conf)
        .setRandomContainerPort(false)
        .build();
    cluster.waitForClusterToBeReady();
    long containerID = ContainerTestHelper.getTestContainerID();

    runTestBothGetandPutSmallFile(containerID, client);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
/**
 * Writes a 1024-byte small file into a fresh container and reads it back,
 * asserting that the returned payload is byte-identical to what was written.
 */
static void runTestBothGetandPutSmallFile(
    long containerID, XceiverClientSpi client) throws Exception {
  try {
    client.connect();

    createContainerForTesting(client, containerID);

    BlockID blockId = ContainerTestHelper.getTestBlockID(containerID);
    final ContainerProtos.ContainerCommandRequestProto smallFileRequest
        = ContainerTestHelper.getWriteSmallFileRequest(
        client.getPipeline(), blockId, 1024);
    ContainerProtos.ContainerCommandResponseProto response
        = client.sendCommand(smallFileRequest);
    Assert.assertNotNull(response);
    Assert.assertTrue(smallFileRequest.getTraceID()
        .equals(response.getTraceID()));

    // Read the file back using the key returned by the write.
    final ContainerProtos.ContainerCommandRequestProto getSmallFileRequest
        = ContainerTestHelper.getReadSmallFileRequest(client.getPipeline(),
        smallFileRequest.getPutSmallFile().getKey());
    response = client.sendCommand(getSmallFileRequest);
    // Payload must round-trip unchanged.
    Assert.assertArrayEquals(
        smallFileRequest.getPutSmallFile().getData().toByteArray(),
        response.getGetSmallFile().getData().getData().toByteArray());
  } finally {
    if (client != null) {
      client.close();
    }
  }
}
/**
 * Verifies the closed-container contract: after CloseContainer, all write
 * operations (write chunk, put key, delete key) fail with
 * CLOSED_CONTAINER_IO while reads (read chunk, get key) still succeed.
 */
@Test
public void testCloseContainer() throws Exception {
  MiniOzoneCluster cluster = null;
  XceiverClient client = null;
  ContainerProtos.ContainerCommandResponseProto response;
  ContainerProtos.ContainerCommandRequestProto
      writeChunkRequest, putKeyRequest, request;
  try {

    OzoneConfiguration conf = newOzoneConfiguration();

    client = createClientForTesting(conf);
    cluster = MiniOzoneCluster.newBuilder(conf)
        .setRandomContainerPort(false)
        .build();
    cluster.waitForClusterToBeReady();
    client.connect();

    long containerID = ContainerTestHelper.getTestContainerID();
    createContainerForTesting(client, containerID);
    writeChunkRequest = writeChunkForContainer(client, containerID, 1024);


    putKeyRequest = ContainerTestHelper.getPutKeyRequest(client.getPipeline(),
        writeChunkRequest.getWriteChunk());
    // Put key before closing.
    response = client.sendCommand(putKeyRequest);
    Assert.assertNotNull(response);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS,
        response.getResult());
    Assert.assertTrue(
        putKeyRequest.getTraceID().equals(response.getTraceID()));

    // Close the container.
    request = ContainerTestHelper.getCloseContainer(
        client.getPipeline(), containerID);
    response = client.sendCommand(request);
    Assert.assertNotNull(response);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
    Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));


    // Assert that none of the write operations are working after close.

    // Write chunks should fail now.

    response = client.sendCommand(writeChunkRequest);
    Assert.assertNotNull(response);
    Assert.assertEquals(ContainerProtos.Result.CLOSED_CONTAINER_IO,
        response.getResult());
    Assert.assertTrue(
        writeChunkRequest.getTraceID().equals(response.getTraceID()));

    // Read chunk must work on a closed container.
    request = ContainerTestHelper.getReadChunkRequest(client.getPipeline(),
        writeChunkRequest.getWriteChunk());
    response = client.sendCommand(request);
    Assert.assertNotNull(response);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
    Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));

    // Put key will fail on a closed container.
    response = client.sendCommand(putKeyRequest);
    Assert.assertNotNull(response);
    Assert.assertEquals(ContainerProtos.Result.CLOSED_CONTAINER_IO,
        response.getResult());
    Assert
        .assertTrue(putKeyRequest.getTraceID().equals(response.getTraceID()));

    // Get key must work on the closed container.
    request = ContainerTestHelper.getKeyRequest(client.getPipeline(),
        putKeyRequest.getPutKey());
    response = client.sendCommand(request);
    int chunksCount = putKeyRequest.getPutKey().getKeyData().getChunksCount();
    ContainerTestHelper.verifyGetKey(request, response, chunksCount);

    // Delete Key must fail on a closed container.
    request =
        ContainerTestHelper.getDeleteKeyRequest(client.getPipeline(),
            putKeyRequest.getPutKey());
    response = client.sendCommand(request);
    Assert.assertNotNull(response);
    Assert.assertEquals(ContainerProtos.Result.CLOSED_CONTAINER_IO,
        response.getResult());
    Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
  } finally {
    if (client != null) {
      client.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
/**
 * Verifies container delete semantics: force-delete is rejected while the
 * container is open, plain delete is rejected while it is non-empty, and
 * force-delete succeeds once the container is closed.
 */
@Test
public void testDeleteContainer() throws Exception {
  MiniOzoneCluster cluster = null;
  XceiverClient client = null;
  ContainerProtos.ContainerCommandResponseProto response;
  ContainerProtos.ContainerCommandRequestProto request,
      writeChunkRequest, putKeyRequest;
  try {
    OzoneConfiguration conf = newOzoneConfiguration();
    client = createClientForTesting(conf);
    cluster = MiniOzoneCluster.newBuilder(conf)
        .setRandomContainerPort(false)
        .build();
    cluster.waitForClusterToBeReady();
    client.connect();

    long containerID = ContainerTestHelper.getTestContainerID();
    createContainerForTesting(client, containerID);
    writeChunkRequest = writeChunkForContainer(client, containerID, 1024);
    putKeyRequest = ContainerTestHelper.getPutKeyRequest(client.getPipeline(),
        writeChunkRequest.getWriteChunk());

    // Put key before deleting.
    response = client.sendCommand(putKeyRequest);
    Assert.assertNotNull(response);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS,
        response.getResult());
    // assertEquals (instead of assertTrue(a.equals(b))) reports both
    // values when the assertion fails.
    Assert.assertEquals(putKeyRequest.getTraceID(), response.getTraceID());

    // Container cannot be deleted forcibly because
    // the container is not closed.
    request = ContainerTestHelper.getDeleteContainer(
        client.getPipeline(), containerID, true);
    response = client.sendCommand(request);
    Assert.assertNotNull(response);
    Assert.assertEquals(ContainerProtos.Result.DELETE_ON_OPEN_CONTAINER,
        response.getResult());
    Assert.assertEquals(request.getTraceID(), response.getTraceID());

    // Close the container.
    request = ContainerTestHelper.getCloseContainer(
        client.getPipeline(), containerID);
    response = client.sendCommand(request);
    Assert.assertNotNull(response);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
    Assert.assertEquals(request.getTraceID(), response.getTraceID());

    // Container cannot be deleted because the container is not empty.
    request = ContainerTestHelper.getDeleteContainer(
        client.getPipeline(), containerID, false);
    response = client.sendCommand(request);
    Assert.assertNotNull(response);
    Assert.assertEquals(ContainerProtos.Result.ERROR_CONTAINER_NOT_EMPTY,
        response.getResult());
    Assert.assertEquals(request.getTraceID(), response.getTraceID());

    // Container can be deleted forcibly because
    // it is closed and non-empty.
    request = ContainerTestHelper.getDeleteContainer(
        client.getPipeline(), containerID, true);
    response = client.sendCommand(request);
    Assert.assertNotNull(response);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS,
        response.getResult());
    Assert.assertEquals(request.getTraceID(), response.getTraceID());
  } finally {
    if (client != null) {
      client.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
/**
 * Runs a set of commands as async calls and verifies that all of them
 * complete. Fires {@code requestCount} WriteSmallFile requests without
 * waiting on individual responses, then joins on the combined future.
 * Always closes the client, even on failure.
 */
static void runAsyncTests(
    long containerID, XceiverClientSpi client) throws Exception {
  try {
    client.connect();
    createContainerForTesting(client, containerID);
    // Parameterized futures (the original used raw CompletableFuture,
    // producing unchecked warnings and hiding the payload type).
    final List<CompletableFuture<ContainerProtos.ContainerCommandResponseProto>>
        computeResults = new LinkedList<>();
    final int requestCount = 1000;
    // Create a bunch of Async calls from this test.
    for (int x = 0; x < requestCount; x++) {
      BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
      final ContainerProtos.ContainerCommandRequestProto smallFileRequest
          = ContainerTestHelper.getWriteSmallFileRequest(
              client.getPipeline(), blockID, 1024);
      computeResults.add(client.sendCommandAsync(smallFileRequest));
    }
    CompletableFuture<Void> combinedFuture =
        CompletableFuture.allOf(
            computeResults.toArray(new CompletableFuture[0]));
    // Wait for all futures to complete.
    combinedFuture.get();
    // Assert that all futures are indeed done.
    for (CompletableFuture<ContainerProtos.ContainerCommandResponseProto>
        future : computeResults) {
      Assert.assertTrue(future.isDone());
    }
  } finally {
    if (client != null) {
      client.close();
    }
  }
}
// NOTE(review): "Xciever" is a typo for "Xceiver"; the name is kept as-is
// because renaming a @Test method changes its reported identity.
// Exercises the async command path by delegating to runAsyncTests().
@Test
public void testXcieverClientAsync() throws Exception {
MiniOzoneCluster cluster = null;
XceiverClient client = null;
try {
OzoneConfiguration conf = newOzoneConfiguration();
client = createClientForTesting(conf);
cluster = MiniOzoneCluster.newBuilder(conf)
.setRandomContainerPort(false)
.build();
cluster.waitForClusterToBeReady();
long containerID = ContainerTestHelper.getTestContainerID();
// runAsyncTests() closes the client in its own finally block, which is
// why this method's finally only shuts down the cluster.
// NOTE(review): if an exception is thrown BEFORE runAsyncTests() is
// reached (e.g. the cluster build fails), the client is never closed.
// Confirm whether XceiverClient.close() is idempotent; if so, a guarded
// close should be added here as well.
runAsyncTests(containerID, client);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
 * Sending a request without a trace ID must be rejected with an
 * IllegalArgumentException mentioning "Invalid trace ID".
 */
@Test
public void testInvalidRequest() throws Exception {
  MiniOzoneCluster cluster = null;
  // Initialized to null so the finally block can close it only if it was
  // created. FIX: the original never closed this client on any code path
  // (resource leak).
  XceiverClient client = null;
  ContainerProtos.ContainerCommandRequestProto request;
  try {
    OzoneConfiguration conf = newOzoneConfiguration();
    client = createClientForTesting(conf);
    cluster = MiniOzoneCluster.newBuilder(conf)
        .setRandomContainerPort(false)
        .build();
    cluster.waitForClusterToBeReady();
    client.connect();
    // Send a request without traceId.
    long containerID = ContainerTestHelper.getTestContainerID();
    request = ContainerTestHelper
        .getRequestWithoutTraceId(client.getPipeline(), containerID);
    client.sendCommand(request);
    Assert.fail("IllegalArgumentException expected");
  } catch (IllegalArgumentException iae) {
    GenericTestUtils.assertExceptionContains("Invalid trace ID", iae);
  } finally {
    if (client != null) {
      client.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
/**
 * Builds an {@link XceiverClient} wired to a fresh single-node pipeline,
 * pointing the container IPC port at the pipeline leader's STANDALONE port.
 *
 * @param conf configuration to mutate (the IPC port is written into it)
 * @return a client that talks to the ozone container via the datanode
 */
private static XceiverClient createClientForTesting(OzoneConfiguration conf)
    throws Exception {
  // Start ozone container via datanode create.
  final Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline();
  final int ipcPort = pipeline.getLeader()
      .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue();
  conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, ipcPort);
  return new XceiverClient(pipeline, conf);
}
/**
 * Creates a container with the given ID via the (already connected) client
 * and verifies the response echoes the request's trace ID.
 */
private static void createContainerForTesting(XceiverClientSpi client,
    long containerID) throws Exception {
  // Create container
  ContainerProtos.ContainerCommandRequestProto request =
      ContainerTestHelper.getCreateContainerRequest(
          containerID, client.getPipeline());
  ContainerProtos.ContainerCommandResponseProto response =
      client.sendCommand(request);
  Assert.assertNotNull(response);
  // assertEquals reports both trace IDs on failure, unlike assertTrue(equals).
  Assert.assertEquals(request.getTraceID(), response.getTraceID());
}
/**
 * Writes one chunk of {@code dataLen} bytes to the given container and
 * asserts success, returning the request so callers can reuse its
 * WriteChunk payload (e.g. to build PutKey/ReadChunk requests).
 */
private static ContainerProtos.ContainerCommandRequestProto
    writeChunkForContainer(XceiverClientSpi client,
    long containerID, int dataLen) throws Exception {
  // Write Chunk (stray double semicolon removed).
  BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
  ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
      ContainerTestHelper.getWriteChunkRequest(client.getPipeline(),
          blockID, dataLen);
  ContainerProtos.ContainerCommandResponseProto response =
      client.sendCommand(writeChunkRequest);
  Assert.assertNotNull(response);
  Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
  // BUG FIX: the original asserted
  // response.getTraceID().equals(response.getTraceID()) — comparing the
  // response with itself, which is always true. Compare against the
  // request's trace ID instead.
  Assert.assertEquals(writeChunkRequest.getTraceID(), response.getTraceID());
  return writeChunkRequest;
}
/**
 * Creates a container and sends a WriteSmallFile request, verifying the
 * response echoes the request's trace ID. Always closes the client.
 */
static void runRequestWithoutTraceId(
    long containerID, XceiverClientSpi client) throws Exception {
  try {
    client.connect();
    createContainerForTesting(client, containerID);
    BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
    final ContainerProtos.ContainerCommandRequestProto smallFileRequest
        = ContainerTestHelper.getWriteSmallFileRequest(
            client.getPipeline(), blockID, 1024);
    ContainerProtos.ContainerCommandResponseProto response
        = client.sendCommand(smallFileRequest);
    Assert.assertNotNull(response);
    // assertEquals reports both trace IDs on failure, unlike assertTrue(equals).
    Assert.assertEquals(smallFileRequest.getTraceID(), response.getTraceID());
  } finally {
    if (client != null) {
      client.close();
    }
  }
}
}
| apache-2.0 |
EvilMcJerkface/presto | presto-spark-launcher/src/main/java/com/facebook/presto/spark/launcher/PrestoSparkLauncherCommand.java | 3349 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.spark.launcher;
import com.facebook.presto.spark.classloader_interface.PrestoSparkConfInitializer;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import io.airlift.airline.Command;
import io.airlift.airline.HelpOption;
import org.apache.spark.SparkConf;
import org.apache.spark.SparkContext;
import javax.inject.Inject;
import java.io.File;
import java.util.Optional;
import static com.facebook.presto.spark.launcher.LauncherUtils.checkFile;
import static com.facebook.presto.spark.launcher.LauncherUtils.loadCatalogProperties;
import static com.facebook.presto.spark.launcher.LauncherUtils.loadProperties;
import static com.facebook.presto.spark.launcher.LauncherUtils.readFileUtf8;
@Command(name = "presto-spark-launcher", description = "Presto on Spark launcher")
public class PrestoSparkLauncherCommand
{
// Fields are populated by the airline framework after command-line parsing.
@Inject
public HelpOption helpOption;
@Inject
public PrestoSparkVersionOption versionOption = new PrestoSparkVersionOption();
@Inject
public PrestoSparkClientOptions clientOptions = new PrestoSparkClientOptions();
/**
 * Launches a single Presto-on-Spark query: creates the SparkContext,
 * deploys the Presto package tarball to the cluster, assembles the
 * distribution from config and catalog properties, reads the query text
 * from the file supplied on the command line, and executes it via
 * PrestoSparkRunner.
 */
public void run()
{
SparkConf sparkConfiguration = new SparkConf()
.setAppName("Presto");
// Presto-specific Spark conf entries must be registered before the
// SparkContext is created.
PrestoSparkConfInitializer.initialize(sparkConfiguration);
SparkContext sparkContext = new SparkContext(sparkConfiguration);
// Ship the Presto package tarball (from --package-path) to the cluster.
TargzBasedPackageSupplier packageSupplier = new TargzBasedPackageSupplier(new File(clientOptions.packagePath));
packageSupplier.deploy(sparkContext);
PrestoSparkDistribution distribution = new PrestoSparkDistribution(
sparkContext,
packageSupplier,
loadProperties(checkFile(new File(clientOptions.config))),
loadCatalogProperties(new File(clientOptions.catalogs)),
Optional.empty(),
Optional.empty(),
Optional.empty(),
Optional.empty());
// Query text is read as UTF-8; checkFile fails fast on a missing file.
String query = readFileUtf8(checkFile(new File(clientOptions.file)));
// try-with-resources closes the runner. NOTE(review): the SparkContext is
// never stopped explicitly — presumably torn down on JVM exit for this
// one-shot launcher; confirm that is intentional.
try (PrestoSparkRunner runner = new PrestoSparkRunner(distribution)) {
runner.run(
"test",
Optional.empty(),
ImmutableMap.of(),
clientOptions.catalog,
clientOptions.schema,
Optional.empty(),
Optional.empty(),
Optional.empty(),
ImmutableSet.of(),
ImmutableMap.of(),
ImmutableMap.of(),
Optional.empty(),
query,
Optional.empty(),
Optional.empty(),
Optional.empty());
}
}
}
| apache-2.0 |
shopzilla/hadoop-in-a-box | hadoop-repl/src/main/java/com/shopzilla/hadoop/repl/commands/Command.java | 1396 | /**
* Copyright 2012 Shopzilla.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* http://tech.shopzilla.com
*
*/
package com.shopzilla.hadoop.repl.commands;
import com.shopzilla.hadoop.repl.REPL;
import com.shopzilla.hadoop.repl.SessionState;
/**
* @author Jeremy Lucas
* @since 4/11/13
*/
public interface Command {
/**
 * Executes this command.
 *
 * @param call the command invocation to execute
 * @param sessionState the current REPL session state
 * @throws REPL.ExitSignal to request that the REPL terminate
 */
void execute(final CommandInvocation call, final SessionState sessionState) throws REPL.ExitSignal;
/**
 * Returns the usage/help descriptor for this command.
 *
 * @param sessionState the current REPL session state
 * @return the usage descriptor
 */
Usage usage(final SessionState sessionState);
/**
 * Value object describing how a command is used: its name, a
 * human-readable description, and the names of its arguments.
 */
public static class Usage {
public final String command;
public final String description;
public final String[] arguments;
// NOTE(review): the varargs array is stored without a defensive copy, so
// the caller and this object share the array; acceptable for
// REPL-internal, effectively-constant usage metadata.
public Usage(final String command, final String description, final String... arguments) {
this.command = command;
this.description = description;
this.arguments = arguments;
}
}
}
| apache-2.0 |
StackVista/tephra | tephra-core/src/test/java/co/cask/tephra/TransactionServiceMainTest.java | 2209 | /*
* Copyright © 2014 Cask Data, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package co.cask.tephra;
import co.cask.tephra.distributed.TransactionServiceClient;
import com.google.common.base.Throwables;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.twill.internal.zookeeper.InMemoryZKServer;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
/**
* Test for verifying TransactionServiceMain works correctly.
*/
public class TransactionServiceMainTest {
// Temp dir for the in-memory ZooKeeper data and tx snapshots; JUnit deletes it.
@ClassRule
public static TemporaryFolder tmpFolder = new TemporaryFolder();
/**
 * Starts TransactionServiceMain on a background thread against an
 * in-memory ZooKeeper, then connects with TransactionServiceClient to
 * verify the client/server round trip works end to end.
 */
@Test
public void testClientServer() throws Exception {
// Simply start a transaction server and connect to it with the client.
InMemoryZKServer zkServer = InMemoryZKServer.builder().setDataDir(tmpFolder.newFolder()).build();
zkServer.startAndWait();
try {
Configuration conf = HBaseConfiguration.create();
conf.set(TxConstants.Service.CFG_DATA_TX_ZOOKEEPER_QUORUM, zkServer.getConnectionStr());
conf.set(TxConstants.Manager.CFG_TX_SNAPSHOT_DIR, tmpFolder.newFolder().getAbsolutePath());
final TransactionServiceMain main = new TransactionServiceMain(conf);
// The server runs on its own thread; checked exceptions are rethrown
// as unchecked because Runnable.run() cannot declare them.
Thread t = new Thread() {
@Override
public void run() {
try {
main.start();
} catch (Exception e) {
throw Throwables.propagate(e);
}
}
};
try {
t.start();
// Runs the client against the server started above.
TransactionServiceClient.doMain(true, conf);
} finally {
// Stop the server so start() returns on the background thread, then join.
// NOTE(review): join() has no timeout — if stop() ever failed to unblock
// start(), this test would hang; confirm stop() always terminates start().
main.stop();
t.join();
}
} finally {
zkServer.stopAndWait();
}
}
}
| apache-2.0 |
aws/aws-sdk-java | aws-java-sdk-snowdevicemanagement/src/main/java/com/amazonaws/services/snowdevicemanagement/model/transform/ServiceQuotaExceededExceptionUnmarshaller.java | 3026 | /*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.snowdevicemanagement.model.transform;
import java.math.*;
import javax.annotation.Generated;
import com.amazonaws.services.snowdevicemanagement.model.*;
import com.amazonaws.transform.SimpleTypeJsonUnmarshallers.*;
import com.amazonaws.transform.*;
import com.fasterxml.jackson.core.JsonToken;
import static com.fasterxml.jackson.core.JsonToken.*;
/**
* ServiceQuotaExceededException JSON Unmarshaller
*/
// NOTE: this class is produced by the AWS SDK code generator (the file header
// warns against modification); the comments below are review annotations only.
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class ServiceQuotaExceededExceptionUnmarshaller extends EnhancedJsonErrorUnmarshaller {
// Singleton; obtain via getInstance().
private ServiceQuotaExceededExceptionUnmarshaller() {
super(com.amazonaws.services.snowdevicemanagement.model.ServiceQuotaExceededException.class, "ServiceQuotaExceededException");
}
/**
 * Builds a ServiceQuotaExceededException from the JSON error body. The
 * exception has no modeled members, so the loop below only advances the
 * parser past this object's JSON — tracking depth so nested structures are
 * skipped — without extracting any fields.
 */
@Override
public com.amazonaws.services.snowdevicemanagement.model.ServiceQuotaExceededException unmarshallFromContext(JsonUnmarshallerContext context)
throws Exception {
com.amazonaws.services.snowdevicemanagement.model.ServiceQuotaExceededException serviceQuotaExceededException = new com.amazonaws.services.snowdevicemanagement.model.ServiceQuotaExceededException(
null);
int originalDepth = context.getCurrentDepth();
String currentParentElement = context.getCurrentParentElement();
// targetDepth is emitted by the generator but unused for memberless shapes.
int targetDepth = originalDepth + 1;
JsonToken token = context.getCurrentToken();
if (token == null)
token = context.nextToken();
// A JSON null means there is no error body to unmarshall.
if (token == VALUE_NULL) {
return null;
}
while (true) {
if (token == null)
break;
if (token == FIELD_NAME || token == START_OBJECT) {
// No modeled members to read; keep scanning.
} else if (token == END_ARRAY || token == END_OBJECT) {
if (context.getLastParsedParentElement() == null || context.getLastParsedParentElement().equals(currentParentElement)) {
// Back at (or above) the starting depth: done with this object.
if (context.getCurrentDepth() <= originalDepth)
break;
}
}
token = context.nextToken();
}
return serviceQuotaExceededException;
}
private static ServiceQuotaExceededExceptionUnmarshaller instance;
// NOTE(review): lazy init is unsynchronized; benign because construction is
// cheap and idempotent, but not strictly thread-safe (generated pattern).
public static ServiceQuotaExceededExceptionUnmarshaller getInstance() {
if (instance == null)
instance = new ServiceQuotaExceededExceptionUnmarshaller();
return instance;
}
}
| apache-2.0 |
prime-framework/prime-mvc | src/main/java/org/primeframework/mvc/guice/Nullable.java | 1004 | /*
* Copyright (c) 2012, Inversoft Inc., All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the License.
*/
package org.primeframework.mvc.guice;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Marks a parameter or field as allowed to be {@code null} for Guice
 * injection. Retained at runtime so the injector can inspect it.
 *
 * @author Brian Pontarelli
 */
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.PARAMETER, ElementType.FIELD})
public @interface Nullable {
}
| apache-2.0 |
GoogleCloudPlatform/gcp-token-broker | code/extensions/caching/cloud-datastore/src/main/java/com/google/cloud/broker/caching/remote/DatastoreCacheCleanup.java | 1307 | // Copyright 2020 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.cloud.broker.caching.remote;
import java.lang.invoke.MethodHandles;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Command-line tool that deletes expired entries from the Cloud Datastore
 * cache and logs how many items were removed.
 */
public class DatastoreCacheCleanup {

  private static final Class<?> klass = MethodHandles.lookup().lookupClass();
  private static final Logger logger = LoggerFactory.getLogger(klass);

  /**
   * Entry point. An optional first argument caps how many expired items are
   * deleted; with no argument a {@code null} limit is passed through to the
   * cache (behavior of the "no limit" case is defined by the cache).
   */
  public static void main(String[] args) {
    Integer limit = args.length > 0 ? Integer.parseInt(args[0]) : null;
    CloudDatastoreCache cache = new CloudDatastoreCache();
    int deleted = cache.deleteExpiredItems(limit);
    logger.info(klass.getSimpleName() + " - Deleted expired item(s): " + deleted);
  }
}
| apache-2.0 |
joel-costigliola/assertj-core | src/main/java/org/assertj/core/internal/Files.java | 27343 | /*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* Copyright 2012-2020 the original author or authors.
*/
package org.assertj.core.internal;
import static java.lang.String.format;
import static java.nio.file.Files.readAllBytes;
import static java.util.Comparator.comparing;
import static java.util.Objects.requireNonNull;
import static java.util.stream.Collectors.toList;
import static org.assertj.core.error.ShouldBeAbsolutePath.shouldBeAbsolutePath;
import static org.assertj.core.error.ShouldBeDirectory.shouldBeDirectory;
import static org.assertj.core.error.ShouldBeEmpty.shouldBeEmpty;
import static org.assertj.core.error.ShouldBeEmptyDirectory.shouldBeEmptyDirectory;
import static org.assertj.core.error.ShouldBeFile.shouldBeFile;
import static org.assertj.core.error.ShouldBeReadable.shouldBeReadable;
import static org.assertj.core.error.ShouldBeRelativePath.shouldBeRelativePath;
import static org.assertj.core.error.ShouldBeWritable.shouldBeWritable;
import static org.assertj.core.error.ShouldContain.directoryShouldContain;
import static org.assertj.core.error.ShouldContainRecursively.directoryShouldContainRecursively;
import static org.assertj.core.error.ShouldExist.shouldExist;
import static org.assertj.core.error.ShouldHaveBinaryContent.shouldHaveBinaryContent;
import static org.assertj.core.error.ShouldHaveContent.shouldHaveContent;
import static org.assertj.core.error.ShouldHaveDigest.shouldHaveDigest;
import static org.assertj.core.error.ShouldHaveExtension.shouldHaveExtension;
import static org.assertj.core.error.ShouldHaveName.shouldHaveName;
import static org.assertj.core.error.ShouldHaveNoParent.shouldHaveNoParent;
import static org.assertj.core.error.ShouldHaveParent.shouldHaveParent;
import static org.assertj.core.error.ShouldHaveSameContent.shouldHaveSameContent;
import static org.assertj.core.error.ShouldHaveSize.shouldHaveSize;
import static org.assertj.core.error.ShouldNotBeEmpty.shouldNotBeEmpty;
import static org.assertj.core.error.ShouldNotContain.directoryShouldNotContain;
import static org.assertj.core.error.ShouldNotExist.shouldNotExist;
import static org.assertj.core.internal.Digests.digestDiff;
import static org.assertj.core.util.Lists.list;
import static org.assertj.core.util.Preconditions.checkArgument;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.UncheckedIOException;
import java.nio.charset.Charset;
import java.nio.charset.MalformedInputException;
import java.nio.file.Path;
import java.nio.file.PathMatcher;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.List;
import java.util.function.Predicate;
import java.util.stream.Stream;
import org.assertj.core.api.AssertionInfo;
import org.assertj.core.util.VisibleForTesting;
import org.assertj.core.util.diff.Delta;
/**
* Reusable assertions for <code>{@link File}</code>s.
*
* @author David DIDIER
* @author Yvonne Wang
* @author Alex Ruiz
* @author Olivier Demeijer
* @author Valeriy Vyrva
*/
public class Files {
private static final String UNABLE_TO_COMPARE_FILE_CONTENTS = "Unable to compare contents of files:<%s> and:<%s>";
private static final Files INSTANCE = new Files();
private static final Predicate<File> ANY = any -> true;
/**
 * Returns the singleton instance of this class.
 * <p>
 * State held by the instance (diff engines, failures factory) is only
 * mutated in tests (see the {@code @VisibleForTesting} fields), so sharing
 * a single instance is safe in normal use.
 * @return the singleton instance of this class.
 */
public static Files instance() {
return INSTANCE;
}
// Collaborators are package-visible and non-final so unit tests can swap in
// test doubles — hence the @VisibleForTesting markers.
// Line-based text diff engine used by the content assertions.
@VisibleForTesting
Diff diff = new Diff();
// Byte-level diff engine used by the binary content assertions.
@VisibleForTesting
BinaryDiff binaryDiff = new BinaryDiff();
// Factory for AssertionErrors honoring overridden error messages.
@VisibleForTesting
Failures failures = Failures.instance();
// Not meant to be instantiated directly; use instance().
@VisibleForTesting
Files() {}
/**
 * Asserts that the given files have same content. Adapted from <a
 * href="http://junit-addons.sourceforge.net/junitx/framework/FileAssert.html" target="_blank">FileAssert</a> (from <a
 * href="http://sourceforge.net/projects/junit-addons">JUnit-addons</a>.)
 * @param info contains information about the assertion.
 * @param actual the "actual" file.
 * @param actualCharset {@link Charset} of the "actual" file.
 * @param expected the "expected" file.
 * @param expectedCharset {@link Charset} of the "expected" file.
 * @throws NullPointerException if {@code expected} is {@code null}.
 * @throws IllegalArgumentException if {@code expected} is not an existing file.
 * @throws AssertionError if {@code actual} is {@code null}.
 * @throws AssertionError if {@code actual} is not an existing file.
 * @throws UncheckedIOException if an I/O error occurs.
 * @throws AssertionError if the given files do not have same content.
 */
public void assertSameContentAs(AssertionInfo info, File actual, Charset actualCharset, File expected,
Charset expectedCharset) {
verifyIsFile(expected);
assertIsFile(info, actual);
try {
// First attempt a line-based text comparison.
List<Delta<String>> diffs = diff.diff(actual, actualCharset, expected, expectedCharset);
if (diffs.isEmpty()) return;
throw failures.failure(info, shouldHaveSameContent(actual, expected, diffs));
} catch (MalformedInputException e) {
try {
// MalformedInputException is thrown by readLine() called in diff when
// the content is not valid text for the given charset. Fall back to a
// binary diff: if there is one, it shows the offset of the malformed input.
BinaryDiffResult binaryDiffResult = binaryDiff.diff(actual, readAllBytes(expected.toPath()));
if (binaryDiffResult.hasNoDiff()) {
// Rethrow into the inner catch so it is wrapped as UncheckedIOException:
// not throwing an error would be wrong as there was one in the first place.
throw e;
}
throw failures.failure(info, shouldHaveBinaryContent(actual, binaryDiffResult));
} catch (IOException ioe) {
throw new UncheckedIOException(format(UNABLE_TO_COMPARE_FILE_CONTENTS, actual, expected), ioe);
}
} catch (IOException e) {
throw new UncheckedIOException(format(UNABLE_TO_COMPARE_FILE_CONTENTS, actual, expected), e);
}
}
/**
 * Asserts that the given files are byte-for-byte identical.
 * @param info contains information about the assertion.
 * @param actual the "actual" file.
 * @param expected the "expected" file.
 * @throws NullPointerException if {@code expected} is {@code null}.
 * @throws IllegalArgumentException if {@code expected} is not an existing file.
 * @throws AssertionError if {@code actual} is {@code null}.
 * @throws AssertionError if {@code actual} is not an existing file.
 * @throws UncheckedIOException if an I/O error occurs.
 * @throws AssertionError if the given files do not have same content.
 */
public void assertSameBinaryContentAs(AssertionInfo info, File actual, File expected) {
  verifyIsFile(expected);
  assertIsFile(info, actual);
  try {
    byte[] expectedBytes = readAllBytes(expected.toPath());
    BinaryDiffResult result = binaryDiff.diff(actual, expectedBytes);
    if (result.hasDiff()) {
      throw failures.failure(info, shouldHaveBinaryContent(actual, result));
    }
  } catch (IOException ioe) {
    throw new UncheckedIOException(format(UNABLE_TO_COMPARE_FILE_CONTENTS, actual, expected), ioe);
  }
}
/**
 * Asserts that the given file's bytes are exactly the expected bytes.
 * @param info contains information about the assertion.
 * @param actual the "actual" file.
 * @param expected the "expected" binary content.
 * @throws NullPointerException if {@code expected} is {@code null}.
 * @throws AssertionError if {@code actual} is {@code null}.
 * @throws AssertionError if {@code actual} is not an existing file.
 * @throws UncheckedIOException if an I/O error occurs.
 * @throws AssertionError if the file does not have the binary content.
 */
public void assertHasBinaryContent(AssertionInfo info, File actual, byte[] expected) {
  requireNonNull(expected, "The binary content to compare to should not be null");
  assertIsFile(info, actual);
  try {
    BinaryDiffResult diffResult = binaryDiff.diff(actual, expected);
    if (diffResult.hasDiff()) {
      throw failures.failure(info, shouldHaveBinaryContent(actual, diffResult));
    }
  } catch (IOException e) {
    throw new UncheckedIOException(format("Unable to verify binary contents of file:<%s>", actual), e);
  }
}
/**
 * Asserts that the given file's length matches the expected size in bytes.
 * @param info contains information about the assertion.
 * @param actual the "actual" file.
 * @param expectedSizeInBytes the "expected" file size.
 * @throws AssertionError if {@code actual} is {@code null}.
 * @throws AssertionError if {@code actual} is not an existing file.
 */
public void assertHasSizeInBytes(AssertionInfo info, File actual, long expectedSizeInBytes) {
  assertIsFile(info, actual);
  long actualSize = actual.length();
  if (actualSize != expectedSizeInBytes) {
    throw failures.failure(info, shouldHaveSize(actual, expectedSizeInBytes));
  }
}
/**
 * Asserts that the given file, decoded with the given charset, has exactly
 * the expected text content.
 * @param info contains information about the assertion.
 * @param actual the "actual" file.
 * @param expected the "expected" text content.
 * @param charset the charset to use to read the file.
 * @throws NullPointerException if {@code expected} is {@code null}.
 * @throws AssertionError if {@code actual} is {@code null}.
 * @throws AssertionError if {@code actual} is not an existing file.
 * @throws UncheckedIOException if an I/O error occurs.
 * @throws AssertionError if the file does not have the text content.
 */
public void assertHasContent(AssertionInfo info, File actual, String expected, Charset charset) {
  requireNonNull(expected, "The text to compare to should not be null");
  assertIsFile(info, actual);
  try {
    List<Delta<String>> deltas = diff.diff(actual, expected, charset);
    if (!deltas.isEmpty()) {
      throw failures.failure(info, shouldHaveContent(actual, charset, deltas));
    }
  } catch (IOException e) {
    throw new UncheckedIOException(format("Unable to verify text contents of file:<%s>", actual), e);
  }
}
/**
 * Asserts that the given {@code File} exists and denotes a regular file.
 * @param info contains information about the assertion.
 * @param actual the given file.
 * @throws AssertionError if the given file is {@code null}.
 * @throws AssertionError if the given file is not an existing file.
 */
public void assertIsFile(AssertionInfo info, File actual) {
  assertNotNull(info, actual);
  if (!actual.isFile()) {
    throw failures.failure(info, shouldBeFile(actual));
  }
}
/**
 * Asserts that the given {@code File} exists and denotes a directory.
 * @param info contains information about the assertion.
 * @param actual the given file.
 * @throws AssertionError if the given file is {@code null}.
 * @throws AssertionError if the given file is not an existing directory.
 */
public void assertIsDirectory(AssertionInfo info, File actual) {
  assertNotNull(info, actual);
  if (!actual.isDirectory()) {
    throw failures.failure(info, shouldBeDirectory(actual));
  }
}
/**
 * Verifies that the given file denotes an absolute path.
 * @param info contains information about the assertion.
 * @param actual the given file.
 * @throws AssertionError if the given file is {@code null} or is not an absolute path.
 */
public void assertIsAbsolute(AssertionInfo info, File actual) {
  // isAbsolutePath performs the null check
  if (!isAbsolutePath(info, actual)) {
    throw failures.failure(info, shouldBeAbsolutePath(actual));
  }
}
/**
 * Verifies that the given file denotes a relative path.
 * @param info contains information about the assertion.
 * @param actual the given file.
 * @throws AssertionError if the given file is {@code null} or is not a relative path.
 */
public void assertIsRelative(AssertionInfo info, File actual) {
  // isAbsolutePath performs the null check
  if (isAbsolutePath(info, actual)) {
    throw failures.failure(info, shouldBeRelativePath(actual));
  }
}
/**
 * Verifies that the given file exists, whether it is a regular file or a directory.
 * @param info contains information about the assertion.
 * @param actual the given file.
 * @throws AssertionError if the given file is {@code null} or does not exist.
 */
public void assertExists(AssertionInfo info, File actual) {
  assertNotNull(info, actual);
  if (!actual.exists()) {
    throw failures.failure(info, shouldExist(actual));
  }
}
/**
 * Verifies that the given file does not exist on the filesystem.
 * @param info contains information about the assertion.
 * @param actual the given file.
 * @throws AssertionError if the given file is {@code null} or exists.
 */
public void assertDoesNotExist(AssertionInfo info, File actual) {
  assertNotNull(info, actual);
  if (actual.exists()) {
    throw failures.failure(info, shouldNotExist(actual));
  }
}
/**
 * Verifies that the application is allowed to modify the given file.
 * @param info contains information about the assertion.
 * @param actual the given file.
 * @throws AssertionError if the given file is {@code null} or can not be modified.
 */
public void assertCanWrite(AssertionInfo info, File actual) {
  assertNotNull(info, actual);
  if (!actual.canWrite()) {
    throw failures.failure(info, shouldBeWritable(actual));
  }
}
/**
 * Verifies that the given {@code File} is an existing regular file of size zero bytes.
 * @param info contains information about the assertion.
 * @param actual the given file.
 * @throws AssertionError if the given {@code File} is {@code null}, does not exist,
 *           or is not empty.
 */
public void assertIsEmptyFile(AssertionInfo info, File actual) {
  assertIsFile(info, actual);
  if (actual.length() != 0) {
    throw failures.failure(info, shouldBeEmpty(actual));
  }
}
/**
 * Verifies that the given {@code File} is an existing regular file whose size is
 * strictly greater than zero bytes.
 * @param info contains information about the assertion.
 * @param actual the given file.
 * @throws AssertionError if the given {@code File} is {@code null}, does not exist,
 *           or is empty.
 */
public void assertIsNotEmptyFile(AssertionInfo info, File actual) {
  assertIsFile(info, actual);
  // File.length() is never negative, so "not > 0" means exactly zero bytes
  if (actual.length() == 0) {
    throw failures.failure(info, shouldNotBeEmpty(actual));
  }
}
/**
 * Asserts that the given file can be read by the application.
 * @param info contains information about the assertion.
 * @param actual the given file.
 * @throws AssertionError if the given file is {@code null}.
 * @throws AssertionError if the given file can not be read.
 */
public void assertCanRead(AssertionInfo info, File actual) {
assertNotNull(info, actual);
if (actual.canRead()) return;
throw failures.failure(info, shouldBeReadable(actual));
}
/**
 * Verifies that the given {@code File} has the expected parent directory, comparing
 * the canonical forms of both parents.
 * @param info contains information about the assertion.
 * @param actual the given file.
 * @param expected the expected parent {@code File}.
 * @throws NullPointerException if the expected parent {@code File} is {@code null}.
 * @throws UncheckedIOException if resolving a canonical path fails.
 * @throws AssertionError if the given {@code File} is {@code null}, has no parent,
 *           or its parent is not equal to the expected one.
 */
public void assertHasParent(AssertionInfo info, File actual, File expected) {
  requireNonNull(expected, "The expected parent file should not be null.");
  assertNotNull(info, actual);
  File actualParent = actual.getParentFile();
  if (actualParent != null) {
    try {
      // compare canonical forms so symbolic links and relative segments are normalized
      if (expected.getCanonicalFile().equals(actualParent.getCanonicalFile())) {
        return;
      }
    } catch (IOException e) {
      throw new UncheckedIOException(format("Unable to get canonical form of [%s] or [%s].", actual, expected), e);
    }
  }
  throw failures.failure(info, shouldHaveParent(actual, expected));
}
/**
 * Verifies that the given {@code File} has the expected extension.
 *
 * @param info contains information about the assertion.
 * @param actual the given file.
 * @param expected the expected extension, it does not contain the {@code '.'}
 * @throws NullPointerException if the expected extension is {@code null}.
 * @throws AssertionError if the actual {@code File} is {@code null}, is not a
 *           regular file, or does not have the expected extension.
 */
public void assertHasExtension(AssertionInfo info, File actual, String expected) {
  requireNonNull(expected, "The expected extension should not be null.");
  assertIsFile(info, actual);
  String actualExtension = getFileExtension(actual);
  // actualExtension may be null (no dot in the name); equals() handles that safely
  if (!expected.equals(actualExtension)) {
    throw failures.failure(info, shouldHaveExtension(actual, actualExtension, expected));
  }
}
/**
 * Verifies that the given {@code File} has the expected file name.
 *
 * @param info contains information about the assertion.
 * @param actual the given file.
 * @param expected the expected file name.
 * @throws NullPointerException if the expected name is {@code null}.
 * @throws AssertionError if the actual {@code File} is {@code null} or does not
 *           have the expected name.
 */
public void assertHasName(AssertionInfo info, File actual, String expected) {
  // null-check order matters: the actual file is validated before the expected name
  assertNotNull(info, actual);
  requireNonNull(expected, "The expected name should not be null.");
  if (!expected.equals(actual.getName())) {
    throw failures.failure(info, shouldHaveName(actual, expected));
  }
}
/**
 * Verifies that the given {@code File} has no parent directory.
 *
 * @param info contains information about the assertion.
 * @param actual the given file.
 * @throws AssertionError if the actual {@code File} is {@code null} or has a parent.
 */
public void assertHasNoParent(AssertionInfo info, File actual) {
  assertNotNull(info, actual);
  if (actual.getParentFile() != null) {
    throw failures.failure(info, shouldHaveNoParent(actual));
  }
}
/**
 * Verifies that the digest of the file content, computed with the given algorithm,
 * matches the expected binary digest.
 * @throws NullPointerException if {@code digest} or {@code expected} is {@code null}.
 * @throws AssertionError if the file is null, missing, not a regular file, not
 *           readable, or its digest differs from the expected one.
 * @throws UncheckedIOException if reading the file fails.
 */
public void assertHasDigest(AssertionInfo info, File actual, MessageDigest digest, byte[] expected) {
  requireNonNull(digest, "The message digest algorithm should not be null");
  requireNonNull(expected, "The binary representation of digest to compare to should not be null");
  assertExists(info, actual);
  assertIsFile(info, actual);
  assertCanRead(info, actual);
  try (InputStream stream = nioFilesWrapper.newInputStream(actual.toPath())) {
    DigestDiff difference = digestDiff(stream, digest, expected);
    if (difference.digestsDiffer()) {
      throw failures.failure(info, shouldHaveDigest(actual, difference));
    }
  } catch (IOException e) {
    throw new UncheckedIOException(format("Unable to calculate digest of path:<%s>", actual), e);
  }
}
/**
 * Verifies that the digest of the file content matches the expected digest given
 * as a hexadecimal string. Delegates to the byte[] overload after decoding.
 * @throws NullPointerException if {@code expected} is {@code null}.
 */
public void assertHasDigest(AssertionInfo info, File actual, MessageDigest digest, String expected) {
requireNonNull(expected, "The string representation of digest to compare to should not be null");
assertHasDigest(info, actual, digest, Digests.fromHex(expected));
}
/**
 * Verifies the file digest using an algorithm given by name (e.g. "SHA-256").
 * @throws NullPointerException if {@code algorithm} is {@code null}.
 * @throws IllegalStateException if no digest implementation exists for the algorithm.
 */
public void assertHasDigest(AssertionInfo info, File actual, String algorithm, byte[] expected) {
  requireNonNull(algorithm, "The message digest algorithm should not be null");
  MessageDigest digest;
  try {
    digest = MessageDigest.getInstance(algorithm);
  } catch (NoSuchAlgorithmException e) {
    throw new IllegalStateException(format("Unable to find digest implementation for: <%s>", algorithm), e);
  }
  assertHasDigest(info, actual, digest, expected);
}
/**
 * Verifies the file digest using an algorithm name and an expected hexadecimal
 * digest string. Delegates after decoding the hex string.
 * @throws NullPointerException if {@code expected} is {@code null}.
 */
public void assertHasDigest(AssertionInfo info, File actual, String algorithm, String expected) {
requireNonNull(expected, "The string representation of digest to compare to should not be null");
assertHasDigest(info, actual, algorithm, Digests.fromHex(expected));
}
// Verifies that the given file is an existing directory with no entries.
public void assertIsEmptyDirectory(AssertionInfo info, File actual) {
  List<File> directoryItems = directoryContent(info, actual);
  if (!directoryItems.isEmpty()) {
    throw failures.failure(info, shouldBeEmptyDirectory(actual, directoryItems));
  }
}
// Verifies that the given file is an existing directory containing at least one entry.
public void assertIsNotEmptyDirectory(AssertionInfo info, File actual) {
  if (directoryContent(info, actual).isEmpty()) {
    throw failures.failure(info, shouldNotBeEmpty());
  }
}
// Verifies that the directory contains at least one entry matching the predicate.
public void assertIsDirectoryContaining(AssertionInfo info, File actual, Predicate<File> filter) {
  requireNonNull(filter, "The files filter should not be null");
  String filterPresentation = "the given filter";
  assertIsDirectoryContaining(info, actual, filter, filterPresentation);
}
// Verifies that the directory contains at least one entry matching the given
// PathMatcher pattern, e.g. "glob:*.txt".
public void assertIsDirectoryContaining(AssertionInfo info, File actual, String syntaxAndPattern) {
  requireNonNull(syntaxAndPattern, "The syntax and pattern should not be null");
  assertIsDirectoryContaining(info, actual,
                              fileMatcher(info, actual, syntaxAndPattern),
                              format("the '%s' pattern", syntaxAndPattern));
}
// Verifies that the directory tree (recursively) contains at least one entry
// matching the given PathMatcher pattern.
public void assertIsDirectoryRecursivelyContaining(AssertionInfo info, File actual, String syntaxAndPattern) {
  requireNonNull(syntaxAndPattern, "The syntax and pattern should not be null");
  assertIsDirectoryRecursivelyContaining(info, actual,
                                         fileMatcher(info, actual, syntaxAndPattern),
                                         format("the '%s' pattern", syntaxAndPattern));
}
// Verifies that the directory tree (recursively) contains at least one entry
// matching the given predicate.
public void assertIsDirectoryRecursivelyContaining(AssertionInfo info, File actual, Predicate<File> filter) {
  requireNonNull(filter, "The files filter should not be null");
  String filterPresentation = "the given filter";
  assertIsDirectoryRecursivelyContaining(info, actual, filter, filterPresentation);
}
// Verifies that no direct entry of the directory matches the given predicate.
public void assertIsDirectoryNotContaining(AssertionInfo info, File actual, Predicate<File> filter) {
  requireNonNull(filter, "The files filter should not be null");
  String filterPresentation = "the given filter";
  assertIsDirectoryNotContaining(info, actual, filter, filterPresentation);
}
// Verifies that no direct entry of the directory matches the given
// PathMatcher pattern.
public void assertIsDirectoryNotContaining(AssertionInfo info, File actual, String syntaxAndPattern) {
  requireNonNull(syntaxAndPattern, "The syntax and pattern should not be null");
  assertIsDirectoryNotContaining(info, actual,
                                 fileMatcher(info, actual, syntaxAndPattern),
                                 format("the '%s' pattern", syntaxAndPattern));
}
// Maps a list of files to the list of their simple names, preserving order.
// Returns a mutable list (Collectors.toList()).
@VisibleForTesting
public static List<String> toFileNames(List<File> files) {
return files.stream()
.map(File::getName)
.collect(toList());
}
// non public section
// Lists the direct entries of the directory that match the predicate.
// Fails the assertion first if {@code actual} is not an existing directory.
private List<File> filterDirectory(AssertionInfo info, File actual, Predicate<File> filter) {
  assertIsDirectory(info, actual);
  File[] matchingItems = actual.listFiles(file -> filter.test(file));
  // listFiles returns null on I/O error even for an existing directory
  requireNonNull(matchingItems, "Directory listing should not be null");
  return list(matchingItems);
}
// Returns all direct entries of the directory (ANY accepts every file).
private List<File> directoryContent(AssertionInfo info, File actual) {
return filterDirectory(info, actual, ANY);
}
// Shared implementation for the public assertIsDirectoryContaining overloads;
// filterPresentation is used only in the failure message.
private void assertIsDirectoryContaining(AssertionInfo info, File actual, Predicate<File> filter, String filterPresentation) {
  boolean anyMatch = !filterDirectory(info, actual, filter).isEmpty();
  if (!anyMatch) {
    throw failures.failure(info, directoryShouldContain(actual, directoryContentDescription(info, actual), filterPresentation));
  }
}
// Shared implementation for the public assertIsDirectoryNotContaining overloads:
// fails when any direct entry of the directory matches the filter.
// filterPresentation is used only in the failure message.
private void assertIsDirectoryNotContaining(AssertionInfo info, File actual, Predicate<File> filter,
                                            String filterPresentation) {
  List<File> matchingFiles = filterDirectory(info, actual, filter);
  // idiomatic emptiness check instead of size() > 0
  if (!matchingFiles.isEmpty()) {
    throw failures.failure(info, directoryShouldNotContain(actual, toFileNames(matchingFiles), filterPresentation));
  }
}
// Returns the names of all direct entries of the directory, for failure messages.
private List<String> directoryContentDescription(AssertionInfo info, File actual) {
return toFileNames(directoryContent(info, actual));
}
// Returns whether any file in the directory tree (the directory itself excluded)
// matches the filter. Fails the assertion first if actual is not a directory.
private boolean isDirectoryRecursivelyContaining(AssertionInfo info, File actual, Predicate<File> filter) {
  assertIsDirectory(info, actual);
  // the stream must be closed to release the underlying directory handles
  try (Stream<File> recursiveContent = recursiveContentOf(actual)) {
    return recursiveContent.anyMatch(filter);
  }
}
// Collects the whole directory tree (directory itself excluded), sorted by
// absolute path, for use in failure messages.
private List<File> sortedRecursiveContent(File directory) {
  try (Stream<File> content = recursiveContentOf(directory)) {
    return content.sorted(comparing(File::getAbsolutePath)).collect(toList());
  }
}
// Walks the directory tree and streams every file below it, excluding the
// directory itself. Callers are responsible for closing the returned stream.
private Stream<File> recursiveContentOf(File directory) {
  Path directoryPath = directory.toPath();
  try {
    return java.nio.file.Files.walk(directoryPath)
                              .filter(candidate -> !candidate.equals(directoryPath))
                              .map(Path::toFile);
  } catch (IOException e) {
    throw new UncheckedIOException(format("Unable to walk recursively the directory :<%s>", directoryPath), e);
  }
}
// Shared implementation for the public recursive "containing" overloads.
private void assertIsDirectoryRecursivelyContaining(AssertionInfo info, File actual, Predicate<File> filter,
                                                    String filterPresentation) {
  if (isDirectoryRecursivelyContaining(info, actual, filter)) {
    return;
  }
  throw failures.failure(info, directoryShouldContainRecursively(actual, sortedRecursiveContent(actual), filterPresentation));
}
// Builds a predicate from a PathMatcher syntax-and-pattern string (e.g. "glob:*.java")
// resolved against the file system of the asserted file.
private static Predicate<File> fileMatcher(AssertionInfo info, File actual, String syntaxAndPattern) {
  assertNotNull(info, actual);
  PathMatcher matcher = actual.toPath().getFileSystem().getPathMatcher(syntaxAndPattern);
  return candidate -> matcher.matches(candidate.toPath());
}
// Fails the assertion (not an NPE) when the asserted file is null.
private static void assertNotNull(AssertionInfo info, File actual) {
Objects.instance().assertNotNull(info, actual);
}
// Returns the extension after the last '.' of the file name, or null when the
// name contains no dot. A trailing dot yields an empty extension.
private String getFileExtension(File file) {
  String name = file.getName();
  int lastDot = name.lastIndexOf('.');
  if (lastDot == -1) {
    return null;
  }
  return name.substring(lastDot + 1);
}
// Validates that the comparison target is a non-null existing regular file;
// throws NullPointerException / IllegalArgumentException otherwise.
private void verifyIsFile(File expected) {
requireNonNull(expected, "The file to compare to should not be null");
checkArgument(expected.isFile(), "Expected file:<'%s'> should be an existing file", expected);
}
// Null-checks the asserted file (assertion failure, not NPE) and reports
// whether it denotes an absolute path.
private boolean isAbsolutePath(AssertionInfo info, File actual) {
assertNotNull(info, actual);
return actual.isAbsolute();
}
}
| apache-2.0 |
otaviojava/redis-collections | src/main/java/redis/clients/collections/builder/ScorePointLong.java | 1303 | package redis.clients.collections.builder;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import redis.clients.collections.Ranking;
import redis.clients.collections.ScoresPoint;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.Tuple;
/**
 * {@link ScoresPoint} implementation backed by a Redis sorted set whose scores
 * are read and written as {@code long} values (converted through {@code double},
 * as required by the Redis sorted-set API).
 */
class ScorePointLong extends AbstractScoresPoint<Long> implements ScoresPoint<Long> {

    ScorePointLong(Jedis jedis, String keyWithNameSpace) {
        super(jedis, keyWithNameSpace);
    }

    /** Sets the initial score of {@code field} via ZADD. */
    @Override
    public void initialPoint(String field, Long value) {
        jedis.zadd(keyWithNameSpace, value.doubleValue(), field);
    }

    /** Atomically adds {@code value} to the score of {@code field} via ZINCRBY. */
    @Override
    public Long increment(String field, Long value) {
        return jedis.zincrby(keyWithNameSpace, value.doubleValue(), field).longValue();
    }

    /**
     * Atomically subtracts {@code value} from the score of {@code field}.
     * NOTE(review): negating {@code Long.MIN_VALUE} overflows silently.
     */
    @Override
    public Long decrement(String field, Long value) {
        return increment(field, -value);
    }

    /**
     * Returns the entries between {@code start} and {@code end} (inclusive,
     * zero-based) ordered from the highest score to the lowest.
     */
    @Override
    public List<Ranking<Long>> range(long start, long end) {
        List<Ranking<Long>> topRanking = new ArrayList<>();
        Set<Tuple> scores = jedis.zrevrangeWithScores(keyWithNameSpace, start, end);
        for (Tuple tuple : scores) {
            topRanking.add(new RankingLong(tuple.getElement(), (long) tuple.getScore()));
        }
        return topRanking;
    }

    @Override
    public String toString() {
        // fixed: used to report a hard-coded class name from an unrelated
        // project ("br.com.elo7.elodum.redis.builder.ScorePointLong")
        return ScorePointLong.class.getName() + " at " + keyWithNameSpace;
    }
}
| apache-2.0 |
tanhaichao/leopard | leopard-monitor/src/main/java/io/leopard/monitor/connection/JdbcConnectionListenerImpl.java | 2010 | package io.leopard.monitor.connection;
import io.leopard.burrow.lang.SynchronizedLRUMap;
import io.leopard.jdbc.JdbcConnectionListener;
import io.leopard.monitor.MonitorBeanFactory;
import java.sql.Connection;
import java.util.Map;
/**
 * {@link JdbcConnectionListener} that records pool activity (open/close counts,
 * current size, cumulative hold time in nanoseconds) into a {@link ConnectionInfo}
 * registered with the {@link ConnectionMonitorService}.
 */
public class JdbcConnectionListenerImpl implements JdbcConnectionListener {

    private ConnectionMonitorService connectionMonitorService = MonitorBeanFactory.getConnectionMonitorService();

    protected String host;

    protected int port;

    // connection identity hash -> open start time (System.nanoTime); LRU-capped at 500 entries
    private Map<Integer, Long> timeMap = new SynchronizedLRUMap<Integer, Long>(100, 500);

    protected ConnectionInfo connectionInfo;

    /** Registers a ConnectionInfo describing this pool with the monitor service. */
    @Override
    public void setPoolConfig(String host, int port, int timeout, int maxPoolSize, String database) {
        this.host = host;
        this.port = port;
        connectionInfo = new ConnectionInfo();
        connectionInfo.setPort(port);
        connectionInfo.setHost(host); // should the host be resolved to an IP? -- TODO confirm
        connectionInfo.setMaxPoolSize(maxPoolSize);
        connectionInfo.setContent("");
        connectionInfo.setType("Jdbc");
        connectionMonitorService.add(connectionInfo);
    }

    /**
     * Records a connection acquisition. A null connection means the acquisition
     * failed; its elapsed time is charged immediately since close() will never
     * be called for it.
     */
    @Override
    public void open(Connection connection, long startTime) {
        if (connection == null) {
            long time = System.nanoTime() - startTime;
            connectionInfo.incrTotalTime(time);
            // NOTE(review): currentSize is still incremented for a failed
            // acquisition and never decremented -- verify this is intended
        }
        else {
            timeMap.put(connection.hashCode(), startTime);
        }
        connectionInfo.incrConnectionCount(1);
        connectionInfo.incrCurrentSize(1);
    }

    /** Records a connection release and charges its hold time. */
    @Override
    public void close(Connection connection) {
        // remove() both fetches the start time and frees the map entry;
        // the entry may be absent (LRU eviction or unmatched close) -- the old
        // code unboxed the null and threw a NullPointerException here
        Long startTime = timeMap.remove(connection.hashCode());
        if (startTime != null) {
            connectionInfo.incrTotalTime(System.nanoTime() - startTime);
        }
        connectionInfo.incrCurrentSize(-1);
    }

    /** Records a broken (unexpectedly dead) connection. */
    @Override
    public void broken() {
        connectionInfo.incrBrokenCount(1);
    }
}
| apache-2.0 |
arhimondr/presto | presto-main/src/main/java/com/facebook/presto/execution/TaskThresholdMemoryRevokingScheduler.java | 6067 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.execution;
import com.facebook.airlift.log.Logger;
import com.facebook.presto.memory.QueryContext;
import com.facebook.presto.memory.VoidTraversingQueryContextVisitor;
import com.facebook.presto.operator.OperatorContext;
import com.facebook.presto.sql.analyzer.FeaturesConfig;
import com.google.common.annotations.VisibleForTesting;
import javax.annotation.Nullable;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Supplier;
import static java.util.Objects.requireNonNull;
import static java.util.concurrent.TimeUnit.SECONDS;
/**
 * Periodically (every second) scans all tasks on this worker and, for any task whose
 * revocable memory reservation is at or above {@code maxRevocableMemoryPerTask},
 * asks that task's operators to revoke (spill) memory until the task drops back
 * under the threshold.
 */
public class TaskThresholdMemoryRevokingScheduler
{
private static final Logger log = Logger.get(TaskThresholdMemoryRevokingScheduler.class);
// snapshot supplier of all tasks currently known to this worker
private final Supplier<List<SqlTask>> currentTasksSupplier;
private final ScheduledExecutorService taskManagementExecutor;
// per-task revocable memory threshold, in bytes
private final long maxRevocableMemoryPerTask;
// Technically not thread safe but should be fine since we only call this on PostConstruct and PreDestroy.
// PreDestroy isn't called until server shuts down/ in between tests.
@Nullable
private ScheduledFuture<?> scheduledFuture;
// coalesces concurrent revocation requests: only one pending check at a time
private final AtomicBoolean checkPending = new AtomicBoolean();
@Inject
public TaskThresholdMemoryRevokingScheduler(
SqlTaskManager sqlTaskManager,
TaskManagementExecutor taskManagementExecutor,
FeaturesConfig config)
{
this(
requireNonNull(sqlTaskManager, "sqlTaskManager cannot be null")::getAllTasks,
requireNonNull(taskManagementExecutor, "taskManagementExecutor cannot be null").getExecutor(),
config.getMaxRevocableMemoryPerTask());
log.debug("Using TaskThresholdMemoryRevokingScheduler spilling strategy");
}
@VisibleForTesting
TaskThresholdMemoryRevokingScheduler(
Supplier<List<SqlTask>> currentTasksSupplier,
ScheduledExecutorService taskManagementExecutor,
long maxRevocableMemoryPerTask)
{
this.currentTasksSupplier = requireNonNull(currentTasksSupplier, "currentTasksSupplier is null");
this.taskManagementExecutor = requireNonNull(taskManagementExecutor, "taskManagementExecutor is null");
this.maxRevocableMemoryPerTask = maxRevocableMemoryPerTask;
}
@PostConstruct
public void start()
{
registerTaskMemoryPeriodicCheck();
}
// schedules the periodic scan; any Throwable is caught so one failure
// does not cancel the recurring task
private void registerTaskMemoryPeriodicCheck()
{
this.scheduledFuture = taskManagementExecutor.scheduleWithFixedDelay(() -> {
try {
revokeHighMemoryTasksIfNeeded();
}
catch (Throwable e) {
log.error(e, "Error requesting task memory revoking");
}
}, 1, 1, SECONDS);
}
@PreDestroy
public void stop()
{
if (scheduledFuture != null) {
scheduledFuture.cancel(true);
scheduledFuture = null;
}
}
// sets checkPending so that at most one revocation pass is requested at a time;
// the flag is cleared inside revokeHighMemoryTasks()
@VisibleForTesting
void revokeHighMemoryTasksIfNeeded()
{
if (checkPending.compareAndSet(false, true)) {
revokeHighMemoryTasks();
}
}
// synchronized: only one revocation pass runs at a time; getAndSet(false)
// re-opens the gate for the next scheduled check
private synchronized void revokeHighMemoryTasks()
{
if (checkPending.getAndSet(false)) {
Collection<SqlTask> sqlTasks = requireNonNull(currentTasksSupplier.get());
for (SqlTask task : sqlTasks) {
long currentTaskRevocableMemory = task.getTaskInfo().getStats().getRevocableMemoryReservationInBytes();
if (currentTaskRevocableMemory < maxRevocableMemoryPerTask) {
continue;
}
// revoke only the excess above the threshold, spread over the task's operators
AtomicLong remainingBytesToRevokeAtomic = new AtomicLong(currentTaskRevocableMemory - maxRevocableMemoryPerTask);
task.getQueryContext().accept(new VoidTraversingQueryContextVisitor<AtomicLong>()
{
@Override
public Void visitQueryContext(QueryContext queryContext, AtomicLong remainingBytesToRevoke)
{
if (remainingBytesToRevoke.get() < 0) {
// exit immediately if no work needs to be done
return null;
}
return super.visitQueryContext(queryContext, remainingBytesToRevoke);
}
@Override
public Void visitOperatorContext(OperatorContext operatorContext, AtomicLong remainingBytesToRevoke)
{
if (remainingBytesToRevoke.get() > 0) {
long revokedBytes = operatorContext.requestMemoryRevoking();
if (revokedBytes > 0) {
remainingBytesToRevoke.addAndGet(-revokedBytes);
log.debug("taskId=%s: requested revoking %s; remaining %s", task.getTaskInfo().getTaskId(), revokedBytes, remainingBytesToRevoke.get());
}
}
return null;
}
}, remainingBytesToRevokeAtomic);
}
}
}
}
| apache-2.0 |
aktse/akt-notes | akt-notes/gen/cs/ualberta/akt/akt_notes/R.java | 5738 | /* AUTO-GENERATED FILE. DO NOT MODIFY.
*
* This class was automatically generated by the
* aapt tool from the resource data it found. It
* should not be modified by hand.
*/
package cs.ualberta.akt.akt_notes;
public final class R {
// declared styleable/attr resources (none in this project)
public static final class attr {
}
public static final class dimen {
/** Default screen margins, per the Android Design guidelines.
Example customization of dimensions originally defined in res/values/dimens.xml
(such as screen margins) for screens with more than 820dp of available width. This
would include 7" and 10" devices in landscape (~960dp and ~1280dp respectively).
*/
public static final int activity_horizontal_margin=0x7f040000;
public static final int activity_vertical_margin=0x7f040001;
}
// drawable resources from res/drawable*
public static final class drawable {
public static final int ic_action_accept=0x7f020000;
public static final int ic_action_archive=0x7f020001;
public static final int ic_action_delete=0x7f020002;
public static final int ic_action_edit=0x7f020003;
public static final int ic_action_email=0x7f020004;
public static final int ic_action_new=0x7f020005;
public static final int ic_action_summary=0x7f020006;
public static final int ic_launcher=0x7f020007;
}
// view and menu-item ids declared in layout/menu XML
public static final class id {
public static final int action_archive=0x7f080014;
public static final int action_archive_edit=0x7f080011;
public static final int action_delete=0x7f080013;
public static final int action_done_email=0x7f080015;
public static final int action_edit=0x7f080016;
public static final int action_email=0x7f080012;
public static final int action_new=0x7f080017;
public static final int action_summary=0x7f080010;
public static final int archived_complete=0x7f08000e;
public static final int archived_incomplete=0x7f08000f;
public static final int archived_total=0x7f08000d;
public static final int checkBox=0x7f080009;
public static final int complete=0x7f08000b;
public static final int container=0x7f080007;
public static final int editCheckBox=0x7f080005;
public static final int editList=0x7f080000;
public static final int edit_list_item=0x7f080006;
public static final int email_archive=0x7f080002;
public static final int email_item=0x7f080001;
public static final int incomplete=0x7f08000c;
public static final int list_item=0x7f080008;
public static final int newEditText=0x7f080004;
public static final int pager=0x7f080003;
public static final int total=0x7f08000a;
}
// layout resources from res/layout
public static final class layout {
public static final int activity_edit_mode=0x7f030000;
public static final int activity_email=0x7f030001;
public static final int activity_main=0x7f030002;
public static final int activity_new_item=0x7f030003;
public static final int edit_list_item=0x7f030004;
public static final int fragment_main=0x7f030005;
public static final int list_item=0x7f030006;
public static final int summary_dialog=0x7f030007;
}
// menu resources from res/menu
public static final class menu {
public static final int archive_list=0x7f070000;
public static final int edit_mode=0x7f070001;
public static final int email=0x7f070002;
public static final int new_item=0x7f070003;
public static final int to_do_list=0x7f070004;
}
// string resources from res/values/strings.xml
public static final class string {
public static final int action_archive=0x7f050009;
public static final int action_delete=0x7f05000a;
public static final int action_done=0x7f050007;
public static final int action_edit=0x7f050005;
public static final int action_email=0x7f050008;
public static final int action_new=0x7f050006;
public static final int action_summary=0x7f05000b;
public static final int app_name=0x7f050000;
public static final int dialog_cancel=0x7f05000e;
public static final int dialog_confirm=0x7f05000d;
public static final int email_archive_title=0x7f050010;
public static final int email_items_title=0x7f05000f;
public static final int hint_new_item=0x7f05000c;
public static final int title_activity_edit_mode=0x7f050002;
public static final int title_activity_email=0x7f050003;
public static final int title_activity_new_item=0x7f050001;
public static final int to_do_list=0x7f050004;
}
public static final class style {
/**
Base application theme, dependent on API level. This theme is replaced
by AppBaseTheme from res/values-vXX/styles.xml on newer devices.
Theme customizations available in newer API levels can go in
res/values-vXX/styles.xml, while customizations related to
backward-compatibility can go here.
Base application theme for API 11+. This theme completely replaces
AppBaseTheme from res/values/styles.xml on API 11+ devices.
API 11 theme customizations can go here.
Base application theme for API 14+. This theme completely replaces
AppBaseTheme from BOTH res/values/styles.xml and
res/values-v11/styles.xml on API 14+ devices.
API 14 theme customizations can go here.
*/
public static final int AppBaseTheme=0x7f060000;
/** Application theme.
All customizations that are NOT specific to a particular API-level can go here.
*/
public static final int AppTheme=0x7f060001;
}
}
| apache-2.0 |
MOBX/Thor | thor-rpc/src/main/java/com/mob/thor/rpc/common/utils/LogUtil.java | 3230 | package com.mob.thor.rpc.common.utils;
import java.util.Iterator;
import java.util.List;
import org.apache.log4j.Level;
import com.mob.thor.rpc.common.logger.Logger;
import com.mob.thor.rpc.common.logger.LoggerFactory;
public class LogUtil {
private static Logger Log = LoggerFactory.getLogger(LogUtil.class);
public static void start() {
DubboAppender.doStart();
}
public static void stop() {
DubboAppender.doStop();
}
public static boolean checkNoError() {
if (findLevel(Level.ERROR) == 0) {
return true;
} else {
return false;
}
}
public static int findName(String expectedLogName) {
int count = 0;
List<Log> logList = DubboAppender.logList;
for (int i = 0; i < logList.size(); i++) {
String logName = logList.get(i).getLogName();
if (logName.contains(expectedLogName)) count++;
}
return count;
}
public static int findLevel(Level expectedLevel) {
int count = 0;
List<Log> logList = DubboAppender.logList;
for (int i = 0; i < logList.size(); i++) {
Level logLevel = logList.get(i).getLogLevel();
if (logLevel.equals(expectedLevel)) count++;
}
return count;
}
public static int findLevelWithThreadName(Level expectedLevel, String threadName) {
int count = 0;
List<Log> logList = DubboAppender.logList;
for (int i = 0; i < logList.size(); i++) {
Log log = logList.get(i);
if (log.getLogLevel().equals(expectedLevel) && log.getLogThread().equals(threadName)) count++;
}
return count;
}
public static int findThread(String expectedThread) {
int count = 0;
List<Log> logList = DubboAppender.logList;
for (int i = 0; i < logList.size(); i++) {
String logThread = logList.get(i).getLogThread();
if (logThread.contains(expectedThread)) count++;
}
return count;
}
public static int findMessage(String expectedMessage) {
int count = 0;
List<Log> logList = DubboAppender.logList;
for (int i = 0; i < logList.size(); i++) {
String logMessage = logList.get(i).getLogMessage();
if (logMessage.contains(expectedMessage)) count++;
}
return count;
}
public static int findMessage(Level expectedLevel, String expectedMessage) {
int count = 0;
List<Log> logList = DubboAppender.logList;
for (int i = 0; i < logList.size(); i++) {
Level logLevel = logList.get(i).getLogLevel();
if (logLevel.equals(expectedLevel)) {
String logMessage = logList.get(i).getLogMessage();
if (logMessage.contains(expectedMessage)) count++;
}
}
return count;
}
public static <T> void printList(List<T> list) {
Log.info("PrintList:");
Iterator<T> it = list.iterator();
while (it.hasNext()) {
Log.info(it.next().toString());
}
}
}
| apache-2.0 |
glorycloud/GloryMail | MailProxy/src/fortunedog/mail/proxy/exchange/ExchangeException.java | 634 | package fortunedog.mail.proxy.exchange;
/**
 * Checked exception raised by the Exchange mail integration layer.
 */
public class ExchangeException extends Exception
{
    private static final long serialVersionUID = 1L;

    /** Creates an exception with neither detail message nor cause. */
    public ExchangeException()
    {
        super();
    }

    /** Creates an exception with the given detail message. */
    public ExchangeException(String message)
    {
        super(message);
    }

    /** Creates an exception wrapping the given cause. */
    public ExchangeException(Throwable cause)
    {
        super(cause);
    }

    /** Creates an exception with the given detail message and cause. */
    public ExchangeException(String message, Throwable cause)
    {
        super(message, cause);
    }
}
| apache-2.0 |
jbertram/activemq-artemis-old | tests/integration-tests/src/test/java/org/apache/activemq/artemis/tests/integration/client/PagingTest.java | 183893 | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.artemis.tests.integration.client;
import javax.transaction.xa.XAResource;
import javax.transaction.xa.Xid;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.activemq.artemis.api.core.ActiveMQBuffer;
import org.apache.activemq.artemis.api.core.ActiveMQException;
import org.apache.activemq.artemis.api.core.ActiveMQExceptionType;
import org.apache.activemq.artemis.api.core.Message;
import org.apache.activemq.artemis.api.core.Pair;
import org.apache.activemq.artemis.api.core.SimpleString;
import org.apache.activemq.artemis.api.core.client.ClientConsumer;
import org.apache.activemq.artemis.api.core.client.ClientMessage;
import org.apache.activemq.artemis.api.core.client.ClientProducer;
import org.apache.activemq.artemis.api.core.client.ClientSession;
import org.apache.activemq.artemis.api.core.client.ClientSessionFactory;
import org.apache.activemq.artemis.api.core.client.MessageHandler;
import org.apache.activemq.artemis.api.core.client.ServerLocator;
import org.apache.activemq.artemis.tests.integration.IntegrationTestLogger;
import org.apache.activemq.artemis.tests.util.ServiceTestBase;
import org.apache.activemq.artemis.tests.util.UnitTestCase;
import org.apache.activemq.artemis.core.client.impl.ClientConsumerInternal;
import org.apache.activemq.artemis.core.config.Configuration;
import org.apache.activemq.artemis.core.config.DivertConfiguration;
import org.apache.activemq.artemis.core.filter.Filter;
import org.apache.activemq.artemis.core.journal.IOAsyncTask;
import org.apache.activemq.artemis.core.journal.PreparedTransactionInfo;
import org.apache.activemq.artemis.core.journal.RecordInfo;
import org.apache.activemq.artemis.core.journal.impl.JournalImpl;
import org.apache.activemq.artemis.core.journal.impl.NIOSequentialFileFactory;
import org.apache.activemq.artemis.core.paging.PagedMessage;
import org.apache.activemq.artemis.core.paging.PagingManager;
import org.apache.activemq.artemis.core.paging.PagingStore;
import org.apache.activemq.artemis.core.paging.cursor.PageCursorProvider;
import org.apache.activemq.artemis.core.paging.cursor.impl.PagePositionImpl;
import org.apache.activemq.artemis.core.paging.impl.Page;
import org.apache.activemq.artemis.core.persistence.OperationContext;
import org.apache.activemq.artemis.core.persistence.impl.journal.DescribeJournal;
import org.apache.activemq.artemis.core.persistence.impl.journal.DescribeJournal.ReferenceDescribe;
import org.apache.activemq.artemis.core.persistence.impl.journal.JournalRecordIds;
import org.apache.activemq.artemis.core.persistence.impl.journal.JournalStorageManager.AckDescribe;
import org.apache.activemq.artemis.core.persistence.impl.journal.OperationContextImpl;
import org.apache.activemq.artemis.core.server.ActiveMQServer;
import org.apache.activemq.artemis.core.server.Queue;
import org.apache.activemq.artemis.core.server.impl.ActiveMQServerImpl;
import org.apache.activemq.artemis.core.settings.impl.AddressFullMessagePolicy;
import org.apache.activemq.artemis.core.settings.impl.AddressSettings;
import org.apache.activemq.artemis.logs.AssertionLoggerHandler;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class PagingTest extends ServiceTestBase
{
// Per-test client/server state; recreated in setUp() and by individual tests.
private ServerLocator locator;
private ActiveMQServer server;
private ClientSessionFactory sf;
// Body size (bytes) written into most test messages.
static final int MESSAGE_SIZE = 1024; // 1k
private static final IntegrationTestLogger log = IntegrationTestLogger.LOGGER;
// NOTE(review): not referenced in this portion of the file — presumably used by later tests.
private static final int RECEIVE_TIMEOUT = 5000;
// Default paging thresholds handed to createServer(): page when address exceeds
// PAGE_MAX bytes, with PAGE_SIZE bytes per page file.
private static final int PAGE_MAX = 100 * 1024;
private static final int PAGE_SIZE = 10 * 1024;
// Address (and usually queue name) shared by most tests in this class.
static final SimpleString ADDRESS = new SimpleString("SimpleAddress");
@Override
@Before
public void setUp() throws Exception
{
   // Base-class setup first (data directories, bookkeeping), then create the
   // in-VM non-HA locator most tests use; some tests recreate it themselves.
   super.setUp();
   locator = createInVMNonHALocator();
}
/**
 * Sends 201 persistent messages to an address bound to two queues with small
 * paging limits (20k max / 10k page), restarts the server, and verifies each
 * queue independently delivers all 201 messages after the restart.
 */
@Test
public void testPageOnLargeMessageMultipleQueues() throws Exception
{
   Configuration config = createDefaultConfig();

   // Tight paging limits local to this test so paging kicks in quickly.
   final int PAGE_MAX = 20 * 1024;
   final int PAGE_SIZE = 10 * 1024;

   HashMap<String, AddressSettings> map = new HashMap<String, AddressSettings>();
   AddressSettings value = new AddressSettings();
   map.put(ADDRESS.toString(), value);
   ActiveMQServer server = createServer(true, config, PAGE_SIZE, PAGE_MAX, map);
   server.start();

   final int numberOfBytes = 1024;

   locator.setBlockOnNonDurableSend(true);
   locator.setBlockOnDurableSend(true);
   locator.setBlockOnAcknowledge(true);

   ClientSessionFactory sf = addSessionFactory(createSessionFactory(locator));
   ClientSession session = sf.createSession(null, null, false, true, true, false, 0);

   // Two queues on the same address; both receive every message sent below.
   session.createQueue(ADDRESS, ADDRESS.concat("-0"), null, true);
   session.createQueue(ADDRESS, ADDRESS.concat("-1"), null, true);

   ClientProducer producer = session.createProducer(ADDRESS);

   ClientMessage message = null;

   for (int i = 0; i < 201; i++)
   {
      message = session.createMessage(true);
      message.getBodyBuffer().writerIndex(0);
      message.getBodyBuffer().writeBytes(new byte[numberOfBytes]);
      // Appends numberOfBytes ints after the byte block, so the body is
      // numberOfBytes + 4 * numberOfBytes bytes in total.
      for (int j = 1; j <= numberOfBytes; j++)
      {
         message.getBodyBuffer().writeInt(j);
      }

      producer.send(message);
   }

   session.close();

   // Restart to force the paged data to be reloaded from disk.
   server.stop();
   server = createServer(true, config, PAGE_SIZE, PAGE_MAX, map);
   server.start();

   sf = createSessionFactory(locator);

   for (int ad = 0; ad < 2; ad++)
   {
      session = sf.createSession(false, false, false);

      ClientConsumer consumer = session.createConsumer(ADDRESS.concat("-" + ad));

      session.start();

      for (int i = 0; i < 201; i++)
      {
         ClientMessage message2 = consumer.receive(LargeMessageTest.RECEIVE_WAIT_TIME);

         Assert.assertNotNull(message2);

         message2.acknowledge();

         Assert.assertNotNull(message2);
      }

      try
      {
         // NOTE(review): ad is always >= 0 here, so this condition is always
         // true and the rollback/redelivery branch below is dead code.
         if (ad > -1)
         {
            session.commit();
         }
         else
         {
            session.rollback();
            for (int i = 0; i < 100; i++)
            {
               ClientMessage message2 = consumer.receive(LargeMessageTest.RECEIVE_WAIT_TIME);

               Assert.assertNotNull(message2);

               message2.acknowledge();

               Assert.assertNotNull(message2);
            }
            session.commit();
         }
      }
      catch (Throwable e)
      {
         // NOTE(review): System.exit(-1) kills the whole test JVM instead of
         // failing the test — confirm this is intentional debugging residue.
         System.err.println("here!!!!!!!");
         e.printStackTrace();
         System.exit(-1);
      }

      consumer.close();

      session.close();
   }
}
/**
 * Sends two batches of 5000 persistent messages (with an intervening rolled-back
 * send), consumes all 10000, and then verifies the page files were cleaned up:
 * the queue stops paging and no live PAGE_CURSOR_COMPLETE records remain in the
 * journal.
 */
@Test
public void testPageCleanup() throws Exception
{
   clearDataRecreateServerDirs();

   Configuration config = createDefaultConfig()
      .setJournalSyncNonTransactional(false);

   server =
      createServer(true, config,
                   PagingTest.PAGE_SIZE,
                   PagingTest.PAGE_MAX,
                   new HashMap<String, AddressSettings>());

   server.start();

   final int numberOfMessages = 5000;

   locator = createInVMNonHALocator();

   locator.setBlockOnNonDurableSend(true);
   locator.setBlockOnDurableSend(true);
   locator.setBlockOnAcknowledge(true);

   sf = createSessionFactory(locator);

   ClientSession session = sf.createSession(false, false, false);

   session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS, null, true);

   ClientProducer producer = session.createProducer(PagingTest.ADDRESS);

   ClientMessage message = null;

   byte[] body = new byte[MESSAGE_SIZE];

   ByteBuffer bb = ByteBuffer.wrap(body);

   for (int j = 1; j <= MESSAGE_SIZE; j++)
   {
      bb.put(getSamplebyte(j));
   }

   // First batch: 5000 messages, committed every 1000.
   for (int i = 0; i < numberOfMessages; i++)
   {
      message = session.createMessage(true);

      ActiveMQBuffer bodyLocal = message.getBodyBuffer();

      bodyLocal.writeBytes(body);

      producer.send(message);
      if (i % 1000 == 0)
      {
         session.commit();
      }
   }
   session.commit();
   producer.close();
   session.close();

   // A rolled-back send: must not contribute to the final message count.
   session = sf.createSession(false, false, false);
   producer = session.createProducer(PagingTest.ADDRESS);
   producer.send(session.createMessage(true));
   session.rollback();
   producer.close();
   session.close();

   // Second batch of 5000.
   session = sf.createSession(false, false, false);
   producer = session.createProducer(PagingTest.ADDRESS);
   for (int i = 0; i < numberOfMessages; i++)
   {
      message = session.createMessage(true);

      ActiveMQBuffer bodyLocal = message.getBodyBuffer();

      bodyLocal.writeBytes(body);

      producer.send(message);
      if (i % 1000 == 0)
      {
         session.commit();
      }
   }
   session.commit();
   producer.close();
   session.close();
   //System.out.println("Just sent " + numberOfMessages + " messages.");

   Queue queue = server.locateQueue(PagingTest.ADDRESS);

   session = sf.createSession(false, false, false);

   session.start();

   assertEquals(numberOfMessages * 2, getMessageCount(queue));

   // The consumer has to be created after the getMessageCount(queue) assertion
   // otherwise delivery could alter the messagecount and give us a false failure
   ClientConsumer consumer = session.createConsumer(PagingTest.ADDRESS);
   ClientMessage msg = null;

   for (int i = 0; i < numberOfMessages * 2; i++)
   {
      msg = consumer.receive(1000);
      assertNotNull(msg);
      msg.acknowledge();
      if (i % 500 == 0)
      {
         session.commit();
      }
   }
   session.commit();
   consumer.close();
   session.close();

   sf.close();

   locator.close();

   assertEquals(0, getMessageCount(queue));

   waitForNotPaging(queue);

   server.stop();

   // After everything is consumed, no live page-cursor-complete records should
   // remain in the journal.
   HashMap<Integer, AtomicInteger> counts = countJournalLivingRecords(server.getConfiguration());

   AtomicInteger pgComplete = counts.get(JournalRecordIds.PAGE_CURSOR_COMPLETE);

   assertTrue(pgComplete == null || pgComplete.get() == 0);

   System.out.println("pgComplete = " + pgComplete);
}
// Scenario: the first page is fully consumed (complete) but its file was not
// deleted before a restart; the server must resume delivery correctly.
/**
 * Sends 20 messages (forcing a new page every 5), individually acknowledges the
 * first 5 with page cleanup disabled — leaving the first page "complete" but
 * present on disk — then restarts and verifies messages 5..19 are delivered in
 * order with nothing duplicated.
 */
@Test
public void testFirstPageCompleteNotDeleted() throws Exception
{
   clearDataRecreateServerDirs();

   Configuration config = createDefaultConfig()
      .setJournalSyncNonTransactional(false);

   server =
      createServer(true, config,
                   PagingTest.PAGE_SIZE,
                   PagingTest.PAGE_MAX,
                   new HashMap<String, AddressSettings>());

   server.start();

   final int numberOfMessages = 20;

   locator = createInVMNonHALocator();

   locator.setBlockOnNonDurableSend(true);
   locator.setBlockOnDurableSend(true);
   locator.setBlockOnAcknowledge(true);

   sf = createSessionFactory(locator);

   ClientSession session = sf.createSession(false, true, true);

   Queue queue = server.createQueue(ADDRESS, ADDRESS, null, true, false);

   queue.getPageSubscription().getPagingStore().startPaging();

   ClientProducer producer = session.createProducer(PagingTest.ADDRESS);

   ClientMessage message = null;

   byte[] body = new byte[MESSAGE_SIZE];

   ByteBuffer bb = ByteBuffer.wrap(body);

   for (int j = 1; j <= MESSAGE_SIZE; j++)
   {
      bb.put(getSamplebyte(j));
   }

   for (int i = 0; i < numberOfMessages; i++)
   {
      message = session.createMessage(true);

      ActiveMQBuffer bodyLocal = message.getBodyBuffer();

      bodyLocal.writeBytes(body);

      message.putIntProperty("count", i);

      producer.send(message);

      // Force a page boundary after every 5th message: 4 pages of 5 messages.
      if ((i + 1) % 5 == 0)
      {
         session.commit();
         queue.getPageSubscription().getPagingStore().forceAnotherPage();
      }
   }

   session.commit();

   producer.close();

   session.close();

   // This will make the cursor to set the page complete and not actually delete it
   queue.getPageSubscription().getPagingStore().disableCleanup();

   session = sf.createSession(false, false, false);

   ClientConsumer consumer = session.createConsumer(ADDRESS);

   session.start();

   // Consume exactly the first page (messages 0..4) with individual acks.
   for (int i = 0; i < 5; i++)
   {
      ClientMessage msg = consumer.receive(2000);
      assertNotNull(msg);
      assertEquals(i, msg.getIntProperty("count").intValue());

      msg.individualAcknowledge();

      System.out.println(msg);
   }

   session.commit();

   session.close();

   // Restart: the complete-but-undeleted first page must not be redelivered.
   server.stop();

   server.start();

   sf = createSessionFactory(locator);

   session = sf.createSession(false, false, false);

   consumer = session.createConsumer(ADDRESS);

   session.start();

   for (int i = 5; i < numberOfMessages; i++)
   {
      ClientMessage msg = consumer.receive(2000);
      assertNotNull(msg);
      assertEquals(i, msg.getIntProperty("count").intValue());

      msg.acknowledge();
      System.out.println(msg);
   }

   assertNull(consumer.receiveImmediate());

   session.commit();
   session.close();

   sf.close();

   locator.close();
}
/**
 * Exercises XA-prepared acknowledgements interacting with paging across a
 * restart: one ack is prepared and later rolled back, one is prepared and never
 * committed. After restart the store must still be paging (prepared data keeps
 * pages alive), new messages flow normally, and the rolled-back message is
 * redelivered and finally consumed.
 */
@Test
public void testPreparedACKAndRestart() throws Exception
{
   clearDataRecreateServerDirs();

   Configuration config = createDefaultConfig()
      .setJournalSyncNonTransactional(false);

   server = createServer(true,
                         config,
                         PagingTest.PAGE_SIZE,
                         PagingTest.PAGE_MAX,
                         new HashMap<String, AddressSettings>());

   server.start();

   final int numberOfMessages = 50;

   locator = createInVMNonHALocator();

   locator.setBlockOnNonDurableSend(true);
   locator.setBlockOnDurableSend(true);
   locator.setBlockOnAcknowledge(true);
   // Batch size 0 => every ack is sent to the server immediately.
   locator.setAckBatchSize(0);

   sf = createSessionFactory(locator);

   ClientSession session = sf.createSession(false, true, true);

   session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS, null, true);

   Queue queue = server.locateQueue(PagingTest.ADDRESS);

   ClientProducer producer = session.createProducer(PagingTest.ADDRESS);

   byte[] body = new byte[MESSAGE_SIZE];

   ByteBuffer bb = ByteBuffer.wrap(body);

   for (int j = 1; j <= MESSAGE_SIZE; j++)
   {
      bb.put(getSamplebyte(j));
   }

   queue.getPageSubscription().getPagingStore().startPaging();

   forcePage(queue);

   // Send many messages, 5 on each page
   for (int i = 0; i < numberOfMessages; i++)
   {
      ClientMessage message = session.createMessage(true);

      message.putIntProperty("count", i);

      ActiveMQBuffer bodyLocal = message.getBodyBuffer();

      bodyLocal.writeBytes(body);

      producer.send(message);

      if ((i + 1) % 5 == 0)
      {
         System.out.println("Forcing at " + i);
         session.commit();
         queue.getPageSubscription().getPagingStore().forceAnotherPage();
      }
   }

   session.close();

   session = sf.createSession(true, false, false);

   // First XA branch: consume message 0, prepare it, roll it back after restart.
   Xid xidConsumeNoCommit = newXID();
   session.start(xidConsumeNoCommit, XAResource.TMNOFLAGS);

   ClientConsumer cons = session.createConsumer(ADDRESS);

   session.start();

   // First message is consumed, prepared, will be rolled back later
   ClientMessage firstMessageConsumed = cons.receive(5000);
   assertNotNull(firstMessageConsumed);
   firstMessageConsumed.acknowledge();

   session.end(xidConsumeNoCommit, XAResource.TMSUCCESS);

   session.prepare(xidConsumeNoCommit);

   Xid xidConsumeCommit = newXID();
   session.start(xidConsumeCommit, XAResource.TMNOFLAGS);

   Xid neverCommittedXID = newXID();

   for (int i = 1; i < numberOfMessages; i++)
   {
      if (i == 20)
      {
         // I elected a single message to be in prepared state, it won't ever be committed
         session.end(xidConsumeCommit, XAResource.TMSUCCESS);
         session.commit(xidConsumeCommit, true);
         session.start(neverCommittedXID, XAResource.TMNOFLAGS);
      }
      ClientMessage message = cons.receive(5000);
      assertNotNull(message);
      System.out.println("ACK " + i);
      message.acknowledge();
      assertEquals(i, message.getIntProperty("count").intValue());
      if (i == 20)
      {
         session.end(neverCommittedXID, XAResource.TMSUCCESS);
         session.prepare(neverCommittedXID);
         // Fresh branch for the remaining messages (21..49).
         xidConsumeCommit = newXID();
         session.start(xidConsumeCommit, XAResource.TMNOFLAGS);
      }
   }

   session.end(xidConsumeCommit, XAResource.TMSUCCESS);

   session.commit(xidConsumeCommit, true);

   session.close();
   sf.close();

   // Restart the server, and we expect cleanup to not destroy any page with prepared data
   server.stop();

   server.start();

   sf = createSessionFactory(locator);

   session = sf.createSession(false, true, true);

   queue = server.locateQueue(ADDRESS);

   // Prepared (uncommitted) acks must keep the store in paging mode.
   assertTrue(queue.getPageSubscription().getPagingStore().isPaging());

   producer = session.createProducer(ADDRESS);

   for (int i = numberOfMessages; i < numberOfMessages * 2; i++)
   {
      ClientMessage message = session.createMessage(true);

      message.putIntProperty("count", i);

      ActiveMQBuffer bodyLocal = message.getBodyBuffer();

      bodyLocal.writeBytes(body);

      producer.send(message);

      if ((i + 1) % 5 == 0)
      {
         session.commit();
         queue.getPageSubscription().getPagingStore().forceAnotherPage();
      }
   }

   cons = session.createConsumer(ADDRESS);

   session.start();

   for (int i = numberOfMessages; i < numberOfMessages * 2; i++)
   {
      ClientMessage message = cons.receive(5000);
      assertNotNull(message);
      assertEquals(i, message.getIntProperty("count").intValue());
      message.acknowledge();
   }
   assertNull(cons.receiveImmediate());

   session.commit();

   System.out.println("count = " + getMessageCount(queue));

   session.commit();

   session.close();

   // Roll back the first prepared branch: message 0 becomes deliverable again.
   session = sf.createSession(true, false, false);

   session.rollback(xidConsumeNoCommit);

   session.start();

   xidConsumeCommit = newXID();

   session.start(xidConsumeCommit, XAResource.TMNOFLAGS);

   cons = session.createConsumer(ADDRESS);

   session.start();

   ClientMessage message = cons.receive(5000);
   assertNotNull(message);
   message.acknowledge();

   session.end(xidConsumeCommit, XAResource.TMSUCCESS);

   session.commit(xidConsumeCommit, true);

   session.close();
}
/**
 * Polls (for at most 5 seconds, sleeping 10ms between checks) until the given
 * queue's paging store reports that it is paging, then asserts that paging is
 * actually active — failing the test if the timeout elapsed first.
 *
 * @param queue the queue whose paging store is polled
 * @throws InterruptedException if the polling sleep is interrupted
 */
private void forcePage(Queue queue) throws InterruptedException
{
   final long deadline = System.currentTimeMillis() + 5000;
   while (System.currentTimeMillis() < deadline)
   {
      if (queue.getPageSubscription().getPagingStore().isPaging())
      {
         break;
      }
      Thread.sleep(10);
   }
   assertTrue(queue.getPageSubscription().getPagingStore().isPaging());
}
/**
 * Sends 5000 paged messages of which the first 1000 expire one second after
 * send. Verifies the expired 1000 are moved to the "EXP" expiry address, the
 * remaining 4000 are consumable from the original address, and the expired
 * ones are consumable from "EXP".
 */
@Test
public void testMoveExpire() throws Exception
{
   clearDataRecreateServerDirs();

   Configuration config = createDefaultConfig()
      .setJournalDirectory(getJournalDir())
      .setJournalSyncNonTransactional(false)
      .setJournalCompactMinFiles(0) // disable compact
      .setMessageExpiryScanPeriod(500);

   server = createServer(true,
                         config,
                         PagingTest.PAGE_SIZE,
                         PagingTest.PAGE_MAX,
                         new HashMap<String, AddressSettings>());

   // Route expired messages to the EXP address and page on overflow.
   AddressSettings defaultSetting = new AddressSettings();
   defaultSetting.setPageSizeBytes(PAGE_SIZE);
   defaultSetting.setMaxSizeBytes(PAGE_MAX);
   //      defaultSetting.setRedeliveryDelay(500);
   defaultSetting.setExpiryAddress(new SimpleString("EXP"));
   defaultSetting.setAddressFullMessagePolicy(AddressFullMessagePolicy.PAGE);

   server.getAddressSettingsRepository().clear();

   server.getAddressSettingsRepository().addMatch("#", defaultSetting);

   server.start();

   final int numberOfMessages = 5000;

   locator = createInVMNonHALocator();
   locator.setConsumerWindowSize(10 * 1024 * 1024);
   locator.setBlockOnNonDurableSend(true);
   locator.setBlockOnDurableSend(true);
   locator.setBlockOnAcknowledge(true);

   ClientSessionFactory sf = locator.createSessionFactory();

   ClientSession session = sf.createSession(false, false, false);

   session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS, null, true);

   session.createQueue("EXP", "EXP", null, true);

   Queue queue1 = server.locateQueue(ADDRESS);
   Queue qEXP = server.locateQueue(new SimpleString("EXP"));

   ClientProducer producer = session.createProducer(PagingTest.ADDRESS);

   // Shadows the class-level MESSAGE_SIZE with the same value (1k).
   final int MESSAGE_SIZE = 1024;

   byte[] body = new byte[MESSAGE_SIZE];

   ByteBuffer bb = ByteBuffer.wrap(body);

   for (int j = 1; j <= MESSAGE_SIZE; j++)
   {
      bb.put(getSamplebyte(j));
   }

   for (int i = 0; i < numberOfMessages; i++)
   {
      ClientMessage message = session.createMessage(true);

      // Only the first 1000 messages get an expiration time.
      if (i < 1000)
      {
         message.setExpiration(System.currentTimeMillis() + 1000);
      }

      message.putIntProperty("tst-count", i);

      ActiveMQBuffer bodyLocal = message.getBodyBuffer();

      bodyLocal.writeBytes(body);

      producer.send(message);
      if (i % 1000 == 0)
      {
         session.commit();
      }
   }

   session.commit();

   producer.close();

   // Wait (up to 60s) for the expiry scanner to move the 1000 expired messages.
   for (long timeout = System.currentTimeMillis() + 60000; timeout > System.currentTimeMillis() && getMessageCount(qEXP) < 1000; )
   {
      System.out.println("count = " + getMessageCount(qEXP));
      Thread.sleep(100);
   }

   assertEquals(1000, getMessageCount(qEXP));

   session.start();

   ClientConsumer consumer = session.createConsumer(ADDRESS);

   for (int i = 0; i < numberOfMessages - 1000; i++)
   {
      ClientMessage message = consumer.receive(5000);
      assertNotNull(message);
      message.acknowledge();

      // Survivors must all be from the non-expiring range (>= 1000).
      assertTrue(message.getIntProperty("tst-count") >= 1000);
   }

   session.commit();

   assertNull(consumer.receiveImmediate());

   for (long timeout = System.currentTimeMillis() + 5000; timeout > System.currentTimeMillis() && getMessageCount(queue1) != 0; )
   {
      Thread.sleep(100);
   }

   assertEquals(0, getMessageCount(queue1));

   consumer.close();

   consumer = session.createConsumer("EXP");

   for (int i = 0; i < 1000; i++)
   {
      ClientMessage message = consumer.receive(5000);
      assertNotNull(message);
      message.acknowledge();

      // Everything on EXP must come from the expiring range (< 1000).
      assertTrue(message.getIntProperty("tst-count") < 1000);
   }

   assertNull(consumer.receiveImmediate());

   // This is just to hold some messages as being delivered
   ClientConsumerInternal cons = (ClientConsumerInternal) session.createConsumer(ADDRESS);

   session.commit();
   producer.close();
   session.close();

   server.stop();
}
/**
 * Destroys a queue while it still has undelivered paged messages (and a
 * client buffer full of in-delivery messages), then restarts and checks the
 * journal: the deleted queue's references must be removed as deletes — not
 * acked as page records — and the surviving queue must still deliver all
 * messages and leave paging mode.
 */
@Test
public void testDeleteQueueRestart() throws Exception
{
   clearDataRecreateServerDirs();

   Configuration config = createDefaultConfig()
      .setJournalDirectory(getJournalDir())
      .setJournalSyncNonTransactional(false)
      .setJournalCompactMinFiles(0); // disable compact

   ActiveMQServer server =
      createServer(true, config,
                   PagingTest.PAGE_SIZE,
                   PagingTest.PAGE_MAX,
                   new HashMap<String, AddressSettings>());

   server.start();

   final int numberOfMessages = 5000;

   locator = createInVMNonHALocator();

   locator.setConsumerWindowSize(10 * 1024 * 1024);
   locator.setBlockOnNonDurableSend(true);
   locator.setBlockOnDurableSend(true);
   locator.setBlockOnAcknowledge(true);

   SimpleString QUEUE2 = ADDRESS.concat("-2");

   ClientSessionFactory sf = locator.createSessionFactory();

   ClientSession session = sf.createSession(false, false, false);

   // Two queues on the same address; QUEUE2 is the one destroyed later.
   session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS, null, true);

   session.createQueue(PagingTest.ADDRESS, QUEUE2, null, true);

   ClientProducer producer = session.createProducer(PagingTest.ADDRESS);

   // This is just to hold some messages as being delivered
   ClientConsumerInternal cons = (ClientConsumerInternal) session.createConsumer(ADDRESS);

   ClientConsumerInternal cons2 = (ClientConsumerInternal) session.createConsumer(QUEUE2);

   ClientMessage message = null;

   byte[] body = new byte[MESSAGE_SIZE];

   ByteBuffer bb = ByteBuffer.wrap(body);

   for (int j = 1; j <= MESSAGE_SIZE; j++)
   {
      bb.put(getSamplebyte(j));
   }

   for (int i = 0; i < numberOfMessages; i++)
   {
      message = session.createMessage(true);

      ActiveMQBuffer bodyLocal = message.getBodyBuffer();

      bodyLocal.writeBytes(body);

      producer.send(message);
      if (i % 1000 == 0)
      {
         session.commit();
      }
   }

   session.commit();

   producer.close();

   session.start();

   long timeout = System.currentTimeMillis() + 5000;

   // I want the buffer full to make sure there are pending messages on the server's side
   while (System.currentTimeMillis() < timeout && cons.getBufferSize() < 1000 && cons2.getBufferSize() < 1000)
   {
      System.out.println("cons1 buffer = " + cons.getBufferSize() + ", cons2 buffer = " + cons2.getBufferSize());
      Thread.sleep(100);
   }

   assertTrue(cons.getBufferSize() >= 1000);
   assertTrue(cons2.getBufferSize() >= 1000);

   session.close();

   Queue queue = server.locateQueue(QUEUE2);

   long deletedQueueID = queue.getID();

   server.destroyQueue(QUEUE2);

   sf.close();
   locator.close();
   locator = null;
   sf = null;

   server.stop();

   // Inspect the raw journal after shutdown.
   final HashMap<Integer, AtomicInteger> recordsType = countJournal(config);

   for (Map.Entry<Integer, AtomicInteger> entry : recordsType.entrySet())
   {
      System.out.println(entry.getKey() + "=" + entry.getValue());
   }

   assertNull("The system is acking page records instead of just delete data",
              recordsType.get(new Integer(JournalRecordIds.ACKNOWLEDGE_CURSOR)));

   Pair<List<RecordInfo>, List<PreparedTransactionInfo>> journalData = loadMessageJournal(config);

   HashSet<Long> deletedQueueReferences = new HashSet<Long>();

   // Collect ADD_REF records for the deleted queue and cancel them out with
   // matching ACKNOWLEDGE_REF records; anything left over is a leak.
   for (RecordInfo info : journalData.getA())
   {
      if (info.getUserRecordType() == JournalRecordIds.ADD_REF)
      {
         DescribeJournal.ReferenceDescribe ref = (ReferenceDescribe) DescribeJournal.newObjectEncoding(info);

         if (ref.refEncoding.queueID == deletedQueueID)
         {
            deletedQueueReferences.add(new Long(info.id));
         }
      }
      else if (info.getUserRecordType() == JournalRecordIds.ACKNOWLEDGE_REF)
      {
         AckDescribe ref = (AckDescribe) DescribeJournal.newObjectEncoding(info);

         if (ref.refEncoding.queueID == deletedQueueID)
         {
            deletedQueueReferences.remove(new Long(info.id));
         }
      }
   }

   if (!deletedQueueReferences.isEmpty())
   {
      for (Long value : deletedQueueReferences)
      {
         System.out.println("Deleted Queue still has a reference:" + value);
      }

      fail("Deleted queue still have references");
   }

   server.start();

   locator = createInVMNonHALocator();
   locator.setConsumerWindowSize(10 * 1024 * 1024);
   sf = locator.createSessionFactory();

   session = sf.createSession(false, false, false);
   cons = (ClientConsumerInternal) session.createConsumer(ADDRESS);
   session.start();

   for (int i = 0; i < numberOfMessages; i++)
   {
      message = cons.receive(5000);
      assertNotNull(message);
      message.acknowledge();
      if (i % 1000 == 0)
      {
         session.commit();
      }
   }
   session.commit();
   producer.close();
   session.close();

   queue = server.locateQueue(PagingTest.ADDRESS);

   assertEquals(0, getMessageCount(queue));

   timeout = System.currentTimeMillis() + 10000;
   while (timeout > System.currentTimeMillis() && queue.getPageSubscription().getPagingStore().isPaging())
   {
      Thread.sleep(100);
   }
   assertFalse(queue.getPageSubscription().getPagingStore().isPaging());

   server.stop();
}
/**
 * Consumes all 5000 paged messages across 10 XA transactions that are prepared
 * but never committed, restarts the server twice, verifies the prepared acks
 * keep the message count intact and hide the messages from other consumers,
 * then rolls every branch back and consumes everything in the original order.
 */
@Test
public void testPreparePersistent() throws Exception
{
   clearDataRecreateServerDirs();

   Configuration config = createDefaultConfig()
      .setJournalSyncNonTransactional(false);

   server = createServer(true,
                         config,
                         PagingTest.PAGE_SIZE,
                         PagingTest.PAGE_MAX,
                         new HashMap<String, AddressSettings>());

   server.start();

   final int numberOfMessages = 5000;

   final int numberOfTX = 10;

   final int messagesPerTX = numberOfMessages / numberOfTX;

   locator = createInVMNonHALocator();
   locator.setBlockOnNonDurableSend(true);
   locator.setBlockOnDurableSend(true);
   locator.setBlockOnAcknowledge(true);

   sf = createSessionFactory(locator);

   ClientSession session = sf.createSession(false, false, false);

   session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS, null, true);

   ClientProducer producer = session.createProducer(PagingTest.ADDRESS);

   ClientMessage message = null;

   byte[] body = new byte[MESSAGE_SIZE];

   ByteBuffer bb = ByteBuffer.wrap(body);

   for (int j = 1; j <= MESSAGE_SIZE; j++)
   {
      bb.put(getSamplebyte(j));
   }

   for (int i = 0; i < numberOfMessages; i++)
   {
      message = session.createMessage(true);

      ActiveMQBuffer bodyLocal = message.getBodyBuffer();

      bodyLocal.writeBytes(body);

      // "id" records the send order so consumption order can be asserted later.
      message.putIntProperty(new SimpleString("id"), i);

      producer.send(message);
      if (i % 1000 == 0)
      {
         session.commit();
      }
   }

   session.commit();
   session.close();
   session = null;

   sf.close();
   locator.close();

   // First restart: reload everything from the journal/page files.
   server.stop();

   server = createServer(true,
                         config,
                         PagingTest.PAGE_SIZE,
                         PagingTest.PAGE_MAX,
                         new HashMap<String, AddressSettings>());
   server.start();

   locator = createInVMNonHALocator();
   sf = createSessionFactory(locator);

   Queue queue = server.locateQueue(ADDRESS);

   assertEquals(numberOfMessages, getMessageCount(queue));

   LinkedList<Xid> xids = new LinkedList<Xid>();

   int msgReceived = 0;
   // Consume 500 messages per XA branch; prepare each branch, never commit.
   for (int i = 0; i < numberOfTX; i++)
   {
      ClientSession sessionConsumer = sf.createSession(true, false, false);
      Xid xid = newXID();
      xids.add(xid);
      sessionConsumer.start(xid, XAResource.TMNOFLAGS);
      sessionConsumer.start();
      ClientConsumer consumer = sessionConsumer.createConsumer(PagingTest.ADDRESS);
      for (int msgCount = 0; msgCount < messagesPerTX; msgCount++)
      {
         if (msgReceived == numberOfMessages)
         {
            break;
         }
         msgReceived++;
         ClientMessage msg = consumer.receive(10000);
         assertNotNull(msg);
         msg.acknowledge();
      }
      sessionConsumer.end(xid, XAResource.TMSUCCESS);
      sessionConsumer.prepare(xid);
      sessionConsumer.close();
   }

   // All messages are locked inside prepared branches: nothing is deliverable.
   ClientSession sessionCheck = sf.createSession(true, true);

   ClientConsumer consumer = sessionCheck.createConsumer(PagingTest.ADDRESS);

   assertNull(consumer.receiveImmediate());

   sessionCheck.close();

   assertEquals(numberOfMessages, getMessageCount(queue));

   sf.close();
   locator.close();

   // Second restart: prepared branches must survive recovery.
   server.stop();

   server = createServer(true,
                         config,
                         PagingTest.PAGE_SIZE,
                         PagingTest.PAGE_MAX,
                         new HashMap<String, AddressSettings>());
   server.start();

   waitForServer(server);

   queue = server.locateQueue(ADDRESS);

   locator = createInVMNonHALocator();
   sf = createSessionFactory(locator);

   session = sf.createSession(true, false, false);

   consumer = session.createConsumer(PagingTest.ADDRESS);

   session.start();

   assertEquals(numberOfMessages, getMessageCount(queue));

   // Still nothing deliverable while the branches stay prepared; drain any
   // unexpected deliveries before asserting.
   ClientMessage msg = consumer.receive(5000);
   if (msg != null)
   {
      while (true)
      {
         ClientMessage msg2 = consumer.receive(1000);
         if (msg2 == null)
         {
            break;
         }
      }
   }
   assertNull(msg);

   // Roll back every prepared branch (newest first) to release the messages.
   for (int i = xids.size() - 1; i >= 0; i--)
   {
      Xid xid = xids.get(i);
      session.rollback(xid);
   }

   xids.clear();

   session.close();

   session = sf.createSession(false, false, false);

   session.start();

   consumer = session.createConsumer(PagingTest.ADDRESS);

   for (int i = 0; i < numberOfMessages; i++)
   {
      msg = consumer.receive(1000);
      assertNotNull(msg);
      msg.acknowledge();

      // Order is preserved even after the rollbacks.
      assertEquals(i, msg.getIntProperty("id").intValue());

      if (i % 500 == 0)
      {
         session.commit();
      }
   }

   session.commit();

   session.close();

   sf.close();

   locator.close();

   assertEquals(0, getMessageCount(queue));

   waitForNotPaging(queue);
}
/**
 * With the address-full policy set to BLOCK and producer-side flow control
 * disabled (producer window = -1), sends 500 10k messages and verifies they
 * can all be consumed — i.e. blocking the producer does not deadlock or drop
 * messages.
 */
@Test
public void testSendOverBlockingNoFlowControl() throws Exception
{
   clearDataRecreateServerDirs();

   Configuration config = createDefaultConfig()
      .setJournalSyncNonTransactional(false);

   server = createServer(true,
                         config,
                         PagingTest.PAGE_SIZE,
                         PagingTest.PAGE_MAX,
                         AddressFullMessagePolicy.BLOCK,
                         new HashMap<String, AddressSettings>());

   server.start();

   final int biggerMessageSize = 10 * 1024;

   final int numberOfMessages = 500;

   locator = createInVMNonHALocator();
   locator.setBlockOnNonDurableSend(true);
   locator.setBlockOnDurableSend(true);
   locator.setBlockOnAcknowledge(true);
   // -1 disables producer-window flow control entirely.
   locator.setProducerWindowSize(-1);
   // Large min-large-message-size so these 10k bodies stay regular messages.
   locator.setMinLargeMessageSize(1024 * 1024);

   sf = createSessionFactory(locator);

   ClientSession session = sf.createSession(false, false, false);

   session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS, null, true);

   ClientProducer producer = session.createProducer(PagingTest.ADDRESS);

   ClientMessage message = null;

   byte[] body = new byte[biggerMessageSize];

   ByteBuffer bb = ByteBuffer.wrap(body);

   for (int j = 1; j <= biggerMessageSize; j++)
   {
      bb.put(getSamplebyte(j));
   }

   for (int i = 0; i < numberOfMessages; i++)
   {
      message = session.createMessage(true);

      ActiveMQBuffer bodyLocal = message.getBodyBuffer();

      bodyLocal.writeBytes(body);

      message.putIntProperty(new SimpleString("id"), i);

      producer.send(message);

      if (i % 10 == 0)
      {
         session.commit();
      }
   }

   session.commit();

   session.start();

   ClientConsumer cons = session.createConsumer(ADDRESS);

   for (int i = 0; i < numberOfMessages; i++)
   {
      message = cons.receive(5000);
      assertNotNull(message);
      message.acknowledge();

      if (i % 10 == 0)
      {
         session.commit();
      }
   }

   session.commit();
}
/**
 * Sends 1000 paged persistent messages, restarts the server, and verifies all
 * of them are available to {@code receiveImmediate()} (no blocking receive
 * needed) after reload, after which the queue is empty and leaves paging mode.
 */
@Test
public void testReceiveImmediate() throws Exception
{
   clearDataRecreateServerDirs();

   Configuration config = createDefaultConfig()
      .setJournalSyncNonTransactional(false);

   server = createServer(true,
                         config,
                         PagingTest.PAGE_SIZE,
                         PagingTest.PAGE_MAX,
                         new HashMap<String, AddressSettings>());

   server.start();

   final int numberOfMessages = 1000;

   locator = createInVMNonHALocator();
   locator.setBlockOnNonDurableSend(true);
   locator.setBlockOnDurableSend(true);
   locator.setBlockOnAcknowledge(true);

   sf = createSessionFactory(locator);

   ClientSession session = sf.createSession(false, false, false);

   session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS, null, true);

   ClientProducer producer = session.createProducer(PagingTest.ADDRESS);

   ClientMessage message = null;

   byte[] body = new byte[MESSAGE_SIZE];

   ByteBuffer bb = ByteBuffer.wrap(body);

   for (int j = 1; j <= MESSAGE_SIZE; j++)
   {
      bb.put(getSamplebyte(j));
   }

   for (int i = 0; i < numberOfMessages; i++)
   {
      message = session.createMessage(true);

      ActiveMQBuffer bodyLocal = message.getBodyBuffer();

      bodyLocal.writeBytes(body);

      message.putIntProperty(new SimpleString("id"), i);

      producer.send(message);
      if (i % 1000 == 0)
      {
         session.commit();
      }
   }

   session.commit();
   session.close();
   session = null;

   sf.close();
   locator.close();

   // Restart so all messages come back from the journal/page files.
   server.stop();

   server = createServer(true,
                         config,
                         PagingTest.PAGE_SIZE,
                         PagingTest.PAGE_MAX,
                         new HashMap<String, AddressSettings>());
   server.start();

   locator = createInVMNonHALocator();
   sf = createSessionFactory(locator);

   Queue queue = server.locateQueue(ADDRESS);

   assertEquals(numberOfMessages, getMessageCount(queue));

   int msgReceived = 0;
   ClientSession sessionConsumer = sf.createSession(false, false, false);
   sessionConsumer.start();
   ClientConsumer consumer = sessionConsumer.createConsumer(PagingTest.ADDRESS);
   for (int msgCount = 0; msgCount < numberOfMessages; msgCount++)
   {
      log.info("Received " + msgCount);
      msgReceived++;
      // receiveImmediate: every message must already be in the client buffer.
      ClientMessage msg = consumer.receiveImmediate();
      if (msg == null)
      {
         log.info("It's null. leaving now");
         sessionConsumer.commit();
         fail("Didn't receive a message");
      }
      msg.acknowledge();

      if (msgCount % 5 == 0)
      {
         log.info("commit");
         sessionConsumer.commit();
      }
   }

   sessionConsumer.commit();

   sessionConsumer.close();

   sf.close();

   locator.close();

   assertEquals(0, getMessageCount(queue));

   long timeout = System.currentTimeMillis() + 5000;
   while (timeout > System.currentTimeMillis() && queue.getPageSubscription().getPagingStore().isPaging())
   {
      Thread.sleep(100);
   }
   assertFalse(queue.getPageSubscription().getPagingStore().isPaging());
}
   /**
    * This test will remove all the page directories during a restart, simulating a crash scenario. The server should still start after this
    */
   @Test
   public void testDeletePhysicalPages() throws Exception
   {
      clearDataRecreateServerDirs();
      Configuration config = createDefaultConfig()
         .setPersistDeliveryCountBeforeDelivery(true);
      config.setJournalSyncNonTransactional(false);
      server = createServer(true,
                            config,
                            PagingTest.PAGE_SIZE,
                            PagingTest.PAGE_MAX,
                            new HashMap<String, AddressSettings>());
      server.start();
      final int numberOfMessages = 1000;
      locator = createInVMNonHALocator();
      locator.setBlockOnNonDurableSend(true);
      locator.setBlockOnDurableSend(true);
      locator.setBlockOnAcknowledge(true);
      sf = createSessionFactory(locator);
      ClientSession session = sf.createSession(false, false, false);
      session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS, null, true);
      ClientProducer producer = session.createProducer(PagingTest.ADDRESS);
      ClientMessage message = null;
      byte[] body = new byte[MESSAGE_SIZE];
      ByteBuffer bb = ByteBuffer.wrap(body);
      for (int j = 1; j <= MESSAGE_SIZE; j++)
      {
         bb.put(getSamplebyte(j));
      }
      // Phase 1: send durable messages until the address is paged to disk
      for (int i = 0; i < numberOfMessages; i++)
      {
         message = session.createMessage(true);
         ActiveMQBuffer bodyLocal = message.getBodyBuffer();
         bodyLocal.writeBytes(body);
         message.putIntProperty(new SimpleString("id"), i);
         producer.send(message);
         if (i % 1000 == 0)
         {
            session.commit();
         }
      }
      session.commit();
      session.close();
      session = null;
      sf.close();
      locator.close();
      server.stop();
      // Phase 2: clean restart, then consume everything back
      server = createServer(true,
                            config,
                            PagingTest.PAGE_SIZE,
                            PagingTest.PAGE_MAX,
                            new HashMap<String, AddressSettings>());
      server.start();
      locator = createInVMNonHALocator();
      sf = createSessionFactory(locator);
      Queue queue = server.locateQueue(ADDRESS);
      assertEquals(numberOfMessages, getMessageCount(queue));
      int msgReceived = 0;
      ClientSession sessionConsumer = sf.createSession(false, false, false);
      sessionConsumer.start();
      ClientConsumer consumer = sessionConsumer.createConsumer(PagingTest.ADDRESS);
      for (int msgCount = 0; msgCount < numberOfMessages; msgCount++)
      {
         log.info("Received " + msgCount);
         msgReceived++;
         ClientMessage msg = consumer.receiveImmediate();
         if (msg == null)
         {
            log.info("It's null. leaving now");
            sessionConsumer.commit();
            fail("Didn't receive a message");
         }
         msg.acknowledge();
         if (msgCount % 5 == 0)
         {
            log.info("commit");
            sessionConsumer.commit();
         }
      }
      sessionConsumer.commit();
      sessionConsumer.close();
      sf.close();
      locator.close();
      assertEquals(0, getMessageCount(queue));
      // Wait for the asynchronous page cleanup before asserting we left page mode
      long timeout = System.currentTimeMillis() + 5000;
      while (timeout > System.currentTimeMillis() && queue.getPageSubscription().getPagingStore().isPaging())
      {
         Thread.sleep(100);
      }
      assertFalse(queue.getPageSubscription().getPagingStore().isPaging());
      server.stop();
      // Deleting the paging data. Simulating a failure
      // a dumb user, or anything that will remove the data
      deleteDirectory(new File(getPageDir()));
      // Phase 3: the server must still start with the page directory gone
      server = createServer(true,
                            config,
                            PagingTest.PAGE_SIZE,
                            PagingTest.PAGE_MAX,
                            new HashMap<String, AddressSettings>());
      server.start();
      locator = createInVMNonHALocator();
      locator.setBlockOnNonDurableSend(true);
      locator.setBlockOnDurableSend(true);
      locator.setBlockOnAcknowledge(true);
      sf = createSessionFactory(locator);
      queue = server.locateQueue(ADDRESS);
      sf = createSessionFactory(locator);
      session = sf.createSession(false, false, false);
      producer = session.createProducer(PagingTest.ADDRESS);
      // Phase 4: resend a full batch after the simulated crash
      for (int i = 0; i < numberOfMessages; i++)
      {
         message = session.createMessage(true);
         ActiveMQBuffer bodyLocal = message.getBodyBuffer();
         bodyLocal.writeBytes(body);
         message.putIntProperty(new SimpleString("id"), i);
         producer.send(message);
         if (i % 1000 == 0)
         {
            session.commit();
         }
      }
      session.commit();
      server.stop();
      // Phase 5: restart once more and verify the new batch is fully consumable
      server = createServer(true,
                            config,
                            PagingTest.PAGE_SIZE,
                            PagingTest.PAGE_MAX,
                            new HashMap<String, AddressSettings>());
      server.start();
      locator = createInVMNonHALocator();
      sf = createSessionFactory(locator);
      queue = server.locateQueue(ADDRESS);
      // assertEquals(numberOfMessages, getMessageCount(queue));
      msgReceived = 0;
      sessionConsumer = sf.createSession(false, false, false);
      sessionConsumer.start();
      consumer = sessionConsumer.createConsumer(PagingTest.ADDRESS);
      for (int msgCount = 0; msgCount < numberOfMessages; msgCount++)
      {
         log.info("Received " + msgCount);
         msgReceived++;
         ClientMessage msg = consumer.receiveImmediate();
         if (msg == null)
         {
            log.info("It's null. leaving now");
            sessionConsumer.commit();
            fail("Didn't receive a message");
         }
         msg.acknowledge();
         if (msgCount % 5 == 0)
         {
            log.info("commit");
            sessionConsumer.commit();
         }
      }
      sessionConsumer.commit();
      sessionConsumer.close();
   }
   /**
    * Simulates a lost transaction: sends paged messages across two queues, then — with
    * the server stopped — opens the journal directly and deletes every add record
    * (keeping only the page-cursor counter/complete records). After restart it acks
    * every paged position through the storage manager and verifies both queues come up
    * empty and the page files are cleaned out.
    */
   @Test
   public void testMissingTXEverythingAcked() throws Exception
   {
      clearDataRecreateServerDirs();
      Configuration config = createDefaultConfig()
         .setJournalSyncNonTransactional(false);
      server = createServer(true,
                            config,
                            PagingTest.PAGE_SIZE,
                            PagingTest.PAGE_MAX,
                            new HashMap<String, AddressSettings>());
      server.start();
      final int numberOfMessages = 5000;
      final int numberOfTX = 10;
      final int messagesPerTX = numberOfMessages / numberOfTX;
      try
      {
         locator = createInVMNonHALocator();
         locator.setBlockOnNonDurableSend(true);
         locator.setBlockOnDurableSend(true);
         locator.setBlockOnAcknowledge(true);
         sf = createSessionFactory(locator);
         ClientSession session = sf.createSession(false, false, false);
         session.createQueue(ADDRESS.toString(), "q1", true);
         session.createQueue(ADDRESS.toString(), "q2", true);
         ClientProducer producer = session.createProducer(PagingTest.ADDRESS);
         ClientMessage message = null;
         byte[] body = new byte[MESSAGE_SIZE];
         ByteBuffer bb = ByteBuffer.wrap(body);
         for (int j = 1; j <= MESSAGE_SIZE; j++)
         {
            bb.put(getSamplebyte(j));
         }
         // Send in numberOfTX transactions so the journal contains multiple commits
         for (int i = 0; i < numberOfMessages; i++)
         {
            message = session.createMessage(true);
            ActiveMQBuffer bodyLocal = message.getBodyBuffer();
            bodyLocal.writeBytes(body);
            message.putIntProperty(new SimpleString("id"), i);
            producer.send(message);
            if (i % messagesPerTX == 0)
            {
               session.commit();
            }
         }
         session.commit();
         session.close();
      }
      finally
      {
         try
         {
            server.stop();
         }
         catch (Throwable ignored)
         {
         }
      }
      // With the server down, load the journal directly and delete all add records,
      // preserving only the page-cursor bookkeeping records
      ArrayList<RecordInfo> records = new ArrayList<RecordInfo>();
      List<PreparedTransactionInfo> list = new ArrayList<PreparedTransactionInfo>();
      JournalImpl jrn = new JournalImpl(config.getJournalFileSize(),
                                        2,
                                        0,
                                        0,
                                        new NIOSequentialFileFactory(getJournalDir()),
                                        "activemq-data",
                                        "amq",
                                        1);
      jrn.start();
      jrn.load(records, list, null);
      // Delete everything from the journal
      for (RecordInfo info : records)
      {
         if (!info.isUpdate && info.getUserRecordType() != JournalRecordIds.PAGE_CURSOR_COUNTER_VALUE &&
            info.getUserRecordType() != JournalRecordIds.PAGE_CURSOR_COUNTER_INC &&
            info.getUserRecordType() != JournalRecordIds.PAGE_CURSOR_COMPLETE)
         {
            jrn.appendDeleteRecord(info.id, false);
         }
      }
      jrn.stop();
      server = createServer(true,
                            config,
                            PagingTest.PAGE_SIZE,
                            PagingTest.PAGE_MAX,
                            new HashMap<String, AddressSettings>());
      server.start();
      // Read the current page and ack every position on both queues via the storage manager
      Page pg = server.getPagingManager().getPageStore(ADDRESS).getCurrentPage();
      pg.open();
      List<PagedMessage> msgs = pg.read(server.getStorageManager());
      assertTrue(msgs.size() > 0);
      pg.close();
      long[] queues = new long[]{
         server.locateQueue(new SimpleString("q1")).getID(),
         server.locateQueue(new SimpleString("q2")).getID()};
      for (long q : queues)
      {
         for (int i = 0; i < msgs.size(); i++)
         {
            server.getStorageManager().storeCursorAcknowledge(q, new PagePositionImpl(pg.getPageId(), i));
         }
      }
      server.stop();
      locator = createInVMNonHALocator();
      locator.setBlockOnNonDurableSend(true);
      locator.setBlockOnDurableSend(true);
      locator.setBlockOnAcknowledge(true);
      server = createServer(true,
                            config,
                            PagingTest.PAGE_SIZE,
                            PagingTest.PAGE_MAX,
                            new HashMap<String, AddressSettings>());
      server.start();
      // After restart, both consumers must see no messages at all
      ClientSessionFactory csf = createSessionFactory(locator);
      ClientSession sess = csf.createSession();
      sess.start();
      ClientConsumer cons = sess.createConsumer("q1");
      assertNull(cons.receiveImmediate());
      ClientConsumer cons2 = sess.createConsumer("q2");
      assertNull(cons2.receiveImmediate());
      Queue q1 = server.locateQueue(new SimpleString("q1"));
      Queue q2 = server.locateQueue(new SimpleString("q2"));
      System.err.println("isComplete = " + q1.getPageSubscription().isComplete(619) + " on queue " + q1.getID());
      System.err.println("isComplete = " + q2.getPageSubscription().isComplete(619) + " on queue " + q2.getID());
      // Force the cursor cleanup so the fully-acked page can be removed
      q1.getPageSubscription().cleanupEntries(false);
      q2.getPageSubscription().cleanupEntries(false);
      PageCursorProvider provider = q1.getPageSubscription().getPagingStore().getCursorProvider();
      provider.cleanup();
      waitForNotPaging(q1);
      sess.close();
   }
   /**
    * Variant of {@link #testMissingTXEverythingAcked()} with a small message set: sends
    * six messages to an address with two queues while explicitly paging, consumes the
    * first half from each queue, restarts the server, and verifies the remaining half
    * is delivered in order with nothing left over.
    */
   @Test
   public void testMissingTXEverythingAcked2() throws Exception
   {
      clearDataRecreateServerDirs();
      Configuration config = createDefaultConfig()
         .setJournalSyncNonTransactional(false);
      server = createServer(true,
                            config,
                            PagingTest.PAGE_SIZE,
                            PagingTest.PAGE_MAX,
                            new HashMap<String, AddressSettings>());
      server.start();
      final int numberOfMessages = 6;
      final int numberOfTX = 2;
      final int messagesPerTX = numberOfMessages / numberOfTX;
      try
      {
         locator = createInVMNonHALocator();
         locator.setBlockOnNonDurableSend(true);
         locator.setBlockOnDurableSend(true);
         locator.setBlockOnAcknowledge(true);
         sf = createSessionFactory(locator);
         ClientSession session = sf.createSession(false, false, false);
         session.createQueue(ADDRESS.toString(), "q1", true);
         session.createQueue(ADDRESS.toString(), "q2", true);
         // Force page mode before sending anything
         server.getPagingManager().getPageStore(ADDRESS).startPaging();
         ClientProducer producer = session.createProducer(PagingTest.ADDRESS);
         ClientMessage message = null;
         byte[] body = new byte[MESSAGE_SIZE];
         ByteBuffer bb = ByteBuffer.wrap(body);
         for (int j = 1; j <= MESSAGE_SIZE; j++)
         {
            bb.put(getSamplebyte(j));
         }
         for (int i = 0; i < numberOfMessages; i++)
         {
            message = session.createMessage(true);
            ActiveMQBuffer bodyLocal = message.getBodyBuffer();
            bodyLocal.writeBytes(body);
            message.putStringProperty("id", "str-" + i);
            producer.send(message);
            if ((i + 1) % messagesPerTX == 0)
            {
               session.commit();
            }
         }
         session.commit();
         session.start();
         // Consume the first three messages ("str-0".."str-2") from each queue
         for (int i = 1; i <= 2; i++)
         {
            ClientConsumer cons = session.createConsumer("q" + i);
            for (int j = 0; j < 3; j++)
            {
               ClientMessage msg = cons.receive(5000);
               assertNotNull(msg);
               assertEquals("str-" + j, msg.getStringProperty("id"));
               msg.acknowledge();
            }
            session.commit();
         }
         session.close();
      }
      finally
      {
         locator.close();
         try
         {
            server.stop();
         }
         catch (Throwable ignored)
         {
         }
      }
      server = createServer(true,
                            config,
                            PagingTest.PAGE_SIZE,
                            PagingTest.PAGE_MAX,
                            new HashMap<String, AddressSettings>());
      server.start();
      locator = createInVMNonHALocator();
      locator.setBlockOnNonDurableSend(true);
      locator.setBlockOnDurableSend(true);
      locator.setBlockOnAcknowledge(true);
      ClientSessionFactory csf = createSessionFactory(locator);
      ClientSession session = csf.createSession();
      session.start();
      // After restart only "str-3".."str-5" must remain on each queue, in order
      for (int i = 1; i <= 2; i++)
      {
         ClientConsumer cons = session.createConsumer("q" + i);
         for (int j = 3; j < 6; j++)
         {
            ClientMessage msg = cons.receive(5000);
            assertNotNull(msg);
            assertEquals("str-" + j, msg.getStringProperty("id"));
            msg.acknowledge();
         }
         session.commit();
         assertNull(cons.receive(500));
      }
      session.close();
      // Give the asynchronous cleanup up to 5s to take the store out of page mode
      long timeout = System.currentTimeMillis() + 5000;
      while (System.currentTimeMillis() < timeout && server.getPagingManager().getPageStore(ADDRESS).isPaging())
      {
         Thread.sleep(100);
      }
   }
   /**
    * Pages an address that has one real queue plus a second queue whose filter never
    * matches (no routing). Consumes everything from the real queue in order, then
    * verifies the page store still cleans up and leaves page mode even though the
    * filtered queue never received anything.
    */
   @Test
   public void testTwoQueuesOneNoRouting() throws Exception
   {
      boolean persistentMessages = true;
      clearDataRecreateServerDirs();
      Configuration config = createDefaultConfig()
         .setJournalSyncNonTransactional(false);
      server = createServer(true,
                            config,
                            PagingTest.PAGE_SIZE,
                            PagingTest.PAGE_MAX,
                            new HashMap<String, AddressSettings>());
      server.start();
      final int numberOfMessages = 1000;
      locator = createInVMNonHALocator();
      locator.setBlockOnNonDurableSend(true);
      locator.setBlockOnDurableSend(true);
      locator.setBlockOnAcknowledge(true);
      sf = createSessionFactory(locator);
      ClientSession session = sf.createSession(false, false, false);
      session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS, null, true);
      // Second queue uses the generic ignored filter, so nothing ever routes to it
      session.createQueue(PagingTest.ADDRESS,
                          PagingTest.ADDRESS.concat("-invalid"),
                          new SimpleString(ActiveMQServerImpl.GENERIC_IGNORED_FILTER),
                          true);
      ClientProducer producer = session.createProducer(PagingTest.ADDRESS);
      ClientMessage message = null;
      byte[] body = new byte[MESSAGE_SIZE];
      for (int i = 0; i < numberOfMessages; i++)
      {
         message = session.createMessage(persistentMessages);
         ActiveMQBuffer bodyLocal = message.getBodyBuffer();
         bodyLocal.writeBytes(body);
         message.putIntProperty(new SimpleString("id"), i);
         producer.send(message);
         if (i % 1000 == 0)
         {
            session.commit();
         }
      }
      session.commit();
      session.start();
      ClientConsumer consumer = session.createConsumer(PagingTest.ADDRESS);
      // Drain the routed queue, verifying ordering by the "id" property
      for (int i = 0; i < numberOfMessages; i++)
      {
         message = consumer.receive(5000);
         assertNotNull(message);
         message.acknowledge();
         assertEquals(i, message.getIntProperty("id").intValue());
         if (i % 1000 == 0)
         {
            session.commit();
         }
      }
      session.commit();
      session.commit();
      session.commit();
      PagingStore store = server.getPagingManager().getPageStore(ADDRESS);
      store.getCursorProvider().cleanup();
      long timeout = System.currentTimeMillis() + 5000;
      while (store.isPaging() && timeout > System.currentTimeMillis())
      {
         Thread.sleep(100);
      }
      // It's async, so need to wait a bit for it happening
      assertFalse(server.getPagingManager().getPageStore(ADDRESS).isPaging());
   }
   /** Runs the shared send/receive paging scenario with durable messages. */
   @Test
   public void testSendReceivePagingPersistent() throws Exception
   {
      internaltestSendReceivePaging(true);
   }
   /** Runs the shared send/receive paging scenario with non-durable messages. */
   @Test
   public void testSendReceivePagingNonPersistent() throws Exception
   {
      internaltestSendReceivePaging(false);
   }
   /** Runs the multi-queue paging scenario using exclusive diverts to fan out. */
   @Test
   public void testWithDiverts() throws Exception
   {
      internalMultiQueuesTest(true);
   }
   /** Runs the multi-queue paging scenario using two queues bound to the same address. */
   @Test
   public void testWithMultiQueues() throws Exception
   {
      internalMultiQueuesTest(false);
   }
   /**
    * Shared scenario for {@link #testWithDiverts()} and {@link #testWithMultiQueues()}:
    * fans 3000 paged durable messages out to two destinations (either via two exclusive
    * diverts or via two queues on the same address), restarts the server, then drains
    * both destinations from concurrent consumer threads while background threads hammer
    * the queue counters. Finally verifies no errors occurred and all page transactions
    * were cleaned up.
    *
    * @param divert true to fan out via diverts, false via two queues on one address
    */
   public void internalMultiQueuesTest(final boolean divert) throws Exception
   {
      clearDataRecreateServerDirs();
      Configuration config = createDefaultConfig()
         .setJournalSyncNonTransactional(false);
      server = createServer(true,
                            config,
                            PagingTest.PAGE_SIZE,
                            PagingTest.PAGE_MAX,
                            new HashMap<String, AddressSettings>());
      if (divert)
      {
         DivertConfiguration divert1 = new DivertConfiguration()
            .setName("dv1")
            .setRoutingName("nm1")
            .setAddress(PagingTest.ADDRESS.toString())
            .setForwardingAddress(PagingTest.ADDRESS.toString() + "-1")
            .setExclusive(true);
         DivertConfiguration divert2 = new DivertConfiguration()
            .setName("dv2")
            .setRoutingName("nm2")
            .setAddress(PagingTest.ADDRESS.toString())
            .setForwardingAddress(PagingTest.ADDRESS.toString() + "-2")
            .setExclusive(true);
         ArrayList<DivertConfiguration> divertList = new ArrayList<DivertConfiguration>();
         divertList.add(divert1);
         divertList.add(divert2);
         config.setDivertConfigurations(divertList);
      }
      server.start();
      final int numberOfMessages = 3000;
      final byte[] body = new byte[MESSAGE_SIZE];
      ByteBuffer bb = ByteBuffer.wrap(body);
      for (int j = 1; j <= MESSAGE_SIZE; j++)
      {
         bb.put(getSamplebyte(j));
      }
      final AtomicBoolean running = new AtomicBoolean(true);
      // Background thread that repeatedly polls a queue's counters while the
      // consumers run, imitating aggressive monitoring by users
      class TCount extends Thread
      {
         Queue queue;
         TCount(Queue queue)
         {
            this.queue = queue;
         }
         @Override
         public void run()
         {
            try
            {
               while (running.get())
               {
                  // this will be overusing what some users do. flush / getCount
                  getMessagesAdded(queue);
                  getMessageCount(queue);
                  Thread.sleep(10);
               }
            }
            catch (InterruptedException e)
            {
               log.info("Thread interrupted");
            }
         }
      }
      TCount tcount1 = null;
      TCount tcount2 = null;
      try
      {
         // Phase 1: create both destinations and send all messages
         {
            locator = createInVMNonHALocator();
            locator.setBlockOnNonDurableSend(true);
            locator.setBlockOnDurableSend(true);
            locator.setBlockOnAcknowledge(true);
            sf = createSessionFactory(locator);
            ClientSession session = sf.createSession(false, false, false);
            if (divert)
            {
               session.createQueue(PagingTest.ADDRESS + "-1", PagingTest.ADDRESS + "-1", null, true);
               session.createQueue(PagingTest.ADDRESS + "-2", PagingTest.ADDRESS + "-2", null, true);
            }
            else
            {
               session.createQueue(PagingTest.ADDRESS.toString(), PagingTest.ADDRESS + "-1", null, true);
               session.createQueue(PagingTest.ADDRESS.toString(), PagingTest.ADDRESS + "-2", null, true);
            }
            ClientProducer producer = session.createProducer(PagingTest.ADDRESS);
            ClientMessage message = null;
            for (int i = 0; i < numberOfMessages; i++)
            {
               if (i % 500 == 0)
               {
                  log.info("Sent " + i + " messages");
                  session.commit();
               }
               message = session.createMessage(true);
               ActiveMQBuffer bodyLocal = message.getBodyBuffer();
               bodyLocal.writeBytes(body);
               message.putIntProperty(new SimpleString("id"), i);
               producer.send(message);
            }
            session.commit();
            session.close();
            server.stop();
            sf.close();
            locator.close();
         }
         // Phase 2: restart and locate both queues reloaded from storage
         server = createServer(true,
                               config,
                               PagingTest.PAGE_SIZE,
                               PagingTest.PAGE_MAX,
                               new HashMap<String, AddressSettings>());
         server.start();
         Queue queue1 = server.locateQueue(PagingTest.ADDRESS.concat("-1"));
         Queue queue2 = server.locateQueue(PagingTest.ADDRESS.concat("-2"));
         assertNotNull(queue1);
         assertNotNull(queue2);
         assertNotSame(queue1, queue2);
         tcount1 = new TCount(queue1);
         tcount2 = new TCount(queue2);
         tcount1.start();
         tcount2.start();
         locator = createInVMNonHALocator();
         final ClientSessionFactory sf2 = createSessionFactory(locator);
         final AtomicInteger errors = new AtomicInteger(0);
         // Phase 3: drain each destination on its own thread, verifying order and body
         Thread[] threads = new Thread[2];
         for (int start = 1; start <= 2; start++)
         {
            final String addressToSubscribe = PagingTest.ADDRESS + "-" + start;
            threads[start - 1] = new Thread()
            {
               @Override
               public void run()
               {
                  try
                  {
                     ClientSession session = sf2.createSession(null, null, false, true, true, false, 0);
                     ClientConsumer consumer = session.createConsumer(addressToSubscribe);
                     session.start();
                     for (int i = 0; i < numberOfMessages; i++)
                     {
                        ClientMessage message2 = consumer.receive(PagingTest.RECEIVE_TIMEOUT);
                        Assert.assertNotNull(message2);
                        Assert.assertEquals(i, message2.getIntProperty("id").intValue());
                        message2.acknowledge();
                        Assert.assertNotNull(message2);
                        if (i % 100 == 0)
                        {
                           if (i % 5000 == 0)
                           {
                              log.info(addressToSubscribe + " consumed " + i + " messages");
                           }
                           session.commit();
                        }
                        try
                        {
                           assertBodiesEqual(body, message2.getBodyBuffer());
                        }
                        catch (AssertionError e)
                        {
                           PagingTest.log.info("Expected buffer:" + UnitTestCase.dumbBytesHex(body, 40));
                           PagingTest.log.info("Arriving buffer:" + UnitTestCase.dumbBytesHex(message2.getBodyBuffer()
                                                                                                 .toByteBuffer()
                                                                                                 .array(), 40));
                           throw e;
                        }
                     }
                     session.commit();
                     consumer.close();
                     session.close();
                  }
                  catch (Throwable e)
                  {
                     e.printStackTrace();
                     errors.incrementAndGet();
                  }
               }
            };
         }
         for (int i = 0; i < 2; i++)
         {
            threads[i].start();
         }
         for (int i = 0; i < 2; i++)
         {
            threads[i].join();
         }
         sf2.close();
         locator.close();
         assertEquals(0, errors.get());
         // Page transaction removal is asynchronous; poll up to ~10s before asserting
         for (int i = 0; i < 20 && server.getPagingManager().getTransactions().size() != 0; i++)
         {
            if (server.getPagingManager().getTransactions().size() != 0)
            {
               // The delete may be asynchronous, giving some time case it eventually happen asynchronously
               Thread.sleep(500);
            }
         }
         assertEquals(0, server.getPagingManager().getTransactions().size());
      }
      finally
      {
         // Always stop the counter threads and the server, even on failure
         running.set(false);
         if (tcount1 != null)
         {
            tcount1.interrupt();
            tcount1.join();
         }
         if (tcount2 != null)
         {
            tcount2.interrupt();
            tcount2.join();
         }
         try
         {
            server.stop();
         }
         catch (Throwable ignored)
         {
         }
      }
   }
   /**
    * Pages an address bound to one durable and one non-durable queue, restarts the
    * server (which drops the non-durable queue's messages), then drains the durable
    * queue from a separate thread and verifies the page store and page transactions
    * are fully cleaned up afterwards.
    */
   @Test
   public void testMultiQueuesNonPersistentAndPersistent() throws Exception
   {
      clearDataRecreateServerDirs();
      Configuration config = createDefaultConfig()
         .setJournalSyncNonTransactional(false);
      server = createServer(true,
                            config,
                            PagingTest.PAGE_SIZE,
                            PagingTest.PAGE_MAX,
                            new HashMap<String, AddressSettings>());
      server.start();
      final int numberOfMessages = 3000;
      final byte[] body = new byte[MESSAGE_SIZE];
      ByteBuffer bb = ByteBuffer.wrap(body);
      for (int j = 1; j <= MESSAGE_SIZE; j++)
      {
         bb.put(getSamplebyte(j));
      }
      // Phase 1: one durable ("-1") and one non-durable ("-2") queue; send everything
      {
         locator = createInVMNonHALocator();
         locator.setBlockOnNonDurableSend(true);
         locator.setBlockOnDurableSend(true);
         locator.setBlockOnAcknowledge(true);
         sf = createSessionFactory(locator);
         ClientSession session = sf.createSession(false, false, false);
         session.createQueue(PagingTest.ADDRESS.toString(), PagingTest.ADDRESS + "-1", null, true);
         session.createQueue(PagingTest.ADDRESS.toString(), PagingTest.ADDRESS + "-2", null, false);
         ClientProducer producer = session.createProducer(PagingTest.ADDRESS);
         ClientMessage message = null;
         for (int i = 0; i < numberOfMessages; i++)
         {
            if (i % 500 == 0)
            {
               session.commit();
            }
            message = session.createMessage(true);
            ActiveMQBuffer bodyLocal = message.getBodyBuffer();
            bodyLocal.writeBytes(body);
            message.putIntProperty(new SimpleString("id"), i);
            producer.send(message);
         }
         session.commit();
         session.close();
         server.stop();
         sf.close();
         locator.close();
      }
      // Phase 2: restart; the non-durable queue's content does not survive
      server = createServer(true,
                            config,
                            PagingTest.PAGE_SIZE,
                            PagingTest.PAGE_MAX,
                            new HashMap<String, AddressSettings>());
      server.start();
      ServerLocator locator1 = createInVMNonHALocator();
      final ClientSessionFactory sf2 = locator1.createSessionFactory();
      final AtomicInteger errors = new AtomicInteger(0);
      // Phase 3: drain the durable queue on a dedicated thread, checking order and body
      Thread t = new Thread()
      {
         @Override
         public void run()
         {
            try
            {
               ClientSession session = sf2.createSession(null, null, false, true, true, false, 0);
               ClientConsumer consumer = session.createConsumer(PagingTest.ADDRESS + "-1");
               session.start();
               for (int i = 0; i < numberOfMessages; i++)
               {
                  ClientMessage message2 = consumer.receive(PagingTest.RECEIVE_TIMEOUT);
                  Assert.assertNotNull(message2);
                  Assert.assertEquals(i, message2.getIntProperty("id").intValue());
                  message2.acknowledge();
                  Assert.assertNotNull(message2);
                  if (i % 1000 == 0)
                  {
                     session.commit();
                  }
                  try
                  {
                     assertBodiesEqual(body, message2.getBodyBuffer());
                  }
                  catch (AssertionError e)
                  {
                     PagingTest.log.info("Expected buffer:" + UnitTestCase.dumbBytesHex(body, 40));
                     PagingTest.log.info("Arriving buffer:" + UnitTestCase.dumbBytesHex(message2.getBodyBuffer()
                                                                                          .toByteBuffer()
                                                                                          .array(), 40));
                     throw e;
                  }
               }
               session.commit();
               consumer.close();
               session.close();
            }
            catch (Throwable e)
            {
               e.printStackTrace();
               errors.incrementAndGet();
            }
         }
      };
      t.start();
      t.join();
      assertEquals(0, errors.get());
      // Cleanup is asynchronous: poll for the store leaving page mode...
      for (int i = 0; i < 20 && server.getPagingManager().getPageStore(ADDRESS).isPaging(); i++)
      {
         // The delete may be asynchronous, giving some time case it eventually happen asynchronously
         Thread.sleep(500);
      }
      assertFalse(server.getPagingManager().getPageStore(ADDRESS).isPaging());
      // ...and for all page transactions to be removed
      for (int i = 0; i < 20 && server.getPagingManager().getTransactions().size() != 0; i++)
      {
         // The delete may be asynchronous, giving some time case it eventually happen asynchronously
         Thread.sleep(500);
      }
      assertEquals(0, server.getPagingManager().getTransactions().size());
   }
   /**
    * Shared body for the send/receive paging tests: forces the address into page mode,
    * sends 1000 messages (durable or not per the flag), restarts the server, then
    * receives every message back, verifying id order, body size and body contents.
    *
    * @param persistentMessages whether the sent messages are durable
    */
   private void internaltestSendReceivePaging(final boolean persistentMessages) throws Exception
   {
      clearDataRecreateServerDirs();
      Configuration config = createDefaultConfig()
         .setJournalSyncNonTransactional(false);
      server = createServer(true,
                            config,
                            PagingTest.PAGE_SIZE,
                            PagingTest.PAGE_MAX,
                            new HashMap<String, AddressSettings>());
      server.start();
      final int numberOfIntegers = 256;
      final int numberOfMessages = 1000;
      locator = createInVMNonHALocator();
      locator.setBlockOnNonDurableSend(true);
      locator.setBlockOnDurableSend(true);
      locator.setBlockOnAcknowledge(true);
      sf = createSessionFactory(locator);
      ClientSession session = sf.createSession(null, null, false, true, true, false, 0);
      session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS, null, true);
      // Force page mode up front so every message goes through paging
      Queue queue = server.locateQueue(ADDRESS);
      queue.getPageSubscription().getPagingStore().startPaging();
      ClientProducer producer = session.createProducer(PagingTest.ADDRESS);
      ClientMessage message = null;
      // Body is numberOfIntegers 4-byte ints: 1, 2, ..., numberOfIntegers
      byte[] body = new byte[numberOfIntegers * 4];
      ByteBuffer bb = ByteBuffer.wrap(body);
      for (int j = 1; j <= numberOfIntegers; j++)
      {
         bb.putInt(j);
      }
      for (int i = 0; i < numberOfMessages; i++)
      {
         message = session.createMessage(persistentMessages);
         ActiveMQBuffer bodyLocal = message.getBodyBuffer();
         bodyLocal.writeBytes(body);
         message.putIntProperty(new SimpleString("id"), i);
         producer.send(message);
      }
      session.close();
      sf.close();
      locator.close();
      server.stop();
      // Restart before consuming so messages are reloaded from the page files
      server = createServer(true,
                            config,
                            PagingTest.PAGE_SIZE,
                            PagingTest.PAGE_MAX,
                            new HashMap<String, AddressSettings>());
      server.start();
      locator = createInVMNonHALocator();
      sf = createSessionFactory(locator);
      session = sf.createSession(null, null, false, true, true, false, 0);
      ClientConsumer consumer = session.createConsumer(PagingTest.ADDRESS);
      session.start();
      for (int i = 0; i < numberOfMessages; i++)
      {
         ClientMessage message2 = consumer.receive(PagingTest.RECEIVE_TIMEOUT);
         Assert.assertNotNull(message2);
         Assert.assertEquals(i, message2.getIntProperty("id").intValue());
         assertEquals(body.length, message2.getBodySize());
         message2.acknowledge();
         Assert.assertNotNull(message2);
         if (i % 1000 == 0)
         {
            session.commit();
         }
         try
         {
            assertBodiesEqual(body, message2.getBodyBuffer());
         }
         catch (AssertionError e)
         {
            PagingTest.log.info("Expected buffer:" + UnitTestCase.dumbBytesHex(body, 40));
            PagingTest.log.info("Arriving buffer:" + UnitTestCase.dumbBytesHex(message2.getBodyBuffer()
                                                                                  .toByteBuffer()
                                                                                  .array(), 40));
            throw e;
         }
      }
      consumer.close();
      session.close();
   }
private void assertBodiesEqual(final byte[] body, final ActiveMQBuffer buffer)
{
byte[] other = new byte[body.length];
buffer.readBytes(other);
UnitTestCase.assertEqualsByteArrays(body, other);
}
/**
* - Make a destination in page mode
* - Add stuff to a transaction
* - Consume the entire destination (not in page mode any more)
* - Add stuff to a transaction again
* - Check order
*/
@Test
public void testDepageDuringTransaction() throws Exception
{
clearDataRecreateServerDirs();
Configuration config = createDefaultConfig();
server = createServer(true,
config,
PagingTest.PAGE_SIZE,
PagingTest.PAGE_MAX,
new HashMap<String, AddressSettings>());
server.start();
locator = createInVMNonHALocator();
locator.setBlockOnNonDurableSend(true);
locator.setBlockOnDurableSend(true);
locator.setBlockOnAcknowledge(true);
sf = createSessionFactory(locator);
ClientSession session = sf.createSession(null, null, false, true, true, false, 0);
session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS, null, true);
ClientProducer producer = session.createProducer(PagingTest.ADDRESS);
byte[] body = new byte[MESSAGE_SIZE];
// ActiveMQBuffer bodyLocal = ActiveMQChannelBuffers.buffer(DataConstants.SIZE_INT * numberOfIntegers);
ClientMessage message = null;
int numberOfMessages = 0;
while (true)
{
message = session.createMessage(true);
message.getBodyBuffer().writeBytes(body);
// Stop sending message as soon as we start paging
if (server.getPagingManager().getPageStore(PagingTest.ADDRESS).isPaging())
{
break;
}
numberOfMessages++;
producer.send(message);
}
Assert.assertTrue(server.getPagingManager().getPageStore(PagingTest.ADDRESS).isPaging());
session.start();
ClientSession sessionTransacted = sf.createSession(null, null, false, false, false, false, 0);
ClientProducer producerTransacted = sessionTransacted.createProducer(PagingTest.ADDRESS);
for (int i = 0; i < 10; i++)
{
message = session.createMessage(true);
message.getBodyBuffer().writeBytes(body);
message.putIntProperty(new SimpleString("id"), i);
// Consume messages to force an eventual out of order delivery
if (i == 5)
{
ClientConsumer consumer = session.createConsumer(PagingTest.ADDRESS);
for (int j = 0; j < numberOfMessages; j++)
{
ClientMessage msg = consumer.receive(PagingTest.RECEIVE_TIMEOUT);
msg.acknowledge();
Assert.assertNotNull(msg);
}
Assert.assertNull(consumer.receiveImmediate());
consumer.close();
}
Integer messageID = (Integer) message.getObjectProperty(new SimpleString("id"));
Assert.assertNotNull(messageID);
Assert.assertEquals(messageID.intValue(), i);
producerTransacted.send(message);
}
ClientConsumer consumer = session.createConsumer(PagingTest.ADDRESS);
Assert.assertNull(consumer.receiveImmediate());
sessionTransacted.commit();
sessionTransacted.close();
for (int i = 0; i < 10; i++)
{
message = consumer.receive(PagingTest.RECEIVE_TIMEOUT);
Assert.assertNotNull(message);
Integer messageID = (Integer) message.getObjectProperty(new SimpleString("id"));
Assert.assertNotNull(messageID);
Assert.assertEquals("message received out of order", messageID.intValue(), i);
message.acknowledge();
}
Assert.assertNull(consumer.receiveImmediate());
consumer.close();
session.close();
}
/**
* - Make a destination in page mode
* - Add stuff to a transaction
* - Consume the entire destination (not in page mode any more)
* - Add stuff to a transaction again
* - Check order
* <p/>
* Test under discussion at : http://community.jboss.org/thread/154061?tstart=0
*/
@Test
public void testDepageDuringTransaction2() throws Exception
{
boolean IS_DURABLE_MESSAGE = true;
clearDataRecreateServerDirs();
Configuration config = createDefaultConfig();
server = createServer(true,
config,
PagingTest.PAGE_SIZE,
PagingTest.PAGE_MAX,
new HashMap<String, AddressSettings>());
server.start();
locator = createInVMNonHALocator();
locator.setBlockOnNonDurableSend(true);
locator.setBlockOnDurableSend(true);
locator.setBlockOnAcknowledge(true);
sf = createSessionFactory(locator);
byte[] body = new byte[MESSAGE_SIZE];
ClientSession sessionTransacted = sf.createSession(null, null, false, false, false, false, 0);
ClientProducer producerTransacted = sessionTransacted.createProducer(PagingTest.ADDRESS);
ClientSession session = sf.createSession(null, null, false, true, true, false, 0);
session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS, null, true);
ClientMessage firstMessage = sessionTransacted.createMessage(IS_DURABLE_MESSAGE);
firstMessage.getBodyBuffer().writeBytes(body);
firstMessage.putIntProperty(new SimpleString("id"), 0);
producerTransacted.send(firstMessage);
ClientProducer producer = session.createProducer(PagingTest.ADDRESS);
ClientMessage message = null;
int numberOfMessages = 0;
while (true)
{
message = session.createMessage(IS_DURABLE_MESSAGE);
message.getBodyBuffer().writeBytes(body);
message.putIntProperty("id", numberOfMessages);
message.putBooleanProperty("new", false);
// Stop sending message as soon as we start paging
if (server.getPagingManager().getPageStore(PagingTest.ADDRESS).isPaging())
{
break;
}
numberOfMessages++;
producer.send(message);
}
Assert.assertTrue(server.getPagingManager().getPageStore(PagingTest.ADDRESS).isPaging());
session.start();
for (int i = 1; i < 10; i++)
{
message = session.createMessage(true);
message.getBodyBuffer().writeBytes(body);
message.putIntProperty(new SimpleString("id"), i);
// Consume messages to force an eventual out of order delivery
if (i == 5)
{
ClientConsumer consumer = session.createConsumer(PagingTest.ADDRESS);
for (int j = 0; j < numberOfMessages; j++)
{
ClientMessage msg = consumer.receive(PagingTest.RECEIVE_TIMEOUT);
msg.acknowledge();
assertEquals(j, msg.getIntProperty("id").intValue());
assertFalse(msg.getBooleanProperty("new"));
Assert.assertNotNull(msg);
}
ClientMessage msgReceived = consumer.receiveImmediate();
Assert.assertNull(msgReceived);
consumer.close();
}
Integer messageID = (Integer) message.getObjectProperty(new SimpleString("id"));
Assert.assertNotNull(messageID);
Assert.assertEquals(messageID.intValue(), i);
producerTransacted.send(message);
}
ClientConsumer consumer = session.createConsumer(PagingTest.ADDRESS);
Assert.assertNull(consumer.receiveImmediate());
sessionTransacted.commit();
sessionTransacted.close();
for (int i = 0; i < 10; i++)
{
message = consumer.receive(PagingTest.RECEIVE_TIMEOUT);
Assert.assertNotNull(message);
Integer messageID = (Integer) message.getObjectProperty(new SimpleString("id"));
// System.out.println(messageID);
Assert.assertNotNull(messageID);
Assert.assertEquals("message received out of order", i, messageID.intValue());
message.acknowledge();
}
Assert.assertNull(consumer.receiveImmediate());
consumer.close();
session.close();
}
@Test
public void testDepageDuringTransaction3() throws Exception
{
// Mixes a long-lived transacted producer with non-TX page fill/drain cycles,
// then checks that the transacted messages arrive in order once committed.
clearDataRecreateServerDirs();
Configuration config = createDefaultConfig();
server = createServer(true,
config,
PagingTest.PAGE_SIZE,
PagingTest.PAGE_MAX,
new HashMap<String, AddressSettings>());
server.start();
locator = createInVMNonHALocator();
locator.setBlockOnNonDurableSend(true);
locator.setBlockOnDurableSend(true);
locator.setBlockOnAcknowledge(true);
sf = createSessionFactory(locator);
byte[] body = new byte[MESSAGE_SIZE];
// Transacted session: its sends stay invisible until the commit() near the end.
ClientSession sessionTransacted = sf.createSession(null, null, false, false, false, false, 0);
ClientProducer producerTransacted = sessionTransacted.createProducer(PagingTest.ADDRESS);
ClientSession sessionNonTX = sf.createSession(true, true, 0);
sessionNonTX.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS, null, true);
ClientProducer producerNonTransacted = sessionNonTX.createProducer(PagingTest.ADDRESS);
sessionNonTX.start();
for (int i = 0; i < 50; i++)
{
ClientMessage message = sessionNonTX.createMessage(true);
message.getBodyBuffer().writeBytes(body);
message.putIntProperty(new SimpleString("id"), i);
message.putStringProperty(new SimpleString("tst"), new SimpleString("i=" + i));
// Sent on the transacted producer: held back until the commit below.
producerTransacted.send(message);
if (i % 2 == 0)
{
// Even iterations: push enough non-TX filler to force the address into paging.
for (int j = 0; j < 20; j++)
{
ClientMessage msgSend = sessionNonTX.createMessage(true);
msgSend.putStringProperty(new SimpleString("tst"), new SimpleString("i=" + i + ", j=" + j));
msgSend.getBodyBuffer().writeBytes(new byte[10 * 1024]);
producerNonTransacted.send(msgSend);
}
assertTrue(server.getPagingManager().getPageStore(PagingTest.ADDRESS).isPaging());
}
else
{
// Odd iterations: drain the filler again, exercising depage while the TX is open.
ClientConsumer consumer = sessionNonTX.createConsumer(PagingTest.ADDRESS);
for (int j = 0; j < 20; j++)
{
ClientMessage msgReceived = consumer.receive(10000);
assertNotNull(msgReceived);
msgReceived.acknowledge();
}
consumer.close();
}
}
// Drain any non-TX filler still pending so only the transacted messages remain.
ClientConsumer consumerNonTX = sessionNonTX.createConsumer(PagingTest.ADDRESS);
while (true)
{
ClientMessage msgReceived = consumerNonTX.receive(1000);
if (msgReceived == null)
{
break;
}
msgReceived.acknowledge();
}
consumerNonTX.close();
ClientConsumer consumer = sessionNonTX.createConsumer(PagingTest.ADDRESS);
// Nothing may be visible before the transaction commits.
Assert.assertNull(consumer.receiveImmediate());
sessionTransacted.commit();
sessionTransacted.close();
// After commit, all 50 transacted messages must be delivered in send order.
for (int i = 0; i < 50; i++)
{
ClientMessage message = consumer.receive(PagingTest.RECEIVE_TIMEOUT);
Assert.assertNotNull(message);
Integer messageID = (Integer) message.getObjectProperty(new SimpleString("id"));
Assert.assertNotNull(messageID);
Assert.assertEquals("message received out of order", i, messageID.intValue());
message.acknowledge();
}
Assert.assertNull(consumer.receiveImmediate());
consumer.close();
sessionNonTX.close();
}
@Test
public void testDepageDuringTransaction4() throws Exception
{
// A transacted producer thread commits in batches of 100 while a concurrent
// auto-commit consumer drains; delivery order must survive paging/depaging.
clearDataRecreateServerDirs();
Configuration config = createDefaultConfig();
server = createServer(true,
config,
PagingTest.PAGE_SIZE,
PagingTest.PAGE_MAX,
new HashMap<String, AddressSettings>());
// Journal syncs disabled to speed the test up; durability is not under test here.
server.getConfiguration().setJournalSyncNonTransactional(false);
server.getConfiguration().setJournalSyncTransactional(false);
server.start();
// Failures inside the producer thread are recorded here and asserted at the end.
final AtomicInteger errors = new AtomicInteger(0);
final int numberOfMessages = 10000;
locator = createInVMNonHALocator();
locator.setBlockOnNonDurableSend(true);
locator.setBlockOnDurableSend(true);
locator.setBlockOnAcknowledge(false);
sf = createSessionFactory(locator);
final byte[] body = new byte[MESSAGE_SIZE];
Thread producerThread = new Thread()
{
@Override
public void run()
{
ClientSession sessionProducer = null;
try
{
sessionProducer = sf.createSession(false, false);
ClientProducer producer = sessionProducer.createProducer(ADDRESS);
for (int i = 0; i < numberOfMessages; i++)
{
ClientMessage msg = sessionProducer.createMessage(true);
msg.getBodyBuffer().writeBytes(body);
msg.putIntProperty("count", i);
producer.send(msg);
// Commit every 100 sends so batches become visible while consuming runs.
if (i % 100 == 0 && i != 0)
{
sessionProducer.commit();
// Thread.sleep(500);
}
}
sessionProducer.commit();
}
catch (Throwable e)
{
e.printStackTrace(); // >> junit report
errors.incrementAndGet();
}
finally
{
try
{
if (sessionProducer != null)
{
sessionProducer.close();
}
}
catch (Throwable e)
{
e.printStackTrace();
errors.incrementAndGet();
}
}
}
};
ClientSession session = sf.createSession(true, true, 0);
session.start();
session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS, null, true);
producerThread.start();
ClientConsumer consumer = session.createConsumer(PagingTest.ADDRESS);
// Consume concurrently with the producer, committing every 10 messages;
// ordering by the "count" property must hold across page boundaries.
for (int i = 0; i < numberOfMessages; i++)
{
ClientMessage msg = consumer.receive(5000);
assertNotNull(msg);
assertEquals(i, msg.getIntProperty("count").intValue());
msg.acknowledge();
if (i > 0 && i % 10 == 0)
{
session.commit();
}
}
session.commit();
session.close();
producerThread.join();
locator.close();
sf.close();
assertEquals(0, errors.get());
}
@Test
public void testOrderingNonTX() throws Exception
{
// Starts consuming only after the producer has confirmed the address is paging
// (latch released at i == 1000); out-of-order delivery is logged, not asserted.
clearDataRecreateServerDirs();
Configuration config = createDefaultConfig();
server = createServer(true,
config,
PagingTest.PAGE_SIZE,
PagingTest.PAGE_SIZE * 2,
new HashMap<String, AddressSettings>());
server.getConfiguration().setJournalSyncNonTransactional(false);
server.getConfiguration().setJournalSyncTransactional(false);
server.start();
final AtomicInteger errors = new AtomicInteger(0);
final int numberOfMessages = 2000;
locator.setBlockOnNonDurableSend(true);
locator.setBlockOnDurableSend(true);
locator.setBlockOnAcknowledge(true);
sf = createSessionFactory(locator);
// Released once the producer has verified the store is paging.
final CountDownLatch ready = new CountDownLatch(1);
final byte[] body = new byte[MESSAGE_SIZE];
Thread producerThread = new Thread()
{
@Override
public void run()
{
ClientSession sessionProducer = null;
try
{
sessionProducer = sf.createSession(true, true);
ClientProducer producer = sessionProducer.createProducer(ADDRESS);
for (int i = 0; i < numberOfMessages; i++)
{
ClientMessage msg = sessionProducer.createMessage(true);
msg.getBodyBuffer().writeBytes(body);
msg.putIntProperty("count", i);
producer.send(msg);
if (i == 1000)
{
// The session is not TX, but we do this just to perform a round trip to the server
// and make sure there are no pending messages
sessionProducer.commit();
assertTrue(server.getPagingManager().getPageStore(ADDRESS).isPaging());
ready.countDown();
}
}
sessionProducer.commit();
log.info("Producer gone");
}
catch (Throwable e)
{
e.printStackTrace(); // >> junit report
errors.incrementAndGet();
}
finally
{
try
{
if (sessionProducer != null)
{
sessionProducer.close();
}
}
catch (Throwable e)
{
e.printStackTrace();
errors.incrementAndGet();
}
}
}
};
ClientSession session = sf.createSession(true, true, 0);
session.start();
session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS, null, true);
producerThread.start();
assertTrue(ready.await(100, TimeUnit.SECONDS));
ClientConsumer consumer = session.createConsumer(PagingTest.ADDRESS);
for (int i = 0; i < numberOfMessages; i++)
{
ClientMessage msg = consumer.receive(5000);
assertNotNull(msg);
// Ordering is only logged here, not asserted (see the commented-out assert below).
if (i != msg.getIntProperty("count").intValue())
{
log.info("Received " + i + " with property = " + msg.getIntProperty("count"));
log.info("###### different");
}
// assertEquals(i, msg.getIntProperty("count").intValue());
msg.acknowledge();
}
session.close();
producerThread.join();
assertEquals(0, errors.get());
}
/**
 * Paging-plus-scheduled-delivery scenario, consuming without a broker restart.
 */
@Test
public void testPageOnSchedulingNoRestart() throws Exception
{
   final boolean restartServer = false;
   internalTestPageOnScheduling(restartServer);
}
/**
 * Paging-plus-scheduled-delivery scenario with a full broker restart before consuming.
 */
@Test
public void testPageOnSchedulingRestart() throws Exception
{
   final boolean restartServer = true;
   internalTestPageOnScheduling(restartServer);
}
/**
 * Fills the address until some messages land on page files, scheduling a 5-second
 * delayed delivery only for messages that were paged, optionally restarts the broker,
 * then verifies every message arrives with the expected body and that any scheduled
 * message was not delivered before its scheduled time.
 *
 * @param restart whether to stop and recreate the server before consuming
 */
public void internalTestPageOnScheduling(final boolean restart) throws Exception
{
   clearDataRecreateServerDirs();
   Configuration config = createDefaultConfig()
      .setJournalSyncNonTransactional(false);
   server = createServer(true,
                         config,
                         PagingTest.PAGE_SIZE,
                         PagingTest.PAGE_MAX,
                         new HashMap<String, AddressSettings>());
   server.start();
   final int numberOfMessages = 1000;
   final int numberOfBytes = 1024;
   locator.setBlockOnNonDurableSend(true);
   locator.setBlockOnDurableSend(true);
   locator.setBlockOnAcknowledge(true);
   sf = createSessionFactory(locator);
   ClientSession session = sf.createSession(null, null, false, true, true, false, 0);
   session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS, null, true);
   ClientProducer producer = session.createProducer(PagingTest.ADDRESS);
   ClientMessage message = null;
   byte[] body = new byte[numberOfBytes];
   for (int j = 0; j < numberOfBytes; j++)
   {
      body[j] = UnitTestCase.getSamplebyte(j);
   }
   long scheduledTime = System.currentTimeMillis() + 5000;
   for (int i = 0; i < numberOfMessages; i++)
   {
      message = session.createMessage(true);
      message.getBodyBuffer().writeBytes(body);
      message.putIntProperty(new SimpleString("id"), i);
      PagingStore store = server.getPagingManager()
         .getPageStore(PagingTest.ADDRESS);
      // Worst scenario possible... only schedule messages that are on page files
      if (store.getCurrentPage() != null)
      {
         message.putLongProperty(Message.HDR_SCHEDULED_DELIVERY_TIME, scheduledTime);
      }
      producer.send(message);
   }
   if (restart)
   {
      // Restart so consumption below also covers reload from journal/page files.
      session.close();
      server.stop();
      server = createServer(true,
                            config,
                            PagingTest.PAGE_SIZE,
                            PagingTest.PAGE_MAX,
                            new HashMap<String, AddressSettings>());
      server.start();
      sf = createSessionFactory(locator);
      session = sf.createSession(null, null, false, true, true, false, 0);
   }
   ClientConsumer consumer = session.createConsumer(PagingTest.ADDRESS);
   session.start();
   for (int i = 0; i < numberOfMessages; i++)
   {
      ClientMessage message2 = consumer.receive(PagingTest.RECEIVE_TIMEOUT);
      // Assert before dereferencing (the original asserted the same thing twice).
      Assert.assertNotNull(message2);
      message2.acknowledge();
      Long scheduled = (Long) message2.getObjectProperty(Message.HDR_SCHEDULED_DELIVERY_TIME);
      if (scheduled != null)
      {
         // A scheduled message may only arrive once its delivery time has passed.
         Assert.assertTrue("Scheduling didn't work", System.currentTimeMillis() >= scheduledTime);
      }
      try
      {
         assertBodiesEqual(body, message2.getBodyBuffer());
      }
      catch (AssertionError e)
      {
         // Dump both buffers to help diagnose body corruption before rethrowing.
         PagingTest.log.info("Expected buffer:" + UnitTestCase.dumbBytesHex(body, 40));
         PagingTest.log.info("Arriving buffer:" + UnitTestCase.dumbBytesHex(message2.getBodyBuffer()
                                                                               .toByteBuffer()
                                                                               .array(), 40));
         throw e;
      }
   }
   consumer.close();
   session.close();
}
/**
 * Messages sent inside a transaction that is rolled back must never become
 * visible to a consumer, even when the address is configured for paging.
 */
@Test
public void testRollbackOnSend() throws Exception
{
   clearDataRecreateServerDirs();
   Configuration config = createDefaultConfig();
   server = createServer(true,
                         config,
                         PagingTest.PAGE_SIZE,
                         PagingTest.PAGE_MAX,
                         new HashMap<String, AddressSettings>());
   server.start();
   final int intsPerBody = 256;
   final int totalMessages = 10;
   locator.setBlockOnNonDurableSend(true);
   locator.setBlockOnDurableSend(true);
   locator.setBlockOnAcknowledge(true);
   sf = createSessionFactory(locator);
   // Session with transacted sends (autoCommitSends == false).
   ClientSession session = sf.createSession(null, null, false, false, true, false, 0);
   session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS, null, true);
   ClientProducer producer = session.createProducer(PagingTest.ADDRESS);
   for (int i = 0; i < totalMessages; i++)
   {
      ClientMessage msg = session.createMessage(true);
      ActiveMQBuffer buffer = msg.getBodyBuffer();
      for (int value = 1; value <= intsPerBody; value++)
      {
         buffer.writeInt(value);
      }
      msg.putIntProperty(new SimpleString("id"), i);
      producer.send(msg);
   }
   // Undo every send above; nothing may reach the queue.
   session.rollback();
   ClientConsumer consumer = session.createConsumer(PagingTest.ADDRESS);
   session.start();
   Assert.assertNull(consumer.receiveImmediate());
   session.close();
}
/**
 * Sends 500 messages in a single transaction, commits, restarts the broker,
 * and verifies all of them survived and are delivered.
 */
@Test
public void testCommitOnSend() throws Exception
{
   clearDataRecreateServerDirs();
   Configuration config = createDefaultConfig();
   server = createServer(true,
                         config,
                         PagingTest.PAGE_SIZE,
                         PagingTest.PAGE_MAX,
                         new HashMap<String, AddressSettings>());
   server.start();
   final int numberOfIntegers = 10;
   final int numberOfMessages = 500;
   locator.setBlockOnNonDurableSend(true);
   locator.setBlockOnDurableSend(true);
   locator.setBlockOnAcknowledge(true);
   sf = createSessionFactory(locator);
   // Fully transacted session: neither sends nor acks auto-commit.
   ClientSession session = sf.createSession(null, null, false, false, false, false, 0);
   session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS, null, true);
   ClientProducer producer = session.createProducer(PagingTest.ADDRESS);
   ClientMessage message = null;
   for (int i = 0; i < numberOfMessages; i++)
   {
      message = session.createMessage(true);
      ActiveMQBuffer bodyLocal = message.getBodyBuffer();
      for (int j = 1; j <= numberOfIntegers; j++)
      {
         bodyLocal.writeInt(j);
      }
      message.putIntProperty(new SimpleString("id"), i);
      producer.send(message);
   }
   session.commit();
   session.close();
   locator.close();
   locator = createInVMNonHALocator();
   // Restart the broker so delivery below proves the committed messages persisted.
   server.stop();
   server = createServer(true,
                         config,
                         PagingTest.PAGE_SIZE,
                         PagingTest.PAGE_MAX,
                         new HashMap<String, AddressSettings>());
   server.start();
   sf = createSessionFactory(locator);
   session = sf.createSession(null, null, false, false, false, false, 0);
   ClientConsumer consumer = session.createConsumer(PagingTest.ADDRESS);
   session.start();
   // (A leftover "if (i == 55) System.out.println" debug block was removed here.)
   for (int i = 0; i < numberOfMessages; i++)
   {
      ClientMessage msg = consumer.receive(5000);
      Assert.assertNotNull(msg);
      msg.acknowledge();
      session.commit();
   }
   session.close();
}
/**
 * Consumes part of a paged queue across two broker restarts and verifies the
 * remaining messages are still delivered in order afterwards.
 * NOTE: the method name keeps its historical typo ("Parial") for compatibility
 * with external tooling that selects tests by name.
 */
@Test
public void testParialConsume() throws Exception
{
   clearDataRecreateServerDirs();
   Configuration config = createDefaultConfig();
   server = createServer(true,
                         config,
                         PagingTest.PAGE_SIZE,
                         PagingTest.PAGE_MAX,
                         new HashMap<String, AddressSettings>());
   server.start();
   final int numberOfMessages = 1000;
   locator.setBlockOnNonDurableSend(true);
   locator.setBlockOnDurableSend(true);
   locator.setBlockOnAcknowledge(true);
   sf = createSessionFactory(locator);
   ClientSession session = sf.createSession(null, null, false, false, false, false, 0);
   session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS, null, true);
   ClientProducer producer = session.createProducer(PagingTest.ADDRESS);
   ClientMessage message = null;
   for (int i = 0; i < numberOfMessages; i++)
   {
      message = session.createMessage(true);
      ActiveMQBuffer bodyLocal = message.getBodyBuffer();
      bodyLocal.writeBytes(new byte[1024]);
      message.putIntProperty(new SimpleString("id"), i);
      producer.send(message);
   }
   session.commit();
   session.close();
   locator.close();
   server.stop();
   // First restart: consume only part of the queue.
   server = createServer(true,
                         config,
                         PagingTest.PAGE_SIZE,
                         PagingTest.PAGE_MAX,
                         new HashMap<String, AddressSettings>());
   server.start();
   locator = createInVMNonHALocator();
   sf = createSessionFactory(locator);
   session = sf.createSession(null, null, false, false, false, false, 0);
   ClientConsumer consumer = session.createConsumer(PagingTest.ADDRESS);
   session.start();
   // 347 = I just picked any odd number, not rounded, to make sure it's not at the beginning of any page
   for (int i = 0; i < 347; i++)
   {
      ClientMessage msg = consumer.receive(5000);
      // Assert non-null BEFORE dereferencing (the original read the property first,
      // which would have thrown an NPE instead of a clear assertion failure).
      Assert.assertNotNull(msg);
      assertEquals(i, msg.getIntProperty("id").intValue());
      msg.acknowledge();
      session.commit();
   }
   session.close();
   locator.close();
   server.stop();
   // Second restart: the untouched remainder must still be delivered in order.
   server = createServer(true,
                         config,
                         PagingTest.PAGE_SIZE,
                         PagingTest.PAGE_MAX,
                         new HashMap<String, AddressSettings>());
   server.start();
   locator = createInVMNonHALocator();
   sf = createSessionFactory(locator);
   session = sf.createSession(null, null, false, false, false, false, 0);
   consumer = session.createConsumer(PagingTest.ADDRESS);
   session.start();
   for (int i = 347; i < numberOfMessages; i++)
   {
      ClientMessage msg = consumer.receive(5000);
      Assert.assertNotNull(msg);
      assertEquals(i, msg.getIntProperty("id").intValue());
      msg.acknowledge();
      session.commit();
   }
   session.close();
}
/** Multiple queues bound to one paged address; sends without a transaction. */
@Test
public void testPageMultipleDestinations() throws Exception
{
   final boolean transacted = false;
   internalTestPageMultipleDestinations(transacted);
}
/** Multiple queues bound to one paged address; each send committed in its own TX. */
@Test
public void testPageMultipleDestinationsTransacted() throws Exception
{
   final boolean transacted = true;
   internalTestPageMultipleDestinations(transacted);
}
@Test
public void testDropMessages() throws Exception
{
// With the DROP address-full policy, sends beyond the max size are silently
// discarded; each of the three send rounds expects exactly 6 deliverable
// messages (what fits under the 10k max-size — TODO confirm the exact
// per-message overhead that makes it 6 rather than 10).
clearDataRecreateServerDirs();
Configuration config = createDefaultConfig();
HashMap<String, AddressSettings> settings = new HashMap<String, AddressSettings>();
AddressSettings set = new AddressSettings();
set.setAddressFullMessagePolicy(AddressFullMessagePolicy.DROP);
settings.put(PagingTest.ADDRESS.toString(), set);
server = createServer(true, config, 1024, 10 * 1024, settings);
server.start();
final int numberOfMessages = 1000;
locator.setBlockOnNonDurableSend(true);
locator.setBlockOnDurableSend(true);
locator.setBlockOnAcknowledge(true);
sf = createSessionFactory(locator);
ClientSession session = sf.createSession(null, null, false, true, true, false, 0);
session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS, null, true);
ClientProducer producer = session.createProducer(PagingTest.ADDRESS);
ClientMessage message = null;
// Round 1: auto-commit sends; most of the 1000 messages are dropped.
for (int i = 0; i < numberOfMessages; i++)
{
byte[] body = new byte[1024];
message = session.createMessage(true);
message.getBodyBuffer().writeBytes(body);
producer.send(message);
}
ClientConsumer consumer = session.createConsumer(PagingTest.ADDRESS);
session.start();
for (int i = 0; i < 6; i++)
{
ClientMessage message2 = consumer.receive(PagingTest.RECEIVE_TIMEOUT);
Assert.assertNotNull(message2);
message2.acknowledge();
}
// Only the retained messages may exist; the address size must return to zero.
Assert.assertNull(consumer.receiveImmediate());
Assert.assertEquals(0, server.getPagingManager()
.getPageStore(PagingTest.ADDRESS)
.getAddressSize());
// Round 2: identical to round 1, now on the already-started session.
for (int i = 0; i < numberOfMessages; i++)
{
byte[] body = new byte[1024];
message = session.createMessage(true);
message.getBodyBuffer().writeBytes(body);
producer.send(message);
}
for (int i = 0; i < 6; i++)
{
ClientMessage message2 = consumer.receive(PagingTest.RECEIVE_TIMEOUT);
Assert.assertNotNull(message2);
message2.acknowledge();
}
Assert.assertNull(consumer.receiveImmediate());
session.close();
// Round 3: transacted sends must behave the same way under DROP.
session = sf.createSession(false, true, true);
producer = session.createProducer(PagingTest.ADDRESS);
for (int i = 0; i < numberOfMessages; i++)
{
byte[] body = new byte[1024];
message = session.createMessage(true);
message.getBodyBuffer().writeBytes(body);
producer.send(message);
}
session.commit();
consumer = session.createConsumer(PagingTest.ADDRESS);
session.start();
for (int i = 0; i < 6; i++)
{
ClientMessage message2 = consumer.receive(PagingTest.RECEIVE_TIMEOUT);
Assert.assertNotNull(message2);
message2.acknowledge();
}
session.commit();
Assert.assertNull(consumer.receiveImmediate());
session.close();
Assert.assertEquals(0, server.getPagingManager().getPageStore(PagingTest.ADDRESS).getAddressSize());
}
/**
 * With the DROP address-full policy and fast-expiring non-durable messages, the
 * broker must stay healthy while a deliberately slow handler consumes. The test
 * only checks that nothing hangs or throws; no delivery counts are asserted.
 */
@Test
public void testDropMessagesExpiring() throws Exception
{
   clearDataRecreateServerDirs();
   Configuration config = createDefaultConfig();
   HashMap<String, AddressSettings> settings = new HashMap<String, AddressSettings>();
   AddressSettings set = new AddressSettings();
   set.setAddressFullMessagePolicy(AddressFullMessagePolicy.DROP);
   settings.put(PagingTest.ADDRESS.toString(), set);
   server = createServer(true, config, 1024, 1024 * 1024, settings);
   server.start();
   final int numberOfMessages = 30000;
   locator.setAckBatchSize(0);
   sf = createSessionFactory(locator);
   ClientSession sessionProducer = sf.createSession();
   sessionProducer.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS, null, true);
   ClientProducer producer = sessionProducer.createProducer(PagingTest.ADDRESS);
   ClientMessage message = null;
   ClientSession sessionConsumer = sf.createSession();
   // Slow consumer: 1ms pause per message so expiry and DROP interact with delivery.
   class MyHandler implements MessageHandler
   {
      int count;

      @Override
      public void onMessage(ClientMessage message1)
      {
         try
         {
            Thread.sleep(1);
         }
         catch (InterruptedException ignored)
         {
            // Best-effort throttle only; restore the interrupt flag and move on.
            Thread.currentThread().interrupt();
         }
         count++;
         if (count % 1000 == 0)
         {
            log.info("received " + count);
         }
         try
         {
            message1.acknowledge();
         }
         catch (Exception e)
         {
            e.printStackTrace();
         }
      }
   }
   ClientConsumer consumer = sessionConsumer.createConsumer(PagingTest.ADDRESS);
   sessionConsumer.start();
   consumer.setMessageHandler(new MyHandler());
   for (int i = 0; i < numberOfMessages; i++)
   {
      byte[] body = new byte[1024];
      message = sessionProducer.createMessage(false);
      message.getBodyBuffer().writeBytes(body);
      // Expire almost immediately so expiry races with consumption and dropping.
      message.setExpiration(System.currentTimeMillis() + 100);
      producer.send(message);
   }
   sessionProducer.close();
   sessionConsumer.close();
}
/**
 * Sends the same message to an address with 100 bound queues, restarts the broker,
 * and verifies each queue delivers every copy and ends up empty.
 *
 * @param transacted whether each send is committed in its own transaction
 */
private void internalTestPageMultipleDestinations(final boolean transacted) throws Exception
{
   Configuration config = createDefaultConfig();
   final int NUMBER_OF_BINDINGS = 100;
   int NUMBER_OF_MESSAGES = 2;
   server = createServer(true,
                         config,
                         PagingTest.PAGE_SIZE,
                         PagingTest.PAGE_MAX,
                         new HashMap<String, AddressSettings>());
   server.start();
   locator.setBlockOnNonDurableSend(true);
   locator.setBlockOnDurableSend(true);
   locator.setBlockOnAcknowledge(true);
   ClientSessionFactory sf = createSessionFactory(locator);
   ClientSession session = sf.createSession(null, null, false, !transacted, true, false, 0);
   for (int i = 0; i < NUMBER_OF_BINDINGS; i++)
   {
      session.createQueue(PagingTest.ADDRESS, new SimpleString("someQueue" + i), null, true);
   }
   ClientProducer producer = session.createProducer(PagingTest.ADDRESS);
   ClientMessage message = null;
   byte[] body = new byte[1024];
   // The same client-side message instance is deliberately reused for every send.
   message = session.createMessage(true);
   message.getBodyBuffer().writeBytes(body);
   for (int i = 0; i < NUMBER_OF_MESSAGES; i++)
   {
      producer.send(message);
      if (transacted)
      {
         session.commit();
      }
   }
   session.close();
   // Restart so consumption below also covers reload from journal/page files.
   server.stop();
   server = createServer(true,
                         config,
                         PagingTest.PAGE_SIZE,
                         PagingTest.PAGE_MAX,
                         new HashMap<String, AddressSettings>());
   server.start();
   sf = createSessionFactory(locator);
   session = sf.createSession(null, null, false, true, true, false, 0);
   session.start();
   for (int msg = 0; msg < NUMBER_OF_MESSAGES; msg++)
   {
      for (int i = 0; i < NUMBER_OF_BINDINGS; i++)
      {
         ClientConsumer consumer = session.createConsumer(new SimpleString("someQueue" + i));
         ClientMessage message2 = consumer.receive(PagingTest.RECEIVE_TIMEOUT);
         // Assert once, before the dereference (the original asserted this twice).
         Assert.assertNotNull(message2);
         message2.acknowledge();
         consumer.close();
      }
   }
   session.close();
   for (int i = 0; i < NUMBER_OF_BINDINGS; i++)
   {
      Queue queue = (Queue) server.getPostOffice().getBinding(new SimpleString("someQueue" + i)).getBindable();
      Assert.assertEquals("Queue someQueue" + i + " was supposed to be empty", 0, getMessageCount(queue));
      Assert.assertEquals("Queue someQueue" + i + " was supposed to be empty", 0, queue.getDeliveringCount());
   }
}
@Test
public void testSyncPage() throws Exception
{
// Verifies that PagingStore.sync() completes its operation callbacks within 10s.
Configuration config = createDefaultConfig();
server = createServer(true,
config,
PagingTest.PAGE_SIZE,
PagingTest.PAGE_MAX,
new HashMap<String, AddressSettings>());
server.start();
try
{
server.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS, null, true, false);
// pageUp starts at 0, so it is already released; only pageDone is awaited for real.
final CountDownLatch pageUp = new CountDownLatch(0);
final CountDownLatch pageDone = new CountDownLatch(1);
// DummyOperationContext is expected to count the latches down as the sync completes.
OperationContext ctx = new DummyOperationContext(pageUp, pageDone);
OperationContextImpl.setContext(ctx);
PagingManager paging = server.getPagingManager();
PagingStore store = paging.getPageStore(ADDRESS);
store.sync();
assertTrue(pageUp.await(10, TimeUnit.SECONDS));
assertTrue(pageDone.await(10, TimeUnit.SECONDS));
server.stop();
}
finally
{
// stop() may already have run in the try block; a failing second call is fine.
try
{
server.stop();
}
catch (Throwable ignored)
{
}
// Always restore the default context so later tests are unaffected.
OperationContextImpl.clearContext();
}
}
/**
 * Same sync() latch check as {@code testSyncPage}, against a freshly created queue.
 * Unlike the original, the dummy operation context is now cleared in a finally
 * block (mirroring {@code testSyncPage}) so a failure here cannot leak the
 * context into subsequently running tests.
 */
@Test
public void testSyncPageTX() throws Exception
{
   Configuration config = createDefaultConfig();
   server = createServer(true,
                         config,
                         PagingTest.PAGE_SIZE,
                         PagingTest.PAGE_MAX,
                         new HashMap<String, AddressSettings>());
   server.start();
   server.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS, null, true, false);
   // pageUp starts at 0, so it is already released; only pageDone is awaited for real.
   final CountDownLatch pageUp = new CountDownLatch(0);
   final CountDownLatch pageDone = new CountDownLatch(1);
   OperationContext ctx = new DummyOperationContext(pageUp, pageDone);
   OperationContextImpl.setContext(ctx);
   try
   {
      PagingManager paging = server.getPagingManager();
      PagingStore store = paging.getPageStore(ADDRESS);
      store.sync();
      assertTrue(pageUp.await(10, TimeUnit.SECONDS));
      assertTrue(pageDone.await(10, TimeUnit.SECONDS));
   }
   finally
   {
      OperationContextImpl.clearContext();
   }
}
@Test
public void testPagingOneDestinationOnly() throws Exception
{
// Two addresses on one server; only PAGED_ADDRESS has paging limits, so only
// it may enter paging mode while NON_PAGED_ADDRESS stays in memory.
SimpleString PAGED_ADDRESS = new SimpleString("paged");
SimpleString NON_PAGED_ADDRESS = new SimpleString("non-paged");
Configuration configuration = createDefaultConfig();
Map<String, AddressSettings> addresses = new HashMap<String, AddressSettings>();
// Default (unrestricted) settings for everything else.
addresses.put("#", new AddressSettings());
AddressSettings pagedDestination = new AddressSettings();
pagedDestination.setPageSizeBytes(1024);
pagedDestination.setMaxSizeBytes(10 * 1024);
addresses.put(PAGED_ADDRESS.toString(), pagedDestination);
// Global limits disabled (-1); only the per-address settings above apply.
server = createServer(true, configuration, -1, -1, addresses);
server.start();
sf = createSessionFactory(locator);
ClientSession session = sf.createSession(false, true, false);
session.createQueue(PAGED_ADDRESS, PAGED_ADDRESS, true);
session.createQueue(NON_PAGED_ADDRESS, NON_PAGED_ADDRESS, true);
ClientProducer producerPaged = session.createProducer(PAGED_ADDRESS);
ClientProducer producerNonPaged = session.createProducer(NON_PAGED_ADDRESS);
int NUMBER_OF_MESSAGES = 100;
for (int i = 0; i < NUMBER_OF_MESSAGES; i++)
{
ClientMessage msg = session.createMessage(true);
msg.getBodyBuffer().writeBytes(new byte[512]);
producerPaged.send(msg);
producerNonPaged.send(msg);
}
session.close();
Assert.assertTrue(server.getPagingManager().getPageStore(PAGED_ADDRESS).isPaging());
Assert.assertFalse(server.getPagingManager().getPageStore(NON_PAGED_ADDRESS).isPaging());
session = sf.createSession(false, true, false);
session.start();
ClientConsumer consumerNonPaged = session.createConsumer(NON_PAGED_ADDRESS);
ClientConsumer consumerPaged = session.createConsumer(PAGED_ADDRESS);
// Receive everything from the non-paged queue first, acknowledging only afterwards.
ClientMessage[] ackList = new ClientMessage[NUMBER_OF_MESSAGES];
for (int i = 0; i < NUMBER_OF_MESSAGES; i++)
{
ClientMessage msg = consumerNonPaged.receive(5000);
Assert.assertNotNull(msg);
ackList[i] = msg;
}
Assert.assertNull(consumerNonPaged.receiveImmediate());
for (ClientMessage ack : ackList)
{
ack.acknowledge();
}
consumerNonPaged.close();
session.commit();
ackList = null;
// The paged queue is drained one-by-one with a commit per message.
for (int i = 0; i < NUMBER_OF_MESSAGES; i++)
{
ClientMessage msg = consumerPaged.receive(5000);
Assert.assertNotNull(msg);
msg.acknowledge();
session.commit();
}
Assert.assertNull(consumerPaged.receiveImmediate());
session.close();
}
@Test
public void testPagingDifferentSizes() throws Exception
{
// Two addresses with different page/max sizes: A pages after ~11 messages,
// B only after roughly twice that; B must remain paging after A is drained.
SimpleString PAGED_ADDRESS_A = new SimpleString("paged-a");
SimpleString PAGED_ADDRESS_B = new SimpleString("paged-b");
Configuration configuration = createDefaultConfig();
Map<String, AddressSettings> addresses = new HashMap<String, AddressSettings>();
addresses.put("#", new AddressSettings());
AddressSettings pagedDestinationA = new AddressSettings();
pagedDestinationA.setPageSizeBytes(1024);
pagedDestinationA.setMaxSizeBytes(10 * 1024);
int NUMBER_MESSAGES_BEFORE_PAGING = 11;
addresses.put(PAGED_ADDRESS_A.toString(), pagedDestinationA);
AddressSettings pagedDestinationB = new AddressSettings();
pagedDestinationB.setPageSizeBytes(2024);
pagedDestinationB.setMaxSizeBytes(25 * 1024);
addresses.put(PAGED_ADDRESS_B.toString(), pagedDestinationB);
// Global limits disabled (-1); only the per-address settings above apply.
server = createServer(true, configuration, -1, -1, addresses);
server.start();
sf = createSessionFactory(locator);
ClientSession session = sf.createSession(false, true, false);
session.createQueue(PAGED_ADDRESS_A, PAGED_ADDRESS_A, true);
session.createQueue(PAGED_ADDRESS_B, PAGED_ADDRESS_B, true);
ClientProducer producerA = session.createProducer(PAGED_ADDRESS_A);
ClientProducer producerB = session.createProducer(PAGED_ADDRESS_B);
int NUMBER_OF_MESSAGES = 100;
// First batch: enough to push A (10k limit) into paging, but not B (25k limit).
for (int i = 0; i < NUMBER_MESSAGES_BEFORE_PAGING; i++)
{
ClientMessage msg = session.createMessage(true);
msg.getBodyBuffer().writeBytes(new byte[512]);
producerA.send(msg);
producerB.send(msg);
}
session.commit(); // commit was called to clean the buffer only (making sure everything is on the server side)
Assert.assertTrue(server.getPagingManager().getPageStore(PAGED_ADDRESS_A).isPaging());
Assert.assertFalse(server.getPagingManager().getPageStore(PAGED_ADDRESS_B).isPaging());
// Second batch: now B crosses its limit as well.
for (int i = 0; i < NUMBER_MESSAGES_BEFORE_PAGING; i++)
{
ClientMessage msg = session.createMessage(true);
msg.getBodyBuffer().writeBytes(new byte[512]);
producerA.send(msg);
producerB.send(msg);
}
session.commit(); // commit was called to clean the buffer only (making sure everything is on the server side)
Assert.assertTrue(server.getPagingManager().getPageStore(PAGED_ADDRESS_A).isPaging());
Assert.assertTrue(server.getPagingManager().getPageStore(PAGED_ADDRESS_B).isPaging());
// Top both addresses up to NUMBER_OF_MESSAGES total each.
for (int i = NUMBER_MESSAGES_BEFORE_PAGING * 2; i < NUMBER_OF_MESSAGES; i++)
{
ClientMessage msg = session.createMessage(true);
msg.getBodyBuffer().writeBytes(new byte[512]);
producerA.send(msg);
producerB.send(msg);
}
session.close();
Assert.assertTrue(server.getPagingManager().getPageStore(PAGED_ADDRESS_A).isPaging());
Assert.assertTrue(server.getPagingManager().getPageStore(PAGED_ADDRESS_B).isPaging());
session = sf.createSession(null, null, false, true, true, false, 0);
session.start();
ClientConsumer consumerA = session.createConsumer(PAGED_ADDRESS_A);
ClientConsumer consumerB = session.createConsumer(PAGED_ADDRESS_B);
for (int i = 0; i < NUMBER_OF_MESSAGES; i++)
{
ClientMessage msg = consumerA.receive(5000);
Assert.assertNotNull("Couldn't receive a message on consumerA, iteration = " + i, msg);
msg.acknowledge();
}
Assert.assertNull(consumerA.receiveImmediate());
consumerA.close();
// Draining A must not take B out of paging mode.
Assert.assertTrue(server.getPagingManager().getPageStore(PAGED_ADDRESS_B).isPaging());
for (int i = 0; i < NUMBER_OF_MESSAGES; i++)
{
ClientMessage msg = consumerB.receive(5000);
Assert.assertNotNull(msg);
msg.acknowledge();
session.commit();
}
Assert.assertNull(consumerB.receiveImmediate());
consumerB.close();
session.close();
}
/**
 * Slowly produces large messages while a parallel consumer drains them, forcing
 * rapid page/depage cycles; afterwards the store must leave paging mode and
 * collapse back to a single page file.
 */
@Test
public void testPageAndDepageRapidly() throws Exception
{
   boolean persistentMessages = true;
   clearDataRecreateServerDirs();
   Configuration config = createDefaultConfig()
      .setJournalSyncNonTransactional(false)
      .setJournalFileSize(10 * 1024 * 1024);
   server = createServer(true, config, 512 * 1024, 1024 * 1024, new HashMap<String, AddressSettings>());
   server.start();
   final int messageSize = 51527;
   final int numberOfMessages = 200;
   locator = createInVMNonHALocator();
   locator.setBlockOnNonDurableSend(true);
   locator.setBlockOnDurableSend(true);
   locator.setBlockOnAcknowledge(true);
   sf = createSessionFactory(locator);
   ClientSession session = sf.createSession(true, true);
   session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS, null, true);
   ClientProducer producer = session.createProducer(PagingTest.ADDRESS);
   // Failures inside the consumer thread are recorded here and asserted below.
   final AtomicInteger errors = new AtomicInteger(0);
   Thread consumeThread = new Thread()
   {
      @Override
      public void run()
      {
         ClientSession sessionConsumer = null;
         try
         {
            sessionConsumer = sf.createSession(false, false);
            sessionConsumer.start();
            ClientConsumer cons = sessionConsumer.createConsumer(ADDRESS);
            for (int i = 0; i < numberOfMessages; i++)
            {
               ClientMessage msg = cons.receive(PagingTest.RECEIVE_TIMEOUT);
               assertNotNull(msg);
               msg.acknowledge();
               if (i % 20 == 0)
               {
                  sessionConsumer.commit();
               }
            }
            sessionConsumer.commit();
         }
         catch (Throwable e)
         {
            e.printStackTrace();
            errors.incrementAndGet();
         }
         finally
         {
            try
            {
               // Null-check added: if createSession itself failed, an NPE here
               // would mask the original error recorded above.
               if (sessionConsumer != null)
               {
                  sessionConsumer.close();
               }
            }
            catch (ActiveMQException e)
            {
               e.printStackTrace();
               errors.incrementAndGet();
            }
         }
      }
   };
   consumeThread.start();
   ClientMessage message = null;
   byte[] body = new byte[messageSize];
   for (int i = 0; i < numberOfMessages; i++)
   {
      message = session.createMessage(persistentMessages);
      ActiveMQBuffer bodyLocal = message.getBodyBuffer();
      bodyLocal.writeBytes(body);
      message.putIntProperty(new SimpleString("id"), i);
      producer.send(message);
      // Throttle the producer so the consumer can repeatedly catch up (depage).
      Thread.sleep(50);
   }
   consumeThread.join();
   assertEquals(0, errors.get());
   // Leaving paging mode is asynchronous; poll up to 5s for the store to settle.
   long timeout = System.currentTimeMillis() + 5000;
   while (System.currentTimeMillis() < timeout && (server.getPagingManager().getPageStore(ADDRESS).isPaging() || server.getPagingManager()
      .getPageStore(ADDRESS)
      .getNumberOfPages() != 1))
   {
      Thread.sleep(1);
   }
   assertFalse(server.getPagingManager().getPageStore(ADDRESS).isPaging());
   assertEquals(1, server.getPagingManager().getPageStore(ADDRESS).getNumberOfPages());
}
@Test
/**
 * Two queues bound to the same address with disjoint filters (propTest=0 / propTest=1).
 * Sends paged messages alternating between the filters and verifies each queue receives
 * exactly its share, in order, and that the address eventually leaves page mode.
 */
public void testTwoQueuesDifferentFilters() throws Exception
{
   boolean persistentMessages = true;
   clearDataRecreateServerDirs();
   Configuration config = createDefaultConfig()
      .setJournalSyncNonTransactional(false);
   server = createServer(true,
                         config,
                         PagingTest.PAGE_SIZE,
                         PagingTest.PAGE_MAX,
                         new HashMap<String, AddressSettings>());
   server.start();
   final int numberOfMessages = 200;
   locator = createInVMNonHALocator();
   // Very generous client timeouts so the test survives debugger pauses / slow CI.
   locator.setClientFailureCheckPeriod(120000);
   locator.setConnectionTTL(5000000);
   locator.setCallTimeout(120000);
   locator.setBlockOnNonDurableSend(true);
   locator.setBlockOnDurableSend(true);
   locator.setBlockOnAcknowledge(true);
   sf = createSessionFactory(locator);
   ClientSession session = sf.createSession(false, false, false);
   // note: if you want to change this, numberOfMessages has to be a multiple of NQUEUES
   int NQUEUES = 2;
   for (int i = 0; i < NQUEUES; i++)
   {
      // Queue "<ADDRESS>=i" only matches messages whose propTest property equals i.
      session.createQueue(PagingTest.ADDRESS,
                          PagingTest.ADDRESS.concat("=" + i),
                          new SimpleString("propTest=" + i),
                          true);
   }
   ClientProducer producer = session.createProducer(PagingTest.ADDRESS);
   ClientMessage message = null;
   byte[] body = new byte[MESSAGE_SIZE];
   for (int i = 0; i < numberOfMessages; i++)
   {
      message = session.createMessage(persistentMessages);
      ActiveMQBuffer bodyLocal = message.getBodyBuffer();
      bodyLocal.writeBytes(body);
      // Round-robin the filter property so each queue gets numberOfMessages / NQUEUES.
      message.putIntProperty("propTest", i % NQUEUES);
      message.putIntProperty("id", i);
      producer.send(message);
      if (i % 1000 == 0)
      {
         session.commit();
      }
   }
   session.commit();
   session.start();
   for (int nqueue = 0; nqueue < NQUEUES; nqueue++)
   {
      ClientConsumer consumer = session.createConsumer(PagingTest.ADDRESS.concat("=" + nqueue));
      for (int i = 0; i < (numberOfMessages / NQUEUES); i++)
      {
         message = consumer.receive(500000);
         assertNotNull(message);
         message.acknowledge();
         // Every message delivered to this queue must match the queue's filter value.
         assertEquals(nqueue, message.getIntProperty("propTest").intValue());
      }
      // No extra (mis-routed) messages may remain on this queue.
      assertNull(consumer.receiveImmediate());
      consumer.close();
      session.commit();
   }
   PagingStore store = server.getPagingManager().getPageStore(ADDRESS);
   // Trigger page cleanup explicitly, then poll: depaging is asynchronous.
   store.getCursorProvider().cleanup();
   long timeout = System.currentTimeMillis() + 5000;
   while (store.isPaging() && timeout > System.currentTimeMillis())
   {
      Thread.sleep(100);
   }
   // It's async, so need to wait a bit for it happening
   assertFalse(server.getPagingManager().getPageStore(ADDRESS).isPaging());
}
@Test
/**
 * Two unfiltered queues on the same address: every sent message is routed to BOTH
 * queues. Consumes the full message set from each queue independently while the
 * address is paging, then waits for the address to leave page mode.
 */
public void testTwoQueues() throws Exception
{
   boolean persistentMessages = true;
   clearDataRecreateServerDirs();
   Configuration config = createDefaultConfig()
      .setJournalSyncNonTransactional(false);
   server = createServer(true,
                         config,
                         PagingTest.PAGE_SIZE,
                         PagingTest.PAGE_MAX,
                         new HashMap<String, AddressSettings>());
   server.start();
   final int messageSize = 1024;
   final int numberOfMessages = 1000;
   try
   {
      ServerLocator locator = createInVMNonHALocator();
      // Generous client timeouts: this test is about paging, not connection liveness.
      locator.setClientFailureCheckPeriod(120000);
      locator.setConnectionTTL(5000000);
      locator.setCallTimeout(120000);
      locator.setBlockOnNonDurableSend(true);
      locator.setBlockOnDurableSend(true);
      locator.setBlockOnAcknowledge(true);
      ClientSessionFactory sf = locator.createSessionFactory();
      ClientSession session = sf.createSession(false, false, false);
      // Both queues have a null filter, so both receive every message sent to ADDRESS.
      session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS.concat("=1"), null, true);
      session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS.concat("=2"), null, true);
      ClientProducer producer = session.createProducer(PagingTest.ADDRESS);
      ClientMessage message = null;
      byte[] body = new byte[messageSize];
      for (int i = 0; i < numberOfMessages; i++)
      {
         message = session.createMessage(persistentMessages);
         ActiveMQBuffer bodyLocal = message.getBodyBuffer();
         bodyLocal.writeBytes(body);
         // propTest alternates 1/2 but is informational here — neither queue filters on it.
         message.putIntProperty("propTest", i % 2 == 0 ? 1 : 2);
         producer.send(message);
         if (i % 1000 == 0)
         {
            session.commit();
         }
      }
      session.commit();
      session.start();
      for (int msg = 1; msg <= 2; msg++)
      {
         ClientConsumer consumer = session.createConsumer(PagingTest.ADDRESS.concat("=" + msg));
         // Each queue holds the complete set of numberOfMessages messages.
         for (int i = 0; i < numberOfMessages; i++)
         {
            message = consumer.receive(5000);
            assertNotNull(message);
            message.acknowledge();
            // assertEquals(msg, message.getIntProperty("propTest").intValue());
            System.out.println("i = " + i + " msg = " + message.getIntProperty("propTest"));
         }
         session.commit();
         assertNull(consumer.receiveImmediate());
         consumer.close();
      }
      PagingStore store = server.getPagingManager().getPageStore(ADDRESS);
      // Depaging/cleanup is asynchronous; kick it and poll until paging stops.
      store.getCursorProvider().cleanup();
      long timeout = System.currentTimeMillis() + 5000;
      while (store.isPaging() && timeout > System.currentTimeMillis())
      {
         Thread.sleep(100);
      }
      store.getCursorProvider().cleanup();
      waitForNotPaging(server.locateQueue(PagingTest.ADDRESS.concat("=1")));
      sf.close();
      locator.close();
   }
   finally
   {
      try
      {
         server.stop();
      }
      catch (Throwable ignored)
      {
      }
   }
}
@Test
/**
 * Like testTwoQueues but with a third queue whose filter can never match
 * ({@code nothing='something'}). Verifies the dead queue does not prevent the
 * address from leaving page mode once the live queues are drained.
 *
 * NOTE(review): "Inative" in the method name is a typo for "Inactive"; left as-is
 * because renaming a public test method would change the externally visible name.
 */
public void testTwoQueuesAndOneInativeQueue() throws Exception
{
   boolean persistentMessages = true;
   clearDataRecreateServerDirs();
   Configuration config = createDefaultConfig()
      .setJournalSyncNonTransactional(false);
   server = createServer(true,
                         config,
                         PagingTest.PAGE_SIZE,
                         PagingTest.PAGE_MAX,
                         new HashMap<String, AddressSettings>());
   server.start();
   try
   {
      ServerLocator locator = createInVMNonHALocator();
      locator.setClientFailureCheckPeriod(120000);
      locator.setConnectionTTL(5000000);
      locator.setCallTimeout(120000);
      locator.setBlockOnNonDurableSend(true);
      locator.setBlockOnDurableSend(true);
      locator.setBlockOnAcknowledge(true);
      ClientSessionFactory sf = locator.createSessionFactory();
      ClientSession session = sf.createSession(false, false, false);
      session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS.concat("=1"), null, true);
      session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS.concat("=2"), null, true);
      // A queue with an impossible filter
      session.createQueue(PagingTest.ADDRESS,
                          PagingTest.ADDRESS.concat("-3"),
                          new SimpleString("nothing='something'"),
                          true);
      PagingStore store = server.getPagingManager().getPageStore(ADDRESS);
      Queue queue = server.locateQueue(PagingTest.ADDRESS.concat("=1"));
      // Force the address into page mode before the single message is sent.
      queue.getPageSubscription().getPagingStore().startPaging();
      ClientProducer producer = session.createProducer(PagingTest.ADDRESS);
      ClientMessage message = session.createMessage(persistentMessages);
      ActiveMQBuffer bodyLocal = message.getBodyBuffer();
      bodyLocal.writeBytes(new byte[1024]);
      producer.send(message);
      session.commit();
      session.start();
      // Drain the one message from each of the two live (unfiltered) queues.
      for (int msg = 1; msg <= 2; msg++)
      {
         ClientConsumer consumer = session.createConsumer(PagingTest.ADDRESS.concat("=" + msg));
         message = consumer.receive(5000);
         assertNotNull(message);
         message.acknowledge();
         assertNull(consumer.receiveImmediate());
         consumer.close();
      }
      session.commit();
      session.close();
      // Cleanup must succeed even though queue "-3" never received anything.
      store.getCursorProvider().cleanup();
      waitForNotPaging(server.locateQueue(PagingTest.ADDRESS.concat("=1")));
      sf.close();
      locator.close();
   }
   finally
   {
      try
      {
         server.stop();
      }
      catch (Throwable ignored)
      {
      }
   }
}
@Test
/**
 * Two unfiltered queues on a paging address; one queue is deleted, the other fully
 * consumed. Verifies paging stops and the server can then be restarted twice without
 * issues (exercising journal/paging recovery of the deleted-queue state).
 */
public void testTwoQueuesConsumeOneRestart() throws Exception
{
   boolean persistentMessages = true;
   clearDataRecreateServerDirs();
   Configuration config = createDefaultConfig()
      .setJournalSyncNonTransactional(false);
   server = createServer(true,
                         config,
                         PagingTest.PAGE_SIZE,
                         PagingTest.PAGE_MAX,
                         new HashMap<String, AddressSettings>());
   server.start();
   final int messageSize = 1024;
   final int numberOfMessages = 1000;
   try
   {
      ServerLocator locator = createInVMNonHALocator();
      // Generous client timeouts so the test is immune to debugger pauses / slow CI.
      locator.setClientFailureCheckPeriod(120000);
      locator.setConnectionTTL(5000000);
      locator.setCallTimeout(120000);
      locator.setBlockOnNonDurableSend(true);
      locator.setBlockOnDurableSend(true);
      locator.setBlockOnAcknowledge(true);
      ClientSessionFactory sf = locator.createSessionFactory();
      ClientSession session = sf.createSession(false, false, false);
      // Both queues are unfiltered, so both receive every message.
      session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS.concat("=1"), null, true);
      session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS.concat("=2"), null, true);
      ClientProducer producer = session.createProducer(PagingTest.ADDRESS);
      ClientMessage message = null;
      byte[] body = new byte[messageSize];
      for (int i = 0; i < numberOfMessages; i++)
      {
         message = session.createMessage(persistentMessages);
         ActiveMQBuffer bodyLocal = message.getBodyBuffer();
         bodyLocal.writeBytes(body);
         message.putIntProperty("propTest", i % 2 == 0 ? 1 : 2);
         producer.send(message);
         if (i % 1000 == 0)
         {
            session.commit();
         }
      }
      session.commit();
      session.start();
      // Drop one of the two queues; its page references must not pin the page files.
      session.deleteQueue(PagingTest.ADDRESS.concat("=1"));
      // FIX: close the first session and factory before replacing them — previously
      // both leaked for the remainder of the test when sf/session were reassigned.
      session.close();
      sf.close();
      sf = locator.createSessionFactory();
      session = sf.createSession(false, false, false);
      session.start();
      ClientConsumer consumer = session.createConsumer(PagingTest.ADDRESS.concat("=2"));
      for (int i = 0; i < numberOfMessages; i++)
      {
         message = consumer.receive(5000);
         assertNotNull(message);
         message.acknowledge();
      }
      session.commit();
      assertNull(consumer.receiveImmediate());
      consumer.close();
      long timeout = System.currentTimeMillis() + 10000;
      PagingStore store = server.getPagingManager().getPageStore(ADDRESS);
      // It's async, so need to wait a bit for it happening
      while (timeout > System.currentTimeMillis() && store.isPaging())
      {
         Thread.sleep(100);
      }
      assertFalse(server.getPagingManager().getPageStore(ADDRESS).isPaging());
      // Restart twice: recovery must cope with the deleted queue and cleaned pages.
      server.stop();
      server.start();
      server.stop();
      server.start();
      sf.close();
      locator.close();
   }
   finally
   {
      try
      {
         server.stop();
      }
      catch (Throwable ignored)
      {
      }
   }
}
@Test
/**
 * Large messages on a paging address with a dead-letter address configured
 * (maxDeliveryAttempts=5). The first two messages are rolled back 5 times each so
 * they move to "DLA"; the rest are consumed normally. The server is then restarted
 * and the DLA contents and page cleanup are re-verified from persisted state.
 */
public void testDLAOnLargeMessageAndPaging() throws Exception
{
   clearDataRecreateServerDirs();
   Configuration config = createDefaultConfig()
      .setThreadPoolMaxSize(5)
      .setJournalSyncNonTransactional(false);
   // Messages exhausting their redelivery attempts on ADDRESS are routed to "DLA".
   Map<String, AddressSettings> settings = new HashMap<String, AddressSettings>();
   AddressSettings dla = new AddressSettings();
   dla.setMaxDeliveryAttempts(5);
   dla.setDeadLetterAddress(new SimpleString("DLA"));
   settings.put(ADDRESS.toString(), dla);
   server = createServer(true, config, PagingTest.PAGE_SIZE, PagingTest.PAGE_MAX, settings);
   server.start();
   final int messageSize = 1024;
   ServerLocator locator = null;
   ClientSessionFactory sf = null;
   ClientSession session = null;
   try
   {
      locator = createInVMNonHALocator();
      locator.setBlockOnNonDurableSend(true);
      locator.setBlockOnDurableSend(true);
      locator.setBlockOnAcknowledge(true);
      sf = locator.createSessionFactory();
      session = sf.createSession(false, false, false);
      session.createQueue(ADDRESS, ADDRESS, true);
      session.createQueue("DLA", "DLA", true);
      // Force ADDRESS into page mode before anything is sent.
      PagingStore pgStoreAddress = server.getPagingManager().getPageStore(ADDRESS);
      pgStoreAddress.startPaging();
      PagingStore pgStoreDLA = server.getPagingManager().getPageStore(new SimpleString("DLA"));
      ClientProducer producer = session.createProducer(PagingTest.ADDRESS);
      for (int i = 0; i < 100; i++)
      {
         log.debug("send message #" + i);
         ClientMessage message = session.createMessage(true);
         message.putStringProperty("id", "str" + i);
         // Streamed body makes this a large message.
         message.setBodyInputStream(createFakeLargeStream(messageSize));
         producer.send(message);
         if ((i + 1) % 2 == 0)
         {
            session.commit();
         }
      }
      session.commit();
      session.start();
      ClientConsumer cons = session.createConsumer(ADDRESS);
      // Receive + rollback messages 0 and 1 five times each, hitting
      // maxDeliveryAttempts so they are dead-lettered to "DLA".
      for (int msgNr = 0; msgNr < 2; msgNr++)
      {
         for (int i = 0; i < 5; i++)
         {
            ClientMessage msg = cons.receive(5000);
            assertNotNull(msg);
            msg.acknowledge();
            assertEquals("str" + msgNr, msg.getStringProperty("id"));
            for (int j = 0; j < messageSize; j++)
            {
               assertEquals(getSamplebyte(j), msg.getBodyBuffer().readByte());
            }
            session.rollback();
         }
         // Make the DLA itself page for the second dead-lettered message.
         pgStoreDLA.startPaging();
      }
      // Messages 2..99 are consumed normally, streaming their large bodies out.
      for (int i = 2; i < 100; i++)
      {
         log.debug("Received message " + i);
         ClientMessage message = cons.receive(5000);
         assertNotNull("Message " + i + " wasn't received", message);
         message.acknowledge();
         final AtomicInteger bytesOutput = new AtomicInteger(0);
         message.setOutputStream(new OutputStream()
         {
            @Override
            public void write(int b) throws IOException
            {
               bytesOutput.incrementAndGet();
            }
         });
         try
         {
            if (!message.waitOutputStreamCompletion(10000))
            {
               log.info(threadDump("dump"));
               fail("Couldn't finish large message receiving");
            }
         }
         catch (Throwable e)
         {
            log.info("output bytes = " + bytesOutput);
            log.info(threadDump("dump"));
            fail("Couldn't finish large message receiving for id=" + message.getStringProperty("id") +
                    " with messageID=" +
                    message.getMessageID());
         }
      }
      assertNull(cons.receiveImmediate());
      cons.close();
      // The two rolled-back messages must be on the DLA (not acknowledged here on
      // purpose: they are verified again after the restart below).
      cons = session.createConsumer("DLA");
      for (int i = 0; i < 2; i++)
      {
         assertNotNull(cons.receive(5000));
      }
      sf.close();
      session.close();
      locator.close();
      // Restart and re-verify everything from the journal / page files.
      server.stop();
      server.start();
      locator = createInVMNonHALocator();
      sf = locator.createSessionFactory();
      session = sf.createSession(false, false);
      session.start();
      cons = session.createConsumer(ADDRESS);
      for (int i = 2; i < 100; i++)
      {
         log.debug("Received message " + i);
         ClientMessage message = cons.receive(5000);
         assertNotNull(message);
         assertEquals("str" + i, message.getStringProperty("id"));
         message.acknowledge();
         message.setOutputStream(new OutputStream()
         {
            @Override
            public void write(int b) throws IOException
            {
            }
         });
         assertTrue(message.waitOutputStreamCompletion(5000));
      }
      assertNull(cons.receiveImmediate());
      cons.close();
      cons = session.createConsumer("DLA");
      for (int msgNr = 0; msgNr < 2; msgNr++)
      {
         ClientMessage msg = cons.receive(10000);
         assertNotNull(msg);
         assertEquals("str" + msgNr, msg.getStringProperty("id"));
         for (int i = 0; i < messageSize; i++)
         {
            assertEquals(getSamplebyte(i), msg.getBodyBuffer().readByte());
         }
         msg.acknowledge();
      }
      cons.close();
      cons = session.createConsumer(ADDRESS);
      session.commit();
      assertNull(cons.receiveImmediate());
      long timeout = System.currentTimeMillis() + 5000;
      pgStoreAddress = server.getPagingManager().getPageStore(ADDRESS);
      // Kick cleanup explicitly, then poll: depaging is asynchronous.
      pgStoreAddress.getCursorProvider().getSubscription(server.locateQueue(ADDRESS).getID()).cleanupEntries(false);
      pgStoreAddress.getCursorProvider().cleanup();
      while (timeout > System.currentTimeMillis() && pgStoreAddress.isPaging())
      {
         Thread.sleep(50);
      }
      assertFalse(pgStoreAddress.isPaging());
      session.commit();
   }
   finally
   {
      // FIX: the previous cleanup called session.close()/sf.close()/locator.close()
      // unguarded. On an early failure (any of them still null) or a throwing
      // close, server.stop() was skipped and the real test failure was masked.
      try
      {
         if (session != null)
         {
            session.close();
         }
         if (sf != null)
         {
            sf.close();
         }
         if (locator != null)
         {
            locator.close();
         }
      }
      catch (Throwable ignored)
      {
         // best-effort client cleanup; the original test failure (if any) matters more
      }
      try
      {
         server.stop();
      }
      catch (Throwable ignored)
      {
      }
   }
}
@Test
/**
 * Sends 500 messages (alternating large/regular bodies) with a 2s expiration while
 * the address is paging, restarts the server after they have expired, and verifies
 * all 500 land on the expiry address ("DLA") and the origin address stops paging.
 */
public void testExpireLargeMessageOnPaging() throws Exception
{
   clearDataRecreateServerDirs();
   Configuration config = createDefaultConfig()
      .setMessageExpiryScanPeriod(500)
      .setJournalSyncNonTransactional(false);
   // Expired (and dead-lettered) messages from ADDRESS are routed to "DLA".
   Map<String, AddressSettings> settings = new HashMap<String, AddressSettings>();
   AddressSettings dla = new AddressSettings();
   dla.setMaxDeliveryAttempts(5);
   dla.setDeadLetterAddress(new SimpleString("DLA"));
   dla.setExpiryAddress(new SimpleString("DLA"));
   settings.put(ADDRESS.toString(), dla);
   server = createServer(true, config, PagingTest.PAGE_SIZE, PagingTest.PAGE_MAX, settings);
   server.start();
   final int messageSize = 20;
   try
   {
      // FIX: assign the instance field instead of declaring a shadowing local.
      // The old local went out of scope at the end of this try block, so the
      // finally below closed the *field* locator while the locator created after
      // the restart (further down) was never closed.
      locator = createInVMNonHALocator();
      locator.setBlockOnNonDurableSend(true);
      locator.setBlockOnDurableSend(true);
      locator.setBlockOnAcknowledge(true);
      ClientSessionFactory sf = locator.createSessionFactory();
      ClientSession session = sf.createSession(false, false, false);
      session.createQueue(ADDRESS, ADDRESS, true);
      session.createQueue("DLA", "DLA", true);
      PagingStore pgStoreAddress = server.getPagingManager().getPageStore(ADDRESS);
      pgStoreAddress.startPaging();
      PagingStore pgStoreDLA = server.getPagingManager().getPageStore(new SimpleString("DLA"));
      ClientProducer producer = session.createProducer(PagingTest.ADDRESS);
      ClientMessage message = null;
      for (int i = 0; i < 500; i++)
      {
         if (i % 100 == 0)
            log.info("send message #" + i);
         message = session.createMessage(true);
         message.putStringProperty("id", "str" + i);
         // Everything expires 2s from now — well before consumption is attempted.
         message.setExpiration(System.currentTimeMillis() + 2000);
         if (i % 2 == 0)
         {
            // Even messages: streamed body => large message.
            message.setBodyInputStream(createFakeLargeStream(messageSize));
         }
         else
         {
            // Odd messages: regular in-buffer body with the same sample bytes.
            byte[] bytes = new byte[messageSize];
            for (int s = 0; s < bytes.length; s++)
            {
               bytes[s] = getSamplebyte(s);
            }
            message.getBodyBuffer().writeBytes(bytes);
         }
         producer.send(message);
         if ((i + 1) % 2 == 0)
         {
            session.commit();
            if (i < 400)
            {
               // Spread the first 400 messages over many page files.
               pgStoreAddress.forceAnotherPage();
            }
         }
      }
      session.commit();
      sf.close();
      locator.close();
      server.stop();
      // Let the 2s expirations elapse while the server is down.
      Thread.sleep(3000);
      server.start();
      locator = createInVMNonHALocator();
      sf = locator.createSessionFactory();
      session = sf.createSession(false, false);
      session.start();
      // Nothing must remain on the origin address — everything expired.
      ClientConsumer consAddr = session.createConsumer(ADDRESS);
      assertNull(consAddr.receive(1000));
      ClientConsumer cons = session.createConsumer("DLA");
      for (int i = 0; i < 500; i++)
      {
         log.info("Received message " + i);
         message = cons.receive(10000);
         assertNotNull(message);
         message.acknowledge();
         // Drain large-message bodies so delivery can complete.
         message.saveToOutputStream(new OutputStream()
         {
            @Override
            public void write(int b) throws IOException
            {
            }
         });
      }
      assertNull(cons.receiveImmediate());
      session.commit();
      cons.close();
      long timeout = System.currentTimeMillis() + 5000;
      pgStoreAddress = server.getPagingManager().getPageStore(ADDRESS);
      // Depaging is asynchronous; poll until the origin address stops paging.
      while (timeout > System.currentTimeMillis() && pgStoreAddress.isPaging())
      {
         Thread.sleep(50);
      }
      assertFalse(pgStoreAddress.isPaging());
      session.close();
   }
   finally
   {
      try
      {
         if (locator != null)
         {
            locator.close();
         }
      }
      catch (Throwable ignored)
      {
         // best-effort cleanup; don't let it mask the test result or skip server.stop()
      }
      try
      {
         server.stop();
      }
      catch (Throwable ignored)
      {
      }
   }
}
@Test
/**
 * When running this test from an IDE add this to the test command line so that the AssertionLoggerHandler works properly:
 *
 * -Djava.util.logging.manager=org.jboss.logmanager.LogManager -Dlogging.configuration=file:<path_to_source>/tests/config/logging.properties
 *
 * Note: Idea should get these from the pom and you shouldn't need to do this.
 */
public void testFailMessagesNonDurable() throws Exception
{
   // Capture server-side log output so we can assert the AMQ224016 warning below.
   AssertionLoggerHandler.startCapture();
   try
   {
      clearDataRecreateServerDirs();
      Configuration config = createDefaultConfig();
      // FAIL policy: sends to a full address are rejected instead of paged/blocked.
      HashMap<String, AddressSettings> settings = new HashMap<String, AddressSettings>();
      AddressSettings set = new AddressSettings();
      set.setAddressFullMessagePolicy(AddressFullMessagePolicy.FAIL);
      settings.put(PagingTest.ADDRESS.toString(), set);
      server = createServer(true, config, 1024, 5 * 1024, settings);
      server.start();
      // Async sends: failures surface as a server-side log entry, not a client exception.
      locator.setBlockOnNonDurableSend(false);
      locator.setBlockOnDurableSend(false);
      locator.setBlockOnAcknowledge(true);
      sf = createSessionFactory(locator);
      ClientSession session = sf.createSession(true, true, 0);
      session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS, null, true);
      ClientProducer producer = session.createProducer(PagingTest.ADDRESS);
      ClientMessage message = session.createMessage(false);
      int biggerMessageSize = 1024;
      byte[] body = new byte[biggerMessageSize];
      ByteBuffer bb = ByteBuffer.wrap(body);
      for (int j = 1; j <= biggerMessageSize; j++)
      {
         bb.put(getSamplebyte(j));
      }
      message.getBodyBuffer().writeBytes(body);
      // Send enough messages to fill up the address, but don't test for an immediate exception because we do not block
      // on non-durable send. Instead of receiving an exception immediately the exception will be logged on the server.
      for (int i = 0; i < 32; i++)
      {
         producer.send(message);
      }
      // allow time for the logging to actually happen on the server
      Thread.sleep(100);
      Assert.assertTrue("Expected to find AMQ224016", AssertionLoggerHandler.findText("AMQ224016"));
      ClientConsumer consumer = session.createConsumer(ADDRESS);
      session.start();
      // Once the destination is full and the client has run out of credits then it will receive an exception
      for (int i = 0; i < 10; i++)
      {
         validateExceptionOnSending(producer, message);
      }
      // Receive a message.. this should release credits
      ClientMessage msgReceived = consumer.receive(5000);
      assertNotNull(msgReceived);
      msgReceived.acknowledge();
      session.commit(); // to make sure it's on the server (roundtrip)
      boolean exception = false;
      try
      {
         for (int i = 0; i < 1000; i++)
         {
            // this send will succeed on the server
            producer.send(message);
         }
      }
      catch (Exception e)
      {
         exception = true;
      }
      // The address refills quickly, so one of the 1000 sends must fail again.
      assertTrue("Expected to throw an exception", exception);
   }
   finally
   {
      AssertionLoggerHandler.stopCapture();
   }
}
@Test
/**
 * FAIL address-full policy with durable, blocking sends: once the address is full
 * every send must throw ADDRESS_FULL immediately; consuming one message frees space
 * so that sending works again until the address refills.
 */
public void testFailMessagesDurable() throws Exception
{
   clearDataRecreateServerDirs();
   Configuration config = createDefaultConfig();
   HashMap<String, AddressSettings> settings = new HashMap<String, AddressSettings>();
   AddressSettings set = new AddressSettings();
   set.setAddressFullMessagePolicy(AddressFullMessagePolicy.FAIL);
   settings.put(PagingTest.ADDRESS.toString(), set);
   // Tiny limits (1K page / 5K max) so the address fills after just a few messages.
   server = createServer(true, config, 1024, 5 * 1024, settings);
   server.start();
   // Blocking sends: the failure surfaces synchronously as a client-side exception.
   locator.setBlockOnNonDurableSend(true);
   locator.setBlockOnDurableSend(true);
   locator.setBlockOnAcknowledge(true);
   sf = createSessionFactory(locator);
   ClientSession session = sf.createSession(true, true, 0);
   session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS, null, true);
   ClientProducer producer = session.createProducer(PagingTest.ADDRESS);
   ClientMessage message = session.createMessage(true);
   int biggerMessageSize = 1024;
   byte[] body = new byte[biggerMessageSize];
   ByteBuffer bb = ByteBuffer.wrap(body);
   for (int j = 1; j <= biggerMessageSize; j++)
   {
      bb.put(getSamplebyte(j));
   }
   message.getBodyBuffer().writeBytes(body);
   // Send enough messages to fill up the address and test for an exception.
   // The address will actually fill up after 3 messages. Also, it takes 32 messages for the client's
   // credits to run out.
   for (int i = 0; i < 50; i++)
   {
      if (i > 2)
      {
         validateExceptionOnSending(producer, message);
      }
      else
      {
         producer.send(message);
      }
   }
   ClientConsumer consumer = session.createConsumer(ADDRESS);
   session.start();
   // Receive a message.. this should release credits
   ClientMessage msgReceived = consumer.receive(5000);
   assertNotNull(msgReceived);
   msgReceived.acknowledge();
   session.commit(); // to make sure it's on the server (roundtrip)
   boolean exception = false;
   try
   {
      for (int i = 0; i < 1000; i++)
      {
         // this send will succeed on the server
         producer.send(message);
      }
   }
   catch (Exception e)
   {
      exception = true;
   }
   // The freed space is consumed almost immediately; a later send must fail again.
   assertTrue("Expected to throw an exception", exception);
}
@Test
/**
 * FAIL address-full policy combined with duplicate detection: a send carrying a
 * duplicate-detection ID that is rejected for address-full must NOT be recorded as
 * a duplicate, so message counts stay consistent and a later resend can succeed.
 */
public void testFailMessagesDuplicates() throws Exception
{
   clearDataRecreateServerDirs();
   Configuration config = createDefaultConfig();
   HashMap<String, AddressSettings> settings = new HashMap<String, AddressSettings>();
   AddressSettings set = new AddressSettings();
   set.setAddressFullMessagePolicy(AddressFullMessagePolicy.FAIL);
   settings.put(PagingTest.ADDRESS.toString(), set);
   // Tiny limits so the address is full after 3 of the 1K messages below.
   server = createServer(true, config, 1024, 5 * 1024, settings);
   server.start();
   locator.setBlockOnNonDurableSend(true);
   locator.setBlockOnDurableSend(true);
   locator.setBlockOnAcknowledge(true);
   sf = createSessionFactory(locator);
   ClientSession session = addClientSession(sf.createSession(true, true, 0));
   session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS, null, true);
   ClientProducer producer = session.createProducer(PagingTest.ADDRESS);
   ClientMessage message = session.createMessage(true);
   int biggerMessageSize = 1024;
   byte[] body = new byte[biggerMessageSize];
   ByteBuffer bb = ByteBuffer.wrap(body);
   for (int j = 1; j <= biggerMessageSize; j++)
   {
      bb.put(getSamplebyte(j));
   }
   message.getBodyBuffer().writeBytes(body);
   // Send enough messages to fill up the address.
   producer.send(message);
   producer.send(message);
   producer.send(message);
   Queue q = (Queue) server.getPostOffice().getBinding(ADDRESS).getBindable();
   Assert.assertEquals(3, getMessageCount(q));
   // send a message with a dup ID that should fail b/c the address is full
   SimpleString dupID1 = new SimpleString("abcdefg");
   message.putBytesProperty(Message.HDR_DUPLICATE_DETECTION_ID, dupID1.getData());
   message.putStringProperty("key", dupID1.toString());
   validateExceptionOnSending(producer, message);
   // Count unchanged: the rejected send must not have been stored.
   Assert.assertEquals(3, getMessageCount(q));
   ClientConsumer consumer = session.createConsumer(ADDRESS);
   session.start();
   // Receive a message...this should open space for another message
   ClientMessage msgReceived = consumer.receive(5000);
   assertNotNull(msgReceived);
   msgReceived.acknowledge();
   session.commit(); // to make sure it's on the server (roundtrip)
   consumer.close();
   Assert.assertEquals(2, getMessageCount(q));
   // Resend with the same dup ID: must succeed, proving the rejected send was not
   // registered in the duplicate-ID cache.
   producer.send(message);
   Assert.assertEquals(3, getMessageCount(q));
   consumer = session.createConsumer(ADDRESS);
   for (int i = 0; i < 3; i++)
   {
      msgReceived = consumer.receive(5000);
      assertNotNull(msgReceived);
      msgReceived.acknowledge();
      session.commit();
   }
}
/**
 * Asserts that sending {@code message} through {@code producer} is rejected with
 * {@link ActiveMQExceptionType#ADDRESS_FULL} — i.e. the address-full FAIL policy
 * is in effect and the target address is currently full.
 */
private void validateExceptionOnSending(ClientProducer producer, ClientMessage message)
{
   try
   {
      // after the address is full this send should fail (since the address full policy is FAIL)
      producer.send(message);
   }
   catch (ActiveMQException caught)
   {
      assertEquals(ActiveMQExceptionType.ADDRESS_FULL, caught.getType());
      return;
   }
   // Reaching here means the send unexpectedly succeeded.
   assertNotNull(null);
}
@Test
/** Variant of {@code testSpreadMessagesWithFilter} that adds a queue whose filter never matches. */
public void testSpreadMessagesWithFilterWithDeadConsumer() throws Exception
{
   testSpreadMessagesWithFilter(true);
}
@Test
/** Variant of {@code testSpreadMessagesWithFilter} with only live, matching queues. */
public void testSpreadMessagesWithFilterWithoutDeadConsumer() throws Exception
{
   testSpreadMessagesWithFilter(false);
}
@Test
/**
 * Three filtered queues on address "Q" with Q1's store forced into paging.
 * Interleaves sends, partial consumption and a forced page boundary, then checks
 * page-entry cleanup while Q2 still holds an un-acked reference into the pages.
 */
public void testRouteOnTopWithMultipleQueues() throws Exception
{
   Configuration config = createDefaultConfig()
      .setJournalSyncNonTransactional(false);
   server = createServer(true,
                         config,
                         PagingTest.PAGE_SIZE,
                         PagingTest.PAGE_MAX,
                         new HashMap<String, AddressSettings>());
   server.start();
   ServerLocator locator = createInVMNonHALocator();
   locator.setBlockOnDurableSend(false);
   ClientSessionFactory sf = createSessionFactory(locator);
   ClientSession session = sf.createSession(false, true, 0);
   // Each queue only matches its own "dest" value; Q3 never receives anything here.
   session.createQueue("Q", "Q1", "dest=1", true);
   session.createQueue("Q", "Q2", "dest=2", true);
   session.createQueue("Q", "Q3", "dest=3", true);
   Queue queue = server.locateQueue(new SimpleString("Q1"));
   // Put the shared store into page mode before any message is sent.
   queue.getPageSubscription().getPagingStore().startPaging();
   ClientProducer prod = session.createProducer("Q");
   ClientMessage msg = session.createMessage(true);
   msg.putIntProperty("dest", 1);
   prod.send(msg);
   session.commit();
   msg = session.createMessage(true);
   msg.putIntProperty("dest", 2);
   prod.send(msg);
   session.commit();
   session.start();
   ClientConsumer cons1 = session.createConsumer("Q1");
   msg = cons1.receive(5000);
   assertNotNull(msg);
   msg.acknowledge();
   ClientConsumer cons2 = session.createConsumer("Q2");
   msg = cons2.receive(5000);
   assertNotNull(msg);
   // NOTE(review): deliberately NOT acknowledged — Q2 keeps a live reference into
   // the current page while Q1's entries are cleaned up below.
   queue.getPageSubscription().getPagingStore().forceAnotherPage();
   msg = session.createMessage(true);
   msg.putIntProperty("dest", 1);
   prod.send(msg);
   session.commit();
   msg = cons1.receive(5000);
   assertNotNull(msg);
   msg.acknowledge();
   // Cleanup must cope with Q2's outstanding (un-acked) page reference.
   queue.getPageSubscription().cleanupEntries(false);
   System.out.println("Waiting there");
   server.stop();
}
// https://issues.jboss.org/browse/HORNETQ-1042 - spread messages because of filters
/**
 * Reproduces HORNETQ-1042: messages spread across paged queues by filters.
 * An initial burst targets Q1 only, then messages are interleaved between Q1 and Q2
 * (some matching both via the "both" property) over many forced page files.
 * Verifies per-queue ordering/filters, and — when {@code deadConsumer} is true —
 * that a queue with a never-matching filter ("Q3") receives nothing and does not
 * block depaging.
 *
 * @param deadConsumer whether to add queue Q3 with a filter no message matches
 */
public void testSpreadMessagesWithFilter(boolean deadConsumer) throws Exception
{
   clearDataRecreateServerDirs();
   Configuration config = createDefaultConfig()
      .setJournalSyncNonTransactional(false);
   server = createServer(true,
                         config,
                         PagingTest.PAGE_SIZE,
                         PagingTest.PAGE_MAX,
                         new HashMap<String, AddressSettings>());
   server.start();
   try
   {
      ServerLocator locator = createInVMNonHALocator();
      locator.setBlockOnDurableSend(false);
      ClientSessionFactory sf = locator.createSessionFactory();
      ClientSession session = sf.createSession(true, false);
      session.createQueue(ADDRESS.toString(), "Q1", "destQ=1 or both=true", true);
      session.createQueue(ADDRESS.toString(), "Q2", "destQ=2 or both=true", true);
      if (deadConsumer)
      {
         // This queue won't receive any messages
         session.createQueue(ADDRESS.toString(), "Q3", "destQ=3", true);
      }
      session.createQueue(ADDRESS.toString(), "Q_initial", "initialBurst=true", true);
      ClientSession sessionConsumerQ3 = null;
      final AtomicInteger consumerQ3Msgs = new AtomicInteger(0);
      if (deadConsumer)
      {
         // Attach an async consumer to Q3 that counts any (unexpected) delivery.
         sessionConsumerQ3 = sf.createSession(true, true);
         ClientConsumer consumerQ3 = sessionConsumerQ3.createConsumer("Q3");
         consumerQ3.setMessageHandler(new MessageHandler()
         {
            public void onMessage(ClientMessage message)
            {
               System.out.println("Received an unexpected message");
               consumerQ3Msgs.incrementAndGet();
            }
         });
         sessionConsumerQ3.start();
      }
      final int initialBurst = 100;
      final int messagesSentAfterBurst = 100;
      final int MESSAGE_SIZE = 300;
      final byte[] bodyWrite = new byte[MESSAGE_SIZE];
      Queue serverQueue = server.locateQueue(new SimpleString("Q1"));
      PagingStore pageStore = serverQueue.getPageSubscription().getPagingStore();
      ClientProducer producer = session.createProducer(ADDRESS);
      // send an initial burst that will put the system into page mode. The initial burst will be towards Q1 only
      for (int i = 0; i < initialBurst; i++)
      {
         ClientMessage m = session.createMessage(true);
         m.getBodyBuffer().writeBytes(bodyWrite);
         m.putIntProperty("destQ", 1);
         m.putBooleanProperty("both", false);
         m.putBooleanProperty("initialBurst", true);
         producer.send(m);
         if (i % 100 == 0)
         {
            session.commit();
         }
      }
      session.commit();
      pageStore.forceAnotherPage();
      for (int i = 0; i < messagesSentAfterBurst; i++)
      {
         {
            // Message for Q1; every 10th also matches Q2 via both=true.
            ClientMessage m = session.createMessage(true);
            m.getBodyBuffer().writeBytes(bodyWrite);
            m.putIntProperty("destQ", 1);
            m.putBooleanProperty("initialBurst", false);
            m.putIntProperty("i", i);
            m.putBooleanProperty("both", i % 10 == 0);
            producer.send(m);
         }
         if (i % 10 != 0)
         {
            // Companion message for Q2 (skipped when the both=true message covered it).
            ClientMessage m = session.createMessage(true);
            m.getBodyBuffer().writeBytes(bodyWrite);
            m.putIntProperty("destQ", 2);
            m.putIntProperty("i", i);
            m.putBooleanProperty("both", false);
            m.putBooleanProperty("initialBurst", false);
            producer.send(m);
         }
         if (i > 0 && i % 10 == 0)
         {
            session.commit();
            if (i + 10 < messagesSentAfterBurst)
            {
               // Scatter the interleaved messages across multiple page files.
               pageStore.forceAnotherPage();
            }
         }
      }
      session.commit();
      ClientConsumer consumerQ1 = session.createConsumer("Q1");
      ClientConsumer consumerQ2 = session.createConsumer("Q2");
      session.start();
      // consuming now
      // initial burst
      for (int i = 0; i < initialBurst; i++)
      {
         ClientMessage m = consumerQ1.receive(5000);
         assertNotNull(m);
         assertEquals(1, m.getIntProperty("destQ").intValue());
         m.acknowledge();
         session.commit();
      }
      // This will consume messages from the beginning of the queue only
      ClientConsumer consumerInitial = session.createConsumer("Q_initial");
      for (int i = 0; i < initialBurst; i++)
      {
         ClientMessage m = consumerInitial.receive(5000);
         assertNotNull(m);
         assertEquals(1, m.getIntProperty("destQ").intValue());
         m.acknowledge();
      }
      assertNull(consumerInitial.receiveImmediate());
      session.commit();
      // messages from Q1
      for (int i = 0; i < messagesSentAfterBurst; i++)
      {
         ClientMessage m = consumerQ1.receive(5000);
         assertNotNull(m);
         if (!m.getBooleanProperty("both"))
         {
            assertEquals(1, m.getIntProperty("destQ").intValue());
         }
         // "i" must arrive in send order on each queue.
         assertEquals(i, m.getIntProperty("i").intValue());
         m.acknowledge();
         session.commit();
      }
      for (int i = 0; i < messagesSentAfterBurst; i++)
      {
         ClientMessage m = consumerQ2.receive(5000);
         assertNotNull(m);
         if (!m.getBooleanProperty("both"))
         {
            assertEquals(2, m.getIntProperty("destQ").intValue());
         }
         assertEquals(i, m.getIntProperty("i").intValue());
         m.acknowledge();
         session.commit();
      }
      waitForNotPaging(serverQueue);
      if (sessionConsumerQ3 != null)
      {
         sessionConsumerQ3.close();
      }
      // The dead-filter queue must never have received a message.
      assertEquals(0, consumerQ3Msgs.intValue());
      session.close();
      locator.close();
   }
   finally
   {
      server.stop();
   }
}
// We send messages to pages, create a big hole (a few pages without any messages), ack everything
// and expect it to move to the next page
@Test
/**
 * Creates a "page hole": one Q1 message at the front, many Q2 messages spread over
 * forced pages behind it. As Q2 drains, earlier page files must be deleted even
 * though Q1's cursor sits far ahead, and new sends after the hole must still arrive.
 */
public void testPageHole() throws Throwable
{
   clearDataRecreateServerDirs();
   Configuration config = createDefaultConfig()
      .setJournalSyncNonTransactional(false);
   server = createServer(true,
                         config,
                         PagingTest.PAGE_SIZE,
                         PagingTest.PAGE_MAX,
                         new HashMap<String, AddressSettings>());
   server.start();
   try
   {
      ServerLocator locator = createInVMNonHALocator();
      locator.setBlockOnDurableSend(true);
      ClientSessionFactory sf = locator.createSessionFactory();
      ClientSession session = sf.createSession(true, true, 0);
      session.createQueue(ADDRESS.toString(), "Q1", "dest=1", true);
      session.createQueue(ADDRESS.toString(), "Q2", "dest=2", true);
      PagingStore store = server.getPagingManager().getPageStore(ADDRESS);
      // Everything below is written into page files, not the journal queues.
      store.startPaging();
      ClientProducer prod = session.createProducer(ADDRESS);
      // Single Q1 message in the very first page...
      ClientMessage msg = session.createMessage(true);
      msg.putIntProperty("dest", 1);
      prod.send(msg);
      // ...followed by 100 Q2 messages spread over ~10 forced page files.
      for (int i = 0; i < 100; i++)
      {
         msg = session.createMessage(true);
         msg.putIntProperty("dest", 2);
         prod.send(msg);
         if (i > 0 && i % 10 == 0)
         {
            store.forceAnotherPage();
         }
      }
      session.start();
      ClientConsumer cons1 = session.createConsumer("Q1");
      ClientMessage msgReceivedCons1 = cons1.receive(5000);
      assertNotNull(msgReceivedCons1);
      msgReceivedCons1.acknowledge();
      ClientConsumer cons2 = session.createConsumer("Q2");
      for (int i = 0; i < 100; i++)
      {
         ClientMessage msgReceivedCons2 = cons2.receive(1000);
         assertNotNull(msgReceivedCons2);
         msgReceivedCons2.acknowledge();
         session.commit();
         // It will send another message when it's mid consumed
         if (i == 20)
         {
            // wait at least one page to be deleted before sending a new one
            for (long timeout = System.currentTimeMillis() + 5000; timeout > System.currentTimeMillis() && store.checkPageFileExists(2); )
            {
               Thread.sleep(10);
            }
            msg = session.createMessage(true);
            msg.putIntProperty("dest", 1);
            prod.send(msg);
         }
      }
      // The Q1 message sent after the page hole must still be delivered.
      msgReceivedCons1 = cons1.receive(5000);
      assertNotNull(msgReceivedCons1);
      msgReceivedCons1.acknowledge();
      assertNull(cons1.receiveImmediate());
      assertNull(cons2.receiveImmediate());
      session.commit();
      session.close();
      waitForNotPaging(store);
   }
   finally
   {
      server.stop();
   }
}
/** Browsing-consumer variant of {@link #internalTestMultiFilters(boolean)}. */
@Test
public void testMultiFiltersBrowsing() throws Throwable
{
   internalTestMultiFilters(true);
}
/** Regular (destructive) consumer variant of {@link #internalTestMultiFilters(boolean)}. */
@Test
public void testMultiFiltersRegularConsumer() throws Throwable
{
   internalTestMultiFilters(false);
}
/**
 * Sends 100 "red" then 100 "green" messages into a paged address and verifies
 * a filtered consumer sees exactly its 100 matches — either as a browser
 * (green filter, non-destructive) or as a regular consumer (red filter, acked).
 *
 * @param browsing {@code true} to use a browsing consumer on the later (green)
 *                 messages, {@code false} for a destructive consumer on the
 *                 earlier (red) ones
 */
public void internalTestMultiFilters(boolean browsing) throws Throwable
{
   clearDataRecreateServerDirs();
   Configuration config = createDefaultConfig()
      .setJournalSyncNonTransactional(false);
   server = createServer(true,
                         config,
                         PagingTest.PAGE_SIZE,
                         PagingTest.PAGE_MAX,
                         new HashMap<String, AddressSettings>());
   server.start();
   try
   {
      ServerLocator locator = createInVMNonHALocator();
      locator.setBlockOnDurableSend(true);
      ClientSessionFactory sf = locator.createSessionFactory();
      ClientSession session = sf.createSession(true, true, 0);
      session.createQueue(ADDRESS.toString(), "Q1", null, true);
      PagingStore store = server.getPagingManager().getPageStore(ADDRESS);
      ClientProducer prod = session.createProducer(ADDRESS);
      ClientMessage msg = null;
      store.startPaging();
      // 100 red messages spread over many pages...
      for (int i = 0; i < 100; i++)
      {
         msg = session.createMessage(true);
         msg.putStringProperty("color", "red");
         msg.putIntProperty("count", i);
         prod.send(msg);
         if (i > 0 && i % 10 == 0)
         {
            store.startPaging();
            store.forceAnotherPage();
         }
      }
      // ...followed by 100 green ones, likewise paged.
      for (int i = 0; i < 100; i++)
      {
         msg = session.createMessage(true);
         msg.putStringProperty("color", "green");
         msg.putIntProperty("count", i);
         prod.send(msg);
         if (i > 0 && i % 10 == 0)
         {
            store.startPaging();
            store.forceAnotherPage();
         }
      }
      session.commit();
      session.close();
      // Fresh transacted session for consuming.
      session = sf.createSession(false, false, 0);
      session.start();
      ClientConsumer cons1;
      if (browsing)
      {
         cons1 = session.createConsumer("Q1", "color='green'", true);
      }
      else
      {
         cons1 = session.createConsumer("Q1", "color='red'", false);
      }
      // Exactly 100 messages must match the chosen filter.
      for (int i = 0; i < 100; i++)
      {
         msg = cons1.receive(5000);
         System.out.println("Received " + msg);
         assertNotNull(msg);
         if (!browsing)
         {
            msg.acknowledge();
         }
      }
      session.commit();
      session.close();
   }
   finally
   {
      server.stop();
   }
}
/**
 * Individually acknowledges a single mid-stream message (#13) while paging,
 * restarts the broker, and checks the remaining 99 messages survive recovery
 * of the page cursor and the address eventually leaves paging.
 */
@Test
public void testPendingACKOutOfOrder() throws Throwable
{
   clearDataRecreateServerDirs();
   Configuration config = createDefaultConfig()
      .setJournalSyncNonTransactional(false);
   server = createServer(true,
                         config,
                         PagingTest.PAGE_SIZE,
                         PagingTest.PAGE_MAX,
                         new HashMap<String, AddressSettings>());
   server.start();
   try
   {
      ServerLocator locator = createInVMNonHALocator();
      locator.setBlockOnDurableSend(false);
      ClientSessionFactory sf = locator.createSessionFactory();
      ClientSession session = sf.createSession(true, true, 0);
      session.createQueue(ADDRESS.toString(), "Q1", true);
      PagingStore store = server.getPagingManager().getPageStore(ADDRESS);
      store.startPaging();
      ClientProducer prod = session.createProducer(ADDRESS);
      for (int i = 0; i < 100; i++)
      {
         ClientMessage msg = session.createMessage(true);
         msg.putIntProperty("count", i);
         prod.send(msg);
         session.commit();
         // Spread the first half of the messages over several pages.
         if ((i + 1) % 5 == 0 && i < 50)
         {
            store.forceAnotherPage();
         }
      }
      session.start();
      ClientConsumer cons1 = session.createConsumer("Q1");
      // Receive everything, but only individually-ack message #13; the rest
      // are left pending when the session is dropped.
      for (int i = 0; i < 100; i++)
      {
         ClientMessage msg = cons1.receive(5000);
         assertNotNull(msg);
         if (i == 13)
         {
            msg.individualAcknowledge();
         }
      }
      session.close();
      locator.close();
      // Restart to force recovery with the out-of-order pending ACK on disk.
      server.stop();
      server.start();
      store = server.getPagingManager().getPageStore(ADDRESS);
      locator = createInVMNonHALocator();
      sf = locator.createSessionFactory();
      session = sf.createSession(true, true, 0);
      cons1 = session.createConsumer("Q1");
      session.start();
      // 99 messages remain: 100 sent minus the one individually acknowledged.
      for (int i = 0; i < 99; i++)
      {
         ClientMessage msg = cons1.receive(5000);
         assertNotNull(msg);
         System.out.println("count = " + msg.getIntProperty("count"));
         msg.acknowledge();
      }
      assertNull(cons1.receiveImmediate());
      session.close();
      waitForNotPaging(store);
   }
   finally
   {
      server.stop();
   }
}
// Test a scenario where a page was complete and now needs to be cleared
@Test
public void testPageCompleteWasLive() throws Throwable
{
   clearDataRecreateServerDirs();
   Configuration config = createDefaultConfig()
      .setJournalSyncNonTransactional(false);
   server = createServer(true,
                         config,
                         PagingTest.PAGE_SIZE,
                         PagingTest.PAGE_MAX,
                         new HashMap<String, AddressSettings>());
   server.start();
   try
   {
      ServerLocator locator = createInVMNonHALocator();
      locator.setBlockOnDurableSend(false);
      ClientSessionFactory sf = locator.createSessionFactory();
      ClientSession session = sf.createSession(true, true, 0);
      // Two filtered queues: Q1 matches dest=1 and Q2 matches dest=2.
      session.createQueue(ADDRESS.toString(), "Q1", "dest=1", true);
      session.createQueue(ADDRESS.toString(), "Q2", "dest=2", true);
      PagingStore store = server.getPagingManager().getPageStore(ADDRESS);
      store.startPaging();
      ClientProducer prod = session.createProducer(ADDRESS);
      // One message per queue on the first — still "live" — page.
      ClientMessage msg = session.createMessage(true);
      msg.putIntProperty("dest", 1);
      prod.send(msg);
      msg = session.createMessage(true);
      msg.putIntProperty("dest", 2);
      prod.send(msg);
      session.start();
      ClientConsumer cons1 = session.createConsumer("Q1");
      ClientMessage msgReceivedCons1 = cons1.receive(1000);
      assertNotNull(msgReceivedCons1);
      ClientConsumer cons2 = session.createConsumer("Q2");
      ClientMessage msgReceivedCons2 = cons2.receive(1000);
      assertNotNull(msgReceivedCons2);
      // Close the live page and put one more message for Q1 onto a new page.
      store.forceAnotherPage();
      msg = session.createMessage(true);
      msg.putIntProperty("dest", 1);
      prod.send(msg);
      msgReceivedCons1.acknowledge();
      msgReceivedCons1 = cons1.receive(1000);
      assertNotNull(msgReceivedCons1);
      msgReceivedCons1.acknowledge();
      msgReceivedCons2.acknowledge();
      assertNull(cons1.receiveImmediate());
      assertNull(cons2.receiveImmediate());
      session.commit();
      session.close();
      // All messages acked: the formerly-live, now complete page must be
      // cleared and the address must leave paging mode.
      waitForNotPaging(store);
   }
   finally
   {
      server.stop();
   }
}
/**
 * Fills an address while it has a queue, deletes the queue (leaving no
 * cursors), restarts the broker and verifies the page store is cleaned up.
 *
 * <p>FIX: every sibling test in this class guards the broker with
 * {@code try/finally { server.stop(); }}; this one did not, so any failed
 * assertion or exception before the final stop leaked a running server (and
 * an open locator/session factory) into subsequent tests. The logic is
 * unchanged; the resources are now released on all paths.</p>
 */
@Test
public void testNoCursors() throws Exception
{
   Configuration config = createDefaultConfig()
      .setJournalSyncNonTransactional(false);
   server = createServer(true,
                         config,
                         PagingTest.PAGE_SIZE,
                         PagingTest.PAGE_MAX,
                         new HashMap<String, AddressSettings>());
   server.start();
   try
   {
      ServerLocator locator = createInVMNonHALocator();
      try
      {
         ClientSessionFactory sf = locator.createSessionFactory();
         ClientSession session = sf.createSession();
         session.createQueue(ADDRESS, ADDRESS, true);
         ClientProducer prod = session.createProducer(ADDRESS);
         // 100 durable 1 KiB messages to push the address into paging.
         for (int i = 0; i < 100; i++)
         {
            Message msg = session.createMessage(true);
            msg.getBodyBuffer().writeBytes(new byte[1024]);
            prod.send(msg);
         }
         session.commit();
         // Deleting the only queue removes every cursor on the address.
         session.deleteQueue(ADDRESS);
         session.close();
         sf.close();
      }
      finally
      {
         locator.close();
      }
      // Restart: with no cursors left, the page store must clean itself up.
      server.stop();
      server.start();
      waitForNotPaging(server.getPagingManager().getPageStore(ADDRESS));
   }
   finally
   {
      server.stop();
   }
}
// Moves paged (and large) messages from Q1 to Q2 and verifies they all arrive
// intact — in order, with large bodies byte-checked — after a broker restart.
// (NOTE(review): the previous comment here was copy-pasted from
// testPageCompleteWasLive and did not describe this test.)
@Test
public void testMoveMessages() throws Throwable
{
   clearDataRecreateServerDirs();
   Configuration config = createDefaultConfig()
      .setJournalSyncNonTransactional(false);
   server = createServer(true,
                         config,
                         PagingTest.PAGE_SIZE,
                         PagingTest.PAGE_MAX,
                         new HashMap<String, AddressSettings>());
   server.start();
   final int LARGE_MESSAGE_SIZE = 1024 * 1024;
   try
   {
      ServerLocator locator = createInVMNonHALocator();
      locator.setBlockOnDurableSend(false);
      ClientSessionFactory sf = locator.createSessionFactory();
      ClientSession session = sf.createSession(true, true, 0);
      session.createQueue("Q1", "Q1", true);
      session.createQueue("Q2", "Q2", true);
      PagingStore store = server.getPagingManager().getPageStore(new SimpleString("Q1"));
      ClientProducer prod = session.createProducer("Q1");
      // First 50 messages (every 10th one a large message) go to the journal...
      for (int i = 0; i < 50; i++)
      {
         ClientMessage msg = session.createMessage(true);
         msg.putIntProperty("count", i);
         if (i > 0 && i % 10 == 0)
         {
            msg.setBodyInputStream(createFakeLargeStream(LARGE_MESSAGE_SIZE));
         }
         prod.send(msg);
      }
      session.commit();
      // ...the next 50 are paged, spread over several forced pages.
      store.startPaging();
      for (int i = 50; i < 100; i++)
      {
         ClientMessage msg = session.createMessage(true);
         msg.putIntProperty("count", i);
         if (i % 10 == 0)
         {
            msg.setBodyInputStream(createFakeLargeStream(LARGE_MESSAGE_SIZE));
         }
         prod.send(msg);
         if (i % 10 == 0)
         {
            session.commit();
            store.forceAnotherPage();
         }
      }
      session.commit();
      Queue queue = server.locateQueue(new SimpleString("Q1"));
      // Move everything (no filter) from Q1 to Q2; the first argument is
      // presumably a flush/batch size — confirm against the Queue API.
      queue.moveReferences(10, (Filter) null, new SimpleString("Q2"), false);
      waitForNotPaging(store);
      session.close();
      locator.close();
      // Restart so the moved references are reloaded from storage.
      server.stop();
      server.start();
      locator = createInVMNonHALocator();
      locator.setBlockOnDurableSend(false);
      sf = locator.createSessionFactory();
      session = sf.createSession(true, true, 0);
      session.start();
      ClientConsumer cons = session.createConsumer("Q2");
      for (int i = 0; i < 100; i++)
      {
         ClientMessage msg = cons.receive(10000);
         assertNotNull(msg);
         if (i > 0 && i % 10 == 0)
         {
            // Large messages: read and verify the full body byte by byte.
            byte[] largeMessageRead = new byte[LARGE_MESSAGE_SIZE];
            msg.getBodyBuffer().readBytes(largeMessageRead);
            for (int j = 0; j < LARGE_MESSAGE_SIZE; j++)
            {
               assertEquals(largeMessageRead[j], getSamplebyte(j));
            }
         }
         msg.acknowledge();
         // Ordering must be preserved across the move and the restart.
         assertEquals(i, msg.getIntProperty("count").intValue());
      }
      assertNull(cons.receiveImmediate());
      waitForNotPaging(server.locateQueue(new SimpleString("Q2")));
      session.close();
      sf.close();
      locator.close();
   }
   finally
   {
      server.stop();
   }
}
/**
 * Overridden so every test in this class runs with journal sync disabled for
 * non-transactional operations (the individual tests set it again redundantly).
 */
@Override
protected Configuration createDefaultConfig() throws Exception
{
   return super.createDefaultConfig()
      .setJournalSyncNonTransactional(false);
}
/**
 * Mostly-no-op {@link OperationContext} used by tests that only need to
 * observe page-sync events: the two latches are counted down when the
 * storage layer signals a page-sync line-up / completion.
 */
private static final class DummyOperationContext implements OperationContext
{
   private final CountDownLatch pageUp;
   private final CountDownLatch pageDone;
   public DummyOperationContext(CountDownLatch pageUp, CountDownLatch pageDone)
   {
      this.pageDone = pageDone;
      this.pageUp = pageUp;
   }
   // All callbacks below except the pageSync* pair are deliberate no-ops.
   public void onError(int errorCode, String errorMessage)
   {
   }
   public void done()
   {
   }
   public void storeLineUp()
   {
   }
   public boolean waitCompletion(long timeout) throws Exception
   {
      // Never reports completion.
      return false;
   }
   public void waitCompletion() throws Exception
   {
   }
   public void replicationLineUp()
   {
   }
   public void replicationDone()
   {
   }
   public void pageSyncLineUp()
   {
      // Signal the waiting test that a page sync has been lined up.
      pageUp.countDown();
   }
   public void pageSyncDone()
   {
      // Signal the waiting test that the page sync finished.
      pageDone.countDown();
   }
   public void executeOnCompletion(IOAsyncTask runnable)
   {
   }
}
}
| apache-2.0 |
qinannmj/FireFly | src/main/java/cn/com/sparkle/firefly/stablestorage/io/PriorChangeable.java | 137 | package cn.com.sparkle.firefly.stablestorage.io;
/**
 * A stable-storage I/O component whose processing priority can be toggled at
 * runtime.
 */
public interface PriorChangeable {
	/**
	 * Switches this component between high and normal priority.
	 *
	 * @param isHigh {@code true} to request high priority; the exact
	 *               scheduling effect is implementation-specific (not
	 *               visible here — confirm with the implementations)
	 */
	public void setIsHighPrior(boolean isHigh);
}
| apache-2.0 |
spring-cloud/spring-cloud-aws | spring-cloud-aws-integration-test/src/test/java/org/springframework/cloud/aws/it/jdbc/DatabaseService.java | 819 | /*
* Copyright 2013-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.aws.it.jdbc;
import java.util.Date;
/**
 * Test-support abstraction over a database table holding a "last access"
 * timestamp, used by the JDBC integration tests.
 */
interface DatabaseService {
	/**
	 * Reads the last-update timestamp.
	 *
	 * @param lastAccessDatabase the previously observed access time
	 *        (NOTE(review): how implementations use this argument is not
	 *        visible here — confirm against the implementing class)
	 * @return the last update time
	 */
	Date getLastUpdate(Date lastAccessDatabase);
	/**
	 * Presumably records the current time as the last database access and
	 * returns it — confirm with the implementation.
	 */
	Date updateLastAccessDatabase();
}
| apache-2.0 |
google/polymorphicDSL | src/main/java/com/pdsl/gherkin/filter/GherkinTagsVisitor.java | 1170 | // Generated from GherkinTags.g4 by ANTLR 4.9
package com.pdsl.gherkin.filter;
import org.antlr.v4.runtime.tree.ParseTreeVisitor;
/**
 * This interface defines a complete generic visitor for a parse tree produced
 * by {@link GherkinTagsParser}.
 *
 * <p>NOTE: generated by ANTLR 4.9 from {@code GherkinTags.g4} — regenerate
 * from the grammar rather than editing this file by hand.</p>
 *
 * @param <T> The return type of the visit operation. Use {@link Void} for
 * operations with no return type.
 */
public interface GherkinTagsVisitor<T> extends ParseTreeVisitor<T> {
	/**
	 * Visit a parse tree produced by {@link GherkinTagsParser#not}.
	 * @param ctx the parse tree
	 * @return the visitor result
	 */
	T visitNot(GherkinTagsParser.NotContext ctx);
	/**
	 * Visit a parse tree produced by {@link GherkinTagsParser#and}.
	 * @param ctx the parse tree
	 * @return the visitor result
	 */
	T visitAnd(GherkinTagsParser.AndContext ctx);
	/**
	 * Visit a parse tree produced by {@link GherkinTagsParser#or}.
	 * @param ctx the parse tree
	 * @return the visitor result
	 */
	T visitOr(GherkinTagsParser.OrContext ctx);
	/**
	 * Visit a parse tree produced by {@link GherkinTagsParser#expr}.
	 * @param ctx the parse tree
	 * @return the visitor result
	 */
	T visitExpr(GherkinTagsParser.ExprContext ctx);
}
lys091112/moonstar | practice/src/main/java/com/github/springboot/support/springusetest/retrofit/MeetingService.java | 117 | package com.github.springboot.support.springusetest.retrofit;
/**
 * Empty Retrofit-annotated service used by the Spring wiring tests.
 * NOTE(review): presumably a placeholder — no endpoints are declared yet;
 * confirm against the {@code @RetrofitService} processing code.
 */
@RetrofitService
public interface MeetingService {
}
| apache-2.0 |
dash-/apache-openaz | openaz-xacml-pdp/src/main/java/org/apache/openaz/xacml/pdp/policy/AllOf.java | 4863 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.openaz.xacml.pdp.policy;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import org.apache.openaz.xacml.api.StatusCode;
import org.apache.openaz.xacml.pdp.eval.EvaluationContext;
import org.apache.openaz.xacml.pdp.eval.EvaluationException;
import org.apache.openaz.xacml.pdp.eval.MatchResult;
import org.apache.openaz.xacml.pdp.eval.Matchable;
import org.apache.openaz.xacml.std.StdStatus;
import org.apache.openaz.xacml.std.StdStatusCode;
import org.apache.openaz.xacml.util.StringUtils;
/**
* AnyOf extends {@link org.apache.openaz.xacml.pdp.policy.PolicyComponent} and implements the
* {@link org.apache.openaz.xacml.pdp.policy.Matchable} interface to represent XACML AllOf elements in a
* XACML Target.
*/
public class AllOf extends PolicyComponent implements Matchable {
    // The conjuncts: every contained Match must match for this AllOf to match.
    private List<Match> matches;

    // Returns the backing list, lazily creating it when bNoNulls is true.
    protected List<Match> getMatchList(boolean bNoNulls) {
        if (this.matches == null && bNoNulls) {
            this.matches = new ArrayList<Match>();
        }
        return this.matches;
    }

    protected void clearMatchList() {
        if (this.matches != null) {
            this.matches.clear();
        }
    }

    public AllOf(StatusCode statusCodeIn, String statusMessageIn) {
        super(statusCodeIn, statusMessageIn);
    }

    public AllOf(StatusCode statusCodeIn) {
        super(statusCodeIn);
    }

    public AllOf() {
    }

    /** Returns an iterator over the contained Matches, or {@code null} if none were set. */
    public Iterator<Match> getMatches() {
        return (this.matches == null ? null : this.matches.iterator());
    }

    /** Replaces the current Matches with a copy of the given collection. */
    public void setMatches(Collection<Match> matchesIn) {
        this.clearMatchList();
        if (matchesIn != null) {
            this.addMatches(matchesIn);
        }
    }

    public void addMatch(Match match) {
        List<Match> matchList = this.getMatchList(true);
        matchList.add(match);
    }

    public void addMatches(Collection<Match> matchesIn) {
        List<Match> matchList = this.getMatchList(true);
        matchList.addAll(matchesIn);
    }

    /**
     * Evaluates this AllOf as a conjunction: a NOMATCH from any child returns
     * immediately; otherwise the first INDETERMINATE child result (if any) is
     * returned; MATCH is returned only when every child matched.
     */
    @Override
    public MatchResult match(EvaluationContext evaluationContext) throws EvaluationException {
        if (!this.validate()) {
            // Invalid component: surface its status instead of evaluating.
            return new MatchResult(new StdStatus(this.getStatusCode(), this.getStatusMessage()));
        }
        Iterator<Match> iterMatches = this.getMatches();
        assert iterMatches != null && iterMatches.hasNext();
        MatchResult matchResultFallThrough = MatchResult.MM_MATCH;
        while (iterMatches.hasNext()) {
            MatchResult matchResultMatch = iterMatches.next().match(evaluationContext);
            assert matchResultMatch != null;
            switch (matchResultMatch.getMatchCode()) {
            case INDETERMINATE:
                // Remember only the first INDETERMINATE; later ones are ignored.
                if (matchResultFallThrough.getMatchCode() != MatchResult.MatchCode.INDETERMINATE) {
                    matchResultFallThrough = matchResultMatch;
                }
                break;
            case MATCH:
                break;
            case NOMATCH:
                // A single failed conjunct decides the whole AllOf.
                return matchResultMatch;
            }
        }
        return matchResultFallThrough;
    }

    @Override
    protected boolean validateComponent() {
        Iterator<Match> iterMatches = this.getMatches();
        if (iterMatches == null || !iterMatches.hasNext()) {
            // An AllOf with no Match children is a syntax error.
            this.setStatus(StdStatusCode.STATUS_CODE_SYNTAX_ERROR, "Missing matches");
            return false;
        } else {
            this.setStatus(StdStatusCode.STATUS_CODE_OK, null);
            return true;
        }
    }

    @Override
    public String toString() {
        StringBuilder stringBuilder = new StringBuilder("{");
        stringBuilder.append("super=");
        stringBuilder.append(super.toString());
        String stringMatches = StringUtils.toString(this.getMatches());
        if (stringMatches != null) {
            stringBuilder.append(",matches=");
            stringBuilder.append(stringMatches);
        }
        stringBuilder.append('}');
        return stringBuilder.toString();
    }
}
| apache-2.0 |
sumanta23/ExportToExcel | src/test/java/org/sumanta/main/Employee.java | 1103 | package org.sumanta.main;
import org.sumanta.annotation.Embedded;
import org.sumanta.annotation.Property;
/**
 * Simple bean exercising the Excel-export annotations: each {@code @Property}
 * field becomes a column, and {@code ad} is flattened via {@code @Embedded}.
 */
public class Employee {

    @Property
    public int id;

    @Property
    public String firstName;

    @Property
    public String lastName;

    @Property
    public int salary;

    @Embedded
    @Property
    public Address ad;

    /** No-arg constructor, required for reflective instantiation. */
    public Employee() {
    }

    /** Convenience constructor; the embedded address stays unset. */
    public Employee(int id, String firstName, String lastName, int salary) {
        this.id = id;
        this.firstName = firstName;
        this.lastName = lastName;
        this.salary = salary;
    }

    public int getId() {
        return id;
    }

    public void setId(int id) {
        this.id = id;
    }

    public String getFirstName() {
        return firstName;
    }

    public void setFirstName(String firstName) {
        this.firstName = firstName;
    }

    public String getLastName() {
        return lastName;
    }

    public void setLastName(String lastName) {
        this.lastName = lastName;
    }

    public int getSalary() {
        return salary;
    }

    public void setSalary(int salary) {
        this.salary = salary;
    }

    public Address getAd() {
        return ad;
    }

    public void setAd(Address ad) {
        this.ad = ad;
    }
}
| apache-2.0 |
SaharChang/Author-Extraction | AuthorExtract/src/com/steadystate/css/ParseException.java | 203 | /* Generated By:JavaCC: Do not edit this line. ParseException.java Version 0.7pre6 */
package com.steadystate.css;
/**
 * Parse exception for the CSS grammar; simply re-exports
 * {@code com.steadystate.css.parser.ParseException} under this package.
 * NOTE: generated by JavaCC (see the header above) — regenerate rather than
 * edit by hand.
 */
public class ParseException extends com.steadystate.css.parser.ParseException {
}
| apache-2.0 |
yliu120/ErrorCorrection | src/edu/jhu/cs/cs439/project/KMerHashers.java | 2723 | package edu.jhu.cs.cs439.project;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import edu.jhu.cs.cs439.project.projectinterface.Hashers;
/**
* This class implements hash function library that can used for Count-min
* sketch. The library gives a maximum 24 hash-functions for the sake of d =
* ceil( log( 1/delta ) ). We doesn't really need this much.
*
* @author Yunlong Liu
* @author Yijie Li
*/
public class KMerHashers implements Hashers {

	/** Maximum number of distinct hash functions: the 4! = 24 permutations of "ACGT". */
	private static final int MAX_HASHERS = 24;

	// This field will determine how many hashers
	private int depth;

	// This field will determine the hashcode for KMers.
	private int K;

	// Hash function libraries: one base->2-bit-string map per permutation of "ACGT".
	private List<Map<String, String>> hashLibraries;

	/**
	 * This default constructor takes two parameters to generate hashers
	 *
	 * @param depth
	 *            the number of hasher needed (at most 24)
	 * @param k
	 *            the "K"-mer's K
	 * @throws IllegalArgumentException
	 *             if {@code depth} exceeds 24 — previously such a value made
	 *             {@link #getHashFunctions()} spin forever trying to draw more
	 *             than 24 distinct indices from [0, 24); now we fail fast
	 */
	public KMerHashers(int depth, int k) {
		super();
		if (depth > MAX_HASHERS) {
			throw new IllegalArgumentException("depth must be <= " + MAX_HASHERS + ", got " + depth);
		}
		this.depth = depth;
		K = k;
		this.hashLibraries = new ArrayList<>();
		this.generateHashLibraries();
	}

	/**
	 * @see Hashers#getHashFunctions()
	 * @return {@code depth} hash functions picked at random, as a list of maps
	 */
	public List<Map<String, String>> getHashFunctions() {
		List<Map<String, String>> hashFunctions = new ArrayList<>();
		for (Integer i : this.generateDRandomNumber()) {
			hashFunctions.add(this.hashLibraries.get(i));
		}
		return hashFunctions;
	}

	/*
	 * This function will be called only for testing purpose. Returns the first
	 * "depth" hash functions deterministically.
	 */
	public List<Map<String, String>> getFixedHashFunctions() {
		List<Map<String, String>> hashFunctions = new ArrayList<>();
		// BUG FIX: the old loop condition called generateDRandomNumber() on
		// every iteration — allocating a fresh Random and Set just to read a
		// size that is always exactly "depth". Iterate up to depth directly.
		for (int i = 0; i < this.depth; i++) {
			hashFunctions.add(this.hashLibraries.get(i));
		}
		return hashFunctions;
	}

	// Builds one hash function per permutation of "ACGT": each maps the four
	// bases, in permutation order, to the 2-bit codes "00", "01", "10", "11".
	private void generateHashLibraries() {
		Permutations permut = new Permutations();
		permut.perm2("ACGT");
		List<String> acgt = permut.getAcgt();
		for (int i = 0; i < acgt.size(); i++) {
			String hashAcgt = acgt.get(i);
			Map<String, String> hashFuncAcgt = new HashMap<>();
			hashFuncAcgt.put(hashAcgt.substring(0, 1), "00");
			hashFuncAcgt.put(hashAcgt.substring(1, 2), "01");
			hashFuncAcgt.put(hashAcgt.substring(2, 3), "10");
			hashFuncAcgt.put(hashAcgt.substring(3, 4), "11");
			this.hashLibraries.add(hashFuncAcgt);
		}
	}

	// Draws "depth" distinct indices uniformly at random from [0, 24).
	private Set<Integer> generateDRandomNumber() {
		Set<Integer> dRandom = new HashSet<>();
		Random random = new Random();
		while (dRandom.size() < this.depth) {
			// See Permutations to know why we have 24 here.
			dRandom.add(random.nextInt(MAX_HASHERS));
		}
		return dRandom;
	}
}
| apache-2.0 |
WASdev/standards.jsr352.tck | com.ibm.jbatch.tck/src/main/java/com/ibm/jbatch/tck/artifacts/specialized/MyPartitionCollector.java | 1303 | /**
* Copyright 2012 International Business Machines Corp.
*
* See the NOTICE file distributed with this work for additional information
* regarding copyright ownership. Licensed under the Apache License,
* Version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ibm.jbatch.tck.artifacts.specialized;
import java.io.Externalizable;
import javax.batch.api.partition.PartitionCollector;
import javax.batch.runtime.context.StepContext;
import javax.inject.Inject;
import com.ibm.jbatch.tck.artifacts.reusable.ExternalizableString;
@javax.inject.Named
public class MyPartitionCollector implements PartitionCollector {

    @Inject
    StepContext ctx;

    /**
     * Returns the partition payload handed to the analyzer after each chunk;
     * this TCK artifact always reports the constant token "C".
     */
    @Override
    public Externalizable collectPartitionData() throws Exception {
        return new ExternalizableString("C");
    }
}
| apache-2.0 |
karussell/fastutil | src/it/unimi/dsi/fastutil/ints/Int2CharAVLTreeMap.java | 53587 | /* Generic definitions */
/* Assertions (useful to generate conditional code) */
/* Current type and class (and size, if applicable) */
/* Value methods */
/* Interfaces (keys) */
/* Interfaces (values) */
/* Abstract implementations (keys) */
/* Abstract implementations (values) */
/* Static containers (keys) */
/* Static containers (values) */
/* Implementations */
/* Synchronized wrappers */
/* Unmodifiable wrappers */
/* Other wrappers */
/* Methods (keys) */
/* Methods (values) */
/* Methods (keys/values) */
/* Methods that have special names depending on keys (but the special names depend on values) */
/* Equality */
/* Object/Reference-only definitions (keys) */
/* Primitive-type-only definitions (keys) */
/* Object/Reference-only definitions (values) */
/* Primitive-type-only definitions (values) */
/*
* Copyright (C) 2002-2013 Sebastiano Vigna
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package it.unimi.dsi.fastutil.ints;
import it.unimi.dsi.fastutil.objects.AbstractObjectSortedSet;
import it.unimi.dsi.fastutil.objects.ObjectBidirectionalIterator;
import it.unimi.dsi.fastutil.objects.ObjectListIterator;
import it.unimi.dsi.fastutil.objects.ObjectSortedSet;
import it.unimi.dsi.fastutil.chars.CharCollection;
import it.unimi.dsi.fastutil.chars.AbstractCharCollection;
import it.unimi.dsi.fastutil.chars.CharIterator;
import java.util.Comparator;
import java.util.Iterator;
import java.util.Map;
import java.util.SortedMap;
import java.util.NoSuchElementException;
import it.unimi.dsi.fastutil.chars.CharListIterator;
/** A type-specific AVL tree map with a fast, small-footprint implementation.
*
* <P>The iterators provided by the views of this class are type-specific {@linkplain
* it.unimi.dsi.fastutil.BidirectionalIterator bidirectional iterators}.
* Moreover, the iterator returned by <code>iterator()</code> can be safely cast
* to a type-specific {@linkplain java.util.ListIterator list iterator}.
*/
public class Int2CharAVLTreeMap extends AbstractInt2CharSortedMap implements java.io.Serializable, Cloneable {
/** A reference to the root entry. */
protected transient Entry tree;
/** Number of entries in this map. */
protected int count;
/** The first key in this map. */
protected transient Entry firstEntry;
/** The last key in this map. */
protected transient Entry lastEntry;
/** Cached set of entries. */
protected transient volatile ObjectSortedSet<Int2CharMap.Entry > entries;
/** Cached set of keys. */
protected transient volatile IntSortedSet keys;
/** Cached collection of values. */
protected transient volatile CharCollection values;
/** The value of this variable remembers, after a <code>put()</code>
* or a <code>remove()</code>, whether the <em>domain</em> of the map
* has been modified. */
protected transient boolean modified;
/** This map's comparator, as provided in the constructor. */
protected Comparator<? super Integer> storedComparator;
/** This map's actual comparator; it may differ from {@link #storedComparator} because it is
always a type-specific comparator, so it could be derived from the former by wrapping. */
protected transient IntComparator actualComparator;
private static final long serialVersionUID = -7046029254386353129L;
private static final boolean ASSERTS = false;
{
    // Instance initializer: runs before every constructor body, allocating
    // the scratch direction-path array used by the tree-update code.
    allocatePaths();
}
/** Creates a new empty tree map.
  */
 public Int2CharAVLTreeMap() {
  // Empty tree: no root, zero entries.
  tree = null;
  count = 0;
 }
/** Generates the comparator that will be actually used.
  *
  * <P>When a specific {@link Comparator} is specified and stored in {@link
  * #storedComparator}, we must check whether it is type-specific. If it is
  * so, we can used directly, and we store it in {@link #actualComparator}. Otherwise,
  * we generate on-the-fly an anonymous class that wraps the non-specific {@link Comparator}
  * and makes it into a type-specific one.
  */
 @SuppressWarnings("unchecked")
 private void setActualComparator() {
  /* If the provided comparator is already type-specific, we use it. Otherwise,
	   we use a wrapper anonymous class to fake that it is type-specific. */
  if ( storedComparator == null || storedComparator instanceof IntComparator ) actualComparator = (IntComparator)storedComparator;
  else actualComparator = new IntComparator () {
   public int compare( int k1, int k2 ) {
    // Box the primitives and delegate to the generic comparator.
    return storedComparator.compare( (Integer.valueOf(k1)), (Integer.valueOf(k2)) );
   }
   public int compare( Integer ok1, Integer ok2 ) {
    return storedComparator.compare( ok1, ok2 );
   }
  };
 }
/** Creates a new empty tree map with the given comparator.
  *
  * @param c a (possibly type-specific) comparator.
  */
 public Int2CharAVLTreeMap( final Comparator<? super Integer> c ) {
  this();
  storedComparator = c;
  // Derive the type-specific comparator actually used for key comparisons.
  setActualComparator();
 }
/** Creates a new tree map copying a given map.
  *
  * @param m a {@link Map} to be copied into the new tree map.
  */
 public Int2CharAVLTreeMap( final Map<? extends Integer, ? extends Character> m ) {
  this();
  // Natural key order: a plain Map carries no comparator to inherit.
  putAll( m );
 }
/** Creates a new tree map copying a given sorted map (and its {@link Comparator}).
  *
  * @param m a {@link SortedMap} to be copied into the new tree map.
  */
 public Int2CharAVLTreeMap( final SortedMap<Integer,Character> m ) {
  // Inherit the source map's ordering before copying the entries.
  this( m.comparator() );
  putAll( m );
 }
/** Creates a new tree map copying a given map.
  *
  * @param m a type-specific map to be copied into the new tree map.
  */
 public Int2CharAVLTreeMap( final Int2CharMap m ) {
  this();
  // Type-specific copy: avoids boxing during insertion.
  putAll( m );
 }
/** Creates a new tree map copying a given sorted map (and its {@link Comparator}).
  *
  * @param m a type-specific sorted map to be copied into the new tree map.
  */
 public Int2CharAVLTreeMap( final Int2CharSortedMap m ) {
  // Inherit the source map's ordering before copying the entries.
  this( m.comparator() );
  putAll( m );
 }
/** Creates a new tree map using the elements of two parallel arrays and the given comparator.
  *
  * @param k the array of keys of the new tree map.
  * @param v the array of corresponding values in the new tree map.
  * @param c a (possibly type-specific) comparator.
  * @throws IllegalArgumentException if <code>k</code> and <code>v</code> have different lengths.
  */
 public Int2CharAVLTreeMap( final int[] k, final char v[], final Comparator<? super Integer> c ) {
  this( c );
  if ( k.length != v.length ) throw new IllegalArgumentException( "The key array and the value array have different lengths (" + k.length + " and " + v.length + ")" );
  // Pairwise insertion; later duplicate keys overwrite earlier values.
  for( int i = 0; i < k.length; i++ ) this.put( k[ i ], v[ i ] );
 }
/** Creates a new tree map using the elements of two parallel arrays.
  *
  * @param k the array of keys of the new tree map.
  * @param v the array of corresponding values in the new tree map.
  * @throws IllegalArgumentException if <code>k</code> and <code>v</code> have different lengths.
  */
 public Int2CharAVLTreeMap( final int[] k, final char v[] ) {
  // Natural key order.
  this( k, v, null );
 }
/*
* The following methods implements some basic building blocks used by
* all accessors. They are (and should be maintained) identical to those used in AVLTreeSet.drv.
*
* The put()/remove() code is derived from Ben Pfaff's GNU libavl
* (http://www.msu.edu/~pfaffben/avl/). If you want to understand what's
* going on, you should have a look at the literate code contained therein
* first.
*/
/** Compares two keys in the right way.
*
* <P>This method uses the {@link #actualComparator} if it is non-<code>null</code>.
* Otherwise, it resorts to primitive type comparisons or to {@link Comparable#compareTo(Object) compareTo()}.
*
* @param k1 the first key.
* @param k2 the second key.
* @return a number smaller than, equal to or greater than 0, as usual
* (i.e., when k1 < k2, k1 = k2 or k1 > k2, respectively).
*/
@SuppressWarnings("unchecked")
 final int compare( final int k1, final int k2 ) {
  // Fall back to the natural int ordering when no comparator was provided.
  return actualComparator == null ? ( (k1) < (k2) ? -1 : ( (k1) == (k2) ? 0 : 1 ) ) : actualComparator.compare( k1, k2 );
 }
/** Returns the entry corresponding to the given key, if it is in the tree; <code>null</code>, otherwise.
*
* @param k the key to search for.
* @return the corresponding entry, or <code>null</code> if no entry with the given key exists.
*/
final Entry findKey( final int k ) {
  Entry e = tree;
  int cmp;
  // Standard BST descent; ends at the matching entry or falls off to null.
  while ( e != null && ( cmp = compare( k, e.key ) ) != 0 ) e = cmp < 0 ? e.left() : e.right();
  return e;
 }
/** Locates a key.
*
* @param k a key.
* @return the last entry on a search for the given key; this will be
* the given key, if it present; otherwise, it will be either the smallest greater key or the greatest smaller key.
*/
final Entry locateKey( final int k ) {
  Entry e = tree, last = tree;
  int cmp = 0;
  // Track the last node visited so that, when k is absent, we can return a
  // neighbour (smallest greater or greatest smaller key) instead of null.
  while ( e != null && ( cmp = compare( k, e.key ) ) != 0 ) {
   last = e;
   e = cmp < 0 ? e.left() : e.right();
  }
  return cmp == 0 ? e : last;
 }
 /** This vector remembers the directions followed during
  * the current insertion. It suffices for about 2<sup>32</sup> entries. */
 private transient boolean dirPath[];
 /** Allocates the direction path used by the insertion code
  * (48 slots cover the maximum height of an AVL tree with about 2<sup>32</sup> entries). */
 private void allocatePaths() {
  dirPath = new boolean[ 48 ];
 }
 /** Adds the given mapping, inserting a new entry or overwriting an existing one.
  *
  * <p>After execution of this method, {@link #modified} is true iff a new entry has
  * been inserted.
  *
  * @param k the key.
  * @param v the value.
  * @return the previous value associated with <code>k</code>, or {@link #defRetValue}
  * if no entry with the given key existed.
  */
 public char put( final int k, final char v ) {
  modified = false;
  if ( tree == null ) { // The case of the empty tree is treated separately.
   count++;
   tree = lastEntry = firstEntry = new Entry ( k, v );
   modified = true;
  }
  else {
   // p: current node; q: parent of p; y: last node with nonzero balance on the
   // path (the potential rebalancing point); z: parent of y; e: the newly
   // created entry; w: the subtree root after a rotation.
   Entry p = tree, q = null, y = tree, z = null, e = null, w = null;
   int cmp, i = 0;
   while( true ) {
    if ( ( cmp = compare( k, p.key ) ) == 0 ) {
     // Key already present: overwrite the value; modified stays false.
     final char oldValue = p.value;
     p.value = v;
     return oldValue;
    }
    if ( p.balance() != 0 ) {
     // Restart recording directions from the last unbalanced node.
     i = 0;
     z = q;
     y = p;
    }
    if ( dirPath[ i++ ] = cmp > 0 ) {
     if ( p.succ() ) {
      // The right pointer is a successor thread: insert the new entry here,
      // fixing the predecessor/successor threads and possibly lastEntry.
      count++;
      e = new Entry ( k, v );
      modified = true;
      if ( p.right == null ) lastEntry = e;
      e.left = p;
      e.right = p.right;
      p.right( e );
      break;
     }
     q = p;
     p = p.right;
    }
    else {
     if ( p.pred() ) {
      // Symmetric case: insert as the left (predecessor-threaded) child.
      count++;
      e = new Entry ( k, v );
      modified = true;
      if ( p.left == null ) firstEntry = e;
      e.right = p;
      e.left = p.left;
      p.left( e );
      break;
     }
     q = p;
     p = p.left;
    }
   }
   // Update the balance factors on the path from y down to the new entry.
   p = y;
   i = 0;
   while( p != e ) {
    if ( dirPath[ i ] ) p.incBalance();
    else p.decBalance();
    p = dirPath[ i++ ] ? p.right : p.left;
   }
   if ( y.balance() == -2 ) {
    // Left-heavy: single (LL) or double (LR) rotation around y.
    Entry x = y.left;
    if ( x.balance() == -1 ) {
     w = x;
     if ( x.succ() ) {
      x.succ( false );
      y.pred( x );
     }
     else y.left = x.right;
     x.right = y;
     x.balance( 0 );
     y.balance( 0 );
    }
    else {
     if ( ASSERTS ) assert x.balance() == 1;
     w = x.right;
     x.right = w.left;
     w.left = x;
     y.left = w.right;
     w.right = y;
     if ( w.balance() == -1 ) {
      x.balance( 0 );
      y.balance( 1 );
     }
     else if ( w.balance() == 0 ) {
      x.balance( 0 );
      y.balance( 0 );
     }
     else {
      x.balance( -1 );
      y.balance( 0 );
     }
     w.balance( 0 );
     // Fix threads that were turned into subtree pointers by the rotation.
     if ( w.pred() ) {
      x.succ( w );
      w.pred( false );
     }
     if ( w.succ() ) {
      y.pred( w );
      w.succ( false );
     }
    }
   }
   else if ( y.balance() == +2 ) {
    // Right-heavy: mirror image of the case above (RR or RL rotation).
    Entry x = y.right;
    if ( x.balance() == 1 ) {
     w = x;
     if ( x.pred() ) {
      x.pred( false );
      y.succ( x );
     }
     else y.right = x.left;
     x.left = y;
     x.balance( 0 );
     y.balance( 0 );
    }
    else {
     if ( ASSERTS ) assert x.balance() == -1;
     w = x.left;
     x.left = w.right;
     w.right = x;
     y.right = w.left;
     w.left = y;
     if ( w.balance() == 1 ) {
      x.balance( 0 );
      y.balance( -1 );
     }
     else if ( w.balance() == 0 ) {
      x.balance( 0 );
      y.balance( 0 );
     }
     else {
      x.balance( 1 );
      y.balance( 0 );
     }
     w.balance( 0 );
     if ( w.pred() ) {
      y.succ( w );
      w.pred( false );
     }
     if ( w.succ() ) {
      x.pred( w );
      w.succ( false );
     }
    }
   }
   else return defRetValue; // Tree still balanced: no rotation needed.
   // Link the rotated subtree (rooted at w) back into z, or make it the root.
   if ( z == null ) tree = w;
   else {
    if ( z.left == y ) z.left = w;
    else z.right = w;
   }
  }
  if ( ASSERTS ) checkTree( tree );
  return defRetValue;
 }
 /** Finds the parent of an entry.
  *
  * <P>Entries do not store a parent pointer; the parent is recovered by walking
  * down the left spine (x) and right spine (y) of the subtree rooted at
  * <code>e</code> until a predecessor/successor thread is found, which leads
  * back to the parent.
  *
  * @param e a node of the tree.
  * @return the parent of the given node, or <code>null</code> for the root.
  */
 private Entry parent( final Entry e ) {
  if ( e == tree ) return null;
  Entry x, y, p;
  x = y = e;
  while( true ) {
   if ( y.succ() ) {
    p = y.right;
    if ( p == null || p.left != e ) {
     // The successor is not the parent: use the predecessor of the left spine instead.
     while( ! x.pred() ) x = x.left;
     p = x.left;
    }
    return p;
   }
   else if ( x.pred() ) {
    p = x.left;
    if ( p == null || p.right != e ) {
     // The predecessor is not the parent: use the successor of the right spine instead.
     while( ! y.succ() ) y = y.right;
     p = y.right;
    }
    return p;
   }
   x = x.left;
   y = y.right;
  }
 }
 /** Removes the mapping with the given key.
  *
  * <p>After execution of this method, {@link #modified} is true iff an entry
  * has been deleted.
  *
  * @param k the key.
  * @return the value previously associated with <code>k</code>, or {@link #defRetValue}
  * if no entry with the given key existed.
  */
 @SuppressWarnings("unchecked")
 public char remove( final int k ) {
  modified = false;
  if ( tree == null ) return defRetValue;
  int cmp;
  Entry p = tree, q = null;
  boolean dir = false;
  final int kk = k;
  // Search for the node to delete, keeping track of its parent (q) and of the
  // direction (dir) taken from the parent.
  while( true ) {
   if ( ( cmp = compare( kk, p.key ) ) == 0 ) break;
   else if ( dir = cmp > 0 ) {
    q = p;
    if ( ( p = p.right() ) == null ) return defRetValue;
   }
   else {
    q = p;
    if ( ( p = p.left() ) == null ) return defRetValue;
   }
  }
  // Keep the first/last entry pointers up to date.
  if ( p.left == null ) firstEntry = p.next();
  if ( p.right == null ) lastEntry = p.prev();
  if ( p.succ() ) {
   // p has no right subtree.
   if ( p.pred() ) {
    // p is a leaf: detach it from the parent, turning the link into a thread.
    if ( q != null ) {
     if ( dir ) q.succ( p.right );
     else q.pred( p.left );
    }
    else tree = dir ? p.right : p.left;
   }
   else {
    // p has only a left subtree: splice it in place of p.
    p.prev().right = p.right;
    if ( q != null ) {
     if ( dir ) q.right = p.left;
     else q.left = p.left;
    }
    else tree = p.left;
   }
  }
  else {
   // p has a right subtree: replace p with its in-order successor.
   Entry r = p.right;
   if ( r.pred() ) {
    // The right child is the successor itself.
    r.left = p.left;
    r.pred( p.pred() );
    if ( ! r.pred() ) r.prev().right = r;
    if ( q != null ) {
     if ( dir ) q.right = r;
     else q.left = r;
    }
    else tree = r;
    r.balance( p.balance() );
    q = r;
    dir = true;
   }
   else {
    // Find the leftmost node s of the right subtree and move it in place of p.
    Entry s;
    while( true ) {
     s = r.left;
     if ( s.pred() ) break;
     r = s;
    }
    if ( s.succ() ) r.pred( s );
    else r.left = s.right;
    s.left = p.left;
    if ( ! p.pred() ) {
     p.prev().right = s;
     s.pred( false );
    }
    s.right = p.right;
    s.succ( false );
    if ( q != null ) {
     if ( dir ) q.right = s;
     else q.left = s;
    }
    else tree = s;
    s.balance( p.balance() );
    q = r;
    dir = false;
   }
  }
  // Walk back towards the root, updating balance factors and rotating where needed.
  Entry y;
  while( q != null ) {
   y = q;
   q = parent( y );
   if ( ! dir ) {
    dir = q != null && q.left != y;
    y.incBalance();
    if ( y.balance() == 1 ) break;
    else if ( y.balance() == 2 ) {
     Entry x = y.right;
     if ( ASSERTS ) assert x != null;
     if ( x.balance() == -1 ) {
      // Double (RL) rotation.
      Entry w;
      if ( ASSERTS ) assert x.balance() == -1;
      w = x.left;
      x.left = w.right;
      w.right = x;
      y.right = w.left;
      w.left = y;
      if ( w.balance() == 1 ) {
       x.balance( 0 );
       y.balance( -1 );
      }
      else if ( w.balance() == 0 ) {
       x.balance( 0 );
       y.balance( 0 );
      }
      else {
       if ( ASSERTS ) assert w.balance() == -1;
       x.balance( 1 );
       y.balance( 0 );
      }
      w.balance( 0 );
      // Fix threads that were turned into subtree pointers by the rotation.
      if ( w.pred() ) {
       y.succ( w );
       w.pred( false );
      }
      if ( w.succ() ) {
       x.pred( w );
       w.succ( false );
      }
      if ( q != null ) {
       if ( dir ) q.right = w;
       else q.left = w;
      }
      else tree = w;
     }
     else {
      // Single (RR) rotation.
      if ( q != null ) {
       if ( dir ) q.right = x;
       else q.left = x;
      }
      else tree = x;
      if ( x.balance() == 0 ) {
       y.right = x.left;
       x.left = y;
       x.balance( -1 );
       y.balance( +1 );
       break;
      }
      if ( ASSERTS ) assert x.balance() == 1;
      if ( x.pred() ) {
       y.succ( true );
       x.pred( false );
      }
      else y.right = x.left;
      x.left = y;
      y.balance( 0 );
      x.balance( 0 );
     }
    }
   }
   else {
    // Mirror image of the branch above (left-heavy cases).
    dir = q != null && q.left != y;
    y.decBalance();
    if ( y.balance() == -1 ) break;
    else if ( y.balance() == -2 ) {
     Entry x = y.left;
     if ( ASSERTS ) assert x != null;
     if ( x.balance() == 1 ) {
      // Double (LR) rotation.
      Entry w;
      if ( ASSERTS ) assert x.balance() == 1;
      w = x.right;
      x.right = w.left;
      w.left = x;
      y.left = w.right;
      w.right = y;
      if ( w.balance() == -1 ) {
       x.balance( 0 );
       y.balance( 1 );
      }
      else if ( w.balance() == 0 ) {
       x.balance( 0 );
       y.balance( 0 );
      }
      else {
       if ( ASSERTS ) assert w.balance() == 1;
       x.balance( -1 );
       y.balance( 0 );
      }
      w.balance( 0 );
      if ( w.pred() ) {
       x.succ( w );
       w.pred( false );
      }
      if ( w.succ() ) {
       y.pred( w );
       w.succ( false );
      }
      if ( q != null ) {
       if ( dir ) q.right = w;
       else q.left = w;
      }
      else tree = w;
     }
     else {
      // Single (LL) rotation.
      if ( q != null ) {
       if ( dir ) q.right = x;
       else q.left = x;
      }
      else tree = x;
      if ( x.balance() == 0 ) {
       y.left = x.right;
       x.right = y;
       x.balance( +1 );
       y.balance( -1 );
       break;
      }
      if ( ASSERTS ) assert x.balance() == -1;
      if ( x.succ() ) {
       y.pred( true );
       x.succ( false );
      }
      else y.left = x.right;
      x.right = y;
      y.balance( 0 );
      x.balance( 0 );
     }
    }
   }
  }
  modified = true;
  count--;
  if ( ASSERTS ) checkTree( tree );
  return p.value;
 }
public Character put( final Integer ok, final Character ov ) {
final char oldValue = put( ((ok).intValue()), ((ov).charValue()) );
return modified ? (null) : (Character.valueOf(oldValue));
}
public Character remove( final Object ok ) {
final char oldValue = remove( ((((Integer)(ok)).intValue())) );
return modified ? (Character.valueOf(oldValue)) : (null);
}
public boolean containsValue( final char v ) {
final ValueIterator i = new ValueIterator();
char ev;
int j = count;
while( j-- != 0 ) {
ev = i.nextChar();
if ( ( (ev) == (v) ) ) return true;
}
return false;
}
public void clear() {
count = 0;
tree = null;
entries = null;
values = null;
keys = null;
firstEntry = lastEntry = null;
}
/** This class represent an entry in a tree map.
*
* <P>We use the only "metadata", i.e., {@link Entry#info}, to store
* information about balance, predecessor status and successor status.
*
* <P>Note that since the class is recursive, it can be
* considered equivalently a tree.
*/
private static final class Entry implements Cloneable, Int2CharMap.Entry {
/** If the bit in this mask is true, {@link #right} points to a successor. */
private final static int SUCC_MASK = 1 << 31;
/** If the bit in this mask is true, {@link #left} points to a predecessor. */
private final static int PRED_MASK = 1 << 30;
/** The bits in this mask hold the node balance info. You can get it just by casting to byte. */
private final static int BALANCE_MASK = 0xFF;
/** The key of this entry. */
int key;
/** The value of this entry. */
char value;
/** The pointers to the left and right subtrees. */
Entry left, right;
/** This integers holds different information in different bits (see {@link #SUCC_MASK}, {@link #PRED_MASK} and {@link #BALANCE_MASK}). */
int info;
Entry() {}
/** Creates a new entry with the given key and value.
*
* @param k a key.
* @param v a value.
*/
Entry( final int k, final char v ) {
this.key = k;
this.value = v;
info = SUCC_MASK | PRED_MASK;
}
/** Returns the left subtree.
*
* @return the left subtree (<code>null</code> if the left
* subtree is empty).
*/
Entry left() {
return ( info & PRED_MASK ) != 0 ? null : left;
}
/** Returns the right subtree.
*
* @return the right subtree (<code>null</code> if the right
* subtree is empty).
*/
Entry right() {
return ( info & SUCC_MASK ) != 0 ? null : right;
}
/** Checks whether the left pointer is really a predecessor.
* @return true if the left pointer is a predecessor.
*/
boolean pred() {
return ( info & PRED_MASK ) != 0;
}
/** Checks whether the right pointer is really a successor.
* @return true if the right pointer is a successor.
*/
boolean succ() {
return ( info & SUCC_MASK ) != 0;
}
/** Sets whether the left pointer is really a predecessor.
* @param pred if true then the left pointer will be considered a predecessor.
*/
void pred( final boolean pred ) {
if ( pred ) info |= PRED_MASK;
else info &= ~PRED_MASK;
}
/** Sets whether the right pointer is really a successor.
* @param succ if true then the right pointer will be considered a successor.
*/
void succ( final boolean succ ) {
if ( succ ) info |= SUCC_MASK;
else info &= ~SUCC_MASK;
}
/** Sets the left pointer to a predecessor.
* @param pred the predecessr.
*/
void pred( final Entry pred ) {
info |= PRED_MASK;
left = pred;
}
/** Sets the right pointer to a successor.
* @param succ the successor.
*/
void succ( final Entry succ ) {
info |= SUCC_MASK;
right = succ;
}
/** Sets the left pointer to the given subtree.
* @param left the new left subtree.
*/
void left( final Entry left ) {
info &= ~PRED_MASK;
this.left = left;
}
/** Sets the right pointer to the given subtree.
* @param right the new right subtree.
*/
void right( final Entry right ) {
info &= ~SUCC_MASK;
this.right = right;
}
/** Returns the current level of the node.
* @return the current level of this node.
*/
int balance() {
return (byte)info;
}
/** Sets the level of this node.
* @param level the new level of this node.
*/
void balance( int level ) {
info &= ~BALANCE_MASK;
info |= ( level & BALANCE_MASK );
}
/** Increments the level of this node. */
void incBalance() {
info = info & ~BALANCE_MASK | ( (byte)info + 1 ) & 0xFF;
}
/** Decrements the level of this node. */
protected void decBalance() {
info = info & ~BALANCE_MASK | ( (byte)info - 1 ) & 0xFF;
}
/** Computes the next entry in the set order.
*
* @return the next entry (<code>null</code>) if this is the last entry).
*/
Entry next() {
Entry next = this.right;
if ( ( info & SUCC_MASK ) == 0 ) while ( ( next.info & PRED_MASK ) == 0 ) next = next.left;
return next;
}
/** Computes the previous entry in the set order.
*
* @return the previous entry (<code>null</code>) if this is the first entry).
*/
Entry prev() {
Entry prev = this.left;
if ( ( info & PRED_MASK ) == 0 ) while ( ( prev.info & SUCC_MASK ) == 0 ) prev = prev.right;
return prev;
}
public Integer getKey() {
return (Integer.valueOf(key));
}
public int getIntKey() {
return key;
}
public Character getValue() {
return (Character.valueOf(value));
}
public char getCharValue() {
return value;
}
public char setValue(final char value) {
final char oldValue = this.value;
this.value = value;
return oldValue;
}
public Character setValue(final Character value) {
return (Character.valueOf(setValue(((value).charValue()))));
}
@SuppressWarnings("unchecked")
public Entry clone() {
Entry c;
try {
c = (Entry )super.clone();
}
catch(CloneNotSupportedException cantHappen) {
throw new InternalError();
}
c.key = key;
c.value = value;
c.info = info;
return c;
}
@SuppressWarnings("unchecked")
public boolean equals( final Object o ) {
if (!(o instanceof Map.Entry)) return false;
Map.Entry<Integer, Character> e = (Map.Entry<Integer, Character>)o;
return ( (key) == (((e.getKey()).intValue())) ) && ( (value) == (((e.getValue()).charValue())) );
}
public int hashCode() {
return (key) ^ (value);
}
public String toString() {
return key + "=>" + value;
}
/*
public void prettyPrint() {
prettyPrint(0);
}
public void prettyPrint(int level) {
if ( pred() ) {
for (int i = 0; i < level; i++)
System.err.print(" ");
System.err.println("pred: " + left );
}
else if (left != null)
left.prettyPrint(level +1 );
for (int i = 0; i < level; i++)
System.err.print(" ");
System.err.println(key + "=" + value + " (" + balance() + ")");
if ( succ() ) {
for (int i = 0; i < level; i++)
System.err.print(" ");
System.err.println("succ: " + right );
}
else if (right != null)
right.prettyPrint(level + 1);
}
*/
}
/*
public void prettyPrint() {
System.err.println("size: " + count);
if (tree != null) tree.prettyPrint();
}
*/
 /** Returns true if this map contains a mapping for the given key.
  * @param k the key.
  * @return true if an entry with the given key exists.
  */
 @SuppressWarnings("unchecked")
 public boolean containsKey( final int k ) {
  return findKey( k ) != null;
 }
 /** Returns the number of key/value mappings in this map. */
 public int size() {
  return count;
 }
 /** Returns true if this map contains no entries. */
 public boolean isEmpty() {
  return count == 0;
 }
 /** Returns the value associated with the given key.
  * @param k the key.
  * @return the associated value, or {@link #defRetValue} if no entry with the given key exists.
  */
 @SuppressWarnings("unchecked")
 public char get( final int k ) {
  final Entry e = findKey( k );
  return e == null ? defRetValue : e.value;
 }
 /** Returns the smallest key in this map.
  * @throws NoSuchElementException if the map is empty.
  */
 public int firstIntKey() {
  if ( tree == null ) throw new NoSuchElementException();
  return firstEntry.key;
 }
 /** Returns the greatest key in this map.
  * @throws NoSuchElementException if the map is empty.
  */
 public int lastIntKey() {
  if ( tree == null ) throw new NoSuchElementException();
  return lastEntry.key;
 }
 /** An abstract iterator on the whole range.
  *
  * <P>This class can iterate in both directions on a threaded tree.
  */
 private class TreeIterator {
  /** The entry that will be returned by the next call to {@link java.util.ListIterator#previous()} (or <code>null</code> if no previous entry exists). */
  Entry prev;
  /** The entry that will be returned by the next call to {@link java.util.ListIterator#next()} (or <code>null</code> if no next entry exists). */
  Entry next;
  /** The last entry that was returned (or <code>null</code> if we did not iterate or used {@link #remove()}). */
  Entry curr;
  /** The current index (in the sense of a {@link java.util.ListIterator}). Note that this value is not meaningful when this {@link TreeIterator} has been created using the nonempty constructor.*/
  int index = 0;
  /** Creates an iterator positioned before the first entry. */
  TreeIterator() {
   next = firstEntry;
  }
  /** Creates an iterator positioned at the given key.
   * @param k the starting key; the iterator straddles <code>k</code> (or the gap where it would be).
   */
  TreeIterator( final int k ) {
   if ( ( next = locateKey( k ) ) != null ) {
    // locateKey() returns k itself or one of its neighbours; adjust prev/next accordingly.
    if ( compare( next.key, k ) <= 0 ) {
     prev = next;
     next = next.next();
    }
    else prev = next.prev();
   }
  }
  public boolean hasNext() { return next != null; }
  public boolean hasPrevious() { return prev != null; }
  /** Advances {@link #next}; overridden by subrange iterators to stop at the range end. */
  void updateNext() {
   next = next.next();
  }
  Entry nextEntry() {
   if ( ! hasNext() ) throw new NoSuchElementException();
   curr = prev = next;
   index++;
   updateNext();
   return curr;
  }
  /** Retreats {@link #prev}; overridden by subrange iterators to stop at the range start. */
  void updatePrevious() {
   prev = prev.prev();
  }
  Entry previousEntry() {
   if ( ! hasPrevious() ) throw new NoSuchElementException();
   curr = next = prev;
   index--;
   updatePrevious();
   return curr;
  }
  public int nextIndex() {
   return index;
  }
  public int previousIndex() {
   return index - 1;
  }
  /** Removes the last entry returned by {@link #nextEntry()}/{@link #previousEntry()}.
   * @throws IllegalStateException if no entry was returned, or it was already removed.
   */
  public void remove() {
   if ( curr == null ) throw new IllegalStateException();
   /* If the last operation was a next(), we are removing an entry that preceeds
   the current index, and thus we must decrement it. */
   if ( curr == prev ) index--;
   // Re-anchor prev/next around the doomed entry before deleting it.
   next = prev = curr;
   updatePrevious();
   updateNext();
   Int2CharAVLTreeMap.this.remove( curr.key );
   curr = null;
  }
  /** Skips forward at most <code>n</code> entries.
   * @param n the number of entries to skip.
   * @return the number of entries actually skipped.
   */
  public int skip( final int n ) {
   int i = n;
   while( i-- != 0 && hasNext() ) nextEntry();
   return n - i - 1;
  }
  /** Skips backwards at most <code>n</code> entries.
   * @param n the number of entries to skip.
   * @return the number of entries actually skipped.
   */
  public int back( final int n ) {
   int i = n;
   while( i-- != 0 && hasPrevious() ) previousEntry();
   return n - i - 1;
  }
 }
 /** An iterator on the whole range.
  *
  * <P>This class can iterate in both directions on a threaded tree.
  */
 private class EntryIterator extends TreeIterator implements ObjectListIterator<Int2CharMap.Entry > {
  EntryIterator() {}
  EntryIterator( final int k ) {
   super( k );
  }
  // Delegates to TreeIterator, returning entries; mutation via set()/add() is not supported.
  public Int2CharMap.Entry next() { return nextEntry(); }
  public Int2CharMap.Entry previous() { return previousEntry(); }
  public void set( Int2CharMap.Entry ok ) { throw new UnsupportedOperationException(); }
  public void add( Int2CharMap.Entry ok ) { throw new UnsupportedOperationException(); }
 }
 /** Returns a type-specific sorted-set view of the entries of this map (lazily created and cached). */
 public ObjectSortedSet<Int2CharMap.Entry > int2CharEntrySet() {
  if ( entries == null ) entries = new AbstractObjectSortedSet<Int2CharMap.Entry >() {
   // Entries are compared by key, using the comparator the map was created with.
   final Comparator<? super Int2CharMap.Entry > comparator = new Comparator<Int2CharMap.Entry > () {
    public int compare( final Int2CharMap.Entry x, final Int2CharMap.Entry y ) {
     return Int2CharAVLTreeMap.this.storedComparator.compare( x.getKey(), y.getKey() );
    }
   };
   public Comparator<? super Int2CharMap.Entry > comparator() {
    return comparator;
   }
   public ObjectBidirectionalIterator<Int2CharMap.Entry > iterator() {
    return new EntryIterator();
   }
   public ObjectBidirectionalIterator<Int2CharMap.Entry > iterator( final Int2CharMap.Entry from ) {
    return new EntryIterator( ((from.getKey()).intValue()) );
   }
   @SuppressWarnings("unchecked")
   public boolean contains( final Object o ) {
    if (!(o instanceof Map.Entry)) return false;
    final Map.Entry <Integer, Character> e = (Map.Entry <Integer, Character>)o;
    final Entry f = findKey( ((e.getKey()).intValue()) );
    // The key/value comparison is delegated to the passed entry's equals() (false if f is null).
    return e.equals( f );
   }
   @SuppressWarnings("unchecked")
   public boolean remove( final Object o ) {
    if (!(o instanceof Map.Entry)) return false;
    final Map.Entry <Integer, Character> e = (Map.Entry <Integer, Character>)o;
    final Entry f = findKey( ((e.getKey()).intValue()) );
    // NOTE(review): removal is keyed on the key only — the passed entry's value is
    // not compared against f.value before removing; confirm this is intended.
    if ( f != null ) Int2CharAVLTreeMap.this.remove( f.key );
    return f != null;
   }
   public int size() { return count; }
   public void clear() { Int2CharAVLTreeMap.this.clear(); }
   public Int2CharMap.Entry first() { return firstEntry; }
   public Int2CharMap.Entry last() { return lastEntry; }
   public ObjectSortedSet<Int2CharMap.Entry > subSet( Int2CharMap.Entry from, Int2CharMap.Entry to ) { return subMap( from.getKey(), to.getKey() ).int2CharEntrySet(); }
   public ObjectSortedSet<Int2CharMap.Entry > headSet( Int2CharMap.Entry to ) { return headMap( to.getKey() ).int2CharEntrySet(); }
   public ObjectSortedSet<Int2CharMap.Entry > tailSet( Int2CharMap.Entry from ) { return tailMap( from.getKey() ).int2CharEntrySet(); }
  };
  return entries;
 }
 /** An iterator on the whole range of keys.
  *
  * <P>This class can iterate in both directions on the keys of a threaded tree. We
  * simply override the {@link java.util.ListIterator#next()}/{@link java.util.ListIterator#previous()} methods (and possibly
  * their type-specific counterparts) so that they return keys instead of entries.
  */
 private final class KeyIterator extends TreeIterator implements IntListIterator {
  public KeyIterator() {}
  public KeyIterator( final int k ) { super( k ); }
  public int nextInt() { return nextEntry().key; }
  public int previousInt() { return previousEntry().key; }
  // This is a read-only view: positional mutation is not supported.
  public void set( int k ) { throw new UnsupportedOperationException(); }
  public void add( int k ) { throw new UnsupportedOperationException(); }
  public Integer next() { return (Integer.valueOf(nextEntry().key)); }
  public Integer previous() { return (Integer.valueOf(previousEntry().key)); }
  public void set( Integer ok ) { throw new UnsupportedOperationException(); }
  public void add( Integer ok ) { throw new UnsupportedOperationException(); }
 };
 /** A keyset implementation using a more direct implementation for iterators. */
 private class KeySet extends AbstractInt2CharSortedMap .KeySet {
  // Use the tree's threaded iterators instead of the generic map-backed ones.
  public IntBidirectionalIterator iterator() { return new KeyIterator(); }
  public IntBidirectionalIterator iterator( final int from ) { return new KeyIterator( from ); }
 }
 /** Returns a type-specific sorted set view of the keys contained in this map.
  *
  * <P>In addition to the semantics of {@link java.util.Map#keySet()}, you can
  * safely cast the set returned by this call to a type-specific sorted
  * set interface.
  *
  * @return a type-specific sorted set view of the keys contained in this map.
  */
 public IntSortedSet keySet() {
  // The key set is created lazily and cached.
  if ( keys == null ) keys = new KeySet();
  return keys;
 }
 /** An iterator on the whole range of values.
  *
  * <P>This class can iterate in both directions on the values of a threaded tree. We
  * simply override the {@link java.util.ListIterator#next()}/{@link java.util.ListIterator#previous()} methods (and possibly
  * their type-specific counterparts) so that they return values instead of entries.
  */
 private final class ValueIterator extends TreeIterator implements CharListIterator {
  public char nextChar() { return nextEntry().value; }
  public char previousChar() { return previousEntry().value; }
  // This is a read-only view: positional mutation is not supported.
  public void set( char v ) { throw new UnsupportedOperationException(); }
  public void add( char v ) { throw new UnsupportedOperationException(); }
  public Character next() { return (Character.valueOf(nextEntry().value)); }
  public Character previous() { return (Character.valueOf(previousEntry().value)); }
  public void set( Character ok ) { throw new UnsupportedOperationException(); }
  public void add( Character ok ) { throw new UnsupportedOperationException(); }
 };
 /** Returns a type-specific collection view of the values contained in this map.
  *
  * <P>In addition to the semantics of {@link java.util.Map#values()}, you can
  * safely cast the collection returned by this call to a type-specific collection
  * interface.
  *
  * @return a type-specific collection view of the values contained in this map.
  */
 public CharCollection values() {
  // The value collection is created lazily and cached.
  if ( values == null ) values = new AbstractCharCollection () {
   public CharIterator iterator() {
    return new ValueIterator();
   }
   public boolean contains( final char k ) {
    return containsValue( k );
   }
   public int size() {
    return count;
   }
   public void clear() {
    Int2CharAVLTreeMap.this.clear();
   }
  };
  return values;
 }
 /** Returns the comparator used by this map, or <code>null</code> for natural ordering. */
 public IntComparator comparator() {
  return actualComparator;
 }
 /** Returns a view of the keys strictly smaller than <code>to</code>
  * (the first Submap argument is ignored because the bottom flag is true). */
 public Int2CharSortedMap headMap( int to ) {
  return new Submap( ((int)0), true, to, false );
 }
 /** Returns a view of the keys greater than or equal to <code>from</code>
  * (the third Submap argument is ignored because the top flag is true). */
 public Int2CharSortedMap tailMap( int from ) {
  return new Submap( from, false, ((int)0), true );
 }
 /** Returns a view of the keys in <code>[from, to)</code>. */
 public Int2CharSortedMap subMap( int from, int to ) {
  return new Submap( from, false, to, false );
 }
/** A submap with given range.
*
* <P>This class represents a submap. One has to specify the left/right
* limits (which can be set to -∞ or ∞). Since the submap is a
* view on the map, at a given moment it could happen that the limits of
* the range are not any longer in the main map. Thus, things such as
* {@link java.util.SortedMap#firstKey()} or {@link java.util.Collection#size()} must be always computed
* on-the-fly.
*/
private final class Submap extends AbstractInt2CharSortedMap implements java.io.Serializable {
  private static final long serialVersionUID = -7046029254386353129L;
  /** The start of the submap range, unless {@link #bottom} is true. */
  int from;
  /** The end of the submap range, unless {@link #top} is true. */
  int to;
  /** If true, the submap range starts from -∞. */
  boolean bottom;
  /** If true, the submap range goes to ∞. */
  boolean top;
  /** Cached set of entries. */
  @SuppressWarnings("hiding")
  protected transient volatile ObjectSortedSet<Int2CharMap.Entry > entries;
  /** Cached set of keys. */
  @SuppressWarnings("hiding")
  protected transient volatile IntSortedSet keys;
  /** Cached collection of values. */
  @SuppressWarnings("hiding")
  protected transient volatile CharCollection values;
  /** Creates a new submap with given key range.
   *
   * @param from the start of the submap range.
   * @param bottom if true, the first parameter is ignored and the range starts from -∞.
   * @param to the end of the submap range.
   * @param top if true, the third parameter is ignored and the range goes to ∞.
   * @throws IllegalArgumentException if both bounds are finite and <code>from</code> is greater than <code>to</code>.
   */
  public Submap( final int from, final boolean bottom, final int to, final boolean top ) {
   if ( ! bottom && ! top && Int2CharAVLTreeMap.this.compare( from, to ) > 0 ) throw new IllegalArgumentException( "Start key (" + from + ") is larger than end key (" + to + ")" );
   this.from = from;
   this.bottom = bottom;
   this.to = to;
   this.top = top;
   this.defRetValue = Int2CharAVLTreeMap.this.defRetValue;
  }
  /** Removes all entries in the submap range (one by one, via the range iterator). */
  public void clear() {
   final SubmapIterator i = new SubmapIterator();
   while( i.hasNext() ) {
    i.nextEntry();
    i.remove();
   }
  }
  /** Checks whether a key is in the submap range.
   * @param k a key.
   * @return true if is the key is in the submap range.
   */
  final boolean in( final int k ) {
   return ( bottom || Int2CharAVLTreeMap.this.compare( k, from ) >= 0 ) &&
    ( top || Int2CharAVLTreeMap.this.compare( k, to ) < 0 );
  }
public ObjectSortedSet<Int2CharMap.Entry > int2CharEntrySet() {
if ( entries == null ) entries = new AbstractObjectSortedSet<Int2CharMap.Entry >() {
public ObjectBidirectionalIterator<Int2CharMap.Entry > iterator() {
return new SubmapEntryIterator();
}
public ObjectBidirectionalIterator<Int2CharMap.Entry > iterator( final Int2CharMap.Entry from ) {
return new SubmapEntryIterator( ((from.getKey()).intValue()) );
}
public Comparator<? super Int2CharMap.Entry > comparator() { return Int2CharAVLTreeMap.this.entrySet().comparator(); }
@SuppressWarnings("unchecked")
public boolean contains( final Object o ) {
if (!(o instanceof Map.Entry)) return false;
final Map.Entry <Integer, Character> e = (Map.Entry <Integer, Character>)o;
final Int2CharAVLTreeMap.Entry f = findKey( ((e.getKey()).intValue()) );
return f != null && in( f.key ) && e.equals( f );
}
@SuppressWarnings("unchecked")
public boolean remove( final Object o ) {
if (!(o instanceof Map.Entry)) return false;
final Map.Entry <Integer, Character> e = (Map.Entry <Integer, Character>)o;
final Int2CharAVLTreeMap.Entry f = findKey( ((e.getKey()).intValue()) );
if ( f != null && in( f.key ) ) Submap.this.remove( f.key );
return f != null;
}
public int size() {
int c = 0;
for( Iterator<?> i = iterator(); i.hasNext(); i.next() ) c++;
return c;
}
public boolean isEmpty() {
return ! new SubmapIterator().hasNext();
}
public void clear() {
Submap.this.clear();
}
public Int2CharMap.Entry first() { return firstEntry(); }
public Int2CharMap.Entry last() { return lastEntry(); }
public ObjectSortedSet<Int2CharMap.Entry > subSet( Int2CharMap.Entry from, Int2CharMap.Entry to ) { return subMap( from.getKey(), to.getKey() ).int2CharEntrySet(); }
public ObjectSortedSet<Int2CharMap.Entry > headSet( Int2CharMap.Entry to ) { return headMap( to.getKey() ).int2CharEntrySet(); }
public ObjectSortedSet<Int2CharMap.Entry > tailSet( Int2CharMap.Entry from ) { return tailMap( from.getKey() ).int2CharEntrySet(); }
};
return entries;
}
  /** A keyset implementation backed by the submap's range-aware iterators. */
  private class KeySet extends AbstractInt2CharSortedMap .KeySet {
   public IntBidirectionalIterator iterator() { return new SubmapKeyIterator(); }
   public IntBidirectionalIterator iterator( final int from ) { return new SubmapKeyIterator( from ); }
  }
  /** Returns a type-specific sorted set view of the keys in the submap range (lazily created and cached). */
  public IntSortedSet keySet() {
   if ( keys == null ) keys = new KeySet();
   return keys;
  }
  /** Returns a type-specific collection view of the values in the submap range (lazily created and cached). */
  public CharCollection values() {
   if ( values == null ) values = new AbstractCharCollection () {
    public CharIterator iterator() {
     return new SubmapValueIterator();
    }
    public boolean contains( final char k ) {
     return containsValue( k );
    }
    public int size() {
     return Submap.this.size();
    }
    public void clear() {
     Submap.this.clear();
    }
   };
   return values;
  }
  /** Returns true if the key is in the submap range and present in the backing map. */
  @SuppressWarnings("unchecked")
  public boolean containsKey( final int k ) {
   return in( k ) && Int2CharAVLTreeMap.this.containsKey( k );
  }
  /** Returns true if some entry in the submap range carries the given value (linear scan). */
  public boolean containsValue( final char v ) {
   final SubmapIterator i = new SubmapIterator();
   char ev;
   while( i.hasNext() ) {
    ev = i.nextEntry().value;
    if ( ( (ev) == (v) ) ) return true;
   }
   return false;
  }
  /** Returns the value for the given key, or the default return value if the key
   * is outside the submap range or not present. */
  @SuppressWarnings("unchecked")
  public char get(final int k) {
   final Int2CharAVLTreeMap.Entry e;
   final int kk = k;
   return in( kk ) && ( e = findKey( kk ) ) != null ? e.value : this.defRetValue;
  }
  /** Adds the given mapping, delegating to the backing map.
   * @throws IllegalArgumentException if the key is outside the submap range.
   */
  public char put(final int k, final char v) {
   modified = false;
   if ( ! in( k ) ) throw new IllegalArgumentException( "Key (" + k + ") out of range [" + ( bottom ? "-" : String.valueOf( from ) ) + ", " + ( top ? "-" : String.valueOf( to ) ) + ")" );
   final char oldValue = Int2CharAVLTreeMap.this.put( k, v );
   return modified ? this.defRetValue : oldValue;
  }
  /** Boxed version of {@link #put(int, char)}; returns <code>null</code> when a new entry was inserted. */
  public Character put( final Integer ok, final Character ov ) {
   final char oldValue = put( ((ok).intValue()), ((ov).charValue()) );
   return modified ? (null) : (Character.valueOf(oldValue));
  }
  /** Removes the mapping for the given key; keys outside the range are silently ignored. */
  @SuppressWarnings("unchecked")
  public char remove( final int k ) {
   modified = false;
   if ( ! in( k ) ) return this.defRetValue;
   final char oldValue = Int2CharAVLTreeMap.this.remove( k );
   return modified ? oldValue : this.defRetValue;
  }
  /** Boxed version of {@link #remove(int)}; returns <code>null</code> when nothing was removed. */
  public Character remove( final Object ok ) {
   final char oldValue = remove( ((((Integer)(ok)).intValue())) );
   return modified ? (Character.valueOf(oldValue)) : (null);
  }
  /** Returns the number of entries in the submap range (computed by iterating, since this is a view). */
  public int size() {
   final SubmapIterator i = new SubmapIterator();
   int n = 0;
   while( i.hasNext() ) {
    n++;
    i.nextEntry();
   }
   return n;
  }
  public boolean isEmpty() {
   return ! new SubmapIterator().hasNext();
  }
  public IntComparator comparator() {
   return actualComparator;
  }
  /** Returns a head view; the upper bound is shrunk only if the new one is stricter. */
  public Int2CharSortedMap headMap( final int to ) {
   if ( top ) return new Submap( from, bottom, to, false );
   return compare( to, this.to ) < 0 ? new Submap( from, bottom, to, false ) : this;
  }
  /** Returns a tail view; the lower bound is shrunk only if the new one is stricter. */
  public Int2CharSortedMap tailMap( final int from ) {
   if ( bottom ) return new Submap( from, false, to, top );
   return compare( from, this.from ) > 0 ? new Submap( from, false, to, top ) : this;
  }
  /** Returns a sub view, clamping the requested bounds to this submap's own bounds. */
  public Int2CharSortedMap subMap( int from, int to ) {
   if ( top && bottom ) return new Submap( from, false, to, false );
   if ( ! top ) to = compare( to, this.to ) < 0 ? to : this.to;
   if ( ! bottom ) from = compare( from, this.from ) > 0 ? from : this.from;
   if ( ! top && ! bottom && from == this.from && to == this.to ) return this;
   return new Submap( from, false, to, false );
  }
/** Locates the first entry.
*
* @return the first entry of this submap, or <code>null</code> if the submap is empty.
*/
public Int2CharAVLTreeMap.Entry firstEntry() {
if ( tree == null ) return null;
// If this submap goes to -infinity, we return the main map first entry; otherwise, we locate the start of the map.
Int2CharAVLTreeMap.Entry e;
if ( bottom ) e = firstEntry;
else {
e = locateKey( from );
// If we find either the start or something greater we're OK.
if ( compare( e.key, from ) < 0 ) e = e.next();
}
// Finally, if this subset doesn't go to infinity, we check that the resulting key isn't greater than the end.
if ( e == null || ! top && compare( e.key, to ) >= 0 ) return null;
return e;
}
/** Locates the last entry.
*
* @return the last entry of this submap, or <code>null</code> if the submap is empty.
*/
public Int2CharAVLTreeMap.Entry lastEntry() {
if ( tree == null ) return null;
// If this submap goes to infinity, we return the main map last entry; otherwise, we locate the end of the map.
Int2CharAVLTreeMap.Entry e;
if ( top ) e = lastEntry;
else {
e = locateKey( to );
// If we find something smaller than the end we're OK.
if ( compare( e.key, to ) >= 0 ) e = e.prev();
}
// Finally, if this subset doesn't go to -infinity, we check that the resulting key isn't smaller than the start.
if ( e == null || ! bottom && compare( e.key, from ) < 0 ) return null;
return e;
}
public int firstIntKey() {
Int2CharAVLTreeMap.Entry e = firstEntry();
if ( e == null ) throw new NoSuchElementException();
return e.key;
}
public int lastIntKey() {
Int2CharAVLTreeMap.Entry e = lastEntry();
if ( e == null ) throw new NoSuchElementException();
return e.key;
}
public Integer firstKey() {
Int2CharAVLTreeMap.Entry e = firstEntry();
if ( e == null ) throw new NoSuchElementException();
return e.getKey();
}
/** Returns the greatest key of this submap as a boxed {@link Integer}.
 *
 * @throws NoSuchElementException if the submap is empty.
 */
public Integer lastKey() {
 final Int2CharAVLTreeMap.Entry entry = lastEntry();
 if ( entry != null ) return entry.getKey();
 throw new NoSuchElementException();
}
/** An iterator for subranges.
*
* <P>This class inherits from {@link TreeIterator}, but overrides the methods that
* update the pointer after a {@link java.util.ListIterator#next()} or {@link java.util.ListIterator#previous()}. If we would
* move out of the range of the submap we just overwrite the next or previous
* entry with <code>null</code>.
*/
private class SubmapIterator extends TreeIterator {
// Start positioned before the first in-range entry (next == null if the submap is empty).
SubmapIterator() {
next = firstEntry();
}
// Position this iterator "around" key k so that a subsequent next() returns
// the first in-range key strictly greater than k, and previous() returns the
// last in-range key <= k.
SubmapIterator( final int k ) {
this();
if ( next != null ) {
// k lies below the range start: stay positioned at the very beginning.
if ( ! bottom && compare( k, next.key ) < 0 ) prev = null;
// k is at or past the last in-range key: position at the very end.
// NOTE: the comparison also assigns prev = lastEntry() as a side effect.
else if ( ! top && compare( k, ( prev = lastEntry() ).key ) >= 0 ) next = null;
else {
// k falls inside the range: find the nearest entry and split around it.
next = locateKey( k );
if ( compare( next.key, k ) <= 0 ) {
prev = next;
next = next.next();
}
else prev = next.prev();
}
}
}
// Clamp the predecessor pointer to the range: walking below `from` ends iteration.
void updatePrevious() {
prev = prev.prev();
if ( ! bottom && prev != null && Int2CharAVLTreeMap.this.compare( prev.key, from ) < 0 ) prev = null;
}
// Clamp the successor pointer to the range: reaching `to` (exclusive) ends iteration.
void updateNext() {
next = next.next();
if ( ! top && next != null && Int2CharAVLTreeMap.this.compare( next.key, to ) >= 0 ) next = null;
}
}
// Bidirectional iterator over the submap's entries; all positioning and
// range-clamping behavior is inherited from SubmapIterator.
private class SubmapEntryIterator extends SubmapIterator implements ObjectListIterator<Int2CharMap.Entry > {
SubmapEntryIterator() {}
SubmapEntryIterator( final int k ) {
super( k );
}
public Int2CharMap.Entry next() { return nextEntry(); }
public Int2CharMap.Entry previous() { return previousEntry(); }
// This view is not modifiable through its iterators.
public void set( Int2CharMap.Entry ok ) { throw new UnsupportedOperationException(); }
public void add( Int2CharMap.Entry ok ) { throw new UnsupportedOperationException(); }
}
/** An iterator on a subrange of keys.
*
* <P>This class can iterate in both directions on a subrange of the
* keys of a threaded tree. We simply override the {@link
* java.util.ListIterator#next()}/{@link java.util.ListIterator#previous()} methods (and possibly their
* type-specific counterparts) so that they return keys instead of
* entries.
*/
private final class SubmapKeyIterator extends SubmapIterator implements IntListIterator {
public SubmapKeyIterator() { super(); }
public SubmapKeyIterator( int from ) { super( from ); }
// Primitive-returning variants (avoid boxing).
public int nextInt() { return nextEntry().key; }
public int previousInt() { return previousEntry().key; }
public void set( int k ) { throw new UnsupportedOperationException(); }
public void add( int k ) { throw new UnsupportedOperationException(); }
// Boxed variants required by the generic ListIterator contract.
public Integer next() { return (Integer.valueOf(nextEntry().key)); }
public Integer previous() { return (Integer.valueOf(previousEntry().key)); }
// Keys of a submap view cannot be mutated through the iterator.
public void set( Integer ok ) { throw new UnsupportedOperationException(); }
public void add( Integer ok ) { throw new UnsupportedOperationException(); }
};
/** An iterator on a subrange of values.
*
* <P>This class can iterate in both directions on the values of a
* subrange of the keys of a threaded tree. We simply override the
* {@link java.util.ListIterator#next()}/{@link java.util.ListIterator#previous()} methods (and possibly their
* type-specific counterparts) so that they return values instead of
* entries.
*/
private final class SubmapValueIterator extends SubmapIterator implements CharListIterator {
// Primitive-returning variants (avoid boxing).
public char nextChar() { return nextEntry().value; }
public char previousChar() { return previousEntry().value; }
public void set( char v ) { throw new UnsupportedOperationException(); }
public void add( char v ) { throw new UnsupportedOperationException(); }
// Boxed variants required by the generic ListIterator contract.
public Character next() { return (Character.valueOf(nextEntry().value)); }
public Character previous() { return (Character.valueOf(previousEntry().value)); }
// Values cannot be replaced or inserted through this view's iterator.
public void set( Character ok ) { throw new UnsupportedOperationException(); }
public void add( Character ok ) { throw new UnsupportedOperationException(); }
};
}
/** Returns a deep copy of this tree map.
*
* <P>This method performs a deep copy of this tree map; the data stored in the
* set, however, is not cloned. Note that this makes a difference only for object keys.
*
* @return a deep copy of this tree map.
*/
@SuppressWarnings("unchecked")
public Int2CharAVLTreeMap clone() {
Int2CharAVLTreeMap c;
try {
c = (Int2CharAVLTreeMap )super.clone();
}
catch(CloneNotSupportedException cantHappen) {
// This class is Cloneable; super.clone() cannot actually fail here.
throw new InternalError();
}
// Drop the shallow-copied view caches; they must be rebuilt lazily for the clone.
c.keys = null;
c.values = null;
c.entries = null;
c.allocatePaths();
if ( count != 0 ) {
// Also this apparently unfathomable code is derived from GNU libavl.
// Iterative structural copy of a *threaded* tree: p walks the source, q
// builds the copy in lockstep. rp/rq are pseudo-roots whose left child is
// the real root, so the root needs no special-casing.
Entry e, p, q, rp = new Entry (), rq = new Entry ();
p = rp;
rp.left( tree );
q = rq;
rq.pred( null );
while( true ) {
// If p's left pointer is a real child (not a predecessor thread), copy it.
if ( ! p.pred() ) {
e = p.left.clone();
e.pred( q.left );
e.succ( q );
q.left( e );
p = p.left;
q = q.left;
}
else {
// Follow successor threads back up until we reach an unvisited right child.
while( p.succ() ) {
p = p.right;
if ( p == null ) {
// Walked off the end of the tree: the copy is complete.
q.right = null;
c.tree = rq.left;
// Recompute the cached extreme entries of the clone.
c.firstEntry = c.tree;
while( c.firstEntry.left != null ) c.firstEntry = c.firstEntry.left;
c.lastEntry = c.tree;
while( c.lastEntry.right != null ) c.lastEntry = c.lastEntry.right;
return c;
}
q = q.right;
}
p = p.right;
q = q.right;
}
// If p's right pointer is a real child (not a successor thread), copy it.
if ( ! p.succ() ) {
e = p.right.clone();
e.succ( q.right );
e.pred( q );
q.right( e );
}
}
}
return c;
}
// Serializes the default fields followed by all key/value pairs in ascending
// key order; readObject()/readTree() rely on this ordering to rebuild a
// balanced tree without re-sorting.
private void writeObject(java.io.ObjectOutputStream s) throws java.io.IOException {
int n = count;
EntryIterator i = new EntryIterator();
Entry e;
s.defaultWriteObject();
while(n-- != 0) {
e = i.nextEntry();
s.writeInt( e.key );
s.writeChar( e.value );
}
}
/** Reads the given number of entries from the input stream, returning the corresponding tree.
*
* @param s the input stream.
* @param n the (positive) number of entries to read.
* @param pred the entry containing the key that precedes the first key in the tree.
* @param succ the entry containing the key that follows the last key in the tree.
*/
@SuppressWarnings("unchecked")
private Entry readTree( final java.io.ObjectInputStream s, final int n, final Entry pred, final Entry succ ) throws java.io.IOException, ClassNotFoundException {
// Builds a perfectly balanced subtree of n entries read (in ascending key
// order) from the stream, wiring the predecessor/successor threads at the
// subtree's boundaries to `pred` and `succ`.
if ( n == 1 ) {
final Entry top = new Entry ( s.readInt(), s.readChar() );
top.pred( pred );
top.succ( succ );
return top;
}
if ( n == 2 ) {
/* We handle separately this case so that recursion will
*always* be on nonempty subtrees. */
final Entry top = new Entry ( s.readInt(), s.readChar() );
top.right( new Entry ( s.readInt(), s.readChar() ) );
top.right.pred( top );
// Right-leaning pair: balance factor +1.
top.balance( 1 );
top.pred( pred );
top.right.succ( succ );
return top;
}
// The right subtree is the largest one.
final int rightN = n / 2, leftN = n - rightN - 1;
final Entry top = new Entry ();
// Recurse left first so the stream is consumed strictly in key order.
top.left( readTree( s, leftN, pred, top ) );
top.key = s.readInt();
top.value = s.readChar();
top.right( readTree( s, rightN, top, succ ) );
if ( n == ( n & -n ) ) top.balance( 1 ); // Quick test for determining whether n is a power of 2.
return top;
}
// Rebuilds the tree from the key/value pairs written by writeObject(), then
// restores the transient state (comparator, traversal paths, extreme entries).
private void readObject( java.io.ObjectInputStream s ) throws java.io.IOException, ClassNotFoundException {
s.defaultReadObject();
/* The storedComparator is now correctly set, but we must restore
on-the-fly the actualComparator. */
setActualComparator();
allocatePaths();
if ( count != 0 ) {
tree = readTree( s, count, null, null );
Entry e;
// Walk to the leftmost entry to restore the cached first entry.
e = tree;
while( e.left() != null ) e = e.left();
firstEntry = e;
// Walk to the rightmost entry to restore the cached last entry.
e = tree;
while( e.right() != null ) e = e.right();
lastEntry = e;
}
if ( ASSERTS ) checkTree( tree );
}
// Invariant-checking hook invoked only when ASSERTS is enabled; this build
// ships a no-op stub, so the call compiles away to nothing useful.
@SuppressWarnings("rawtypes")
private static int checkTree( @SuppressWarnings("unused") Entry e ) { return 0; }
}
| apache-2.0 |
lyubomyr-shaydariv/ext-gson | src/main/java/lsh/ext/gson/adapters/EpochDateTypeAdapter.java | 997 | package lsh.ext.gson.adapters;
import java.io.IOException;
import java.util.Date;
import com.google.gson.TypeAdapter;
import com.google.gson.stream.JsonReader;
import com.google.gson.stream.JsonWriter;
import lombok.AccessLevel;
import lombok.RequiredArgsConstructor;
/**
* Represents the epoch to {@link Date} and vice versa type adapter.
*
* @author Lyubomyr Shaydariv
*/
@RequiredArgsConstructor(access = AccessLevel.PRIVATE)
public final class EpochDateTypeAdapter
		extends TypeAdapter<Date> {

	// Single shared, stateless instance; nullSafe() makes null round-trip as JSON null.
	private static final TypeAdapter<Date> instance = new EpochDateTypeAdapter()
			.nullSafe();

	/**
	 * @return An instance of {@link EpochDateTypeAdapter}.
	 */
	public static TypeAdapter<Date> getInstance() {
		return instance;
	}

	/**
	 * Reads a Unix epoch timestamp expressed in <em>seconds</em> and converts it
	 * to a {@link Date} (which is millisecond-based).
	 */
	@Override
	public Date read(final JsonReader in)
			throws IOException {
		return new Date(in.nextLong() * 1000);
	}

	/**
	 * Writes the given {@link Date} as a Unix epoch timestamp in <em>seconds</em>.
	 *
	 * <p>{@link Math#floorDiv(long, long)} is used instead of plain {@code /}
	 * because integer division truncates toward zero: for pre-epoch (negative)
	 * timestamps with a sub-second component, truncation would yield an epoch
	 * second one greater than the correct (floored) value.
	 */
	@Override
	public void write(final JsonWriter out, final Date value)
			throws IOException {
		out.value(Math.floorDiv(value.getTime(), 1000));
	}

}
| apache-2.0 |
ernestp/consulo | platform/external-system-impl/src/com/intellij/openapi/externalSystem/service/project/ExternalLibraryPathTypeMapperImpl.java | 1797 | /*
* Copyright 2000-2013 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.openapi.externalSystem.service.project;
import com.intellij.openapi.externalSystem.model.project.LibraryPathType;
import com.intellij.openapi.roots.OrderRootType;
import com.intellij.openapi.roots.types.BinariesOrderRootType;
import com.intellij.openapi.roots.types.DocumentationOrderRootType;
import com.intellij.openapi.roots.types.SourcesOrderRootType;
import org.jetbrains.annotations.NotNull;
import java.util.EnumMap;
import java.util.Map;
/**
* @author Denis Zhdanov
* @since 1/17/13 3:55 PM
*/
public class ExternalLibraryPathTypeMapperImpl implements ExternalLibraryPathTypeMapper {
// Static, immutable-after-init translation table from external-system path
// types to IDE order root types. EnumMap gives O(1) array-backed lookups.
private static final Map<LibraryPathType, OrderRootType> MAPPINGS = new EnumMap<LibraryPathType, OrderRootType>(LibraryPathType.class);
static {
MAPPINGS.put(LibraryPathType.BINARY, BinariesOrderRootType.getInstance());
MAPPINGS.put(LibraryPathType.SOURCE, SourcesOrderRootType.getInstance());
MAPPINGS.put(LibraryPathType.DOC, DocumentationOrderRootType.getInstance());
// Guards against a new LibraryPathType constant being added without a
// corresponding mapping (map() would otherwise return null despite @NotNull).
assert LibraryPathType.values().length == MAPPINGS.size();
}
@NotNull
@Override
public OrderRootType map(@NotNull LibraryPathType type) {
return MAPPINGS.get(type);
}
}
| apache-2.0 |
dolszews/appium-tigerspike | src/main/java/io/appium/java_client/HidesKeyboardWithKeyName.java | 1755 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.appium.java_client;
import static io.appium.java_client.MobileCommand.hideKeyboardCommand;
/**
 * Extends the basic {@code HidesKeyboard} capability with variants that press a
 * specific named key (and optionally use a named strategy) to dismiss the
 * on-screen keyboard. Both defaults delegate to the Appium hide-keyboard command.
 */
public interface HidesKeyboardWithKeyName extends HidesKeyboard {
/**
* Hides the keyboard by pressing the button specified by keyName if it is
* showing.
*
* @param keyName The button pressed by the mobile driver to attempt hiding the
* keyboard.
*/
default void hideKeyboard(String keyName) {
CommandExecutionHelper.execute(this, hideKeyboardCommand(keyName));
}
/**
* Hides the keyboard if it is showing. Hiding the keyboard often
* depends on the way an app is implemented, no single strategy always
* works.
*
* @param strategy HideKeyboardStrategy.
* @param keyName a String, representing the text displayed on the button of the
* keyboard you want to press. For example: "Done".
*/
default void hideKeyboard(String strategy, String keyName) {
CommandExecutionHelper.execute(this, hideKeyboardCommand(strategy, keyName));
}
}
| apache-2.0 |
duftler/orca | orca-web/src/main/groovy/com/netflix/spinnaker/config/EnhancedMonitoringConfiguration.java | 3848 | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spinnaker.config;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Registry;
import com.netflix.spinnaker.orca.ExecutionStatus;
import com.netflix.spinnaker.orca.pipeline.model.Execution;
import com.netflix.spinnaker.orca.pipeline.persistence.ExecutionRepository;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.condition.ConditionalOnExpression;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Configuration;
import org.springframework.scheduling.annotation.Scheduled;
import rx.schedulers.Schedulers;
@Configuration
@EnableConfigurationProperties(EnhancedMonitoringConfigurationProperties.class)
@ConditionalOnExpression(value = "${pollers.enhanced-monitoring.enabled:false}")
public class EnhancedMonitoringConfiguration {
private final Logger log = LoggerFactory.getLogger(getClass());
private final Registry registry;
private final ExecutionRepository executionRepository;
private final EnhancedMonitoringConfigurationProperties configuration;
// One gauge backing value per configured application; registered once in the
// constructor and only updated (never re-registered) by refresh().
private final Map<String, AtomicLong> orchestrationCountPerApplication = new HashMap<>();
@Autowired
public EnhancedMonitoringConfiguration(
Registry registry,
ExecutionRepository executionRepository,
EnhancedMonitoringConfigurationProperties configuration) {
this.registry = registry;
this.executionRepository = executionRepository;
this.configuration = configuration;
Id runningOrchestrationsId =
registry
.createId("executions.running")
.withTag("executionType", "Orchestration"); // similar to what MetricsTagHelper is doing
// Pre-register an application-tagged gauge for every configured application.
for (String application : configuration.getApplications()) {
Id applicationSpecificId = runningOrchestrationsId.withTag("application", application);
orchestrationCountPerApplication.put(
application, registry.gauge(applicationSpecificId, new AtomicLong(0)));
}
}
// Periodically polls the execution repository and refreshes each gauge with the
// current number of RUNNING orchestrations for that application.
@Scheduled(fixedDelayString = "${pollers.enhanced-monitoring.interval-ms:60000}")
void refresh() {
log.info("Refreshing Running Orchestration Counts ({})", orchestrationCountPerApplication);
for (String application : configuration.getApplications()) {
try {
List<Execution> executions =
executionRepository
.retrieveOrchestrationsForApplication(
application,
new ExecutionRepository.ExecutionCriteria()
.setStatuses(ExecutionStatus.RUNNING))
.subscribeOn(Schedulers.io())
.toList()
.toBlocking()
.single();
orchestrationCountPerApplication.get(application).set(executions.size());
} catch (Exception e) {
// A failure for one application must not abort the refresh of the others;
// the stale gauge value is kept until the next successful poll.
log.error(
"Unable to refresh running orchestration count (application: {})", application, e);
}
}
log.info("Refreshed Running Orchestration Counts ({})", orchestrationCountPerApplication);
}
}
| apache-2.0 |
dream-x/ignite | modules/core/src/main/java/org/apache/ignite/internal/processors/query/schema/SchemaIndexCacheVisitorImpl.java | 6759 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.internal.processors.query.schema;
import java.util.List;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
import org.apache.ignite.internal.processors.cache.GridCacheContext;
import org.apache.ignite.internal.processors.cache.GridCacheEntryEx;
import org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException;
import org.apache.ignite.internal.processors.cache.KeyCacheObject;
import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtInvalidPartitionException;
import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition;
import org.apache.ignite.internal.processors.cache.distributed.near.GridNearCacheAdapter;
import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
import org.apache.ignite.internal.processors.cache.persistence.CacheDataRowAdapter;
import org.apache.ignite.internal.util.lang.GridCursor;
import org.apache.ignite.internal.util.typedef.internal.S;
import static org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState.EVICTED;
import static org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState.OWNING;
import static org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState.RENTING;
/**
* Traversor operating all primary and backup partitions of given cache.
*/
@SuppressWarnings("ForLoopReplaceableByForEach")
public class SchemaIndexCacheVisitorImpl implements SchemaIndexCacheVisitor {
/** Count of rows, being processed within a single checkpoint lock. */
private static final int BATCH_SIZE = 1000;
/** Cache context. */
private final GridCacheContext cctx;
/** Row filter. */
private final SchemaIndexCacheFilter rowFilter;
/** Cancellation token. */
private final SchemaIndexOperationCancellationToken cancel;
/**
* Constructor.
* @param cctx Cache context.
*/
public SchemaIndexCacheVisitorImpl(GridCacheContext cctx) {
this(cctx, null, null);
}
/**
* Constructor.
* @param cctx Cache context.
* @param rowFilter Optional row filter; {@code null} means all rows are visited.
* @param cancel Cancellation token.
*/
public SchemaIndexCacheVisitorImpl(GridCacheContext cctx, SchemaIndexCacheFilter rowFilter,
SchemaIndexOperationCancellationToken cancel) {
this.rowFilter = rowFilter;
this.cancel = cancel;
// For near caches we operate on the underlying DHT cache context.
if (cctx.isNear())
cctx = ((GridNearCacheAdapter)cctx.cache()).dht().context();
this.cctx = cctx;
}
/** {@inheritDoc} */
@Override public void visit(SchemaIndexCacheVisitorClosure clo) throws IgniteCheckedException {
assert clo != null;
List<GridDhtLocalPartition> parts = cctx.topology().localPartitions();
// Indexed loop is intentional (see class-level @SuppressWarnings).
for (int i = 0, size = parts.size(); i < size; i++)
processPartition(parts.get(i), clo);
}
/**
* Process partition.
*
* @param part Partition.
* @param clo Index closure.
* @throws IgniteCheckedException If failed.
*/
private void processPartition(GridDhtLocalPartition part, SchemaIndexCacheVisitorClosure clo)
throws IgniteCheckedException {
checkCancelled();
// Reserve the partition so it cannot be evicted while we scan it.
boolean reserved = false;
if (part != null && part.state() != EVICTED)
reserved = (part.state() == OWNING || part.state() == RENTING) && part.reserve();
if (!reserved)
return;
try {
GridCursor<? extends CacheDataRow> cursor = part.dataStore().cursor(cctx.cacheId(),
null,
null,
CacheDataRowAdapter.RowData.KEY_ONLY);
boolean locked = false;
try {
int cntr = 0;
while (cursor.next()) {
KeyCacheObject key = cursor.get().key();
if (!locked) {
cctx.shared().database().checkpointReadLock();
locked = true;
}
processKey(key, clo);
// Release the checkpoint read lock every BATCH_SIZE rows so a
// pending checkpoint is not starved by a long index rebuild.
if (++cntr % BATCH_SIZE == 0) {
cctx.shared().database().checkpointReadUnlock();
locked = false;
}
// Stop early if the partition started moving off this node.
if (part.state() == RENTING)
break;
}
}
finally {
if (locked)
cctx.shared().database().checkpointReadUnlock();
}
}
finally {
// Always undo the reservation taken above.
part.release();
}
}
/**
* Process single key.
*
* @param key Key.
* @param clo Closure.
* @throws IgniteCheckedException If failed.
*/
private void processKey(KeyCacheObject key, SchemaIndexCacheVisitorClosure clo) throws IgniteCheckedException {
// Retry loop: a concurrently removed entry is re-fetched until we either
// update its index or learn the key no longer belongs to a local partition.
while (true) {
try {
checkCancelled();
GridCacheEntryEx entry = cctx.cache().entryEx(key);
try {
entry.updateIndex(rowFilter, clo);
}
finally {
cctx.evicts().touch(entry, AffinityTopologyVersion.NONE);
}
break;
}
catch (GridDhtInvalidPartitionException ignore) {
// Partition moved away concurrently; nothing to index here.
break;
}
catch (GridCacheEntryRemovedException ignored) {
// No-op.
}
}
}
/**
* Check if visit process is not cancelled.
*
* @throws IgniteCheckedException If cancelled.
*/
private void checkCancelled() throws IgniteCheckedException {
if (cancel != null && cancel.isCancelled())
throw new IgniteCheckedException("Index creation was cancelled.");
}
/** {@inheritDoc} */
@Override public String toString() {
return S.toString(SchemaIndexCacheVisitorImpl.class, this);
}
}
| apache-2.0 |
CaoYouXin/serveV2 | apis/blog/src/blog/controller/ListResourceLevelCtrl.java | 1045 | package blog.controller;
import auth.AuthHelper;
import beans.BeanManager;
import blog.service.IResourceLevelService;
import org.apache.http.HttpException;
import org.apache.http.HttpRequest;
import org.apache.http.HttpResponse;
import org.apache.http.protocol.HttpContext;
import rest.RestHelper;
import rest.WithMatcher;
import java.io.IOException;
/**
 * REST controller that lists all blog resource levels. Admin-only; matched on
 * the URL pattern {@code /blog/resource-level/list}.
 */
public class ListResourceLevelCtrl extends WithMatcher {
// Service resolved once at class-load time from the bean container.
private IResourceLevelService resourceLevelService = BeanManager.getInstance().getService(IResourceLevelService.class);
/** Required authorization level: administrators only. */
@Override
public int auth() {
return AuthHelper.ADMIN;
}
/** Human-readable controller name used for registration/diagnostics. */
@Override
public String name() {
return "blog list resource level";
}
/** URL pattern this controller handles. */
@Override
public String urlPattern() {
return "/blog/resource-level/list";
}
// Delegates to IResourceLevelService.list() and writes its result to the response.
@Override
public void handle(HttpRequest httpRequest, HttpResponse httpResponse, HttpContext httpContext) throws HttpException, IOException {
RestHelper.oneCallAndRet(httpResponse, this.resourceLevelService, "list");
}
}
| apache-2.0 |
uschindler/elasticsearch | test/logger-usage/src/test/java/org/elasticsearch/test/loggerusage/ESLoggerUsageTests.java | 11342 | /*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.test.loggerusage;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.Marker;
import org.apache.logging.log4j.message.Message;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.MessageSupplier;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.common.SuppressLoggerChecks;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.ESLogMessage;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.loggerusage.ESLoggerUsageChecker.WrongLoggerUsage;
import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Stream;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
import static org.hamcrest.Matchers.oneOf;
public class ESLoggerUsageTests extends ESTestCase {
public void testLoggerUsageChecks() throws IOException {
for (Method method : getClass().getMethods()) {
if (method.getDeclaringClass().equals(getClass())) {
if (method.getName().startsWith("check")) {
logger.info("Checking logger usage for method {}", method.getName());
InputStream classInputStream = getClass().getResourceAsStream(getClass().getSimpleName() + ".class");
List<WrongLoggerUsage> errors = new ArrayList<>();
ESLoggerUsageChecker.check(errors::add, classInputStream,
m -> m.equals(method.getName()) || m.startsWith("lambda$" + method.getName()));
if (method.getName().startsWith("checkFail")) {
assertFalse("Expected " + method.getName() + " to have wrong Logger usage", errors.isEmpty());
} else {
assertTrue("Method " + method.getName() + " has unexpected Logger usage errors: " + errors, errors.isEmpty());
}
} else {
assertTrue("only allow methods starting with test or check in this class", method.getName().startsWith("test"));
}
}
}
}
public void testLoggerUsageCheckerCompatibilityWithLog4j2Logger() throws NoSuchMethodException {
for (Method method : Logger.class.getMethods()) {
if (ESLoggerUsageChecker.LOGGER_METHODS.contains(method.getName())) {
assertThat(method.getParameterTypes().length, greaterThanOrEqualTo(1));
int markerOffset = method.getParameterTypes()[0].equals(Marker.class) ? 1 : 0;
int paramLength = method.getParameterTypes().length - markerOffset;
if (method.isVarArgs()) {
assertEquals(2, paramLength);
assertEquals(String.class, method.getParameterTypes()[markerOffset]);
assertThat(method.getParameterTypes()[markerOffset + 1], is(oneOf(Object[].class, Supplier[].class)));
} else {
assertThat(method.getParameterTypes()[markerOffset], is(oneOf(Message.class, MessageSupplier.class,
CharSequence.class, Object.class, String.class, Supplier.class)));
if (paramLength == 2) {
assertThat(method.getParameterTypes()[markerOffset + 1], is(oneOf(Throwable.class, Object.class)));
if (method.getParameterTypes()[markerOffset + 1].equals(Object.class)) {
assertEquals(String.class, method.getParameterTypes()[markerOffset]);
}
}
if (paramLength > 2) {
assertEquals(String.class, method.getParameterTypes()[markerOffset]);
assertThat(paramLength, lessThanOrEqualTo(11));
for (int i = 1; i < paramLength; i++) {
assertEquals(Object.class, method.getParameterTypes()[markerOffset + i]);
}
}
}
}
}
for (String methodName : ESLoggerUsageChecker.LOGGER_METHODS) {
assertEquals(48, Stream.of(Logger.class.getMethods()).filter(m -> methodName.equals(m.getName())).count());
}
for (Constructor<?> constructor : ParameterizedMessage.class.getConstructors()) {
assertThat(constructor.getParameterTypes().length, greaterThanOrEqualTo(2));
assertEquals(String.class, constructor.getParameterTypes()[0]);
assertThat(constructor.getParameterTypes()[1], is(oneOf(String[].class, Object[].class, Object.class)));
if (constructor.getParameterTypes().length > 2) {
assertEquals(3, constructor.getParameterTypes().length);
if (constructor.getParameterTypes()[1].equals(Object.class)) {
assertEquals(Object.class, constructor.getParameterTypes()[2]);
} else {
assertEquals(Throwable.class, constructor.getParameterTypes()[2]);
}
}
}
assertEquals(5, ParameterizedMessage.class.getConstructors().length);
}
public void checkArgumentsProvidedInConstructor() {
logger.debug(new ESLogMessage("message {}", "some-arg")
.field("x-opaque-id", "some-value"));
}
public void checkWithUsage() {
logger.debug(new ESLogMessage("message {}")
.argAndField("x-opaque-id", "some-value")
.field("field", "value")
.with("field2", "value2"));
}
public void checkFailArraySizeForSubclasses(Object... arr) {
logger.debug(new ESLogMessage("message {}", arr));
}
public void checkFailForTooManyArgumentsInConstr() {
logger.debug(new ESLogMessage("message {}", "arg1", "arg2"));
}
public void checkFailForTooManyArgumentsWithChain() {
logger.debug(new ESLogMessage("message {}").argAndField("x-opaque-id", "some-value")
.argAndField("too-many-arg", "xxx"));
}
public void checkFailArraySize(String... arr) {
logger.debug(new ParameterizedMessage("text {}", (Object[])arr));
}
public void checkNumberOfArguments1() {
logger.info("Hello {}", "world");
}
public void checkFailNumberOfArguments1() {
logger.info("Hello {}");
}
@SuppressLoggerChecks(reason = "test ignore functionality")
public void checkIgnoreWhenAnnotationPresent() {
logger.info("Hello {}");
}
public void checkNumberOfArguments2() {
logger.info("Hello {}, {}, {}", "world", 2, "third argument");
}
public void checkFailNumberOfArguments2() {
logger.info("Hello {}, {}", "world", 2, "third argument");
}
public void checkNumberOfArguments3() {
logger.info("Hello {}, {}, {}, {}, {}, {}, {}", "world", 2, "third argument", 4, 5, 6, new String("last arg"));
}
public void checkFailNumberOfArguments3() {
logger.info("Hello {}, {}, {}, {}, {}, {}, {}", "world", 2, "third argument", 4, 5, 6, 7, new String("last arg"));
}
public void checkNumberOfArgumentsParameterizedMessage1() {
logger.info(new ParameterizedMessage("Hello {}, {}, {}", "world", 2, "third argument"));
}
public void checkFailNumberOfArgumentsParameterizedMessage1() {
logger.info(new ParameterizedMessage("Hello {}, {}", "world", 2, "third argument"));
}
public void checkNumberOfArgumentsParameterizedMessage2() {
logger.info(new ParameterizedMessage("Hello {}, {}", "world", 2));
}
public void checkFailNumberOfArgumentsParameterizedMessage2() {
logger.info(new ParameterizedMessage("Hello {}, {}, {}", "world", 2));
}
public void checkNumberOfArgumentsParameterizedMessage3() {
logger.info((Supplier<?>) () -> new ParameterizedMessage("Hello {}, {}, {}", "world", 2, "third argument"));
}
public void checkFailNumberOfArgumentsParameterizedMessage3() {
logger.info((Supplier<?>) () -> new ParameterizedMessage("Hello {}, {}", "world", 2, "third argument"));
}
public void checkOrderOfExceptionArgument() {
logger.info("Hello", new Exception());
}
public void checkOrderOfExceptionArgument1() {
logger.info((Supplier<?>) () -> new ParameterizedMessage("Hello {}", "world"), new Exception());
}
public void checkFailOrderOfExceptionArgument1() {
logger.info("Hello {}", "world", new Exception());
}
public void checkOrderOfExceptionArgument2() {
logger.info((Supplier<?>) () -> new ParameterizedMessage("Hello {}, {}", "world", 42), new Exception());
}
public void checkFailOrderOfExceptionArgument2() {
logger.info("Hello {}, {}", "world", 42, new Exception());
}
public void checkNonConstantMessageWithZeroArguments(boolean b) {
logger.info(Boolean.toString(b), new Exception());
}
public void checkFailNonConstantMessageWithArguments(boolean b) {
logger.info((Supplier<?>) () -> new ParameterizedMessage(Boolean.toString(b), 42), new Exception());
}
public void checkComplexUsage(boolean b) {
String message = "Hello {}, {}";
Object[] args = new Object[] { "world", 42 };
if (b) {
message = "also two args {}{}";
args = new Object[] { "world", 43 };
}
logger.info(message, args);
}
public void checkFailComplexUsage1(boolean b) {
String message = "Hello {}, {}";
Object[] args = new Object[] { "world", 42 };
if (b) {
message = "just one arg {}";
args = new Object[] { "world", 43 };
}
logger.info(message, args);
}
public void checkFailComplexUsage2(boolean b) {
String message = "Hello {}, {}";
Object[] args = new Object[] { "world", 42 };
if (b) {
message = "also two args {}{}";
args = new Object[] { "world", 43, "another argument" };
}
logger.info(message, args);
}
// Fixture: deprecation logging via DeprecationLogger — one placeholder, one argument.
public void checkDeprecationLogger() {
DeprecationLogger deprecationLogger = new DeprecationLogger(logger);
deprecationLogger.deprecate("key","message {}", 123);
}
}
| apache-2.0 |
aws/aws-sdk-java | aws-java-sdk-licensemanager/src/main/java/com/amazonaws/services/licensemanager/model/transform/IssuerJsonUnmarshaller.java | 2861 | /*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.licensemanager.model.transform;
import java.math.*;
import javax.annotation.Generated;
import com.amazonaws.services.licensemanager.model.*;
import com.amazonaws.transform.SimpleTypeJsonUnmarshallers.*;
import com.amazonaws.transform.*;
import com.fasterxml.jackson.core.JsonToken;
import static com.fasterxml.jackson.core.JsonToken.*;
/**
 * Issuer JSON Unmarshaller.
 *
 * <p>Generated code (aws-java-sdk-code-generator) — do not hand-edit; changes will be
 * overwritten on regeneration. Comments below only explain the generated pattern.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class IssuerJsonUnmarshaller implements Unmarshaller<Issuer, JsonUnmarshallerContext> {
// Parses a single Issuer object from the JSON stream; returns null for an explicit JSON null.
public Issuer unmarshall(JsonUnmarshallerContext context) throws Exception {
Issuer issuer = new Issuer();
// Depth bookkeeping: only fields found at targetDepth (one level inside the object we
// started at) belong to this Issuer; deeper/shallower tokens are skipped or end the loop.
int originalDepth = context.getCurrentDepth();
String currentParentElement = context.getCurrentParentElement();
int targetDepth = originalDepth + 1;
JsonToken token = context.getCurrentToken();
if (token == null)
token = context.nextToken();
if (token == VALUE_NULL) {
return null;
}
while (true) {
if (token == null)
break;
if (token == FIELD_NAME || token == START_OBJECT) {
if (context.testExpression("Name", targetDepth)) {
context.nextToken();
issuer.setName(context.getUnmarshaller(String.class).unmarshall(context));
}
if (context.testExpression("SignKey", targetDepth)) {
context.nextToken();
issuer.setSignKey(context.getUnmarshaller(String.class).unmarshall(context));
}
} else if (token == END_ARRAY || token == END_OBJECT) {
// Stop once the parser has closed back out of the object we started in.
if (context.getLastParsedParentElement() == null || context.getLastParsedParentElement().equals(currentParentElement)) {
if (context.getCurrentDepth() <= originalDepth)
break;
}
}
token = context.nextToken();
}
return issuer;
}
private static IssuerJsonUnmarshaller instance;
// Lazy singleton, deliberately unsynchronized in generated SDK code: the race is benign
// (worst case two instances are briefly created; the unmarshaller appears stateless).
public static IssuerJsonUnmarshaller getInstance() {
if (instance == null)
instance = new IssuerJsonUnmarshaller();
return instance;
}
}
| apache-2.0 |
ExplorViz/ExplorViz | src/explorviz/shared/usertracking/records/landscape/NodeGroupCloseRecord.java | 285 | package explorviz.shared.usertracking.records.landscape;
import explorviz.shared.model.NodeGroup;
/**
 * User-tracking record emitted when a node group is closed in the landscape view.
 * The protected no-arg constructor presumably exists for (de)serialization — TODO confirm.
 */
public class NodeGroupCloseRecord extends NodeGroupRecord {
protected NodeGroupCloseRecord() {
}
public NodeGroupCloseRecord(final NodeGroup nodeGroup) {
super(nodeGroup);
}
}
| apache-2.0 |
deeplearning4j/deeplearning4j | datavec/datavec-data/datavec-geo/src/test/java/org/datavec/api/transform/transform/TestGeoTransforms.java | 6800 | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
package org.datavec.api.transform.transform;
import org.datavec.api.transform.ColumnType;
import org.datavec.api.transform.Transform;
import org.datavec.api.transform.geo.LocationType;
import org.datavec.api.transform.schema.Schema;
import org.datavec.api.transform.transform.geo.CoordinatesDistanceTransform;
import org.datavec.api.transform.transform.geo.IPAddressToCoordinatesTransform;
import org.datavec.api.transform.transform.geo.IPAddressToLocationTransform;
import org.datavec.api.writable.DoubleWritable;
import org.datavec.api.writable.Text;
import org.datavec.api.writable.Writable;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.nd4j.common.io.ClassPathResource;
import java.io.*;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import static org.junit.Assert.assertEquals;
/**
* @author saudet
*/
public class TestGeoTransforms {
@BeforeClass
public static void beforeClass() throws Exception {
//Use test resources version to avoid tests suddenly failing due to IP/Location DB content changing
File f = new ClassPathResource("datavec-geo/GeoIP2-City-Test.mmdb").getFile();
System.setProperty(IPAddressToLocationTransform.GEOIP_FILE_PROPERTY, f.getPath());
}
@AfterClass
public static void afterClass(){
System.setProperty(IPAddressToLocationTransform.GEOIP_FILE_PROPERTY, "");
}
@Test
public void testCoordinatesDistanceTransform() throws Exception {
Schema schema = new Schema.Builder().addColumnString("point").addColumnString("mean").addColumnString("stddev")
.build();
Transform transform = new CoordinatesDistanceTransform("dist", "point", "mean", "stddev", "\\|");
transform.setInputSchema(schema);
Schema out = transform.transform(schema);
assertEquals(4, out.numColumns());
assertEquals(Arrays.asList("point", "mean", "stddev", "dist"), out.getColumnNames());
assertEquals(Arrays.asList(ColumnType.String, ColumnType.String, ColumnType.String, ColumnType.Double),
out.getColumnTypes());
assertEquals(Arrays.asList((Writable) new Text("-30"), new Text("20"), new Text("10"), new DoubleWritable(5.0)),
transform.map(Arrays.asList((Writable) new Text("-30"), new Text("20"), new Text("10"))));
assertEquals(Arrays.asList((Writable) new Text("50|40"), new Text("10|-20"), new Text("10|5"),
new DoubleWritable(Math.sqrt(160))),
transform.map(Arrays.asList((Writable) new Text("50|40"), new Text("10|-20"),
new Text("10|5"))));
}
@Test
public void testIPAddressToCoordinatesTransform() throws Exception {
Schema schema = new Schema.Builder().addColumnString("column").build();
Transform transform = new IPAddressToCoordinatesTransform("column", "CUSTOM_DELIMITER");
transform.setInputSchema(schema);
Schema out = transform.transform(schema);
assertEquals(1, out.getColumnMetaData().size());
assertEquals(ColumnType.String, out.getMetaData(0).getColumnType());
String in = "81.2.69.160";
double latitude = 51.5142;
double longitude = -0.0931;
List<Writable> writables = transform.map(Collections.singletonList((Writable) new Text(in)));
assertEquals(1, writables.size());
String[] coordinates = writables.get(0).toString().split("CUSTOM_DELIMITER");
assertEquals(2, coordinates.length);
assertEquals(latitude, Double.parseDouble(coordinates[0]), 0.1);
assertEquals(longitude, Double.parseDouble(coordinates[1]), 0.1);
//Check serialization: things like DatabaseReader etc aren't serializable, hence we need custom serialization :/
ByteArrayOutputStream baos = new ByteArrayOutputStream();
ObjectOutputStream oos = new ObjectOutputStream(baos);
oos.writeObject(transform);
byte[] bytes = baos.toByteArray();
ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
ObjectInputStream ois = new ObjectInputStream(bais);
Transform deserialized = (Transform) ois.readObject();
writables = deserialized.map(Collections.singletonList((Writable) new Text(in)));
assertEquals(1, writables.size());
coordinates = writables.get(0).toString().split("CUSTOM_DELIMITER");
//System.out.println(Arrays.toString(coordinates));
assertEquals(2, coordinates.length);
assertEquals(latitude, Double.parseDouble(coordinates[0]), 0.1);
assertEquals(longitude, Double.parseDouble(coordinates[1]), 0.1);
}
@Test
public void testIPAddressToLocationTransform() throws Exception {
Schema schema = new Schema.Builder().addColumnString("column").build();
LocationType[] locationTypes = LocationType.values();
String in = "81.2.69.160";
String[] locations = {"London", "2643743", "Europe", "6255148", "United Kingdom", "2635167",
"51.5142:-0.0931", "", "England", "6269131"}; //Note: no postcode in this test DB for this record
for (int i = 0; i < locationTypes.length; i++) {
LocationType locationType = locationTypes[i];
String location = locations[i];
Transform transform = new IPAddressToLocationTransform("column", locationType);
transform.setInputSchema(schema);
Schema out = transform.transform(schema);
assertEquals(1, out.getColumnMetaData().size());
assertEquals(ColumnType.String, out.getMetaData(0).getColumnType());
List<Writable> writables = transform.map(Collections.singletonList((Writable) new Text(in)));
assertEquals(1, writables.size());
assertEquals(location, writables.get(0).toString());
//System.out.println(location);
}
}
}
| apache-2.0 |
jfdenise/aesh | aesh/src/main/java/org/aesh/command/settings/Settings.java | 7255 | /*
* JBoss, Home of Professional Open Source
* Copyright 2014 Red Hat Inc. and/or its affiliates and other contributors
* as indicated by the @authors tag. All rights reserved.
* See the copyright.txt in the distribution for a
* full listing of individual contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.aesh.command.settings;
import org.aesh.command.CommandNotFoundHandler;
import org.aesh.command.activator.CommandActivator;
import org.aesh.command.activator.CommandActivatorProvider;
import org.aesh.command.activator.OptionActivator;
import org.aesh.command.activator.OptionActivatorProvider;
import org.aesh.command.completer.CompleterInvocation;
import org.aesh.command.completer.CompleterInvocationProvider;
import org.aesh.command.converter.ConverterInvocation;
import org.aesh.command.converter.ConverterInvocationProvider;
import org.aesh.command.export.ExportChangeListener;
import org.aesh.command.invocation.CommandInvocation;
import org.aesh.command.invocation.CommandInvocationProvider;
import org.aesh.command.invocation.InvocationProviders;
import org.aesh.command.registry.CommandRegistry;
import org.aesh.command.validator.ValidatorInvocation;
import org.aesh.command.validator.ValidatorInvocationProvider;
import org.aesh.io.Resource;
import org.aesh.readline.AeshContext;
import org.aesh.readline.alias.AliasManager;
import org.aesh.readline.editing.EditMode;
import org.aesh.terminal.Connection;
import java.io.File;
import java.io.InputStream;
import java.io.PrintStream;
import java.util.function.Consumer;
/**
 * Object that defines all tunable settings used by the console.
 *
 * @author <a href="mailto:stale.pedersen@jboss.org">Ståle W. Pedersen</a>
 */
public interface Settings<CI extends CommandInvocation,
CI3 extends ConverterInvocation, CI2 extends CompleterInvocation,
VI extends ValidatorInvocation, OA extends OptionActivator,
CA extends CommandActivator> extends Cloneable {
/** Current editing mode; the default mode is Emacs. */
EditMode.Mode mode();
/** Current edit mode instance. */
EditMode editMode();
/** File where the command history is stored. */
File historyFile();
/** Access permission used when writing the history file; {@code null} means the system default. */
FileAccessPermission historyFilePermission();
/** Maximum number of history entries kept. */
int historySize();
/** Bell style. NOTE: not used yet. */
String bellStyle();
/** Input stream read by the console. */
InputStream stdIn();
/** Standard output stream. */
PrintStream stdOut();
/** Standard error stream. */
PrintStream stdErr();
/** Location of the inputrc file. */
File inputrc();
/** Whether logging is enabled. */
boolean logging();
/** Whether completion is disabled. */
boolean completionDisabled();
/** Location of the log file. */
String logFile();
/** Whether the inputrc file is read during init. */
boolean readInputrc();
/** Whether history is disabled. */
boolean historyDisabled();
/** Whether history is persisted during shutdown. */
boolean historyPersistent();
/** Location of the alias file. */
File aliasFile();
/** Whether aliases are enabled. */
boolean aliasEnabled();
/** Whether aliases are persisted. */
boolean persistAlias();
/**
 * Custom AliasManager, used if a non-default manager is needed.
 * Note: only set this if you know what you're doing.
 *
 * @return custom AliasManager
 */
AliasManager aliasManager();
/** Handler invoked on quit. */
QuitHandler quitHandler();
/** Whether the operator parser is enabled. */
boolean operatorParserEnabled();
/** Whether the man command is enabled (currently only for AeshConsole). */
boolean manEnabled();
/** The aesh context. */
AeshContext aeshContext();
/** The export file; defaults to ~/.aesh_export. */
File exportFile();
/** Whether export variables are enabled. */
boolean exportEnabled();
/** Sets whether aesh should persist export variables at shutdown. */
void setPersistExport(boolean persist);
/** Whether aesh persists export variables at shutdown. */
boolean persistExport();
/** Sets whether the system environment is loaded into the export variables. */
void setExportUsesSystemEnvironment(boolean isLoad);
/** Whether the system environment is loaded into the export variables. */
boolean exportUsesSystemEnvironment();
/** Sets the resource used by the console. */
void setResource(Resource resource);
/** @param execute a string that will be pushed to the input stream at start */
void setExecuteAtStart(String execute);
/** @return the execute-at-start string */
String executeAtStart();
/** @param executeFileAtStart file that will be read, parsed and executed at start */
void setExecuteFileAtStart(Resource executeFileAtStart);
/** @return the execute-at-start file */
Resource executeFileAtStart();
/** @return the resource */
Resource resource();
/** @return the command registry */
CommandRegistry<CI> commandRegistry();
/** @return the command invocation provider */
CommandInvocationProvider<CI> commandInvocationProvider();
/** @return the handler invoked when a command is not found */
CommandNotFoundHandler commandNotFoundHandler();
/** @return the completer invocation provider */
CompleterInvocationProvider<CI2> completerInvocationProvider();
/** @return the converter invocation provider */
ConverterInvocationProvider<CI3> converterInvocationProvider();
/** @return the validator invocation provider */
ValidatorInvocationProvider<VI> validatorInvocationProvider();
/** @return the option activator provider */
OptionActivatorProvider<OA> optionActivatorProvider();
/** @return the man page provider */
ManProvider manProvider();
/** @return the command activator provider */
CommandActivatorProvider<CA> commandActivatorProvider();
/** @return the terminal connection */
Connection connection();
Object clone();
/** @return the aggregated invocation providers */
InvocationProviders<CA, CI3, CI2, VI, OA> invocationProviders();
/** @return the export change listener */
ExportChangeListener exportListener();
/** Sets whether control characters are echoed. */
void echoCtrl(boolean echo);
/** Whether control characters are echoed. */
boolean isEchoCtrl();
/** Sets the handler invoked on interrupt. */
void setInterruptHandler(Consumer<Void> handler);
/** @return the interrupt handler */
Consumer<Void> getInterruptHandler();
/** Sets whether the prompt is redrawn on interrupt. */
void redrawPromptOnInterrupt(boolean redraw);
/** Whether the prompt is redrawn on interrupt. */
boolean isRedrawPromptOnInterrupt();
/** Sets the packages scanned for commands. */
void setScanForCommandPackages(String... packages);
/** @return the packages scanned for commands */
String[] getScanForCommandPackages();
/** Sets whether search is enabled while paging. */
void setEnableSearchInPaging(boolean enable);
/** Whether search is enabled while paging. */
boolean enableSearchInPaging();
}
| apache-2.0 |
xiaotangai/KnowledgeBase | src/main/java/liu/study/api/java/awt/interfaces/XActiveEvent.java | 161 | package liu.study.api.java.awt.interfaces;
import java.awt.ActiveEvent;
/**
 * Study/demo interface extending {@link java.awt.ActiveEvent}; re-declares
 * dispatch() without changing its contract.
 */
public interface XActiveEvent extends ActiveEvent {
@Override
void dispatch();
}
| apache-2.0 |
adrian-wang/project-panthera-skin | src/main/java/com/intel/ssg/dcst/panthera/parse/sql/generator/AsteriskGenerator.java | 1954 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.ssg.dcst.panthera.parse.sql.generator;
import org.antlr.runtime.tree.CommonTree;
import org.apache.hadoop.hive.ql.parse.ASTNode;
import org.apache.hadoop.hive.ql.parse.HiveParser;
import com.intel.ssg.dcst.panthera.parse.sql.SqlXlateException;
import com.intel.ssg.dcst.panthera.parse.sql.SqlXlateUtil;
import com.intel.ssg.dcst.panthera.parse.sql.TranslateContext;
/**
 * Translates a SQL "*" node into the Hive AST: a bare "*" becomes an all-column
 * reference, while a "*" with children is the multiplication operator.
 */
public class AsteriskGenerator extends BaseHiveASTGenerator {

  @Override
  public boolean generate(ASTNode hiveRoot, CommonTree sqlRoot, ASTNode currentHiveNode,
      CommonTree currentSqlNode, TranslateContext context) throws SqlXlateException {
    if (currentSqlNode.getChildCount() == 0) {
      // Bare "*" (e.g. SELECT *): expand to an all-column reference.
      currentHiveNode.addChild(super.buildAllColRef());
      return true;
    }
    // If "*" has any child it must be the multiplication operator with two operands.
    assert (currentSqlNode.getChildCount() == 2);
    ASTNode star = SqlXlateUtil.newASTNode(HiveParser.STAR, "*");
    currentHiveNode.addChild(star);
    return generateChildren(hiveRoot, sqlRoot, star, currentSqlNode, context);
  }
}
| apache-2.0 |
ServerStarted/cat | cat-home/src/main/java/com/dianping/cat/report/task/metric/BaselineConfig.java | 817 | package com.dianping.cat.report.task.metric;
import java.util.List;
public class BaselineConfig {
private int m_id;
private String m_key;
private int m_targetDate;
private List<Double> m_weights;
private List<Integer> m_days;
public List<Integer> getDays() {
return m_days;
}
public int getId() {
return m_id;
}
public String getKey() {
return m_key;
}
public int getTargetDate() {
return m_targetDate;
}
public List<Double> getWeights() {
return m_weights;
}
public void setDays(List<Integer> days) {
m_days = days;
}
public void setId(int id) {
m_id = id;
}
public void setKey(String key) {
m_key = key;
}
public void setTargetDate(int targetDate) {
m_targetDate = targetDate;
}
public void setWeights(List<Double> weights) {
m_weights = weights;
}
}
| apache-2.0 |
leleuj/cas | core/cas-server-core-webflow-mfa-api/src/test/java/org/apereo/cas/web/flow/configurer/BaseMultifactorWebflowConfigurerTests.java | 2616 | package org.apereo.cas.web.flow.configurer;
import org.apereo.cas.web.flow.CasWebflowConfigurer;
import org.apereo.cas.web.flow.CasWebflowConstants;
import org.apereo.cas.web.flow.CasWebflowExecutionPlan;
import lombok.val;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.webflow.definition.registry.FlowDefinitionRegistry;
import org.springframework.webflow.engine.Flow;
import org.springframework.webflow.engine.SubflowState;
import static org.junit.jupiter.api.Assertions.*;
/**
 * Base test class verifying that a multifactor webflow configurer registered
 * the expected states in its MFA subflow and wired it into the login flow.
 *
 * @author Misagh Moayyed
 * @since 6.2.0
 */
@Tag("Webflow")
public abstract class BaseMultifactorWebflowConfigurerTests {
    @Autowired
    @Qualifier("casWebflowExecutionPlan")
    protected CasWebflowExecutionPlan casWebflowExecutionPlan;

    @Autowired
    @Qualifier("loginFlowRegistry")
    protected FlowDefinitionRegistry loginFlowDefinitionRegistry;

    protected abstract FlowDefinitionRegistry getMultifactorFlowDefinitionRegistry();

    protected abstract String getMultifactorEventId();

    /** Fetches the MFA subflow from the registry, asserting it is actually registered. */
    private Flow lookupMultifactorFlow() {
        val registry = getMultifactorFlowDefinitionRegistry();
        assertTrue(registry.containsFlowDefinition(getMultifactorEventId()));
        return (Flow) registry.getFlowDefinition(getMultifactorEventId());
    }

    @Test
    public void verifyOperation() {
        val flow = lookupMultifactorFlow();
        assertTrue(flow.containsState(CasWebflowConstants.STATE_ID_MFA_CHECK_BYPASS));
        assertTrue(flow.containsState(CasWebflowConstants.STATE_ID_MFA_CHECK_AVAILABLE));
        assertTrue(flow.containsState(CasWebflowConstants.STATE_ID_MFA_FAILURE));

        // The login flow must reference the MFA flow as a subflow state.
        val loginFlow = (Flow) loginFlowDefinitionRegistry.getFlowDefinition(CasWebflowConfigurer.FLOW_ID_LOGIN);
        assertTrue(loginFlow.getState(getMultifactorEventId()) instanceof SubflowState);
    }

    @Test
    public void verifyTrustedDevice() {
        val flow = lookupMultifactorFlow();
        assertTrue(flow.containsState(CasWebflowConstants.STATE_ID_REGISTER_TRUSTED_DEVICE));
        assertTrue(flow.containsState(CasWebflowConstants.STATE_ID_FINISH_MFA_TRUSTED_AUTH));
        assertTrue(flow.containsState(CasWebflowConstants.STATE_ID_PREPARE_REGISTER_TRUSTED_DEVICE));
        assertTrue(flow.containsState(CasWebflowConstants.STATE_ID_REGISTER_DEVICE));
    }
}
| apache-2.0 |
gosu-lang/gosu-lang | gosu-lab/src/main/java/editor/search/AbstractSearcher.java | 1996 | package editor.search;
import editor.FileTree;
import editor.FileTreeUtil;
import editor.NodeKind;
import editor.util.IProgressCallback;
import java.util.Collections;
import java.util.List;
import java.util.function.Predicate;
/**
 * Base class for searches over a {@link FileTree}: walks the tree, applies a filter,
 * delegates per-file matching to {@link #search}, and mirrors matched paths into
 * a {@link SearchTree} of results.
 */
public abstract class AbstractSearcher
{
  /** Searches a single file node, adding any hits to {@code results}; true if it matched. */
  public abstract boolean search( FileTree tree, SearchTree results );

  /** Convenience wrapper searching a single root. */
  public boolean searchTree( FileTree tree, SearchTree results, Predicate<FileTree> filter, IProgressCallback progress )
  {
    return searchTrees( Collections.singletonList( tree ), results, filter, progress );
  }

  public boolean searchTrees( List<FileTree> trees, SearchTree results, Predicate<FileTree> filter, IProgressCallback progress )
  {
    // Abort checking happens once per recursion level.
    if( progress != null && progress.isAbort() )
    {
      return false;
    }

    boolean anyHit = false;
    for( FileTree node : trees )
    {
      if( node.isFile() && filter.test( node ) )
      {
        if( progress != null )
        {
          progress.incrementProgress( node.getName() );
        }
        // Always run the search, even when an earlier node already matched.
        if( search( node, results ) )
        {
          anyHit = true;
        }
      }
      else if( !node.isLeaf() )
      {
        if( searchTrees( node.getChildren(), results, filter, progress ) )
        {
          anyHit = true;
        }
      }
    }
    return anyHit;
  }

  /** Finds (or creates) the result-tree node mirroring {@code tree}'s path. */
  protected SearchTree getOrMakePath( FileTree tree, SearchTree results )
  {
    // Resolve the parent chain first (recursively), then find or create this level.
    SearchTree parent = tree.getParent() == null
                        ? results
                        : getOrMakePath( tree.getParent(), results );

    for( SearchTree child : parent.getChildren() )
    {
      SearchTree.SearchTreeNode childNode = child.getNode();
      if( childNode != null && childNode.getFile() == tree )
      {
        return child;
      }
    }

    SearchTree dir = new SearchTree( NodeKind.Directory, new SearchTree.SearchTreeNode( tree, null ) );
    parent.addViaModel( dir );
    return dir;
  }

  protected boolean isExcluded( FileTree tree )
  {
    // Only supported text files participate in searches.
    return !FileTreeUtil.isSupportedTextFile( tree );
  }
}
| apache-2.0 |
indeedeng/util | io/src/main/java/com/indeed/util/io/Files.java | 35710 | // $Id$
package com.indeed.util.io;
import com.google.common.base.Charsets;
import com.google.common.base.Preconditions;
import com.google.common.base.Supplier;
import com.google.common.hash.Hashing;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nonnull;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.PrintStream;
import java.io.PrintWriter;
import java.io.UnsupportedEncodingException;
import java.io.Writer;
import java.nio.channels.Channels;
import java.nio.channels.FileChannel;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Pattern;
import java.util.zip.CRC32;
import java.util.zip.Checksum;
public class Files {
// you may wish to use this particular logger sparingly, as many times it can be more helpful
// if you log stuff to a more specific (context-specific) logger than something in common-util
private static final Logger LOGGER = LoggerFactory.getLogger(Files.class);
private Files() {}
/**
 * Joins path segments using the platform separator.
 * Historical contract: no parts yields {@code null}; a single part is returned verbatim.
 */
public static String buildPath(String... parts) {
    if (parts.length == 0) {
        return null;
    }
    if (parts.length == 1) {
        return parts[0];
    }
    // Fold the remaining segments onto the first via File so the platform separator is used.
    File path = new File(parts[0], parts[1]);
    for (int i = 2; i < parts.length; i++) {
        path = new File(path, parts[i]);
    }
    return path.getPath();
}
/**
 * Serializes an object to a file, throwing an exception if it fails.
 *
 * <p>The object is first written to a temp file in the target's directory (flushed, fsynced
 * and closed), the temp file is re-read and checksum-verified against what was written, and
 * only then atomically renamed onto the target path, so readers never see a partial file.
 *
 * @param obj object to write to a file
 * @param file path to save the object to
 * @throws java.io.IOException if the file could not be written, flushed, synced, closed,
 *     verified, or renamed into place
 */
public static void writeObjectToFileOrDie2(
        @Nonnull final Object obj, @Nonnull final String file) throws IOException {
    Preconditions.checkNotNull(file, "file argument is required!");
    Preconditions.checkArgument(!file.isEmpty(), "file argument is required!");
    // todo: should 'obj' be required? do we ever WANT to write 'null' to an artifact?
    Preconditions.checkNotNull(obj, "cannot write a 'null' object");

    final File targetFile = new File(file);

    // write object to temporary file that is flushed, fsynced, and closed by the time it returns
    final ObjectOutputStreamCallback callback = new ObjectOutputStreamCallback(obj);
    final File tmpFile = writeDataToTempFileOrDie2(callback, targetFile);
    final long checksumForWrittenData = callback.getChecksumValue();

    // verify that what we WROTE to the disk is then immediately READABLE before allowing the
    // rename to happen
    final long checksumFound = computeFileChecksum(tmpFile, new CRC32());
    if (checksumForWrittenData != checksumFound) {
        // BUGFIX: remove the orphaned temp file instead of leaking it into the directory
        if (!tmpFile.delete()) {
            LOGGER.warn("could not delete temp file after checksum mismatch: {}", tmpFile);
        }
        throw new IOException(
                "Data written to file is not what we expected, "
                        + checksumFound
                        + " != "
                        + checksumForWrittenData
                        + ": "
                        + tmpFile);
    }

    if (!tmpFile.renameTo(targetFile)) {
        // failed to atomically rename from temp file to target file; clean up the temp file
        // and throw, leaving the filesystem in a sane state at all times
        if (!tmpFile.delete()) {
            LOGGER.warn("could not delete temp file after failed rename: {}", tmpFile);
        }
        throw new IOException("Could not rename '" + tmpFile + "' to '" + targetFile + "'.");
    }
}
/**
 * @deprecated Use {@link #writeObjectToFileOrDie2(java.lang.Object, java.lang.String)};
 *     the logger parameter is ignored.
 */
@Deprecated
public static void writeObjectToFileOrDie(
@Nonnull final Object obj,
@Nonnull final String file,
@Nonnull final org.apache.log4j.Logger log)
throws IOException {
writeObjectToFileOrDie2(obj, file);
}
/**
 * Callback that serializes a single object through a checksumming stream, recording the
 * CRC32 of the serialized bytes. The checksum is only meaningful after
 * {@link #writeAndFlushData} has completed.
 */
private static class ObjectOutputStreamCallback implements OutputStreamCallback {
// CRC32 of the bytes written by the last writeAndFlushData() call; 0 until then
private long checksumForWrittenData = 0L;
@Nonnull private final Object obj;
private ObjectOutputStreamCallback(@Nonnull Object obj) {
this.obj = obj;
}
public long getChecksumValue() {
return checksumForWrittenData;
}
@Override
public void writeAndFlushData(@Nonnull OutputStream outputStream) throws IOException {
// checksum the exact bytes flowing to the destination stream
final ChecksummingOutputStream checksummingOutputStream =
new ChecksummingOutputStream(new BufferedOutputStream(outputStream));
final ObjectOutputStream out = new ObjectOutputStream(checksummingOutputStream);
// write the data
out.writeObject(obj);
// flush the various streams
out.flush();
checksumForWrittenData = checksummingOutputStream.getChecksumValue();
}
}
/**
 * OutputStream decorator that folds every written byte into a CRC32 checksum while
 * forwarding to the wrapped stream. The array write variants are overridden to bypass
 * FilterOutputStream's byte-at-a-time default while still updating the checksum.
 */
private static class ChecksummingOutputStream extends FilterOutputStream {
@Nonnull private final Checksum checksummer;
private ChecksummingOutputStream(OutputStream out) {
super(out);
checksummer = new CRC32();
}
@Override
public void write(int b) throws IOException {
out.write(b);
// Checksum.update(int) only uses the low 8 bits; mask for clarity
checksummer.update(b & 0xff);
}
@Override
public void write(byte[] b) throws IOException {
out.write(b);
checksummer.update(b, 0, b.length);
}
@Override
public void write(byte[] b, int off, int len) throws IOException {
out.write(b, off, len);
checksummer.update(b, off, len);
}
/**
 * Returns the current checksum value.
 *
 * @return the current checksum value
 */
public long getChecksumValue() {
return checksummer.getValue();
}
}
/** Callback that writes its payload to the given stream and flushes it before returning. */
private static interface OutputStreamCallback {
void writeAndFlushData(@Nonnull final OutputStream outputStream) throws IOException;
}
/**
 * Writes data via {@code callback} to a fresh temp file in the target file's directory and
 * returns a reference to it. By the time this returns, the data has been written, flushed,
 * fsynced (both content and length) and the file closed, so the caller may safely rename
 * it over the target.
 */
@Nonnull
private static File writeDataToTempFileOrDie2(
@Nonnull final OutputStreamCallback callback, @Nonnull final File targetFile)
throws IOException {
Preconditions.checkNotNull(callback, "callback argument is required!");
Preconditions.checkNotNull(targetFile, "targetFile argument is required!");
FileOutputStream fileOut = null;
FileChannel fileChannel = null;
try {
final String targetFinalName = targetFile.getName();
final File targetDirectory = targetFile.getParentFile();
// open temporary file in the SAME directory so a later rename is not a cross-device move
final File tmpFile = File.createTempFile(targetFinalName, ".tmp", targetDirectory);
fileOut = new FileOutputStream(tmpFile);
fileChannel = fileOut.getChannel();
// make sure to use an output stream that flows THROUGH the FileChannel, so that
// FileChannel.force(true) can do what it's supposed to
// write the data AND flush it
callback.writeAndFlushData(Channels.newOutputStream(fileChannel));
return tmpFile;
} finally {
try {
// fsync to disk (both data AND length)
if (fileChannel != null) {
fileChannel.force(true);
}
} finally {
// close the open file even if the write or the force() above threw
if (fileOut != null) {
fileOut.close();
}
}
}
}
/**
 * @deprecated Use {@link
 *     Files#writeDataToTempFileOrDie2(com.indeed.util.io.Files.OutputStreamCallback,
 *     java.io.File)}; the logger parameter is ignored.
 */
@Deprecated
@Nonnull
private static File writeDataToTempFileOrDie(
@Nonnull final OutputStreamCallback callback,
@Nonnull final File targetFile,
@Nonnull final org.apache.log4j.Logger log)
throws IOException {
return writeDataToTempFileOrDie2(callback, targetFile);
}
/**
 * Writes the given lines (UTF-8, '\n'-terminated) to a fresh temp file in the target's
 * directory and returns it; the data is flushed, fsynced and the file closed before return,
 * so the caller may safely rename it over the target.
 *
 * @param text lines to write, one per array element
 * @param targetFile the eventual destination; only its name and directory are used here
 * @return the temp file containing the written text
 * @throws IOException if the file cannot be created, written, flushed, synced, or closed
 */
@Nonnull
private static File writeTextToTempFileOrDie2(
        @Nonnull final String[] text, @Nonnull final File targetFile) throws IOException {
    // BUGFIX: message previously said "callback argument is required!" (copy-paste)
    Preconditions.checkNotNull(text, "text argument is required!");
    Preconditions.checkNotNull(targetFile, "targetFile argument is required!");

    final String targetFinalName = targetFile.getName();
    final File targetDirectory = targetFile.getParentFile();

    // open temporary file in the SAME directory so a later rename is not a cross-device move
    final File tmpFile = File.createTempFile(targetFinalName, ".tmp", targetDirectory);
    final FileOutputStream fileOut = new FileOutputStream(tmpFile);
    // BUGFIX: the old implementation wrapped the writer in a PrintWriter, which swallows
    // IOExceptions (they are only visible via checkError()), so write failures could pass
    // silently. A plain BufferedWriter propagates them.
    final Writer writer = new BufferedWriter(new OutputStreamWriter(fileOut, Charsets.UTF_8));
    FileChannel fileChannel = null;
    try {
        fileChannel = fileOut.getChannel();
        for (final String line : text) {
            writer.write(line);
            writer.write('\n');
        }
        writer.flush();
        return tmpFile;
    } finally {
        try {
            // fsync to disk (both data AND length)
            if (fileChannel != null) {
                fileChannel.force(true);
            }
        } finally {
            fileOut.close();
        }
    }
}
/**
 * @deprecated Use {@link #writeTextToTempFileOrDie2(java.lang.String[], java.io.File)};
 *     the logger parameter is ignored.
 */
@Deprecated
@Nonnull
private static File writeTextToTempFileOrDie(
@Nonnull final String[] text,
@Nonnull final File targetFile,
@Nonnull final org.apache.log4j.Logger log)
throws IOException {
return writeTextToTempFileOrDie2(text, targetFile);
}
/**
 * Writes an object to a file only if it is different from the current contents of the file, or
 * if the file does not exist. Note that you must have enough heap to contain the entire
 * contents of the object graph.
 *
 * <p>When a write is needed, the serialized bytes are first written (flushed and fsynced) to a
 * temporary file in the same directory, the written bytes are verified by checksum, and only
 * then is the temporary file renamed over the target.
 *
 * @param obj object to write to a file
 * @param file path to save the object to
 * @return true if the file was actually written, false if the file was unchanged
 * @throws java.io.IOException if the existing file could not be read for comparison, if the
 * existing file could not be erased, or if the new file could not be written, flushed,
 * synced, or closed
 */
public static boolean writeObjectIfChangedOrDie2(
@Nonnull final Object obj, @Nonnull final String file) throws IOException {
Preconditions.checkNotNull(file, "file argument is required!");
Preconditions.checkArgument(!file.isEmpty(), "file argument is required!");
// todo: should 'obj' be required? do we ever WANT to write 'null' to an artifact?
Preconditions.checkNotNull(obj, "cannot write a 'null' object to file %s", file);
// first serialize the object into a byte array, this should almost never fail
final IndexableByteArrayOutputStream baos = new IndexableByteArrayOutputStream(524288);
{
final ObjectOutputStream out = new ObjectOutputStream(baos);
out.writeObject(obj);
out.close();
baos.close();
}
// only touch the filesystem when the serialized form differs from what is on disk
if (isChanged(baos.unsafeByteArrayView(), baos.size(), file)) {
// compute the checksum of what we intend on writing to disk
final Checksum checksum = new CRC32();
checksum.update(baos.unsafeByteArrayView(), 0, baos.size());
final long checksumForWrittenData = checksum.getValue();
final File targetFile = new File(file);
// write object to temporary file that is flushed, fsynced, and closed by the time it
// returns
final File tmpFile =
writeDataToTempFileOrDie2(
outputStream -> {
// write the data
baos.writeTo(outputStream);
// flush it, so that the fsync() has everything it needs
outputStream.flush();
},
targetFile);
// verify that what we WROTE to the disk is then immediately READABLE before allowing
// the rename to happen
checksum.reset();
final long checksumFound = computeFileChecksum(tmpFile, new CRC32());
if (checksumForWrittenData != checksumFound) {
throw new IOException(
"Data written to file is not what we expected, "
+ checksumFound
+ " != "
+ checksumForWrittenData
+ ": "
+ tmpFile);
}
if (!tmpFile.renameTo(targetFile)) {
// failed to atomically rename from temp file to target file, so throw an exception,
// leaving the filesystem in
// a sane state at all times
throw new IOException(
"Could not rename '" + tmpFile + "' to '" + targetFile + "'.");
}
return true;
} else {
// serialized form is identical to the file's current contents; nothing to do
return false;
}
}
/**
 * @deprecated Use {@link #writeObjectIfChangedOrDie2(java.lang.Object, java.lang.String)};
 *     the {@code log} parameter is never used.
 */
@Deprecated
public static boolean writeObjectIfChangedOrDie(
@Nonnull final Object obj,
@Nonnull final String file,
// kept for source compatibility only; intentionally unused
@Nonnull final org.apache.log4j.Logger log)
throws IOException {
return writeObjectIfChangedOrDie2(obj, file);
}
/**
 * Computes a checksum over the entire contents of {@code file} using the supplied
 * {@link Checksum} implementation.
 *
 * <p>BUG FIX: the previous implementation ignored the {@code checksum} argument and always
 * computed a CRC32 via Guava, silently returning the wrong value for any other
 * {@link Checksum} implementation. For CRC32 (the only value passed inside this class) the
 * result is unchanged.
 *
 * @param file file to read
 * @param checksum checksum accumulator to use; updated in place
 * @return the resulting checksum value
 * @throws IOException if the file cannot be read
 */
public static long computeFileChecksum(
        @Nonnull final File file, @Nonnull final Checksum checksum) throws IOException {
    final InputStream in = new BufferedInputStream(new FileInputStream(file));
    try {
        final byte[] buffer = new byte[8192];
        for (int read = in.read(buffer); read != -1; read = in.read(buffer)) {
            checksum.update(buffer, 0, read);
        }
        return checksum.getValue();
    } finally {
        in.close();
    }
}
/**
 * Writes an object to a file.
 *
 * <p>Any exception is caught, logged via {@code LOGGER}, and converted into a {@code false}
 * return value.
 *
 * @param obj object to serialize
 * @param file path to write to
 * @return true if the file was successfully written, false otherwise
 * @deprecated use {@link #writeObjectToFileOrDie2(Object, String)} instead
 */
@Deprecated
public static boolean writeObjectToFile(Object obj, String file) {
try {
writeObjectToFileOrDie2(obj, file);
return true;
} catch (Exception e) {
LOGGER.error(
e.getClass()
+ ": writeObjectToFile("
+ file
+ ") encountered exception: "
+ e.getMessage(),
e);
return false;
}
}
/**
 * Writes an object to a file only if it is different from the current contents of the file, or
 * if the file does not exist. Note that you must have enough heap to contain the entire
 * contents of the object graph.
 *
 * <p>Any exception is caught, logged via {@code LOGGER}, and converted into a {@code false}
 * return value.
 *
 * @param obj object to serialize
 * @param filepath path to write to
 * @return true if the file was actually written, false otherwise
 * @deprecated use {@link #writeObjectIfChangedOrDie2(Object, String)} instead
 */
@Deprecated
public static boolean writeObjectIfChanged(Object obj, String filepath) {
try {
return writeObjectIfChangedOrDie2(obj, filepath);
} catch (Exception e) {
LOGGER.error(
e.getClass()
+ ": writeObjectIfChanged("
+ filepath
+ ") encountered exception: "
+ e.getMessage(),
e);
return false;
}
}
/**
 * Returns true iff the bytes in an array are different from the bytes contained in the given
 * file, or if the file does not exist.
 *
 * @param bytes candidate contents; only the first {@code length} bytes are compared
 * @param length number of valid bytes in {@code bytes}
 * @param filepath file to compare against
 * @return true if the file is missing or its contents differ from the given bytes
 * @throws IOException if the file exists but cannot be read
 */
private static boolean isChanged(final byte[] bytes, final int length, final String filepath)
throws IOException {
Preconditions.checkArgument(length >= 0, "invalid length value: %s", length);
Preconditions.checkArgument(bytes.length >= length, "invalid length value: %s", length);
File file = new File(filepath);
if (!file.exists()) {
return true;
}
// cheap size comparison before reading any data
if (file.length() != length) {
return true;
}
final int BUFLEN = 1048576; // 1 megabyte
byte[] buffer = new byte[BUFLEN];
InputStream is = new FileInputStream(file);
try {
// stream the file in chunks, comparing each chunk against the array at the
// corresponding offset; total lengths are already known to be equal
int len;
for (int offset = 0; ; offset += len) {
len = is.read(buffer);
if (len < 0) break; // eof
if (!arrayCompare(bytes, offset, buffer, 0, len)) return true;
}
return false;
} finally {
is.close();
}
}
/**
 * Returns true if the two byte-array chunks of the given length hold the same bytes,
 * false otherwise.
 */
private static boolean arrayCompare(byte[] a, int offset1, byte[] a2, int offset2, int length) {
    for (int i = 0; i < length; i++) {
        if (a[offset1 + i] != a2[offset2 + i]) {
            return false;
        }
    }
    return true;
}
/**
 * Reads an object of type {@code T} from {@code file}.
 *
 * <p>Returns null (instead of throwing) on any failure: missing file, unreadable stream
 * header, corrupt data, or unknown class.
 *
 * @param file file from which the object should be read
 * @param clazz non-null Class object for {@code T}
 * @param printException whether or not any stacktraces should be printed
 * @param <T> the return type
 * @return possibly null object of type {@code T}.
 */
public static <T> T readObjectFromFile(String file, Class<T> clazz, boolean printException) {
final FileInputStream fileIn;
try {
fileIn = new FileInputStream(file);
} catch (Exception e) {
printException(e, printException);
return null;
}
final BufferedInputStream bufferedIn = new BufferedInputStream(fileIn);
final ObjectInputStream objIn;
try {
objIn = new ObjectInputStream(bufferedIn);
} catch (Exception e) {
// the serialization header could not be read; release the file handle
printException(e, printException);
closeInputStream(fileIn, printException);
return null;
}
final Object ret;
try {
ret = objIn.readObject();
} catch (Exception e) {
printException(e, printException);
closeInputStream(objIn, printException); // objIn.close() also closes fileIn
return null;
}
closeInputStream(objIn, printException); // objIn.close() also closes fileIn
return clazz.cast(ret);
}
/**
 * Convenience for {@link #readObjectFromClasspathDir(String, Class, boolean)} where:
 *
 * <ul>
 * <li>{@code clazz} is Object.class
 * <li>{@code printException} is false
 * </ul>
 *
 * <p>(Note: despite the similar name, this reads from the classpath, not the filesystem;
 * the previous javadoc incorrectly pointed at {@code readObjectFromFile}.)
 */
public static Object readObjectFromClasspathDir(String file) {
return readObjectFromClasspathDir(file, Object.class, false);
}
/**
 * Reads a serialized object of type {@code T} from a classpath resource.
 *
 * <p>Returns null (instead of throwing) on any failure: missing resource, unreadable stream
 * header, corrupt data, or unknown class.
 *
 * @param file resource path, as understood by {@link Class#getResourceAsStream}
 * @param clazz non-null Class object for {@code T}
 * @param printException whether or not any stacktraces should be printed
 * @param <T> the return type
 * @return possibly null object of type {@code T}.
 */
public static <T> T readObjectFromClasspathDir(
        String file, Class<T> clazz, boolean printException) {
    final InputStream inStream = Files.class.getResourceAsStream(file);
    // BUG FIX: getResourceAsStream returns null (rather than throwing) when the resource
    // does not exist; the old code only failed later with a confusing stream error.
    if (inStream == null) {
        printException(
                new FileNotFoundException("classpath resource not found: " + file),
                printException);
        return null;
    }
    final BufferedInputStream bufferedIn = new BufferedInputStream(inStream);
    final ObjectInputStream objIn;
    try {
        objIn = new ObjectInputStream(bufferedIn);
    } catch (Exception e) {
        // the serialization header could not be read; release the resource stream
        printException(e, printException);
        closeInputStream(inStream, printException);
        return null;
    }
    final Object ret;
    try {
        ret = objIn.readObject();
    } catch (Exception e) {
        printException(e, printException);
        closeInputStream(objIn, printException); // objIn.close() also closes inStream
        return null;
    }
    closeInputStream(objIn, printException); // objIn.close() also closes inStream
    return clazz.cast(ret);
}
/**
 * Closes {@code in}, printing the stacktrace (when {@code printException} is set) instead of
 * propagating any failure.
 */
private static void closeInputStream(final InputStream in, final boolean printException) {
try {
in.close();
} catch (Exception e) {
printException(e, printException);
}
}
/** Prints the stacktrace of {@code e} to stderr, but only when {@code reallyPrintIt} is set. */
private static void printException(final Exception e, final boolean reallyPrintIt) {
    if (reallyPrintIt) {
        e.printStackTrace();
    }
}
/**
 * Convenience for {@link #readObjectFromFile(String file, Class clazz, boolean printException)}
 * where:
 *
 * <ul>
 * <li>{@code printException} is false
 * </ul>
 *
 * @return the deserialized object, or null on failure
 */
public static <T> T readObjectFromFile(String file, Class<T> clazz) {
return readObjectFromFile(file, clazz, false);
}
/**
 * Convenience for {@link #readObjectFromFile(String file, Class clazz, boolean printException)}
 * where:
 *
 * <ul>
 * <li>{@code clazz} is Object.class
 * </ul>
 *
 * @return the deserialized object, or null on failure
 */
public static Object readObjectFromFile(String file, boolean printException) {
return readObjectFromFile(file, Object.class, printException);
}
/**
 * Convenience for {@link #readObjectFromFile(String file, Class clazz, boolean printException)}
 * where:
 *
 * <ul>
 * <li>{@code clazz} is Object.class
 * <li>{@code printException} is false
 * </ul>
 *
 * @return the deserialized object, or null on failure
 */
public static Object readObjectFromFile(String file) {
return readObjectFromFile(file, Object.class, false);
}
/**
 * Reads all lines of the UTF-8 text file at {@code file}.
 *
 * @param file path of the file to read
 * @return the file's lines in order, or null if the file could not be read
 *     (the stacktrace is printed to stderr)
 */
public static String[] readTextFile(String file) {
try {
return readTextFileOrDie(file);
} catch (Exception e) {
e.printStackTrace();
return null;
}
}
/**
 * Reads the UTF-8 text file at {@code file} and returns its lines.
 *
 * @param file path of the file to read
 * @return the file's lines, in order, without line terminators
 * @throws IOException if the file cannot be opened or read
 */
public static String[] readTextFileOrDie(String file) throws IOException {
    final List<String> contents = new ArrayList<String>();
    final BufferedReader reader = getBufferedReaderForUtf8(file);
    try {
        for (String line = reader.readLine(); line != null; line = reader.readLine()) {
            contents.add(line);
        }
    } finally {
        reader.close();
    }
    return contents.toArray(new String[contents.size()]);
}
/**
 * Reads all the lines in the given file, truncating everything that happens after # (including
 * the #).
 *
 * @param file path of the UTF-8 text file to read
 * @return a List of the lines in the file in the order they appear (whitespace trimmed);
 *     lines that are blank after comment-stripping are omitted
 * @throws IOException if the file cannot be opened or read
 */
public static List<String> readCommentedTextFile(final String file) throws IOException {
    final BufferedReader reader = getBufferedReaderForUtf8(file);
    try {
        final List<String> lines = new ArrayList<String>();
        String raw;
        while ((raw = reader.readLine()) != null) {
            String candidate = raw.trim();
            final int hashIndex = candidate.indexOf('#');
            if (hashIndex >= 0) {
                // drop the comment and any whitespace that preceded it
                candidate = candidate.substring(0, hashIndex).trim();
            }
            if (candidate.length() > 0) {
                lines.add(candidate);
            }
        }
        return lines;
    } finally {
        try {
            reader.close();
        } catch (final IOException e) {
            // intentionally empty: a close failure should not discard the data already read
        }
    }
}
/**
 * Parses each line of {@code file} as a decimal int.
 *
 * @param file path of the UTF-8 text file to read
 * @return one int per line, or null if the file could not be read
 */
public static int[] readIntsFromTextFile(String file) {
    final String[] lines = readTextFile(file);
    if (lines == null) {
        return null;
    }
    final int[] values = new int[lines.length];
    int index = 0;
    for (String line : lines) {
        values[index++] = Integer.parseInt(line);
    }
    return values;
}
/**
 * Parses each line of {@code file} as a float.
 *
 * @param file path of the UTF-8 text file to read
 * @return one float per line, or null if the file could not be read
 */
public static float[] readFloatsFromTextFile(String file) {
    final String[] lines = readTextFile(file);
    if (lines == null) {
        return null;
    }
    final float[] values = new float[lines.length];
    int index = 0;
    for (String line : lines) {
        values[index++] = Float.parseFloat(line);
    }
    return values;
}
/**
 * Writes the given lines to {@code file} in UTF-8, one per line, swallowing (but printing)
 * any failure.
 *
 * @deprecated not atomic and errors are swallowed; use
 *     {@link #writeToTextFileOrDie(String[], String)} instead
 */
@Deprecated
public static void writeToTextFile(String[] lines, String file) {
    try {
        final BufferedWriter bufferedWriter = getBufferedWriterForUtf8(file);
        // BUG FIX: close in a finally block so the writer (and its file handle) is not
        // leaked when a write fails partway through.
        try {
            for (String line : lines) {
                bufferedWriter.write(line);
                bufferedWriter.newLine();
            }
        } finally {
            bufferedWriter.close();
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
/**
 * Atomically replaces {@code file} with the given lines (UTF-8, '\n' terminated): the text is
 * first written and fsynced to a temporary file in the same directory, then renamed into place.
 *
 * @param lines lines to write
 * @param file destination path
 * @throws IOException if writing the temp file or the final rename fails
 */
public static void writeToTextFileOrDie(
@Nonnull final String[] lines, @Nonnull final String file) throws IOException {
// Write out a temp file (or die)
final File f = new File(file);
final File temp = writeTextToTempFileOrDie2(lines, f);
// Rename the temp file if writing succeeded
if (!temp.renameTo(f)) {
throw new IOException(
String.format("couldn't rename %s to %s", temp.getCanonicalPath(), file));
}
}
/**
 * Appends the given lines to {@code file} (created if absent), one per line, swallowing
 * (but printing) any failure.
 *
 * @param lines lines to append
 * @param file destination path, opened in append mode
 */
public static void appendToTextFile(String[] lines, String file) {
    try {
        PrintStream out = new PrintStream(new FileOutputStream(file, true));
        // BUG FIX: close in a finally block so the stream is not leaked if a write fails.
        try {
            for (String line : lines) {
                out.println(line);
            }
        } finally {
            out.close();
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
/**
 * Creates a uniquely named temporary directory under the system temp directory.
 *
 * @see #getTempDirectory(String, String, File)
 */
public static String getTempDirectory(String prefix, String suffix) throws IOException {
return getTempDirectory(prefix, suffix, (File) null);
}
/**
 * Creates a uniquely named temporary directory under {@code directory}, creating the parent
 * directory first if necessary.
 *
 * @param prefix prefix for the directory name
 * @param suffix suffix for the directory name
 * @param directory parent directory path, or null for the system temp directory
 * @return absolute path of the created directory
 * @throws IOException if the parent directory could not be created
 */
public static String getTempDirectory(String prefix, String suffix, String directory)
throws IOException {
File dir = null;
if (directory != null) {
dir = new File(directory);
if (!dir.exists()) {
if (!dir.mkdirs()) {
throw new IOException(
"directory did not exist and could not be created: " + directory);
}
}
}
return getTempDirectory(prefix, suffix, dir);
}
/**
 * Creates a uniquely named temporary directory.
 *
 * <p>Note: there is an unavoidable race between deleting the placeholder temp file and
 * re-creating it as a directory; callers needing stronger guarantees should use
 * {@code java.nio.file.Files.createTempDirectory}.
 *
 * @param prefix prefix for the directory name
 * @param suffix suffix for the directory name
 * @param directory parent directory, or null for the system temp directory
 * @return absolute path of the created directory
 * @throws IOException if the directory could not be created
 */
public static String getTempDirectory(String prefix, String suffix, File directory)
        throws IOException {
    File f = File.createTempFile(prefix, suffix, directory);
    // BUG FIX: the return values of delete()/mkdir() were ignored, so the method could
    // silently return a path that is not actually a directory.
    if (!f.delete() || !f.mkdir()) {
        throw new IOException("could not create temp directory: " + f.getAbsolutePath());
    }
    return f.getAbsolutePath();
}
/**
 * Deletes file or recursively deletes a directory
 *
 * <p>NOTE: this returns true if the file was actually deleted, and false for 2 cases: 1. file
 * did not exist to start with 2. File.delete() failed at some point for some reason
 *
 * <p>use {@link #deleteOrDie(String)} instead if you want a clearer distinction between the
 * 'falsy' responses
 *
 * @param file path to erase
 * @return true if all deletions were successful. If a deletion fails, the method stops
 *     attempting to delete and returns false
 */
public static boolean delete(String file) {
    File f = new File(file);
    if (f.isDirectory()) {
        // first insure the directory is empty
        String[] children = f.list();
        // BUG FIX: File.list() returns null on I/O error (or if the path stops being a
        // directory in a race); the old code would then throw NullPointerException.
        if (children == null) {
            return false;
        }
        for (String child : children) {
            if (!delete(Files.buildPath(file, child))) {
                return false;
            }
        }
    }
    return f.delete();
}
/**
 * Deletes file or recursively deletes a directory
 *
 * <p>Unlike {@link #delete(String)}, a failed deletion of an existing path raises an
 * exception instead of returning false.
 *
 * @param file path to erase
 * @return true if all deletions were successful, false if file did not exist
 * @throws IOException if deletion fails and the file still exists at the end
 */
public static boolean deleteOrDie(@Nonnull final String file) throws IOException {
// delete() returns true if the file was actually deleted
// and false for 2 cases:
// 1. file did not exist to start with
// 2. File.delete() failed at some point for some reason
// so we disambiguate the 'false' case below by checking for file existence
final boolean fileWasDeleted = delete(file);
if (fileWasDeleted) {
// file was definitely deleted
return true;
} else {
final File fileObj = new File(file);
if (fileObj.exists()) {
throw new IOException(
"File still exists after erasure, cannot write object to file: " + file);
}
// file was not deleted, because it does not exist
return false;
}
}
/**
 * Use this function instead of {@link java.io.File#getCanonicalPath()}, as that method can fail
 * during symlink changes; this method makes up to three attempts, sleeping 100 milliseconds
 * between them. Returns null if unsuccessful after retrying.
 *
 * @param path path to canonicalize
 * @return the canonical pathname, or null if unsuccessful
 */
public static String getCanonicalPath(String path) {
if (path == null) return null;
return getCanonicalPath(
path,
new Supplier<Boolean>() {
// two retries remain after the initial attempt (three attempts in total)
int retries = 2;
public Boolean get() {
if (retries-- == 0) {
return false;
}
try {
// if retries not exhausted, sleep 100 milliseconds
Thread.sleep(100);
} catch (InterruptedException e) {
// NOTE(review): interruption stops retrying, but the interrupt flag is
// not restored here — confirm whether callers rely on it
return false;
}
return true;
}
});
}
/**
 * Attempts {@link File#getCanonicalPath()} repeatedly, consulting {@code retryPolicy} after
 * each failed attempt; returns null once the policy declines another try.
 *
 * @param path path to canonicalize
 * @param retryPolicy returns true if another attempt should be made
 * @return the canonical pathname, or null if every attempt failed
 */
static String getCanonicalPath(String path, Supplier<Boolean> retryPolicy) {
    do {
        final File candidate = new File(path);
        if (candidate.exists()) {
            try {
                return candidate.getCanonicalPath();
            } catch (IOException e) {
                // canonicalization can fail transiently (e.g. during symlink swaps);
                // fall through and ask the policy whether to try again
            }
        }
    } while (retryPolicy.get());
    return null;
}
/**
 * Gets the directory name of the canonical path -- the last element in the result of {@link
 * #getCanonicalPath(String)}.
 *
 * @param path path to canonicalize
 * @return canonical directory name, or null if the path could not be canonicalized
 */
public static String getCanonicalDirectoryName(String path) {
String realPath = getCanonicalPath(path);
if (realPath == null) {
return null;
}
String separator = System.getProperty("file.separator", "/");
// quote the separator: backslashes (and other regex metacharacters) must be escaped
// before being used as a split() pattern
separator = Pattern.quote(separator);
String[] pathElements = realPath.split(separator);
return pathElements.length > 0 ? pathElements[pathElements.length - 1] : realPath;
}
/**
 * Reads the entire file into a byte array. The whole file must fit in memory.
 *
 * @param file path of the file to read
 * @return the file's contents
 * @throws IOException if the file cannot be opened or read
 */
public static byte[] loadFileAsByteArray(final String file) throws IOException {
    final InputStream inputStream = new BufferedInputStream(new FileInputStream(file));
    try {
        final ByteArrayOutputStream out = new ByteArrayOutputStream(10000);
        // read in chunks rather than byte-at-a-time; the result is identical but this
        // avoids one method-call round trip per byte
        final byte[] buffer = new byte[8192];
        for (int read = inputStream.read(buffer); read != -1; read = inputStream.read(buffer)) {
            out.write(buffer, 0, read);
        }
        return out.toByteArray();
    } finally {
        inputStream.close();
    }
}
/**
 * Computes the hex-encoded digest of the file's contents. The whole file is loaded into
 * memory first, so this is unsuitable for very large files.
 *
 * @param file path of the file to hash
 * @param algorithm digest algorithm name understood by {@link MessageDigest#getInstance}
 * @return lowercase hex encoding of the digest
 * @throws IOException if the file cannot be read
 * @throws NoSuchAlgorithmException if {@code algorithm} is unknown
 */
public static String getFileHash(final String file, final String algorithm)
throws IOException, NoSuchAlgorithmException {
final MessageDigest md = MessageDigest.getInstance(algorithm);
return Files.toHex(md.digest(loadFileAsByteArray(file)));
}
/**
 * Converts a byte array to a hex string. The String returned will be of length exactly {@code
 * bytes.length * 2}.
 */
@Nonnull
public static String toHex(@Nonnull final byte[] bytes) {
    final StringBuilder buf = new StringBuilder(bytes.length * 2);
    for (byte b : bytes) {
        // emit high nibble then low nibble: always two lowercase hex digits per byte
        buf.append(Character.forDigit((b >> 4) & 0xf, 16));
        buf.append(Character.forDigit(b & 0xf, 16));
    }
    return buf.toString();
}
/**
 * Opens {@code file} for reading as UTF-8 text.
 *
 * @param file path of the file to open
 * @return a buffered UTF-8 reader for the file
 * @throws FileNotFoundException if the file does not exist or cannot be opened
 */
public static BufferedReader getBufferedReaderForUtf8(String file)
        throws FileNotFoundException {
    // Use the Charset constant instead of the name "UTF-8": this overload cannot throw
    // UnsupportedEncodingException, so the old impossible catch block is unnecessary.
    return new BufferedReader(
            new InputStreamReader(
                    new FileInputStream(file), java.nio.charset.StandardCharsets.UTF_8));
}
/**
 * Opens {@code file} for writing as UTF-8 text, truncating any existing contents.
 *
 * @param file path of the file to create/overwrite
 * @return a buffered UTF-8 writer for the file
 * @throws FileNotFoundException if the file cannot be opened for writing
 */
private static BufferedWriter getBufferedWriterForUtf8(String file)
        throws FileNotFoundException {
    // Use the Charset constant instead of the name "UTF-8": this overload cannot throw
    // UnsupportedEncodingException, so the old impossible catch block is unnecessary.
    return new BufferedWriter(
            new OutputStreamWriter(
                    new FileOutputStream(file), java.nio.charset.StandardCharsets.UTF_8));
}
/** Reads the remainder of the stream as text using a 256-char buffer; see the two-arg overload. */
public static String readStreamAsString(final InputStream in) throws IOException {
return readStreamAsString(in, 256);
}
/**
 * Reads the remainder of {@code in} and returns it as a String. The stream is intentionally
 * not closed by this method.
 *
 * <p>NOTE(review): the InputStreamReader here uses the platform default charset, unlike the
 * rest of this class which is explicitly UTF-8 — confirm whether that is intentional before
 * relying on it for non-ASCII data.
 *
 * @param in stream to drain
 * @param bufferSize size of the intermediate char buffer
 * @return the decoded text
 * @throws IOException if reading fails
 */
public static String readStreamAsString(final InputStream in, final int bufferSize)
throws IOException {
final StringBuilder sb = new StringBuilder();
final char[] buffer = new char[bufferSize];
final InputStreamReader reader = new InputStreamReader(new BufferedInputStream(in));
for (int read = reader.read(buffer); read != -1; read = reader.read(buffer)) {
if (read > 0) {
sb.append(buffer, 0, read);
}
}
return sb.toString();
}
/**
 * Use this instead of {@link FileWriter} because you cannot specify the character encoding with
 * that.
 *
 * @param file path of the file to create/overwrite
 * @return a buffered UTF-8 writer for {@code file}
 * @throws FileNotFoundException if the file cannot be opened for writing
 * @throws UnsupportedEncodingException never in practice; UTF-8 is always available
 */
public static Writer newBufferedUTF8FileWriter(final String file)
throws UnsupportedEncodingException, FileNotFoundException {
return new BufferedWriter(new OutputStreamWriter(new FileOutputStream(file), "UTF-8"));
}
/**
 * Use this instead of {@link FileWriter} because you cannot specify the character encoding with
 * that.
 *
 * @param file file to create/overwrite
 * @return a buffered UTF-8 writer for {@code file}
 * @throws FileNotFoundException if the file cannot be opened for writing
 * @throws UnsupportedEncodingException never in practice; UTF-8 is always available
 */
public static Writer newBufferedUTF8FileWriter(final File file)
throws UnsupportedEncodingException, FileNotFoundException {
return new BufferedWriter(new OutputStreamWriter(new FileOutputStream(file), "UTF-8"));
}
}
| apache-2.0 |
zhenghongfei/todolist | src/main/java/com/zhenghongfei/todolist/springmvc/controller/TodoController.java | 1579 | package com.zhenghongfei.todolist.springmvc.controller;
import com.zhenghongfei.todolist.springmvc.model.Todo;
import com.zhenghongfei.todolist.springmvc.service.TodoService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.HttpStatus;
import org.springframework.stereotype.Controller;
import org.springframework.util.Assert;
import org.springframework.web.bind.annotation.*;
import java.util.List;
/**
 * REST controller exposing CRUD endpoints for {@link Todo} items under {@code /api/todo}.
 * <p>
 * Author: Hongfei
 * Date: 2016-12-04
 * Since: 1.0
 */
@Controller
@RequestMapping("/api/todo")
public class TodoController {
@Autowired
private TodoService todoService;
/** GET /api/todo — returns all todos as JSON. */
@RequestMapping(method = RequestMethod.GET, produces = "application/json")
@ResponseBody
public List<Todo> findAll() {
return todoService.findAll();
}
/** POST /api/todo — creates a todo from the JSON body; responds 201 with the created item. */
@RequestMapping(method = RequestMethod.POST, consumes = "application/json", produces = "application/json")
@ResponseStatus(HttpStatus.CREATED)
@ResponseBody
public Todo create(@RequestBody Todo todo) {
Assert.notNull(todo);
return todoService.create(todo);
}
/** PUT /api/todo/{id} — updates the todo; the body's id must match the path id. */
@RequestMapping(value = "/{id}", method = RequestMethod.PUT)
@ResponseStatus(HttpStatus.OK)
public void update(@RequestBody Todo todo, @PathVariable int id) {
Assert.isTrue(todo.getId() == id);
todoService.update(todo);
}
/** DELETE /api/todo/{id} — removes the todo with the given id. */
@RequestMapping(value = "/{id}", method = RequestMethod.DELETE)
@ResponseStatus(HttpStatus.OK)
public void remove(@PathVariable int id) {
todoService.remove(id);
}
}
dremio/dremio-oss | sabot/kernel/src/test/java/com/dremio/exec/planner/TestComplexSchemaFlattener.java | 3607 | /*
* Copyright (C) 2017-2019 Dremio Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.dremio.exec.planner;
import static java.util.Arrays.asList;
import static org.junit.Assert.assertEquals;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.sql.type.SqlTypeName;
import org.junit.Before;
import org.junit.Test;
import com.dremio.exec.planner.common.ComplexSchemaFlattener;
import com.dremio.exec.planner.types.JavaTypeFactoryImpl;
import com.google.common.collect.ImmutableList;
/**
 * Tests for {@code ComplexSchemaFlattener}, covering flattening both with and without a
 * field-name joiner.
 *
 * <p>BUG FIX: the two test method names were swapped relative to the scenario each actually
 * exercised ("WithJoiner" passed a null joiner, and vice versa); they now match.
 */
public class TestComplexSchemaFlattener {
    private static final RelDataTypeFactory typeFactory = JavaTypeFactoryImpl.INSTANCE;
    private static final RexBuilder rexBuilder = new RexBuilder(typeFactory);

    /** Expected leaf types, in flattening order. */
    private List<SqlTypeName> sqlTypeNameList;
    /** Row type under test: struct1(a1, b1, c1, struct2(a2, struct3(a3, b3))). */
    private RelDataType rowType;

    @Before
    public void setup() {
        sqlTypeNameList = asList(
            SqlTypeName.VARCHAR, SqlTypeName.BIGINT, SqlTypeName.DOUBLE,
            SqlTypeName.INTEGER, SqlTypeName.BOOLEAN, SqlTypeName.FLOAT);
        RelDataTypeFactory.Builder builder = typeFactory.builder();
        List<RelDataType> typeList = ImmutableList.of(
            typeFactory.createSqlType(SqlTypeName.VARCHAR),
            typeFactory.createSqlType(SqlTypeName.BIGINT),
            typeFactory.createSqlType(SqlTypeName.DOUBLE),
            typeFactory.createStructType(
                asList(
                    typeFactory.createSqlType(SqlTypeName.INTEGER),
                    typeFactory.createStructType(
                        asList(
                            typeFactory.createSqlType(SqlTypeName.BOOLEAN),
                            typeFactory.createSqlType(SqlTypeName.FLOAT)),
                        asList("a3", "b3"))),
                asList("a2", "struct3")));
        List<String> names = ImmutableList.of("a1", "b1", "c1", "struct2");
        builder.add(new RelDataTypeFieldImpl("struct1", 0, typeFactory.createStructType(typeList, names)));
        rowType = builder.build();
    }

    /** Without a joiner (null), flattened leaf fields keep their simple names. */
    @Test
    public void testComplexSchemaFlattenerWithoutJoiner() {
        List<String> leafNodesWithoutJoiner = asList("a1", "b1", "c1", "a2", "a3", "b3");
        ComplexSchemaFlattener flattener = new ComplexSchemaFlattener(rexBuilder, null);
        flattener.flatten(rowType);
        assertEquals(sqlTypeNameList,
            flattener.getTypeList().stream().map(RelDataType::getSqlTypeName).collect(Collectors.toList()));
        assertEquals(leafNodesWithoutJoiner, flattener.getFields());
    }

    /** With "_" as the joiner, top-level struct children are prefixed with the parent name. */
    @Test
    public void testComplexSchemaFlattenerWithJoiner() {
        List<String> leafNodesWithJoiner = asList("struct1_a1", "struct1_b1", "struct1_c1", "a2", "a3", "b3");
        ComplexSchemaFlattener flattener = new ComplexSchemaFlattener(rexBuilder, "_");
        flattener.flatten(rowType);
        assertEquals(sqlTypeNameList,
            flattener.getTypeList().stream().map(RelDataType::getSqlTypeName).collect(Collectors.toList()));
        assertEquals(leafNodesWithJoiner, flattener.getFields());
    }
}
| apache-2.0 |
cocoatomo/asakusafw | testing-project/asakusa-test-moderator/src/main/java/com/asakusafw/testdriver/core/SpiImporterPreparator.java | 4428 | /**
* Copyright 2011-2017 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.testdriver.core;
import java.io.IOException;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.List;
import java.util.ServiceLoader;
import com.asakusafw.runtime.io.ModelOutput;
import com.asakusafw.vocabulary.external.ImporterDescription;
/**
 * Composition of registered {@link ImporterPreparator} as {@link ServiceLoader services}.
 *
 * <p>Each operation is dispatched to the first registered element whose description class is
 * assignable from the runtime type of the given {@link ImporterDescription}.
 *
 * @since 0.2.0
 * @version 0.2.2
 */
public class SpiImporterPreparator implements ImporterPreparator<ImporterDescription> {
// raw type: the elements handle heterogeneous ImporterDescription subtypes
@SuppressWarnings("rawtypes")
private final List<ImporterPreparator> elements;
/**
 * Creates a new instance.
 * @param serviceClassLoader the class loader to load the registered services
 * @throws IllegalArgumentException if some parameters were {@code null}
 */
public SpiImporterPreparator(ClassLoader serviceClassLoader) {
if (serviceClassLoader == null) {
throw new IllegalArgumentException("serviceClassLoader must not be null"); //$NON-NLS-1$
}
this.elements = Util.loadService(ImporterPreparator.class, serviceClassLoader);
}
/**
 * Creates a new instance.
 * @param elements the elements to be composited
 * @throws IllegalArgumentException if some parameters were {@code null}
 */
public SpiImporterPreparator(List<? extends ImporterPreparator<?>> elements) {
if (elements == null) {
throw new IllegalArgumentException("elements must not be null"); //$NON-NLS-1$
}
// defensive copy so later changes to the caller's list do not affect this instance
this.elements = new ArrayList<>(elements);
}
/** Returns the most general description type accepted by this composite. */
@Override
public Class<ImporterDescription> getDescriptionClass() {
return ImporterDescription.class;
}
/**
 * Truncates the target resource via the first registered element that accepts the description.
 *
 * @throws IOException if no registered element accepts the description, or truncation fails
 */
@Override
public void truncate(ImporterDescription description, TestContext context) throws IOException {
for (ImporterPreparator<?> element : elements) {
if (element.getDescriptionClass().isAssignableFrom(description.getClass())) {
truncate0(element, description, context);
return;
}
}
throw new IOException(MessageFormat.format(
Messages.getString("SpiImporterPreparator.errorFailedToTruncate"), //$NON-NLS-1$
description));
}
// helper that captures the element's description type T so the cast happens in one place
private <T extends ImporterDescription> void truncate0(
ImporterPreparator<T> preparator,
ImporterDescription description,
TestContext context) throws IOException {
assert preparator != null;
assert description != null;
T desc = preparator.getDescriptionClass().cast(description);
preparator.truncate(desc, context);
}
/**
 * Creates a {@link ModelOutput} via the first registered element that accepts the description.
 *
 * @throws IOException if no registered element accepts the description, or creation fails
 */
@Override
public <V> ModelOutput<V> createOutput(
DataModelDefinition<V> definition,
ImporterDescription description,
TestContext context) throws IOException {
for (ImporterPreparator<?> element : elements) {
if (element.getDescriptionClass().isAssignableFrom(description.getClass())) {
return createOutput0(definition, element, description, context);
}
}
throw new IOException(MessageFormat.format(
Messages.getString("SpiImporterPreparator.errorFailedToCreateOutput"), //$NON-NLS-1$
description));
}
// helper that captures the element's description type T so the cast happens in one place
private <T extends ImporterDescription, V> ModelOutput<V> createOutput0(
DataModelDefinition<V> definition,
ImporterPreparator<T> preparator,
ImporterDescription description,
TestContext context) throws IOException {
assert definition != null;
assert preparator != null;
assert description != null;
T desc = preparator.getDescriptionClass().cast(description);
return preparator.createOutput(definition, desc, context);
}
}
| apache-2.0 |
greensnow25/javaaz | chapter5/tree/src/mian/java/greensnow25/com/SimpleTree.java | 6518 | package greensnow25.com;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
/**
 * A simple general-purpose tree of {@link Comparable} values whose iterator
 * returns the stored values in ascending order.
 *
 * @author greensnow25.
 * @version 1.
 * @since 29.05.2017.
 */
public class SimpleTree<E extends Comparable> implements ISimpleTree<E> {
    /** Root node; null until the first element is added. */
    private Node<E> root = null;

    /**
     * Adds {@code child} under the node whose value equals {@code parent}.
     * A parent may hold any number of children. If the tree is empty, the
     * child becomes the root and {@code parent} is ignored (pre-existing
     * behavior, kept for compatibility).
     *
     * @param parent value identifying the parent node.
     * @param child value to insert.
     * @return true if the value was added, false if no node holds {@code parent}.
     */
    @Override
    public boolean add(E parent, E child) {
        if (root == null) {
            root = new Node<>(child);
        } else {
            Node nodeParent = this.search(this.root, parent);
            if (nodeParent == null) {
                return false;
            } else {
                nodeParent.children.add(new Node<E>(child));
            }
        }
        return true;
    }

    /**
     * Checks whether every node of the tree has at most two children.
     *
     * @return true if the tree is binary; an empty tree counts as binary.
     */
    @Override
    public boolean isBinary() {
        // guard against NullPointerException on an empty tree
        return this.root == null || this.recSearchBinary(this.root);
    }

    /**
     * Recursively verifies that no node in the subtree has more than two children.
     *
     * <p>BUG FIX: the previous version kept only the result of the <em>last</em>
     * child's subtree, so a violation anywhere in an earlier subtree was ignored.
     *
     * @param eNode subtree root.
     * @return true if the whole subtree is binary.
     */
    private boolean recSearchBinary(Node<E> eNode) {
        if (eNode.children.size() > 2) {
            return false;
        }
        for (Node<E> node : eNode.children) {
            if (!this.recSearchBinary(node)) {
                return false;
            }
        }
        return true;
    }

    /**
     * Returns an iterator producing the tree's values in ascending order.
     *
     * @return Iterator over the values.
     */
    @Override
    public Iterator<E> iterator() {
        return new Iterator<E>() {
            /** Value most recently returned by next(); null before the first call. */
            private E position = null;
            /** Smallest value in the tree; refined on the first call to next(). */
            private E minValueTree = root == null ? null : root.value;
            /** Largest value in the tree; refined on the first call to next(). */
            private E maxValueTree = root == null ? null : root.value;
            /** True until the min/max bounds have been computed. */
            boolean findMinMax = true;

            /**
             * Reports whether another value remains.
             *
             * <p>BUG FIX: the previous version dereferenced {@code position} before
             * the first call to next(), throwing NullPointerException.
             */
            @Override
            public boolean hasNext() {
                if (this.position == null) {
                    return root != null;
                }
                return this.position.compareTo(this.maxValueTree) < 0;
            }

            /**
             * Returns the next value in ascending order.
             *
             * @return the next value.
             * @throws NoSuchElementException if the tree is empty or the last
             *     element has already been returned.
             */
            @Override
            public E next() {
                if (root == null) {
                    throw new NoSuchElementException("no elements");
                }
                if (findMinMax) {
                    // first call: establish the bounds and start at the minimum
                    this.findMinValue(root);
                    this.findMaxValue(root);
                    this.findMinMax = false;
                    this.position = this.minValueTree;
                    return this.position;
                }
                if (this.position == this.maxValueTree) {
                    throw new NoSuchElementException("no elements");
                }
                this.position = this.nextValue(root, maxValueTree);
                return this.position;
            }

            /**
             * Records the smallest value of the subtree into {@code minValueTree}.
             *
             * @param node subtree root.
             */
            private void findMinValue(Node<E> node) {
                for (Node nod : node.children) {
                    if (this.minValueTree.compareTo(nod.value) > 0) {
                        minValueTree = (E) nod.value;
                    }
                    this.findMinValue(nod);
                }
            }

            /**
             * Records the largest value of the subtree into {@code maxValueTree}.
             *
             * @param node subtree root.
             */
            private void findMaxValue(Node<E> node) {
                for (Node nod : node.children) {
                    if (this.maxValueTree.compareTo(nod.value) < 0) {
                        maxValueTree = (E) nod.value;
                    }
                    this.findMaxValue(nod);
                }
            }

            /**
             * Walks the whole tree and returns the smallest stored value that is
             * strictly greater than the current cursor position.
             *
             * @param node current node of the traversal.
             * @param current best (smallest qualifying) candidate found so far.
             * @return the value immediately after the cursor.
             */
            private E nextValue(Node<E> node, E current) {
                E res = current;
                if (this.position.compareTo(node.value) < 0 && current.compareTo(node.value) >= 0) {
                    res = node.value;
                }
                for (Node nod : node.children) {
                    if (this.position.compareTo(nod.value) < 0 && res.compareTo(nod.value) > 0) {
                        res = (E) nod.value;
                    }
                    res = (E) this.nextValue(nod, res);
                }
                return res;
            }
        };
    }

    /**
     * Finds the node holding {@code parent} in the subtree rooted at {@code node}.
     *
     * @param node subtree root to search.
     * @param parent the value to find.
     * @return the node holding the value, or null if absent.
     */
    private Node<E> search(Node<E> node, E parent) {
        Node res = null;
        if (node.value.compareTo(parent) == 0) {
            return node;
        }
        for (Node<E> nod : node.children) {
            if (nod.value.compareTo(parent) == 0) {
                return nod;
            }
            // search each subtree once (the previous code recursed twice per subtree)
            Node<E> found = this.search(nod, parent);
            if (found != null) {
                res = found;
            }
        }
        return res;
    }

    /**
     * Tree node: a value plus its list of child nodes.
     *
     * @param <E> value type.
     * @author greensnow25.
     */
    private class Node<E> {
        /** Child nodes, in insertion order. */
        private List<Node<E>> children;
        /** Value held by this node. */
        private E value;

        /**
         * Creates a leaf node.
         *
         * @param value value of the node.
         */
        public Node(E value) {
            this.children = new ArrayList<>();
            this.value = value;
        }
    }
}
| apache-2.0 |
Aikis07/barkovd | chapter_001/src/main/java/ru/job4j/loop/Counter.java | 270 | package ru.job4j.loop;
public class Counter {
    /**
     * Sums every even number in the inclusive range [start, finish].
     *
     * @param start lower bound of the range (inclusive).
     * @param finish upper bound of the range (inclusive).
     * @return sum of the even values; 0 when the range is empty.
     */
    public int add(int start, int finish) {
        // Jump straight to the first even number at or above start and
        // step by two, instead of testing parity on every value.
        int firstEven = (start % 2 == 0) ? start : start + 1;
        int total = 0;
        for (int value = firstEven; value <= finish; value += 2) {
            total += value;
        }
        return total;
    }
}
| apache-2.0 |
gigaroby/hops | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImplDist.java | 23996 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.rmnode;
import io.hops.metadata.yarn.entity.PendingEvent;
import io.hops.util.DBUtility;
import io.hops.util.ToCommitHB;
import java.io.IOException;
import java.util.*;
import java.util.concurrent.ConcurrentLinkedQueue;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceOption;
import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
import org.apache.hadoop.yarn.server.resourcemanager.ClusterMetrics;
import org.apache.hadoop.yarn.server.resourcemanager.NodesListManagerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.NodesListManagerEventType;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRunningOnNodeEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeResourceUpdateSchedulerEvent;
import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
/**
 * Hops distributed variant of {@code RMNodeImpl}: instead of dispatching
 * scheduler events through the local dispatcher, state-machine transitions
 * stage their effects on a {@link ToCommitHB} batch which {@link #handle}
 * commits to the database in a single shot.
 */
public class RMNodeImplDist extends RMNodeImpl {
  private static final Log LOG = LogFactory.getLog(RMNodeImplDist.class);
  // Per-event batch of DB mutations; replaced with a fresh instance after
  // every successful commit in handle().
  private ToCommitHB toCommit = new ToCommitHB(this.nodeId.toString());
  // Used by RT streaming receiver
  /** Identifies which NM/container master key a streamed update refers to. */
  public static enum KeyType {
    CURRENTNMTOKENMASTERKEY,
    NEXTNMTOKENMASTERKEY,
    CURRENTCONTAINERTOKENMASTERKEY,
    NEXTCONTAINERTOKENMASTERKEY
  }
  /**
   * Creates the distributed node representation; all state handling is
   * delegated to the parent constructor.
   *
   * @param nodeId node identifier (host:port).
   * @param context RM context.
   * @param hostName node hostname.
   * @param cmPort container-manager port.
   * @param httpPort NM web UI port.
   * @param node rack-resolved network location.
   * @param capability total resource capability of the node.
   * @param nodeManagerVersion version string reported by the NM.
   */
  public RMNodeImplDist(NodeId nodeId, RMContext context, String hostName,
      int cmPort, int httpPort, Node node, Resource capability,
      String nodeManagerVersion) {
    super(nodeId, context, hostName, cmPort, httpPort, node, capability,
        nodeManagerVersion);
  }
  /**
   * Heartbeat handling while the node is RUNNING: records the latest
   * heartbeat response and health report, diffs the reported container
   * statuses, and stages all resulting changes on {@code toCommit} for the
   * single DB commit performed in {@link #handle}.
   *
   * @param rmNode the node the heartbeat came from.
   * @param event expected to be a {@link RMNodeStatusEvent}.
   * @return UNHEALTHY if the NM reported itself unhealthy, else RUNNING.
   */
  protected NodeState statusUpdateWhenHealthyTransitionInternal(
      RMNodeImpl rmNode, RMNodeEvent event) {
    RMNodeStatusEvent statusEvent = (RMNodeStatusEvent) event;
    // Switch the last heartbeatresponse.
    rmNode.latestNodeHeartBeatResponse = statusEvent.getLatestResponse();
    NodeHealthStatus remoteNodeHealthStatus = statusEvent.
        getNodeHealthStatus();
    rmNode.setHealthReport(remoteNodeHealthStatus.getHealthReport());
    rmNode.setLastHealthReportTime(
        remoteNodeHealthStatus.getLastHealthReportTime());
    if (!remoteNodeHealthStatus.getIsNodeHealthy()) {
      LOG.info("Node " + rmNode.nodeId + " reported UNHEALTHY with details: "
          + remoteNodeHealthStatus.getHealthReport());
      // Pending container updates are useless once the node goes unhealthy.
      rmNode.nodeUpdateQueue.clear();
      // Inform the scheduler via the DB pending-event (distributed
      // dispatch). The commented-out code below is the original local
      // dispatcher path, kept for reference.
      // if (rmNode.context.isDistributed() && !rmNode.context.isLeader()) {
      //Add NodeRemovedSchedulerEvent to TransactionState
      LOG.debug("HOP :: Added Pending event to TransactionState");
      toCommit.addPendingEvent(PendingEvent.Type.NODE_REMOVED,
          PendingEvent.Status.NEW);
      // } else {
      // rmNode.context.getDispatcher().getEventHandler().handle(
      // new NodeRemovedSchedulerEvent(rmNode));
      if(rmNode.context.isLeader()){
        rmNode.context.getDispatcher().getEventHandler().handle(
            new NodesListManagerEvent(
                NodesListManagerEventType.NODE_UNUSABLE, rmNode));
      }
      // Update metrics
      rmNode.updateMetricsForDeactivatedNode(rmNode.getState(),
          NodeState.UNHEALTHY);
      return NodeState.UNHEALTHY;
    }
    ((RMNodeImplDist) rmNode).handleContainerStatus(statusEvent.
        getContainers());
    if (rmNode.nextHeartBeat) {
      // Gate heartbeat processing until the scheduler pulls the queued
      // updates (re-enabled in pullContainerUpdates()).
      rmNode.nextHeartBeat = false;
      toCommit.addNextHeartBeat(rmNode.nextHeartBeat);
      // if (rmNode.context.isDistributed() && !rmNode.context.isLeader()) {
      //Add NodeUpdatedSchedulerEvent to TransactionState
      toCommit.addPendingEvent(PendingEvent.Type.NODE_UPDATED,
          PendingEvent.Status.SCHEDULER_FINISHED_PROCESSING);
      // } else {
      // rmNode.context.getDispatcher().getEventHandler().handle(
      // new NodeUpdateSchedulerEvent(rmNode));
      // }
    } else if (rmNode.context.isDistributed()
        // && !rmNode.context.isLeader()
        ) {
      toCommit.addPendingEvent(PendingEvent.Type.NODE_UPDATED,
          PendingEvent.Status.SCHEDULER_NOT_FINISHED_PROCESSING);
    }
    // Update DTRenewer in secure mode to keep these apps alive. Today this is
    // needed for log-aggregation to finish long after the apps are gone.
    if (UserGroupInformation.isSecurityEnabled()) {
      rmNode.context.getDelegationTokenRenewer().updateKeepAliveApplications(
          statusEvent.getKeepAliveAppIds());
    }
    // Persist the node's refreshed heartbeat state with the same commit.
    toCommit.addRMNode(hostName, commandPort, httpPort, totalCapability,
        nodeManagerVersion, getState(), getHealthReport(),
        getLastHealthReportTime());
    return NodeState.RUNNING;
  }
  /**
   * Splits the NM-reported container statuses into newly-launched and
   * completed sets, skipping containers already scheduled for cleanup or
   * belonging to finished applications, then stages the resulting
   * UpdatedContainerInfo and per-container log records.
   *
   * @param containerStatuses statuses carried by the node's heartbeat.
   */
  @Override
  protected void handleContainerStatus(List<ContainerStatus> containerStatuses) {
    // Filter the map to only obtain just launched containers and finished
    // containers.
    List<ContainerStatus> newlyLaunchedContainers
        = new ArrayList<>();
    List<ContainerStatus> completedContainers = new ArrayList<>();
    List<io.hops.metadata.yarn.entity.ContainerStatus> containerToLog
        = new ArrayList<>();
    for (ContainerStatus remoteContainer : containerStatuses) {
      ContainerId containerId = remoteContainer.getContainerId();
      // Don't bother with containers already scheduled for cleanup, or for
      // applications already killed. The scheduler doesn't need to know any
      // more about this container
      if (containersToClean.contains(containerId)) {
        LOG.info("Container " + containerId + " already scheduled for "
            + "cleanup, no further processing");
        continue;
      }
      if (finishedApplications.contains(containerId.getApplicationAttemptId()
          .getApplicationId())) {
        LOG.info("Container " + containerId
            + " belongs to an application that is already killed,"
            + " no further processing");
        continue;
      }
      // Process running containers
      if (remoteContainer.getState() == ContainerState.RUNNING) {
        if (!launchedContainers.contains(containerId)) {
          // Just launched container. RM knows about it the first time.
          launchedContainers.add(containerId);
          newlyLaunchedContainers.add(remoteContainer);
        }
      } else {
        // A finished container
        launchedContainers.remove(containerId);
        completedContainers.add(remoteContainer);
      }
      // Every non-skipped status is also recorded for the container-logs
      // service, whether running or finished.
      containerToLog.add(new io.hops.metadata.yarn.entity.ContainerStatus(
          remoteContainer.getContainerId().toString(), remoteContainer.
          getState().name(), remoteContainer.getDiagnostics(),
          remoteContainer.getExitStatus(), nodeId.toString()));
    }
    if (!newlyLaunchedContainers.isEmpty() || !completedContainers.isEmpty()) {
      UpdatedContainerInfo uci = new UpdatedContainerInfo(
          newlyLaunchedContainers,
          completedContainers);
      toCommit.addNodeUpdateQueue(uci);
    }
    if(context.getContainersLogsService()!=null && !containerToLog.isEmpty()){
      context.getContainersLogsService().insertEvent(containerToLog);
    }
  }
  /**
   * Fills the heartbeat response with the cleanup orders accumulated for
   * this node (containers to clean, finished applications, containers to
   * drop from NM context), removes the persisted copies, then clears the
   * local sets — all under the write lock.
   *
   * @param response heartbeat response being built for the NM.
   */
  @Override
  public void updateNodeHeartbeatResponseForCleanup(
      NodeHeartbeatResponse response) {
    this.writeLock.lock();
    try {
      response.addAllContainersToCleanup(
          new ArrayList<>(this.containersToClean));
      response.addAllApplicationsToCleanup(this.finishedApplications);
      response.addContainersToBeRemovedFromNM(
          new ArrayList<>(this.containersToBeRemovedFromNM));
      // We need to make a deep copy of containersToClean and finishedApplications
      // since DBUtility is async and we get ConcurrentModificationException
      Set<ContainerId> copyContainersToClean = new HashSet<>(this.containersToClean.size());
      for (ContainerId cid : this.containersToClean) {
        copyContainersToClean.add(ContainerId.newContainerId(cid.getApplicationAttemptId(),
            cid.getContainerId()));
      }
      DBUtility.removeContainersToClean(copyContainersToClean, this.nodeId);
      List<ApplicationId> copyFinishedApplications = new ArrayList<>(this.finishedApplications.size());
      for (ApplicationId appId : this.finishedApplications) {
        copyFinishedApplications.add(ApplicationId.newInstance(appId.getClusterTimestamp(),
            appId.getId()));
      }
      DBUtility.removeFinishedApplications(copyFinishedApplications, this.nodeId);
      this.containersToClean.clear();
      this.finishedApplications.clear();
      this.containersToBeRemovedFromNM.clear();
    } catch (IOException ex) {
      LOG.error(ex, ex);
    } finally {
      this.writeLock.unlock();
    }
  }
  /**
   * Notifies the app that it is running on {@code nodeId}. When the RMApp
   * lookup fails the app is assumed gone: it is queued (and persisted) for
   * cleanup on the NM instead of being notified.
   *
   * @param rmNode node reporting the application.
   * @param context RM context used for app lookup and dispatch.
   * @param appId application reported as running.
   * @param nodeId id of the reporting node.
   */
  protected void handleRunningAppOnNode(RMNodeImpl rmNode,
      RMContext context, ApplicationId appId, NodeId nodeId) {
    RMApp app = context.getRMApps().get(appId);
    // if we failed getting app by appId, maybe something wrong happened, just
    // add the app to the finishedApplications list so that the app can be
    // cleaned up on the NM
    if (null == app) {
      LOG.warn("Cannot get RMApp by appId=" + appId
          + ", just added it to finishedApplications list for cleanup");
      rmNode.finishedApplications.add(appId);
      try {
        // Persist so the cleanup survives RM failover / restart.
        DBUtility.addFinishedApplication(appId, rmNode.nodeId);
      } catch (IOException ex) {
        LOG.error(ex, ex);
      }
      return;
    }
    context.getDispatcher().getEventHandler()
        .handle(new RMAppRunningOnNodeEvent(appId, nodeId));
  }
@Override
protected void cleanUpAppTransitionInternal(RMNodeImpl rmNode,
RMNodeEvent event) {
rmNode.finishedApplications.add(((RMNodeCleanAppEvent) event).getAppId());
try {
DBUtility.addFinishedApplication(((RMNodeCleanAppEvent) event).
getAppId(),
rmNode.getNodeID());
} catch (IOException ex) {
LOG.error(ex, ex);
}
}
protected void cleanUpContainerTransitionInternal(RMNodeImpl rmNode,
RMNodeEvent event) {
rmNode.containersToClean.add(((RMNodeCleanContainerEvent) event).
getContainerId());
try {
DBUtility.addContainerToClean(((RMNodeCleanContainerEvent) event).
getContainerId(), rmNode.getNodeID());
} catch (IOException ex) {
LOG.error(ex, ex);
}
}
  /**
   * Drains the queued container updates for the scheduler, removes them
   * from the DB, and re-enables heartbeat processing (nextHeartBeat = true,
   * also persisted). Pairs with statusUpdateWhenHealthyTransitionInternal,
   * which sets nextHeartBeat to false while updates are pending.
   *
   * @return all UpdatedContainerInfo accumulated since the last pull.
   */
  @Override
  public List<UpdatedContainerInfo> pullContainerUpdates() {
    List<UpdatedContainerInfo> latestContainerInfoList
        = new ArrayList<UpdatedContainerInfo>();
    try {
      UpdatedContainerInfo containerInfo;
      while ((containerInfo = nodeUpdateQueue.poll()) != null) {
        latestContainerInfoList.add(containerInfo);
      }
      DBUtility.removeUCI(latestContainerInfoList, this.nodeId.toString());
      this.nextHeartBeat = true;
      DBUtility.addNextHB(this.nextHeartBeat, this.nodeId.toString());
    } catch (IOException ex) {
      LOG.error(ex, ex);
    }
    return latestContainerInfoList;
  }
public void setContainersToCleanUp(Set<ContainerId> containersToCleanUp) {
super.writeLock.lock();
try {
super.containersToClean.addAll(containersToCleanUp);
} finally {
super.writeLock.unlock();
}
}
public void addContainersToCleanUp(ContainerId containerToCleanUp) {
super.writeLock.lock();
try {
super.containersToClean.add(containerToCleanUp);
} finally {
super.writeLock.unlock();
}
}
public void setAppsToCleanUp(List<ApplicationId> appsToCleanUp) {
super.writeLock.lock();
try {
super.finishedApplications.addAll(appsToCleanUp);
} finally {
super.writeLock.unlock();
}
}
public void addAppToCleanUp(ApplicationId appToCleanUp) {
super.writeLock.lock();
try {
super.finishedApplications.add(appToCleanUp);
} finally {
super.writeLock.unlock();
}
}
public void setNextHeartbeat(boolean nextHeartbeat) {
super.writeLock.lock();
try {
super.nextHeartBeat = nextHeartbeat;
} finally {
super.writeLock.unlock();
}
}
public void setState(String state) {
super.writeLock.lock();
try {
super.stateMachine.setCurrentState(NodeState.valueOf(state));
} finally {
super.writeLock.unlock();
}
}
  /**
   * Bulk-restores queued container updates (e.g. during recovery).
   * NOTE(review): unlike the sibling mutators this takes no write lock —
   * presumably safe because nodeUpdateQueue is a ConcurrentLinkedQueue;
   * confirm callers do not rely on atomicity with other node state.
   *
   * @param updatedContainerInfo updates to append to the node's queue.
   */
  public void setUpdatedContainerInfo(ConcurrentLinkedQueue<UpdatedContainerInfo>
      updatedContainerInfo) {
    super.nodeUpdateQueue.addAll(updatedContainerInfo);
  }
  /**
   * Handles node start: reconciles with a possibly rejoining node, updates
   * cluster metrics, re-notifies running applications, and stages a
   * NODE_ADDED pending event for the distributed scheduler dispatch.
   *
   * @param rmNode the starting node.
   * @param event expected to be a {@link RMNodeStartedEvent}.
   */
  @Override
  protected void addNodeTransitionInternal(RMNodeImpl rmNode, RMNodeEvent event) {
    // Inform the scheduler
    RMNodeStartedEvent startEvent = (RMNodeStartedEvent) event;
    List<NMContainerStatus> containers = null;
    String host = rmNode.nodeId.getHost();
    RMNode previousRMNode = rmNode.context.getInactiveRMNodes().remove(host);
    if (previousRMNode != null) {
      if (previousRMNode.getNodeID().getPort() != -1) {
        // Old node rejoining
        rmNode.updateMetricsForRejoinedNode(previousRMNode.getState());
      } else {
        // An old excluded node rejoining
        ClusterMetrics.getMetrics().decrDecommisionedNMs();
        containers = updateNewNodeMetricsAndContainers(rmNode, startEvent);
      }
    } else {
      // Increment activeNodes explicitly because this is a new node.
      containers = updateNewNodeMetricsAndContainers(rmNode, startEvent);
    }
    if (null != startEvent.getRunningApplications()) {
      for (ApplicationId appId : startEvent.getRunningApplications()) {
        rmNode.handleRunningAppOnNode(rmNode, rmNode.context, appId,
            rmNode.nodeId);
      }
    }
    // Distributed path: the scheduler learns about the node via the DB
    // pending-event, not the local dispatcher (original path kept below).
    // NOTE(review): 'containers' is now consumed only by the commented-out
    // dispatcher call — confirm whether it can be dropped.
    // if (rmNode.context.isDistributed() && !rmNode.context.isLeader()) {
    //Add NodeAddedSchedulerEvent to TransactionState
    toCommit.addPendingEvent(PendingEvent.Type.NODE_ADDED,
        PendingEvent.Status.NEW);
    // } else {
    // rmNode.context.getDispatcher().getEventHandler()
    // .handle(new NodeAddedSchedulerEvent(rmNode, containers));
    if(rmNode.context.isLeader()){
      rmNode.context.getDispatcher().getEventHandler().handle(
          new NodesListManagerEvent(
              NodesListManagerEventType.NODE_USABLE, rmNode));
    }
  }
  /**
   * Handles an NM re-registration (restart / reconnect). With no running
   * applications the node is replaced: pending updates are dropped, a
   * NODE_REMOVED pending event is staged, and either the same node is
   * re-added or (if the http port changed) a brand-new RMNode is started.
   * With running apps, the existing node is updated in place (ports,
   * capability, container statuses, running apps).
   *
   * @param rmNode the node being reconnected.
   * @param event expected to be a {@link RMNodeReconnectEvent}.
   */
  protected void reconnectNodeTransitionInternal(RMNodeImpl rmNode,
      RMNodeEvent event) {
    RMNodeReconnectEvent reconnectEvent = (RMNodeReconnectEvent) event;
    RMNode newNode = reconnectEvent.getReconnectedNode();
    rmNode.nodeManagerVersion = newNode.getNodeManagerVersion();
    List<ApplicationId> runningApps = reconnectEvent.getRunningApplications();
    boolean noRunningApps = (runningApps == null) || (runningApps.size() == 0);
    // No application running on the node, so send node-removal event with
    // cleaning up old container info.
    if (noRunningApps) {
      rmNode.nodeUpdateQueue.clear();
      // Distributed dispatch: stage the removal as a DB pending-event; the
      // commented-out code is the original local dispatcher path.
      // if (rmNode.context.isDistributed() && !rmNode.context.isLeader()) {
      //Add NodeRemovedSchedulerEvent to TransactionState
      LOG.debug("HOP :: Added Pending event to TransactionState");
      toCommit.addPendingEvent(PendingEvent.Type.NODE_REMOVED,
          PendingEvent.Status.NEW);
      // } else {
      // rmNode.context.getDispatcher().getEventHandler().handle(
      // new NodeRemovedSchedulerEvent(rmNode));
      // }
      if (rmNode.getHttpPort() == newNode.getHttpPort()) {
        if (!rmNode.getTotalCapability().equals(
            newNode.getTotalCapability())) {
          rmNode.totalCapability = newNode.getTotalCapability();
        }
        if (rmNode.getState().equals(NodeState.RUNNING)) {
          // Only add old node if old state is RUNNING
          if (rmNode.context.isDistributed()
              // && !rmNode.context.isLeader()
              ) {
            //Add NodeAddedSchedulerEvent to TransactionState
            LOG.debug("HOP :: Added Pending event to TransactionState");
            toCommit.addPendingEvent(PendingEvent.Type.NODE_ADDED,
                PendingEvent.Status.NEW);
          } else {
            rmNode.context.getDispatcher().getEventHandler().handle(
                new NodeAddedSchedulerEvent(rmNode));
          }
        }
      } else {
        // Reconnected node differs, so replace old node and start new node
        switch (rmNode.getState()) {
          case RUNNING:
            ClusterMetrics.getMetrics().decrNumActiveNodes();
            break;
          case UNHEALTHY:
            ClusterMetrics.getMetrics().decrNumUnhealthyNMs();
            break;
          default:
            LOG.debug("Unexpected Rmnode state");
        }
        rmNode.context.getRMNodes().put(newNode.getNodeID(), newNode);
        rmNode.context.getDispatcher().getEventHandler().handle(
            new RMNodeStartedEvent(newNode.getNodeID(), null, null));
      }
    } else {
      rmNode.httpPort = newNode.getHttpPort();
      rmNode.httpAddress = newNode.getHttpAddress();
      boolean isCapabilityChanged = false;
      if (!rmNode.getTotalCapability().equals(
          newNode.getTotalCapability())) {
        rmNode.totalCapability = newNode.getTotalCapability();
        isCapabilityChanged = true;
      }
      handleNMContainerStatus(reconnectEvent.getNMContainerStatuses(), rmNode);
      for (ApplicationId appId : reconnectEvent.getRunningApplications()) {
        rmNode.handleRunningAppOnNode(rmNode, rmNode.context, appId,
            rmNode.nodeId);
      }
      if (isCapabilityChanged
          && rmNode.getState().equals(NodeState.RUNNING)) {
        // Update scheduler node's capacity for reconnect node.
        rmNode.context
            .getDispatcher()
            .getEventHandler()
            .handle(
                new NodeResourceUpdateSchedulerEvent(rmNode,
                    ResourceOption
                        .newInstance(newNode.getTotalCapability(), -1)));
      }
    }
  }
  /**
   * Deactivates the node (decommission / lost / shutdown): clears pending
   * updates, stages NODE_REMOVED if the scheduler still knows the node,
   * moves it to the inactive map and fixes up metrics.
   *
   * @param rmNode node being deactivated.
   * @param event triggering event.
   * @param finalState inactive state the node ends in.
   */
  @Override
  protected void deactivateNodeTransitionInternal(RMNodeImpl rmNode,
      RMNodeEvent event, final NodeState finalState) {
    //check for UnknownNodeId
    if (rmNode.getNodeID().getPort() == -1) {
      rmNode.updateMetricsForDeactivatedNode(rmNode.getState(), finalState);
      return;
    }
    // Inform the scheduler
    rmNode.nodeUpdateQueue.clear();
    // If the current state is NodeState.UNHEALTHY
    // Then node is already been removed from the
    // Scheduler
    NodeState initialState = rmNode.getState();
    if (!initialState.equals(NodeState.UNHEALTHY)) {
      // Distributed dispatch via DB pending-event; original local
      // dispatcher path kept commented out below.
      // if (rmNode.context.isDistributed() && !rmNode.context.isLeader()) {
      //Add NodeRemovedSchedulerEvent to TransactionState
      LOG.debug("HOP :: Added Pending event to TransactionState");
      toCommit.addPendingEvent(PendingEvent.Type.NODE_REMOVED,
          PendingEvent.Status.NEW);
      // } else {
      // rmNode.context.getDispatcher().getEventHandler()
      // .handle(new NodeRemovedSchedulerEvent(rmNode));
      // }
    }
    if(rmNode.context.isLeader()){
      rmNode.context.getDispatcher().getEventHandler().handle(
          new NodesListManagerEvent(
              NodesListManagerEventType.NODE_UNUSABLE, rmNode));
    }
    // Deactivate the node
    rmNode.context.getRMNodes().remove(rmNode.nodeId);
    LOG.info("Deactivating Node " + rmNode.nodeId + " as it is now "
        + finalState);
    rmNode.context.getInactiveRMNodes().put(rmNode.nodeId.getHost(), rmNode);
    //Update the metrics
    rmNode.updateMetricsForDeactivatedNode(initialState, finalState);
  }
  /**
   * Heartbeat handling while UNHEALTHY: records the response and health
   * report; if the NM reports healthy again, stages NODE_ADDED, notifies
   * the nodes-list manager (leader only) and returns RUNNING.
   *
   * @param rmNode the node the heartbeat came from.
   * @param event expected to be a {@link RMNodeStatusEvent}.
   * @return RUNNING when the node recovered, otherwise UNHEALTHY.
   */
  protected NodeState statusUpdateWhenUnHealthyTransitionInternal(
      RMNodeImpl rmNode, RMNodeEvent event) {
    RMNodeStatusEvent statusEvent = (RMNodeStatusEvent) event;
    // Switch the last heartbeatresponse.
    rmNode.latestNodeHeartBeatResponse = statusEvent.getLatestResponse();
    NodeHealthStatus remoteNodeHealthStatus = statusEvent.getNodeHealthStatus();
    rmNode.setHealthReport(remoteNodeHealthStatus.getHealthReport());
    rmNode.setLastHealthReportTime(
        remoteNodeHealthStatus.getLastHealthReportTime());
    if (remoteNodeHealthStatus.getIsNodeHealthy()) {
      // Distributed dispatch via DB pending-event; original local
      // dispatcher path kept commented out below.
      // if (rmNode.context.isDistributed() && !rmNode.context.isLeader()) {
      //Add NodeAddedSchedulerEvent to TransactionState
      LOG.debug("HOP :: Added Pending event to TransactionState");
      toCommit.addPendingEvent(PendingEvent.Type.NODE_ADDED,
          PendingEvent.Status.NEW);
      // } else {
      // rmNode.context.getDispatcher().getEventHandler().handle(
      // new NodeAddedSchedulerEvent(rmNode));
      if(rmNode.context.isLeader()){
        rmNode.context.getDispatcher().getEventHandler().handle(
            new NodesListManagerEvent(
                NodesListManagerEventType.NODE_USABLE, rmNode));
      }
      // ??? how about updating metrics before notifying to ensure that
      // notifiers get update metadata because they will very likely query it
      // upon notification
      // Update metrics
      rmNode.updateMetricsForRejoinedNode(NodeState.UNHEALTHY);
      return NodeState.RUNNING;
    }
    return NodeState.UNHEALTHY;
  }
public void handle(RMNodeEvent event) {
LOG.debug("Processing " + event.getNodeId() + " of type " + event.getType());
try {
writeLock.lock();
NodeState oldState = getState();
try {
stateMachine.doTransition(event.getType(), event);
} catch (InvalidStateTransitonException e) {
LOG.error("Can't handle this event at current state", e);
LOG.error("Invalid event " + event.getType() + " on Node "
+ this.nodeId);
}
if (oldState != getState()) {
LOG.info(nodeId + " Node Transitioned from " + oldState + " to "
+ getState());
toCommit.addRMNode(hostName, commandPort, httpPort, totalCapability,
nodeManagerVersion, getState(), getHealthReport(),
getLastHealthReportTime());
}
try {
toCommit.commit();
toCommit = new ToCommitHB(this.nodeId.toString());
} catch (IOException ex) {
LOG.error(ex, ex);
}
} finally {
writeLock.unlock();
}
}
}
| apache-2.0 |