repo_name
stringlengths
5
108
path
stringlengths
6
333
size
stringlengths
1
6
content
stringlengths
4
977k
license
stringclasses
15 values
girirajsharma/wildfly-elytron
src/main/java/org/wildfly/security/manager/action/ClearContextClassLoaderAction.java
1940
/*
 * JBoss, Home of Professional Open Source.
 * Copyright 2013, Red Hat, Inc., and individual contributors
 * as indicated by the @author tags. See the copyright.txt file in the
 * distribution for a full listing of individual contributors.
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * This software is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this software; if not, write to the Free
 * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
 */

package org.wildfly.security.manager.action;

import java.security.PrivilegedAction;

/**
 * A {@link PrivilegedAction} that clears (sets to {@code null}) the context class
 * loader of the calling thread, returning the class loader that was previously
 * installed on the thread.
 *
 * @author <a href="mailto:david.lloyd@redhat.com">David M. Lloyd</a>
 */
public final class ClearContextClassLoaderAction implements PrivilegedAction<ClassLoader> {

    private static final ClearContextClassLoaderAction INSTANCE = new ClearContextClassLoaderAction();

    private ClearContextClassLoaderAction() {
    }

    /**
     * Get the singleton instance.
     *
     * @return the singleton instance
     */
    public static ClearContextClassLoaderAction getInstance() {
        return INSTANCE;
    }

    /**
     * Clear the calling thread's context class loader.
     *
     * @return the context class loader that was set before this call (may be {@code null})
     */
    public ClassLoader run() {
        final Thread currentThread = Thread.currentThread();
        final ClassLoader previous = currentThread.getContextClassLoader();
        currentThread.setContextClassLoader(null);
        return previous;
    }
}
apache-2.0
aemay2/hapi-fhir
hapi-fhir-docs/src/main/java/ca/uhn/hapi/fhir/docs/FhirTesterConfig.java
2800
package ca.uhn.hapi.fhir.docs; /*- * #%L * HAPI FHIR - Docs * %% * Copyright (C) 2014 - 2022 Smile CDR, Inc. * %% * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ import ca.uhn.fhir.context.FhirVersionEnum; import ca.uhn.fhir.to.FhirTesterMvcConfig; import ca.uhn.fhir.to.TesterConfig; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import org.springframework.context.annotation.Import; // START SNIPPET: file /** * This spring config file configures the web testing module. It serves two * purposes: * 1. It imports FhirTesterMvcConfig, which is the spring config for the * tester itself * 2. It tells the tester which server(s) to talk to, via the testerConfig() * method below */ @Configuration @Import(FhirTesterMvcConfig.class) public class FhirTesterConfig { /** * This bean tells the testing webpage which servers it should configure itself * to communicate with. In this example we configure it to talk to the local * server, as well as one public server. If you are creating a project to * deploy somewhere else, you might choose to only put your own server's * address here. * * Note the use of the ${serverBase} variable below. This will be replaced with * the base URL as reported by the server itself. Often for a simple Tomcat * (or other container) installation, this will end up being something * like "http://localhost:8080/hapi-fhir-jpaserver-example". 
If you are * deploying your server to a place with a fully qualified domain name, * you might want to use that instead of using the variable. */ @Bean public TesterConfig testerConfig() { TesterConfig retVal = new TesterConfig(); retVal .addServer() .withId("home") .withFhirVersion(FhirVersionEnum.DSTU2) .withBaseUrl("${serverBase}/fhir") .withName("Local Tester") .addServer() .withId("hapi") .withFhirVersion(FhirVersionEnum.DSTU2) .withBaseUrl("http://fhirtest.uhn.ca/baseDstu2") .withName("Public HAPI Test Server"); /* * Use the method below to supply a client "factory" which can be used * if your server requires authentication */ // retVal.setClientFactory(clientFactory); return retVal; } } // END SNIPPET: file
apache-2.0
mayonghui2112/helloWorld
sourceCode/testMaven/onjava8/src/main/java/reuse/DerivedSpaceShip.java
570
package reuse;// reuse/DerivedSpaceShip.java // (c)2017 MindView LLC: see Copyright.txt // We make no guarantees that this code is fit for any purpose. // Visit http://OnJava8.com for more book information. public class DerivedSpaceShip extends SpaceShipControls { private String name; public DerivedSpaceShip(String name) { this.name = name; } @Override public String toString() { return name; } public static void main(String[] args) { DerivedSpaceShip protector = new DerivedSpaceShip("NSEA Protector"); protector.forward(100); } }
apache-2.0
SeldonIO/seldon-server
server/src/io/seldon/api/Constants.java
6180
/* * Seldon -- open source prediction engine * ======================================= * * Copyright 2011-2015 Seldon Technologies Ltd and Rummble Ltd (http://www.seldon.io/) * * ******************************************************************************************** * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * ******************************************************************************************** */ package io.seldon.api; /** * @author claudio */ public class Constants { public final static String CONTENT_TYPE_JSON = "application/json"; public final static String CACHE_CONTROL = "Cache-Control"; public final static String NO_CACHE = "no-store"; public final static String ACCESS_TOKEN = "access_token"; public final static String TOKEN_TYPE = "token_type"; public final static String TOKEN_SCOPE = "token_scope"; public final static String EXPIRES_IN = "expires_in"; public final static String GET = "GET"; public final static String POST = "POST"; public final static String PUT = "PUT"; public final static String DELETE = "DELETE"; public final static String AUTHORIZATION = "Authorization"; public final static String API_DB = "api"; public final static long TOKEN_TIMEOUT = 6000; public final static String CONSUMER_KEY = "consumer_key"; public final static String CONSUMER_SECRET = "consumer_secret"; public final static String OAUTH_TOKEN = "oauth_token"; public final static String TOKEN_ALL_SCOPE = "all"; public final static String TOKEN_READONLY_SCOPE = "readonly"; 
public static final String TOKEN_JAVASCRIPT_SCOPE = "js"; //RESOURCE NAME public final static String DEFAULT_RESOURCE_NAME = "resource"; public final static String TOKEN_RESOURCE_NAME = "token"; public final static String CONSUMER_RESOURCE_NAME = "consumer"; public final static String ERROR_RESOURCE_NAME = "error"; public final static String USER_RESOURCE_NAME = "user"; public final static String USERS_RESOURCE_NAME = "users"; public final static String RECOMMENDATION_RESOURCE_NAME = "recommendation"; public final static String RECOMMENDATIONS_RESOURCE_NAME = "recommendations"; public final static String ITEM_RESOURCE_NAME = "item"; public final static String ITEMS_RESOURCE_NAME = "items"; public final static String ITEMSIMILARITYNODE_RESOURCE_NAME = "itemsimilaritynode"; public final static String ITEMSIMILARITYGRAPH_RESOURCE_NAME = "itemsimilaritygraph"; //DEFAULT VALUES public final static int DEFAULT_TIMES = 1; public final static int DEFAULT_DIMENSION = 0; public final static int DEFAULT_DEMOGRAPHIC = 0; public final static int DEFAULT_RESULT_LIMIT = 10; public final static int DEFAULT_BIGRESULT_LIMIT = 100; public final static long POSITION_NOT_DEFINED = 0; public final static int USER_ITEM_NOINTERACTION = 0; public final static int USER_ITEM_DEFAULT_INTERACTION = 1; public final static int OPINION_NOT_DEFINED_VALUE = -1; public final static double SIMILARITY_NOT_DEFINED = -1.0; public final static double TRUST_NOT_DEFINED = -1.0; public final static int CACHING_TIME = 3600; public final static int USERBEAN_CACHING_TIME = 7200; public final static int LONG_CACHING_TIME = 86400; public final static boolean CACHING = true; public final static double DEFAULT_OPINION_VALUE = 0; public final static int NO_LIMIT = Integer.MAX_VALUE; //URL QUERY PARAMETER public final static String URL_LIMIT = "limit"; public final static String URL_FULL = "full"; public final static String URL_KEYWORD = "keyword"; public static final int NO_TRUST_DIMENSION = -1; public static final 
String URL_ATTR_DIMENSION = "dimension"; public static final String URL_ATTR_DIMENSIONS = "dimensions"; public static final String URL_ATTR_SORT = "sort"; public static final String URL_NAME = "name"; public static final String URL_ATTR_NAME = "attr_name"; public static final String URL_SORT = "sort"; public static final String URL_TYPE = "type"; public static final String URL_ALGORITHMS = "algorithms"; public static final String URL_ATTRIBUTES = "attributes"; public static final String URL_ATTR_LOCALE = "rec_locale"; //SORT FIELDs public final static String SORT_ID = "id"; public final static String SORT_NAME = "name"; public final static String SORT_LAST_ACTION = "last_action"; public final static String SORT_POPULARITY = "popularity"; public final static String SORT_DATE = "date"; //USER public final static int DEFAULT_USER_TYPE = 1; //ITEM public final static int DEFAULT_ITEM_TYPE= 1; public final static int ITEM_NOT_VALID = 0; //TYPES public final static String TYPE_VARCHAR = "VARCHAR"; public final static String TYPE_INT = "INT"; public final static String TYPE_BIGINT = "BIGINT"; public final static String TYPE_DOUBLE = "DOUBLE"; public final static String TYPE_TEXT = "TEXT"; public final static String TYPE_DATETIME = "DATETIME"; public final static String TYPE_BOOLEAN = "BOOLEAN"; public final static String TYPE_ENUM = "ENUM"; //attribute id defining the item type public final static int ATTRIBUTE_TYPE = 0; public static final int VARCHAR_SIZE = 255; //Default item attributes public final static String ITEM_ATTR_TITLE = "title"; public final static String ITEM_ATTR_IMG = "img_url"; public final static String ITEM_ATTR_CAT = "category"; public final static String ITEM_ATTR_SUBCAT = "subcategory"; public final static String ITEM_ATTR_TAGS = "tags"; //LOG public final static int LIST_LOG_LIMIT = 10; // ID prefixes public static final String FACEBOOK_ID_PREFIX = "_fb_"; public static final Long ANONYMOUS_USER = -1L; public static String DEFAULT_CLIENT = 
"default_client"; }
apache-2.0
javaduke/mule-intellij-plugins
data-weave-plugin/src/main/gen/org/mule/tooling/lang/dw/parser/psi/WeaveTypeLiteral.java
307
// This is a generated file. Not intended for manual editing. package org.mule.tooling.lang.dw.parser.psi; import java.util.List; import org.jetbrains.annotations.*; import com.intellij.psi.PsiElement; public interface WeaveTypeLiteral extends WeaveExpression { @Nullable WeaveSchema getSchema(); }
apache-2.0
nknize/elasticsearch
x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/UpdateParams.java
5657
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.ml.job.process.autodetect;

import org.elasticsearch.common.Nullable;
import org.elasticsearch.xpack.core.ml.job.config.JobUpdate;
import org.elasticsearch.xpack.core.ml.job.config.MlFilter;
import org.elasticsearch.xpack.core.ml.job.config.ModelPlotConfig;
import org.elasticsearch.xpack.core.ml.job.config.PerPartitionCategorizationConfig;

import java.util.HashSet;
import java.util.List;
import java.util.Objects;
import java.util.Set;

/**
 * Immutable parameter holder describing an update to be applied to a running
 * autodetect process. Create instances via the static factory methods or
 * {@link #builder(String)}.
 */
public final class UpdateParams {

    private final String jobId;
    private final ModelPlotConfig modelPlotConfig;
    private final PerPartitionCategorizationConfig perPartitionCategorizationConfig;
    private final List<JobUpdate.DetectorUpdate> detectorUpdates;
    private final MlFilter filter;
    private final boolean updateScheduledEvents;

    private UpdateParams(String jobId,
                         @Nullable ModelPlotConfig modelPlotConfig,
                         @Nullable PerPartitionCategorizationConfig perPartitionCategorizationConfig,
                         @Nullable List<JobUpdate.DetectorUpdate> detectorUpdates,
                         @Nullable MlFilter filter,
                         boolean updateScheduledEvents) {
        this.jobId = Objects.requireNonNull(jobId);
        this.modelPlotConfig = modelPlotConfig;
        this.perPartitionCategorizationConfig = perPartitionCategorizationConfig;
        this.detectorUpdates = detectorUpdates;
        this.filter = filter;
        this.updateScheduledEvents = updateScheduledEvents;
    }

    /** @return the id of the job this update applies to (never {@code null}) */
    public String getJobId() {
        return jobId;
    }

    @Nullable
    public ModelPlotConfig getModelPlotConfig() {
        return modelPlotConfig;
    }

    @Nullable
    public PerPartitionCategorizationConfig getPerPartitionCategorizationConfig() {
        return perPartitionCategorizationConfig;
    }

    @Nullable
    public List<JobUpdate.DetectorUpdate> getDetectorUpdates() {
        return detectorUpdates;
    }

    @Nullable
    public MlFilter getFilter() {
        return filter;
    }

    /**
     * Returns true if the update params include a job update,
     * ie an update to the job config directly rather than an
     * update to external resources a job uses (e.g. calendars, filters).
     */
    public boolean isJobUpdate() {
        return modelPlotConfig != null
            || detectorUpdates != null
            || perPartitionCategorizationConfig != null;
    }

    public boolean isUpdateScheduledEvents() {
        return updateScheduledEvents;
    }

    /**
     * Returns all filters referenced by this update: the explicitly supplied
     * filter (if any) plus every filter referenced by the detector update rules.
     *
     * @return all referenced filter ids
     */
    public Set<String> extractReferencedFilters() {
        final Set<String> referencedFilterIds = new HashSet<>();
        if (filter != null) {
            referencedFilterIds.add(filter.getId());
        }
        if (detectorUpdates != null) {
            detectorUpdates.stream()
                .flatMap(detectorUpdate -> detectorUpdate.getRules().stream())
                .forEach(rule -> referencedFilterIds.addAll(rule.extractReferencedFilters()));
        }
        return referencedFilterIds;
    }

    public static UpdateParams fromJobUpdate(JobUpdate jobUpdate) {
        return new Builder(jobUpdate.getJobId())
            .modelPlotConfig(jobUpdate.getModelPlotConfig())
            .perPartitionCategorizationConfig(jobUpdate.getPerPartitionCategorizationConfig())
            .detectorUpdates(jobUpdate.getDetectorUpdates())
            .updateScheduledEvents(jobUpdate.getGroups() != null)
            .build();
    }

    public static UpdateParams filterUpdate(String jobId, MlFilter filter) {
        return new Builder(jobId).filter(filter).build();
    }

    public static UpdateParams scheduledEventsUpdate(String jobId) {
        return new Builder(jobId).updateScheduledEvents(true).build();
    }

    public static Builder builder(String jobId) {
        return new Builder(jobId);
    }

    /** Fluent builder for {@link UpdateParams}. */
    public static class Builder {

        private final String jobId;
        private ModelPlotConfig modelPlotConfig;
        private PerPartitionCategorizationConfig perPartitionCategorizationConfig;
        private List<JobUpdate.DetectorUpdate> detectorUpdates;
        private MlFilter filter;
        private boolean updateScheduledEvents;

        public Builder(String jobId) {
            this.jobId = Objects.requireNonNull(jobId);
        }

        public Builder modelPlotConfig(ModelPlotConfig modelPlotConfig) {
            this.modelPlotConfig = modelPlotConfig;
            return this;
        }

        public Builder perPartitionCategorizationConfig(PerPartitionCategorizationConfig perPartitionCategorizationConfig) {
            this.perPartitionCategorizationConfig = perPartitionCategorizationConfig;
            return this;
        }

        public Builder detectorUpdates(List<JobUpdate.DetectorUpdate> detectorUpdates) {
            this.detectorUpdates = detectorUpdates;
            return this;
        }

        public Builder filter(MlFilter filter) {
            this.filter = filter;
            return this;
        }

        public Builder updateScheduledEvents(boolean updateScheduledEvents) {
            this.updateScheduledEvents = updateScheduledEvents;
            return this;
        }

        public UpdateParams build() {
            return new UpdateParams(jobId, modelPlotConfig, perPartitionCategorizationConfig,
                detectorUpdates, filter, updateScheduledEvents);
        }
    }
}
apache-2.0
jtwig/jtwig-core
src/test/java/org/jtwig/value/context/JtwigModelValueContextTest.java
1229
package org.jtwig.value.context; import com.google.common.base.Optional; import org.jtwig.JtwigModel; import org.jtwig.reflection.model.Value; import org.jtwig.value.Undefined; import org.junit.Test; import static org.junit.Assert.assertSame; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class JtwigModelValueContextTest { private final JtwigModel jtwigModel = mock(JtwigModel.class); private JtwigModelValueContext underTest = new JtwigModelValueContext(jtwigModel); @Test public void resolveUndefined() throws Exception { String key = "key"; when(jtwigModel.get(key)).thenReturn(Optional.<Value>absent()); Object result = underTest.resolve(key); assertSame(Undefined.UNDEFINED, result); } @Test public void resolveDefined() throws Exception { String key = "key"; Object value = new Object(); when(jtwigModel.get(key)).thenReturn(Optional.of(new Value(value))); Object result = underTest.resolve(key); assertSame(value, result); } @Test(expected = IllegalArgumentException.class) public void with() throws Exception { underTest.with("key", null); } }
apache-2.0
tokee/lucene
contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchWithSortTask.java
4632
package org.apache.lucene.benchmark.byTask.tasks; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import org.apache.lucene.benchmark.byTask.PerfRunData; import org.apache.lucene.benchmark.byTask.feeds.QueryMaker; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; /** * Does sort search on specified field. * */ public class SearchWithSortTask extends ReadTask { private boolean doScore = true; private boolean doMaxScore = true; private Sort sort; public SearchWithSortTask(PerfRunData runData) { super(runData); } /** * SortFields: field:type,field:type[,noscore][,nomaxscore] * * If noscore is present, then we turn off score tracking * in {@link org.apache.lucene.search.TopFieldCollector}. * If nomaxscore is present, then we turn off maxScore tracking * in {@link org.apache.lucene.search.TopFieldCollector}. 
* * name:string,page:int,subject:string * */ @Override public void setParams(String sortField) { super.setParams(sortField); String[] fields = sortField.split(","); SortField[] sortFields = new SortField[fields.length]; int upto = 0; for (int i = 0; i < fields.length; i++) { String field = fields[i]; SortField sortField0; if (field.equals("doc")) { sortField0 = SortField.FIELD_DOC; } if (field.equals("score")) { sortField0 = SortField.FIELD_SCORE; } else if (field.equals("noscore")) { doScore = false; continue; } else if (field.equals("nomaxscore")) { doMaxScore = false; continue; } else { int index = field.lastIndexOf(":"); String fieldName; String typeString; if (index != -1) { fieldName = field.substring(0, index); typeString = field.substring(1+index, field.length()); } else { throw new RuntimeException("You must specify the sort type ie page:int,subject:string"); } int type = getType(typeString); sortField0 = new SortField(fieldName, type); } sortFields[upto++] = sortField0; } if (upto < sortFields.length) { SortField[] newSortFields = new SortField[upto]; System.arraycopy(sortFields, 0, newSortFields, 0, upto); sortFields = newSortFields; } this.sort = new Sort(sortFields); } private int getType(String typeString) { int type; if (typeString.equals("float")) { type = SortField.FLOAT; } else if (typeString.equals("double")) { type = SortField.DOUBLE; } else if (typeString.equals("byte")) { type = SortField.BYTE; } else if (typeString.equals("short")) { type = SortField.SHORT; } else if (typeString.equals("int")) { type = SortField.INT; } else if (typeString.equals("long")) { type = SortField.LONG; } else if (typeString.equals("string")) { type = SortField.STRING; } else if (typeString.equals("string_val")) { type = SortField.STRING_VAL; } else { throw new RuntimeException("Unrecognized sort field type " + typeString); } return type; } @Override public boolean supportsParams() { return true; } @Override public QueryMaker getQueryMaker() { return 
getRunData().getQueryMaker(this); } @Override public boolean withRetrieve() { return false; } @Override public boolean withSearch() { return true; } @Override public boolean withTraverse() { return false; } @Override public boolean withWarm() { return false; } @Override public boolean withScore() { return doScore; } @Override public boolean withMaxScore() { return doMaxScore; } @Override public Sort getSort() { if (sort == null) { throw new IllegalStateException("No sort field was set"); } return sort; } }
apache-2.0
Bernardo-MG/pendragon-model-api
src/main/java/com/wandrell/tabletop/pendragon/model/glory/FatherClassGlory.java
205
package com.wandrell.tabletop.pendragon.model.glory; public interface FatherClassGlory { public Integer getBaseGlory(); public String getFatherClass(); public Integer getYearlyGlory(); }
apache-2.0
apache/openejb
container/openejb-jee/src/main/java/org/apache/openejb/jee/jba/cmp/CleanReadAheadOnLoad.java
2135
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.openejb.jee.jba.cmp; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; import javax.xml.bind.annotation.XmlType; import javax.xml.bind.annotation.XmlValue; /** * <p>Java class for anonymous complex type. * * <p>The following schema fragment specifies the expected content contained within this class. * * <pre> * &lt;complexType> * &lt;complexContent> * &lt;restriction base="{http://www.w3.org/2001/XMLSchema}anyType"> * &lt;/restriction> * &lt;/complexContent> * &lt;/complexType> * </pre> * * */ @XmlAccessorType(XmlAccessType.FIELD) @XmlType(name = "", propOrder = { "content" }) @XmlRootElement(name = "clean-read-ahead-on-load") public class CleanReadAheadOnLoad { @XmlValue protected String content; /** * Gets the value of the content property. * * @return * possible object is * {@link String } * */ public String getContent() { return content; } /** * Sets the value of the content property. * * @param value * allowed object is * {@link String } * */ public void setContent(String value) { this.content = value; } }
apache-2.0
ermh/Gdata-mavenized
java/src/com/google/gdata/client/http/GoogleGDataRequest.java
18502
/* Copyright (c) 2008 Google Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.gdata.client.http; import com.google.gdata.client.GDataProtocol; import com.google.gdata.client.GoogleService; import com.google.gdata.client.GoogleService.SessionExpiredException; import com.google.gdata.client.Service.GDataRequest; import com.google.gdata.util.AuthenticationException; import com.google.gdata.util.ContentType; import com.google.gdata.util.RedirectRequiredException; import com.google.gdata.util.ServiceException; import com.google.gdata.util.Version; import java.io.IOException; import java.lang.reflect.InvocationTargetException; import java.net.CookieHandler; import java.net.HttpURLConnection; import java.net.URI; import java.net.URL; import java.text.SimpleDateFormat; import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.logging.Logger; /** * The GoogleGDataRequest class provides a basic implementation of an * interface to connect with a Google-GData server. * * */ public class GoogleGDataRequest extends HttpGDataRequest { private static final Logger logger = Logger.getLogger(GoogleGDataRequest.class.getName()); /** * If set, this System property will globally disable interception and * handling of cookies for all GData services. 
*/ public static final String DISABLE_COOKIE_HANDLER_PROPERTY = "com.google.gdata.DisableCookieHandler"; /* * Disables cookie handling when run in AppEngine. This is a no-op if run * outside of AppEngine. */ static { try { Class apiProxyClass = Class.forName( "com.google.apphosting.api.ApiProxy"); if (apiProxyClass.getMethod( "getCurrentEnvironment").invoke(null) != null) { System.setProperty(DISABLE_COOKIE_HANDLER_PROPERTY, "true"); } } catch (ClassNotFoundException e) { } catch (IllegalAccessException e) { } catch (InvocationTargetException e) { } catch (NoSuchMethodException e) { } } /** * The GoogleGDataRequest.Factory class is a factory class for * constructing new GoogleGDataRequest instances. */ public static class Factory extends HttpGDataRequest.Factory { @Override protected GDataRequest createRequest(RequestType type, URL requestUrl, ContentType contentType) throws IOException, ServiceException { return new GoogleGDataRequest(type, requestUrl, contentType, authToken, headerMap, privateHeaderMap, connectionSource); } } /** * Google cookie. */ public static class GoogleCookie { // Cookie state. All fields have public accessors, except for cookie // values which are restricted to package-level access for security. private String domain; public String getDomain() { return domain; } private String path; public String getPath() { return path; } private String name; public String getName() { return name; } private String value; String getValue() { return value; } private Date expires; public Date getExpires() { return (expires != null) ? (Date) expires.clone() : null; } /** * Constructs a new GoogleCookie instance. * * @param uri the original request URI that returned Set-Cookie header * in the response * @param cookieHeader the value of the Set-Cookie header. 
*/ public GoogleCookie(URI uri, String cookieHeader) { // Set default values String attributes[] = cookieHeader.split(";"); String nameValue = attributes[0].trim(); int equals = nameValue.indexOf('='); if (equals < 0) { throw new IllegalArgumentException("Cookie is not a name/value pair"); } this.name = nameValue.substring(0, equals); this.value = nameValue.substring(equals + 1); this.path = "/"; this.domain = uri.getHost(); // Process optional cookie attributes for (int i = 1; i < attributes.length; i++) { nameValue = attributes[i].trim(); equals = nameValue.indexOf('='); if (equals == -1) { continue; } String name = nameValue.substring(0, equals); String value = nameValue.substring(equals + 1); if (name.equalsIgnoreCase("domain")) { if (uri.getPort() > 0) { // ignore port int colon = value.lastIndexOf(':'); if (colon > 0) { value = value.substring(0, colon); } } String uriDomain = uri.getHost(); if (uriDomain.equals(value)) { this.domain = value; } else { if (!matchDomain(uriDomain, value)) { throw new IllegalArgumentException( "Trying to set foreign cookie"); } } this.domain = value; } else if (name.equalsIgnoreCase("path")) { this.path = value; } else if (name.equalsIgnoreCase("expires")) { try { this.expires = new SimpleDateFormat("E, dd-MMM-yyyy k:m:s 'GMT'", Locale.US) .parse(value); } catch (java.text.ParseException e) { try { this.expires = new SimpleDateFormat("E, dd MMM yyyy k:m:s 'GMT'", Locale.US) .parse(value); } catch (java.text.ParseException e2) { throw new IllegalArgumentException( "Bad date format in header: " + value); } } } } } /** * Returns true if the full domain's final segments match * the tail domain. 
*/ private boolean matchDomain(String testDomain, String tailDomain) { // Simple check if (!testDomain.endsWith(tailDomain)) { return false; } // Exact match if (testDomain.length() == tailDomain.length()) { return true; } // Verify that a segment match happened, not a partial match if (tailDomain.charAt(0) == '.') { return true; } return testDomain.charAt(testDomain.length() - tailDomain.length() - 1) == '.'; } /** * Returns {@code true} if the cookie has expired. */ public boolean hasExpired() { if (expires == null) { return false; } Date now = new Date(); return now.after(expires); } /** * Returns {@code true} if the cookie hasn't expired, the * URI domain matches, and the URI path starts with the * cookie path. * * @param uri URI to check against * @return true if match, false otherwise */ public boolean matches(URI uri) { if (hasExpired()) { return false; } String uriDomain = uri.getHost(); if (!matchDomain(uriDomain, domain)) { return false; } String path = uri.getPath(); if (path == null) { path = "/"; } return path.startsWith(this.path); } /** * Returns the actual name/value pair that should be sent in a * Cookie request header. */ String getHeaderValue() { StringBuilder result = new StringBuilder(name); result.append("="); result.append(value); return result.toString(); } /** * Returns {@code true} if the target object is a GoogleCookie that * has the same name as this cookie and that matches the same target * domain and path as this cookie. Cookie expiration and value * <b>are not</b> taken into account when considering equivalence. 
*/ @Override public boolean equals(Object o) { if (o == null || !(o instanceof GoogleCookie)) { return false; } GoogleCookie cookie = (GoogleCookie) o; if (!name.equals(cookie.name) || !domain.equals(cookie.domain)) { return false; } if (path == null) { if (cookie.path != null) { return false; } return true; } return path.equals(cookie.path); } @Override public int hashCode() { int result = 17; result = 37 * result + name.hashCode(); result = 37 * result + domain.hashCode(); result = 37 * result + (path != null ? path.hashCode() : 0); return result; } @Override public String toString() { StringBuilder buf = new StringBuilder("GoogleCookie("); buf.append(domain); buf.append(path); buf.append("["); buf.append(name); buf.append("]"); buf.append(")"); return buf.toString(); } } /** * Implements a scoped cookie handling mechanism for GData services. This * handler is a singleton class that is registered to globally listen and * set cookies using {@link CookieHandler#setDefault(CookieHandler)}. It * will only process HTTP headers and responses associated with GData * services, and will delegate the processing of any other cookie headers * to the previously registered {@link CookieHandler} (if any). When * a Set-Cookie response header is found, it will save any associated * cookie in the cookie cache associated with the {@link GoogleService} * issuing the request. Similarly, when a {@link GoogleService} issues * a request, it will check its cookie cache and add any necessary * Cookie header. */ private static class GoogleCookieHandler extends CookieHandler { private CookieHandler nextHandler; // This is a singleton, only constructed once at class load time. 
private GoogleCookieHandler() { // Install the global GoogleCookieHandler instance, chaining to any // existing CookieHandler if (!Boolean.getBoolean(DISABLE_COOKIE_HANDLER_PROPERTY)) { logger.fine("Installing GoogleCookieHandler"); nextHandler = CookieHandler.getDefault(); CookieHandler.setDefault(this); } } @Override public Map<String, List<String>> get( URI uri, Map<String, List<String>> requestHeaders) throws IOException { Map<String, List<String>> cookieHeaders = new HashMap<String, List<String>>(); // Only service requests initiated by GData services with cookie // handling enabled. GoogleService service = activeService.get(); if (service != null && service.handlesCookies()) { // Get the list of matching cookies and accumulate a buffer // containing the cookie name/value pairs. Set<GoogleCookie> cookies = service.getCookies(); StringBuilder cookieBuf = new StringBuilder(); for (GoogleCookie cookie : cookies) { if (cookie.matches(uri)) { if (cookieBuf.length() > 0) { cookieBuf.append("; "); } cookieBuf.append(cookie.getHeaderValue()); logger.fine("Setting cookie: " + cookie); } } // If any matching cookies were found, update the request headers. // Note: it's assumed here that nothing else is setting the Cookie // header, which seems reasonable; otherwise we'd have to parse the // existing value and add/merge managed cookies. if (cookieBuf.length() != 0) { cookieHeaders.put("Cookie", Collections.singletonList(cookieBuf.toString())); } } else { if (nextHandler != null) { return nextHandler.get(uri, requestHeaders); } } return Collections.unmodifiableMap(cookieHeaders); } @Override public void put(URI uri, Map<String, List<String>> responseHeaders) throws IOException { // Only service requests initiated by GData services with cookie // handling enabled. 
GoogleService service = activeService.get(); if (service != null && service.handlesCookies()) { List<String> setCookieList = responseHeaders.get("Set-Cookie"); if (setCookieList != null && setCookieList.size() > 0) { for (String cookieValue : setCookieList) { GoogleCookie cookie = new GoogleCookie(uri, cookieValue); service.addCookie(cookie); logger.fine("Adding cookie:" + cookie); } } } else { if (nextHandler != null) { nextHandler.get(uri, responseHeaders); } } } } /** * Holds the GoogleService that is executing requests for the current * execution thread. */ private static final ThreadLocal<GoogleService> activeService = new ThreadLocal<GoogleService>(); /** * The global CookieHandler instance for GData services. */ @SuppressWarnings("unused") // instance init installs global hooks. private static final GoogleCookieHandler googleCookieHandler; static { if (!Boolean.getBoolean(DISABLE_COOKIE_HANDLER_PROPERTY)) { googleCookieHandler = new GoogleCookieHandler(); } else { googleCookieHandler = null; } } /** * Constructs a new GoogleGDataRequest instance of the specified * RequestType, targeting the specified URL with the specified * authentication token. * * @param type type of GDataRequest * @param requestUrl request target URL * @param authToken token authenticating request to server * @param headerMap map containing additional headers to set * @param privateHeaderMap map containing additional headers to set * that should not be logged (eg. authentication info) * @throws IOException on error initializing service connection */ protected GoogleGDataRequest(RequestType type, URL requestUrl, ContentType contentType, HttpAuthToken authToken, Map<String, String> headerMap, Map<String, String> privateHeaderMap, HttpUrlConnectionSource connectionSource) throws IOException { super(type, requestUrl, contentType, authToken, headerMap, privateHeaderMap, connectionSource); } /** * The GoogleService instance that constructed the request. 
*/ private GoogleService service; /** * Returns the {@link Version} that will be used to execute the request on the * target service or {@code null} if the service is not versioned. * * @return version sent with the request or {@code null}. */ public Version getRequestVersion() { // Always get the request version from the associated service, never from // the version registry. There are aspects of request handling that happen // outside the scope of Service.begin/endVersionScope. return service.getProtocolVersion(); } /** * The version associated with the response. */ private Version responseVersion; /** * Returns the {@link Version} that was used by the target service to execute * the request or {@code null} if the service is not versioned. * * @return version returned with the response or {@code null}. */ public Version getResponseVersion() { if (!executed) { throw new IllegalStateException("Request has not been executed"); } return responseVersion; } /** * Sets the GoogleService associated with the request. */ public void setService(GoogleService service) { this.service = service; // This undocumented system property can be used to disable version headers. // It exists only to support some unit test scenarios for query-parameter // version configuration and back-compat defaulting when no version // information is sent by the client library. if (Boolean.getBoolean("GoogleGDataRequest.disableVersionHeader")) { return; } // Look up the active version for the type of service initiating the // request, and set the version header if found. try { Version requestVersion = service.getProtocolVersion(); if (requestVersion != null) { setHeader(GDataProtocol.Header.VERSION, requestVersion.getVersionString()); } } catch (IllegalStateException iae) { // Service may not be versioned. } } @Override public void execute() throws IOException, ServiceException { // Set the current active service, so cookie handling will be enabled. 
try { activeService.set(service); // Propagate redirects to our layer to add URL specific data to the // request (like URL dependant authentication headers) httpConn.setInstanceFollowRedirects(false); super.execute(); // Capture the version used to process the request String versionHeader = httpConn.getHeaderField(GDataProtocol.Header.VERSION); if (versionHeader != null) { GoogleService service = activeService.get(); if (service != null) { responseVersion = new Version(service.getClass(), versionHeader); } } } finally { activeService.set(null); } } @Override protected void handleErrorResponse() throws IOException, ServiceException { try { switch (httpConn.getResponseCode()) { case HttpURLConnection.HTTP_MOVED_PERM: case HttpURLConnection.HTTP_MOVED_TEMP: throw new RedirectRequiredException(httpConn); } super.handleErrorResponse(); } catch (AuthenticationException e) { // Throw a more specific exception for session expiration. String msg = e.getMessage(); if (msg != null && msg.contains("Token expired")) { SessionExpiredException se = new SessionExpiredException(e.getMessage()); se.setResponse(e.getResponseContentType(), e.getResponseBody()); throw se; } throw e; } } }
apache-2.0
punkhorn/camel-upstream
core/camel-api/src/main/java/org/apache/camel/support/jsse/CipherSuitesParameters.java
1929
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.camel.support.jsse;

import java.util.ArrayList;
import java.util.List;

/**
 * Represents a list of TLS/SSL cipher suite names.
 */
public class CipherSuitesParameters {

    // Backing list. Created on first access so that getCipherSuite() can
    // always hand out a non-null, mutable reference.
    private List<String> cipherSuite;

    /**
     * Returns a live reference to the list of cipher suite names.
     *
     * @return a reference to the list, never {@code null}
     */
    public List<String> getCipherSuite() {
        if (cipherSuite == null) {
            cipherSuite = new ArrayList<>();
        }
        return cipherSuite;
    }

    /**
     * Sets the cipher suite. It creates a copy of the given cipher suite.
     *
     * @param cipherSuite cipher suite
     */
    public void setCipherSuite(List<String> cipherSuite) {
        if (cipherSuite == null) {
            this.cipherSuite = null;
        } else {
            this.cipherSuite = new ArrayList<>(cipherSuite);
        }
    }

    @Override
    public String toString() {
        // Same rendering as the previous StringBuilder-based implementation.
        return "CipherSuitesParameters[cipherSuite=" + getCipherSuite() + "]";
    }
}
apache-2.0
rrenomeron/cas
core/cas-server-core-authentication-mfa-api/src/main/java/org/apereo/cas/authentication/DefaultMultifactorAuthenticationContextValidator.java
5828
package org.apereo.cas.authentication;

import org.apereo.cas.services.RegisteredService;
import org.apereo.cas.util.CollectionUtils;

import lombok.Getter;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import lombok.val;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.tuple.Pair;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.core.OrderComparator;

import java.util.Arrays;
import java.util.Collection;
import java.util.LinkedHashSet;
import java.util.Optional;
import java.util.stream.Collectors;

/**
 * The {@link DefaultMultifactorAuthenticationContextValidator} is responsible for evaluating an authentication
 * object to see whether it satisfied a requested authentication context.
 *
 * @author Misagh Moayyed
 * @since 4.3
 */
@Slf4j
@Getter
@RequiredArgsConstructor
public class DefaultMultifactorAuthenticationContextValidator implements MultifactorAuthenticationContextValidator {
    private final String authenticationContextAttribute;

    private final String globalFailureMode;

    private final String mfaTrustedAuthnAttributeName;

    private final ConfigurableApplicationContext applicationContext;

    /**
     * Locate the provider whose id exactly matches the requested context value.
     *
     * @param providersArray    available providers to search
     * @param requestedProvider the requested context / provider id
     * @return the matching provider, if any
     */
    private static Optional<MultifactorAuthenticationProvider> locateRequestedProvider(
        final Collection<MultifactorAuthenticationProvider> providersArray, final String requestedProvider) {
        return providersArray.stream().filter(provider -> provider.getId().equals(requestedProvider)).findFirst();
    }

    /**
     * {@inheritDoc}
     * If the authentication event was established as part of a trusted browser/device
     * such that MFA was skipped, validation is allowed to succeed. Likewise, if the
     * authentication event bypassed MFA, validation is allowed to succeed.
     *
     * @param authentication   the authentication
     * @param requestedContext the requested context
     * @param service          the service
     * @return pair of (satisfied flag, resolved provider); the provider is empty when the
     * requested context does not map to any registered provider.
     */
    @Override
    public Pair<Boolean, Optional<MultifactorAuthenticationProvider>> validate(final Authentication authentication,
                                                                               final String requestedContext,
                                                                               final RegisteredService service) {
        val attributes = authentication.getAttributes();
        val ctxAttr = attributes.get(this.authenticationContextAttribute);
        val contexts = CollectionUtils.toCollection(ctxAttr);
        LOGGER.trace("Attempting to match requested authentication context [{}] against [{}]", requestedContext, contexts);
        val providerMap = MultifactorAuthenticationUtils.getAvailableMultifactorAuthenticationProviders(this.applicationContext);
        LOGGER.trace("Available MFA providers are [{}]", providerMap.values());

        // The requested context must name a registered provider; otherwise fail fast.
        val requestedProvider = locateRequestedProvider(providerMap.values(), requestedContext);
        if (requestedProvider.isEmpty()) {
            LOGGER.debug("Requested authentication provider cannot be recognized.");
            return Pair.of(Boolean.FALSE, Optional.empty());
        }

        LOGGER.debug("Requested context is [{}] and available contexts are [{}]", requestedContext, contexts);
        if (contexts.stream().anyMatch(ctx -> ctx.toString().equals(requestedContext))) {
            LOGGER.debug("Requested authentication context [{}] is satisfied", requestedContext);
            return Pair.of(Boolean.TRUE, requestedProvider);
        }

        // Device already trusted: accept without re-running MFA.
        if (StringUtils.isNotBlank(this.mfaTrustedAuthnAttributeName)
            && attributes.containsKey(this.mfaTrustedAuthnAttributeName)) {
            LOGGER.debug("Requested authentication context [{}] is satisfied since device is already trusted", requestedContext);
            return Pair.of(Boolean.TRUE, requestedProvider);
        }

        // A previously satisfied provider of equal or higher order also satisfies this request.
        val provider = requestedProvider.get();
        val satisfiedProviders = getSatisfiedAuthenticationProviders(authentication, providerMap.values());
        if (!satisfiedProviders.isEmpty()) {
            val providers = satisfiedProviders.toArray(MultifactorAuthenticationProvider[]::new);
            OrderComparator.sortIfNecessary(providers);
            val result = Arrays.stream(providers)
                .filter(p -> p.equals(provider) || p.getOrder() >= provider.getOrder())
                .findFirst();
            if (result.isPresent()) {
                LOGGER.debug("Current provider [{}] already satisfies the authentication requirements of [{}]; proceed with flow normally.",
                    result.get(), requestedProvider);
                return Pair.of(Boolean.TRUE, requestedProvider);
            }
        }
        LOGGER.debug("No multifactor providers could be located to satisfy the requested context for [{}]", provider);
        return Pair.of(Boolean.FALSE, requestedProvider);
    }

    /**
     * Collects the providers whose ids appear in the authentication's recorded context attribute.
     * Returns an empty collection (never {@code null}) when no context attribute is present,
     * so callers need no null check.
     *
     * @param authentication the authentication to inspect
     * @param providers      all available providers
     * @return providers already satisfied by this authentication, possibly empty
     */
    private Collection<MultifactorAuthenticationProvider> getSatisfiedAuthenticationProviders(final Authentication authentication,
                                                                                              final Collection<MultifactorAuthenticationProvider> providers) {
        val contexts = CollectionUtils.toCollection(authentication.getAttributes().get(this.authenticationContextAttribute));
        if (contexts == null || contexts.isEmpty()) {
            LOGGER.debug("No authentication context could be determined based on authentication attribute [{}]", this.authenticationContextAttribute);
            return new LinkedHashSet<>(0);
        }
        return providers.stream()
            .filter(p -> contexts.contains(p.getId()))
            .collect(Collectors.toCollection(LinkedHashSet::new));
    }
}
apache-2.0
chirino/activemq
activemq-web/src/main/java/org/apache/activemq/web/RemoteJMXBrokerFacade.java
8728
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.activemq.web;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import javax.management.MBeanServerConnection;
import javax.management.MBeanServerInvocationHandler;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
import javax.management.QueryExp;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

import org.apache.activemq.broker.jmx.BrokerViewMBean;
import org.apache.activemq.broker.jmx.ManagementContext;
import org.apache.activemq.broker.jmx.QueueViewMBean;
import org.apache.activemq.command.ActiveMQDestination;
import org.apache.activemq.web.config.WebConsoleConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * A {@link BrokerFacade} which uses a JMX-Connection to communicate with a
 * broker.
 */
public class RemoteJMXBrokerFacade extends BrokerFacadeSupport {

    private static final transient Logger LOG = LoggerFactory.getLogger(RemoteJMXBrokerFacade.class);

    // Optional broker name; when null, findBrokers() matches any broker.
    private String brokerName;
    // Lazily created, cached JMX connection; guarded by synchronized blocks for writes.
    private JMXConnector connector;
    private WebConsoleConfiguration configuration;

    public void setBrokerName(String brokerName) {
        this.brokerName = brokerName;
    }

    public WebConsoleConfiguration getConfiguration() {
        return configuration;
    }

    public void setConfiguration(WebConsoleConfiguration configuration) {
        this.configuration = configuration;
    }

    /**
     * Shutdown this facade aka close any open connection.
     */
    public void shutdown() {
        closeConnection();
    }

    /**
     * Returns an MBean proxy for the first broker registered in JMX.
     *
     * @throws IOException if no broker can be found
     */
    @Override
    public BrokerViewMBean getBrokerAdmin() throws Exception {
        MBeanServerConnection connection = getMBeanServerConnection();

        Set<ObjectName> brokers = findBrokers(connection);
        if (brokers.isEmpty()) {
            throw new IOException("No broker could be found in the JMX.");
        }
        ObjectName name = brokers.iterator().next();
        BrokerViewMBean mbean = MBeanServerInvocationHandler.newProxyInstance(connection, name, BrokerViewMBean.class, true);
        return mbean;
    }

    @Override
    public String getBrokerName() throws Exception, MalformedObjectNameException {
        return getBrokerAdmin().getBrokerName();
    }

    /**
     * Returns a live MBean server connection, reusing the cached connector when
     * it is still active and reconnecting otherwise.
     */
    protected MBeanServerConnection getMBeanServerConnection() throws Exception {
        JMXConnector connector = this.connector;
        if (isConnectionActive(connector)) {
            return connector.getMBeanServerConnection();
        }

        synchronized (this) {
            closeConnection();

            LOG.debug("Creating a new JMX-Connection to the broker");
            this.connector = createConnection();
            return this.connector.getMBeanServerConnection();
        }
    }

    /**
     * Returns {@code true} when the given connector is usable, i.e. it responds
     * and at least one broker is visible through it.
     */
    protected boolean isConnectionActive(JMXConnector connector) {
        if (connector == null) {
            return false;
        }

        try {
            MBeanServerConnection connection = connector.getMBeanServerConnection();
            return !findBrokers(connection).isEmpty();
        } catch (Exception e) {
            return false;
        }
    }

    /**
     * Tries each configured JMX URL in order and returns the first connector
     * through which a broker is visible.
     *
     * @throws RuntimeException      wrapping the last connection failure, if every URL failed
     * @throws IllegalStateException if no URLs are configured
     */
    protected JMXConnector createConnection() {
        Map<String, Object> env = new HashMap<String, Object>();
        if (this.configuration.getJmxUser() != null) {
            env.put("jmx.remote.credentials", new String[] {
                    this.configuration.getJmxUser(),
                    this.configuration.getJmxPassword() });
        }
        Collection<JMXServiceURL> jmxUrls = this.configuration.getJmxUrls();

        Exception exception = null;
        for (JMXServiceURL url : jmxUrls) {
            try {
                // JMXConnectorFactory.connect returns an already-connected
                // connector, so no additional connect() call is needed.
                JMXConnector connector = JMXConnectorFactory.connect(url, env);
                MBeanServerConnection connection = connector.getMBeanServerConnection();
                Set<ObjectName> brokers = findBrokers(connection);
                if (brokers.size() > 0) {
                    LOG.info("Connected via JMX to the broker at " + url);
                    return connector;
                }
            } catch (Exception e) {
                // Keep the exception for later
                exception = e;
            }
        }
        if (exception != null) {
            if (exception instanceof RuntimeException) {
                throw (RuntimeException) exception;
            } else {
                throw new RuntimeException(exception);
            }
        }
        throw new IllegalStateException("No broker is found at any of the " + jmxUrls.size() + " configured urls");
    }

    /**
     * Closes the cached connector, ignoring errors since the connection is
     * being discarded anyway.
     */
    protected synchronized void closeConnection() {
        if (connector != null) {
            try {
                LOG.debug("Closing a connection to a broker (" + connector.getConnectionId() + ")");

                connector.close();
            } catch (IOException e) {
                // Ignore the exception, since it most likely won't matter anymore
            }
        }
    }

    /**
     * Finds all ActiveMQ-Brokers registered on a certain JMX-Server or, if a
     * JMX-BrokerName has been set, the broker with that name.
     *
     * @param connection
     *            not <code>null</code>
     * @return Set with ObjectName-elements
     * @throws IOException
     * @throws MalformedObjectNameException
     */
    protected Set<ObjectName> findBrokers(MBeanServerConnection connection) throws IOException, MalformedObjectNameException {
        ObjectName name;
        if (this.brokerName == null) {
            name = new ObjectName("org.apache.activemq:type=Broker,brokerName=*");
        } else {
            name = new ObjectName("org.apache.activemq:type=Broker,brokerName=" + this.brokerName);
        }

        Set<ObjectName> brokers = connection.queryNames(name, null);
        Set<ObjectName> masterBrokers = new HashSet<ObjectName>();
        for (ObjectName objectName : brokers) {
            // Only report masters; slaves are filtered out.
            BrokerViewMBean mbean = MBeanServerInvocationHandler.newProxyInstance(connection, objectName, BrokerViewMBean.class, true);
            if (!mbean.isSlave()) {
                masterBrokers.add(objectName);
            }
        }
        return masterBrokers;
    }

    @Override
    public void purgeQueue(ActiveMQDestination destination) throws Exception {
        QueueViewMBean queue = getQueue(destination.getPhysicalName());
        queue.purge();
    }

    @Override
    public ManagementContext getManagementContext() {
        throw new IllegalStateException("not supported");
    }

    @Override
    protected <T> Collection<T> getManagedObjects(ObjectName[] names, Class<T> type) {
        MBeanServerConnection connection;
        try {
            connection = getMBeanServerConnection();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }

        List<T> answer = new ArrayList<T>();
        if (connection != null) {
            for (ObjectName name : names) {
                T value = MBeanServerInvocationHandler.newProxyInstance(connection, name, type, true);
                if (value != null) {
                    answer.add(value);
                }
            }
        }
        return answer;
    }

    @Override
    public Set queryNames(ObjectName name, QueryExp query) throws Exception {
        return getMBeanServerConnection().queryNames(name, query);
    }

    @Override
    public Object newProxyInstance(ObjectName objectName, Class interfaceClass, boolean notificationBroadcaster) throws Exception {
        return MBeanServerInvocationHandler.newProxyInstance(getMBeanServerConnection(), objectName, interfaceClass, notificationBroadcaster);
    }
}
apache-2.0
parasoft-pl/jpf-core
src/tests/gov/nasa/jpf/util/PermutationGeneratorTest.java
3375
/*
 * Copyright (C) 2015, United States Government, as represented by the
 * Administrator of the National Aeronautics and Space Administration.
 * All rights reserved.
 *
 * The Java Pathfinder core (jpf-core) platform is licensed under the
 * Apache License, Version 2.0 (the "License"); you may not use this file except
 * in compliance with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package gov.nasa.jpf.util;

import gov.nasa.jpf.util.test.TestJPF;

import java.util.Arrays;

import org.junit.Test;

/**
 * regression test for PermutationGenerator
 */
public class PermutationGeneratorTest extends TestJPF {

  /** All 4! = 24 permutations must be produced and non-null. */
  @Test
  public void testTotalPermutation(){
    PermutationGenerator pg = new TotalPermutationGenerator(4);
    long nPerm = pg.getNumberOfPermutations();
    assertTrue( nPerm == 24);

    while (pg.hasNext()){
      int[] perms = pg.next();
      assertTrue(perms != null);
      pg.printOn(System.out);
    }
  }

  /** Pairwise coverage of 4 elements requires exactly 7 permutations. */
  @Test
  public void testPairPermutation(){
    PermutationGenerator pg = new PairPermutationGenerator(4);
    long nPerm = pg.getNumberOfPermutations();
    assertTrue( nPerm == 7);

    while (pg.hasNext()){
      int[] perms = pg.next();
      assertTrue(perms != null);
      pg.printOn(System.out);
    }
  }

  /** Random generation with a fixed seed; duplicates are allowed here. */
  @Test
  public void testRandomPermutation(){
    int nPermutations = 14;
    PermutationGenerator pg = new RandomPermutationGenerator(4, nPermutations, 42);
    long nPerm = pg.getNumberOfPermutations();
    assertTrue( nPerm == nPermutations);

    System.out.println("this CAN have duplicates");
    while (pg.hasNext()){
      int[] perms = pg.next();
      assertTrue(perms != null);
      pg.printOn(System.out);
    }
  }

  // Element-wise array comparison helper used by the uniqueness tests;
  // delegates to the standard library instead of a hand-rolled loop.
  boolean isEqual (int[] a, int[] b){
    return Arrays.equals(a, b);
  }

  /** Unique random generation: no permutation may repeat. */
  @Test
  public void testUniqueRandomPermutation(){
    int nPermutations = 14;
    PermutationGenerator pg = new UniqueRandomPermGenerator(4, nPermutations, 42);
    long nPerm = pg.getNumberOfPermutations();
    assertTrue( nPerm == nPermutations);

    int[][] seen = new int[nPermutations][];
    int n = 0;

    System.out.println("this should NOT have duplicates");
    while (pg.hasNext()){
      int[] perms = pg.next();
      assertTrue(perms != null);
      pg.printOn(System.out);

      for (int i=0; i<n; i++){
        assertFalse(isEqual(seen[i], perms));
      }
      seen[n++] = perms.clone();
    }
  }

  /** Requested count exceeds 3! = 6; generator must cap at 6 uniques. */
  @Test
  public void testMaxUniqueRandomPermutation(){
    int nPermutations = 14; // too high, this only has 3! different permutations
    PermutationGenerator pg = new UniqueRandomPermGenerator(3, nPermutations, 42);
    long nPerm = pg.getNumberOfPermutations();
    assertTrue( nPerm == 6);

    while (pg.hasNext()){
      int[] perms = pg.next();
      assertTrue(perms != null);
      pg.printOn(System.out);
    }
  }
}
apache-2.0
Wesley-Lawrence/nifi
nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/JoltTransformJSON.java
18817
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.nifi.processors.standard; import java.io.FilenameFilter; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; import org.apache.nifi.annotation.behavior.EventDriven; import org.apache.nifi.annotation.behavior.InputRequirement; import org.apache.nifi.annotation.behavior.SideEffectFree; import org.apache.nifi.annotation.behavior.SupportsBatching; import org.apache.nifi.annotation.behavior.WritesAttribute; import org.apache.nifi.annotation.documentation.CapabilityDescription; import org.apache.nifi.annotation.documentation.Tags; import org.apache.nifi.annotation.lifecycle.OnScheduled; import org.apache.nifi.components.AllowableValue; import org.apache.nifi.components.PropertyDescriptor; import org.apache.nifi.components.ValidationContext; import org.apache.nifi.components.ValidationResult; import org.apache.nifi.expression.ExpressionLanguageScope; import org.apache.nifi.flowfile.FlowFile; import 
org.apache.nifi.flowfile.attributes.CoreAttributes; import org.apache.nifi.logging.ComponentLog; import org.apache.nifi.processor.AbstractProcessor; import org.apache.nifi.processor.ProcessContext; import org.apache.nifi.processor.ProcessSession; import org.apache.nifi.processor.Relationship; import org.apache.nifi.processor.exception.ProcessException; import org.apache.nifi.processor.io.OutputStreamCallback; import org.apache.nifi.processor.util.StandardValidators; import org.apache.nifi.processors.standard.util.jolt.TransformFactory; import org.apache.nifi.processors.standard.util.jolt.TransformUtils; import org.apache.nifi.util.StopWatch; import org.apache.nifi.util.StringUtils; import org.apache.nifi.util.file.classloader.ClassLoaderUtils; import com.bazaarvoice.jolt.JoltTransform; import com.bazaarvoice.jolt.JsonUtils; @EventDriven @SideEffectFree @SupportsBatching @Tags({"json", "jolt", "transform", "shiftr", "chainr", "defaultr", "removr","cardinality","sort"}) @InputRequirement(InputRequirement.Requirement.INPUT_REQUIRED) @WritesAttribute(attribute = "mime.type",description = "Always set to application/json") @CapabilityDescription("Applies a list of Jolt specifications to the flowfile JSON payload. A new FlowFile is created " + "with transformed content and is routed to the 'success' relationship. 
If the JSON transform " + "fails, the original FlowFile is routed to the 'failure' relationship.") public class JoltTransformJSON extends AbstractProcessor { public static final AllowableValue SHIFTR = new AllowableValue("jolt-transform-shift", "Shift", "Shift input JSON/data to create the output JSON."); public static final AllowableValue CHAINR = new AllowableValue("jolt-transform-chain", "Chain", "Execute list of Jolt transformations."); public static final AllowableValue DEFAULTR = new AllowableValue("jolt-transform-default", "Default", " Apply default values to the output JSON."); public static final AllowableValue REMOVR = new AllowableValue("jolt-transform-remove", "Remove", " Remove values from input data to create the output JSON."); public static final AllowableValue CARDINALITY = new AllowableValue("jolt-transform-card", "Cardinality", "Change the cardinality of input elements to create the output JSON."); public static final AllowableValue SORTR = new AllowableValue("jolt-transform-sort", "Sort", "Sort input json key values alphabetically. Any specification set is ignored."); public static final AllowableValue CUSTOMR = new AllowableValue("jolt-transform-custom", "Custom", "Custom Transformation. 
Requires Custom Transformation Class Name"); public static final AllowableValue MODIFIER_DEFAULTR = new AllowableValue("jolt-transform-modify-default", "Modify - Default", "Writes when key is missing or value is null"); public static final AllowableValue MODIFIER_OVERWRITER = new AllowableValue("jolt-transform-modify-overwrite", "Modify - Overwrite", " Always overwrite value"); public static final AllowableValue MODIFIER_DEFINER = new AllowableValue("jolt-transform-modify-define", "Modify - Define", "Writes when key is missing"); public static final PropertyDescriptor JOLT_TRANSFORM = new PropertyDescriptor.Builder() .name("jolt-transform") .displayName("Jolt Transformation DSL") .description("Specifies the Jolt Transformation that should be used with the provided specification.") .required(true) .allowableValues(CARDINALITY, CHAINR, DEFAULTR, MODIFIER_DEFAULTR, MODIFIER_DEFINER, MODIFIER_OVERWRITER, REMOVR, SHIFTR, SORTR, CUSTOMR) .defaultValue(CHAINR.getValue()) .build(); public static final PropertyDescriptor JOLT_SPEC = new PropertyDescriptor.Builder() .name("jolt-spec") .displayName("Jolt Specification") .description("Jolt Specification for transform of JSON data. 
This value is ignored if the Jolt Sort Transformation is selected.")
            .expressionLanguageSupported(ExpressionLanguageScope.FLOWFILE_ATTRIBUTES)
            .addValidator(StandardValidators.NON_EMPTY_VALIDATOR)
            .required(false)
            .build();

    // NOTE(review): JOLT_TRANSFORM, JOLT_SPEC, SORTR and CUSTOMR are declared earlier
    // in this class (outside this excerpt); the descriptor closed above is the spec
    // property whose builder chain also starts earlier.

    // Fully qualified class name of a user-supplied transform. Only consulted when
    // the "custom" transform type (CUSTOMR) is selected -- see customValidate() and
    // getTransform() below.
    public static final PropertyDescriptor CUSTOM_CLASS = new PropertyDescriptor.Builder()
            .name("jolt-custom-class")
            .displayName("Custom Transformation Class Name")
            .description("Fully Qualified Class Name for Custom Transformation")
            .required(false)
            .expressionLanguageSupported(ExpressionLanguageScope.NONE)
            .addValidator(StandardValidators.NON_EMPTY_VALIDATOR)
            .build();

    // Module paths that are loaded into a dedicated class loader in setup(); that
    // loader is installed as the thread context class loader while transforming.
    public static final PropertyDescriptor MODULES = new PropertyDescriptor.Builder()
            .name("jolt-custom-modules")
            .displayName("Custom Module Directory")
            .description("Comma-separated list of paths to files and/or directories which contain modules containing custom transformations (that are not included on NiFi's classpath).")
            .required(false)
            .expressionLanguageSupported(ExpressionLanguageScope.NONE)
            .addValidator(StandardValidators.NON_EMPTY_VALIDATOR)
            .build();

    // Upper bound for the spec-string -> compiled-transform cache below; copied into
    // maxTransformsToCache in setup().
    static final PropertyDescriptor TRANSFORM_CACHE_SIZE = new PropertyDescriptor.Builder()
            .name("Transform Cache Size")
            .description("Compiling a Jolt Transform can be fairly expensive. Ideally, this will be done only once. However, if the Expression Language is used in the transform, we may need " +
                    "a new Transform for each FlowFile. This value controls how many of those Transforms we cache in memory in order to avoid having to compile the Transform each time.")
            .expressionLanguageSupported(ExpressionLanguageScope.NONE)
            .addValidator(StandardValidators.POSITIVE_INTEGER_VALIDATOR)
            .defaultValue("1")
            .required(true)
            .build();

    public static final Relationship REL_SUCCESS = new Relationship.Builder()
            .name("success")
            .description("The FlowFile with transformed content will be routed to this relationship")
            .build();

    public static final Relationship REL_FAILURE = new Relationship.Builder()
            .name("failure")
            .description("If a FlowFile fails processing for any reason (for example, the FlowFile is not valid JSON), it will be routed to this relationship")
            .build();

    // Immutable views; populated once in the static initializer below.
    private final static List<PropertyDescriptor> properties;
    private final static Set<Relationship> relationships;

    // Class loader holding the user modules (MODULES property); (re)built in setup().
    private volatile ClassLoader customClassLoader;

    private final static String DEFAULT_CHARSET = "UTF-8";

    // Cache is guarded by synchronizing on 'this'.
    private volatile int maxTransformsToCache = 10;

    // Insertion-ordered cache keyed by the EL-evaluated spec string; evicts the
    // eldest entry once size exceeds maxTransformsToCache (see getTransform()).
    private final Map<String, JoltTransform> transformCache = new LinkedHashMap<String, JoltTransform>() {
        @Override
        protected boolean removeEldestEntry(Map.Entry<String, JoltTransform> eldest) {
            final boolean evict = size() > maxTransformsToCache;
            if (evict) {
                getLogger().debug("Removing Jolt Transform from cache because cache is full");
            }
            return evict;
        }
    };

    static {
        final List<PropertyDescriptor> _properties = new ArrayList<>();
        _properties.add(JOLT_TRANSFORM);
        _properties.add(CUSTOM_CLASS);
        _properties.add(MODULES);
        _properties.add(JOLT_SPEC);
        _properties.add(TRANSFORM_CACHE_SIZE);
        properties = Collections.unmodifiableList(_properties);

        final Set<Relationship> _relationships = new HashSet<>();
        _relationships.add(REL_SUCCESS);
        _relationships.add(REL_FAILURE);
        relationships = Collections.unmodifiableSet(_relationships);
    }

    @Override
    public Set<Relationship> getRelationships() {
        return relationships;
    }

    @Override
    protected List<PropertyDescriptor> getSupportedPropertyDescriptors() {
        return properties;
    }

    /**
     * Validates the configured transform: a non-sort transform requires a spec, the
     * spec's Expression Language must be well formed, and the spec must actually
     * compile (via TransformFactory) under the custom module class loader if one is
     * configured. Any failure is reported as a single invalid ValidationResult.
     */
    @Override
    protected Collection<ValidationResult> customValidate(ValidationContext validationContext) {
        final List<ValidationResult> results = new ArrayList<>(super.customValidate(validationContext));
        final String transform = validationContext.getProperty(JOLT_TRANSFORM).getValue();
        final String customTransform = validationContext.getProperty(CUSTOM_CLASS).getValue();
        final String modulePath = validationContext.getProperty(MODULES).isSet()? validationContext.getProperty(MODULES).getValue() : null;

        if(!validationContext.getProperty(JOLT_SPEC).isSet() || StringUtils.isEmpty(validationContext.getProperty(JOLT_SPEC).getValue())){
            // Only the Sort transform works without a spec.
            if(!SORTR.getValue().equals(transform)) {
                final String message = "A specification is required for this transformation";
                results.add(new ValidationResult.Builder().valid(false)
                        .explanation(message)
                        .build());
            }
        } else {
            final ClassLoader customClassLoader;
            try {
                if (modulePath != null) {
                    customClassLoader = ClassLoaderUtils.getCustomClassLoader(modulePath, this.getClass().getClassLoader(), getJarFilenameFilter());
                } else {
                    customClassLoader = this.getClass().getClassLoader();
                }

                final String specValue = validationContext.getProperty(JOLT_SPEC).getValue();
                final String invalidExpressionMsg = validationContext.newExpressionLanguageCompiler().validateExpression(specValue,true);
                if (validationContext.isExpressionLanguagePresent(specValue) && invalidExpressionMsg != null) {
                    // NOTE(review): 'withing' typo in this user-facing message -- a
                    // doc-only change must not touch runtime strings; fix separately.
                    final String customMessage = "The expression language used withing this specification is invalid";
                    results.add(new ValidationResult.Builder().valid(false)
                            .explanation(customMessage)
                            .build());
                } else {
                    //for validation we want to be able to ensure the spec is syntactically correct and not try to resolve variables since they may not exist yet
                    // The replaceAll escapes "${" so EL references survive as literal text.
                    Object specJson = SORTR.getValue().equals(transform) ? null : JsonUtils.jsonToObject(specValue.replaceAll("\\$\\{","\\\\\\\\\\$\\{"), DEFAULT_CHARSET);

                    if (CUSTOMR.getValue().equals(transform)) {
                        if (StringUtils.isEmpty(customTransform)) {
                            final String customMessage = "A custom transformation class should be provided. ";
                            results.add(new ValidationResult.Builder().valid(false)
                                    .explanation(customMessage)
                                    .build());
                        } else {
                            // Instantiation failure falls through to the catch below.
                            TransformFactory.getCustomTransform(customClassLoader, customTransform, specJson);
                        }
                    } else {
                        TransformFactory.getTransform(customClassLoader, transform, specJson);
                    }
                }
            } catch (final Exception e) {
                getLogger().info("Processor is not valid - " + e.toString());
                String message = "Specification not valid for the selected transformation." ;
                results.add(new ValidationResult.Builder().valid(false)
                        .explanation(message)
                        .build());
            }
        }
        return results;
    }

    /**
     * Reads the incoming FlowFile as JSON, applies the (cached) Jolt transform under
     * the custom module class loader, writes the result back as the FlowFile content
     * and routes to success; any parse or transform error routes the original
     * FlowFile to failure.
     */
    @Override
    public void onTrigger(final ProcessContext context, ProcessSession session) throws ProcessException {
        final FlowFile original = session.get();
        if (original == null) {
            return;
        }

        final ComponentLog logger = getLogger();
        final StopWatch stopWatch = new StopWatch(true);

        final Object inputJson;
        try (final InputStream in = session.read(original)) {
            inputJson = JsonUtils.jsonToObject(in);
        } catch (final Exception e) {
            logger.error("Failed to transform {}; routing to failure", new Object[] {original, e});
            session.transfer(original, REL_FAILURE);
            return;
        }

        final String jsonString;
        final ClassLoader originalContextClassLoader = Thread.currentThread().getContextClassLoader();
        try {
            final JoltTransform transform = getTransform(context, original);
            // Swap the context class loader so custom transforms resolve their own classes.
            if (customClassLoader != null) {
                Thread.currentThread().setContextClassLoader(customClassLoader);
            }
            final Object transformedJson = TransformUtils.transform(transform,inputJson);
            jsonString = JsonUtils.toJsonString(transformedJson);
        } catch (final Exception ex) {
            logger.error("Unable to transform {} due to {}", new Object[] {original, ex.toString(), ex});
            session.transfer(original, REL_FAILURE);
            return;
        } finally {
            // Always restore the original context class loader.
            if (customClassLoader != null && originalContextClassLoader != null) {
                Thread.currentThread().setContextClassLoader(originalContextClassLoader);
            }
        }

        FlowFile transformed = session.write(original, new OutputStreamCallback() {
            @Override
            public void process(OutputStream out) throws IOException {
                out.write(jsonString.getBytes(DEFAULT_CHARSET));
            }
        });

        final String transformType = context.getProperty(JOLT_TRANSFORM).getValue();
        transformed = session.putAttribute(transformed, CoreAttributes.MIME_TYPE.key(), "application/json");
        session.transfer(transformed, REL_SUCCESS);
        session.getProvenanceReporter().modifyContent(transformed,"Modified With " + transformType ,stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        logger.info("Transformed {}", new Object[]{original});
    }

    /**
     * Returns the compiled transform for this FlowFile's (EL-evaluated) spec,
     * consulting the cache first and using check/compile/re-check so a concurrent
     * compilation of the same spec is not stored twice.
     */
    private JoltTransform getTransform(final ProcessContext context, final FlowFile flowFile) throws Exception {
        final String specString;
        if (context.getProperty(JOLT_SPEC).isSet()) {
            specString = context.getProperty(JOLT_SPEC).evaluateAttributeExpressions(flowFile).getValue();
        } else {
            // Sort transform may run without a spec; the cache key is then null.
            specString = null;
        }

        // Get the transform from our cache, if it exists.
        JoltTransform transform = null;
        synchronized (this) {
            transform = transformCache.get(specString);
        }
        if (transform != null) {
            return transform;
        }

        // If no transform for our spec, create the transform.
        final Object specJson;
        if (context.getProperty(JOLT_SPEC).isSet() && !SORTR.getValue().equals(context.getProperty(JOLT_TRANSFORM).getValue())) {
            specJson = JsonUtils.jsonToObject(specString, DEFAULT_CHARSET);
        } else {
            specJson = null;
        }

        if (CUSTOMR.getValue().equals(context.getProperty(JOLT_TRANSFORM).getValue())) {
            transform = TransformFactory.getCustomTransform(customClassLoader, context.getProperty(CUSTOM_CLASS).getValue(), specJson);
        } else {
            transform = TransformFactory.getTransform(customClassLoader, context.getProperty(JOLT_TRANSFORM).getValue(), specJson);
        }

        // Check again for the transform in our cache, since it's possible that another thread has
        // already populated it. If absent from the cache, populate the cache. Otherwise, use the
        // value from the cache.
        synchronized (this) {
            final JoltTransform existingTransform = transformCache.get(specString);
            if (existingTransform == null) {
                transformCache.put(specString, transform);
            } else {
                transform = existingTransform;
            }
        }
        return transform;
    }

    /**
     * Scheduling-time setup: resets the transform cache, reads the cache-size
     * property, and builds the custom module class loader (falling back to this
     * class's own loader). Loader build failures are logged, not rethrown.
     */
    @OnScheduled
    public synchronized void setup(final ProcessContext context) {
        transformCache.clear();
        maxTransformsToCache = context.getProperty(TRANSFORM_CACHE_SIZE).asInteger();
        try {
            if (context.getProperty(MODULES).isSet()) {
                customClassLoader = ClassLoaderUtils.getCustomClassLoader(context.getProperty(MODULES).getValue(), this.getClass().getClassLoader(), getJarFilenameFilter());
            } else {
                customClassLoader = this.getClass().getClassLoader();
            }
        } catch (final Exception ex) {
            getLogger().error("Unable to setup processor", ex);
        }
    }

    /** Filter selecting the *.jar files to load from the custom module directories. */
    protected FilenameFilter getJarFilenameFilter(){
        return (dir, name) -> (name != null && name.endsWith(".jar"));
    }
}
apache-2.0
ptupitsyn/ignite
modules/core/src/main/java/org/apache/ignite/internal/processors/metastorage/persistence/DistributedMetaStorageClusterNodeData.java
1751
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.ignite.internal.processors.metastorage.persistence;

import java.io.Serializable;

/**
 * Serializable container bundling a node's distributed metastorage state:
 * its version, full data, history and pending updates. Fields are plain
 * public values by design (see the suppressed inspection).
 */
@SuppressWarnings("PublicField")
class DistributedMetaStorageClusterNodeData implements Serializable {
    /** Serial version UID. */
    private static final long serialVersionUID = 0L;

    /** Metastorage version carried by this node. */
    public final DistributedMetaStorageVersion ver;

    /** Full data items (presumably a complete snapshot — confirm against callers). */
    public final DistributedMetaStorageHistoryItem[] fullData;

    /** History items accompanying the snapshot. */
    public final DistributedMetaStorageHistoryItem[] hist;

    /** Pending updates; mutable, unlike the other fields. */
    public DistributedMetaStorageHistoryItem[] updates;

    /**
     * Creates the container.
     *
     * @param ver Metastorage version.
     * @param fullData Full data items.
     * @param hist History items.
     * @param updates Pending updates (may be reassigned later).
     */
    public DistributedMetaStorageClusterNodeData(
        DistributedMetaStorageVersion ver,
        DistributedMetaStorageHistoryItem[] fullData,
        DistributedMetaStorageHistoryItem[] hist,
        DistributedMetaStorageHistoryItem[] updates
    ) {
        this.ver = ver;
        this.fullData = fullData;
        this.hist = hist;
        this.updates = updates;
    }
}
apache-2.0
vorburger/mifos-head
application/src/test/java/org/mifos/application/holiday/business/service/HolidayServiceTest.java
6068
package org.mifos.application.holiday.business.service; import org.joda.time.DateTime; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mifos.application.holiday.persistence.HolidayDao; import org.mifos.config.FiscalCalendarRules; import org.mifos.framework.util.helpers.DateUtils; import org.mockito.Matchers; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; import java.text.DateFormat; import java.util.Calendar; import java.util.Date; import java.util.Locale; import static org.hamcrest.Matchers.is; import static org.junit.Assert.assertThat; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @RunWith(MockitoJUnitRunner.class) public class HolidayServiceTest { @Mock private HolidayDao holidayDao; @Mock private FiscalCalendarRules fiscalCalendarRules; private HolidayService holidayService; private Locale locale; private String dateFormat; private Short officeId; @Before public void setUp() { holidayService = new HolidayServiceImpl(null, holidayDao, null, fiscalCalendarRules); locale = new Locale("en", "GB"); dateFormat = computeDateFormat(locale); officeId = Short.valueOf("1"); } @Test public void shouldDetermineIfRegularHolidayIsNotWorkingDay() { Calendar holiday = toCalendar("01-Nov-2010"); when(fiscalCalendarRules.isWorkingDay(holiday)).thenReturn(false); assertThat(holidayService.isWorkingDay(holiday, officeId), is(false)); verify(fiscalCalendarRules, times(1)).isWorkingDay(holiday); } @Test public void shouldDetermineIfActualHolidayIsNotWorkingDay() { Calendar holiday = toCalendar("01-Nov-2010"); DateTime holidayAsDateTime = new DateTime(holiday.getTime().getTime()); String holidayAsString = holidayAsDateTime.toLocalDate().toString(); when(fiscalCalendarRules.isWorkingDay(holiday)).thenReturn(true); when(holidayDao.isHoliday(officeId, holidayAsString)).thenReturn(true); assertThat(holidayService.isWorkingDay(holiday, officeId), 
is(false)); verify(fiscalCalendarRules, times(1)).isWorkingDay(holiday); verify(holidayDao).isHoliday(officeId, holidayAsString); } @Test public void shouldDetermineIfWorkingDay() { Calendar holiday = toCalendar("01-Nov-2010"); DateTime holidayAsDateTime = new DateTime(holiday.getTime().getTime()); String holidayAsString = holidayAsDateTime.toLocalDate().toString(); when(fiscalCalendarRules.isWorkingDay(holiday)).thenReturn(true); when(holidayDao.isHoliday(officeId, holidayAsString)).thenReturn(false); assertThat(holidayService.isWorkingDay(holiday, officeId), is(true)); verify(fiscalCalendarRules, times(1)).isWorkingDay(holiday); verify(holidayDao).isHoliday(officeId, holidayAsString); } @Test public void shouldGetNextWorkingDay() { Calendar holiday1 = toCalendar("01-Nov-2010"); when(fiscalCalendarRules.isWorkingDay(holiday1)).thenReturn(false); Calendar holiday2 = toCalendar("02-Nov-2010"); DateTime holiday2AsDateTime = new DateTime(holiday2.getTime().getTime()); String holiday2AsString = holiday2AsDateTime.toLocalDate().toString(); when(fiscalCalendarRules.isWorkingDay(holiday2)).thenReturn(true); when(holidayDao.isHoliday(officeId, holiday2AsString)).thenReturn(true); Calendar holiday3 = toCalendar("03-Nov-2010"); DateTime holiday3AsDateTime = new DateTime(holiday3.getTime().getTime()); String holiday3AsString = holiday3AsDateTime.toLocalDate().toString(); when(fiscalCalendarRules.isWorkingDay(holiday3)).thenReturn(true); when(holidayDao.isHoliday(officeId, holiday3AsString)).thenReturn(true); Calendar workingDay = toCalendar("04-Nov-2010"); DateTime holidayAsDateTime = new DateTime(workingDay.getTime().getTime()); String holidayAsString = holidayAsDateTime.toLocalDate().toString(); when(fiscalCalendarRules.isWorkingDay(workingDay)).thenReturn(true); when(holidayDao.isHoliday(officeId, holidayAsString)).thenReturn(false); Calendar nextWorkingDay = holidayService.getNextWorkingDay(toCalendar("01-Nov-2010"), officeId); 
assertThat(nextWorkingDay.get(Calendar.DAY_OF_MONTH), is(4)); assertThat(nextWorkingDay.get(Calendar.MONTH), is(10)); assertThat(nextWorkingDay.get(Calendar.YEAR), is(2010)); verify(fiscalCalendarRules, times(4)).isWorkingDay(Matchers.<Calendar>anyObject()); verify(holidayDao, times(3)).isHoliday(Matchers.anyShort(), Matchers.anyString()); } @Test public void shouldGetTheSameDayIfAlreadyWorkingDay() { Calendar workingDay = toCalendar("01-Nov-2010"); DateTime holidayAsDateTime = new DateTime(workingDay.getTime().getTime()); String holidayAsString = holidayAsDateTime.toLocalDate().toString(); when(fiscalCalendarRules.isWorkingDay(workingDay)).thenReturn(true); when(holidayDao.isHoliday(officeId, holidayAsString)).thenReturn(false); Calendar nextWorkingDay = holidayService.getNextWorkingDay(toCalendar("01-Nov-2010"), officeId); assertThat(nextWorkingDay, is(workingDay)); verify(fiscalCalendarRules, times(1)).isWorkingDay(workingDay); verify(holidayDao).isHoliday(officeId, holidayAsString); } private Date toDate(String dateString) { return DateUtils.getDate(dateString, locale, dateFormat); } private Calendar toCalendar(String dateString) { Calendar calendar = Calendar.getInstance(); calendar.setTime(toDate(dateString)); return calendar; } private String computeDateFormat(Locale locale) { String dateSeparator = DateUtils.getDateSeparatorByLocale(locale, DateFormat.MEDIUM); return String.format("dd%sMMM%syyyy", dateSeparator, dateSeparator); } }
apache-2.0
studanshu/datacollector
cli/src/main/java/com/streamsets/datacollector/client/model/RawSourceDefinitionJson.java
2877
/**
 * Copyright 2015 StreamSets Inc.
 *
 * Licensed under the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.streamsets.datacollector.client.model;

import com.streamsets.datacollector.client.StringUtil;
import java.util.*;
import com.streamsets.datacollector.client.model.ConfigDefinitionJson;

import io.swagger.annotations.*;
import com.fasterxml.jackson.annotation.JsonProperty;

/**
 * JSON model for a raw-source definition: the previewer class name, its MIME
 * type, and the list of configuration definitions the previewer exposes.
 *
 * NOTE: this class was generated by swagger-codegen (see the {@code @Generated}
 * annotation); prefer regenerating over hand-editing.
 */
@ApiModel(description = "")
@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2015-09-11T14:51:29.367-07:00")
public class RawSourceDefinitionJson {
  // Fully-qualified class name of the raw-source previewer implementation.
  private String rawSourcePreviewerClass = null;
  // MIME type of the previewed raw data.
  private String mimeType = null;
  // Config definitions for the previewer; initialized to an empty mutable list.
  private List<ConfigDefinitionJson> configDefinitions = new ArrayList<ConfigDefinitionJson>();

  /**
   * Fully-qualified class name of the raw-source previewer.
   **/
  @ApiModelProperty(value = "")
  @JsonProperty("rawSourcePreviewerClass")
  public String getRawSourcePreviewerClass() {
    return rawSourcePreviewerClass;
  }
  public void setRawSourcePreviewerClass(String rawSourcePreviewerClass) {
    this.rawSourcePreviewerClass = rawSourcePreviewerClass;
  }

  /**
   * MIME type of the raw source data.
   **/
  @ApiModelProperty(value = "")
  @JsonProperty("mimeType")
  public String getMimeType() {
    return mimeType;
  }
  public void setMimeType(String mimeType) {
    this.mimeType = mimeType;
  }

  /**
   * Configuration definitions exposed by the previewer. Note: returns the
   * internal list directly (no defensive copy) — standard for this generated model.
   **/
  @ApiModelProperty(value = "")
  @JsonProperty("configDefinitions")
  public List<ConfigDefinitionJson> getConfigDefinitions() {
    return configDefinitions;
  }
  public void setConfigDefinitions(List<ConfigDefinitionJson> configDefinitions) {
    this.configDefinitions = configDefinitions;
  }

  /** Multi-line debug representation; one indented line per field. */
  @Override
  public String toString()  {
    StringBuilder sb = new StringBuilder();
    sb.append("class RawSourceDefinitionJson {\n");
    sb.append(" rawSourcePreviewerClass: ").append(StringUtil.toIndentedString(rawSourcePreviewerClass)).append("\n");
    sb.append(" mimeType: ").append(StringUtil.toIndentedString(mimeType)).append("\n");
    sb.append(" configDefinitions: ").append(StringUtil.toIndentedString(configDefinitions)).append("\n");
    sb.append("}");
    return sb.toString();
  }
}
apache-2.0
msavy/apiman
gateway/platforms/vertx3/vertx3/src/test/java/io/apiman/gateway/platforms/vertx3/components/ldap/LdapQueryTests.java
6859
/* * Copyright 2015 JBoss Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.apiman.gateway.platforms.vertx3.components.ldap; import io.apiman.gateway.engine.components.ldap.ILdapClientConnection; import io.apiman.gateway.engine.components.ldap.ILdapSearchEntry; import io.apiman.gateway.engine.components.ldap.LdapSearchScope; import io.apiman.gateway.engine.components.ldap.result.LdapResultCode; import io.vertx.ext.unit.Async; import io.vertx.ext.unit.TestCompletion; import io.vertx.ext.unit.TestContext; import io.vertx.ext.unit.TestSuite; import java.util.List; import org.junit.After; import org.junit.Test; /** * @author Marc Savy {@literal <msavy@redhat.com>} */ @SuppressWarnings("nls") public class LdapQueryTests extends LdapTestParent { public ILdapClientConnection connection; @After public void after() { if (connection != null) connection.close(); } @Test public void shouldConnectSuccessfully() { config.setBindDn("uid=admin,ou=system"); config.setBindPassword("secret"); TestCompletion completion = TestSuite.create("").test("", context -> { Async async = context.async(); ldapClientComponent.connect(config, connectionResult -> { context.assertTrue(connectionResult.isSuccess()); connection = connectionResult.getResult(); async.complete(); }); async.awaitSuccess(); }).run(); completion.awaitSuccess(); } @Test public void shouldCompleteSimpleQuery() { config.setBindDn("uid=admin,ou=system"); config.setBindPassword("secret"); connect((connection, context) -> { Async 
async = context.async(); connection.search("ou=people,o=apiman", "(uid=msavy)", LdapSearchScope.SUBTREE) .setLdapErrorHandler(result -> context.fail(result.getCause())) .search(searchResult -> { context.assertTrue(searchResult.isSuccess()); List<ILdapSearchEntry> result = searchResult.getResult(); context.assertEquals(1, result.size()); async.complete(); }); }); } @Test public void shouldCompleteMultipleSimpleQueries() { config.setBindDn("uid=admin,ou=system"); config.setBindPassword("secret"); connect((connection, context) -> { Async async = context.async(); Async async2 = context.async(); connection.search("ou=people,o=apiman", "(uid=msavy)", LdapSearchScope.SUBTREE) .setLdapErrorHandler(result -> context.fail(result.getCause())) .search(searchResult -> { context.assertTrue(searchResult.isSuccess()); List<ILdapSearchEntry> result = searchResult.getResult(); context.assertEquals(1, result.size()); async.complete(); }); connection.search("ou=people,o=apiman", "(uid=ewittman)", LdapSearchScope.SUBTREE) .setLdapErrorHandler(result -> context.fail(result.getCause())) .search(searchResult -> { context.assertTrue(searchResult.isSuccess()); List<ILdapSearchEntry> result = searchResult.getResult(); context.assertEquals(1, result.size()); async2.complete(); }); }); } @Test public void shouldReturnEmptyForUnmatchedFilter() { config.setBindDn("uid=admin,ou=system"); config.setBindPassword("secret"); connect((connection, context) -> { Async async = context.async(); connection.search("ou=people,o=apiman", "(uid=sushi)", LdapSearchScope.SUBTREE) .setLdapErrorHandler(result -> context.fail(result.getCause())) .search(searchResult -> { context.assertTrue(searchResult.isSuccess()); List<ILdapSearchEntry> result = searchResult.getResult(); context.assertEquals(0, result.size()); async.complete(); }); }); } @Test public void shouldErrorIfSearchDnInvalid() { config.setBindDn("uid=admin,ou=system"); config.setBindPassword("secret"); connect((connection, context) -> { Async async = 
context.async(); connection.search("invalid", "(uid=msavy)", LdapSearchScope.SUBTREE) .setLdapErrorHandler(error -> { System.err.println(error.getResultCode()); context.assertTrue(error.getResultCode().equals(LdapResultCode.INVALID_DN_SYNTAX)); async.complete(); }) .search(searchResult -> { context.assertFalse(true); // Should not be executed }); }); } @Test public void shouldErrorIfSearchFilterInvalid() { config.setBindDn("uid=admin,ou=system"); config.setBindPassword("secret"); connect((connection, context) -> { Async async = context.async(); connection.search("ou=people,o=apiman", "!!!!", LdapSearchScope.SUBTREE) .setLdapErrorHandler(error -> { context.assertTrue(error.getResultCode().equals(LdapResultCode.FILTER_ERROR)); async.complete(); }) .search(searchResult -> { context.assertFalse(true); // Should not be executed }); }); } private void connect(DoubleHandler<ILdapClientConnection, TestContext> handler) { TestCompletion completion = TestSuite.create("").test("", context -> { Async async = context.async(); ldapClientComponent.connect(config, connectionResult -> { context.assertTrue(connectionResult.isSuccess()); connection = connectionResult.getResult(); handler.handle(connection, context); async.complete(); }); async.awaitSuccess(); }).run(); completion.awaitSuccess(); } interface DoubleHandler<X, Y> { void handle(X x, Y y); } }
apache-2.0
intel-analytics/BigDL
scala/friesian/src/main/java/com/intel/analytics/bigdl/friesian/serving/recall/faiss/swighnswlib/IndexPQ.java
7972
/* ----------------------------------------------------------------------------
 * This file was automatically generated by SWIG (http://www.swig.org).
 * Version 3.0.12
 *
 * Do not make changes to this file unless you know what you are doing--modify
 * the SWIG interface file instead.
 * ----------------------------------------------------------------------------- */

package com.intel.analytics.bigdl.friesian.serving.recall.faiss.swighnswlib;

// SWIG-generated Java proxy for the native faiss IndexPQ type: every method
// delegates to a native function in swigfaissJNI, passing swigCPtr as the
// handle to the underlying native object. Only comments have been added here;
// behavioral changes belong in the SWIG interface file (see header).
public class IndexPQ extends Index {
  // Raw handle to the native object; passed to every swigfaissJNI call.
  private transient long swigCPtr;

  protected IndexPQ(long cPtr, boolean cMemoryOwn) {
    super(swigfaissJNI.IndexPQ_SWIGUpcast(cPtr), cMemoryOwn);
    swigCPtr = cPtr;
  }

  // Null-safe accessor for another instance's native handle (0 == null).
  protected static long getCPtr(IndexPQ obj) {
    return (obj == null) ? 0 : obj.swigCPtr;
  }

  protected void finalize() {
    delete();
  }

  // Frees the native object exactly once, and only if this proxy owns it
  // (swigCMemOwn, inherited from Index); safe to call repeatedly.
  public synchronized void delete() {
    if (swigCPtr != 0) {
      if (swigCMemOwn) {
        swigCMemOwn = false;
        swigfaissJNI.delete_IndexPQ(swigCPtr);
      }
      swigCPtr = 0;
    }
    super.delete();
  }

  // --- Accessors for native struct fields (pq, codes). The getters wrap the
  // returned native pointer in a non-owning proxy (cMemoryOwn = false).

  public void setPq(ProductQuantizer value) {
    swigfaissJNI.IndexPQ_pq_set(swigCPtr, this, ProductQuantizer.getCPtr(value), value);
  }

  public ProductQuantizer getPq() {
    long cPtr = swigfaissJNI.IndexPQ_pq_get(swigCPtr, this);
    return (cPtr == 0) ? null : new ProductQuantizer(cPtr, false);
  }

  public void setCodes(ByteVector value) {
    swigfaissJNI.IndexPQ_codes_set(swigCPtr, this, ByteVector.getCPtr(value), value);
  }

  public ByteVector getCodes() {
    long cPtr = swigfaissJNI.IndexPQ_codes_get(swigCPtr, this);
    return (cPtr == 0) ? null : new ByteVector(cPtr, false);
  }

  // --- Constructors (overloads of the native faiss::IndexPQ constructor).
  // The created proxies own their native memory (second arg 'true').

  public IndexPQ(int d, long M, long nbits, MetricType metric) {
    this(swigfaissJNI.new_IndexPQ__SWIG_0(d, M, nbits, metric.swigValue()), true);
  }

  public IndexPQ(int d, long M, long nbits) {
    this(swigfaissJNI.new_IndexPQ__SWIG_1(d, M, nbits), true);
  }

  public IndexPQ() {
    this(swigfaissJNI.new_IndexPQ__SWIG_2(), true);
  }

  // --- Index operations; all delegate straight to the native implementation.

  public void train(int n, SWIGTYPE_p_float x) {
    swigfaissJNI.IndexPQ_train(swigCPtr, this, n, SWIGTYPE_p_float.getCPtr(x));
  }

  public void add(int n, SWIGTYPE_p_float x) {
    swigfaissJNI.IndexPQ_add(swigCPtr, this, n, SWIGTYPE_p_float.getCPtr(x));
  }

  public void search(int n, SWIGTYPE_p_float x, int k, SWIGTYPE_p_float distances, SWIGTYPE_p_long labels) {
    swigfaissJNI.IndexPQ_search(swigCPtr, this, n, SWIGTYPE_p_float.getCPtr(x), k, SWIGTYPE_p_float.getCPtr(distances), SWIGTYPE_p_long.getCPtr(labels));
  }

  public void reset() {
    swigfaissJNI.IndexPQ_reset(swigCPtr, this);
  }

  public void reconstruct_n(int i0, int ni, SWIGTYPE_p_float recons) {
    swigfaissJNI.IndexPQ_reconstruct_n(swigCPtr, this, i0, ni, SWIGTYPE_p_float.getCPtr(recons));
  }

  public void reconstruct(int key, SWIGTYPE_p_float recons) {
    swigfaissJNI.IndexPQ_reconstruct(swigCPtr, this, key, SWIGTYPE_p_float.getCPtr(recons));
  }

  public long remove_ids(IDSelector sel) {
    return swigfaissJNI.IndexPQ_remove_ids(swigCPtr, this, IDSelector.getCPtr(sel), sel);
  }

  public long sa_code_size() {
    return swigfaissJNI.IndexPQ_sa_code_size(swigCPtr, this);
  }

  public void sa_encode(int n, SWIGTYPE_p_float x, SWIGTYPE_p_unsigned_char bytes) {
    swigfaissJNI.IndexPQ_sa_encode(swigCPtr, this, n, SWIGTYPE_p_float.getCPtr(x), SWIGTYPE_p_unsigned_char.getCPtr(bytes));
  }

  public void sa_decode(int n, SWIGTYPE_p_unsigned_char bytes, SWIGTYPE_p_float x) {
    swigfaissJNI.IndexPQ_sa_decode(swigCPtr, this, n, SWIGTYPE_p_unsigned_char.getCPtr(bytes), SWIGTYPE_p_float.getCPtr(x));
  }

  // NOTE: unlike the field getters above, this proxy takes ownership of the
  // returned native pointer (cMemoryOwn = true).
  public DistanceComputer get_distance_computer() {
    long cPtr = swigfaissJNI.IndexPQ_get_distance_computer(swigCPtr, this);
    return (cPtr == 0) ? null : new DistanceComputer(cPtr, true);
  }

  // --- Polysemous-training / search-type field accessors.

  public void setDo_polysemous_training(boolean value) {
    swigfaissJNI.IndexPQ_do_polysemous_training_set(swigCPtr, this, value);
  }

  public boolean getDo_polysemous_training() {
    return swigfaissJNI.IndexPQ_do_polysemous_training_get(swigCPtr, this);
  }

  public void setPolysemous_training(PolysemousTraining value) {
    swigfaissJNI.IndexPQ_polysemous_training_set(swigCPtr, this, PolysemousTraining.getCPtr(value), value);
  }

  public PolysemousTraining getPolysemous_training() {
    long cPtr = swigfaissJNI.IndexPQ_polysemous_training_get(swigCPtr, this);
    return (cPtr == 0) ? null : new PolysemousTraining(cPtr, false);
  }

  public void setSearch_type(IndexPQ.Search_type_t value) {
    swigfaissJNI.IndexPQ_search_type_set(swigCPtr, this, value.swigValue());
  }

  public IndexPQ.Search_type_t getSearch_type() {
    return IndexPQ.Search_type_t.swigToEnum(swigfaissJNI.IndexPQ_search_type_get(swigCPtr, this));
  }

  public void setEncode_signs(boolean value) {
    swigfaissJNI.IndexPQ_encode_signs_set(swigCPtr, this, value);
  }

  public boolean getEncode_signs() {
    return swigfaissJNI.IndexPQ_encode_signs_get(swigCPtr, this);
  }

  public void setPolysemous_ht(int value) {
    swigfaissJNI.IndexPQ_polysemous_ht_set(swigCPtr, this, value);
  }

  public int getPolysemous_ht() {
    return swigfaissJNI.IndexPQ_polysemous_ht_get(swigCPtr, this);
  }

  public void search_core_polysemous(int n, SWIGTYPE_p_float x, int k, SWIGTYPE_p_float distances, SWIGTYPE_p_long labels) {
    swigfaissJNI.IndexPQ_search_core_polysemous(swigCPtr, this, n, SWIGTYPE_p_float.getCPtr(x), k, SWIGTYPE_p_float.getCPtr(distances), SWIGTYPE_p_long.getCPtr(labels));
  }

  public void hamming_distance_histogram(int n, SWIGTYPE_p_float x, int nb, SWIGTYPE_p_float xb, SWIGTYPE_p_long dist_histogram) {
    swigfaissJNI.IndexPQ_hamming_distance_histogram(swigCPtr, this, n, SWIGTYPE_p_float.getCPtr(x), nb, SWIGTYPE_p_float.getCPtr(xb), SWIGTYPE_p_long.getCPtr(dist_histogram));
  }

  public void hamming_distance_table(int n, SWIGTYPE_p_float x, SWIGTYPE_p_int dis) {
    swigfaissJNI.IndexPQ_hamming_distance_table(swigCPtr, this, n, SWIGTYPE_p_float.getCPtr(x), SWIGTYPE_p_int.getCPtr(dis));
  }

  // SWIG's pre-Java-5 "typesafe enum" pattern mirroring the native
  // Search_type_t enum; values are assigned sequentially via swigNext.
  public final static class Search_type_t {
    public final static IndexPQ.Search_type_t ST_PQ = new IndexPQ.Search_type_t("ST_PQ");
    public final static IndexPQ.Search_type_t ST_HE = new IndexPQ.Search_type_t("ST_HE");
    public final static IndexPQ.Search_type_t ST_generalized_HE = new IndexPQ.Search_type_t("ST_generalized_HE");
    public final static IndexPQ.Search_type_t ST_SDC = new IndexPQ.Search_type_t("ST_SDC");
    public final static IndexPQ.Search_type_t ST_polysemous = new IndexPQ.Search_type_t("ST_polysemous");
    public final static IndexPQ.Search_type_t ST_polysemous_generalize = new IndexPQ.Search_type_t("ST_polysemous_generalize");

    public final int swigValue() {
      return swigValue;
    }

    public String toString() {
      return swigName;
    }

    // Maps a native int back to the enum constant: fast path by direct index,
    // then linear scan (values need not be contiguous in general).
    public static Search_type_t swigToEnum(int swigValue) {
      if (swigValue < swigValues.length && swigValue >= 0 && swigValues[swigValue].swigValue == swigValue)
        return swigValues[swigValue];
      for (int i = 0; i < swigValues.length; i++)
        if (swigValues[i].swigValue == swigValue)
          return swigValues[i];
      throw new IllegalArgumentException("No enum " + Search_type_t.class + " with value " + swigValue);
    }

    private Search_type_t(String swigName) {
      this.swigName = swigName;
      this.swigValue = swigNext++;
    }

    private Search_type_t(String swigName, int swigValue) {
      this.swigName = swigName;
      this.swigValue = swigValue;
      swigNext = swigValue+1;
    }

    private Search_type_t(String swigName, Search_type_t swigEnum) {
      this.swigName = swigName;
      this.swigValue = swigEnum.swigValue;
      swigNext = this.swigValue+1;
    }

    private static Search_type_t[] swigValues = { ST_PQ, ST_HE, ST_generalized_HE, ST_SDC, ST_polysemous, ST_polysemous_generalize };
    private static int swigNext = 0;
    private final int swigValue;
    private final String swigName;
  }
}
apache-2.0
resmo/cloudstack
server/src/com/cloud/capacity/CapacityManagerImpl.java
54679
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package com.cloud.capacity; import java.net.URI; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import javax.inject.Inject; import javax.naming.ConfigurationException; import com.cloud.resource.ResourceState; import com.cloud.utils.fsm.StateMachine2; import org.apache.log4j.Logger; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; import org.apache.cloudstack.framework.config.ConfigDepot; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.framework.messagebus.PublishScope; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import com.cloud.agent.AgentManager; import 
com.cloud.agent.Listener; import com.cloud.agent.api.AgentControlAnswer; import com.cloud.agent.api.AgentControlCommand; import com.cloud.agent.api.Answer; import com.cloud.agent.api.Command; import com.cloud.agent.api.StartupCommand; import com.cloud.agent.api.StartupRoutingCommand; import com.cloud.capacity.dao.CapacityDao; import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManager; import com.cloud.dc.ClusterDetailsDao; import com.cloud.dc.ClusterDetailsVO; import com.cloud.dc.ClusterVO; import com.cloud.dc.dao.ClusterDao; import com.cloud.deploy.DeploymentClusterPlanner; import com.cloud.event.UsageEventVO; import com.cloud.exception.ConnectionException; import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.Status; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao; import com.cloud.offering.ServiceOffering; import com.cloud.resource.ResourceListener; import com.cloud.resource.ResourceManager; import com.cloud.resource.ServerResource; import com.cloud.service.ServiceOfferingVO; import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.StorageManager; import com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.dao.VMTemplatePoolDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.utils.DateUtil; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.DB; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; import com.cloud.utils.db.TransactionCallbackNoReturn; import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.StateListener; import com.cloud.vm.UserVmDetailVO; import com.cloud.vm.UserVmVO; import 
com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.Event; import com.cloud.vm.VirtualMachine.State; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.UserVmDetailsDao; import com.cloud.vm.dao.VMInstanceDao; import com.cloud.vm.snapshot.dao.VMSnapshotDao; public class CapacityManagerImpl extends ManagerBase implements CapacityManager, StateListener<State, VirtualMachine.Event, VirtualMachine>, Listener, ResourceListener, Configurable { private static final Logger s_logger = Logger.getLogger(CapacityManagerImpl.class); @Inject CapacityDao _capacityDao; @Inject ConfigurationDao _configDao; @Inject ServiceOfferingDao _offeringsDao; @Inject HostDao _hostDao; @Inject VMInstanceDao _vmDao; @Inject VolumeDao _volumeDao; @Inject VMTemplatePoolDao _templatePoolDao; @Inject AgentManager _agentManager; @Inject ResourceManager _resourceMgr; @Inject StorageManager _storageMgr; @Inject ConfigurationManager _configMgr; @Inject HypervisorCapabilitiesDao _hypervisorCapabilitiesDao; @Inject protected VMSnapshotDao _vmSnapshotDao; @Inject protected UserVmDao _userVMDao; @Inject protected UserVmDetailsDao _userVmDetailsDao; @Inject ClusterDao _clusterDao; @Inject ConfigDepot _configDepot; @Inject DataStoreProviderManager _dataStoreProviderMgr; @Inject ClusterDetailsDao _clusterDetailsDao; private int _vmCapacityReleaseInterval; private ScheduledExecutorService _executor; long _extraBytesPerVolume = 0; @Inject MessageBus _messageBus; private static final String MESSAGE_RESERVED_CAPACITY_FREED_FLAG = "Message.ReservedCapacityFreed.Flag"; @Override public boolean configure(String name, Map<String, Object> params) throws ConfigurationException { _vmCapacityReleaseInterval = NumbersUtil.parseInt(_configDao.getValue(Config.CapacitySkipcountingHours.key()), 3600); _executor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("HostCapacity-Checker")); VirtualMachine.State.getStateMachine().registerListener(this); 
_agentManager.registerForHostEvents(new StorageCapacityListener(_capacityDao, _storageMgr), true, false, false); _agentManager.registerForHostEvents(new ComputeCapacityListener(_capacityDao, this), true, false, false); return true; } @Override public boolean start() { _resourceMgr.registerResourceEvent(ResourceListener.EVENT_PREPARE_MAINTENANCE_AFTER, this); _resourceMgr.registerResourceEvent(ResourceListener.EVENT_CANCEL_MAINTENANCE_AFTER, this); return true; } @Override public boolean stop() { _executor.shutdownNow(); return true; } @DB @Override public boolean releaseVmCapacity(VirtualMachine vm, final boolean moveFromReserved, final boolean moveToReservered, final Long hostId) { if (hostId == null) { return true; } final ServiceOfferingVO svo = _offeringsDao.findById(vm.getId(), vm.getServiceOfferingId()); CapacityVO capacityCpu = _capacityDao.findByHostIdType(hostId, Capacity.CAPACITY_TYPE_CPU); CapacityVO capacityMemory = _capacityDao.findByHostIdType(hostId, Capacity.CAPACITY_TYPE_MEMORY); Long clusterId = null; if (hostId != null) { HostVO host = _hostDao.findById(hostId); if (host == null) { s_logger.warn("Host " + hostId + " no long exist anymore!"); return true; } clusterId = host.getClusterId(); } if (capacityCpu == null || capacityMemory == null || svo == null) { return false; } try { final Long clusterIdFinal = clusterId; final long capacityCpuId = capacityCpu.getId(); final long capacityMemoryId = capacityMemory.getId(); Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(TransactionStatus status) { CapacityVO capacityCpu = _capacityDao.lockRow(capacityCpuId, true); CapacityVO capacityMemory = _capacityDao.lockRow(capacityMemoryId, true); long usedCpu = capacityCpu.getUsedCapacity(); long usedMem = capacityMemory.getUsedCapacity(); long reservedCpu = capacityCpu.getReservedCapacity(); long reservedMem = capacityMemory.getReservedCapacity(); long actualTotalCpu = capacityCpu.getTotalCapacity(); 
float cpuOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterIdFinal, "cpuOvercommitRatio").getValue()); float memoryOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterIdFinal, "memoryOvercommitRatio").getValue()); int vmCPU = svo.getCpu() * svo.getSpeed(); long vmMem = svo.getRamSize() * 1024L * 1024L; long actualTotalMem = capacityMemory.getTotalCapacity(); long totalMem = (long)(actualTotalMem * memoryOvercommitRatio); long totalCpu = (long)(actualTotalCpu * cpuOvercommitRatio); if (s_logger.isDebugEnabled()) { s_logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and CPU after applying overprovisioning: " + totalCpu); s_logger.debug("Hosts's actual total RAM: " + actualTotalMem + " and RAM after applying overprovisioning: " + totalMem); } if (!moveFromReserved) { /* move resource from used */ if (usedCpu >= vmCPU) { capacityCpu.setUsedCapacity(usedCpu - vmCPU); } if (usedMem >= vmMem) { capacityMemory.setUsedCapacity(usedMem - vmMem); } if (moveToReservered) { if (reservedCpu + vmCPU <= totalCpu) { capacityCpu.setReservedCapacity(reservedCpu + vmCPU); } if (reservedMem + vmMem <= totalMem) { capacityMemory.setReservedCapacity(reservedMem + vmMem); } } } else { if (reservedCpu >= vmCPU) { capacityCpu.setReservedCapacity(reservedCpu - vmCPU); } if (reservedMem >= vmMem) { capacityMemory.setReservedCapacity(reservedMem - vmMem); } } s_logger.debug("release cpu from host: " + hostId + ", old used: " + usedCpu + ",reserved: " + reservedCpu + ", actual total: " + actualTotalCpu + ", total with overprovisioning: " + totalCpu + "; new used: " + capacityCpu.getUsedCapacity() + ",reserved:" + capacityCpu.getReservedCapacity() + "; movedfromreserved: " + moveFromReserved + ",moveToReservered" + moveToReservered); s_logger.debug("release mem from host: " + hostId + ", old used: " + usedMem + ",reserved: " + reservedMem + ", total: " + totalMem + "; new used: " + capacityMemory.getUsedCapacity() + ",reserved:" + 
capacityMemory.getReservedCapacity() + "; movedfromreserved: " + moveFromReserved + ",moveToReservered" + moveToReservered); _capacityDao.update(capacityCpu.getId(), capacityCpu); _capacityDao.update(capacityMemory.getId(), capacityMemory); } }); return true; } catch (Exception e) { s_logger.debug("Failed to transit vm's state, due to " + e.getMessage()); return false; } } @DB @Override public void allocateVmCapacity(VirtualMachine vm, final boolean fromLastHost) { final long hostId = vm.getHostId(); HostVO host = _hostDao.findById(hostId); final long clusterId = host.getClusterId(); final float cpuOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterId, "cpuOvercommitRatio").getValue()); final float memoryOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterId, "memoryOvercommitRatio").getValue()); final ServiceOfferingVO svo = _offeringsDao.findById(vm.getId(), vm.getServiceOfferingId()); CapacityVO capacityCpu = _capacityDao.findByHostIdType(hostId, Capacity.CAPACITY_TYPE_CPU); CapacityVO capacityMem = _capacityDao.findByHostIdType(hostId, Capacity.CAPACITY_TYPE_MEMORY); if (capacityCpu == null || capacityMem == null || svo == null) { return; } final int cpu = svo.getCpu() * svo.getSpeed(); final long ram = svo.getRamSize() * 1024L * 1024L; try { final long capacityCpuId = capacityCpu.getId(); final long capacityMemId = capacityMem.getId(); Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(TransactionStatus status) { CapacityVO capacityCpu = _capacityDao.lockRow(capacityCpuId, true); CapacityVO capacityMem = _capacityDao.lockRow(capacityMemId, true); long usedCpu = capacityCpu.getUsedCapacity(); long usedMem = capacityMem.getUsedCapacity(); long reservedCpu = capacityCpu.getReservedCapacity(); long reservedMem = capacityMem.getReservedCapacity(); long actualTotalCpu = capacityCpu.getTotalCapacity(); long actualTotalMem = capacityMem.getTotalCapacity(); long totalCpu = 
(long)(actualTotalCpu * cpuOvercommitRatio); long totalMem = (long)(actualTotalMem * memoryOvercommitRatio); if (s_logger.isDebugEnabled()) { s_logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and CPU after applying overprovisioning: " + totalCpu); } long freeCpu = totalCpu - (reservedCpu + usedCpu); long freeMem = totalMem - (reservedMem + usedMem); if (s_logger.isDebugEnabled()) { s_logger.debug("We are allocating VM, increasing the used capacity of this host:" + hostId); s_logger.debug("Current Used CPU: " + usedCpu + " , Free CPU:" + freeCpu + " ,Requested CPU: " + cpu); s_logger.debug("Current Used RAM: " + usedMem + " , Free RAM:" + freeMem + " ,Requested RAM: " + ram); } capacityCpu.setUsedCapacity(usedCpu + cpu); capacityMem.setUsedCapacity(usedMem + ram); if (fromLastHost) { /* alloc from reserved */ if (s_logger.isDebugEnabled()) { s_logger.debug("We are allocating VM to the last host again, so adjusting the reserved capacity if it is not less than required"); s_logger.debug("Reserved CPU: " + reservedCpu + " , Requested CPU: " + cpu); s_logger.debug("Reserved RAM: " + reservedMem + " , Requested RAM: " + ram); } if (reservedCpu >= cpu && reservedMem >= ram) { capacityCpu.setReservedCapacity(reservedCpu - cpu); capacityMem.setReservedCapacity(reservedMem - ram); } } else { /* alloc from free resource */ if (!((reservedCpu + usedCpu + cpu <= totalCpu) && (reservedMem + usedMem + ram <= totalMem))) { if (s_logger.isDebugEnabled()) { s_logger.debug("Host doesnt seem to have enough free capacity, but increasing the used capacity anyways, " + "since the VM is already starting on this host "); } } } s_logger.debug("CPU STATS after allocation: for host: " + hostId + ", old used: " + usedCpu + ", old reserved: " + reservedCpu + ", actual total: " + actualTotalCpu + ", total with overprovisioning: " + totalCpu + "; new used:" + capacityCpu.getUsedCapacity() + ", reserved:" + capacityCpu.getReservedCapacity() + "; requested cpu:" + cpu + 
",alloc_from_last:" + fromLastHost); s_logger.debug("RAM STATS after allocation: for host: " + hostId + ", old used: " + usedMem + ", old reserved: " + reservedMem + ", total: " + totalMem + "; new used: " + capacityMem.getUsedCapacity() + ", reserved: " + capacityMem.getReservedCapacity() + "; requested mem: " + ram + ",alloc_from_last:" + fromLastHost); _capacityDao.update(capacityCpu.getId(), capacityCpu); _capacityDao.update(capacityMem.getId(), capacityMem); } }); } catch (Exception e) { s_logger.error("Exception allocating VM capacity", e); return; } } @Override public boolean checkIfHostHasCpuCapability(long hostId, Integer cpuNum, Integer cpuSpeed) { // Check host can support the Cpu Number and Speed. Host host = _hostDao.findById(hostId); boolean isCpuNumGood = host.getCpus().intValue() >= cpuNum; boolean isCpuSpeedGood = host.getSpeed().intValue() >= cpuSpeed; if (isCpuNumGood && isCpuSpeedGood) { if (s_logger.isDebugEnabled()) { s_logger.debug("Host: " + hostId + " has cpu capability (cpu:" + host.getCpus() + ", speed:" + host.getSpeed() + ") to support requested CPU: " + cpuNum + " and requested speed: " + cpuSpeed); } return true; } else { if (s_logger.isDebugEnabled()) { s_logger.debug("Host: " + hostId + " doesn't have cpu capability (cpu:" + host.getCpus() + ", speed:" + host.getSpeed() + ") to support requested CPU: " + cpuNum + " and requested speed: " + cpuSpeed); } return false; } } @Override public boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolean checkFromReservedCapacity, float cpuOvercommitRatio, float memoryOvercommitRatio, boolean considerReservedCapacity) { boolean hasCapacity = false; if (s_logger.isDebugEnabled()) { s_logger.debug("Checking if host: " + hostId + " has enough capacity for requested CPU: " + cpu + " and requested RAM: " + ram + " , cpuOverprovisioningFactor: " + cpuOvercommitRatio); } CapacityVO capacityCpu = _capacityDao.findByHostIdType(hostId, Capacity.CAPACITY_TYPE_CPU); CapacityVO capacityMem 
= _capacityDao.findByHostIdType(hostId, Capacity.CAPACITY_TYPE_MEMORY); if (capacityCpu == null || capacityMem == null) { if (capacityCpu == null) { if (s_logger.isDebugEnabled()) { s_logger.debug("Cannot checkIfHostHasCapacity, Capacity entry for CPU not found in Db, for hostId: " + hostId); } } if (capacityMem == null) { if (s_logger.isDebugEnabled()) { s_logger.debug("Cannot checkIfHostHasCapacity, Capacity entry for RAM not found in Db, for hostId: " + hostId); } } return false; } long usedCpu = capacityCpu.getUsedCapacity(); long usedMem = capacityMem.getUsedCapacity(); long reservedCpu = capacityCpu.getReservedCapacity(); long reservedMem = capacityMem.getReservedCapacity(); long actualTotalCpu = capacityCpu.getTotalCapacity(); long actualTotalMem = capacityMem.getTotalCapacity(); long totalCpu = (long)(actualTotalCpu * cpuOvercommitRatio); long totalMem = (long)(actualTotalMem * memoryOvercommitRatio); if (s_logger.isDebugEnabled()) { s_logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and CPU after applying overprovisioning: " + totalCpu); } String failureReason = ""; if (checkFromReservedCapacity) { long freeCpu = reservedCpu; long freeMem = reservedMem; if (s_logger.isDebugEnabled()) { s_logger.debug("We need to allocate to the last host again, so checking if there is enough reserved capacity"); s_logger.debug("Reserved CPU: " + freeCpu + " , Requested CPU: " + cpu); s_logger.debug("Reserved RAM: " + freeMem + " , Requested RAM: " + ram); } /* alloc from reserved */ if (reservedCpu >= cpu) { if (reservedMem >= ram) { hasCapacity = true; } else { failureReason = "Host does not have enough reserved RAM available"; } } else { failureReason = "Host does not have enough reserved CPU available"; } } else { long reservedCpuValueToUse = reservedCpu; long reservedMemValueToUse = reservedMem; if (!considerReservedCapacity) { if (s_logger.isDebugEnabled()) { s_logger.debug("considerReservedCapacity is" + considerReservedCapacity + " , not considering 
reserved capacity for calculating free capacity"); } reservedCpuValueToUse = 0; reservedMemValueToUse = 0; } long freeCpu = totalCpu - (reservedCpuValueToUse + usedCpu); long freeMem = totalMem - (reservedMemValueToUse + usedMem); if (s_logger.isDebugEnabled()) { s_logger.debug("Free CPU: " + freeCpu + " , Requested CPU: " + cpu); s_logger.debug("Free RAM: " + freeMem + " , Requested RAM: " + ram); } /* alloc from free resource */ if ((reservedCpuValueToUse + usedCpu + cpu <= totalCpu)) { if ((reservedMemValueToUse + usedMem + ram <= totalMem)) { hasCapacity = true; } else { failureReason = "Host does not have enough RAM available"; } } else { failureReason = "Host does not have enough CPU available"; } } if (hasCapacity) { if (s_logger.isDebugEnabled()) { s_logger.debug("Host has enough CPU and RAM available"); } s_logger.debug("STATS: Can alloc CPU from host: " + hostId + ", used: " + usedCpu + ", reserved: " + reservedCpu + ", actual total: " + actualTotalCpu + ", total with overprovisioning: " + totalCpu + "; requested cpu:" + cpu + ",alloc_from_last_host?:" + checkFromReservedCapacity + " ,considerReservedCapacity?: " + considerReservedCapacity); s_logger.debug("STATS: Can alloc MEM from host: " + hostId + ", used: " + usedMem + ", reserved: " + reservedMem + ", total: " + totalMem + "; requested mem: " + ram + ",alloc_from_last_host?:" + checkFromReservedCapacity + " ,considerReservedCapacity?: " + considerReservedCapacity); } else { if (checkFromReservedCapacity) { s_logger.debug("STATS: Failed to alloc resource from host: " + hostId + " reservedCpu: " + reservedCpu + ", requested cpu: " + cpu + ", reservedMem: " + reservedMem + ", requested mem: " + ram); } else { s_logger.debug("STATS: Failed to alloc resource from host: " + hostId + " reservedCpu: " + reservedCpu + ", used cpu: " + usedCpu + ", requested cpu: " + cpu + ", actual total cpu: " + actualTotalCpu + ", total cpu with overprovisioning: " + totalCpu + ", reservedMem: " + reservedMem + ", used 
Mem: " + usedMem + ", requested mem: " + ram + ", total Mem:" + totalMem + " ,considerReservedCapacity?: " + considerReservedCapacity); } if (s_logger.isDebugEnabled()) { s_logger.debug(failureReason + ", cannot allocate to this host."); } } return hasCapacity; } @Override public long getUsedBytes(StoragePoolVO pool) { DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); DataStoreDriver storeDriver = storeProvider.getDataStoreDriver(); if (storeDriver instanceof PrimaryDataStoreDriver) { PrimaryDataStoreDriver primaryStoreDriver = (PrimaryDataStoreDriver)storeDriver; return primaryStoreDriver.getUsedBytes(pool); } throw new CloudRuntimeException("Storage driver in CapacityManagerImpl.getUsedBytes(StoragePoolVO) is not a PrimaryDataStoreDriver."); } @Override public long getUsedIops(StoragePoolVO pool) { DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); DataStoreDriver storeDriver = storeProvider.getDataStoreDriver(); if (storeDriver instanceof PrimaryDataStoreDriver) { PrimaryDataStoreDriver primaryStoreDriver = (PrimaryDataStoreDriver)storeDriver; return primaryStoreDriver.getUsedIops(pool); } throw new CloudRuntimeException("Storage driver in CapacityManagerImpl.getUsedIops(StoragePoolVO) is not a PrimaryDataStoreDriver."); } @Override public long getAllocatedPoolCapacity(StoragePoolVO pool, VMTemplateVO templateForVmCreation) { long totalAllocatedSize = 0; // if the storage pool is managed, the used bytes can be larger than the sum of the sizes of all of the non-destroyed volumes // in this case, call getUsedBytes(StoragePoolVO) if (pool.isManaged()) { return getUsedBytes(pool); } else { // Get size for all the non-destroyed volumes. Pair<Long, Long> sizes = _volumeDao.getNonDestroyedCountAndTotalByPool(pool.getId()); totalAllocatedSize = sizes.second() + sizes.first() * _extraBytesPerVolume; } // Get size for VM Snapshots. 
totalAllocatedSize += _volumeDao.getVMSnapshotSizeByPool(pool.getId()); boolean tmpInstalled = false; // Iterate through all templates on this storage pool. List<VMTemplateStoragePoolVO> templatePoolVOs = _templatePoolDao.listByPoolId(pool.getId()); for (VMTemplateStoragePoolVO templatePoolVO : templatePoolVOs) { if ((templateForVmCreation != null) && !tmpInstalled && (templatePoolVO.getTemplateId() == templateForVmCreation.getId())) { tmpInstalled = true; } long templateSize = templatePoolVO.getTemplateSize(); totalAllocatedSize += templateSize + _extraBytesPerVolume; } if ((templateForVmCreation != null) && !tmpInstalled) { long templateForVmCreationSize = templateForVmCreation.getSize() != null ? templateForVmCreation.getSize() : 0; totalAllocatedSize += templateForVmCreationSize + _extraBytesPerVolume; } return totalAllocatedSize; } @DB @Override public void updateCapacityForHost(final Host host) { // prepare the service offerings List<ServiceOfferingVO> offerings = _offeringsDao.listAllIncludingRemoved(); Map<Long, ServiceOfferingVO> offeringsMap = new HashMap<Long, ServiceOfferingVO>(); for (ServiceOfferingVO offering : offerings) { offeringsMap.put(offering.getId(), offering); } long usedCpu = 0; long usedMemory = 0; long reservedMemory = 0; long reservedCpu = 0; final CapacityState capacityState = (host.getResourceState() == ResourceState.Enabled) ? 
CapacityState.Enabled : CapacityState.Disabled; List<VMInstanceVO> vms = _vmDao.listUpByHostId(host.getId()); if (s_logger.isDebugEnabled()) { s_logger.debug("Found " + vms.size() + " VMs on host " + host.getId()); } ClusterVO cluster = _clusterDao.findById(host.getClusterId()); ClusterDetailsVO clusterDetailCpu = _clusterDetailsDao.findDetail(cluster.getId(), "cpuOvercommitRatio"); ClusterDetailsVO clusterDetailRam = _clusterDetailsDao.findDetail(cluster.getId(), "memoryOvercommitRatio"); Float clusterCpuOvercommitRatio = Float.parseFloat(clusterDetailCpu.getValue()); Float clusterRamOvercommitRatio = Float.parseFloat(clusterDetailRam.getValue()); Float cpuOvercommitRatio = 1f; Float ramOvercommitRatio = 1f; for (VMInstanceVO vm : vms) { Map<String, String> vmDetails = _userVmDetailsDao.listDetailsKeyPairs(vm.getId()); String vmDetailCpu = vmDetails.get("cpuOvercommitRatio"); String vmDetailRam = vmDetails.get("memoryOvercommitRatio"); if (vmDetailCpu != null) { //if vmDetail_cpu is not null it means it is running in a overcommited cluster. 
cpuOvercommitRatio = Float.parseFloat(vmDetailCpu); ramOvercommitRatio = Float.parseFloat(vmDetailRam); } ServiceOffering so = offeringsMap.get(vm.getServiceOfferingId()); if (so.isDynamic()) { usedMemory += ((Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.memory.name())) * 1024L * 1024L) / ramOvercommitRatio) * clusterRamOvercommitRatio; usedCpu += ((Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuNumber.name())) * Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuSpeed.name()))) / cpuOvercommitRatio) * clusterCpuOvercommitRatio; } else { usedMemory += ((so.getRamSize() * 1024L * 1024L) / ramOvercommitRatio) * clusterRamOvercommitRatio; usedCpu += ((so.getCpu() * so.getSpeed()) / cpuOvercommitRatio) * clusterCpuOvercommitRatio; } } List<VMInstanceVO> vmsByLastHostId = _vmDao.listByLastHostId(host.getId()); if (s_logger.isDebugEnabled()) { s_logger.debug("Found " + vmsByLastHostId.size() + " VM, not running on host " + host.getId()); } for (VMInstanceVO vm : vmsByLastHostId) { long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - vm.getUpdateTime().getTime()) / 1000; if (secondsSinceLastUpdate < _vmCapacityReleaseInterval) { UserVmDetailVO vmDetailCpu = _userVmDetailsDao.findDetail(vm.getId(), "cpuOvercommitRatio"); UserVmDetailVO vmDetailRam = _userVmDetailsDao.findDetail(vm.getId(), "memoryOvercommitRatio"); if (vmDetailCpu != null) { //if vmDetail_cpu is not null it means it is running in a overcommited cluster. 
cpuOvercommitRatio = Float.parseFloat(vmDetailCpu.getValue()); ramOvercommitRatio = Float.parseFloat(vmDetailRam.getValue()); } ServiceOffering so = offeringsMap.get(vm.getServiceOfferingId()); Map<String, String> vmDetails = _userVmDetailsDao.listDetailsKeyPairs(vm.getId()); if (so.isDynamic()) { reservedMemory += ((Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.memory.name())) * 1024L * 1024L) / ramOvercommitRatio) * clusterRamOvercommitRatio; reservedCpu += ((Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuNumber.name())) * Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuSpeed.name()))) / cpuOvercommitRatio) * clusterCpuOvercommitRatio; } else { reservedMemory += ((so.getRamSize() * 1024L * 1024L) / ramOvercommitRatio) * clusterRamOvercommitRatio; reservedCpu += (so.getCpu() * so.getSpeed() / cpuOvercommitRatio) * clusterCpuOvercommitRatio; } } else { // signal if not done already, that the VM has been stopped for skip.counting.hours, // hence capacity will not be reserved anymore. 
UserVmDetailVO messageSentFlag = _userVmDetailsDao.findDetail(vm.getId(), MESSAGE_RESERVED_CAPACITY_FREED_FLAG); if (messageSentFlag == null || !Boolean.valueOf(messageSentFlag.getValue())) { _messageBus.publish(_name, "VM_ReservedCapacity_Free", PublishScope.LOCAL, vm); if (vm.getType() == VirtualMachine.Type.User) { UserVmVO userVM = _userVMDao.findById(vm.getId()); _userVMDao.loadDetails(userVM); userVM.setDetail(MESSAGE_RESERVED_CAPACITY_FREED_FLAG, "true"); _userVMDao.saveDetails(userVM); } } } } CapacityVO cpuCap = _capacityDao.findByHostIdType(host.getId(), Capacity.CAPACITY_TYPE_CPU); CapacityVO memCap = _capacityDao.findByHostIdType(host.getId(), Capacity.CAPACITY_TYPE_MEMORY); if (cpuCap != null && memCap != null) { if (host.getTotalMemory() != null) { memCap.setTotalCapacity(host.getTotalMemory()); } long hostTotalCpu = host.getCpus().longValue() * host.getSpeed().longValue(); if (cpuCap.getTotalCapacity() != hostTotalCpu) { s_logger.debug("Calibrate total cpu for host: " + host.getId() + " old total CPU:" + cpuCap.getTotalCapacity() + " new total CPU:" + hostTotalCpu); cpuCap.setTotalCapacity(hostTotalCpu); } // Set the capacity state as per the host allocation state. 
if(capacityState != cpuCap.getCapacityState()){ s_logger.debug("Calibrate cpu capacity state for host: " + host.getId() + " old capacity state:" + cpuCap.getTotalCapacity() + " new capacity state:" + hostTotalCpu); cpuCap.setCapacityState(capacityState); } memCap.setCapacityState(capacityState); if (cpuCap.getUsedCapacity() == usedCpu && cpuCap.getReservedCapacity() == reservedCpu) { s_logger.debug("No need to calibrate cpu capacity, host:" + host.getId() + " usedCpu: " + cpuCap.getUsedCapacity() + " reservedCpu: " + cpuCap.getReservedCapacity()); } else { if (cpuCap.getReservedCapacity() != reservedCpu) { s_logger.debug("Calibrate reserved cpu for host: " + host.getId() + " old reservedCpu:" + cpuCap.getReservedCapacity() + " new reservedCpu:" + reservedCpu); cpuCap.setReservedCapacity(reservedCpu); } if (cpuCap.getUsedCapacity() != usedCpu) { s_logger.debug("Calibrate used cpu for host: " + host.getId() + " old usedCpu:" + cpuCap.getUsedCapacity() + " new usedCpu:" + usedCpu); cpuCap.setUsedCapacity(usedCpu); } } if (memCap.getTotalCapacity() != host.getTotalMemory()) { s_logger.debug("Calibrate total memory for host: " + host.getId() + " old total memory:" + memCap.getTotalCapacity() + " new total memory:" + host.getTotalMemory()); memCap.setTotalCapacity(host.getTotalMemory()); } // Set the capacity state as per the host allocation state. 
if(capacityState != memCap.getCapacityState()){ s_logger.debug("Calibrate memory capacity state for host: " + host.getId() + " old capacity state:" + memCap.getTotalCapacity() + " new capacity state:" + hostTotalCpu); memCap.setCapacityState(capacityState); } if (memCap.getUsedCapacity() == usedMemory && memCap.getReservedCapacity() == reservedMemory) { s_logger.debug("No need to calibrate memory capacity, host:" + host.getId() + " usedMem: " + memCap.getUsedCapacity() + " reservedMem: " + memCap.getReservedCapacity()); } else { if (memCap.getReservedCapacity() != reservedMemory) { s_logger.debug("Calibrate reserved memory for host: " + host.getId() + " old reservedMem:" + memCap.getReservedCapacity() + " new reservedMem:" + reservedMemory); memCap.setReservedCapacity(reservedMemory); } if (memCap.getUsedCapacity() != usedMemory) { /* * Didn't calibrate for used memory, because VMs can be in * state(starting/migrating) that I don't know on which host * they are allocated */ s_logger.debug("Calibrate used memory for host: " + host.getId() + " old usedMem: " + memCap.getUsedCapacity() + " new usedMem: " + usedMemory); memCap.setUsedCapacity(usedMemory); } } try { _capacityDao.update(cpuCap.getId(), cpuCap); _capacityDao.update(memCap.getId(), memCap); } catch (Exception e) { s_logger.error("Caught exception while updating cpu/memory capacity for the host " + host.getId(), e); } } else { final long usedMemoryFinal = usedMemory; final long reservedMemoryFinal = reservedMemory; final long usedCpuFinal = usedCpu; final long reservedCpuFinal = reservedCpu; Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(TransactionStatus status) { CapacityVO capacity = new CapacityVO(host.getId(), host.getDataCenterId(), host.getPodId(), host.getClusterId(), usedMemoryFinal, host.getTotalMemory(), Capacity.CAPACITY_TYPE_MEMORY); capacity.setReservedCapacity(reservedMemoryFinal); capacity.setCapacityState(capacityState); 
_capacityDao.persist(capacity);
                    capacity = new CapacityVO(host.getId(), host.getDataCenterId(), host.getPodId(), host.getClusterId(), usedCpuFinal,
                        host.getCpus().longValue() * host.getSpeed().longValue(), Capacity.CAPACITY_TYPE_CPU);
                    capacity.setReservedCapacity(reservedCpuFinal);
                    capacity.setCapacityState(capacityState);
                    _capacityDao.persist(capacity);
                }
            });
        }
    }

    // No-op veto hook: capacity accounting never blocks a VM state transition.
    @Override
    public boolean preStateTransitionEvent(State oldState, Event event, State newState, VirtualMachine vm, boolean transitionStatus, Object opaque) {
        return true;
    }

    // Adjusts host capacity bookkeeping after a VM state transition: releases the old
    // host's used/reserved footprint and/or charges the new host, depending on the
    // (oldState, event, newState) combination handled below.
    @Override
    public boolean postStateTransitionEvent(StateMachine2.Transition<State, Event> transition, VirtualMachine vm, boolean status, Object opaque) {
        if (!status) {
            return false;
        }
        @SuppressWarnings("unchecked")
        Pair<Long, Long> hosts = (Pair<Long, Long>)opaque; // first() is the host id before the transition; second element unused here — TODO confirm its meaning at the caller
        Long oldHostId = hosts.first();
        State oldState = transition.getCurrentState();
        State newState = transition.getToState();
        Event event = transition.getEvent();
        s_logger.debug("VM state transitted from :" + oldState + " to " + newState + " with event: " + event + "vm's original host id: " + vm.getLastHostId() + " new host id: " +
            vm.getHostId() + " host id before state transition: " + oldHostId);
        if (oldState == State.Starting) {
            // Start did not reach Running: give back what was charged on the target host.
            if (newState != State.Running) {
                releaseVmCapacity(vm, false, false, oldHostId);
            }
        } else if (oldState == State.Running) {
            if (event == Event.AgentReportStopped) {
                // Stopped out-of-band: move the footprint from used to reserved on the old host.
                releaseVmCapacity(vm, false, true, oldHostId);
            } else if (event == Event.AgentReportMigrated) {
                releaseVmCapacity(vm, false, false, oldHostId);
            }
        } else if (oldState == State.Migrating) {
            if (event == Event.AgentReportStopped) {
                /* Release capacity from original host */
                releaseVmCapacity(vm, false, false, vm.getLastHostId());
                releaseVmCapacity(vm, false, false, oldHostId);
            } else if (event == Event.OperationFailed) {
                /* Release from dest host */
                releaseVmCapacity(vm, false, false, oldHostId);
            } else if (event == Event.OperationSucceeded) {
                // Migration completed: release the source (last) host's footprint.
                releaseVmCapacity(vm, false, false, vm.getLastHostId());
            }
        } else if (oldState == State.Stopping) {
            if (event == Event.OperationSucceeded) {
                // Clean stop: keep the footprint parked as reserved on the old host.
                releaseVmCapacity(vm, false, true, oldHostId);
            } else if (event == Event.AgentReportStopped) {
                releaseVmCapacity(vm, false, false, oldHostId);
            } else if (event == Event.AgentReportMigrated) {
                releaseVmCapacity(vm, false, false, oldHostId);
            }
        } else if (oldState == State.Stopped) {
            if (event == Event.DestroyRequested || event == Event.ExpungeOperation) {
                // Destroy/expunge frees the reserved capacity still held on the last host.
                releaseVmCapacity(vm, true, false, vm.getLastHostId());
            } else if (event == Event.AgentReportMigrated) {
                releaseVmCapacity(vm, false, false, oldHostId);
            }
        }
        // Charge the destination host when the VM is starting or migrating onto it.
        if ((newState == State.Starting || newState == State.Migrating || event == Event.AgentReportMigrated) && vm.getHostId() != null) {
            boolean fromLastHost = false;
            if (vm.getHostId().equals(vm.getLastHostId())) {
                s_logger.debug("VM starting again on the last host it was stopped on");
                fromLastHost = true;
            }
            allocateVmCapacity(vm, fromLastHost);
        }
        if (newState == State.Stopped) {
            if (vm.getType() == VirtualMachine.Type.User) {
                UserVmVO userVM = _userVMDao.findById(vm.getId());
                _userVMDao.loadDetails(userVM);
                // free the message sent flag if it exists
                userVM.setDetail(MESSAGE_RESERVED_CAPACITY_FREED_FLAG, "false");
                _userVMDao.saveDetails(userVM);
            }
        }
        return true;
    }

    // TODO: Get rid of this case once we've determined that the capacity listeners above have all the changes
    // create capacity entries if none exist for this server
    private void createCapacityEntry(StartupCommand startup, HostVO server) {
        SearchCriteria<CapacityVO> capacitySC = _capacityDao.createSearchCriteria();
        capacitySC.addAnd("hostOrPoolId", SearchCriteria.Op.EQ, server.getId());
        capacitySC.addAnd("dataCenterId", SearchCriteria.Op.EQ, server.getDataCenterId());
        capacitySC.addAnd("podId", SearchCriteria.Op.EQ, server.getPodId());
        if (startup instanceof StartupRoutingCommand) {
            SearchCriteria<CapacityVO> capacityCPU = _capacityDao.createSearchCriteria();
            capacityCPU.addAnd("hostOrPoolId",
SearchCriteria.Op.EQ, server.getId()); capacityCPU.addAnd("dataCenterId", SearchCriteria.Op.EQ, server.getDataCenterId()); capacityCPU.addAnd("podId", SearchCriteria.Op.EQ, server.getPodId()); capacityCPU.addAnd("capacityType", SearchCriteria.Op.EQ, Capacity.CAPACITY_TYPE_CPU); List<CapacityVO> capacityVOCpus = _capacityDao.search(capacitySC, null); Float cpuovercommitratio = Float.parseFloat(_clusterDetailsDao.findDetail(server.getClusterId(), "cpuOvercommitRatio").getValue()); Float memoryOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(server.getClusterId(), "memoryOvercommitRatio").getValue()); if (capacityVOCpus != null && !capacityVOCpus.isEmpty()) { CapacityVO CapacityVOCpu = capacityVOCpus.get(0); long newTotalCpu = (long)(server.getCpus().longValue() * server.getSpeed().longValue() * cpuovercommitratio); if ((CapacityVOCpu.getTotalCapacity() <= newTotalCpu) || ((CapacityVOCpu.getUsedCapacity() + CapacityVOCpu.getReservedCapacity()) <= newTotalCpu)) { CapacityVOCpu.setTotalCapacity(newTotalCpu); } else if ((CapacityVOCpu.getUsedCapacity() + CapacityVOCpu.getReservedCapacity() > newTotalCpu) && (CapacityVOCpu.getUsedCapacity() < newTotalCpu)) { CapacityVOCpu.setReservedCapacity(0); CapacityVOCpu.setTotalCapacity(newTotalCpu); } else { s_logger.debug("What? 
new cpu is :" + newTotalCpu + ", old one is " + CapacityVOCpu.getUsedCapacity() + "," + CapacityVOCpu.getReservedCapacity() + "," + CapacityVOCpu.getTotalCapacity()); } _capacityDao.update(CapacityVOCpu.getId(), CapacityVOCpu); } else { CapacityVO capacity = new CapacityVO(server.getId(), server.getDataCenterId(), server.getPodId(), server.getClusterId(), 0L, server.getCpus().longValue() * server.getSpeed().longValue(), Capacity.CAPACITY_TYPE_CPU); _capacityDao.persist(capacity); } SearchCriteria<CapacityVO> capacityMem = _capacityDao.createSearchCriteria(); capacityMem.addAnd("hostOrPoolId", SearchCriteria.Op.EQ, server.getId()); capacityMem.addAnd("dataCenterId", SearchCriteria.Op.EQ, server.getDataCenterId()); capacityMem.addAnd("podId", SearchCriteria.Op.EQ, server.getPodId()); capacityMem.addAnd("capacityType", SearchCriteria.Op.EQ, Capacity.CAPACITY_TYPE_MEMORY); List<CapacityVO> capacityVOMems = _capacityDao.search(capacityMem, null); if (capacityVOMems != null && !capacityVOMems.isEmpty()) { CapacityVO CapacityVOMem = capacityVOMems.get(0); long newTotalMem = (long)((server.getTotalMemory()) * memoryOvercommitRatio); if (CapacityVOMem.getTotalCapacity() <= newTotalMem || (CapacityVOMem.getUsedCapacity() + CapacityVOMem.getReservedCapacity() <= newTotalMem)) { CapacityVOMem.setTotalCapacity(newTotalMem); } else if (CapacityVOMem.getUsedCapacity() + CapacityVOMem.getReservedCapacity() > newTotalMem && CapacityVOMem.getUsedCapacity() < newTotalMem) { CapacityVOMem.setReservedCapacity(0); CapacityVOMem.setTotalCapacity(newTotalMem); } else { s_logger.debug("What? 
new cpu is :" + newTotalMem + ", old one is " + CapacityVOMem.getUsedCapacity() + "," + CapacityVOMem.getReservedCapacity() + "," + CapacityVOMem.getTotalCapacity()); } _capacityDao.update(CapacityVOMem.getId(), CapacityVOMem); } else { CapacityVO capacity = new CapacityVO(server.getId(), server.getDataCenterId(), server.getPodId(), server.getClusterId(), 0L, server.getTotalMemory(), Capacity.CAPACITY_TYPE_MEMORY); _capacityDao.persist(capacity); } } } @Override public float getClusterOverProvisioningFactor(Long clusterId, short capacityType) { String capacityOverProvisioningName = ""; if (capacityType == Capacity.CAPACITY_TYPE_CPU) { capacityOverProvisioningName = "cpuOvercommitRatio"; } else if (capacityType == Capacity.CAPACITY_TYPE_MEMORY) { capacityOverProvisioningName = "memoryOvercommitRatio"; } else { throw new CloudRuntimeException("Invalid capacityType - " + capacityType); } ClusterDetailsVO clusterDetailCpu = _clusterDetailsDao.findDetail(clusterId, capacityOverProvisioningName); Float clusterOverProvisioningRatio = Float.parseFloat(clusterDetailCpu.getValue()); return clusterOverProvisioningRatio; } @Override public boolean checkIfClusterCrossesThreshold(Long clusterId, Integer cpuRequested, long ramRequested) { Float clusterCpuOverProvisioning = getClusterOverProvisioningFactor(clusterId, Capacity.CAPACITY_TYPE_CPU); Float clusterMemoryOverProvisioning = getClusterOverProvisioningFactor(clusterId, Capacity.CAPACITY_TYPE_MEMORY); Float clusterCpuCapacityDisableThreshold = DeploymentClusterPlanner.ClusterCPUCapacityDisableThreshold.valueIn(clusterId); Float clusterMemoryCapacityDisableThreshold = DeploymentClusterPlanner.ClusterMemoryCapacityDisableThreshold.valueIn(clusterId); float cpuConsumption = _capacityDao.findClusterConsumption(clusterId, Capacity.CAPACITY_TYPE_CPU, cpuRequested); if (cpuConsumption / clusterCpuOverProvisioning > clusterCpuCapacityDisableThreshold) { s_logger.debug("Cluster: " + clusterId + " cpu consumption " + cpuConsumption / 
clusterCpuOverProvisioning + " crosses disable threshold " + clusterCpuCapacityDisableThreshold); return true; } float memoryConsumption = _capacityDao.findClusterConsumption(clusterId, Capacity.CAPACITY_TYPE_MEMORY, ramRequested); if (memoryConsumption / clusterMemoryOverProvisioning > clusterMemoryCapacityDisableThreshold) { s_logger.debug("Cluster: " + clusterId + " memory consumption " + memoryConsumption / clusterMemoryOverProvisioning + " crosses disable threshold " + clusterMemoryCapacityDisableThreshold); return true; } return false; } @Override public boolean processAnswers(long agentId, long seq, Answer[] answers) { // TODO Auto-generated method stub return false; } @Override public boolean processCommands(long agentId, long seq, Command[] commands) { // TODO Auto-generated method stub return false; } @Override public AgentControlAnswer processControlCommand(long agentId, AgentControlCommand cmd) { // TODO Auto-generated method stub return null; } @Override public void processHostAdded(long hostId) { } @Override public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) throws ConnectionException { // TODO Auto-generated method stub } @Override public boolean processDisconnect(long agentId, Status state) { // TODO Auto-generated method stub return false; } @Override public void processHostAboutToBeRemoved(long hostId) { } @Override public void processHostRemoved(long hostId, long clusterId) { } @Override public boolean isRecurring() { // TODO Auto-generated method stub return false; } @Override public int getTimeout() { // TODO Auto-generated method stub return 0; } @Override public boolean processTimeout(long agentId, long seq) { // TODO Auto-generated method stub return false; } @Override public void processCancelMaintenaceEventAfter(Long hostId) { updateCapacityForHost(_hostDao.findById(hostId)); } @Override public void processCancelMaintenaceEventBefore(Long hostId) { // TODO Auto-generated method stub } @Override public void 
processDeletHostEventAfter(Host host) { // TODO Auto-generated method stub } @Override public void processDeleteHostEventBefore(Host host) { // TODO Auto-generated method stub } @Override public void processDiscoverEventAfter(Map<? extends ServerResource, Map<String, String>> resources) { // TODO Auto-generated method stub } @Override public void processDiscoverEventBefore(Long dcid, Long podId, Long clusterId, URI uri, String username, String password, List<String> hostTags) { // TODO Auto-generated method stub } @Override public void processPrepareMaintenaceEventAfter(Long hostId) { _capacityDao.removeBy(Capacity.CAPACITY_TYPE_MEMORY, null, null, null, hostId); _capacityDao.removeBy(Capacity.CAPACITY_TYPE_CPU, null, null, null, hostId); } @Override public void processPrepareMaintenaceEventBefore(Long hostId) { // TODO Auto-generated method stub } @Override public boolean checkIfHostReachMaxGuestLimit(Host host) { Long vmCount = _vmDao.countActiveByHostId(host.getId()); HypervisorType hypervisorType = host.getHypervisorType(); String hypervisorVersion = host.getHypervisorVersion(); Long maxGuestLimit = _hypervisorCapabilitiesDao.getMaxGuestsLimit(hypervisorType, hypervisorVersion); if (vmCount.longValue() >= maxGuestLimit.longValue()) { if (s_logger.isDebugEnabled()) { s_logger.debug("Host name: " + host.getName() + ", hostId: " + host.getId() + " already reached max Running VMs(count includes system VMs), limit is: " + maxGuestLimit + ",Running VM counts is: " + vmCount.longValue()); } return true; } return false; } @Override public String getConfigComponentName() { return CapacityManager.class.getSimpleName(); } @Override public ConfigKey<?>[] getConfigKeys() { return new ConfigKey<?>[] {CpuOverprovisioningFactor, MemOverprovisioningFactor, StorageCapacityDisableThreshold, StorageOverprovisioningFactor, StorageAllocatedCapacityDisableThreshold, StorageOperationsExcludeCluster, VmwareCreateCloneFull, ImageStoreNFSVersion}; } }
apache-2.0
ptupitsyn/ignite
modules/core/src/test/java/org/apache/ignite/internal/marshaller/optimized/OptimizedMarshallerSerialPersistentFieldsSelfTest.java
3845
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.internal.marshaller.optimized; import java.io.IOException; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import java.io.ObjectStreamField; import java.io.Serializable; import org.apache.ignite.internal.marshaller.optimized.OptimizedMarshaller; import org.apache.ignite.marshaller.GridMarshallerAbstractTest; import org.apache.ignite.marshaller.Marshaller; import org.junit.Test; /** * Test that Optimized Marshaller works with classes with serialPersistentFields. */ public class OptimizedMarshallerSerialPersistentFieldsSelfTest extends GridMarshallerAbstractTest { /** {@inheritDoc} */ @Override protected Marshaller marshaller() { return new OptimizedMarshaller(false); } /** * @throws Exception If failed. */ @Test public void testOptimizedMarshaller() throws Exception { unmarshal(marshal(new TestClass())); TestClass2 val = unmarshal(marshal(new TestClass2())); assertNull(val.field3); } /** * Test class with serialPersistentFields fields. */ private static class TestClass implements Serializable { private static final long serialVersionUID = 0L; /** For serialization compatibility. 
*/ private static final ObjectStreamField[] serialPersistentFields = { new ObjectStreamField("field1", Integer.TYPE), new ObjectStreamField("field2", Integer.TYPE) }; /** * @param s Object output stream. */ private void writeObject(ObjectOutputStream s) throws IOException { s.putFields().put("field1", 1); s.putFields().put("field2", 2); s.writeFields(); s.writeObject(null); } /** * @param s Object input stream. */ private void readObject(ObjectInputStream s) throws IOException, ClassNotFoundException { s.defaultReadObject(); s.readObject(); } } /** * Test class with serialPersistentFields fields. */ private static class TestClass2 implements Serializable { private static final long serialVersionUID = 0L; private Integer field3 = 1; /** For serialization compatibility. */ private static final ObjectStreamField[] serialPersistentFields = { new ObjectStreamField("field1", Integer.TYPE), new ObjectStreamField("field2", Integer.TYPE) }; /** * @param s Object output stream. */ private void writeObject(ObjectOutputStream s) throws IOException { s.putFields().put("field1", 1); s.putFields().put("field2", 2); s.writeFields(); s.writeObject(null); } /** * @param s Object input stream. */ private void readObject(ObjectInputStream s) throws IOException, ClassNotFoundException { s.defaultReadObject(); s.readObject(); } } }
apache-2.0
RobAltena/deeplearning4j
deeplearning4j/deeplearning4j-nlp-parent/deeplearning4j-nlp/src/main/java/org/deeplearning4j/text/tokenization/tokenizerfactory/TokenizerFactory.java
1895
/*******************************************************************************
 * Copyright (c) 2015-2018 Skymind, Inc.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

package org.deeplearning4j.text.tokenization.tokenizerfactory;

import org.deeplearning4j.text.tokenization.tokenizer.TokenPreProcess;
import org.deeplearning4j.text.tokenization.tokenizer.Tokenizer;

import java.io.InputStream;

/**
 * Factory that generates a {@link Tokenizer} for a given input
 * (string or stream), optionally applying a shared {@link TokenPreProcess}
 * to every tokenizer it creates.
 *
 * @author Adam Gibson
 */
public interface TokenizerFactory {

    /**
     * Create a tokenizer over the given string.
     * (Original javadoc said "createComplex" — an apparent stray rename artifact.)
     *
     * @param toTokenize the string to create the tokenizer with
     * @return the new tokenizer
     */
    Tokenizer create(String toTokenize);

    /**
     * Create a tokenizer based on an input stream.
     *
     * @param toTokenize the stream whose contents will be tokenized
     * @return the new tokenizer
     */
    Tokenizer create(InputStream toTokenize);

    /**
     * Sets a token pre processor to be used
     * with every tokenizer this factory creates.
     *
     * @param preProcessor the token pre processor to use
     */
    void setTokenPreProcessor(TokenPreProcess preProcessor);

    /**
     * Returns TokenPreProcessor set for this TokenizerFactory instance
     *
     * @return TokenPreProcessor instance, or null if no preprocessor was defined
     */
    TokenPreProcess getTokenPreProcessor();
}
apache-2.0
nknize/elasticsearch
server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregatorFactory.java
2989
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.search.aggregations.support;

import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.CardinalityUpperBound;

import java.io.IOException;
import java.util.Map;

/**
 * Base {@link AggregatorFactory} for aggregations that read from several
 * named values sources at once. Holds the per-name source configuration and
 * a shared output format, and funnels creation through
 * {@link #doCreateInternal}.
 */
public abstract class MultiValuesSourceAggregatorFactory extends AggregatorFactory {

    /** Values-source configuration, keyed by field/source name. */
    protected final Map<String, ValuesSourceConfig> configs;

    /** Format used to render the aggregation's output values. */
    protected final DocValueFormat format;

    /**
     * @param name                aggregation name
     * @param configs             per-name values-source configurations
     * @param format              doc-value format for rendering results
     * @param context             aggregation context
     * @param parent              parent factory, or null at the top level
     * @param subFactoriesBuilder builder for sub-aggregation factories
     * @param metadata            user metadata attached to the aggregation
     * @throws IOException propagated from the superclass constructor
     */
    public MultiValuesSourceAggregatorFactory(String name, Map<String, ValuesSourceConfig> configs,
                                              DocValueFormat format, AggregationContext context,
                                              AggregatorFactory parent,
                                              AggregatorFactories.Builder subFactoriesBuilder,
                                              Map<String, Object> metadata) throws IOException {
        super(name, context, parent, subFactoriesBuilder, metadata);
        this.configs = configs;
        this.format = format;
    }

    /** Delegates to {@link #doCreateInternal} with this factory's configs and format. */
    @Override
    public Aggregator createInternal(Aggregator parent, CardinalityUpperBound cardinality,
                                     Map<String, Object> metadata) throws IOException {
        return doCreateInternal(configs, format, parent, cardinality, metadata);
    }

    /**
     * Create an aggregator that won't collect anything but will return an
     * appropriate empty aggregation.
     */
    protected abstract Aggregator createUnmapped(Aggregator parent,
                                                 Map<String, Object> metadata) throws IOException;

    /**
     * Create the {@linkplain Aggregator}.
     *
     * @param cardinality Upper bound of the number of {@code owningBucketOrd}s
     *                    that the {@link Aggregator} created by this method
     *                    will be asked to collect.
     */
    protected abstract Aggregator doCreateInternal(
        Map<String, ValuesSourceConfig> configs,
        DocValueFormat format,
        Aggregator parent,
        CardinalityUpperBound cardinality,
        Map<String, Object> metadata
    ) throws IOException;
}
apache-2.0
romankagan/DDBWorkbench
python/testSrc/com/jetbrains/python/refactoring/PyIntroduceTestCase.java
4198
/*
 * Copyright 2000-2013 JetBrains s.r.o.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.jetbrains.python.refactoring;

import com.intellij.codeInsight.template.impl.TemplateManagerImpl;
import com.intellij.codeInsight.template.impl.TemplateState;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.psi.util.PsiTreeUtil;
import com.intellij.util.Consumer;
import com.jetbrains.python.fixtures.PyTestCase;
import com.jetbrains.python.psi.PyExpression;
import com.jetbrains.python.refactoring.introduce.IntroduceHandler;
import com.jetbrains.python.refactoring.introduce.IntroduceOperation;
import org.jetbrains.annotations.Nullable;

import java.util.Collection;

/**
 * Base class for "Introduce Variable/Constant/..." refactoring tests.
 * Subclasses supply the concrete handler via {@link #createHandler()};
 * fixtures are named after the test: {@code <testName>.py} as input and
 * {@code <testName>.after.py} as the expected result.
 *
 * @author yole
 */
public abstract class PyIntroduceTestCase extends PyTestCase {
  /**
   * Asserts that every expected name appears among the handler's
   * name suggestions for the expression at the caret.
   */
  protected void doTestSuggestions(Class<? extends PyExpression> parentClass, String... expectedNames) {
    final Collection<String> names = buildSuggestions(parentClass);
    for (String expectedName : expectedNames) {
      // Include the full suggestion list in the failure message for easier diagnosis.
      assertTrue(StringUtil.join(names, ", "), names.contains(expectedName));
    }
  }

  /**
   * Configures the fixture from {@code <testName>.py} and returns the
   * handler's suggested names for the expression of the given class
   * enclosing the caret position.
   */
  protected Collection<String> buildSuggestions(Class<? extends PyExpression> parentClass) {
    myFixture.configureByFile(getTestName(true) + ".py");
    IntroduceHandler handler = createHandler();
    PyExpression expr = PsiTreeUtil.getParentOfType(myFixture.getFile().findElementAt(myFixture.getEditor().getCaretModel().getOffset()), parentClass);
    return handler.getSuggestedNames(expr);
  }

  /** Supplies the refactoring handler under test. */
  protected abstract IntroduceHandler createHandler();

  /** Runs the refactoring with default settings (no customization). */
  protected void doTest() {
    doTest(null);
  }

  /**
   * Runs the non-inplace (dialog-style) introduce refactoring with target
   * name "a" and replace-all enabled, then compares against
   * {@code <testName>.after.py}. Inplace rename is force-disabled for the
   * duration of the test and restored afterwards.
   *
   * @param customization optional hook to tweak the operation before it runs
   */
  protected void doTest(@Nullable Consumer<IntroduceOperation> customization) {
    myFixture.configureByFile(getTestName(true) + ".py");
    // Remember the editor setting so it can be restored in the finally block.
    boolean inplaceEnabled = myFixture.getEditor().getSettings().isVariableInplaceRenameEnabled();
    try {
      myFixture.getEditor().getSettings().setVariableInplaceRenameEnabled(false);
      IntroduceHandler handler = createHandler();
      final IntroduceOperation operation = new IntroduceOperation(myFixture.getProject(), myFixture.getEditor(), myFixture.getFile(), "a");
      operation.setReplaceAll(true);
      if (customization != null) {
        customization.consume(operation);
      }
      handler.performAction(operation);
      myFixture.checkResultByFile(getTestName(true) + ".after.py");
    }
    finally {
      myFixture.getEditor().getSettings().setVariableInplaceRenameEnabled(inplaceEnabled);
    }
  }

  /**
   * Runs the inplace (template-based) introduce refactoring with target name
   * "a" and replace-all enabled, finishes the live template, then compares
   * against {@code <testName>.after.py}. Inplace rename is force-enabled for
   * the duration of the test and restored afterwards.
   *
   * @param customization optional hook to tweak the operation before it runs
   */
  protected void doTestInplace(@Nullable Consumer<IntroduceOperation> customization) {
    String name = getTestName(true);
    myFixture.configureByFile(name + ".py");
    final boolean enabled = myFixture.getEditor().getSettings().isVariableInplaceRenameEnabled();
    try {
      // Must be enabled before the action so the template machinery is active in tests.
      TemplateManagerImpl.setTemplateTesting(myFixture.getProject(), getTestRootDisposable());
      myFixture.getEditor().getSettings().setVariableInplaceRenameEnabled(true);
      IntroduceHandler handler = createHandler();
      final IntroduceOperation introduceOperation = new IntroduceOperation(myFixture.getProject(), myFixture.getEditor(), myFixture.getFile(), "a");
      introduceOperation.setReplaceAll(true);
      if (customization != null) {
        customization.consume(introduceOperation);
      }
      handler.performAction(introduceOperation);
      // Complete the inplace-rename live template before checking the result.
      TemplateState state = TemplateManagerImpl.getTemplateState(myFixture.getEditor());
      assert state != null;
      state.gotoEnd(false);
      myFixture.checkResultByFile(name + ".after.py", true);
    }
    finally {
      myFixture.getEditor().getSettings().setVariableInplaceRenameEnabled(enabled);
    }
  }
}
apache-2.0
gijsleussink/ceylon
typechecker/src/com/redhat/ceylon/compiler/typechecker/analyzer/SupertypeVisitor.java
8380
package com.redhat.ceylon.compiler.typechecker.analyzer;

import static com.redhat.ceylon.compiler.typechecker.analyzer.AliasVisitor.typeList;
import static com.redhat.ceylon.model.typechecker.model.ModelUtil.addToIntersection;
import static com.redhat.ceylon.model.typechecker.model.ModelUtil.canonicalIntersection;
import static java.util.Collections.singleton;

import java.util.ArrayList;
import java.util.List;

import com.redhat.ceylon.compiler.typechecker.tree.Node;
import com.redhat.ceylon.compiler.typechecker.tree.Tree;
import com.redhat.ceylon.compiler.typechecker.tree.Visitor;
import com.redhat.ceylon.model.typechecker.model.DecidabilityException;
import com.redhat.ceylon.model.typechecker.model.Type;
import com.redhat.ceylon.model.typechecker.model.TypeAlias;
import com.redhat.ceylon.model.typechecker.model.TypeDeclaration;
import com.redhat.ceylon.model.typechecker.model.Unit;
import com.redhat.ceylon.model.typechecker.model.UnknownType;

/**
 * Detects and eliminates potentially undecidable
 * supertypes, including:
 *
 * - supertypes containing intersections in type arguments,
 *   and
 * - supertypes with incorrect variance.
 *
 * @author Gavin King
 *
 */
public class SupertypeVisitor extends Visitor {

    // When false the visitor runs silently, repairing the model without
    // attaching error messages to the tree.
    private boolean displayErrors;

    public SupertypeVisitor(boolean displayErrors) {
        this.displayErrors = displayErrors;
    }

    /**
     * Checks the resolved supertype for decidability problems
     * (contravariant type parameters in contravariant/invariant
     * positions), optionally reporting them on the given node.
     * Returns true if any problem was found.
     */
    private boolean checkSupertypeVariance(Type type,
            TypeDeclaration d, Node node) {
        List<TypeDeclaration> errors =
                type.resolveAliases()
                    .checkDecidability();
        if (displayErrors) {
            for (TypeDeclaration td: errors) {
                Unit unit = node.getUnit();
                node.addError("type with contravariant type parameter '" +
                        td.getName() +
                        "' appears in contravariant or invariant location in supertype: '" +
                        type.asString(unit) + "'");
            }
        }
        return !errors.isEmpty();
    }

    /**
     * Checks the extended ("extends") and satisfied ("satisfies") types of
     * a declaration for circular or otherwise undecidable inheritance, and
     * surgically removes offending supertypes from the model so the rest of
     * the type checker can proceed.
     */
    private void checkForUndecidability(
            Tree.ExtendedType etn,
            Tree.SatisfiedTypes stn,
            TypeDeclaration type,
            Tree.TypeDeclaration that) {
        boolean errors = false;

        // Pass 1: direct or mutual recursion in the satisfied types.
        if (stn!=null) {
            for (Tree.StaticType st: stn.getTypes()) {
                Type t = st.getTypeModel();
                if (t!=null) {
                    TypeDeclaration td = t.getDeclaration();
                    // Unknown types and aliases are handled elsewhere.
                    if (!(td instanceof UnknownType) &&
                        !(td instanceof TypeAlias)) {
                        if (td == type) {
                            // A type directly satisfying itself.
                            brokenSatisfiedType(type, st, null);
                            errors = true;
                        }
                        else {
                            List<TypeDeclaration> list =
                                    t.isRecursiveRawTypeDefinition(
                                            singleton(type));
                            if (!list.isEmpty()) {
                                brokenSatisfiedType(type, st, list);
                                errors = true;
                            }
                        }
                    }
                }
            }
        }

        // Pass 1 (continued): same check for the extended type.
        if (etn!=null) {
            Tree.StaticType et = etn.getType();
            if (et!=null) {
                Type t = et.getTypeModel();
                if (t!=null) {
                    TypeDeclaration td = t.getDeclaration();
                    if (!(td instanceof UnknownType) &&
                        !(td instanceof TypeAlias)) {
                        if (td == type) {
                            brokenExtendedType(type, et, null);
                            errors = true;
                        }
                        else {
                            List<TypeDeclaration> list =
                                    t.isRecursiveRawTypeDefinition(
                                            singleton(type));
                            if (!list.isEmpty()) {
                                brokenExtendedType(type, et, list);
                                errors = true;
                            }
                        }
                    }
                }
            }
        }

        // Pass 2: only if the hierarchy is not already known broken,
        // probe whether the intersection of all supertypes can be
        // canonicalized, then check supertype variance.
        if (!errors) {
            Unit unit = type.getUnit();
            List<Type> list = new ArrayList<Type>();
            try {
                List<Type> supertypes =
                        type.getType()
                            .getSupertypes();
                for (Type st: supertypes) {
                    addToIntersection(list, st, unit);
                }
                //probably unnecessary - if it were
                //going to blow up, it would have
                //already blown up in addToIntersection()
                canonicalIntersection(list, unit);
            }
            catch (DecidabilityException re) {
                brokenHierarchy(type, that, unit);
                return;
            }
            if (stn!=null) {
                for (Tree.StaticType st: stn.getTypes()) {
                    Type t = st.getTypeModel();
                    if (t!=null) {
                        if (checkSupertypeVariance(t, type, st)) {
                            // Drop the offending satisfied type from the model.
                            type.getSatisfiedTypes().remove(t);
                            type.clearProducedTypeCache();
                        }
                    }
                }
            }
            if (etn!=null) {
                Tree.StaticType et = etn.getType();
                if (et!=null) {
                    Type t = et.getTypeModel();
                    if (t!=null) {
                        if (checkSupertypeVariance(t, type, et)) {
                            // Replace the broken extended type with Basic.
                            type.setExtendedType(unit.getBasicType());
                            type.clearProducedTypeCache();
                        }
                    }
                }
            }
        }
    }

    /**
     * Handles an undecidable hierarchy: report (if enabled) and reset the
     * declaration to extend Basic with no satisfied types.
     */
    private void brokenHierarchy(TypeDeclaration d,
            Tree.TypeDeclaration that, Unit unit) {
        if (displayErrors) {
            that.addError("inheritance hierarchy is undecidable: " +
                    "could not canonicalize the intersection of all supertypes of '" +
                    d.getName() + "'");
        }
        d.getSatisfiedTypes().clear();
        d.setExtendedType(unit.getBasicType());
        d.clearProducedTypeCache();
    }

    /**
     * Handles a circular extended type: report (if enabled), detach the
     * type model from the tree node, fall back to extending Basic, and
     * remember the broken supertype on the declaration.
     */
    private void brokenExtendedType(TypeDeclaration d,
            Tree.StaticType et, List<TypeDeclaration> list) {
        if (displayErrors) {
            et.addError(message(d, list));
        }
        Type pt = et.getTypeModel();
        et.setTypeModel(null);
        d.setExtendedType(et.getUnit().getBasicType());
        d.addBrokenSupertype(pt);
        d.clearProducedTypeCache();
    }

    /**
     * Handles a circular satisfied type: report (if enabled), detach the
     * type model from the tree node, remove it from the satisfied types,
     * and remember the broken supertype on the declaration.
     */
    private void brokenSatisfiedType(TypeDeclaration d,
            Tree.StaticType st, List<TypeDeclaration> list) {
        if (displayErrors) {
            st.addError(message(d, list));
        }
        Type pt = st.getTypeModel();
        st.setTypeModel(null);
        d.getSatisfiedTypes().remove(pt);
        d.addBrokenSupertype(pt);
        d.clearProducedTypeCache();
    }

    /**
     * Builds the circular-inheritance error message; a null list means
     * direct self-inheritance, otherwise the cycle is spelled out.
     */
    private String message(TypeDeclaration d,
            List<TypeDeclaration> list) {
        return list==null ?
                "inheritance is circular: '" +
                d.getName() +
                "' inherits itself" :
                "inheritance is circular: definition of '" +
                d.getName() +
                "' is recursive, involving " +
                typeList(list);
    }

    @Override
    public void visit(Tree.ClassDefinition that) {
        super.visit(that);
        // Classes have both an extends clause and a satisfies clause.
        checkForUndecidability(that.getExtendedType(),
                that.getSatisfiedTypes(),
                that.getDeclarationModel(), that);
    }

    @Override
    public void visit(Tree.InterfaceDefinition that) {
        super.visit(that);
        // Interfaces have no extends clause.
        checkForUndecidability(null,
                that.getSatisfiedTypes(),
                that.getDeclarationModel(), that);
    }

    @Override
    public void visit(Tree.TypeConstraint that) {
        super.visit(that);
        // Type constraints only have satisfied types.
        checkForUndecidability(null,
                that.getSatisfiedTypes(),
                that.getDeclarationModel(), that);
    }
}
apache-2.0
cbeams-archive/spring-framework-2.5.x
tiger/src/org/springframework/orm/jpa/JpaSystemException.java
1233
/*
 * Copyright 2002-2006 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.orm.jpa;

import javax.persistence.PersistenceException;

import org.springframework.dao.UncategorizedDataAccessException;

/**
 * JPA-specific subclass of UncategorizedDataAccessException,
 * for JPA system errors that do not match any concrete
 * <code>org.springframework.dao</code> exceptions.
 *
 * @author Juergen Hoeller
 * @since 2.0
 * @see EntityManagerFactoryUtils#convertJpaAccessExceptionIfPossible
 */
public class JpaSystemException extends UncategorizedDataAccessException {

	/**
	 * Create a new JpaSystemException wrapping the given JPA exception,
	 * reusing its message and keeping it as the root cause.
	 * @param ex the original PersistenceException thrown by the JPA provider
	 */
	public JpaSystemException(PersistenceException ex) {
		super(ex.getMessage(), ex);
	}

}
apache-2.0
SMARTRACTECHNOLOGY-PUBLIC/smartcosmos-sdk-java
src/main/java/net/smartcosmos/model/batch/IBatchStatusReport.java
1831
package net.smartcosmos.model.batch; /* * *#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#* * SMART COSMOS Platform Core SDK * =============================================================================== * Copyright (C) 2013 - 2015 SMARTRAC Technology Fletcher, Inc. * =============================================================================== * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*# */ public interface IBatchStatusReport { long getBatchProcessorStartTimestamp(); BatchProcessorStatus getBatchProcessorStatus(); int getPercentageComplete(); long getLastPercentageCompleteUpdateTimestamp(); long getBatchProcessorStopTimestamp(); String getErrorMessage(); int getErrorCode(); void setBatchProcessorStartTimestamp(long batchProcessorStartTimestamp); void setBatchProcessorStatus(BatchProcessorStatus batchProcessorStatus); void setPercentageComplete(int percentageComplete); void setLastPercentageCompleteUpdateTimestamp(long lastPercentageCompleteUpdateTimestamp); void setBatchProcessorStopTimestamp(long batchProcessorStopTimestamp); void setErrorMessage(String errorMessage); void setErrorCode(int errorCode); }
apache-2.0
bptlab/processeditor
src/net/frapu/code/simulation/petrinets/SimulationConfiguration.java
10200
/* * To change this template, choose Tools | Templates * and open the template in the editor. */ /* * SimulationConfiguration.java * * Created on 23.04.2009, 14:49:46 */ package net.frapu.code.simulation.petrinets; import net.frapu.code.visualization.Configuration; /** * * @author fpu */ public class SimulationConfiguration extends javax.swing.JPanel { private static final long serialVersionUID = -8179581456325895683L; public final static int SIMULATION_SPEED_SLOWEST = 1000; public final static int SIMULATION_SPEED_SLOW = 250; public final static int SIMULATION_SPEED_MEDIUM = 100; public final static int SIMULATION_SPEED_FAST = 25; public final static int SIMULATION_SPEED_FASTEST = 0; public final static int SIMULATION_SPEED_WARP = -1; public final static int SIMULATION_PRECISION_LOW = 10; public final static int SIMULATION_PRECISION_MEDIUM = 100; public final static int SIMULATION_PRECISION_HIGH = 1000; PetriNetSimulationEditor editor = null; /** Creates new form SimulationConfiguration */ public SimulationConfiguration(PetriNetSimulationEditor editor) { this.editor = editor; initComponents(); this.setSize(300,200); // Set dot location Configuration conf = Configuration.getInstance(); String dotLocation = conf.getProperty(Configuration.PROP_DOT_LOCATION); jTextField1.setText(dotLocation); } /** This method is called from within the constructor to * initialize the form. * WARNING: Do NOT modify this code. The content of this method is * always regenerated by the Form Editor. 
*/ // <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents private void initComponents() { new javax.swing.JFileChooser(); jPanel1 = new javax.swing.JPanel(); jLabel1 = new javax.swing.JLabel(); jLabel2 = new javax.swing.JLabel(); jComboBox1 = new javax.swing.JComboBox(); jComboBox2 = new javax.swing.JComboBox(); jPanel2 = new javax.swing.JPanel(); jLabel3 = new javax.swing.JLabel(); jTextField1 = new javax.swing.JTextField(); jButton1 = new javax.swing.JButton(); setMinimumSize(new java.awt.Dimension(300, 100)); jPanel1.setBorder(javax.swing.BorderFactory.createTitledBorder("Simulation engine")); jLabel1.setText("Speed:"); jLabel2.setText("Precision:"); jComboBox1.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "Slowest", "Slow", "Medium", "Fast", "Fastest", "Warp (Event-based)" })); jComboBox1.setSelectedIndex(3); jComboBox1.addActionListener(new java.awt.event.ActionListener() { public void actionPerformed(java.awt.event.ActionEvent evt) { jComboBox1ActionPerformed(evt); } }); jComboBox2.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "Very low (10 instances)", "Medium (100 instances)", "High (1000 instances)" })); jComboBox2.setSelectedIndex(1); jComboBox2.addActionListener(new java.awt.event.ActionListener() { public void actionPerformed(java.awt.event.ActionEvent evt) { jComboBox2ActionPerformed(evt); } }); javax.swing.GroupLayout jPanel1Layout = new javax.swing.GroupLayout(jPanel1); jPanel1.setLayout(jPanel1Layout); jPanel1Layout.setHorizontalGroup( jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING) .addGroup(jPanel1Layout.createSequentialGroup() .addContainerGap() .addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING) .addComponent(jLabel2) .addComponent(jLabel1)) .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED) .addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING, false) 
.addComponent(jComboBox1, 0, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE) .addComponent(jComboBox2, 0, 171, Short.MAX_VALUE)) .addContainerGap(51, Short.MAX_VALUE)) ); jPanel1Layout.setVerticalGroup( jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING) .addGroup(jPanel1Layout.createSequentialGroup() .addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE) .addComponent(jLabel1) .addComponent(jComboBox1, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)) .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED) .addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE) .addComponent(jLabel2) .addComponent(jComboBox2, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)) .addContainerGap(6, Short.MAX_VALUE)) ); jPanel2.setBorder(javax.swing.BorderFactory.createTitledBorder("External tools")); jLabel3.setText("Path to dot:"); jTextField1.setText("/usr/local/bin/dot"); jTextField1.setEnabled(false); jButton1.setText("..."); jButton1.setEnabled(false); javax.swing.GroupLayout jPanel2Layout = new javax.swing.GroupLayout(jPanel2); jPanel2.setLayout(jPanel2Layout); jPanel2Layout.setHorizontalGroup( jPanel2Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING) .addGroup(jPanel2Layout.createSequentialGroup() .addComponent(jLabel3) .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED) .addComponent(jTextField1, javax.swing.GroupLayout.DEFAULT_SIZE, 169, Short.MAX_VALUE) .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED) .addComponent(jButton1)) ); jPanel2Layout.setVerticalGroup( jPanel2Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING) .addGroup(jPanel2Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE) .addComponent(jButton1) .addComponent(jLabel3) 
.addComponent(jTextField1, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)) ); javax.swing.GroupLayout layout = new javax.swing.GroupLayout(this); this.setLayout(layout); layout.setHorizontalGroup( layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING) .addGroup(javax.swing.GroupLayout.Alignment.TRAILING, layout.createSequentialGroup() .addContainerGap() .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING) .addComponent(jPanel1, javax.swing.GroupLayout.Alignment.LEADING, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE) .addComponent(jPanel2, javax.swing.GroupLayout.Alignment.LEADING, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)) .addContainerGap()) ); layout.setVerticalGroup( layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING) .addGroup(layout.createSequentialGroup() .addContainerGap() .addComponent(jPanel1, javax.swing.GroupLayout.PREFERRED_SIZE, 82, javax.swing.GroupLayout.PREFERRED_SIZE) .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED) .addComponent(jPanel2, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE) .addContainerGap(javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)) ); }// </editor-fold>//GEN-END:initComponents /** * @param evt */ private void jComboBox1ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jComboBox1ActionPerformed switch (jComboBox1.getSelectedIndex()) { case 0: editor.setSimulationSpeed(SIMULATION_SPEED_SLOWEST); break; case 1: editor.setSimulationSpeed(SIMULATION_SPEED_SLOW); break; case 2: editor.setSimulationSpeed(SIMULATION_SPEED_MEDIUM); break; case 3: editor.setSimulationSpeed(SIMULATION_SPEED_FAST); break; case 4: editor.setSimulationSpeed(SIMULATION_SPEED_FASTEST); break; case 5: 
editor.setSimulationSpeed(SIMULATION_SPEED_WARP); break; } }//GEN-LAST:event_jComboBox1ActionPerformed /** * @param evt */ private void jComboBox2ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jComboBox2ActionPerformed switch (jComboBox2.getSelectedIndex()) { case 0: editor.setTokenCount(SIMULATION_PRECISION_LOW); break; case 1: editor.setTokenCount(SIMULATION_PRECISION_MEDIUM); break; case 2: editor.setTokenCount(SIMULATION_PRECISION_HIGH); break; } }//GEN-LAST:event_jComboBox2ActionPerformed // Variables declaration - do not modify//GEN-BEGIN:variables private javax.swing.JButton jButton1; private javax.swing.JComboBox jComboBox1; private javax.swing.JComboBox jComboBox2; private javax.swing.JLabel jLabel1; private javax.swing.JLabel jLabel2; private javax.swing.JLabel jLabel3; private javax.swing.JPanel jPanel1; private javax.swing.JPanel jPanel2; private javax.swing.JTextField jTextField1; // End of variables declaration//GEN-END:variables }
apache-2.0
locationtech/geowave
core/store/src/main/java/org/locationtech/geowave/core/store/query/filter/expression/text/TextBetween.java
4362
/** * Copyright (c) 2013-2020 Contributors to the Eclipse Foundation * * See the NOTICE file distributed with this work for additional information regarding copyright * ownership. All rights reserved. This program and the accompanying materials are made available * under the terms of the Apache License, Version 2.0 which accompanies this distribution and is * available at http://www.apache.org/licenses/LICENSE-2.0.txt */ package org.locationtech.geowave.core.store.query.filter.expression.text; import java.nio.ByteBuffer; import org.locationtech.geowave.core.index.text.CaseSensitivity; import org.locationtech.geowave.core.index.text.TextIndexStrategy; import org.locationtech.geowave.core.index.text.TextSearchType; import org.locationtech.geowave.core.store.AdapterToIndexMapping; import org.locationtech.geowave.core.store.api.DataTypeAdapter; import org.locationtech.geowave.core.store.api.Index; import org.locationtech.geowave.core.store.index.CustomIndex; import org.locationtech.geowave.core.store.query.filter.expression.Between; import org.locationtech.geowave.core.store.query.filter.expression.IndexFieldConstraints; /** * Implementation of between for text data. 
*/ public class TextBetween extends Between<TextExpression, String> { private boolean ignoreCase; public TextBetween() {} public TextBetween( final TextExpression valueExpr, final TextExpression lowerBoundExpr, final TextExpression upperBoundExpr) { this(valueExpr, lowerBoundExpr, upperBoundExpr, false); } public TextBetween( final TextExpression valueExpr, final TextExpression lowerBoundExpr, final TextExpression upperBoundExpr, final boolean ignoreCase) { super(valueExpr, lowerBoundExpr, upperBoundExpr); this.ignoreCase = ignoreCase; } @Override protected boolean indexSupported(final Index index) { if ((index instanceof CustomIndex) && (((CustomIndex<?, ?>) index).getCustomIndexStrategy() instanceof TextIndexStrategy)) { final TextIndexStrategy<?> indexStrategy = (TextIndexStrategy<?>) ((CustomIndex<?, ?>) index).getCustomIndexStrategy(); return (indexStrategy.isSupported(TextSearchType.BEGINS_WITH) && indexStrategy.isSupported( ignoreCase ? CaseSensitivity.CASE_INSENSITIVE : CaseSensitivity.CASE_SENSITIVE)); } return false; } @Override public void prepare( final DataTypeAdapter<?> adapter, final AdapterToIndexMapping indexMapping, final Index index) { if (valueExpr.isLiteral() && !(valueExpr instanceof TextLiteral)) { valueExpr = TextLiteral.of(valueExpr.evaluateValue(null)); } if (lowerBoundExpr.isLiteral() && !(lowerBoundExpr instanceof TextLiteral)) { lowerBoundExpr = TextLiteral.of(lowerBoundExpr.evaluateValue(null)); } if (upperBoundExpr.isLiteral() && !(upperBoundExpr instanceof TextLiteral)) { upperBoundExpr = TextLiteral.of(upperBoundExpr.evaluateValue(null)); } } @Override protected boolean evaluateInternal( final String value, final String lowerBound, final String upperBound) { if (ignoreCase) { final String valueLower = value.toLowerCase(); return (valueLower.compareTo(lowerBound.toLowerCase()) >= 0) && (valueLower.compareTo(upperBound.toLowerCase()) <= 0); } return (value.compareTo(lowerBound) >= 0) && (value.compareTo(upperBound) <= 0); } @Override 
public byte[] toBinary() { final byte[] superBinary = super.toBinary(); final ByteBuffer buffer = ByteBuffer.allocate(1 + superBinary.length); buffer.put(ignoreCase ? (byte) 1 : (byte) 0); buffer.put(superBinary); return buffer.array(); } @Override public void fromBinary(final byte[] bytes) { final ByteBuffer buffer = ByteBuffer.wrap(bytes); ignoreCase = buffer.get() == (byte) 1; final byte[] superBinary = new byte[buffer.remaining()]; buffer.get(superBinary); super.fromBinary(superBinary); } @Override protected IndexFieldConstraints<String> toConstraints( final String lowerBound, final String upperBound) { // It's not exact because strings with the upper bound prefix may be greater than the upper // bound return TextFieldConstraints.of(lowerBound, upperBound, true, true, false, !ignoreCase, false); } }
apache-2.0
visouza/solr-5.0.0
lucene/core/src/java/org/apache/lucene/index/LogMergePolicy.java
23920
package org.apache.lucene.index; /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Locale; import java.util.Map; /** * <p>This class implements a {@link MergePolicy} that tries * to merge segments into levels of exponentially * increasing size, where each level has fewer segments than * the value of the merge factor. Whenever extra segments * (beyond the merge factor upper bound) are encountered, * all segments within the level are merged. You can get or * set the merge factor using {@link #getMergeFactor()} and * {@link #setMergeFactor(int)} respectively.</p> * * <p>This class is abstract and requires a subclass to * define the {@link #size} method which specifies how a * segment's size is determined. {@link LogDocMergePolicy} * is one subclass that measures size by document count in * the segment. {@link LogByteSizeMergePolicy} is another * subclass that measures size as the total byte size of the * file(s) for the segment.</p> */ public abstract class LogMergePolicy extends MergePolicy { /** Defines the allowed range of log(size) for each * level. 
A level is computed by taking the max segment * log size, minus LEVEL_LOG_SPAN, and finding all * segments falling within that range. */ public static final double LEVEL_LOG_SPAN = 0.75; /** Default merge factor, which is how many segments are * merged at a time */ public static final int DEFAULT_MERGE_FACTOR = 10; /** Default maximum segment size. A segment of this size * or larger will never be merged. @see setMaxMergeDocs */ public static final int DEFAULT_MAX_MERGE_DOCS = Integer.MAX_VALUE; /** Default noCFSRatio. If a merge's size is {@code >= 10%} of * the index, then we disable compound file for it. * @see MergePolicy#setNoCFSRatio */ public static final double DEFAULT_NO_CFS_RATIO = 0.1; /** How many segments to merge at a time. */ protected int mergeFactor = DEFAULT_MERGE_FACTOR; /** Any segments whose size is smaller than this value * will be rounded up to this value. This ensures that * tiny segments are aggressively merged. */ protected long minMergeSize; /** If the size of a segment exceeds this value then it * will never be merged. */ protected long maxMergeSize; // Although the core MPs set it explicitly, we must default in case someone // out there wrote his own LMP ... /** If the size of a segment exceeds this value then it * will never be merged during {@link IndexWriter#forceMerge}. */ protected long maxMergeSizeForForcedMerge = Long.MAX_VALUE; /** If a segment has more than this many documents then it * will never be merged. */ protected int maxMergeDocs = DEFAULT_MAX_MERGE_DOCS; /** If true, we pro-rate a segment's size by the * percentage of non-deleted documents. */ protected boolean calibrateSizeByDeletes = true; /** Sole constructor. (For invocation by subclass * constructors, typically implicit.) */ public LogMergePolicy() { super(DEFAULT_NO_CFS_RATIO, MergePolicy.DEFAULT_MAX_CFS_SEGMENT_SIZE); } /** Returns true if {@code LMP} is enabled in {@link * IndexWriter}'s {@code infoStream}. 
*/ protected boolean verbose(IndexWriter writer) { return writer != null && writer.infoStream.isEnabled("LMP"); } /** Print a debug message to {@link IndexWriter}'s {@code * infoStream}. */ protected void message(String message, IndexWriter writer) { if (verbose(writer)) { writer.infoStream.message("LMP", message); } } /** <p>Returns the number of segments that are merged at * once and also controls the total number of segments * allowed to accumulate in the index.</p> */ public int getMergeFactor() { return mergeFactor; } /** Determines how often segment indices are merged by * addDocument(). With smaller values, less RAM is used * while indexing, and searches are * faster, but indexing speed is slower. With larger * values, more RAM is used during indexing, and while * searches is slower, indexing is * faster. Thus larger values ({@code > 10}) are best for batch * index creation, and smaller values ({@code < 10}) for indices * that are interactively maintained. */ public void setMergeFactor(int mergeFactor) { if (mergeFactor < 2) throw new IllegalArgumentException("mergeFactor cannot be less than 2"); this.mergeFactor = mergeFactor; } /** Sets whether the segment size should be calibrated by * the number of deletes when choosing segments for merge. */ public void setCalibrateSizeByDeletes(boolean calibrateSizeByDeletes) { this.calibrateSizeByDeletes = calibrateSizeByDeletes; } /** Returns true if the segment size should be calibrated * by the number of deletes when choosing segments for merge. */ public boolean getCalibrateSizeByDeletes() { return calibrateSizeByDeletes; } /** Return the number of documents in the provided {@link * SegmentCommitInfo}, pro-rated by percentage of * non-deleted documents if {@link * #setCalibrateSizeByDeletes} is set. 
*/ protected long sizeDocs(SegmentCommitInfo info, IndexWriter writer) throws IOException { if (calibrateSizeByDeletes) { int delCount = writer.numDeletedDocs(info); assert delCount <= info.info.getDocCount(); return (info.info.getDocCount() - (long)delCount); } else { return info.info.getDocCount(); } } /** Return the byte size of the provided {@link * SegmentCommitInfo}, pro-rated by percentage of * non-deleted documents if {@link * #setCalibrateSizeByDeletes} is set. */ protected long sizeBytes(SegmentCommitInfo info, IndexWriter writer) throws IOException { if (calibrateSizeByDeletes) { return super.size(info, writer); } return info.sizeInBytes(); } /** Returns true if the number of segments eligible for * merging is less than or equal to the specified {@code * maxNumSegments}. */ protected boolean isMerged(SegmentInfos infos, int maxNumSegments, Map<SegmentCommitInfo,Boolean> segmentsToMerge, IndexWriter writer) throws IOException { final int numSegments = infos.size(); int numToMerge = 0; SegmentCommitInfo mergeInfo = null; boolean segmentIsOriginal = false; for(int i=0;i<numSegments && numToMerge <= maxNumSegments;i++) { final SegmentCommitInfo info = infos.info(i); final Boolean isOriginal = segmentsToMerge.get(info); if (isOriginal != null) { segmentIsOriginal = isOriginal; numToMerge++; mergeInfo = info; } } return numToMerge <= maxNumSegments && (numToMerge != 1 || !segmentIsOriginal || isMerged(infos, mergeInfo, writer)); } /** * Returns the merges necessary to merge the index, taking the max merge * size or max merge docs into consideration. This method attempts to respect * the {@code maxNumSegments} parameter, however it might be, due to size * constraints, that more than that number of segments will remain in the * index. Also, this method does not guarantee that exactly {@code * maxNumSegments} will remain, but &lt;= that number. 
*/ private MergeSpecification findForcedMergesSizeLimit( SegmentInfos infos, int maxNumSegments, int last, IndexWriter writer) throws IOException { MergeSpecification spec = new MergeSpecification(); final List<SegmentCommitInfo> segments = infos.asList(); int start = last - 1; while (start >= 0) { SegmentCommitInfo info = infos.info(start); if (size(info, writer) > maxMergeSizeForForcedMerge || sizeDocs(info, writer) > maxMergeDocs) { if (verbose(writer)) { message("findForcedMergesSizeLimit: skip segment=" + info + ": size is > maxMergeSize (" + maxMergeSizeForForcedMerge + ") or sizeDocs is > maxMergeDocs (" + maxMergeDocs + ")", writer); } // need to skip that segment + add a merge for the 'right' segments, // unless there is only 1 which is merged. if (last - start - 1 > 1 || (start != last - 1 && !isMerged(infos, infos.info(start + 1), writer))) { // there is more than 1 segment to the right of // this one, or a mergeable single segment. spec.add(new OneMerge(segments.subList(start + 1, last))); } last = start; } else if (last - start == mergeFactor) { // mergeFactor eligible segments were found, add them as a merge. spec.add(new OneMerge(segments.subList(start, last))); last = start; } --start; } // Add any left-over segments, unless there is just 1 // already fully merged if (last > 0 && (++start + 1 < last || !isMerged(infos, infos.info(start), writer))) { spec.add(new OneMerge(segments.subList(start, last))); } return spec.merges.size() == 0 ? null : spec; } /** * Returns the merges necessary to forceMerge the index. This method constraints * the returned merges only by the {@code maxNumSegments} parameter, and * guaranteed that exactly that number of segments will remain in the index. 
*/ private MergeSpecification findForcedMergesMaxNumSegments(SegmentInfos infos, int maxNumSegments, int last, IndexWriter writer) throws IOException { MergeSpecification spec = new MergeSpecification(); final List<SegmentCommitInfo> segments = infos.asList(); // First, enroll all "full" merges (size // mergeFactor) to potentially be run concurrently: while (last - maxNumSegments + 1 >= mergeFactor) { spec.add(new OneMerge(segments.subList(last - mergeFactor, last))); last -= mergeFactor; } // Only if there are no full merges pending do we // add a final partial (< mergeFactor segments) merge: if (0 == spec.merges.size()) { if (maxNumSegments == 1) { // Since we must merge down to 1 segment, the // choice is simple: if (last > 1 || !isMerged(infos, infos.info(0), writer)) { spec.add(new OneMerge(segments.subList(0, last))); } } else if (last > maxNumSegments) { // Take care to pick a partial merge that is // least cost, but does not make the index too // lopsided. If we always just picked the // partial tail then we could produce a highly // lopsided index over time: // We must merge this many segments to leave // maxNumSegments in the index (from when // forceMerge was first kicked off): final int finalMergeSize = last - maxNumSegments + 1; // Consider all possible starting points: long bestSize = 0; int bestStart = 0; for(int i=0;i<last-finalMergeSize+1;i++) { long sumSize = 0; for(int j=0;j<finalMergeSize;j++) { sumSize += size(infos.info(j+i), writer); } if (i == 0 || (sumSize < 2*size(infos.info(i-1), writer) && sumSize < bestSize)) { bestStart = i; bestSize = sumSize; } } spec.add(new OneMerge(segments.subList(bestStart, bestStart + finalMergeSize))); } } return spec.merges.size() == 0 ? null : spec; } /** Returns the merges necessary to merge the index down * to a specified number of segments. * This respects the {@link #maxMergeSizeForForcedMerge} setting. 
* By default, and assuming {@code maxNumSegments=1}, only * one segment will be left in the index, where that segment * has no deletions pending nor separate norms, and it is in * compound file format if the current useCompoundFile * setting is true. This method returns multiple merges * (mergeFactor at a time) so the {@link MergeScheduler} * in use may make use of concurrency. */ @Override public MergeSpecification findForcedMerges(SegmentInfos infos, int maxNumSegments, Map<SegmentCommitInfo,Boolean> segmentsToMerge, IndexWriter writer) throws IOException { assert maxNumSegments > 0; if (verbose(writer)) { message("findForcedMerges: maxNumSegs=" + maxNumSegments + " segsToMerge="+ segmentsToMerge, writer); } // If the segments are already merged (e.g. there's only 1 segment), or // there are <maxNumSegments:. if (isMerged(infos, maxNumSegments, segmentsToMerge, writer)) { if (verbose(writer)) { message("already merged; skip", writer); } return null; } // Find the newest (rightmost) segment that needs to // be merged (other segments may have been flushed // since merging started): int last = infos.size(); while (last > 0) { final SegmentCommitInfo info = infos.info(--last); if (segmentsToMerge.get(info) != null) { last++; break; } } if (last == 0) { if (verbose(writer)) { message("last == 0; skip", writer); } return null; } // There is only one segment already, and it is merged if (maxNumSegments == 1 && last == 1 && isMerged(infos, infos.info(0), writer)) { if (verbose(writer)) { message("already 1 seg; skip", writer); } return null; } // Check if there are any segments above the threshold boolean anyTooLarge = false; for (int i = 0; i < last; i++) { SegmentCommitInfo info = infos.info(i); if (size(info, writer) > maxMergeSizeForForcedMerge || sizeDocs(info, writer) > maxMergeDocs) { anyTooLarge = true; break; } } if (anyTooLarge) { return findForcedMergesSizeLimit(infos, maxNumSegments, last, writer); } else { return findForcedMergesMaxNumSegments(infos, 
maxNumSegments, last, writer); } } /** * Finds merges necessary to force-merge all deletes from the * index. We simply merge adjacent segments that have * deletes, up to mergeFactor at a time. */ @Override public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos, IndexWriter writer) throws IOException { final List<SegmentCommitInfo> segments = segmentInfos.asList(); final int numSegments = segments.size(); if (verbose(writer)) { message("findForcedDeleteMerges: " + numSegments + " segments", writer); } MergeSpecification spec = new MergeSpecification(); int firstSegmentWithDeletions = -1; assert writer != null; for(int i=0;i<numSegments;i++) { final SegmentCommitInfo info = segmentInfos.info(i); int delCount = writer.numDeletedDocs(info); if (delCount > 0) { if (verbose(writer)) { message(" segment " + info.info.name + " has deletions", writer); } if (firstSegmentWithDeletions == -1) firstSegmentWithDeletions = i; else if (i - firstSegmentWithDeletions == mergeFactor) { // We've seen mergeFactor segments in a row with // deletions, so force a merge now: if (verbose(writer)) { message(" add merge " + firstSegmentWithDeletions + " to " + (i-1) + " inclusive", writer); } spec.add(new OneMerge(segments.subList(firstSegmentWithDeletions, i))); firstSegmentWithDeletions = i; } } else if (firstSegmentWithDeletions != -1) { // End of a sequence of segments with deletions, so, // merge those past segments even if it's fewer than // mergeFactor segments if (verbose(writer)) { message(" add merge " + firstSegmentWithDeletions + " to " + (i-1) + " inclusive", writer); } spec.add(new OneMerge(segments.subList(firstSegmentWithDeletions, i))); firstSegmentWithDeletions = -1; } } if (firstSegmentWithDeletions != -1) { if (verbose(writer)) { message(" add merge " + firstSegmentWithDeletions + " to " + (numSegments-1) + " inclusive", writer); } spec.add(new OneMerge(segments.subList(firstSegmentWithDeletions, numSegments))); } return spec; } private static class 
SegmentInfoAndLevel implements Comparable<SegmentInfoAndLevel> { SegmentCommitInfo info; float level; int index; public SegmentInfoAndLevel(SegmentCommitInfo info, float level, int index) { this.info = info; this.level = level; this.index = index; } // Sorts largest to smallest @Override public int compareTo(SegmentInfoAndLevel other) { return Float.compare(other.level, level); } } /** Checks if any merges are now necessary and returns a * {@link MergePolicy.MergeSpecification} if so. A merge * is necessary when there are more than {@link * #setMergeFactor} segments at a given level. When * multiple levels have too many segments, this method * will return multiple merges, allowing the {@link * MergeScheduler} to use concurrency. */ @Override public MergeSpecification findMerges(MergeTrigger mergeTrigger, SegmentInfos infos, IndexWriter writer) throws IOException { final int numSegments = infos.size(); if (verbose(writer)) { message("findMerges: " + numSegments + " segments", writer); } // Compute levels, which is just log (base mergeFactor) // of the size of each segment final List<SegmentInfoAndLevel> levels = new ArrayList<>(); final float norm = (float) Math.log(mergeFactor); final Collection<SegmentCommitInfo> mergingSegments = writer.getMergingSegments(); for(int i=0;i<numSegments;i++) { final SegmentCommitInfo info = infos.info(i); long size = size(info, writer); // Floor tiny segments if (size < 1) { size = 1; } final SegmentInfoAndLevel infoLevel = new SegmentInfoAndLevel(info, (float) Math.log(size)/norm, i); levels.add(infoLevel); if (verbose(writer)) { final long segBytes = sizeBytes(info, writer); String extra = mergingSegments.contains(info) ? " [merging]" : ""; if (size >= maxMergeSize) { extra += " [skip: too large]"; } message("seg=" + writer.segString(info) + " level=" + infoLevel.level + " size=" + String.format(Locale.ROOT, "%.3f MB", segBytes/1024/1024.) 
+ extra, writer); } } final float levelFloor; if (minMergeSize <= 0) levelFloor = (float) 0.0; else levelFloor = (float) (Math.log(minMergeSize)/norm); // Now, we quantize the log values into levels. The // first level is any segment whose log size is within // LEVEL_LOG_SPAN of the max size, or, who has such as // segment "to the right". Then, we find the max of all // other segments and use that to define the next level // segment, etc. MergeSpecification spec = null; final int numMergeableSegments = levels.size(); int start = 0; while(start < numMergeableSegments) { // Find max level of all segments not already // quantized. float maxLevel = levels.get(start).level; for(int i=1+start;i<numMergeableSegments;i++) { final float level = levels.get(i).level; if (level > maxLevel) { maxLevel = level; } } // Now search backwards for the rightmost segment that // falls into this level: float levelBottom; if (maxLevel <= levelFloor) { // All remaining segments fall into the min level levelBottom = -1.0F; } else { levelBottom = (float) (maxLevel - LEVEL_LOG_SPAN); // Force a boundary at the level floor if (levelBottom < levelFloor && maxLevel >= levelFloor) { levelBottom = levelFloor; } } int upto = numMergeableSegments-1; while(upto >= start) { if (levels.get(upto).level >= levelBottom) { break; } upto--; } if (verbose(writer)) { message(" level " + levelBottom + " to " + maxLevel + ": " + (1+upto-start) + " segments", writer); } // Finally, record all merges that are viable at this level: int end = start + mergeFactor; while(end <= 1+upto) { boolean anyTooLarge = false; boolean anyMerging = false; for(int i=start;i<end;i++) { final SegmentCommitInfo info = levels.get(i).info; anyTooLarge |= (size(info, writer) >= maxMergeSize || sizeDocs(info, writer) >= maxMergeDocs); if (mergingSegments.contains(info)) { anyMerging = true; break; } } if (anyMerging) { // skip } else if (!anyTooLarge) { if (spec == null) spec = new MergeSpecification(); final List<SegmentCommitInfo> 
mergeInfos = new ArrayList<>(); for(int i=start;i<end;i++) { mergeInfos.add(levels.get(i).info); assert infos.contains(levels.get(i).info); } if (verbose(writer)) { message(" add merge=" + writer.segString(mergeInfos) + " start=" + start + " end=" + end, writer); } spec.add(new OneMerge(mergeInfos)); } else if (verbose(writer)) { message(" " + start + " to " + end + ": contains segment over maxMergeSize or maxMergeDocs; skipping", writer); } start = end; end = start + mergeFactor; } start = 1+upto; } return spec; } /** <p>Determines the largest segment (measured by * document count) that may be merged with other segments. * Small values (e.g., less than 10,000) are best for * interactive indexing, as this limits the length of * pauses while indexing to a few seconds. Larger values * are best for batched indexing and speedier * searches.</p> * * <p>The default value is {@link Integer#MAX_VALUE}.</p> * * <p>The default merge policy ({@link * LogByteSizeMergePolicy}) also allows you to set this * limit by net size (in MB) of the segment, using {@link * LogByteSizeMergePolicy#setMaxMergeMB}.</p> */ public void setMaxMergeDocs(int maxMergeDocs) { this.maxMergeDocs = maxMergeDocs; } /** Returns the largest segment (measured by document * count) that may be merged with other segments. 
* @see #setMaxMergeDocs */ public int getMaxMergeDocs() { return maxMergeDocs; } @Override public String toString() { StringBuilder sb = new StringBuilder("[" + getClass().getSimpleName() + ": "); sb.append("minMergeSize=").append(minMergeSize).append(", "); sb.append("mergeFactor=").append(mergeFactor).append(", "); sb.append("maxMergeSize=").append(maxMergeSize).append(", "); sb.append("maxMergeSizeForForcedMerge=").append(maxMergeSizeForForcedMerge).append(", "); sb.append("calibrateSizeByDeletes=").append(calibrateSizeByDeletes).append(", "); sb.append("maxMergeDocs=").append(maxMergeDocs).append(", "); sb.append("maxCFSSegmentSizeMB=").append(getMaxCFSSegmentSizeMB()).append(", "); sb.append("noCFSRatio=").append(noCFSRatio); sb.append("]"); return sb.toString(); } }
apache-2.0
aosgi/org.apache.felix.framework
src/test/java/org/apache/felix/framework/StartStopBundleTest.java
5631
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.felix.framework;

import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.jar.JarOutputStream;
import java.util.jar.Manifest;
import java.util.zip.ZipEntry;

import junit.framework.TestCase;

import org.osgi.framework.Bundle;
import org.osgi.framework.BundleActivator;
import org.osgi.framework.BundleContext;
import org.osgi.framework.BundleException;
import org.osgi.framework.Constants;
import org.osgi.framework.launch.Framework;

/**
 * Verifies that {@link Bundle#start()} and {@link Bundle#stop()} block until a
 * concurrent state transition of the same bundle has completed, rather than
 * failing or returning early.
 */
public class StartStopBundleTest extends TestCase
{
    /** Time (ms) the test bundle's activator sleeps in start() and stop(). */
    public static final int DELAY = 1000;

    public void testStartStopBundle() throws Exception
    {
        // Minimal framework configuration with a throwaway storage area.
        Map<String, String> params = new HashMap<String, String>();
        params.put(Constants.FRAMEWORK_SYSTEMPACKAGES,
            "org.osgi.framework; version=1.4.0,"
            + "org.osgi.service.packageadmin; version=1.2.0,"
            + "org.osgi.service.startlevel; version=1.1.0,"
            + "org.osgi.util.tracker; version=1.3.3,"
            + "org.osgi.service.url; version=1.0.0");
        File cacheDir = File.createTempFile("felix-cache", ".dir");
        // createTempFile creates a file; replace it with a directory of the same name.
        cacheDir.delete();
        cacheDir.mkdirs();
        String cache = cacheDir.getPath();
        params.put("felix.cache.profiledir", cache);
        params.put("felix.cache.dir", cache);
        params.put(Constants.FRAMEWORK_STORAGE, cache);

        String mf = "Bundle-SymbolicName: boot.test\n"
            + "Bundle-Version: 1.1.0\n"
            + "Bundle-ManifestVersion: 2\n"
            + "Import-Package: org.osgi.framework\n";
        File bundleFile = createBundle(mf, cacheDir);

        Framework f = new Felix(params);
        f.init();
        f.start();

        try
        {
            final Bundle bundle = f.getBundleContext().installBundle(bundleFile.toURI().toString());

            // Kick off a slow start() on another thread, then stop() from here:
            // stop() must wait for the in-flight start() to finish first.
            new Thread()
            {
                public void run()
                {
                    try
                    {
                        bundle.start();
                    }
                    catch (BundleException e)
                    {
                        e.printStackTrace();
                    }
                }
            }.start();
            Thread.sleep(DELAY / 4);

            long t0 = System.currentTimeMillis();
            bundle.stop();
            long t1 = System.currentTimeMillis();
            assertEquals(Bundle.RESOLVED, bundle.getState());
            // stop() must have blocked for (most of) the activator's start delay.
            assertTrue((t1 - t0) > DELAY / 2);

            bundle.start();

            // Mirror image: slow stop() on another thread, then start() from here.
            new Thread()
            {
                public void run()
                {
                    try
                    {
                        bundle.stop();
                    }
                    catch (BundleException e)
                    {
                        e.printStackTrace();
                    }
                }
            }.start();
            Thread.sleep(DELAY / 4);

            t0 = System.currentTimeMillis();
            bundle.start();
            t1 = System.currentTimeMillis();
            assertEquals(Bundle.ACTIVE, bundle.getState());
            assertTrue((t1 - t0) > DELAY / 2);
        }
        finally
        {
            f.stop();
            Thread.sleep(DELAY);
            deleteDir(cacheDir);
        }
    }

    /**
     * Builds a one-class bundle jar containing {@link TestBundleActivator},
     * declared as the bundle activator.
     *
     * @param manifest the textual manifest headers for the bundle
     * @param tempDir directory in which to create the jar
     * @return the created bundle jar
     * @throws IOException if the jar cannot be written
     */
    private static File createBundle(String manifest, File tempDir) throws IOException
    {
        File f = File.createTempFile("felix-bundle", ".jar", tempDir);

        Manifest mf = new Manifest(new ByteArrayInputStream(manifest.getBytes("utf-8")));
        mf.getMainAttributes().putValue("Manifest-Version", "1.0");
        mf.getMainAttributes().putValue(Constants.BUNDLE_ACTIVATOR, TestBundleActivator.class.getName());

        JarOutputStream os = new JarOutputStream(new FileOutputStream(f), mf);
        try
        {
            String path = TestBundleActivator.class.getName().replace('.', '/') + ".class";
            os.putNextEntry(new ZipEntry(path));

            InputStream is = TestBundleActivator.class.getClassLoader().getResourceAsStream(path);
            try
            {
                // Copy the class bytes with a read loop: a single read() of
                // available() bytes is not guaranteed to return the whole stream.
                byte[] buf = new byte[4096];
                int n;
                while ((n = is.read(buf)) != -1)
                {
                    os.write(buf, 0, n);
                }
            }
            finally
            {
                is.close();
            }
        }
        finally
        {
            os.close();
        }
        return f;
    }

    /** Recursively deletes a directory tree, asserting each deletion succeeds. */
    private static void deleteDir(File root) throws IOException
    {
        if (root.isDirectory())
        {
            for (File file : root.listFiles())
            {
                deleteDir(file);
            }
        }
        assertTrue(root.delete());
    }

    /** Activator that sleeps {@link #DELAY} ms in both lifecycle callbacks. */
    public static class TestBundleActivator implements BundleActivator
    {
        public void start(BundleContext context) throws Exception
        {
            Thread.sleep(DELAY);
        }

        public void stop(BundleContext context) throws Exception
        {
            Thread.sleep(DELAY);
        }
    }
}
apache-2.0
manovotn/core
tests-arquillian/src/test/java/org/jboss/weld/tests/interceptors/aroundConstruct/extended/BravoInterceptor.java
973
/* * JBoss, Home of Professional Open Source * Copyright 2013, Red Hat, Inc., and individual contributors * by the @authors tag. See the copyright.txt in the distribution for a * full listing of individual contributors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jboss.weld.tests.interceptors.aroundConstruct.extended; import javax.interceptor.Interceptor; @Interceptor @BravoBinding public class BravoInterceptor extends AbstractInterceptor { }
apache-2.0
chicagozer/rheosoft
components/camel-dozer/src/main/java/org/apache/camel/converter/dozer/DozerTypeConverterLoader.java
8660
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.camel.converter.dozer;

import java.net.URL;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.camel.CamelContext;
import org.apache.camel.CamelContextAware;
import org.apache.camel.TypeConverter;
import org.apache.camel.spi.ClassResolver;
import org.apache.camel.spi.TypeConverterRegistry;
import org.dozer.DozerBeanMapper;
import org.dozer.Mapper;
import org.dozer.classmap.ClassMap;
import org.dozer.classmap.MappingFileData;
import org.dozer.config.BeanContainer;
import org.dozer.loader.api.BeanMappingBuilder;
import org.dozer.loader.xml.MappingFileReader;
import org.dozer.loader.xml.XMLParserFactory;
import org.dozer.util.DozerClassLoader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * <code>DozerTypeConverterLoader</code> provides the mechanism for registering
 * a Dozer {@link Mapper} as {@link TypeConverter} for a {@link CamelContext}.
 * <p/>
 * While a mapper can be explicitly supplied as a parameter the
 * {@link CamelContext}'s registry will also be searched for {@link Mapper}
 * instances. A {@link DozerTypeConverter} is created to wrap each
 * {@link Mapper} instance and the mapper is queried for the types it converts.
 * The queried types are used to register the {@link TypeConverter} with the
 * context via its {@link TypeConverterRegistry}.
 */
public class DozerTypeConverterLoader implements CamelContextAware {

    private final Logger log = LoggerFactory.getLogger(getClass());
    private CamelContext camelContext;
    private DozerBeanMapper mapper;

    /**
     * Creates a <code>DozerTypeConverter</code> performing no
     * {@link TypeConverter} registration.
     */
    public DozerTypeConverterLoader() {
    }

    /**
     * Creates a <code>DozerTypeConverter</code> that will search the given
     * {@link CamelContext} for instances of {@link DozerBeanMapper}. Each
     * discovered instance will be wrapped as a {@link DozerTypeConverter} and
     * register as a {@link TypeConverter} with the context
     *
     * @param camelContext the context to register the
     *            {@link DozerTypeConverter} in
     */
    public DozerTypeConverterLoader(CamelContext camelContext) {
        init(camelContext, null);
    }

    /**
     * Creates a <code>DozerTypeConverter</code> that will wrap the the given
     * {@link DozerBeanMapper} as a {@link DozerTypeConverter} and register it
     * with the given context. It will also search the context for
     *
     * @param camelContext the context to register the
     *            {@link DozerTypeConverter} in
     * @param mapper the DozerMapperBean to be wrapped as a type converter.
     */
    public DozerTypeConverterLoader(CamelContext camelContext, DozerBeanMapper mapper) {
        init(camelContext, mapper);
    }

    /**
     * Does the actual querying and registration of {@link DozerTypeConverter}s
     * with the {@link CamelContext}.
     *
     * @param camelContext the context to register the
     *            {@link DozerTypeConverter} in
     * @param mapper the DozerMapperBean to be wrapped as a type converter.
     */
    public void init(CamelContext camelContext, DozerBeanMapper mapper) {
        this.camelContext = camelContext;
        this.mapper = mapper;

        // Route Dozer's class loading through Camel's resolver so mappings
        // load correctly in OSGi and other constrained classloader setups.
        CamelToDozerClassResolverAdapter adapter = new CamelToDozerClassResolverAdapter(camelContext);
        BeanContainer.getInstance().setClassLoader(adapter);

        Map<String, DozerBeanMapper> mappers =
            new HashMap<String, DozerBeanMapper>(camelContext.getRegistry().lookupByType(DozerBeanMapper.class));
        if (mapper != null) {
            mappers.put("parameter", mapper);
        }

        if (mappers.size() > 1) {
            log.warn("Loaded {} Dozer mappers from Camel registry."
                    + " Dozer is most efficient when there is a single mapper instance. Consider amalgamating instances.",
                    mappers.size());
        } else if (mappers.isEmpty()) {
            log.warn("No Dozer mappers found in Camel registry. You should add Dozer mappers as beans to the registry of the type: {}",
                    DozerBeanMapper.class.getName());
        }

        TypeConverterRegistry registry = camelContext.getTypeConverterRegistry();
        for (DozerBeanMapper dozer : mappers.values()) {
            List<ClassMap> all = loadMappings(camelContext, dozer);
            registerClassMaps(registry, dozer, all);
        }
    }

    /**
     * Registers one {@link DozerTypeConverter} (wrapping the given mapper) for
     * each direction of each class map.
     */
    private void registerClassMaps(TypeConverterRegistry registry, DozerBeanMapper dozer, List<ClassMap> all) {
        DozerTypeConverter converter = new DozerTypeConverter(dozer);
        for (ClassMap map : all) {
            if (log.isInfoEnabled()) {
                log.info("Added {} -> {} as type converter to: {}",
                        new Object[]{map.getSrcClassName(), map.getDestClassName(), registry});
            }
            // Dozer mappings are bidirectional; register both directions.
            registry.addTypeConverter(map.getSrcClassToMap(), map.getDestClassToMap(), converter);
            registry.addTypeConverter(map.getDestClassToMap(), map.getSrcClassToMap(), converter);
        }
    }

    /**
     * Reads all class maps declared in the mapper's mapping files.
     *
     * @return the class maps, or an empty list when no mapping files are set
     * @throws IllegalArgumentException if a declared mapping file cannot be
     *             found on the classpath
     */
    private List<ClassMap> loadMappings(CamelContext camelContext, DozerBeanMapper mapper) {
        List<ClassMap> answer = new ArrayList<ClassMap>();

        // load the class map using the class resolver so we can load from classpath in OSGi
        MappingFileReader reader = new MappingFileReader(XMLParserFactory.getInstance());
        List<String> mappingFiles = mapper.getMappingFiles();
        if (mappingFiles == null) {
            return Collections.emptyList();
        }

        for (String name : mappingFiles) {
            URL url = camelContext.getClassResolver().loadResourceAsURL(name);
            if (url == null) {
                // fail fast with a clear message instead of an obscure NPE in the reader
                throw new IllegalArgumentException("Cannot find Dozer mapping file '" + name + "' on the classpath");
            }
            MappingFileData data = reader.read(url);
            answer.addAll(data.getClassMaps());
        }

        return answer;
    }

    /**
     * Registers Dozer <code>BeanMappingBuilder</code> in current mapper instance.
     * This method should be called instead of direct <code>mapper.addMapping()</code> invocation for Camel
     * being able to register given type conversion.
     *
     * @param beanMappingBuilder api-based mapping builder
     */
    public void addMapping(BeanMappingBuilder beanMappingBuilder) {
        if (mapper == null) {
            log.warn("No mapper instance provided to {}. Mapping has not been registered!", this.getClass().getSimpleName());
            return;
        }

        mapper.addMapping(beanMappingBuilder);
        MappingFileData mappingFileData = beanMappingBuilder.build();
        TypeConverterRegistry registry = camelContext.getTypeConverterRegistry();
        List<ClassMap> classMaps = new ArrayList<ClassMap>(mappingFileData.getClassMaps());
        registerClassMaps(registry, mapper, classMaps);
    }

    public CamelContext getCamelContext() {
        return camelContext;
    }

    public void setCamelContext(CamelContext camelContext) {
        init(camelContext, null);
    }

    /**
     * Adapts Camel's {@link ClassResolver} to Dozer's {@link DozerClassLoader}
     * SPI, with Dozer's own classloader as a resource-lookup fallback.
     */
    private static final class CamelToDozerClassResolverAdapter implements DozerClassLoader {

        private final ClassResolver classResolver;

        private CamelToDozerClassResolverAdapter(CamelContext camelContext) {
            classResolver = camelContext.getClassResolver();
        }

        public Class<?> loadClass(String s) {
            return classResolver.resolveClass(s);
        }

        public URL loadResource(String s) {
            URL url = classResolver.loadResourceAsURL(s);
            if (url == null) {
                // using the classloader of DozerClassLoader as a fallback
                url = DozerClassLoader.class.getClassLoader().getResource(s);
            }
            return url;
        }
    }
}
apache-2.0
IHTSDO/snow-owl
net4j/com.b2international.snowowl.rpc.test/src/com/b2international/snowowl/rpc/test/testcases/ProgressMonitorUsingTest.java
2541
/* * Copyright 2011-2015 B2i Healthcare Pte Ltd, http://b2i.sg * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.b2international.snowowl.rpc.test.testcases; import static org.mockito.Matchers.anyDouble; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import org.eclipse.core.runtime.IProgressMonitor; import org.eclipse.core.runtime.NullProgressMonitor; import org.junit.Test; import com.b2international.snowowl.rpc.test.service.IProgressMonitorUsingService; import com.b2international.snowowl.rpc.test.service.impl.ProgressMonitorUsingService; /** * Contains test cases for remote method calls that want to report their progress, indicated by an * {@link IProgressMonitor} parameter in the signature. 
* */ public class ProgressMonitorUsingTest extends AbstractRpcTest<IProgressMonitorUsingService, ProgressMonitorUsingService> { public ProgressMonitorUsingTest() { super(IProgressMonitorUsingService.class); } @Test public void testIProgressMonitor() { final IProgressMonitorUsingService serviceProxy = initializeService(); final IProgressMonitor mockMonitor = mock(IProgressMonitor.class); serviceProxy.reportWithIProgressMonitor(mockMonitor); verify(mockMonitor).beginTask(IProgressMonitorUsingService.TASK_NAME, IProgressMonitorUsingService.TOTAL_WORK); verify(mockMonitor, times(IProgressMonitorUsingService.TOTAL_WORK)).worked(1); verify(mockMonitor, never()).internalWorked(anyDouble()); verify(mockMonitor).done(); } @Test(expected = UnsupportedOperationException.class) public void testNullProgressMonitor() { final IProgressMonitorUsingService serviceProxy = initializeService(); final NullProgressMonitor nullMonitor = new NullProgressMonitor(); serviceProxy.reportWithNullProgressMonitor(nullMonitor); } @Override protected ProgressMonitorUsingService createServiceImplementation() { return new ProgressMonitorUsingService(); } }
apache-2.0
R0g3r10LL31t3/AccountManager-ROLLSoftware
test/com/rollsoftware/br/test/util/CDITest.java
2705
/* * Copyright 2016-2026 Rogério Lecarião Leite * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * CEO 2016: Rogério Lecarião Leite; ROLL Software */ package com.rollsoftware.br.test.util; import org.jboss.weld.context.RequestContext; import org.jboss.weld.context.SessionContext; import org.jboss.weld.context.unbound.UnboundLiteral; import org.jboss.weld.environment.se.Weld; import org.jboss.weld.environment.se.WeldContainer; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; /** * * @author Rogério * @date December, 2016 */ public class CDITest { protected static Weld WELD; protected WeldContainer weldContainer; public CDITest() { } @BeforeClass public static void setUpClass() { WELD = new Weld(); WELD .disableDiscovery() .property("org.jboss.weld.construction.relaxed", true); } @AfterClass public static void tearDownClass() { } @Before public void setUp() { weldContainer = WELD .containerId(getClass().getSimpleName()) .initialize(); } @After public void tearDown() { weldContainer.close(); } public final <T> T getManagedBean(Class<T> type) { return this.weldContainer.instance() .select(type) .get(); } public final void runAtRequestScoped(final Runnable runnable) { RequestContext context = weldContainer .instance() .select(RequestContext.class, UnboundLiteral.INSTANCE) .get(); try { context.activate(); runnable.run(); } finally { context.deactivate(); } } public final void runAtSessionScoped(final Runnable runnable) { 
SessionContext context = weldContainer .instance() .select(SessionContext.class, UnboundLiteral.INSTANCE) .get(); try { context.activate(); runnable.run(); } finally { context.deactivate(); } } }
apache-2.0
kinow/generators-FUNCTOR-14
src/main/java/org/apache/commons/functor/aggregator/AbstractListBackedAggregator.java
6774
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.commons.functor.aggregator;

import java.util.List;

import org.apache.commons.functor.UnaryFunction;
import org.apache.commons.lang3.Validate;

/**
 * An aggregator which stores the data series in a List. Every call to
 * {@link #add(Object)} will add an item in the array and there is no limit to
 * how much data we can store. It is down to subclasses to decide which types of
 * List implementation they need to used -- and the abstract factory
 * {@link #createList()} is provided for this.
 * <p>This implementation also allows for various "aggregations" of the list to be
 * used by providing a {@link UnaryFunction UnaryFunction&lt;List&lt;T&gt;, T&gt;} in the
 * constructor.</p>
 * <p>
 * <b>Thread safety</b> : Note that due to the fact that
 * {@link AbstractTimedAggregator} provides a threadsafe environment for access
 * to data, the <code>List</code> implementation can be unsynchronized.
 * </p>
 *
 * @param <T>
 *            Type of object stored.
 * @see AbstractTimedAggregator
 */
public abstract class AbstractListBackedAggregator<T> extends AbstractTimedAggregator<T> {
    /**
     * Stores the data series we ought to aggregate/evaluate. This list can only
     * be modified via {@link #reset()} and {@link #add(Object)} and will be
     * traversed during {@link #evaluate()}.
     */
    private final List<T> series;
    /**
     * Used to actually aggregate the data when {@link #evaluate()} is called.
     * This is set in {@link #AbstractListBackedAggregator() the constructor}.
     */
    private final UnaryFunction<List<T>, T> aggregationFunction;

    /**
     * Default constructor. Similar to
     * {@link #AbstractListBackedAggregator(UnaryFunction, long)
     * AbstractListBackedAggregator(aggregationFunction, 0L)}.
     *
     * @param aggregationFunction
     *            Aggregation function to use in {@link #evaluate()}. Throws
     *            <code>NullPointerException</code> if this is <code>null</code>
     */
    public AbstractListBackedAggregator(UnaryFunction<List<T>, T> aggregationFunction) {
        this(aggregationFunction, 0L);
    }

    /**
     * Similar to
     * {@link #AbstractListBackedAggregator(UnaryFunction, long, boolean)
     * AbstractListBackedAggregator(aggregationFunction, interval, false)}.
     *
     * @param aggregationFunction
     *            Aggregation function to use in {@link #evaluate()}. Throws
     *            <code>NullPointerException</code> if this is <code>null</code>
     * @param interval
     *            interval in milliseconds to reset this aggregator
     */
    public AbstractListBackedAggregator(UnaryFunction<List<T>, T> aggregationFunction, long interval) {
        this(aggregationFunction, interval, false);
    }

    /**
     * Constructs an aggregator which will use the given function, reset itself
     * at the given interval and will use a shared timer or its own private timer.
     *
     * @param aggregationFunction
     *            Aggregation function to use in {@link #evaluate()}. Throws
     *            <code>NullPointerException</code> if this is <code>null</code>
     * @param interval
     *            interval in milliseconds to reset this aggregator
     * @param useSharedTimer
     *            if set to true, it will use a shared timer, as per
     *            {@link AbstractTimedAggregator#AbstractTimedAggregator(long, boolean)}
     *            ; otherwise if it's false it will use its own timer instance
     * @see AbstractTimedAggregator#AbstractTimedAggregator(long, boolean)
     */
    public AbstractListBackedAggregator(UnaryFunction<List<T>, T> aggregationFunction, long interval,
            boolean useSharedTimer) {
        super(interval, useSharedTimer);
        this.aggregationFunction = Validate.notNull(aggregationFunction,
                "UnaryFunction argument must not be null");
        // NOTE: createList() is an overridable method invoked from the
        // constructor -- part of the documented abstract-factory contract of
        // this class; implementations must not rely on subclass state.
        this.series = createList();
    }

    /**
     * Adds data to the series which will be aggregated. This implementation
     * simply adds the data to the {@link #series} list.
     *
     * @param data
     *            Data to be added to the data series.
     */
    @Override
    public final void doAdd(T data) {
        series.add(data);
    }

    /**
     * The actual "beef" of this class: iterate through the list and aggregates
     * all the data and evaluates the result. This is done by calling
     * <code>aggregationFunction.evaluate(series)</code>.
     *
     * @return the result of <code>aggregationFunction.evaluate(series)</code>
     * @see Aggregator#evaluate()
     */
    @Override
    protected final T doEvaluate() {
        return aggregationFunction.evaluate(series);
    }

    /**
     * Resets the data series to the empty state.
     */
    @Override
    protected final void doReset() {
        series.clear();
    }

    /**
     * Allows subclasses to create the list which will store the {@link #series
     * data series}.
     *
     * @return an instance of <code>List</code> which will be used to store the
     *         data.
     */
    protected abstract List<T> createList();

    /**
     * Getter for {@link #series}.
     *
     * @return Value of {@link #series}
     */
    protected final List<T> getSeries() {
        return series;
    }

    /**
     * Simply returns the size of the data series which is the size of the list
     * used internally.
     *
     * @return Size of {@link #series} -- equivalent to
     *         <code>series.size()</code>
     */
    @Override
    protected final int retrieveDataSize() {
        return series.size();
    }

    /**
     * Getter for {@link #aggregationFunction}. Provided for testing purposes
     * only.
     *
     * @return Current value of {@link #aggregationFunction}
     */
    final UnaryFunction<List<T>, T> getAggregationFunction() {
        return aggregationFunction;
    }

    @Override
    public String toString() {
        return AbstractListBackedAggregator.class.getName();
    }
}
apache-2.0
kinbod/deeplearning4j
deeplearning4j-scaleout/spark/dl4j-spark-nlp-java8/src/main/java/org/deeplearning4j/spark/models/sequencevectors/functions/TrainingFunction.java
7083
package org.deeplearning4j.spark.models.sequencevectors.functions;

import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.broadcast.Broadcast;
import org.deeplearning4j.models.embeddings.loader.VectorsConfiguration;
import org.deeplearning4j.models.sequencevectors.sequence.Sequence;
import org.deeplearning4j.models.sequencevectors.sequence.SequenceElement;
import org.deeplearning4j.models.sequencevectors.sequence.ShallowSequenceElement;
import org.deeplearning4j.models.word2vec.wordstore.VocabCache;
import org.deeplearning4j.spark.models.sequencevectors.learning.SparkElementsLearningAlgorithm;
import org.deeplearning4j.spark.models.sequencevectors.learning.SparkSequenceLearningAlgorithm;
import org.nd4j.linalg.exception.ND4JIllegalStateException;
import org.nd4j.parameterserver.distributed.VoidParameterServer;
import org.nd4j.parameterserver.distributed.conf.VoidConfiguration;
import org.nd4j.parameterserver.distributed.messages.TrainingMessage;
import org.nd4j.parameterserver.distributed.training.TrainingDriver;
import org.nd4j.parameterserver.distributed.transport.RoutedTransport;

import java.util.concurrent.atomic.AtomicLong;

/**
 * Spark {@link VoidFunction} that trains SequenceVectors over a single
 * {@code Sequence<T>} partition element by handing training frames to the
 * distributed {@link VoidParameterServer}.
 * <p>
 * All heavyweight collaborators (configuration, vocab, learning algorithms,
 * parameter server) are initialized lazily inside {@link #call(Sequence)}
 * because this function is serialized to Spark executors; only the
 * {@link Broadcast} handles survive serialization (the rest are transient).
 *
 * @author raver119@gmail.com
 */
@Slf4j
public class TrainingFunction<T extends SequenceElement> implements VoidFunction<Sequence<T>> {
    // Broadcast handles: the only state shipped to executors.
    protected Broadcast<VocabCache<ShallowSequenceElement>> vocabCacheBroadcast;
    protected Broadcast<VectorsConfiguration> configurationBroadcast;
    protected Broadcast<VoidConfiguration> paramServerConfigurationBroadcast;

    // Lazily materialized, per-executor state. Transient: rebuilt after deserialization.
    protected transient VoidParameterServer paramServer;
    protected transient VectorsConfiguration vectorsConfiguration;

    protected transient SparkElementsLearningAlgorithm elementsLearningAlgorithm;
    protected transient SparkSequenceLearningAlgorithm sequenceLearningAlgorithm;
    protected transient VocabCache<ShallowSequenceElement> shallowVocabCache;

    protected transient TrainingDriver<? extends TrainingMessage> driver;

    /**
     * @param vocabCacheBroadcast             broadcast of the shallow (reduced) vocabulary
     * @param vectorsConfigurationBroadcast   broadcast of the vectors configuration
     * @param paramServerConfigurationBroadcast broadcast of the parameter-server configuration
     */
    public TrainingFunction(@NonNull Broadcast<VocabCache<ShallowSequenceElement>> vocabCacheBroadcast,
                    @NonNull Broadcast<VectorsConfiguration> vectorsConfigurationBroadcast,
                    @NonNull Broadcast<VoidConfiguration> paramServerConfigurationBroadcast) {
        this.vocabCacheBroadcast = vocabCacheBroadcast;
        this.configurationBroadcast = vectorsConfigurationBroadcast;
        this.paramServerConfigurationBroadcast = paramServerConfigurationBroadcast;
    }

    /**
     * Trains over one sequence: lazily initializes the parameter server and
     * learning algorithms, maps the sequence's elements onto the shallow
     * vocabulary, then submits a training frame to the parameter server.
     *
     * @param sequence sequence of elements (and optionally labels) to train on
     * @throws Exception if learning-algorithm instantiation or training fails
     */
    @Override
    @SuppressWarnings("unchecked")
    public void call(Sequence<T> sequence) throws Exception {
        // Depending on the configured training mode we'll end up running
        // SkipGram/CBOW/PV-DM/PV-DBOW via the configured learning algorithms.
        if (vectorsConfiguration == null)
            vectorsConfiguration = configurationBroadcast.getValue();

        if (paramServer == null) {
            paramServer = VoidParameterServer.getInstance();

            // First call on this executor: reflectively instantiate the elements
            // learning algorithm so we can obtain its training driver.
            // NOTE(review): this instance is created WITHOUT configure(...); the
            // later configuring branch is then skipped because the field is
            // non-null — presumably intentional, but worth confirming.
            if (elementsLearningAlgorithm == null) {
                try {
                    elementsLearningAlgorithm = (SparkElementsLearningAlgorithm) Class
                                    .forName(vectorsConfiguration.getElementsLearningAlgorithm()).newInstance();
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            }

            driver = elementsLearningAlgorithm.getTrainingDriver();

            // FIXME: init line should probably be removed, basically init happens in VocabRddFunction
            paramServer.init(paramServerConfigurationBroadcast.getValue(), new RoutedTransport(), driver);
        }

        // NOTE(review): duplicate of the null-check at the top of this method;
        // harmless, but one of the two could be removed.
        if (vectorsConfiguration == null)
            vectorsConfiguration = configurationBroadcast.getValue();

        if (shallowVocabCache == null)
            shallowVocabCache = vocabCacheBroadcast.getValue();


        if (elementsLearningAlgorithm == null && vectorsConfiguration.getElementsLearningAlgorithm() != null) {
            // TODO: do ELA initialization
            try {
                elementsLearningAlgorithm = (SparkElementsLearningAlgorithm) Class
                                .forName(vectorsConfiguration.getElementsLearningAlgorithm()).newInstance();
                elementsLearningAlgorithm.configure(shallowVocabCache, null, vectorsConfiguration);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }

        if (sequenceLearningAlgorithm == null && vectorsConfiguration.getSequenceLearningAlgorithm() != null) {
            // TODO: do SLA initialization
            try {
                sequenceLearningAlgorithm = (SparkSequenceLearningAlgorithm) Class
                                .forName(vectorsConfiguration.getSequenceLearningAlgorithm()).newInstance();
                sequenceLearningAlgorithm.configure(shallowVocabCache, null, vectorsConfiguration);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }

        if (elementsLearningAlgorithm == null && sequenceLearningAlgorithm == null) {
            throw new ND4JIllegalStateException("No LearningAlgorithms specified!");
        }

        /*
         At this moment we should have everything ready for actual initialization.
         The only limitation we have - our sequence is detached from the actual
         vocabulary, so we need to merge it back virtually.
        */
        Sequence<ShallowSequenceElement> mergedSequence = new Sequence<>();
        for (T element : sequence.getElements()) {
            // it's possible to get null here, i.e. if frequency for this element is below minWordFrequency threshold
            ShallowSequenceElement reduced = shallowVocabCache.tokenFor(element.getStorageId());
            if (reduced != null)
                mergedSequence.addElement(reduced);
        }

        // do the same with labels, transfer them, if any
        if (sequenceLearningAlgorithm != null && vectorsConfiguration.isTrainSequenceVectors()) {
            for (T label : sequence.getSequenceLabels()) {
                ShallowSequenceElement reduced = shallowVocabCache.tokenFor(label.getStorageId());
                if (reduced != null)
                    mergedSequence.addSequenceLabel(reduced);
            }
        }

        // Now we have a shallow sequence, which we'll use for training.
        // All we want here is a uniform way to do training, matching both the
        // standalone and Spark codebases: a method that takes a sequence as
        // input and returns something that's either used for aggregation or
        // for a ParamServer message.
        // NOTE(review): if only a sequence learning algorithm was configured,
        // elementsLearningAlgorithm is null here and the call below would NPE
        // — TODO confirm whether ELA is always expected to be present.

        // FIXME: temporary hook
        if (sequence.size() > 0)
            paramServer.execDistributed(
                            elementsLearningAlgorithm.frameSequence(mergedSequence, new AtomicLong(119), 25e-3));
        else
            log.warn("Skipping empty sequence...");
    }
}
apache-2.0
apache/tapestry3
tapestry-framework/src/org/apache/tapestry/util/io/FloatAdaptor.java
1508
// Copyright 2004 The Apache Software Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package org.apache.tapestry.util.io; /** * Squeezes a {@link Float}. * * @author Howard Lewis Ship * @version $Id$ * **/ class FloatAdaptor implements ISqueezeAdaptor { private static final String PREFIX = "f"; /** * Registers using the prefix 'f'. * **/ public void register(DataSqueezer squeezer) { squeezer.register(PREFIX, Float.class, this); } /** * Invoked <code>toString()</code> on data (which is type {@link Float}), * and prefixs the result. * **/ public String squeeze(DataSqueezer squeezer, Object data) { return PREFIX + data.toString(); } /** * Constructs a {@link Float} from the string, after stripping * the prefix. * **/ public Object unsqueeze(DataSqueezer squeezer, String string) { return new Float(string.substring(1)); } }
apache-2.0
gustavoleitao/Easy-Cassandra
src/test/java/org/easycassandra/persistence/cassandra/EasyCassandraManagerTest.java
6413
package org.easycassandra.persistence.cassandra; import java.util.Arrays; import java.util.logging.Logger; import javax.persistence.Entity; import org.easycassandra.Constants; import org.easycassandra.FieldJavaNotEquivalentCQLException; import org.easycassandra.KeyProblemsException; import org.easycassandra.ReplicaStrategy; import org.easycassandra.bean.model.createtable.SimpleBean; import org.easycassandra.bean.model.createtable.SimpleBeanAlterTable; import org.easycassandra.bean.model.createtable.SimpleBeanComplexId; import org.easycassandra.bean.model.createtable.SimpleBeanEnum; import org.easycassandra.bean.model.createtable.SimpleBeanSubClass; import org.easycassandra.bean.model.createtable.SimpleBeanWrong; import org.easycassandra.bean.model.createtable.SimpleComplexBean; import org.junit.Assert; import org.junit.Before; import org.junit.Test; /** * test to Cassandra manager. * @author otaviojava */ public class EasyCassandraManagerTest { private static final int THREE = 3; private static EasyCassandraManager easyCassandraManager; { ClusterInformation clusterInformation = new ClusterInformation(); clusterInformation.setHosts(Arrays.asList(Constants.HOST)); clusterInformation.setKeySpace(Constants.KEY_SPACE_SIMPLE_TEST); easyCassandraManager = new EasyCassandraManager(clusterInformation); } /** * run before the test. */ @Before public void beforeClass() { easyCassandraManager.getPersistence(Constants.HOST, Constants.KEY_SPACE_SIMPLE_TEST, ReplicaStrategy.SIMPLES_TRATEGY, THREE); } /** * run the test. */ @Test public void getPersistenceTest() { Assert.assertNotNull(easyCassandraManager.getPersistence("javabahia")); } /** * run the test. */ @Test public void getCreateSimpleTest() { Assert.assertNotNull(easyCassandraManager.getPersistence( Constants.HOST, Constants.KEY_SPACE_SIMPLE_TEST, ReplicaStrategy.SIMPLES_TRATEGY, THREE)); } /** * run the test. 
*/ @Test public void getCreateNetworkTopologyTest() { Assert.assertNotNull(easyCassandraManager.getPersistence( Constants.HOST, "simpleTest2", ReplicaStrategy.SIMPLES_TRATEGY, THREE)); } /** * run the test. */ @Test public void addColumnFamilyTest() { easyCassandraManager.getPersistence(Constants.HOST, Constants.KEY_SPACE_SIMPLE_TEST, ReplicaStrategy.SIMPLES_TRATEGY, THREE); Assert.assertTrue(easyCassandraManager.addFamilyObject( SimpleBean.class, "javabahia")); } private void runRemove(String columnFamily) { try { Persistence persistence = easyCassandraManager.getPersistence( Constants.HOST, Constants.KEY_SPACE_SIMPLE_TEST, ReplicaStrategy.SIMPLES_TRATEGY, THREE); persistence.executeUpdate("DROP TABLE ".concat( Constants.KEY_SPACE_SIMPLE_TEST.concat(".").concat( columnFamily)) .concat(" ;")); } catch (Exception exception) { Logger.getLogger(EasyCassandraManagerTest.class.getName()).info( "Column not exist: ".concat(exception.getMessage())); } } /** * run the test. */ @Test(expected = FieldJavaNotEquivalentCQLException.class) public void addColumnFamilyErrorTest() { runRemove("SimpleBeanWrong"); Persistence persistence = easyCassandraManager.getPersistence( Constants.HOST, Constants.KEY_SPACE_SIMPLE_TEST, ReplicaStrategy.SIMPLES_TRATEGY, THREE); persistence.executeUpdate("create table " + Constants.KEY_SPACE_SIMPLE_TEST + ".SimpleBeanWrong( id bigint," + " name ascii, born int, PRIMARY KEY (id) )"); Assert.assertTrue(easyCassandraManager.addFamilyObject( SimpleBeanWrong.class, Constants.KEY_SPACE_SIMPLE_TEST)); } /** * run the test. */ @Test(expected = KeyProblemsException.class) public void addColumnFamilyKeyMandatoryTest() { Assert.assertTrue(easyCassandraManager.addFamilyObject(BeanWrong.class, Constants.KEY_SPACE_SIMPLE_TEST)); } /** * run the test. 
*/ @Test public void addColumnFamilyAlterTableTest() { runRemove("SimpleBeanAlterTable"); Persistence persistence = easyCassandraManager.getPersistence( Constants.HOST, Constants.KEY_SPACE_SIMPLE_TEST, ReplicaStrategy.SIMPLES_TRATEGY, THREE); persistence.executeUpdate("create table " + Constants.KEY_SPACE_SIMPLE_TEST + ".SimpleBeanAlterTable( id bigint, " + "name ascii, born int, PRIMARY KEY (id) )"); Assert.assertTrue(easyCassandraManager.addFamilyObject( SimpleBeanAlterTable.class, Constants.KEY_SPACE_SIMPLE_TEST)); } /** * run the test. */ @Test public void addColumnFamilyComplexTest() { Assert.assertTrue(easyCassandraManager.addFamilyObject( SimpleComplexBean.class, Constants.KEY_SPACE_SIMPLE_TEST)); } /** * run the test. */ @Test public void addColumnFamilyComplexIDTest() { Assert.assertTrue(easyCassandraManager.addFamilyObject( SimpleBeanComplexId.class, Constants.KEY_SPACE_SIMPLE_TEST)); } /** * run the test. */ @Test public void addColumnFamilySuperTest() { Assert.assertTrue(easyCassandraManager.addFamilyObject( SimpleBeanSubClass.class, Constants.KEY_SPACE_SIMPLE_TEST)); } /** * run the test. */ @Test public void addColumnFamilyEnumTest() { Assert.assertTrue(easyCassandraManager.addFamilyObject( SimpleBeanEnum.class, Constants.KEY_SPACE_SIMPLE_TEST)); } /** * class to test. * @author otaviojava */ @Entity(name = "wrong") public class BeanWrong { private String name; public String getName() { return name; } public void setName(String name) { this.name = name; } } }
apache-2.0
shiroyp/bigtop
bigtop-data-generators/bigpetstore-data-generator/src/test/java/org/apache/bigtop/datagenerators/bigpetstore/generators/customer/TestCustomerSampler.java
4344
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.bigtop.datagenerators.bigpetstore.generators.customer; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Map; import org.apache.bigtop.datagenerators.bigpetstore.Constants; import org.apache.bigtop.datagenerators.bigpetstore.datamodels.Customer; import org.apache.bigtop.datagenerators.bigpetstore.datamodels.Store; import org.apache.bigtop.datagenerators.bigpetstore.datamodels.inputs.ZipcodeRecord; import org.apache.bigtop.datagenerators.samplers.SeedFactory; import org.apache.bigtop.datagenerators.samplers.pdfs.ProbabilityDensityFunction; import org.apache.bigtop.datagenerators.samplers.samplers.ConditionalSampler; import org.apache.bigtop.datagenerators.samplers.samplers.RouletteWheelSampler; import org.apache.bigtop.datagenerators.samplers.samplers.Sampler; import org.apache.bigtop.datagenerators.samplers.samplers.SequenceSampler; import org.apache.commons.lang3.tuple.Pair; import org.junit.Test; import com.google.common.collect.Lists; import com.google.common.collect.Maps; public class TestCustomerSampler { protected 
ConditionalSampler<ZipcodeRecord, Store> buildLocationSampler(List<Store> stores, List<ZipcodeRecord> records, SeedFactory factory) { final Map<Store, Sampler<ZipcodeRecord>> locationSamplers = Maps.newHashMap(); for(Store store : stores) { ProbabilityDensityFunction<ZipcodeRecord> locationPDF = new CustomerLocationPDF(records, store, Constants.AVERAGE_CUSTOMER_STORE_DISTANCE); Sampler<ZipcodeRecord> locationSampler = RouletteWheelSampler.create(records, locationPDF, factory); locationSamplers.put(store, locationSampler); } return new ConditionalSampler<ZipcodeRecord, Store>() { public ZipcodeRecord sample(Store store) throws Exception { return locationSamplers.get(store).sample(); } }; } @Test public void testBuild() throws Exception { SeedFactory factory = new SeedFactory(1234); List<Pair<String, String>> nameList = Lists.newArrayList(); nameList.add(Pair.of("Fred", "Fred")); nameList.add(Pair.of("Gary", "Gary")); nameList.add(Pair.of("George", "George")); nameList.add(Pair.of("Fiona", "Fiona")); List<ZipcodeRecord> zipcodes = Arrays.asList(new ZipcodeRecord[] { new ZipcodeRecord("11111", Pair.of(1.0, 1.0), "AZ", "Tempte", 30000.0, 100), new ZipcodeRecord("22222", Pair.of(2.0, 2.0), "AZ", "Phoenix", 45000.0, 200), new ZipcodeRecord("33333", Pair.of(3.0, 3.0), "AZ", "Flagstaff", 60000.0, 300) }); List<Store> stores = new ArrayList<Store>(); for(int i = 0; i < zipcodes.size(); i++) { Store store = new Store(i, "Store_" + i, zipcodes.get(i)); stores.add(store); } Sampler<Integer> idSampler = new SequenceSampler(); Sampler<Pair<String, String>> nameSampler = RouletteWheelSampler.createUniform(nameList, factory); Sampler<Store> storeSampler = RouletteWheelSampler.createUniform(stores, factory); ConditionalSampler<ZipcodeRecord, Store> zipcodeSampler = buildLocationSampler(stores, zipcodes, factory); Sampler<Customer> sampler = new CustomerSampler(idSampler, nameSampler, storeSampler, zipcodeSampler); Customer customer = sampler.sample(); assertNotNull(customer); 
assertTrue(customer.getId() >= 0); assertNotNull(customer.getName()); assertTrue(nameList.contains(customer.getName())); assertNotNull(customer.getLocation()); assertTrue(zipcodes.contains(customer.getLocation())); } }
apache-2.0
liorgins/old-regression-maven-compat
src/com/aqua/jsystemobject/TestPropertiesAnalyzer.java
3401
package com.aqua.jsystemobject;

import java.io.File;
import java.io.StringWriter;
import java.util.Enumeration;
import java.util.Properties;

import javax.xml.transform.OutputKeys;
import javax.xml.transform.Result;
import javax.xml.transform.Source;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;

import jsystem.extensions.analyzers.document.DocumentAnalyzer;
import jsystem.utils.FileUtils;
import jsystem.utils.StringUtils;

import org.apache.xpath.XPathAPI;
import org.w3c.dom.NodeList;

/**
 * Given a reports.xml DOM object, a test index and a set of Properties,
 * verifies that the indexed test actually carries every given property
 * with the expected value.
 * <p>
 * Results are reported through the {@code status}/{@code title}/{@code message}
 * fields inherited from {@link DocumentAnalyzer}.
 *
 * @author goland
 */
public class TestPropertiesAnalyzer extends DocumentAnalyzer {

	// Index of the test element to inspect (matches the testIndex attribute).
	private int testIndex;
	// Expected property key/value pairs.
	private Properties props;

	/**
	 * @param testIndex index of the test in reports.xml to verify
	 * @param props     properties (and values) expected on that test
	 */
	public TestPropertiesAnalyzer(int testIndex,Properties props){
		this.testIndex = testIndex;
		this.props = props;
	}

	/**
	 * Pretty-prints the document into {@code message} for diagnostics, then
	 * locates the test by index and compares each expected property against
	 * the test's actual properties. Returns early (status=false) on the first
	 * missing or mismatched property.
	 */
	public void analyze() {
		// Phase 1: serialize the DOM into message so a failure report shows
		// the full XML that was analyzed.
		try {
			Source source = new DOMSource(doc);
			StringWriter stringWriter = new StringWriter();
			Result result = new StreamResult(stringWriter);
			Transformer xformer = TransformerFactory.newInstance().newTransformer();
			// Pretty-format the XML output.
			xformer.setOutputProperty(OutputKeys.INDENT, "yes");
			xformer.setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "4");
			// Capture the XML in a String.
			xformer.transform(source, result);
			message = stringWriter.toString();
		} catch (Exception e){
			title = "Error in analyzing test properties";
			message = StringUtils.getStackTrace(e);
			return;
		}
		// Phase 2: locate the test element and compare properties.
		try {
			NodeList nodeList = XPathAPI.selectNodeList(doc,"/reports/test[@testIndex=\"" + testIndex+ "\"]");
			int elementsFound = nodeList.getLength();
			if(elementsFound == 0){
				status = false;
				title = " test with index "+testIndex + " not found";
				return;
			}
			// The test's properties are stored as one encoded attribute string.
			String properties = nodeList.item(0).getAttributes().getNamedItem("properties").getNodeValue();
			Properties actualProps = StringUtils.stringToProperties(properties);
			Enumeration<Object> iter = props.keys();
			while (iter.hasMoreElements()){
				Object key = iter.nextElement();
				Object val = props.get(key);
				// Expected key must exist...
				if (!actualProps.containsKey(key)){
					title = "Property " + key.toString() + " not found in test " + testIndex;
					status = false;
					message = actualProps.toString();
					return;
				}
				// ...and carry the expected value.
				Object actualVal = actualProps.get(key);
				if (!actualVal.equals(val)){
					title = "Property " + key.toString() + " of test " + testIndex + " expected value " + val + " actual value: " + actualVal;
					status = false;
					return;
				}
			}
			title = " Properties found as expected";
			status = true;
		} catch (Exception e) {
			title = "Error in analyzing test properties";
			message = StringUtils.getStackTrace(e);
			return;
		}
	}

	/**
	 * Ad-hoc manual driver: runs the analyzer against a local reports file.
	 * NOTE(review): relies on a hard-coded Windows path ("C:/TAS/reports.0.xml").
	 */
	public static void main(String args[]) throws Exception {
		Properties props = new Properties();
		props.put("linkToFile","<A href=\"test_1/MyFile.txt\">MyFile.txt</A>");
		props.put("linkToOne","<A href=\"http://www.one.co.il\">www.one.co.il</A>");
		TestPropertiesAnalyzer analyzer = new TestPropertiesAnalyzer(0,props);
		analyzer.setTestAgainst(FileUtils.readDocumentFromFile(new File("C:/TAS/reports.0.xml")));
		analyzer.analyze();
	}
}
apache-2.0
luchuangbin/test1
src/com/mit/dstore/entity/UserAmountExchange.java
652
package com.mit.dstore.entity;

import java.util.List;

/**
 * JSON response wrapper carrying a list of {@link UserAmountExchangeInfo}
 * records.
 */
public class UserAmountExchange extends JSON {

	/** Serialization version id, kept for wire compatibility. */
	private static final long serialVersionUID = -8817478648977034932L;

	// Renamed from "Object" — that name shadowed java.lang.Object. The JSON
	// property name is unchanged because it is derived from the getter/setter
	// names (getObject/setObject), which are preserved.
	private List<UserAmountExchangeInfo> object = null;

	/**
	 * @return the wrapped list of exchange records, or {@code null} if unset
	 */
	public List<UserAmountExchangeInfo> getObject() {
		return object;
	}

	/**
	 * @param object the list of exchange records to wrap
	 */
	public void setObject(List<UserAmountExchangeInfo> object) {
		this.object = object;
	}

	public UserAmountExchange() {
		super();
	}

	/**
	 * @param object the list of exchange records to wrap
	 */
	public UserAmountExchange(List<UserAmountExchangeInfo> object) {
		super();
		this.object = object;
	}
}
apache-2.0
apache/jena
jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/DatabaseMgr.java
4919
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.jena.tdb2; import org.apache.jena.dboe.base.file.Location; import org.apache.jena.query.Dataset; import org.apache.jena.sparql.core.DatasetGraph; import org.apache.jena.tdb2.store.DatasetGraphSwitchable; import org.apache.jena.tdb2.store.DatasetGraphTDB; import org.apache.jena.tdb2.sys.DatabaseConnection; import org.apache.jena.tdb2.sys.DatabaseOps; import org.apache.jena.tdb2.sys.TDBInternal; /** Operations for TDBS DatasetGraph, including admin operations * See {@link TDB2Factory} for creating API-level {@link Dataset Datasets}. * * @see TDB2Factory */ public class DatabaseMgr { private DatabaseMgr() {} // All creation of DatasetGraph for TDB2 goes through this method. 
private static DatasetGraph DB_ConnectCreate(Location location) { return DatabaseConnection.connectCreate(location).getDatasetGraph(); } /** Create or connect to a TDB2-backed dataset */ public static DatasetGraph connectDatasetGraph(Location location) { return DB_ConnectCreate(location); } /** Create or connect to a TDB2-backed dataset */ public static DatasetGraph connectDatasetGraph(String location) { return connectDatasetGraph(Location.create(location)); } /** * Compact a datasets which must be a switchable TDB database. * This is the normal dataset type for on-disk TDB2 databases. * * Deletes old database after successful compaction if `shouldDeleteOld` is `true`. * * @param container * * @deprecated Use `compact(container, false)` instead. */ @Deprecated public static void compact(DatasetGraph container) { compact(container, false); } /** * Compact a datasets which must be a switchable TDB database. * This is the normal dataset type for on-disk TDB2 databases. * * Deletes old database after successful compaction if `shouldDeleteOld` is `true`. * * @param container * @param shouldDeleteOld */ public static void compact(DatasetGraph container, boolean shouldDeleteOld) { DatasetGraphSwitchable dsg = requireSwitchable(container); DatabaseOps.compact(dsg, shouldDeleteOld); } /** * Create a backup for a switchable TDB database. This is the normal dataset type for * on-disk TDB2 databases. * <p> * The backup is created in the databases folder, under "Backups". * <p> * Backup creates a consistent copy og the database. It is performed as a read-transaction * and does not lock out other use of the dataset. * * @param container * @return File name of the backup. 
*/ public static String backup(DatasetGraph container) { DatasetGraphSwitchable dsg = requireSwitchable(container); return DatabaseOps.backup(dsg); } /** Create an in-memory TDB2-backed dataset (for testing) */ public static DatasetGraph createDatasetGraph() { return connectDatasetGraph(Location.mem()); } /** Return whether a {@code DatasetGraph} is a TDB2 database. */ public static boolean isTDB2(DatasetGraph datasetGraph) { return TDBInternal.isTDB2(datasetGraph); } /** Return the location of a DatasetGraph if it is backed by TDB, else null. */ public static Location location(DatasetGraph datasetGraph) { DatasetGraphSwitchable dsg = requireSwitchable(datasetGraph); if ( dsg == null ) return null; return dsg.getLocation(); } private static DatasetGraphSwitchable requireSwitchable(DatasetGraph datasetGraph) { if ( datasetGraph instanceof DatasetGraphSwitchable ) return (DatasetGraphSwitchable)datasetGraph; throw new TDBException("Not a switchable TDB database"); } static DatasetGraphTDB requireDirect(DatasetGraph datasetGraph) { DatasetGraphTDB dsg = TDBInternal.getDatasetGraphTDB(datasetGraph); if ( dsg == null ) throw new TDBException("Not a TDB database (argument is neither a switchable nor direct TDB DatasetGraph)"); return dsg; } }
apache-2.0
winval/druid
server/src/test/java/io/druid/server/coordinator/DruidCoordinatorConfigTest.java
4244
/*
 * Licensed to Metamarkets Group Inc. (Metamarkets) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Metamarkets licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package io.druid.server.coordinator;

import io.druid.java.util.common.config.Config;
import org.joda.time.Duration;
import org.junit.Assert;
import org.junit.Test;
import org.skife.config.ConfigurationObjectFactory;

import java.util.Properties;

/**
 * Verifies that {@link DruidCoordinatorConfig} is bound correctly by the
 * skife config factory: once with an empty Properties (defaults), and once
 * with every {@code druid.coordinator.*} property overridden.
 */
public class DruidCoordinatorConfigTest
{
  @Test
  public void testDeserialization() throws Exception
  {
    ConfigurationObjectFactory factory = Config.createFactory(new Properties());

    // Part 1: empty Properties — every getter must return its documented default.
    DruidCoordinatorConfig config = factory.build(DruidCoordinatorConfig.class);
    Assert.assertEquals(new Duration("PT300s"), config.getCoordinatorStartDelay());
    Assert.assertEquals(new Duration("PT60s"), config.getCoordinatorPeriod());
    Assert.assertEquals(new Duration("PT1800s"), config.getCoordinatorIndexingPeriod());
    // Merge/convert/kill features default to disabled.
    Assert.assertFalse(config.isMergeSegments());
    Assert.assertFalse(config.isConvertSegments());
    Assert.assertFalse(config.isKillSegments());
    Assert.assertFalse(config.isKillPendingSegments());
    // Default kill period is one day (in milliseconds).
    Assert.assertEquals(86400000, config.getCoordinatorKillPeriod().getMillis());
    // Negative retain duration is the configured sentinel default.
    Assert.assertEquals(-1000, config.getCoordinatorKillDurationToRetain().getMillis());
    Assert.assertEquals(0, config.getCoordinatorKillMaxSegments());
    // Default load timeout is 15 minutes.
    Assert.assertEquals(new Duration(15 * 60 * 1000), config.getLoadTimeoutDelay());
    Assert.assertNull(config.getConsoleStatic());
    Assert.assertEquals(Duration.millis(50), config.getLoadQueuePeonRepeatDelay());

    // Part 2: non-defaults — override every property and check each is picked up.
    Properties props = new Properties();
    props.setProperty("druid.coordinator.startDelay", "PT1s");
    props.setProperty("druid.coordinator.period", "PT1s");
    props.setProperty("druid.coordinator.period.indexingPeriod", "PT1s");
    props.setProperty("druid.coordinator.merge.on", "true");
    props.setProperty("druid.coordinator.conversion.on", "true");
    props.setProperty("druid.coordinator.kill.on", "true");
    props.setProperty("druid.coordinator.kill.period", "PT1s");
    props.setProperty("druid.coordinator.kill.durationToRetain", "PT1s");
    props.setProperty("druid.coordinator.kill.maxSegments", "10000");
    props.setProperty("druid.coordinator.kill.pendingSegments.on", "true");
    props.setProperty("druid.coordinator.load.timeout", "PT1s");
    props.setProperty("druid.coordinator.console.static", "test");
    props.setProperty("druid.coordinator.loadqueuepeon.repeatDelay", "PT0.100s");

    factory = Config.createFactory(props);
    config = factory.build(DruidCoordinatorConfig.class);

    Assert.assertEquals(new Duration("PT1s"), config.getCoordinatorStartDelay());
    Assert.assertEquals(new Duration("PT1s"), config.getCoordinatorPeriod());
    Assert.assertEquals(new Duration("PT1s"), config.getCoordinatorIndexingPeriod());
    Assert.assertTrue(config.isMergeSegments());
    Assert.assertTrue(config.isConvertSegments());
    Assert.assertTrue(config.isKillSegments());
    Assert.assertTrue(config.isKillPendingSegments());
    Assert.assertEquals(new Duration("PT1s"), config.getCoordinatorKillPeriod());
    Assert.assertEquals(new Duration("PT1s"), config.getCoordinatorKillDurationToRetain());
    Assert.assertEquals(10000, config.getCoordinatorKillMaxSegments());
    Assert.assertEquals(new Duration("PT1s"), config.getLoadTimeoutDelay());
    Assert.assertEquals("test", config.getConsoleStatic());
    Assert.assertEquals(Duration.millis(100), config.getLoadQueuePeonRepeatDelay());
  }
}
apache-2.0
nknize/elasticsearch
modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregatorFactory.java
4159
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.join.aggregations;

import org.apache.lucene.search.Query;
import org.elasticsearch.search.aggregations.AggregationExecutionException;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.CardinalityUpperBound;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.NonCollectingAggregator;
import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.aggregations.support.ValuesSource.Bytes.WithOrdinals;
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;

import java.io.IOException;
import java.util.Map;

import static org.elasticsearch.search.aggregations.support.AggregationUsageService.OTHER_SUBTYPE;

/**
 * Factory for the {@code children} join aggregation. Builds
 * {@link ParentToChildrenAggregator} instances that map from parent documents
 * (matching {@code parentFilter}) to their child documents (matching
 * {@code childFilter}) using a global-ordinals values source.
 */
public class ChildrenAggregatorFactory extends ValuesSourceAggregatorFactory {

    private final Query parentFilter;
    private final Query childFilter;

    /**
     * @param name                the aggregation name
     * @param config              values-source configuration for the join field
     * @param childFilter         query matching the child documents
     * @param parentFilter        query matching the parent documents
     * @param context             aggregation context for this search
     * @param parent              parent aggregator factory, or null at top level
     * @param subFactoriesBuilder builder for sub-aggregations
     * @param metadata            user-supplied metadata echoed back in the response
     */
    public ChildrenAggregatorFactory(String name,
                                     ValuesSourceConfig config,
                                     Query childFilter,
                                     Query parentFilter,
                                     AggregationContext context,
                                     AggregatorFactory parent,
                                     AggregatorFactories.Builder subFactoriesBuilder,
                                     Map<String, Object> metadata) throws IOException {
        super(name, config, context, parent, subFactoriesBuilder, metadata);
        this.childFilter = childFilter;
        this.parentFilter = parentFilter;
    }

    @Override
    protected Aggregator createUnmapped(Aggregator parent, Map<String, Object> metadata) throws IOException {
        // The join field is unmapped in this index, so there is nothing to
        // collect: return an aggregator that only yields an empty result.
        return new NonCollectingAggregator(name, context, parent, factories, metadata) {
            @Override
            public InternalAggregation buildEmptyAggregation() {
                return new InternalChildren(name, 0, buildEmptySubAggregations(), metadata());
            }
        };
    }

    @Override
    protected Aggregator doCreateInternal(Aggregator parent, CardinalityUpperBound cardinality,
                                          Map<String, Object> metadata) throws IOException {
        ValuesSource rawValuesSource = config.getValuesSource();
        if (rawValuesSource instanceof WithOrdinals == false) {
            // BUGFIX: the original message was missing the space before "is",
            // producing e.g. "ValuesSource type Fooi s not supported..." run together.
            throw new AggregationExecutionException("ValuesSource type " + rawValuesSource.toString()
                + " is not supported for aggregation " + this.name());
        }
        WithOrdinals valuesSource = (WithOrdinals) rawValuesSource;
        long maxOrd = valuesSource.globalMaxOrd(context.searcher());
        return new ParentToChildrenAggregator(name, factories, context, parent, childFilter,
            parentFilter, valuesSource, maxOrd, cardinality, metadata);
    }

    @Override
    public String getStatsSubtype() {
        // Child Aggregation is registered in non-standard way, so it might return child's values type
        return OTHER_SUBTYPE;
    }
}
apache-2.0
vergilchiu/hive
hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseTableSnapshotInputFormatUtil.java
4739
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hive.hbase;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl;
import org.apache.hadoop.mapred.InputSplit;

import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;

/**
 * A helper class to isolate newer HBase features from users running against older versions of
 * HBase that don't provide those features.
 *
 * TODO: remove this class when it's okay to drop support for earlier version of HBase.
 */
public class HBaseTableSnapshotInputFormatUtil {

  private static final Logger LOG = LoggerFactory.getLogger(HBaseTableSnapshotInputFormatUtil.class);

  /** The class we look for to determine if hbase snapshots are supported. */
  private static final String TABLESNAPSHOTINPUTFORMAT_CLASS
    = "org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl";

  /** Writable split class hydrated reflectively in {@link #createTableSnapshotRegionSplit()}. */
  private static final String TABLESNAPSHOTREGIONSPLIT_CLASS
    = "org.apache.hadoop.hbase.mapred.TableSnapshotInputFormat$TableSnapshotRegionSplit";

  /** True when {@link #TABLESNAPSHOTINPUTFORMAT_CLASS} is present. */
  private static final boolean SUPPORTS_TABLE_SNAPSHOTS;

  static {
    boolean support = false;
    try {
      Class<?> clazz = Class.forName(TABLESNAPSHOTINPUTFORMAT_CLASS);
      support = clazz != null;
    } catch (ClassNotFoundException e) {
      // pass: this HBase version simply doesn't ship the snapshot input format
    }
    SUPPORTS_TABLE_SNAPSHOTS = support;
  }

  /**
   * Asserts that the HBase runtime supports {@link HiveHBaseTableSnapshotInputFormat}.
   *
   * <p>BUGFIX (doc only): the previous javadoc claimed this method "returns true" — it is
   * {@code void} and instead throws when table snapshots are unsupported.
   *
   * @throws RuntimeException when the HBase version is older than 0.98.3 (see HIVE-6584)
   */
  public static void assertSupportsTableSnapshots() {
    if (!SUPPORTS_TABLE_SNAPSHOTS) {
      throw new RuntimeException("This version of HBase does not support Hive over table "
        + "snapshots. Please upgrade to at least HBase 0.98.3 or later. See HIVE-6584 for details.");
    }
  }

  /**
   * Configures {@code conf} for the snapshot job. Call only when
   * {@link #assertSupportsTableSnapshots()} returns true.
   */
  public static void configureJob(Configuration conf, String snapshotName, Path restoreDir)
      throws IOException {
    TableSnapshotInputFormatImpl.setInput(conf, snapshotName, restoreDir);
  }

  /**
   * Create a bare TableSnapshotRegionSplit. Needed because Writables require a
   * default-constructed instance to hydrate from the DataInput.
   *
   * TODO: remove once HBASE-11555 is fixed.
   *
   * @return a default-constructed split, or {@code null} when snapshots are unsupported
   */
  public static InputSplit createTableSnapshotRegionSplit() {
    try {
      assertSupportsTableSnapshots();
    } catch (RuntimeException e) {
      LOG.debug("Probably don't support table snapshots. Returning null instance.", e);
      return null;
    }

    try {
      // Class.forName cannot express the InputSplit bound, hence the checked cast.
      @SuppressWarnings("unchecked")
      Class<? extends InputSplit> resultType =
          (Class<? extends InputSplit>) Class.forName(TABLESNAPSHOTREGIONSPLIT_CLASS);
      Constructor<? extends InputSplit> cxtor = resultType.getDeclaredConstructor();
      // The no-arg constructor is not public; force access for Writable hydration.
      cxtor.setAccessible(true);
      return cxtor.newInstance();
    } catch (ClassNotFoundException e) {
      throw new UnsupportedOperationException(
        "Unable to find " + TABLESNAPSHOTREGIONSPLIT_CLASS, e);
    } catch (IllegalAccessException e) {
      throw new UnsupportedOperationException(
        "Unable to access specified class " + TABLESNAPSHOTREGIONSPLIT_CLASS, e);
    } catch (InstantiationException e) {
      throw new UnsupportedOperationException(
        "Unable to instantiate specified class " + TABLESNAPSHOTREGIONSPLIT_CLASS, e);
    } catch (InvocationTargetException e) {
      throw new UnsupportedOperationException(
        "Constructor threw an exception for " + TABLESNAPSHOTREGIONSPLIT_CLASS, e);
    } catch (NoSuchMethodException e) {
      throw new UnsupportedOperationException(
        "Unable to find suitable constructor for class " + TABLESNAPSHOTREGIONSPLIT_CLASS, e);
    }
  }
}
apache-2.0
mayonghui2112/helloWorld
sourceCode/testMaven/onjava8/src/main/java/collectiontopics/HTMLColorTest.java
2017
package collectiontopics;// collectiontopics/HTMLColorTest.java // (c)2017 MindView LLC: see Copyright.txt // We make no guarantees that this code is fit for any purpose. // Visit http://OnJava8.com for more book information. import static onjava.HTMLColors.*; public class HTMLColorTest { static final int DISPLAY_SIZE = 20; public static void main(String[] args) { show(MAP, DISPLAY_SIZE); border(); showInv(INVMAP, DISPLAY_SIZE); border(); show(LIST, DISPLAY_SIZE); border(); showrgb(RGBLIST, DISPLAY_SIZE); } } /* Output: 0xF0F8FF: AliceBlue 0xFAEBD7: AntiqueWhite 0x7FFFD4: Aquamarine 0xF0FFFF: Azure 0xF5F5DC: Beige 0xFFE4C4: Bisque 0x000000: Black 0xFFEBCD: BlanchedAlmond 0x0000FF: Blue 0x8A2BE2: BlueViolet 0xA52A2A: Brown 0xDEB887: BurlyWood 0x5F9EA0: CadetBlue 0x7FFF00: Chartreuse 0xD2691E: Chocolate 0xFF7F50: Coral 0x6495ED: CornflowerBlue 0xFFF8DC: Cornsilk 0xDC143C: Crimson 0x00FFFF: Cyan ****************************** AliceBlue 0xF0F8FF AntiqueWhite 0xFAEBD7 Aquamarine 0x7FFFD4 Azure 0xF0FFFF Beige 0xF5F5DC Bisque 0xFFE4C4 Black 0x000000 BlanchedAlmond 0xFFEBCD Blue 0x0000FF BlueViolet 0x8A2BE2 Brown 0xA52A2A BurlyWood 0xDEB887 CadetBlue 0x5F9EA0 Chartreuse 0x7FFF00 Chocolate 0xD2691E Coral 0xFF7F50 CornflowerBlue 0x6495ED Cornsilk 0xFFF8DC Crimson 0xDC143C Cyan 0x00FFFF ****************************** AliceBlue AntiqueWhite Aquamarine Azure Beige Bisque Black BlanchedAlmond Blue BlueViolet Brown BurlyWood CadetBlue Chartreuse Chocolate Coral CornflowerBlue Cornsilk Crimson Cyan ****************************** 0xF0F8FF 0xFAEBD7 0x7FFFD4 0xF0FFFF 0xF5F5DC 0xFFE4C4 0x000000 0xFFEBCD 0x0000FF 0x8A2BE2 0xA52A2A 0xDEB887 0x5F9EA0 0x7FFF00 0xD2691E 0xFF7F50 0x6495ED 0xFFF8DC 0xDC143C 0x00FFFF */
apache-2.0
davinash/geode
geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchEntryMessage.java
14183
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
 * agreements. See the NOTICE file distributed with this work for additional information regarding
 * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the License. You may obtain a
 * copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License
 * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing permissions and limitations under
 * the License.
 */
package org.apache.geode.internal.cache.partitioned;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Collections;
import java.util.Set;

import org.apache.logging.log4j.Logger;

import org.apache.geode.DataSerializer;
import org.apache.geode.InternalGemFireError;
import org.apache.geode.admin.OperationCancelledException;
import org.apache.geode.cache.CacheException;
import org.apache.geode.cache.EntryNotFoundException;
import org.apache.geode.cache.TransactionException;
import org.apache.geode.distributed.internal.ClusterDistributionManager;
import org.apache.geode.distributed.internal.DistributionManager;
import org.apache.geode.distributed.internal.DistributionMessage;
import org.apache.geode.distributed.internal.InternalDistributedSystem;
import org.apache.geode.distributed.internal.ReplyException;
import org.apache.geode.distributed.internal.ReplyMessage;
import org.apache.geode.distributed.internal.ReplyProcessor21;
import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
import org.apache.geode.internal.Assert;
import org.apache.geode.internal.InternalDataSerializer;
import org.apache.geode.internal.NanoTimer;
import org.apache.geode.internal.cache.DataLocationException;
import org.apache.geode.internal.cache.EntrySnapshot;
import org.apache.geode.internal.cache.ForceReattemptException;
import org.apache.geode.internal.cache.KeyInfo;
import org.apache.geode.internal.cache.PartitionedRegion;
import org.apache.geode.internal.cache.PartitionedRegionDataStore;
import org.apache.geode.internal.cache.PrimaryBucketException;
import org.apache.geode.internal.logging.log4j.LogMarker;
import org.apache.geode.internal.serialization.DeserializationContext;
import org.apache.geode.internal.serialization.SerializationContext;
import org.apache.geode.logging.internal.log4j.api.LogService;

/**
 * This message is used as the request for a
 * {@link org.apache.geode.cache.Region#getEntry(Object)}operation. The reply is sent in a
 * {@link org.apache.geode.internal.cache.partitioned.FetchEntryMessage.FetchEntryReplyMessage}.
 *
 * @since GemFire 5.1
 */
public class FetchEntryMessage extends PartitionMessage {
  private static final Logger logger = LogService.getLogger();

  // The key whose entry is fetched from the remote bucket. Serialized after
  // the PartitionMessage fields in toData/fromData.
  private Object key;

  // Travels inside the compressed flags short (HAS_ACCESS bit), not as a
  // separate serialized field; see computeCompressedShort/setBooleans.
  // NOTE(review): the flag is carried over the wire but not read by
  // operateOnPartitionedRegion in this class — presumably consumed by the
  // superclass or a later revision; confirm before removing.
  private boolean access;

  // reusing an unused flag for HAS_ACCESS
  protected static final int HAS_ACCESS = HAS_FILTER_INFO;

  /**
   * Empty constructor to satisfy {@link DataSerializer} requirements
   */
  public FetchEntryMessage() {}

  private FetchEntryMessage(InternalDistributedMember recipient, int regionId,
      ReplyProcessor21 processor, final Object key, boolean access) {
    super(recipient, regionId, processor);
    this.key = key;
    this.access = access;
  }

  /**
   * Sends a PartitionedRegion {@link org.apache.geode.cache.Region#getEntry(Object)} message
   *
   * @param recipient the member that the getEntry message is sent to
   * @param r the PartitionedRegion for which getEntry was performed upon
   * @param key the object to which the value should be feteched
   * @return the processor used to fetch the returned value associated with the key
   * @throws ForceReattemptException if the peer is no longer available
   */
  public static FetchEntryResponse send(InternalDistributedMember recipient, PartitionedRegion r,
      final Object key, boolean access) throws ForceReattemptException {
    Assert.assertTrue(recipient != null, "FetchEntryMessage NULL recipient");
    FetchEntryResponse p =
        new FetchEntryResponse(r.getSystem(), Collections.singleton(recipient), r, key);
    FetchEntryMessage m = new FetchEntryMessage(recipient, r.getPRId(), p, key, access);
    m.setTransactionDistributed(r.getCache().getTxManager().isDistributed());

    // A non-null/non-empty failure set means the recipient never received the
    // message, so the caller must retry on another member.
    Set failures = r.getDistributionManager().putOutgoing(m);
    if (failures != null && failures.size() > 0) {
      throw new ForceReattemptException(
          String.format("Failed sending < %s >", m));
    }

    return p;
  }

  public FetchEntryMessage(DataInput in) throws IOException, ClassNotFoundException {
    fromData(in, InternalDataSerializer.createDeserializationContext(in));
  }

  @Override
  public boolean isSevereAlertCompatible() {
    // allow forced-disconnect processing for all cache op messages
    return true;
  }

  @Override
  protected boolean operateOnPartitionedRegion(ClusterDistributionManager dm, PartitionedRegion r,
      long startTime) throws ForceReattemptException {
    // FetchEntryMessage is used in refreshing client caches during interest list recovery,
    // so don't be too verbose or hydra tasks may time out
    PartitionedRegionDataStore ds = r.getDataStore();
    EntrySnapshot val;
    if (ds != null) {
      try {
        KeyInfo keyInfo = r.getKeyInfo(key);
        val = (EntrySnapshot) r.getDataView().getEntryOnRemote(keyInfo, r, true);
        r.getPrStats().endPartitionMessagesProcessing(startTime);
        FetchEntryReplyMessage.send(getSender(), getProcessorId(), val, dm, null);
      } catch (TransactionException tex) {
        // Each failure mode below is marshalled back to the requester as a
        // ReplyException so the sender-side processor can rethrow it locally.
        FetchEntryReplyMessage.send(getSender(), getProcessorId(), null, dm,
            new ReplyException(tex));
      } catch (PRLocallyDestroyedException pde) {
        FetchEntryReplyMessage.send(getSender(), getProcessorId(), null, dm,
            new ReplyException(new ForceReattemptException(
                "Encountered PRLocallyDestroyed", pde)));
      } catch (EntryNotFoundException enfe) {
        FetchEntryReplyMessage.send(getSender(), getProcessorId(), null, dm, new ReplyException(
            "entry not found", enfe));
      } catch (PrimaryBucketException pbe) {
        FetchEntryReplyMessage.send(getSender(), getProcessorId(), null, dm,
            new ReplyException(pbe));
      } catch (ForceReattemptException pbe) {
        pbe.checkKey(key);
        // Slightly odd -- we're marshalling the retry to the peer on another host...
        FetchEntryReplyMessage.send(getSender(), getProcessorId(), null, dm,
            new ReplyException(pbe));
      } catch (DataLocationException e) {
        FetchEntryReplyMessage.send(getSender(), getProcessorId(), null, dm, new ReplyException(e));
      }
    } else {
      throw new InternalGemFireError(
          "FetchEntryMessage message sent to wrong member");
    }

    // Unless there was an exception thrown, this message handles sending the
    // response
    return false;
  }

  @Override
  public boolean canStartRemoteTransaction() {
    return true;
  }

  @Override
  protected void appendFields(StringBuilder buff) {
    super.appendFields(buff);
    buff.append("; key=").append(this.key);
  }

  @Override
  public int getDSFID() {
    return PR_FETCH_ENTRY_MESSAGE;
  }

  @Override
  public void fromData(DataInput in, DeserializationContext context)
      throws IOException, ClassNotFoundException {
    super.fromData(in, context);
    this.key = DataSerializer.readObject(in);
  }

  @Override
  protected void setBooleans(short s, DataInput in, DeserializationContext context)
      throws IOException, ClassNotFoundException {
    super.setBooleans(s, in, context);
    // Recover the access flag from the compressed flags short written by
    // computeCompressedShort.
    this.access = ((s & HAS_ACCESS) != 0);
  }

  @Override
  public void toData(DataOutput out, SerializationContext context) throws IOException {
    super.toData(out, context);
    DataSerializer.writeObject(this.key, out);
  }

  @Override
  protected short computeCompressedShort(short s) {
    s = super.computeCompressedShort(s);
    // Fold the access flag into the message's flags short rather than
    // serializing it as a separate field.
    if (this.access) {
      s |= HAS_ACCESS;
    }
    return s;
  }

  public void setKey(Object key) {
    this.key = key;
  }

  /**
   * This message is used for the reply to a {@link FetchEntryMessage}.
   *
   * @since GemFire 5.0
   */
  public static class FetchEntryReplyMessage extends ReplyMessage {
    // The fetched entry snapshot, or null when the fetch failed (the failure
    // then travels as the ReplyMessage's exception).
    // NOTE(review): the previous comment here ("Propagated exception from
    // remote node to operation initiator") described the exception, not this
    // field.
    private EntrySnapshot value;

    /**
     * Empty constructor to conform to DataSerializable interface
     */
    public FetchEntryReplyMessage() {}

    public FetchEntryReplyMessage(DataInput in) throws IOException, ClassNotFoundException {
      fromData(in, InternalDataSerializer.createDeserializationContext(in));
    }

    private FetchEntryReplyMessage(int processorId, EntrySnapshot value, ReplyException re) {
      this.processorId = processorId;
      this.value = value;
      setException(re);
    }

    /** Send an ack */
    public static void send(InternalDistributedMember recipient, int processorId,
        EntrySnapshot value, DistributionManager dm, ReplyException re) {
      Assert.assertTrue(recipient != null, "FetchEntryReplyMessage NULL recipient");
      FetchEntryReplyMessage m = new FetchEntryReplyMessage(processorId, value, re);
      m.setRecipient(recipient);
      dm.putOutgoing(m);
    }

    /**
     * Processes this message. This method is invoked by the receiver of the message.
     *
     * @param dm the distribution manager that is processing the message.
     */
    @Override
    public void process(final DistributionManager dm, final ReplyProcessor21 processor) {
      final long startTime = getTimestamp();
      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
        logger.trace(LogMarker.DM_VERBOSE,
            "FetchEntryReplyMessage process invoking reply processor with processorId: {}",
            this.processorId);
      }

      if (processor == null) {
        // The waiting processor has already been cancelled/timed out; nothing
        // to deliver to.
        if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
          logger.trace(LogMarker.DM_VERBOSE, "FetchEntryReplyMessage processor not found");
        }
        return;
      }
      processor.process(this);

      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
        logger.trace(LogMarker.DM_VERBOSE, "{} processed {}", processor, this);
      }
      dm.getStats().incReplyMessageTime(NanoTimer.getTime() - startTime);
    }

    public EntrySnapshot getValue() {
      return this.value;
    }

    @Override
    public void toData(DataOutput out, SerializationContext context) throws IOException {
      super.toData(out, context);
      // Leading boolean marks a null entry so fromData knows whether an
      // EntrySnapshot payload follows.
      if (this.value == null) {
        out.writeBoolean(true); // null entry
      } else {
        out.writeBoolean(false); // null entry
        InternalDataSerializer.invokeToData(this.value, out);
      }
    }

    @Override
    public int getDSFID() {
      return PR_FETCH_ENTRY_REPLY_MESSAGE;
    }

    @Override
    public void fromData(DataInput in, DeserializationContext context)
        throws IOException, ClassNotFoundException {
      super.fromData(in, context);
      boolean nullEntry = in.readBoolean();
      if (!nullEntry) {
        // since the Entry object shares state with the PartitionedRegion,
        // we have to find the region and ask it to create a new Entry instance
        // to be populated from the DataInput
        FetchEntryResponse processor =
            (FetchEntryResponse) ReplyProcessor21.getProcessor(this.processorId);
        if (processor == null) {
          throw new OperationCancelledException("This operation was cancelled (null processor)");
        }
        this.value = new EntrySnapshot(in, processor.partitionedRegion);
      }
    }

    @Override
    public StringBuilder getStringBuilder() {
      StringBuilder sb = super.getStringBuilder();
      if (getException() == null) {
        sb.append(" returning value=").append(this.value);
      }
      return sb;
    }
  }

  /**
   * A processor to capture the value returned by {@link FetchEntryMessage.FetchEntryReplyMessage}
   */
  public static class FetchEntryResponse extends PartitionResponse {
    // Written by process() on the reply thread, read by waitForResponse() on
    // the requesting thread; volatile provides the visibility handoff.
    private volatile EntrySnapshot returnValue;

    // Needed by FetchEntryReplyMessage.fromData to rebuild the EntrySnapshot,
    // which shares state with its region.
    final PartitionedRegion partitionedRegion;
    final Object key;

    public FetchEntryResponse(InternalDistributedSystem ds, Set recipients,
        PartitionedRegion theRegion, Object key) {
      super(ds, recipients);
      partitionedRegion = theRegion;
      this.key = key;
    }

    @Override
    public void process(DistributionMessage msg) {
      try {
        if (msg instanceof FetchEntryReplyMessage) {
          FetchEntryReplyMessage reply = (FetchEntryReplyMessage) msg;
          this.returnValue = reply.getValue();
          if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
            logger.trace(LogMarker.DM_VERBOSE, "FetchEntryResponse return value is {}",
                this.returnValue);
          }
        }
      } finally {
        // Always let the superclass account for the reply, even if the cast
        // or logging above throws.
        super.process(msg);
      }
    }

    /**
     * @return Object associated with the key that was sent in the get message
     * @throws ForceReattemptException if the peer is no longer available
     */
    public EntrySnapshot waitForResponse() throws EntryNotFoundException, ForceReattemptException {
      try {
        waitForCacheException();
      } catch (ForceReattemptException e) {
        e.checkKey(key);
        final String msg = "FetchEntryResponse got remote ForceReattemptException; rethrowing";
        logger.debug(msg, e);
        throw e;
      } catch (EntryNotFoundException | TransactionException e) {
        // These carry meaning for the caller; rethrow unchanged.
        throw e;
      } catch (CacheException ce) {
        // Any other remote cache failure is converted into a retry request.
        logger.debug("FetchEntryResponse got remote CacheException; forcing reattempt.", ce);
        throw new ForceReattemptException(
            "FetchEntryResponse got remote CacheException; forcing reattempt.", ce);
      }
      return this.returnValue;
    }
  }
}
apache-2.0
krasserm/ipf
platform-camel/core/src/main/java/org/openehealth/ipf/platform/camel/core/process/splitter/support/NoopLineSplitterLogic.java
1191
/* * Copyright 2008 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.openehealth.ipf.platform.camel.core.process.splitter.support; /** * A {@link LineSplitterLogic} that simply returns the text line without further * splitting * * @author Jens Riemschneider */ public class NoopLineSplitterLogic implements LineSplitterLogic { /* (non-Javadoc) * @see org.openehealth.ipf.platform.camel.core.process.splitter.support.LineSplitterLogic#splitLine(java.lang.String) */ @Override public String[] splitLine(String line) { return new String[] { line }; } }
apache-2.0
akomakom/ehcache3
osgi-test/src/test/java/org/ehcache/osgi/SimpleOsgiTest.java
4421
/* * Copyright Terracotta, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.ehcache.osgi; import org.ehcache.Cache; import org.ehcache.CacheManager; import org.ehcache.config.builders.CacheManagerBuilder; import org.ehcache.impl.config.copy.DefaultCopierConfiguration; import org.ehcache.impl.copy.ReadWriteCopier; import org.ehcache.impl.copy.SerializingCopier; import org.ehcache.xml.XmlConfiguration; import org.junit.Test; import org.junit.runner.RunWith; import org.ops4j.pax.exam.Configuration; import org.ops4j.pax.exam.Option; import org.ops4j.pax.exam.junit.PaxExam; import org.ops4j.pax.exam.spi.reactors.ExamReactorStrategy; import org.ops4j.pax.exam.spi.reactors.PerMethod; import static org.ehcache.config.builders.CacheConfigurationBuilder.newCacheConfigurationBuilder; import static org.ehcache.config.builders.ResourcePoolsBuilder.heap; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.ops4j.pax.exam.CoreOptions.bundle; import static org.ops4j.pax.exam.CoreOptions.junitBundles; import static org.ops4j.pax.exam.CoreOptions.mavenBundle; import static org.ops4j.pax.exam.CoreOptions.options; /** * SimpleOsgiTest */ @RunWith(PaxExam.class) @ExamReactorStrategy(PerMethod.class) public class SimpleOsgiTest { @Configuration public Option[] config() { return options( mavenBundle("org.slf4j", "slf4j-api", System.getProperty("ehcache.osgi.slf4j.version")), mavenBundle("org.slf4j", "slf4j-simple", 
System.getProperty("ehcache.osgi.slf4j.version")).noStart(), bundle("file:" + System.getProperty("ehcache.osgi.jar")), junitBundles() ); } @Test public void testEhcache3AsBundle() { CacheManager cacheManager = CacheManagerBuilder.newCacheManagerBuilder() .withCache("myCache", newCacheConfigurationBuilder(Long.class, String.class, heap(10)) .build()) .build(true); Cache<Long, String> myCache = cacheManager.getCache("myCache", Long.class, String.class); myCache.put(42L, "DaAnswer!"); assertEquals("DaAnswer!", myCache.get(42L)); } @Test public void testEhcache3WithSerializationAndClientClass() { CacheManager cacheManager = CacheManagerBuilder.newCacheManagerBuilder() .withCache("myCache", newCacheConfigurationBuilder(Long.class, Person.class, heap(10)) .add(new DefaultCopierConfiguration<Person>(SerializingCopier.<Person>asCopierClass(), DefaultCopierConfiguration.Type.VALUE)) .withClassLoader(getClass().getClassLoader()) .build()) .build(true); Cache<Long, Person> myCache = cacheManager.getCache("myCache", Long.class, Person.class); myCache.put(42L, new Person("Arthur")); assertTrue(myCache.get(42L) instanceof Person); } @Test public void testCustomCopier() { CacheManager cacheManager = CacheManagerBuilder.newCacheManagerBuilder() .withCache("myCache", newCacheConfigurationBuilder(Long.class, String.class, heap(10)) .add(new DefaultCopierConfiguration<String>(StringCopier.class, DefaultCopierConfiguration.Type.VALUE)) .withClassLoader(getClass().getClassLoader()) .build()) .build(true); Cache<Long, String> cache = cacheManager.getCache("myCache", Long.class, String.class); cache.put(42L, "What's the question again?"); cache.get(42L); } @Test public void testEhcacheXMLConfig() throws Exception { XmlConfiguration configuration = new XmlConfiguration(getClass().getResource("/org/ehcache/osgi/ehcache-osgi.xml").toURI().toURL(), getClass().getClassLoader()); assertEquals(Person.class, configuration.getCacheConfigurations().get("bar").getValueType()); } public static class 
StringCopier extends ReadWriteCopier<String> { @Override public String copy(String obj) { return new String(obj); } } }
apache-2.0
sdl/ecommerce-framework
framework/ecommerce-framework-odata-service/src/main/java/com/sdl/ecommerce/odata/function/ECommerceOperation.java
1152
package com.sdl.ecommerce.odata.function; import com.sdl.ecommerce.odata.model.ODataQueryResult; import com.sdl.ecommerce.odata.service.ODataRequestContextHolder; import com.sdl.odata.api.ODataException; import com.sdl.odata.api.edm.model.Operation; import com.sdl.odata.api.processor.datasource.factory.DataSourceFactory; import com.sdl.odata.api.service.ODataRequestContext; /** * ECommerce Operation * * @author nic */ public abstract class ECommerceOperation implements Operation<ODataQueryResult> { // TODO: Refactor this to not be dependent to ODataQueryResult @Override public ODataQueryResult doOperation(ODataRequestContext oDataRequestContext, DataSourceFactory dataSourceFactory) throws ODataException { ODataRequestContextHolder.set(oDataRequestContext); try { return this.doECommerceOperation(oDataRequestContext, dataSourceFactory); } finally { ODataRequestContextHolder.clear(); } } protected abstract ODataQueryResult doECommerceOperation(ODataRequestContext oDataRequestContext, DataSourceFactory dataSourceFactory) throws ODataException; }
apache-2.0
jamesagnew/hapi-fhir
hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/packages/PackageInstallationSpec.java
6058
package ca.uhn.fhir.jpa.packages;

/*-
 * #%L
 * HAPI FHIR JPA Server
 * %%
 * Copyright (C) 2014 - 2022 Smile CDR, Inc.
 * %%
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * #L%
 */

import ca.uhn.fhir.model.api.annotation.ExampleSupplier;
import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;
import io.swagger.v3.oas.annotations.media.Schema;

import java.util.ArrayList;
import java.util.List;
import java.util.function.Supplier;

/**
 * Defines a set of instructions for installing an NPM package (either fetched by
 * name/version or supplied directly via URL / raw contents).
 *
 * <p>Note: auto-detection is disabled for all accessor kinds below, so ONLY members
 * explicitly annotated with {@link JsonProperty} take part in JSON (de)serialization.
 */
@Schema(
	name = "PackageInstallationSpec",
	description = "Defines a set of instructions for package installation"
)
@JsonPropertyOrder({
	"name", "version", "packageUrl", "installMode", "installResourceTypes", "validationMode"
})
@ExampleSupplier({PackageInstallationSpec.ExampleSupplier.class, PackageInstallationSpec.ExampleSupplier2.class})
@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonAutoDetect(creatorVisibility = JsonAutoDetect.Visibility.NONE, fieldVisibility = JsonAutoDetect.Visibility.NONE, getterVisibility = JsonAutoDetect.Visibility.NONE, isGetterVisibility = JsonAutoDetect.Visibility.NONE, setterVisibility = JsonAutoDetect.Visibility.NONE)
public class PackageInstallationSpec {

	@Schema(description = "The direct package URL")
	@JsonProperty("packageUrl")
	private String myPackageUrl;

	@Schema(description = "The NPM package Name")
	@JsonProperty("name")
	private String myName;

	@Schema(description = "The direct package version")
	@JsonProperty("version")
	private String myVersion;

	@Schema(description = "Should resources from this package be extracted from the package and installed into the repository individually")
	@JsonProperty("installMode")
	private InstallModeEnum myInstallMode;

	@Schema(description = "If resources are being installed individually, this is list provides the resource types to install. By default, all conformance resources will be installed.")
	@JsonProperty("installResourceTypes")
	private List<String> myInstallResourceTypes;

	@Schema(description = "Should dependencies be automatically resolved, fetched and installed with the same settings")
	@JsonProperty("fetchDependencies")
	private boolean myFetchDependencies;

	// FIX: this field previously lacked @JsonProperty. Because the class disables all
	// Jackson auto-detection (see @JsonAutoDetect above), it was silently dropped from
	// JSON serialization and could never be populated on deserialization.
	@Schema(description = "Any values provided here will be interpreted as a regex. Dependencies with an ID matching any regex will be skipped.")
	@JsonProperty("dependencyExcludes")
	private List<String> myDependencyExcludes;

	// Raw package bytes; intentionally excluded from JSON.
	@JsonIgnore
	private byte[] myPackageContents;

	/**
	 * Returns the (lazily initialized, never {@code null}) list of dependency-exclusion regexes.
	 */
	public List<String> getDependencyExcludes() {
		if (myDependencyExcludes == null) {
			myDependencyExcludes = new ArrayList<>();
		}
		return myDependencyExcludes;
	}

	public void setDependencyExcludes(List<String> theDependencyExcludes) {
		myDependencyExcludes = theDependencyExcludes;
	}

	public boolean isFetchDependencies() {
		return myFetchDependencies;
	}

	public PackageInstallationSpec setFetchDependencies(boolean theFetchDependencies) {
		myFetchDependencies = theFetchDependencies;
		return this;
	}

	public String getPackageUrl() {
		return myPackageUrl;
	}

	public PackageInstallationSpec setPackageUrl(String thePackageUrl) {
		myPackageUrl = thePackageUrl;
		return this;
	}

	public InstallModeEnum getInstallMode() {
		return myInstallMode;
	}

	public PackageInstallationSpec setInstallMode(InstallModeEnum theInstallMode) {
		myInstallMode = theInstallMode;
		return this;
	}

	/**
	 * Returns the (lazily initialized, never {@code null}) list of resource types to install.
	 */
	public List<String> getInstallResourceTypes() {
		if (myInstallResourceTypes == null) {
			myInstallResourceTypes = new ArrayList<>();
		}
		return myInstallResourceTypes;
	}

	public void setInstallResourceTypes(List<String> theInstallResourceTypes) {
		myInstallResourceTypes = theInstallResourceTypes;
	}

	public String getName() {
		return myName;
	}

	public PackageInstallationSpec setName(String theName) {
		myName = theName;
		return this;
	}

	public String getVersion() {
		return myVersion;
	}

	public PackageInstallationSpec setVersion(String theVersion) {
		myVersion = theVersion;
		return this;
	}

	public byte[] getPackageContents() {
		return myPackageContents;
	}

	public PackageInstallationSpec setPackageContents(byte[] thePackageContents) {
		myPackageContents = thePackageContents;
		return this;
	}

	public PackageInstallationSpec addDependencyExclude(String theExclude) {
		getDependencyExcludes().add(theExclude);
		return this;
	}

	public PackageInstallationSpec addInstallResourceTypes(String... theResourceTypes) {
		for (String next : theResourceTypes) {
			getInstallResourceTypes().add(next);
		}
		return this;
	}

	public enum InstallModeEnum {
		STORE_ONLY,
		STORE_AND_INSTALL
	}

	public enum ValidationModeEnum {
		NOT_AVAILABLE,
		AVAILABLE
	}

	/** Supplies a name/version-based example for generated documentation. */
	public static class ExampleSupplier implements Supplier<PackageInstallationSpec> {

		@Override
		public PackageInstallationSpec get() {
			return new PackageInstallationSpec()
				.setName("hl7.fhir.us.core")
				.setVersion("3.1.0")
				.setInstallMode(InstallModeEnum.STORE_ONLY)
				.setFetchDependencies(true);
		}
	}

	/** Supplies a URL-based example for generated documentation. */
	public static class ExampleSupplier2 implements Supplier<PackageInstallationSpec> {

		@Override
		public PackageInstallationSpec get() {
			return new PackageInstallationSpec()
				.setName("com.example.my-resources")
				.setVersion("1.0")
				.setPackageUrl("classpath:/my-resources.tgz")
				.setInstallMode(InstallModeEnum.STORE_AND_INSTALL)
				.addInstallResourceTypes("Organization", "Medication", "PlanDefinition", "SearchParameter");
		}
	}
}
apache-2.0
mayl8822/binnavi
src/main/java/com/google/security/zynamics/binnavi/debug/connection/packets/replyparsers/RequestTargetParser.java
2003
// Copyright 2011-2016 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package com.google.security.zynamics.binnavi.debug.connection.packets.replyparsers; import com.google.security.zynamics.binnavi.debug.connection.DebugCommandType; import com.google.security.zynamics.binnavi.debug.connection.interfaces.ClientReader; import com.google.security.zynamics.binnavi.debug.connection.packets.replies.RequestTargetReply; /** * Parser responsible for parsing replies for Request Target replies. This message is sent by the * debug client if no target was specified on the debug client side. When such a message is received * it is the job of BinNavi to specify a target file or target process to debug. */ public final class RequestTargetParser extends AbstractReplyParser<RequestTargetReply> { /** * Creates a new Request Target reply parser. * * @param clientReader Used to read messages sent by the debug client. */ public RequestTargetParser(final ClientReader clientReader) { super(clientReader, DebugCommandType.RESP_REQUEST_TARGET); } @Override protected RequestTargetReply parseError(final int packetId) { // TODO: There is no proper handling of errors on the side of the // client yet. throw new IllegalStateException("IE01088: Received invalid reply from the debug client"); } @Override public RequestTargetReply parseSuccess(final int packetId, final int argumentCount) { return new RequestTargetReply(packetId, 0); } }
apache-2.0
mayl8822/binnavi
src/main/java/com/google/security/zynamics/binnavi/disassembly/CProject.java
10169
// Copyright 2011-2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package com.google.security.zynamics.binnavi.disassembly;

import com.google.common.base.Preconditions;
import com.google.security.zynamics.binnavi.CUtilityFunctions;
import com.google.security.zynamics.binnavi.Database.Exceptions.CouldntLoadDataException;
import com.google.security.zynamics.binnavi.Database.Exceptions.CouldntSaveDataException;
import com.google.security.zynamics.binnavi.Database.Exceptions.LoadCancelledException;
import com.google.security.zynamics.binnavi.Database.Interfaces.SQLProvider;
import com.google.security.zynamics.binnavi.debug.debugger.DebuggerTemplate;
import com.google.security.zynamics.binnavi.debug.models.trace.TraceList;
import com.google.security.zynamics.binnavi.disassembly.AddressSpaces.CAddressSpace;
import com.google.security.zynamics.binnavi.disassembly.views.INaviView;
import com.google.security.zynamics.zylib.general.ListenerProvider;
import com.google.security.zynamics.zylib.types.lists.FilledList;

import java.util.ArrayList;
import java.util.Date;
import java.util.List;

/**
 * Represents a single BinNavi project. The information of a project is taken from the database.
 *
 * Projects can come in two states. Either a project is loaded or it is not loaded. A project that
 * is not loaded does not provide all functionality. For example, you can not access the address
 * spaces of a project that is not loaded. The methods which can not produce valid results before a
 * project is loaded throw an IllegalStateException if they are called before a project is loaded.
 */
public final class CProject implements INaviProject {
  /**
   * The SQL provider that is used to load and save this project.
   */
  private final SQLProvider m_provider;

  /**
   * List of listeners that are notified about changes in the project.
   */
  private final ListenerProvider<IProjectListener> m_listeners = new ListenerProvider<>();

  /**
   * Number of address spaces in the project. This variable is only used until the project is
   * loaded (afterwards the count comes from the loaded content; see getAddressSpaceCount).
   */
  private final int m_addressSpaceCount;

  /**
   * Contains the configuration data of the project.
   */
  private final CProjectConfiguration m_configuration;

  /**
   * Contains the loaded content of the project. Null while the project is not loaded; this field
   * doubles as the "is loaded" flag (see isLoaded).
   */
  private CProjectContent m_content = null;

  /**
   * Flag that indicates whether the project is currently being loaded from the database.
   */
  private boolean m_isLoading = false;

  /**
   * Reports project loading events to listeners. Also serves as the monitor object that
   * serializes concurrent load() calls.
   */
  private final CProjectLoaderReporter m_loadReporter = new CProjectLoaderReporter(m_listeners);

  /**
   * Creates a new project object that represents a BinNavi project as stored in the database.
   *
   * @param projectId The ID of the project as it is found in the projects table in the database.
   * @param name The name of the project.
   * @param description The description of the project.
   * @param creationDate The creation date of the project.
   * @param modificationDate The modification date of the project.
   * @param addressSpaceCount Number of address spaces in this project.
   * @param assignedDebuggers Debuggers assigned to this project.
   * @param provider The SQL provider that is used to load and save the project.
   */
  public CProject(final int projectId, final String name, final String description,
      final Date creationDate, final Date modificationDate, final int addressSpaceCount,
      final List<DebuggerTemplate> assignedDebuggers, final SQLProvider provider) {
    Preconditions.checkArgument(projectId > 0, String.format(
        "IE00226: Project ID %d is invalid. Project IDs must be strictly positive", projectId));
    Preconditions.checkNotNull(name, "IE00227: Project names can't be null");
    Preconditions.checkNotNull(description, "IE00228: Project descriptions can't be null");
    Preconditions.checkNotNull(creationDate, "IE00229: Project creation dates can't be null");
    Preconditions.checkNotNull(modificationDate,
        "IE00230: Project modification dates can't be null");
    Preconditions.checkNotNull(provider, "IE00231: The SQL provider of the project can't be null");

    m_configuration = new CProjectConfiguration(this, m_listeners, provider, projectId, name,
        description, creationDate, modificationDate, assignedDebuggers);

    m_addressSpaceCount = addressSpaceCount;

    m_provider = provider;
  }

  @Override
  public void addListener(final IProjectListener listener) {
    m_listeners.addListener(listener);
  }

  @Override
  public boolean close() {
    if (!isLoaded()) {
      throw new IllegalStateException("IE00239: Project is not loaded");
    }

    // Give every listener a veto: if any closingProject() returns false, abort the close.
    // Listener exceptions are logged and treated as non-vetoes.
    for (final IProjectListener listener : m_listeners) {
      try {
        if (!listener.closingProject(this)) {
          return false;
        }
      } catch (final Exception exception) {
        CUtilityFunctions.logException(exception);
      }
    }

    // Drop the content; m_content == null marks the project as not loaded again.
    m_content.close();
    m_content = null;

    // Notify listeners after the fact; their exceptions are logged but do not undo the close.
    for (final IProjectListener listener : m_listeners) {
      try {
        listener.closedProject(this);
      } catch (final Exception exception) {
        CUtilityFunctions.logException(exception);
      }
    }

    return true;
  }

  /**
   * Returns the number of address spaces in the project.
   *
   * @return The number of address spaces in the project.
   */
  @Override
  public int getAddressSpaceCount() {
    // Before loading, fall back to the count supplied by the constructor.
    return isLoaded() ? m_content.getAddressSpaces().size() : m_addressSpaceCount;
  }

  @Override
  public CProjectConfiguration getConfiguration() {
    return m_configuration;
  }

  @Override
  public CProjectContent getContent() {
    Preconditions.checkNotNull(m_content, "IE02198: Project is not loaded");

    return m_content;
  }

  @Override
  public List<INaviView> getViewsWithAddresses(final List<UnrelocatedAddress> offset,
      final boolean all) throws CouldntLoadDataException {
    return m_provider.getViewsWithAddress(this, offset, all);
  }

  /**
   * Determines whether the project uses a given debugger.
   *
   * @param debugger The debugger to check for.
   *
   * @return True, if the project uses the debugger. False, otherwise.
   */
  public boolean hasDebugger(final DebuggerTemplate debugger) {
    return m_configuration.hasDebugger(debugger);
  }

  @Override
  public boolean inSameDatabase(final IDatabaseObject object) {
    Preconditions.checkNotNull(object, "IE00250: Object argument can't be null");

    return object.inSameDatabase(m_provider);
  }

  @Override
  public boolean inSameDatabase(final SQLProvider provider) {
    return provider.equals(m_provider);
  }

  @Override
  public boolean isLoaded() {
    return m_content != null;
  }

  @Override
  public boolean isLoading() {
    return m_isLoading;
  }

  @Override
  public void load() throws CouldntLoadDataException, LoadCancelledException {
    // Synchronizing on the load reporter serializes concurrent load() calls; a second caller
    // simply returns once the first one has finished.
    synchronized (m_loadReporter) {
      if (isLoaded()) {
        return;
      }

      m_isLoading = true;

      try {
        // Each report() call gives listeners a chance to cancel the load.
        if (!m_loadReporter.report(ProjectLoadEvents.Starting)) {
          throw new LoadCancelledException();
        }

        if (!m_loadReporter.report(ProjectLoadEvents.LoadingAddressSpaces)) {
          throw new LoadCancelledException();
        }

        final List<CAddressSpace> addressSpaces = m_provider.loadAddressSpaces(this);

        for (final CAddressSpace space : addressSpaces) {
          space.load();
        }

        if (!m_loadReporter.report(ProjectLoadEvents.LoadingCallgraphViews)) {
          throw new LoadCancelledException();
        }

        final List<ICallgraphView> userCallgraphs = m_provider.loadCallgraphViews(this);

        if (!m_loadReporter.report(ProjectLoadEvents.LoadingFlowgraphViews)) {
          throw new LoadCancelledException();
        }

        final List<IFlowgraphView> userFlowgraphs = m_provider.loadFlowgraphs(this);

        if (!m_loadReporter.report(ProjectLoadEvents.LoadingMixedgraphViews)) {
          throw new LoadCancelledException();
        }

        final List<INaviView> userMixedgraphs = m_provider.loadMixedgraphs(this);

        if (!m_loadReporter.report(ProjectLoadEvents.LoadingTraces)) {
          throw new LoadCancelledException();
        }

        final List<TraceList> traces = m_provider.loadTraces(this);

        // All user views (call graphs, flow graphs, mixed graphs) are merged into one list.
        final ArrayList<INaviView> views = new ArrayList<INaviView>(userCallgraphs);
        views.addAll(userFlowgraphs);
        views.addAll(userMixedgraphs);

        m_content = new CProjectContent(this, m_listeners, m_provider, addressSpaces, views,
            new FilledList<TraceList>(traces));
      } catch (CouldntLoadDataException | LoadCancelledException e) {
        m_isLoading = false;

        throw e;
      } finally {
        // Finished is reported on success AND on failure/cancellation.
        m_loadReporter.report(ProjectLoadEvents.Finished);
      }

      // Success path: notify listeners outside the try, logging (not propagating) their errors.
      for (final IProjectListener listener : m_listeners) {
        try {
          listener.loadedProject(this);
        } catch (final Exception exception) {
          CUtilityFunctions.logException(exception);
        }
      }

      m_isLoading = false;
    }
  }

  @Override
  public String readSetting(final String key) throws CouldntLoadDataException {
    return m_provider.readSetting(this, key);
  }

  @Override
  public void removeListener(final IProjectListener listener) {
    m_listeners.removeListener(listener);
  }

  @Override
  public void writeSetting(final String key, final String value) throws CouldntSaveDataException {
    m_provider.writeSetting(this, key, value);
  }
}
apache-2.0
testify/XPathAssertion
src/main/java/XPath/XPathAssertion.java
4453
/*
 * Copyright 2015 Codice Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package XPath;

import org.codice.testify.assertions.Assertion;
import org.codice.testify.objects.AssertionStatus;
import org.codice.testify.objects.TestifyLogger;
import org.codice.testify.objects.Response;
import org.osgi.framework.BundleActivator;
import org.osgi.framework.BundleContext;
import org.w3c.dom.Document;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;

import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.xpath.*;
import java.io.ByteArrayInputStream;
import java.io.IOException;

/**
 * The XPathAssertion class is a Testify Assertion service that performs XPath based checks.
 *
 * <p>The processor response is parsed as XML and the supplied XPath expression is evaluated
 * as a boolean against its document element; {@code true} means the assertion passed.
 */
public class XPathAssertion implements BundleActivator, Assertion {

    /**
     * Evaluates the given XPath expression against the processor response.
     *
     * @param assertionInfo the XPath expression to evaluate (must not be null)
     * @param response      the processor response wrapper (its payload must not be null)
     * @return an AssertionStatus whose failure details are null on success, or describe the
     *         failure otherwise
     */
    @Override
    public AssertionStatus evaluateAssertion(String assertionInfo, Response response) {
        TestifyLogger.debug("Running XPathAssertion", this.getClass().getSimpleName());

        //Get the processor response
        String responseValue = response.getResponse();
        AssertionStatus status;

        //If no assertion info is provided, return a failure
        if (assertionInfo == null) {
            status = new AssertionStatus("No XPath expression provided with assertion");
        //If the response from the processor is null, return a failure
        } else if (responseValue == null) {
            status = new AssertionStatus("No processor response");
        } else {
            //Set up document and xpath objects
            DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
            factory.setNamespaceAware(true);
            XPathFactory xpathFactory = XPathFactory.newInstance();
            XPath xpath = xpathFactory.newXPath();

            //Parse processor response into document
            Document document = null;
            try {
                //SECURITY: the processor response is untrusted XML. Disallow DTDs and external
                //entity resolution to prevent XXE and entity-expansion attacks.
                factory.setFeature("http://apache.org/xml/features/disallow-doctype-decl", true);
                factory.setFeature("http://xml.org/sax/features/external-general-entities", false);
                factory.setFeature("http://xml.org/sax/features/external-parameter-entities", false);
                factory.setXIncludeAware(false);
                factory.setExpandEntityReferences(false);

                DocumentBuilder builder = factory.newDocumentBuilder();
                document = builder.parse(new InputSource(new ByteArrayInputStream(responseValue.getBytes("utf-8"))));
            } catch (ParserConfigurationException | SAXException | IOException e) {
                //Parse/configuration failures are logged; document stays null and is handled below
                TestifyLogger.error(e.getMessage(), this.getClass().getSimpleName());
            }

            //If document is not null, run xpath expression
            if (document != null) {
                try {
                    //Run xpath expression and return boolean
                    XPathExpression xpathExpression = xpath.compile(assertionInfo);
                    boolean xpathResult = (boolean) xpathExpression.evaluate(document.getDocumentElement(), XPathConstants.BOOLEAN);

                    //If the XPathExpression returns true, return failure details of null meaning a successful assertion
                    if (xpathResult) {
                        status = new AssertionStatus(null);
                    //If the XPathExpression returns false, return a failure
                    } else {
                        status = new AssertionStatus("XPath expression returned false");
                    }
                } catch (XPathExpressionException e) {
                    status = new AssertionStatus("XPath expression formatted incorrectly");
                }
            } else {
                //If document is null, return a failure
                status = new AssertionStatus("Could not create xml document from response");
            }
        }
        return status;
    }

    @Override
    public void start(BundleContext bundleContext) throws Exception {
        //Register the XPath service
        bundleContext.registerService(Assertion.class.getName(), new XPathAssertion(), null);
    }

    @Override
    public void stop(BundleContext bundleContext) throws Exception {
        //Nothing to clean up on bundle stop
    }
}
apache-2.0
yuyijq/dubbo
dubbo-container/dubbo-container-api/src/main/java/org/apache/dubbo/container/Main.java
4373
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.dubbo.container;

import org.apache.dubbo.common.extension.ExtensionLoader;
import org.apache.dubbo.common.logger.Logger;
import org.apache.dubbo.common.logger.LoggerFactory;
import org.apache.dubbo.common.utils.ArrayUtils;
import org.apache.dubbo.common.utils.ConfigUtils;

import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

import static org.apache.dubbo.common.constants.CommonConstants.COMMA_SPLIT_PATTERN;

/**
 * Main. (API, Static, ThreadSafe)
 *
 * This class is entry point loading containers. It starts the configured containers and then
 * blocks the main thread on {@link #STOP} until the shutdown hook signals that all containers
 * have been stopped.
 */
public class Main {

    public static final String CONTAINER_KEY = "dubbo.container";

    public static final String SHUTDOWN_HOOK_KEY = "dubbo.shutdown.hook";

    private static final Logger logger = LoggerFactory.getLogger(Main.class);

    private static final ExtensionLoader<Container> LOADER = ExtensionLoader.getExtensionLoader(Container.class);

    private static final ReentrantLock LOCK = new ReentrantLock();

    // Signalled by the shutdown hook once every container has been stopped.
    private static final Condition STOP = LOCK.newCondition();

    public static void main(String[] args) {
        try {
            // No explicit container list: fall back to the configured (or default) extension names.
            if (ArrayUtils.isEmpty(args)) {
                String config = ConfigUtils.getProperty(CONTAINER_KEY, LOADER.getDefaultExtensionName());
                args = COMMA_SPLIT_PATTERN.split(config);
            }

            final List<Container> containers = new ArrayList<Container>();
            for (int i = 0; i < args.length; i++) {
                containers.add(LOADER.getExtension(args[i]));
            }
            // FIX: typo "serivce" -> "service" in the log message.
            logger.info("Use container type(" + Arrays.toString(args) + ") to run dubbo service.");

            if ("true".equals(System.getProperty(SHUTDOWN_HOOK_KEY))) {
                Runtime.getRuntime().addShutdownHook(new Thread("dubbo-container-shutdown-hook") {
                    @Override
                    public void run() {
                        for (Container container : containers) {
                            try {
                                container.stop();
                                logger.info("Dubbo " + container.getClass().getSimpleName() + " stopped!");
                            } catch (Throwable t) {
                                // Keep stopping the remaining containers even if one fails.
                                logger.error(t.getMessage(), t);
                            }
                        }
                        // FIX: signal AFTER the loop. Previously the condition was signalled once
                        // per container, releasing the waiting main thread after the FIRST
                        // container stopped instead of after all of them.
                        try {
                            LOCK.lock();
                            STOP.signal();
                        } finally {
                            LOCK.unlock();
                        }
                    }
                });
            }

            for (Container container : containers) {
                container.start();
                logger.info("Dubbo " + container.getClass().getSimpleName() + " started!");
            }
            System.out.println(new SimpleDateFormat("[yyyy-MM-dd HH:mm:ss]").format(new Date()) + " Dubbo service server started!");
        } catch (RuntimeException e) {
            logger.error(e.getMessage(), e);
            // Startup failed: exit with a non-zero status instead of hanging on the condition.
            System.exit(1);
        }

        // Block until the shutdown hook reports that all containers are stopped.
        try {
            LOCK.lock();
            STOP.await();
        } catch (InterruptedException e) {
            logger.warn("Dubbo service server stopped, interrupted by other thread!", e);
        } finally {
            LOCK.unlock();
        }
    }
}
apache-2.0
izhangzhihao/SpringMVCSeedProject
src/main/java/com/github/izhangzhihao/SpringMVCSeedProject/Annotation/AuthByRole.java
435
package com.github.izhangzhihao.SpringMVCSeedProject.Annotation;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

/**
 * Role-based authorization check for a method. (Translated from the original
 * Chinese comment: "根据角色判断权限" — determine permission by role.)
 *
 * <p>Retained at runtime so an interceptor/aspect can read it; only applicable to methods.
 */
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
public @interface AuthByRole {
    /**
     * The authority types (roles) allowed to invoke the annotated method.
     */
    AuthorityType[] AuthorityType();

    /**
     * Presumably toggles whether the check is enforced (defaults to true).
     * NOTE(review): semantics not visible here — confirm against the aspect that reads
     * this annotation.
     */
    boolean validate() default true;
}
apache-2.0
khartec/waltz
waltz-model/src/main/java/org/finos/waltz/model/IconProvider.java
725
/* * Waltz - Enterprise Architecture * Copyright (C) 2016, 2017, 2018, 2019 Waltz open source project * See README.md for more information * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific * */ package org.finos.waltz.model; public interface IconProvider { String icon(); }
apache-2.0
cloudera/cdk
cdk-morphlines/cdk-morphlines-core/src/main/java/com/cloudera/cdk/morphline/base/Fields.java
1245
/*
 * Copyright 2013 Cloudera Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.cloudera.cdk.morphline.base;

/**
 * Common record field names.
 *
 * <p>This is a pure constants holder and is not meant to be instantiated.
 */
public final class Fields {

  // logstash conventions:
  public static final String ID = "id";
  public static final String BASE_ID = "base_id";
  public static final String TIMESTAMP = "timestamp";
  public static final String MESSAGE = "message"; // the original plain-text message

  public static final String ATTACHMENT_BODY = "_attachment_body";
  public static final String ATTACHMENT_MIME_TYPE = "_attachment_mimetype";
  public static final String ATTACHMENT_CHARSET = "_attachment_charset";
  public static final String ATTACHMENT_NAME = "_attachment_name";

  // Utility class: prevent instantiation (the class previously had an implicit public ctor).
  private Fields() {}

}
apache-2.0
awhitford/Resteasy
testsuite/integration-tests/src/test/java/org/jboss/resteasy/test/resource/path/resource/ResourceLocatorWithBaseNoExpressionSubresource.java
2444
package org.jboss.resteasy.test.resource.path.resource;

import org.junit.Assert;

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.UriInfo;
import java.util.List;

/**
 * Sub-resource that verifies the JAX-RS matched-URI and matched-resource bookkeeping
 * exposed through {@link UriInfo} at each level of resource-locator dispatch.
 */
public class ResourceLocatorWithBaseNoExpressionSubresource {

   @GET
   public String doGet(@Context UriInfo uri) {
      final List<String> matchedUris = uri.getMatchedURIs();
      final List<Object> matchedResources = uri.getMatchedResources();

      // Two URIs matched so far: the sub-resource path and the root resource path.
      Assert.assertEquals(ResourceLocatorWithBaseNoExpressionResource.ERROR_MSG, 2, matchedUris.size());
      Assert.assertEquals(ResourceLocatorWithBaseNoExpressionResource.ERROR_MSG, "a1/base/1/resources", matchedUris.get(0));
      Assert.assertEquals(ResourceLocatorWithBaseNoExpressionResource.ERROR_MSG, "a1", matchedUris.get(1));

      // Matched resources are reported most-recent first.
      Assert.assertEquals(ResourceLocatorWithBaseNoExpressionResource.ERROR_MSG, 2, matchedResources.size());
      Assert.assertEquals(ResourceLocatorWithBaseNoExpressionResource.ERROR_MSG,
            ResourceLocatorWithBaseNoExpressionSubresource.class, matchedResources.get(0).getClass());
      Assert.assertEquals(ResourceLocatorWithBaseNoExpressionResource.ERROR_MSG,
            ResourceLocatorWithBaseNoExpressionResource.class, matchedResources.get(1).getClass());

      return this.getClass().getName();
   }

   @Path("/subresource2")
   public Object getSubresource2(@Context UriInfo uri) {
      final List<String> matchedUris = uri.getMatchedURIs();
      final List<Object> matchedResources = uri.getMatchedResources();

      // One level deeper: three matched URIs now, most specific first.
      Assert.assertEquals(ResourceLocatorWithBaseNoExpressionResource.ERROR_MSG, 3, matchedUris.size());
      Assert.assertEquals(ResourceLocatorWithBaseNoExpressionResource.ERROR_MSG, "a1/base/1/resources/subresource2", matchedUris.get(0));
      Assert.assertEquals(ResourceLocatorWithBaseNoExpressionResource.ERROR_MSG, "a1/base/1/resources", matchedUris.get(1));
      Assert.assertEquals(ResourceLocatorWithBaseNoExpressionResource.ERROR_MSG, "a1", matchedUris.get(2));

      // Still only two resource instances involved in the match.
      Assert.assertEquals(ResourceLocatorWithBaseNoExpressionResource.ERROR_MSG, 2, matchedResources.size());
      Assert.assertEquals(ResourceLocatorWithBaseNoExpressionResource.ERROR_MSG,
            ResourceLocatorWithBaseNoExpressionSubresource.class, matchedResources.get(0).getClass());
      Assert.assertEquals(ResourceLocatorWithBaseNoExpressionResource.ERROR_MSG,
            ResourceLocatorWithBaseNoExpressionResource.class, matchedResources.get(1).getClass());

      return new ResourceLocatorWithBaseNoExpressionSubresource2();
   }
}
apache-2.0
rekhajoshm/pigfork
src/org/apache/pig/impl/io/SequenceFileInterStorage.java
8378
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.pig.impl.io;

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.pig.Expression;
import org.apache.pig.FileInputLoadFunc;
import org.apache.pig.LoadFunc;
import org.apache.pig.LoadMetadata;
import org.apache.pig.ResourceSchema;
import org.apache.pig.ResourceStatistics;
import org.apache.pig.StoreFunc;
import org.apache.pig.StoreFuncInterface;
import org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigSplit;
import org.apache.pig.data.BinSedesTuple;
import org.apache.pig.data.Tuple;
import org.apache.pig.impl.util.Utils;

/**
 * Store tuples (BinSedesTuples, specifically) using sequence files to leverage
 * sequence file's compression features. Replacement for previous use of {@code TFileStorage}, which
 * had some edge cases that were not properly handled there.
 *
 * <p>Records are written as (NullWritable, BinSedesTuple) pairs — the key carries no
 * information, all data lives in the tuple value.
 */
@InterfaceAudience.Private
public class SequenceFileInterStorage extends FileInputLoadFunc
implements StoreFuncInterface, LoadMetadata {
    private static final Log mLog = LogFactory.getLog(SequenceFileInterStorage.class);
    public static final String useLog = "SequenceFile storage in use";

    // Constant dummy key written with every tuple (sequence files require a key).
    final private NullWritable KEY0 = NullWritable.get();

    // Reader/writer handed to us by the framework in prepareToRead/prepareToWrite.
    RecordReader<NullWritable, BinSedesTuple> recReader;
    RecordWriter<NullWritable, BinSedesTuple> recWriter;

    /**
     * Simple binary nested reader format
     */
    public SequenceFileInterStorage() throws IOException {
        mLog.debug(useLog);
    }

    /**
     * Returns the next tuple from the split, or null when the split is exhausted.
     * InterruptedException is rewrapped as IOException per the LoadFunc contract.
     */
    @Override
    public Tuple getNext() throws IOException {
        try {
            if (recReader.nextKeyValue()) {
                return recReader.getCurrentValue();
            } else {
                return null;
            }
        } catch (InterruptedException e) {
            throw new IOException(e);
        }
    }

    /**
     * Writes one tuple. The cast assumes t is a BinSedesTuple — NOTE(review): a
     * non-BinSedesTuple argument would throw ClassCastException here; confirm callers
     * always supply BinSedesTuples.
     */
    @Override
    public void putNext(Tuple t) throws IOException {
        try {
            recWriter.write(KEY0, (BinSedesTuple) t);
        } catch (InterruptedException e) {
            throw new IOException(e);
        }
    }

    @SuppressWarnings("rawtypes")
    @Override
    public InputFormat getInputFormat() {
        return new SequenceFileInputFormat<Object, Tuple>();
    }

    @Override
    public void setLocation(String location, Job job) throws IOException {
        FileInputFormat.setInputPaths(job, location);
    }

    @Override
    public ResourceSchema getSchema(String location, Job job)
            throws IOException {
        return Utils.getSchema(this, location, true, job);
    }

    @SuppressWarnings({ "unchecked", "rawtypes" })
    @Override
    public void prepareToRead(RecordReader reader, PigSplit split)
            throws IOException {
        recReader = reader;
    }

    // No statistics available for this storage format.
    @Override
    public ResourceStatistics getStatistics(String location, Job job)
            throws IOException {
        return null;
    }

    // Intermediate storage is not partitioned.
    @Override
    public String[] getPartitionKeys(String location, Job job)
            throws IOException {
        return null;
    }

    @Override
    public void setPartitionFilter(Expression partitionFilter)
            throws IOException {
        throw new UnsupportedOperationException();
    }

    @Override
    public String relToAbsPathForStoreLocation(String location, Path curDir)
            throws IOException {
        return LoadFunc.getAbsolutePath(location, curDir);
    }

    @SuppressWarnings("rawtypes")
    @Override
    public OutputFormat getOutputFormat() throws IOException {
        return new SequenceFileTupleOutputFormat();
    }

    /**
     * Output format that writes (NullWritable, BinSedesTuple) sequence files, honoring
     * the job's configured output compression type and codec.
     */
    public static class SequenceFileTupleOutputFormat extends
    SequenceFileOutputFormat<NullWritable, BinSedesTuple> {
        /**
         * Creates the SequenceFile writer for this task attempt, applying the job's
         * compression settings (codec defaults to DefaultCodec when unspecified).
         */
        protected SequenceFile.Writer getSequenceWriter(TaskAttemptContext context)
                throws IOException {
            Configuration conf = context.getConfiguration();

            CompressionCodec codec = null;
            CompressionType compressionType = CompressionType.NONE;
            if (getCompressOutput(context)) {
                // find the kind of compression to do
                compressionType = getOutputCompressionType(context);
                // find the right codec
                Class<?> codecClass = getOutputCompressorClass(context,
                        DefaultCodec.class);
                codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, conf);
            }
            // get the path of the temporary output file
            Path file = getDefaultWorkFile(context, "");
            FileSystem fs = file.getFileSystem(conf);
            return SequenceFile.createWriter(fs, conf, file,
                    NullWritable.class,
                    BinSedesTuple.class,
                    compressionType,
                    codec,
                    context);
        }

        @Override
        public RecordWriter<NullWritable, BinSedesTuple> getRecordWriter(TaskAttemptContext context
                ) throws IOException, InterruptedException {
            final SequenceFile.Writer out = getSequenceWriter(context);
            // Anonymous adapter: delegates write/close to the SequenceFile.Writer.
            return new RecordWriter<NullWritable, BinSedesTuple>() {
                @Override
                public void write(NullWritable key, BinSedesTuple value)
                        throws IOException {
                    out.append(key, value);
                }

                @Override
                public void close(TaskAttemptContext context) throws IOException {
                    out.close();
                }
            };
        }
    }

    @Override
    public void setStoreLocation(String location, Job job) throws IOException {
        FileOutputFormat.setOutputPath(job, new Path(location));
    }

    // Intermediate data needs no schema validation.
    @Override
    public void checkSchema(ResourceSchema s) throws IOException {
    }

    @SuppressWarnings({ "unchecked", "rawtypes" })
    @Override
    public void prepareToWrite(RecordWriter writer) throws IOException {
        recWriter = writer;
    }

    // No UDF-context state to track for this storage.
    @Override
    public void setStoreFuncUDFContextSignature(String signature) {
    }

    @Override
    public void cleanupOnFailure(String location, Job job) throws IOException {
        StoreFunc.cleanupOnFailureImpl(location, job);
    }

    @Override
    public void cleanupOnSuccess(String location, Job job) throws IOException {
        // DEFAULT : do nothing
    }
}
apache-2.0
lpxz/grail-derby104
java/engine/org/apache/derby/impl/io/JarStorageFactory.java
4689
/*

   Derby - Class org.apache.derby.impl.io.JarStorageFactory

   Licensed to the Apache Software Foundation (ASF) under one or more
   contributor license agreements.  See the NOTICE file distributed with
   this work for additional information regarding copyright ownership.
   The ASF licenses this file to you under the Apache License, Version 2.0
   (the "License"); you may not use this file except in compliance with
   the License.  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

 */

package org.apache.derby.impl.io;

import java.io.File;
import java.io.IOException;
import java.util.zip.ZipFile;

import org.apache.derby.io.StorageFile;

/**
 * This class provides a Jar file based implementation of the StorageFactory interface. It is used by the
 * database engine to access persistent data and transaction logs under the jar subsubprotocol.
 */
public class JarStorageFactory extends BaseStorageFactory
{
    // The open zip/jar archive backing this factory; opened in doInit(), closed in shutdown().
    ZipFile zipData;

    /**
     * Construct a persistent StorageFile from a path name.
     *
     * @param path The path name of the file
     *
     * @return A corresponding StorageFile object
     */
    StorageFile newPersistentFile(String path)
    {
        return new JarDBFile(this, path);
    }

    /**
     * Construct a StorageFile from a directory and file name.
     *
     * @param directoryName The directory part of the path name. Must not be null, nor may it be in the temp dir.
     * @param fileName The name of the file within the directory.
     *
     * @return A corresponding StorageFile object
     */
    StorageFile newPersistentFile(String directoryName, String fileName)
    {
        if (directoryName == null || directoryName.length() == 0)
            return newPersistentFile(fileName);
        return new JarDBFile(this, directoryName, fileName);
    }

    /**
     * Construct a StorageFile from a directory and file name.
     *
     * @param directoryName The directory part of the path name.
     * @param fileName The name of the file within the directory.
     *
     * @return A corresponding StorageFile object
     */
    StorageFile newPersistentFile(StorageFile directoryName, String fileName)
    {
        if (directoryName == null)
            return newPersistentFile(fileName);
        return new JarDBFile((JarDBFile) directoryName, fileName);
    }

    /**
     * Parse the dataDirectory name and open the backing jar/zip archive.
     * The dataDirectory is expected to be of the form "(jar-file)directory" or
     * "jar-file"; surrounding spaces are skipped.
     */
    void doInit() throws IOException
    {
        if (dataDirectory == null)
            return;
        // Parse the dataDirectory name. It should be of the form "(jar-file)directory" or "jar-file"
        int offset = 0;
        // NOTE: short-circuit && is required here. The original code used the
        // non-short-circuit '&', which evaluated charAt(offset) even when
        // offset == length(), throwing StringIndexOutOfBoundsException for a
        // dataDirectory consisting only of spaces.
        while (offset < dataDirectory.length()
                && Character.isSpaceChar(dataDirectory.charAt(offset)))
            offset++;
        int leftParen = -1;
        int rightParen = -1;
        if (offset < dataDirectory.length())
        {
            leftParen = dataDirectory.indexOf('(', offset);
            if (leftParen >= 0)
                rightParen = dataDirectory.indexOf(')', leftParen + 1);
        }
        File jarFile = null;
        if (rightParen > 0)
        {
            // "(jar-file)directory" form: the archive name is between the parens.
            jarFile = getJarFile(dataDirectory.substring(leftParen + 1, rightParen));
            offset = rightParen + 1;
            // Skip spaces between ")" and the directory part; same &&-guard as above
            // protects against a trailing run of spaces.
            while (offset < dataDirectory.length()
                    && Character.isSpaceChar(dataDirectory.charAt(offset)))
                offset++;
            dataDirectory = dataDirectory.substring(offset, dataDirectory.length());
        }
        else
        {
            // Plain "jar-file" form: the whole string names the archive.
            jarFile = getJarFile(dataDirectory);
            dataDirectory = "";
        }
        zipData = new ZipFile(jarFile);
        canonicalName = "(" + jarFile.getCanonicalPath() + ")" + dataDirectory;
        separatedDataDirectory = dataDirectory + '/'; // Zip files use '/' as a separator
        createTempDir();
    } // end of doInit

    /**
     * Close the opened jar/zip file on shutdown.
     * (Fix for DERBY-2083).
     */
    public void shutdown()
    {
        if (zipData != null)
        {
            try
            {
                zipData.close();
            }
            catch (IOException e)
            {
                // best-effort close on shutdown; nothing useful to do on failure
            }
            zipData = null;
        }
    }

    /**
     * Resolve a jar file name against the factory's home directory when the
     * name is relative.
     *
     * @param name path of the jar file, absolute or relative to home
     * @return the resolved File (not checked for existence here)
     */
    private File getJarFile(String name)
    {
        File jarFile = new File(name);
        if (home != null && !jarFile.isAbsolute())
            jarFile = new File(home, name);
        return jarFile;
    } // end of getJarFile
}
apache-2.0
davidkarlsen/camel
core/camel-core/src/main/java/org/apache/camel/reifier/RemoveHeaderReifier.java
1579
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.camel.reifier; import org.apache.camel.Processor; import org.apache.camel.model.ProcessorDefinition; import org.apache.camel.model.RemoveHeaderDefinition; import org.apache.camel.processor.RemoveHeaderProcessor; import org.apache.camel.spi.RouteContext; import org.apache.camel.util.ObjectHelper; public class RemoveHeaderReifier extends ProcessorReifier<RemoveHeaderDefinition> { RemoveHeaderReifier(ProcessorDefinition<?> definition) { super((RemoveHeaderDefinition) definition); } @Override public Processor createProcessor(RouteContext routeContext) throws Exception { ObjectHelper.notNull(definition.getHeaderName(), "headerName", this); return new RemoveHeaderProcessor(definition.getHeaderName()); } }
apache-2.0
amezgin/amezgin
chapter_004/srp/src/main/java/ru/job4j/exceptions/MenuOutException.java
372
package ru.job4j.exceptions;

/**
 * The class MenuOutException.
 * <p>
 * Unchecked exception, presumably thrown when a menu selection falls outside
 * the range of available menu items (the throwing site is not visible in this
 * file — confirm against the menu code that uses it).
 *
 * @author Alexander Mezgin
 * @version 1.0
 * @since 26.01.2017
 */
public class MenuOutException extends RuntimeException {

    /**
     * Constructor for class MenuOutException.
     *
     * @param message detail message describing the invalid selection.
     */
    public MenuOutException(String message) {
        super(message);
    }
}
apache-2.0
wendal/alipay-sdk
src/main/java/com/alipay/api/internal/util/codec/Base64.java
17613
/* * Copyright 2001-2004 The Apache Software Foundation. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.alipay.api.internal.util.codec; /** * Provides Base64 encoding and decoding as defined by RFC 2045. * * <p>This class implements section <cite>6.8. Base64 Content-Transfer-Encoding</cite> * from RFC 2045 <cite>Multipurpose Internet Mail Extensions (MIME) Part One: * Format of Internet Message Bodies</cite> by Freed and Borenstein.</p> * * @see <a href="http://www.ietf.org/rfc/rfc2045.txt">RFC 2045</a> * @author Apache Software Foundation * @since 1.0-dev * @version $Id: Base64.java,v 1.20 2004/05/24 00:21:24 ggregory Exp $ */ public class Base64 implements BinaryEncoder, BinaryDecoder { /** * Chunk size per RFC 2045 section 6.8. * * <p>The {@value} character limit does not count the trailing CRLF, but counts * all other characters, including any equal signs.</p> * * @see <a href="http://www.ietf.org/rfc/rfc2045.txt">RFC 2045 section 6.8</a> */ static final int CHUNK_SIZE = 76; /** * Chunk separator per RFC 2045 section 2.1. * * @see <a href="http://www.ietf.org/rfc/rfc2045.txt">RFC 2045 section 2.1</a> */ static final byte[] CHUNK_SEPARATOR = "\r\n".getBytes(); /** * The base length. */ static final int BASELENGTH = 255; /** * Lookup length. */ static final int LOOKUPLENGTH = 64; /** * Used to calculate the number of bits in a byte. */ static final int EIGHTBIT = 8; /** * Used when encoding something which has fewer than 24 bits. 
*/ static final int SIXTEENBIT = 16; /** * Used to determine how many bits data contains. */ static final int TWENTYFOURBITGROUP = 24; /** * Used to get the number of Quadruples. */ static final int FOURBYTE = 4; /** * Used to test the sign of a byte. */ static final int SIGN = -128; /** * Byte used to pad output. */ static final byte PAD = (byte) '='; // Create arrays to hold the base64 characters and a // lookup for base64 chars private static byte[] base64Alphabet = new byte[BASELENGTH]; private static byte[] lookUpBase64Alphabet = new byte[LOOKUPLENGTH]; // Populating the lookup and character arrays static { for (int i = 0; i < BASELENGTH; i++) { base64Alphabet[i] = (byte) -1; } for (int i = 'Z'; i >= 'A'; i--) { base64Alphabet[i] = (byte) (i - 'A'); } for (int i = 'z'; i >= 'a'; i--) { base64Alphabet[i] = (byte) (i - 'a' + 26); } for (int i = '9'; i >= '0'; i--) { base64Alphabet[i] = (byte) (i - '0' + 52); } base64Alphabet['+'] = 62; base64Alphabet['/'] = 63; for (int i = 0; i <= 25; i++) { lookUpBase64Alphabet[i] = (byte) ('A' + i); } for (int i = 26, j = 0; i <= 51; i++, j++) { lookUpBase64Alphabet[i] = (byte) ('a' + j); } for (int i = 52, j = 0; i <= 61; i++, j++) { lookUpBase64Alphabet[i] = (byte) ('0' + j); } lookUpBase64Alphabet[62] = (byte) '+'; lookUpBase64Alphabet[63] = (byte) '/'; } private static boolean isBase64(byte octect) { if (octect == PAD) { return true; } else if (base64Alphabet[octect] == -1) { return false; } else { return true; } } /** * Tests a given byte array to see if it contains * only valid characters within the Base64 alphabet. * * @param arrayOctect byte array to test * @return true if all bytes are valid characters in the Base64 * alphabet or if the byte array is empty; false, otherwise */ public static boolean isArrayByteBase64(byte[] arrayOctect) { arrayOctect = discardWhitespace(arrayOctect); int length = arrayOctect.length; if (length == 0) { // shouldn't a 0 length array be valid base64 data? 
// return false; return true; } for (int i = 0; i < length; i++) { if (!isBase64(arrayOctect[i])) { return false; } } return true; } /** * Encodes binary data using the base64 algorithm but * does not chunk the output. * * @param binaryData binary data to encode * @return Base64 characters */ public static byte[] encodeBase64(byte[] binaryData) { return encodeBase64(binaryData, false); } /** * Encodes binary data using the base64 algorithm and chunks * the encoded output into 76 character blocks * * @param binaryData binary data to encode * @return Base64 characters chunked in 76 character blocks */ public static byte[] encodeBase64Chunked(byte[] binaryData) { return encodeBase64(binaryData, true); } /** * Decodes an Object using the base64 algorithm. This method * is provided in order to satisfy the requirements of the * Decoder interface, and will throw a DecoderException if the * supplied object is not of type byte[]. * * @param pObject Object to decode * @return An object (of type byte[]) containing the * binary data which corresponds to the byte[] supplied. * @throws DecoderException if the parameter supplied is not * of type byte[] */ public Object decode(Object pObject) throws DecoderException { if (!(pObject instanceof byte[])) { throw new DecoderException("Parameter supplied to Base64 decode is not a byte[]"); } return decode((byte[]) pObject); } /** * Decodes a byte[] containing containing * characters in the Base64 alphabet. * * @param pArray A byte array containing Base64 character data * @return a byte array containing binary data */ public byte[] decode(byte[] pArray) { return decodeBase64(pArray); } /** * Encodes binary data using the base64 algorithm, optionally * chunking the output into 76 character blocks. * * @param binaryData Array containing binary data to encode. * @param isChunked if isChunked is true this encoder will chunk * the base64 output into 76 character blocks * @return Base64-encoded data. 
*/ public static byte[] encodeBase64(byte[] binaryData, boolean isChunked) { int lengthDataBits = binaryData.length * EIGHTBIT; int fewerThan24bits = lengthDataBits % TWENTYFOURBITGROUP; int numberTriplets = lengthDataBits / TWENTYFOURBITGROUP; byte encodedData[] = null; int encodedDataLength = 0; int nbrChunks = 0; if (fewerThan24bits != 0) { //data not divisible by 24 bit encodedDataLength = (numberTriplets + 1) * 4; } else { // 16 or 8 bit encodedDataLength = numberTriplets * 4; } // If the output is to be "chunked" into 76 character sections, // for compliance with RFC 2045 MIME, then it is important to // allow for extra length to account for the separator(s) if (isChunked) { nbrChunks = (CHUNK_SEPARATOR.length == 0 ? 0 : (int) Math.ceil((float) encodedDataLength / CHUNK_SIZE)); encodedDataLength += nbrChunks * CHUNK_SEPARATOR.length; } encodedData = new byte[encodedDataLength]; byte k = 0, l = 0, b1 = 0, b2 = 0, b3 = 0; int encodedIndex = 0; int dataIndex = 0; int i = 0; int nextSeparatorIndex = CHUNK_SIZE; int chunksSoFar = 0; //log.debug("number of triplets = " + numberTriplets); for (i = 0; i < numberTriplets; i++) { dataIndex = i * 3; b1 = binaryData[dataIndex]; b2 = binaryData[dataIndex + 1]; b3 = binaryData[dataIndex + 2]; //log.debug("b1= " + b1 +", b2= " + b2 + ", b3= " + b3); l = (byte) (b2 & 0x0f); k = (byte) (b1 & 0x03); byte val1 = ((b1 & SIGN) == 0) ? (byte) (b1 >> 2) : (byte) ((b1) >> 2 ^ 0xc0); byte val2 = ((b2 & SIGN) == 0) ? (byte) (b2 >> 4) : (byte) ((b2) >> 4 ^ 0xf0); byte val3 = ((b3 & SIGN) == 0) ? 
(byte) (b3 >> 6) : (byte) ((b3) >> 6 ^ 0xfc); encodedData[encodedIndex] = lookUpBase64Alphabet[val1]; //log.debug( "val2 = " + val2 ); //log.debug( "k4 = " + (k<<4) ); //log.debug( "vak = " + (val2 | (k<<4)) ); encodedData[encodedIndex + 1] = lookUpBase64Alphabet[val2 | (k << 4)]; encodedData[encodedIndex + 2] = lookUpBase64Alphabet[(l << 2) | val3]; encodedData[encodedIndex + 3] = lookUpBase64Alphabet[b3 & 0x3f]; encodedIndex += 4; // If we are chunking, let's put a chunk separator down. if (isChunked) { // this assumes that CHUNK_SIZE % 4 == 0 if (encodedIndex == nextSeparatorIndex) { System.arraycopy( CHUNK_SEPARATOR, 0, encodedData, encodedIndex, CHUNK_SEPARATOR.length); chunksSoFar++; nextSeparatorIndex = (CHUNK_SIZE * (chunksSoFar + 1)) + (chunksSoFar * CHUNK_SEPARATOR.length); encodedIndex += CHUNK_SEPARATOR.length; } } } // form integral number of 6-bit groups dataIndex = i * 3; if (fewerThan24bits == EIGHTBIT) { b1 = binaryData[dataIndex]; k = (byte) (b1 & 0x03); //log.debug("b1=" + b1); //log.debug("b1<<2 = " + (b1>>2) ); byte val1 = ((b1 & SIGN) == 0) ? (byte) (b1 >> 2) : (byte) ((b1) >> 2 ^ 0xc0); encodedData[encodedIndex] = lookUpBase64Alphabet[val1]; encodedData[encodedIndex + 1] = lookUpBase64Alphabet[k << 4]; encodedData[encodedIndex + 2] = PAD; encodedData[encodedIndex + 3] = PAD; } else if (fewerThan24bits == SIXTEENBIT) { b1 = binaryData[dataIndex]; b2 = binaryData[dataIndex + 1]; l = (byte) (b2 & 0x0f); k = (byte) (b1 & 0x03); byte val1 = ((b1 & SIGN) == 0) ? (byte) (b1 >> 2) : (byte) ((b1) >> 2 ^ 0xc0); byte val2 = ((b2 & SIGN) == 0) ? (byte) (b2 >> 4) : (byte) ((b2) >> 4 ^ 0xf0); encodedData[encodedIndex] = lookUpBase64Alphabet[val1]; encodedData[encodedIndex + 1] = lookUpBase64Alphabet[val2 | (k << 4)]; encodedData[encodedIndex + 2] = lookUpBase64Alphabet[l << 2]; encodedData[encodedIndex + 3] = PAD; } if (isChunked) { // we also add a separator to the end of the final chunk. 
if (chunksSoFar < nbrChunks) { System.arraycopy( CHUNK_SEPARATOR, 0, encodedData, encodedDataLength - CHUNK_SEPARATOR.length, CHUNK_SEPARATOR.length); } } return encodedData; } /** * Decodes Base64 data into octects * * @param base64Data Byte array containing Base64 data * @return Array containing decoded data. */ public static byte[] decodeBase64(byte[] base64Data) { // RFC 2045 requires that we discard ALL non-Base64 characters base64Data = discardNonBase64(base64Data); // handle the edge case, so we don't have to worry about it later if (base64Data.length == 0) { return new byte[0]; } int numberQuadruple = base64Data.length / FOURBYTE; byte decodedData[] = null; byte b1 = 0, b2 = 0, b3 = 0, b4 = 0, marker0 = 0, marker1 = 0; // Throw away anything not in base64Data int encodedIndex = 0; int dataIndex = 0; { // this sizes the output array properly - rlw int lastData = base64Data.length; // ignore the '=' padding while (base64Data[lastData - 1] == PAD) { if (--lastData == 0) { return new byte[0]; } } decodedData = new byte[lastData - numberQuadruple]; } for (int i = 0; i < numberQuadruple; i++) { dataIndex = i * 4; marker0 = base64Data[dataIndex + 2]; marker1 = base64Data[dataIndex + 3]; b1 = base64Alphabet[base64Data[dataIndex]]; b2 = base64Alphabet[base64Data[dataIndex + 1]]; if (marker0 != PAD && marker1 != PAD) { //No PAD e.g 3cQl b3 = base64Alphabet[marker0]; b4 = base64Alphabet[marker1]; decodedData[encodedIndex] = (byte) (b1 << 2 | b2 >> 4); decodedData[encodedIndex + 1] = (byte) (((b2 & 0xf) << 4) | ((b3 >> 2) & 0xf)); decodedData[encodedIndex + 2] = (byte) (b3 << 6 | b4); } else if (marker0 == PAD) { //Two PAD e.g. 3c[Pad][Pad] decodedData[encodedIndex] = (byte) (b1 << 2 | b2 >> 4); } else if (marker1 == PAD) { //One PAD e.g. 
3cQ[Pad] b3 = base64Alphabet[marker0]; decodedData[encodedIndex] = (byte) (b1 << 2 | b2 >> 4); decodedData[encodedIndex + 1] = (byte) (((b2 & 0xf) << 4) | ((b3 >> 2) & 0xf)); } encodedIndex += 3; } return decodedData; } /** * Discards any whitespace from a base-64 encoded block. * * @param data The base-64 encoded data to discard the whitespace * from. * @return The data, less whitespace (see RFC 2045). */ static byte[] discardWhitespace(byte[] data) { byte groomedData[] = new byte[data.length]; int bytesCopied = 0; for (int i = 0; i < data.length; i++) { switch (data[i]) { case (byte) ' ' : case (byte) '\n' : case (byte) '\r' : case (byte) '\t' : break; default: groomedData[bytesCopied++] = data[i]; } } byte packedData[] = new byte[bytesCopied]; System.arraycopy(groomedData, 0, packedData, 0, bytesCopied); return packedData; } /** * Discards any characters outside of the base64 alphabet, per * the requirements on page 25 of RFC 2045 - "Any characters * outside of the base64 alphabet are to be ignored in base64 * encoded data." * * @param data The base-64 encoded data to groom * @return The data, less non-base64 characters (see RFC 2045). */ static byte[] discardNonBase64(byte[] data) { byte groomedData[] = new byte[data.length]; int bytesCopied = 0; for (int i = 0; i < data.length; i++) { if (isBase64(data[i])) { groomedData[bytesCopied++] = data[i]; } } byte packedData[] = new byte[bytesCopied]; System.arraycopy(groomedData, 0, packedData, 0, bytesCopied); return packedData; } // Implementation of the Encoder Interface /** * Encodes an Object using the base64 algorithm. This method * is provided in order to satisfy the requirements of the * Encoder interface, and will throw an EncoderException if the * supplied object is not of type byte[]. * * @param pObject Object to encode * @return An object (of type byte[]) containing the * base64 encoded data which corresponds to the byte[] supplied. 
* @throws EncoderException if the parameter supplied is not * of type byte[] */ public Object encode(Object pObject) throws EncoderException { if (!(pObject instanceof byte[])) { throw new EncoderException( "Parameter supplied to Base64 encode is not a byte[]"); } return encode((byte[]) pObject); } /** * Encodes a byte[] containing binary data, into a byte[] containing * characters in the Base64 alphabet. * * @param pArray a byte array containing binary data * @return A byte array containing only Base64 character data */ public byte[] encode(byte[] pArray) { return encodeBase64(pArray, false); } }
apache-2.0
apache/incubator-asterixdb
asterixdb/asterix-transactions/src/main/java/org/apache/asterix/transaction/management/service/locking/ConcurrentLockManager.java
43046
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.asterix.transaction.management.service.locking;

import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.concurrent.TimeUnit;

import org.apache.asterix.common.exceptions.ACIDException;
import org.apache.asterix.common.transactions.DatasetId;
import org.apache.asterix.common.transactions.ILockManager;
import org.apache.asterix.common.transactions.ITransactionContext;
import org.apache.asterix.common.transactions.ITransactionManager;
import org.apache.asterix.transaction.management.service.transaction.TransactionManagementConstants.LockManagerConstants.LockMode;
import org.apache.hyracks.api.lifecycle.ILifeCycleComponent;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

import it.unimi.dsi.fastutil.longs.Long2LongMap;
import it.unimi.dsi.fastutil.longs.Long2LongMaps;
import it.unimi.dsi.fastutil.longs.Long2LongOpenHashMap;
import it.unimi.dsi.fastutil.longs.LongArrayList;
import it.unimi.dsi.fastutil.longs.LongList;

/**
 * A concurrent implementation of the ILockManager interface.
 * <p>
 * Locks are tracked in three slot arenas (resources, requests, jobs) addressed by long slot ids;
 * resources are bucketed into latched {@link ResourceGroup}s obtained from the {@link ResourceGroupTable}.
 * Lock compatibility/upgrade decisions are table-driven via {@code ACTION_MATRIX}.
 *
 * @see ResourceGroupTable
 * @see ResourceGroup
 */
@SuppressWarnings("squid:RedundantThrowsDeclarationCheck") // throws ACIDException
public class ConcurrentLockManager implements ILockManager, ILifeCycleComponent {

    static final Logger LOGGER = LogManager.getLogger();
    static final Level LVL = Level.TRACE;
    // When true, lock conversion (upgrade) is rejected outright and deadlock detection
    // short-circuits to "no deadlock" (see introducesDeadlock / updateActionForSameJob).
    private static final boolean ENABLED_DEADLOCK_FREE_LOCKING_PROTOCOL = true;

    // Sentinels for "no value": NIL for int-valued ids, NILL for long-valued slot ids.
    private static final int NIL = -1;
    static final long NILL = -1L;

    private static final boolean DEBUG_MODE = false;//true
    private static final boolean CHECK_CONSISTENCY = false;

    private final ResourceGroupTable table;
    private final ResourceArenaManager resArenaMgr;
    private final RequestArenaManager reqArenaMgr;
    private final JobArenaManager jobArenaMgr;
    // Maps txnId -> job slot. NOTE: a missing key yields 0 (the fastutil default return
    // value), which is why callers test "jobSlot == 0" for absence and assert jobSlot > 0.
    private final Long2LongMap txnId2TxnSlotMap;
    private final LockManagerStats stats = new LockManagerStats(10000);

    /**
     * The action to take for a lock request, chosen by indexing ACTION_MATRIX with
     * [current max mode of the resource][requested mode].
     */
    enum LockAction {
        ERR(false, false),
        GET(false, false),
        UPD(false, true), // version of GET that updates the max lock mode
        WAIT(true, false),
        CONV(true, true) // convert (upgrade) a lock (e.g. from S to X)
        ;

        boolean wait; // request must block before it can be granted
        boolean modify; // request changes the resource's max lock mode

        LockAction(boolean wait, boolean modify) {
            this.wait = wait;
            this.modify = modify;
        }
    }

    // Rows are the resource's current max mode, columns the newly requested mode.
    private static final LockAction[][] ACTION_MATRIX = {
            // new NL IS IX S X
            { LockAction.ERR, LockAction.UPD, LockAction.UPD, LockAction.UPD, LockAction.UPD }, // NL
            { LockAction.ERR, LockAction.GET, LockAction.UPD, LockAction.UPD, LockAction.WAIT }, // IS
            { LockAction.ERR, LockAction.GET, LockAction.GET, LockAction.WAIT, LockAction.WAIT }, // IX
            { LockAction.ERR, LockAction.GET, LockAction.WAIT, LockAction.GET, LockAction.WAIT }, // S
            { LockAction.ERR, LockAction.WAIT, LockAction.WAIT, LockAction.WAIT, LockAction.WAIT } // X
    };

    public ConcurrentLockManager(final int lockManagerShrinkTimer, int tableSize) throws ACIDException {
        // Default arena count: 2x the available processors.
        this(lockManagerShrinkTimer, Runtime.getRuntime().availableProcessors() * 2, tableSize);
    }

    public ConcurrentLockManager(final int lockManagerShrinkTimer, final int noArenas, final int tableSize)
            throws ACIDException {
        table = new ResourceGroupTable(tableSize);
        resArenaMgr = new ResourceArenaManager(noArenas, lockManagerShrinkTimer);
        reqArenaMgr = new RequestArenaManager(noArenas, lockManagerShrinkTimer);
        jobArenaMgr = new JobArenaManager(noArenas, lockManagerShrinkTimer);
        txnId2TxnSlotMap = Long2LongMaps.synchronize(new Long2LongOpenHashMap());
    }

    /**
     * Acquires (possibly after blocking) a lock on (datasetId, entityHashValue) in the given
     * mode for the requesting transaction. Blocks in enqueueWaiter() until the request can be
     * granted; aborts the transaction instead if granting it would introduce a deadlock.
     */
    @Override
    public void lock(DatasetId datasetId, int entityHashValue, byte lockMode, ITransactionContext txnContext)
            throws ACIDException {
        log("lock", datasetId.getId(), entityHashValue, lockMode, txnContext);
        stats.lock();

        final long txnId = txnContext.getTxnId().getId();
        final long jobSlot = findOrAllocJobSlot(txnId);
        final ResourceGroup group = table.get(datasetId.getId(), entityHashValue);
        group.getLatch();
        try {
            validateJob(txnContext);
            final long resSlot = findOrAllocResourceSlot(group, datasetId.getId(), entityHashValue);
            final long reqSlot = allocRequestSlot(resSlot, jobSlot, lockMode);
            boolean locked = false;
            while (!locked) {
                final LockAction act = determineLockAction(resSlot, jobSlot, lockMode);
                switch (act) {
                    case CONV:
                        // upgrade: abort on deadlock, wait if other jobs still hold the
                        // resource, otherwise fall through and grant the upgrade
                        if (introducesDeadlock(resSlot, jobSlot, NOPTracker.INSTANCE)) {
                            // re-run detection with a collecting tracker to build the abort message
                            DeadlockTracker tracker = new CollectingTracker();
                            tracker.pushJob(jobSlot);
                            introducesDeadlock(resSlot, jobSlot, tracker);
                            requestAbort(txnContext, tracker.toString());
                            break;
                        } else if (hasOtherHolders(resSlot, jobSlot)) {
                            enqueueWaiter(group, reqSlot, resSlot, jobSlot, act, txnContext);
                            break;
                        }
                        //fall-through
                    case UPD:
                        resArenaMgr.setMaxMode(resSlot, lockMode);
                        //fall-through
                    case GET:
                        addHolder(reqSlot, resSlot, jobSlot);
                        locked = true;
                        break;
                    case WAIT:
                        enqueueWaiter(group, reqSlot, resSlot, jobSlot, act, txnContext);
                        break;
                    case ERR:
                    default:
                        throw new IllegalStateException();
                }
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new ACIDException(e);
        } finally {
            group.releaseLatch();
        }
        if (CHECK_CONSISTENCY) {
            assertLocksCanBefoundInJobQueue();
        }
    }

    /**
     * Registers the request as a waiter (or upgrader, for lock conversions) and blocks on the
     * group's condition until woken. Aborts the transaction instead of enqueueing if waiting
     * would introduce a deadlock. Called while holding the group latch; group.await() is
     * expected to release/reacquire it (TODO confirm against ResourceGroup).
     */
    private void enqueueWaiter(final ResourceGroup group, final long reqSlot, final long resSlot, final long jobSlot,
            final LockAction act, ITransactionContext txnContext) throws ACIDException, InterruptedException {
        final Queue queue = act.modify ? upgrader : waiter;
        if (introducesDeadlock(resSlot, jobSlot, NOPTracker.INSTANCE)) {
            DeadlockTracker tracker = new CollectingTracker();
            tracker.pushJob(jobSlot);
            introducesDeadlock(resSlot, jobSlot, tracker);
            requestAbort(txnContext, tracker.toString());
        } else {
            queue.add(reqSlot, resSlot, jobSlot);
        }
        try {
            group.await(txnContext);
        } finally {
            // always dequeue, whether woken normally or unwinding on abort/interrupt
            queue.remove(reqSlot, resSlot, jobSlot);
        }
    }

    /** Callback used to record the resource/request/job path walked during deadlock detection. */
    interface DeadlockTracker {
        void pushResource(long resSlot);

        void pushRequest(long reqSlot);

        void pushJob(long jobSlot);

        void pop();
    }

    /** Tracker that records nothing; used for the fast yes/no deadlock check. */
    private static class NOPTracker implements DeadlockTracker {
        static final DeadlockTracker INSTANCE = new NOPTracker();

        @Override
        public void pushResource(long resSlot) {
            // no-op
        }

        @Override
        public void pushRequest(long reqSlot) {
            // no-op
        }

        @Override
        public void pushJob(long jobSlot) {
            // no-op
        }

        @Override
        public void pop() {
            // no-op
        }
    }

    /** Tracker that records the wait-graph path so it can be printed in the abort message. */
    private static class CollectingTracker implements DeadlockTracker {

        static final boolean DEBUG = false;

        // Parallel lists: types.get(i) labels the kind of slot stored at slots.getLong(i).
        LongList slots = new LongArrayList();
        ArrayList<String> types = new ArrayList<>();

        @Override
        public void pushResource(long resSlot) {
            types.add("Resource");
            slots.add(resSlot);
            if (DEBUG) {
                LOGGER.info("push " + types.get(types.size() - 1) + " " + slots.getLong(slots.size() - 1));
            }
        }

        @Override
        public void pushRequest(long reqSlot) {
            types.add("Request");
            slots.add(reqSlot);
            if (DEBUG) {
                LOGGER.info("push " + types.get(types.size() - 1) + " " + slots.getLong(slots.size() - 1));
            }
        }

        @Override
        public void pushJob(long jobSlot) {
            types.add("Job");
            slots.add(jobSlot);
            if (DEBUG) {
                LOGGER.info("push " + types.get(types.size() - 1) + " " + slots.getLong(slots.size() - 1));
            }
        }

        @Override
        public void pop() {
            if (DEBUG) {
                LOGGER.info("pop " + types.get(types.size() - 1) + " " + slots.getLong(slots.size() - 1));
            }
            types.remove(types.size() - 1);
            slots.removeLong(slots.size() - 1);
        }

        @Override
        public String toString() {
            StringBuilder sb = new StringBuilder();
            for (int i = 0; i < slots.size(); ++i) {
                sb.append(types.get(i)).append(" ").append(TypeUtil.Global.toString(slots.getLong(i))).append("\n");
            }
            return sb.toString();
        }
    }

    /**
     * determine if adding a job to the waiters of a resource will introduce a
     * cycle in the wait-graph where the job waits on itself - but not directly on itself (which happens e.g. in the
     * case of upgrading a lock from S to X).
     *
     * @param resSlot
     *            the slot that contains the information about the resource
     * @param jobSlot
     *            the slot that contains the information about the job
     * @return true if a cycle would be introduced, false otherwise
     */
    private boolean introducesDeadlock(final long resSlot, final long jobSlot, final DeadlockTracker tracker) {
        /*
         * Due to the deadlock-free locking protocol, deadlock is not possible.
         * So, this method always returns false in that case
         */
        return !ENABLED_DEADLOCK_FREE_LOCKING_PROTOCOL && introducesDeadlock(resSlot, jobSlot, tracker, 0);
    }

    /**
     * Depth-first search of the wait-graph: for each holder of resSlot, follow everything that
     * holder's job is waiting on (waiters first, then upgraders) and recurse. A cycle is found
     * when jobSlot reappears as a holder at depth > 0 (depth 0 would be the job itself, which
     * is the legal self-wait of an upgrade).
     */
    private boolean introducesDeadlock(final long resSlot, final long jobSlot, final DeadlockTracker tracker,
            final int depth) {
        synchronized (jobArenaMgr) {
            tracker.pushResource(resSlot);
            long reqSlot = resArenaMgr.getLastHolder(resSlot);
            while (reqSlot >= 0) {
                tracker.pushRequest(reqSlot);
                final long holderJobSlot = reqArenaMgr.getJobSlot(reqSlot);
                tracker.pushJob(holderJobSlot);
                if (holderJobSlot == jobSlot && depth != 0) {
                    return true;
                }
                // To determine if we have a deadlock we need to look at the waiters and at the upgraders.
                // The scanWaiters flag indicates if we are currently scanning the waiters (true) or the upgraders
                // (false).
                boolean scanWaiters = true;
                long jobWaiter = jobArenaMgr.getLastWaiter(holderJobSlot);
                if (jobWaiter < 0) {
                    scanWaiters = false;
                    jobWaiter = jobArenaMgr.getLastUpgrader(holderJobSlot);
                }
                while (jobWaiter >= 0) {
                    long waitingOnResSlot = reqArenaMgr.getResourceId(jobWaiter);
                    if (introducesDeadlock(waitingOnResSlot, jobSlot, tracker, depth + 1)) {
                        return true;
                    }
                    jobWaiter = reqArenaMgr.getNextJobRequest(jobWaiter);
                    if (jobWaiter < 0 && scanWaiters) {
                        // exhausted the waiter list; switch over to the upgrader list
                        scanWaiters = false;
                        jobWaiter = jobArenaMgr.getLastUpgrader(holderJobSlot);
                    }
                }
                tracker.pop(); // job
                tracker.pop(); // request
                reqSlot = reqArenaMgr.getNextRequest(reqSlot);
            }
            tracker.pop(); // resource
            return false;
        }
    }

    /**
     * Acquires and immediately releases a lock: returns normally once the lock could have been
     * granted, without leaving the transaction as a holder. A request slot is only allocated
     * if the caller actually has to wait, and is deallocated on exit.
     */
    @Override
    public void instantLock(DatasetId datasetId, int entityHashValue, byte lockMode, ITransactionContext txnContext)
            throws ACIDException {
        log("instantLock", datasetId.getId(), entityHashValue, lockMode, txnContext);
        stats.instantLock();

        final long txnId = txnContext.getTxnId().getId();
        final ResourceGroup group = table.get(datasetId.getId(), entityHashValue);
        if (group.firstResourceIndex == NILL) {
            validateJob(txnContext);
            // if we do not have a resource in the group, we know that the
            // resource that we are looking for is not locked
            return;
        }

        // we only allocate a request slot if we actually have to wait
        long reqSlot = NILL;

        group.getLatch();
        try {
            validateJob(txnContext);
            final long resSlot = findResourceInGroup(group, datasetId.getId(), entityHashValue);
            if (resSlot < 0) {
                // if we don't find the resource, there are no locks on it.
                return;
            }
            final long jobSlot = findOrAllocJobSlot(txnId);
            while (true) {
                final LockAction act = determineLockAction(resSlot, jobSlot, lockMode);
                switch (act) {
                    case UPD:
                    case GET:
                        // compatible right now - nothing to hold, so we're done
                        return;
                    case WAIT:
                    case CONV:
                        if (reqSlot == NILL) {
                            reqSlot = allocRequestSlot(resSlot, jobSlot, lockMode);
                        }
                        enqueueWaiter(group, reqSlot, resSlot, jobSlot, act, txnContext);
                        break;
                    case ERR:
                    default:
                        throw new IllegalStateException();
                }
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new ACIDException(e);
        } finally {
            if (reqSlot != NILL) {
                // deallocate request, if we allocated one earlier
                if (DEBUG_MODE) {
                    LOGGER.trace("del req slot " + TypeUtil.Global.toString(reqSlot));
                }
                reqArenaMgr.deallocate(reqSlot);
            }
            group.releaseLatch();
        }
    }

    /**
     * Non-blocking lock attempt: grants and records the lock if it is immediately compatible,
     * otherwise returns false without waiting.
     *
     * @return true iff the lock was acquired
     */
    @Override
    public boolean tryLock(DatasetId datasetId, int entityHashValue, byte lockMode, ITransactionContext txnContext)
            throws ACIDException {
        log("tryLock", datasetId.getId(), entityHashValue, lockMode, txnContext);
        stats.tryLock();

        final long txnId = txnContext.getTxnId().getId();
        final long jobSlot = findOrAllocJobSlot(txnId);
        final ResourceGroup group = table.get(datasetId.getId(), entityHashValue);
        group.getLatch();
        try {
            validateJob(txnContext);
            final long resSlot = findOrAllocResourceSlot(group, datasetId.getId(), entityHashValue);
            final long reqSlot = allocRequestSlot(resSlot, jobSlot, lockMode);

            final LockAction act = determineLockAction(resSlot, jobSlot, lockMode);
            switch (act) {
                case UPD:
                    resArenaMgr.setMaxMode(resSlot, lockMode);
                    //fall-through
                case GET:
                    addHolder(reqSlot, resSlot, jobSlot);
                    return true;
                case WAIT:
                case CONV:
                    // NOTE(review): the request slot allocated above appears to be left
                    // allocated on this path - confirm whether that is intentional
                    return false;
                default:
                    throw new IllegalStateException();
            }
        } finally {
            group.releaseLatch();
        }
    }

    /**
     * Non-blocking instant lock: returns true if the lock could be granted right now, without
     * recording the transaction as a holder and without waiting.
     */
    @Override
    public boolean instantTryLock(DatasetId datasetId, int entityHashValue, byte lockMode,
            ITransactionContext txnContext) throws ACIDException {
        log("instantTryLock", datasetId.getId(), entityHashValue, lockMode, txnContext);
        stats.instantTryLock();

        final long txnId = txnContext.getTxnId().getId();
        final ResourceGroup group = table.get(datasetId.getId(), entityHashValue);
        if (group.firstResourceIndex == NILL) {
            validateJob(txnContext);
            // if we do not have a resource in the group, we know that the
            // resource that we are looking for is not locked
            return true;
        }

        group.getLatch();
        try {
            validateJob(txnContext);
            final long resSlot = findResourceInGroup(group, datasetId.getId(), entityHashValue);
            if (resSlot < 0) {
                // if we don't find the resource, there are no locks on it.
                return true;
            }
            final long jobSlot = findOrAllocJobSlot(txnId);
            LockAction act = determineLockAction(resSlot, jobSlot, lockMode);
            switch (act) {
                case UPD:
                case GET:
                    return true;
                case WAIT:
                case CONV:
                    return false;
                case ERR:
                default:
                    throw new IllegalStateException();
            }
        } finally {
            group.releaseLatch();
        }
    }

    @Override
    public void unlock(DatasetId datasetId, int entityHashValue, byte lockMode, ITransactionContext txnContext)
            throws ACIDException {
        log("unlock", datasetId.getId(), entityHashValue, lockMode, txnContext);
        final long txnId = txnContext.getTxnId().getId();
        final long jobSlot = txnId2TxnSlotMap.get(txnId);
        unlock(datasetId.getId(), entityHashValue, lockMode, jobSlot);
    }

    /**
     * Releases one lock held by jobSlot on (dsId, entityHashValue): removes the matching
     * request, frees the resource slot if no holders/waiters/upgraders remain, or otherwise
     * recomputes the resource's max mode and wakes up waiters on the group.
     */
    private void unlock(int dsId, int entityHashValue, byte lockMode, long jobSlot) throws ACIDException {
        log("unlock", dsId, entityHashValue, lockMode, null);
        stats.unlock();

        ResourceGroup group = table.get(dsId, entityHashValue);
        group.getLatch();
        try {
            long resource = findResourceInGroup(group, dsId, entityHashValue);
            if (resource < 0) {
                throw new IllegalStateException("resource (" + dsId + ", " + entityHashValue + ") not found");
            }

            if (CHECK_CONSISTENCY) {
                assertLocksCanBefoundInJobQueue();
            }

            long holder = removeLastHolder(resource, jobSlot, lockMode);

            // deallocate request
            if (DEBUG_MODE) {
                LOGGER.trace("del req slot " + TypeUtil.Global.toString(holder));
            }
            reqArenaMgr.deallocate(holder);
            // deallocate resource or fix max lock mode
            if (resourceNotUsed(resource)) {
                // unlink the resource from the group's singly-linked resource list
                long prev = group.firstResourceIndex;
                if (prev == resource) {
                    group.firstResourceIndex = resArenaMgr.getNext(resource);
                } else {
                    while (resArenaMgr.getNext(prev) != resource) {
                        prev = resArenaMgr.getNext(prev);
                    }
                    resArenaMgr.setNext(prev, resArenaMgr.getNext(resource));
                }
                if (DEBUG_MODE) {
                    LOGGER.trace("del res slot " + TypeUtil.Global.toString(resource));
                }
                resArenaMgr.deallocate(resource);
            } else {
                final int oldMaxMode = resArenaMgr.getMaxMode(resource);
                final int newMaxMode = determineNewMaxMode(resource, oldMaxMode);
                resArenaMgr.setMaxMode(resource, newMaxMode);
                group.wakeUp();
            }
        } finally {
            group.releaseLatch();
        }
    }

    /**
     * Releases all locks held by the transaction (called at commit/abort), then frees its job
     * slot and removes it from the txnId map.
     */
    @Override
    public void releaseLocks(ITransactionContext txnContext) throws ACIDException {
        log("releaseLocks", NIL, NIL, LockMode.ANY, txnContext);
        stats.releaseLocks();

        long txnId = txnContext.getTxnId().getId();
        long jobSlot = txnId2TxnSlotMap.get(txnId);
        if (jobSlot == 0) {
            // we don't know the job, so there are no locks for it - we're done
            return;
        }
        //System.err.println(table.append(new StringBuilder(), true).toString());
        if (LOGGER.isEnabled(LVL)) {
            LOGGER.log(LVL, "jobArenaMgr " + jobArenaMgr.addTo(new RecordManagerStats()).toString());
            LOGGER.log(LVL, "resArenaMgr " + resArenaMgr.addTo(new RecordManagerStats()).toString());
            LOGGER.log(LVL, "reqArenaMgr " + reqArenaMgr.addTo(new RecordManagerStats()).toString());
        }
        long holder;
        synchronized (jobArenaMgr) {
            holder = jobArenaMgr.getLastHolder(jobSlot);
        }
        // repeatedly unlock the head of the job's holder list until the list is empty;
        // unlock() itself unlinks the request, so we re-read the head each iteration
        while (holder != NILL) {
            long resource = reqArenaMgr.getResourceId(holder);
            int dsId = resArenaMgr.getDatasetId(resource);
            int pkHashVal = resArenaMgr.getPkHashVal(resource);
            unlock(dsId, pkHashVal, LockMode.ANY, jobSlot);
            synchronized (jobArenaMgr) {
                holder = jobArenaMgr.getLastHolder(jobSlot);
            }
        }
        if (DEBUG_MODE) {
            LOGGER.trace("del job slot " + TypeUtil.Global.toString(jobSlot));
        }
        jobArenaMgr.deallocate(jobSlot);
        txnId2TxnSlotMap.remove(txnId);
        stats.logCounters(LOGGER, Level.DEBUG, true);
    }

    /**
     * Returns the job slot for txnId, allocating one if none exists yet. Safe under
     * concurrent callers: a racing allocation is detected via putIfAbsent and undone.
     */
    private long findOrAllocJobSlot(long txnId) {
        long jobSlot = txnId2TxnSlotMap.get(txnId);
        if (jobSlot == 0) {
            jobSlot = jobArenaMgr.allocate();
            if (DEBUG_MODE) {
                LOGGER.trace("new job slot " + TypeUtil.Global.toString(jobSlot) + " (" + txnId + ")");
            }
            jobArenaMgr.setTxnId(jobSlot, txnId);
            long oldSlot = txnId2TxnSlotMap.putIfAbsent(txnId, jobSlot);
            if (oldSlot != 0) {
                // if another thread allocated a slot for this jobThreadId between
                // get(..) and putIfAbsent(..), we'll use that slot and
                // deallocate the one we allocated
                if (DEBUG_MODE) {
                    LOGGER.trace("del job slot " + TypeUtil.Global.toString(jobSlot) + " due to conflict");
                }
                jobArenaMgr.deallocate(jobSlot);
                jobSlot = oldSlot;
            }
        }
        assert jobSlot > 0;
        return jobSlot;
    }

    /**
     * Returns the resource slot for (dsId, entityHashValue) within the group, allocating and
     * linking a new slot at the head of the group's resource list if necessary.
     * Caller must hold the group latch.
     */
    private long findOrAllocResourceSlot(ResourceGroup group, int dsId, int entityHashValue) {
        long resSlot = findResourceInGroup(group, dsId, entityHashValue);

        if (resSlot == NILL) {
            // we don't know about this resource, let's alloc a slot
            resSlot = resArenaMgr.allocate();
            resArenaMgr.setDatasetId(resSlot, dsId);
            resArenaMgr.setPkHashVal(resSlot, entityHashValue);
            resArenaMgr.setNext(resSlot, group.firstResourceIndex);
            group.firstResourceIndex = resSlot;
            if (DEBUG_MODE) {
                LOGGER.trace("new res slot " + TypeUtil.Global.toString(resSlot) + " (" + dsId + ", "
                        + entityHashValue + ")");
            }
        } else {
            if (DEBUG_MODE) {
                LOGGER.trace("fnd res slot " + TypeUtil.Global.toString(resSlot) + " (" + dsId + ", "
                        + entityHashValue + ")");
            }
        }
        return resSlot;
    }

    /** Allocates and initializes a request slot tying (resSlot, jobSlot, lockMode) together. */
    private long allocRequestSlot(long resSlot, long jobSlot, byte lockMode) {
        long reqSlot = reqArenaMgr.allocate();
        reqArenaMgr.setResourceId(reqSlot, resSlot);
        reqArenaMgr.setLockMode(reqSlot, lockMode); // lock mode is a byte!!
        reqArenaMgr.setJobSlot(reqSlot, jobSlot);
        if (DEBUG_MODE) {
            LOGGER.trace("new req slot " + TypeUtil.Global.toString(reqSlot) + " ("
                    + TypeUtil.Global.toString(resSlot) + ", " + TypeUtil.Global.toString(jobSlot) + ", "
                    + LockMode.toString(lockMode) + ")");
        }
        return reqSlot;
    }

    /**
     * Looks up the action for the requested mode against the resource's current max mode;
     * a WAIT verdict is refined by updateActionForSameJob, since a conflict with our own
     * holdings is not a real conflict.
     */
    private LockAction determineLockAction(long resSlot, long jobSlot, byte lockMode) {
        final int curLockMode = resArenaMgr.getMaxMode(resSlot);
        final LockAction act = ACTION_MATRIX[curLockMode][lockMode];
        if (act == LockAction.WAIT) {
            return updateActionForSameJob(resSlot, jobSlot, lockMode);
        }
        return act;
    }

    /**
     * when we've got a lock conflict for a different job, we always have to
     * wait, if it is for the same job we either have to
     * a) (wait and) convert the lock once conversion becomes viable or
     * b) acquire the lock if we want to lock the same resource with the same
     * lock mode for the same job.
     *
     * @param resource
     *            the resource slot that's being locked
     * @param job
     *            the job slot of the job locking the resource
     * @param lockMode
     *            the lock mode that the resource should be locked with
     * @return GET if this job already holds the same mode, CONV if it holds a different mode
     *         (only when lock conversion is allowed), WAIT otherwise
     */
    private LockAction updateActionForSameJob(long resource, long job, byte lockMode) {
        // TODO we can reduce the number of things we have to look at by
        // carefully distinguishing the different lock modes
        long holder = resArenaMgr.getLastHolder(resource);
        LockAction res = LockAction.WAIT;
        while (holder != NILL) {
            if (job == reqArenaMgr.getJobSlot(holder)) {
                if (reqArenaMgr.getLockMode(holder) == lockMode) {
                    return LockAction.GET;
                } else {
                    if (ENABLED_DEADLOCK_FREE_LOCKING_PROTOCOL) {
                        throw new IllegalStateException(
                                "Lock conversion is not supported when deadlock-free locking protocol is enabled!");
                    }
                    res = LockAction.CONV;
                }
            }
            holder = reqArenaMgr.getNextRequest(holder);
        }
        return res;
    }

    /**
     * Walks the group's resource list for an exact (dsId, entityHashValue) match.
     * Returns NILL if the resource is not present. Caller must hold the group latch.
     */
    private long findResourceInGroup(ResourceGroup group, int dsId, int entityHashValue) {
        stats.logCounters(LOGGER, LVL, false);
        long resSlot = group.firstResourceIndex;
        while (resSlot != NILL) {
            // either we already have a lock on this resource or we have a
            // hash collision
            if (resArenaMgr.getDatasetId(resSlot) == dsId && resArenaMgr.getPkHashVal(resSlot) == entityHashValue) {
                return resSlot;
            } else {
                resSlot = resArenaMgr.getNext(resSlot);
            }
        }
        return NILL;
    }

    /**
     * Records a granted request: prepends it to the resource's holder list and to the job's
     * holder list (the latter under the jobArenaMgr lock).
     */
    private void addHolder(long request, long resource, long job) {
        long lastHolder = resArenaMgr.getLastHolder(resource);
        reqArenaMgr.setNextRequest(request, lastHolder);
        resArenaMgr.setLastHolder(resource, request);
        synchronized (jobArenaMgr) {
            long lastJobHolder = jobArenaMgr.getLastHolder(job);
            insertIntoJobQueue(request, lastJobHolder);
            jobArenaMgr.setLastHolder(job, request);
        }
    }

    /** Returns true iff any job other than jobSlot currently holds the resource. */
    private boolean hasOtherHolders(long resSlot, long jobSlot) {
        long holder = resArenaMgr.getLastHolder(resSlot);
        while (holder != NILL) {
            if (reqArenaMgr.getJobSlot(holder) != jobSlot) {
                return true;
            }
            holder = reqArenaMgr.getNextRequest(holder);
        }
        return false;
    }

    /**
     * Unlinks the first request of jobSlot (optionally restricted to lockMode; LockMode.ANY
     * matches any mode) from both the resource's holder list and the job's request list.
     *
     * @return the slot of the removed request
     */
    private long removeLastHolder(long resource, long jobSlot, byte lockMode) {
        long holder = resArenaMgr.getLastHolder(resource);
        if (holder < 0) {
            throw new IllegalStateException("no holder for resource " + resource);
        }
        // remove from the list of holders for a resource
        if (requestMatches(holder, jobSlot, lockMode)) {
            // if the head of the queue matches, we need to update the resource
            long next = reqArenaMgr.getNextRequest(holder);
            resArenaMgr.setLastHolder(resource, next);
        } else {
            holder = removeRequestFromQueueForJob(holder, jobSlot, lockMode);
        }

        synchronized (jobArenaMgr) {
            // remove from the list of requests for a job
            long newHead = removeRequestFromJob(holder, jobArenaMgr.getLastHolder(jobSlot));
            jobArenaMgr.setLastHolder(jobSlot, newHead);
        }
        return holder;
    }

    /** True iff the request belongs to jobSlot and its mode matches (LockMode.ANY matches all). */
    private boolean requestMatches(long holder, long jobSlot, byte lockMode) {
        return jobSlot == reqArenaMgr.getJobSlot(holder)
                && (lockMode == LockMode.ANY || lockMode == reqArenaMgr.getLockMode(holder));
    }

    /**
     * Unlinks holder from the doubly-linked per-job request list.
     *
     * @param unmodified
     *            the current head of the job's list, returned unchanged when the head is not
     *            the removed request
     * @return the (possibly new) head of the job's request list
     */
    private long removeRequestFromJob(long holder, long unmodified) {
        long prevForJob = reqArenaMgr.getPrevJobRequest(holder);
        long nextForJob = reqArenaMgr.getNextJobRequest(holder);
        if (nextForJob != NILL) {
            reqArenaMgr.setPrevJobRequest(nextForJob, prevForJob);
        }
        if (prevForJob == NILL) {
            return nextForJob;
        } else {
            reqArenaMgr.setNextJobRequest(prevForJob, nextForJob);
            return unmodified;
        }
    }

    /** Abstraction over the two blocked-request queues (plain waiters and lock upgraders). */
    interface Queue {
        void add(long request, long resource, long job);

        void remove(long request, long resource, long job);
    }

    // Queue of requests waiting for a resource in a mode that is not yet compatible.
    private final Queue waiter = new Queue() {
        @Override
        public void add(long request, long resource, long job) {
            // append to the resource's FIFO waiter list...
            long waiter = resArenaMgr.getFirstWaiter(resource);
            reqArenaMgr.setNextRequest(request, NILL);
            if (waiter == NILL) {
                resArenaMgr.setFirstWaiter(resource, request);
            } else {
                appendToRequestQueue(waiter, request);
            }
            // ...and prepend to the job's waiter list
            synchronized (jobArenaMgr) {
                waiter = jobArenaMgr.getLastWaiter(job);
                insertIntoJobQueue(request, waiter);
                jobArenaMgr.setLastWaiter(job, request);
            }
        }

        @Override
        public void remove(long request, long resource, long job) {
            long waiter = resArenaMgr.getFirstWaiter(resource);
            if (waiter == request) {
                long next = reqArenaMgr.getNextRequest(waiter);
                resArenaMgr.setFirstWaiter(resource, next);
            } else {
                waiter = removeRequestFromQueueForSlot(waiter, request);
            }
            synchronized (jobArenaMgr) {
                // remove from the list of requests for a job
                long newHead = removeRequestFromJob(waiter, jobArenaMgr.getLastWaiter(job));
                jobArenaMgr.setLastWaiter(job, newHead);
            }
        }
    };

    // Queue of requests waiting to convert (upgrade) an already-held lock.
    private final Queue upgrader = new Queue() {
        @Override
        public void add(long request, long resource, long job) {
            long upgrader = resArenaMgr.getFirstUpgrader(resource);
            reqArenaMgr.setNextRequest(request, NILL);
            if (upgrader == NILL) {
                resArenaMgr.setFirstUpgrader(resource, request);
            } else {
                appendToRequestQueue(upgrader, request);
            }
            synchronized (jobArenaMgr) {
                upgrader = jobArenaMgr.getLastUpgrader(job);
                insertIntoJobQueue(request, upgrader);
                jobArenaMgr.setLastUpgrader(job, request);
            }
        }

        @Override
        public void remove(long request, long resource, long job) {
            long upgrader = resArenaMgr.getFirstUpgrader(resource);
            if (upgrader == request) {
                long next = reqArenaMgr.getNextRequest(upgrader);
                resArenaMgr.setFirstUpgrader(resource, next);
            } else {
                upgrader = removeRequestFromQueueForSlot(upgrader, request);
            }
            synchronized (jobArenaMgr) {
                // remove from the list of requests for a job
                long newHead = removeRequestFromJob(upgrader, jobArenaMgr.getLastUpgrader(job));
                jobArenaMgr.setLastUpgrader(job, newHead);
            }
        }
    };

    /** Prepends newRequest in front of oldRequest in a doubly-linked per-job request list. */
    private void insertIntoJobQueue(long newRequest, long oldRequest) {
        reqArenaMgr.setNextJobRequest(newRequest, oldRequest);
        reqArenaMgr.setPrevJobRequest(newRequest, NILL);
        if (oldRequest >= 0) {
            reqArenaMgr.setPrevJobRequest(oldRequest, newRequest);
        }
    }

    /** Appends appendee to the end of the singly-linked request queue starting at head. */
    private void appendToRequestQueue(long head, long appendee) {
        long next = reqArenaMgr.getNextRequest(head);
        while (next != NILL) {
            head = next;
            next = reqArenaMgr.getNextRequest(head);
        }
        reqArenaMgr.setNextRequest(head, appendee);
    }

    /**
     * Removes the request with slot reqSlot from a singly-linked request queue. The head
     * itself is never the target here (callers handle that case); throws if not found.
     *
     * @return the removed request slot
     */
    private long removeRequestFromQueueForSlot(long head, long reqSlot) {
        long cur = head;
        long prev = cur;
        while (prev != NILL) {
            cur = reqArenaMgr.getNextRequest(prev);
            if (cur == NILL) {
                throw new IllegalStateException("request " + reqSlot + " not in queue");
            }
            if (cur == reqSlot) {
                break;
            }
            prev = cur;
        }
        long next = reqArenaMgr.getNextRequest(cur);
        reqArenaMgr.setNextRequest(prev, next);
        return cur;
    }

    /**
     * remove the first request for a given job and lock mode from a request queue.
     * If the value of the parameter lockMode is LockMode.ANY the first request
     * for the job is removed - independent of the LockMode.
     *
     * @param head
     *            the head of the request queue
     * @param jobSlot
     *            the job slot
     * @param lockMode
     *            the lock mode
     * @return the slot of the first request that matched the given job
     */
    private long removeRequestFromQueueForJob(long head, long jobSlot, byte lockMode) {
        long holder = head;
        long prev = holder;
        while (prev != NILL) {
            holder = reqArenaMgr.getNextRequest(prev);
            if (holder == NILL) {
                throw new IllegalStateException("no entry for job " + jobSlot + " in queue");
            }
            if (requestMatches(holder, jobSlot, lockMode)) {
                break;
            }
            prev = holder;
        }
        long next = reqArenaMgr.getNextRequest(holder);
        reqArenaMgr.setNextRequest(prev, next);
        return holder;
    }

    /**
     * Recomputes the strongest lock mode among the remaining holders of a resource after a
     * request was removed, using ACTION_MATRIX (an UPD verdict means "stronger mode found").
     * Short-circuits when another holder still has the old max mode.
     */
    private int determineNewMaxMode(long resource, int oldMaxMode) {
        int newMaxMode = LockMode.NL;
        long holder = resArenaMgr.getLastHolder(resource);
        while (holder != NILL) {
            int curLockMode = reqArenaMgr.getLockMode(holder);
            if (curLockMode == oldMaxMode) {
                // we have another lock of the same mode - we're done
                return oldMaxMode;
            }
            switch (ACTION_MATRIX[newMaxMode][curLockMode]) {
                case UPD:
                    newMaxMode = curLockMode;
                    break;
                case GET:
                    break;
                case WAIT:
                case CONV:
                case ERR:
                    throw new IllegalStateException("incompatible locks in holder queue");
            }
            holder = reqArenaMgr.getNextRequest(holder);
        }
        return newMaxMode;
    }

    /** True iff the resource slot has no holders, waiters, or upgraders and can be freed. */
    private boolean resourceNotUsed(long resource) {
        return resArenaMgr.getLastHolder(resource) == NILL && resArenaMgr.getFirstUpgrader(resource) == NILL
                && resArenaMgr.getFirstWaiter(resource) == NILL;
    }

    /** Fails fast if the transaction is already aborted, or requests an abort on timeout. */
    private void validateJob(ITransactionContext txnContext) throws ACIDException {
        if (txnContext.getTxnState() == ITransactionManager.ABORTED) {
            throw new ACIDException("" + txnContext.getTxnId() + " is in ABORTED state.");
        } else if (txnContext.isTimeout()) {
            requestAbort(txnContext, "timeout");
        }
    }

    /** Marks the transaction for abort and raises an ACIDException carrying the reason. */
    private void requestAbort(ITransactionContext txnContext, String msg) throws ACIDException {
        txnContext.setTimeout(true);
        throw new ACIDException(
                "Transaction " + txnContext.getTxnId() + " should abort (requested by the Lock Manager)" + ":\n" + msg);
    }

    /*
     * Debugging support
     */

    /** Emits one structured trace line describing a lock-manager operation, if TRACE is on. */
    private void log(String string, int id, int entityHashValue, byte lockMode, ITransactionContext txnContext) {
        if (!LOGGER.isEnabled(LVL)) {
            return;
        }
        StringBuilder sb = new StringBuilder();
        sb.append("{ op : ").append(string);
        if (id != NIL) {
            sb.append(" , dataset : ").append(id);
        }
        if (entityHashValue != NIL) {
            sb.append(" , entity : ").append(entityHashValue);
        }
        if (lockMode != LockMode.NL) {
            sb.append(" , mode : ").append(LockMode.toString(lockMode));
        }
        if (txnContext != null) {
            sb.append(" , txnId : ").append(txnContext.getTxnId());
        }
        sb.append(" , thread : ").append(Thread.currentThread().getName());
        sb.append(" }");
        LOGGER.log(LVL, sb.toString());
    }

    /**
     * Consistency check (enabled via CHECK_CONSISTENCY): verifies that every holder request
     * reachable from the resource table can also be found via its job's request queue.
     * Groups whose latch cannot be obtained within 100 ms are skipped with a warning.
     */
    private void assertLocksCanBefoundInJobQueue() throws ACIDException {
        try {
            for (int i = 0; i < table.size; ++i) {
                final ResourceGroup group = table.get(i);
                if (group.tryLatch(100, TimeUnit.MILLISECONDS)) {
                    try {
                        long resSlot = group.firstResourceIndex;
                        while (resSlot != NILL) {
                            int dsId = resArenaMgr.getDatasetId(resSlot);
                            int entityHashValue = resArenaMgr.getPkHashVal(resSlot);
                            long reqSlot = resArenaMgr.getLastHolder(resSlot);
                            while (reqSlot != NILL) {
                                byte lockMode = (byte) reqArenaMgr.getLockMode(reqSlot);
                                long jobSlot = reqArenaMgr.getJobSlot(reqSlot);
                                long txnId = jobArenaMgr.getTxnId(jobSlot);
                                assertLockCanBeFoundInJobQueue(dsId, entityHashValue, lockMode, txnId);
                                reqSlot = reqArenaMgr.getNextRequest(reqSlot);
                            }
                            resSlot = resArenaMgr.getNext(resSlot);
                        }
                    } finally {
                        group.releaseLatch();
                    }
                } else {
                    LOGGER.warn("Could not check locks for " + group);
                }
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new IllegalStateException("interrupted", e);
        }
    }

    /** Fails (logs and throws) if the given lock cannot be located in its job's queue. */
    private void assertLockCanBeFoundInJobQueue(int dsId, int entityHashValue, byte lockMode, long txnId) {
        if (findLockInJobQueue(dsId, entityHashValue, txnId, lockMode) == NILL) {
            String msg = "request for " + LockMode.toString(lockMode) + " lock on dataset " + dsId + " entity "
                    + entityHashValue + " not found for txn " + txnId + " in thread "
                    + Thread.currentThread().getName();
            LOGGER.error(msg);
            throw new IllegalStateException(msg);
        }
    }

    /**
     * tries to find a lock request searching though the job queue
     *
     * @param dsId
     *            dataset id
     * @param entityHashValue
     *            primary key hash value
     * @param txnId
     *            job id
     * @param lockMode
     *            lock mode
     * @return the slot of the request, if the lock request is found, NILL otherwise
     */
    private long findLockInJobQueue(final int dsId, final int entityHashValue, final long txnId, byte lockMode) {
        long jobSlot = txnId2TxnSlotMap.get(txnId);
        if (jobSlot == 0) {
            return NILL;
        }
        long holder;
        synchronized (jobArenaMgr) {
            holder = jobArenaMgr.getLastHolder(jobSlot);
        }
        while (holder != NILL) {
            long resource = reqArenaMgr.getResourceId(holder);
            if (dsId == resArenaMgr.getDatasetId(resource) && entityHashValue == resArenaMgr.getPkHashVal(resource)
                    && jobSlot == reqArenaMgr.getJobSlot(holder)
                    && (lockMode == reqArenaMgr.getLockMode(holder) || lockMode == LockMode.ANY)) {
                return holder;
            }
            synchronized (jobArenaMgr) {
                holder = reqArenaMgr.getNextJobRequest(holder);
            }
        }
        return NILL;
    }

    private TablePrinter getResourceTablePrinter() {
        return new ResourceTablePrinter(table, resArenaMgr, reqArenaMgr, jobArenaMgr);
    }

    private TablePrinter getDumpTablePrinter() {
        return new DumpTablePrinter(table, resArenaMgr, reqArenaMgr, jobArenaMgr, txnId2TxnSlotMap);
    }

    /** Renders the current lock table grouped by resource. */
    public String printByResource() {
        return getResourceTablePrinter().append(new StringBuilder()).append("\n").toString();
    }

    @Override
    public String toString() {
        return printByResource();
    }

    /** Renders a full dump of all arenas and the txn map. */
    public String dump() {
        return getDumpTablePrinter().append(new StringBuilder()).toString();
    }

    @Override
    public String prettyPrint() throws ACIDException {
        StringBuilder s = new StringBuilder("\n########### LockManager Status #############\n");
        return getDumpTablePrinter().append(s).toString() + "\n";
    }

    @Override
    public void start() {
        //no op
    }

    @Override
    public void dumpState(OutputStream os) throws IOException {
        os.write(dump().getBytes());
    }

    @Override
    public void stop(boolean dumpState, OutputStream os) throws IOException {
        if (dumpState) {
            dumpState(os);
        }
    }
}
apache-2.0
groboclown/p4ic4idea
p4java/r18-1/src/test/java/com/perforce/p4java/tests/dev/unit/features112/GetStreamInterchangesTest.java
9918
/**
 * Copyright (c) 2011 Perforce Software. All rights reserved.
 */
package com.perforce.p4java.tests.dev.unit.features112;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.List;

import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;

import com.perforce.p4java.client.IClient;
import com.perforce.p4java.core.IChangelist;
import com.perforce.p4java.core.file.FileSpecBuilder;
import com.perforce.p4java.core.file.IFileSpec;
import com.perforce.p4java.exception.P4JavaException;
import com.perforce.p4java.impl.generic.core.file.FileSpec;
import com.perforce.p4java.option.changelist.SubmitOptions;
import com.perforce.p4java.option.client.CopyFilesOptions;
import com.perforce.p4java.option.client.DeleteFilesOptions;
import com.perforce.p4java.option.client.IntegrateFilesOptions;
import com.perforce.p4java.option.server.GetInterchangesOptions;
import com.perforce.p4java.server.IOptionsServer;
import com.perforce.p4java.tests.dev.annotations.Jobs;
import com.perforce.p4java.tests.dev.annotations.TestId;
import com.perforce.p4java.tests.dev.unit.P4JavaTestCase;

/**
 * Test "p4 interchanges -S stream -P parent".
 */
@Jobs({ "job046697" })
@TestId("Dev112_GetStreamInterchangesTest")
public class GetStreamInterchangesTest extends P4JavaTestCase {

    IOptionsServer server = null;

    /**
     * One-time initialization, run once before all tests in this class.
     */
    @BeforeClass
    public static void oneTimeSetUp() {
        // one-time initialization code (before all the tests).
    }

    /**
     * One-time cleanup, run once after all tests in this class.
     */
    @AfterClass
    public static void oneTimeTearDown() {
        // one-time cleanup code (after all the tests).
    }

    /**
     * Per-test initialization: connect to the test server.
     */
    @Before
    public void setUp() {
        try {
            server = getServer();
            assertNotNull(server);
        } catch (P4JavaException e) {
            fail("Unexpected exception: " + e.getLocalizedMessage());
        } catch (URISyntaxException e) {
            fail("Unexpected exception: " + e.getLocalizedMessage());
        }
    }

    /**
     * Per-test cleanup: close the server session if one was opened.
     */
    @After
    public void tearDown() {
        if (server != null) {
            this.endServerSession(server);
        }
    }

    /**
     * Copies {@code source} to {@code target} in a brand-new changelist on the
     * given client, submits it, and returns the submitted changelist.
     */
    private IChangelist copyAndSubmit(IClient client, String source,
            String target, String description) throws P4JavaException {
        IChangelist change = getNewChangelist(server, client, description);
        assertNotNull(change);
        change = client.createChangelist(change);
        List<IFileSpec> copied = client.copyFiles(new FileSpec(source),
                new FileSpec(target), null,
                new CopyFilesOptions().setChangelistId(change.getId()));
        assertNotNull(copied);
        change.refresh();
        List<IFileSpec> submitted = change.submit(new SubmitOptions());
        assertNotNull(submitted);
        return change;
    }

    /**
     * Returns true if any changelist in {@code changes} has the given id.
     */
    private boolean containsId(List<IChangelist> changes, int id) {
        for (IChangelist change : changes) {
            if (change.getId() == id) {
                return true;
            }
        }
        return false;
    }

    /**
     * Best-effort removal of submitted test files on the given client;
     * failures are swallowed because there is nothing useful to do about
     * them during cleanup.
     */
    private void deleteQuietly(IClient client, String[] paths) {
        try {
            server.setCurrentClient(client);
            IChangelist change = getNewChangelist(server, client,
                    "Dev112_GetStreamInterchangesTest delete submitted files");
            change = client.createChangelist(change);
            client.deleteFiles(
                    FileSpecBuilder.makeFileSpecList(paths),
                    new DeleteFilesOptions().setChangelistId(change.getId()));
            change.refresh();
            change.submit(null);
        } catch (P4JavaException e) {
            // Can't do much here...
        }
    }

    /**
     * Test "p4 interchanges -S stream -P parent".
     */
    @Test
    public void testStreamInterchanges() {
        IClient devStreamClient = null;
        IClient mainStreamClient = null;
        IChangelist changelist = null;
        List<IFileSpec> files = null;

        String devStream = "//p4java_stream/dev";
        String mainStream = "//p4java_stream/main";

        int randNum = getRandomInt();
        String dir = "interchanges" + randNum;

        String mainSourceFile = mainStream
                + "/core/GetOpenedFilesTest/src/gnu/getopt/Getopt.java";
        String mainTargetFile = mainStream
                + "/core/GetOpenedFilesTest/src/gnu/getopt/" + dir + "/Getopt.java";
        String mainTargetFile2 = mainStream
                + "/core/GetOpenedFilesTest/src/gnu/getopt/" + dir + "/Getopt2.java";
        String mainTargetFile3 = mainStream
                + "/core/GetOpenedFilesTest/src/gnu/getopt/" + dir + "/Getopt3.java";
        String devTargetFile = devStream
                + "/core/GetOpenedFilesTest/src/gnu/getopt/" + dir + "/Getopt.java";
        String devTargetFile2 = devStream
                + "/core/GetOpenedFilesTest/src/gnu/getopt/" + dir + "/Getopt2.java";
        String devTargetFile3 = devStream
                + "/core/GetOpenedFilesTest/src/gnu/getopt/" + dir + "/Getopt3.java";

        try {
            // Get the test main and dev stream clients.
            mainStreamClient = server.getClient("p4java_stream_main");
            assertNotNull(mainStreamClient);
            devStreamClient = server.getClient("p4java_stream_dev");
            assertNotNull(devStreamClient);

            // Work on the main stream first.
            server.setCurrentClient(mainStreamClient);

            // Create three submitted changelists, each copying the source
            // file to a distinct new target path in the main stream.
            List<IChangelist> expectedChangelists = new ArrayList<IChangelist>();
            String copyDescription =
                    "Dev112_GetStreamInterchangesTest copy files " + randNum;
            expectedChangelists.add(copyAndSubmit(mainStreamClient,
                    mainSourceFile, mainTargetFile, copyDescription));
            expectedChangelists.add(copyAndSubmit(mainStreamClient,
                    mainSourceFile, mainTargetFile2, copyDescription));
            expectedChangelists.add(copyAndSubmit(mainStreamClient,
                    mainSourceFile, mainTargetFile3, copyDescription));

            // All three changelists should appear as interchanges, since the
            // new stream files have not yet been integrated from the source
            // stream to the target stream.
            List<IChangelist> returnedChangelists = server.getInterchanges(
                    null,
                    null,
                    new GetInterchangesOptions().setReverseMapping(true)
                            .setStream(devStream));
            assertNotNull(returnedChangelists);
            for (IChangelist expected : expectedChangelists) {
                assertTrue(containsId(returnedChangelists, expected.getId()));
            }

            // Merge-down the new files from the main stream to the dev
            // stream. A mainline stream cannot be specified directly as the
            // merge source, so the dev stream is used together with reverse
            // mapping ("-r") to flip the direction of the merge.
            server.setCurrentClient(devStreamClient);
            changelist = getNewChangelist(server, devStreamClient,
                    "Dev112_GetStreamInterchangesTest integ files");
            assertNotNull(changelist);
            changelist = devStreamClient.createChangelist(changelist);
            files = devStreamClient.integrateFiles(
                    null,
                    FileSpecBuilder.makeFileSpecList(new String[] {
                            devTargetFile, devTargetFile2, devTargetFile3 }),
                    new IntegrateFilesOptions()
                            .setChangelistId(changelist.getId())
                            .setReverseMapping(true)
                            .setStream(devStream));
            assertNotNull(files);
            changelist.refresh();
            files = changelist.submit(new SubmitOptions());
            assertNotNull(files);

            // Now that the stream files have been integrated, none of the
            // expected changelists should be reported as interchanges.
            returnedChangelists = server.getInterchanges(
                    null,
                    null,
                    null,
                    new GetInterchangesOptions().setReverseMapping(true)
                            .setStream(devStream));
            assertNotNull(returnedChangelists);
            for (IChangelist expected : expectedChangelists) {
                assertFalse(containsId(returnedChangelists, expected.getId()));
            }
        } catch (P4JavaException e) {
            fail("Unexpected exception: " + e.getLocalizedMessage());
        } finally {
            if (server != null) {
                if (devStreamClient != null) {
                    // Delete submitted test files in the dev stream.
                    deleteQuietly(devStreamClient, new String[] {
                            devTargetFile, devTargetFile2, devTargetFile3 });
                }
                if (mainStreamClient != null) {
                    // Delete submitted test files in the main stream.
                    deleteQuietly(mainStreamClient, new String[] {
                            mainTargetFile, mainTargetFile2, mainTargetFile3 });
                }
            }
        }
    }
}
apache-2.0
MaDaPHaKa/Orient-object
server/src/main/java/com/orientechnologies/orient/server/tx/OTransactionEntryProxy.java
1099
/*
 * Copyright 1999-2010 Luca Garulli (l.garulli--at--orientechnologies.com)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.orientechnologies.orient.server.tx;

import java.io.IOException;

import com.orientechnologies.orient.core.Orient;
import com.orientechnologies.orient.core.db.record.ORecordOperation;

/**
 * Server-side transaction-entry placeholder: wraps a freshly instantiated
 * record of the requested type (created via the record factory manager)
 * together with a status byte of 0.
 */
public class OTransactionEntryProxy extends ORecordOperation {

    /**
     * @param iRecordType type byte used to pick the concrete record class
     *                    from the record factory manager
     * @throws IOException declared for API compatibility with callers
     */
    public OTransactionEntryProxy(final byte iRecordType) throws IOException {
        super(Orient.instance().getRecordFactoryManager().newInstance(iRecordType), (byte) 0);
    }
}
apache-2.0
petabyte-research/redflags
redflags-web/src/test/java/hu/petabyte/redflags/web/AppTest.java
1283
/*
   Copyright 2014-2016 PetaByte Research Ltd.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
 */
package hu.petabyte.redflags.web;

import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;

/**
 * Minimal JUnit 3 smoke test for the application module.
 *
 * @author Zsolt Jurányi
 */
public class AppTest extends TestCase {

    /**
     * Creates the test case.
     *
     * @param testName name of the test case
     */
    public AppTest(String testName) {
        super(testName);
    }

    /**
     * @return the suite of tests being tested
     */
    public static Test suite() {
        return new TestSuite(AppTest.class);
    }

    /**
     * Trivial always-passing assertion; keeps the module's test phase alive.
     */
    public void testApp() {
        assertTrue(true);
    }
}
apache-2.0
missedone/testng
src/main/java/org/testng/collections/Lists.java
1203
package org.testng.collections;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

/**
 * Static factory and utility methods for {@link List} instances.
 */
public final class Lists {

  // Utility class: not instantiable.
  private Lists() {}

  /** Returns a new, empty, mutable {@link ArrayList}. */
  public static <K> List<K> newArrayList() {
    return new ArrayList<>();
  }

  /** Returns a new, empty, mutable {@link LinkedList}. */
  public static <K> List<K> newLinkedList() {
    return new LinkedList<>();
  }

  /** Returns a new {@link LinkedList} containing the elements of {@code c}. */
  public static <K> List<K> newLinkedList(Collection<K> c) {
    return new LinkedList<>(c);
  }

  /** Returns a new {@link ArrayList} containing the elements of {@code c}. */
  public static <K> List<K> newArrayList(Collection<K> c) {
    return new ArrayList<>(c);
  }

  /** Returns a new {@link ArrayList} containing the given elements, in order. */
  @SafeVarargs
  public static <K> List<K> newArrayList(K... elements) {
    List<K> result = new ArrayList<>();
    Collections.addAll(result, elements);
    return result;
  }

  /**
   * Returns a new, empty {@link ArrayList} with the given initial capacity.
   *
   * @param size initial capacity (not the resulting list size)
   */
  public static <K> List<K> newArrayList(int size) {
    return new ArrayList<>(size);
  }

  /**
   * Returns the elements of {@code list1} that also occur in {@code list2},
   * preserving {@code list1}'s order (duplicates in {@code list1} are kept).
   */
  public static <K> List<K> intersection(List<K> list1, List<K> list2) {
    // Probe a hash set instead of calling list2.contains() once per element,
    // turning the accidental O(n*m) scan into O(n + m).
    Set<K> lookup = new HashSet<>(list2);
    return list1.stream().filter(lookup::contains).collect(Collectors.toList());
  }

  /** Returns a new list containing all of {@code l1} followed by all of {@code l2}. */
  public static <K> List<K> merge(Collection<K> l1, Collection<K> l2) {
    List<K> result = newArrayList(l1);
    result.addAll(l2);
    return result;
  }
}
apache-2.0
nishantmonu51/druid
server/src/main/java/org/apache/druid/server/coordinator/duty/CoordinatorCustomDuty.java
2940
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.druid.server.coordinator.duty;

import com.fasterxml.jackson.annotation.JsonSubTypes;
import com.fasterxml.jackson.annotation.JsonTypeInfo;
import com.fasterxml.jackson.annotation.JsonTypeName;
import org.apache.druid.guice.annotations.ExtensionPoint;
import org.apache.druid.initialization.DruidModule;

/**
 * This {@link ExtensionPoint} allows for coordinator duty to be pluggable
 * so that users can register their own duties without modifying Core Druid classes.
 *
 * Users can write their own custom coordinator duty implementing this interface and setting the {@link JsonTypeName}.
 * Next, users will need to register their custom coordinator as subtypes in their
 * Module's {@link DruidModule#getJacksonModules()}. Once these steps are done, user will be able to load their
 * custom coordinator duty using the following properties:
 * druid.coordinator.dutyGroups=[&lt;GROUP_NAME&gt;]
 * druid.coordinator.&lt;GROUP_NAME&gt;.duties=[&lt;DUTY_NAME_MATCHING_JSON_TYPE_NAME&gt;]
 * druid.coordinator.&lt;GROUP_NAME&gt;.duty.&lt;DUTY_NAME_MATCHING_JSON_TYPE_NAME&gt;.&lt;SOME_CONFIG_1&gt;=100
 * druid.coordinator.&lt;GROUP_NAME&gt;.duty.&lt;DUTY_NAME_MATCHING_JSON_TYPE_NAME&gt;.&lt;SOME_CONFIG_2&gt;=200
 * druid.coordinator.&lt;GROUP_NAME&gt;.period="P1D"
 *
 * The duties can be grouped into multiple groups as per the elements in list druid.coordinator.dutyGroups.
 * All duties in the same group will have the same run period configured by druid.coordinator.&lt;GROUP_NAME&gt;.period.
 * There will be a single thread running the duties sequentially for each group.
 *
 * Note that custom duty does not implement CoordinatorDuty directly as existing Core Druid Coordinator Duties
 * don't have associated JSON type and should not be manually grouped/enabled/disabled by the users.
 * (The only exception is the metadata cleanup duties which we may refactor to use the custom duty system in the future)
 */
@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type")
@JsonSubTypes({
    @JsonSubTypes.Type(name = "killSupervisors", value = KillSupervisorsCustomDuty.class),
})
@ExtensionPoint
public interface CoordinatorCustomDuty extends CoordinatorDuty
{
}
apache-2.0
apache/bookkeeper
bookkeeper-server/src/test/java/org/apache/bookkeeper/test/LocalBookiesRegistryTest.java
1587
/** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * */ package org.apache.bookkeeper.test; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import org.apache.bookkeeper.proto.LocalBookiesRegistry; import org.junit.Test; /** * Test the correctness and the availability outside of its package of LocalBookiesRegistryTest. */ public class LocalBookiesRegistryTest extends BookKeeperClusterTestCase { public LocalBookiesRegistryTest() { super(1); baseConf.setDisableServerSocketBind(true); baseConf.setEnableLocalTransport(true); } @Test public void testAccessibleLocalBookiesRegistry() throws Exception { assertEquals(1, bookieCount()); bookieAddresses().forEach(a -> assertTrue(LocalBookiesRegistry.isLocalBookie(a))); } }
apache-2.0
googleapis/google-api-java-client-services
clients/google-api-services-recommendationengine/v1beta1/1.31.0/com/google/api/services/recommendationengine/v1beta1/RecommendationsAIRequestInitializer.java
3598
/*
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
 * in compliance with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License
 * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing permissions and limitations under
 * the License.
 */
/*
 * This code was generated by https://github.com/googleapis/google-api-java-client-services/
 * Modify at your own risk.
 */
package com.google.api.services.recommendationengine.v1beta1;

/**
 * RecommendationsAI request initializer for setting properties like key and userIp.
 *
 * <p>
 * The simplest usage sets only the key parameter:
 * </p>
 *
 * <pre>
    public static final GoogleClientRequestInitializer KEY_INITIALIZER =
        new RecommendationsAIRequestInitializer(KEY);
 * </pre>
 *
 * <p>
 * A second constructor sets both the key and userIp parameters:
 * </p>
 *
 * <pre>
    public static final GoogleClientRequestInitializer INITIALIZER =
        new RecommendationsAIRequestInitializer(KEY, USER_IP);
 * </pre>
 *
 * <p>
 * For custom logic, subclass and override
 * {@code initializeRecommendationsAIRequest}; call the two-argument super
 * constructor first if the key/userIp parameters should also be applied.
 * </p>
 *
 * <p>
 * Subclasses should be thread-safe.
 * </p>
 *
 * @since 1.12
 */
public class RecommendationsAIRequestInitializer extends com.google.api.client.googleapis.services.json.CommonGoogleJsonClientRequestInitializer {

  public RecommendationsAIRequestInitializer() {
    super();
  }

  /**
   * @param key API key or {@code null} to leave it unchanged
   */
  public RecommendationsAIRequestInitializer(String key) {
    super(key);
  }

  /**
   * @param key API key or {@code null} to leave it unchanged
   * @param userIp user IP or {@code null} to leave it unchanged
   */
  public RecommendationsAIRequestInitializer(String key, String userIp) {
    super(key, userIp);
  }

  @Override
  public final void initializeJsonRequest(com.google.api.client.googleapis.services.json.AbstractGoogleJsonClientRequest<?> request) throws java.io.IOException {
    super.initializeJsonRequest(request);
    initializeRecommendationsAIRequest((RecommendationsAIRequest<?>) request);
  }

  /**
   * Initializes a RecommendationsAI request.
   *
   * <p>
   * Default implementation does nothing. Called from
   * {@link #initializeJsonRequest(com.google.api.client.googleapis.services.json.AbstractGoogleJsonClientRequest)}.
   * </p>
   *
   * @throws java.io.IOException I/O exception
   */
  protected void initializeRecommendationsAIRequest(RecommendationsAIRequest<?> request) throws java.io.IOException {
  }
}
apache-2.0
apache/continuum
continuum-webapp/src/test/java/org/apache/maven/continuum/web/action/stub/ProjectGroupActionStub.java
1560
package org.apache.maven.continuum.web.action.stub; /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ import org.apache.maven.continuum.web.action.ProjectGroupAction; import org.codehaus.plexus.redback.users.User; import org.codehaus.plexus.redback.users.UserNotFoundException; import org.codehaus.plexus.redback.users.jdo.JdoUser; public class ProjectGroupActionStub extends ProjectGroupAction { public String getProjectGroupName() { return "test-group"; } protected void checkViewProjectGroupAuthorization( String resource ) { // skip authorization check } protected User getUser( String principal ) throws UserNotFoundException { User user = new JdoUser(); user.setUsername( principal ); return user; } }
apache-2.0
apache/solr
solr/modules/analytics/src/java/org/apache/solr/analytics/function/mapping/NegateFunction.java
3077
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.solr.analytics.function.mapping; import org.apache.solr.analytics.ExpressionFactory.CreatorFunction; import org.apache.solr.analytics.value.AnalyticsValueStream; import org.apache.solr.analytics.value.BooleanValueStream; import org.apache.solr.analytics.value.DoubleValueStream; import org.apache.solr.analytics.value.FloatValueStream; import org.apache.solr.analytics.value.IntValueStream; import org.apache.solr.analytics.value.LongValueStream; import org.apache.solr.common.SolrException; import org.apache.solr.common.SolrException.ErrorCode; /** * A negation mapping function. * * <p>Takes a numeric or boolean ValueStream or Value and returns a ValueStream or Value of the same * numeric type. 
*/ public class NegateFunction { public static final String name = "neg"; public static final CreatorFunction creatorFunction = (params -> { if (params.length != 1) { throw new SolrException( ErrorCode.BAD_REQUEST, "The " + name + " function requires 1 paramaters, " + params.length + " found."); } AnalyticsValueStream param = params[0]; if (param instanceof BooleanValueStream) { return LambdaFunction.createBooleanLambdaFunction( name, x -> !x, (BooleanValueStream) param); } if (param instanceof IntValueStream) { return LambdaFunction.createIntLambdaFunction(name, x -> x * -1, (IntValueStream) param); } if (param instanceof LongValueStream) { return LambdaFunction.createLongLambdaFunction( name, x -> x * -1, (LongValueStream) param); } if (param instanceof FloatValueStream) { return LambdaFunction.createFloatLambdaFunction( name, x -> x * -1, (FloatValueStream) param); } if (param instanceof DoubleValueStream) { return LambdaFunction.createDoubleLambdaFunction( name, x -> x * -1, (DoubleValueStream) param); } throw new SolrException( ErrorCode.BAD_REQUEST, "The " + name + " function requires a boolean or numeric parameter, " + param.getExpressionStr() + " found."); }); }
apache-2.0
googleapis/google-api-java-client-services
clients/google-api-services-secretmanager/v1beta1/1.31.0/com/google/api/services/secretmanager/v1beta1/model/Empty.java
1828
/*
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
 * in compliance with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License
 * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing permissions and limitations under
 * the License.
 */
/*
 * This code was generated by https://github.com/googleapis/google-api-java-client-services/
 * Modify at your own risk.
 */
package com.google.api.services.secretmanager.v1beta1.model;

/**
 * A generic empty message that you can re-use to avoid defining duplicated empty messages in your
 * APIs. A typical example is to use it as the request or the response type of an API method. For
 * instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The
 * JSON representation for `Empty` is empty JSON object `{}`.
 *
 * <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is
 * transmitted over HTTP when working with the Secret Manager API. For a detailed explanation see:
 * <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a>
 * </p>
 *
 * @author Google, Inc.
 */
@SuppressWarnings("javadoc")
public final class Empty extends com.google.api.client.json.GenericJson {

  /** Covariant override so chained set() calls keep the Empty type. */
  @Override
  public Empty set(String fieldName, Object value) {
    return (Empty) super.set(fieldName, value);
  }

  /** Covariant override so clone() returns an Empty rather than a GenericJson. */
  @Override
  public Empty clone() {
    return (Empty) super.clone();
  }
}
apache-2.0
Benjamin-Dobell/Apktool
brut.apktool/apktool-lib/src/test/java/brut/androlib/AndroidOreoNotSparseTest.java
2476
/**
 * Copyright (C) 2017 Ryszard Wiśniewski <brut.alll@gmail.com>
 * Copyright (C) 2017 Connor Tumbleson <connor.tumbleson@gmail.com>
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package brut.androlib;

import brut.directory.ExtFile;
import brut.common.BrutException;
import brut.util.OS;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

import java.io.File;
import java.util.logging.Logger;

import static org.junit.Assert.assertTrue;

/**
 * Regression test for issue 1594: decode and rebuild an Android Oreo APK
 * whose resources are not in the sparse format.
 *
 * @author Connor Tumbleson <connor.tumbleson@gmail.com>
 */
public class AndroidOreoNotSparseTest {

    /**
     * Decodes and rebuilds not_sparse.apk once for the whole class; the
     * test method then only checks the resulting directories.
     */
    @BeforeClass
    public static void beforeClass() throws Exception {
        TestUtils.cleanFrameworkFile();
        sTmpDir = new ExtFile(OS.createTempDirectory());
        sTestOrigDir = new ExtFile(sTmpDir, "issue1594-orig");
        sTestNewDir = new ExtFile(sTmpDir, "issue1594-new");
        LOGGER.info("Unpacking not_sparse.apk...");
        TestUtils.copyResourceDir(AndroidOreoNotSparseTest.class, "brut/apktool/issue1594", sTestOrigDir);

        File testApk = new File(sTestOrigDir, "not_sparse.apk");

        LOGGER.info("Decoding not_sparse.apk...");
        ApkDecoder apkDecoder = new ApkDecoder(testApk);
        apkDecoder.setOutDir(sTestNewDir);
        apkDecoder.decode();

        LOGGER.info("Building not_sparse.apk...");
        ApkOptions apkOptions = new ApkOptions();
        new Androlib(apkOptions).build(sTestNewDir, testApk);
    }

    @AfterClass
    public static void afterClass() throws BrutException {
        OS.rmdir(sTmpDir);
    }

    @Test
    public void buildAndDecodeTest() throws BrutException {
        assertTrue(sTestNewDir.isDirectory());
        assertTrue(sTestOrigDir.isDirectory());
    }

    private static ExtFile sTmpDir;
    private static ExtFile sTestOrigDir;
    private static ExtFile sTestNewDir;

    // Fixed copy-paste bug: the logger was previously created with
    // BuildAndDecodeJarTest's class name, so this test's log records were
    // attributed to the wrong logger.
    private final static Logger LOGGER = Logger.getLogger(AndroidOreoNotSparseTest.class.getName());
}
apache-2.0
apache/openwebbeans
webbeans-impl/src/test/java/org/apache/webbeans/test/specalization/observer/prot/BeanE.java
1219
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.webbeans.test.specalization.observer.prot; import javax.enterprise.context.ApplicationScoped; import javax.enterprise.inject.Specializes; @Specializes @ApplicationScoped public class BeanE extends BeanA { private static final long serialVersionUID = 821164664338581947L; @Override public String getBeanName() { return super.getBeanName() + ":[specialize]"; } }
apache-2.0
aaanders/dropwizard
dropwizard-jersey/src/main/java/io/dropwizard/jersey/filter/AllowedMethodsFilter.java
1980
package io.dropwizard.jersey.filter;

import com.google.common.collect.ImmutableSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.util.Optional;

/**
 * Servlet filter that rejects any request whose HTTP method is not in a
 * configurable whitelist, responding with {@code 405 Method Not Allowed}.
 * The whitelist is read from the comma-separated {@code allowedMethods}
 * init parameter; when absent, {@link #DEFAULT_ALLOWED_METHODS} applies.
 */
public class AllowedMethodsFilter implements Filter {

    public static final String ALLOWED_METHODS_PARAM = "allowedMethods";
    public static final ImmutableSet<String> DEFAULT_ALLOWED_METHODS = ImmutableSet.of(
            "GET", "POST", "PUT", "DELETE", "HEAD", "OPTIONS", "PATCH"
    );

    private static final Logger LOG = LoggerFactory.getLogger(AllowedMethodsFilter.class);
    private ImmutableSet<String> allowedMethods = ImmutableSet.of();

    @Override
    public void init(FilterConfig config) {
        allowedMethods = Optional.ofNullable(config.getInitParameter(ALLOWED_METHODS_PARAM))
                .map(AllowedMethodsFilter::parseMethods)
                .orElse(DEFAULT_ALLOWED_METHODS);
    }

    /**
     * Parses a comma-separated method list, trimming whitespace around each
     * token and skipping empty entries, so that values such as
     * {@code "GET, POST"} match incoming methods as expected. (The previous
     * raw split kept the leading space, silently disallowing " POST".)
     */
    private static ImmutableSet<String> parseMethods(String param) {
        final ImmutableSet.Builder<String> methods = ImmutableSet.builder();
        for (String token : param.split(",")) {
            final String method = token.trim();
            if (!method.isEmpty()) {
                methods.add(method);
            }
        }
        return methods.build();
    }

    @Override
    public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
            throws IOException, ServletException {
        handle((HttpServletRequest) request, (HttpServletResponse) response, chain);
    }

    /** Passes whitelisted requests on; answers 405 for everything else. */
    private void handle(HttpServletRequest request, HttpServletResponse response, FilterChain chain)
            throws IOException, ServletException {
        if (allowedMethods.contains(request.getMethod())) {
            chain.doFilter(request, response);
        } else {
            LOG.debug("Request with disallowed method {} blocked", request.getMethod());
            response.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED);
        }
    }

    @Override
    public void destroy() {
    }
}
apache-2.0
dylanswartz/nakamura
bundles/files/impl/src/test/java/org/sakaiproject/nakamura/files/pool/CreateContentPoolServletTest.java
12715
/*
 * Licensed to the Sakai Foundation (SF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The SF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package org.sakaiproject.nakamura.files.pool;

import static org.apache.jackrabbit.JcrConstants.JCR_CONTENT;
import static org.apache.jackrabbit.JcrConstants.NT_RESOURCE;
import static org.mockito.Mockito.when;
import static org.sakaiproject.nakamura.api.files.FilesConstants.POOLED_CONTENT_MEMBERS_NODE;

import com.google.common.collect.ImmutableMap;
import org.apache.jackrabbit.api.JackrabbitSession;
import org.apache.jackrabbit.api.security.principal.PrincipalManager;
import org.apache.jackrabbit.api.security.user.UserManager;
import org.apache.sling.api.SlingHttpServletRequest;
import org.apache.sling.api.SlingHttpServletResponse;
import org.apache.sling.api.request.RequestParameter;
import org.apache.sling.api.request.RequestParameterMap;
import org.apache.sling.api.request.RequestPathInfo;
import org.apache.sling.api.resource.ResourceResolver;
import org.apache.sling.commons.json.JSONObject;
import org.apache.sling.jcr.api.SlingRepository;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.MockitoAnnotations;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.osgi.service.event.EventAdmin;
import org.sakaiproject.nakamura.api.cluster.ClusterTrackingService;
import org.sakaiproject.nakamura.api.files.FileUploadHandler;
import org.sakaiproject.nakamura.api.lite.ClientPoolException;
import org.sakaiproject.nakamura.api.lite.Session;
import org.sakaiproject.nakamura.api.lite.StorageClientException;
import org.sakaiproject.nakamura.api.lite.accesscontrol.AccessDeniedException;
import org.sakaiproject.nakamura.api.lite.authorizable.AuthorizableManager;
import org.sakaiproject.nakamura.api.user.AuthorizableCountChanger;
import org.sakaiproject.nakamura.lite.BaseMemoryRepository;
import org.sakaiproject.nakamura.lite.RepositoryImpl;
import org.sakaiproject.nakamura.lite.jackrabbit.SparseMapUserManager;

// Fixed: previously imported org.apache.kahadb.util.ByteArrayInputStream, an
// unnecessary third-party copy of the JDK class with identical constructors.
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.security.Principal;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;

import javax.jcr.Binary;
import javax.jcr.Item;
import javax.jcr.Node;
import javax.jcr.RepositoryException;
import javax.jcr.ValueFactory;
import javax.jcr.security.AccessControlEntry;
import javax.jcr.security.AccessControlException;
import javax.jcr.security.AccessControlList;
import javax.jcr.security.AccessControlManager;
import javax.jcr.security.AccessControlPolicy;
import javax.jcr.security.Privilege;

/**
 * Unit tests for {@code CreateContentPoolServlet}: POSTs a multipart upload of
 * three files (plus one non-file form field, which must be skipped) and checks
 * the JSON response produced for the created pooled-content items.
 */
public class CreateContentPoolServletTest {
  @Mock
  private SlingRepository slingRepository;
  @Mock
  private JackrabbitSession adminSession;
  @Mock
  private JackrabbitSession jcrSession;
  @Mock
  private UserManager userManager;
  @Mock
  private PrincipalManager principalManager;
  @Mock
  private Node parentNode;
  @Mock
  private Node resourceNode;
  @Mock
  private Node membersNode;
  @Mock
  private Node memberNode;
  @Mock
  private AccessControlManager accessControlManager;
  @Mock
  private Privilege allPrivilege;
  private AccessControlList accessControlList;
  @Mock
  private ValueFactory valueFactory;
  @Mock
  private Binary binary;
  @Mock
  private ClusterTrackingService clusterTrackingService;
  @Mock
  private SlingHttpServletRequest request;
  @Mock
  private SlingHttpServletResponse response;
  @Mock
  private RequestParameterMap requestParameterMap;
  @Mock
  private RequestParameter requestParameter1;
  @Mock
  private RequestParameter requestParameter2;
  @Mock
  private RequestParameter requestParameter3;
  @Mock
  private RequestParameter requestParameterNot;
  @Mock
  private RequestPathInfo requestPathInfo;
  @Mock
  private ResourceResolver resourceResolver;
  @Mock
  private SparseMapUserManager sparseMapUserManager;
  @Mock
  private EventAdmin eventAdmin;
  @Mock
  private AuthorizableCountChanger authorizableCountChanger;

  // Real in-memory sparse repository, created in the constructor so that the
  // "ieb" user exists before setUp() logs in as that user.
  private RepositoryImpl repository;

  CreateContentPoolServlet cp;

  /**
   * Wires all mocks together: request plumbing, the four request parameters
   * (three files and one plain form field), JCR session/ACL stubs, and the
   * servlet under test.
   */
  @Before
  public void setUp() throws Exception {
    // Session and resolver plumbing.
    when(slingRepository.loginAdministrative(null)).thenReturn(adminSession);
    when(request.getResourceResolver()).thenReturn(resourceResolver);
    when(resourceResolver.adaptTo(javax.jcr.Session.class)).thenReturn(jcrSession);
    Session session = repository.loginAdministrative("ieb");
    when(jcrSession.getUserManager()).thenReturn(sparseMapUserManager);
    when(sparseMapUserManager.getSession()).thenReturn(session);
    when(clusterTrackingService.getClusterUniqueId()).thenReturn(
        String.valueOf(System.currentTimeMillis()));
    when(request.getRequestPathInfo()).thenReturn(requestPathInfo);
    when(requestPathInfo.getExtension()).thenReturn(null);

    when(adminSession.getUserManager()).thenReturn(userManager);
    when(adminSession.getPrincipalManager()).thenReturn(principalManager);
    when(adminSession.getAccessControlManager()).thenReturn(accessControlManager);
    when(request.getRemoteUser()).thenReturn("ieb");

    // The multipart body: three uploaded files plus one ordinary form field
    // (requestParameterNot) that the servlet must ignore.
    when(request.getRequestParameterMap()).thenReturn(requestParameterMap);
    Map<String, RequestParameter[]> map = new HashMap<String, RequestParameter[]>();
    RequestParameter[] requestParameters = new RequestParameter[] { requestParameter1,
        requestParameterNot, requestParameter2, requestParameter3 };
    map.put("files", requestParameters);
    when(requestParameterMap.entrySet()).thenReturn(map.entrySet());

    when(requestParameter1.isFormField()).thenReturn(false);
    when(requestParameter1.getContentType()).thenReturn("application/pdf");
    when(requestParameter1.getFileName()).thenReturn("testfilename.pdf");
    InputStream input1 = new ByteArrayInputStream(new byte[10]);
    when(requestParameter1.getInputStream()).thenReturn(input1);

    when(requestParameter2.isFormField()).thenReturn(false);
    when(requestParameter2.getContentType()).thenReturn("text/html");
    when(requestParameter2.getFileName()).thenReturn("index.html");
    InputStream input2 = new ByteArrayInputStream(new byte[10]);
    when(requestParameter2.getInputStream()).thenReturn(input2);

    // Windows-style absolute path: the servlet is expected to scrub the
    // directory part and keep only "testabspath.pdf".
    when(requestParameter3.isFormField()).thenReturn(false);
    when(requestParameter3.getContentType()).thenReturn("application/pdf");
    when(requestParameter3.getFileName()).thenReturn(
        "C:\\Users\\Nakamura User\\Documents\\testabspath.pdf");
    InputStream input3 = new ByteArrayInputStream(new byte[10]);
    when(requestParameter3.getInputStream()).thenReturn(input3);

    when(requestParameterNot.isFormField()).thenReturn(true);

    // deep create
    when(adminSession.itemExists(Mockito.anyString())).thenReturn(true);

    // Because the pooled content paths are generated by a private method,
    // mocking the repository is more problematic than usual. The test
    // therefore relies on inside knowledge that there should be three
    // calls to deepGetOrCreateNode for each file: one for the pooled content
    // node, one for its members node, and one for the manager node.
    when(adminSession.getItem(Mockito.anyString())).thenAnswer(new Answer<Item>() {
      public Item answer(InvocationOnMock invocation) throws Throwable {
        Object[] args = invocation.getArguments();
        String path = (String) args[0];
        if (path.endsWith(POOLED_CONTENT_MEMBERS_NODE)) {
          return membersNode;
        } else if (path.endsWith("ieb")) {
          return memberNode;
        } else {
          return parentNode;
        }
      }
    });

    when(parentNode.addNode(JCR_CONTENT, NT_RESOURCE)).thenReturn(resourceNode);
    when(adminSession.getValueFactory()).thenReturn(valueFactory);
    when(valueFactory.createBinary(Mockito.any(InputStream.class))).thenReturn(binary);

    // access control utils
    accessControlList = new AccessControlList() {
      // Add an "addEntry" method so AccessControlUtil can execute something.
      // This method doesn't do anything useful.
      @SuppressWarnings("unused")
      public boolean addEntry(Principal principal, Privilege[] privileges, boolean isAllow)
          throws AccessControlException {
        return true;
      }

      public void removeAccessControlEntry(AccessControlEntry ace)
          throws AccessControlException, RepositoryException {
      }

      public AccessControlEntry[] getAccessControlEntries() throws RepositoryException {
        return new AccessControlEntry[0];
      }

      public boolean addAccessControlEntry(Principal principal, Privilege[] privileges)
          throws AccessControlException, RepositoryException {
        return false;
      }
    };
    when(accessControlManager.privilegeFromName(Mockito.anyString())).thenReturn(
        allPrivilege);
    AccessControlPolicy[] acp = new AccessControlPolicy[] { accessControlList };
    when(accessControlManager.getPolicies(Mockito.anyString())).thenReturn(acp);

    // Servlet under test with its service dependencies injected directly.
    cp = new CreateContentPoolServlet();
    cp.eventAdmin = eventAdmin;
    cp.clusterTrackingService = clusterTrackingService;
    cp.sparseRepository = repository;
    cp.authorizableCountChanger = authorizableCountChanger;
  }

  /**
   * Initializes the mocks and seeds an in-memory repository with the "ieb"
   * user used as the remote user throughout the tests.
   */
  public CreateContentPoolServletTest() throws ClientPoolException, StorageClientException,
      AccessDeniedException, ClassNotFoundException, IOException {
    MockitoAnnotations.initMocks(this);
    BaseMemoryRepository baseMemoryRepository = new BaseMemoryRepository();
    repository = baseMemoryRepository.getRepository();
    Session session = repository.loginAdministrative();
    AuthorizableManager authorizableManager = session.getAuthorizableManager();
    authorizableManager.createUser("ieb", "Ian Boston", "test",
        ImmutableMap.of("x", (Object) "y"));
    org.sakaiproject.nakamura.api.lite.authorizable.Authorizable authorizable =
        authorizableManager.findAuthorizable("ieb");
    System.err.println("Got ieb as " + authorizable);
    session.logout();
  }

  /**
   * Uploads three files and verifies the JSON response contains exactly one
   * entry per file, keyed by the scrubbed file name.
   */
  @Test
  public void testCreate() throws Exception {
    StringWriter stringWriter = new StringWriter();
    when(response.getWriter()).thenReturn(new PrintWriter(stringWriter));

    cp.doPost(request, response);

    // Verify that we created all the nodes.
    JSONObject jsonObject = new JSONObject(stringWriter.toString());
    Assert.assertNotNull(jsonObject.getString("testfilename.pdf"));
    Assert.assertNotNull(jsonObject.getString("index.html"));
    // The servlet should scrub off the absolute path.
    Assert.assertNotNull(jsonObject.getString("testabspath.pdf"));
    Assert.assertEquals(3, jsonObject.length());
  }

  /**
   * Verifies that a FileUploadHandler which throws does not prevent later
   * handlers from being notified about each uploaded file.
   */
  @Test
  public void testFileUploadHandler() throws Exception {
    StringWriter stringWriter = new StringWriter();
    when(response.getWriter()).thenReturn(new PrintWriter(stringWriter));

    // Exceptions in a handler should be caught and logged, but shouldn't stop
    // other handlers from running.
    cp.bindFileUploadHandler(new FileUploadHandler() {
      public void handleFile(Map<String, Object> results, String poolId,
          InputStream fileInputStream, String userId, boolean isNew) throws IOException {
        throw new RuntimeException("Handler failed!");
      }
    });

    final ArrayList<String> notifiedFiles = new ArrayList<String>();
    cp.bindFileUploadHandler(new FileUploadHandler() {
      public void handleFile(Map<String, Object> results, String poolId,
          InputStream fileInputStream, String userId, boolean isNew) throws IOException {
        notifiedFiles.add(poolId);
      }
    });

    cp.doPost(request, response);

    // One notification per uploaded file (the plain form field is skipped).
    Assert.assertEquals(3, notifiedFiles.size());
  }
}
apache-2.0
bodar/totallylazy
src/com/googlecode/totallylazy/functions/And.java
302
package com.googlecode.totallylazy.functions;

/**
 * Logical conjunction as a monoid over {@code Boolean}: combines two values
 * with {@code &&} and uses {@code true} as the identity element.
 */
public enum And implements CurriedMonoid<Boolean> {
    /** The single stateless instance. */
    instance;

    /**
     * Returns the logical AND of the two arguments.
     * Short-circuits on the first operand, matching {@code a && b} semantics.
     */
    @Override
    public Boolean call(Boolean a, Boolean b) throws Exception {
        if (!a) {
            return Boolean.FALSE;
        }
        return Boolean.valueOf(b);
    }

    /**
     * Returns {@code true}, the identity for conjunction
     * ({@code true && x == x}).
     */
    @Override
    public Boolean identity() {
        return Boolean.TRUE;
    }
}
apache-2.0
Eshcar/hbase
hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
9436
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup;

import static org.junit.Assert.assertTrue;

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.Assert;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

/**
 * End-to-end incremental backup/restore test: full backup, data mutations
 * (including a MOB column family, a region split, and a schema change),
 * two incremental backups, then full and incremental restores with row-count
 * verification.
 */
@Category(LargeTests.class)
@RunWith(Parameterized.class)
public class TestIncrementalBackup extends TestBackupBase {
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestIncrementalBackup.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestIncrementalBackup.class);

  /** Runs the test once, with the "multiwal" WAL provider. */
  @Parameterized.Parameters
  public static Collection<Object[]> data() {
    provider = "multiwal";
    List<Object[]> params = new ArrayList<>();
    params.add(new Object[] { Boolean.TRUE });
    return params;
  }

  public TestIncrementalBackup(Boolean b) {
  }

  // implement all test cases in 1 test since incremental
  // backup/restore has dependencies
  @Test
  public void TestIncBackupRestore() throws Exception {
    int ADD_ROWS = 99;

    // #1 - create full backup for all tables
    LOG.info("create full backup image for all tables");
    List<TableName> tables = Lists.newArrayList(table1, table2);
    final byte[] fam3Name = Bytes.toBytes("f3");
    final byte[] mobName = Bytes.toBytes("mob");
    table1Desc.addFamily(new HColumnDescriptor(fam3Name));
    HColumnDescriptor mobHcd = new HColumnDescriptor(mobName);
    mobHcd.setMobEnabled(true);
    mobHcd.setMobThreshold(5L);
    table1Desc.addFamily(mobHcd);
    HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), table1Desc);

    try (Connection conn = ConnectionFactory.createConnection(conf1)) {
      int NB_ROWS_FAM3 = 6;
      insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close();
      insertIntoTable(conn, table1, mobName, 3, NB_ROWS_FAM3).close();
      HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();
      BackupAdminImpl client = new BackupAdminImpl(conn);
      BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
      String backupIdFull = client.backupTables(request);
      assertTrue(checkSucceeded(backupIdFull));

      // #2 - insert some data to table
      HTable t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
      LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
      Assert.assertEquals(NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3,
        HBaseTestingUtility.countRows(t1));
      LOG.debug("written " + ADD_ROWS + " rows to " + table1);
      // additionally, insert rows to MOB cf
      int NB_ROWS_MOB = 111;
      insertIntoTable(conn, table1, mobName, 3, NB_ROWS_MOB);
      LOG.debug("written " + NB_ROWS_MOB + " rows to " + table1 + " to Mob enabled CF");
      // Count before closing the table: the original closed t1 first and then
      // called countRows(t1) on the closed table.
      Assert.assertEquals(NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_MOB,
        HBaseTestingUtility.countRows(t1));
      t1.close();
      HTable t2 = (HTable) conn.getTable(table2);
      Put p2;
      for (int i = 0; i < 5; i++) {
        p2 = new Put(Bytes.toBytes("row-t2" + i));
        p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
        t2.put(p2);
      }
      Assert.assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtility.countRows(t2));
      t2.close();
      LOG.debug("written " + 5 + " rows to " + table2);

      // split table1
      MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
      List<HRegion> regions = cluster.getRegions(table1);
      byte[] name = regions.get(0).getRegionInfo().getRegionName();
      long startSplitTime = EnvironmentEdgeManager.currentTime();
      try {
        admin.splitRegionAsync(name).get();
      } catch (Exception e) {
        // although split fail, this may not affect following check in current API,
        // exception will be thrown.
        LOG.debug("region is not splittable, because " + e);
      }
      while (!admin.isTableAvailable(table1)) {
        Thread.sleep(100);
      }
      long endSplitTime = EnvironmentEdgeManager.currentTime();
      // split finished
      LOG.debug("split finished in =" + (endSplitTime - startSplitTime));

      // #3 - incremental backup for multiple tables
      tables = Lists.newArrayList(table1, table2);
      request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
      String backupIdIncMultiple = client.backupTables(request);
      assertTrue(checkSucceeded(backupIdIncMultiple));

      // add column family f2 to table1
      final byte[] fam2Name = Bytes.toBytes("f2");
      table1Desc.addFamily(new HColumnDescriptor(fam2Name));
      // drop column family f3
      table1Desc.removeFamily(fam3Name);
      HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), table1Desc);

      int NB_ROWS_FAM2 = 7;
      HTable t3 = insertIntoTable(conn, table1, fam2Name, 2, NB_ROWS_FAM2);
      t3.close();

      // Wait for 5 sec to make sure that old WALs were deleted
      Thread.sleep(5000);

      // #4 - additional incremental backup for multiple tables
      request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
      String backupIdIncMultiple2 = client.backupTables(request);
      assertTrue(checkSucceeded(backupIdIncMultiple2));

      // #5 - restore full backup for all tables
      TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
      TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };
      LOG.debug("Restoring full " + backupIdFull);
      client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
        tablesRestoreFull, tablesMapFull, true));

      // #6.1 - check tables for full restore
      HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
      assertTrue(hAdmin.tableExists(table1_restore));
      assertTrue(hAdmin.tableExists(table2_restore));
      hAdmin.close();

      // #6.2 - checking row count of tables for full restore
      HTable hTable = (HTable) conn.getTable(table1_restore);
      Assert.assertEquals(NB_ROWS_IN_BATCH + NB_ROWS_FAM3,
        HBaseTestingUtility.countRows(hTable));
      hTable.close();
      hTable = (HTable) conn.getTable(table2_restore);
      Assert.assertEquals(NB_ROWS_IN_BATCH, HBaseTestingUtility.countRows(hTable));
      hTable.close();

      // #7 - restore incremental backup for multiple tables, with overwrite
      TableName[] tablesRestoreIncMultiple = new TableName[] { table1, table2 };
      TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore };
      client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2,
        false, tablesRestoreIncMultiple, tablesMapIncMultiple, true));
      hTable = (HTable) conn.getTable(table1_restore);
      LOG.debug("After incremental restore: " + hTable.getDescriptor());
      int countFamName = TEST_UTIL.countRows(hTable, famName);
      LOG.debug("f1 has " + countFamName + " rows");
      Assert.assertEquals(NB_ROWS_IN_BATCH + ADD_ROWS, countFamName);
      int countFam2Name = TEST_UTIL.countRows(hTable, fam2Name);
      LOG.debug("f2 has " + countFam2Name + " rows");
      Assert.assertEquals(NB_ROWS_FAM2, countFam2Name);
      int countMobName = TEST_UTIL.countRows(hTable, mobName);
      LOG.debug("mob has " + countMobName + " rows");
      Assert.assertEquals(NB_ROWS_MOB, countMobName);
      hTable.close();
      hTable = (HTable) conn.getTable(table2_restore);
      Assert.assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtility.countRows(hTable));
      hTable.close();
      admin.close();
    }
  }
}
apache-2.0
google/closure-templates
java/src/com/google/template/soy/jbcsrc/shared/LegacyFunctionAdapter.java
1543
/*
 * Copyright 2018 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.google.template.soy.jbcsrc.shared;

import com.google.template.soy.data.SoyValue;
import com.google.template.soy.plugin.java.restricted.JavaValueFactory;
import com.google.template.soy.shared.restricted.SoyJavaFunction;
import java.lang.reflect.Method;
import java.util.List;

/**
 * An adapter that SoySauceImpl installs for SoyJavaFunctions, which ExpressionCompiler delegates to
 * for running the java code.
 */
public final class LegacyFunctionAdapter {

  /** Reflective handle on {@link #computeForJava}, used by the compiler to emit the call. */
  public static final Method METHOD =
      JavaValueFactory.createMethod(LegacyFunctionAdapter.class, "computeForJava", List.class);

  /** The wrapped legacy function; every call is forwarded to it unchanged. */
  private final SoyJavaFunction legacyFn;

  public LegacyFunctionAdapter(SoyJavaFunction legacyFn) {
    this.legacyFn = legacyFn;
  }

  /** Delegates evaluation of {@code args} to the wrapped legacy function. */
  public SoyValue computeForJava(List<SoyValue> args) {
    SoyValue result = legacyFn.computeForJava(args);
    return result;
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder("LegacyFunctionAdapter{");
    sb.append(legacyFn);
    sb.append("}");
    return sb.toString();
  }
}
apache-2.0
dlnufox/ignite
modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/DataStructuresProcessor.java
70905
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.internal.processors.datastructures; import java.io.Externalizable; import java.io.IOException; import java.io.ObjectInput; import java.io.ObjectOutput; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.UUID; import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CountDownLatch; import javax.cache.event.CacheEntryEvent; import javax.cache.event.CacheEntryListenerException; import javax.cache.event.CacheEntryUpdatedListener; import javax.cache.event.EventType; import javax.cache.processor.EntryProcessor; import javax.cache.processor.EntryProcessorException; import javax.cache.processor.MutableEntry; import org.apache.ignite.IgniteAtomicLong; import org.apache.ignite.IgniteAtomicReference; import org.apache.ignite.IgniteAtomicSequence; import org.apache.ignite.IgniteAtomicStamped; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteCountDownLatch; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteLogger; import org.apache.ignite.IgniteQueue; import org.apache.ignite.IgniteSet; import 
org.apache.ignite.cache.CacheEntryEventSerializableFilter; import org.apache.ignite.configuration.AtomicConfiguration; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.CollectionConfiguration; import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.IgniteInterruptedCheckedException; import org.apache.ignite.internal.cluster.ClusterGroupEmptyCheckedException; import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException; import org.apache.ignite.internal.processors.GridProcessorAdapter; import org.apache.ignite.internal.processors.cache.CachePartialUpdateCheckedException; import org.apache.ignite.internal.processors.cache.CacheType; import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.GridCacheInternal; import org.apache.ignite.internal.processors.cache.IgniteInternalCache; import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx; import org.apache.ignite.internal.transactions.IgniteTxRollbackCheckedException; import org.apache.ignite.internal.util.lang.IgniteClosureX; import org.apache.ignite.internal.util.lang.IgniteInClosureX; import org.apache.ignite.internal.util.lang.IgniteOutClosureX; import org.apache.ignite.internal.util.typedef.CI1; import org.apache.ignite.internal.util.typedef.CIX1; import org.apache.ignite.internal.util.typedef.CX1; import org.apache.ignite.internal.util.typedef.T2; import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.A; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.GPR; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.jetbrains.annotations.Nullable; import org.jsr166.ConcurrentHashMap8; import static 
org.apache.ignite.cache.CacheAtomicWriteOrderMode.PRIMARY; import static org.apache.ignite.cache.CacheRebalanceMode.SYNC; import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC; import static org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor.DataStructureType.ATOMIC_LONG; import static org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor.DataStructureType.ATOMIC_REF; import static org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor.DataStructureType.ATOMIC_SEQ; import static org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor.DataStructureType.ATOMIC_STAMPED; import static org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor.DataStructureType.COUNT_DOWN_LATCH; import static org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor.DataStructureType.QUEUE; import static org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor.DataStructureType.SET; import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC; import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ; /** * Manager of data structures. */ public final class DataStructuresProcessor extends GridProcessorAdapter { /** */ public static final CacheDataStructuresConfigurationKey DATA_STRUCTURES_KEY = new CacheDataStructuresConfigurationKey(); /** */ private static final CacheDataStructuresCacheKey DATA_STRUCTURES_CACHE_KEY = new CacheDataStructuresCacheKey(); /** Initial capacity. */ private static final int INITIAL_CAPACITY = 10; /** */ private static final int MAX_UPDATE_RETRIES = 100; /** */ private static final long RETRY_DELAY = 1; /** Initialization latch. */ private final CountDownLatch initLatch = new CountDownLatch(1); /** Initialization failed flag. */ private boolean initFailed; /** Cache contains only {@code GridCacheInternal,GridCacheInternal}. 
*/ private IgniteInternalCache<GridCacheInternal, GridCacheInternal> dsView; /** Internal storage of all dataStructures items (sequence, atomic long etc.). */ private final ConcurrentMap<GridCacheInternal, GridCacheRemovable> dsMap; /** Cache contains only {@code GridCacheAtomicValue}. */ private IgniteInternalCache<GridCacheInternalKey, GridCacheAtomicLongValue> atomicLongView; /** Cache contains only {@code GridCacheCountDownLatchValue}. */ private IgniteInternalCache<GridCacheInternalKey, GridCacheCountDownLatchValue> cntDownLatchView; /** Cache contains only {@code GridCacheAtomicReferenceValue}. */ private IgniteInternalCache<GridCacheInternalKey, GridCacheAtomicReferenceValue> atomicRefView; /** Cache contains only {@code GridCacheAtomicStampedValue}. */ private IgniteInternalCache<GridCacheInternalKey, GridCacheAtomicStampedValue> atomicStampedView; /** Cache contains only entry {@code GridCacheSequenceValue}. */ private IgniteInternalCache<GridCacheInternalKey, GridCacheAtomicSequenceValue> seqView; /** Cache context for atomic data structures. */ private GridCacheContext dsCacheCtx; /** Atomic data structures configuration. */ private final AtomicConfiguration atomicCfg; /** */ private IgniteInternalCache<CacheDataStructuresConfigurationKey, Map<String, DataStructureInfo>> utilityCache; /** */ private IgniteInternalCache<CacheDataStructuresCacheKey, List<CacheCollectionInfo>> utilityDataCache; /** */ private volatile UUID qryId; /** * @param ctx Context. 
*/ public DataStructuresProcessor(GridKernalContext ctx) { super(ctx); dsMap = new ConcurrentHashMap8<>(INITIAL_CAPACITY); atomicCfg = ctx.config().getAtomicConfiguration(); } /** {@inheritDoc} */ @SuppressWarnings("unchecked") @Override public void onKernalStart() throws IgniteCheckedException { if (ctx.config().isDaemon()) return; utilityCache = (IgniteInternalCache)ctx.cache().utilityCache(); utilityDataCache = (IgniteInternalCache)ctx.cache().utilityCache(); assert utilityCache != null; if (atomicCfg != null) { IgniteInternalCache atomicsCache = ctx.cache().atomicsCache(); assert atomicsCache != null; dsView = atomicsCache; cntDownLatchView = atomicsCache; atomicLongView = atomicsCache; atomicRefView = atomicsCache; atomicStampedView = atomicsCache; seqView = atomicsCache; dsCacheCtx = atomicsCache.context(); } initLatch.countDown(); } /** * @throws IgniteCheckedException If failed. */ private void startQuery() throws IgniteCheckedException { if (qryId == null) { synchronized (this) { if (qryId == null) { qryId = dsCacheCtx.continuousQueries().executeInternalQuery(new DataStructuresEntryListener(), new DataStructuresEntryFilter(), dsCacheCtx.isReplicated() && dsCacheCtx.affinityNode(), false); } } } } /** {@inheritDoc} */ @Override public void onKernalStop(boolean cancel) { super.onKernalStop(cancel); if (initLatch.getCount() > 0) { initFailed = true; initLatch.countDown(); } if (qryId != null) dsCacheCtx.continuousQueries().cancelInternalQuery(qryId); } /** * @param key Key. * @param obj Object. 
*/ void onRemoved(GridCacheInternal key, GridCacheRemovable obj) { dsMap.remove(key, obj); } /** {@inheritDoc} */ @Override public void onReconnected(boolean clusterRestarted) throws IgniteCheckedException { for (Map.Entry<GridCacheInternal, GridCacheRemovable> e : dsMap.entrySet()) { GridCacheRemovable obj = e.getValue(); if (clusterRestarted) { obj.onRemoved(); dsMap.remove(e.getKey(), obj); } else obj.needCheckNotRemoved(); } for (GridCacheContext cctx : ctx.cache().context().cacheContexts()) cctx.dataStructures().onReconnected(clusterRestarted); } /** * Gets a sequence from cache or creates one if it's not cached. * * @param name Sequence name. * @param initVal Initial value for sequence. If sequence already cached, {@code initVal} will be ignored. * @param create If {@code true} sequence will be created in case it is not in cache. * @return Sequence. * @throws IgniteCheckedException If loading failed. */ public final IgniteAtomicSequence sequence(final String name, final long initVal, final boolean create) throws IgniteCheckedException { A.notNull(name, "name"); awaitInitialization(); checkAtomicsConfiguration(); startQuery(); return getAtomic(new IgniteOutClosureX<IgniteAtomicSequence>() { @Override public IgniteAtomicSequence applyx() throws IgniteCheckedException { GridCacheInternalKey key = new GridCacheInternalKeyImpl(name); dsCacheCtx.gate().enter(); try (IgniteInternalTx tx = CU.txStartInternal(dsCacheCtx, dsView, PESSIMISTIC, REPEATABLE_READ)) { GridCacheAtomicSequenceValue seqVal = cast(dsView.get(key), GridCacheAtomicSequenceValue.class); // Check that sequence hasn't been created in other thread yet. GridCacheAtomicSequenceEx seq = cast(dsMap.get(key), GridCacheAtomicSequenceEx.class); if (seq != null) { assert seqVal != null; return seq; } if (seqVal == null && !create) return null; // We should use offset because we already reserved left side of range. long off = atomicCfg.getAtomicSequenceReserveSize() > 1 ? 
atomicCfg.getAtomicSequenceReserveSize() - 1 : 1; long upBound; long locCntr; if (seqVal == null) { locCntr = initVal; upBound = locCntr + off; // Global counter must be more than reserved region. seqVal = new GridCacheAtomicSequenceValue(upBound + 1); } else { locCntr = seqVal.get(); upBound = locCntr + off; // Global counter must be more than reserved region. seqVal.set(upBound + 1); } // Update global counter. dsView.put(key, seqVal); // Only one thread can be in the transaction scope and create sequence. seq = new GridCacheAtomicSequenceImpl(name, key, seqView, dsCacheCtx, atomicCfg.getAtomicSequenceReserveSize(), locCntr, upBound); dsMap.put(key, seq); tx.commit(); return seq; } catch (Error | Exception e) { dsMap.remove(key); U.error(log, "Failed to make atomic sequence: " + name, e); throw e; } finally { dsCacheCtx.gate().leave(); } } }, new DataStructureInfo(name, ATOMIC_SEQ, null), create, IgniteAtomicSequence.class); } /** * Removes sequence from cache. * * @param name Sequence name. * @throws IgniteCheckedException If removing failed. */ public final void removeSequence(final String name) throws IgniteCheckedException { assert name != null; awaitInitialization(); checkAtomicsConfiguration(); removeDataStructure(new IgniteOutClosureX<Void>() { @Override public Void applyx() throws IgniteCheckedException { dsCacheCtx.gate().enter(); try { GridCacheInternal key = new GridCacheInternalKeyImpl(name); removeInternal(key, GridCacheAtomicSequenceValue.class); } finally { dsCacheCtx.gate().leave(); } return null; } }, name, ATOMIC_SEQ, null); } /** * Gets an atomic long from cache or creates one if it's not cached. * * @param name Name of atomic long. * @param initVal Initial value for atomic long. If atomic long already cached, {@code initVal} * will be ignored. * @param create If {@code true} atomic long will be created in case it is not in cache. * @return Atomic long. * @throws IgniteCheckedException If loading failed. 
     */
    public final IgniteAtomicLong atomicLong(final String name, final long initVal, final boolean create)
        throws IgniteCheckedException {
        A.notNull(name, "name");

        awaitInitialization();

        checkAtomicsConfiguration();

        startQuery();

        return getAtomic(new IgniteOutClosureX<IgniteAtomicLong>() {
            @Override public IgniteAtomicLong applyx() throws IgniteCheckedException {
                final GridCacheInternalKey key = new GridCacheInternalKeyImpl(name);

                dsCacheCtx.gate().enter();

                try (IgniteInternalTx tx = CU.txStartInternal(dsCacheCtx, dsView, PESSIMISTIC, REPEATABLE_READ)) {
                    GridCacheAtomicLongValue val = cast(dsView.get(key), GridCacheAtomicLongValue.class);

                    // Check that atomic long hasn't been created in other thread yet.
                    GridCacheAtomicLongEx a = cast(dsMap.get(key), GridCacheAtomicLongEx.class);

                    if (a != null) {
                        assert val != null;

                        return a;
                    }

                    if (val == null && !create)
                        return null;

                    if (val == null) {
                        val = new GridCacheAtomicLongValue(initVal);

                        dsView.put(key, val);
                    }

                    a = new GridCacheAtomicLongImpl(name, key, atomicLongView, dsCacheCtx);

                    dsMap.put(key, a);

                    tx.commit();

                    return a;
                }
                catch (Error | Exception e) {
                    // Drop the local proxy so a retry re-creates it from scratch.
                    dsMap.remove(key);

                    U.error(log, "Failed to make atomic long: " + name, e);

                    throw e;
                }
                finally {
                    dsCacheCtx.gate().leave();
                }
            }
        }, new DataStructureInfo(name, ATOMIC_LONG, null), create, IgniteAtomicLong.class);
    }

    /**
     * @param c Closure creating data structure instance.
     * @param dsInfo Data structure info.
     * @param create Create flag.
     * @param cls Expected data structure class.
     * @return Data structure instance.
     * @throws IgniteCheckedException If failed.
     */
    @Nullable private <T> T getAtomic(final IgniteOutClosureX<T> c,
        DataStructureInfo dsInfo,
        boolean create,
        Class<? extends T> cls)
        throws IgniteCheckedException
    {
        Map<String, DataStructureInfo> dsMap = utilityCache.get(DATA_STRUCTURES_KEY);

        if (!create && (dsMap == null || !dsMap.containsKey(dsInfo.name)))
            return null;

        IgniteCheckedException err = validateDataStructure(dsMap, dsInfo, create);

        if (err != null)
            throw err;

        final GridCacheInternalKey key = new GridCacheInternalKeyImpl(dsInfo.name);

        // Check type of structure received by key from local cache.
        T dataStructure = cast(this.dsMap.get(key), cls);

        if (dataStructure != null)
            return dataStructure;

        if (!create)
            return c.applyx();

        while (true) {
            try (IgniteInternalTx tx = utilityCache.txStartEx(PESSIMISTIC, REPEATABLE_READ)) {
                err = utilityCache.invoke(DATA_STRUCTURES_KEY, new AddAtomicProcessor(dsInfo)).get();

                if (err != null)
                    throw err;

                dataStructure = c.applyx();

                tx.commit();

                return dataStructure;
            }
            catch (ClusterTopologyCheckedException e) {
                // Topology changed mid-transaction: wait until it stabilizes, then retry.
                IgniteInternalFuture<?> fut = e.retryReadyFuture();

                fut.get();
            }
            catch (IgniteTxRollbackCheckedException ignore) {
                // Safe to retry right away.
            }
        }
    }

    /**
     * Removes atomic long from cache.
     *
     * @param name Atomic long name.
     * @throws IgniteCheckedException If removing failed.
     */
    public final void removeAtomicLong(final String name) throws IgniteCheckedException {
        assert name != null;
        assert dsCacheCtx != null;

        awaitInitialization();

        removeDataStructure(new IgniteOutClosureX<Void>() {
            @Override public Void applyx() throws IgniteCheckedException {
                dsCacheCtx.gate().enter();

                try {
                    removeInternal(new GridCacheInternalKeyImpl(name), GridCacheAtomicLongValue.class);
                }
                finally {
                    dsCacheCtx.gate().leave();
                }

                return null;
            }
        }, name, ATOMIC_LONG, null);
    }

    /**
     * @param c Closure.
     * @param name Data structure name.
     * @param type Data structure type.
     * @param afterRmv Optional closure to run after data structure removed.
     * @throws IgniteCheckedException If failed.
*/ private <T> void removeDataStructure(IgniteOutClosureX<T> c, String name, DataStructureType type, @Nullable IgniteInClosureX<T> afterRmv) throws IgniteCheckedException { Map<String, DataStructureInfo> dsMap = utilityCache.get(DATA_STRUCTURES_KEY); if (dsMap == null || !dsMap.containsKey(name)) return; DataStructureInfo dsInfo = new DataStructureInfo(name, type, null); IgniteCheckedException err = validateDataStructure(dsMap, dsInfo, false); if (err != null) throw err; while (true) { try (IgniteInternalTx tx = utilityCache.txStartEx(PESSIMISTIC, REPEATABLE_READ)) { T2<Boolean, IgniteCheckedException> res = utilityCache.invoke(DATA_STRUCTURES_KEY, new RemoveDataStructureProcessor(dsInfo)).get(); err = res.get2(); if (err != null) throw err; assert res.get1() != null; boolean exists = res.get1(); if (!exists) return; T rmvInfo = c.applyx(); tx.commit(); if (afterRmv != null && rmvInfo != null) afterRmv.applyx(rmvInfo); } catch (ClusterTopologyCheckedException e) { IgniteInternalFuture<?> fut = e.retryReadyFuture(); fut.get(); } catch (IgniteTxRollbackCheckedException ignore) { // Safe to retry right away. } } } /** * Gets an atomic reference from cache or creates one if it's not cached. * * @param name Name of atomic reference. * @param initVal Initial value for atomic reference. If atomic reference already cached, {@code initVal} * will be ignored. * @param create If {@code true} atomic reference will be created in case it is not in cache. * @return Atomic reference. * @throws IgniteCheckedException If loading failed. 
*/ @SuppressWarnings("unchecked") public final <T> IgniteAtomicReference<T> atomicReference(final String name, final T initVal, final boolean create) throws IgniteCheckedException { A.notNull(name, "name"); awaitInitialization(); checkAtomicsConfiguration(); startQuery(); return getAtomic(new IgniteOutClosureX<IgniteAtomicReference>() { @Override public IgniteAtomicReference<T> applyx() throws IgniteCheckedException { GridCacheInternalKey key = new GridCacheInternalKeyImpl(name); dsCacheCtx.gate().enter(); try (IgniteInternalTx tx = CU.txStartInternal(dsCacheCtx, dsView, PESSIMISTIC, REPEATABLE_READ)) { GridCacheAtomicReferenceValue val = cast(dsView.get(key), GridCacheAtomicReferenceValue.class); // Check that atomic reference hasn't been created in other thread yet. GridCacheAtomicReferenceEx ref = cast(dsMap.get(key), GridCacheAtomicReferenceEx.class); if (ref != null) { assert val != null; return ref; } if (val == null && !create) return null; if (val == null) { val = new GridCacheAtomicReferenceValue(initVal); dsView.put(key, val); } ref = new GridCacheAtomicReferenceImpl(name, key, atomicRefView, dsCacheCtx); dsMap.put(key, ref); tx.commit(); return ref; } catch (Error | Exception e) { dsMap.remove(key); U.error(log, "Failed to make atomic reference: " + name, e); throw e; } finally { dsCacheCtx.gate().leave(); } } }, new DataStructureInfo(name, ATOMIC_REF, null), create, IgniteAtomicReference.class); } /** * Removes atomic reference from cache. * * @param name Atomic reference name. * @throws IgniteCheckedException If removing failed. 
     */
    public final void removeAtomicReference(final String name) throws IgniteCheckedException {
        assert name != null;
        assert dsCacheCtx != null;

        awaitInitialization();

        removeDataStructure(new IgniteOutClosureX<Void>() {
            @Override public Void applyx() throws IgniteCheckedException {
                dsCacheCtx.gate().enter();

                try {
                    GridCacheInternal key = new GridCacheInternalKeyImpl(name);

                    removeInternal(key, GridCacheAtomicReferenceValue.class);
                }
                finally {
                    dsCacheCtx.gate().leave();
                }

                return null;
            }
        }, name, ATOMIC_REF, null);
    }

    /**
     * Gets an atomic stamped from cache or creates one if it's not cached.
     *
     * @param name Name of atomic stamped.
     * @param initVal Initial value for atomic stamped. If atomic stamped already cached, {@code initVal}
     *      will be ignored.
     * @param initStamp Initial stamp for atomic stamped. If atomic stamped already cached, {@code initStamp}
     *      will be ignored.
     * @param create If {@code true} atomic stamped will be created in case it is not in cache.
     * @return Atomic stamped.
     * @throws IgniteCheckedException If loading failed.
     */
    @SuppressWarnings("unchecked")
    public final <T, S> IgniteAtomicStamped<T, S> atomicStamped(final String name, final T initVal,
        final S initStamp, final boolean create) throws IgniteCheckedException {
        A.notNull(name, "name");

        awaitInitialization();

        checkAtomicsConfiguration();

        startQuery();

        return getAtomic(new IgniteOutClosureX<IgniteAtomicStamped>() {
            @Override public IgniteAtomicStamped<T, S> applyx() throws IgniteCheckedException {
                GridCacheInternalKeyImpl key = new GridCacheInternalKeyImpl(name);

                dsCacheCtx.gate().enter();

                try (IgniteInternalTx tx = CU.txStartInternal(dsCacheCtx, dsView, PESSIMISTIC, REPEATABLE_READ)) {
                    GridCacheAtomicStampedValue val = cast(dsView.get(key), GridCacheAtomicStampedValue.class);

                    // Check that atomic stamped hasn't been created in other thread yet.
                    GridCacheAtomicStampedEx stmp = cast(dsMap.get(key), GridCacheAtomicStampedEx.class);

                    if (stmp != null) {
                        assert val != null;

                        return stmp;
                    }

                    if (val == null && !create)
                        return null;

                    if (val == null) {
                        val = new GridCacheAtomicStampedValue(initVal, initStamp);

                        dsView.put(key, val);
                    }

                    stmp = new GridCacheAtomicStampedImpl(name, key, atomicStampedView, dsCacheCtx);

                    dsMap.put(key, stmp);

                    tx.commit();

                    return stmp;
                }
                catch (Error | Exception e) {
                    // Drop the local proxy so a retry starts from a clean slate.
                    dsMap.remove(key);

                    U.error(log, "Failed to make atomic stamped: " + name, e);

                    throw e;
                }
                finally {
                    dsCacheCtx.gate().leave();
                }
            }
        }, new DataStructureInfo(name, ATOMIC_STAMPED, null), create, IgniteAtomicStamped.class);
    }

    /**
     * Removes atomic stamped from cache.
     *
     * @param name Atomic stamped name.
     * @throws IgniteCheckedException If removing failed.
     */
    public final void removeAtomicStamped(final String name) throws IgniteCheckedException {
        assert name != null;
        assert dsCacheCtx != null;

        awaitInitialization();

        removeDataStructure(new IgniteOutClosureX<Void>() {
            @Override public Void applyx() throws IgniteCheckedException {
                dsCacheCtx.gate().enter();

                try {
                    GridCacheInternal key = new GridCacheInternalKeyImpl(name);

                    removeInternal(key, GridCacheAtomicStampedValue.class);
                }
                finally {
                    dsCacheCtx.gate().leave();
                }

                return null;
            }
        }, name, ATOMIC_STAMPED, null);
    }

    /**
     * Gets a queue from cache or creates one if it's not cached.
     *
     * @param name Name of queue.
     * @param cap Max size of queue.
     * @param cfg Non-null queue configuration if new queue should be created.
     * @return Instance of queue.
     * @throws IgniteCheckedException If failed.
     */
    @SuppressWarnings("unchecked")
    public final <T> IgniteQueue<T> queue(final String name,
        int cap,
        @Nullable final CollectionConfiguration cfg)
        throws IgniteCheckedException
    {
        A.notNull(name, "name");

        awaitInitialization();

        String cacheName = null;

        if (cfg != null) {
            // Non-positive capacity means "unbounded".
            if (cap <= 0)
                cap = Integer.MAX_VALUE;

            cacheName = compatibleConfiguration(cfg);
        }

        DataStructureInfo dsInfo = new DataStructureInfo(name,
            QUEUE,
            cfg != null ? new QueueInfo(cacheName, cfg.isCollocated(), cap) : null);

        final int cap0 = cap;

        final boolean create = cfg != null;

        return getCollection(new IgniteClosureX<GridCacheContext, IgniteQueue<T>>() {
            @Override public IgniteQueue<T> applyx(GridCacheContext ctx) throws IgniteCheckedException {
                // 'cfg' is dereferenced only when 'create' is true, so the null case is safe.
                return ctx.dataStructures().queue(name, cap0, create && cfg.isCollocated(), create);
            }
        }, dsInfo, create);
    }

    /**
     * @param cfg Collection configuration.
     * @param name Cache name.
     * @return Cache configuration.
     */
    private CacheConfiguration cacheConfiguration(CollectionConfiguration cfg, String name) {
        CacheConfiguration ccfg = new CacheConfiguration();

        ccfg.setName(name);
        ccfg.setBackups(cfg.getBackups());
        ccfg.setCacheMode(cfg.getCacheMode());
        ccfg.setMemoryMode(cfg.getMemoryMode());
        ccfg.setAtomicityMode(cfg.getAtomicityMode());
        ccfg.setOffHeapMaxMemory(cfg.getOffHeapMaxMemory());
        ccfg.setNodeFilter(cfg.getNodeFilter());
        ccfg.setWriteSynchronizationMode(FULL_SYNC);
        ccfg.setAtomicWriteOrderMode(PRIMARY);
        ccfg.setRebalanceMode(SYNC);

        return ccfg;
    }

    /**
     * @param cfg Collection configuration.
     * @return Cache name.
     * @throws IgniteCheckedException If failed.
     */
    private String compatibleConfiguration(CollectionConfiguration cfg) throws IgniteCheckedException {
        // Affinity nodes can read the registry locally; others go through a distributed get.
        List<CacheCollectionInfo> caches = utilityDataCache.context().affinityNode() ?
            utilityDataCache.localPeek(DATA_STRUCTURES_CACHE_KEY, null, null) :
            utilityDataCache.get(DATA_STRUCTURES_CACHE_KEY);

        String cacheName = findCompatibleConfiguration(cfg, caches);

        if (cacheName == null)
            cacheName = utilityDataCache.invoke(DATA_STRUCTURES_CACHE_KEY, new AddDataCacheProcessor(cfg)).get();

        assert cacheName != null;

        CacheConfiguration newCfg = cacheConfiguration(cfg, cacheName);

        if (ctx.cache().cache(cacheName) == null)
            ctx.cache().dynamicStartCache(newCfg, cacheName, null, CacheType.INTERNAL, false, true).get();

        assert ctx.cache().cache(cacheName) != null : cacheName;

        return cacheName;
    }

    /**
     * @param name Queue name.
     * @param cctx Queue cache context.
     * @throws IgniteCheckedException If failed.
     */
    public void removeQueue(final String name, final GridCacheContext cctx) throws IgniteCheckedException {
        assert name != null;
        assert cctx != null;

        awaitInitialization();

        IgniteOutClosureX<GridCacheQueueHeader> rmv = new IgniteOutClosureX<GridCacheQueueHeader>() {
            @Override public GridCacheQueueHeader applyx() throws IgniteCheckedException {
                return (GridCacheQueueHeader)retryRemove(cctx.cache(), new GridCacheQueueHeaderKey(name));
            }
        };

        CIX1<GridCacheQueueHeader> afterRmv = new CIX1<GridCacheQueueHeader>() {
            @Override public void applyx(GridCacheQueueHeader hdr) throws IgniteCheckedException {
                if (hdr.empty())
                    return;

                GridCacheQueueAdapter.removeKeys(cctx.cache(),
                    hdr.id(),
                    name,
                    hdr.collocated(),
                    hdr.head(),
                    hdr.tail(),
                    0);
            }
        };

        removeDataStructure(rmv, name, QUEUE, afterRmv);
    }

    /**
     * @param c Closure creating collection.
     * @param dsInfo Data structure info.
     * @param create Create flag.
     * @return Collection instance.
     * @throws IgniteCheckedException If failed.
*/ @Nullable private <T> T getCollection(final IgniteClosureX<GridCacheContext, T> c, DataStructureInfo dsInfo, boolean create) throws IgniteCheckedException { awaitInitialization(); Map<String, DataStructureInfo> dsMap = utilityCache.get(DATA_STRUCTURES_KEY); if (!create && (dsMap == null || !dsMap.containsKey(dsInfo.name))) return null; IgniteCheckedException err = validateDataStructure(dsMap, dsInfo, create); if (err != null) throw err; if (!create) { DataStructureInfo oldInfo = dsMap.get(dsInfo.name); assert oldInfo.info instanceof CollectionInfo : oldInfo.info; String cacheName = ((CollectionInfo)oldInfo.info).cacheName; GridCacheContext cacheCtx = ctx.cache().getOrStartCache(cacheName).context(); return c.applyx(cacheCtx); } while (true) { try (IgniteInternalTx tx = utilityCache.txStartEx(PESSIMISTIC, REPEATABLE_READ)) { T2<String, IgniteCheckedException> res = utilityCache.invoke(DATA_STRUCTURES_KEY, new AddCollectionProcessor(dsInfo)).get(); err = res.get2(); if (err != null) throw err; String cacheName = res.get1(); final GridCacheContext cacheCtx = ctx.cache().internalCache(cacheName).context(); T col = c.applyx(cacheCtx); tx.commit(); return col; } catch (ClusterTopologyCheckedException e) { IgniteInternalFuture<?> fut = e.retryReadyFuture(); fut.get(); } catch (IgniteTxRollbackCheckedException ignore) { // Safe to retry right away. } } } /** * Awaits for processor initialization. */ private void awaitInitialization() { if (initLatch.getCount() > 0) { try { U.await(initLatch); if (initFailed) throw new IllegalStateException("Failed to initialize data structures processor."); } catch (IgniteInterruptedCheckedException e) { throw new IllegalStateException("Failed to initialize data structures processor " + "(thread has been interrupted).", e); } } } /** * @param dsMap Map with data structure information. * @param info New data structure information. * @param create Create flag. * @return {@link IgniteException} if validation failed. 
     */
    @Nullable private static IgniteCheckedException validateDataStructure(
        @Nullable Map<String, DataStructureInfo> dsMap,
        DataStructureInfo info,
        boolean create)
    {
        if (dsMap == null)
            return null;

        DataStructureInfo oldInfo = dsMap.get(info.name);

        if (oldInfo != null)
            return oldInfo.validate(info, create);

        return null;
    }

    /**
     * Gets or creates count down latch. If count down latch is not found in cache,
     * it is created using provided name and count parameter.
     *
     * @param name Name of the latch.
     * @param cnt Initial count.
     * @param autoDel {@code True} to automatically delete latch from cache when
     *      its count reaches zero.
     * @param create If {@code true} latch will be created in case it is not in cache,
     *      if it is {@code false} all parameters except {@code name} are ignored.
     * @return Count down latch for the given name or {@code null} if it is not found and
     *      {@code create} is false.
     * @throws IgniteCheckedException If operation failed.
     */
    public IgniteCountDownLatch countDownLatch(final String name,
        final int cnt,
        final boolean autoDel,
        final boolean create)
        throws IgniteCheckedException
    {
        A.notNull(name, "name");

        awaitInitialization();

        if (create)
            A.ensure(cnt >= 0, "count can not be negative");

        checkAtomicsConfiguration();

        startQuery();

        return getAtomic(new IgniteOutClosureX<IgniteCountDownLatch>() {
            @Override public IgniteCountDownLatch applyx() throws IgniteCheckedException {
                GridCacheInternalKey key = new GridCacheInternalKeyImpl(name);

                dsCacheCtx.gate().enter();

                try (IgniteInternalTx tx = CU.txStartInternal(dsCacheCtx, dsView, PESSIMISTIC, REPEATABLE_READ)) {
                    GridCacheCountDownLatchValue val = cast(dsView.get(key), GridCacheCountDownLatchValue.class);

                    // Check that count down hasn't been created in other thread yet.
                    GridCacheCountDownLatchEx latch = cast(dsMap.get(key), GridCacheCountDownLatchEx.class);

                    if (latch != null) {
                        assert val != null;

                        return latch;
                    }

                    if (val == null && !create)
                        return null;

                    if (val == null) {
                        val = new GridCacheCountDownLatchValue(cnt, autoDel);

                        dsView.put(key, val);
                    }

                    latch = new GridCacheCountDownLatchImpl(name,
                        val.initialCount(),
                        val.autoDelete(),
                        key,
                        cntDownLatchView,
                        dsCacheCtx);

                    dsMap.put(key, latch);

                    tx.commit();

                    return latch;
                }
                catch (Error | Exception e) {
                    // Drop the local proxy so a retry starts from a clean slate.
                    dsMap.remove(key);

                    U.error(log, "Failed to create count down latch: " + name, e);

                    throw e;
                }
                finally {
                    dsCacheCtx.gate().leave();
                }
            }
        }, new DataStructureInfo(name, COUNT_DOWN_LATCH, null), create, GridCacheCountDownLatchEx.class);
    }

    /**
     * Removes count down latch from cache.
     *
     * @param name Name of the latch.
     * @throws IgniteCheckedException If operation failed.
     */
    public void removeCountDownLatch(final String name) throws IgniteCheckedException {
        assert name != null;
        assert dsCacheCtx != null;

        awaitInitialization();

        removeDataStructure(new IgniteOutClosureX<Void>() {
            @Override public Void applyx() throws IgniteCheckedException {
                GridCacheInternal key = new GridCacheInternalKeyImpl(name);

                dsCacheCtx.gate().enter();

                try (IgniteInternalTx tx = CU.txStartInternal(dsCacheCtx, dsView, PESSIMISTIC, REPEATABLE_READ)) {
                    // Check correctness type of removable object.
                    GridCacheCountDownLatchValue val =
                        cast(dsView.get(key), GridCacheCountDownLatchValue.class);

                    if (val != null) {
                        // A latch may only be removed once it has fully counted down.
                        if (val.get() > 0) {
                            throw new IgniteCheckedException("Failed to remove count down latch " +
                                "with non-zero count: " + val.get());
                        }

                        dsView.remove(key);

                        tx.commit();
                    }
                    else
                        tx.setRollbackOnly();

                    return null;
                }
                finally {
                    dsCacheCtx.gate().leave();
                }
            }
        }, name, COUNT_DOWN_LATCH, null);
    }

    /**
     * Remove internal entry by key from cache.
     *
     * @param key Internal entry key.
     * @param cls Class of object which will be removed. If cached object has different type exception will be thrown.
     * @return Method returns true if sequence has been removed and false if it's not cached.
     * @throws IgniteCheckedException If removing failed or class of object is different to expected class.
     */
    private <R> boolean removeInternal(final GridCacheInternal key, final Class<R> cls) throws IgniteCheckedException {
        return CU.outTx(
            new Callable<Boolean>() {
                @Override public Boolean call() throws Exception {
                    try (IgniteInternalTx tx = CU.txStartInternal(dsCacheCtx, dsView, PESSIMISTIC, REPEATABLE_READ)) {
                        // Check correctness type of removable object.
                        R val = cast(dsView.get(key), cls);

                        if (val != null) {
                            dsView.remove(key);

                            tx.commit();
                        }
                        else
                            tx.setRollbackOnly();

                        return val != null;
                    }
                    catch (Error | Exception e) {
                        U.error(log, "Failed to remove data structure: " + key, e);

                        throw e;
                    }
                }
            },
            dsCacheCtx
        );
    }

    /**
     *
     */
    static class DataStructuresEntryFilter implements CacheEntryEventSerializableFilter<Object, Object> {
        /** */
        private static final long serialVersionUID = 0L;

        /** {@inheritDoc} */
        @Override public boolean evaluate(CacheEntryEvent<?, ?> evt) throws CacheEntryListenerException {
            // Only latch values are interesting on create/update; every removal passes through.
            if (evt.getEventType() == EventType.CREATED || evt.getEventType() == EventType.UPDATED)
                return evt.getValue() instanceof GridCacheCountDownLatchValue;
            else {
                assert evt.getEventType() == EventType.REMOVED : evt;

                return true;
            }
        }

        /** {@inheritDoc} */
        @Override public String toString() {
            return S.toString(DataStructuresEntryFilter.class, this);
        }
    }

    /**
     *
     */
    private class DataStructuresEntryListener implements
        CacheEntryUpdatedListener<GridCacheInternalKey, GridCacheInternal> {
        /** {@inheritDoc} */
        @Override public void onUpdated(
            Iterable<CacheEntryEvent<? extends GridCacheInternalKey, ? extends GridCacheInternal>> evts)
            throws CacheEntryListenerException
        {
            for (CacheEntryEvent<? extends GridCacheInternalKey, ? extends GridCacheInternal> evt : evts) {
                if (evt.getEventType() == EventType.CREATED || evt.getEventType() == EventType.UPDATED) {
                    GridCacheInternal val0 = evt.getValue();

                    if (val0 instanceof GridCacheCountDownLatchValue) {
                        GridCacheInternalKey key = evt.getKey();

                        // Notify latch on changes.
                        final GridCacheRemovable latch = dsMap.get(key);

                        GridCacheCountDownLatchValue val = (GridCacheCountDownLatchValue)val0;

                        if (latch instanceof GridCacheCountDownLatchEx) {
                            final GridCacheCountDownLatchEx latch0 = (GridCacheCountDownLatchEx)latch;

                            latch0.onUpdate(val.get());

                            if (val.get() == 0 && val.autoDelete()) {
                                // Auto-delete: remove the latch asynchronously, then notify the
                                // proxy once the removal future completes.
                                dsMap.remove(key);

                                IgniteInternalFuture<?> removeFut = ctx.closure().runLocalSafe(new GPR() {
                                    @Override public void run() {
                                        try {
                                            removeCountDownLatch(latch0.name());
                                        }
                                        catch (IgniteCheckedException e) {
                                            U.error(log, "Failed to remove count down latch: " + latch0.name(), e);
                                        }
                                    }
                                });

                                removeFut.listen(new CI1<IgniteInternalFuture<?>>() {
                                    @Override public void apply(IgniteInternalFuture<?> f) {
                                        try {
                                            f.get();
                                        }
                                        catch (IgniteCheckedException e) {
                                            U.error(log, "Failed to remove count down latch: " + latch0.name(), e);
                                        }

                                        latch.onRemoved();
                                    }
                                });
                            }
                        }
                        else if (latch != null) {
                            U.error(log, "Failed to cast object " +
                                "[expected=" + IgniteCountDownLatch.class.getSimpleName() +
                                ", actual=" + latch.getClass() + ", value=" + latch + ']');
                        }
                    }
                }
                else {
                    assert evt.getEventType() == EventType.REMOVED : evt;

                    GridCacheInternal key = evt.getKey();

                    // Entry's val is null if entry deleted.
                    GridCacheRemovable obj = dsMap.remove(key);

                    if (obj != null)
                        obj.onRemoved();
                }
            }
        }

        /** {@inheritDoc} */
        @Override public String toString() {
            return S.toString(DataStructuresEntryListener.class, this);
        }
    }

    /**
     * Gets a set from cache or creates one if it's not cached.
     *
     * @param name Set name.
     * @param cfg Set configuration if new set should be created.
     * @return Set instance.
     * @throws IgniteCheckedException If failed.
     */
    @SuppressWarnings("unchecked")
    @Nullable public <T> IgniteSet<T> set(final String name, @Nullable final CollectionConfiguration cfg)
        throws IgniteCheckedException {
        A.notNull(name, "name");

        awaitInitialization();

        String cacheName = null;

        if (cfg != null)
            cacheName = compatibleConfiguration(cfg);

        DataStructureInfo dsInfo = new DataStructureInfo(name,
            SET,
            cfg != null ? new CollectionInfo(cacheName, cfg.isCollocated()) : null);

        final boolean create = cfg != null;

        return getCollection(new CX1<GridCacheContext, IgniteSet<T>>() {
            @Override public IgniteSet<T> applyx(GridCacheContext cctx) throws IgniteCheckedException {
                // 'cfg' is dereferenced only when 'create' is true, so the null case is safe.
                return cctx.dataStructures().set(name, create ? cfg.isCollocated() : false, create);
            }
        }, dsInfo, create);
    }

    /**
     * @param name Set name.
     * @param cctx Set cache context.
     * @throws IgniteCheckedException If failed.
     */
    public void removeSet(final String name, final GridCacheContext cctx) throws IgniteCheckedException {
        assert name != null;
        assert cctx != null;

        awaitInitialization();

        IgniteOutClosureX<GridCacheSetHeader> rmv = new IgniteOutClosureX<GridCacheSetHeader>() {
            @Override public GridCacheSetHeader applyx() throws IgniteCheckedException {
                return (GridCacheSetHeader)retryRemove(cctx.cache(), new GridCacheSetHeaderKey(name));
            }
        };

        CIX1<GridCacheSetHeader> afterRmv = new CIX1<GridCacheSetHeader>() {
            @Override public void applyx(GridCacheSetHeader hdr) throws IgniteCheckedException {
                cctx.dataStructures().removeSetData(hdr.id());
            }
        };

        removeDataStructure(rmv, name, SET, afterRmv);
    }

    /**
     * @param cache Cache.
     * @param key Key to remove.
     * @throws IgniteCheckedException If failed.
     * @return Removed value.
     */
    @SuppressWarnings("unchecked")
    @Nullable private <T> T retryRemove(final IgniteInternalCache cache, final Object key)
        throws IgniteCheckedException {
        return retry(log, new Callable<T>() {
            @Nullable @Override public T call() throws Exception {
                return (T)cache.getAndRemove(key);
            }
        });
    }

    /**
     * @param log Logger.
     * @param call Callable.
     * @return Callable result.
     * @throws IgniteCheckedException If all retries failed.
     */
    public static <R> R retry(IgniteLogger log, Callable<R> call) throws IgniteCheckedException {
        try {
            int cnt = 0;

            while (true) {
                try {
                    return call.call();
                }
                catch (ClusterGroupEmptyCheckedException e) {
                    // No nodes left to run on: retrying is pointless.
                    throw new IgniteCheckedException(e);
                }
                catch (IgniteTxRollbackCheckedException | CachePartialUpdateCheckedException |
                    ClusterTopologyCheckedException e) {
                    if (cnt++ == MAX_UPDATE_RETRIES)
                        throw e;
                    else {
                        U.warn(log, "Failed to execute data structure operation, will retry [err=" + e + ']');

                        U.sleep(RETRY_DELAY);
                    }
                }
            }
        }
        catch (IgniteCheckedException e) {
            // Already the declared exception type — rethrow without wrapping.
            throw e;
        }
        catch (Exception e) {
            throw new IgniteCheckedException(e);
        }
    }

    /**
     * Tries to cast the object to expected type.
     *
     * @param obj Object which will be casted.
     * @param cls Class
     * @param <R> Type of expected result.
     * @return Object has casted to expected type.
     * @throws IgniteCheckedException If {@code obj} has different to {@code cls} type.
     */
    @SuppressWarnings("unchecked")
    @Nullable private <R> R cast(@Nullable Object obj, Class<R> cls) throws IgniteCheckedException {
        if (obj == null)
            return null;

        if (cls.isInstance(obj))
            return (R)obj;
        else
            throw new IgniteCheckedException("Failed to cast object [expected=" + cls +
                ", actual=" + obj.getClass() + ']');
    }

    /** {@inheritDoc} */
    @Override public void printMemoryStats() {
        X.println(">>> ");
        X.println(">>> Data structure processor memory stats [grid=" + ctx.gridName() +
            ", cache=" + (dsCacheCtx != null ? dsCacheCtx.name() : null) + ']');
        X.println(">>> dsMapSize: " + dsMap.size());
    }

    /**
     * @throws IgniteException If atomics configuration is not provided.
     */
    private void checkAtomicsConfiguration() throws IgniteException {
        if (atomicCfg == null)
            throw new IgniteException("Atomic data structure can not be created, " +
                "need to provide IgniteAtomicConfiguration.");
    }

    /**
     * @param cfg Collection configuration.
     * @param infos Data structure caches.
     * @return Name of the cache with compatible configuration or null.
     */
    private static String findCompatibleConfiguration(CollectionConfiguration cfg, List<CacheCollectionInfo> infos) {
        if (infos == null)
            return null;

        for (CacheCollectionInfo col : infos) {
            // Configurations are compatible when every cache-affecting property matches
            // (node filters match when both are null or equal).
            if (col.cfg.getAtomicityMode() == cfg.getAtomicityMode() &&
                col.cfg.getMemoryMode() == cfg.getMemoryMode() &&
                col.cfg.getCacheMode() == cfg.getCacheMode() &&
                col.cfg.getBackups() == cfg.getBackups() &&
                col.cfg.getOffHeapMaxMemory() == cfg.getOffHeapMaxMemory() &&
                ((col.cfg.getNodeFilter() == null && cfg.getNodeFilter() == null) ||
                (col.cfg.getNodeFilter() != null && col.cfg.getNodeFilter().equals(cfg.getNodeFilter()))))
                return col.cacheName;
        }

        return null;
    }

    /**
     *
     */
    enum DataStructureType {
        // NOTE: constants are serialized by ordinal (see DataStructureInfo.writeExternal),
        // so their declaration order must never change.

        /** */
        ATOMIC_LONG(IgniteAtomicLong.class.getSimpleName()),

        /** */
        ATOMIC_REF(IgniteAtomicReference.class.getSimpleName()),

        /** */
        ATOMIC_SEQ(IgniteAtomicSequence.class.getSimpleName()),

        /** */
        ATOMIC_STAMPED(IgniteAtomicStamped.class.getSimpleName()),

        /** */
        COUNT_DOWN_LATCH(IgniteCountDownLatch.class.getSimpleName()),

        /** */
        QUEUE(IgniteQueue.class.getSimpleName()),

        /** */
        SET(IgniteSet.class.getSimpleName());

        /** */
        private static final DataStructureType[] VALS = values();

        /** */
        private String name;

        /**
         * @param name Name.
         */
        DataStructureType(String name) {
            this.name = name;
        }

        /**
         * @return Data structure public class name.
         */
        public String className() {
            return name;
        }

        /**
         * @param ord Ordinal value.
         * @return Enumerated value or {@code null} if ordinal out of range.
         */
        @Nullable public static DataStructureType fromOrdinal(int ord) {
            return ord >= 0 && ord < VALS.length ? VALS[ord] : null;
        }
    }

    /**
     *
     */
    static class CollectionInfo implements Externalizable {
        /** */
        private static final long serialVersionUID = 0L;

        /** */
        private boolean collocated;

        /** */
        private String cacheName;

        /**
         * Required by {@link Externalizable}.
         */
        public CollectionInfo() {
            // No-op.
        }

        /**
         * @param cacheName Collection cache name.
         * @param collocated Collocated flag.
         */
        public CollectionInfo(String cacheName, boolean collocated) {
            this.cacheName = cacheName;
            this.collocated = collocated;
        }

        /** {@inheritDoc} */
        @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
            collocated = in.readBoolean();

            cacheName = U.readString(in);
        }

        /** {@inheritDoc} */
        @Override public void writeExternal(ObjectOutput out) throws IOException {
            out.writeBoolean(collocated);

            U.writeString(out, cacheName);
        }

        /** {@inheritDoc} */
        @Override public String toString() {
            return S.toString(CollectionInfo.class, this);
        }
    }

    /**
     *
     */
    static class CacheCollectionInfo implements Externalizable {
        /** */
        private static final long serialVersionUID = 0L;

        /** */
        private String cacheName;

        /** */
        private CollectionConfiguration cfg;

        /**
         * Required by {@link Externalizable}.
         */
        public CacheCollectionInfo() {
            // No-op.
        }

        /**
         * @param cacheName Collection cache name.
         * @param cfg CollectionConfiguration.
         */
        public CacheCollectionInfo(String cacheName, CollectionConfiguration cfg) {
            this.cacheName = cacheName;
            this.cfg = cfg;
        }

        /** {@inheritDoc} */
        @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
            cfg = (CollectionConfiguration)in.readObject();

            cacheName = U.readString(in);
        }

        /** {@inheritDoc} */
        @Override public void writeExternal(ObjectOutput out) throws IOException {
            out.writeObject(cfg);

            U.writeString(out, cacheName);
        }

        /** {@inheritDoc} */
        @Override public String toString() {
            return S.toString(CacheCollectionInfo.class, this);
        }
    }

    /**
     *
     */
    static class QueueInfo extends CollectionInfo {
        /** */
        private static final long serialVersionUID = 0L;

        /** */
        private int cap;

        /**
         * Required by {@link Externalizable}.
         */
        public QueueInfo() {
            // No-op.
        }

        /**
         * @param collocated Collocated flag.
         * @param cap Queue capacity.
         * @param cacheName Cache name.
         */
        public QueueInfo(String cacheName, boolean collocated, int cap) {
            super(cacheName, collocated);

            this.cap = cap;
        }

        /** {@inheritDoc} */
        @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
            super.readExternal(in);

            cap = in.readInt();
        }

        /** {@inheritDoc} */
        @Override public void writeExternal(ObjectOutput out) throws IOException {
            super.writeExternal(out);

            out.writeInt(cap);
        }

        /** {@inheritDoc} */
        @Override public String toString() {
            return S.toString(QueueInfo.class, this, "super", super.toString());
        }
    }

    /**
     *
     */
    static class DataStructureInfo implements Externalizable {
        /** */
        private static final long serialVersionUID = 0L;

        /** */
        private String name;

        /** */
        private DataStructureType type;

        /** */
        private Object info;

        /**
         * Required by {@link Externalizable}.
         */
        public DataStructureInfo() {
            // No-op.
        }

        /**
         * @param name Data structure name.
         * @param type Data structure type.
         * @param info Data structure information.
         */
        DataStructureInfo(String name, DataStructureType type, Externalizable info) {
            this.name = name;
            this.type = type;
            this.info = info;
        }

        /**
         * @param dsInfo New data structure info.
         * @param create Create flag.
         * @return Exception if validation failed.
*/ @Nullable IgniteCheckedException validate(DataStructureInfo dsInfo, boolean create) { if (type != dsInfo.type) { return new IgniteCheckedException("Another data structure with the same name already created " + "[name=" + name + ", newType=" + dsInfo.type.className() + ", existingType=" + type.className() + ']'); } if (create) { if (type == QUEUE || type == SET) { CollectionInfo oldInfo = (CollectionInfo)info; CollectionInfo newInfo = (CollectionInfo)dsInfo.info; if (oldInfo.collocated != newInfo.collocated) { return new IgniteCheckedException("Another collection with the same name but different " + "configuration already created [name=" + name + ", newCollocated=" + newInfo.collocated + ", existingCollocated=" + newInfo.collocated + ']'); } if (type == QUEUE) { if (((QueueInfo)oldInfo).cap != ((QueueInfo)newInfo).cap) { return new IgniteCheckedException("Another queue with the same name but different " + "configuration already created [name=" + name + ", newCapacity=" + ((QueueInfo)newInfo).cap + ", existingCapacity=" + ((QueueInfo)oldInfo).cap + ']'); } } } } return null; } /** {@inheritDoc} */ @Override public void writeExternal(ObjectOutput out) throws IOException { U.writeString(out, name); U.writeEnum(out, type); out.writeObject(info); } /** {@inheritDoc} */ @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { name = U.readString(in); type = DataStructureType.fromOrdinal(in.readByte()); info = in.readObject(); } /** {@inheritDoc} */ @Override public String toString() { return S.toString(DataStructureInfo.class, this); } } /** * */ static class AddAtomicProcessor implements EntryProcessor<CacheDataStructuresConfigurationKey, Map<String, DataStructureInfo>, IgniteCheckedException>, Externalizable { /** */ private static final long serialVersionUID = 0L; /** */ private DataStructureInfo info; /** * @param info Data structure information. 
*/
        AddAtomicProcessor(DataStructureInfo info) {
            assert info != null;

            this.info = info;
        }

        /**
         * Required by {@link Externalizable}.
         */
        public AddAtomicProcessor() {
            // No-op.
        }

        /** {@inheritDoc} */
        @Override public IgniteCheckedException process(
            MutableEntry<CacheDataStructuresConfigurationKey, Map<String, DataStructureInfo>> entry,
            Object... args)
            throws EntryProcessorException {
            Map<String, DataStructureInfo> map = entry.getValue();

            // First structure ever: create the registry map and register unconditionally.
            if (map == null) {
                map = new HashMap<>();

                map.put(info.name, info);

                entry.setValue(map);

                return null;
            }

            DataStructureInfo oldInfo = map.get(info.name);

            // Name not taken yet: copy-on-write the map so the cached value is not mutated in place.
            if (oldInfo == null) {
                map = new HashMap<>(map);

                map.put(info.name, info);

                entry.setValue(map);

                return null;
            }

            // Name already registered: return a validation error on mismatch, null if compatible.
            return oldInfo.validate(info, true);
        }

        /** {@inheritDoc} */
        @Override public void writeExternal(ObjectOutput out) throws IOException {
            info.writeExternal(out);
        }

        /** {@inheritDoc} */
        @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
            info = new DataStructureInfo();

            info.readExternal(in);
        }

        /** {@inheritDoc} */
        @Override public String toString() {
            return S.toString(AddAtomicProcessor.class, this);
        }
    }

    /**
     * Entry processor that registers a collection (queue/set) and returns the name
     * of the cache backing it, together with a validation error if an incompatible
     * structure with the same name already exists.
     */
    static class AddCollectionProcessor implements
        EntryProcessor<CacheDataStructuresConfigurationKey, Map<String, DataStructureInfo>,
            T2<String, IgniteCheckedException>>,
        Externalizable {
        /** */
        private static final long serialVersionUID = 0L;

        /** Descriptor of the collection being registered; its info is a {@link CollectionInfo}. */
        private DataStructureInfo info;

        /**
         * @param info Data structure information.
         */
        AddCollectionProcessor(DataStructureInfo info) {
            assert info != null;
            assert info.info instanceof CollectionInfo;

            this.info = info;
        }

        /**
         * Required by {@link Externalizable}.
         */
        public AddCollectionProcessor() {
            // No-op.
        }

        /** {@inheritDoc} */
        @Override public T2<String, IgniteCheckedException> process(
            MutableEntry<CacheDataStructuresConfigurationKey, Map<String, DataStructureInfo>> entry,
            Object...
args) {
            Map<String, DataStructureInfo> map = entry.getValue();

            CollectionInfo colInfo = (CollectionInfo)info.info;

            // First structure ever: create the registry map and register unconditionally.
            if (map == null) {
                map = new HashMap<>();

                map.put(info.name, info);

                entry.setValue(map);

                return new T2<>(colInfo.cacheName, null);
            }

            DataStructureInfo oldInfo = map.get(info.name);

            // Name not taken yet: copy-on-write the map so the cached value is not mutated in place.
            if (oldInfo == null) {
                map = new HashMap<>(map);

                map.put(info.name, info);

                entry.setValue(map);

                return new T2<>(colInfo.cacheName, null);
            }

            // Name already registered: always return the cache name, plus any validation error.
            return new T2<>(colInfo.cacheName, oldInfo.validate(info, true));
        }

        /** {@inheritDoc} */
        @Override public void writeExternal(ObjectOutput out) throws IOException {
            info.writeExternal(out);
        }

        /** {@inheritDoc} */
        @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
            info = new DataStructureInfo();

            info.readExternal(in);
        }

        /** {@inheritDoc} */
        @Override public String toString() {
            return S.toString(AddCollectionProcessor.class, this);
        }
    }

    /**
     * Entry processor that picks (or allocates the name of) a cache to back a new
     * collection: reuses an existing cache with a compatible configuration, or
     * derives a fresh {@code datastructures_N} name.
     */
    static class AddDataCacheProcessor implements
        EntryProcessor<CacheDataStructuresCacheKey, List<CacheCollectionInfo>, String>, Externalizable {
        /** Cache name prefix. */
        private static final String CACHE_NAME_PREFIX = "datastructures_";

        /** */
        private static final long serialVersionUID = 0L;

        /** Configuration of the collection the cache is requested for. */
        private CollectionConfiguration cfg;

        /**
         * @param cfg Collection configuration.
         */
        AddDataCacheProcessor(CollectionConfiguration cfg) {
            this.cfg = cfg;
        }

        /**
         * Required by {@link Externalizable}.
         */
        public AddDataCacheProcessor() {
            // No-op.
        }

        /** {@inheritDoc} */
        @Override public String process(
            MutableEntry<CacheDataStructuresCacheKey, List<CacheCollectionInfo>> entry,
            Object...
args) { List<CacheCollectionInfo> list = entry.getValue(); if (list == null) { list = new ArrayList<>(); String newName = CACHE_NAME_PREFIX + 0; list.add(new CacheCollectionInfo(newName, cfg)); entry.setValue(list); return newName; } String oldName = findCompatibleConfiguration(cfg, list); if (oldName != null) return oldName; String newName = CACHE_NAME_PREFIX + list.size(); List<CacheCollectionInfo> newList = new ArrayList<>(list); newList.add(new CacheCollectionInfo(newName, cfg)); return newName; } /** {@inheritDoc} */ @Override public void writeExternal(ObjectOutput out) throws IOException { out.writeObject(cfg); } /** {@inheritDoc} */ @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { cfg = (CollectionConfiguration)in.readObject(); } /** {@inheritDoc} */ @Override public String toString() { return S.toString(AddDataCacheProcessor.class, this); } } /** * */ static class RemoveDataStructureProcessor implements EntryProcessor<CacheDataStructuresConfigurationKey, Map<String, DataStructureInfo>, T2<Boolean, IgniteCheckedException>>, Externalizable { /** */ private static final long serialVersionUID = 0L; /** */ private DataStructureInfo info; /** * @param info Data structure information. */ RemoveDataStructureProcessor(DataStructureInfo info) { assert info != null; this.info = info; } /** * Required by {@link Externalizable}. */ public RemoveDataStructureProcessor() { // No-op. } /** {@inheritDoc} */ @Override public T2<Boolean, IgniteCheckedException> process( MutableEntry<CacheDataStructuresConfigurationKey, Map<String, DataStructureInfo>> entry, Object... 
args) {
            Map<String, DataStructureInfo> map = entry.getValue();

            // Nothing registered at all: report "did not exist", no error.
            if (map == null)
                return new T2<>(false, null);

            DataStructureInfo oldInfo = map.get(info.name);

            // This name was never registered: report "did not exist", no error.
            if (oldInfo == null)
                return new T2<>(false, null);

            IgniteCheckedException err = oldInfo.validate(info, false);

            // Only remove on successful validation; copy-on-write so the cached
            // map value is not mutated in place.
            if (err == null) {
                map = new HashMap<>(map);

                map.remove(info.name);

                entry.setValue(map);
            }

            // The structure existed either way; err is non-null on a type mismatch.
            return new T2<>(true, err);
        }

        /** {@inheritDoc} */
        @Override public void writeExternal(ObjectOutput out) throws IOException {
            info.writeExternal(out);
        }

        /** {@inheritDoc} */
        @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
            info = new DataStructureInfo();

            info.readExternal(in);
        }

        /** {@inheritDoc} */
        @Override public String toString() {
            return S.toString(RemoveDataStructureProcessor.class, this);
        }
    }
}
apache-2.0
anton415/Job4j
Pre-middle/Part_1_Input_Output/src/main/java/ru/aserdyuchenko/testQuestions/Tracker.java
3859
package ru.aserdyuchenko.testQuestions;

import java.time.LocalTime;
import java.util.Random;
import java.util.ArrayList;

/**
 * Tracks bank visitors over the bank's working day (8:00-20:00) and reports
 * the busiest minute and how many visitors were present then.
 */
class Tracker {
    /** Exclusive upper bound for the randomly generated number of daily visitors. */
    private static final int MAX_RANDOM_VISITORS = 1000;

    /** Randomness source used to pick the number of visitors for a day. */
    private Random random = new Random();

    /** Visitors of the bank for the tracked day. */
    private ArrayList<BankVisitor> arrayBankVisitors;

    /** One counter slot per minute of the bank's working hours (8:00 to 19:59). */
    private ArrayList<TimeOfBankWorking> arrayTimeOfBankWorking;

    /**
     * Builds a tracker with a random number of visitors (0 to 999).
     */
    Tracker() {
        this.arrayTimeOfBankWorking = creatArrayTimeOfBankWorking();
        this.arrayBankVisitors = creatArrayBankVisitors();
        registrationVisitors();
    }

    /**
     * Builds a tracker with a deterministic set of visitors; intended for tests,
     * i.e. to exclude randomness.
     *
     * @param countVisitorsInAllDay Number of visitors to create.
     * @param timeIn Time each visitor enters.
     * @param timeOut Time each visitor leaves.
     */
    Tracker(int countVisitorsInAllDay, LocalTime timeIn, LocalTime timeOut) {
        this.arrayTimeOfBankWorking = creatArrayTimeOfBankWorking();
        this.arrayBankVisitors = creatArrayBankVisitors(countVisitorsInAllDay, timeIn, timeOut);
        registrationVisitors();
    }

    /**
     * Creates the per-minute timeline of the bank's working hours (8:00 to 20:00).
     *
     * @return One {@code TimeOfBankWorking} per minute from 8:00 to 19:59.
     */
    public ArrayList<TimeOfBankWorking> creatArrayTimeOfBankWorking() {
        ArrayList<TimeOfBankWorking> newArray = new ArrayList<TimeOfBankWorking>();
        for (int hour = 8; hour < 20; hour++) {
            for (int minute = 0; minute < 60; minute++) {
                newArray.add(new TimeOfBankWorking(LocalTime.of(hour, minute)));
            }
        }
        return newArray;
    }

    /**
     * Creates a random number (0 to 999) of bank visitors.
     *
     * @return List of randomly generated visitors.
     */
    public ArrayList<BankVisitor> creatArrayBankVisitors() {
        ArrayList<BankVisitor> newArray = new ArrayList<BankVisitor>();
        // Fix: draw the visitor count ONCE. The original evaluated
        // random.nextInt(1000) in the loop condition, re-rolling the bound on
        // every iteration, which terminates the loop as soon as a small bound is
        // drawn and heavily skews the list size towards 0.
        int countVisitors = random.nextInt(MAX_RANDOM_VISITORS);
        for (int index = 0; index < countVisitors; index++) {
            newArray.add(new BankVisitor());
        }
        return newArray;
    }

    /**
     * Creates a fixed number of visitors, all entering and leaving at the given
     * times; intended for tests, i.e. to exclude randomness.
     *
     * @param countVisitorsInAllDay Number of visitors to create.
     * @param timeIn Time each visitor enters.
     * @param timeOut Time each visitor leaves.
     * @return List of visitors.
     */
    public ArrayList<BankVisitor> creatArrayBankVisitors(int countVisitorsInAllDay, LocalTime timeIn, LocalTime timeOut) {
        ArrayList<BankVisitor> newArray = new ArrayList<BankVisitor>();
        for (int index = 0; index < countVisitorsInAllDay; index++) {
            newArray.add(new BankVisitor(timeIn, timeOut));
        }
        return newArray;
    }

    /**
     * Registers visitors: for every minute of the working day, increments that
     * minute's counter once per visitor present at that minute.
     */
    public void registrationVisitors() {
        for (TimeOfBankWorking bankTime : arrayTimeOfBankWorking) {
            for (BankVisitor visitor : arrayBankVisitors) {
                for (LocalTime visitorTime : visitor.getTimeStay()) {
                    if (bankTime.getTime().equals(visitorTime)) {
                        bankTime.addNewVisitorInCount();
                    }
                }
            }
        }
    }

    /**
     * Finds the maximum number of visitors present at any single minute.
     *
     * @return Maximum per-minute visitor count (0 if nobody visited).
     */
    public int findMaxCountVisitorsInTimeOfBankWorking() {
        int maxCount = 0;
        for (TimeOfBankWorking bankTime : arrayTimeOfBankWorking) {
            if (bankTime.getCountVisitors() > maxCount) {
                maxCount = bankTime.getCountVisitors();
            }
        }
        return maxCount;
    }

    /**
     * Finds the minute at which the visitor count was maximal.
     *
     * @return Earliest minute with the maximum visitor count; 8:00 if no
     *     visitors were registered.
     */
    public LocalTime findTimeWhenMaxCountOfVisitors() {
        LocalTime time = LocalTime.of(8, 0);
        int maxCount = 0;
        for (TimeOfBankWorking bankTime : arrayTimeOfBankWorking) {
            if (bankTime.getCountVisitors() > maxCount) {
                maxCount = bankTime.getCountVisitors();
                time = bankTime.getTime();
            }
        }
        return time;
    }
}
apache-2.0
leechoongyon/HadoopSourceAnalyze
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
40875
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.crypto.key.kms; import org.apache.commons.codec.binary.Base64; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion; import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension; import org.apache.hadoop.crypto.key.KeyProviderFactory; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.ProviderUtils; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.client.AuthenticatedURL; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.security.authentication.client.ConnectionConfigurator; import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import 
org.apache.hadoop.security.token.TokenRenewer; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier; import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL; import org.apache.hadoop.util.HttpExceptionUtils; import org.apache.hadoop.util.KMSUtil; import org.apache.http.client.utils.URIBuilder; import org.codehaus.jackson.map.ObjectMapper; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import javax.net.ssl.HttpsURLConnection; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.io.OutputStreamWriter; import java.io.Writer; import java.lang.reflect.UndeclaredThrowableException; import java.net.HttpURLConnection; import java.net.InetSocketAddress; import java.net.MalformedURLException; import java.net.SocketTimeoutException; import java.net.URI; import java.net.URISyntaxException; import java.net.URL; import java.net.URLEncoder; import java.nio.charset.StandardCharsets; import java.security.GeneralSecurityException; import java.security.NoSuchAlgorithmException; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Date; import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Queue; import java.util.concurrent.ExecutionException; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.base.Strings; /** * KMS client <code>KeyProvider</code> implementation. 
*/ @InterfaceAudience.Private public class KMSClientProvider extends KeyProvider implements CryptoExtension, KeyProviderDelegationTokenExtension.DelegationTokenExtension { private static final Logger LOG = LoggerFactory.getLogger(KMSClientProvider.class); private static final String INVALID_SIGNATURE = "Invalid signature"; private static final String ANONYMOUS_REQUESTS_DISALLOWED = "Anonymous requests are disallowed"; public static final String TOKEN_KIND_STR = KMSDelegationToken.TOKEN_KIND_STR; public static final Text TOKEN_KIND = KMSDelegationToken.TOKEN_KIND; public static final String SCHEME_NAME = "kms"; private static final String UTF8 = "UTF-8"; private static final String CONTENT_TYPE = "Content-Type"; private static final String APPLICATION_JSON_MIME = "application/json"; private static final String HTTP_GET = "GET"; private static final String HTTP_POST = "POST"; private static final String HTTP_PUT = "PUT"; private static final String HTTP_DELETE = "DELETE"; private static final String CONFIG_PREFIX = "hadoop.security.kms.client."; /* It's possible to specify a timeout, in seconds, in the config file */ public static final String TIMEOUT_ATTR = CONFIG_PREFIX + "timeout"; public static final int DEFAULT_TIMEOUT = 60; /* Number of times to retry authentication in the event of auth failure * (normally happens due to stale authToken) */ public static final String AUTH_RETRY = CONFIG_PREFIX + "authentication.retry-count"; public static final int DEFAULT_AUTH_RETRY = 1; private final ValueQueue<EncryptedKeyVersion> encKeyVersionQueue; private class EncryptedQueueRefiller implements ValueQueue.QueueRefiller<EncryptedKeyVersion> { @Override public void fillQueueForKey(String keyName, Queue<EncryptedKeyVersion> keyQueue, int numEKVs) throws IOException { checkNotNull(keyName, "keyName"); Map<String, String> params = new HashMap<String, String>(); params.put(KMSRESTConstants.EEK_OP, KMSRESTConstants.EEK_GENERATE); params.put(KMSRESTConstants.EEK_NUM_KEYS, "" + 
numEKVs); URL url = createURL(KMSRESTConstants.KEY_RESOURCE, keyName, KMSRESTConstants.EEK_SUB_RESOURCE, params); HttpURLConnection conn = createConnection(url, HTTP_GET); conn.setRequestProperty(CONTENT_TYPE, APPLICATION_JSON_MIME); List response = call(conn, null, HttpURLConnection.HTTP_OK, List.class); List<EncryptedKeyVersion> ekvs = parseJSONEncKeyVersion(keyName, response); keyQueue.addAll(ekvs); } } /** * The KMS implementation of {@link TokenRenewer}. */ public static class KMSTokenRenewer extends TokenRenewer { private static final Logger LOG = LoggerFactory.getLogger(KMSTokenRenewer.class); @Override public boolean handleKind(Text kind) { return kind.equals(TOKEN_KIND); } @Override public boolean isManaged(Token<?> token) throws IOException { return true; } @Override public long renew(Token<?> token, Configuration conf) throws IOException { LOG.debug("Renewing delegation token {}", token); KeyProvider keyProvider = KMSUtil.createKeyProvider(conf, KeyProviderFactory.KEY_PROVIDER_PATH); try { if (!(keyProvider instanceof KeyProviderDelegationTokenExtension.DelegationTokenExtension)) { LOG.warn("keyProvider {} cannot renew dt.", keyProvider == null ? "null" : keyProvider.getClass()); return 0; } return ((KeyProviderDelegationTokenExtension.DelegationTokenExtension) keyProvider).renewDelegationToken(token); } finally { if (keyProvider != null) { keyProvider.close(); } } } @Override public void cancel(Token<?> token, Configuration conf) throws IOException { LOG.debug("Canceling delegation token {}", token); KeyProvider keyProvider = KMSUtil.createKeyProvider(conf, KeyProviderFactory.KEY_PROVIDER_PATH); try { if (!(keyProvider instanceof KeyProviderDelegationTokenExtension.DelegationTokenExtension)) { LOG.warn("keyProvider {} cannot cancel dt.", keyProvider == null ? 
"null" : keyProvider.getClass()); return; } ((KeyProviderDelegationTokenExtension.DelegationTokenExtension) keyProvider).cancelDelegationToken(token); } finally { if (keyProvider != null) { keyProvider.close(); } } } } public static class KMSEncryptedKeyVersion extends EncryptedKeyVersion { public KMSEncryptedKeyVersion(String keyName, String keyVersionName, byte[] iv, String encryptedVersionName, byte[] keyMaterial) { super(keyName, keyVersionName, iv, new KMSKeyVersion(null, encryptedVersionName, keyMaterial)); } } @SuppressWarnings("rawtypes") private static List<EncryptedKeyVersion> parseJSONEncKeyVersion(String keyName, List valueList) { List<EncryptedKeyVersion> ekvs = new LinkedList<EncryptedKeyVersion>(); if (!valueList.isEmpty()) { for (Object values : valueList) { Map valueMap = (Map) values; String versionName = checkNotNull( (String) valueMap.get(KMSRESTConstants.VERSION_NAME_FIELD), KMSRESTConstants.VERSION_NAME_FIELD); byte[] iv = Base64.decodeBase64(checkNotNull( (String) valueMap.get(KMSRESTConstants.IV_FIELD), KMSRESTConstants.IV_FIELD)); Map encValueMap = checkNotNull((Map) valueMap.get(KMSRESTConstants.ENCRYPTED_KEY_VERSION_FIELD), KMSRESTConstants.ENCRYPTED_KEY_VERSION_FIELD); String encVersionName = checkNotNull((String) encValueMap.get(KMSRESTConstants.VERSION_NAME_FIELD), KMSRESTConstants.VERSION_NAME_FIELD); byte[] encKeyMaterial = Base64.decodeBase64(checkNotNull((String) encValueMap.get(KMSRESTConstants.MATERIAL_FIELD), KMSRESTConstants.MATERIAL_FIELD)); ekvs.add(new KMSEncryptedKeyVersion(keyName, versionName, iv, encVersionName, encKeyMaterial)); } } return ekvs; } private static KeyVersion parseJSONKeyVersion(Map valueMap) { KeyVersion keyVersion = null; if (!valueMap.isEmpty()) { byte[] material = (valueMap.containsKey(KMSRESTConstants.MATERIAL_FIELD)) ? 
Base64.decodeBase64((String) valueMap.get(KMSRESTConstants.MATERIAL_FIELD)) : null; String versionName = (String)valueMap.get(KMSRESTConstants.VERSION_NAME_FIELD); String keyName = (String)valueMap.get(KMSRESTConstants.NAME_FIELD); keyVersion = new KMSKeyVersion(keyName, versionName, material); } return keyVersion; } @SuppressWarnings("unchecked") private static Metadata parseJSONMetadata(Map valueMap) { Metadata metadata = null; if (!valueMap.isEmpty()) { metadata = new KMSMetadata( (String) valueMap.get(KMSRESTConstants.CIPHER_FIELD), (Integer) valueMap.get(KMSRESTConstants.LENGTH_FIELD), (String) valueMap.get(KMSRESTConstants.DESCRIPTION_FIELD), (Map<String, String>) valueMap.get(KMSRESTConstants.ATTRIBUTES_FIELD), new Date((Long) valueMap.get(KMSRESTConstants.CREATED_FIELD)), (Integer) valueMap.get(KMSRESTConstants.VERSIONS_FIELD)); } return metadata; } private static void writeJson(Map map, OutputStream os) throws IOException { Writer writer = new OutputStreamWriter(os, StandardCharsets.UTF_8); ObjectMapper jsonMapper = new ObjectMapper(); jsonMapper.writerWithDefaultPrettyPrinter().writeValue(writer, map); } /** * The factory to create KMSClientProvider, which is used by the * ServiceLoader. */ public static class Factory extends KeyProviderFactory { /** * This provider expects URIs in the following form : * kms://<PROTO>@<AUTHORITY>/<PATH> * * where : * - PROTO = http or https * - AUTHORITY = <HOSTS>[:<PORT>] * - HOSTS = <HOSTNAME>[;<HOSTS>] * - HOSTNAME = string * - PORT = integer * * If multiple hosts are provider, the Factory will create a * {@link LoadBalancingKMSClientProvider} that round-robins requests * across the provided list of hosts. 
*/ @Override public KeyProvider createProvider(URI providerUri, Configuration conf) throws IOException { if (SCHEME_NAME.equals(providerUri.getScheme())) { URL origUrl = new URL(extractKMSPath(providerUri).toString()); String authority = origUrl.getAuthority(); // check for ';' which delimits the backup hosts if (Strings.isNullOrEmpty(authority)) { throw new IOException( "No valid authority in kms uri [" + origUrl + "]"); } // Check if port is present in authority // In the current scheme, all hosts have to run on the same port int port = -1; String hostsPart = authority; if (authority.contains(":")) { String[] t = authority.split(":"); try { port = Integer.parseInt(t[1]); } catch (Exception e) { throw new IOException( "Could not parse port in kms uri [" + origUrl + "]"); } hostsPart = t[0]; } return createProvider(providerUri, conf, origUrl, port, hostsPart); } return null; } private KeyProvider createProvider(URI providerUri, Configuration conf, URL origUrl, int port, String hostsPart) throws IOException { String[] hosts = hostsPart.split(";"); if (hosts.length == 1) { return new KMSClientProvider(providerUri, conf); } else { KMSClientProvider[] providers = new KMSClientProvider[hosts.length]; for (int i = 0; i < hosts.length; i++) { try { providers[i] = new KMSClientProvider( new URI("kms", origUrl.getProtocol(), hosts[i], port, origUrl.getPath(), null, null), conf); } catch (URISyntaxException e) { throw new IOException("Could not instantiate KMSProvider..", e); } } return new LoadBalancingKMSClientProvider(providers, conf); } } } public static <T> T checkNotNull(T o, String name) throws IllegalArgumentException { if (o == null) { throw new IllegalArgumentException("Parameter '" + name + "' cannot be null"); } return o; } public static String checkNotEmpty(String s, String name) throws IllegalArgumentException { checkNotNull(s, name); if (s.isEmpty()) { throw new IllegalArgumentException("Parameter '" + name + "' cannot be empty"); } return s; } private String 
kmsUrl; private SSLFactory sslFactory; private ConnectionConfigurator configurator; private DelegationTokenAuthenticatedURL.Token authToken; private final int authRetry; @Override public String toString() { final StringBuilder sb = new StringBuilder("KMSClientProvider["); sb.append(kmsUrl).append("]"); return sb.toString(); } /** * This small class exists to set the timeout values for a connection */ private static class TimeoutConnConfigurator implements ConnectionConfigurator { private ConnectionConfigurator cc; private int timeout; /** * Sets the timeout and wraps another connection configurator * @param timeout - will set both connect and read timeouts - in seconds * @param cc - another configurator to wrap - may be null */ public TimeoutConnConfigurator(int timeout, ConnectionConfigurator cc) { this.timeout = timeout; this.cc = cc; } /** * Calls the wrapped configure() method, then sets timeouts * @param conn the {@link HttpURLConnection} instance to configure. * @return the connection * @throws IOException */ @Override public HttpURLConnection configure(HttpURLConnection conn) throws IOException { if (cc != null) { conn = cc.configure(conn); } conn.setConnectTimeout(timeout * 1000); // conversion to milliseconds conn.setReadTimeout(timeout * 1000); return conn; } } public KMSClientProvider(URI uri, Configuration conf) throws IOException { super(conf); kmsUrl = createServiceURL(extractKMSPath(uri)); if ("https".equalsIgnoreCase(new URL(kmsUrl).getProtocol())) { sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf); try { sslFactory.init(); } catch (GeneralSecurityException ex) { throw new IOException(ex); } } int timeout = conf.getInt(TIMEOUT_ATTR, DEFAULT_TIMEOUT); authRetry = conf.getInt(AUTH_RETRY, DEFAULT_AUTH_RETRY); configurator = new TimeoutConnConfigurator(timeout, sslFactory); encKeyVersionQueue = new ValueQueue<KeyProviderCryptoExtension.EncryptedKeyVersion>( conf.getInt( CommonConfigurationKeysPublic.KMS_CLIENT_ENC_KEY_CACHE_SIZE, 
CommonConfigurationKeysPublic. KMS_CLIENT_ENC_KEY_CACHE_SIZE_DEFAULT), conf.getFloat( CommonConfigurationKeysPublic. KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK, CommonConfigurationKeysPublic. KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK_DEFAULT), conf.getInt( CommonConfigurationKeysPublic. KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_MS, CommonConfigurationKeysPublic. KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_DEFAULT), conf.getInt( CommonConfigurationKeysPublic. KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS, CommonConfigurationKeysPublic. KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT), new EncryptedQueueRefiller()); authToken = new DelegationTokenAuthenticatedURL.Token(); } private static Path extractKMSPath(URI uri) throws MalformedURLException, IOException { return ProviderUtils.unnestUri(uri); } private static String createServiceURL(Path path) throws IOException { String str = new URL(path.toString()).toExternalForm(); if (str.endsWith("/")) { str = str.substring(0, str.length() - 1); } return new URL(str + KMSRESTConstants.SERVICE_VERSION + "/"). 
toExternalForm(); } private URL createURL(String collection, String resource, String subResource, Map<String, ?> parameters) throws IOException { try { StringBuilder sb = new StringBuilder(); sb.append(kmsUrl); if (collection != null) { sb.append(collection); if (resource != null) { sb.append("/").append(URLEncoder.encode(resource, UTF8)); if (subResource != null) { sb.append("/").append(subResource); } } } URIBuilder uriBuilder = new URIBuilder(sb.toString()); if (parameters != null) { for (Map.Entry<String, ?> param : parameters.entrySet()) { Object value = param.getValue(); if (value instanceof String) { uriBuilder.addParameter(param.getKey(), (String) value); } else { for (String s : (String[]) value) { uriBuilder.addParameter(param.getKey(), s); } } } } return uriBuilder.build().toURL(); } catch (URISyntaxException ex) { throw new IOException(ex); } } private HttpURLConnection configureConnection(HttpURLConnection conn) throws IOException { if (sslFactory != null) { HttpsURLConnection httpsConn = (HttpsURLConnection) conn; try { httpsConn.setSSLSocketFactory(sslFactory.createSSLSocketFactory()); } catch (GeneralSecurityException ex) { throw new IOException(ex); } httpsConn.setHostnameVerifier(sslFactory.getHostnameVerifier()); } return conn; } private HttpURLConnection createConnection(final URL url, String method) throws IOException { HttpURLConnection conn; try { final String doAsUser = getDoAsUser(); conn = getActualUgi().doAs(new PrivilegedExceptionAction <HttpURLConnection>() { @Override public HttpURLConnection run() throws Exception { DelegationTokenAuthenticatedURL authUrl = new DelegationTokenAuthenticatedURL(configurator); return authUrl.openConnection(url, authToken, doAsUser); } }); } catch (IOException ex) { if (ex instanceof SocketTimeoutException) { LOG.warn("Failed to connect to {}:{}", url.getHost(), url.getPort()); } throw ex; } catch (UndeclaredThrowableException ex) { throw new IOException(ex.getUndeclaredThrowable()); } catch (Exception 
ex) { throw new IOException(ex); } conn.setUseCaches(false); conn.setRequestMethod(method); if (method.equals(HTTP_POST) || method.equals(HTTP_PUT)) { conn.setDoOutput(true); } conn = configureConnection(conn); return conn; } private <T> T call(HttpURLConnection conn, Map jsonOutput, int expectedResponse, Class<T> klass) throws IOException { return call(conn, jsonOutput, expectedResponse, klass, authRetry); } private <T> T call(HttpURLConnection conn, Map jsonOutput, int expectedResponse, Class<T> klass, int authRetryCount) throws IOException { T ret = null; try { if (jsonOutput != null) { writeJson(jsonOutput, conn.getOutputStream()); } } catch (IOException ex) { IOUtils.closeStream(conn.getInputStream()); throw ex; } if ((conn.getResponseCode() == HttpURLConnection.HTTP_FORBIDDEN && (conn.getResponseMessage().equals(ANONYMOUS_REQUESTS_DISALLOWED) || conn.getResponseMessage().contains(INVALID_SIGNATURE))) || conn.getResponseCode() == HttpURLConnection.HTTP_UNAUTHORIZED) { // Ideally, this should happen only when there is an Authentication // failure. Unfortunately, the AuthenticationFilter returns 403 when it // cannot authenticate (Since a 401 requires Server to send // WWW-Authenticate header as well).. KMSClientProvider.this.authToken = new DelegationTokenAuthenticatedURL.Token(); if (authRetryCount > 0) { String contentType = conn.getRequestProperty(CONTENT_TYPE); String requestMethod = conn.getRequestMethod(); URL url = conn.getURL(); conn = createConnection(url, requestMethod); conn.setRequestProperty(CONTENT_TYPE, contentType); return call(conn, jsonOutput, expectedResponse, klass, authRetryCount - 1); } } try { AuthenticatedURL.extractToken(conn, authToken); } catch (AuthenticationException e) { // Ignore the AuthExceptions.. since we are just using the method to // extract and set the authToken.. 
(Workaround till we actually fix // AuthenticatedURL properly to set authToken post initialization) } HttpExceptionUtils.validateResponse(conn, expectedResponse); if (conn.getContentType() != null && conn.getContentType().trim().toLowerCase() .startsWith(APPLICATION_JSON_MIME) && klass != null) { ObjectMapper mapper = new ObjectMapper(); InputStream is = null; try { is = conn.getInputStream(); ret = mapper.readValue(is, klass); } finally { IOUtils.closeStream(is); } } return ret; } public static class KMSKeyVersion extends KeyVersion { public KMSKeyVersion(String keyName, String versionName, byte[] material) { super(keyName, versionName, material); } } @Override public KeyVersion getKeyVersion(String versionName) throws IOException { checkNotEmpty(versionName, "versionName"); URL url = createURL(KMSRESTConstants.KEY_VERSION_RESOURCE, versionName, null, null); HttpURLConnection conn = createConnection(url, HTTP_GET); Map response = call(conn, null, HttpURLConnection.HTTP_OK, Map.class); return parseJSONKeyVersion(response); } @Override public KeyVersion getCurrentKey(String name) throws IOException { checkNotEmpty(name, "name"); URL url = createURL(KMSRESTConstants.KEY_RESOURCE, name, KMSRESTConstants.CURRENT_VERSION_SUB_RESOURCE, null); HttpURLConnection conn = createConnection(url, HTTP_GET); Map response = call(conn, null, HttpURLConnection.HTTP_OK, Map.class); return parseJSONKeyVersion(response); } @Override @SuppressWarnings("unchecked") public List<String> getKeys() throws IOException { URL url = createURL(KMSRESTConstants.KEYS_NAMES_RESOURCE, null, null, null); HttpURLConnection conn = createConnection(url, HTTP_GET); List response = call(conn, null, HttpURLConnection.HTTP_OK, List.class); return (List<String>) response; } public static class KMSMetadata extends Metadata { public KMSMetadata(String cipher, int bitLength, String description, Map<String, String> attributes, Date created, int versions) { super(cipher, bitLength, description, attributes, 
created, versions); } } // breaking keyNames into sets to keep resulting URL undler 2000 chars private List<String[]> createKeySets(String[] keyNames) { List<String[]> list = new ArrayList<String[]>(); List<String> batch = new ArrayList<String>(); int batchLen = 0; for (String name : keyNames) { int additionalLen = KMSRESTConstants.KEY.length() + 1 + name.length(); batchLen += additionalLen; // topping at 1500 to account for initial URL and encoded names if (batchLen > 1500) { list.add(batch.toArray(new String[batch.size()])); batch = new ArrayList<String>(); batchLen = additionalLen; } batch.add(name); } if (!batch.isEmpty()) { list.add(batch.toArray(new String[batch.size()])); } return list; } @Override @SuppressWarnings("unchecked") public Metadata[] getKeysMetadata(String ... keyNames) throws IOException { List<Metadata> keysMetadata = new ArrayList<Metadata>(); List<String[]> keySets = createKeySets(keyNames); for (String[] keySet : keySets) { if (keyNames.length > 0) { Map<String, Object> queryStr = new HashMap<String, Object>(); queryStr.put(KMSRESTConstants.KEY, keySet); URL url = createURL(KMSRESTConstants.KEYS_METADATA_RESOURCE, null, null, queryStr); HttpURLConnection conn = createConnection(url, HTTP_GET); List<Map> list = call(conn, null, HttpURLConnection.HTTP_OK, List.class); for (Map map : list) { keysMetadata.add(parseJSONMetadata(map)); } } } return keysMetadata.toArray(new Metadata[keysMetadata.size()]); } private KeyVersion createKeyInternal(String name, byte[] material, Options options) throws NoSuchAlgorithmException, IOException { checkNotEmpty(name, "name"); checkNotNull(options, "options"); Map<String, Object> jsonKey = new HashMap<String, Object>(); jsonKey.put(KMSRESTConstants.NAME_FIELD, name); jsonKey.put(KMSRESTConstants.CIPHER_FIELD, options.getCipher()); jsonKey.put(KMSRESTConstants.LENGTH_FIELD, options.getBitLength()); if (material != null) { jsonKey.put(KMSRESTConstants.MATERIAL_FIELD, Base64.encodeBase64String(material)); } if 
(options.getDescription() != null) { jsonKey.put(KMSRESTConstants.DESCRIPTION_FIELD, options.getDescription()); } if (options.getAttributes() != null && !options.getAttributes().isEmpty()) { jsonKey.put(KMSRESTConstants.ATTRIBUTES_FIELD, options.getAttributes()); } URL url = createURL(KMSRESTConstants.KEYS_RESOURCE, null, null, null); HttpURLConnection conn = createConnection(url, HTTP_POST); conn.setRequestProperty(CONTENT_TYPE, APPLICATION_JSON_MIME); Map response = call(conn, jsonKey, HttpURLConnection.HTTP_CREATED, Map.class); return parseJSONKeyVersion(response); } @Override public KeyVersion createKey(String name, Options options) throws NoSuchAlgorithmException, IOException { return createKeyInternal(name, null, options); } @Override public KeyVersion createKey(String name, byte[] material, Options options) throws IOException { checkNotNull(material, "material"); try { return createKeyInternal(name, material, options); } catch (NoSuchAlgorithmException ex) { throw new RuntimeException("It should not happen", ex); } } private KeyVersion rollNewVersionInternal(String name, byte[] material) throws NoSuchAlgorithmException, IOException { checkNotEmpty(name, "name"); Map<String, String> jsonMaterial = new HashMap<String, String>(); if (material != null) { jsonMaterial.put(KMSRESTConstants.MATERIAL_FIELD, Base64.encodeBase64String(material)); } URL url = createURL(KMSRESTConstants.KEY_RESOURCE, name, null, null); HttpURLConnection conn = createConnection(url, HTTP_POST); conn.setRequestProperty(CONTENT_TYPE, APPLICATION_JSON_MIME); Map response = call(conn, jsonMaterial, HttpURLConnection.HTTP_OK, Map.class); KeyVersion keyVersion = parseJSONKeyVersion(response); encKeyVersionQueue.drain(name); return keyVersion; } @Override public KeyVersion rollNewVersion(String name) throws NoSuchAlgorithmException, IOException { return rollNewVersionInternal(name, null); } @Override public KeyVersion rollNewVersion(String name, byte[] material) throws IOException { 
checkNotNull(material, "material"); try { return rollNewVersionInternal(name, material); } catch (NoSuchAlgorithmException ex) { throw new RuntimeException("It should not happen", ex); } } @Override public EncryptedKeyVersion generateEncryptedKey( String encryptionKeyName) throws IOException, GeneralSecurityException { try { return encKeyVersionQueue.getNext(encryptionKeyName); } catch (ExecutionException e) { if (e.getCause() instanceof SocketTimeoutException) { throw (SocketTimeoutException)e.getCause(); } throw new IOException(e); } } @SuppressWarnings("rawtypes") @Override public KeyVersion decryptEncryptedKey( EncryptedKeyVersion encryptedKeyVersion) throws IOException, GeneralSecurityException { checkNotNull(encryptedKeyVersion.getEncryptionKeyVersionName(), "versionName"); checkNotNull(encryptedKeyVersion.getEncryptedKeyIv(), "iv"); Preconditions.checkArgument( encryptedKeyVersion.getEncryptedKeyVersion().getVersionName() .equals(KeyProviderCryptoExtension.EEK), "encryptedKey version name must be '%s', is '%s'", KeyProviderCryptoExtension.EEK, encryptedKeyVersion.getEncryptedKeyVersion().getVersionName() ); checkNotNull(encryptedKeyVersion.getEncryptedKeyVersion(), "encryptedKey"); Map<String, String> params = new HashMap<String, String>(); params.put(KMSRESTConstants.EEK_OP, KMSRESTConstants.EEK_DECRYPT); Map<String, Object> jsonPayload = new HashMap<String, Object>(); jsonPayload.put(KMSRESTConstants.NAME_FIELD, encryptedKeyVersion.getEncryptionKeyName()); jsonPayload.put(KMSRESTConstants.IV_FIELD, Base64.encodeBase64String( encryptedKeyVersion.getEncryptedKeyIv())); jsonPayload.put(KMSRESTConstants.MATERIAL_FIELD, Base64.encodeBase64String( encryptedKeyVersion.getEncryptedKeyVersion().getMaterial())); URL url = createURL(KMSRESTConstants.KEY_VERSION_RESOURCE, encryptedKeyVersion.getEncryptionKeyVersionName(), KMSRESTConstants.EEK_SUB_RESOURCE, params); HttpURLConnection conn = createConnection(url, HTTP_POST); conn.setRequestProperty(CONTENT_TYPE, 
APPLICATION_JSON_MIME); Map response = call(conn, jsonPayload, HttpURLConnection.HTTP_OK, Map.class); return parseJSONKeyVersion(response); } @Override public List<KeyVersion> getKeyVersions(String name) throws IOException { checkNotEmpty(name, "name"); URL url = createURL(KMSRESTConstants.KEY_RESOURCE, name, KMSRESTConstants.VERSIONS_SUB_RESOURCE, null); HttpURLConnection conn = createConnection(url, HTTP_GET); List response = call(conn, null, HttpURLConnection.HTTP_OK, List.class); List<KeyVersion> versions = null; if (!response.isEmpty()) { versions = new ArrayList<KeyVersion>(); for (Object obj : response) { versions.add(parseJSONKeyVersion((Map) obj)); } } return versions; } @Override public Metadata getMetadata(String name) throws IOException { checkNotEmpty(name, "name"); URL url = createURL(KMSRESTConstants.KEY_RESOURCE, name, KMSRESTConstants.METADATA_SUB_RESOURCE, null); HttpURLConnection conn = createConnection(url, HTTP_GET); Map response = call(conn, null, HttpURLConnection.HTTP_OK, Map.class); return parseJSONMetadata(response); } @Override public void deleteKey(String name) throws IOException { checkNotEmpty(name, "name"); URL url = createURL(KMSRESTConstants.KEY_RESOURCE, name, null, null); HttpURLConnection conn = createConnection(url, HTTP_DELETE); call(conn, null, HttpURLConnection.HTTP_OK, null); } @Override public void flush() throws IOException { // NOP // the client does not keep any local state, thus flushing is not required // because of the client. // the server should not keep in memory state on behalf of clients either. } @Override public void warmUpEncryptedKeys(String... 
keyNames) throws IOException { try { encKeyVersionQueue.initializeQueuesForKeys(keyNames); } catch (ExecutionException e) { throw new IOException(e); } } @Override public void drain(String keyName) { encKeyVersionQueue.drain(keyName); } @VisibleForTesting public int getEncKeyQueueSize(String keyName) { return encKeyVersionQueue.getSize(keyName); } @Override public long renewDelegationToken(final Token<?> dToken) throws IOException { try { final String doAsUser = getDoAsUser(); final DelegationTokenAuthenticatedURL.Token token = generateDelegationToken(dToken); final URL url = createURL(null, null, null, null); LOG.debug("Renewing delegation token {} with url:{}, as:{}", token, url, doAsUser); final DelegationTokenAuthenticatedURL authUrl = new DelegationTokenAuthenticatedURL(configurator); return getActualUgi().doAs( new PrivilegedExceptionAction<Long>() { @Override public Long run() throws Exception { return authUrl.renewDelegationToken(url, token, doAsUser); } } ); } catch (Exception ex) { if (ex instanceof IOException) { throw (IOException) ex; } else { throw new IOException(ex); } } } @Override public Void cancelDelegationToken(final Token<?> dToken) throws IOException { try { final String doAsUser = getDoAsUser(); final DelegationTokenAuthenticatedURL.Token token = generateDelegationToken(dToken); return getActualUgi().doAs( new PrivilegedExceptionAction<Void>() { @Override public Void run() throws Exception { final URL url = createURL(null, null, null, null); LOG.debug("Cancelling delegation token {} with url:{}, as:{}", dToken, url, doAsUser); final DelegationTokenAuthenticatedURL authUrl = new DelegationTokenAuthenticatedURL(configurator); authUrl.cancelDelegationToken(url, token, doAsUser); return null; } } ); } catch (Exception ex) { if (ex instanceof IOException) { throw (IOException) ex; } else { throw new IOException(ex); } } } /** * Get the doAs user name. 
* * 'actualUGI' is the UGI of the user creating the client * It is possible that the creator of the KMSClientProvier * calls this method on behalf of a proxyUser (the doAsUser). * In which case this call has to be made as the proxy user. * * @return the doAs user name. * @throws IOException */ private String getDoAsUser() throws IOException { UserGroupInformation currentUgi = UserGroupInformation.getCurrentUser(); return (currentUgi.getAuthenticationMethod() == UserGroupInformation.AuthenticationMethod.PROXY) ? currentUgi.getShortUserName() : null; } /** * Generate a DelegationTokenAuthenticatedURL.Token from the given generic * typed delegation token. * * @param dToken The delegation token. * @return The DelegationTokenAuthenticatedURL.Token, with its delegation * token set to the delegation token passed in. */ private DelegationTokenAuthenticatedURL.Token generateDelegationToken( final Token<?> dToken) { DelegationTokenAuthenticatedURL.Token token = new DelegationTokenAuthenticatedURL.Token(); Token<AbstractDelegationTokenIdentifier> dt = new Token<>(dToken.getIdentifier(), dToken.getPassword(), dToken.getKind(), dToken.getService()); token.setDelegationToken(dt); return token; } @Override public Token<?>[] addDelegationTokens(final String renewer, Credentials credentials) throws IOException { Token<?>[] tokens = null; Text dtService = getDelegationTokenService(); Token<?> token = credentials.getToken(dtService); if (token == null) { final URL url = createURL(null, null, null, null); final DelegationTokenAuthenticatedURL authUrl = new DelegationTokenAuthenticatedURL(configurator); try { final String doAsUser = getDoAsUser(); token = getActualUgi().doAs(new PrivilegedExceptionAction<Token<?>>() { @Override public Token<?> run() throws Exception { // Not using the cached token here.. Creating a new token here // everytime. 
return authUrl.getDelegationToken(url, new DelegationTokenAuthenticatedURL.Token(), renewer, doAsUser); } }); if (token != null) { credentials.addToken(token.getService(), token); tokens = new Token<?>[] { token }; } else { throw new IOException("Got NULL as delegation token"); } } catch (InterruptedException e) { Thread.currentThread().interrupt(); } catch (Exception e) { throw new IOException(e); } } return tokens; } private Text getDelegationTokenService() throws IOException { URL url = new URL(kmsUrl); InetSocketAddress addr = new InetSocketAddress(url.getHost(), url.getPort()); Text dtService = SecurityUtil.buildTokenService(addr); return dtService; } private boolean currentUgiContainsKmsDt() throws IOException { // Add existing credentials from current UGI, since provider is cached. Credentials creds = UserGroupInformation.getCurrentUser(). getCredentials(); if (!creds.getAllTokens().isEmpty()) { org.apache.hadoop.security.token.Token<? extends TokenIdentifier> dToken = creds.getToken(getDelegationTokenService()); if (dToken != null) { return true; } } return false; } private UserGroupInformation getActualUgi() throws IOException { final UserGroupInformation currentUgi = UserGroupInformation .getCurrentUser(); if (LOG.isDebugEnabled()) { UserGroupInformation.logAllUserInfo(currentUgi); } // Use current user by default UserGroupInformation actualUgi = currentUgi; if (currentUgi.getRealUser() != null) { // Use real user for proxy user actualUgi = currentUgi.getRealUser(); } else if (!currentUgiContainsKmsDt() && !currentUgi.hasKerberosCredentials()) { // Use login user for user that does not have either // Kerberos credential or KMS delegation token for KMS operations actualUgi = currentUgi.getLoginUser(); } return actualUgi; } /** * Shutdown valueQueue executor threads */ @Override public void close() throws IOException { try { encKeyVersionQueue.shutdown(); } catch (Exception e) { throw new IOException(e); } finally { if (sslFactory != null) { 
sslFactory.destroy(); sslFactory = null; } } } @VisibleForTesting String getKMSUrl() { return kmsUrl; } }
apache-2.0
secdec/bytefrog
sourcemap-parser/src/main/java/fm/ua/ikysil/smap/parser/VendorInfoBuilder.java
2570
/* * Sun Public License Notice * * This file is subject to the Sun Public License Version * 1.0 (the "License"); you may not use this file except in compliance with * the License. A copy of the License is available at http://www.sun.com/ * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is sourcemap Library. * The Initial Developer of the Original Code is Illya Kysil. * Portions created by the Initial Developer are Copyright (C) 2004 * the Initial Developer. All Rights Reserved. * * Alternatively, the Library may be used under the terms of either * the Mozilla Public License Version 1.1 or later (the "MPL"), * the GNU General Public License Version 2 or later (the "GPL"), or * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), * (the "Alternative License"), in which case the provisions * of the respective Alternative License are applicable instead of those above. * If you wish to allow use of your version of this Library only under * the terms of an Alternative License, and not to allow others to use your * version of this Library under the terms of the License, indicate your decision by * deleting the provisions above and replace them with the notice and other * provisions required by the Alternative License. If you do not delete * the provisions above, a recipient may use your version of this Library under * the terms of any one of the SPL, the MPL, the GPL or the LGPL. 
*/

/*
 * VendorInfoBuilder.java
 *
 * Created on May 3, 2004, 9:36 AM
 */

package fm.ua.ikysil.smap.parser;

import fm.ua.ikysil.smap.*;

/**
 * Builder for the vendor-specific section of a source map (SMAP).
 *
 * <p>Section layout as consumed by {@link #build(State, String[])}:
 * {@code lines[0]} is the section marker, {@code lines[1]} is the vendor
 * identifier, and any remaining lines are opaque vendor data.
 *
 * @author Illya Kysil
 */
public class VendorInfoBuilder implements Builder {

    /** Creates a new instance of VendorInfoBuilder */
    public VendorInfoBuilder() {
    }

    /**
     * Returns the name of the SMAP section handled by this builder.
     *
     * @return the vendor section name constant
     */
    public String getSectionName() {
        return Constants.VendorSectionName;
    }

    /**
     * Parses a vendor section and attaches the resulting {@link VendorInfo}
     * to the current stratum of the parser state.
     *
     * @param state current parser state; its stratum receives the vendor info
     * @param lines raw section lines, including the section marker at index 0
     * @throws SourceMapException if the mandatory vendor id line is missing
     */
    public void build(State state, String[] lines) throws SourceMapException {
        // lines[0] is the section marker; the vendor id at lines[1] is
        // mandatory. The previous guard (lines.length < 1) still allowed an
        // ArrayIndexOutOfBoundsException below when the section consisted of
        // the marker alone, so require at least two lines here.
        if (lines.length < 2) {
            throw new SourceMapException("Vendor information expected");
        }
        VendorInfo vendorInfo = new VendorInfo(lines[1]);
        // Everything after the vendor id line is opaque vendor data.
        String[] viLines = new String[lines.length - 2];
        System.arraycopy(lines, 2, viLines, 0, viLines.length);
        vendorInfo.setData(viLines);
        state.getStratum().getVendorInfoList().add(vendorInfo);
    }

}
apache-2.0
apache/incubator-shardingsphere
shardingsphere-infra/shardingsphere-infra-binder/src/test/java/org/apache/shardingsphere/infra/binder/segment/select/projection/impl/AggregationDistinctProjectionTest.java
1788
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.shardingsphere.infra.binder.segment.select.projection.impl; import org.apache.shardingsphere.infra.database.type.DatabaseType; import org.apache.shardingsphere.sql.parser.sql.common.constant.AggregationType; import org.junit.Test; import static org.hamcrest.CoreMatchers.is; import static org.junit.Assert.assertThat; import static org.mockito.Mockito.mock; public final class AggregationDistinctProjectionTest { private final AggregationDistinctProjection aggregationDistinctProjection = new AggregationDistinctProjection( 0, 0, AggregationType.COUNT, "(DISTINCT order_id)", "c", "order_id", mock(DatabaseType.class)); @Test public void assertGetDistinctColumnLabel() { assertThat(aggregationDistinctProjection.getDistinctColumnLabel(), is("c")); } @Test public void assertGetDistinctColumnName() { assertThat(aggregationDistinctProjection.getDistinctInnerExpression(), is("order_id")); } }
apache-2.0
whchoi83/arcus-java-client
src/main/java/net/spy/memcached/BulkService.java
8261
/*
 * arcus-java-client : Arcus Java client
 * Copyright 2010-2014 NAVER Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.spy.memcached;

import net.spy.memcached.collection.CollectionResponse;
import net.spy.memcached.compat.SpyObject;
import net.spy.memcached.internal.BasicThreadFactory;
import net.spy.memcached.internal.CollectionFuture;
import net.spy.memcached.ops.CollectionOperationStatus;
import net.spy.memcached.ops.StoreType;
import net.spy.memcached.transcoders.Transcoder;

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.FutureTask;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

/**
 * Executes bulk store ("set") operations against a set of ArcusClient
 * connections on a dedicated thread pool. Each bulk request is submitted as a
 * single {@link Task}; the task issues the individual async stores in windows
 * of {@code DEFAULT_LOOP_LIMIT} operations, waiting for each window to finish
 * before issuing the next, and collects per-key failures into a result map.
 */
class BulkService extends SpyObject {

  // NOTE(review): this static field is assigned from the instance
  // constructor, so every BulkService instance in the JVM shares the loop
  // limit of the most recently constructed one. Looks like an instance
  // field was intended — confirm before relying on multiple instances
  // with different limits.
  private static int DEFAULT_LOOP_LIMIT;

  // Pool that runs the bulk Tasks (not the individual memcached ops, which
  // are async on the client's own I/O machinery).
  private final ExecutorService executor;

  // Timeout (ms) applied when waiting on each individual store future.
  private final long singleOpTimeout;

  /**
   * @param loopLimit       number of in-flight operations per batch window
   * @param threadCount     fixed size of the bulk-task thread pool
   * @param singleOpTimeout per-operation wait timeout in milliseconds
   */
  BulkService(int loopLimit, int threadCount, long singleOpTimeout) {
    // Fixed-size pool; AbortPolicy makes submissions fail fast with
    // RejectedExecutionException rather than queueing unboundedly blocking
    // callers (the queue itself is unbounded, so rejection only happens
    // after shutdown). Threads are daemons named "bulk-service".
    this.executor = new ThreadPoolExecutor(threadCount, threadCount, 60L,
            TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>(),
            new BasicThreadFactory("bulk-service", true),
            new ThreadPoolExecutor.AbortPolicy());
    BulkService.DEFAULT_LOOP_LIMIT = loopLimit;
    this.singleOpTimeout = singleOpTimeout;
  }

  /**
   * Stores the same value under every key in {@code keys}.
   *
   * @return a future yielding a map of key -> failure status; keys that
   *         succeeded are absent from the map
   * @throws IllegalArgumentException if {@code keys} is null
   */
  <T> Future<Map<String, CollectionOperationStatus>> setBulk(
          List<String> keys, int exp, T value, Transcoder<T> transcoder,
          ArcusClient[] client) {
    if (keys == null) {
      throw new IllegalArgumentException("Key list is null.");
    }
    assert !executor.isShutdown() : "Pool has already shut down.";
    BulkSetWorker<T> w = new BulkSetWorker<T>(keys, exp, value,
            transcoder, client, singleOpTimeout);
    BulkService.Task<Map<String, CollectionOperationStatus>> task =
            new BulkService.Task<Map<String, CollectionOperationStatus>>(w);
    executor.submit(task);
    return task;
  }

  /**
   * Stores each entry of {@code o} (key -> value) individually.
   *
   * @return a future yielding a map of key -> failure status; keys that
   *         succeeded are absent from the map
   * @throws IllegalArgumentException if {@code o} is null
   */
  <T> Future<Map<String, CollectionOperationStatus>> setBulk(
          Map<String, T> o, int exp, Transcoder<T> transcoder,
          ArcusClient[] client) {
    if (o == null) {
      throw new IllegalArgumentException("Map is null.");
    }
    assert !executor.isShutdown() : "Pool has already shut down.";
    BulkSetWorker<T> w = new BulkSetWorker<T>(o, exp, transcoder, client,
            singleOpTimeout);
    BulkService.Task<Map<String, CollectionOperationStatus>> task =
            new BulkService.Task<Map<String, CollectionOperationStatus>>(w);
    executor.submit(task);
    return task;
  }

  /**
   * Shuts down the bulk-task pool. Best effort: failures are logged, not
   * propagated.
   */
  void shutdown() {
    try {
      executor.shutdown();
    } catch (Exception e) {
      getLogger().warn("exception while shutting down bulk set service.", e);
    }
  }

  /**
   * FutureTask wrapper that forwards cancellation to the worker so the
   * worker can in turn cancel its in-flight per-key futures.
   */
  private static class Task<T> extends FutureTask<T> {
    private final BulkWorker worker;

    public Task(Callable<T> callable) {
      super(callable);
      // The callable passed in is always a BulkWorker; kept as a field so
      // cancel() can reach it.
      this.worker = (BulkWorker) callable;
    }

    @Override
    public boolean cancel(boolean mayInterruptIfRunning) {
      // Cancel the worker's outstanding operations first, then the task
      // itself; both must succeed for cancel() to report true.
      return worker.cancel() && super.cancel(mayInterruptIfRunning);
    }
  }

  /**
   * Bulk operation worker
   *
   * <p>Processes items {@code fromIndex..toIndex} in windows of
   * {@code DEFAULT_LOOP_LIMIT}: before starting window N+1 it awaits every
   * future of window N, and after the main loop it awaits the final
   * (possibly partial) window. Failures are recorded in {@link #errorList}
   * keyed by the item's key.
   */
  private abstract static class BulkWorker<T> extends SpyObject implements
          Callable<Map<String, CollectionOperationStatus>> {
    protected final ArcusClient[] clientList;
    // Per-item futures, indexed by item position.
    protected final ArrayList<Future<Boolean>> future;
    protected final long operationTimeout;
    // Cleared by cancel(); checked between every step of call().
    protected final AtomicBoolean isRunnable = new AtomicBoolean(true);
    // key -> failure status for items that did not complete successfully.
    protected final Map<String, CollectionOperationStatus> errorList;
    protected final int totalCount;
    protected final int fromIndex;
    protected final int toIndex;

    public BulkWorker(Collection keys, long timeout, ArcusClient[] clientList) {
      if (keys.size() < 1) {
        throw new IllegalArgumentException("Keys size must be greater than 0");
      }
      this.future = new ArrayList<Future<Boolean>>(keys.size());
      this.operationTimeout = timeout;
      this.clientList = getOptimalClients(clientList);
      this.errorList = new HashMap<String, CollectionOperationStatus>();
      // The full range is processed; indices are inclusive on both ends.
      fromIndex = 0;
      toIndex = keys.size() - 1;
      totalCount = toIndex - fromIndex + 1;
    }

    /**
     * Stops the worker and cancels every in-flight future.
     *
     * @return false if the worker was already stopped; otherwise the AND of
     *         all per-future cancel results
     */
    public boolean cancel() {
      if (!isRunnable()) {
        return false;
      }
      isRunnable.set(false);
      boolean ret = true;
      for (Future<Boolean> f : future) {
        if (f == null) {
          continue;
        }
        // Already-finished futures don't affect the result.
        if (f.isCancelled() || f.isDone()) {
          continue;
        }
        ret &= f.cancel(true);
        if (getLogger().isDebugEnabled()) {
          getLogger().debug("Cancel the future. " + f);
        }
      }
      getLogger().info("Cancel, bulk set worker.");
      return ret;
    }

    // Currently a pass-through; presumably a hook for picking a subset of
    // clients — TODO confirm intended behavior.
    private ArcusClient[] getOptimalClients(ArcusClient[] clientList) {
      return clientList;
    }

    // Running means: not cancelled AND the worker thread not interrupted.
    protected boolean isRunnable() {
      return isRunnable.get() && !Thread.currentThread().isInterrupted();
    }

    // Records the operation status of a failed future under the given key.
    protected void setErrorOpStatus(String key, int indexOfFuture) {
      errorList.put(key,
              ((CollectionFuture<Boolean>) future.get(indexOfFuture))
                      .getOperationStatus());
    }

    /** Issues the async operation for item {@code index}. */
    public abstract Future<Boolean> processItem(int index);

    /** Awaits the future of item {@code index}, recording any failure. */
    public abstract void awaitProcessResult(int index);

    public Map<String, CollectionOperationStatus> call() throws Exception {
      for (int pos = fromIndex; isRunnable() && pos <= toIndex; pos++) {
        // At each window boundary (every DEFAULT_LOOP_LIMIT items past the
        // start), drain the previous window before issuing more work.
        if ((pos - fromIndex) > 0
                && (pos - fromIndex) % DEFAULT_LOOP_LIMIT == 0) {
          for (int i = pos - DEFAULT_LOOP_LIMIT; isRunnable() && i < pos; i++) {
            awaitProcessResult(i);
          }
        }
        try {
          if (isRunnable()) {
            future.add(pos, processItem(pos));
          }
        } catch (IllegalStateException e) {
          // An interrupted submission surfaces as IllegalStateException;
          // treat it as cancellation, otherwise rethrow.
          if (Thread.currentThread().isInterrupted()) {
            break;
          } else {
            throw e;
          }
        }
      }
      // Drain the last window: its size is totalCount % DEFAULT_LOOP_LIMIT,
      // or a full DEFAULT_LOOP_LIMIT when the total divides evenly.
      for (int i = toIndex
              - (totalCount % DEFAULT_LOOP_LIMIT == 0 ? DEFAULT_LOOP_LIMIT
                      : totalCount % DEFAULT_LOOP_LIMIT) + 1;
           isRunnable() && i <= toIndex; i++) {
        awaitProcessResult(i);
      }
      return errorList;
    }
  }

  /**
   * Bulk set operation worker
   *
   * <p>Two modes: a single shared value for every key (one encoded
   * CachedData reused), or one value per key (parallel lists of keys and
   * encoded values).
   */
  private static class BulkSetWorker<T> extends BulkWorker<T> {
    private final List<String> keys;
    private final int exp;
    // Number of encoded values: 1 in shared-value mode, keys.size() in
    // per-key mode. Used to pick cos.get(index) vs cos.get(0).
    private final int cntCos;
    private List<CachedData> cos;

    public BulkSetWorker(List<String> keys, int exp, T value,
                         Transcoder<T> transcoder, ArcusClient[] clientList, long timeout) {
      super(keys, timeout, clientList);
      this.keys = keys;
      this.exp = exp;
      // Shared-value mode: encode once, reuse for every key.
      this.cos = new ArrayList<CachedData>();
      this.cos.add(transcoder.encode(value));
      this.cntCos = 1;
    }

    public BulkSetWorker(Map<String, T> o, int exp, Transcoder<T> transcoder,
                         ArcusClient[] clientList, long timeout) {
      super(o.keySet(), timeout, clientList);
      this.keys = new ArrayList<String>(o.keySet());
      this.exp = exp;
      // Per-key mode: encode each value up front, aligned with keys.
      this.cos = new ArrayList<CachedData>();
      for (String key : keys) {
        this.cos.add(transcoder.encode(o.get(key)));
      }
      this.cntCos = this.cos.size();
    }

    @Override
    public Future<Boolean> processItem(int index) {
      // Round-robin items across the available clients.
      return clientList[index % clientList.length].asyncStore(
              StoreType.set, keys.get(index), exp,
              (this.cntCos > 1 ? cos.get(index) : cos.get(0)));
    }

    @Override
    public void awaitProcessResult(int index) {
      try {
        boolean success = future.get(index).get(operationTimeout,
                TimeUnit.MILLISECONDS);
        if (!success) {
          // Store completed but was refused by the server; record it with
          // an END response status.
          errorList.put(
                  keys.get(index),
                  new CollectionOperationStatus(false, String
                          .valueOf(success), CollectionResponse.END));
        }
      } catch (Exception e) {
        // Timeout/interrupt/execution failure: cancel the op and record an
        // EXCEPTION status for this key.
        future.get(index).cancel(true);
        errorList.put(keys.get(index), new CollectionOperationStatus(
                false, e.getMessage(), CollectionResponse.EXCEPTION));
      }
    }
  }
}
apache-2.0
subutai-io/Subutai
management/server/core/identity-manager/identity-manager-impl/src/main/java/io/subutai/core/identity/impl/model/UserDelegateEntity.java
2648
package io.subutai.core.identity.impl.model; import javax.persistence.Access; import javax.persistence.AccessType; import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.Id; import javax.persistence.Table; import io.subutai.common.security.objects.PermissionObject; import io.subutai.common.security.objects.UserType; import io.subutai.core.identity.api.model.UserDelegate; /** * Delegate for User */ @Entity @Table( name = "user_delegate" ) @Access( AccessType.FIELD ) public class UserDelegateEntity implements UserDelegate { @Id @Column( name = "id" ) private String id; @Column( name = "user_id" ) private long userId; @Column( name = "type" ) private int type = UserType.REGULAR.getId(); @Column( name = "relation_document", length = 3000 ) private String relationDocument; @Override public String getId() { return id; } @Override public void setId( final String id ) { this.id = id; } @Override public long getUserId() { return userId; } @Override public void setUserId( final long userId ) { this.userId = userId; } @Override public int getType() { return type; } @Override public void setType( final int type ) { this.type = type; } @Override public String getRelationDocument() { return relationDocument; } @Override public void setRelationDocument( final String relationDocument ) { this.relationDocument = relationDocument; } @Override public String getLinkId() { return String.format( "%s|%s", getClassPath(), getUniqueIdentifier() ); } @Override public String getUniqueIdentifier() { return getId(); } @Override public String getClassPath() { return this.getClass().getSimpleName(); } @Override public String getContext() { return PermissionObject.IDENTITY_MANAGEMENT.getName(); } @Override public String getKeyId() { return getId(); } @Override public boolean equals( final Object o ) { if ( this == o ) { return true; } if ( !( o instanceof UserDelegateEntity ) ) { return false; } final UserDelegateEntity that = ( UserDelegateEntity ) o; return 
id != null ? id.equals( that.id ) : that.id == null; } @Override public int hashCode() { return id != null ? id.hashCode() : 0; } }
apache-2.0
apache/calcite
core/src/test/java/org/apache/calcite/plan/volcano/TraitConversionTest.java
11320
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to you under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.calcite.plan.volcano; import org.apache.calcite.plan.Convention; import org.apache.calcite.plan.ConventionTraitDef; import org.apache.calcite.plan.RelOptCluster; import org.apache.calcite.plan.RelOptCost; import org.apache.calcite.plan.RelOptPlanner; import org.apache.calcite.plan.RelOptRuleCall; import org.apache.calcite.plan.RelRule; import org.apache.calcite.plan.RelTrait; import org.apache.calcite.plan.RelTraitDef; import org.apache.calcite.plan.RelTraitSet; import org.apache.calcite.plan.volcano.AbstractConverter.ExpandConversionRule; import org.apache.calcite.rel.RelNode; import org.apache.calcite.rel.metadata.RelMetadataQuery; import org.checkerframework.checker.nullness.qual.Nullable; import org.immutables.value.Value; import org.junit.jupiter.api.Test; import java.util.List; import static org.apache.calcite.plan.volcano.PlannerTests.PHYS_CALLING_CONVENTION; import static org.apache.calcite.plan.volcano.PlannerTests.TestLeafRel; import static org.apache.calcite.plan.volcano.PlannerTests.TestSingleRel; import static org.apache.calcite.plan.volcano.PlannerTests.newCluster; import static org.junit.jupiter.api.Assertions.assertTrue; /** * Unit test for {@link 
org.apache.calcite.rel.RelDistributionTraitDef}. */ class TraitConversionTest { private static final ConvertRelDistributionTraitDef NEW_TRAIT_DEF_INSTANCE = new ConvertRelDistributionTraitDef(); private static final SimpleDistribution SIMPLE_DISTRIBUTION_ANY = new SimpleDistribution("ANY"); private static final SimpleDistribution SIMPLE_DISTRIBUTION_RANDOM = new SimpleDistribution("RANDOM"); private static final SimpleDistribution SIMPLE_DISTRIBUTION_SINGLETON = new SimpleDistribution("SINGLETON"); @Test void testTraitConversion() { final VolcanoPlanner planner = new VolcanoPlanner(); planner.addRelTraitDef(ConventionTraitDef.INSTANCE); planner.addRelTraitDef(NEW_TRAIT_DEF_INSTANCE); planner.addRule(RandomSingleTraitRule.INSTANCE); planner.addRule(SingleLeafTraitRule.INSTANCE); planner.addRule(ExpandConversionRule.INSTANCE); planner.setTopDownOpt(false); final RelOptCluster cluster = newCluster(planner); final NoneLeafRel leafRel = new NoneLeafRel(cluster, "a"); final NoneSingleRel singleRel = new NoneSingleRel(cluster, leafRel); final RelNode convertedRel = planner.changeTraits(singleRel, cluster.traitSetOf(PHYS_CALLING_CONVENTION)); planner.setRoot(convertedRel); final RelNode result = planner.chooseDelegate().findBestExp(); assertTrue(result instanceof RandomSingleRel); assertTrue(result.getTraitSet().contains(PHYS_CALLING_CONVENTION)); assertTrue(result.getTraitSet().contains(SIMPLE_DISTRIBUTION_RANDOM)); final RelNode input = result.getInput(0); assertTrue(input instanceof BridgeRel); assertTrue(input.getTraitSet().contains(PHYS_CALLING_CONVENTION)); assertTrue(input.getTraitSet().contains(SIMPLE_DISTRIBUTION_RANDOM)); final RelNode input2 = input.getInput(0); assertTrue(input2 instanceof SingletonLeafRel); assertTrue(input2.getTraitSet().contains(PHYS_CALLING_CONVENTION)); assertTrue(input2.getTraitSet().contains(SIMPLE_DISTRIBUTION_SINGLETON)); } /** Converts a {@link NoneSingleRel} (none convention, distribution any) * to {@link RandomSingleRel} (physical 
convention, distribution random). */ public static class RandomSingleTraitRule extends RelRule<RandomSingleTraitRule.Config> { static final RandomSingleTraitRule INSTANCE = ImmutableRandomSingleTraitRuleConfig.builder() .build() .withOperandSupplier(b -> b.operand(NoneSingleRel.class).anyInputs()) .toRule(); RandomSingleTraitRule(Config config) { super(config); } @Override public Convention getOutConvention() { return PHYS_CALLING_CONVENTION; } @Override public void onMatch(RelOptRuleCall call) { NoneSingleRel single = call.rel(0); RelNode input = single.getInput(); RelNode physInput = convert(input, single.getTraitSet() .replace(PHYS_CALLING_CONVENTION) .plus(SIMPLE_DISTRIBUTION_RANDOM)); call.transformTo( new RandomSingleRel( single.getCluster(), physInput)); } /** Rule configuration. */ @Value.Immutable @Value.Style(typeImmutable = "ImmutableRandomSingleTraitRuleConfig") public interface Config extends RelRule.Config { @Override default RandomSingleTraitRule toRule() { return new RandomSingleTraitRule(this); } } } /** Rel with physical convention and random distribution. */ private static class RandomSingleRel extends TestSingleRel { RandomSingleRel(RelOptCluster cluster, RelNode input) { super(cluster, cluster.traitSetOf(PHYS_CALLING_CONVENTION) .plus(SIMPLE_DISTRIBUTION_RANDOM), input); } @Override public @Nullable RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) { return planner.getCostFactory().makeTinyCost(); } @Override public RelNode copy(RelTraitSet traitSet, List<RelNode> inputs) { return new RandomSingleRel(getCluster(), sole(inputs)); } } /** Converts {@link NoneLeafRel} (none convention, any distribution) to * {@link SingletonLeafRel} (physical convention, singleton distribution). 
*/ public static class SingleLeafTraitRule extends RelRule<SingleLeafTraitRule.Config> { static final SingleLeafTraitRule INSTANCE = ImmutableSingleLeafTraitRuleConfig.builder() .build() .withOperandSupplier(b -> b.operand(NoneLeafRel.class).anyInputs()) .toRule(); SingleLeafTraitRule(Config config) { super(config); } @Override public Convention getOutConvention() { return PHYS_CALLING_CONVENTION; } @Override public void onMatch(RelOptRuleCall call) { NoneLeafRel leafRel = call.rel(0); call.transformTo( new SingletonLeafRel(leafRel.getCluster(), leafRel.label)); } /** Rule configuration. */ @Value.Immutable @Value.Style(typeImmutable = "ImmutableSingleLeafTraitRuleConfig") public interface Config extends RelRule.Config { @Override default SingleLeafTraitRule toRule() { return new SingleLeafTraitRule(this); } } } /** Rel with singleton distribution, physical convention. */ private static class SingletonLeafRel extends TestLeafRel { SingletonLeafRel(RelOptCluster cluster, String label) { super(cluster, cluster.traitSetOf(PHYS_CALLING_CONVENTION) .plus(SIMPLE_DISTRIBUTION_SINGLETON), label); } @Override public @Nullable RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) { return planner.getCostFactory().makeTinyCost(); } @Override public RelNode copy(RelTraitSet traitSet, List<RelNode> inputs) { return new SingletonLeafRel(getCluster(), label); } } /** Bridges the {@link SimpleDistribution}, difference between * {@link SingletonLeafRel} and {@link RandomSingleRel}. 
*/ private static class BridgeRel extends TestSingleRel { BridgeRel(RelOptCluster cluster, RelNode input) { super(cluster, cluster.traitSetOf(PHYS_CALLING_CONVENTION) .plus(SIMPLE_DISTRIBUTION_RANDOM), input); } @Override public @Nullable RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) { return planner.getCostFactory().makeTinyCost(); } @Override public RelNode copy(RelTraitSet traitSet, List<RelNode> inputs) { return new BridgeRel(getCluster(), sole(inputs)); } } /** Dummy distribution for test (simplified version of RelDistribution). */ private static class SimpleDistribution implements RelTrait { private final String name; SimpleDistribution(String name) { this.name = name; } @Override public String toString() { return name; } @Override public RelTraitDef getTraitDef() { return NEW_TRAIT_DEF_INSTANCE; } @Override public boolean satisfies(RelTrait trait) { return trait == this || trait == SIMPLE_DISTRIBUTION_ANY; } @Override public void register(RelOptPlanner planner) {} } /** Dummy distribution trait def for test (handles conversion of * SimpleDistribution). 
*/ private static class ConvertRelDistributionTraitDef extends RelTraitDef<SimpleDistribution> { @Override public Class<SimpleDistribution> getTraitClass() { return SimpleDistribution.class; } @Override public String toString() { return getSimpleName(); } @Override public String getSimpleName() { return "ConvertRelDistributionTraitDef"; } @Override public @Nullable RelNode convert(RelOptPlanner planner, RelNode rel, SimpleDistribution toTrait, boolean allowInfiniteCostConverters) { if (toTrait == SIMPLE_DISTRIBUTION_ANY) { return rel; } return new BridgeRel(rel.getCluster(), rel); } @Override public boolean canConvert(RelOptPlanner planner, SimpleDistribution fromTrait, SimpleDistribution toTrait) { return (fromTrait == toTrait) || (toTrait == SIMPLE_DISTRIBUTION_ANY) || (fromTrait == SIMPLE_DISTRIBUTION_SINGLETON && toTrait == SIMPLE_DISTRIBUTION_RANDOM); } @Override public SimpleDistribution getDefault() { return SIMPLE_DISTRIBUTION_ANY; } } /** Any distribution and none convention. */ private static class NoneLeafRel extends TestLeafRel { NoneLeafRel(RelOptCluster cluster, String label) { super(cluster, cluster.traitSetOf(Convention.NONE), label); } @Override public RelNode copy(RelTraitSet traitSet, List<RelNode> inputs) { assert traitSet.comprises(Convention.NONE, SIMPLE_DISTRIBUTION_ANY); assert inputs.isEmpty(); return this; } } /** Rel with any distribution and none convention. */ private static class NoneSingleRel extends TestSingleRel { NoneSingleRel(RelOptCluster cluster, RelNode input) { super(cluster, cluster.traitSetOf(Convention.NONE), input); } @Override public RelNode copy(RelTraitSet traitSet, List<RelNode> inputs) { assert traitSet.comprises(Convention.NONE, SIMPLE_DISTRIBUTION_ANY); return new NoneSingleRel(getCluster(), sole(inputs)); } } }
apache-2.0
tnghsla13/Convenient-Store
app/src/main/java/company/kr/sand/adapter/FeedListAdapter.java
5608
package company.kr.sand.adapter;

import android.app.Activity;
import android.content.Context;
import android.text.TextUtils;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.BaseAdapter;
import android.widget.ImageView;
import android.widget.TextView;

import com.android.volley.toolbox.ImageLoader;
import com.android.volley.toolbox.NetworkImageView;

import company.kr.sand.views.FeedImageView;
import company.kr.sand.controller.AppController;
import company.kr.sand.data.FeedItem;
import company.kr.sand.R;

import java.util.List;

/**
 * Adapter binding a list of {@link FeedItem} reviews to the feed ListView.
 * Each row shows reviewer name, timestamp, status text, three rating icons
 * (taste / quantity / performance, each coded "0"=bad, "1"=so-so, "2"=good)
 * and optional profile / feed images loaded via Volley.
 *
 * Created by User on 2015-11-01.
 */
public class FeedListAdapter extends BaseAdapter {

    private Activity activity;
    private LayoutInflater inflater;
    private List<FeedItem> feedItems;
    ImageLoader imageLoader = AppController.getInstance().getImageLoader();

    public FeedListAdapter(Activity activity, List<FeedItem> feedItems) {
        this.activity = activity;
        this.feedItems = feedItems;
    }

    @Override
    public int getCount() {
        return feedItems.size();
    }

    @Override
    public Object getItem(int location) {
        return feedItems.get(location);
    }

    @Override
    public long getItemId(int position) {
        return position;
    }

    @Override
    public View getView(int position, View convertView, ViewGroup parent) {
        if (inflater == null)
            inflater = (LayoutInflater) activity
                    .getSystemService(Context.LAYOUT_INFLATER_SERVICE);
        if (convertView == null)
            convertView = inflater.inflate(R.layout.feed_item, null);
        if (imageLoader == null)
            imageLoader = AppController.getInstance().getImageLoader();

        TextView name = (TextView) convertView.findViewById(R.id.name);
        TextView timestamp = (TextView) convertView
                .findViewById(R.id.timestamp);
        TextView statusMsg = (TextView) convertView
                .findViewById(R.id.txtStatusMsg);
        // NOTE(review): looked up but never populated below; kept so the
        // layout contract stays untouched.
        TextView url = (TextView) convertView.findViewById(R.id.txtUrl);
        NetworkImageView profilePic = (NetworkImageView) convertView
                .findViewById(R.id.profilePic);
        FeedImageView feedImageView = (FeedImageView) convertView
                .findViewById(R.id.feedImage1);

        FeedItem item = feedItems.get(position);

        name.setText(item.getName());

        // Timestamp shown verbatim; the "x ago" conversion was deliberately
        // disabled in the original code.
        timestamp.setText(item.getTimeStamp());

        // Hide the status view entirely when the message is empty.
        if (!TextUtils.isEmpty(item.getStatus())) {
            statusMsg.setText(item.getStatus());
            statusMsg.setVisibility(View.VISIBLE);
        } else {
            statusMsg.setVisibility(View.GONE);
        }

        // Rating icons share a single 0-2 code scheme.
        String taste = item.getTaste();
        Log.d("asdf", "taste = " + taste);
        bindRatingIcon((ImageView) convertView.findViewById(R.id.taste), taste);
        // BUG FIX: this section previously switched on `taste` instead of the
        // quantity value, so the quantity icon always mirrored the taste icon.
        bindRatingIcon((ImageView) convertView.findViewById(R.id.quantity),
                item.getQuantity());
        bindRatingIcon((ImageView) convertView.findViewById(R.id.performance),
                item.getPerformance());

        ImageView img_follow = (ImageView) convertView.findViewById(R.id.follow);

        // user profile pic
        profilePic.setImageUrl(item.getProfilePic(), imageLoader);

        // Feed image: only shown when the item carries an image URL.
        if (item.getImge() != null) {
            feedImageView.setImageUrl(item.getImge(), imageLoader);
            feedImageView.setVisibility(View.VISIBLE);
            feedImageView
                    .setResponseObserver(new FeedImageView.ResponseObserver() {
                        @Override
                        public void onError() {
                        }

                        @Override
                        public void onSuccess() {
                        }
                    });
        } else {
            feedImageView.setVisibility(View.GONE);
        }

        return convertView;
    }

    /**
     * Maps a rating code onto the matching drawable: "0" = bad, "1" = so-so,
     * "2" = good. Unknown codes leave the (possibly recycled) view untouched,
     * matching the original switch behavior; a null code is additionally
     * tolerated as a no-op instead of throwing NPE from the string switch.
     */
    private static void bindRatingIcon(ImageView view, String rating) {
        if (rating == null) {
            return;
        }
        switch (rating) {
            case "0":
                view.setImageResource(R.drawable.bad);
                break;
            case "1":
                view.setImageResource(R.drawable.soso);
                break;
            case "2":
                view.setImageResource(R.drawable.good);
                break;
            default:
                break;
        }
    }
}
apache-2.0
gunnarmorling/beanvalidation-tck
tests/src/main/java/org/hibernate/beanvalidation/tck/tests/constraints/invalidconstraintdefinitions/InvalidDefaultPayload.java
1365
/**
 * Bean Validation TCK
 *
 * License: Apache License, Version 2.0
 * See the license.txt file in the root directory or <http://www.apache.org/licenses/LICENSE-2.0>.
 */
package org.hibernate.beanvalidation.tck.tests.constraints.invalidconstraintdefinitions;

import static java.lang.annotation.ElementType.TYPE;
import static java.lang.annotation.RetentionPolicy.RUNTIME;

import java.lang.annotation.Documented;
import java.lang.annotation.Retention;
import java.lang.annotation.Target;
import javax.validation.Constraint;
import javax.validation.ConstraintValidator;
import javax.validation.ConstraintValidatorContext;
import javax.validation.Payload;

/**
 * A deliberately broken constraint definition: its {@code payload()} member
 * declares a non-empty default ({@code DummyPayload.class}) where the Bean
 * Validation contract expects an empty array default. Do NOT "fix" this class;
 * it exists so the TCK can assert that providers reject such definitions.
 * NOTE(review): intent inferred from the {@code invalidconstraintdefinitions}
 * package and the class name — confirm against the test that references it.
 *
 * @author Hardy Ferentschik
 */
@Documented
@Constraint(validatedBy = InvalidDefaultPayload.InvalidDefaultGroupValidator.class)
@Target({ TYPE })
@Retention(RUNTIME)
public @interface InvalidDefaultPayload {
	// Legal defaults for the standard message/groups members.
	public abstract String message() default "default message";

	public abstract Class<?>[] groups() default { };

	// The offending member: a non-empty payload default.
	public abstract Class<? extends Payload>[] payload() default DummyPayload.class;

	// Marker payload type used as the (illegal) default above.
	public class DummyPayload implements Payload {
	}

	// Validator body is irrelevant to the test; it unconditionally reports invalid.
	public class InvalidDefaultGroupValidator implements ConstraintValidator<InvalidDefaultPayload, Object> {

		@Override
		public boolean isValid(Object object, ConstraintValidatorContext constraintValidatorContext) {
			return false;
		}
	}
}
apache-2.0
petermr/norma
src/main/java/org/xmlcml/norma/sections/JATSRefElement.java
1067
package org.xmlcml.norma.sections;

import java.util.Arrays;
import java.util.List;

import org.apache.log4j.Level;
import org.apache.log4j.Logger;

import nu.xom.Element;

/**
 * Wraps a JATS {@code <ref>} element — one entry in an article's reference
 * list — expected to contain an optional label and an element-citation child.
 */
public class JATSRefElement extends JATSElement {
	static final Logger LOG = Logger.getLogger(JATSRefElement.class);
	static {
		// Forces DEBUG logging for this class regardless of the global log4j config.
		LOG.setLevel(Level.DEBUG);
	}

	// JATS tag name handled by this wrapper.
	static final String TAG = "ref";

	// Child element names a <ref> is allowed to contain.
	public final static List<String> ALLOWED_CHILD_NAMES = Arrays.asList(new String[] {
			JATSSpanFactory.LABEL,
			JATSDivFactory.ELEMENT_CITATION,
	});

	// The <element-citation> child; populated by applyNonXMLSemantics(), may stay null.
	private JATSElementCitationElement elementCitation;
	// Text of the <label> child. NOTE(review): assigned in applyNonXMLSemantics()
	// but never read in this class — possibly kept for debugging or future use.
	private String label;

	public JATSRefElement(Element element) {
		super(element);
	}

	/** Returns the PubMed id from the citation child, or null if no citation was found. */
	public String getPMID() {
		return elementCitation == null ? null : elementCitation.getPMID();
	}

	/** Returns the PubMed Central id from the citation child, or null if no citation was found. */
	public String getPMCID() {
		return elementCitation == null ? null : elementCitation.getPMCID();
	}

	/**
	 * Extracts the label text and the single element-citation child from the
	 * underlying XML. Presumably invoked by the JATSElement superclass after
	 * construction — TODO confirm against JATSElement.
	 */
	protected void applyNonXMLSemantics() {
		label = getSingleChildValue(JATSSpanFactory.LABEL);
		elementCitation = (JATSElementCitationElement) getSingleChild(JATSElementCitationElement.TAG);
	}
}
apache-2.0
dgutierr/kie-wb-common
kie-wb-common-services/kie-wb-common-services-api/src/main/java/org/kie/workbench/common/services/backend/project/KieProjectFactory.java
978
/*
 * Copyright 2016 Red Hat, Inc. and/or its affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.kie.workbench.common.services.backend.project;

import org.guvnor.common.services.project.project.ProjectFactory;
import org.kie.workbench.common.services.shared.project.KieProject;

/**
 * Generics-free interface for KIE. Note this is ONLY a *server-side* service.
 * Marker specialization of {@link ProjectFactory} fixed to {@link KieProject},
 * so backend code can inject the KIE-specific factory without generic wildcards.
 */
public interface KieProjectFactory extends ProjectFactory<KieProject> {

}
apache-2.0
nandakumar131/hadoop
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
8549
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.nfs.nfs3;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.Map.Entry;
import java.util.concurrent.ConcurrentMap;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.Time;

import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;

/**
 * A cache saves OpenFileCtx objects for different users. Each cache entry is
 * used to maintain the writing context for a single file.
 */
class OpenFileCtxCache {
  private static final Logger LOG =
      LoggerFactory.getLogger(OpenFileCtxCache.class);
  // Insert and delete with openFileMap are synced (on `this`); reads may be
  // lock-free because the map itself is concurrent.
  private final ConcurrentMap<FileHandle, OpenFileCtx> openFileMap = Maps
      .newConcurrentMap();

  private final int maxStreams;
  private final long streamTimeout;
  private final StreamMonitor streamMonitor;

  OpenFileCtxCache(NfsConfiguration config, long streamTimeout) {
    maxStreams = config.getInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY,
        NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_DEFAULT);
    LOG.info("Maximum open streams is " + maxStreams);
    this.streamTimeout = streamTimeout;
    streamMonitor = new StreamMonitor();
  }

  /**
   * The entry to be evicted is based on the following rules:<br>
   * 1. if the OpenFileCtx has any pending task, it will not be chosen.<br>
   * 2. if there is inactive OpenFileCtx, the first found one is to evict. <br>
   * 3. For OpenFileCtx entries don't belong to group 1 or 2, the idlest one
   * is select. If it's idle longer than OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT, it
   * will be evicted. Otherwise, the whole eviction request is failed.
   */
  @VisibleForTesting
  Entry<FileHandle, OpenFileCtx> getEntryToEvict() {
    Iterator<Entry<FileHandle, OpenFileCtx>> it = openFileMap.entrySet()
        .iterator();
    if (LOG.isTraceEnabled()) {
      LOG.trace("openFileMap size:" + size());
    }

    Entry<FileHandle, OpenFileCtx> idlest = null;

    while (it.hasNext()) {
      Entry<FileHandle, OpenFileCtx> pairs = it.next();
      OpenFileCtx ctx = pairs.getValue();
      if (!ctx.getActiveState()) {
        // Rule 2: any inactive stream can be evicted immediately.
        if (LOG.isDebugEnabled()) {
          LOG.debug("Got one inactive stream: " + ctx);
        }
        return pairs;
      }
      if (ctx.hasPendingWork()) {
        // Rule 1: always skip files with pending work.
        continue;
      }
      // Rule 3: track the entry with the oldest last-access time.
      if (idlest == null) {
        idlest = pairs;
      } else {
        if (ctx.getLastAccessTime() < idlest.getValue().getLastAccessTime()) {
          idlest = pairs;
        }
      }
    }

    if (idlest == null) {
      LOG.warn("No eviction candidate. All streams have pending work.");
      return null;
    } else {
      long idleTime = Time.monotonicNow()
          - idlest.getValue().getLastAccessTime();
      if (idleTime < NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT) {
        // Even the idlest stream is too fresh to evict safely.
        if (LOG.isDebugEnabled()) {
          LOG.debug("idlest stream's idle time:" + idleTime);
        }
        LOG.warn("All opened streams are busy, can't remove any from cache.");
        return null;
      } else {
        return idlest;
      }
    }
  }

  /**
   * Adds a new write context for the given handle, evicting one entry first
   * if the cache is full. The eviction victim's cleanup() runs outside the
   * lock because it may block.
   *
   * @return false when the cache is full and no entry could be evicted.
   */
  boolean put(FileHandle h, OpenFileCtx context) {
    OpenFileCtx toEvict = null;
    synchronized (this) {
      Preconditions.checkState(size() <= this.maxStreams,
          "stream cache size " + size() + " is larger than maximum" + this
              .maxStreams);
      if (size() == this.maxStreams) {
        Entry<FileHandle, OpenFileCtx> pairs = getEntryToEvict();
        if (pairs == null) {
          return false;
        } else {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Evict stream ctx: " + pairs.getValue());
          }
          toEvict = openFileMap.remove(pairs.getKey());
          // Typo fix: message previously read "odlest".
          Preconditions.checkState(toEvict == pairs.getValue(),
              "The deleted entry is not the same as oldest found.");
        }
      }
      openFileMap.put(h, context);
    }

    // Cleanup the old stream outside the lock
    if (toEvict != null) {
      toEvict.cleanup();
    }
    return true;
  }

  /**
   * Finds and removes streams idle longer than streamTimeout; their cleanup()
   * is invoked outside the lock. Each candidate is re-checked under the lock
   * before removal to avoid racing with concurrent writers.
   */
  @VisibleForTesting
  void scan(long streamTimeout) {
    ArrayList<OpenFileCtx> ctxToRemove = new ArrayList<OpenFileCtx>();
    Iterator<Entry<FileHandle, OpenFileCtx>> it = openFileMap.entrySet()
        .iterator();
    if (LOG.isTraceEnabled()) {
      LOG.trace("openFileMap size:" + size());
    }

    while (it.hasNext()) {
      Entry<FileHandle, OpenFileCtx> pairs = it.next();
      FileHandle handle = pairs.getKey();
      OpenFileCtx ctx = pairs.getValue();
      if (!ctx.streamCleanup(handle, streamTimeout)) {
        continue;
      }

      // Check it again inside lock before removing
      synchronized (this) {
        OpenFileCtx ctx2 = openFileMap.get(handle);
        if (ctx2 != null) {
          if (ctx2.streamCleanup(handle, streamTimeout)) {
            openFileMap.remove(handle);
            if (LOG.isDebugEnabled()) {
              LOG.debug("After remove stream " + handle.dumpFileHandle()
                  + ", the stream number:" + size());
            }
            ctxToRemove.add(ctx2);
          }
        }
      }
    }

    // Invoke the cleanup outside the lock
    for (OpenFileCtx ofc : ctxToRemove) {
      ofc.cleanup();
    }
  }

  OpenFileCtx get(FileHandle key) {
    return openFileMap.get(key);
  }

  int size() {
    return openFileMap.size();
  }

  void start() {
    streamMonitor.start();
  }

  // Evict all entries
  void cleanAll() {
    ArrayList<OpenFileCtx> cleanedContext = new ArrayList<OpenFileCtx>();
    synchronized (this) {
      Iterator<Entry<FileHandle, OpenFileCtx>> it = openFileMap.entrySet()
          .iterator();
      if (LOG.isTraceEnabled()) {
        LOG.trace("openFileMap size:" + size());
      }

      while (it.hasNext()) {
        Entry<FileHandle, OpenFileCtx> pairs = it.next();
        OpenFileCtx ctx = pairs.getValue();
        it.remove();
        cleanedContext.add(ctx);
      }
    }

    // Invoke the cleanup outside the lock
    for (OpenFileCtx ofc : cleanedContext) {
      ofc.cleanup();
    }
  }

  /** Stops the monitor thread (interrupt + bounded join), then evicts all entries. */
  void shutdown() {
    // stop the dump thread
    if (streamMonitor.isAlive()) {
      streamMonitor.shouldRun(false);
      streamMonitor.interrupt();
      try {
        streamMonitor.join(3000);
      } catch (InterruptedException ignored) {
      }
    }

    cleanAll();
  }

  /**
   * StreamMonitor wakes up periodically to find and closes idle streams.
   */
  class StreamMonitor extends Daemon {
    private final static int rotation = 5 * 1000; // 5 seconds
    private long lastWakeupTime = 0;
    // volatile: written by the shutdown thread via shouldRun(false) and read
    // by the monitor thread; previously termination relied on the interrupt
    // alone to make the write visible.
    private volatile boolean shouldRun = true;

    void shouldRun(boolean shouldRun) {
      this.shouldRun = shouldRun;
    }

    @Override
    public void run() {
      while (shouldRun) {
        scan(streamTimeout);

        // Check if it can sleep
        try {
          long workedTime = Time.monotonicNow() - lastWakeupTime;
          if (workedTime < rotation) {
            if (LOG.isTraceEnabled()) {
              LOG.trace("StreamMonitor can still have a sleep:"
                  + ((rotation - workedTime) / 1000));
            }
            Thread.sleep(rotation - workedTime);
          }
          lastWakeupTime = Time.monotonicNow();

        } catch (InterruptedException e) {
          LOG.info("StreamMonitor got interrupted");
          return;
        }
      }
    }
  }
}
apache-2.0
apache/sanselan
src/main/java/org/apache/commons/imaging/color/ColorHsv.java
1141
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.commons.imaging.color; public final class ColorHsv { public final double H, S, V; public ColorHsv(double h, double s, double v) { H = h; S = s; V = v; } @Override public final String toString() { return "{H: " + H + ", S: " + S + ", V: " + V + "}"; } }
apache-2.0
jwren/intellij-community
platform/platform-impl/src/com/intellij/openapi/actionSystem/impl/ActionButton.java
21387
// Copyright 2000-2022 JetBrains s.r.o. and contributors. Use of this source code is governed by the Apache 2.0 license. package com.intellij.openapi.actionSystem.impl; import com.intellij.icons.AllIcons; import com.intellij.ide.HelpTooltip; import com.intellij.internal.statistic.collectors.fus.ui.persistence.ToolbarClicksCollector; import com.intellij.openapi.actionSystem.*; import com.intellij.openapi.actionSystem.ex.*; import com.intellij.openapi.diagnostic.Logger; import com.intellij.openapi.keymap.KeymapUtil; import com.intellij.openapi.keymap.impl.IdeMouseEventDispatcher; import com.intellij.openapi.ui.popup.JBPopup; import com.intellij.openapi.util.IconLoader; import com.intellij.openapi.util.Key; import com.intellij.openapi.util.NlsContexts; import com.intellij.openapi.util.NlsSafe; import com.intellij.openapi.util.registry.Registry; import com.intellij.openapi.util.text.StringUtil; import com.intellij.openapi.util.text.Strings; import com.intellij.ui.popup.PopupFactoryImpl; import com.intellij.ui.popup.PopupState; import com.intellij.ui.scale.JBUIScale; import com.intellij.util.ObjectUtils; import com.intellij.util.ui.*; import com.intellij.util.ui.accessibility.AccessibleContextUtil; import com.intellij.util.ui.accessibility.ScreenReader; import org.jetbrains.annotations.NonNls; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import javax.accessibility.*; import javax.swing.*; import java.awt.*; import java.awt.event.*; import java.beans.PropertyChangeEvent; import java.beans.PropertyChangeListener; import java.util.Objects; import java.util.Set; public class ActionButton extends JComponent implements ActionButtonComponent, AnActionHolder, Accessible { // Contains action IDs which descriptions are permitted for displaying in the ActionButton tooltip @NonNls private static final Set<String> WHITE_LIST = Set.of("ExternalSystem.ProjectRefreshAction", "LoadConfigurationAction"); /** * By default, a popup action group 
button displays 'dropdown' icon. * Use this key to avoid suppress that icon for a presentation or a template presentation like this: * {@code presentation.putClientProperty(ActionButton.HIDE_DROPDOWN_ICON, true)}. */ public static final Key<Boolean> HIDE_DROPDOWN_ICON = Key.create("HIDE_DROPDOWN_ICON"); private JBDimension myMinimumButtonSize; private PropertyChangeListener myPresentationListener; private Icon myDisabledIcon; protected Icon myIcon; protected final Presentation myPresentation; protected final AnAction myAction; protected final String myPlace; protected final PopupState<JPopupMenu> myPopupState = PopupState.forPopupMenu(); private ActionButtonLook myLook = ActionButtonLook.SYSTEM_LOOK; private boolean myMouseDown; protected boolean myRollover; private boolean wasPopupJustClosedByButtonClick = false; private static boolean ourGlobalMouseDown; private boolean myNoIconsInPopup; private Insets myInsets; private boolean myManualUpdate; public ActionButton(@NotNull AnAction action, Presentation presentation, String place, @NotNull Dimension minimumSize) { setMinimumButtonSize(minimumSize); setIconInsets(null); myRollover = false; myMouseDown = false; myAction = action; myPresentation = presentation; myPlace = place; // Button should be focusable if screen reader is active setFocusable(ScreenReader.isActive()); enableEvents(AWTEvent.MOUSE_EVENT_MASK); // Pressing the SPACE key is the same as clicking the button addKeyListener(new KeyAdapter() { @Override public void keyReleased(KeyEvent e) { if (e.getModifiers() == 0 && e.getKeyCode() == KeyEvent.VK_SPACE) { click(); } } }); addFocusListener(new FocusListener() { @Override public void focusGained(FocusEvent e) { repaint(); } @Override public void focusLost(FocusEvent e) { repaint(); } }); putClientProperty(UIUtil.CENTER_TOOLTIP_DEFAULT, Boolean.TRUE); } public void setNoIconsInPopup(boolean noIconsInPopup) { myNoIconsInPopup = noIconsInPopup; } // used in Rider, please don't change visibility public void 
setMinimumButtonSize(@NotNull Dimension size) { myMinimumButtonSize = JBDimension.create(size); } @Override public void paintChildren(Graphics g) {} @Override public int getPopState() { return getPopState(isSelected()); } protected final boolean isRollover() { return myRollover; } public final boolean isSelected() { return myAction instanceof Toggleable && Toggleable.isSelected(myPresentation); } @Override public boolean isEnabled() { return isEnabled(super.isEnabled()); } protected boolean isEnabled(boolean componentEnabled) { return componentEnabled && myPresentation.isEnabled(); } private void onMousePresenceChanged(boolean setInfo) { ActionMenu.showDescriptionInStatusBar(setInfo, this, myPresentation.getDescription()); } public void click() { performAction(makeClickMouseEvent()); } @NotNull private MouseEvent makeClickMouseEvent() { return new MouseEvent(this, MouseEvent.MOUSE_CLICKED, System.currentTimeMillis(), 0, 0, 0, 1, false); } protected void performAction(MouseEvent e) { AnActionEvent event = AnActionEvent.createFromInputEvent(e, myPlace, myPresentation, getDataContext(), false, true); if (ActionUtil.lastUpdateAndCheckDumb(myAction, event, false) && isEnabled()) { ActionUtil.performDumbAwareWithCallbacks(myAction, event, () -> actionPerformed(event)); if (event.getInputEvent() instanceof MouseEvent) { ToolbarClicksCollector.record(myAction, myPlace, e, event.getDataContext()); } ActionToolbar toolbar = ActionToolbar.findToolbarBy(this); if (toolbar != null) { toolbar.updateActionsImmediately(); } } } protected DataContext getDataContext() { return ActionToolbar.getDataContextFor(this); } protected void actionPerformed(@NotNull AnActionEvent event) { HelpTooltip.hide(this); if (isPopupMenuAction(event)) { if (!wasPopupJustClosedByButtonClick) { showActionGroupPopup((ActionGroup)myAction, event); } wasPopupJustClosedByButtonClick = false; } else { myAction.actionPerformed(event); } } protected void showActionGroupPopup(@NotNull ActionGroup actionGroup, 
                                    @NotNull AnActionEvent event) {
  createAndShowActionGroupPopup(actionGroup, event);
}

// Creates the popup for a popup action group and shows it underneath this button.
protected @NotNull JBPopup createAndShowActionGroupPopup(@NotNull ActionGroup actionGroup, @NotNull AnActionEvent event) {
  PopupFactoryImpl.ActionGroupPopup popup = new PopupFactoryImpl.ActionGroupPopup(
    null, actionGroup, event.getDataContext(), false, false, true, false,
    null, -1, null, ActionPlaces.getActionGroupPopupPlace(event.getPlace()),
    createPresentationFactory(), false) {
    @Override
    public void cancel(InputEvent inputEvent) {
      super.cancel(inputEvent);
      // If the press that cancelled the popup landed on this very button, remember it so
      // the same press does not immediately reopen the popup (see actionPerformed()).
      if (inputEvent instanceof MouseEvent && inputEvent.getID() == MouseEvent.MOUSE_PRESSED) {
        MouseEvent e = (MouseEvent)inputEvent;
        Component target = ObjectUtils.doIfNotNull(
          e.getComponent(), c -> SwingUtilities.getDeepestComponentAt(c, e.getX(), e.getY()));
        if (ActionButton.this == target) wasPopupJustClosedByButtonClick = true;
      }
    }
  };
  popup.setShowSubmenuOnHover(true);
  popup.showUnderneathOf(event.getInputEvent().getComponent());
  return popup;
}

// Presentation factory for popup items; strips icons when myNoIconsInPopup is set.
@NotNull
private MenuItemPresentationFactory createPresentationFactory() {
  return new MenuItemPresentationFactory() {
    @Override
    protected void processPresentation(@NotNull Presentation presentation) {
      super.processPresentation(presentation);
      if (myNoIconsInPopup) {
        presentation.setIcon(null);
        presentation.setHoveredIcon(null);
      }
    }
  };
}

// True if clicking the button should show a popup menu instead of performing the action directly.
private boolean isPopupMenuAction(@NotNull AnActionEvent event) {
  if (!(myAction instanceof ActionGroup)) return false;
  if (myAction instanceof CustomComponentAction) return false;
  if (!event.getPresentation().isPopupGroup()) return false;
  // do not call potentially slow `canBePerformed` for a button managed by a toolbar
  if (event.getPresentation().isPerformGroup() ||
      myManualUpdate && ((ActionGroup)myAction).canBePerformed(event.getDataContext())) return false;
  return true;
}

// Detaches the presentation listener and resets transient mouse state when removed from the hierarchy.
@Override
public void removeNotify() {
  if (myRollover) {
    onMousePresenceChanged(false);
  }
  if (myPresentationListener != null) {
    myPresentation.removePropertyChangeListener(myPresentationListener);
    myPresentationListener = null;
  }
  myRollover = false;
  myMouseDown = false;
  HelpTooltip.dispose(this);
  super.removeNotify();
}

@Override
public void addNotify() {
  super.addNotify();
  if (myPresentationListener == null) {
    myPresentation.addPropertyChangeListener(myPresentationListener = this::presentationPropertyChanged);
  }
  // A toolbar updates its buttons itself; a standalone button must trigger its own update.
  if (!(getParent() instanceof ActionToolbar)) {
    ActionManagerEx.doWithLazyActionManager(__ -> update());
  }
  else {
    updateToolTipText();
    updateIcon();
  }
}

/** Manually runs the action's update; used only when the button is not managed by a toolbar. */
public void update() {
  myManualUpdate = true;
  // the following code mirrors the ActionUpdater#updateActionReal code
  boolean wasPopup = myAction instanceof ActionGroup && ((ActionGroup)myAction).isPopup();
  myPresentation.setPopupGroup(myAction instanceof ActionGroup && (myPresentation.isPopupGroup() || wasPopup));
  AnActionEvent e = AnActionEvent.createFromInputEvent(null, myPlace, myPresentation, getDataContext(), false, true);
  ActionUtil.performDumbAwareUpdate(myAction, e, false);
  ActionUpdater.assertActionGroupPopupStateIsNotChanged(myAction, myPlace, wasPopup, myPresentation);
  updateToolTipText();
  updateIcon();
}

@Override
public void setToolTipText(@NlsContexts.Tooltip String toolTipText) {
  // When HelpTooltip is enabled the rich tooltip is installed by updateToolTipText() instead,
  // so the plain Swing tooltip is deliberately not set here.
  if (!Registry.is("ide.helptooltip.enabled")) {
    // Strip trailing periods, then append the shortcut, e.g. "Run (Shift+F10)".
    while (StringUtil.endsWithChar(toolTipText, '.')) {
      toolTipText = toolTipText.substring(0, toolTipText.length() - 1);
    }
    String shortcutsText = getShortcutText();
    if (Strings.isNotEmpty(shortcutsText)) {
      // NOTE(review): if toolTipText is null here, string concatenation yields "null (...)" —
      // verify callers never pass null text for an action that has a shortcut.
      toolTipText += " (" + shortcutsText + ")";
    }
    super.setToolTipText(Strings.isNotEmpty(toolTipText) ? toolTipText : null);
  }
}

@Override
public void updateUI() {
  if (myLook != null) {
    myLook.updateUI();
  }
  updateToolTipText();
}

// Preferred size is the (rescaled) minimum size unless the icon plus icon insets exceed it.
@Override
public Dimension getPreferredSize() {
  if (myMinimumButtonSize != null) myMinimumButtonSize.update();
  Icon icon = getIcon();
  Dimension size = icon.getIconWidth() < myMinimumButtonSize.width && icon.getIconHeight() < myMinimumButtonSize.height ?
                   new Dimension(myMinimumButtonSize) :
                   new Dimension(Math.max(myMinimumButtonSize.width, icon.getIconWidth() + myInsets.left + myInsets.right),
                                 Math.max(myMinimumButtonSize.height, icon.getIconHeight() + myInsets.top + myInsets.bottom));
  JBInsets.addTo(size, getInsets());
  return size;
}

// Null restores empty insets; the value is defensively copied into a scalable JBInsets.
public void setIconInsets(@Nullable Insets insets) {
  myInsets = insets != null ? JBInsets.create(insets) : JBInsets.emptyInsets();
}

@Override
public Dimension getMinimumSize() {
  return getPreferredSize();
}

/**
 * @return button's icon. Icon depends on action's state and button's state. It means that the method returns
 *         disabled icon if action is disabled.
 *         In case of rollover (POPPED) or pressed (PUSHED) button's state hovered icon is used (if presented)
 *         If the action's icon is {@code null} then it returns
 *         an empty icon.
 */
public Icon getIcon() {
  boolean enabled = isEnabled();
  int popState = getPopState();
  Icon hoveredIcon = (popState == POPPED || popState == PUSHED) ? myPresentation.getHoveredIcon() : null;
  Icon icon = enabled ? (hoveredIcon == null ? myIcon : hoveredIcon) : myDisabledIcon;
  return icon == null ?
         getFallbackIcon(enabled) : icon;
}

// Icon used when neither the presentation icon nor a disabled variant is available.
@NotNull
protected Icon getFallbackIcon(boolean enabled) {
  return EmptyIcon.ICON_18;
}

/** Re-reads the icon from the presentation and (re)derives the disabled icon. */
public void updateIcon() {
  myIcon = myPresentation.getIcon();
  // set disabled icon if it is specified
  if (myPresentation.getDisabledIcon() != null) {
    myDisabledIcon = myPresentation.getDisabledIcon();
  }
  else if (myIcon == null) {
    myDisabledIcon = null;
  }
  else if (IconLoader.isGoodSize(myIcon)) {
    // Derive the grayed-out variant from the regular icon.
    myDisabledIcon = IconLoader.getDisabledIcon(myIcon);
  }
  else {
    myDisabledIcon = null;
    Logger.getInstance(ActionButton.class).error("invalid icon (" + myIcon + ") for action " + myAction.getClass());
  }
}

// Installs either a HelpTooltip (title/shortcut/description/link) or a plain Swing tooltip,
// depending on the "ide.helptooltip.enabled" registry value.
protected void updateToolTipText() {
  String text = myPresentation.getText();
  String description = myPresentation.getDescription();
  if (Registry.is("ide.helptooltip.enabled")) {
    if (Strings.isNotEmpty(text) || Strings.isNotEmpty(description)) {
      HelpTooltip ht = new HelpTooltip().setTitle(text).setShortcut(getShortcutText());
      if (myAction instanceof TooltipLinkProvider) {
        TooltipLinkProvider.TooltipLink link = ((TooltipLinkProvider)myAction).getTooltipLink(this);
        if (link != null) {
          ht.setLink(link.tooltip, link.action);
        }
      }
      String id = ActionManager.getInstance().getId(myAction);
      // Show the description only for whitelisted actions or explicit opt-in providers,
      // and only when it adds information beyond the title.
      if (!Objects.equals(text, description) &&
          ((id != null && WHITE_LIST.contains(id)) || myAction instanceof TooltipDescriptionProvider)) {
        ht.setDescription(description);
      }
      ht.installOn(this);
    }
  }
  else {
    HelpTooltip.dispose(this);
    setToolTipText(text == null ?
                   description : text);
  }
}

@Nullable
protected @NlsSafe String getShortcutText() {
  return KeymapUtil.getFirstKeyboardShortcutText(myAction);
}

// Paint order: component background, then the look (background/icon/border), then the dropdown arrow.
@Override
public void paintComponent(Graphics g) {
  jComponentPaint(g);
  paintButtonLook(g);
  if (shallPaintDownArrow()) {
    paintDownArrow(g);
  }
}

// used in Rider, please don't change visibility
protected void jComponentPaint(Graphics g) {
  super.paintComponent(g);
}

// True when a small dropdown arrow should be painted over the icon: the action is a popup
// group and neither the template nor the current presentation opted out via HIDE_DROPDOWN_ICON.
protected boolean shallPaintDownArrow() {
  if (!(myAction instanceof ActionGroup)) return false;
  if (!myPresentation.isPopupGroup()) return false;
  if (Boolean.TRUE == myAction.getTemplatePresentation().getClientProperty(HIDE_DROPDOWN_ICON)) return false;
  if (Boolean.TRUE == myPresentation.getClientProperty(HIDE_DROPDOWN_ICON)) return false;
  return true;
}

private void paintDownArrow(Graphics g) {
  Container parent = getParent();
  boolean horizontal = !(parent instanceof ActionToolbarImpl) ||
                       ((ActionToolbarImpl)parent).getOrientation() == SwingConstants.HORIZONTAL;
  // The arrow offset differs slightly between horizontal and vertical toolbars.
  int x = horizontal ? JBUIScale.scale(6) : JBUIScale.scale(5);
  int y = horizontal ? JBUIScale.scale(5) : JBUIScale.scale(6);
  Icon arrowIcon = isEnabled() ? AllIcons.General.Dropdown
                               : IconLoader.getDisabledIcon(AllIcons.General.Dropdown);
  arrowIcon.paintIcon(this, g, x, y);
}

protected void paintButtonLook(Graphics g) {
  ActionButtonLook look = getButtonLook();
  if (isEnabled() || !StartupUiUtil.isUnderDarcula()) {
    look.paintBackground(g, this);
  }
  look.paintIcon(g, this, getIcon());
  look.paintBorder(g, this);
}

protected ActionButtonLook getButtonLook() {
  return myLook;
}

// Passing null restores the default system look.
public void setLook(ActionButtonLook look) {
  myLook = look == null ?
           ActionButtonLook.SYSTEM_LOOK : look;
  repaint();
}

@Override
protected void processMouseEvent(MouseEvent e) {
  IdeMouseEventDispatcher.requestFocusInNonFocusedWindow(e);
  super.processMouseEvent(e);
  if (e.isConsumed()) return;
  boolean skipPress = checkSkipPressForEvent(e);
  switch (e.getID()) {
    case MouseEvent.MOUSE_PRESSED:
      if (skipPress || !isEnabled()) return;
      myMouseDown = true;
      onMousePressed(e);
      ourGlobalMouseDown = true;
      repaint();
      break;
    case MouseEvent.MOUSE_RELEASED:
      if (skipPress || !isEnabled()) return;
      onMouseReleased(e);
      // Perform only when released while still over the button (rollover active).
      if (myRollover) {
        performAction(e);
      }
      repaint();
      break;
    case MouseEvent.MOUSE_ENTERED:
      // Ignore rollover while another button's press is being dragged over this one.
      if (!myMouseDown && ourGlobalMouseDown) break;
      myRollover = true;
      repaint();
      onMousePresenceChanged(true);
      break;
    case MouseEvent.MOUSE_EXITED:
      myRollover = false;
      if (!myMouseDown && ourGlobalMouseDown) break;
      repaint();
      onMousePresenceChanged(false);
      break;
  }
}

protected void resetMouseState() {
  myMouseDown = false;
  ourGlobalMouseDown = false;
}

protected void onMouseReleased(@NotNull MouseEvent e) {
  resetMouseState();
  // Extension point
}

protected void onMousePressed(@NotNull MouseEvent e) {
  // Extension point
}

// Only plain left-button presses may trigger the action.
private static boolean checkSkipPressForEvent(@NotNull MouseEvent e) {
  return e.isMetaDown() || e.getButton() != MouseEvent.BUTTON1;
}

// Maps the current mouse/focus state to one of the ActionButtonComponent pop states.
private int getPopState(boolean isPushed) {
  if (isPushed || myRollover && myMouseDown && isEnabled()) {
    return PUSHED;
  }
  else if (myRollover && isEnabled()) {
    return POPPED;
  }
  else if (isFocusOwner()) {
    return SELECTED;
  }
  else {
    return NORMAL;
  }
}

@Override
public @NotNull AnAction getAction() {
  return myAction;
}

// Reacts to presentation changes while showing (listener installed in addNotify()).
protected void presentationPropertyChanged(@NotNull PropertyChangeEvent e) {
  @NonNls String propertyName = e.getPropertyName();
  if (Presentation.PROP_TEXT.equals(propertyName) || Presentation.PROP_DESCRIPTION.equals(propertyName)) {
    updateToolTipText();
  }
  else if (Presentation.PROP_ENABLED.equals(propertyName) || Presentation.PROP_ICON.equals(propertyName)) {
    updateIcon();
    repaint();
  }
  else if
      (Presentation.PROP_DISABLED_ICON.equals(propertyName)) {
    myDisabledIcon = myPresentation.getDisabledIcon();
    repaint();
  }
  else if ("selected".equals(propertyName)) {
    repaint();
  }
  else if (HIDE_DROPDOWN_ICON.toString().equals(propertyName)) {
    repaint();
  }
}

// Accessibility

@Override
@NotNull
public AccessibleContext getAccessibleContext() {
  // Lazily create and cache the accessible context (same field JComponent uses).
  AccessibleContext context = accessibleContext;
  if (context == null) {
    accessibleContext = context = new AccessibleActionButton();
  }
  return context;
}

// Accessible implementation exposing the button as a push button with a single "click" action.
protected class AccessibleActionButton extends JComponent.AccessibleJComponent implements AccessibleAction {
  protected AccessibleActionButton() {
  }

  @Override
  public AccessibleRole getAccessibleRole() {
    return AccessibleRole.PUSH_BUTTON;
  }

  // Name fallback chain: explicit name -> client property -> tooltip -> presentation text -> default.
  @Override
  public String getAccessibleName() {
    String name = accessibleName;
    if (name == null) {
      name = (String)getClientProperty(ACCESSIBLE_NAME_PROPERTY);
      if (name == null) {
        name = ActionButton.this.getToolTipText();
        if (name == null) {
          name = myPresentation.getText();
          if (name == null) {
            name = super.getAccessibleName();
          }
        }
      }
    }
    return name;
  }

  @Override
  public String getAccessibleDescription() {
    return AccessibleContextUtil.getUniqueDescription(this, super.getAccessibleDescription());
  }

  // Returns the icon's accessible representation, or null when the icon is not Accessible
  // (null is the AccessibleContext convention for "no accessible icons").
  @Override
  public AccessibleIcon[] getAccessibleIcon() {
    Icon icon = getIcon();
    if (icon instanceof Accessible) {
      AccessibleContext context = ((Accessible)icon).getAccessibleContext();
      if (context instanceof AccessibleIcon) {
        return new AccessibleIcon[]{(AccessibleIcon)context};
      }
    }
    return null;
  }

  @Override
  public AccessibleStateSet getAccessibleStateSet() {
    AccessibleStateSet var1 = super.getAccessibleStateSet();
    setCustomAccessibleStateSet(var1);
    return var1;
  }

  // Adds PRESSED/CHECKED/FOCUSED states derived from the button's pop state and focus.
  protected void setCustomAccessibleStateSet(@NotNull AccessibleStateSet accessibleStateSet) {
    int state = getPopState();
    // TODO: Not sure what the "POPPED" state represents
    //if (state == POPPED) {
    //  var1.add(AccessibleState.?);
    //}
    if (state == ActionButtonComponent.PUSHED) {
      accessibleStateSet.add(AccessibleState.PRESSED);
    }
    if (state == ActionButtonComponent.SELECTED) {
      accessibleStateSet.add(AccessibleState.CHECKED);
    }
    if (isFocusOwner()) {
      accessibleStateSet.add(AccessibleState.FOCUSED);
    }
  }

  @Override
  public AccessibleAction getAccessibleAction() {
    return this;
  }

  // Implements AccessibleAction

  // Exactly one accessible action is exposed: "click" (index 0).
  @Override
  public int getAccessibleActionCount() {
    return 1;
  }

  @Override
  public String getAccessibleActionDescription(int index) {
    return index == 0 ? UIManager.getString("AbstractButton.clickText") : null;
  }

  // Performs the button click for index 0; all other indices are rejected.
  @Override
  public boolean doAccessibleAction(int index) {
    if (index == 0) {
      click();
      return true;
    }
    return false;
  }
}
}
apache-2.0