repo_name
stringlengths
5
108
path
stringlengths
6
333
size
stringlengths
1
6
content
stringlengths
4
977k
license
stringclasses
15 values
Rikkola/kie-wb-common
kie-wb-common-stunner/kie-wb-common-stunner-sets/kie-wb-common-stunner-bpmn/kie-wb-common-stunner-bpmn-marshalling/src/main/java/org/kie/workbench/common/stunner/bpmn/client/marshall/converters/tostunner/properties/MultipleInstanceActivityPropertyReader.java
9924
/*
 * Copyright 2019 Red Hat, Inc. and/or its affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.kie.workbench.common.stunner.bpmn.client.marshall.converters.tostunner.properties;

import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;

import org.eclipse.bpmn2.Activity;
import org.eclipse.bpmn2.BaseElement;
import org.eclipse.bpmn2.DataAssociation;
import org.eclipse.bpmn2.DataInput;
import org.eclipse.bpmn2.DataInputAssociation;
import org.eclipse.bpmn2.DataOutput;
import org.eclipse.bpmn2.DataOutputAssociation;
import org.eclipse.bpmn2.FormalExpression;
import org.eclipse.bpmn2.ItemAwareElement;
import org.eclipse.bpmn2.MultiInstanceLoopCharacteristics;
import org.eclipse.bpmn2.Property;
import org.eclipse.bpmn2.di.BPMNDiagram;
import org.kie.workbench.common.stunner.bpmn.client.marshall.converters.tostunner.DefinitionResolver;
import org.kie.workbench.common.stunner.bpmn.client.marshall.converters.util.FormalExpressionBodyHandler;

/**
 * Property reader for BPMN activities that may carry multi-instance (MI) loop
 * characteristics. It exposes the MI-specific properties (collection input /
 * output, per-iteration data item names, completion condition, sequential
 * flag) and filters the MI plumbing (the loop data-input/output refs and the
 * per-iteration variable data items) out of the plain data I/O lists that the
 * superclass reports.
 */
public class MultipleInstanceActivityPropertyReader extends ActivityPropertyReader {

    public MultipleInstanceActivityPropertyReader(Activity activity, BPMNDiagram diagram, DefinitionResolver definitionResolver) {
        super(activity, diagram, definitionResolver);
    }

    /** @return true when the activity has a loop-characteristics element (cast to MI by this reader). */
    public boolean isMultipleInstance() {
        return getMultiInstanceLoopCharacteristics().isPresent();
    }

    /**
     * Resolves the name of the process variable used as the MI input
     * collection: the source of the data-input association whose target is the
     * loopDataInputRef. Returns null when no such association exists.
     */
    public String getCollectionInput() {
        String ieDataInputId = getLoopDataInputRefId();
        return super.getDataInputAssociations().stream()
                .filter(dia -> hasTargetRef(dia, ieDataInputId))
                .filter(MultipleInstanceActivityPropertyReader::hasSourceRefs)
                .map(dia -> ItemNameReader.from(dia.getSourceRef().get(0)).getName())
                .findFirst()
                .orElse(null);
    }

    /**
     * Resolves the name of the process variable used as the MI output
     * collection: the target of the data-output association whose source is
     * the loopDataOutputRef. Returns null when no such association exists.
     */
    public String getCollectionOutput() {
        String ieDataOutputId = getLoopDataOutputRefId();
        return super.getDataOutputAssociations().stream()
                .filter(doa -> hasSourceRef(doa, ieDataOutputId))
                .map(doa -> ItemNameReader.from(doa.getTargetRef()).getName())
                .findFirst()
                .orElse(null);
    }

    /**
     * Name of the per-iteration input data item; falls back to its id when the
     * name is unset, and to "" when there is no MI input data item at all.
     */
    public String getDataInput() {
        return getMultiInstanceLoopCharacteristics()
                .map(MultiInstanceLoopCharacteristics::getInputDataItem)
                .map(d -> Optional.ofNullable(d.getName()).orElse(d.getId()))
                .orElse("");
    }

    /**
     * Name of the per-iteration output data item; falls back to its id when
     * the name is unset, and to "" when there is no MI output data item.
     */
    public String getDataOutput() {
        return getMultiInstanceLoopCharacteristics()
                .map(MultiInstanceLoopCharacteristics::getOutputDataItem)
                .map(d -> Optional.ofNullable(d.getName()).orElse(d.getId()))
                .orElse("");
    }

    /**
     * Body of the MI completion-condition expression, or "" when absent.
     * NOTE(review): the cast assumes the condition is always authored as a
     * FormalExpression — confirm against the marshaller that produces it.
     */
    public String getCompletionCondition() {
        return getMultiInstanceLoopCharacteristics()
                .map(miloop -> (FormalExpression) miloop.getCompletionCondition())
                .map(fe -> FormalExpressionBodyHandler.of(fe).getBody())
                .orElse("");
    }

    /** @return true when the MI instances execute sequentially; false when parallel or not MI. */
    public boolean isSequential() {
        return getMultiInstanceLoopCharacteristics()
                .map(MultiInstanceLoopCharacteristics::isIsSequential)
                .orElse(false);
    }

    // Unconditional cast: a non-MI LoopCharacteristics here would throw
    // ClassCastException rather than yield empty — presumably upstream only
    // ever emits MultiInstanceLoopCharacteristics for these activities.
    private Optional<MultiInstanceLoopCharacteristics> getMultiInstanceLoopCharacteristics() {
        return Optional.ofNullable((MultiInstanceLoopCharacteristics) activity.getLoopCharacteristics());
    }

    private static String getVariableName(Property property) {
        return ProcessVariableReader.getProcessVariableName(property);
    }

    /**
     * Data inputs excluding the MI plumbing (per-iteration variable input and
     * the loop collection input); unchanged when the activity is not MI.
     */
    @Override
    protected List<DataInput> getDataInputs() {
        if (getMultiInstanceLoopCharacteristics().isPresent()) {
            String dataInputIdForInputVariable = getDataInputIdForDataInputVariable();
            String dataInputIdForInputCollection = getLoopDataInputRefId();
            return super.getDataInputs().stream()
                    .filter(di -> !di.getId().equals(dataInputIdForInputVariable))
                    .filter(di -> !di.getId().equals(dataInputIdForInputCollection))
                    .collect(Collectors.toList());
        }
        return super.getDataInputs();
    }

    /**
     * Data outputs excluding the MI plumbing (per-iteration variable output
     * and the loop collection output); unchanged when the activity is not MI.
     */
    @Override
    protected List<DataOutput> getDataOutputs() {
        if (getMultiInstanceLoopCharacteristics().isPresent()) {
            String dataOuputIdForOutputVariable = getDataOutputIdForDataOutputVariable();
            String dataOutputIdForCollection = getLoopDataOutputRefId();
            return super.getDataOutputs().stream()
                    .filter(dout -> !dout.getId().equals(dataOuputIdForOutputVariable))
                    .filter(dout -> !dout.getId().equals(dataOutputIdForCollection))
                    .collect(Collectors.toList());
        }
        return super.getDataOutputs();
    }

    /** Input associations excluding those that feed the MI variable/collection inputs. */
    @Override
    protected List<DataInputAssociation> getDataInputAssociations() {
        if (getMultiInstanceLoopCharacteristics().isPresent()) {
            String dataInputIdForInputVariable = getDataInputIdForDataInputVariable();
            String dataInputIdForInputCollection = getLoopDataInputRefId();
            return super.getDataInputAssociations().stream()
                    .filter(dia -> !hasTargetRef(dia, dataInputIdForInputVariable))
                    .filter(dia -> !hasTargetRef(dia, dataInputIdForInputCollection))
                    .collect(Collectors.toList());
        }
        return super.getDataInputAssociations();
    }

    /** Output associations excluding those fed by the MI variable/collection outputs. */
    @Override
    protected List<DataOutputAssociation> getDataOutputAssociations() {
        if (getMultiInstanceLoopCharacteristics().isPresent()) {
            String dataOutputIdForOutputVariable = getDataOutputIdForDataOutputVariable();
            String dataOutputIdForOutputCollection = getLoopDataOutputRefId();
            return super.getDataOutputAssociations().stream()
                    .filter(doa -> !hasSourceRef(doa, dataOutputIdForOutputVariable))
                    .filter(doa -> !hasSourceRef(doa, dataOutputIdForOutputCollection))
                    .collect(Collectors.toList());
        }
        return super.getDataOutputAssociations();
    }

    /**
     * Finds the id of the DataInput that backs the MI per-iteration input
     * variable, matched by name AND itemSubjectRef; null when the activity is
     * not MI or no matching DataInput exists.
     */
    protected String getDataInputIdForDataInputVariable() {
        String dataInputVariableId = null;
        DataInput variableDataInput = getMultiInstanceLoopCharacteristics()
                .map(MultiInstanceLoopCharacteristics::getInputDataItem)
                .orElse(null);
        if (variableDataInput != null) {
            String itemSubjectRef = getItemSubjectRef(variableDataInput);
            String variableId = ItemNameReader.from(variableDataInput).getName();
            dataInputVariableId = super.getDataInputs().stream()
                    .filter(input -> Objects.equals(variableId, input.getName()))
                    .filter(input -> hasItemSubjectRef(input, itemSubjectRef))
                    .map(BaseElement::getId)
                    .findFirst().orElse(null);
        }
        return dataInputVariableId;
    }

    /**
     * Finds the id of the DataOutput that backs the MI per-iteration output
     * variable, matched by name AND itemSubjectRef; null when not found.
     */
    protected String getDataOutputIdForDataOutputVariable() {
        String dataOutputVariableId = null;
        DataOutput variableDataOutput = getMultiInstanceLoopCharacteristics()
                .map(MultiInstanceLoopCharacteristics::getOutputDataItem)
                .orElse(null);
        if (variableDataOutput != null) {
            String itemSubjectRef = getItemSubjectRef(variableDataOutput);
            String variableId = ItemNameReader.from(variableDataOutput).getName();
            dataOutputVariableId = super.getDataOutputs().stream()
                    .filter(output -> Objects.equals(variableId, output.getName()))
                    .filter(output -> hasItemSubjectRef(output, itemSubjectRef))
                    .map(BaseElement::getId)
                    .findFirst().orElse(null);
        }
        return dataOutputVariableId;
    }

    /** Id of the MI loopDataInputRef element, or null when absent. */
    protected String getLoopDataInputRefId() {
        return getMultiInstanceLoopCharacteristics()
                .map(MultiInstanceLoopCharacteristics::getLoopDataInputRef)
                .map(ItemAwareElement::getId)
                .orElse(null);
    }

    /** Id of the MI loopDataOutputRef element, or null when absent. */
    protected String getLoopDataOutputRefId() {
        return getMultiInstanceLoopCharacteristics()
                .map(MultiInstanceLoopCharacteristics::getLoopDataOutputRef)
                .map(ItemAwareElement::getId)
                .orElse(null);
    }

    static boolean hasSourceRefs(DataAssociation dataAssociation) {
        return dataAssociation.getSourceRef() != null && !dataAssociation.getSourceRef().isEmpty();
    }

    // Only the FIRST source ref is compared — associations here are presumably
    // single-source; multi-source transformations would not match.
    static boolean hasSourceRef(DataAssociation dataAssociation, String id) {
        return hasSourceRefs(dataAssociation) && Objects.equals(dataAssociation.getSourceRef().get(0).getId(), id);
    }

    static boolean hasTargetRef(DataAssociation dataAssociation, String id) {
        return dataAssociation.getTargetRef() != null && Objects.equals(dataAssociation.getTargetRef().getId(), id);
    }

    static boolean hasItemSubjectRef(ItemAwareElement element, String itemSubjectRef) {
        return element.getItemSubjectRef() != null && Objects.equals(element.getItemSubjectRef().getId(), itemSubjectRef);
    }

    /** @return id of the element's itemSubjectRef, or null when none is set. */
    static String getItemSubjectRef(ItemAwareElement element) {
        return element.getItemSubjectRef() != null ? element.getItemSubjectRef().getId() : null;
    }
}
apache-2.0
opetrovski/development
oscm-portal/javasrc/org/oscm/ui/beans/ApplicationBean.java
28434
/*******************************************************************************
 *
 *  Copyright FUJITSU LIMITED 2017
 *
 *  Creation Date: 18.02.2009
 *
 *******************************************************************************/

package org.oscm.ui.beans;

import java.io.File;
import java.io.InputStream;
import java.io.Serializable;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import java.util.TimeZone;

import javax.faces.application.FacesMessage;
import javax.faces.bean.ManagedBean;
import javax.faces.bean.SessionScoped;
import javax.faces.context.FacesContext;
import javax.faces.model.SelectItem;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpSession;

import org.oscm.logging.Log4jLogger;
import org.oscm.logging.LoggerFactory;
import org.oscm.converter.DateConverter;
import org.oscm.converter.PropertiesLoader;
import org.oscm.types.constants.Configuration;
import org.oscm.types.enumtypes.LogMessageIdentifier;
import org.oscm.ui.common.Constants;
import org.oscm.ui.common.JSFUtils;
import org.oscm.ui.common.LocaleUtils;
import org.oscm.ui.common.ServiceAccess;
import org.oscm.ui.common.UiDelegate;
import org.oscm.validator.ADMValidator;
import org.oscm.internal.intf.ConfigurationService;
import org.oscm.internal.intf.IdentityService;
import org.oscm.internal.operatorservice.ManageLanguageService;
import org.oscm.internal.operatorservice.POSupportedLanguage;
import org.oscm.internal.subscriptions.POSubscriptionAndCustomer;
import org.oscm.internal.types.enumtypes.AuthenticationMode;
import org.oscm.internal.types.enumtypes.ConfigurationKey;
import org.oscm.internal.types.exception.ObjectNotFoundException;
import org.oscm.internal.types.exception.SaaSSystemException;
import org.oscm.internal.vo.VOConfigurationSetting;
import org.oscm.internal.vo.VOUserDetails;

/**
 * Managed bean which provides some field settings to the view elements.
 * Session-scoped; most configuration-derived values (time zone, base URLs,
 * hidden UI elements, auth mode) are looked up lazily and cached in fields.
 */
@ManagedBean(name="appBean")
@SessionScoped
public class ApplicationBean implements Serializable {

    public static final String FCIP_BRANDING_PACKAGE = "fcip-branding";
    public static final String FCIP_CONTEXT_PATH_ROOT = "/"
            + FCIP_BRANDING_PACKAGE;
    public static final String APPLICATIONS_ROOT_FOLDER = "applications";

    UiDelegate ui = new UiDelegate();

    private static final long serialVersionUID = -4465290515626464652L;

    private static final Log4jLogger logger = LoggerFactory
            .getLogger(ApplicationBean.class);

    // lazily initialized from the manifest; null means "not read yet"
    private String buildId = null;
    private String buildDate = null;
    // lazily initialized from configuration; null means "not read yet"
    private String timeZoneId = null;
    private String oldUserLocale = "";
    private boolean errorPanelForLocaleShow = false;
    private static final String SIMPLE_DATE_PATTERN = "yyyy-MM-dd z";
    private String requestContextPath = "";

    /** Configuration service instance. As member used for JUnit for stubbing. */
    private transient ConfigurationService configurationService = null;
    private transient ManageLanguageService manageLanguageService = null;
    transient IdentityService identityService = null;

    /**
     * List of menus and groups of fields in dialogs, which are hidden.
     */
    private Map<String, Boolean> hiddenUIElements = null;

    /**
     * Cached boolean flag if reporting is available or not.
     */
    private Boolean reportingAvailable = null;

    /**
     * Cached boolean flag if auth mode is internal.
     */
    private Boolean internalAuthMode = null;

    /**
     * The server base URL to be used for building the service access URL.
     */
    private String serverBaseUrl = null;

    /**
     * The https server base URL to be used for building the service access URL.
     */
    private String serverBaseUrlHttps = null;

    /**
     * The interval in milliseconds between the previous response and the next
     * request of the a4j:poll component.
     */
    private Long interval = null;

    private Boolean samlSpAuthMode = null;

    /**
     * Read the build id and date from the ear manifest. No-op when already
     * initialized; on any parse/read problem leaves the defaults "-1"/"".
     */
    private void initBuildIdAndDate() {
        if (buildId != null) {
            return;
        }
        buildId = "-1";
        buildDate = "";

        // read the implementation version property from the war manifest
        final InputStream in = FacesContext.getCurrentInstance()
                .getExternalContext()
                .getResourceAsStream("/META-INF/MANIFEST.MF");
        String str = null;
        if (in != null) {
            final Properties prop = PropertiesLoader.loadProperties(in);
            str = prop.getProperty("Implementation-Version");
        }
        if (str == null) {
            return;
        }

        // parse the implementation version; expected form "<id>-<yyyyMMddHHmmss>"
        final int sep = str.lastIndexOf("-");
        buildId = str.substring(0, sep);
        SimpleDateFormat inFormat = new SimpleDateFormat("yyyyMMddHHmmss");
        SimpleDateFormat outFormat = new SimpleDateFormat("yyyy/MM/dd");
        try {
            buildDate = outFormat
                    .format(inFormat.parse(str.substring(sep + 1)));
        } catch (ParseException e) {
            logger.logError(Log4jLogger.SYSTEM_LOG, e,
                    LogMessageIdentifier.ERROR_FORMATTING_BUILD_DATE);
        }
    }

    public void setSamlSpAuthMode(Boolean samlSpAuthMode) {
        this.samlSpAuthMode = samlSpAuthMode;
    }

    /**
     * Enum for date patterns in the resource bundle message files.
     */
    public static enum DatePatternEnum {
        DATE_PATTERN("datePattern"), DATE_INPUT_PATTERN("dateInputPattern"), DATE_TIME_PATTERN(
                "dateTimePattern");

        private final String messageKey;

        private DatePatternEnum(String messageKey) {
            this.messageKey = messageKey;
        }

        /**
         * This is the key used for the resource bundle message files.
         */
        public String getMessageKey() {
            return messageKey;
        }
    }

    public String getDatePattern() {
        return DatePatternEnum.DATE_PATTERN.getMessageKey();
    }

    public String getDateInputPattern() {
        return DatePatternEnum.DATE_INPUT_PATTERN.getMessageKey();
    }

    public String getDateTimePattern() {
        return DatePatternEnum.DATE_TIME_PATTERN.getMessageKey();
    }

    /**
     * Get the length for a field which contains an id.
     *
     * @return the length for a field which contains an id.
     */
    public int getIdLen() {
        return ADMValidator.LENGTH_ID;
    }

    /**
     * Get the length for a field which contains an user id.
     *
     * @return the length for a field which contains an user id.
     */
    public int getUserIdLen() {
        return ADMValidator.LENGTH_USERID;
    }

    /**
     * Get the length for a field which contains a name.
     *
     * @return the length for a field which contains a name.
     */
    public int getNameLen() {
        return ADMValidator.LENGTH_NAME;
    }

    public int getTenantFieldLen() {
        return ADMValidator.LENGTH_TENANT_FIELD;
    }

    /**
     * Get the length for a field which contains a description.
     *
     * @return the length for a field which contains a description.
     */
    public int getDescriptionLen() {
        return ADMValidator.LENGTH_DESCRIPTION;
    }

    /**
     * Get the length for a field which contains a reference ID.
     *
     * @return the length for a field which contains a reference ID.
     */
    public int getReferenceIdLen() {
        return ADMValidator.LENGTH_REFERENCE_ID;
    }

    /**
     * Get the length for a field which contains a group name.
     *
     * @return the length for a field which contains a group name.
     */
    public int getGroupNameLen() {
        return ADMValidator.LENGTH_USER_GROUP_NAME;
    }

    /**
     * Get the length for a field which contains a percent value.
     *
     * @return the length for a field which contains a percent value.
     */
    public int getPercentValueLen() {
        return ADMValidator.LENGTH_PERCENT_VALUE;
    }

    public int getDNLen() {
        return ADMValidator.LENGTH_DN;
    }

    public int getIntLen() {
        return ADMValidator.LENGTH_INT;
    }

    public int getLongLen() {
        return ADMValidator.LENGTH_LONG;
    }

    /**
     * Get the length for a field which contains a discount period.
     *
     * @return the length for a field which contains a discount period.
     */
    public int getDiscountPeriodLen() {
        return ADMValidator.LENGTH_DISCOUNT_PERIOD;
    }

    /**
     * @return the active language codes that are also supported locales
     *         (intersection of the two lists).
     */
    public List<String> getActiveLocales() {
        List<String> list = LocaleUtils.getSupportedLocales();
        List<String> activeLanguages = getActiveLanguageCode();
        activeLanguages.retainAll(list);
        return activeLanguages;
    }

    /**
     * @return one SelectItem per active locale, labeled with the language name
     *         translated into the current view locale.
     */
    public List<SelectItem> getAvailableLanguageItems() {
        List<SelectItem> availableLanguageItems = new ArrayList<SelectItem>();
        for (String isoCode : getActiveLocales()) {
            SelectItem selectItem = new SelectItem();
            Locale languageLocale = new Locale(isoCode);
            String translatedLocale = languageLocale.getDisplayLanguage(ui
                    .getViewLocale());
            selectItem.setLabel(translatedLocale);
            selectItem.setValue(isoCode);
            availableLanguageItems.add(selectItem);
        }
        return availableLanguageItems;
    }

    /**
     * Validates a locale against the active ones; adds a warning faces
     * message when invalid.
     *
     * @return true when the locale is active (or either argument is null).
     */
    public boolean checkLocaleValidation(String locale) {
        List<String> locales = getActiveLocales();
        boolean isValid = true;
        if (locales != null && locale != null) {
            if (!locales.contains(locale)) {
                isValid = false;
                addMessage(null, FacesMessage.SEVERITY_WARN,
                        BaseBean.WARNING_SUPPORTEDLANGUAGE_LOCALE_INVALID,
                        new Locale(locale).getDisplayLanguage(ui
                                .getViewLocale()));
            }
        }
        return isValid;
    }

    /**
     * Synchronizes the current user's locale with the active language list:
     * falls back to the default locale (and arms the error panel) when the
     * user's locale became inactive; refreshes the session user when the
     * locale changed. Always returns "" (JSF action outcome: stay on page).
     */
    public String getUserLocaleUpdated() {
        loadIdentityService();
        VOUserDetails voUser = identityService.getCurrentUserDetailsIfPresent();
        VOUserDetails voUserInSession = this
                .getUserFromSessionWithoutException();
        this.errorPanelForLocaleShow = false;
        if (voUser != null && voUser.getLocale() != null
                && voUserInSession != null
                && voUserInSession.getLocale() != null) {
            List<String> locales = getActiveLocales();
            if (locales != null && !locales.isEmpty()) {
                this.oldUserLocale = voUser.getLocale();
                if (!locales.contains(voUser.getLocale())) {
                    voUser.setLocale(getDefaultLocale().getLanguage());
                    setUserInSession(voUser);
                    this.errorPanelForLocaleShow = true;
                } else {
                    if (!oldUserLocale.equalsIgnoreCase(voUserInSession
                            .getLocale())) {
                        setUserInSession(voUser);
                    }
                }
            }
        }
        return "";
    }

    /** @return the previous user locale, displayed in the current view locale. */
    public String getOldUserLocale() {
        return new Locale(this.oldUserLocale).getDisplayLanguage(ui
                .getViewLocale());
    }

    public boolean getErrorPanelForLocaleShow() {
        return errorPanelForLocaleShow;
    }

    public Iterator<Locale> getSupportedLocalesIterator() {
        return getSupportedLocaleList().iterator();
    }

    /**
     * @return the JSF-supported locales restricted to the active language
     *         codes.
     */
    public List<Locale> getSupportedLocaleList() {
        List<String> languageIds = getActiveLanguageCode();
        List<Locale> list = new ArrayList<Locale>();
        Iterator<Locale> it = getFacesContext().getApplication()
                .getSupportedLocales();
        while (it.hasNext()) {
            Locale locale = it.next();
            if (languageIds.contains(locale.getLanguage())) {
                list.add(locale);
            }
        }
        return list;
    }

    /** @return ISO codes of all active languages from the language service. */
    private List<String> getActiveLanguageCode() {
        getManageLanguageService();
        List<String> languageIds = new ArrayList<String>();
        List<POSupportedLanguage> languages = manageLanguageService
                .getLanguages(true);
        if (languages != null && languages.size() > 0) {
            for (POSupportedLanguage lanugage : languages) {
                languageIds.add(lanugage.getLanguageISOCode());
            }
        }
        return languageIds;
    }

    /**
     * @return the configured default language as a Locale, falling back to
     *         English when unset or not found.
     */
    public Locale getDefaultLocale() {
        getManageLanguageService();
        String defaultLanguageISOCode;
        try {
            defaultLanguageISOCode = manageLanguageService.getDefaultLanguage();
        } catch (ObjectNotFoundException e) {
            logger.logError(Log4jLogger.SYSTEM_LOG, e,
                    LogMessageIdentifier.ERROR_DEFAULT_LANGUAGE_NOT_FOUND,
                    new String[] {});
            return Locale.ENGLISH;
        }
        if (defaultLanguageISOCode != null) {
            return new Locale(defaultLanguageISOCode);
        }
        return Locale.ENGLISH;
    }

    public String getServiceBaseUri() {
        return Constants.SERVICE_BASE_URI;
    }

    public String getBuildId() {
        initBuildIdAndDate();
        return buildId;
    }

    public String getBuildDate() {
        initBuildIdAndDate();
        return buildDate;
    }

    public void setInternalAuthMode(Boolean internalAuthMode) {
        this.internalAuthMode = internalAuthMode;
    }

    /**
     * Creates an identifier based on the current time.
     *
     * @return the current time in milliseconds converted to a hex string
     */
    public String getRandomId() {
        return Long.toHexString(System.currentTimeMillis());
    }

    /**
     * Tests whether the UI element with the given ID is generally hidden in
     * this installation.
     *
     * @param id
     *            id of the UI element
     * @return <code>true</code>, if the element should be hidden
     */
    public boolean isUIElementHidden(String id) {
        final Map<String, Boolean> tmpSet = getHiddenUIElements();
        return tmpSet.containsKey(id);
    }

    /**
     * Getter for hidden UI elements. Initialize the set only the first
     * invocation time.
     *
     * @return Map of hidden UI elements.
     */
    public Map<String, Boolean> getHiddenUIElements() {
        // initialize only the first invocation time
        if (hiddenUIElements == null) {
            hiddenUIElements = new HashMap<String, Boolean>();
            lookupConfigurationService();
            VOConfigurationSetting hiddenUIElementsConf = configurationService
                    .getVOConfigurationSetting(
                            ConfigurationKey.HIDDEN_UI_ELEMENTS,
                            Configuration.GLOBAL_CONTEXT);
            if (hiddenUIElementsConf != null) {
                // comma-separated element ids; blanks are skipped
                String strHiddenUIElementsConf = hiddenUIElementsConf
                        .getValue();
                if (strHiddenUIElementsConf != null) {
                    String[] results = strHiddenUIElementsConf.split(",");
                    for (String str : results) {
                        String trimmedStr = str.trim();
                        if (!trimmedStr.equals("")) {
                            hiddenUIElements.put(trimmedStr, Boolean.FALSE);
                        }
                    }
                }
            }
        }
        return hiddenUIElements;
    }

    /**
     * Initialize the {@link ConfigurationService} if not already done.
     */
    private void lookupConfigurationService() {
        if (configurationService == null) {
            configurationService = ServiceAccess.getServiceAcccessFor(
                    JSFUtils.getRequest().getSession()).getService(
                    ConfigurationService.class);
        }
    }

    /**
     * Determines if payment info should be visible in the marketplace.
     *
     * @return true - if payment info should be visible, false - otherwise
     */
    public boolean isPaymentInfoAvailable() {
        lookupConfigurationService();
        return configurationService.isPaymentInfoAvailable();
    }

    /**
     * Checks if the reporting is available. This is the case if the
     * {@link ConfigurationKey#REPORT_ENGINEURL} is set to a non empty value.
     *
     * @return <code>true</code> if reporting is available otherwise
     *         <code>false</code>.
     */
    public boolean isReportingAvailable() {
        if (reportingAvailable == null) {
            lookupConfigurationService();
            VOConfigurationSetting reportEngineUrl = configurationService
                    .getVOConfigurationSetting(
                            ConfigurationKey.REPORT_ENGINEURL,
                            Configuration.GLOBAL_CONTEXT);
            reportingAvailable = Boolean.valueOf(reportEngineUrl != null
                    && reportEngineUrl.getValue() != null
                    && reportEngineUrl.getValue().trim().length() > 0);
        }
        return reportingAvailable.booleanValue();
    }

    /**
     * Checks if the {@link ConfigurationKey#AUTH_MODE} is set to INTERNAL.
     *
     * @return <code>true</code> if AUTH_MODE is set to INTERNAL otherwise
     *         <code>false</code>.
     */
    public boolean isInternalAuthMode() {
        if (internalAuthMode == null) {
            lookupConfigurationService();
            VOConfigurationSetting authMode = configurationService
                    .getVOConfigurationSetting(ConfigurationKey.AUTH_MODE,
                            Configuration.GLOBAL_CONTEXT);
            internalAuthMode = Boolean.valueOf(authMode.getValue().equals(
                    AuthenticationMode.INTERNAL.name()));
        }
        return internalAuthMode.booleanValue();
    }

    /**
     * Checks if the {@link ConfigurationKey#AUTH_MODE} is set to SAML_SP.
     *
     * @return <code>true</code> if AUTH_MODE is set to SAML_SP otherwise
     *         <code>false</code>.
     */
    public boolean isSamlSpAuthMode() {
        if (samlSpAuthMode == null) {
            lookupConfigurationService();
            VOConfigurationSetting authMode = configurationService
                    .getVOConfigurationSetting(ConfigurationKey.AUTH_MODE,
                            Configuration.GLOBAL_CONTEXT);
            samlSpAuthMode = Boolean.valueOf(authMode.getValue().equals(
                    AuthenticationMode.SAML_SP.name()));
        }
        return samlSpAuthMode.booleanValue();
    }

    /**
     * Setter for configuration service. Use it for JUnit for stubbing the EJB.
     *
     * @param configurationService
     *            Configuration
     */
    protected void setConfigurationService(
            ConfigurationService configurationService) {
        this.configurationService = configurationService;
    }

    /**
     * Reads the configured time zone from the server. If no information is
     * available, the default time zone will be used.
     *
     * @return the time zone id (see {@link TimeZone#getAvailableIDs()})
     */
    public String getTimeZoneId() {
        if (timeZoneId == null) {
            lookupConfigurationService();
            VOConfigurationSetting setting = configurationService
                    .getVOConfigurationSetting(ConfigurationKey.TIME_ZONE_ID,
                            Configuration.GLOBAL_CONTEXT);
            if (setting != null && setting.getValue() != null) {
                timeZoneId = TimeZone.getTimeZone(setting.getValue()).getID();
            } else {
                timeZoneId = "GMT";
            }
        }
        return timeZoneId;
    }

    /**
     * Returns the base URL configured on the server excluding the '/' at the
     * end.
     *
     * @return the server base URL
     */
    public String getServerBaseUrl() {
        if (serverBaseUrl == null) {
            lookupConfigurationService();
            VOConfigurationSetting setting = configurationService
                    .getVOConfigurationSetting(ConfigurationKey.BASE_URL,
                            Configuration.GLOBAL_CONTEXT);
            // fall back to the https base URL when the plain one is unset
            if (setting == null || setting.getValue() == null
                    || setting.getValue().length() == 0) {
                setting = configurationService.getVOConfigurationSetting(
                        ConfigurationKey.BASE_URL_HTTPS,
                        Configuration.GLOBAL_CONTEXT);
            }
            if (setting != null) {
                serverBaseUrl = getTailoredUrl(setting);
            }
        }
        return serverBaseUrl;
    }

    /**
     * Returns the base URL for https configured on the server excluding the '/'
     * at the end.
     *
     * @return the server base URL
     */
    public String getServerBaseUrlHttps() {
        if (serverBaseUrlHttps == null) {
            lookupConfigurationService();
            VOConfigurationSetting setting = configurationService
                    .getVOConfigurationSetting(ConfigurationKey.BASE_URL_HTTPS,
                            Configuration.GLOBAL_CONTEXT);
            if (setting != null) {
                serverBaseUrlHttps = getTailoredUrl(setting);
            }
        }
        return serverBaseUrlHttps;
    }

    /**
     * Removes a possible trailing '/' from the config setting URL.
     */
    private String getTailoredUrl(VOConfigurationSetting setting) {
        String url = setting.getValue();
        if (url != null && url.endsWith("/")) {
            url = url.substring(0, url.length() - 1);
        }
        return url;
    }

    /**
     * Resets values defined by configuration of BES - to be used after
     * modifying the configuration.
     */
    public void reset() {
        timeZoneId = null;
        serverBaseUrl = null;
        reportingAvailable = null;
        hiddenUIElements = null;
    }

    /**
     * @return the interval of keepAlive tag
     */
    public Long getInterval() {
        if (interval == null) {
            FacesContext ctx = getFacesContext();
            HttpSession httpSession = (HttpSession) ctx.getExternalContext()
                    .getSession(false);
            int maxInactiveInterval = httpSession.getMaxInactiveInterval();
            // To keep session alive, the interval value is 1 minute less than
            // session timeout.
            long intervalValue = (long) maxInactiveInterval * 1000 - 60000L;
            interval = Long.valueOf(intervalValue);
        }
        return interval;
    }

    /**
     * Setter for manage language service. Use it for JUnit for stubbing the
     * EJB.
     *
     * @param manageLanguageService
     *            the service stub
     */
    protected void setManageLanguageService(
            ManageLanguageService manageLanguageService) {
        this.manageLanguageService = manageLanguageService;
    }

    // allow stubbing
    protected FacesContext getFacesContext() {
        return FacesContext.getCurrentInstance();
    }

    /**
     * Initialize the {@link ManageLanguageService} if not already done.
     */
    private void getManageLanguageService() {
        if (manageLanguageService == null) {
            manageLanguageService = ServiceAccess.getServiceAcccessFor(
                    JSFUtils.getRequest().getSession()).getService(
                    ManageLanguageService.class);
        }
    }

    protected void addMessage(String clientId, FacesMessage.Severity severity,
            String key, String param) {
        JSFUtils.addMessage(clientId, severity, key, new Object[] { param });
    }

    /** Initialize the {@link IdentityService} if not already done. */
    private void loadIdentityService() {
        if (identityService == null) {
            identityService = ServiceAccess.getServiceAcccessFor(
                    JSFUtils.getRequest().getSession()).getService(
                    IdentityService.class);
        }
    }

    /** @return the user stored in the session, or null when none is present. */
    protected VOUserDetails getUserFromSessionWithoutException() {
        HttpServletRequest request = (HttpServletRequest) getFacesContext()
                .getExternalContext().getRequest();
        VOUserDetails voUserDetails = (VOUserDetails) request.getSession()
                .getAttribute(Constants.SESS_ATTR_USER);
        return voUserDetails;
    }

    /**
     * Stores the user in the session and re-verifies the view locale.
     *
     * @throws SaaSSystemException
     *             when the given user is null
     */
    protected void setUserInSession(VOUserDetails voUserDetails) {
        if (voUserDetails == null) {
            throw new SaaSSystemException("voUSerDetails must not be null!");
        }
        HttpServletRequest request = (HttpServletRequest) getFacesContext()
                .getExternalContext().getRequest();
        request.getSession().setAttribute(Constants.SESS_ATTR_USER,
                voUserDetails);
        JSFUtils.verifyViewLocale();
    }

    private boolean isBlank(final String str) {
        if (str == null) {
            return true;
        }
        return str.trim().length() == 0;
    }

    /**
     * @return the marketplace id from the request attribute, falling back to
     *         the session attribute; may be null/blank when neither is set.
     */
    public String getMarketplaceId() {
        HttpServletRequest request = (HttpServletRequest) getFacesContext()
                .getExternalContext().getRequest();
        String marketplaceId = (String) request
                .getAttribute(Constants.REQ_PARAM_MARKETPLACE_ID);
        if (isBlank(marketplaceId)) {
            HttpSession session = request.getSession(false);
            if (session != null) {
                marketplaceId = (String) session
                        .getAttribute(Constants.REQ_PARAM_MARKETPLACE_ID);
            }
        }
        return marketplaceId;
    }

    /** Converts all activation timestamps in the list in place. */
    public void convertActivationTimes(
            List<POSubscriptionAndCustomer> poSubscriptionAndCustomers) {
        for (POSubscriptionAndCustomer poSubscriptionAndCustomer : poSubscriptionAndCustomers) {
            convertActivationTime(poSubscriptionAndCustomer);
        }
    }

    // epoch-millis string -> formatted date in the configured time zone;
    // null becomes "", non-numeric/non-positive values are left unchanged
    private void convertActivationTime(POSubscriptionAndCustomer poSubscriptionAndCustomer) {
        String time = poSubscriptionAndCustomer.getActivation();
        if (time == null) {
            poSubscriptionAndCustomer.setActivation("");
        } else if (isLongValue(time)) {
            poSubscriptionAndCustomer.setActivation(DateConverter
                    .convertLongToDateTimeFormat(Long.valueOf(time)
                            .longValue(), TimeZone
                            .getTimeZone(getTimeZoneId()), SIMPLE_DATE_PATTERN));
        }
    }

    private boolean isLongValue(String value) {
        try {
            return (Long.parseLong(value) > 0);
        } catch (NumberFormatException ex) {
            return false;
        }
    }

    /** @return the branding context path, initialized lazily on first call. */
    public String getBrandingURL() {
        if (requestContextPath.isEmpty()) {
            initRequestContextPath();
        }
        return requestContextPath;
    }

    private void initRequestContextPath() {
        if (isFCIPBrandingPackageAvailable()) {
            // custom branding
            requestContextPath = FCIP_CONTEXT_PATH_ROOT;
        } else {
            // default branding
            requestContextPath = getFacesContext().getExternalContext()
                    .getRequestContextPath();
        }
    }

    // NOTE(review): reads "catalina.base" although surrounding code mentions
    // glassfish — presumably intentional for the deployed container; verify.
    boolean isFCIPBrandingPackageAvailable() {
        String glassfishRoot = ui.getSystemProperty("catalina.base");
        String brandingPackageFolder = glassfishRoot + File.separator
                + APPLICATIONS_ROOT_FOLDER + File.separator
                + FCIP_BRANDING_PACKAGE;
        return fileExists(brandingPackageFolder);
    }

    boolean fileExists(String pathname) {
        return new File(pathname).exists();
    }

    /** @return the message for the key, escaped for embedding in JavaScript. */
    public String getJSMessageByKey(String msgKey) {
        return convertText(ui.getText(msgKey, (Object[]) null));
    }

    // escapes double quotes; returns "" for null/blank input
    private String convertText(String text) {
        String str = text;
        if (null == str || str.trim().length() == 0) {
            return "";
        }
        if (str.indexOf("\"") > -1) {
            str = str.replaceAll("\"", "\\\\\"");
        }
        return str;
    }
}
apache-2.0
hopecee/texsts
samples/src/java/org/jpox/samples/inheritance/MSub1.java
1237
/**********************************************************************
Copyright (c) 2005 Andy Jefferson and others. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Contributors:
    ...
**********************************************************************/
package org.jpox.samples.inheritance;

/**
 * Sub class of an inheritance hierarchy, using "new-table" strategy and parent using "subclass-table".
 *
 * @version $Revision: 1.1 $
 */
public class MSub1 extends MBase
{
    // Value carried by this subclass. Kept package-visible and non-final:
    // presumably the field name is referenced by the persistence metadata of
    // this sample — verify before renaming or changing access.
    double value1;

    /**
     * Constructor.
     *
     * @param name name passed through to the base class
     * @param related related object passed through to the base class
     * @param value value stored in this subclass
     */
    public MSub1(String name, MRelated related, double value)
    {
        super(name, related);
        this.value1 = value;
    }

    /**
     * @return the value stored in this subclass
     */
    public double getValue()
    {
        return value1;
    }
}
apache-2.0
xloye/tddl5
tddl-sample/src/main/java/com/taobao/tddl/sample/WeidaSample.java
1694
package com.taobao.tddl.sample; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import com.taobao.tddl.client.jdbc.TDataSource; public class WeidaSample { public static void main(String[] args) throws Exception { com.taobao.tddl.client.jdbc.TDataSource ds = new TDataSource(); ds.setDynamicRule(true); // init a datasource with dynamic config on diamond ds.setAppName("SPU_CENTER_APP"); // Map cp = new HashMap(); // cp.put(ConnectionProperties.MERGE_CONCURRENT, "true"); // ds.setConnectionProperties(cp); ds.setAppRuleFile("weida.xml"); ds.init(); System.out.println("init done"); Connection conn = ds.getConnection(); // insert a record // conn.prepareStatement("replace into sample_table (id,name,address) values (1,'sun','hz')").executeUpdate(); System.out.println("insert done"); // select all records PreparedStatement ps = conn.prepareStatement("SELECT * from sample_table"); ResultSet rs = ps.executeQuery(); while (rs.next()) { StringBuilder sb = new StringBuilder(); int count = rs.getMetaData().getColumnCount(); for (int i = 1; i <= count; i++) { String key = rs.getMetaData().getColumnLabel(i); Object val = rs.getObject(i); sb.append("[" + rs.getMetaData().getTableName(i) + "." + key + "->" + val + "]"); } System.out.println(sb.toString()); } rs.close(); ps.close(); conn.close(); System.out.println("query done"); } }
apache-2.0
firejack-open/Firejack-Platform
platform/src/main/java/net/firejack/platform/service/registry/broker/entity/ReadEntitiesByPackageLookupBroker.java
5926
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package net.firejack.platform.service.registry.broker.entity;

import net.firejack.platform.api.registry.domain.Entity;
import net.firejack.platform.core.broker.ListBroker;
import net.firejack.platform.core.config.meta.utils.DiffUtils;
import net.firejack.platform.core.domain.SimpleIdentifier;
import net.firejack.platform.core.exception.BusinessFunctionException;
import net.firejack.platform.core.model.registry.RegistryNodeModel;
import net.firejack.platform.core.model.registry.RegistryNodeType;
import net.firejack.platform.core.model.registry.domain.EntityModel;
import net.firejack.platform.core.model.registry.domain.PackageModel;
import net.firejack.platform.core.model.registry.system.DatabaseModel;
import net.firejack.platform.core.request.ServiceRequest;
import net.firejack.platform.core.store.registry.IDomainStore;
import net.firejack.platform.core.store.registry.IEntityStore;
import net.firejack.platform.core.store.registry.IPackageStore;
import net.firejack.platform.core.store.registry.IRegistryNodeStore;
import net.firejack.platform.core.utils.StringUtils;
import net.firejack.platform.web.statistics.annotation.TrackDetails;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

import java.util.*;

/**
 * Broker that lists all ENTITY registry nodes found under a given package
 * lookup, decorating each result with its owning domain name and (when one
 * can be resolved) the data-source lookup, then sorting by domain name and
 * entity name.
 */
@TrackDetails
@Component("readEntitiesByPackageLookupBroker")
public class ReadEntitiesByPackageLookupBroker extends ListBroker<EntityModel, Entity, SimpleIdentifier<String>> {

    @Autowired
    private IPackageStore packageStore;

    @Autowired
    private IEntityStore entityStore;

    @Autowired
    private IDomainStore domainStore;

    @Autowired
    private IRegistryNodeStore registryNodeStore;

    // NOTE(review): this field is written in getModelList() and read later in
    // result() on the same request. Since @Component beans are singletons by
    // default, concurrent requests would race on it — confirm whether the
    // framework serializes broker invocations, or pass the lookup explicitly.
    private String packageLookup;

    /**
     * Resolves the package identified by the request's lookup string and
     * returns every ENTITY-typed registry node under that lookup prefix.
     *
     * @throws BusinessFunctionException if the lookup is null or no package
     *                                   exists for it
     */
    @Override
    protected List<EntityModel> getModelList(ServiceRequest<SimpleIdentifier<String>> request) throws BusinessFunctionException {
        packageLookup = request.getData().getIdentifier();
        if (packageLookup == null) {
            throw new BusinessFunctionException("Package Lookup can't be empty.");
        }
        PackageModel packageModel = packageStore.findPackage(packageLookup);
        if (packageModel == null) {
            throw new BusinessFunctionException("Could not find Package by lookup: " + packageLookup);
        }
        // Restrict the prefix search to ENTITY nodes only.
        Class[] registryNodeClasses = { RegistryNodeType.ENTITY.getClazz() };
        return entityStore.findAllByPrefixLookupAndTypes(packageLookup, getFilter(), registryNodeClasses);
    }

    /**
     * Post-processes the converted DTO list: attaches a "domainName" and,
     * where resolvable, a "dataSource" parameter to each entity, then sorts
     * by capitalized domain name with entity name as tiebreaker.
     */
    @Override
    protected List<Entity> result(List<Entity> dtoList, List<EntityModel> modelList) {
        List<Entity> entities = super.result(dtoList, modelList);
        // Map of lookup -> data source, used for the recursive ancestor search below.
        Map<String, DatabaseModel> dataSources = domainStore.findAllWithDataSourcesByPackageLookup(packageLookup);
        // Cache keyed by entity path so siblings don't repeat the parent walk.
        Map<String, String> cacheDomainNames = new HashMap<String, String>();
        for (Entity entity : entities) {
            String entityPath = entity.getPath();
            String domainName = cacheDomainNames.get(entityPath);
            if (StringUtils.isBlank(domainName)) {
                // Walk the parent chain root-first (hence the reverse) looking
                // for a DOMAIN ancestor.
                List<RegistryNodeModel> registryNodeModels = registryNodeStore.findAllParentsForEntityLookup(entity.getLookup());
                Collections.reverse(registryNodeModels);
                for (RegistryNodeModel registryNodeModel : registryNodeModels) {
                    if (RegistryNodeType.DOMAIN.equals(registryNodeModel.getType())) {
                        // NOTE(review): no break here, so with several DOMAIN
                        // ancestors the LAST one in iteration order wins —
                        // confirm that is the intended resolution rule.
                        domainName = registryNodeModel.getName();
                        cacheDomainNames.put(entityPath, domainName);
                    }
                }
            }
            Map<String, Object> parameters = new HashMap<String, Object>();
            parameters.put("domainName", StringUtils.capitalize(domainName));
            String path = entity.getPath();
            DatabaseModel databaseModel = findDataSourceEntity(dataSources, path);
            if (databaseModel != null) {
                parameters.put("dataSource", databaseModel.getLookup());
            }
            entity.setParameters(parameters);
        }
        // Sort by domain name, then entity name within a domain.
        Collections.sort(entities, new Comparator<Entity>() {
            public int compare(final Entity e1, final Entity e2) {
                String domainName1 = (String) e1.getParameters().get("domainName");
                String domainName2 = (String) e2.getParameters().get("domainName");
                int domainCompare = domainName1.compareTo(domainName2);
                if (domainCompare == 0) {
                    return e1.getName().compareTo(e2.getName());
                } else {
                    return domainCompare;
                }
            }
        });
        return entities;
    }

    /**
     * Finds the data source for the given lookup, climbing one path segment
     * at a time (via DiffUtils) until a match is found or the lookup is
     * exhausted.
     */
    private DatabaseModel findDataSourceEntity(Map<String, DatabaseModel> dataSources, String lookup) {
        DatabaseModel databaseModel = dataSources.get(lookup);
        if (databaseModel == null && StringUtils.isNotEmpty(lookup)) {
            String path = DiffUtils.extractPathFromLookup(lookup);
            databaseModel = findDataSourceEntity(dataSources, path);
        }
        return databaseModel;
    }
}
apache-2.0
aws/aws-sdk-java
aws-java-sdk-dynamodb/src/main/java/com/amazonaws/services/dynamodbv2/datamodeling/DynamoDBTransactionWriteExpression.java
3089
/*
 * Copyright 2010-2022 Amazon.com, Inc. or its affiliates. All Rights
 * Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.dynamodbv2.datamodeling;

import com.amazonaws.services.dynamodbv2.model.AttributeValue;

import java.util.Map;

/**
 * Enables adding condition expression parameter to transactionWrite operation via TransactionWriteRequest.
 * For example, you may want to update only if an attribute has a particular value.
 * @see TransactionWriteRequest#addPut(Object, DynamoDBTransactionWriteExpression)
 * @see DynamoDBMapper#transactionWrite(TransactionWriteRequest)
 */
public class DynamoDBTransactionWriteExpression {

    /** Condition that must hold for the conditional write to succeed. */
    private String conditionExpression;

    /** Substitution tokens for attribute names used in the expression. */
    private Map<String, String> expressionAttributeNames;

    /** Values that may be substituted into the expression. */
    private Map<String, AttributeValue> expressionAttributeValues;

    /**
     * Returns the condition that must be satisfied for the write to succeed.
     */
    public String getConditionExpression() {
        return conditionExpression;
    }

    /**
     * Fluent setter for the condition that must be satisfied for the write
     * to succeed.
     */
    public DynamoDBTransactionWriteExpression withConditionExpression(String conditionExpression) {
        this.conditionExpression = conditionExpression;
        return this;
    }

    /**
     * Returns the attribute-name substitution tokens for the expression.
     */
    public Map<String, String> getExpressionAttributeNames() {
        return expressionAttributeNames;
    }

    /**
     * Fluent setter for the attribute-name substitution tokens.
     */
    public DynamoDBTransactionWriteExpression withExpressionAttributeNames(Map<String, String> expressionAttributeNames) {
        this.expressionAttributeNames = expressionAttributeNames;
        return this;
    }

    /**
     * Returns the values that may be substituted into the expression.
     */
    public Map<String, AttributeValue> getExpressionAttributeValues() {
        return expressionAttributeValues;
    }

    /**
     * Fluent setter for the values that may be substituted into the
     * expression.
     */
    public DynamoDBTransactionWriteExpression withExpressionAttributeValues(Map<String, AttributeValue> expressionAttributeValues) {
        this.expressionAttributeValues = expressionAttributeValues;
        return this;
    }
}
apache-2.0
KonkerLabs/konker-platform
konker.registry.services.core/src/main/java/com/konkerlabs/platform/registry/business/model/behaviors/URIDealer.java
908
package com.konkerlabs.platform.registry.business.model.behaviors; import org.springframework.util.StringUtils; import java.net.URI; import java.text.MessageFormat; public interface URIDealer { String URI_TEMPLATE = "{0}://{1}/{2}"; String getUriScheme(); String getContext(); String getGuid(); default String getRoutUriTemplate() throws IllegalArgumentException { if(StringUtils.isEmpty(getGuid())){ throw new IllegalArgumentException("GUID cannot be null or empty"); } if(StringUtils.isEmpty(getContext())){ throw new IllegalArgumentException("CONTEXT cannot be null or empty"); } return MessageFormat.format(URI_TEMPLATE, getUriScheme(), getContext(), getGuid()); } default URI toURI() throws IllegalArgumentException { return URI.create( getRoutUriTemplate() ); } }
apache-2.0
elastisys/scale.commons
util/src/main/java/com/elastisys/scale/commons/util/time/FrozenTime.java
2396
package com.elastisys.scale.commons.util.time;

import org.joda.time.DateTime;
import org.joda.time.DateTimeUtils;

/**
 * Test/simulation utility for controlling the application's notion of current
 * time as observed through Joda-Time's {@link DateTime}: the clock can be
 * frozen at a chosen instant, advanced step-wise, or handed back to the
 * system clock.
 * <p/>
 * The operating system clock itself is never modified.
 */
public class FrozenTime {

    /**
     * Returns the current time as a UTC timestamp.
     *
     * @return The current UTC time.
     */
    public static DateTime now() {
        return UtcTime.now();
    }

    /**
     * Freezes Joda-Time's clock at the given instant. Every subsequent
     * current-time lookup via {@link DateTime} yields this instant until
     * {@link #resumeSystemTime()} is invoked.
     * <p/>
     * Note: the system clock remains unaffected by this change.
     *
     * @param timeInstant the instant to freeze the clock at
     */
    public static void setFixed(DateTime timeInstant) {
        long frozenMillis = timeInstant.getMillis();
        DateTimeUtils.setCurrentMillisFixed(frozenMillis);
    }

    /**
     * Advances the controlled clock by exactly one second. The resulting
     * instant stays fixed until {@link #resumeSystemTime()} is invoked.
     * <p/>
     * Note: the system clock remains unaffected by this change.
     */
    public static void tick() {
        tick(1);
    }

    /**
     * Advances the controlled clock by the given number of seconds. The
     * resulting instant stays fixed until {@link #resumeSystemTime()} is
     * invoked.
     * <p/>
     * Note: the system clock remains unaffected by this change.
     *
     * @param seconds number of seconds to advance
     */
    public static void tick(int seconds) {
        DateTime advanced = now().plusSeconds(seconds);
        setFixed(advanced);
    }

    /**
     * (Re)sets the current time to follow the system clock.
     */
    public static void resumeSystemTime() {
        DateTimeUtils.setCurrentMillisSystem();
    }
}
apache-2.0
orsjb/HappyBrackets
Distribution/HappyBrackets Developer Kit/HappyBrackets Project/src/examples/basic/HelloWorld.java
1611
package examples.basic; import net.beadsproject.beads.data.Buffer; import net.beadsproject.beads.ugens.Gain; import net.beadsproject.beads.ugens.Glide; import net.beadsproject.beads.ugens.WavePlayer; import net.happybrackets.core.HBAction; import net.happybrackets.core.instruments.WaveModule; import net.happybrackets.device.HB; import java.lang.invoke.MethodHandles; /** * This sketch generates a 1KHz sine wave and plays it through a gain object and output to the device */ public class HelloWorld implements HBAction { @Override public void action(HB hb) { // remove this code if you do not want other compositions to run at the same time as this one hb.reset(); hb.setStatus(this.getClass().getSimpleName() + " Loaded"); final float INITIAL_FREQUENCY = 1000; // this is the frequency of the waveform we will make final float INITIAL_VOLUME = 0.1f; // define how loud we want the sound WaveModule waveModule = new WaveModule(INITIAL_FREQUENCY, INITIAL_VOLUME, Buffer.SINE); // Now plug the gain object into the audio output waveModule.connectTo(HB.getAudioOutput()); } //<editor-fold defaultstate="collapsed" desc="Debug Start"> /** * This function is used when running sketch in IntelliJ IDE for debugging or testing * * @param args standard args required */ public static void main(String[] args) { try { HB.runDebug(MethodHandles.lookup().lookupClass()); } catch (Exception e) { e.printStackTrace(); } } //</editor-fold> }
apache-2.0
pdrados/cas
core/cas-server-core-services-api/src/test/java/org/apereo/cas/services/util/CasAddonsRegisteredServicesJsonSerializerTests.java
1431
package org.apereo.cas.services.util; import org.apereo.cas.util.ResourceUtils; import lombok.SneakyThrows; import lombok.val; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; import org.springframework.core.io.ClassPathResource; import java.io.File; import java.io.InputStream; import static org.junit.jupiter.api.Assertions.*; /** * This is {@link CasAddonsRegisteredServicesJsonSerializerTests}. * * @author Misagh Moayyed * @since 5.3.0 */ @Tag("RegisteredService") public class CasAddonsRegisteredServicesJsonSerializerTests { @Test public void verifySupports() { val s = new CasAddonsRegisteredServicesJsonSerializer(); assertTrue(s.supports(new File("servicesRegistry.conf"))); } @Test public void verifyLoad() { val s = new CasAddonsRegisteredServicesJsonSerializer(); val services = s.load(getServiceRegistryResource()); assertEquals(3, services.size()); } @Test public void verifyLoadEmpty() throws Exception { val s = new CasAddonsRegisteredServicesJsonSerializer(); val services = s.load(ResourceUtils.EMPTY_RESOURCE.getInputStream()); assertEquals(0, services.size()); } @SneakyThrows private static InputStream getServiceRegistryResource() { val file = new File("servicesRegistry.conf"); return new ClassPathResource(file.getPath()).getInputStream(); } }
apache-2.0
dongyuanlongwang/coder
design-pattern/src/main/java/rui/coder/design/pattern/behaviour/visitor/part/Monitor.java
306
package rui.coder.design.pattern.behaviour.visitor.part;

import rui.coder.design.pattern.behaviour.visitor.ComputerPartVisitor;

/**
 * Concrete element in the Visitor pattern: a monitor component that hands
 * itself to the visitor's {@code visit(Monitor)} overload.
 */
public class Monitor implements ComputerPart {

    @Override
    public void accept(ComputerPartVisitor visitor) {
        // Double dispatch: the concrete type of 'this' selects the overload.
        visitor.visit(this);
    }
}
apache-2.0
guzy/OnceCenter
src/oncecenter/wizard/newvmfromtemp/SelectStorageWizardPage.java
16491
package oncecenter.wizard.newvmfromtemp;

import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import oncecenter.util.ImageRegistry;
import oncecenter.util.TypeUtil;
import oncecenter.views.xenconnectiontreeview.elements.VMTreeObject;
import oncecenter.views.xenconnectiontreeview.elements.VMTreeObjectSR;

import org.eclipse.jface.dialogs.Dialog;
import org.eclipse.jface.viewers.IStructuredContentProvider;
import org.eclipse.jface.viewers.ITableLabelProvider;
import org.eclipse.jface.viewers.LabelProvider;
import org.eclipse.jface.viewers.TableViewer;
import org.eclipse.jface.viewers.Viewer;
import org.eclipse.jface.wizard.IWizardPage;
import org.eclipse.swt.SWT;
import org.eclipse.swt.custom.CLabel;
import org.eclipse.swt.events.SelectionEvent;
import org.eclipse.swt.events.SelectionListener;
import org.eclipse.swt.graphics.Color;
import org.eclipse.swt.graphics.Image;
import org.eclipse.swt.layout.GridData;
import org.eclipse.swt.layout.GridLayout;
import org.eclipse.swt.widgets.Button;
import org.eclipse.swt.widgets.Composite;
import org.eclipse.swt.widgets.Control;
import org.eclipse.swt.widgets.Display;
import org.eclipse.swt.widgets.Label;
import org.eclipse.swt.widgets.Shell;
import org.eclipse.swt.widgets.Table;
import org.eclipse.swt.widgets.TableColumn;
import org.eclipse.swt.widgets.Text;
import org.eclipse.wb.swt.SWTResourceManager;

import com.once.xenapi.SR;

/**
 * Wizard page of the "new VM from template" flow that lets the user pick a
 * storage repository (SR) for the new VM's disk and set the disk size.
 *
 * NOTE(review): user-visible strings in this file are GBK text that has been
 * mangled by a wrong-charset read (mojibake). They are kept byte-identical
 * here so runtime behavior is unchanged; re-encode the source file as a
 * separate fix.
 */
public class SelectStorageWizardPage extends NewVMPage {

	// Viewer/table showing the candidate storage repositories.
	TableViewer tableViewer;
	private Table table;
	// Candidate SRs, kept sorted by descending free space (see getStorageList()).
	ArrayList<Storage> srs = new ArrayList<Storage>();
	// Last raw size string entered by the user in the edit dialog.
	private String input = null;
	// Selected template; when non-null the storage widgets are hidden.
	VMTreeObject template;
	Button editButton ;

	/**
	 * Create the wizard.
	 */
	public SelectStorageWizardPage() {
		super("wizardPage");
		setTitle("Ó²ÅÌÉèÖÃ");
		setDescription("ΪÐéÄâ»úÑ¡ÔñÒ»¸öÓ²Å̴洢λÖò¢É趨ӲÅÌ´óС");
	}

	/**
	 * Create contents of the wizard: a 6-column SR table, an edit button for
	 * the disk size, and default selection of the first SR with free space.
	 *
	 * @param parent
	 */
	public void createControl(Composite parent) {
		getStorageList();
		template = ((NewVmFTWizard)this.getWizard()).selectedTemp;
		Composite composite = new Composite(parent, SWT.NULL);
		composite.setLayout(new GridLayout(1,false));
		composite.setLayoutData(new GridData(GridData.FILL_BOTH));
		Composite operateComp = new Composite(composite,SWT.NULL);
		operateComp.setLayout(new GridLayout(3,false));
		table = new Table( composite, SWT.BORDER | SWT.V_SCROLL | SWT.H_SCROLL|SWT.FULL_SELECTION|SWT.MULTI);
		table.setLayoutData(new GridData(GridData.FILL_BOTH));
		table.setLinesVisible(true);
		// Columns: icon, location, type, size(G), available size(G), shared.
		// tc6 is intentionally created before tc5 so "available size" appears
		// before "shared" in the table.
		TableColumn tc1 = new TableColumn(table, SWT.CENTER);
		TableColumn tc2 = new TableColumn(table, SWT.CENTER);
		TableColumn tc3 = new TableColumn(table, SWT.CENTER);
		TableColumn tc4 = new TableColumn(table, SWT.CENTER);
		TableColumn tc6 = new TableColumn(table, SWT.CENTER);
		TableColumn tc5 = new TableColumn(table, SWT.CENTER);
		tc1.setText(" ");
		tc2.setText("λÖÃ");
		tc3.setText("ÀàÐÍ");
		tc4.setText("´óС(G)");
		tc6.setText("¿ÉÓôóС(G)");
		tc5.setText("¹²Ïí");
		tc1.setWidth(30);
		tc2.setWidth(180);
		tc3.setWidth(80);
		tc4.setWidth(60);
		tc6.setWidth(80);
		tc5.setWidth(60);
		table.setHeaderVisible(true);
		tableViewer = new TableViewer(table);
		tableViewer.setContentProvider(new StorageContentProvider());
		tableViewer.setLabelProvider(new TableLabelProvider());
		tableViewer.setInput(srs);
		// Pre-select the first SR whose default size is positive.
		int flag = 0;
		for(Storage s:srs){
			if(s.getSize()>0)
				break;
			else flag++;
		}
		if(flag<srs.size()){
			table.setSelection(flag);
		}
		table.pack();
		if(table.getItemCount()==0)
			this.setPageComplete(false);
		editButton = new Button(composite,SWT.PUSH);
		editButton.setText("ÉèÖÃÓ²ÅÌ´óС");
		GridData gridData = new GridData();
		gridData.horizontalAlignment = GridData.END;
		editButton.setLayoutData(gridData);
		editButton.addSelectionListener(new SelectionListener(){
			@Override
			public void widgetDefaultSelected(SelectionEvent arg0) {
				widgetSelected(arg0);
			}
			@Override
			public void widgetSelected(SelectionEvent arg0) {
				// Open the size-edit dialog for the selected SR row.
				int index = table.getSelectionIndex();
				if(index != -1) {
					EditDialog dialog = new EditDialog(Display.getCurrent().getActiveShell(),index);
					dialog.open();
				}
			}
		});
		if(srs.isEmpty())
			editButton.setEnabled(false);
		// If creating from a template, no storage selection is needed, so
		// hide the table and edit button.
		if(template != null) {
			table.setVisible(false);
			editButton.setVisible(false);
		}
		setControl(composite);
	}

	/**
	 * Rebuilds {@link #srs} from the wizard state. Pool mode gathers all
	 * non-local disk SRs (plus the selected host's SR when a host is
	 * assigned); standalone mode uses only the wizard's selected SR. Each
	 * Storage is inserted so the list stays ordered by descending free space.
	 */
	private void getStorageList() {
		boolean isAssignHost = ((NewVmFTWizard)this.getWizard()).isAssignHost;
		if(srs != null )
			srs.clear();
		if(((NewVmFTWizard)this.getWizard()).isPool){
			for(VMTreeObjectSR o:((NewVmFTWizard)this.getWizard()).srs){
				if(TypeUtil.getDiskSRTypes().contains(o.getSrType()) &&!o.getSrType().equals(TypeUtil.localSrType)){
					Storage sr = new Storage(10,o);
					// Insertion sort position: before the first SR with less free space.
					int index=0;
					for(;index<srs.size();index++){
						if(srs.get(index).getMaxSize()<sr.maxSize)
							break;
					}
					srs.add(index, sr);
				}
			}
			if(isAssignHost){
				VMTreeObjectSR localSR = (VMTreeObjectSR)((NewVmFTWizard)this.getWizard()).selectedSR;
				if(localSR != null) {
					Storage sr = new Storage(10,localSR);
					int index=0;
					for(;index<srs.size();index++){
						if(srs.get(index).getMaxSize()<sr.maxSize)
							break;
					}
					srs.add(index, sr);
				}
			}
		}
		else {
			VMTreeObjectSR localSR = (VMTreeObjectSR)((NewVmFTWizard)this.getWizard()).selectedSR;
			if(localSR != null) {
				Storage sr = new Storage(10,localSR);
				int index=0;
				for(;index<srs.size();index++){
					if(srs.get(index).getMaxSize()<sr.maxSize)
						break;
				}
				srs.add(index, sr);
			}
		}
	}

	/** Content provider: exposes the List of Storage rows to the viewer. */
	class StorageContentProvider implements IStructuredContentProvider {
		protected TableViewer viewer;
		@Override
		public void dispose() {
		}
		@Override
		public Object[] getElements(Object inputElement) {
			if(inputElement instanceof List)
				return ((List)inputElement).toArray();
			return null;
		}
		@Override
		public void inputChanged(Viewer viewer, Object oldInput, Object newInput) {
		}
	}

	/** Label provider: maps a Storage row to its 6 column texts/icon. */
	class TableLabelProvider extends LabelProvider implements ITableLabelProvider {
		@Override
		public Image getColumnImage(Object element, int columnIndex) {
			// Only the first column carries an icon.
			if(element instanceof Storage) {
				switch(columnIndex) {
				case 0:
					return ImageRegistry.getImage(ImageRegistry.STORAGE);
				}
			}
			return null;
		}
		@Override
		public String getColumnText(Object element, int columnIndex) {
			if(element instanceof Storage) {
				Storage storage=(Storage)element;
				switch(columnIndex) {
				case 0:
					return "";
				case 1:
//					if(storage.getObject().getParent()==null){
//						System.out.println("Õâ¸ösrûÓÐparent"+storage.getObject().getName());
//					}
				{
					// Location column: "<name> on <host>" for local SRs, else
					// a host hint parsed out of the SR's name description.
					VMTreeObjectSR srObject = storage.getObject();
					if(srObject.getSrType().equals(TypeUtil.localSrType)){
						return srObject.getName() + " on " + srObject.getParent().getName();
					}else {
						SR.Record srRecord = srObject.getRecord();
						String location = "";
						if(srRecord!=null){
							// The description is assumed to look like
							// "...: <host> ..." — TODO confirm the format.
							String descrip = srRecord.nameDescription;
							if(descrip!=null){
								String [] m = descrip.split(":");
								if(m!=null&&m.length>1){
									String [] n = m[1].split(" ");
									if(n!=null&&n.length>1){
										location += n[0];
									}
								}
							}
						}
						if(location.length()>0){
							return srObject.getName() + " on " + location;
						}else{
							return srObject.getName();
						}
					}
				}
				case 2:
				{
					// Type column: "nfs_zfs" is displayed as "gluster_zfs".
					String type = storage.getObject().getSrType();
					if(type.equals("nfs_zfs")){
						return "gluster_zfs";
					}else{
						return type;
					}
				}
				case 3:
					return storage.getSize()+"";
				case 4:
					return storage.getMaxSize()+"";
				case 5:
					return storage.getObject().getSrType().equals("local")?"²»¹²Ïí":"¹²Ïí";
				}
			}
			return null;
		}
	}

	/**
	 * Validates a user-entered disk size: non-empty, an (optionally signed)
	 * integer, and within (minSize, maxSize].
	 *
	 * NOTE(review): {@code inputSize.equals(null)} is always false (and the
	 * earlier length() call would already NPE on null) — the null check is
	 * dead code. The comment below mentions floats but the regex accepts
	 * integers only.
	 */
	private boolean isValid(String inputSize,Storage sr) {
		// If the input is empty, return false.
		if(inputSize.length() == 0 || inputSize.equals(null))
			return false;
		// Use a regular expression to check the input is a numeric value.
		Pattern p = Pattern.compile("^(\\+|-)?\\d+$",Pattern.CANON_EQ);
		Matcher matcher = p.matcher(inputSize);
		if(!matcher.find())
			return false;
		// Check the input does not fall outside the min/max range.
		double size = Double.parseDouble(inputSize);
		if(size > sr.maxSize || size<= sr.minSize)
			return false;
		return true;
	}

	/**
	 * Refreshes the page when wizard state changes: hides the widgets in
	 * template mode, otherwise reloads the SR list and selects the first row.
	 */
	public void refresh() {
		if(this.getControl() == null)
			return;
		if(template != null) {
			table.setVisible(false);
			editButton.setVisible(false);
		}
		else {
			getStorageList();
			tableViewer.setInput(srs);
			table.select(0);
		}
	}

	/**
	 * Modal dialog for editing the disk size of the SR at a given table row.
	 * Location/type/share fields are read-only; only the size is editable.
	 */
	class EditDialog extends Dialog {
		private static final int OK_ID = 0;
		private static final String OK_LABEL = "È·¶¨";
		private static final int CLOSE_ID = 1;
		private static final String CLOSE_LABEL = "È¡Ïû";
		// Dialog page widgets.
		private Label locationLabel;
		private Text locationText;
		private Label typeLabel;
		private Text typeText;
		private Label sizeLabel;
		private Text sizeText;
		private Label shareLabel;
		private Text shareText;
		// Index of the SR row being edited.
		private int index;
		// Inline validation-error label (red text).
		private Label errorMsg;

		protected EditDialog(Shell parentShell,int index) {
			super(parentShell);
			this.index = index;
		}

		protected void configureShell(Shell shell) {
			super.configureShell(shell);
			shell.setText("ÉèÖÃÓ²ÅÌ´óС");
			shell.setBackground(new Color(null,255,255,255));
		}

		protected Control createDialogArea(Composite parent) {
			Storage sr = srs.get(index);
			Composite composite = new Composite(parent,SWT.NONE);
			GridLayout layout = new GridLayout(3,true);
			layout.makeColumnsEqualWidth = false;
			composite.setLayout(layout);
			composite.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
			locationLabel = new Label(composite,SWT.NONE);
			locationLabel.setText("´æ´¢Î»Öà £º");
			locationText = new Text(composite,SWT.NONE);
			locationText.setText(sr.getObject().getName() + " on "+sr.getObject().getParent().getName());
			locationText.setEditable(false);
			new Label(composite,SWT.NONE);
			typeLabel = new Label(composite,SWT.NONE);
			typeLabel.setText("´æ´¢ÀàÐÍ £º");
			typeText = new Text(composite, SWT.NONE);
			typeText.setText(sr.getObject().getSrType());
			typeText.setEditable(false);
			new Label(composite,SWT.NONE);
			sizeLabel = new Label(composite, SWT.NONE);
			sizeLabel.setText("´æ´¢´óС £º");
			sizeText = new Text(composite, SWT.BORDER);
			sizeText.setFocus();
			sizeText.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
			sizeText.setText(sr.getSize()+"");
			new Label(composite,SWT.NONE).setText("G");
//			sizeText.addModifyListener( new ModifyListener(){
//				public void modifyText(ModifyEvent e) {
//					input = sizeText.getText();
//					Storage storage = (Storage)tableViewer.getElementAt(table.getSelectionIndex());
//					if(!isValid(input,storage))
//					{
//						String errorMsg = "ÇëÊäÈëÔÚ" + storage.minSize + "--" + storage.maxSize + "Ö®¼äµÄÕýÕûÊý";
//						InputErrorDialog dialog = new InputErrorDialog(Display.getCurrent().getActiveShell(),errorMsg);
//						dialog.open();
//					}
//					int index = table.getSelectionIndex();
//					srs.get(index).setSize(input);
//				}
//			}) ;
			shareLabel = new Label(composite,SWT.NONE);
			shareLabel.setText("ÊÇ·ñ¹²Ïí £º");
			shareText = new Text(composite,SWT.NONE);
			shareText.setText(sr.getObject().getSrType().equals("local")?"²»¹²Ïí":"¹²Ïí");
			shareText.setEditable(false);
			new Label(composite,SWT.NONE);
			new Label(composite,SWT.NONE);
			errorMsg = new Label(composite,SWT.NONE);
			errorMsg.setText(" ");
			errorMsg.setForeground(SWTResourceManager.getColor(SWT.COLOR_RED));
			return parent;
		}

		protected void createButtonsForButtonBar(Composite parent) {
			createButton(parent,EditDialog.OK_ID,EditDialog.OK_LABEL,true);
			createButton(parent,EditDialog.CLOSE_ID,EditDialog.CLOSE_LABEL,true);
		}

		protected void buttonPressed(int buttonId) {
			if(EditDialog.OK_ID == buttonId) {
				// Strip a trailing "G" unit suffix if the user typed one.
				input = sizeText.getText().split("G")[0];
				System.out.println("input:" + input);
				Storage storage = (Storage)tableViewer.getElementAt(table.getSelectionIndex());
				if(!isValid(input,storage))
				{
					String msg = "ÇëÊäÈëÔÚ" + storage.minSize + "--" + storage.maxSize + "Ö®¼äµÄÕýÕûÊý";
					errorMsg.setText(msg);
				}
				else {
					int index = table.getSelectionIndex();
					srs.get(index).setSize(input);
					tableViewer.setInput(srs);
					//tableViewer.refresh();
					this.close();
				}
			}
			else if(EditDialog.CLOSE_ID == buttonId) {
				this.close();
			}
		}
	}

	/** Simple modal error dialog: icon plus message and a Close button. */
	class InputErrorDialog extends Dialog {
		private CLabel imageCLabel;
		private String errorMsg = null;
		private Label errorLabel;
		private static final int CLOSE_ID = 0;
		private static final String CLOSE_LABEL = "Close";

		protected InputErrorDialog(Shell parentShell,String errorMsg) {
			super(parentShell);
			this.errorMsg = errorMsg;
		}

		protected Control createDialogArea(Composite parent) {
			Composite composite = (Composite)super.createDialogArea(parent);
			GridLayout layout = new GridLayout(2,true);
			layout.verticalSpacing = 22;
			layout.horizontalSpacing = 15;
			layout.makeColumnsEqualWidth = false;
			composite.setLayout(layout);
			imageCLabel = new CLabel(composite, SWT.NONE);
			imageCLabel.setImage(ImageRegistry.getImage(ImageRegistry.SERVICENOTOPEN));
			GridData imgData = new GridData();
			imgData.verticalSpan = 2;
			imageCLabel.setLayoutData(imgData);
			errorLabel = new Label(composite, SWT.NONE);
			errorLabel.setText(errorMsg);
			return parent;
		}

		protected void createButtonsForButtonBar(Composite parent) {
			createButton(parent,InputErrorDialog.CLOSE_ID,InputErrorDialog.CLOSE_LABEL,true);
		}

		protected void buttonPressed(int buttonId) {
			if(InputErrorDialog.CLOSE_ID == buttonId)
				close();
		}
	}

	/**
	 * Row model for the SR table: wraps a VMTreeObjectSR together with the
	 * requested disk size and the SR's free-space bounds (in GiB).
	 */
	class Storage {
		// Requested disk size in GiB.
		private Long size;
		private VMTreeObjectSR object;
		// Free space in GiB; Double.MAX_VALUE when the SR record lacks sizes.
		private double maxSize;
		private double minSize;

		public Storage(double size,VMTreeObjectSR object){
			//this.setSize(size);
			this.setObject(object);
			SR.Record srRecord = (SR.Record)object.getRecord();
			if(srRecord.physicalSize==null||srRecord.physicalUtilisation==null){
				this.setMaxSize(Double.MAX_VALUE);
			}else{
				// Bytes -> GiB.
				this.setMaxSize((srRecord.physicalSize - srRecord.physicalUtilisation)/1024/1024/1024);
			}
//			System.out.println(srRecord.nameLabel+"µÄ×Ü´óСÊÇ"+srRecord.physicalSize);
//			System.out.println(srRecord.nameLabel+"µÄʹÓÃÁ¿ÊÇ"+srRecord.physicalUtilisation);
//			System.out.println(srRecord.nameLabel+"µÄÊ£Óà¿Õ¼äÊÇ"+this.maxSize+"G");
			this.setMinSize(0);
			// Clamp the default size to the available space.
			this.setSize((size>this.getMaxSize())?this.getMaxSize():size);
		}

		public VMTreeObjectSR getObject() {
			return object;
		}
		public void setObject(VMTreeObjectSR object) {
			this.object = object;
		}
		public double getMaxSize() {
			return maxSize;
		}
		public void setMaxSize(double maxSize) {
			this.maxSize = maxSize;
		}
		public double getMinSize() {
			return minSize;
		}
		public void setMinSize(double minSize) {
			this.minSize = minSize;
		}
		public Long getSize() {
			return size;
		}
		public void setSize(Long size) {
			this.size = size;
		}
		public void setSize(String size) {
			this.size = Long.parseLong(size);
		}
		public void setSize(double size) {
			this.size = (long) Math.floor(size);
		}
	}

	/**
	 * Pushes the chosen SR, size and share flag into the wizard when "Next"
	 * is pressed; returns false (blocking navigation) if nothing is selected.
	 *
	 * NOTE(review): the share flag compares against the literal "local" here,
	 * while other code in this class uses TypeUtil.localSrType — confirm the
	 * two are the same value.
	 */
	@Override
	protected boolean nextButtonClick() {
		int index = table.getSelectionIndex();
		if(index != -1&&table.getItemCount()>0) {
			Storage sr = srs.get(index);
			((NewVmFTWizard)this.getWizard()).storage = sr.getSize();
			((NewVmFTWizard)this.getWizard()).selectedSR = sr.getObject();
			((NewVmFTWizard)this.getWizard()).isShare = sr.getObject().getSrType().equals("local")? false:true;
			IWizardPage nextPage = getWizard().getNextPage(this);
			if(nextPage instanceof FinishWizardPage)
				((FinishWizardPage)nextPage).refresh();
			return true;
		}
		return false;
	}
}
apache-2.0
marques-work/gocd
common/src/test/java/com/thoughtworks/go/util/DirectoryReaderTest.java
6877
/*
 * Copyright 2021 ThoughtWorks, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.thoughtworks.go.util;

import java.io.File;
import java.io.IOException;
import java.net.URLEncoder;
import java.util.List;

import com.thoughtworks.go.domain.DirectoryEntry;
import com.thoughtworks.go.domain.FolderDirectoryEntry;
import com.thoughtworks.go.domain.JobIdentifier;
import org.apache.commons.io.FileUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import static org.hamcrest.Matchers.is;
import static org.junit.Assert.assertThat;

/**
 * Tests for {@code DirectoryReader}: listing a job's artifact files and folders
 * and building their download URLs under
 * {@code /files/<pipeline>/<pipelineLabel>/<stage>/<stageCounter>/<job>/...}.
 */
public class DirectoryReaderTest {
    private File testFolder;
    private JobIdentifier jobIdentifier;
    private String folderRoot;

    @Before
    public void setUp() throws IOException {
        testFolder = TestFileUtil.createTempFolder("testFiles");
        // -1 / "LATEST" counters: the reader should still build stable URLs from the names.
        jobIdentifier = new JobIdentifier("pipelineName", -1, "LATEST", "stageName", "LATEST", "buildName", 123L);
        folderRoot = "/" + testFolder.getName();
    }

    @After
    public void tearDown() {
        FileUtils.deleteQuietly(testFolder);
    }

    @Test
    public void shouldNotDieIfGivenBogusPath() throws Exception {
        DirectoryReader reader = new DirectoryReader(jobIdentifier);
        List<DirectoryEntry> entries = reader.listEntries(new File("totally bogus path!!!"), "");
        assertThat(entries.size(), is(0));
    }

    @Test
    public void shouldNotDieIfGivenBogusFile() {
        DirectoryReader reader = new DirectoryReader(jobIdentifier);
        List<DirectoryEntry> entries = reader.listEntries(null, "");
        assertThat(entries.size(), is(0));
    }

    @Test
    public void shouldGetFileList() throws Exception {
        String filename = "text.html$%";
        TestFileUtil.createTestFile(testFolder, filename);
        DirectoryReader reader = new DirectoryReader(jobIdentifier);
        List<DirectoryEntry> entries = reader.listEntries(testFolder, folderRoot);
        assertThat(entries.size(), is(1));
        assertThat(entries.get(0).getFileName(), is(filename));
        // Fix: use the explicit-charset overload — the single-argument
        // URLEncoder.encode(String) is deprecated and encodes with the platform
        // default charset, making the expected URL machine-dependent.
        assertThat(entries.get(0).getUrl(),
                is("/files/pipelineName/LATEST/stageName/LATEST/buildName" + folderRoot
                        + "/" + URLEncoder.encode(filename, "UTF-8")));
    }

    @Test
    public void shouldGetSubSubFolder() throws Exception {
        // Layout: <testFolder>/primate/monkey/baboon.html
        TestFileUtil.createTestFile(
                TestFileUtil.createTestFolder(TestFileUtil.createTestFolder(testFolder, "primate"), "monkey"),
                "baboon.html");
        DirectoryReader reader = new DirectoryReader(jobIdentifier);
        List<DirectoryEntry> entries = reader.listEntries(testFolder, folderRoot);
        FolderDirectoryEntry folder = (FolderDirectoryEntry) entries.get(0);
        assertThat(folder.getFileName(), is("primate"));
        FolderDirectoryEntry subFolder = (FolderDirectoryEntry) folder.getSubDirectory().get(0);
        assertThat(subFolder.getFileName(), is("monkey"));
        assertThat(subFolder.getSubDirectory().get(0).getFileName(), is("baboon.html"));
        assertThat(subFolder.getSubDirectory().get(0).getUrl(),
                is("/files/pipelineName/LATEST/stageName/LATEST/buildName" + folderRoot
                        + "/primate/monkey/baboon.html"));
    }

    @Test
    public void shouldGetListOfFilesAndFolders() throws Exception {
        TestFileUtil.createTestFile(testFolder, "text.html");
        File subFolder = TestFileUtil.createTestFolder(testFolder, "primate");
        TestFileUtil.createTestFile(subFolder, "baboon.html");
        DirectoryReader reader = new DirectoryReader(jobIdentifier);
        List<DirectoryEntry> entries = reader.listEntries(testFolder, folderRoot);
        assertThat(entries.size(), is(2));
        // Folders are listed before files.
        FolderDirectoryEntry folder = (FolderDirectoryEntry) entries.get(0);
        assertThat(folder.getFileName(), is("primate"));
        assertThat(folder.getUrl(),
                is("/files/pipelineName/LATEST/stageName/LATEST/buildName" + folderRoot + "/primate"));
        assertThat(entries.get(1).getFileName(), is("text.html"));
        assertThat(folder.getSubDirectory().get(0).getFileName(), is("baboon.html"));
        assertThat(folder.getSubDirectory().get(0).getUrl(),
                is("/files/pipelineName/LATEST/stageName/LATEST/buildName" + folderRoot
                        + "/primate/baboon.html"));
    }

    @Test
    public void shouldGetListOfFilesWithDirectoriesFirstAndFilesInAlphabeticOrder() throws Exception {
        TestFileUtil.createTestFile(testFolder, "build.html");
        File subFolder = TestFileUtil.createTestFolder(testFolder, "testoutput");
        TestFileUtil.createTestFile(subFolder, "baboon.html");
        TestFileUtil.createTestFile(subFolder, "apple.html");
        TestFileUtil.createTestFile(subFolder, "pear.html");
        DirectoryReader reader = new DirectoryReader(jobIdentifier);
        List<DirectoryEntry> entries = reader.listEntries(testFolder, folderRoot);
        assertThat(entries.size(), is(2));
        FolderDirectoryEntry folder = (FolderDirectoryEntry) entries.get(0);
        assertThat(folder.getFileName(), is("testoutput"));
        assertThat(entries.get(1).getFileName(), is("build.html"));
        // Files inside a folder come back sorted alphabetically.
        assertThat(folder.getSubDirectory().get(0).getFileName(), is("apple.html"));
        assertThat(folder.getSubDirectory().get(1).getFileName(), is("baboon.html"));
        assertThat(folder.getSubDirectory().get(2).getFileName(), is("pear.html"));
    }

    @Test
    public void shouldNotContainSerializedObjectFile() throws Exception {
        // ".ser" artifacts are internal bookkeeping and must be hidden from listings.
        String filename = ".log200806041535.xml.ser";
        TestFileUtil.createTestFile(testFolder, filename);
        DirectoryReader reader = new DirectoryReader(jobIdentifier);
        List<DirectoryEntry> entries = reader.listEntries(testFolder, folderRoot);
        assertThat(entries.size(), is(0));
    }

    @Test
    public void shouldKeepRootsInUrl() throws Exception {
        // Listing a nested folder keeps the full path from the artifact root in URLs.
        File b = TestFileUtil.createTestFolder(testFolder, "b");
        TestFileUtil.createTestFile(b, "c.xml");
        List<DirectoryEntry> entries = new DirectoryReader(jobIdentifier).listEntries(b, folderRoot + "/b");
        assertThat(entries.size(), is(1));
        String expectedUrl = "/files/pipelineName/LATEST/stageName/LATEST/buildName/"
                + testFolder.getName() + "/b/c.xml";
        assertThat(entries.get(0).getUrl(), is(expectedUrl));
    }
}
apache-2.0
AdaptiveMe/adaptive-arp-api-lib-java
src/main/java/me/adaptive/arp/api/AppResourceData.java
7460
/**
--| ADAPTIVE RUNTIME PLATFORM |----------------------------------------------------------------------------------------

(C) Copyright 2013-2015 Carlos Lozano Diez t/a Adaptive.me <http://adaptive.me>.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 . Unless required by
appli- -cable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language
governing permissions and limitations under the License.

Original author:

    * Carlos Lozano Diez
        <http://github.com/carloslozano>
        <http://twitter.com/adaptivecoder>
        <mailto:carlos@adaptive.me>

Contributors:

    * Ferran Vila Conesa
        <http://github.com/fnva>
        <http://twitter.com/ferran_vila>
        <mailto:ferran.vila.conesa@gmail.com>

    * See source code files for contributors.

Release:

    * @version v2.2.15

-------------------------------------------| aut inveniam viam aut faciam |--------------------------------------------
*/
package me.adaptive.arp.api;

import java.io.Serializable;

/**
 * Carrier for a resource delivered from the application's secure payload. Besides
 * the payload bytes it records whether the payload is "cooked" (compressed,
 * encrypted or otherwise transformed) and, if so, the recipe and sizes needed to
 * restore the original content.
 *
 * @author Carlos Lozano Diez
 * @since v2.1.3
 * @version 1.0
 */
public class AppResourceData implements Serializable {

    /**
     * Serialization version marker; kept stable for wire compatibility.
     *
     * @since 2.2.13
     */
    private static final long serialVersionUID = 100343810L;

    /** Identifier or path of the resource. */
    private String id;

    /** Payload bytes in ready-to-consume form. */
    private byte[] data;

    /** Mimetype of the payload content. */
    private String rawType;

    /** Payload length in bytes before any cooking (after uncompress/decrypt). */
    private long rawLength;

    /** True when the payload is cooked and must be un-cooked per {@link #cookedType}. */
    private boolean cooked;

    /** Recipe that reverses the cooking (uncompress/decrypt/etc.). */
    private String cookedType;

    /** Payload length in bytes after cooking. */
    private long cookedLength;

    /**
     * Default constructor.
     *
     * @since v2.1.3
     */
    public AppResourceData() {
    }

    /**
     * Convenience constructor populating every attribute.
     *
     * @param id           The id or path of the resource retrieved.
     * @param data         The payload data of the resource (uncooked).
     * @param rawType      The raw type/mimetype of the resource.
     * @param rawLength    The raw length/original length in bytes of the resource.
     * @param cooked       True if the resource is cooked.
     * @param cookedType   Type of recipe used for cooking.
     * @param cookedLength The cooked length in bytes of the resource.
     * @since v2.1.3
     */
    public AppResourceData(String id, byte[] data, String rawType, long rawLength, boolean cooked, String cookedType, long cookedLength) {
        this.id = id;
        this.data = data;
        this.rawType = rawType;
        this.rawLength = rawLength;
        this.cooked = cooked;
        this.cookedType = cookedType;
        this.cookedLength = cookedLength;
    }

    /**
     * Gets the id or path identifier of the resource.
     *
     * @return The id or path identifier of the resource.
     */
    public String getId() {
        return this.id;
    }

    /**
     * Sets the id or path of the resource.
     *
     * @param id The id or path of the resource.
     * @since v2.1.3
     */
    public void setId(String id) {
        this.id = id;
    }

    /**
     * Returns the payload of the resource.
     *
     * @return Binary payload of the resource.
     * @since v2.1.3
     */
    public byte[] getData() {
        return this.data;
    }

    /**
     * Sets the payload of the resource.
     *
     * @param data Binary payload of the resource.
     * @since v2.1.3
     */
    public void setData(byte[] data) {
        this.data = data;
    }

    /**
     * Gets the resource's raw type or mimetype.
     *
     * @return Resource's type or mimetype.
     * @since v2.1.3
     */
    public String getRawType() {
        return this.rawType;
    }

    /**
     * Sets the resource's raw type or mimetype.
     *
     * @param rawType Resource's type or mimetype.
     * @since v2.1.3
     */
    public void setRawType(String rawType) {
        this.rawType = rawType;
    }

    /**
     * Gets the resource payload's original length.
     *
     * @return Original length of the resource in bytes before cooking.
     * @since v2.1.3
     */
    public long getRawLength() {
        return this.rawLength;
    }

    /**
     * Sets the resource payload's original length.
     *
     * @param rawLength Original length of the resource in bytes before cooking.
     * @since v2.1.3
     */
    public void setRawLength(long rawLength) {
        this.rawLength = rawLength;
    }

    /**
     * Attribute to denote whether the payload of the resource is cooked.
     *
     * @return True if the resource is cooked, false otherwise.
     * @since v2.1.3
     */
    public boolean getCooked() {
        return this.cooked;
    }

    /**
     * Attribute to denote whether the payload of the resource is cooked.
     *
     * @param cooked True if the resource is cooked, false otherwise.
     * @since v2.1.3
     */
    public void setCooked(boolean cooked) {
        this.cooked = cooked;
    }

    /**
     * If the resource is cooked, this will return the recipe used during cooking.
     *
     * @return The cooking recipe to reverse the cooking process.
     * @since v2.1.3
     */
    public String getCookedType() {
        return this.cookedType;
    }

    /**
     * If the resource is cooked, the type of recipe used during cooking.
     *
     * @param cookedType The cooking recipe used during cooking.
     * @since v2.1.3
     */
    public void setCookedType(String cookedType) {
        this.cookedType = cookedType;
    }

    /**
     * The length in bytes of the payload after cooking.
     *
     * @return Length in bytes of cooked payload.
     * @since v2.1.3
     */
    public long getCookedLength() {
        return this.cookedLength;
    }

    /**
     * The length in bytes of the payload after cooking.
     *
     * @param cookedLength Length in bytes of cooked payload.
     * @since v2.1.3
     */
    public void setCookedLength(long cookedLength) {
        this.cookedLength = cookedLength;
    }
}

/**
------------------------------------| Engineered with ♥ in Barcelona, Catalonia |--------------------------------------
*/
apache-2.0
hero-app/hero-android
app/src/main/java/com/hero/model/User.java
256
package com.hero.model;

import com.fasterxml.jackson.annotation.JsonProperty;

/**
 * JSON-bound user record. Fields are public and mapped by Jackson via
 * {@link JsonProperty}; the class carries data only and has no behavior.
 */
public class User {
    // Display name of the user.
    @JsonProperty("name") public String name;
    // "fbid" — presumably the user's Facebook identifier; confirm against the API payload.
    @JsonProperty("fbid") public String fbid;
    // Image reference — NOTE(review): SOURCE does not show whether this is a URL or an id; confirm.
    @JsonProperty("image") public String image;
}
apache-2.0
lowyjoe/SpringProject
src/com/lyj/base/dao/MenuDao.java
7841
package com.lyj.base.dao;

import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Types;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;

import org.springframework.jdbc.core.RowMapper;

import com.lyj.base.entity.MenuInfo;
import com.lyj.base.entity.UserInfo;

/**
 * DAO for the {@code menu_info} table: hierarchical menu lookups (children,
 * root-to-leaf paths) plus basic CRUD and paged listing helpers.
 */
public class MenuDao extends BaseDao {

    /**
     * Loads the direct children of the menu node with id {@code pid}.
     *
     * @param pid parent menu id (passed as a String bind value)
     * @return list of child menus; empty when there are none
     */
    public List<MenuInfo> getParentList(String pid) {
        final List<MenuInfo> list = new ArrayList<MenuInfo>();
        // The RowMapper collects rows into the outer list; its mapped return value is unused.
        RowMapper rowMapper = new RowMapper() {
            public Object mapRow(ResultSet rs, int rowNum) throws SQLException {
                MenuInfo emp = new MenuInfo();
                emp.setId(rs.getInt("id"));
                emp.setMenuDesc(rs.getString("menu_desc"));
                emp.setMenuOrder(rs.getInt("menu_order"));
                emp.setParentId(rs.getInt("parent_id"));
                list.add(emp);
                return null;
            }
        };
        Object arg[] = {pid};
        this.getJdbcTemplate().query(
                "select id,menu_desc,menu_order,parent_id from menu_info where parent_id=? ",
                arg, rowMapper);
        return list;
    }

    /**
     * Inserts a sibling menu entry (same parent as an existing node).
     *
     * @param fun menu to insert; id, description and parent id are persisted
     * @throws Exception propagated from the underlying JDBC call
     */
    public void addBrother(MenuInfo fun) throws Exception {
        String sql = "insert into menu_info (id,menu_desc, parent_id) values(?,?,?)";
        Object args[] = {fun.getId(), fun.getMenuDesc(), fun.getParentId()};
        int argTypes[] = {Types.INTEGER, Types.VARCHAR, Types.INTEGER};
        // SQL + bind values + explicit SQL types.
        this.getJdbcTemplate().update(sql, args, argTypes);
    }

    /**
     * Deletes the menu row with the given id.
     *
     * @param id menu id to remove
     * @throws Exception propagated from the underlying JDBC call
     */
    public void delete(int id) throws Exception {
        String sql = "delete from menu_info where id=?";
        Object args[] = {id};
        int argTypes[] = {Types.INTEGER};
        this.getJdbcTemplate().update(sql, args, argTypes);
    }

    /**
     * Renames a menu entry.
     *
     * @param id   menu id (bound as VARCHAR, matching the original call sites)
     * @param text new menu description
     * @throws Exception propagated from the underlying JDBC call
     */
    public void update(String id, String text) throws Exception {
        String sql = "update menu_info set menu_Desc=? where id= ? ";
        Object args[] = {text, id};
        int argTypes[] = {Types.VARCHAR, Types.VARCHAR};
        this.getJdbcTemplate().update(sql, args, argTypes);
    }

    /**
     * Resolves every menu whose description equals {@code text} to its id path
     * from the root down to the menu itself.
     *
     * @param text exact menu description to look up
     * @return one "[rootId, ..., menuId]" string (List.toString form) per match
     */
    public List<String> findPath(String text) {
        final List<String> strList = new ArrayList<String>();
        final ArrayList<String> path = new ArrayList<String>();
        ArrayList<String> paths = new ArrayList<String>();
        Object args[] = {text};
        // Bug fix: the original issued this identical lookup twice in a row, so every
        // matching id — and therefore every resulting path — appeared twice.
        this.getJdbcTemplate().queryForList("select id from menu_info where menu_desc=? ", args, new RowMapper() {
            public Object mapRow(ResultSet rs, int rowNum) throws SQLException {
                strList.add(rs.getString("id"));
                return null;
            }
        });
        for (int i = 0; i < strList.size(); i++) {
            Object str[] = {strList.get(i)};
            // Oracle hierarchical query: walks from the menu up through its ancestors.
            this.getJdbcTemplate().queryForList(
                    "select id from menu_info start with id= ? connect by prior parent_id=id ",
                    str, new RowMapper() {
                        public Object mapRow(ResultSet rs, int rowNum) throws SQLException {
                            path.add(rs.getString("id"));
                            return null;
                        }
                    });
            Collections.reverse(path); // leaf-to-root order -> root-to-leaf
            paths.add(path.toString());
            path.clear(); // reuse the buffer for the next match
        }
        return paths;
    }

    /**
     * Queries the child menus of a node by parent id.
     *
     * @param pid parent menu id
     * @return list of child menus; empty when there are none
     */
    public List<MenuInfo> querySonList(int pid) {
        final List<MenuInfo> list = new ArrayList<MenuInfo>();
        RowMapper rowMapper = new RowMapper() {
            public Object mapRow(ResultSet rs, int rowNum) throws SQLException {
                MenuInfo emp = new MenuInfo();
                emp.setId(rs.getInt("id"));
                emp.setMenuDesc(rs.getString("menu_desc"));
                emp.setMenuOrder(rs.getInt("menu_order"));
                emp.setParentId(rs.getInt("parent_id"));
                list.add(emp);
                return null;
            }
        };
        // Fix: bind pid as a parameter instead of concatenating it into the SQL string,
        // matching the parameterized style used everywhere else in this DAO.
        Object args[] = {pid};
        this.getJdbcTemplate().query(
                "select id,menu_desc,menu_order,parent_id from menu_info where parent_id=?",
                args, rowMapper);
        return list;
    }

    /**
     * Finds menus whose description contains the given text.
     *
     * @param text substring to match against menuDesc
     * @return matching menus (HQL LIKE query via BaseDao)
     */
    public List<MenuInfo> findByText(String text) {
        List<MenuInfo> result = new ArrayList<MenuInfo>();
        Object[] args = {"%" + text + "%"};
        result = super.list(" from MenuInfo where menuDesc like ? ", args);
        return result;
    }

    /**
     * Paged listing of menus, optionally filtered by description.
     *
     * @param name  optional description filter (LIKE match); ignored when blank
     * @param start paging offset
     * @param size  page size
     * @param order order-by clause; defaults to "menu_order asc" when blank
     * @return rows as column-name -> value maps
     */
    public List<Map<String, Object>> list(String name, int start, int size, String order) {
        List<Object> param = new ArrayList<Object>();
        String sql = "select m.* from menu_info m where 1=1 ";
        if (null != name && name.trim().length() > 0) {
            sql += " and m.menu_desc like ? ";
            param.add("%" + name + "%");
        }
        if (null == order || order.length() == 0) {
            order = " menu_order asc";
        }
        return super.listByNative(sql, param.toArray(), start, size, order);
    }

    /**
     * Paged listing restricted to a tree node: the node itself plus its children.
     *
     * @param name        optional description filter (LIKE match); ignored when blank
     * @param start       paging offset
     * @param size        page size
     * @param order       order-by clause; defaults to "menu_order asc" when blank
     * @param treeClickId selected tree node id, or -1 for no restriction
     * @return rows as column-name -> value maps
     */
    public List<Map<String, Object>> list(String name, int start, int size, String order, int treeClickId) {
        List<Object> param = new ArrayList<Object>();
        String sql = "select m.* from menu_info m where 1=1 ";
        if (null != name && name.trim().length() > 0) {
            sql += " and m.menu_desc like ? ";
            param.add("%" + name + "%");
        }
        if (-1 != treeClickId) {
            // Fix: parenthesize the OR — without it the "or m.id=?" branch escaped the
            // preceding AND filters and matched the clicked row regardless of the name filter.
            sql += " and (m.parent_id = ? or m.id= ?)";
            param.add(treeClickId);
            param.add(treeClickId);
        }
        if (null == order || order.length() == 0) {
            order = " menu_order asc";
        }
        return super.listByNative(sql, param.toArray(), start, size, order);
    }

    /**
     * Row count matching {@link #list(String, int, int, String)}.
     */
    public int count(String name, int start, int size, String order) {
        List<Object> param = new ArrayList<Object>();
        String sql = "select count(*) from menu_info m where 1=1 ";
        if (null != name && name.trim().length() > 0) {
            sql += " and m.menu_desc like ? ";
            param.add("%" + name + "%");
        }
        return super.countByNative(sql, param.toArray());
    }

    /**
     * Row count matching {@link #list(String, int, int, String, int)}.
     */
    public int count(String name, int start, int size, String order, int treeClickId) {
        List<Object> param = new ArrayList<Object>();
        String sql = "select count(*) from menu_info m where 1=1 ";
        if (null != name && name.trim().length() > 0) {
            sql += " and m.menu_desc like ? ";
            param.add("%" + name + "%");
        }
        if (-1 != treeClickId) {
            // Same precedence fix as in list(...) so count and page stay consistent.
            sql += " and (m.parent_id = ? or m.id= ?)";
            param.add(treeClickId);
            param.add(treeClickId);
        }
        return super.countByNative(sql, param.toArray());
    }

    /**
     * Looks up a single row by menu description.
     *
     * NOTE(review): the HQL uses the table name {@code menu_info} (not the entity
     * name) yet is typed as returning {@link UserInfo} — looks suspect; confirm the
     * intended entity/mapping before relying on this method.
     *
     * @param name menu description to match exactly
     * @return first match, or null when none found
     */
    @SuppressWarnings("unchecked")
    public UserInfo getUserByName(String name) {
        String hql = "select m from menu_info m where m.menu_desc=? ";
        List<UserInfo> list = super.list(hql, new Object[]{name});
        if (list != null && list.size() > 0) {
            return list.get(0);
        } else {
            return null;
        }
    }
}
apache-2.0
amaliujia/elasticsearch
core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java
109689
/* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch licenses this file to you under * the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.elasticsearch.snapshots; import com.google.common.base.Predicate; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase.Slow; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ListenableActionFuture; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.status.*; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import 
org.elasticsearch.action.count.CountResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.metadata.*; import org.elasticsearch.cluster.SnapshotsInProgress.Entry; import org.elasticsearch.cluster.SnapshotsInProgress.ShardSnapshotStatus; import org.elasticsearch.cluster.SnapshotsInProgress.State; import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; import org.elasticsearch.common.Priority; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.indices.InvalidIndexNameException; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.snapshots.mockstore.MockRepositoryModule; import org.elasticsearch.test.junit.annotations.TestLogging; import org.junit.Test; import java.nio.channels.SeekableByteChannel; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import static com.google.common.collect.Lists.newArrayList; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchQuery; import static org.elasticsearch.index.shard.IndexShard.INDEX_REFRESH_INTERVAL; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*; import static org.hamcrest.Matchers.*; @Slow public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests { @Test public void basicWorkFlowTest() throws Exception { Client client = client(); logger.info("--> creating repository"); assertAcked(client.admin().cluster().preparePutRepository("test-repo") .setType("fs").setSettings(Settings.settingsBuilder() .put("location", randomRepoPath()) .put("compress", randomBoolean()) .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); createIndex("test-idx-1", "test-idx-2", "test-idx-3"); ensureGreen(); logger.info("--> indexing some data"); for (int i = 0; i < 100; i++) { index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i); index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i); index("test-idx-3", "doc", Integer.toString(i), "foo", "baz" + i); } refresh(); assertHitCount(client.prepareCount("test-idx-1").get(), 100L); assertHitCount(client.prepareCount("test-idx-2").get(), 100L); assertHitCount(client.prepareCount("test-idx-3").get(), 100L); ListenableActionFuture<FlushResponse> flushResponseFuture = null; if (randomBoolean()) { ArrayList<String> indicesToFlush = newArrayList(); for (int i = 1; i < 4; i++) { if (randomBoolean()) { indicesToFlush.add("test-idx-" + i); } } if (!indicesToFlush.isEmpty()) { String[] indices = indicesToFlush.toArray(new String[indicesToFlush.size()]); logger.info("--> starting asynchronous flush for indices {}", Arrays.toString(indices)); flushResponseFuture = client.admin().indices().prepareFlush(indices).execute(); } } logger.info("--> snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-3").get(); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); 
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); SnapshotInfo snapshotInfo = client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0); assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); assertThat(snapshotInfo.version(), equalTo(Version.CURRENT)); logger.info("--> delete some data"); for (int i = 0; i < 50; i++) { client.prepareDelete("test-idx-1", "doc", Integer.toString(i)).get(); } for (int i = 50; i < 100; i++) { client.prepareDelete("test-idx-2", "doc", Integer.toString(i)).get(); } for (int i = 0; i < 100; i += 2) { client.prepareDelete("test-idx-3", "doc", Integer.toString(i)).get(); } assertAllSuccessful(refresh()); assertHitCount(client.prepareCount("test-idx-1").get(), 50L); assertHitCount(client.prepareCount("test-idx-2").get(), 50L); assertHitCount(client.prepareCount("test-idx-3").get(), 50L); logger.info("--> close indices"); client.admin().indices().prepareClose("test-idx-1", "test-idx-2").get(); logger.info("--> restore all indices from the snapshot"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); ensureGreen(); for (int i=0; i<5; i++) { assertHitCount(client.prepareCount("test-idx-1").get(), 100L); assertHitCount(client.prepareCount("test-idx-2").get(), 100L); assertHitCount(client.prepareCount("test-idx-3").get(), 50L); } // Test restore after index deletion logger.info("--> delete indices"); cluster().wipeIndices("test-idx-1", "test-idx-2"); logger.info("--> restore one index after deletion"); restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-2").execute().actionGet(); 
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); ensureGreen(); for (int i=0; i<5; i++) { assertHitCount(client.prepareCount("test-idx-1").get(), 100L); } ClusterState clusterState = client.admin().cluster().prepareState().get().getState(); assertThat(clusterState.getMetaData().hasIndex("test-idx-1"), equalTo(true)); assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(false)); if (flushResponseFuture != null) { // Finish flush flushResponseFuture.actionGet(); } } @Test public void singleGetAfterRestoreTest() throws Exception { String indexName = "testindex"; String repoName = "test-restore-snapshot-repo"; String snapshotName = "test-restore-snapshot"; String absolutePath = randomRepoPath().toAbsolutePath().toString(); logger.info("Path [{}]", absolutePath); String restoredIndexName = indexName + "-restored"; String typeName = "actions"; String expectedValue = "expected"; Client client = client(); // Write a document String docId = Integer.toString(randomInt()); index(indexName, typeName, docId, "value", expectedValue); logger.info("--> creating repository"); assertAcked(client.admin().cluster().preparePutRepository(repoName) .setType("fs").setSettings(Settings.settingsBuilder() .put("location", absolutePath) )); logger.info("--> snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot(repoName, snapshotName) .setWaitForCompletion(true) .setIndices(indexName) .get(); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot(repoName, snapshotName) .setWaitForCompletion(true) .setRenamePattern(indexName) 
.setRenameReplacement(restoredIndexName) .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertThat(client.prepareGet(restoredIndexName, typeName, docId).get().isExists(), equalTo(true)); } @Test public void testFreshIndexUUID() { Client client = client(); logger.info("--> creating repository"); assertAcked(client.admin().cluster().preparePutRepository("test-repo") .setType("fs").setSettings(Settings.settingsBuilder() .put("location", randomRepoPath()) .put("compress", randomBoolean()) .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); createIndex("test"); String originalIndexUUID = client().admin().indices().prepareGetSettings("test").get().getSetting("test", IndexMetaData.SETTING_INDEX_UUID); assertTrue(originalIndexUUID, originalIndexUUID != null); assertFalse(originalIndexUUID, originalIndexUUID.equals(IndexMetaData.INDEX_UUID_NA_VALUE)); ensureGreen(); CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test").get(); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); NumShards numShards = getNumShards("test"); cluster().wipeIndices("test"); assertAcked(prepareCreate("test").setSettings(Settings.builder() .put(SETTING_NUMBER_OF_SHARDS, numShards.numPrimaries))); ensureGreen(); String newIndexUUID = client().admin().indices().prepareGetSettings("test").get().getSetting("test", IndexMetaData.SETTING_INDEX_UUID); assertTrue(newIndexUUID, newIndexUUID != null); assertFalse(newIndexUUID, newIndexUUID.equals(IndexMetaData.INDEX_UUID_NA_VALUE)); assertFalse(newIndexUUID, newIndexUUID.equals(originalIndexUUID)); logger.info("--> close index"); client.admin().indices().prepareClose("test").get(); logger.info("--> restore all 
indices from the snapshot"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); ensureGreen(); String newAfterRestoreIndexUUID = client().admin().indices().prepareGetSettings("test").get().getSetting("test", IndexMetaData.SETTING_INDEX_UUID); assertTrue("UUID has changed after restore: " + newIndexUUID + " vs. " + newAfterRestoreIndexUUID, newIndexUUID.equals(newAfterRestoreIndexUUID)); logger.info("--> restore indices with different names"); restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap") .setRenamePattern("(.+)").setRenameReplacement("$1-copy").setWaitForCompletion(true).execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); String copyRestoreUUID = client().admin().indices().prepareGetSettings("test-copy").get().getSetting("test-copy", IndexMetaData.SETTING_INDEX_UUID); assertFalse("UUID has been reused on restore: " + copyRestoreUUID + " vs. 
" + originalIndexUUID, copyRestoreUUID.equals(originalIndexUUID)); } @Test public void restoreWithDifferentMappingsAndSettingsTest() throws Exception { Client client = client(); logger.info("--> creating repository"); assertAcked(client.admin().cluster().preparePutRepository("test-repo") .setType("fs").setSettings(Settings.settingsBuilder() .put("location", randomRepoPath()) .put("compress", randomBoolean()) .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); logger.info("--> create index with foo type"); assertAcked(prepareCreate("test-idx", 2, Settings.builder() .put(indexSettings()).put(SETTING_NUMBER_OF_REPLICAS, between(0, 1)).put("refresh_interval", 10, TimeUnit.SECONDS))); NumShards numShards = getNumShards("test-idx"); assertAcked(client().admin().indices().preparePutMapping("test-idx").setType("foo").setSource("baz", "type=string")); ensureGreen(); logger.info("--> snapshot it"); CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get(); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); logger.info("--> delete the index and recreate it with bar type"); cluster().wipeIndices("test-idx"); assertAcked(prepareCreate("test-idx", 2, Settings.builder() .put(SETTING_NUMBER_OF_SHARDS, numShards.numPrimaries).put(SETTING_NUMBER_OF_REPLICAS, between(0, 1)).put("refresh_interval", 5, TimeUnit.SECONDS))); assertAcked(client().admin().indices().preparePutMapping("test-idx").setType("bar").setSource("baz", "type=string")); ensureGreen(); logger.info("--> close index"); client.admin().indices().prepareClose("test-idx").get(); logger.info("--> restore all indices from the snapshot"); RestoreSnapshotResponse restoreSnapshotResponse = 
client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); logger.info("--> assert that old mapping is restored"); ImmutableOpenMap<String, MappingMetaData> mappings = client().admin().cluster().prepareState().get().getState().getMetaData().getIndices().get("test-idx").getMappings(); assertThat(mappings.get("foo"), notNullValue()); assertThat(mappings.get("bar"), nullValue()); logger.info("--> assert that old settings are restored"); GetSettingsResponse getSettingsResponse = client.admin().indices().prepareGetSettings("test-idx").execute().actionGet(); assertThat(getSettingsResponse.getSetting("test-idx", "index.refresh_interval"), equalTo("10000ms")); } @Test public void emptySnapshotTest() throws Exception { Client client = client(); logger.info("--> creating repository"); PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") .setType("fs").setSettings(Settings.settingsBuilder().put("location", randomRepoPath())).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); logger.info("--> snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).get(); assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0)); assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS)); } @Test public void restoreAliasesTest() throws Exception { Client client = client(); logger.info("--> creating repository"); assertAcked(client.admin().cluster().preparePutRepository("test-repo") .setType("fs").setSettings(Settings.settingsBuilder().put("location", 
randomRepoPath()))); logger.info("--> create test indices"); createIndex("test-idx-1", "test-idx-2", "test-idx-3"); ensureGreen(); logger.info("--> create aliases"); assertAcked(client.admin().indices().prepareAliases() .addAlias("test-idx-1", "alias-123") .addAlias("test-idx-2", "alias-123") .addAlias("test-idx-3", "alias-123") .addAlias("test-idx-1", "alias-1") .get()); assertAliasesExist(client.admin().indices().prepareAliasesExist("alias-123").get()); logger.info("--> snapshot"); assertThat(client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setIndices().setWaitForCompletion(true).get().getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); logger.info("--> delete all indices"); cluster().wipeIndices("test-idx-1", "test-idx-2", "test-idx-3"); assertAliasesMissing(client.admin().indices().prepareAliasesExist("alias-123", "alias-1").get()); logger.info("--> restore snapshot with aliases"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setRestoreGlobalState(true).execute().actionGet(); // We don't restore any indices here assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), allOf(greaterThan(0), equalTo(restoreSnapshotResponse.getRestoreInfo().totalShards()))); logger.info("--> check that aliases are restored"); assertAliasesExist(client.admin().indices().prepareAliasesExist("alias-123", "alias-1").get()); logger.info("--> update aliases"); assertAcked(client.admin().indices().prepareAliases().removeAlias("test-idx-3", "alias-123")); assertAcked(client.admin().indices().prepareAliases().addAlias("test-idx-3", "alias-3")); logger.info("--> delete and close indices"); cluster().wipeIndices("test-idx-1", "test-idx-2"); assertAcked(client.admin().indices().prepareClose("test-idx-3")); assertAliasesMissing(client.admin().indices().prepareAliasesExist("alias-123", "alias-1").get()); logger.info("--> restore snapshot 
without aliases"); restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setRestoreGlobalState(true).setIncludeAliases(false).execute().actionGet(); // We don't restore any indices here assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), allOf(greaterThan(0), equalTo(restoreSnapshotResponse.getRestoreInfo().totalShards()))); logger.info("--> check that aliases are not restored and existing aliases still exist"); assertAliasesMissing(client.admin().indices().prepareAliasesExist("alias-123", "alias-1").get()); assertAliasesExist(client.admin().indices().prepareAliasesExist("alias-3").get()); } @Test public void restoreTemplatesTest() throws Exception { Client client = client(); logger.info("--> creating repository"); assertAcked(client.admin().cluster().preparePutRepository("test-repo") .setType("fs").setSettings(Settings.settingsBuilder().put("location", randomRepoPath()))); logger.info("--> creating test template"); assertThat(client.admin().indices().preparePutTemplate("test-template").setTemplate("te*").addMapping("test-mapping", "{}").get().isAcknowledged(), equalTo(true)); logger.info("--> snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setIndices().setWaitForCompletion(true).get(); assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0)); assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS)); logger.info("--> delete test template"); assertThat(client.admin().indices().prepareDeleteTemplate("test-template").get().isAcknowledged(), equalTo(true)); GetIndexTemplatesResponse getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get(); 
assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template"); logger.info("--> restore cluster state"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setRestoreGlobalState(true).execute().actionGet(); // We don't restore any indices here assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0)); logger.info("--> check that template is restored"); getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get(); assertIndexTemplateExists(getIndexTemplatesResponse, "test-template"); } @Test public void includeGlobalStateTest() throws Exception { Client client = client(); logger.info("--> creating repository"); Path location = randomRepoPath(); assertAcked(client.admin().cluster().preparePutRepository("test-repo") .setType("fs").setSettings(Settings.settingsBuilder().put("location", location))); logger.info("--> creating test template"); assertThat(client.admin().indices().preparePutTemplate("test-template").setTemplate("te*").addMapping("test-mapping", "{}").get().isAcknowledged(), equalTo(true)); logger.info("--> snapshot without global state"); CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-no-global-state").setIndices().setIncludeGlobalState(false).setWaitForCompletion(true).get(); assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0)); assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-no-global-state").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS)); logger.info("--> snapshot with global state"); createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", 
"test-snap-with-global-state").setIndices().setIncludeGlobalState(true).setWaitForCompletion(true).get(); assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0)); assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-with-global-state").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS)); logger.info("--> delete test template"); cluster().wipeTemplates("test-template"); GetIndexTemplatesResponse getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get(); assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template"); logger.info("--> try restoring cluster state from snapshot without global state"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-no-global-state").setWaitForCompletion(true).setRestoreGlobalState(true).execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0)); logger.info("--> check that template wasn't restored"); getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get(); assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template"); logger.info("--> restore cluster state"); restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-with-global-state").setWaitForCompletion(true).setRestoreGlobalState(true).execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0)); logger.info("--> check that template is restored"); getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get(); assertIndexTemplateExists(getIndexTemplatesResponse, "test-template"); createIndex("test-idx"); ensureGreen(); logger.info("--> indexing some data"); for (int i = 0; i < 100; i++) { index("test-idx", "doc", Integer.toString(i), 
"foo", "bar" + i); } refresh(); assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L)); logger.info("--> snapshot without global state but with indices"); createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-no-global-state-with-index").setIndices("test-idx").setIncludeGlobalState(false).setWaitForCompletion(true).get(); assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), greaterThan(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-no-global-state-with-index").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS)); logger.info("--> delete test template and index "); cluster().wipeIndices("test-idx"); cluster().wipeTemplates("test-template"); getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get(); assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template"); logger.info("--> try restoring index and cluster state from snapshot without global state"); restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-no-global-state-with-index").setWaitForCompletion(true).setRestoreGlobalState(true).execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); logger.info("--> check that template wasn't restored but index was"); getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get(); assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template"); assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L)); } @Test public void snapshotFileFailureDuringSnapshotTest() throws Exception { Client client = client(); logger.info("--> creating repository"); 
assertAcked(client.admin().cluster().preparePutRepository("test-repo") .setType(MockRepositoryModule.class.getCanonicalName()).setSettings( Settings.settingsBuilder() .put("location", randomRepoPath()) .put("random", randomAsciiOfLength(10)) .put("random_control_io_exception_rate", 0.2)) .setVerify(false)); createIndex("test-idx"); ensureGreen(); logger.info("--> indexing some data"); for (int i = 0; i < 100; i++) { index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i); } refresh(); assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L)); logger.info("--> snapshot"); try { CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get(); if (createSnapshotResponse.getSnapshotInfo().totalShards() == createSnapshotResponse.getSnapshotInfo().successfulShards()) { // If we are here, that means we didn't have any failures, let's check it assertThat(getFailureCount("test-repo"), equalTo(0L)); } else { assertThat(getFailureCount("test-repo"), greaterThan(0L)); assertThat(createSnapshotResponse.getSnapshotInfo().shardFailures().size(), greaterThan(0)); for (SnapshotShardFailure shardFailure : createSnapshotResponse.getSnapshotInfo().shardFailures()) { assertThat(shardFailure.reason(), containsString("Random IOException")); assertThat(shardFailure.nodeId(), notNullValue()); assertThat(shardFailure.index(), equalTo("test-idx")); } GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap").get(); assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(1)); SnapshotInfo snapshotInfo = getSnapshotsResponse.getSnapshots().get(0); if (snapshotInfo.state() == SnapshotState.SUCCESS) { assertThat(snapshotInfo.shardFailures().size(), greaterThan(0)); assertThat(snapshotInfo.totalShards(), greaterThan(snapshotInfo.successfulShards())); } } } catch (Exception ex) { 
assertThat(getFailureCount("test-repo"), greaterThan(0L)); assertThat(ExceptionsHelper.detailedMessage(ex), containsString("IOException")); } } @Test public void dataFileFailureDuringSnapshotTest() throws Exception { Client client = client(); logger.info("--> creating repository"); assertAcked(client.admin().cluster().preparePutRepository("test-repo") .setType(MockRepositoryModule.class.getCanonicalName()).setSettings( Settings.settingsBuilder() .put("location", randomRepoPath()) .put("random", randomAsciiOfLength(10)) .put("random_data_file_io_exception_rate", 0.3))); createIndex("test-idx"); ensureGreen(); logger.info("--> indexing some data"); for (int i = 0; i < 100; i++) { index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i); } refresh(); assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L)); logger.info("--> snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get(); if (createSnapshotResponse.getSnapshotInfo().totalShards() == createSnapshotResponse.getSnapshotInfo().successfulShards()) { logger.info("--> no failures"); // If we are here, that means we didn't have any failures, let's check it assertThat(getFailureCount("test-repo"), equalTo(0L)); } else { logger.info("--> some failures"); assertThat(getFailureCount("test-repo"), greaterThan(0L)); assertThat(createSnapshotResponse.getSnapshotInfo().shardFailures().size(), greaterThan(0)); for (SnapshotShardFailure shardFailure : createSnapshotResponse.getSnapshotInfo().shardFailures()) { assertThat(shardFailure.nodeId(), notNullValue()); assertThat(shardFailure.index(), equalTo("test-idx")); } GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap").get(); assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(1)); SnapshotInfo snapshotInfo = 
getSnapshotsResponse.getSnapshots().get(0); assertThat(snapshotInfo.state(), equalTo(SnapshotState.PARTIAL)); assertThat(snapshotInfo.shardFailures().size(), greaterThan(0)); assertThat(snapshotInfo.totalShards(), greaterThan(snapshotInfo.successfulShards())); // Verify that snapshot status also contains the same failures SnapshotsStatusResponse snapshotsStatusResponse = client.admin().cluster().prepareSnapshotStatus("test-repo").addSnapshots("test-snap").get(); assertThat(snapshotsStatusResponse.getSnapshots().size(), equalTo(1)); SnapshotStatus snapshotStatus = snapshotsStatusResponse.getSnapshots().get(0); assertThat(snapshotStatus.getIndices().size(), equalTo(1)); SnapshotIndexStatus indexStatus = snapshotStatus.getIndices().get("test-idx"); assertThat(indexStatus, notNullValue()); assertThat(indexStatus.getShardsStats().getFailedShards(), equalTo(snapshotInfo.failedShards())); assertThat(indexStatus.getShardsStats().getDoneShards(), equalTo(snapshotInfo.successfulShards())); assertThat(indexStatus.getShards().size(), equalTo(snapshotInfo.totalShards())); int numberOfFailures = 0; for (SnapshotIndexShardStatus shardStatus : indexStatus.getShards().values()) { if (shardStatus.getStage() == SnapshotIndexShardStage.FAILURE) { assertThat(shardStatus.getFailure(), notNullValue()); numberOfFailures++; } else { assertThat(shardStatus.getFailure(), nullValue()); } } assertThat(indexStatus.getShardsStats().getFailedShards(), equalTo(numberOfFailures)); } } @Test public void dataFileFailureDuringRestoreTest() throws Exception { Path repositoryLocation = randomRepoPath(); Client client = client(); logger.info("--> creating repository"); assertAcked(client.admin().cluster().preparePutRepository("test-repo") .setType("fs").setSettings(Settings.settingsBuilder().put("location", repositoryLocation))); createIndex("test-idx"); ensureGreen(); logger.info("--> indexing some data"); for (int i = 0; i < 100; i++) { index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i); } 
refresh(); assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L)); logger.info("--> snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get(); assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(createSnapshotResponse.getSnapshotInfo().successfulShards())); logger.info("--> update repository with mock version"); assertAcked(client.admin().cluster().preparePutRepository("test-repo") .setType(MockRepositoryModule.class.getCanonicalName()).setSettings( Settings.settingsBuilder() .put("location", repositoryLocation) .put("random", randomAsciiOfLength(10)) .put("random_data_file_io_exception_rate", 0.3))); // Test restore after index deletion logger.info("--> delete index"); cluster().wipeIndices("test-idx"); logger.info("--> restore index after deletion"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); CountResponse countResponse = client.prepareCount("test-idx").get(); assertThat(countResponse.getCount(), equalTo(100L)); } @Test public void deletionOfFailingToRecoverIndexShouldStopRestore() throws Exception { Path repositoryLocation = randomRepoPath(); Client client = client(); logger.info("--> creating repository"); assertAcked(client.admin().cluster().preparePutRepository("test-repo") .setType("fs").setSettings(Settings.settingsBuilder().put("location", repositoryLocation))); createIndex("test-idx"); ensureGreen(); logger.info("--> indexing some data"); for (int i = 0; i < 100; i++) { index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i); } refresh(); 
assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L)); logger.info("--> snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get(); assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(createSnapshotResponse.getSnapshotInfo().successfulShards())); logger.info("--> update repository with mock version"); assertAcked(client.admin().cluster().preparePutRepository("test-repo") .setType(MockRepositoryModule.class.getCanonicalName()).setSettings( Settings.settingsBuilder() .put("location", repositoryLocation) .put("random", randomAsciiOfLength(10)) .put("random_data_file_io_exception_rate", 1.0) // Fail completely )); // Test restore after index deletion logger.info("--> delete index"); cluster().wipeIndices("test-idx"); logger.info("--> restore index after deletion"); ListenableActionFuture<RestoreSnapshotResponse> restoreSnapshotResponseFuture = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute(); logger.info("--> wait for the index to appear"); // that would mean that recovery process started and failing assertThat(waitForIndex("test-idx", TimeValue.timeValueSeconds(10)), equalTo(true)); logger.info("--> delete index"); cluster().wipeIndices("test-idx"); logger.info("--> get restore results"); // Now read restore results and make sure it failed RestoreSnapshotResponse restoreSnapshotResponse = restoreSnapshotResponseFuture.actionGet(TimeValue.timeValueSeconds(10)); assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), greaterThan(0)); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(restoreSnapshotResponse.getRestoreInfo().failedShards())); logger.info("--> restoring working repository"); 
assertAcked(client.admin().cluster().preparePutRepository("test-repo") .setType("fs").setSettings(Settings.settingsBuilder().put("location", repositoryLocation))); logger.info("--> trying to restore index again"); restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); CountResponse countResponse = client.prepareCount("test-idx").get(); assertThat(countResponse.getCount(), equalTo(100L)); } @Test public void unallocatedShardsTest() throws Exception { Client client = client(); logger.info("--> creating repository"); assertAcked(client.admin().cluster().preparePutRepository("test-repo") .setType("fs").setSettings(Settings.settingsBuilder() .put("location", randomRepoPath()))); logger.info("--> creating index that cannot be allocated"); prepareCreate("test-idx", 2, Settings.builder().put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + ".tag", "nowhere").put("index.number_of_shards", 3)).get(); logger.info("--> snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get(); assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.FAILED)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0)); assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(3)); assertThat(createSnapshotResponse.getSnapshotInfo().reason(), startsWith("Indices don't have primary shards")); } @Test public void deleteSnapshotTest() throws Exception { final int numberOfSnapshots = between(5, 15); Client client = client(); Path repo = randomRepoPath(); logger.info("--> creating repository at " + repo.toAbsolutePath()); 
assertAcked(client.admin().cluster().preparePutRepository("test-repo") .setType("fs").setSettings(Settings.settingsBuilder() .put("location", repo) .put("compress", false) .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); createIndex("test-idx"); ensureGreen(); int[] numberOfFiles = new int[numberOfSnapshots]; logger.info("--> creating {} snapshots ", numberOfSnapshots); for (int i = 0; i < numberOfSnapshots; i++) { for (int j = 0; j < 10; j++) { index("test-idx", "doc", Integer.toString(i * 10 + j), "foo", "bar" + i * 10 + j); } refresh(); logger.info("--> snapshot {}", i); CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-" + i).setWaitForCompletion(true).setIndices("test-idx").get(); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); // Store number of files after each snapshot numberOfFiles[i] = numberOfFiles(repo); } assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(10L * numberOfSnapshots)); int numberOfFilesBeforeDeletion = numberOfFiles(repo); logger.info("--> delete all snapshots except the first one and last one"); for (int i = 1; i < numberOfSnapshots - 1; i++) { client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap-" + i).get(); } int numberOfFilesAfterDeletion = numberOfFiles(repo); assertThat(numberOfFilesAfterDeletion, lessThan(numberOfFilesBeforeDeletion)); logger.info("--> delete index"); cluster().wipeIndices("test-idx"); logger.info("--> restore index"); String lastSnapshot = "test-snap-" + (numberOfSnapshots - 1); RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", lastSnapshot).setWaitForCompletion(true).execute().actionGet(); 
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(10L * numberOfSnapshots)); logger.info("--> delete the last snapshot"); client.admin().cluster().prepareDeleteSnapshot("test-repo", lastSnapshot).get(); logger.info("--> make sure that number of files is back to what it was when the first snapshot was made"); assertThat(numberOfFiles(repo), equalTo(numberOfFiles[0])); } @Test public void deleteSnapshotWithMissingIndexAndShardMetadataTest() throws Exception { Client client = client(); Path repo = randomRepoPath(); logger.info("--> creating repository at " + repo.toAbsolutePath()); assertAcked(client.admin().cluster().preparePutRepository("test-repo") .setType("fs").setSettings(Settings.settingsBuilder() .put("location", repo) .put("compress", false) .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); createIndex("test-idx-1", "test-idx-2"); ensureYellow(); logger.info("--> indexing some data"); indexRandom(true, client().prepareIndex("test-idx-1", "doc").setSource("foo", "bar"), client().prepareIndex("test-idx-2", "doc").setSource("foo", "bar")); logger.info("--> creating snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).setIndices("test-idx-*").get(); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); logger.info("--> delete index metadata and shard metadata"); Path indices = repo.resolve("indices"); Path testIndex1 = indices.resolve("test-idx-1"); Path testIndex2 = indices.resolve("test-idx-2"); Path testIndex2Shard0 = testIndex2.resolve("0"); IOUtils.deleteFilesIgnoringExceptions(testIndex1.resolve("snapshot-test-snap-1")); 
IOUtils.deleteFilesIgnoringExceptions(testIndex2Shard0.resolve("snapshot-test-snap-1")); logger.info("--> delete snapshot"); client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap-1").get(); logger.info("--> make sure snapshot doesn't exist"); assertThrows(client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1"), SnapshotMissingException.class); } @Test public void deleteSnapshotWithMissingMetadataTest() throws Exception { Client client = client(); Path repo = randomRepoPath(); logger.info("--> creating repository at " + repo.toAbsolutePath()); assertAcked(client.admin().cluster().preparePutRepository("test-repo") .setType("fs").setSettings(Settings.settingsBuilder() .put("location", repo) .put("compress", false) .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); createIndex("test-idx-1", "test-idx-2"); ensureYellow(); logger.info("--> indexing some data"); indexRandom(true, client().prepareIndex("test-idx-1", "doc").setSource("foo", "bar"), client().prepareIndex("test-idx-2", "doc").setSource("foo", "bar")); logger.info("--> creating snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).setIndices("test-idx-*").get(); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); logger.info("--> delete index metadata and shard metadata"); Path metadata = repo.resolve("meta-test-snap-1.dat"); Files.delete(metadata); logger.info("--> delete snapshot"); client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap-1").get(); logger.info("--> make sure snapshot doesn't exist"); assertThrows(client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1"), SnapshotMissingException.class); } @Test public void 
// Tail of deleteSnapshotWithCorruptedSnapshotFileTest (modifiers/annotation precede this chunk):
// snapshots two indices, truncates the snapshot metadata file on disk, then verifies the
// corrupted snapshot can still be deleted and re-created under the same name.
deleteSnapshotWithCorruptedSnapshotFileTest() throws Exception {
    Client client = client();

    Path repo = randomRepoPath();
    logger.info("--> creating repository at " + repo.toAbsolutePath());
    // "fs" repository with compression off so the on-disk snapshot file layout is predictable
    assertAcked(client.admin().cluster().preparePutRepository("test-repo")
            .setType("fs").setSettings(Settings.settingsBuilder()
                    .put("location", repo)
                    .put("compress", false)
                    .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));

    createIndex("test-idx-1", "test-idx-2");
    ensureYellow();
    logger.info("--> indexing some data");
    indexRandom(true,
            client().prepareIndex("test-idx-1", "doc").setSource("foo", "bar"),
            client().prepareIndex("test-idx-2", "doc").setSource("foo", "bar"));

    logger.info("--> creating snapshot");
    CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).setIndices("test-idx-*").get();
    assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
    assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));

    logger.info("--> truncate snapshot file to make it unreadable");
    // Corrupt the top-level snapshot metadata file; truncation length is randomized (0..10 bytes kept)
    Path snapshotPath = repo.resolve("snapshot-test-snap-1");
    try (SeekableByteChannel outChan = Files.newByteChannel(snapshotPath, StandardOpenOption.WRITE)) {
        outChan.truncate(randomInt(10));
    }

    logger.info("--> delete snapshot");
    client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap-1").get();

    logger.info("--> make sure snapshot doesn't exist");
    assertThrows(client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1"), SnapshotMissingException.class);

    logger.info("--> make sure that we can create the snapshot again");
    createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).setIndices("test-idx-*").get();
    assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
    assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
}

// Verifies that a wildcard snapshot silently skips a closed index (only the open index
// ends up in the snapshot), while explicitly naming a closed index is rejected with
// INDEX_CLOSED_BLOCK.
@Test
public void snapshotClosedIndexTest() throws Exception {
    Client client = client();

    logger.info("--> creating repository");
    assertAcked(client.admin().cluster().preparePutRepository("test-repo")
            .setType("fs").setSettings(Settings.settingsBuilder()
                    .put("location", randomRepoPath())));

    createIndex("test-idx", "test-idx-closed");
    ensureGreen();
    logger.info("--> closing index test-idx-closed");
    assertAcked(client.admin().indices().prepareClose("test-idx-closed"));
    // Closed index: metadata state is CLOSE and it has no routing table entry
    ClusterStateResponse stateResponse = client.admin().cluster().prepareState().get();
    assertThat(stateResponse.getState().metaData().index("test-idx-closed").state(), equalTo(IndexMetaData.State.CLOSE));
    assertThat(stateResponse.getState().routingTable().index("test-idx-closed"), nullValue());

    logger.info("--> snapshot");
    // Wildcard matches both indices but only the open one is snapshotted
    CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx*").get();
    assertThat(createSnapshotResponse.getSnapshotInfo().indices().size(), equalTo(1));
    assertThat(createSnapshotResponse.getSnapshotInfo().shardFailures().size(), equalTo(0));

    logger.info("--> deleting snapshot");
    client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap").get();

    logger.info("--> snapshot with closed index");
    assertBlocked(client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx", "test-idx-closed"), MetaDataIndexStateService.INDEX_CLOSED_BLOCK);
}

// Snapshotting a single index that is closed must be blocked outright.
@Test
public void snapshotSingleClosedIndexTest() throws Exception {
    Client client = client();

    logger.info("--> creating repository");
    assertAcked(client.admin().cluster().preparePutRepository("test-repo")
            .setType("fs").setSettings(Settings.settingsBuilder()
                    .put("location", randomRepoPath())));

    createIndex("test-idx");
    ensureGreen();
    logger.info("--> closing index test-idx");
    assertAcked(client.admin().indices().prepareClose("test-idx"));

    logger.info("--> snapshot");
    assertBlocked(client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1")
            .setWaitForCompletion(true).setIndices("test-idx"), MetaDataIndexStateService.INDEX_CLOSED_BLOCK);
}

// Exercises index renaming on restore: valid rename patterns, restore onto closed copies,
// name/alias collisions, and invalid replacement names. (Body continues on later lines.)
@Test
public void renameOnRestoreTest() throws Exception {
    Client client = client();

    logger.info("--> creating repository");
    assertAcked(client.admin().cluster().preparePutRepository("test-repo")
            .setType("fs").setSettings(Settings.settingsBuilder()
                    .put("location", randomRepoPath())));

    createIndex("test-idx-1", "test-idx-2", "test-idx-3");
    ensureGreen();

    // Aliases are used below to provoke rename-collision failures
    assertAcked(client.admin().indices().prepareAliases()
            .addAlias("test-idx-1", "alias-1")
            .addAlias("test-idx-2", "alias-2")
            .addAlias("test-idx-3", "alias-3")
    );

    logger.info("--> indexing some data");
    for (int i = 0; i < 100; i++) {
        index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i);
        index("test-idx-2", "doc", Integer.toString(i), "foo", "bar" + i);
    }
    refresh();
    assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
    assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L));

    logger.info("--> snapshot");
    CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-1", "test-idx-2").get();
    assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
    assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));

    logger.info("--> restore indices with different names");
    RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap")
            .setRenamePattern("(.+)").setRenameReplacement("$1-copy").setWaitForCompletion(true).execute().actionGet();
    // (continuation of renameOnRestoreTest)
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));

    assertThat(client.prepareCount("test-idx-1-copy").get().getCount(), equalTo(100L));
    assertThat(client.prepareCount("test-idx-2-copy").get().getCount(), equalTo(100L));

    logger.info("--> close just restored indices");
    client.admin().indices().prepareClose("test-idx-1-copy", "test-idx-2-copy").get();

    logger.info("--> and try to restore these indices again");
    // Restoring over the now-closed copies must succeed
    restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap")
            .setRenamePattern("(.+)").setRenameReplacement("$1-copy").setWaitForCompletion(true).execute().actionGet();
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));

    assertThat(client.prepareCount("test-idx-1-copy").get().getCount(), equalTo(100L));
    assertThat(client.prepareCount("test-idx-2-copy").get().getCount(), equalTo(100L));

    logger.info("--> close indices");
    assertAcked(client.admin().indices().prepareClose("test-idx-1", "test-idx-2-copy"));

    logger.info("--> restore indices with different names");
    // Narrower pattern: only *-2 gets renamed this time
    restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap")
            .setRenamePattern("(.+-2)").setRenameReplacement("$1-copy").setWaitForCompletion(true).execute().actionGet();
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));

    logger.info("--> delete indices");
    cluster().wipeIndices("test-idx-1", "test-idx-1-copy", "test-idx-2", "test-idx-2-copy");

    logger.info("--> try renaming indices using the same name");
    try {
        // Mapping every restored index to one identical name must collide and fail
        client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setRenamePattern("(.+)").setRenameReplacement("same-name").setWaitForCompletion(true).execute().actionGet();
        fail("Shouldn't be here");
    } catch (SnapshotRestoreException ex) {
        // Expected
    }

    logger.info("--> try renaming indices using the same name");
    try {
        // Renaming test-idx-2 onto the name of another restored index must fail
        client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setRenamePattern("test-idx-2").setRenameReplacement("test-idx-1").setWaitForCompletion(true).execute().actionGet();
        fail("Shouldn't be here");
    } catch (SnapshotRestoreException ex) {
        // Expected
    }

    logger.info("--> try renaming indices using invalid index name");
    try {
        client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setIndices("test-idx-1").setRenamePattern(".+").setRenameReplacement("__WRONG__").setWaitForCompletion(true).execute().actionGet();
        fail("Shouldn't be here");
    } catch (InvalidIndexNameException ex) {
        // Expected
    }

    logger.info("--> try renaming indices into existing alias name");
    try {
        client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setIndices("test-idx-1").setRenamePattern(".+").setRenameReplacement("alias-3").setWaitForCompletion(true).execute().actionGet();
        fail("Shouldn't be here");
    } catch (InvalidIndexNameException ex) {
        // Expected
    }

    logger.info("--> try renaming indices into existing alias of itself");
    try {
        client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setIndices("test-idx-1").setRenamePattern("test-idx").setRenameReplacement("alias").setWaitForCompletion(true).execute().actionGet();
        fail("Shouldn't be here");
    } catch (SnapshotRestoreException ex) {
        // Expected
    }

    logger.info("--> try renaming indices into existing alias of another restored index");
    try {
        client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setIndices("test-idx-1", "test-idx-2").setRenamePattern("test-idx-1").setRenameReplacement("alias-2").setWaitForCompletion(true).execute().actionGet();
        fail("Shouldn't be here");
    } catch (SnapshotRestoreException ex) {
        // Expected
    }

    logger.info("--> try renaming indices into existing alias of itself, but don't restore aliases ");
    // With setIncludeAliases(false) the alias name is not re-created, so this rename is allowed
    restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap")
            .setIndices("test-idx-1").setRenamePattern("test-idx").setRenameReplacement("alias")
            .setWaitForCompletion(true).setIncludeAliases(false).execute().actionGet();
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
}

// Blocks snapshotting on one node, relocates that node's shards away mid-snapshot, then
// verifies the snapshot still completes successfully and restores from a plain fs repo.
@Test
public void moveShardWhileSnapshottingTest() throws Exception {
    Client client = client();
    Path repositoryLocation = randomRepoPath();
    logger.info("--> creating repository");
    // Mock repository type supports blocking/unblocking individual nodes mid-snapshot
    assertAcked(client.admin().cluster().preparePutRepository("test-repo")
            .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
                    Settings.settingsBuilder()
                            .put("location", repositoryLocation)
                            .put("random", randomAsciiOfLength(10))
                            .put("wait_after_unblock", 200)));

    // Create index on 2 nodes and make sure each node has a primary by setting no replicas
    assertAcked(prepareCreate("test-idx", 2, Settings.builder().put("number_of_replicas", 0)));

    logger.info("--> indexing some data");
    for (int i = 0; i < 100; i++) {
        index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
    }
    refresh();
    assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));

    // Pick one node and block it
    String blockedNode = blockNodeWithIndex("test-idx");

    logger.info("--> snapshot");
    client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get();

    logger.info("--> waiting for block to kick in");
    waitForBlock(blockedNode, "test-repo", TimeValue.timeValueSeconds(60));

    logger.info("--> execution was blocked on node [{}], moving shards away from this node", blockedNode);
    // Allocation-exclude the blocked node so its shards relocate while the snapshot is stuck
    Settings.Builder excludeSettings = Settings.builder().put("index.routing.allocation.exclude._name", blockedNode);
    client().admin().indices().prepareUpdateSettings("test-idx").setSettings(excludeSettings).get();

    logger.info("--> unblocking blocked node");
    unblockNode(blockedNode);

    logger.info("--> waiting for completion");
    SnapshotInfo snapshotInfo = waitForCompletion("test-repo", "test-snap", TimeValue.timeValueSeconds(600));
    logger.info("Number of failed shards [{}]",
    // (continuation of moveShardWhileSnapshottingTest)
    snapshotInfo.shardFailures().size());
    logger.info("--> done");

    List<SnapshotInfo> snapshotInfos = client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots();

    assertThat(snapshotInfos.size(), equalTo(1));
    assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS));
    assertThat(snapshotInfos.get(0).shardFailures().size(), equalTo(0));

    logger.info("--> delete index");
    cluster().wipeIndices("test-idx");

    logger.info("--> replace mock repository with real one at the same location");
    assertAcked(client.admin().cluster().preparePutRepository("test-repo")
            .setType("fs").setSettings(Settings.settingsBuilder().put("location", repositoryLocation)));

    logger.info("--> restore index");
    RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
    assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
}

// Verifies an in-use repository can neither be deleted nor re-pointed while a snapshot is
// running, while registering a repository under a different name is still allowed.
@Test
public void deleteRepositoryWhileSnapshottingTest() throws Exception {
    Client client = client();
    Path repositoryLocation = randomRepoPath();
    logger.info("--> creating repository");
    PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
            .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
                    Settings.settingsBuilder()
                            .put("location", repositoryLocation)
                            .put("random", randomAsciiOfLength(10))
                            .put("wait_after_unblock", 200)
            ).get();
    assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));

    // Create index on 2 nodes and make sure each node has a primary by setting no replicas
    assertAcked(prepareCreate("test-idx", 2, Settings.builder().put("number_of_replicas", 0)));

    logger.info("--> indexing some data");
    for (int i = 0; i < 100; i++) {
        index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
    }
    refresh();
    assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));

    // Pick one node and block it
    String blockedNode = blockNodeWithIndex("test-idx");

    logger.info("--> snapshot");
    client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get();

    logger.info("--> waiting for block to kick in");
    waitForBlock(blockedNode, "test-repo", TimeValue.timeValueSeconds(60));

    logger.info("--> execution was blocked on node [{}], trying to delete repository", blockedNode);

    try {
        client.admin().cluster().prepareDeleteRepository("test-repo").execute().get();
        fail("shouldn't be able to delete in-use repository");
    } catch (Exception ex) {
        logger.info("--> in-use repository deletion failed");
    }

    logger.info("--> trying to move repository to another location");
    try {
        // Re-registering the same repo name with a new location counts as replacement
        client.admin().cluster().preparePutRepository("test-repo")
                .setType("fs").setSettings(Settings.settingsBuilder().put("location", repositoryLocation.resolve("test"))
        ).get();
        fail("shouldn't be able to replace in-use repository");
    } catch (Exception ex) {
        logger.info("--> in-use repository replacement failed");
    }

    logger.info("--> trying to create a repository with different name");
    assertAcked(client.admin().cluster().preparePutRepository("test-repo-2")
            .setType("fs").setSettings(Settings.settingsBuilder().put("location", repositoryLocation.resolve("test"))));

    logger.info("--> unblocking blocked node");
    unblockNode(blockedNode);
    logger.info("--> waiting for completion");
    SnapshotInfo snapshotInfo = waitForCompletion("test-repo", "test-snap", TimeValue.timeValueSeconds(600));
    logger.info("Number of failed shards [{}]", snapshotInfo.shardFailures().size());
    logger.info("--> done");

    List<SnapshotInfo> snapshotInfos = client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots();

    assertThat(snapshotInfos.size(), equalTo(1));
    assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS));
assertThat(snapshotInfos.get(0).shardFailures().size(), equalTo(0)); logger.info("--> delete index"); cluster().wipeIndices("test-idx"); logger.info("--> replace mock repository with real one at the same location"); assertAcked(client.admin().cluster().preparePutRepository("test-repo") .setType("fs").setSettings(Settings.settingsBuilder().put("location", repositoryLocation))); logger.info("--> restore index"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L)); } @Test public void urlRepositoryTest() throws Exception { Client client = client(); logger.info("--> creating repository"); Path repositoryLocation = randomRepoPath(); assertAcked(client.admin().cluster().preparePutRepository("test-repo") .setType("fs").setSettings(Settings.settingsBuilder() .put("location", repositoryLocation) .put("compress", randomBoolean()) .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); createIndex("test-idx"); ensureGreen(); logger.info("--> indexing some data"); for (int i = 0; i < 100; i++) { index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i); } refresh(); assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L)); logger.info("--> snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get(); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); 
assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS)); logger.info("--> delete index"); cluster().wipeIndices("test-idx"); logger.info("--> create read-only URL repository"); assertAcked(client.admin().cluster().preparePutRepository("url-repo") .setType("url").setSettings(Settings.settingsBuilder() .put("url", repositoryLocation.toUri().toURL()) .put("list_directories", randomBoolean()))); logger.info("--> restore index after deletion"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("url-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L)); logger.info("--> list available shapshots"); GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("url-repo").get(); assertThat(getSnapshotsResponse.getSnapshots(), notNullValue()); assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(1)); logger.info("--> delete snapshot"); DeleteSnapshotResponse deleteSnapshotResponse = client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap").get(); assertAcked(deleteSnapshotResponse); logger.info("--> list available shapshot again, no snapshots should be returned"); getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("url-repo").get(); assertThat(getSnapshotsResponse.getSnapshots(), notNullValue()); assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(0)); } @Test public void throttlingTest() throws Exception { Client client = client(); logger.info("--> creating repository"); Path repositoryLocation = randomRepoPath(); boolean throttleSnapshot = randomBoolean(); boolean throttleRestore = randomBoolean(); 
    // (continuation of throttlingTest) — "0.5k"/sec enables throttling; "0" disables it
    assertAcked(client.admin().cluster().preparePutRepository("test-repo")
            .setType("fs").setSettings(Settings.settingsBuilder()
                    .put("location", repositoryLocation)
                    .put("compress", randomBoolean())
                    .put("chunk_size", randomIntBetween(1000, 10000), ByteSizeUnit.BYTES)
                    .put("max_restore_bytes_per_sec", throttleRestore ? "0.5k" : "0")
                    .put("max_snapshot_bytes_per_sec", throttleSnapshot ? "0.5k" : "0")));

    createIndex("test-idx");
    ensureGreen();

    logger.info("--> indexing some data");
    for (int i = 0; i < 100; i++) {
        index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
    }
    refresh();
    assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));

    logger.info("--> snapshot");
    CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
    assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
    assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));

    logger.info("--> delete index");
    cluster().wipeIndices("test-idx");

    logger.info("--> restore index");
    RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
    assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));

    // Accumulate throttle pause time across all data nodes; non-zero iff throttling was on
    long snapshotPause = 0L;
    long restorePause = 0L;
    for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) {
        snapshotPause += repositoriesService.repository("test-repo").snapshotThrottleTimeInNanos();
        restorePause += repositoriesService.repository("test-repo").restoreThrottleTimeInNanos();
    }

    if (throttleSnapshot) {
        assertThat(snapshotPause, greaterThan(0L));
    } else {
        assertThat(snapshotPause, equalTo(0L));
    }

    if (throttleRestore) {
        assertThat(restorePause, greaterThan(0L));
    } else {
        assertThat(restorePause, equalTo(0L));
    }
}

// Exercises the snapshot status API while a snapshot is blocked mid-flight and again after
// completion: per-repo view, all-currently-running view, and "_current" must all agree.
@Test
public void snapshotStatusTest() throws Exception {
    Client client = client();
    Path repositoryLocation = randomRepoPath();
    logger.info("--> creating repository");
    PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
            .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
                    Settings.settingsBuilder()
                            .put("location", repositoryLocation)
                            .put("random", randomAsciiOfLength(10))
                            .put("wait_after_unblock", 200)
            ).get();
    assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));

    // Create index on 2 nodes and make sure each node has a primary by setting no replicas
    assertAcked(prepareCreate("test-idx", 2, Settings.builder().put("number_of_replicas", 0)));

    logger.info("--> indexing some data");
    for (int i = 0; i < 100; i++) {
        index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
    }
    refresh();
    assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));

    // Pick one node and block it
    String blockedNode = blockNodeWithIndex("test-idx");

    logger.info("--> snapshot");
    client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get();

    logger.info("--> waiting for block to kick in");
    waitForBlock(blockedNode, "test-repo", TimeValue.timeValueSeconds(60));

    logger.info("--> execution was blocked on node [{}], checking snapshot status with specified repository and snapshot", blockedNode);
    SnapshotsStatusResponse response = client.admin().cluster().prepareSnapshotStatus("test-repo").execute().actionGet();
    assertThat(response.getSnapshots().size(), equalTo(1));
    SnapshotStatus snapshotStatus = response.getSnapshots().get(0);
    assertThat(snapshotStatus.getState(), equalTo(SnapshotsInProgress.State.STARTED));
    // We blocked the node during data write operation, so at least one shard snapshot should be in STARTED stage
    assertThat(snapshotStatus.getShardsStats().getStartedShards(), greaterThan(0));
    for (SnapshotIndexShardStatus shardStatus : snapshotStatus.getIndices().get("test-idx")) {
        if (shardStatus.getStage() == SnapshotIndexShardStage.STARTED) {
            assertThat(shardStatus.getNodeId(), notNullValue());
        }
    }

    logger.info("--> checking snapshot status for all currently running and snapshot with empty repository", blockedNode);
    response = client.admin().cluster().prepareSnapshotStatus().execute().actionGet();
    assertThat(response.getSnapshots().size(), equalTo(1));
    snapshotStatus = response.getSnapshots().get(0);
    assertThat(snapshotStatus.getState(), equalTo(SnapshotsInProgress.State.STARTED));
    // We blocked the node during data write operation, so at least one shard snapshot should be in STARTED stage
    assertThat(snapshotStatus.getShardsStats().getStartedShards(), greaterThan(0));
    for (SnapshotIndexShardStatus shardStatus : snapshotStatus.getIndices().get("test-idx")) {
        if (shardStatus.getStage() == SnapshotIndexShardStage.STARTED) {
            assertThat(shardStatus.getNodeId(), notNullValue());
        }
    }

    logger.info("--> checking that _current returns the currently running snapshot", blockedNode);
    GetSnapshotsResponse getResponse = client.admin().cluster().prepareGetSnapshots("test-repo").setCurrentSnapshot().execute().actionGet();
    assertThat(getResponse.getSnapshots().size(), equalTo(1));
    SnapshotInfo snapshotInfo = getResponse.getSnapshots().get(0);
    assertThat(snapshotInfo.state(), equalTo(SnapshotState.IN_PROGRESS));

    logger.info("--> unblocking blocked node");
    unblockNode(blockedNode);

    snapshotInfo = waitForCompletion("test-repo", "test-snap", TimeValue.timeValueSeconds(600));
    logger.info("Number of failed shards [{}]", snapshotInfo.shardFailures().size());
    logger.info("--> done");

    logger.info("--> checking snapshot status again after snapshot is done", blockedNode);
    response = client.admin().cluster().prepareSnapshotStatus("test-repo").addSnapshots("test-snap").execute().actionGet();
    snapshotStatus = response.getSnapshots().get(0);
    assertThat(snapshotStatus.getIndices().size(), equalTo(1));
    SnapshotIndexStatus indexStatus = snapshotStatus.getIndices().get("test-idx");
    assertThat(indexStatus, notNullValue());
    assertThat(indexStatus.getShardsStats().getInitializingShards(), equalTo(0));
    assertThat(indexStatus.getShardsStats().getFailedShards(), equalTo(snapshotInfo.failedShards()));
    assertThat(indexStatus.getShardsStats().getDoneShards(), equalTo(snapshotInfo.successfulShards()));
    assertThat(indexStatus.getShards().size(), equalTo(snapshotInfo.totalShards()));

    logger.info("--> checking snapshot status after it is done with empty repository", blockedNode);
    response = client.admin().cluster().prepareSnapshotStatus().execute().actionGet();
    assertThat(response.getSnapshots().size(), equalTo(0));

    logger.info("--> checking that _current no longer returns the snapshot", blockedNode);
    assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("_current").execute().actionGet().getSnapshots().isEmpty(), equalTo(true));

    try {
        client.admin().cluster().prepareSnapshotStatus("test-repo").addSnapshots("test-snap-doesnt-exist").execute().actionGet();
        fail();
    } catch (SnapshotMissingException ex) {
        // Expected
    }
}

// Starts a snapshot while primary shards are relocating and verifies it still succeeds.
// (Body continues on the following lines.)
@Test
public void snapshotRelocatingPrimary() throws Exception {
    Client client = client();
    logger.info("--> creating repository");
    assertAcked(client.admin().cluster().preparePutRepository("test-repo")
            .setType("fs").setSettings(Settings.settingsBuilder()
                    .put("location", randomRepoPath())
                    .put("compress", randomBoolean())
                    .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));

    // Create index on 1 nodes and make sure each node has a primary by setting no replicas
    assertAcked(prepareCreate("test-idx", 1, Settings.builder().put("number_of_replicas", 0)));

    logger.info("--> indexing some data");
    for (int i = 0;
    // (continuation of snapshotRelocatingPrimary)
    i < 100; i++) {
        index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
    }
    refresh();
    assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));

    // Update settings to make sure that relocation is slow so we can start snapshot before relocation is finished
    assertAcked(client.admin().indices().prepareUpdateSettings("test-idx").setSettings(Settings.builder()
            .put(IndexStore.INDEX_STORE_THROTTLE_TYPE, "all")
            .put(IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, 100, ByteSizeUnit.BYTES)
    ));

    logger.info("--> start relocations");
    allowNodes("test-idx", internalCluster().numDataNodes());

    logger.info("--> wait for relocations to start");
    waitForRelocationsToStart("test-idx", TimeValue.timeValueMillis(300));

    logger.info("--> snapshot");
    client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get();

    // Update settings to back to normal
    assertAcked(client.admin().indices().prepareUpdateSettings("test-idx").setSettings(Settings.builder()
            .put(IndexStore.INDEX_STORE_THROTTLE_TYPE, "node")
    ));

    logger.info("--> wait for snapshot to complete");
    SnapshotInfo snapshotInfo = waitForCompletion("test-repo", "test-snap", TimeValue.timeValueSeconds(600));
    assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS));
    assertThat(snapshotInfo.shardFailures().size(), equalTo(0));
    logger.info("--> done");
}

// Takes three snapshots of the same single-shard index and checks incrementality via
// processed-file counts: everything copied first, nothing new second, only changed files third.
public void testSnapshotMoreThanOnce() throws ExecutionException, InterruptedException {
    Client client = client();

    logger.info("--> creating repository");
    assertAcked(client.admin().cluster().preparePutRepository("test-repo")
            .setType("fs").setSettings(Settings.settingsBuilder()
                    .put("location", randomRepoPath())
                    .put("compress", randomBoolean())
                    .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));

    // only one shard
    assertAcked(prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)));
    ensureGreen();
    logger.info("--> indexing");

    final int numdocs = randomIntBetween(10, 100);
    IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs];
    for (int i = 0; i < builders.length; i++) {
        builders[i] = client().prepareIndex("test", "doc", Integer.toString(i)).setSource("foo", "bar" + i);
    }
    indexRandom(true, builders);
    flushAndRefresh();
    // Force-merge to one segment so file counts between snapshots are deterministic
    assertNoFailures(client().admin().indices().prepareOptimize("test").setFlush(true).setMaxNumSegments(1).get());

    CreateSnapshotResponse createSnapshotResponseFirst = client.admin().cluster().prepareCreateSnapshot("test-repo", "test").setWaitForCompletion(true).setIndices("test").get();
    assertThat(createSnapshotResponseFirst.getSnapshotInfo().successfulShards(), greaterThan(0));
    assertThat(createSnapshotResponseFirst.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponseFirst.getSnapshotInfo().totalShards()));
    assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
    {
        SnapshotStatus snapshotStatus = client.admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test").get().getSnapshots().get(0);
        List<SnapshotIndexShardStatus> shards = snapshotStatus.getShards();
        for (SnapshotIndexShardStatus status : shards) {
            // First snapshot has to process (copy) multiple files
            assertThat(status.getStats().getProcessedFiles(), greaterThan(1));
        }
    }

    CreateSnapshotResponse createSnapshotResponseSecond = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-1").setWaitForCompletion(true).setIndices("test").get();
    assertThat(createSnapshotResponseSecond.getSnapshotInfo().successfulShards(), greaterThan(0));
    assertThat(createSnapshotResponseSecond.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponseSecond.getSnapshotInfo().totalShards()));
    assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-1").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
    {
        SnapshotStatus snapshotStatus = client.admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test-1").get().getSnapshots().get(0);
        List<SnapshotIndexShardStatus> shards = snapshotStatus.getShards();
        for (SnapshotIndexShardStatus status : shards) {
            // Nothing changed since the first snapshot, so no files are processed
            assertThat(status.getStats().getProcessedFiles(), equalTo(0));
        }
    }

    client().prepareDelete("test", "doc", "1").get();
    CreateSnapshotResponse createSnapshotResponseThird = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-2").setWaitForCompletion(true).setIndices("test").get();
    assertThat(createSnapshotResponseThird.getSnapshotInfo().successfulShards(), greaterThan(0));
    assertThat(createSnapshotResponseThird.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponseThird.getSnapshotInfo().totalShards()));
    assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-2").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
    {
        SnapshotStatus snapshotStatus = client.admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test-2").get().getSnapshots().get(0);
        List<SnapshotIndexShardStatus> shards = snapshotStatus.getShards();
        for (SnapshotIndexShardStatus status : shards) {
            assertThat(status.getStats().getProcessedFiles(), equalTo(2)); // we flush before the snapshot such that we have to process the segments_N files plus the .del file
        }
    }
}

// Verifies index settings can be overridden or ignored during restore, and that illegal
// overrides are rejected. (Body continues on the following lines.)
@Test
public void changeSettingsOnRestoreTest() throws Exception {
    Client client = client();

    logger.info("--> creating repository");
    assertAcked(client.admin().cluster().preparePutRepository("test-repo")
            .setType("fs").setSettings(Settings.settingsBuilder()
                    .put("location", randomRepoPath())
                    .put("compress", randomBoolean())
                    .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));

    logger.info("--> create test index with synonyms search analyzer");

    Settings.Builder indexSettings = Settings.builder()
            .put(indexSettings())
            .put(SETTING_NUMBER_OF_REPLICAS, between(0, 1))
            .put(INDEX_REFRESH_INTERVAL, "10s")
            // (continuation of changeSettingsOnRestoreTest: analyzer + synonym filter under test)
            .put("index.analysis.analyzer.my_analyzer.type", "custom")
            .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard")
            .putArray("index.analysis.analyzer.my_analyzer.filter", "lowercase", "my_synonym")
            .put("index.analysis.filter.my_synonym.type", "synonym")
            .put("index.analysis.filter.my_synonym.synonyms", "foo => bar");

    assertAcked(prepareCreate("test-idx", 2, indexSettings));

    int numberOfShards = getNumShards("test-idx").numPrimaries;
    assertAcked(client().admin().indices().preparePutMapping("test-idx").setType("type1").setSource("field1", "type=string,analyzer=standard,search_analyzer=my_analyzer"));
    final int numdocs = randomIntBetween(10, 100);
    IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs];
    for (int i = 0; i < builders.length; i++) {
        builders[i] = client().prepareIndex("test-idx", "type1", Integer.toString(i)).setSource("field1", "bar " + i);
    }
    indexRandom(true, builders);
    flushAndRefresh();

    // Search-time synonym maps foo => bar, so both queries hit every document
    assertHitCount(client.prepareCount("test-idx").setQuery(matchQuery("field1", "foo")).get(), numdocs);
    assertHitCount(client.prepareCount("test-idx").setQuery(matchQuery("field1", "bar")).get(), numdocs);

    logger.info("--> snapshot it");
    CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
    assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
    assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));

    logger.info("--> delete the index and recreate it while changing refresh interval and analyzer");
    cluster().wipeIndices("test-idx");

    Settings newIndexSettings = Settings.builder()
            .put("refresh_interval", "5s")
            .put("index.analysis.analyzer.my_analyzer.type", "standard")
            .build();

    Settings newIncorrectIndexSettings = Settings.builder()
            .put(newIndexSettings)
            .put(SETTING_NUMBER_OF_SHARDS, numberOfShards + 100)
            .build();

    logger.info("--> try restoring while changing the number of shards - should fail");
    assertThrows(client.admin().cluster()
            .prepareRestoreSnapshot("test-repo", "test-snap")
            .setIgnoreIndexSettings("index.analysis.*")
            .setIndexSettings(newIncorrectIndexSettings)
            .setWaitForCompletion(true), SnapshotRestoreException.class);

    logger.info("--> try restoring while changing the number of replicas to a negative number - should fail");
    Settings newIncorrectReplicasIndexSettings = Settings.builder()
            .put(newIndexSettings)
            .put(SETTING_NUMBER_OF_REPLICAS.substring(IndexMetaData.INDEX_SETTING_PREFIX.length()), randomIntBetween(-10, -1))
            .build();
    assertThrows(client.admin().cluster()
            .prepareRestoreSnapshot("test-repo", "test-snap")
            .setIgnoreIndexSettings("index.analysis.*")
            .setIndexSettings(newIncorrectReplicasIndexSettings)
            .setWaitForCompletion(true), IllegalArgumentException.class);

    logger.info("--> restore index with correct settings from the snapshot");
    RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster()
            .prepareRestoreSnapshot("test-repo", "test-snap")
            .setIgnoreIndexSettings("index.analysis.*")
            .setIndexSettings(newIndexSettings)
            .setWaitForCompletion(true).execute().actionGet();
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));

    logger.info("--> assert that correct settings are restored");
    GetSettingsResponse getSettingsResponse = client.admin().indices().prepareGetSettings("test-idx").execute().actionGet();
    assertThat(getSettingsResponse.getSetting("test-idx", INDEX_REFRESH_INTERVAL), equalTo("5s"));
    // Make sure that number of shards didn't change
    assertThat(getSettingsResponse.getSetting("test-idx", SETTING_NUMBER_OF_SHARDS), equalTo("" + numberOfShards));
    assertThat(getSettingsResponse.getSetting("test-idx", "index.analysis.analyzer.my_analyzer.type"), equalTo("standard"));
    assertThat(getSettingsResponse.getSetting("test-idx", "index.analysis.filter.my_synonym.type"), nullValue());
assertHitCount(client.prepareCount("test-idx").setQuery(matchQuery("field1", "foo")).get(), 0); assertHitCount(client.prepareCount("test-idx").setQuery(matchQuery("field1", "bar")).get(), numdocs); logger.info("--> delete the index and recreate it while deleting all index settings"); cluster().wipeIndices("test-idx"); logger.info("--> restore index with correct settings from the snapshot"); restoreSnapshotResponse = client.admin().cluster() .prepareRestoreSnapshot("test-repo", "test-snap") .setIgnoreIndexSettings("*") // delete everything we can delete .setIndexSettings(newIndexSettings) .setWaitForCompletion(true).execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); logger.info("--> assert that correct settings are restored and index is still functional"); getSettingsResponse = client.admin().indices().prepareGetSettings("test-idx").execute().actionGet(); assertThat(getSettingsResponse.getSetting("test-idx", INDEX_REFRESH_INTERVAL), equalTo("5s")); // Make sure that number of shards didn't change assertThat(getSettingsResponse.getSetting("test-idx", SETTING_NUMBER_OF_SHARDS), equalTo("" + numberOfShards)); assertHitCount(client.prepareCount("test-idx").setQuery(matchQuery("field1", "foo")).get(), 0); assertHitCount(client.prepareCount("test-idx").setQuery(matchQuery("field1", "bar")).get(), numdocs); } @Test public void deleteIndexDuringSnapshotTest() throws Exception { Client client = client(); boolean allowPartial = randomBoolean(); logger.info("--> creating repository"); assertAcked(client.admin().cluster().preparePutRepository("test-repo") .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(Settings.settingsBuilder() .put("location", randomRepoPath()) .put("compress", randomBoolean()) .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) .put("block_on_init", true) )); createIndex("test-idx-1", "test-idx-2", "test-idx-3"); ensureGreen(); logger.info("--> indexing some data"); 
for (int i = 0; i < 100; i++) { index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i); index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i); index("test-idx-3", "doc", Integer.toString(i), "foo", "baz" + i); } refresh(); assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L)); assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L)); assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(100L)); logger.info("--> snapshot allow partial {}", allowPartial); ListenableActionFuture<CreateSnapshotResponse> future = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") .setIndices("test-idx-*").setWaitForCompletion(true).setPartial(allowPartial).execute(); logger.info("--> wait for block to kick in"); waitForBlock(internalCluster().getMasterName(), "test-repo", TimeValue.timeValueMinutes(1)); logger.info("--> delete some indices while snapshot is running"); client.admin().indices().prepareDelete("test-idx-1", "test-idx-2").get(); logger.info("--> unblock running master node"); unblockNode(internalCluster().getMasterName()); logger.info("--> waiting for snapshot to finish"); CreateSnapshotResponse createSnapshotResponse = future.get(); if (allowPartial) { logger.info("Deleted index during snapshot, but allow partial"); assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo((SnapshotState.PARTIAL))); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat(createSnapshotResponse.getSnapshotInfo().failedShards(), greaterThan(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), lessThan(createSnapshotResponse.getSnapshotInfo().totalShards())); } else { logger.info("Deleted index during snapshot and doesn't allow partial"); assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo((SnapshotState.FAILED))); } } @Test public void deleteOrphanSnapshotTest() throws Exception { Client 
client = client(); logger.info("--> creating repository"); assertAcked(client.admin().cluster().preparePutRepository("test-repo") .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(Settings.settingsBuilder() .put("location", randomRepoPath()) .put("compress", randomBoolean()) .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) )); createIndex("test-idx"); ensureGreen(); ClusterService clusterService = internalCluster().getInstance(ClusterService.class, internalCluster().getMasterName()); final CountDownLatch countDownLatch = new CountDownLatch(1); logger.info("--> snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get(); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); logger.info("--> emulate an orphan snapshot"); clusterService.submitStateUpdateTask("orphan snapshot test", new ProcessedClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { // Simulate orphan snapshot ImmutableMap.Builder<ShardId, ShardSnapshotStatus> shards = ImmutableMap.builder(); shards.put(new ShardId("test-idx", 0), new ShardSnapshotStatus("unknown-node", State.ABORTED)); shards.put(new ShardId("test-idx", 1), new ShardSnapshotStatus("unknown-node", State.ABORTED)); shards.put(new ShardId("test-idx", 2), new ShardSnapshotStatus("unknown-node", State.ABORTED)); ImmutableList.Builder<Entry> entries = ImmutableList.builder(); entries.add(new Entry(new SnapshotId("test-repo", "test-snap"), true, State.ABORTED, ImmutableList.of("test-idx"), System.currentTimeMillis(), shards.build())); return ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(entries.build())).build(); } 
@Override public void onFailure(String source, Throwable t) { fail(); } @Override public void clusterStateProcessed(String source, ClusterState oldState, final ClusterState newState) { countDownLatch.countDown(); } }); countDownLatch.await(); logger.info("--> try deleting the orphan snapshot"); assertAcked(client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap").get("10s")); } private boolean waitForIndex(final String index, TimeValue timeout) throws InterruptedException { return awaitBusy(new Predicate<Object>() { @Override public boolean apply(Object o) { return client().admin().indices().prepareExists(index).execute().actionGet().isExists(); } }, timeout.millis(), TimeUnit.MILLISECONDS); } private boolean waitForRelocationsToStart(final String index, TimeValue timeout) throws InterruptedException { return awaitBusy(new Predicate<Object>() { @Override public boolean apply(Object o) { return client().admin().cluster().prepareHealth(index).execute().actionGet().getRelocatingShards() > 0; } }, timeout.millis(), TimeUnit.MILLISECONDS); } @Test @TestLogging("cluster:DEBUG") public void batchingShardUpdateTaskTest() throws Exception { final Client client = client(); logger.info("--> creating repository"); assertAcked(client.admin().cluster().preparePutRepository("test-repo") .setType("fs").setSettings(Settings.settingsBuilder() .put("location", randomRepoPath()) .put("compress", randomBoolean()) .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); assertAcked(prepareCreate("test-idx", 0, settingsBuilder().put("number_of_shards", between(1, 20)) .put("number_of_replicas", 0))); ensureGreen(); logger.info("--> indexing some data"); final int numdocs = randomIntBetween(10, 100); IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs]; for (int i = 0; i < builders.length; i++) { builders[i] = client().prepareIndex("test-idx", "type1", Integer.toString(i)).setSource("field1", "bar " + i); } indexRandom(true, builders); 
flushAndRefresh(); final int numberOfShards = getNumShards("test-idx").numPrimaries; logger.info("number of shards: {}", numberOfShards); final ClusterService clusterService = internalCluster().clusterService(internalCluster().getMasterName()); BlockingClusterStateListener snapshotListener = new BlockingClusterStateListener(clusterService, "update_snapshot [", "update snapshot state", Priority.HIGH); try { clusterService.addFirst(snapshotListener); logger.info("--> snapshot"); ListenableActionFuture<CreateSnapshotResponse> snapshotFuture = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").execute(); // Await until shard updates are in pending state. assertBusyPendingTasks("update snapshot state", numberOfShards); snapshotListener.unblock(); // Check that the snapshot was successful CreateSnapshotResponse createSnapshotResponse = snapshotFuture.actionGet(); assertEquals(SnapshotState.SUCCESS, createSnapshotResponse.getSnapshotInfo().state()); assertEquals(numberOfShards, createSnapshotResponse.getSnapshotInfo().totalShards()); assertEquals(numberOfShards, createSnapshotResponse.getSnapshotInfo().successfulShards()); } finally { clusterService.remove(snapshotListener); } // Check that we didn't timeout assertFalse(snapshotListener.timedOut()); // Check that cluster state update task was called only once assertEquals(1, snapshotListener.count()); logger.info("--> close indices"); client.admin().indices().prepareClose("test-idx").get(); BlockingClusterStateListener restoreListener = new BlockingClusterStateListener(clusterService, "restore_snapshot[", "update snapshot state", Priority.HIGH); try { clusterService.addFirst(restoreListener); logger.info("--> restore snapshot"); ListenableActionFuture<RestoreSnapshotResponse> futureRestore = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute(); // Await until shard updates are in pending state. 
assertBusyPendingTasks("update snapshot state", numberOfShards); restoreListener.unblock(); RestoreSnapshotResponse restoreSnapshotResponse = futureRestore.actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(numberOfShards)); } finally { clusterService.remove(restoreListener); } // Check that we didn't timeout assertFalse(restoreListener.timedOut()); // Check that cluster state update task was called only once assertEquals(1, restoreListener.count()); } @Test public void snapshotNameTest() throws Exception { final Client client = client(); logger.info("--> creating repository"); assertAcked(client.admin().cluster().preparePutRepository("test-repo") .setType("fs").setSettings(Settings.settingsBuilder() .put("location", randomRepoPath()) .put("compress", randomBoolean()) .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); try { client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("_foo").get(); fail("shouldn't be here"); } catch (InvalidSnapshotNameException ex) { assertThat(ex.getMessage(), containsString("Invalid snapshot name")); } try { client.admin().cluster().prepareCreateSnapshot("test-repo", "_foo").get(); fail("shouldn't be here"); } catch (InvalidSnapshotNameException ex) { assertThat(ex.getMessage(), containsString("Invalid snapshot name")); } try { client.admin().cluster().prepareDeleteSnapshot("test-repo", "_foo").get(); fail("shouldn't be here"); } catch (InvalidSnapshotNameException ex) { assertThat(ex.getMessage(), containsString("Invalid snapshot name")); } try { client.admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("_foo").get(); fail("shouldn't be here"); } catch (InvalidSnapshotNameException ex) { assertThat(ex.getMessage(), containsString("Invalid snapshot name")); } } }
apache-2.0
dotCipher/dropwizard-flyway
src/main/java/io/dropwizard/flyway/cli/DbCleanCommand.java
852
package io.dropwizard.flyway.cli; import io.dropwizard.flyway.FlywayConfiguration; import io.dropwizard.Configuration; import io.dropwizard.db.DatabaseConfiguration; import net.sourceforge.argparse4j.inf.Namespace; import org.flywaydb.core.Flyway; public class DbCleanCommand<T extends Configuration> extends AbstractFlywayCommand<T> { public DbCleanCommand(final DatabaseConfiguration<T> databaseConfiguration, final FlywayConfiguration<T> flywayConfiguration, final Class<T> configurationClass) { super("clean", "Drops all objects in the configured schemas.", databaseConfiguration, flywayConfiguration, configurationClass); } @Override protected void run(final Namespace namespace, final Flyway flyway) throws Exception { flyway.clean(); } }
apache-2.0
garethahealy/camel-dynamic-loadbalancer
dynamic-lb-core/src/test/java/com/garethahealy/camel/dynamic/loadbalancer/core/DynamicWeightedRoundRobinLoadBalancerTest.java
7910
/*
 * #%L
 * GarethHealy :: Camel Dynamic LoadBalance :: Core
 * %%
 * Copyright (C) 2013 - 2018 Gareth Healy
 * %%
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * #L%
 */
package com.garethahealy.camel.dynamic.loadbalancer.core;

import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;

import com.garethahealy.camel.dynamic.loadbalancer.statistics.EveryXDeterministicCollectorStrategy;
import com.garethahealy.camel.dynamic.loadbalancer.statistics.MeanProcessingTimeProcessorSelectorStrategy;
import com.garethahealy.camel.dynamic.loadbalancer.statistics.ProcessorHolder;
import com.garethahealy.camel.dynamic.loadbalancer.statistics.RouteStatistics;
import com.garethahealy.camel.dynamic.loadbalancer.statistics.strategy.ProcessorSelectorStrategy;
import com.garethahealy.camel.dynamic.loadbalancer.statistics.strategy.RouteStatisticsCollector;

import org.apache.camel.Processor;
import org.apache.camel.test.junit4.ExchangeTestSupport;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;

/**
 * Unit tests for {@link DynamicWeightedRoundRobinLoadBalancer}, covering the
 * happy path (two weighted processors) plus the degenerate cases where the
 * statistics collector or the selector strategy return nothing.
 */
public class DynamicWeightedRoundRobinLoadBalancerTest extends ExchangeTestSupport {

    /** With two processors and real stats, the lower-mean processor is chosen first. */
    @Test
    public void handlesTwoProcessors() throws Exception {
        ProcessorHolder fastHolder = new ProcessorHolder();
        fastHolder.setProcessor(Mockito.mock(Processor.class));

        ProcessorHolder slowHolder = new ProcessorHolder();
        slowHolder.setProcessor(Mockito.mock(Processor.class));

        RouteStatistics fastStats = new RouteStatistics();
        fastStats.setProcessorHolder(fastHolder);
        fastStats.setMeanProcessingTime(12345L);

        RouteStatistics slowStats = new RouteStatistics();
        slowStats.setProcessorHolder(slowHolder);
        slowStats.setMeanProcessingTime(54321L);

        List<RouteStatistics> stats = new ArrayList<RouteStatistics>();
        stats.add(fastStats);
        stats.add(slowStats);

        List<Processor> processors = new LinkedList<Processor>();
        processors.add(fastHolder.getProcessor());
        processors.add(slowHolder.getProcessor());

        RouteStatisticsCollector collector = Mockito.mock(RouteStatisticsCollector.class);
        Mockito.when(collector.query(processors, exchange)).thenReturn(stats);

        DynamicLoadBalancerConfiguration config = new DynamicLoadBalancerConfiguration();
        config.setRouteStatisticsCollector(collector);
        config.setDeterministicCollectorStrategy(new EveryXDeterministicCollectorStrategy(0, 0));
        config.setRouteStatsSelectorStrategy(new MeanProcessingTimeProcessorSelectorStrategy());

        DynamicWeightedRoundRobinLoadBalancer loadBalancer = new DynamicWeightedRoundRobinLoadBalancer(config);
        for (Processor processor : processors) {
            loadBalancer.addProcessor(processor);
        }
        loadBalancer.doStart();

        Processor chosen = loadBalancer.chooseProcessor(processors, exchange);

        Assert.assertNotNull(chosen);
        Assert.assertEquals(fastHolder.getProcessor(), chosen);
    }

    /** When the collector yields no stats, the balancer falls back to the first processor. */
    @Test
    public void handlesEmptyStatsReturned() throws Exception {
        List<Processor> processors = new LinkedList<Processor>();
        processors.add(Mockito.mock(Processor.class));
        processors.add(Mockito.mock(Processor.class));

        // NOTE(review): stubbed with createExchange() but invoked with the inherited
        // 'exchange' field — the stub likely never matches, which is harmless here
        // because Mockito's default answer returns an empty list anyway.
        RouteStatisticsCollector collector = Mockito.mock(RouteStatisticsCollector.class);
        Mockito.when(collector.query(processors, createExchange())).thenReturn(new ArrayList<RouteStatistics>());

        DynamicLoadBalancerConfiguration config = new DynamicLoadBalancerConfiguration();
        config.setRouteStatisticsCollector(collector);
        config.setDeterministicCollectorStrategy(new EveryXDeterministicCollectorStrategy(0, 0));
        config.setRouteStatsSelectorStrategy(new MeanProcessingTimeProcessorSelectorStrategy());

        DynamicWeightedRoundRobinLoadBalancer loadBalancer = new DynamicWeightedRoundRobinLoadBalancer(config);
        for (Processor processor : processors) {
            loadBalancer.addProcessor(processor);
        }
        loadBalancer.doStart();

        Processor chosen = loadBalancer.chooseProcessor(processors, exchange);

        Assert.assertNotNull(chosen);
        Assert.assertEquals(processors.get(0), chosen);
    }

    /** When the selector produces no weights, the balancer falls back to the first processor. */
    @Test
    public void handlesEmptyWeightsReturned() throws Exception {
        List<Processor> processors = new LinkedList<Processor>();
        processors.add(Mockito.mock(Processor.class));
        processors.add(Mockito.mock(Processor.class));

        List<RouteStatistics> stats = new ArrayList<RouteStatistics>();
        stats.add(new RouteStatistics());

        RouteStatisticsCollector collector = Mockito.mock(RouteStatisticsCollector.class);
        Mockito.when(collector.query(processors, createExchange())).thenReturn(stats);

        ProcessorSelectorStrategy selector = Mockito.mock(ProcessorSelectorStrategy.class);
        Mockito.when(selector.getWeightedProcessors(stats, processors)).thenReturn(new ArrayList<Integer>());

        DynamicLoadBalancerConfiguration config = new DynamicLoadBalancerConfiguration();
        config.setRouteStatisticsCollector(collector);
        config.setDeterministicCollectorStrategy(new EveryXDeterministicCollectorStrategy(0, 0));
        config.setRouteStatsSelectorStrategy(selector);

        DynamicWeightedRoundRobinLoadBalancer loadBalancer = new DynamicWeightedRoundRobinLoadBalancer(config);
        for (Processor processor : processors) {
            loadBalancer.addProcessor(processor);
        }
        loadBalancer.doStart();

        Processor chosen = loadBalancer.chooseProcessor(processors, exchange);

        Assert.assertNotNull(chosen);
        Assert.assertEquals(processors.get(0), chosen);
    }

    /** When the deterministic strategy suppresses collection, the default weighting is used. */
    @Test
    public void usesDefaultWeighted() throws Exception {
        List<Processor> processors = new LinkedList<Processor>();
        processors.add(Mockito.mock(Processor.class));
        processors.add(Mockito.mock(Processor.class));

        DynamicLoadBalancerConfiguration config = new DynamicLoadBalancerConfiguration();
        config.setRouteStatisticsCollector(Mockito.mock(RouteStatisticsCollector.class));
        config.setDeterministicCollectorStrategy(new EveryXDeterministicCollectorStrategy(10, 10));
        config.setRouteStatsSelectorStrategy(new MeanProcessingTimeProcessorSelectorStrategy());

        DynamicWeightedRoundRobinLoadBalancer loadBalancer = new DynamicWeightedRoundRobinLoadBalancer(config);
        for (Processor processor : processors) {
            loadBalancer.addProcessor(processor);
        }
        loadBalancer.doStart();

        Processor chosen = loadBalancer.chooseProcessor(processors, exchange);

        Assert.assertNotNull(chosen);
        Assert.assertEquals(processors.get(0), chosen);
    }

    /** toString() should mention the configuration it wraps. */
    @Test
    public void canUseToString() {
        DynamicWeightedRoundRobinLoadBalancer loadBalancer =
                new DynamicWeightedRoundRobinLoadBalancer(new DynamicLoadBalancerConfiguration());

        String answer = loadBalancer.toString();

        Assert.assertNotNull(answer);
        Assert.assertTrue(answer.contains("config"));
    }
}
apache-2.0
yuanfayang/qiniu-demo
src/main/java/com/changhong/yuan/web/controller/TestController.java
2854
package com.changhong.yuan.web.controller; import com.changhong.yuan.web.base.Utils.HMACSHA1Helper; import com.changhong.yuan.web.qiniu.QiniuCloudConfig; import com.changhong.yuan.web.qiniu.UploadTokenHelper; import com.google.gson.JsonObject; import com.qiniu.storage.UploadManager; import com.qiniu.util.UrlSafeBase64; import org.springframework.stereotype.Controller; import org.springframework.ui.Model; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RequestMethod; import static com.changhong.yuan.web.qiniu.QiniuCloudConfig.bucket; /** * Created with IntelliJ IDEA. * * @authr: Fayang Yuan * @Date: 2015/12/17 * @Time: 22:12 * @Description: */ @Controller public class TestController { private UploadManager uploadManager; @RequestMapping(value = "/",method = RequestMethod.GET) public String index(Model model){ model.addAttribute("token", UploadTokenHelper.buildFrontEndUploadToken()); return "index"; } /** * * @param model * @return * 七牛上传策略 * scope = 'my-bucket:sunflower.jpg' * deadline = 1451491200 returnBody = '{ "name": $(fname), "size": $(fsize), "w": $(imageInfo.width), "h": $(imageInfo.height), "hash": $(etag) }' */ @RequestMapping(value = "/test",method = RequestMethod.GET) public String test(Model model){ String token = QiniuCloudConfig.auth.uploadToken(bucket); //七牛上传策略 JsonObject uploadStrategyJsonObject = new JsonObject(); uploadStrategyJsonObject.addProperty("scope", QiniuCloudConfig.bucket); uploadStrategyJsonObject.addProperty("deadline", 1451491200); uploadStrategyJsonObject.addProperty("returnBody", "{'" + "name':$(fname)" + "'size':$(fsize)" + "'w':$(imageInfo.width)" + "'h':$(imageInfo.height)" + "'hash':$(etag)" + "}"); //对JSON编码的上传策略进行URL安全的Base64编码 String encodePutPolicy = UrlSafeBase64.encodeToString(uploadStrategyJsonObject.toString()); //使用SecretKey对上一步生成的待签名字符串计算HMAC-SHA1签名 byte[] sign = null; try { sign = HMACSHA1Helper.HmacSHA1Encrypt(encodePutPolicy,QiniuCloudConfig.SECRET_KEY); 
} catch (Exception e) { e.printStackTrace(); } //对签名进行URL安全的Base64编码 String encodeSign = UrlSafeBase64.encodeToString(sign); //将AccessKey、encodedSign和encodedPutPolicy用:连接起来: String uploadToken = QiniuCloudConfig.ACCESS_KEY+":"+encodeSign+":"+encodePutPolicy; model.addAttribute("token", uploadToken); return "test"; } }
apache-2.0
HubSpot/Singularity
SingularityService/src/main/java/com/hubspot/singularity/data/curator/ZkClientsLoadDistributor.java
1548
package com.hubspot.singularity.data.curator;

import java.util.List;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.curator.framework.CuratorFramework;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Spreads ZooKeeper load over a fixed pool of {@link CuratorFramework} clients
 * by handing them out in round-robin order.
 */
public class ZkClientsLoadDistributor {

  private static final Logger LOG = LoggerFactory.getLogger(ZkClientsLoadDistributor.class);

  private final List<CuratorFramework> curatorFrameworks;
  private final AtomicInteger curatorIndex;

  /**
   * @param curatorFrameworks the client pool; must be non-null and non-empty
   *                          for {@link #getCuratorFramework()} to work
   */
  public ZkClientsLoadDistributor(List<CuratorFramework> curatorFrameworks) {
    this.curatorFrameworks = Objects.requireNonNull(curatorFrameworks, "curatorFrameworks");
    this.curatorIndex = new AtomicInteger(0);
  }

  /** Starts every client; failures are logged with their cause and do not abort the rest. */
  public void start() {
    for (CuratorFramework framework : curatorFrameworks) {
      try {
        framework.start();
      } catch (Exception e) {
        // BUG FIX: the exception was previously dropped from the log call.
        LOG.warn("Error starting framework", e);
      }
    }
  }

  /** Closes every client; failures are logged with their cause and do not abort the rest. */
  public void close() {
    for (CuratorFramework framework : curatorFrameworks) {
      try {
        framework.close();
      } catch (Exception e) {
        // BUG FIX: message previously said "starting" (copy-paste) and the
        // exception object was dropped.
        LOG.warn("Error closing framework", e);
      }
    }
  }

  /** Returns the next client in round-robin order (atomic wrap-around index). */
  public CuratorFramework getCuratorFramework() {
    int ci = curatorIndex.getAndUpdate(i -> (i + 1) % curatorFrameworks.size());
    return curatorFrameworks.get(ci);
  }

  /** Returns the underlying pool (live view, not a copy). */
  public List<CuratorFramework> getAll() {
    return this.curatorFrameworks;
  }

  /** True only when every client in the pool reports started. */
  public boolean isStarted() {
    for (CuratorFramework framework : curatorFrameworks) {
      if (!framework.isStarted()) {
        return false;
      }
    }
    return true;
  }
}
apache-2.0
osmanpub/oracle-samples
case-studies/dukes-bookstore/src/main/java/javaeetutorial/dukesbookstore/web/managedbeans/CatalogBean.java
2311
/**
 * Copyright (c) 2013 Oracle and/or its affiliates. All rights reserved.
 *
 * You may not modify, use, reproduce, or distribute this software except in
 * compliance with the terms of the License at:
 * http://java.net/projects/javaeetutorial/pages/BerkeleyLicense
 */
package javaeetutorial.dukesbookstore.web.managedbeans;

import java.io.Serializable;
import java.util.List;
import javaeetutorial.dukesbookstore.entity.Book;
import javax.enterprise.context.SessionScoped;
import javax.inject.Named;

/**
 * Session-scoped backing bean for the {@code /bookcatalog.xhtml} page:
 * reads the currently selected {@link Book} from the request map, adds it
 * to the shopping cart, and exposes cart counts to the view.
 */
@Named("catalog")
@SessionScoped
public class CatalogBean extends AbstractBean implements Serializable {

    private static final long serialVersionUID = -3594317405246398714L;

    // Cached cart-item count, refreshed on every getTotalBooks() call.
    private int totalBooks = 0;

    /**
     * Returns the {@code Book} the current request selected, or {@code null}
     * if the request map carries no {@code "book"} entry.
     */
    protected Book book() {
        return (Book) context().getExternalContext().getRequestMap().get("book");
    }

    /** Adds the selected book to the cart and stays on the catalog page. */
    public String add() {
        Book selected = book();
        cart.add(selected.getBookId(), selected);
        message(null, "ConfirmAdd", new Object[]{selected.getTitle()});
        return ("bookcatalog");
    }

    /** Stores the selected book in the session and navigates to the details page. */
    public String details() {
        context().getExternalContext().getSessionMap().put("selected", book());
        return ("bookdetails");
    }

    /** Refreshes and returns the number of items currently in the cart. */
    public int getTotalBooks() {
        totalBooks = cart.getNumberOfItems();
        return totalBooks;
    }

    public void setTotalBooks(int totalBooks) {
        this.totalBooks = totalBooks;
    }

    /**
     * Returns the cart quantity of the currently selected book, or 0 when no
     * book is selected or the book is not in the cart.
     */
    public int getBookQuantity() {
        Book selected = book();
        if (selected == null) {
            return 0;
        }

        List<ShoppingCartItem> items = cart.getItems();
        for (ShoppingCartItem item : items) {
            Book inCart = (Book) item.getItem();
            if ((inCart.getBookId()).equals(selected.getBookId())) {
                return item.getQuantity();
            }
        }
        return 0;
    }
}
apache-2.0
opensciencegrid/oim
src/edu/iu/grid/oim/servlet/MetricEditServlet.java
3320
package edu.iu.grid.oim.servlet;

import java.io.IOException;
import java.sql.SQLException;

import javax.servlet.Servlet;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.log4j.Logger;

import com.divrep.DivRepRoot;

import edu.iu.grid.oim.lib.Authorization;
import edu.iu.grid.oim.lib.StaticConfig;
import edu.iu.grid.oim.model.UserContext;
import edu.iu.grid.oim.model.db.MetricModel;
import edu.iu.grid.oim.model.db.OsgGridTypeModel;
import edu.iu.grid.oim.model.db.record.MetricRecord;
import edu.iu.grid.oim.model.db.record.OsgGridTypeRecord;
import edu.iu.grid.oim.view.BootBreadCrumbView;
import edu.iu.grid.oim.view.BootMenuView;
import edu.iu.grid.oim.view.BootPage;
import edu.iu.grid.oim.view.BreadCrumbView;
import edu.iu.grid.oim.view.ContentView;
import edu.iu.grid.oim.view.DivRepWrapper;
import edu.iu.grid.oim.view.HtmlView;
import edu.iu.grid.oim.view.MenuView;
import edu.iu.grid.oim.view.Page;
import edu.iu.grid.oim.view.SideContentView;
import edu.iu.grid.oim.view.divrep.form.MetricFormDE;
import edu.iu.grid.oim.view.divrep.form.OsgGridTypeFormDE;

/**
 * Admin-only servlet that renders the RSV metric edit form: with an {@code id}
 * request parameter it loads that metric for update, otherwise it presents a
 * blank form for creating a new one.
 */
public class MetricEditServlet extends ServletBase implements Servlet {
    private static final long serialVersionUID = 1L;
    static Logger log = Logger.getLogger(MetricEditServlet.class);

    // Parent page used for the breadcrumb trail and form return target.
    private String parent_page = "metric";

    /**
     * Renders the new/update form for an RSV metric.
     *
     * @throws ServletException on access violation, bad/unknown id, or DB failure
     */
    protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
        UserContext context = new UserContext(request);
        Authorization auth = context.getAuthorization();
        auth.check("admin");

        MetricRecord rec;
        String title;

        // If "id" is provided we are updating an existing record, otherwise creating a new one.
        String id_str = request.getParameter("id");
        if(id_str != null) {
            // BUG FIX: a malformed "id" previously escaped doGet as a raw
            // NumberFormatException; report it as a ServletException like the
            // other failure modes.
            int id;
            try {
                id = Integer.parseInt(id_str);
            } catch (NumberFormatException e) {
                throw new ServletException("Invalid metric id: " + id_str, e);
            }

            MetricModel model = new MetricModel(context);
            try {
                rec = model.get(id);
            } catch (SQLException e) {
                throw new ServletException(e);
            }
            // NOTE(review): assumes model.get() may return null for an unknown
            // id — previously that caused an NPE on rec.name below.
            if (rec == null) {
                throw new ServletException("No such metric: " + id);
            }
            title = "Update " + rec.name;
        } else {
            rec = new MetricRecord();
            title = "New";
        }

        MetricFormDE form;
        try {
            form = new MetricFormDE(context, rec, parent_page);
        } catch (SQLException e) {
            throw new ServletException(e);
        }

        // Put the form in a view and display it.
        ContentView contentview = new ContentView(context);
        contentview.add(new DivRepWrapper(form));

        // Breadcrumb trail: Administration > RSV Metric > <title>.
        BootBreadCrumbView bread_crumb = new BootBreadCrumbView();
        bread_crumb.addCrumb("Administration", "admin");
        bread_crumb.addCrumb("RSV Metric", parent_page);
        bread_crumb.addCrumb(title, null);
        contentview.setBreadCrumb(bread_crumb);

        BootPage page = new BootPage(context, new BootMenuView(context, "admin"), contentview, createSideView());
        page.render(response.getWriter());
    }

    /** Builds the (currently empty) side panel for the page. */
    private SideContentView createSideView() {
        return new SideContentView();
    }
}
apache-2.0
748251120/bainian_audit01
audit3/src/java/com/gbicc/bll/rep/service/impl/RepInfoServiceImpl.java
4924
/**
 * Report definition information service implementation.
 * Company: Gbicc Information Technology (Beijing) Co., Ltd.
 * File: RepInfoService.java
 * Author: Zhang Mingjin
 * Created: 2013-09-26 10:10
 * Last updated: 2013-09-26 10:10
 *
 * (When modifying, please note the modifier, modification time and description.)
 */
package com.gbicc.bll.rep.service.impl;

import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;

import org.jdom.JDOMException;

import com.gbicc.bll.PaginateInfo;
import com.gbicc.bll.doubt.bean.DouMap;
import com.gbicc.bll.model.bean.ModInfo;
import com.gbicc.bll.rep.bean.KPIInfo;
import com.gbicc.bll.rep.bean.RadarChart;
import com.gbicc.bll.rep.bean.RepInfo;
import com.gbicc.bll.rep.dao.RepInfoDAO;
import com.gbicc.bll.rep.service.RepInfoService;
import com.gbicc.bll.uitl.DateTimeUtils;
import com.gbicc.bll.uitl.RadarUtil;
import com.gbicc.bll.uitl.XmlUtil;

/**
 * Service layer for report definitions: thin delegation to {@link RepInfoDAO}
 * plus generation of the desktop radar-warning chart XML file.
 */
public class RepInfoServiceImpl implements RepInfoService {

    private RepInfoDAO repInfoDAO;

    public void setRepInfoDAO(RepInfoDAO repInfoDAO) {
        this.repInfoDAO = repInfoDAO;
    }

    /** Lists all report definitions. */
    public List getRepInfoList() {
        return this.repInfoDAO.getRepInfoList();
    }

    /**
     * Queries report base information by report id / report name.
     *
     * @param repInfo filter holder carrying id and name
     */
    public List reportListByIdName(RepInfo repInfo) {
        return this.repInfoDAO.reportListByIdName(repInfo);
    }

    /** Lists report definitions belonging to the given folder. */
    @Override
    public List getRepInfoListByFolderId(String folderId) {
        return this.repInfoDAO.getRepInfoListByFolderId(folderId);
    }

    /** Inserts a new report base-information record. */
    public String reportSave(RepInfo repInfo) {
        return this.repInfoDAO.reportSave(repInfo);
    }

    public String reportSavess(RepInfo repInfo) {
        return this.repInfoDAO.reportSavess(repInfo);
    }

    /** Fetches a report's base information by its id. */
    public RepInfo getReportInfoById(String idstr) {
        return this.repInfoDAO.getReportInfoById(idstr);
    }

    /** Updates a report's base information. */
    public int reportUpdate(RepInfo repInfo) {
        return this.repInfoDAO.reportUpdate(repInfo);
    }

    /** Deletes a report's base information by id. */
    public int reportDelete(String idstr) {
        return this.repInfoDAO.reportDelete(idstr);
    }

    /**
     * My-desktop radar warning chart: writes a per-user XML file under the web
     * root and returns its relative path plus the year/month of the data.
     *
     * @param userId      id of the logged-in user (used in the file name)
     * @param webRealPath absolute path of the web application
     * @param orgId       organisation id whose statistics are charted
     * @return map with keys {@code xmlPath}, {@code radarYear}, {@code radarMonth}
     */
    public Map<String, String> createRadarChartXmlFile(String userId, String webRealPath, String orgId) {
        // Timestamped, per-user file name so concurrent users do not collide.
        String timestamp = DateTimeUtils.now2StrDateTime2();
        String fileName = "radar_" + timestamp + "_" + userId + ".xml";
        String xmlPath = "temp/" + fileName;

        String radarYear = "";
        String radarMonth = "";
        String yearMonth = "";

        List<RadarChart> chartRows = this.repInfoDAO.getRadarChartList(orgId);
        if (chartRows.size() > 0) {
            // First row carries the reporting period as yyyyMM.
            yearMonth = chartRows.get(0).getMonthId();
            if (yearMonth != null) {
                radarYear = yearMonth.substring(0, 4);
                radarMonth = yearMonth.substring(4, 6);
            }
        } else {
            yearMonth = "";
            radarYear = "";
            radarMonth = "";
        }

        // Copy the template and fill in the chart data; failures are logged to
        // stderr and the (possibly stale/missing) path is still returned,
        // matching the original best-effort behaviour.
        String templatePath = "radar.xml";
        String outputPath = webRealPath + fileName;
        RadarUtil radarUtil = new RadarUtil();
        try {
            radarUtil.copyAndUpdateXml(templatePath, outputPath, chartRows);
        } catch (JDOMException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        }

        Map<String, String> result = new HashMap<String, String>();
        result.put("xmlPath", xmlPath);
        result.put("radarYear", radarYear);
        result.put("radarMonth", radarMonth);
        return result;
    }

    /** Pages through report definitions matching the given filter. */
    @Override
    public List<RepInfo> queryData(RepInfo repInfo, PaginateInfo paginateInfo) {
        return this.repInfoDAO.queryData(repInfo, paginateInfo);
    }

    /** Counts report definitions matching id/name/folder. */
    @Override
    public int queryCount(String reportId, String reportName, String folderId) {
        return this.repInfoDAO.queryCount(reportId, reportName, folderId);
    }

    /** Fetches KPI rows for the given type/code/org/period. */
    @Override
    public List<KPIInfo> selectKPIList(String type, String code, String zzCode, String yearMonth) {
        return this.repInfoDAO.selectKPIList(type, code, zzCode, yearMonth);
    }

    /** Lists organisations under the given code at the given level type. */
    @Override
    public List<KPIInfo> getOrgListByLevel3Type(String orgCode, String levelType) {
        return this.repInfoDAO.getOrgListByLevel3Type(orgCode, levelType);
    }
}
apache-2.0
gregorydgraham/DBvolution
src/main/java/nz/co/gregs/dbvolution/internal/postgres/StringFunctions.java
2494
/*
 * Copyright 2015 gregorygraham.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package nz.co.gregs.dbvolution.internal.postgres;

import java.sql.SQLException;
import java.sql.Statement;

/**
 * String helper functions that DBvolution installs into a PostgreSQL database.
 * Each constant carries the return type, parameter list and SQL body used to
 * (re)create the function via {@link #add(java.sql.Statement)}.
 *
 * <p style="color: #F90;">Support DBvolution at
 * <a href="http://patreon.com/dbvolution" target=new>Patreon</a></p>
 *
 * @author gregorygraham
 */
public enum StringFunctions {

	/**
	 * Substring of {@code sourceText} before the first occurrence of
	 * {@code rightHandSide}; empty string when not found.
	 */
	SUBSTRINGBEFORE(Language.sql, "text", "sourceText text, rightHandSide text", "select (CASE WHEN POSITION(rightHandSide IN (sourceText)::VARCHAR) > 0 THEN SUBSTRING((sourceText)::VARCHAR FROM 0 + 1 FOR POSITION(rightHandSide IN (sourceText)::VARCHAR) - 1 - 0) ELSE $$$$ END);"),

	/**
	 * Substring of {@code sourceText} after the first occurrence of
	 * {@code leftHandSide}; empty string when not found.
	 */
	SUBSTRINGAFTER(Language.sql, "text", "sourceText text, leftHandSide text", " select (CASE WHEN POSITION(leftHandSide IN (sourceText)::VARCHAR) > 0 THEN SUBSTRING((sourceText)::VARCHAR FROM POSITION(leftHandSide IN (sourceText)::VARCHAR) + 1 FOR CHAR_LENGTH( (sourceText)::VARCHAR ) - POSITION(leftHandSide IN (sourceText)::VARCHAR)) ELSE $$$$ END);");

	// Implementation language of the database-side function body.
	private final Language implementationLanguage;
	// SQL return type of the function.
	private final String returns;
	// Comma-separated parameter declarations.
	private final String params;
	// The SQL body installed on the server.
	private final String body;

	StringFunctions(Language implementationLanguage, String returns, String params, String body) {
		this.implementationLanguage = implementationLanguage;
		this.returns = returns;
		this.params = params;
		this.body = body;
	}

	/** Database-side function name, prefixed to avoid collisions. */
	@Override
	public String toString() {
		return "DBV_STRINGFN_" + name();
	}

	/**
	 * (Re)creates this function on the database reached through the given
	 * statement. Any pre-existing function with the same signature is dropped
	 * first on a best-effort basis.
	 *
	 * @param stmt an open statement on the target connection
	 * @throws SQLException if the CREATE statement fails
	 */
	public void add(Statement stmt) throws SQLException {
		final String signature = this + "(" + params + ")";
		try {
			stmt.execute("DROP FUNCTION " + signature + ";");
		} catch (SQLException ignored) {
			// Best effort: the function may simply not exist yet.
		}
		stmt.execute(
				"CREATE OR REPLACE FUNCTION " + signature + "\n"
				+ " RETURNS " + returns + " AS\n"
				+ "'\n"
				+ body + "'\n"
				+ "LANGUAGE '" + implementationLanguage.name() + "';");
	}
}
apache-2.0
Wokdsem/Kinject
kinject/src/main/java/com/wokdsem/kinject/annotations/Named.java
282
package com.wokdsem.kinject.annotations;

import com.wokdsem.kinject.core.KinjectValues;

import java.lang.annotation.ElementType;
import java.lang.annotation.Target;

/**
 * Qualifies an injectable parameter by name — presumably used to pick between
 * multiple bindings of the same type (verify against KinjectValues usage).
 * When no value is given, the library-wide default name is used.
 *
 * NOTE(review): no @Retention is specified, so retention defaults to CLASS —
 * readable by annotation processors but not via runtime reflection; confirm
 * this is intended.
 */
@Target(ElementType.PARAMETER)
public @interface Named {
	String value() default KinjectValues.DEFAULT_NAMED;
}
apache-2.0
naver/android-utilset
UtilSet/src/com/navercorp/utilset/device/LauncherInfo.java
727
package com.navercorp.utilset.device;

import android.content.Context;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.content.pm.ResolveInfo;

/**
 * Resolves an identifier for the device's current default (HOME) launcher.
 */
class LauncherInfo {

	/**
	 * Returns {@code packageName + className} of the application that handles
	 * the HOME intent, or the literal {@code "ANDROID"} when that cannot be
	 * determined (no resolvable launcher, or its className is null).
	 *
	 * @param context any Context, used to obtain the PackageManager
	 * @return launcher identifier string, never null
	 */
	public static String getName(Context context) {
		PackageManager pm = context.getPackageManager();
		final Intent mainIntent = new Intent(Intent.ACTION_MAIN, null);
		mainIntent.addCategory(Intent.CATEGORY_HOME);
		ResolveInfo resolveInfo = pm.resolveActivity(mainIntent, PackageManager.MATCH_DEFAULT_ONLY);
		// Fix: resolveActivity() can return null when nothing handles the
		// intent; the previous code dereferenced it unconditionally (NPE).
		if (resolveInfo == null
				|| resolveInfo.activityInfo == null
				|| resolveInfo.activityInfo.applicationInfo.className == null) {
			return "ANDROID";
		}
		return resolveInfo.activityInfo.applicationInfo.packageName
				+ resolveInfo.activityInfo.applicationInfo.className;
	}
}
apache-2.0
dagnir/aws-sdk-java
aws-java-sdk-redshift/src/main/java/com/amazonaws/services/redshift/model/DescribeOrderableClusterOptionsResult.java
10541
/*
 * Copyright 2012-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.redshift.model;

import java.io.Serializable;
import javax.annotation.Generated;

/**
 * Output of the <a>DescribeOrderableClusterOptions</a> action: the list of orderable cluster options plus an optional
 * pagination marker.
 *
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/DescribeOrderableClusterOptions"
 *      target="_top">AWS API Documentation</a>
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class DescribeOrderableClusterOptionsResult extends com.amazonaws.AmazonWebServiceResult<com.amazonaws.ResponseMetadata> implements Serializable, Cloneable {

    /** Orderable options for the cluster; lazily initialized by the getter. */
    private com.amazonaws.internal.SdkInternalList<OrderableClusterOption> orderableClusterOptions;

    /**
     * Marker for the next page of response records; empty/null once all records have been retrieved. Pass it back in
     * the <code>Marker</code> parameter of the next request.
     */
    private String marker;

    /**
     * Returns the orderable cluster options. Note: this getter creates an empty internal list on first access, i.e. it
     * mutates the backing field when it is null.
     *
     * @return the (possibly empty) list of orderable options, never null
     */
    public java.util.List<OrderableClusterOption> getOrderableClusterOptions() {
        if (orderableClusterOptions == null) {
            orderableClusterOptions = new com.amazonaws.internal.SdkInternalList<OrderableClusterOption>();
        }
        return orderableClusterOptions;
    }

    /**
     * Replaces the option list with a defensive copy of the given collection; a null argument clears the list.
     *
     * @param orderableClusterOptions options to store, or null
     */
    public void setOrderableClusterOptions(java.util.Collection<OrderableClusterOption> orderableClusterOptions) {
        this.orderableClusterOptions = (orderableClusterOptions == null)
                ? null
                : new com.amazonaws.internal.SdkInternalList<OrderableClusterOption>(orderableClusterOptions);
    }

    /**
     * Appends the given options to the existing list (creating it when absent) and returns this object for chaining.
     * Use {@link #setOrderableClusterOptions(java.util.Collection)} or
     * {@link #withOrderableClusterOptions(java.util.Collection)} to replace rather than append.
     *
     * @param orderableClusterOptions options to append
     * @return this object, for call chaining
     */
    public DescribeOrderableClusterOptionsResult withOrderableClusterOptions(OrderableClusterOption... orderableClusterOptions) {
        if (this.orderableClusterOptions == null) {
            setOrderableClusterOptions(new com.amazonaws.internal.SdkInternalList<OrderableClusterOption>(orderableClusterOptions.length));
        }
        for (OrderableClusterOption option : orderableClusterOptions) {
            this.orderableClusterOptions.add(option);
        }
        return this;
    }

    /**
     * Replaces the option list and returns this object for chaining.
     *
     * @param orderableClusterOptions options to store
     * @return this object, for call chaining
     */
    public DescribeOrderableClusterOptionsResult withOrderableClusterOptions(java.util.Collection<OrderableClusterOption> orderableClusterOptions) {
        setOrderableClusterOptions(orderableClusterOptions);
        return this;
    }

    /**
     * Sets the pagination marker for retrieving the next set of response records.
     *
     * @param marker marker returned by a previous response, or null
     */
    public void setMarker(String marker) {
        this.marker = marker;
    }

    /**
     * Returns the pagination marker; empty/null means all response records have been retrieved.
     *
     * @return the marker, or null
     */
    public String getMarker() {
        return this.marker;
    }

    /**
     * Sets the pagination marker and returns this object for chaining.
     *
     * @param marker marker returned by a previous response
     * @return this object, for call chaining
     */
    public DescribeOrderableClusterOptionsResult withMarker(String marker) {
        setMarker(marker);
        return this;
    }

    /**
     * Returns a string representation of this object; useful for testing and debugging.
     *
     * @return A string representation of this object.
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("{");
        // The getter lazy-initializes, so the first branch is effectively always taken.
        if (getOrderableClusterOptions() != null) {
            sb.append("OrderableClusterOptions: ").append(getOrderableClusterOptions()).append(",");
        }
        if (getMarker() != null) {
            sb.append("Marker: ").append(getMarker());
        }
        return sb.append("}").toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (!(obj instanceof DescribeOrderableClusterOptionsResult)) {
            return false;
        }
        DescribeOrderableClusterOptionsResult other = (DescribeOrderableClusterOptionsResult) obj;
        // Compare through the getters, matching the generated code's semantics
        // (both sides lazy-initialize their option lists).
        return java.util.Objects.equals(getOrderableClusterOptions(), other.getOrderableClusterOptions())
                && java.util.Objects.equals(getMarker(), other.getMarker());
    }

    @Override
    public int hashCode() {
        // Objects.hash uses the same 31-based accumulation as the generated version,
        // producing identical hash values.
        return java.util.Objects.hash(getOrderableClusterOptions(), getMarker());
    }

    @Override
    public DescribeOrderableClusterOptionsResult clone() {
        try {
            return (DescribeOrderableClusterOptionsResult) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() even though we're Cloneable!", e);
        }
    }
}
apache-2.0
jjeb/kettle-trunk
engine/src/org/pentaho/di/job/entries/unzip/JobEntryUnZip.java
46125
/******************************************************************************* * * Pentaho Data Integration * * Copyright (C) 2002-2012 by Pentaho : http://www.pentaho.com * ******************************************************************************* * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ******************************************************************************/ package org.pentaho.di.job.entries.unzip; import static org.pentaho.di.job.entry.validator.AbstractFileValidator.putVariableSpace; import static org.pentaho.di.job.entry.validator.AndValidator.putValidators; import static org.pentaho.di.job.entry.validator.JobEntryValidatorUtils.andValidator; import static org.pentaho.di.job.entry.validator.JobEntryValidatorUtils.fileDoesNotExistValidator; import static org.pentaho.di.job.entry.validator.JobEntryValidatorUtils.notBlankValidator; import java.io.File; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.text.SimpleDateFormat; import java.util.Date; import java.util.List; import java.util.regex.Matcher; import java.util.regex.Pattern; import org.apache.commons.vfs.AllFileSelector; import org.apache.commons.vfs.FileObject; import org.apache.commons.vfs.FileSelectInfo; import org.apache.commons.vfs.FileSystemException; import org.apache.commons.vfs.FileType; import org.pentaho.di.cluster.SlaveServer; import org.pentaho.di.core.CheckResultInterface; import org.pentaho.di.core.Const; import 
org.pentaho.di.core.Result; import org.pentaho.di.core.ResultFile; import org.pentaho.di.core.RowMetaAndData; import org.pentaho.di.core.database.DatabaseMeta; import org.pentaho.di.core.exception.KettleDatabaseException; import org.pentaho.di.core.exception.KettleException; import org.pentaho.di.core.exception.KettleXMLException; import org.pentaho.di.core.util.StringUtil; import org.pentaho.di.core.vfs.KettleVFS; import org.pentaho.di.core.xml.XMLHandler; import org.pentaho.di.i18n.BaseMessages; import org.pentaho.di.job.Job; import org.pentaho.di.job.JobMeta; import org.pentaho.di.job.entry.JobEntryBase; import org.pentaho.di.job.entry.JobEntryInterface; import org.pentaho.di.job.entry.validator.ValidatorContext; import org.pentaho.di.repository.ObjectId; import org.pentaho.di.repository.Repository; import org.w3c.dom.Node; /** * This defines a 'unzip' job entry. Its main use would be to * unzip files in a directory * * @author Samatar Hassan * @since 25-09-2007 * */ public class JobEntryUnZip extends JobEntryBase implements Cloneable, JobEntryInterface { private static Class<?> PKG = JobEntryUnZip.class; // for i18n purposes, needed by Translator2!! 
$NON-NLS-1$ private String zipFilename; public int afterunzip; private String wildcard; private String wildcardexclude; private String sourcedirectory; // targetdirectory on screen, renamed because of PDI-7761 private String movetodirectory; private boolean addfiletoresult; private boolean isfromprevious; private boolean adddate; private boolean addtime; private boolean SpecifyFormat; private String date_time_format; private boolean rootzip; private boolean createfolder; private String nr_limit; private String wildcardSource; private int iffileexist; private boolean createMoveToDirectory; private boolean addOriginalTimestamp; private boolean setOriginalModificationDate; public String SUCCESS_IF_AT_LEAST_X_FILES_UN_ZIPPED="success_when_at_least"; public String SUCCESS_IF_ERRORS_LESS="success_if_errors_less"; public String SUCCESS_IF_NO_ERRORS="success_if_no_errors"; private String success_condition; public static final int IF_FILE_EXISTS_SKIP = 0; public static final int IF_FILE_EXISTS_OVERWRITE = 1; public static final int IF_FILE_EXISTS_UNIQ = 2; public static final int IF_FILE_EXISTS_FAIL = 3; public static final int IF_FILE_EXISTS_OVERWRITE_DIFF_SIZE = 4; public static final int IF_FILE_EXISTS_OVERWRITE_EQUAL_SIZE = 5; public static final int IF_FILE_EXISTS_OVERWRITE_ZIP_BIG = 6; public static final int IF_FILE_EXISTS_OVERWRITE_ZIP_BIG_EQUAL = 7; public static final int IF_FILE_EXISTS_OVERWRITE_ZIP_SMALL = 8; public static final int IF_FILE_EXISTS_OVERWRITE_ZIP_SMALL_EQUAL = 9; public static final String typeIfFileExistsCode[] = /* WARNING: DO NOT TRANSLATE THIS. 
*/ { "SKIP", "OVERWRITE", "UNIQ", "FAIL", "OVERWRITE_DIFF_SIZE", "OVERWRITE_EQUAL_SIZE", "OVERWRITE_ZIP_BIG", "OVERWRITE_ZIP_BIG_EQUAL", "OVERWRITE_ZIP_BIG_SMALL", "OVERWRITE_ZIP_BIG_SMALL_EQUAL", }; public static final String typeIfFileExistsDesc[] = { BaseMessages.getString(PKG, "JobUnZip.Skip.Label"), BaseMessages.getString(PKG, "JobUnZip.Overwrite.Label"), BaseMessages.getString(PKG, "JobUnZip.Give_Unique_Name.Label"), BaseMessages.getString(PKG, "JobUnZip.Fail.Label"), BaseMessages.getString(PKG, "JobUnZip.OverwriteIfSizeDifferent.Label"), BaseMessages.getString(PKG, "JobUnZip.OverwriteIfSizeEquals.Label"), BaseMessages.getString(PKG, "JobUnZip.OverwriteIfZipBigger.Label"), BaseMessages.getString(PKG, "JobUnZip.OverwriteIfZipBiggerOrEqual.Label"), BaseMessages.getString(PKG, "JobUnZip.OverwriteIfZipSmaller.Label"), BaseMessages.getString(PKG, "JobUnZip.OverwriteIfZipSmallerOrEqual.Label"), }; private int NrErrors=0; private int NrSuccess=0; boolean successConditionBroken=false; boolean successConditionBrokenExit=false; int limitFiles=0; private static SimpleDateFormat daf; private boolean dateFormatSet=false; public JobEntryUnZip(String n) { super(n, ""); zipFilename=null; afterunzip=0; wildcard=null; wildcardexclude=null; sourcedirectory=null; movetodirectory=null; addfiletoresult = false; isfromprevious = false; adddate=false; addtime=false; SpecifyFormat=false; rootzip=false; createfolder=false; nr_limit="10"; wildcardSource=null; iffileexist=IF_FILE_EXISTS_SKIP; success_condition=SUCCESS_IF_NO_ERRORS; createMoveToDirectory=false; addOriginalTimestamp=false; setOriginalModificationDate=false; setID(-1L); } public JobEntryUnZip() { this(""); } public Object clone() { JobEntryUnZip je = (JobEntryUnZip) super.clone(); return je; } public String getXML() { StringBuffer retval = new StringBuffer(50); retval.append(super.getXML()); retval.append(" ").append(XMLHandler.addTagValue("zipfilename", zipFilename)); retval.append(" 
").append(XMLHandler.addTagValue("wildcard", wildcard)); retval.append(" ").append(XMLHandler.addTagValue("wildcardexclude", wildcardexclude)); retval.append(" ").append(XMLHandler.addTagValue("targetdirectory", sourcedirectory)); retval.append(" ").append(XMLHandler.addTagValue("movetodirectory", movetodirectory)); retval.append(" ").append(XMLHandler.addTagValue("afterunzip", afterunzip)); retval.append(" ").append(XMLHandler.addTagValue("addfiletoresult", addfiletoresult)); retval.append(" ").append(XMLHandler.addTagValue("isfromprevious", isfromprevious)); retval.append(" ").append(XMLHandler.addTagValue("adddate", adddate)); retval.append(" ").append(XMLHandler.addTagValue("addtime", addtime)); retval.append(" ").append(XMLHandler.addTagValue("addOriginalTimestamp", addOriginalTimestamp)); retval.append(" ").append(XMLHandler.addTagValue("SpecifyFormat", SpecifyFormat)); retval.append(" ").append(XMLHandler.addTagValue("date_time_format", date_time_format)); retval.append(" ").append(XMLHandler.addTagValue("rootzip", rootzip)); retval.append(" ").append(XMLHandler.addTagValue("createfolder", createfolder)); retval.append(" ").append(XMLHandler.addTagValue("nr_limit", nr_limit)); retval.append(" ").append(XMLHandler.addTagValue("wildcardSource", wildcardSource)); retval.append(" ").append(XMLHandler.addTagValue("success_condition", success_condition)); retval.append(" ").append(XMLHandler.addTagValue("iffileexists", getIfFileExistsCode(iffileexist))); retval.append(" ").append(XMLHandler.addTagValue("create_move_to_directory", createMoveToDirectory)); retval.append(" ").append(XMLHandler.addTagValue("setOriginalModificationDate", setOriginalModificationDate)); return retval.toString(); } public void loadXML(Node entrynode, List<DatabaseMeta> databases, List<SlaveServer> slaveServers, Repository rep) throws KettleXMLException { try { super.loadXML(entrynode, databases, slaveServers); zipFilename = XMLHandler.getTagValue(entrynode, "zipfilename"); afterunzip = 
Const.toInt(XMLHandler.getTagValue(entrynode, "afterunzip"), -1); wildcard = XMLHandler.getTagValue(entrynode, "wildcard"); wildcardexclude = XMLHandler.getTagValue(entrynode, "wildcardexclude"); sourcedirectory = XMLHandler.getTagValue(entrynode, "targetdirectory"); movetodirectory = XMLHandler.getTagValue(entrynode, "movetodirectory"); addfiletoresult = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "addfiletoresult")); isfromprevious = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "isfromprevious")); adddate = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "adddate")); addtime = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "addtime")); addOriginalTimestamp = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "addOriginalTimestamp")); SpecifyFormat = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "SpecifyFormat")); date_time_format = XMLHandler.getTagValue(entrynode, "date_time_format"); rootzip = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "rootzip")); createfolder = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "createfolder")); nr_limit = XMLHandler.getTagValue(entrynode, "nr_limit"); wildcardSource = XMLHandler.getTagValue(entrynode, "wildcardSource"); success_condition = XMLHandler.getTagValue(entrynode, "success_condition"); if(Const.isEmpty(success_condition)) success_condition=SUCCESS_IF_NO_ERRORS; iffileexist = getIfFileExistsInt(XMLHandler.getTagValue(entrynode, "iffileexists")); createMoveToDirectory = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "create_move_to_directory")); setOriginalModificationDate = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "setOriginalModificationDate")); } catch(KettleXMLException xe) { throw new KettleXMLException("Unable to load job entry of type 'unzip' from XML node", xe); } } public void loadRep(Repository rep, ObjectId id_jobentry, List<DatabaseMeta> databases, List<SlaveServer> slaveServers) throws KettleException { try { 
zipFilename = rep.getJobEntryAttributeString(id_jobentry, "zipfilename"); afterunzip=(int) rep.getJobEntryAttributeInteger(id_jobentry, "afterunzip"); wildcard = rep.getJobEntryAttributeString(id_jobentry, "wildcard"); wildcardexclude = rep.getJobEntryAttributeString(id_jobentry, "wildcardexclude"); sourcedirectory = rep.getJobEntryAttributeString(id_jobentry, "targetdirectory"); movetodirectory = rep.getJobEntryAttributeString(id_jobentry, "movetodirectory"); addfiletoresult=rep.getJobEntryAttributeBoolean(id_jobentry, "addfiletoresult"); isfromprevious=rep.getJobEntryAttributeBoolean(id_jobentry, "isfromprevious"); adddate=rep.getJobEntryAttributeBoolean(id_jobentry, "adddate"); addtime=rep.getJobEntryAttributeBoolean(id_jobentry, "addtime"); addOriginalTimestamp=rep.getJobEntryAttributeBoolean(id_jobentry, "addOriginalTimestamp"); SpecifyFormat=rep.getJobEntryAttributeBoolean(id_jobentry, "SpecifyFormat"); date_time_format = rep.getJobEntryAttributeString(id_jobentry, "date_time_format"); rootzip=rep.getJobEntryAttributeBoolean(id_jobentry, "rootzip"); createfolder=rep.getJobEntryAttributeBoolean(id_jobentry, "createfolder"); nr_limit=rep.getJobEntryAttributeString(id_jobentry, "nr_limit"); wildcardSource=rep.getJobEntryAttributeString(id_jobentry, "wildcardSource"); success_condition = rep.getJobEntryAttributeString(id_jobentry, "success_condition"); if(Const.isEmpty(success_condition)) success_condition=SUCCESS_IF_NO_ERRORS; iffileexist = getIfFileExistsInt(rep.getJobEntryAttributeString(id_jobentry,"iffileexists") ); createMoveToDirectory=rep.getJobEntryAttributeBoolean(id_jobentry, "create_move_to_directory"); setOriginalModificationDate=rep.getJobEntryAttributeBoolean(id_jobentry, "setOriginalModificationDate"); } catch(KettleException dbe) { throw new KettleException("Unable to load job entry of type 'unzip' from the repository for id_jobentry="+id_jobentry, dbe); } } public void saveRep(Repository rep, ObjectId id_job) throws KettleException { try { 
rep.saveJobEntryAttribute(id_job, getObjectId(), "zipfilename", zipFilename); rep.saveJobEntryAttribute(id_job, getObjectId(), "afterunzip", afterunzip); rep.saveJobEntryAttribute(id_job, getObjectId(), "wildcard", wildcard); rep.saveJobEntryAttribute(id_job, getObjectId(), "wildcardexclude", wildcardexclude); rep.saveJobEntryAttribute(id_job, getObjectId(), "targetdirectory", sourcedirectory); rep.saveJobEntryAttribute(id_job, getObjectId(), "movetodirectory", movetodirectory); rep.saveJobEntryAttribute(id_job, getObjectId(), "addfiletoresult", addfiletoresult); rep.saveJobEntryAttribute(id_job, getObjectId(), "isfromprevious", isfromprevious); rep.saveJobEntryAttribute(id_job, getObjectId(), "addtime", addtime); rep.saveJobEntryAttribute(id_job, getObjectId(), "adddate", adddate); rep.saveJobEntryAttribute(id_job, getObjectId(), "addOriginalTimestamp", addOriginalTimestamp); rep.saveJobEntryAttribute(id_job, getObjectId(), "SpecifyFormat", SpecifyFormat); rep.saveJobEntryAttribute(id_job, getObjectId(), "date_time_format", date_time_format); rep.saveJobEntryAttribute(id_job, getObjectId(), "rootzip", rootzip); rep.saveJobEntryAttribute(id_job, getObjectId(), "createfolder", createfolder); rep.saveJobEntryAttribute(id_job, getObjectId(), "nr_limit", nr_limit); rep.saveJobEntryAttribute(id_job, getObjectId(), "wildcardSource", wildcardSource); rep.saveJobEntryAttribute(id_job, getObjectId(), "success_condition", success_condition); rep.saveJobEntryAttribute(id_job, getObjectId(), "iffileexists", getIfFileExistsCode(iffileexist)); rep.saveJobEntryAttribute(id_job, getObjectId(), "create_move_to_directory", createMoveToDirectory); rep.saveJobEntryAttribute(id_job, getObjectId(), "setOriginalModificationDate", setOriginalModificationDate); } catch(KettleDatabaseException dbe) { throw new KettleException("Unable to save job entry of type 'unzip' to the repository for id_job="+id_job, dbe); } } public Result execute(Result previousResult, int nr) { Result result = 
previousResult; result.setResult( false ); result.setNrErrors(1); List<RowMetaAndData> rows = result.getRows(); RowMetaAndData resultRow = null; String realFilenameSource = environmentSubstitute(zipFilename); String realWildcardSource = environmentSubstitute(wildcardSource); String realWildcard = environmentSubstitute(wildcard); String realWildcardExclude = environmentSubstitute(wildcardexclude); String realTargetdirectory = environmentSubstitute(sourcedirectory); String realMovetodirectory = environmentSubstitute(movetodirectory); limitFiles=Const.toInt(environmentSubstitute(getLimit()),10); NrErrors=0; NrSuccess=0; successConditionBroken=false; successConditionBrokenExit=false; if(isfromprevious) { if(log.isDetailed()) logDetailed(BaseMessages.getString(PKG, "JobUnZip.Log.ArgFromPrevious.Found",(rows!=null?rows.size():0)+ "")); if(rows.size()==0) return result; }else { if(Const.isEmpty(zipFilename)) { // Zip file/folder is missing logError(BaseMessages.getString(PKG, "JobUnZip.No_ZipFile_Defined.Label")); return result; } } FileObject fileObject = null; FileObject targetdir=null; FileObject movetodir=null; try { // Let's make some checks here, before running job entry ... 
if(Const.isEmpty(realTargetdirectory)) { logError(BaseMessages.getString(PKG, "JobUnZip.Error.TargetFolderMissing")); return result; } boolean exitjobentry=false; // Target folder targetdir = KettleVFS.getFileObject(realTargetdirectory, this); if (!targetdir.exists()) { if(createfolder) { targetdir.createFolder(); if(log.isDetailed()) logDetailed(BaseMessages.getString(PKG, "JobUnZip.Log.TargetFolderCreated",realTargetdirectory)); }else { log.logError(BaseMessages.getString(PKG, "JobUnZip.Error.Label"), BaseMessages.getString(PKG, "JobUnZip.TargetFolderNotFound.Label")); exitjobentry=true; } }else{ if (!(targetdir.getType() == FileType.FOLDER)) { log.logError(BaseMessages.getString(PKG, "JobUnZip.Error.Label"), BaseMessages.getString(PKG, "JobUnZip.TargetFolderNotFolder.Label",realTargetdirectory)); exitjobentry=true; }else { if (log.isDetailed()) logDetailed(BaseMessages.getString(PKG, "JobUnZip.TargetFolderExists.Label",realTargetdirectory)); } } // If user want to move zip files after process // movetodirectory must be provided if(afterunzip==2) { if(Const.isEmpty(movetodirectory)) { log.logError(BaseMessages.getString(PKG, "JobUnZip.Error.Label"), BaseMessages.getString(PKG, "JobUnZip.MoveToDirectoryEmpty.Label")); exitjobentry=true; }else { movetodir = KettleVFS.getFileObject(realMovetodirectory, this); if (!(movetodir.exists()) || !(movetodir.getType() == FileType.FOLDER)) { if(createMoveToDirectory) { movetodir.createFolder(); if(log.isDetailed()) logDetailed(BaseMessages.getString(PKG, "JobUnZip.Log.MoveToFolderCreated",realMovetodirectory)); }else { log.logError(BaseMessages.getString(PKG, "JobUnZip.Error.Label"), BaseMessages.getString(PKG, "JobUnZip.MoveToDirectoryNotExists.Label")); exitjobentry=true; } } } } // We found errors...now exit if(exitjobentry) { return result; } if(isfromprevious) { if (rows!=null) // Copy the input row to the (command line) arguments { for (int iteration=0;iteration<rows.size() && !parentJob.isStopped();iteration++) { 
if(successConditionBroken){ if(!successConditionBrokenExit){ logError(BaseMessages.getString(PKG, "JobUnZip.Error.SuccessConditionbroken",""+NrErrors)); successConditionBrokenExit=true; } result.setNrErrors(NrErrors); return result; } resultRow = rows.get(iteration); // Get sourcefile/folder and wildcard realFilenameSource = resultRow.getString(0,null); realWildcardSource = resultRow.getString(1,null); fileObject = KettleVFS.getFileObject(realFilenameSource, this); if(fileObject.exists()) { processOneFile(result,parentJob, fileObject,realTargetdirectory, realWildcard,realWildcardExclude, movetodir,realMovetodirectory, realWildcardSource); }else { updateErrors(); logError( BaseMessages.getString(PKG, "JobUnZip.Error.CanNotFindFile", realFilenameSource)); } } } }else{ fileObject = KettleVFS.getFileObject(realFilenameSource, this); if (!fileObject.exists()) { log.logError(BaseMessages.getString(PKG, "JobUnZip.Error.Label"), BaseMessages.getString(PKG, "JobUnZip.ZipFile.NotExists.Label",realFilenameSource)); return result; } if (log.isDetailed()) logDetailed(BaseMessages.getString(PKG, "JobUnZip.Zip_FileExists.Label",realFilenameSource)); if(Const.isEmpty(sourcedirectory)) { log.logError(BaseMessages.getString(PKG, "JobUnZip.Error.Label"), BaseMessages.getString(PKG, "JobUnZip.TargetFolderNotFound.Label")); return result; } processOneFile(result,parentJob, fileObject,realTargetdirectory, realWildcard,realWildcardExclude, movetodir,realMovetodirectory, realWildcardSource); } } catch (Exception e) { log.logError(BaseMessages.getString(PKG, "JobUnZip.Error.Label"), BaseMessages.getString(PKG, "JobUnZip.ErrorUnzip.Label",realFilenameSource,e.getMessage())); updateErrors(); } finally { if ( fileObject != null ){ try{ fileObject.close(); }catch ( IOException ex ) {}; } if ( targetdir != null ){ try{ targetdir.close(); }catch ( IOException ex ) {}; } if ( movetodir != null ){ try{ movetodir.close(); }catch ( IOException ex ) {}; } } result.setNrErrors(NrErrors); 
result.setNrLinesWritten(NrSuccess); if(getSuccessStatus()) result.setResult(true); displayResults(); return result; } private void displayResults() { if(log.isDetailed()){ logDetailed("======================================="); logDetailed(BaseMessages.getString(PKG, "JobUnZip.Log.Info.FilesInError","" + NrErrors)); logDetailed(BaseMessages.getString(PKG, "JobUnZip.Log.Info.FilesInSuccess","" + NrSuccess)); logDetailed("======================================="); } } private boolean processOneFile(Result result,Job parentJob, FileObject fileObject,String realTargetdirectory, String realWildcard,String realWildcardExclude, FileObject movetodir,String realMovetodirectory, String realWildcardSource) { boolean retval=false; try{ if(fileObject.getType().equals(FileType.FILE)) { // We have to unzip one zip file if(!unzipFile(fileObject, realTargetdirectory,realWildcard, realWildcardExclude,result, parentJob, fileObject, movetodir,realMovetodirectory)) updateErrors(); else updateSuccess(); }else { // Folder..let's see wildcard FileObject[] children = fileObject.getChildren(); for (int i=0; i<children.length && !parentJob.isStopped(); i++) { if(successConditionBroken){ if(!successConditionBrokenExit){ logError(BaseMessages.getString(PKG, "JobUnZip.Error.SuccessConditionbroken",""+NrErrors)); successConditionBrokenExit=true; } return false; } // Get only file! if (!children[i].getType().equals(FileType.FOLDER)) { boolean unzip=true; String filename=children[i].getName().getPath(); Pattern patternSource = null; if (!Const.isEmpty(realWildcardSource)) patternSource = Pattern.compile(realWildcardSource); // First see if the file matches the regular expression! 
if (patternSource!=null) { Matcher matcher = patternSource.matcher(filename); unzip = matcher.matches(); } if(unzip) { if(!unzipFile(children[i],realTargetdirectory,realWildcard, realWildcardExclude,result, parentJob, fileObject,movetodir, realMovetodirectory)) updateErrors(); else updateSuccess(); } } } } }catch(Exception e) { updateErrors(); logError(BaseMessages.getString(PKG, "JobUnZip.Error.Label",e.getMessage())); }finally { if ( fileObject != null ) { try { fileObject.close(); }catch ( IOException ex ) {}; } } return retval; } private boolean unzipFile(FileObject sourceFileObject, String realTargetdirectory, String realWildcard, String realWildcardExclude, Result result, Job parentJob, FileObject fileObject, FileObject movetodir, String realMovetodirectory) { boolean retval=false; String unzipToFolder=realTargetdirectory; try { if(log.isDetailed()) logDetailed(BaseMessages.getString(PKG, "JobUnZip.Log.ProcessingFile",sourceFileObject.toString())); // Do you create a root folder? // if(rootzip) { String shortSourceFilename = sourceFileObject.getName().getBaseName(); int lenstring=shortSourceFilename.length(); int lastindexOfDot=shortSourceFilename.lastIndexOf('.'); if(lastindexOfDot==-1) lastindexOfDot=lenstring; String foldername=realTargetdirectory + "/" + shortSourceFilename.substring(0, lastindexOfDot); FileObject rootfolder=KettleVFS.getFileObject(foldername, this); if(!rootfolder.exists()) { try { rootfolder.createFolder(); if(log.isDetailed()) logDetailed(BaseMessages.getString(PKG, "JobUnZip.Log.RootFolderCreated",foldername)); } catch(Exception e) { throw new Exception(BaseMessages.getString(PKG, "JobUnZip.Error.CanNotCreateRootFolder",foldername), e); } } unzipToFolder=foldername; } // Try to read the entries from the VFS object... 
// String zipFilename = "zip:"+sourceFileObject.getName().getFriendlyURI(); FileObject zipFile = KettleVFS.getFileObject(zipFilename, this); FileObject[] items = zipFile.findFiles( new AllFileSelector() { public boolean traverseDescendents(FileSelectInfo info) { return true; } public boolean includeFile(FileSelectInfo info) { // Never return the parent directory of a file list. if (info.getDepth() == 0) { return false; } FileObject fileObject = info.getFile(); return fileObject!=null; } } ); Pattern pattern = null; if (!Const.isEmpty(realWildcard)) { pattern = Pattern.compile(realWildcard); } Pattern patternexclude = null; if (!Const.isEmpty(realWildcardExclude)) { patternexclude = Pattern.compile(realWildcardExclude); } for (FileObject item : items) { if(successConditionBroken){ if(!successConditionBrokenExit){ logError(BaseMessages.getString(PKG, "JobUnZip.Error.SuccessConditionbroken",""+NrErrors)); successConditionBrokenExit=true; } return false; } FileObject newFileObject=null; try{ if(log.isDetailed()) logDetailed(BaseMessages.getString(PKG, "JobUnZip.Log.ProcessingZipEntry",item.getName().getURI(), sourceFileObject.toString())); // get real destination filename // String newFileName = unzipToFolder + Const.FILE_SEPARATOR + getTargetFilename(item); newFileObject = KettleVFS.getFileObject(newFileName, this); if( item.getType().equals(FileType.FOLDER)) { // Directory // if(log.isDetailed()) logDetailed(BaseMessages.getString(PKG, "JobUnZip.CreatingDirectory.Label",newFileName)); // Create Directory if necessary ... // if(!newFileObject.exists()) newFileObject.createFolder(); } else { // File // boolean getIt = true; boolean getItexclude = false; // First see if the file matches the regular expression! 
// if (pattern!=null) { Matcher matcher = pattern.matcher(item.getName().getURI()); getIt = matcher.matches(); } if (patternexclude!=null) { Matcher matcherexclude = patternexclude.matcher(item.getName().getURI()); getItexclude = matcherexclude.matches(); } boolean take=takeThisFile(item, newFileName); if (getIt && !getItexclude && take) { if(log.isDetailed()) logDetailed(BaseMessages.getString(PKG, "JobUnZip.ExtractingEntry.Label",item.getName().getURI(),newFileName)); if(iffileexist==IF_FILE_EXISTS_UNIQ) { // Create file with unique name int lenstring=newFileName.length(); int lastindexOfDot=newFileName.lastIndexOf('.'); if(lastindexOfDot==-1) lastindexOfDot=lenstring; newFileName=newFileName.substring(0, lastindexOfDot) + StringUtil.getFormattedDateTimeNow(true) + newFileName.substring(lastindexOfDot, lenstring); if(log.isDebug()) logDebug(BaseMessages.getString(PKG, "JobUnZip.Log.CreatingUniqFile",newFileName)); } // See if the folder to the target file exists... // if (!newFileObject.getParent().exists()) { newFileObject.getParent().createFolder(); // creates the whole path. 
} InputStream is = null; OutputStream os = null; try { is = KettleVFS.getInputStream(item); os = KettleVFS.getOutputStream(newFileObject, false); if(is!=null) { byte[] buff=new byte[2048]; int len; while((len=is.read(buff))>0) { os.write(buff,0,len); } // Add filename to result filenames addFilenameToResultFilenames(result, parentJob, newFileName); } } finally { if(is!=null) is.close(); if(os!=null) os.close(); } }// end if take } } catch(Exception e) { updateErrors(); logError(BaseMessages.getString(PKG, "JobUnZip.Error.CanNotProcessZipEntry",item.getName().getURI(), sourceFileObject.toString()), e); } finally { if(newFileObject!=null) { try { newFileObject.close(); if(setOriginalModificationDate) { // Change last modification date newFileObject.getContent().setLastModifiedTime(item.getContent().getLastModifiedTime()); } }catch(Exception e){};// ignore this } // Close file object // close() does not release resources! KettleVFS.getInstance().getFileSystemManager().closeFileSystem(item.getFileSystem()); if(items!=null) items=null; } }// End for // Here gc() is explicitly called if e.g. createfile is used in the same // job for the same file. The problem is that after creating the file the // file object is not properly garbaged collected and thus the file cannot // be deleted anymore. This is a known problem in the JVM. //System.gc(); // Unzip done... if (afterunzip==1) { // delete zip file boolean deleted = fileObject.delete(); if ( ! 
deleted ) { updateErrors(); logError(BaseMessages.getString(PKG, "JobUnZip.Cant_Delete_File.Label", sourceFileObject.toString())); } // File deleted if(log.isDebug()) logDebug(BaseMessages.getString(PKG, "JobUnZip.File_Deleted.Label", sourceFileObject.toString())); } else if(afterunzip == 2) { FileObject destFile=null; // Move File try { String destinationFilename=movetodir+Const.FILE_SEPARATOR+ fileObject.getName().getBaseName(); destFile=KettleVFS.getFileObject(destinationFilename, this); fileObject.moveTo(destFile); // File moved if(log.isDetailed()) logDetailed(BaseMessages.getString(PKG, "JobUnZip.Log.FileMovedTo",sourceFileObject.toString(),realMovetodirectory)); } catch (Exception e) { updateErrors(); logError(BaseMessages.getString(PKG, "JobUnZip.Cant_Move_File.Label",sourceFileObject.toString(),realMovetodirectory,e.getMessage())); }finally { if ( destFile != null ){ try{ destFile.close(); }catch ( IOException ex ) {}; } } } retval=true; } catch (Exception e) { updateErrors(); log.logError(BaseMessages.getString(PKG, "JobUnZip.Error.Label"), BaseMessages.getString(PKG, "JobUnZip.ErrorUnzip.Label",sourceFileObject.toString(),e.getMessage()), e); } return retval; } private void addFilenameToResultFilenames(Result result, Job parentJob, String newfile) throws Exception { if (addfiletoresult) { // Add file to result files name ResultFile resultFile = new ResultFile(ResultFile.FILE_TYPE_GENERAL , KettleVFS.getFileObject(newfile, this), parentJob.getJobname(), toString()); result.getResultFiles().put(resultFile.getFile().toString(), resultFile); } } private void updateErrors() { NrErrors++; if(checkIfSuccessConditionBroken()) { // Success condition was broken successConditionBroken=true; } } private void updateSuccess() { NrSuccess++; } private boolean checkIfSuccessConditionBroken() { boolean retval=false; if ((NrErrors>0 && getSuccessCondition().equals(SUCCESS_IF_NO_ERRORS)) || (NrErrors>=limitFiles && getSuccessCondition().equals(SUCCESS_IF_ERRORS_LESS))) { 
retval=true; } return retval; } private boolean getSuccessStatus() { boolean retval=false; if ((NrErrors==0 && getSuccessCondition().equals(SUCCESS_IF_NO_ERRORS)) || (NrSuccess>=limitFiles && getSuccessCondition().equals(SUCCESS_IF_AT_LEAST_X_FILES_UN_ZIPPED)) || (NrErrors<=limitFiles && getSuccessCondition().equals(SUCCESS_IF_ERRORS_LESS))) { retval=true; } return retval; } private boolean takeThisFile(FileObject sourceFile, String destinationFile) throws FileSystemException { boolean retval=false; File destination= new File(destinationFile); if(!destination.exists()) { if(log.isDebug()) logDebug(BaseMessages.getString(PKG, "JobUnZip.Log.CanNotFindFile",destinationFile)); return true; } if(log.isDebug()) logDebug(BaseMessages.getString(PKG, "JobUnZip.Log.FileExists",destinationFile)); if(iffileexist==IF_FILE_EXISTS_SKIP) { if(log.isDebug()) logDebug(BaseMessages.getString(PKG, "JobUnZip.Log.FileSkip",destinationFile)); return false; } if(iffileexist==IF_FILE_EXISTS_FAIL) { updateErrors(); logError(BaseMessages.getString(PKG, "JobUnZip.Log.FileError",destinationFile,""+NrErrors)); return false; } if(iffileexist==IF_FILE_EXISTS_OVERWRITE) { if(log.isDebug()) logDebug(BaseMessages.getString(PKG, "JobUnZip.Log.FileOverwrite",destinationFile)); return true; } Long entrySize=sourceFile.getContent().getSize(); Long destinationSize=destination.length(); if(iffileexist==IF_FILE_EXISTS_OVERWRITE_DIFF_SIZE) { if(entrySize!=destinationSize) { if(log.isDebug()) logDebug(BaseMessages.getString(PKG, "JobUnZip.Log.FileDiffSize.Diff", sourceFile.getName().getURI(),""+entrySize,destinationFile,""+destinationSize)); return true; } else { if(log.isDebug()) logDebug(BaseMessages.getString(PKG, "JobUnZip.Log.FileDiffSize.Same", sourceFile.getName().getURI(),""+entrySize,destinationFile,""+destinationSize)); return false; } } if(iffileexist==IF_FILE_EXISTS_OVERWRITE_EQUAL_SIZE) { if(entrySize==destinationSize) { if(log.isDebug()) logDebug(BaseMessages.getString(PKG, 
"JobUnZip.Log.FileEqualSize.Same", sourceFile.getName().getURI(),""+entrySize,destinationFile,""+destinationSize)); return true; } else { if(log.isDebug()) logDebug(BaseMessages.getString(PKG, "JobUnZip.Log.FileEqualSize.Diff", sourceFile.getName().getURI(),""+entrySize,destinationFile,""+destinationSize)); return false; } } if(iffileexist==IF_FILE_EXISTS_OVERWRITE_ZIP_BIG) { if(entrySize>destinationSize) { if(log.isDebug()) logDebug(BaseMessages.getString(PKG, "JobUnZip.Log.FileBigSize.Big", sourceFile.getName().getURI(),""+entrySize,destinationFile,""+destinationSize)); return true; } else { if(log.isDebug()) logDebug(BaseMessages.getString(PKG, "JobUnZip.Log.FileBigSize.Small", sourceFile.getName().getURI(),""+entrySize,destinationFile,""+destinationSize)); return false; } } if(iffileexist==IF_FILE_EXISTS_OVERWRITE_ZIP_BIG_EQUAL) { if(entrySize>=destinationSize) { if(log.isDebug()) logDebug( BaseMessages.getString(PKG, "JobUnZip.Log.FileBigEqualSize.Big", sourceFile.getName().getURI(),""+entrySize,destinationFile,""+destinationSize)); return true; } else { if(log.isDebug()) logDebug( BaseMessages.getString(PKG, "JobUnZip.Log.FileBigEqualSize.Small", sourceFile.getName().getURI(),""+entrySize,destinationFile,""+destinationSize)); return false; } } if(iffileexist==IF_FILE_EXISTS_OVERWRITE_ZIP_SMALL) { if(entrySize<destinationSize) { if(log.isDebug()) logDebug( BaseMessages.getString(PKG, "JobUnZip.Log.FileSmallSize.Small", sourceFile.getName().getURI(),""+entrySize,destinationFile,""+destinationSize)); return true; } else { if(log.isDebug()) logDebug( BaseMessages.getString(PKG, "JobUnZip.Log.FileSmallSize.Big", sourceFile.getName().getURI(),""+entrySize,destinationFile,""+destinationSize)); return false; } } if(iffileexist==IF_FILE_EXISTS_OVERWRITE_ZIP_SMALL_EQUAL) { if(entrySize<=destinationSize) { if(log.isDebug()) logDebug( BaseMessages.getString(PKG, "JobUnZip.Log.FileSmallEqualSize.Small", 
sourceFile.getName().getURI(),""+entrySize,destinationFile,""+destinationSize)); return true; } else { if(log.isDebug()) logDebug( BaseMessages.getString(PKG, "JobUnZip.Log.FileSmallEqualSize.Big", sourceFile.getName().getURI(),""+entrySize,destinationFile,""+destinationSize)); return false; } } if(iffileexist==IF_FILE_EXISTS_UNIQ) { // Create file with unique name return true; } return retval; } public boolean evaluates() { return true; } public static final int getIfFileExistsInt(String desc) { for (int i=0;i<typeIfFileExistsCode.length;i++) { if (typeIfFileExistsCode[i].equalsIgnoreCase(desc)) return i; } return 0; } public static final String getIfFileExistsCode(int i) { if (i<0 || i>=typeIfFileExistsCode.length) return null; return typeIfFileExistsCode[i]; } /** * @return Returns the iffileexist. */ public int getIfFileExist() { return iffileexist; } /** * @param setIfFileExist The iffileexist to set. */ public void setIfFileExists(int iffileexist) { this.iffileexist = iffileexist; } public boolean isCreateMoveToDirectory() { return createMoveToDirectory; } public void setCreateMoveToDirectory(boolean createMoveToDirectory) { this.createMoveToDirectory=createMoveToDirectory; } public void setZipFilename(String zipFilename) { this.zipFilename = zipFilename; } public void setWildcard(String wildcard) { this.wildcard = wildcard; } public void setWildcardExclude(String wildcardexclude) { this.wildcardexclude = wildcardexclude; } public void setSourceDirectory(String targetdirectoryin) { this.sourcedirectory = targetdirectoryin; } public void setMoveToDirectory(String movetodirectory) { this.movetodirectory = movetodirectory; } public String getSourceDirectory() { return sourcedirectory; } public String getMoveToDirectory() { return movetodirectory; } public String getZipFilename() { return zipFilename; } public String getWildcardSource() { return wildcardSource; } public void setWildcardSource(String wildcardSource) { this.wildcardSource=wildcardSource; } public 
String getWildcard() { return wildcard; } public String getWildcardExclude() { return wildcardexclude; } public void setAddFileToResult(boolean addfiletoresultin) { this.addfiletoresult = addfiletoresultin; } public boolean isAddFileToResult() { return addfiletoresult; } public void setDateInFilename(boolean adddate) { this.adddate= adddate; } public void setAddOriginalTimestamp(boolean addOriginalTimestamp) { this.addOriginalTimestamp= addOriginalTimestamp; } public boolean isOriginalTimestamp() { return addOriginalTimestamp; } public void setOriginalModificationDate(boolean setOriginalModificationDate) { this.setOriginalModificationDate= setOriginalModificationDate; } public boolean isOriginalModificationDate() { return setOriginalModificationDate; } public boolean isDateInFilename() { return adddate; } public void setTimeInFilename(boolean addtime) { this.addtime= addtime; } public boolean isTimeInFilename() { return addtime; } public boolean isSpecifyFormat() { return SpecifyFormat; } public void setSpecifyFormat(boolean SpecifyFormat) { this.SpecifyFormat=SpecifyFormat; } public String getDateTimeFormat() { return date_time_format; } public void setDateTimeFormat(String date_time_format) { this.date_time_format=date_time_format; } public void setDatafromprevious(boolean isfromprevious) { this.isfromprevious = isfromprevious; } public boolean getDatafromprevious() { return isfromprevious; } public void setCreateRootFolder(boolean rootzip) { this.rootzip=rootzip; } public boolean isCreateRootFolder() { return rootzip; } public void setCreateFolder(boolean createfolder) { this.createfolder=createfolder; } public boolean isCreateFolder() { return createfolder; } public void setLimit(String nr_limitin) { this.nr_limit=nr_limitin; } public String getLimit() { return nr_limit; } public void setSuccessCondition(String success_condition) { this.success_condition=success_condition; } public String getSuccessCondition() { return success_condition; } /** * @param string 
the filename from * * @return the calculated target filename */ protected String getTargetFilename(FileObject file) throws FileSystemException { String retval=""; String filename=file.getName().getPath(); // Replace possible environment variables... if(filename!=null) retval=filename; if(file.getType()!= FileType.FILE) return retval; if(!SpecifyFormat && !adddate && !addtime) return retval; int lenstring=retval.length(); int lastindexOfDot=retval.lastIndexOf('.'); if(lastindexOfDot==-1) lastindexOfDot=lenstring; retval=retval.substring(0, lastindexOfDot); if(daf==null) daf = new SimpleDateFormat(); Date timestamp = new Date(); if(addOriginalTimestamp) timestamp=new Date(file.getContent().getLastModifiedTime()); if(SpecifyFormat && !Const.isEmpty(date_time_format)){ if(!dateFormatSet) daf.applyPattern(date_time_format); String dt = daf.format(timestamp); retval+=dt; }else { if (adddate) { if(!dateFormatSet) daf.applyPattern("yyyyMMdd"); String d = daf.format(timestamp); retval+="_"+d; } if (addtime) { if(!dateFormatSet) daf.applyPattern("HHmmssSSS"); String t = daf.format(timestamp); retval+="_"+t; } } if(daf!=null) dateFormatSet=true; retval+=filename.substring(lastindexOfDot, lenstring); return retval; } @Override public void check(List<CheckResultInterface> remarks, JobMeta jobMeta) { ValidatorContext ctx1 = new ValidatorContext(); putVariableSpace(ctx1, getVariables()); putValidators(ctx1, notBlankValidator(), fileDoesNotExistValidator()); andValidator().validate(this, "zipFilename", remarks, ctx1);//$NON-NLS-1$ if (2 == afterunzip) { // setting says to move andValidator().validate(this, "moveToDirectory", remarks, putValidators(notBlankValidator())); //$NON-NLS-1$ } andValidator().validate(this, "sourceDirectory", remarks, putValidators(notBlankValidator())); //$NON-NLS-1$ } }
apache-2.0
rodsol/relex-temp
src/java_test/relex/test/TestStanford.java
16686
/*
 * Copyright 2009 Linas Vepstas
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package relex.test;

import java.util.ArrayList;
import java.util.Collections;

import relex.ParsedSentence;
import relex.RelationExtractor;
import relex.Sentence;
import relex.output.StanfordView;

/**
 * Regression tests comparing the dependency relations that RelEx
 * generates against the relations expected from the Stanford parser.
 * Run via main(); results are printed to stderr.
 */
public class TestStanford
{
	private final RelationExtractor re;
	private int pass;   // count of sentences whose relations matched
	private int fail;   // count of sentences that miscompared

	public TestStanford()
	{
		re = new RelationExtractor();
		re.do_stanford = true;
		pass = 0;
		fail = 0;
	}

	/**
	 * Split a newline-separated relation list into a sorted list of
	 * individual relations, so that comparison is order-independent.
	 */
	public ArrayList<String> split(String a)
	{
		String[] sa = a.split("\n");
		ArrayList<String> saa = new ArrayList<String>();
		for (String s : sa)
		{
			saa.add(s);
		}
		Collections.sort (saa);
		return saa;
	}

	/**
	 * Shared comparison logic for test_sentence() and
	 * test_tagged_sentence(); the two previously duplicated this code
	 * and differed only in whether Penn part-of-speech tagging is
	 * enabled (and correspondingly passed to StanfordView).
	 *
	 * @param sent the sentence to parse with RelEx
	 * @param sf newline-separated relations the Stanford parser generates
	 * @param penn_tagging whether to enable Penn POS tagging
	 * @return true if RelEx generates the same dependencies as sf
	 */
	private boolean test_relations (String sent, String sf, boolean penn_tagging)
	{
		re.do_penn_tagging = penn_tagging;
		Sentence sntc = re.processSentence(sent);
		// Only the top-ranked parse is compared.
		ParsedSentence parse = sntc.getParses().get(0);
		String rs = StanfordView.printRelations(parse, penn_tagging);

		ArrayList<String> sfa = split(sf);
		ArrayList<String> rsa = split(rs);
		if (sfa.size() != rsa.size())
		{
			System.err.println("Error: size miscompare:\n" +
				"\tStanford = " + sfa + "\n" +
				"\tRelEx = " + rsa + "\n" +
				"\tSentence = " + sent);
			fail ++;
			return false;
		}

		for (int i=0; i< sfa.size(); i++)
		{
			if (!sfa.get(i).equals (rsa.get(i)))
			{
				System.err.println("Error: content miscompare:\n" +
					"\tStanford = " + sfa + "\n" +
					"\tRelEx = " + rsa + "\n" +
					"\tSentence = " + sent);
				fail ++;
				return false;
			}
		}

		pass ++;
		return true;
	}

	/**
	 * First argument is the sentence.
	 * Second argument is a list of the relations that the
	 * Stanford parser generates.
	 * Return true if relex generates that same dependencies
	 * as the second argument.
	 */
	public boolean test_sentence (String sent, String sf)
	{
		return test_relations(sent, sf, false);
	}

	/** Same as test_sentence(), but with Penn POS tagging enabled. */
	public boolean test_tagged_sentence (String sent, String sf)
	{
		return test_relations(sent, sf, true);
	}

	public static void main(String[] args)
	{
		TestStanford ts = new TestStanford();
		boolean rc = true;

		rc &= ts.test_sentence ("Who invented sliced bread?",
			"nsubj(invented-2, who-1)\n" +
			"amod(bread-4, sliced-3)\n" +
			"dobj(invented-2, bread-4)");

		rc &= ts.test_sentence ("Jim runs quickly.",
			"nsubj(runs-2, Jim-1)\n" +
			"advmod(runs-2, quickly-3)");

		rc &= ts.test_sentence ("The bird, a robin, sang sweetly.",
			"det(bird-2, the-1)\n" +
			"nsubj(sang-7, bird-2)\n" +
			"det(robin-5, a-4)\n" +
			"appos(bird-2, robin-5)\n" +
			"advmod(sang-7, sweetly-8)");

		rc &= ts.test_sentence ("There is a place we can go.",
			"expl(is-2, there-1)\n" +
			"det(place-4, a-3)\n" +
			"nsubj(is-2, place-4)\n" +
			"nsubj(go-7, we-5)\n" +
			"aux(go-7, can-6)");
			// wtf ?? dep is not documented .. not sure what to do here ...
			// "dep(is-2, go-7)");

		rc &= ts.test_sentence ("The linebacker gave the quarterback a push.",
			"det(linebacker-2, the-1)\n" +
			"nsubj(gave-3, linebacker-2)\n" +
			"det(quarterback-5, the-4)\n" +
			"iobj(gave-3, quarterback-5)\n" +
			"det(push-7, a-6)\n" +
			"dobj(gave-3, push-7)\n");

		rc &= ts.test_sentence ("He stood at the goal line.",
			"nsubj(stood-2, he-1)\n" +
			"det(line-6, the-4)\n" +
			"nn(line-6, goal-5)\n" +
			"prep_at(stood-2, line-6)");

		// acomp example from Stanford docs
		rc &= ts.test_sentence ("She looks very beautiful.",
			"nsubj(looks-2, she-1)\n" +
			"advmod(beautiful-4, very-3)\n" +
			"acomp(looks-2, beautiful-4)");

		// advcl example from Stanford docs
		rc &= ts.test_sentence ("The accident happened as the night was falling.",
			"det(accident-2, the-1)\n" +
			"nsubj(happened-3, accident-2)\n" +
			"mark(falling-8, as-4)\n" +
			"det(night-6, the-5)\n" +
			"nsubj(falling-8, night-6)\n" +
			"aux(falling-8, was-7)\n" +
			"advcl(happened-3, falling-8)");

		// advcl example from Stanford docs
		rc &= ts.test_sentence ("If you know who did it, you should tell the teacher.",
			"mark(know-3, if-1)\n" +
			"nsubj(know-3, you-2)\n" +
			"advcl(tell-10, know-3)\n" +
			"nsubj(did-5, who-4)\n" +
			"ccomp(know-3, did-5)\n" +
			"dobj(did-5, it-6)\n" +
			"nsubj(tell-10, you-8)\n" +
			"aux(tell-10, should-9)\n" +
			"det(teacher-12, the-11)\n" +
			"dobj(tell-10, teacher-12)");

		// agent example from Stanford docs
		rc &= ts.test_sentence ("The man has been killed by the police.",
			"det(man-2, the-1)\n" +
			"nsubjpass(killed-5, man-2)\n" +
			"aux(killed-5, has-3)\n" +
			"auxpass(killed-5, been-4)\n" +
			"det(police-8, the-7)\n" +
			"agent(killed-5, police-8)");

		rc &= ts.test_sentence ("Effects caused by the protein are important.",
			"nsubj(important-7, effects-1)\n" +
			"partmod(effects-1, caused-2)\n" +
			"det(protein-5, the-4)\n" +
			"agent(caused-2, protein-5)\n" +
			"cop(important-7, are-6)");

		rc &= ts.test_sentence ("Sam, my brother, has arrived.",
			"nsubj(arrived-7, Sam-1)\n" +
			"poss(brother-4, my-3)\n" +
			"appos(Sam-1, brother-4)\n" +
			"aux(arrived-7, has-6)");

		rc &= ts.test_sentence ("What is that?",
			"attr(is-2, what-1)\n" +
			"nsubj(is-2, that-3)");

		rc &= ts.test_sentence ("Reagan has died.",
			"nsubj(died-3, Reagan-1)\n" +
			"aux(died-3, has-2)");

		rc &= ts.test_sentence ("He should leave.",
			"nsubj(leave-3, he-1)\n" +
			"aux(leave-3, should-2)");

		rc &= ts.test_sentence ("Kennedy has been killed.",
			"nsubjpass(killed-4, Kennedy-1)\n" +
			"aux(killed-4, has-2)\n" +
			"auxpass(killed-4, been-3)");

		rc &= ts.test_sentence ("Kennedy was killed.",
			"nsubjpass(killed-3, Kennedy-1)\n" +
			"auxpass(killed-3, was-2)");

		rc &= ts.test_sentence ("Kennedy got killed.",
			"nsubjpass(killed-3, Kennedy-1)\n" +
			"auxpass(killed-3, got-2)");

		rc &= ts.test_sentence ("Bill is big.",
			"nsubj(big-3, Bill-1)\n" +
			"cop(big-3, is-2)\n");

		rc &= ts.test_sentence ("Bill is an honest man.",
			"nsubj(man-5, Bill-1)\n" +
			"cop(man-5, is-2)\n" +
			"det(man-5, an-3)\n" +
			"amod(man-5, honest-4)");

		rc &= ts.test_sentence ("What she said makes sense.",
			"dobj(said-3, what-1)\n" +
			"nsubj(said-3, she-2)\n" +
			"csubj(makes-4, said-3)\n" +
			"dobj(makes-4, sense-5)");

		rc &= ts.test_sentence ("What she said is not true.",
			"dobj(said-3, what-1)\n" +
			"nsubj(said-3, she-2)\n" +
			"csubj(true-6, said-3)\n" +
			"cop(true-6, is-4)\n" +
			"neg(true-6, not-5)");

		rc &= ts.test_sentence ("Which book do you prefer?",
			"det(book-2, which-1)\n" +
			"dobj(prefer-5, book-2)\n" +
			"aux(prefer-5, do-3)\n" +
			"nsubj(prefer-5, you-4)");

		rc &= ts.test_sentence ("There is a ghost in the room.",
			"expl(is-2, there-1)\n" +
			"det(ghost-4, a-3)\n" +
			"nsubj(is-2, ghost-4)\n" +
			"det(room-7, the-6)\n" +
			"prep_in(is-2, room-7)");

		rc &= ts.test_sentence ("She gave me a raise.",
			"nsubj(gave-2, she-1)\n" +
			"iobj(gave-2, me-3)\n" +
			"det(raise-5, a-4)\n" +
			"dobj(gave-2, raise-5)");

		rc &= ts.test_sentence ("The director is 65 years old.",
			"det(director-2, the-1)\n" +
			"nsubj(old-6, director-2)\n" +
			"cop(old-6, is-3)\n" +
			"num(years-5, 65-4)\n" +
			"measure(old-6, years-5)");

		rc &= ts.test_sentence ("Sam eats 3 sheep.",
			"nsubj(eats-2, Sam-1)\n" +
			"num(sheep-4, 3-3)\n" +
			"dobj(eats-2, sheep-4)");

		/****************
		 * I don't get it. Stanford makes a num/number distinction I can't grok.
		rc &= ts.test_sentence ("I lost $ 3.2 billion.",
			"nsubj(lost-2, I-1)\n" +
			"dobj(lost-2, $-3)\n" +
			"number($-3, 3.2-4)\n" +
			"number($-3, billion-5)");
		***********/

		rc &= ts.test_sentence ("Truffles picked during the spring are tasty.",
			"nsubj(tasty-7, truffles-1)\n" +
			"partmod(truffles-1, picked-2)\n" +
			"det(spring-5, the-4)\n" +
			"prep_during(picked-2, spring-5)\n" +
			"cop(tasty-7, are-6)");

		/****************
		 * Currently fails due to xcomp generation problems
		 *
		rc &= ts.test_sentence ("We went to their offices to get Bill's clothes.",
			"nsubj(went-2, we-1)\n" +
			"xsubj(get-7, we-1)\n" +
			"poss(offices-5, their-4)\n" +
			"prep_to(went-2, offices-5)\n" +
			"aux(get-7, to-6)\n" +
			"xcomp(went-2, get-7)\n" +
			"poss(clothes-10, Bill-8)\n" +
			"dobj(get-7, clothes-10)");
		***********/

		/****************
		 * See README-Stanford for details.
		rc &= ts.test_sentence ("All the boys are here.",
			"predet(boys-3, all-1)\n" +
			"det(boys-3, the-2)\n" +
			"nsubj(are-4, boys-3)\n" +
			"advmod(are-4, here-5)");
		***********/

		/****************
		 * These are ambiguous parses.
		 * Stanford picks the opposite choice from Relex.
		 * See the README-Stanford for a discussion.
		rc &= ts.test_sentence ("I saw a cat in a hat.",
			"nsubj(saw-2, I-1)\n" +
			"det(cat-4, a-3)\n" +
			"dobj(saw-2, cat-4)\n" +
			"det(hat-7, a-6)\n" +
			"prep_in(cat-4, hat-7)");
		rc &= ts.test_sentence ("I saw a cat with a telescope.",
			"nsubj(saw-2, I-1)\n" +
			"det(cat-4, a-3)\n" +
			"dobj(saw-2, cat-4)\n" +
			"det(telescope-7, a-6)\n" +
			"prep_with(cat-4, telescope-7)");
		***********/

		rc &= ts.test_sentence ("He is responsible for meals.",
			"nsubj(responsible-3, he-1)\n" +
			"cop(responsible-3, is-2)\n" +
			"prep_for(responsible-3, meals-5)\n");

		rc &= ts.test_sentence ("They shut down the station.",
			"nsubj(shut-2, they-1)\n" +
			"prt(shut-2, down-3)\n" +
			"det(station-5, the-4)\n" +
			"dobj(shut-2, station-5)");

		rc &= ts.test_sentence ("About 200 people came to the party.",
			"quantmod(200-2, about-1)\n" +
			"num(people-3, 200-2)\n" +
			"nsubj(came-4, people-3)\n" +
			"det(party-7, the-6)\n" +
			"prep_to(came-4, party-7)");

		rc &= ts.test_sentence ("I saw the man who you love.",
			"nsubj(saw-2, I-1)\n" +
			"det(man-4, the-3)\n" +
			"dobj(saw-2, man-4)\n" +
			"dobj(love-7, man-4)\n" +
			"rel(love-7, who-5)\n" +
			"nsubj(love-7, you-6)\n" +
			"rcmod(man-4, love-7)");

		/****************
		 *
		 * relex is failing to generate teh following:
		 * Almost got it w/the B** rules but not quite ...
			rel(love-8, wife-6)
			rcmod(man-4, love-8)
		rc &= ts.test_sentence ("I saw the man whose wife you love.",
			"nsubj(saw-2, I-1)\n" +
			"det(man-4, the-3)\n" +
			"dobj(saw-2, man-4)\n" +
			"poss(wife-6, whose-5)\n" +
			"dobj(love-8, wife-6)\n" +
			"rel(love-8, wife-6)\n" +
			"nsubj(love-8, you-7)\n" +
			"rcmod(man-4, love-8)");
		***********/

		rc &= ts.test_sentence ("I am ready to leave.",
			"nsubj(ready-3, I-1)\n" +
			"cop(ready-3, am-2)\n" +
			"aux(leave-5, to-4)\n" +
			"xcomp(ready-3, leave-5)");

		rc &= ts.test_sentence ("Tom likes to eat fish.",
			"nsubj(likes-2, Tom-1)\n" +
			"xsubj(eat-4, Tom-1)\n" +
			"aux(eat-4, to-3)\n" +
			"xcomp(likes-2, eat-4)\n" +
			"dobj(eat-4, fish-5)");

		/****************
		rc &= ts.test_sentence ("He says that you like to swim.",
			"nsubj(says-2, he-1)\n" +
			"complm(like-5, that-3)\n" +
			"nsubj(like-5, you-4)\n" +
			"ccomp(says-2, like-5)\n" +
			"nsubj(swim-7, to-6)\n" +  // NFW that this can be right.
			"ccomp(like-5, swim-7)");
		***********/

		/****************
		rc &= ts.test_sentence ("The garage is next to the house.",
			"det(garage-2, the-1)\n" +
			"nsubj(next-4, garage-2)\n" +
			"cop(next-4, is-3)\n" +
			"det(house-7, the-6)\n" +
			"prep_to(next-4, house-7)");
		***********/

		// =========================================================
		// PENN PART_OF_SPEECH TAGGING
		// =========================================================
		//
		rc &= ts.test_tagged_sentence ("Truffles picked during the spring are tasty.",
			"nsubj(tasty-7-JJ, truffles-1-NNS)\n" +
			"partmod(truffles-1-NNS, picked-2-VBN)\n" +
			"det(spring-5-NN, the-4-DT)\n" +
			"prep_during(picked-2-VBN, spring-5-NN)\n" +
			"cop(tasty-7-JJ, are-6-VBP)");

		rc &= ts.test_tagged_sentence ("I ate twelve truffles.",
			"nsubj(ate-2-VBD, I-1-PRP)\n" +
			"num(truffles-4-NNS, twelve-3-CD)\n" +
			"dobj(ate-2-VBD, truffles-4-NNS)");

		rc &= ts.test_tagged_sentence ("I have eaten twelve truffles.",
			"nsubj(eaten-3-VBN, I-1-PRP)\n" +
			"aux(eaten-3-VBN, have-2-VBP)\n" +
			"num(truffles-5-NNS, twelve-4-CD)\n" +
			"dobj(eaten-3-VBN, truffles-5-NNS)");

		rc &= ts.test_tagged_sentence ("I had eaten twelve truffles.",
			"nsubj(eaten-3-VBN, I-1-PRP)\n" +
			"aux(eaten-3-VBN, had-2-VBD)\n" +
			"num(truffles-5-NNS, twelve-4-CD)\n" +
			"dobj(eaten-3-VBN, truffles-5-NNS)");

		rc &= ts.test_tagged_sentence ("The truffles were eaten.",
			"det(truffles-2-NNS, the-1-DT)\n" +
			"nsubjpass(eaten-4-VBN, truffles-2-NNS)\n" +
			"auxpass(eaten-4-VBN, were-3-VBD)");

		// Full disclosure: Stanford currently generates
		// dep(time-4-NN, young-8-JJ) which just means it doesn't know
		// the right answer (which is advcl, right?).
		// It also generates advmod(young-8-JJ, when-5-WRB) in addition
		// to rel(young-8-JJ, when-5-WRB) which is not quite right
		// either.
		rc &= ts.test_tagged_sentence ("There was a time when we were young.",
			"expl(was-2-VBD, there-1-EX)\n" +
			"det(time-4-NN, a-3-DT)\n" +
			"nsubj(was-2-VBD, time-4-NN)\n" +
			"rel(young-8-JJ, when-5-WRB)\n" +
			"nsubj(young-8-JJ, we-6-PRP)\n" +
			"cop(young-8-JJ, were-7-VBD)\n" +
			"advcl(time-4-NN, young-8-JJ)");

		rc &= ts.test_tagged_sentence ("Is there a better way?",
			"expl(is-1-VBZ, there-2-EX)\n" +
			"det(way-5-NN, a-3-DT)\n" +
			"amod(way-5-NN, better-4-JJR)\n" +
			"nsubj(is-1-VBZ, way-5-NN)");

		rc &= ts.test_tagged_sentence ("Is this the largest you can find?",
			"cop(largest-4-JJS, is-1-VBZ)\n" +
			"nsubj(largest-4-JJS, this-2-DT)\n" +
			"det(largest-4-JJS, the-3-DT)\n" +
			"nsubj(find-7-VB, you-5-PRP)\n" +
			"aux(find-7-VB, can-6-MD)\n" +
			"rcmod(largest-4-JJS, find-7-VB)");

		rc &= ts.test_tagged_sentence ("But my efforts to win his heart have failed.",
			"poss(efforts-3-NNS, my-2-PRP$)\n" +
			"nsubj(failed-9-VBN, efforts-3-NNS)\n" +
			"aux(win-5-VB, to-4-TO)\n" +
			"infmod(efforts-3-NNS, win-5-VB)\n" +
			"poss(heart-7-NN, his-6-PRP$)\n" +
			"dobj(win-5-VB, heart-7-NN)\n" +
			"aux(failed-9-VBN, have-8-VBP)");

		rc &= ts.test_tagged_sentence ("The undergrads are occasionally late.",
			"det(undergrads-2-NNS, the-1-DT)\n" +
			"nsubj(late-5-JJ, undergrads-2-NNS)\n" +
			"cop(late-5-JJ, are-3-VBP)\n" +
			"advmod(late-5-JJ, occasionally-4-RB)");

		rc &= ts.test_tagged_sentence ("The height of Mount Everest is 8,848 metres.",
			"det(height-2-NN, the-1-DT)\n" +
			"nsubj(metres-8-NNS, height-2-NN)\n" +
			"nn(Everest-5-NNP, Mount-4-NNP)\n" +
			"prep_of(height-2-NN, Everest-5-NNP)\n" +
			"cop(metres-8-NNS, is-6-VBZ)\n" +
			"num(metres-8-NNS, 8,848-7-CD)");

		rc &= ts.test_tagged_sentence ("It happened on December 3rd, 1990.",
			"nsubj(happened-2-VBD, it-1-PRP)\n" +
			"prep_on(happened-2-VBD, December-4-NNP)\n" +
			"num(December-4-NNP, 3rd-5-CD)\n" +
			"num(December-4-NNP, 1990-7-CD)");

		if (rc)
		{
			System.err.println("Tested " + ts.pass + " sentences, test passed OK");
		}
		else
		{
			System.err.println("Test failed\n\t" +
				ts.fail + " sentences failed\n\t" +
				ts.pass + " sentences passed");
		}
	}
}
apache-2.0
cpollet/itinerants
webservice/messaging/src/main/java/net/cpollet/itinerants/messaging/context/NotifierContext.java
569
package net.cpollet.itinerants.messaging.context;

import net.cpollet.itinerants.core.domain.Person;
import net.cpollet.itinerants.messaging.RabbitPersonNotifier;
import org.springframework.amqp.rabbit.core.RabbitTemplate;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

/**
 * Spring configuration wiring the messaging-layer implementation of
 * {@link Person.Notifier} into the application context.
 *
 * Created by cpollet on 13.05.17.
 */
@Configuration
public class NotifierContext {

    /**
     * Builds the RabbitMQ-backed person notifier.
     *
     * @param rabbitTemplate AMQP template used to publish notification messages
     * @return the notifier bean delegating to RabbitMQ
     */
    @Bean
    Person.Notifier personNotifier(RabbitTemplate rabbitTemplate) {
        final Person.Notifier notifier = new RabbitPersonNotifier(rabbitTemplate);
        return notifier;
    }
}
apache-2.0
dreamquster/yarn-learn
src/main/java/org/dknight/app/kafka/KafkaProduceEncoder.java
570
package org.dknight.app.kafka; import io.netty.buffer.ByteBuf; import io.netty.channel.ChannelHandlerContext; import io.netty.handler.codec.MessageToByteEncoder; import org.dknight.app.kafka.request.KafkaProduceRequest; /** * Created by fanming.chen on 2016/9/5 0005. */ public class KafkaProduceEncoder extends MessageToByteEncoder<KafkaProduceRequest> { @Override protected void encode(ChannelHandlerContext channelHandlerContext, KafkaProduceRequest produceRequest, ByteBuf byteBuf) throws Exception { produceRequest.serializeTo(byteBuf); } }
apache-2.0
Nikit2793/Aranya-2014
src/com/limitskyapps/aranya2014/StarNight.java
3913
package com.limitskyapps.aranya2014;

import com.limitskyapps.aranya2014.Home.MyAdapter;

import android.app.ActionBar;
import android.app.Activity;
import android.content.Context;
import android.content.Intent;
import android.graphics.Color;
import android.graphics.drawable.ColorDrawable;
import android.os.Bundle;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.view.animation.Animation;
import android.view.animation.AnimationUtils;
import android.widget.AdapterView;
import android.widget.AdapterView.OnItemClickListener;
import android.widget.BaseAdapter;
import android.widget.ImageView;
import android.widget.ListView;
import android.widget.RelativeLayout;
import android.widget.TextView;

/**
 * "Star Night" show listing screen: a ListView of three hard-coded shows
 * (Gurdas Maan, Laser Show, Wheel Chair Dance). Tapping the first row opens
 * {@link StarPage}; the other rows are not clickable targets.
 */
public class StarNight extends Activity {

	// List of shows and its backing adapter.
	private ListView starlist;
	private StarAdapter starlistAdapter;

	@Override
	protected void onCreate(Bundle savedInstanceState) {
		super.onCreate(savedInstanceState);
		setContentView(R.layout.starnight_layout);
		// Brand the action bar (green background, "Shows" title, up navigation).
		ActionBar bar = getActionBar();
		bar.setBackgroundDrawable(new ColorDrawable(Color.parseColor("#00bf8f")));
		bar.setTitle("Shows");
		bar.setDisplayHomeAsUpEnabled(true);
		starlist = (ListView) findViewById(R.id.listView1);
		starlistAdapter = new StarAdapter(this);
		starlist.setAdapter(starlistAdapter);
		starlist.setOnItemClickListener(new OnItemClickListener() {

			@Override
			public void onItemClick(AdapterView<?> parent, View view, int position, long id) {
				// Only the first entry (Gurdas Maan) has a detail page.
				if (position == 0) {
					Intent i = new Intent(StarNight.this, StarPage.class);
					i.addFlags(Intent.FLAG_ACTIVITY_NO_ANIMATION);
					startActivityForResult(i, 0);
				}
			}
		});
	}

	/**
	 * Adapter backed by three parallel arrays (image, name, date, designation);
	 * all arrays must stay the same length.
	 */
	private class StarAdapter extends BaseAdapter {

		private Context context;
		int[] images = { R.drawable.gmann, R.drawable.lasernight, R.drawable.wheelchair };
		String[] name = { "Gurdas Maan", "Laser Show", "Wheel Chair Dance" };
		String[] date = { "15 Nov", "14 Nov", "14 Nov" };
		String[] desig = { "Singer/Composer", "Show", "Show" };

		public StarAdapter(Context c) {
			this.context = c;
		}

		@Override
		public int getCount() {
			// Row count is driven by the name array.
			return name.length;
		}

		@Override
		public Object getItem(int position) {
			return name[position];
		}

		@Override
		public long getItemId(int position) {
			// Positions double as stable ids.
			return position;
		}

		// Last bound position; used to pick the slide-up vs slide-down animation.
		private int lastPosition = -1;

		@Override
		public View getView(int position, View convertView, ViewGroup parent) {
			// Standard convertView recycling: inflate only when no row is reusable.
			View row = null;
			if (convertView == null) {
				LayoutInflater inflator = (LayoutInflater) context.getSystemService(Context.LAYOUT_INFLATER_SERVICE);
				row = inflator.inflate(R.layout.starnight_inflator, parent, false);
			} else {
				row = convertView;
			}
			TextView star_name = (TextView) row.findViewById(R.id.star_name);
			TextView dates = (TextView) row.findViewById(R.id.date);
			TextView designation = (TextView) row.findViewById(R.id.Designation);
			TextView Viewinfo = (TextView) row.findViewById(R.id.textView1);
			// Rows 1 and 2 hide the "view info" label (no detail page for them).
			// NOTE(review): there is no else branch restoring the label, so a
			// recycled row that was cleared here keeps an empty label when later
			// reused for position 0 — the original text only comes from the XML
			// layout at inflation time. Confirm and reset it for position 0.
			if ((position == 1) || (position == 2)) {
				Viewinfo.setText("");
				Viewinfo.setBackgroundColor(Color.TRANSPARENT);
			}
			star_name.setText(name[position]);
			dates.setText(date[position]);
			designation.setText(desig[position]);
			RelativeLayout star_image = (RelativeLayout) row.findViewById(R.id.relativeLayout1);
			star_image.setBackgroundResource(images[position]);
			// Animate rows upward when scrolling down, downward when scrolling up.
			Animation animation = AnimationUtils.loadAnimation(context,
					(position > lastPosition) ? R.anim.up_from_bottom : R.anim.down_from_top);
			row.startAnimation(animation);
			lastPosition = position;
			return row;
		}
	}

	@Override
	protected void onPause() {
		super.onPause();
		// finish() intentionally left disabled so the activity survives pauses.
		//finish();
	}
}
apache-2.0
tabish121/proton4j
protonj2/src/test/java/org/apache/qpid/protonj2/types/messaging/ApplicationPropertiesTest.java
2191
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.qpid.protonj2.types.messaging; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNotSame; import static org.junit.Assert.assertNull; import java.util.HashMap; import java.util.Map; import org.apache.qpid.protonj2.types.messaging.Section.SectionType; import org.junit.Test; public class ApplicationPropertiesTest { @Test public void testToStringOnEmptyObject() { assertNotNull(new ApplicationProperties(null).toString()); } @Test public void testGetPropertiesFromEmptySection() { assertNull(new ApplicationProperties(null).getValue()); } @Test public void testCopyFromEmpty() { assertNull(new ApplicationProperties(null).copy().getValue()); } @Test public void testCopy() { Map<String, Object> payload = new HashMap<>(); payload.put("key", "value"); ApplicationProperties original = new ApplicationProperties(payload); ApplicationProperties copy = original.copy(); assertNotSame(original, copy); assertNotSame(original.getValue(), copy.getValue()); assertEquals(original.getValue(), copy.getValue()); } @Test public void testGetType() { assertEquals(SectionType.ApplicationProperties, new 
ApplicationProperties(null).getType()); } }
apache-2.0
LiuJianan/Graduate-Graph
src/java/com/chinamobile/bcbsp/examples/MaxAggregator.java
1671
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.chinamobile.bcbsp.examples; import com.chinamobile.bcbsp.api.Aggregator; /** * MaxAggregator An example implementation of Aggregator. To do the max * operation on outgoing edge number. */ public class MaxAggregator extends Aggregator<AggregateValueOutEdgeNum> { /** * Implemented by the user. * @param aggValues AggregateValueOutEdgeNum * @return result the aggregateValue of outEdgeNum */ @Override public AggregateValueOutEdgeNum aggregate( Iterable<AggregateValueOutEdgeNum> aggValues) { /** Define max variable */ long max = 0; for (AggregateValueOutEdgeNum aggValue : aggValues) { if (aggValue.getValue() > max) { max = aggValue.getValue(); } } AggregateValueOutEdgeNum result = new AggregateValueOutEdgeNum(); result.setValue(max); return result; } //end-aggregate }
apache-2.0
eidos71/kingchallenge
src/main/java/org/eidos/kingchallenge/repository/SimpleLoginRepository.java
3779
package org.eidos.kingchallenge.repository; import java.util.Collections; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; import javax.annotation.concurrent.GuardedBy; import javax.annotation.concurrent.ThreadSafe; import org.eidos.kingchallenge.KingdomConfManager; import org.eidos.kingchallenge.domain.model.KingUser; import org.eidos.kingchallenge.exceptions.KingInvalidSessionException; import org.eidos.kingchallenge.exceptions.LogicKingChallengeException; import org.eidos.kingchallenge.exceptions.enums.LogicKingError; import org.eidos.kingchallenge.persistance.LoginPersistanceMap; @ThreadSafe public final class SimpleLoginRepository implements LoginRepository { /** * Persistance storage for logged users */ @GuardedBy("loginPersistance") private final LoginPersistanceMap<Long, String, KingUser> loginPersistance; public SimpleLoginRepository() { loginPersistance = KingdomConfManager.getInstance().getPersistanceBag() .getLoginPersistance(); } @Override public Map<Long, KingUser> getAllKingdomByLogin() { Map<Long, KingUser> result = loginPersistance.getMapByLogin(); if (result == null) return Collections.emptyMap(); return result; } @Override public Map<String, KingUser> getAllKingdomBySession() { Map<String, KingUser> result = loginPersistance.getMapBySession(); if (result == null) return Collections.emptyMap(); return result; } @Override public String addKingUser(KingUser user) { synchronized (loginPersistance) { boolean exists = getAllKingdomByLogin().containsKey( user.getKingUserId().get()); if (exists) { this.loginPersistance.removeByLogin(user.getKingUserId().get()); } //throw new LogicKingChallengeException(LogicKingError.USER_EXISTS); this.loginPersistance.put(user.getKingUserId().get(), user.getSessionKey(), user); } return user.getSessionKey(); } @Override public boolean removeKingUserByLogin(AtomicLong loginKey) { if (loginKey == null || loginKey.get() == 0) throw new LogicKingChallengeException(LogicKingError.INVALID_TOKEN); boolean 
missing = !getAllKingdomByLogin().containsKey(loginKey.get()); if (missing) throw new LogicKingChallengeException(LogicKingError.USER_NOT_FOUND); return this.loginPersistance.removeByLogin(loginKey.get()); } @Override public boolean removeKingUserBySession(String sessionId) { if (sessionId == null || "".equals(sessionId)) throw new KingInvalidSessionException(); boolean missing = !getAllKingdomBySession().containsKey(sessionId); if (missing) throw new LogicKingChallengeException(LogicKingError.USER_NOT_FOUND); return this.loginPersistance.removeBySession(sessionId); } @Override public void removeKingUser(KingUser user) { throw new UnsupportedOperationException(); } @Override public KingUser updateKingUser(KingUser user) { // if the user comes empty, we return the update action String resultSession=""; if (user == null) return null; synchronized (loginPersistance) { boolean found = !getAllKingdomByLogin().containsKey( user.getKingUserId().get()); if (found) { this.loginPersistance.removeByLogin(user.getKingUserId().get()); } this.loginPersistance.put(user.getKingUserId().get(), user.getSessionKey(), user); } return user; } @Override public KingUser findByLoginId(AtomicLong loginKey) { if (loginKey == null || loginKey.get() == 0) throw new LogicKingChallengeException(LogicKingError.INVALID_TOKEN); return getAllKingdomByLogin().get(loginKey.get()); } @Override public KingUser findBySessionId(String sessionId) { if (sessionId == null || "".equals(sessionId)) throw new KingInvalidSessionException(); return getAllKingdomBySession().get(sessionId); } }
apache-2.0
GerritCodeReview/plugins_code-owners
java/com/google/gerrit/plugins/codeowners/api/RequiredApprovalInfo.java
1479
// Copyright (C) 2020 The Android Open Source Project // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package com.google.gerrit.plugins.codeowners.api; /** * Representation of a configured required approval in the REST API. * * <p>This class determines the JSON format of a configured required approval in the REST API. * * <p>Represents an approval that is required for an action. * * <p>A required approval is used to represent: * * <ul> * <li>the approval that is required from code owners to approve the files in a change (see {@link * CodeOwnerProjectConfigInfo#requiredApproval}) * <li>the approval that is required to override the code owners submit check (see {@link * CodeOwnerProjectConfigInfo#overrideApproval}) * </ul> */ public class RequiredApprovalInfo { /** The name of the label on which an approval is required. */ public String label; /** The voting value that is required on the label. */ public short value; }
apache-2.0
Siziksu/AndroidIntents
app/src/main/java/com/siziksu/intents/ui/activity/WebViewActivity.java
1702
package com.siziksu.intents.ui.activity; import android.annotation.SuppressLint; import android.net.Uri; import android.os.Bundle; import android.support.v7.app.AppCompatActivity; import android.support.v7.widget.Toolbar; import android.webkit.WebResourceRequest; import android.webkit.WebView; import android.webkit.WebViewClient; import com.siziksu.intents.R; public class WebViewActivity extends AppCompatActivity { @SuppressLint("SetJavaScriptEnabled") @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_webview); Toolbar toolbar = (Toolbar) findViewById(R.id.toolbar); setSupportActionBar(toolbar); Uri url = getIntent().getData(); WebView webView = (WebView) findViewById(R.id.web_view); webView.setWebViewClient(new LocalWebViewClient()); webView.getSettings().setJavaScriptEnabled(true); webView.loadUrl(url.toString()); } /** * Handling Page Navigation * The default web browser opens and loads the destination URL. * However, you can override this behavior for your WebView, so links open within your WebView. * You can then allow the user to navigate backward and forward through their web page history * that's maintained by your WebView. * To open links clicked by the user, simply provide a WebViewClient for your WebView, using * setWebViewClient(). */ private class LocalWebViewClient extends WebViewClient { @Override public boolean shouldOverrideUrlLoading(WebView view, WebResourceRequest request) { return false; } } }
apache-2.0
deepakddixit/incubator-geode
geode-cq/src/distributedTest/java/org/apache/geode/cache/query/cq/dunit/CqTimeTestListener.java
8047
/* * Licensed to the Apache Software Foundation (ASF) under one or more contributor license * agreements. See the NOTICE file distributed with this work for additional information regarding * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance with the License. You may obtain a * copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ package org.apache.geode.cache.query.cq.dunit; import java.util.Collections; import java.util.HashSet; import java.util.Set; import org.apache.geode.LogWriter; import org.apache.geode.cache.Operation; import org.apache.geode.cache.query.CqEvent; import org.apache.geode.cache.query.CqListener; import org.apache.geode.cache.query.data.Portfolio; import org.apache.geode.test.dunit.Wait; import org.apache.geode.test.dunit.WaitCriterion; public class CqTimeTestListener implements CqListener { protected final LogWriter logger; protected volatile int eventCreateCount = 0; protected volatile int eventUpdateCount = 0; protected volatile int eventDeleteCount = 0; protected volatile int eventInvalidateCount = 0; protected volatile int eventErrorCount = 0; protected volatile int totalEventCount = 0; protected volatile int eventQueryInsertCount = 0; protected volatile int eventQueryUpdateCount = 0; protected volatile int eventQueryDeleteCount = 0; protected volatile int eventQueryInvalidateCount = 0; protected volatile long eventQueryInsertTime = 0; protected volatile long eventQueryUpdateTime = 0; protected volatile boolean eventClose = false; public final Set destroys = Collections.synchronizedSet(new HashSet()); 
public final Set creates = Collections.synchronizedSet(new HashSet()); public final Set invalidates = Collections.synchronizedSet(new HashSet()); public final Set updates = Collections.synchronizedSet(new HashSet()); private static final String WAIT_PROPERTY = "CQueryTestListener.maxWaitTime"; private static final int WAIT_DEFAULT = (20 * 1000); public static final long MAX_TIME = Integer.getInteger(WAIT_PROPERTY, WAIT_DEFAULT).intValue();; public String cqName; public CqTimeTestListener(LogWriter logger) { this.logger = logger; } public void onEvent(CqEvent cqEvent) { this.totalEventCount++; long currentTime = System.currentTimeMillis(); Operation baseOperation = cqEvent.getBaseOperation(); Operation queryOperation = cqEvent.getQueryOperation(); Object key = cqEvent.getKey(); logger.info("### Got CQ Event ###; baseOp=" + baseOperation + ";queryOp=" + queryOperation); logger.info("Number of events for the CQ: " + this.cqName + " : " + this.totalEventCount + " Key : " + key); if (baseOperation.isUpdate()) { this.eventUpdateCount++; this.updates.add(key); } else if (baseOperation.isCreate()) { this.eventCreateCount++; this.creates.add(key); } else if (baseOperation.isDestroy()) { this.eventDeleteCount++; this.destroys.add(key); } else if (baseOperation.isInvalidate()) { this.eventDeleteCount++; this.invalidates.add(key); } if (queryOperation.isUpdate()) { this.eventQueryUpdateCount++; long createTime = ((Portfolio) cqEvent.getNewValue()).getCreateTime(); this.eventQueryUpdateTime += (currentTime - createTime); } else if (queryOperation.isCreate()) { this.eventQueryInsertCount++; long createTime = ((Portfolio) cqEvent.getNewValue()).getCreateTime(); this.eventQueryInsertTime += (currentTime - createTime); } else if (queryOperation.isDestroy()) { this.eventQueryDeleteCount++; } else if (queryOperation.isInvalidate()) { this.eventQueryInvalidateCount++; } } public void onError(CqEvent cqEvent) { this.eventErrorCount++; } public int getErrorEventCount() { return 
this.eventErrorCount; } public int getTotalEventCount() { return this.totalEventCount; } public int getCreateEventCount() { return this.eventCreateCount; } public int getUpdateEventCount() { return this.eventUpdateCount; } public int getDeleteEventCount() { return this.eventDeleteCount; } public int getInvalidateEventCount() { return this.eventInvalidateCount; } public int getQueryInsertEventCount() { return this.eventQueryInsertCount; } public int getQueryUpdateEventCount() { return this.eventQueryUpdateCount; } public int getQueryDeleteEventCount() { return this.eventQueryDeleteCount; } public int getQueryInvalidateEventCount() { return this.eventQueryInvalidateCount; } public long getTotalQueryUpdateTime() { return this.eventQueryUpdateTime; } public long getTotalQueryCreateTime() { return this.eventQueryInsertTime; } public void close() { this.eventClose = true; } public void printInfo() { logger.info("####" + this.cqName + ": " + " Events Total :" + this.getTotalEventCount() + " Events Created :" + this.eventCreateCount + " Events Updated :" + this.eventUpdateCount + " Events Deleted :" + this.eventDeleteCount + " Events Invalidated :" + this.eventInvalidateCount + " Query Inserts :" + this.eventQueryInsertCount + " Query Updates :" + this.eventQueryUpdateCount + " Query Deletes :" + this.eventQueryDeleteCount + " Query Invalidates :" + this.eventQueryInvalidateCount + " Total Events :" + this.totalEventCount); } public boolean waitForCreated(final Object key) { WaitCriterion ev = new WaitCriterion() { public boolean done() { return CqTimeTestListener.this.creates.contains(key); } public String description() { return "never got create event for CQ " + CqTimeTestListener.this.cqName; } }; Wait.waitForCriterion(ev, MAX_TIME, 200, true); return true; } public boolean waitForDestroyed(final Object key) { WaitCriterion ev = new WaitCriterion() { public boolean done() { return CqTimeTestListener.this.destroys.contains(key); } public String description() { return 
"never got destroy event for CQ " + CqTimeTestListener.this.cqName; } }; Wait.waitForCriterion(ev, MAX_TIME, 200, true); return true; } public boolean waitForInvalidated(final Object key) { WaitCriterion ev = new WaitCriterion() { public boolean done() { return CqTimeTestListener.this.invalidates.contains(key); } public String description() { return "never got invalidate event for CQ " + CqTimeTestListener.this.cqName; } }; Wait.waitForCriterion(ev, MAX_TIME, 200, true); return true; } public boolean waitForUpdated(final Object key) { WaitCriterion ev = new WaitCriterion() { public boolean done() { return CqTimeTestListener.this.updates.contains(key); } public String description() { return "never got update event for CQ " + CqTimeTestListener.this.cqName; } }; Wait.waitForCriterion(ev, MAX_TIME, 200, true); return true; } public boolean waitForClose() { WaitCriterion ev = new WaitCriterion() { public boolean done() { return CqTimeTestListener.this.eventClose; } public String description() { return "never got close event for CQ " + CqTimeTestListener.this.cqName; } }; Wait.waitForCriterion(ev, MAX_TIME, 200, true); return true; } public void getEventHistory() { destroys.clear(); creates.clear(); invalidates.clear(); updates.clear(); this.eventClose = false; } }
apache-2.0
ranoble/Megapode
src/main/java/com/gravspace/handlers/PageHandler.java
3478
package com.gravspace.handlers;

import java.lang.reflect.InvocationTargetException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.commons.lang.StringUtils;
import org.apache.http.Header;
import org.apache.http.HttpRequest;
import org.apache.http.HttpResponse;
import org.apache.http.protocol.HttpContext;

import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.Duration;
import akka.actor.ActorRef;
import akka.actor.Status;
import akka.actor.UntypedActor;
import akka.actor.UntypedActorContext;
import akka.dispatch.Futures;
import akka.event.Logging;
import akka.event.LoggingAdapter;

import com.gravspace.abstractions.IWidget;
import com.gravspace.abstractions.IPage;
import com.gravspace.abstractions.PageRoute;
import com.gravspace.exceptions.PageNotFoundException;
import com.gravspace.messages.RequestMessage;
import com.gravspace.messages.ResponseMessage;
import com.gravspace.util.Layers;

/**
 * Akka actor that serves page requests: it matches the request URI against the
 * configured {@link PageRoute}s, instantiates the matching {@link IPage} via
 * reflection, and pipes the page's asynchronous render result back to the
 * sender. Failures are converted into failed futures so the sender always
 * receives a reply.
 */
public class PageHandler extends UntypedActor {

	LoggingAdapter log = Logging.getLogger(getContext().system(), this);
	// Routing table: URI templates mapped to page classes.
	List<PageRoute> pages;
	// NOTE(review): never assigned or read in this class — possibly vestigial.
	Map<String, ActorRef> routes;
	// Layer routers handed to each page so it can dispatch further work.
	private Map<Layers, ActorRef> routers;

	public PageHandler(Map<Layers, ActorRef> routers, List<PageRoute> pages){
		this.pages = pages;
		this.routers = routers;
	}

	@Override
	public void onReceive(Object rawMessage) throws Exception {
		log.info("Page got: "+rawMessage.getClass().getCanonicalName());
		if (rawMessage instanceof RequestMessage){
			log.info("Handelling Request");
			RequestMessage message = (RequestMessage)rawMessage;
			// Split the request line into path and (optional) query string.
			String uri = message.getPayload().getRequestLine().getUri();
			String[] split = StringUtils.split(uri, "?", 2);
			String path = split[0];
			String query = "";
			if (split.length > 1){
				query = split[1];
			}

			Future<ResponseMessage> rendered = null;
			try {
				IPage page = loadPage(path, 
						message.getPayload().getRequestLine().getMethod().toUpperCase(), 
						query,
						message.getPayload().getHeaders(),
						message.getPayload().getContent());
				rendered = page.build();
			} catch (PageNotFoundException pnf){
				// Unknown route: reply with a failed future rather than dropping it.
				rendered = Futures.failed(pnf);
			} catch (InstantiationException | IllegalAccessException
					| IllegalArgumentException | InvocationTargetException
					| NoSuchMethodException | SecurityException e){
				// Reflection failure while constructing the page class.
				log.error(String.format("Error in handelling [%s]", e.getClass().getCanonicalName()), e);
				rendered = Futures.failed(e);
			} catch (Throwable e) {
				// Last-resort catch so the sender always gets a (failed) reply.
				rendered = Futures.failed(e);
			}
			// Forward the eventual result (or failure) to the original sender.
			akka.pattern.Patterns.pipe(rendered, this.getContext().dispatcher()).to(getSender());
		}
		else {
			unhandled(rawMessage);
		}
	}

	/**
	 * Finds the first route whose template matches {@code path}, reflectively
	 * constructs its page class with (routers, sender, context), initialises it
	 * with the request data and extracted path parameters, and returns it.
	 *
	 * @throws PageNotFoundException when no route template matches the path
	 */
	private IPage loadPage(String path, String method, String query, Header[] headers, byte[] content) 
			throws PageNotFoundException, InstantiationException, IllegalAccessException, 
			IllegalArgumentException, InvocationTargetException, NoSuchMethodException, SecurityException {
		// Populated by the template matcher with values bound from the path.
		Map<String, String> parameters = new HashMap<String, String>();
		for (PageRoute route: pages){
			if (route.getTemplate().match(path, parameters)){
				IPage page = route.getPageClass().getConstructor(Map.class, ActorRef.class, UntypedActorContext.class)
						.newInstance(routers, getSender(), this.context());
				page.initialise(path, method, query, headers, content, parameters );
				return page;
			}
		}
		throw new PageNotFoundException(String.format("Page matching [%s] not found", path));
	}
}
apache-2.0
torrances/swtk-commons
commons-dict-wordnet-indexbyname/src/main/java/org/swtk/commons/dict/wordnet/indexbyname/instance/o/a/x/WordnetNounIndexNameInstanceOAX.java
1213
package org.swtk.commons.dict.wordnet.indexbyname.instance.o.a.x;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Map;
import java.util.TreeMap;

import org.swtk.common.dict.dto.wordnet.IndexNoun;

import com.trimc.blogger.commons.utils.GsonUtils;

/**
 * Generated, in-memory WordNet noun index shard for terms starting with
 * "oax...". Entries are deserialized once from embedded JSON at class-load
 * time and exposed through simple term lookups.
 */
public final class WordnetNounIndexNameInstanceOAX {

	private static Map<String, Collection<IndexNoun>> map = new TreeMap<String, Collection<IndexNoun>>();

	static {
		add("{\"term\":\"oaxaca\", \"synsetCount\":1, \"upperType\":\"NOUN\", \"ids\":[\"08762583\"]}");
		add("{\"term\":\"oaxaca de juarez\", \"synsetCount\":1, \"upperType\":\"NOUN\", \"ids\":[\"08762583\"]}");
	}

	/** Deserializes one JSON entry and appends it to its term's bucket. */
	private static void add(final String json) {
		IndexNoun entry = GsonUtils.toObject(json, IndexNoun.class);
		Collection<IndexNoun> bucket;
		if (map.containsKey(entry.getTerm())) {
			bucket = map.get(entry.getTerm());
		} else {
			bucket = new ArrayList<IndexNoun>();
		}
		bucket.add(entry);
		map.put(entry.getTerm(), bucket);
	}

	/** @return all index entries for the term, or null when absent. */
	public static Collection<IndexNoun> get(final String TERM) {
		return map.get(TERM);
	}

	/** @return true when the term is present in this shard. */
	public static boolean has(final String TERM) {
		return map.containsKey(TERM);
	}

	/** @return the set of all terms held by this shard. */
	public static Collection<String> terms() {
		return map.keySet();
	}
}
apache-2.0
adligo/fabricate.adligo.org
src/org/adligo/fabricate/repository/DependencyNotAvailableException.java
727
package org.adligo.fabricate.repository;

import org.adligo.fabricate.models.dependencies.I_Dependency;

/**
 * Thrown when a dependency cannot be obtained from a repository.
 *
 * <p>Carries the {@link I_Dependency} that failed to resolve so callers can
 * report or retry the specific dependency.</p>
 */
public class DependencyNotAvailableException extends Exception {

  private static final long serialVersionUID = -5557407966833930386L;

  /** The dependency that was not available; assigned by every constructor, never mutated. */
  private final I_Dependency dependency_;

  /**
   * @param dep the dependency that could not be obtained
   */
  public DependencyNotAvailableException(I_Dependency dep) {
    dependency_ = dep;
  }

  /**
   * @param dep the dependency that could not be obtained
   * @param message detail message describing the failure
   */
  public DependencyNotAvailableException(I_Dependency dep, String message) {
    super(message);
    dependency_ = dep;
  }

  /**
   * @param dep the dependency that could not be obtained
   * @param message detail message describing the failure
   * @param x the underlying cause (preserved for the exception chain)
   */
  public DependencyNotAvailableException(I_Dependency dep, String message, Exception x) {
    super(message, x);
    dependency_ = dep;
  }

  /** @return the dependency that was not available */
  public I_Dependency getDependency() {
    return dependency_;
  }
}
apache-2.0
mhurne/aws-sdk-java
aws-java-sdk-route53/src/main/java/com/amazonaws/services/route53/AmazonRoute53Client.java
171020
/* * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights * Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ package com.amazonaws.services.route53; import org.w3c.dom.*; import java.net.*; import java.util.*; import java.util.Map.Entry; import org.apache.commons.logging.*; import com.amazonaws.*; import com.amazonaws.auth.*; import com.amazonaws.handlers.*; import com.amazonaws.http.*; import com.amazonaws.internal.*; import com.amazonaws.metrics.*; import com.amazonaws.regions.*; import com.amazonaws.transform.*; import com.amazonaws.util.*; import com.amazonaws.util.json.*; import com.amazonaws.util.AWSRequestMetrics.Field; import com.amazonaws.annotation.ThreadSafe; import com.amazonaws.services.route53.model.*; import com.amazonaws.services.route53.model.transform.*; /** * Client for accessing Route 53. All service calls made using this client are * blocking, and will not return until the service call completes. * <p> * */ @ThreadSafe public class AmazonRoute53Client extends AmazonWebServiceClient implements AmazonRoute53 { /** Provider for AWS credentials. */ private AWSCredentialsProvider awsCredentialsProvider; private static final Log log = LogFactory.getLog(AmazonRoute53.class); /** Default signing name for the service. */ private static final String DEFAULT_SIGNING_NAME = "route53"; /** The region metadata service name for computing region endpoints. 
*/ private static final String DEFAULT_ENDPOINT_PREFIX = "route53"; /** * Client configuration factory providing ClientConfigurations tailored to * this client */ protected static final ClientConfigurationFactory configFactory = new ClientConfigurationFactory(); /** * List of exception unmarshallers for all Route 53 exceptions. */ protected final List<Unmarshaller<AmazonServiceException, Node>> exceptionUnmarshallers = new ArrayList<Unmarshaller<AmazonServiceException, Node>>(); /** * Constructs a new client to invoke service methods on Route 53. A * credentials provider chain will be used that searches for credentials in * this order: * <ul> * <li>Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_KEY</li> * <li>Java System Properties - aws.accessKeyId and aws.secretKey</li> * <li>Instance profile credentials delivered through the Amazon EC2 * metadata service</li> * </ul> * * <p> * All service calls made using this new client object are blocking, and * will not return until the service call completes. * * @see DefaultAWSCredentialsProviderChain */ public AmazonRoute53Client() { this(new DefaultAWSCredentialsProviderChain(), configFactory .getConfig()); } /** * Constructs a new client to invoke service methods on Route 53. A * credentials provider chain will be used that searches for credentials in * this order: * <ul> * <li>Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_KEY</li> * <li>Java System Properties - aws.accessKeyId and aws.secretKey</li> * <li>Instance profile credentials delivered through the Amazon EC2 * metadata service</li> * </ul> * * <p> * All service calls made using this new client object are blocking, and * will not return until the service call completes. * * @param clientConfiguration * The client configuration options controlling how this client * connects to Route 53 (ex: proxy settings, retry counts, etc.). 
* * @see DefaultAWSCredentialsProviderChain */ public AmazonRoute53Client(ClientConfiguration clientConfiguration) { this(new DefaultAWSCredentialsProviderChain(), clientConfiguration); } /** * Constructs a new client to invoke service methods on Route 53 using the * specified AWS account credentials. * * <p> * All service calls made using this new client object are blocking, and * will not return until the service call completes. * * @param awsCredentials * The AWS credentials (access key ID and secret key) to use when * authenticating with AWS services. */ public AmazonRoute53Client(AWSCredentials awsCredentials) { this(awsCredentials, configFactory.getConfig()); } /** * Constructs a new client to invoke service methods on Route 53 using the * specified AWS account credentials and client configuration options. * * <p> * All service calls made using this new client object are blocking, and * will not return until the service call completes. * * @param awsCredentials * The AWS credentials (access key ID and secret key) to use when * authenticating with AWS services. * @param clientConfiguration * The client configuration options controlling how this client * connects to Route 53 (ex: proxy settings, retry counts, etc.). */ public AmazonRoute53Client(AWSCredentials awsCredentials, ClientConfiguration clientConfiguration) { super(clientConfiguration); this.awsCredentialsProvider = new StaticCredentialsProvider( awsCredentials); init(); } /** * Constructs a new client to invoke service methods on Route 53 using the * specified AWS account credentials provider. * * <p> * All service calls made using this new client object are blocking, and * will not return until the service call completes. * * @param awsCredentialsProvider * The AWS credentials provider which will provide credentials to * authenticate requests with AWS services. 
*/ public AmazonRoute53Client(AWSCredentialsProvider awsCredentialsProvider) { this(awsCredentialsProvider, configFactory.getConfig()); } /** * Constructs a new client to invoke service methods on Route 53 using the * specified AWS account credentials provider and client configuration * options. * * <p> * All service calls made using this new client object are blocking, and * will not return until the service call completes. * * @param awsCredentialsProvider * The AWS credentials provider which will provide credentials to * authenticate requests with AWS services. * @param clientConfiguration * The client configuration options controlling how this client * connects to Route 53 (ex: proxy settings, retry counts, etc.). */ public AmazonRoute53Client(AWSCredentialsProvider awsCredentialsProvider, ClientConfiguration clientConfiguration) { this(awsCredentialsProvider, clientConfiguration, null); } /** * Constructs a new client to invoke service methods on Route 53 using the * specified AWS account credentials provider, client configuration options, * and request metric collector. * * <p> * All service calls made using this new client object are blocking, and * will not return until the service call completes. * * @param awsCredentialsProvider * The AWS credentials provider which will provide credentials to * authenticate requests with AWS services. * @param clientConfiguration * The client configuration options controlling how this client * connects to Route 53 (ex: proxy settings, retry counts, etc.). 
* @param requestMetricCollector * optional request metric collector */ public AmazonRoute53Client(AWSCredentialsProvider awsCredentialsProvider, ClientConfiguration clientConfiguration, RequestMetricCollector requestMetricCollector) { super(clientConfiguration, requestMetricCollector); this.awsCredentialsProvider = awsCredentialsProvider; init(); } private void init() { exceptionUnmarshallers.add(new ConflictingTypesExceptionUnmarshaller()); exceptionUnmarshallers .add(new DelegationSetNotAvailableExceptionUnmarshaller()); exceptionUnmarshallers .add(new NoSuchHealthCheckExceptionUnmarshaller()); exceptionUnmarshallers .add(new HostedZoneNotEmptyExceptionUnmarshaller()); exceptionUnmarshallers .add(new HostedZoneAlreadyExistsExceptionUnmarshaller()); exceptionUnmarshallers .add(new NoSuchDelegationSetExceptionUnmarshaller()); exceptionUnmarshallers .add(new InvalidDomainNameExceptionUnmarshaller()); exceptionUnmarshallers .add(new DelegationSetAlreadyReusableExceptionUnmarshaller()); exceptionUnmarshallers.add(new InvalidInputExceptionUnmarshaller()); exceptionUnmarshallers .add(new NoSuchTrafficPolicyExceptionUnmarshaller()); exceptionUnmarshallers .add(new TooManyHealthChecksExceptionUnmarshaller()); exceptionUnmarshallers .add(new DelegationSetInUseExceptionUnmarshaller()); exceptionUnmarshallers.add(new InvalidVPCIdExceptionUnmarshaller()); exceptionUnmarshallers .add(new LastVPCAssociationExceptionUnmarshaller()); exceptionUnmarshallers .add(new InvalidChangeBatchExceptionUnmarshaller()); exceptionUnmarshallers .add(new ConcurrentModificationExceptionUnmarshaller()); exceptionUnmarshallers .add(new TooManyHostedZonesExceptionUnmarshaller()); exceptionUnmarshallers .add(new HostedZoneNotFoundExceptionUnmarshaller()); exceptionUnmarshallers .add(new DelegationSetAlreadyCreatedExceptionUnmarshaller()); exceptionUnmarshallers .add(new ConflictingDomainExistsExceptionUnmarshaller()); exceptionUnmarshallers.add(new LimitsExceededExceptionUnmarshaller()); 
exceptionUnmarshallers .add(new TooManyTrafficPolicyInstancesExceptionUnmarshaller()); exceptionUnmarshallers.add(new HealthCheckInUseExceptionUnmarshaller()); exceptionUnmarshallers .add(new HealthCheckVersionMismatchExceptionUnmarshaller()); exceptionUnmarshallers .add(new NoSuchGeoLocationExceptionUnmarshaller()); exceptionUnmarshallers .add(new NoSuchTrafficPolicyInstanceExceptionUnmarshaller()); exceptionUnmarshallers.add(new NoSuchHostedZoneExceptionUnmarshaller()); exceptionUnmarshallers .add(new VPCAssociationNotFoundExceptionUnmarshaller()); exceptionUnmarshallers .add(new IncompatibleVersionExceptionUnmarshaller()); exceptionUnmarshallers .add(new HealthCheckAlreadyExistsExceptionUnmarshaller()); exceptionUnmarshallers .add(new PublicZoneVPCAssociationExceptionUnmarshaller()); exceptionUnmarshallers .add(new TrafficPolicyInstanceAlreadyExistsExceptionUnmarshaller()); exceptionUnmarshallers .add(new TrafficPolicyAlreadyExistsExceptionUnmarshaller()); exceptionUnmarshallers.add(new NoSuchChangeExceptionUnmarshaller()); exceptionUnmarshallers .add(new TrafficPolicyInUseExceptionUnmarshaller()); exceptionUnmarshallers .add(new DelegationSetNotReusableExceptionUnmarshaller()); exceptionUnmarshallers.add(new ThrottlingExceptionUnmarshaller()); exceptionUnmarshallers .add(new InvalidTrafficPolicyDocumentExceptionUnmarshaller()); exceptionUnmarshallers .add(new PriorRequestNotCompleteExceptionUnmarshaller()); exceptionUnmarshallers.add(new InvalidArgumentExceptionUnmarshaller()); exceptionUnmarshallers .add(new TooManyTrafficPoliciesExceptionUnmarshaller()); exceptionUnmarshallers.add(new StandardErrorUnmarshaller()); setServiceNameIntern(DEFAULT_SIGNING_NAME); setEndpointPrefix(DEFAULT_ENDPOINT_PREFIX); // calling this.setEndPoint(...) 
will also modify the signer accordingly this.setEndpoint("https://route53.amazonaws.com"); HandlerChainFactory chainFactory = new HandlerChainFactory(); requestHandler2s .addAll(chainFactory .newRequestHandlerChain("/com/amazonaws/services/route53/request.handlers")); requestHandler2s .addAll(chainFactory .newRequestHandler2Chain("/com/amazonaws/services/route53/request.handler2s")); } /** * <p> * This action associates a VPC with an hosted zone. * </p> * <p> * To associate a VPC with an hosted zone, send a <code>POST</code> request * to the * <code>/<i>Route 53 API version</i>/hostedzone/<i>hosted zone ID</i>/associatevpc</code> * resource. The request body must include a document with a * <code>AssociateVPCWithHostedZoneRequest</code> element. The response * returns the <code>AssociateVPCWithHostedZoneResponse</code> element that * contains <code>ChangeInfo</code> for you to track the progress of the * <code>AssociateVPCWithHostedZoneRequest</code> you made. See * <code>GetChange</code> operation for how to track the progress of your * change. * </p> * * @param associateVPCWithHostedZoneRequest * A complex type that contains information about the request to * associate a VPC with an hosted zone. * @return Result of the AssociateVPCWithHostedZone operation returned by * the service. * @throws NoSuchHostedZoneException * @throws InvalidVPCIdException * The hosted zone you are trying to create for your VPC_ID does not * belong to you. Amazon Route 53 returns this error when the VPC * specified by <code>VPCId</code> does not belong to you. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @throws PublicZoneVPCAssociationException * The hosted zone you are trying to associate VPC with doesn't have * any VPC association. Amazon Route 53 currently doesn't support * associate a VPC with a public hosted zone. 
* @throws ConflictingDomainExistsException * @throws LimitsExceededException * The limits specified for a resource have been exceeded. * @sample AmazonRoute53.AssociateVPCWithHostedZone */ @Override public AssociateVPCWithHostedZoneResult associateVPCWithHostedZone( AssociateVPCWithHostedZoneRequest associateVPCWithHostedZoneRequest) { ExecutionContext executionContext = createExecutionContext(associateVPCWithHostedZoneRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<AssociateVPCWithHostedZoneRequest> request = null; Response<AssociateVPCWithHostedZoneResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new AssociateVPCWithHostedZoneRequestMarshaller() .marshall(super .beforeMarshalling(associateVPCWithHostedZoneRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<AssociateVPCWithHostedZoneResult> responseHandler = new StaxResponseHandler<AssociateVPCWithHostedZoneResult>( new AssociateVPCWithHostedZoneResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * Use this action to create or change your authoritative DNS information. * To use this action, send a <code>POST</code> request to the * <code>/<i>Route 53 API version</i>/hostedzone/<i>hosted Zone ID</i>/rrset</code> * resource. The request body must include a document with a * <code>ChangeResourceRecordSetsRequest</code> element. * </p> * <p> * Changes are a list of change items and are considered transactional. 
For * more information on transactional changes, also known as change batches, * see <a href= * "http://docs.aws.amazon.com/Route53/latest/APIReference/API_ChangeResourceRecordSets.html" * >POST ChangeResourceRecordSets</a> in the <i>Amazon Route 53 API * Reference</i>. * </p> * <important>Due to the nature of transactional changes, you cannot delete * the same resource record set more than once in a single change batch. If * you attempt to delete the same change batch more than once, Amazon Route * 53 returns an <code>InvalidChangeBatch</code> error.</important> * <p> * In response to a <code>ChangeResourceRecordSets</code> request, your DNS * data is changed on all Amazon Route 53 DNS servers. Initially, the status * of a change is <code>PENDING</code>. This means the change has not yet * propagated to all the authoritative Amazon Route 53 DNS servers. When the * change is propagated to all hosts, the change returns a status of * <code>INSYNC</code>. * </p> * <p> * Note the following limitations on a <code>ChangeResourceRecordSets</code> * request: * </p> * <ul> * <li>A request cannot contain more than 100 Change elements.</li> * <li>A request cannot contain more than 1000 ResourceRecord elements.</li> * <li>The sum of the number of characters (including spaces) in all * <code>Value</code> elements in a request cannot exceed 32,000 characters. * </li> * </ul> * * @param changeResourceRecordSetsRequest * A complex type that contains a change batch. * @return Result of the ChangeResourceRecordSets operation returned by the * service. * @throws NoSuchHostedZoneException * @throws NoSuchHealthCheckException * The health check you are trying to get or delete does not exist. * @throws InvalidChangeBatchException * This error contains a list of one or more error messages. Each * error message indicates one error in the change batch. 
For more * information, see <a href= * "http://docs.aws.amazon.com/Route53/latest/APIReference/API_ChangeResourceRecordSets.html#example_Errors" * >Example InvalidChangeBatch Errors</a>. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @throws PriorRequestNotCompleteException * The request was rejected because Amazon Route 53 was still * processing a prior request. * @sample AmazonRoute53.ChangeResourceRecordSets */ @Override public ChangeResourceRecordSetsResult changeResourceRecordSets( ChangeResourceRecordSetsRequest changeResourceRecordSetsRequest) { ExecutionContext executionContext = createExecutionContext(changeResourceRecordSetsRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<ChangeResourceRecordSetsRequest> request = null; Response<ChangeResourceRecordSetsResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new ChangeResourceRecordSetsRequestMarshaller() .marshall(super .beforeMarshalling(changeResourceRecordSetsRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<ChangeResourceRecordSetsResult> responseHandler = new StaxResponseHandler<ChangeResourceRecordSetsResult>( new ChangeResourceRecordSetsResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * @param changeTagsForResourceRequest * A complex type containing information about a request to add, * change, or delete the tags that are associated with a resource. * @return Result of the ChangeTagsForResource operation returned by the * service. 
* @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @throws NoSuchHealthCheckException * The health check you are trying to get or delete does not exist. * @throws NoSuchHostedZoneException * @throws PriorRequestNotCompleteException * The request was rejected because Amazon Route 53 was still * processing a prior request. * @throws ThrottlingException * @sample AmazonRoute53.ChangeTagsForResource */ @Override public ChangeTagsForResourceResult changeTagsForResource( ChangeTagsForResourceRequest changeTagsForResourceRequest) { ExecutionContext executionContext = createExecutionContext(changeTagsForResourceRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<ChangeTagsForResourceRequest> request = null; Response<ChangeTagsForResourceResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new ChangeTagsForResourceRequestMarshaller() .marshall(super .beforeMarshalling(changeTagsForResourceRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<ChangeTagsForResourceResult> responseHandler = new StaxResponseHandler<ChangeTagsForResourceResult>( new ChangeTagsForResourceResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * This action creates a new health check. * </p> * <p> * To create a new health check, send a <code>POST</code> request to the * <code>/<i>Route 53 API version</i>/healthcheck</code> resource. The * request body must include a document with a * <code>CreateHealthCheckRequest</code> element. 
The response returns the * <code>CreateHealthCheckResponse</code> element that contains metadata * about the health check. * </p> * * @param createHealthCheckRequest * >A complex type that contains information about the request to * create a health check. * @return Result of the CreateHealthCheck operation returned by the * service. * @throws TooManyHealthChecksException * @throws HealthCheckAlreadyExistsException * The health check you are trying to create already exists. Amazon * Route 53 returns this error when a health check has already been * created with the specified <code>CallerReference</code>. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @sample AmazonRoute53.CreateHealthCheck */ @Override public CreateHealthCheckResult createHealthCheck( CreateHealthCheckRequest createHealthCheckRequest) { ExecutionContext executionContext = createExecutionContext(createHealthCheckRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<CreateHealthCheckRequest> request = null; Response<CreateHealthCheckResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new CreateHealthCheckRequestMarshaller() .marshall(super .beforeMarshalling(createHealthCheckRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<CreateHealthCheckResult> responseHandler = new StaxResponseHandler<CreateHealthCheckResult>( new CreateHealthCheckResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * This action creates a new hosted zone. 
* </p> * <p> * To create a new hosted zone, send a <code>POST</code> request to the * <code>/<i>Route 53 API version</i>/hostedzone</code> resource. The * request body must include a document with a * <code>CreateHostedZoneRequest</code> element. The response returns the * <code>CreateHostedZoneResponse</code> element that contains metadata * about the hosted zone. * </p> * <p> * Amazon Route 53 automatically creates a default SOA record and four NS * records for the zone. The NS records in the hosted zone are the name * servers you give your registrar to delegate your domain to. For more * information about SOA and NS records, see <a href= * "http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/SOA-NSrecords.html" * >NS and SOA Records that Amazon Route 53 Creates for a Hosted Zone</a> in * the <i>Amazon Route 53 Developer Guide</i>. * </p> * <p> * When you create a zone, its initial status is <code>PENDING</code>. This * means that it is not yet available on all DNS servers. The status of the * zone changes to <code>INSYNC</code> when the NS and SOA records are * available on all Amazon Route 53 DNS servers. * </p> * <p> * When trying to create a hosted zone using a reusable delegation set, you * could specify an optional DelegationSetId, and Route53 would assign those * 4 NS records for the zone, instead of alloting a new one. * </p> * * @param createHostedZoneRequest * A complex type that contains information about the request to * create a hosted zone. * @return Result of the CreateHostedZone operation returned by the service. * @throws InvalidDomainNameException * This error indicates that the specified domain name is not valid. * @throws HostedZoneAlreadyExistsException * The hosted zone you are trying to create already exists. Amazon * Route 53 returns this error when a hosted zone has already been * created with the specified <code>CallerReference</code>. 
* @throws TooManyHostedZonesException * This error indicates that you've reached the maximum number of * hosted zones that can be created for the current AWS account. You * can request an increase to the limit on the <a * href="http://aws.amazon.com/route53-request/">Contact Us</a> * page. * @throws InvalidVPCIdException * The hosted zone you are trying to create for your VPC_ID does not * belong to you. Amazon Route 53 returns this error when the VPC * specified by <code>VPCId</code> does not belong to you. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @throws DelegationSetNotAvailableException * Amazon Route 53 allows some duplicate domain names, but there is * a maximum number of duplicate names. This error indicates that * you have reached that maximum. If you want to create another * hosted zone with the same name and Amazon Route 53 generates this * error, you can request an increase to the limit on the <a * href="http://aws.amazon.com/route53-request/">Contact Us</a> * page. * @throws ConflictingDomainExistsException * @throws NoSuchDelegationSetException * The specified delegation set does not exist. * @throws DelegationSetNotReusableException * The specified delegation set has not been marked as reusable. 
* @sample AmazonRoute53.CreateHostedZone */ @Override public CreateHostedZoneResult createHostedZone( CreateHostedZoneRequest createHostedZoneRequest) { ExecutionContext executionContext = createExecutionContext(createHostedZoneRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<CreateHostedZoneRequest> request = null; Response<CreateHostedZoneResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new CreateHostedZoneRequestMarshaller() .marshall(super .beforeMarshalling(createHostedZoneRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<CreateHostedZoneResult> responseHandler = new StaxResponseHandler<CreateHostedZoneResult>( new CreateHostedZoneResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * This action creates a reusable delegationSet. * </p> * <p> * To create a new reusable delegationSet, send a <code>POST</code> request * to the <code>/<i>Route 53 API version</i>/delegationset</code> resource. * The request body must include a document with a * <code>CreateReusableDelegationSetRequest</code> element. The response * returns the <code>CreateReusableDelegationSetResponse</code> element that * contains metadata about the delegationSet. * </p> * <p> * If the optional parameter HostedZoneId is specified, it marks the * delegationSet associated with that particular hosted zone as reusable. * </p> * * @param createReusableDelegationSetRequest * @return Result of the CreateReusableDelegationSet operation returned by * the service. 
* @throws DelegationSetAlreadyCreatedException * A delegation set with the same owner and caller reference * combination has already been created. * @throws LimitsExceededException * The limits specified for a resource have been exceeded. * @throws HostedZoneNotFoundException * The specified HostedZone cannot be found. * @throws InvalidArgumentException * At least one of the specified arguments is invalid. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @throws DelegationSetNotAvailableException * Amazon Route 53 allows some duplicate domain names, but there is * a maximum number of duplicate names. This error indicates that * you have reached that maximum. If you want to create another * hosted zone with the same name and Amazon Route 53 generates this * error, you can request an increase to the limit on the <a * href="http://aws.amazon.com/route53-request/">Contact Us</a> * page. * @throws DelegationSetAlreadyReusableException * The specified delegation set has already been marked as reusable. * @sample AmazonRoute53.CreateReusableDelegationSet */ @Override public CreateReusableDelegationSetResult createReusableDelegationSet( CreateReusableDelegationSetRequest createReusableDelegationSetRequest) { ExecutionContext executionContext = createExecutionContext(createReusableDelegationSetRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<CreateReusableDelegationSetRequest> request = null; Response<CreateReusableDelegationSetResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new CreateReusableDelegationSetRequestMarshaller() .marshall(super .beforeMarshalling(createReusableDelegationSetRequest)); // Binds the request metrics to the current request. 
request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<CreateReusableDelegationSetResult> responseHandler = new StaxResponseHandler<CreateReusableDelegationSetResult>( new CreateReusableDelegationSetResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * Creates a traffic policy, which you use to create multiple DNS resource * record sets for one domain name (such as example.com) or one subdomain * name (such as www.example.com). * </p> * <p> * To create a traffic policy, send a <code>POST</code> request to the * <code>/<i>Route 53 API version</i>/trafficpolicy</code> resource. The * request body must include a document with a * <code>CreateTrafficPolicyRequest</code> element. The response includes * the <code>CreateTrafficPolicyResponse</code> element, which contains * information about the new traffic policy. * </p> * * @param createTrafficPolicyRequest * A complex type that contains information about the traffic policy * that you want to create. * @return Result of the CreateTrafficPolicy operation returned by the * service. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @throws TooManyTrafficPoliciesException * You've created the maximum number of traffic policies that can be * created for the current AWS account. You can request an increase * to the limit on the <a * href="http://aws.amazon.com/route53-request/">Contact Us</a> * page. * @throws TrafficPolicyAlreadyExistsException * A traffic policy that has the same value for <code>Name</code> * already exists. * @throws InvalidTrafficPolicyDocumentException * The format of the traffic policy document that you specified in * the <code>Document</code> element is invalid. 
* @sample AmazonRoute53.CreateTrafficPolicy */ @Override public CreateTrafficPolicyResult createTrafficPolicy( CreateTrafficPolicyRequest createTrafficPolicyRequest) { ExecutionContext executionContext = createExecutionContext(createTrafficPolicyRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<CreateTrafficPolicyRequest> request = null; Response<CreateTrafficPolicyResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new CreateTrafficPolicyRequestMarshaller() .marshall(super .beforeMarshalling(createTrafficPolicyRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<CreateTrafficPolicyResult> responseHandler = new StaxResponseHandler<CreateTrafficPolicyResult>( new CreateTrafficPolicyResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * Creates resource record sets in a specified hosted zone based on the * settings in a specified traffic policy version. In addition, * <code>CreateTrafficPolicyInstance</code> associates the resource record * sets with a specified domain name (such as example.com) or subdomain name * (such as www.example.com). Amazon Route 53 responds to DNS queries for * the domain or subdomain name by using the resource record sets that * <code>CreateTrafficPolicyInstance</code> created. * </p> * <p> * To create a traffic policy instance, send a <code>POST</code> request to * the <code>/<i>Route 53 API version</i>/trafficpolicyinstance</code> * resource. The request body must include a document with a * <code>CreateTrafficPolicyRequest</code> element. 
The response returns the * <code>CreateTrafficPolicyInstanceResponse</code> element, which contains * information about the traffic policy instance. * </p> * * @param createTrafficPolicyInstanceRequest * A complex type that contains information about the resource record * sets that you want to create based on a specified traffic policy. * @return Result of the CreateTrafficPolicyInstance operation returned by * the service. * @throws NoSuchHostedZoneException * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @throws TooManyTrafficPolicyInstancesException * You've created the maximum number of traffic policy instances * that can be created for the current AWS account. You can request * an increase to the limit on the <a * href="http://aws.amazon.com/route53-request/">Contact Us</a> * page. * @throws NoSuchTrafficPolicyException * No traffic policy exists with the specified ID. * @throws TrafficPolicyInstanceAlreadyExistsException * Traffic policy instance with given Id already exists. * @sample AmazonRoute53.CreateTrafficPolicyInstance */ @Override public CreateTrafficPolicyInstanceResult createTrafficPolicyInstance( CreateTrafficPolicyInstanceRequest createTrafficPolicyInstanceRequest) { ExecutionContext executionContext = createExecutionContext(createTrafficPolicyInstanceRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<CreateTrafficPolicyInstanceRequest> request = null; Response<CreateTrafficPolicyInstanceResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new CreateTrafficPolicyInstanceRequestMarshaller() .marshall(super .beforeMarshalling(createTrafficPolicyInstanceRequest)); // Binds the request metrics to the current request. 
request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<CreateTrafficPolicyInstanceResult> responseHandler = new StaxResponseHandler<CreateTrafficPolicyInstanceResult>( new CreateTrafficPolicyInstanceResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * Creates a new version of an existing traffic policy. When you create a * new version of a traffic policy, you specify the ID of the traffic policy * that you want to update and a JSON-formatted document that describes the * new version. * </p> * <p> * You use traffic policies to create multiple DNS resource record sets for * one domain name (such as example.com) or one subdomain name (such as * www.example.com). * </p> * <p> * To create a new version, send a <code>POST</code> request to the * <code>/<i>Route 53 API version</i>/trafficpolicy/</code> resource. The * request body includes a document with a * <code>CreateTrafficPolicyVersionRequest</code> element. The response * returns the <code>CreateTrafficPolicyVersionResponse</code> element, * which contains information about the new version of the traffic policy. * </p> * * @param createTrafficPolicyVersionRequest * A complex type that contains information about the traffic policy * for which you want to create a new version. * @return Result of the CreateTrafficPolicyVersion operation returned by * the service. * @throws NoSuchTrafficPolicyException * No traffic policy exists with the specified ID. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @throws ConcurrentModificationException * Another user submitted a request to update the object at the same * time that you did. Retry the request. 
* @throws InvalidTrafficPolicyDocumentException * The format of the traffic policy document that you specified in * the <code>Document</code> element is invalid. * @sample AmazonRoute53.CreateTrafficPolicyVersion */ @Override public CreateTrafficPolicyVersionResult createTrafficPolicyVersion( CreateTrafficPolicyVersionRequest createTrafficPolicyVersionRequest) { ExecutionContext executionContext = createExecutionContext(createTrafficPolicyVersionRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<CreateTrafficPolicyVersionRequest> request = null; Response<CreateTrafficPolicyVersionResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new CreateTrafficPolicyVersionRequestMarshaller() .marshall(super .beforeMarshalling(createTrafficPolicyVersionRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<CreateTrafficPolicyVersionResult> responseHandler = new StaxResponseHandler<CreateTrafficPolicyVersionResult>( new CreateTrafficPolicyVersionResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * This action deletes a health check. To delete a health check, send a * <code>DELETE</code> request to the * <code>/<i>Route 53 API version</i>/healthcheck/<i>health check ID</i></code> * resource. * </p> * <important> You can delete a health check only if there are no resource * record sets associated with this health check. If resource record sets * are associated with this health check, you must disassociate them before * you can delete your health check. 
If you try to delete a health check * that is associated with resource record sets, Amazon Route 53 will deny * your request with a <code>HealthCheckInUse</code> error. For information * about disassociating the records from your health check, see * <a>ChangeResourceRecordSets</a>.</important> * * @param deleteHealthCheckRequest * A complex type containing the request information for delete * health check. * @return Result of the DeleteHealthCheck operation returned by the * service. * @throws NoSuchHealthCheckException * The health check you are trying to get or delete does not exist. * @throws HealthCheckInUseException * There are resource records associated with this health check. * Before you can delete the health check, you must disassociate it * from the resource record sets. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @sample AmazonRoute53.DeleteHealthCheck */ @Override public DeleteHealthCheckResult deleteHealthCheck( DeleteHealthCheckRequest deleteHealthCheckRequest) { ExecutionContext executionContext = createExecutionContext(deleteHealthCheckRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<DeleteHealthCheckRequest> request = null; Response<DeleteHealthCheckResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new DeleteHealthCheckRequestMarshaller() .marshall(super .beforeMarshalling(deleteHealthCheckRequest)); // Binds the request metrics to the current request. 
request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<DeleteHealthCheckResult> responseHandler = new StaxResponseHandler<DeleteHealthCheckResult>( new DeleteHealthCheckResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * This action deletes a hosted zone. To delete a hosted zone, send a * <code>DELETE</code> request to the * <code>/<i>Route 53 API version</i>/hostedzone/<i>hosted zone ID</i></code> * resource. * </p> * <p> * For more information about deleting a hosted zone, see <a href= * "http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DeleteHostedZone.html" * >Deleting a Hosted Zone</a> in the <i>Amazon Route 53 Developer * Guide</i>. * </p> * <important> You can delete a hosted zone only if there are no resource * record sets other than the default SOA record and NS resource record * sets. If your hosted zone contains other resource record sets, you must * delete them before you can delete your hosted zone. If you try to delete * a hosted zone that contains other resource record sets, Amazon Route 53 * will deny your request with a <code>HostedZoneNotEmpty</code> error. For * information about deleting records from your hosted zone, see * <a>ChangeResourceRecordSets</a>.</important> * * @param deleteHostedZoneRequest * A complex type that contains information about the hosted zone * that you want to delete. * @return Result of the DeleteHostedZone operation returned by the service. * @throws NoSuchHostedZoneException * @throws HostedZoneNotEmptyException * The hosted zone contains resource record sets in addition to the * default NS and SOA resource record sets. Before you can delete * the hosted zone, you must delete the additional resource record * sets. 
* @throws PriorRequestNotCompleteException * The request was rejected because Amazon Route 53 was still * processing a prior request. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @sample AmazonRoute53.DeleteHostedZone */ @Override public DeleteHostedZoneResult deleteHostedZone( DeleteHostedZoneRequest deleteHostedZoneRequest) { ExecutionContext executionContext = createExecutionContext(deleteHostedZoneRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<DeleteHostedZoneRequest> request = null; Response<DeleteHostedZoneResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new DeleteHostedZoneRequestMarshaller() .marshall(super .beforeMarshalling(deleteHostedZoneRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<DeleteHostedZoneResult> responseHandler = new StaxResponseHandler<DeleteHostedZoneResult>( new DeleteHostedZoneResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * This action deletes a reusable delegation set. To delete a reusable * delegation set, send a <code>DELETE</code> request to the * <code>/<i>Route 53 API version</i>/delegationset/<i>delegation set ID</i></code> * resource. * </p> * <important> You can delete a reusable delegation set only if there are no * associated hosted zones. If your reusable delegation set contains * associated hosted zones, you must delete them before you can delete your * reusable delegation set. 
If you try to delete a reusable delegation set * that contains associated hosted zones, Amazon Route 53 will deny your * request with a <code>DelegationSetInUse</code> error.</important> * * @param deleteReusableDelegationSetRequest * A complex type containing the information for the delete request. * @return Result of the DeleteReusableDelegationSet operation returned by * the service. * @throws NoSuchDelegationSetException * The specified delegation set does not exist. * @throws DelegationSetInUseException * The specified delegation contains associated hosted zones which * must be deleted before the reusable delegation set can be * deleted. * @throws DelegationSetNotReusableException * The specified delegation set has not been marked as reusable. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @sample AmazonRoute53.DeleteReusableDelegationSet */ @Override public DeleteReusableDelegationSetResult deleteReusableDelegationSet( DeleteReusableDelegationSetRequest deleteReusableDelegationSetRequest) { ExecutionContext executionContext = createExecutionContext(deleteReusableDelegationSetRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<DeleteReusableDelegationSetRequest> request = null; Response<DeleteReusableDelegationSetResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new DeleteReusableDelegationSetRequestMarshaller() .marshall(super .beforeMarshalling(deleteReusableDelegationSetRequest)); // Binds the request metrics to the current request. 
request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<DeleteReusableDelegationSetResult> responseHandler = new StaxResponseHandler<DeleteReusableDelegationSetResult>( new DeleteReusableDelegationSetResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * Deletes a traffic policy. To delete a traffic policy, send a * <code>DELETE</code> request to the * <code>/<i>Route 53 API version</i>/trafficpolicy</code> resource. * </p> * * @param deleteTrafficPolicyRequest * A request to delete a specified traffic policy version. * @return Result of the DeleteTrafficPolicy operation returned by the * service. * @throws NoSuchTrafficPolicyException * No traffic policy exists with the specified ID. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @throws TrafficPolicyInUseException * One or more traffic policy instances were created by using the * specified traffic policy. * @throws ConcurrentModificationException * Another user submitted a request to update the object at the same * time that you did. Retry the request. 
* @sample AmazonRoute53.DeleteTrafficPolicy */ @Override public DeleteTrafficPolicyResult deleteTrafficPolicy( DeleteTrafficPolicyRequest deleteTrafficPolicyRequest) { ExecutionContext executionContext = createExecutionContext(deleteTrafficPolicyRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<DeleteTrafficPolicyRequest> request = null; Response<DeleteTrafficPolicyResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new DeleteTrafficPolicyRequestMarshaller() .marshall(super .beforeMarshalling(deleteTrafficPolicyRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<DeleteTrafficPolicyResult> responseHandler = new StaxResponseHandler<DeleteTrafficPolicyResult>( new DeleteTrafficPolicyResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * Deletes a traffic policy instance and all of the resource record sets * that Amazon Route 53 created when you created the instance. * </p> * <p> * To delete a traffic policy instance, send a <code>DELETE</code> request * to the * <code>/<i>Route 53 API version</i>/trafficpolicy/<i>traffic policy instance ID</i></code> * resource. * </p> * <important>When you delete a traffic policy instance, Amazon Route 53 * also deletes all of the resource record sets that were created when you * created the traffic policy instance.</important> * * @param deleteTrafficPolicyInstanceRequest * A complex type that contains information about the traffic policy * instance that you want to delete. * @return Result of the DeleteTrafficPolicyInstance operation returned by * the service. 
* @throws NoSuchTrafficPolicyInstanceException * No traffic policy instance exists with the specified ID. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @throws PriorRequestNotCompleteException * The request was rejected because Amazon Route 53 was still * processing a prior request. * @sample AmazonRoute53.DeleteTrafficPolicyInstance */ @Override public DeleteTrafficPolicyInstanceResult deleteTrafficPolicyInstance( DeleteTrafficPolicyInstanceRequest deleteTrafficPolicyInstanceRequest) { ExecutionContext executionContext = createExecutionContext(deleteTrafficPolicyInstanceRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<DeleteTrafficPolicyInstanceRequest> request = null; Response<DeleteTrafficPolicyInstanceResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new DeleteTrafficPolicyInstanceRequestMarshaller() .marshall(super .beforeMarshalling(deleteTrafficPolicyInstanceRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<DeleteTrafficPolicyInstanceResult> responseHandler = new StaxResponseHandler<DeleteTrafficPolicyInstanceResult>( new DeleteTrafficPolicyInstanceResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * This action disassociates a VPC from an hosted zone. * </p> * <p> * To disassociate a VPC to a hosted zone, send a <code>POST</code> request * to the * <code>/<i>Route 53 API version</i>/hostedzone/<i>hosted zone ID</i>/disassociatevpc</code> * resource. 
The request body must include a document with a * <code>DisassociateVPCFromHostedZoneRequest</code> element. The response * returns the <code>DisassociateVPCFromHostedZoneResponse</code> element * that contains <code>ChangeInfo</code> for you to track the progress of * the <code>DisassociateVPCFromHostedZoneRequest</code> you made. See * <code>GetChange</code> operation for how to track the progress of your * change. * </p> * * @param disassociateVPCFromHostedZoneRequest * A complex type that contains information about the request to * disassociate a VPC from an hosted zone. * @return Result of the DisassociateVPCFromHostedZone operation returned by * the service. * @throws NoSuchHostedZoneException * @throws InvalidVPCIdException * The hosted zone you are trying to create for your VPC_ID does not * belong to you. Amazon Route 53 returns this error when the VPC * specified by <code>VPCId</code> does not belong to you. * @throws VPCAssociationNotFoundException * The VPC you specified is not currently associated with the hosted * zone. * @throws LastVPCAssociationException * The VPC you are trying to disassociate from the hosted zone is * the last the VPC that is associated with the hosted zone. Amazon * Route 53 currently doesn't support disassociate the last VPC from * the hosted zone. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. 
* @sample AmazonRoute53.DisassociateVPCFromHostedZone */ @Override public DisassociateVPCFromHostedZoneResult disassociateVPCFromHostedZone( DisassociateVPCFromHostedZoneRequest disassociateVPCFromHostedZoneRequest) { ExecutionContext executionContext = createExecutionContext(disassociateVPCFromHostedZoneRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<DisassociateVPCFromHostedZoneRequest> request = null; Response<DisassociateVPCFromHostedZoneResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new DisassociateVPCFromHostedZoneRequestMarshaller() .marshall(super .beforeMarshalling(disassociateVPCFromHostedZoneRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<DisassociateVPCFromHostedZoneResult> responseHandler = new StaxResponseHandler<DisassociateVPCFromHostedZoneResult>( new DisassociateVPCFromHostedZoneResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * This action returns the current status of a change batch request. The * status is one of the following values: * </p> * <p> * - <code>PENDING</code> indicates that the changes in this request have * not replicated to all Amazon Route 53 DNS servers. This is the initial * status of all change batch requests. * </p> * <p> * - <code>INSYNC</code> indicates that the changes have replicated to all * Amazon Route 53 DNS servers. * </p> * * @param getChangeRequest * The input for a GetChange request. * @return Result of the GetChange operation returned by the service. 
* @throws NoSuchChangeException * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @sample AmazonRoute53.GetChange */ @Override public GetChangeResult getChange(GetChangeRequest getChangeRequest) { ExecutionContext executionContext = createExecutionContext(getChangeRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<GetChangeRequest> request = null; Response<GetChangeResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new GetChangeRequestMarshaller().marshall(super .beforeMarshalling(getChangeRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<GetChangeResult> responseHandler = new StaxResponseHandler<GetChangeResult>( new GetChangeResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * This action returns the status and changes of a change batch request. * </p> * * @param getChangeDetailsRequest * The input for a GetChangeDetails request. * @return Result of the GetChangeDetails operation returned by the service. * @throws NoSuchChangeException * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. 
* @sample AmazonRoute53.GetChangeDetails */ @Override @Deprecated public GetChangeDetailsResult getChangeDetails( GetChangeDetailsRequest getChangeDetailsRequest) { ExecutionContext executionContext = createExecutionContext(getChangeDetailsRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<GetChangeDetailsRequest> request = null; Response<GetChangeDetailsResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new GetChangeDetailsRequestMarshaller() .marshall(super .beforeMarshalling(getChangeDetailsRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<GetChangeDetailsResult> responseHandler = new StaxResponseHandler<GetChangeDetailsResult>( new GetChangeDetailsResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * To retrieve a list of the IP ranges used by Amazon Route 53 health * checkers to check the health of your resources, send a <code>GET</code> * request to the <code>/<i>Route 53 API version</i>/checkeripranges</code> * resource. You can use these IP addresses to configure router and firewall * rules to allow health checkers to check the health of your resources. * </p> * * @param getCheckerIpRangesRequest * Empty request. * @return Result of the GetCheckerIpRanges operation returned by the * service. 
* @sample AmazonRoute53.GetCheckerIpRanges */ @Override public GetCheckerIpRangesResult getCheckerIpRanges( GetCheckerIpRangesRequest getCheckerIpRangesRequest) { ExecutionContext executionContext = createExecutionContext(getCheckerIpRangesRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<GetCheckerIpRangesRequest> request = null; Response<GetCheckerIpRangesResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new GetCheckerIpRangesRequestMarshaller() .marshall(super .beforeMarshalling(getCheckerIpRangesRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<GetCheckerIpRangesResult> responseHandler = new StaxResponseHandler<GetCheckerIpRangesResult>( new GetCheckerIpRangesResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } @Override public GetCheckerIpRangesResult getCheckerIpRanges() { return getCheckerIpRanges(new GetCheckerIpRangesRequest()); } /** * <p> * To retrieve a single geo location, send a <code>GET</code> request to the * <code>/<i>Route 53 API version</i>/geolocation</code> resource with one * of these options: continentcode | countrycode | countrycode and * subdivisioncode. * </p> * * @param getGeoLocationRequest * A complex type that contains information about the request to get * a geo location. * @return Result of the GetGeoLocation operation returned by the service. * @throws NoSuchGeoLocationException * The geo location you are trying to get does not exist. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. 
* @sample AmazonRoute53.GetGeoLocation */ @Override public GetGeoLocationResult getGeoLocation( GetGeoLocationRequest getGeoLocationRequest) { ExecutionContext executionContext = createExecutionContext(getGeoLocationRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<GetGeoLocationRequest> request = null; Response<GetGeoLocationResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new GetGeoLocationRequestMarshaller().marshall(super .beforeMarshalling(getGeoLocationRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<GetGeoLocationResult> responseHandler = new StaxResponseHandler<GetGeoLocationResult>( new GetGeoLocationResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } @Override public GetGeoLocationResult getGeoLocation() { return getGeoLocation(new GetGeoLocationRequest()); } /** * <p> * To retrieve the health check, send a <code>GET</code> request to the * <code>/<i>Route 53 API version</i>/healthcheck/<i>health check ID</i></code> * resource. * </p> * * @param getHealthCheckRequest * A complex type that contains information about the request to get * a health check. * @return Result of the GetHealthCheck operation returned by the service. * @throws NoSuchHealthCheckException * The health check you are trying to get or delete does not exist. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @throws IncompatibleVersionException * The resource you are trying to access is unsupported on this * Amazon Route 53 endpoint. 
Please consider using a newer endpoint * or a tool that does so. * @sample AmazonRoute53.GetHealthCheck */ @Override public GetHealthCheckResult getHealthCheck( GetHealthCheckRequest getHealthCheckRequest) { ExecutionContext executionContext = createExecutionContext(getHealthCheckRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<GetHealthCheckRequest> request = null; Response<GetHealthCheckResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new GetHealthCheckRequestMarshaller().marshall(super .beforeMarshalling(getHealthCheckRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<GetHealthCheckResult> responseHandler = new StaxResponseHandler<GetHealthCheckResult>( new GetHealthCheckResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * To retrieve a count of all your health checks, send a <code>GET</code> * request to the <code>/<i>Route 53 API version</i>/healthcheckcount</code> * resource. * </p> * * @param getHealthCheckCountRequest * To retrieve a count of all your health checks, send a * <code>GET</code> request to the * <code>/<i>Route 53 API version</i>/healthcheckcount</code> * resource. * @return Result of the GetHealthCheckCount operation returned by the * service. 
* @sample AmazonRoute53.GetHealthCheckCount */ @Override public GetHealthCheckCountResult getHealthCheckCount( GetHealthCheckCountRequest getHealthCheckCountRequest) { ExecutionContext executionContext = createExecutionContext(getHealthCheckCountRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<GetHealthCheckCountRequest> request = null; Response<GetHealthCheckCountResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new GetHealthCheckCountRequestMarshaller() .marshall(super .beforeMarshalling(getHealthCheckCountRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<GetHealthCheckCountResult> responseHandler = new StaxResponseHandler<GetHealthCheckCountResult>( new GetHealthCheckCountResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } @Override public GetHealthCheckCountResult getHealthCheckCount() { return getHealthCheckCount(new GetHealthCheckCountRequest()); } /** * <p> * If you want to learn why a health check is currently failing or why it * failed most recently (if at all), you can get the failure reason for the * most recent failure. Send a <code>GET</code> request to the * <code>/<i>Route 53 API version</i>/healthcheck/<i>health check ID</i>/lastfailurereason</code> * resource. * </p> * * @param getHealthCheckLastFailureReasonRequest * A complex type that contains information about the request to get * the most recent failure reason for a health check. * @return Result of the GetHealthCheckLastFailureReason operation returned * by the service. 
* @throws NoSuchHealthCheckException * The health check you are trying to get or delete does not exist. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @sample AmazonRoute53.GetHealthCheckLastFailureReason */ @Override public GetHealthCheckLastFailureReasonResult getHealthCheckLastFailureReason( GetHealthCheckLastFailureReasonRequest getHealthCheckLastFailureReasonRequest) { ExecutionContext executionContext = createExecutionContext(getHealthCheckLastFailureReasonRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<GetHealthCheckLastFailureReasonRequest> request = null; Response<GetHealthCheckLastFailureReasonResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new GetHealthCheckLastFailureReasonRequestMarshaller() .marshall(super .beforeMarshalling(getHealthCheckLastFailureReasonRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<GetHealthCheckLastFailureReasonResult> responseHandler = new StaxResponseHandler<GetHealthCheckLastFailureReasonResult>( new GetHealthCheckLastFailureReasonResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * To retrieve the health check status, send a <code>GET</code> request to * the * <code>/<i>Route 53 API version</i>/healthcheck/<i>health check ID</i>/status</code> * resource. You can use this call to get a health check's current status. * </p> * * @param getHealthCheckStatusRequest * A complex type that contains information about the request to get * health check status for a health check. 
* @return Result of the GetHealthCheckStatus operation returned by the * service. * @throws NoSuchHealthCheckException * The health check you are trying to get or delete does not exist. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @sample AmazonRoute53.GetHealthCheckStatus */ @Override public GetHealthCheckStatusResult getHealthCheckStatus( GetHealthCheckStatusRequest getHealthCheckStatusRequest) { ExecutionContext executionContext = createExecutionContext(getHealthCheckStatusRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<GetHealthCheckStatusRequest> request = null; Response<GetHealthCheckStatusResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new GetHealthCheckStatusRequestMarshaller() .marshall(super .beforeMarshalling(getHealthCheckStatusRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<GetHealthCheckStatusResult> responseHandler = new StaxResponseHandler<GetHealthCheckStatusResult>( new GetHealthCheckStatusResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * To retrieve the delegation set for a hosted zone, send a <code>GET</code> * request to the * <code>/<i>Route 53 API version</i>/hostedzone/<i>hosted zone ID</i></code> * resource. The delegation set is the four Amazon Route 53 name servers * that were assigned to the hosted zone when you created it. * </p> * * @param getHostedZoneRequest * The input for a GetHostedZone request. * @return Result of the GetHostedZone operation returned by the service. 
* @throws NoSuchHostedZoneException * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @sample AmazonRoute53.GetHostedZone */ @Override public GetHostedZoneResult getHostedZone( GetHostedZoneRequest getHostedZoneRequest) { ExecutionContext executionContext = createExecutionContext(getHostedZoneRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<GetHostedZoneRequest> request = null; Response<GetHostedZoneResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new GetHostedZoneRequestMarshaller().marshall(super .beforeMarshalling(getHostedZoneRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<GetHostedZoneResult> responseHandler = new StaxResponseHandler<GetHostedZoneResult>( new GetHostedZoneResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * To retrieve a count of all your hosted zones, send a <code>GET</code> * request to the <code>/<i>Route 53 API version</i>/hostedzonecount</code> * resource. * </p> * * @param getHostedZoneCountRequest * To retrieve a count of all your hosted zones, send a * <code>GET</code> request to the * <code>/<i>Route 53 API version</i>/hostedzonecount</code> * resource. * @return Result of the GetHostedZoneCount operation returned by the * service. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. 
* @sample AmazonRoute53.GetHostedZoneCount */ @Override public GetHostedZoneCountResult getHostedZoneCount( GetHostedZoneCountRequest getHostedZoneCountRequest) { ExecutionContext executionContext = createExecutionContext(getHostedZoneCountRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<GetHostedZoneCountRequest> request = null; Response<GetHostedZoneCountResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new GetHostedZoneCountRequestMarshaller() .marshall(super .beforeMarshalling(getHostedZoneCountRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<GetHostedZoneCountResult> responseHandler = new StaxResponseHandler<GetHostedZoneCountResult>( new GetHostedZoneCountResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } @Override public GetHostedZoneCountResult getHostedZoneCount() { return getHostedZoneCount(new GetHostedZoneCountRequest()); } /** * <p> * To retrieve the reusable delegation set, send a <code>GET</code> request * to the * <code>/<i>Route 53 API version</i>/delegationset/<i>delegation set ID</i></code> * resource. * </p> * * @param getReusableDelegationSetRequest * The input for a <code>GetReusableDelegationSet</code> request. * @return Result of the GetReusableDelegationSet operation returned by the * service. * @throws NoSuchDelegationSetException * The specified delegation set does not exist. * @throws DelegationSetNotReusableException * The specified delegation set has not been marked as reusable. 
* @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @sample AmazonRoute53.GetReusableDelegationSet */ @Override public GetReusableDelegationSetResult getReusableDelegationSet( GetReusableDelegationSetRequest getReusableDelegationSetRequest) { ExecutionContext executionContext = createExecutionContext(getReusableDelegationSetRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<GetReusableDelegationSetRequest> request = null; Response<GetReusableDelegationSetResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new GetReusableDelegationSetRequestMarshaller() .marshall(super .beforeMarshalling(getReusableDelegationSetRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<GetReusableDelegationSetResult> responseHandler = new StaxResponseHandler<GetReusableDelegationSetResult>( new GetReusableDelegationSetResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * Gets information about a specific traffic policy version. To get the * information, send a <code>GET</code> request to the * <code>/<i>Route 53 API version</i>/trafficpolicy</code> resource. * </p> * * @param getTrafficPolicyRequest * Gets information about a specific traffic policy version. To get * the information, send a GET request to the /<i>Route 53 API * version</i>/trafficpolicy resource, and specify the ID and the * version of the traffic policy. * @return Result of the GetTrafficPolicy operation returned by the service. 
* @throws NoSuchTrafficPolicyException * No traffic policy exists with the specified ID. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @sample AmazonRoute53.GetTrafficPolicy */ @Override public GetTrafficPolicyResult getTrafficPolicy( GetTrafficPolicyRequest getTrafficPolicyRequest) { ExecutionContext executionContext = createExecutionContext(getTrafficPolicyRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<GetTrafficPolicyRequest> request = null; Response<GetTrafficPolicyResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new GetTrafficPolicyRequestMarshaller() .marshall(super .beforeMarshalling(getTrafficPolicyRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<GetTrafficPolicyResult> responseHandler = new StaxResponseHandler<GetTrafficPolicyResult>( new GetTrafficPolicyResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * Gets information about a specified traffic policy instance. * </p> * <p> * To get information about the traffic policy instance, send a * <code>GET</code> request to the * <code>/<i>Route 53 API version</i>/trafficpolicyinstance</code> resource. * </p> * <note>After you submit a <code>CreateTrafficPolicyInstance</code> or an * <code>UpdateTrafficPolicyInstance</code> request, there's a brief delay * while Amazon Route 53 creates the resource record sets that are specified * in the traffic policy definition. For more information, see the * <a>State</a> response element. 
</note> * * @param getTrafficPolicyInstanceRequest * Gets information about a specified traffic policy instance.</p> * <p> * To get information about a traffic policy instance, send a * <code>GET</code> request to the * <code>/<i>Route 53 API version</i>/trafficpolicyinstance/<i>Id</i></code> * resource. * @return Result of the GetTrafficPolicyInstance operation returned by the * service. * @throws NoSuchTrafficPolicyInstanceException * No traffic policy instance exists with the specified ID. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @sample AmazonRoute53.GetTrafficPolicyInstance */ @Override public GetTrafficPolicyInstanceResult getTrafficPolicyInstance( GetTrafficPolicyInstanceRequest getTrafficPolicyInstanceRequest) { ExecutionContext executionContext = createExecutionContext(getTrafficPolicyInstanceRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<GetTrafficPolicyInstanceRequest> request = null; Response<GetTrafficPolicyInstanceResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new GetTrafficPolicyInstanceRequestMarshaller() .marshall(super .beforeMarshalling(getTrafficPolicyInstanceRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<GetTrafficPolicyInstanceResult> responseHandler = new StaxResponseHandler<GetTrafficPolicyInstanceResult>( new GetTrafficPolicyInstanceResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * Gets the number of traffic policy instances that are associated with the * current AWS account. 
* </p> * <p> * To get the number of traffic policy instances, send a <code>GET</code> * request to the * <code>/<i>Route 53 API version</i>/trafficpolicyinstancecount</code> * resource. * </p> * * @param getTrafficPolicyInstanceCountRequest * To retrieve a count of all your traffic policy instances, send a * <code>GET</code> request to the * <code>/<i>Route 53 API version</i>/trafficpolicyinstancecount</code> * resource. * @return Result of the GetTrafficPolicyInstanceCount operation returned by * the service. * @sample AmazonRoute53.GetTrafficPolicyInstanceCount */ @Override public GetTrafficPolicyInstanceCountResult getTrafficPolicyInstanceCount( GetTrafficPolicyInstanceCountRequest getTrafficPolicyInstanceCountRequest) { ExecutionContext executionContext = createExecutionContext(getTrafficPolicyInstanceCountRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<GetTrafficPolicyInstanceCountRequest> request = null; Response<GetTrafficPolicyInstanceCountResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new GetTrafficPolicyInstanceCountRequestMarshaller() .marshall(super .beforeMarshalling(getTrafficPolicyInstanceCountRequest)); // Binds the request metrics to the current request. 
request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<GetTrafficPolicyInstanceCountResult> responseHandler = new StaxResponseHandler<GetTrafficPolicyInstanceCountResult>( new GetTrafficPolicyInstanceCountResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } @Override public GetTrafficPolicyInstanceCountResult getTrafficPolicyInstanceCount() { return getTrafficPolicyInstanceCount(new GetTrafficPolicyInstanceCountRequest()); } /** * <p> * This action gets the list of ChangeBatches in a given time period for a * given hosted zone. * </p> * * @param listChangeBatchesByHostedZoneRequest * The input for a ListChangeBatchesByHostedZone request. * @return Result of the ListChangeBatchesByHostedZone operation returned by * the service. * @throws NoSuchHostedZoneException * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @sample AmazonRoute53.ListChangeBatchesByHostedZone */ @Override @Deprecated public ListChangeBatchesByHostedZoneResult listChangeBatchesByHostedZone( ListChangeBatchesByHostedZoneRequest listChangeBatchesByHostedZoneRequest) { ExecutionContext executionContext = createExecutionContext(listChangeBatchesByHostedZoneRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<ListChangeBatchesByHostedZoneRequest> request = null; Response<ListChangeBatchesByHostedZoneResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new ListChangeBatchesByHostedZoneRequestMarshaller() .marshall(super .beforeMarshalling(listChangeBatchesByHostedZoneRequest)); // Binds the request metrics to the current request. 
request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<ListChangeBatchesByHostedZoneResult> responseHandler = new StaxResponseHandler<ListChangeBatchesByHostedZoneResult>( new ListChangeBatchesByHostedZoneResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * This action gets the list of ChangeBatches in a given time period for a * given hosted zone and RRSet. * </p> * * @param listChangeBatchesByRRSetRequest * The input for a ListChangeBatchesByRRSet request. * @return Result of the ListChangeBatchesByRRSet operation returned by the * service. * @throws NoSuchHostedZoneException * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @sample AmazonRoute53.ListChangeBatchesByRRSet */ @Override @Deprecated public ListChangeBatchesByRRSetResult listChangeBatchesByRRSet( ListChangeBatchesByRRSetRequest listChangeBatchesByRRSetRequest) { ExecutionContext executionContext = createExecutionContext(listChangeBatchesByRRSetRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<ListChangeBatchesByRRSetRequest> request = null; Response<ListChangeBatchesByRRSetResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new ListChangeBatchesByRRSetRequestMarshaller() .marshall(super .beforeMarshalling(listChangeBatchesByRRSetRequest)); // Binds the request metrics to the current request. 
request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<ListChangeBatchesByRRSetResult> responseHandler = new StaxResponseHandler<ListChangeBatchesByRRSetResult>( new ListChangeBatchesByRRSetResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * To retrieve a list of supported geo locations, send a <code>GET</code> * request to the <code>/<i>Route 53 API version</i>/geolocations</code> * resource. The response to this request includes a * <code>GeoLocationDetailsList</code> element with zero, one, or multiple * <code>GeoLocationDetails</code> child elements. The list is sorted by * country code, and then subdivision code, followed by continents at the * end of the list. * </p> * <p> * By default, the list of geo locations is displayed on a single page. You * can control the length of the page that is displayed by using the * <code>MaxItems</code> parameter. If the list is truncated, * <code>IsTruncated</code> will be set to <i>true</i> and a combination of * <code>NextContinentCode, NextCountryCode, NextSubdivisionCode</code> will * be populated. You can pass these as parameters to * <code>StartContinentCode, StartCountryCode, StartSubdivisionCode</code> * to control the geo location that the list begins with. * </p> * * @param listGeoLocationsRequest * The input for a <code>ListGeoLocations</code> request. * @return Result of the ListGeoLocations operation returned by the service. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. 
* @sample AmazonRoute53.ListGeoLocations */ @Override public ListGeoLocationsResult listGeoLocations( ListGeoLocationsRequest listGeoLocationsRequest) { ExecutionContext executionContext = createExecutionContext(listGeoLocationsRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<ListGeoLocationsRequest> request = null; Response<ListGeoLocationsResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new ListGeoLocationsRequestMarshaller() .marshall(super .beforeMarshalling(listGeoLocationsRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<ListGeoLocationsResult> responseHandler = new StaxResponseHandler<ListGeoLocationsResult>( new ListGeoLocationsResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } @Override public ListGeoLocationsResult listGeoLocations() { return listGeoLocations(new ListGeoLocationsRequest()); } /** * <p> * To retrieve a list of your health checks, send a <code>GET</code> request * to the <code>/<i>Route 53 API version</i>/healthcheck</code> resource. * The response to this request includes a <code>HealthChecks</code> element * with zero, one, or multiple <code>HealthCheck</code> child elements. By * default, the list of health checks is displayed on a single page. You can * control the length of the page that is displayed by using the * <code>MaxItems</code> parameter. You can use the <code>Marker</code> * parameter to control the health check that the list begins with. * </p> * <note> Amazon Route 53 returns a maximum of 100 items. 
If you set * MaxItems to a value greater than 100, Amazon Route 53 returns only the * first 100.</note> * * @param listHealthChecksRequest * To retrieve a list of your health checks, send a <code>GET</code> * request to the * <code>/<i>Route 53 API version</i>/healthcheck</code> resource. * The response to this request includes a <code>HealthChecks</code> * element with zero or more <code>HealthCheck</code> child elements. * By default, the list of health checks is displayed on a single * page. You can control the length of the page that is displayed by * using the <code>MaxItems</code> parameter. You can use the * <code>Marker</code> parameter to control the health check that the * list begins with.</p> <note> Amazon Route 53 returns a maximum of * 100 items. If you set <code>MaxItems</code> to a value greater * than 100, Amazon Route 53 returns only the first 100. * @return Result of the ListHealthChecks operation returned by the service. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @throws IncompatibleVersionException * The resource you are trying to access is unsupported on this * Amazon Route 53 endpoint. Please consider using a newer endpoint * or a tool that does so. * @sample AmazonRoute53.ListHealthChecks */ @Override public ListHealthChecksResult listHealthChecks( ListHealthChecksRequest listHealthChecksRequest) { ExecutionContext executionContext = createExecutionContext(listHealthChecksRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<ListHealthChecksRequest> request = null; Response<ListHealthChecksResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new ListHealthChecksRequestMarshaller() .marshall(super .beforeMarshalling(listHealthChecksRequest)); // Binds the request metrics to the current request. 
request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<ListHealthChecksResult> responseHandler = new StaxResponseHandler<ListHealthChecksResult>( new ListHealthChecksResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } @Override public ListHealthChecksResult listHealthChecks() { return listHealthChecks(new ListHealthChecksRequest()); } /** * <p> * To retrieve a list of your hosted zones, send a <code>GET</code> request * to the <code>/<i>Route 53 API version</i>/hostedzone</code> resource. The * response to this request includes a <code>HostedZones</code> element with * zero, one, or multiple <code>HostedZone</code> child elements. By * default, the list of hosted zones is displayed on a single page. You can * control the length of the page that is displayed by using the * <code>MaxItems</code> parameter. You can use the <code>Marker</code> * parameter to control the hosted zone that the list begins with. * </p> * <note> Amazon Route 53 returns a maximum of 100 items. If you set * MaxItems to a value greater than 100, Amazon Route 53 returns only the * first 100.</note> * * @param listHostedZonesRequest * To retrieve a list of your hosted zones, send a <code>GET</code> * request to the * <code>/<i>Route 53 API version</i>/hostedzone</code> resource. The * response to this request includes a <code>HostedZones</code> * element with zero or more <code>HostedZone</code> child elements. * By default, the list of hosted zones is displayed on a single * page. You can control the length of the page that is displayed by * using the <code>MaxItems</code> parameter. You can use the * <code>Marker</code> parameter to control the hosted zone that the * list begins with. 
For more information about listing hosted zones, * see <a href= * "http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ListInfoOnHostedZone.html" * >Listing the Hosted Zones for an AWS Account</a> in the <i>Amazon * Route 53 Developer Guide</i>.</p> <note> Amazon Route 53 returns a * maximum of 100 items. If you set <code>MaxItems</code> to a value * greater than 100, Amazon Route 53 returns only the first 100. * @return Result of the ListHostedZones operation returned by the service. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @throws NoSuchDelegationSetException * The specified delegation set does not exist. * @throws DelegationSetNotReusableException * The specified delegation set has not been marked as reusable. * @sample AmazonRoute53.ListHostedZones */ @Override public ListHostedZonesResult listHostedZones( ListHostedZonesRequest listHostedZonesRequest) { ExecutionContext executionContext = createExecutionContext(listHostedZonesRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<ListHostedZonesRequest> request = null; Response<ListHostedZonesResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new ListHostedZonesRequestMarshaller().marshall(super .beforeMarshalling(listHostedZonesRequest)); // Binds the request metrics to the current request. 
request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<ListHostedZonesResult> responseHandler = new StaxResponseHandler<ListHostedZonesResult>( new ListHostedZonesResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } @Override public ListHostedZonesResult listHostedZones() { return listHostedZones(new ListHostedZonesRequest()); } /** * <p> * To retrieve a list of your hosted zones in lexicographic order, send a * <code>GET</code> request to the * <code>/<i>Route 53 API version</i>/hostedzonesbyname</code> resource. The * response to this request includes a <code>HostedZones</code> element with * zero or more <code>HostedZone</code> child elements lexicographically * ordered by DNS name. By default, the list of hosted zones is displayed on * a single page. You can control the length of the page that is displayed * by using the <code>MaxItems</code> parameter. You can use the * <code>DNSName</code> and <code>HostedZoneId</code> parameters to control * the hosted zone that the list begins with. * </p> * <note> Amazon Route 53 returns a maximum of 100 items. If you set * MaxItems to a value greater than 100, Amazon Route 53 returns only the * first 100.</note> * * @param listHostedZonesByNameRequest * To retrieve a list of your hosted zones in lexicographic order, * send a <code>GET</code> request to the * <code>/<i>Route 53 API version</i>/hostedzonesbyname</code> * resource. The response to this request includes a * <code>HostedZones</code> element with zero or more * <code>HostedZone</code> child elements lexicographically ordered * by DNS name. By default, the list of hosted zones is displayed on * a single page. You can control the length of the page that is * displayed by using the <code>MaxItems</code> parameter. 
You can * use the <code>DNSName</code> and <code>HostedZoneId</code> * parameters to control the hosted zone that the list begins * with.</p> * <p> * For more information about listing hosted zones, see <a href= * "http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ListInfoOnHostedZone.html" * >Listing the Hosted Zones for an AWS Account</a> in the <i>Amazon * Route 53 Developer Guide</i>. * @return Result of the ListHostedZonesByName operation returned by the * service. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @throws InvalidDomainNameException * This error indicates that the specified domain name is not valid. * @sample AmazonRoute53.ListHostedZonesByName */ @Override public ListHostedZonesByNameResult listHostedZonesByName( ListHostedZonesByNameRequest listHostedZonesByNameRequest) { ExecutionContext executionContext = createExecutionContext(listHostedZonesByNameRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<ListHostedZonesByNameRequest> request = null; Response<ListHostedZonesByNameResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new ListHostedZonesByNameRequestMarshaller() .marshall(super .beforeMarshalling(listHostedZonesByNameRequest)); // Binds the request metrics to the current request. 
request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<ListHostedZonesByNameResult> responseHandler = new StaxResponseHandler<ListHostedZonesByNameResult>( new ListHostedZonesByNameResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } @Override public ListHostedZonesByNameResult listHostedZonesByName() { return listHostedZonesByName(new ListHostedZonesByNameRequest()); } /** * <p> * Imagine all the resource record sets in a zone listed out in front of * you. Imagine them sorted lexicographically first by DNS name (with the * labels reversed, like "com.amazon.www" for example), and secondarily, * lexicographically by record type. This operation retrieves at most * MaxItems resource record sets from this list, in order, starting at a * position specified by the Name and Type arguments: * </p> * <ul> * <li>If both Name and Type are omitted, this means start the results at * the first RRSET in the HostedZone.</li> * <li>If Name is specified but Type is omitted, this means start the * results at the first RRSET in the list whose name is greater than or * equal to Name.</li> * <li>If both Name and Type are specified, this means start the results at * the first RRSET in the list whose name is greater than or equal to Name * and whose type is greater than or equal to Type.</li> * <li>It is an error to specify the Type but not the Name.</li> * </ul> * <p> * Use ListResourceRecordSets to retrieve a single known record set by * specifying the record set's name and type, and setting MaxItems = 1 * </p> * <p> * To retrieve all the records in a HostedZone, first pause any processes * making calls to ChangeResourceRecordSets. Initially call * ListResourceRecordSets without a Name and Type to get the first page of * record sets. 
For subsequent calls, set Name and Type to the NextName and * NextType values returned by the previous response. * </p> * <p> * In the presence of concurrent ChangeResourceRecordSets calls, there is no * consistency of results across calls to ListResourceRecordSets. The only * way to get a consistent multi-page snapshot of all RRSETs in a zone is to * stop making changes while pagination is in progress. * </p> * <p> * However, the results from ListResourceRecordSets are consistent within a * page. If MakeChange calls are taking place concurrently, the result of * each one will either be completely visible in your results or not at all. * You will not see partial changes, or changes that do not ultimately * succeed. (This follows from the fact that MakeChange is atomic) * </p> * <p> * The results from ListResourceRecordSets are strongly consistent with * ChangeResourceRecordSets. To be precise, if a single process makes a call * to ChangeResourceRecordSets and receives a successful response, the * effects of that change will be visible in a subsequent call to * ListResourceRecordSets by that process. * </p> * * @param listResourceRecordSetsRequest * The input for a ListResourceRecordSets request. * @return Result of the ListResourceRecordSets operation returned by the * service. * @throws NoSuchHostedZoneException * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. 
* @sample AmazonRoute53.ListResourceRecordSets */ @Override public ListResourceRecordSetsResult listResourceRecordSets( ListResourceRecordSetsRequest listResourceRecordSetsRequest) { ExecutionContext executionContext = createExecutionContext(listResourceRecordSetsRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<ListResourceRecordSetsRequest> request = null; Response<ListResourceRecordSetsResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new ListResourceRecordSetsRequestMarshaller() .marshall(super .beforeMarshalling(listResourceRecordSetsRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<ListResourceRecordSetsResult> responseHandler = new StaxResponseHandler<ListResourceRecordSetsResult>( new ListResourceRecordSetsResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * To retrieve a list of your reusable delegation sets, send a * <code>GET</code> request to the * <code>/<i>Route 53 API version</i>/delegationset</code> resource. The * response to this request includes a <code>DelegationSets</code> element * with zero, one, or multiple <code>DelegationSet</code> child elements. By * default, the list of delegation sets is displayed on a single page. You * can control the length of the page that is displayed by using the * <code>MaxItems</code> parameter. You can use the <code>Marker</code> * parameter to control the delegation set that the list begins with. * </p> * <note> Amazon Route 53 returns a maximum of 100 items. 
If you set * MaxItems to a value greater than 100, Amazon Route 53 returns only the * first 100.</note> * * @param listReusableDelegationSetsRequest * To retrieve a list of your reusable delegation sets, send a * <code>GET</code> request to the * <code>/<i>Route 53 API version</i>/delegationset</code> resource. * The response to this request includes a * <code>DelegationSets</code> element with zero or more * <code>DelegationSet</code> child elements. By default, the list of * reusable delegation sets is displayed on a single page. You can * control the length of the page that is displayed by using the * <code>MaxItems</code> parameter. You can use the * <code>Marker</code> parameter to control the delegation set that * the list begins with.</p> <note> Amazon Route 53 returns a maximum * of 100 items. If you set <code>MaxItems</code> to a value greater * than 100, Amazon Route 53 returns only the first 100. * @return Result of the ListReusableDelegationSets operation returned by * the service. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @sample AmazonRoute53.ListReusableDelegationSets */ @Override public ListReusableDelegationSetsResult listReusableDelegationSets( ListReusableDelegationSetsRequest listReusableDelegationSetsRequest) { ExecutionContext executionContext = createExecutionContext(listReusableDelegationSetsRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<ListReusableDelegationSetsRequest> request = null; Response<ListReusableDelegationSetsResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new ListReusableDelegationSetsRequestMarshaller() .marshall(super .beforeMarshalling(listReusableDelegationSetsRequest)); // Binds the request metrics to the current request. 
request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<ListReusableDelegationSetsResult> responseHandler = new StaxResponseHandler<ListReusableDelegationSetsResult>( new ListReusableDelegationSetsResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } @Override public ListReusableDelegationSetsResult listReusableDelegationSets() { return listReusableDelegationSets(new ListReusableDelegationSetsRequest()); } /** * @param listTagsForResourceRequest * A complex type containing information about a request for a list * of the tags that are associated with an individual resource. * @return Result of the ListTagsForResource operation returned by the * service. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @throws NoSuchHealthCheckException * The health check you are trying to get or delete does not exist. * @throws NoSuchHostedZoneException * @throws PriorRequestNotCompleteException * The request was rejected because Amazon Route 53 was still * processing a prior request. 
* @throws ThrottlingException * @sample AmazonRoute53.ListTagsForResource */ @Override public ListTagsForResourceResult listTagsForResource( ListTagsForResourceRequest listTagsForResourceRequest) { ExecutionContext executionContext = createExecutionContext(listTagsForResourceRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<ListTagsForResourceRequest> request = null; Response<ListTagsForResourceResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new ListTagsForResourceRequestMarshaller() .marshall(super .beforeMarshalling(listTagsForResourceRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<ListTagsForResourceResult> responseHandler = new StaxResponseHandler<ListTagsForResourceResult>( new ListTagsForResourceResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * @param listTagsForResourcesRequest * A complex type containing information about a request for a list * of the tags that are associated with up to 10 specified resources. * @return Result of the ListTagsForResources operation returned by the * service. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @throws NoSuchHealthCheckException * The health check you are trying to get or delete does not exist. * @throws NoSuchHostedZoneException * @throws PriorRequestNotCompleteException * The request was rejected because Amazon Route 53 was still * processing a prior request. 
* @throws ThrottlingException * @sample AmazonRoute53.ListTagsForResources */ @Override public ListTagsForResourcesResult listTagsForResources( ListTagsForResourcesRequest listTagsForResourcesRequest) { ExecutionContext executionContext = createExecutionContext(listTagsForResourcesRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<ListTagsForResourcesRequest> request = null; Response<ListTagsForResourcesResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new ListTagsForResourcesRequestMarshaller() .marshall(super .beforeMarshalling(listTagsForResourcesRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<ListTagsForResourcesResult> responseHandler = new StaxResponseHandler<ListTagsForResourcesResult>( new ListTagsForResourcesResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * Gets information about the latest version for every traffic policy that * is associated with the current AWS account. To get the information, send * a <code>GET</code> request to the * <code>/<i>Route 53 API version</i>/trafficpolicy</code> resource. * </p> * <p> * Amazon Route 53 returns a maximum of 100 items in each response. If you * have a lot of traffic policies, you can use the <code>maxitems</code> * parameter to list them in groups of up to 100. 
* </p> * <p> * The response includes three values that help you navigate from one group * of <code>maxitems</code> traffic policies to the next: * </p> * <ul> * <li><b>IsTruncated</b></li> * <p> * If the value of <code>IsTruncated</code> in the response is * <code>true</code>, there are more traffic policies associated with the * current AWS account. * </p> * <p> * If <code>IsTruncated</code> is <code>false</code>, this response includes * the last traffic policy that is associated with the current account. * </p> * <li><b>TrafficPolicyIdMarker</b></li> * <p> * If <code>IsTruncated</code> is <code>true</code>, * <code>TrafficPolicyIdMarker</code> is the ID of the first traffic policy * in the next group of <code>MaxItems</code> traffic policies. If you want * to list more traffic policies, make another call to * <code>ListTrafficPolicies</code>, and specify the value of the * <code>TrafficPolicyIdMarker</code> element from the response in the * <code>TrafficPolicyIdMarker</code> request parameter. * </p> * <p> * If <code>IsTruncated</code> is <code>false</code>, the * <code>TrafficPolicyIdMarker</code> element is omitted from the response. * </p> * <li><b>MaxItems</b></li> * <p> * The value that you specified for the <code>MaxItems</code> parameter in * the request that produced the current response. * </p> * </ul> * * @param listTrafficPoliciesRequest * A complex type that contains the information about the request to * list the traffic policies that are associated with the current AWS * account. * @return Result of the ListTrafficPolicies operation returned by the * service. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. 
* @sample AmazonRoute53.ListTrafficPolicies */ @Override public ListTrafficPoliciesResult listTrafficPolicies( ListTrafficPoliciesRequest listTrafficPoliciesRequest) { ExecutionContext executionContext = createExecutionContext(listTrafficPoliciesRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<ListTrafficPoliciesRequest> request = null; Response<ListTrafficPoliciesResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new ListTrafficPoliciesRequestMarshaller() .marshall(super .beforeMarshalling(listTrafficPoliciesRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<ListTrafficPoliciesResult> responseHandler = new StaxResponseHandler<ListTrafficPoliciesResult>( new ListTrafficPoliciesResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } @Override public ListTrafficPoliciesResult listTrafficPolicies() { return listTrafficPolicies(new ListTrafficPoliciesRequest()); } /** * <p> * Gets information about the traffic policy instances that you created by * using the current AWS account. * </p> * <note>After you submit an <code>UpdateTrafficPolicyInstance</code> * request, there's a brief delay while Amazon Route 53 creates the resource * record sets that are specified in the traffic policy definition. For more * information, see the <a>State</a> response element.</note> * <p> * To get information about the traffic policy instances that are associated * with the current AWS account, send a <code>GET</code> request to the * <code>/<i>Route 53 API version</i>/trafficpolicyinstance</code> resource. 
* </p> * <p> * Amazon Route 53 returns a maximum of 100 items in each response. If you * have a lot of traffic policy instances, you can use the * <code>MaxItems</code> parameter to list them in groups of up to 100. * </p> * <p> * The response includes five values that help you navigate from one group * of <code>MaxItems</code> traffic policy instances to the next: * </p> * <ul> * <li><b>IsTruncated</b></li> * <p> * If the value of <code>IsTruncated</code> in the response is * <code>true</code>, there are more traffic policy instances associated * with the current AWS account. * </p> * <p> * If <code>IsTruncated</code> is <code>false</code>, this response includes * the last traffic policy instance that is associated with the current * account. * </p> * <li><b>MaxItems</b></li> * <p> * The value that you specified for the <code>MaxItems</code> parameter in * the request that produced the current response. * </p> * <li><b>HostedZoneIdMarker</b>, <b>TrafficPolicyInstanceNameMarker</b>, * and <b>TrafficPolicyInstanceTypeMarker</b></li> * <p> * If <code>IsTruncated</code> is <code>true</code>, these three values in * the response represent the first traffic policy instance in the next * group of <code>MaxItems</code> traffic policy instances. To list more * traffic policy instances, make another call to * <code>ListTrafficPolicyInstances</code>, and specify these values in the * corresponding request parameters. * </p> * <p> * If <code>IsTruncated</code> is <code>false</code>, all three elements are * omitted from the response. * </p> * </ul> * * @param listTrafficPolicyInstancesRequest * A complex type that contains the information about the request to * list your traffic policy instances. * @return Result of the ListTrafficPolicyInstances operation returned by * the service. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. 
* @throws NoSuchTrafficPolicyInstanceException * No traffic policy instance exists with the specified ID. * @sample AmazonRoute53.ListTrafficPolicyInstances */ @Override public ListTrafficPolicyInstancesResult listTrafficPolicyInstances( ListTrafficPolicyInstancesRequest listTrafficPolicyInstancesRequest) { ExecutionContext executionContext = createExecutionContext(listTrafficPolicyInstancesRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<ListTrafficPolicyInstancesRequest> request = null; Response<ListTrafficPolicyInstancesResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new ListTrafficPolicyInstancesRequestMarshaller() .marshall(super .beforeMarshalling(listTrafficPolicyInstancesRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<ListTrafficPolicyInstancesResult> responseHandler = new StaxResponseHandler<ListTrafficPolicyInstancesResult>( new ListTrafficPolicyInstancesResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } @Override public ListTrafficPolicyInstancesResult listTrafficPolicyInstances() { return listTrafficPolicyInstances(new ListTrafficPolicyInstancesRequest()); } /** * <p> * Gets information about the traffic policy instances that you created in a * specified hosted zone. * </p> * <note>After you submit an <code>UpdateTrafficPolicyInstance</code> * request, there's a brief delay while Amazon Route 53 creates the resource * record sets that are specified in the traffic policy definition. 
For more
 * information, see the <a>State</a> response element.</note>
 * <p>
 * To get information about the traffic policy instances that you created in
 * a specified hosted zone, send a <code>GET</code> request to the
 * <code>/<i>Route 53 API version</i>/trafficpolicyinstance</code> resource
 * and include the ID of the hosted zone.
 * </p>
 * <p>
 * Amazon Route 53 returns a maximum of 100 items in each response. If you
 * have a lot of traffic policy instances, you can use the
 * <code>MaxItems</code> parameter to list them in groups of up to 100.
 * </p>
 * <p>
 * The response includes four values that help you navigate from one group
 * of <code>MaxItems</code> traffic policy instances to the next:
 * </p>
 * <ul>
 * <li><b>IsTruncated</b></li>
 * <p>
 * If the value of <code>IsTruncated</code> in the response is <code>true</code>,
 * there are more traffic policy instances associated with the current AWS
 * account.
 * </p>
 * <p>
 * If <code>IsTruncated</code> is <code>false</code>, this response includes
 * the last traffic policy instance that is associated with the current
 * account.
 * </p>
 * <li><b>MaxItems</b></li>
 * <p>
 * The value that you specified for the <code>MaxItems</code> parameter in
 * the request that produced the current response.
 * </p>
 * <li><b>TrafficPolicyInstanceNameMarker</b> and
 * <b>TrafficPolicyInstanceTypeMarker</b></li>
 * <p>
 * If <code>IsTruncated</code> is <code>true</code>, these two values in the
 * response represent the first traffic policy instance in the next group of
 * <code>MaxItems</code> traffic policy instances. To list more traffic
 * policy instances, make another call to
 * <code>ListTrafficPolicyInstancesByHostedZone</code>, and specify these
 * values in the corresponding request parameters.
 * </p>
 * <p>
 * If <code>IsTruncated</code> is <code>false</code>, both elements are
 * omitted from the response.
* </p> * </ul> * * @param listTrafficPolicyInstancesByHostedZoneRequest * A request for the traffic policy instances that you created in a * specified hosted zone. * @return Result of the ListTrafficPolicyInstancesByHostedZone operation * returned by the service. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @throws NoSuchTrafficPolicyInstanceException * No traffic policy instance exists with the specified ID. * @throws NoSuchHostedZoneException * @sample AmazonRoute53.ListTrafficPolicyInstancesByHostedZone */ @Override public ListTrafficPolicyInstancesByHostedZoneResult listTrafficPolicyInstancesByHostedZone( ListTrafficPolicyInstancesByHostedZoneRequest listTrafficPolicyInstancesByHostedZoneRequest) { ExecutionContext executionContext = createExecutionContext(listTrafficPolicyInstancesByHostedZoneRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<ListTrafficPolicyInstancesByHostedZoneRequest> request = null; Response<ListTrafficPolicyInstancesByHostedZoneResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new ListTrafficPolicyInstancesByHostedZoneRequestMarshaller() .marshall(super .beforeMarshalling(listTrafficPolicyInstancesByHostedZoneRequest)); // Binds the request metrics to the current request. 
request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<ListTrafficPolicyInstancesByHostedZoneResult> responseHandler = new StaxResponseHandler<ListTrafficPolicyInstancesByHostedZoneResult>( new ListTrafficPolicyInstancesByHostedZoneResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * Gets information about the traffic policy instances that you created by * using a specify traffic policy version. * </p> * <note>After you submit a <code>CreateTrafficPolicyInstance</code> or an * <code>UpdateTrafficPolicyInstance</code> request, there's a brief delay * while Amazon Route 53 creates the resource record sets that are specified * in the traffic policy definition. For more information, see the * <a>State</a> response element.</note> * <p> * To get information about the traffic policy instances that you created by * using a specify traffic policy version, send a <code>GET</code> request * to the <code>/<i>Route 53 API version</i>/trafficpolicyinstance</code> * resource and include the ID and version of the traffic policy. * </p> * <p> * Amazon Route 53 returns a maximum of 100 items in each response. If you * have a lot of traffic policy instances, you can use the * <code>MaxItems</code> parameter to list them in groups of up to 100. * </p> * <p> * The response includes five values that help you navigate from one group * of <code>MaxItems</code> traffic policy instances to the next: * </p> * <ul> * <li><b>IsTruncated</b> * <p> * If the value of <code>IsTruncated</code> in the response is * <code>true</code>, there are more traffic policy instances associated * with the specified traffic policy. 
* </p> * <p> * If <code>IsTruncated</code> is <code>false</code>, this response includes * the last traffic policy instance that is associated with the specified * traffic policy. * </p> * </li> * <li><b>MaxItems</b> * <p> * The value that you specified for the <code>MaxItems</code> parameter in * the request that produced the current response. * </p> * </li> * <li><b>HostedZoneIdMarker</b>, <b>TrafficPolicyInstanceNameMarker</b>, * and <b>TrafficPolicyInstanceTypeMarker</b> * <p> * If <code>IsTruncated</code> is <code>true</code>, these values in the * response represent the first traffic policy instance in the next group of * <code>MaxItems</code> traffic policy instances. To list more traffic * policy instances, make another call to * <code>ListTrafficPolicyInstancesByPolicy</code>, and specify these values * in the corresponding request parameters. * </p> * <p> * If <code>IsTruncated</code> is <code>false</code>, all three elements are * omitted from the response. * </p> * </li> * </ul> * * @param listTrafficPolicyInstancesByPolicyRequest * A complex type that contains the information about the request to * list your traffic policy instances. * @return Result of the ListTrafficPolicyInstancesByPolicy operation * returned by the service. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @throws NoSuchTrafficPolicyInstanceException * No traffic policy instance exists with the specified ID. * @throws NoSuchTrafficPolicyException * No traffic policy exists with the specified ID. 
* @sample AmazonRoute53.ListTrafficPolicyInstancesByPolicy */ @Override public ListTrafficPolicyInstancesByPolicyResult listTrafficPolicyInstancesByPolicy( ListTrafficPolicyInstancesByPolicyRequest listTrafficPolicyInstancesByPolicyRequest) { ExecutionContext executionContext = createExecutionContext(listTrafficPolicyInstancesByPolicyRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<ListTrafficPolicyInstancesByPolicyRequest> request = null; Response<ListTrafficPolicyInstancesByPolicyResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new ListTrafficPolicyInstancesByPolicyRequestMarshaller() .marshall(super .beforeMarshalling(listTrafficPolicyInstancesByPolicyRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<ListTrafficPolicyInstancesByPolicyResult> responseHandler = new StaxResponseHandler<ListTrafficPolicyInstancesByPolicyResult>( new ListTrafficPolicyInstancesByPolicyResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * Gets information about all of the versions for a specified traffic * policy. <code>ListTrafficPolicyVersions</code> lists only versions that * have not been deleted. * </p> * <p> * Amazon Route 53 returns a maximum of 100 items in each response. If you * have a lot of traffic policies, you can use the <code>maxitems</code> * parameter to list them in groups of up to 100. 
* </p>
 * <p>
 * The response includes three values that help you navigate from one group
 * of <code>maxitems</code> traffic policies to the next:
 * </p>
 * <ul>
 * <li><b>IsTruncated</b></li>
 * <p>
 * If the value of <code>IsTruncated</code> in the response is
 * <code>true</code>, there are more traffic policy versions associated with
 * the specified traffic policy.
 * </p>
 * <p>
 * If <code>IsTruncated</code> is <code>false</code>, this response includes
 * the last traffic policy version that is associated with the specified
 * traffic policy.
 * </p>
 * <li><b>TrafficPolicyVersionMarker</b></li>
 * <p>
 * The ID of the next traffic policy version that is associated with the
 * current AWS account. If you want to list more traffic policies, make
 * another call to <code>ListTrafficPolicyVersions</code>, and specify the
 * value of the <code>TrafficPolicyVersionMarker</code> element in the
 * <code>TrafficPolicyVersionMarker</code> request parameter.
 * </p>
 * <p>
 * If <code>IsTruncated</code> is <code>false</code>, Amazon Route 53 omits
 * the <code>TrafficPolicyVersionMarker</code> element from the response.
 * </p>
 * <li><b>MaxItems</b></li>
 * <p>
 * The value that you specified for the <code>MaxItems</code> parameter in
 * the request that produced the current response.
 * </p>
 * </ul>
 *
 * @param listTrafficPolicyVersionsRequest
 *        A complex type that contains the information about the request to
 *        list your traffic policies.
 * @return Result of the ListTrafficPolicyVersions operation returned by the
 *         service.
 * @throws InvalidInputException
 *         Some value specified in the request is invalid or the XML
 *         document is malformed.
 * @throws NoSuchTrafficPolicyException
 *         No traffic policy exists with the specified ID.
* @sample AmazonRoute53.ListTrafficPolicyVersions
 */
@Override
public ListTrafficPolicyVersionsResult listTrafficPolicyVersions(
        ListTrafficPolicyVersionsRequest listTrafficPolicyVersionsRequest) {
    ExecutionContext executionContext = createExecutionContext(listTrafficPolicyVersionsRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext
            .getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<ListTrafficPolicyVersionsRequest> request = null;
    Response<ListTrafficPolicyVersionsResult> response = null;
    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new ListTrafficPolicyVersionsRequestMarshaller()
                    .marshall(super
                            .beforeMarshalling(listTrafficPolicyVersionsRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
        } finally {
            // Marshall timer is closed in a finally so it pairs with its
            // startEvent even when marshalling throws.
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }
        StaxResponseHandler<ListTrafficPolicyVersionsResult> responseHandler = new StaxResponseHandler<ListTrafficPolicyVersionsResult>(
                new ListTrafficPolicyVersionsResultStaxUnmarshaller());
        response = invoke(request, responseHandler, executionContext);
        return response.getAwsResponse();
    } finally {
        // Final metrics are reported on both success and failure paths.
        endClientExecution(awsRequestMetrics, request, response);
    }
}

/**
 * <p>
 * This action updates an existing health check.
 * </p>
 * <p>
 * To update a health check, send a <code>POST</code> request to the
 * <code>/<i>Route 53 API version</i>/healthcheck/<i>health check ID</i></code>
 * resource. The request body must include a document with an
 * <code>UpdateHealthCheckRequest</code> element. The response returns an
 * <code>UpdateHealthCheckResponse</code> element, which contains metadata
 * about the health check.
 * </p>
 *
 * @param updateHealthCheckRequest
 *        A complex type that contains information about the request to
 *        update a health check.
 * @return Result of the UpdateHealthCheck operation returned by the
 *         service.
* @throws NoSuchHealthCheckException * The health check you are trying to get or delete does not exist. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @throws HealthCheckVersionMismatchException * @sample AmazonRoute53.UpdateHealthCheck */ @Override public UpdateHealthCheckResult updateHealthCheck( UpdateHealthCheckRequest updateHealthCheckRequest) { ExecutionContext executionContext = createExecutionContext(updateHealthCheckRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<UpdateHealthCheckRequest> request = null; Response<UpdateHealthCheckResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new UpdateHealthCheckRequestMarshaller() .marshall(super .beforeMarshalling(updateHealthCheckRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<UpdateHealthCheckResult> responseHandler = new StaxResponseHandler<UpdateHealthCheckResult>( new UpdateHealthCheckResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * To update the hosted zone comment, send a <code>POST</code> request to * the * <code>/<i>Route 53 API version</i>/hostedzone/<i>hosted zone ID</i></code> * resource. The request body must include a document with a * <code>UpdateHostedZoneCommentRequest</code> element. The response to this * request includes the modified <code>HostedZone</code> element. 
* </p> * <note> The comment can have a maximum length of 256 characters.</note> * * @param updateHostedZoneCommentRequest * A complex type that contains information about the request to * update a hosted zone comment. * @return Result of the UpdateHostedZoneComment operation returned by the * service. * @throws NoSuchHostedZoneException * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @sample AmazonRoute53.UpdateHostedZoneComment */ @Override public UpdateHostedZoneCommentResult updateHostedZoneComment( UpdateHostedZoneCommentRequest updateHostedZoneCommentRequest) { ExecutionContext executionContext = createExecutionContext(updateHostedZoneCommentRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<UpdateHostedZoneCommentRequest> request = null; Response<UpdateHostedZoneCommentResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new UpdateHostedZoneCommentRequestMarshaller() .marshall(super .beforeMarshalling(updateHostedZoneCommentRequest)); // Binds the request metrics to the current request. request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<UpdateHostedZoneCommentResult> responseHandler = new StaxResponseHandler<UpdateHostedZoneCommentResult>( new UpdateHostedZoneCommentResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * Updates the comment for a specified traffic policy version. * </p> * <p> * To update the comment, send a <code>POST</code> request to the * <code>/<i>Route 53 API version</i>/trafficpolicy/</code> resource. 
* </p> * <p> * The request body must include a document with an * <code>UpdateTrafficPolicyCommentRequest</code> element. * </p> * * @param updateTrafficPolicyCommentRequest * A complex type that contains information about the traffic policy * for which you want to update the comment. * @return Result of the UpdateTrafficPolicyComment operation returned by * the service. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @throws NoSuchTrafficPolicyException * No traffic policy exists with the specified ID. * @throws ConcurrentModificationException * Another user submitted a request to update the object at the same * time that you did. Retry the request. * @sample AmazonRoute53.UpdateTrafficPolicyComment */ @Override public UpdateTrafficPolicyCommentResult updateTrafficPolicyComment( UpdateTrafficPolicyCommentRequest updateTrafficPolicyCommentRequest) { ExecutionContext executionContext = createExecutionContext(updateTrafficPolicyCommentRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<UpdateTrafficPolicyCommentRequest> request = null; Response<UpdateTrafficPolicyCommentResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new UpdateTrafficPolicyCommentRequestMarshaller() .marshall(super .beforeMarshalling(updateTrafficPolicyCommentRequest)); // Binds the request metrics to the current request. 
request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<UpdateTrafficPolicyCommentResult> responseHandler = new StaxResponseHandler<UpdateTrafficPolicyCommentResult>( new UpdateTrafficPolicyCommentResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * <p> * Updates the resource record sets in a specified hosted zone that were * created based on the settings in a specified traffic policy version. * </p> * <important>The DNS type of the resource record sets that you're updating * must match the DNS type in the JSON document that is associated with the * traffic policy version that you're using to update the traffic policy * instance.</important> * <p> * When you update a traffic policy instance, Amazon Route 53 continues to * respond to DNS queries for the root resource record set name (such as * example.com) while it replaces one group of resource record sets with * another. Amazon Route 53 performs the following operations: * </p> * <ol> * <li>Amazon Route 53 creates a new group of resource record sets based on * the specified traffic policy. This is true regardless of how substantial * the differences are between the existing resource record sets and the new * resource record sets.</li> * <li>When all of the new resource record sets have been created, Amazon * Route 53 starts to respond to DNS queries for the root resource record * set name (such as example.com) by using the new resource record sets.</li> * <li>Amazon Route 53 deletes the old group of resource record sets that * are associated with the root resource record set name.</li> * </ol> * <p> * To update a traffic policy instance, send a <code>POST</code> request to * the * <code>/<i>Route 53 API version</i>/trafficpolicyinstance/<i>traffic policy ID</i></code> * resource. 
The request body must include a document with an * <code>UpdateTrafficPolicyInstanceRequest</code> element. * </p> * * @param updateTrafficPolicyInstanceRequest * A complex type that contains information about the resource record * sets that you want to update based on a specified traffic policy * instance. * @return Result of the UpdateTrafficPolicyInstance operation returned by * the service. * @throws InvalidInputException * Some value specified in the request is invalid or the XML * document is malformed. * @throws NoSuchTrafficPolicyException * No traffic policy exists with the specified ID. * @throws NoSuchTrafficPolicyInstanceException * No traffic policy instance exists with the specified ID. * @throws PriorRequestNotCompleteException * The request was rejected because Amazon Route 53 was still * processing a prior request. * @throws ConflictingTypesException * You tried to update a traffic policy instance by using a traffic * policy version that has a different DNS type than the current * type for the instance. You specified the type in the JSON * document in the <code>CreateTrafficPolicy</code> or * <code>CreateTrafficPolicyVersion</code>request. * @sample AmazonRoute53.UpdateTrafficPolicyInstance */ @Override public UpdateTrafficPolicyInstanceResult updateTrafficPolicyInstance( UpdateTrafficPolicyInstanceRequest updateTrafficPolicyInstanceRequest) { ExecutionContext executionContext = createExecutionContext(updateTrafficPolicyInstanceRequest); AWSRequestMetrics awsRequestMetrics = executionContext .getAwsRequestMetrics(); awsRequestMetrics.startEvent(Field.ClientExecuteTime); Request<UpdateTrafficPolicyInstanceRequest> request = null; Response<UpdateTrafficPolicyInstanceResult> response = null; try { awsRequestMetrics.startEvent(Field.RequestMarshallTime); try { request = new UpdateTrafficPolicyInstanceRequestMarshaller() .marshall(super .beforeMarshalling(updateTrafficPolicyInstanceRequest)); // Binds the request metrics to the current request. 
request.setAWSRequestMetrics(awsRequestMetrics); } finally { awsRequestMetrics.endEvent(Field.RequestMarshallTime); } StaxResponseHandler<UpdateTrafficPolicyInstanceResult> responseHandler = new StaxResponseHandler<UpdateTrafficPolicyInstanceResult>( new UpdateTrafficPolicyInstanceResultStaxUnmarshaller()); response = invoke(request, responseHandler, executionContext); return response.getAwsResponse(); } finally { endClientExecution(awsRequestMetrics, request, response); } } /** * Returns additional metadata for a previously executed successful, * request, typically used for debugging issues where a service isn't acting * as expected. This data isn't considered part of the result data returned * by an operation, so it's available through this separate, diagnostic * interface. * <p> * Response metadata is only cached for a limited period of time, so if you * need to access this extra diagnostic information for an executed request, * you should use this method to retrieve it as soon as possible after * executing the request. * * @param request * The originally executed request * * @return The response metadata for the specified request, or null if none * is available. */ public ResponseMetadata getCachedResponseMetadata( AmazonWebServiceRequest request) { return client.getResponseMetadataForRequest(request); } /** * Normal invoke with authentication. Credentials are required and may be * overriden at the request level. **/ private <X, Y extends AmazonWebServiceRequest> Response<X> invoke( Request<Y> request, HttpResponseHandler<AmazonWebServiceResponse<X>> responseHandler, ExecutionContext executionContext) { executionContext.setCredentialsProvider(CredentialUtils .getCredentialsProvider(request.getOriginalRequest(), awsCredentialsProvider)); return doInvoke(request, responseHandler, executionContext); } /** * Invoke with no authentication. Credentials are not required and any * credentials set on the client or request will be ignored for this * operation. 
**/ private <X, Y extends AmazonWebServiceRequest> Response<X> anonymousInvoke( Request<Y> request, HttpResponseHandler<AmazonWebServiceResponse<X>> responseHandler, ExecutionContext executionContext) { return doInvoke(request, responseHandler, executionContext); } /** * Invoke the request using the http client. Assumes credentials (or lack * thereof) have been configured in the ExecutionContext beforehand. **/ private <X, Y extends AmazonWebServiceRequest> Response<X> doInvoke( Request<Y> request, HttpResponseHandler<AmazonWebServiceResponse<X>> responseHandler, ExecutionContext executionContext) { request.setEndpoint(endpoint); request.setTimeOffset(timeOffset); DefaultErrorResponseHandler errorResponseHandler = new DefaultErrorResponseHandler( exceptionUnmarshallers); return client.execute(request, responseHandler, errorResponseHandler, executionContext); } }
apache-2.0
jprante/elasticsearch-server
server/src/test/java/org/elasticsearch/test/action/search/ClearScrollRequestTests.java
5254
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.test.action.search;

import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.testframework.ESTestCase;

import java.io.IOException;

import static org.elasticsearch.testframework.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.startsWith;

/**
 * Unit tests for {@link ClearScrollRequest} XContent parsing and rendering.
 */
public class ClearScrollRequestTests extends ESTestCase {

    /** Parsing an array-valued {@code scroll_id} field replaces any pre-existing scroll ids. */
    public void testFromXContent() throws Exception {
        ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
        if (randomBoolean()) {
            // test that existing values get overridden
            clearScrollRequest = createClearScrollRequest();
        }
        try (XContentParser parser = createParser(XContentFactory.jsonBuilder()
                .startObject()
                .array("scroll_id", "value_1", "value_2")
                .endObject())) {
            clearScrollRequest.fromXContent(parser);
        }
        assertThat(clearScrollRequest.scrollIds(), contains("value_1", "value_2"));
    }

    /** A scalar {@code scroll_id} value (not wrapped in an array) is also accepted. */
    public void testFromXContentWithoutArray() throws Exception {
        ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
        if (randomBoolean()) {
            // test that existing values get overridden
            clearScrollRequest = createClearScrollRequest();
        }
        try (XContentParser parser = createParser(XContentFactory.jsonBuilder()
                .startObject()
                .field("scroll_id", "value_1")
                .endObject())) {
            clearScrollRequest.fromXContent(parser);
        }
        assertThat(clearScrollRequest.scrollIds(), contains("value_1"));
    }

    /** An unknown field in the request body is rejected with an explanatory message. */
    public void testFromXContentWithUnknownParamThrowsException() throws Exception {
        // BUG FIX: this parser was previously never closed; use try-with-resources like
        // the other tests so it is released even when an assertion fails.
        try (XContentParser invalidContent = createParser(XContentFactory.jsonBuilder()
                .startObject()
                .array("scroll_id", "value_1", "value_2")
                .field("unknown", "keyword")
                .endObject())) {
            ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
            Exception e = expectThrows(IllegalArgumentException.class,
                    () -> clearScrollRequest.fromXContent(invalidContent));
            assertThat(e.getMessage(), startsWith("Unknown parameter [unknown]"));
        }
    }

    /** The request renders as a JSON object with a {@code scroll_id} array. */
    public void testToXContent() throws IOException {
        ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
        clearScrollRequest.addScrollId("SCROLL_ID");
        try (XContentBuilder builder = JsonXContent.contentBuilder()) {
            clearScrollRequest.toXContent(builder, ToXContent.EMPTY_PARAMS);
            assertEquals("{\"scroll_id\":[\"SCROLL_ID\"]}", Strings.toString(builder));
        }
    }

    /** Round-trip: rendering then re-parsing yields an equivalent request. */
    public void testFromAndToXContent() throws IOException {
        XContentType xContentType = randomFrom(XContentType.values());
        ClearScrollRequest originalRequest = createClearScrollRequest();
        BytesReference originalBytes = toShuffledXContent(originalRequest, xContentType,
                ToXContent.EMPTY_PARAMS, randomBoolean());
        ClearScrollRequest parsedRequest = new ClearScrollRequest();
        try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) {
            parsedRequest.fromXContent(parser);
        }
        assertEquals(originalRequest.scrollIds(), parsedRequest.scrollIds());
        BytesReference parsedBytes = XContentHelper.toXContent(parsedRequest, xContentType,
                randomBoolean());
        assertToXContentEquivalent(originalBytes, parsedBytes, xContentType);
    }

    /**
     * Builds a request populated with 1-10 random scroll ids, for use as a
     * non-empty starting point in the parsing tests.
     */
    public static ClearScrollRequest createClearScrollRequest() {
        ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
        int numScrolls = randomIntBetween(1, 10);
        for (int i = 0; i < numScrolls; i++) {
            clearScrollRequest.addScrollId(randomAlphaOfLengthBetween(3, 10));
        }
        return clearScrollRequest;
    }
}
apache-2.0
Stratio/Explorer
cassandra/src/test/java/com/stratio/explorer/cassandra/functions/DefinitionToNameFunctionTest.java
1255
/** * Copyright (C) 2015 Stratio (http://stratio.com) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.stratio.explorer.cassandra.functions; import static org.junit.Assert.assertThat; import static org.hamcrest.Matchers.is; import com.stratio.explorer.cassandra.doubles.DoubleDefinition; import org.junit.Test; /** * Created by afidalgo on 14/10/15. */ public class DefinitionToNameFunctionTest { @Test public void whenCallTransformFunction(){ String name =""; DefinitionToNameFunction function = new DefinitionToNameFunction(); assertThat("Result should be equals name of column Definition",function.transform(new DoubleDefinition().buildDefinitionWithName(name)), is(name)); } }
apache-2.0
laizhihuan/java-test-demo
simple-actor/src/main/java/com/ibm/actor/MessageEvent.java
612
package com.ibm.actor;

import java.util.EventObject;

/**
 * Sent when a message is received.
 *
 * @author BFEIGENB
 */
public class MessageEvent extends EventObject {

    // EventObject implements Serializable; declare an explicit version id so
    // serialized form stays stable across recompiles.
    private static final long serialVersionUID = 1L;

    /**
     * Possible message events.
     *
     * @author BFEIGENB
     */
    public static enum MessageStatus { SENT, DELIVERED, COMPLETED, FAILED }

    protected MessageStatus status;
    protected Message message;

    /**
     * Creates an event describing a state change of the given message.
     *
     * @param source the object on which the event initially occurred
     * @param m      the message this event refers to
     * @param status the new delivery status of the message
     */
    public MessageEvent(Object source, Message m, MessageStatus status) {
        super(source);
        this.message = m;
        this.status = status;
    }

    /** Returns the delivery status carried by this event. */
    public MessageStatus getStatus() {
        return status;
    }

    /**
     * Returns the message this event refers to.
     * BUG FIX: the {@code message} field previously had no accessor at all,
     * forcing listeners outside the package/hierarchy to ignore it.
     */
    public Message getMessage() {
        return message;
    }
}
apache-2.0
googleinterns/play-movies-2020-intern
server/src/main/java/com/google/moviestvsentiments/account/Account.java
1897
package com.google.moviestvsentiments.account;

import javax.persistence.Entity;
import javax.persistence.Id;
import java.time.Instant;
import java.util.Objects;

/**
 * A record in the accounts database table.
 */
@Entity
public class Account {

    @Id
    private String name;

    private Instant timestamp;

    // The default constructor is required by the Spring JPA.
    protected Account() {}

    private Account(String name, Instant timestamp) {
        this.name = name;
        this.timestamp = timestamp;
    }

    /**
     * Creates a new Account with the given name and timestamp.
     * @param name The name for the new account.
     * @param timestamp The timestamp for the new account.
     * @return A new Account with the given name and timestamp.
     */
    public static Account create(String name, Instant timestamp) {
        return new Account(name, timestamp);
    }

    /**
     * Returns the account name.
     */
    public String getName() {
        return name;
    }

    /**
     * Sets the account name.
     * @param name The new name for the account.
     */
    public void setName(String name) {
        this.name = name;
    }

    /**
     * Returns the account's timestamp.
     */
    public Instant getTimestamp() {
        return timestamp;
    }

    /**
     * Sets the account's timestamp.
     * @param timestamp The new timestamp for the account.
     */
    public void setTimestamp(Instant timestamp) {
        this.timestamp = timestamp;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        Account account = (Account) o;
        // BUG FIX: compare with Objects.equals so a partially-initialized entity
        // (null name/timestamp, reachable via the protected no-arg JPA constructor)
        // cannot throw NullPointerException. hashCode() was already null-safe.
        return Objects.equals(name, account.name)
                && Objects.equals(timestamp, account.timestamp);
    }

    @Override
    public int hashCode() {
        return Objects.hash(name, timestamp);
    }
}
apache-2.0
ytfei/guava
guava-tests/test/com/google/common/collect/TreeMultimapNaturalTest.java
20576
/* * Copyright (C) 2007 The Guava Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.common.collect; import static com.google.common.base.Preconditions.checkArgument; import static org.junit.contrib.truth.Truth.ASSERT; import com.google.common.annotations.GwtCompatible; import com.google.common.annotations.GwtIncompatible; import com.google.common.collect.testing.DerivedComparable; import com.google.common.collect.testing.Helpers; import com.google.common.collect.testing.NavigableMapTestSuiteBuilder; import com.google.common.collect.testing.NavigableSetTestSuiteBuilder; import com.google.common.collect.testing.SampleElements; import com.google.common.collect.testing.TestSortedMapGenerator; import com.google.common.collect.testing.TestStringSetGenerator; import com.google.common.collect.testing.TestStringSortedSetGenerator; import com.google.common.collect.testing.features.CollectionFeature; import com.google.common.collect.testing.features.CollectionSize; import com.google.common.collect.testing.features.MapFeature; import com.google.common.collect.testing.google.SortedSetMultimapTestSuiteBuilder; import com.google.common.collect.testing.google.TestStringSetMultimapGenerator; import com.google.common.testing.SerializableTester; import junit.framework.Test; import junit.framework.TestSuite; import java.lang.reflect.Method; import java.util.Arrays; import java.util.Collection; import java.util.Comparator; import java.util.Iterator; import java.util.List; 
import java.util.Map; import java.util.Map.Entry; import java.util.NavigableMap; import java.util.NavigableSet; import java.util.Set; import java.util.SortedMap; import java.util.SortedSet; /** * Unit tests for {@code TreeMultimap} with natural ordering. * * @author Jared Levy */ @GwtCompatible(emulated = true) public class TreeMultimapNaturalTest extends AbstractSetMultimapTest { @GwtIncompatible("suite") public static Test suite() { TestSuite suite = new TestSuite(); // TODO(user): should we force TreeMultimap to be more thorough about checking nulls? suite.addTest(SortedSetMultimapTestSuiteBuilder.using(new TestStringSetMultimapGenerator() { @Override protected SetMultimap<String, String> create(Entry<String, String>[] entries) { SetMultimap<String, String> multimap = TreeMultimap.create( Ordering.natural().nullsFirst(), Ordering.natural().nullsFirst()); for (Entry<String, String> entry : entries) { multimap.put(entry.getKey(), entry.getValue()); } return multimap; } @Override public Iterable<Entry<String, String>> order(List<Entry<String, String>> insertionOrder) { return new Ordering<Entry<String, String>>() { @Override public int compare(Entry<String, String> left, Entry<String, String> right) { return ComparisonChain.start() .compare(left.getKey(), right.getKey(), Ordering.natural().nullsFirst()) .compare(left.getValue(), right.getValue(), Ordering.natural().nullsFirst()) .result(); } }.sortedCopy(insertionOrder); } }) .named("TreeMultimap nullsFirst") .withFeatures( MapFeature.ALLOWS_NULL_KEYS, MapFeature.ALLOWS_NULL_VALUES, MapFeature.GENERAL_PURPOSE, MapFeature.FAILS_FAST_ON_CONCURRENT_MODIFICATION, CollectionFeature.KNOWN_ORDER, CollectionFeature.SERIALIZABLE, CollectionSize.ANY) .createTestSuite()); suite.addTest(NavigableSetTestSuiteBuilder.using(new TestStringSortedSetGenerator() { @Override protected NavigableSet<String> create(String[] elements) { TreeMultimap<String, Integer> multimap = TreeMultimap.create( Ordering.natural().nullsFirst(), 
Ordering.natural()); for (int i = 0; i < elements.length; i++) { multimap.put(elements[i], i); } return multimap.keySet(); } @Override public List<String> order(List<String> insertionOrder) { return Ordering.natural().nullsFirst().sortedCopy(insertionOrder); } }) .named("TreeMultimap.keySet") .withFeatures( CollectionFeature.ALLOWS_NULL_VALUES, CollectionFeature.SUPPORTS_REMOVE, CollectionFeature.KNOWN_ORDER, CollectionSize.ANY) .createTestSuite()); suite.addTest(NavigableMapTestSuiteBuilder.using( new TestSortedMapGenerator<String, Collection<String>>() { @Override public String[] createKeyArray(int length) { return new String[length]; } @SuppressWarnings("unchecked") @Override public Collection<String>[] createValueArray(int length) { return new Collection[length]; } @Override public SampleElements<Entry<String, Collection<String>>> samples() { return new SampleElements<Entry<String, Collection<String>>>( Helpers.mapEntry("a", (Collection<String>) ImmutableSortedSet.of("alex")), Helpers.mapEntry("b", (Collection<String>) ImmutableSortedSet.of("bob", "bagel")), Helpers.mapEntry("c", (Collection<String>) ImmutableSortedSet.of("carl", "carol")), Helpers.mapEntry("d", (Collection<String>) ImmutableSortedSet.of("david", "dead")), Helpers.mapEntry("e", (Collection<String>) ImmutableSortedSet.of("eric", "elaine"))); } @SuppressWarnings("unchecked") @Override public Entry<String, Collection<String>>[] createArray(int length) { return new Entry[length]; } @Override public Iterable<Entry<String, Collection<String>>> order( List<Entry<String, Collection<String>>> insertionOrder) { return new Ordering<Entry<String, ?>>() { @Override public int compare(Entry<String, ?> left, Entry<String, ?> right) { return left.getKey().compareTo(right.getKey()); } }.sortedCopy(insertionOrder); } @Override public NavigableMap<String, Collection<String>> create(Object... 
elements) { TreeMultimap<String, String> multimap = TreeMultimap.create(); for (Object o : elements) { @SuppressWarnings("unchecked") Entry<String, Collection<String>> entry = (Entry<String, Collection<String>>) o; checkArgument(!multimap.containsKey(entry.getKey())); multimap.putAll(entry.getKey(), entry.getValue()); } return multimap.asMap(); } @Override public Entry<String, Collection<String>> belowSamplesLesser() { return Helpers.mapEntry("-- a", (Collection<String>) ImmutableSortedSet.of("--below")); } @Override public Entry<String, Collection<String>> belowSamplesGreater() { return Helpers.mapEntry("-- b", (Collection<String>) ImmutableSortedSet.of("--below")); } @Override public Entry<String, Collection<String>> aboveSamplesLesser() { return Helpers.mapEntry("~~ b", (Collection<String>) ImmutableSortedSet.of("~above")); } @Override public Entry<String, Collection<String>> aboveSamplesGreater() { return Helpers.mapEntry("~~ c", (Collection<String>) ImmutableSortedSet.of("~above")); } }) .named("TreeMultimap.asMap") .withFeatures( MapFeature.SUPPORTS_REMOVE, MapFeature.REJECTS_DUPLICATES_AT_CREATION, CollectionFeature.KNOWN_ORDER, CollectionSize.ANY) .createTestSuite()); suite.addTest(NavigableSetTestSuiteBuilder.using(new TestStringSetGenerator() { @Override protected Set<String> create(String[] elements) { TreeMultimap<Integer, String> multimap = TreeMultimap.create( Ordering.natural(), Ordering.natural().nullsFirst()); multimap.putAll(1, Arrays.asList(elements)); return multimap.get(1); } @Override public List<String> order(List<String> insertionOrder) { return Ordering.natural().nullsFirst().sortedCopy(insertionOrder); } }) .named("TreeMultimap.get") .withFeatures( CollectionFeature.ALLOWS_NULL_VALUES, CollectionFeature.GENERAL_PURPOSE, CollectionFeature.KNOWN_ORDER, CollectionSize.ANY) .createTestSuite()); suite.addTest(NavigableSetTestSuiteBuilder.using(new TestStringSetGenerator() { @Override protected Set<String> create(String[] elements) { 
TreeMultimap<Integer, String> multimap = TreeMultimap.create( Ordering.natural(), Ordering.natural().nullsFirst()); multimap.putAll(1, Arrays.asList(elements)); return (Set<String>) multimap.asMap().entrySet().iterator().next().getValue(); } @Override public List<String> order(List<String> insertionOrder) { return Ordering.natural().nullsFirst().sortedCopy(insertionOrder); } }) .named("TreeMultimap.asMap.entrySet collection") .withFeatures( CollectionFeature.ALLOWS_NULL_VALUES, CollectionFeature.GENERAL_PURPOSE, CollectionFeature.KNOWN_ORDER, CollectionSize.ONE, CollectionSize.SEVERAL) .createTestSuite()); suite.addTestSuite(TreeMultimapNaturalTest.class); return suite; } @Override protected Multimap<String, Integer> create() { return TreeMultimap.create(); } /* Null keys and values aren't supported. */ @Override protected String nullKey() { return "null"; } @Override protected Integer nullValue() { return 42; } /** * Create and populate a {@code TreeMultimap} with the natural ordering of * keys and values. 
*/ private TreeMultimap<String, Integer> createPopulate() { TreeMultimap<String, Integer> multimap = TreeMultimap.create(); multimap.put("google", 2); multimap.put("google", 6); multimap.put("foo", 3); multimap.put("foo", 1); multimap.put("foo", 7); multimap.put("tree", 4); multimap.put("tree", 0); return multimap; } public void testToString() { assertEquals("{bar=[1, 2, 3], foo=[-1, 1, 2, 3, 4]}", createSample().toString()); } public void testOrderedGet() { TreeMultimap<String, Integer> multimap = createPopulate(); ASSERT.that(multimap.get("foo")).hasContentsInOrder(1, 3, 7); ASSERT.that(multimap.get("google")).hasContentsInOrder(2, 6); ASSERT.that(multimap.get("tree")).hasContentsInOrder(0, 4); } public void testOrderedKeySet() { TreeMultimap<String, Integer> multimap = createPopulate(); ASSERT.that(multimap.keySet()).hasContentsInOrder("foo", "google", "tree"); } public void testOrderedAsMapEntries() { TreeMultimap<String, Integer> multimap = createPopulate(); Iterator<Map.Entry<String, Collection<Integer>>> iterator = multimap.asMap().entrySet().iterator(); Map.Entry<String, Collection<Integer>> entry = iterator.next(); assertEquals("foo", entry.getKey()); ASSERT.that(entry.getValue()).hasContentsAnyOrder(1, 3, 7); entry = iterator.next(); assertEquals("google", entry.getKey()); ASSERT.that(entry.getValue()).hasContentsAnyOrder(2, 6); entry = iterator.next(); assertEquals("tree", entry.getKey()); ASSERT.that(entry.getValue()).hasContentsAnyOrder(0, 4); } public void testOrderedEntries() { TreeMultimap<String, Integer> multimap = createPopulate(); ASSERT.that(multimap.entries()).hasContentsInOrder( Maps.immutableEntry("foo", 1), Maps.immutableEntry("foo", 3), Maps.immutableEntry("foo", 7), Maps.immutableEntry("google", 2), Maps.immutableEntry("google", 6), Maps.immutableEntry("tree", 0), Maps.immutableEntry("tree", 4)); } public void testOrderedValues() { TreeMultimap<String, Integer> multimap = createPopulate(); 
ASSERT.that(multimap.values()).hasContentsInOrder( 1, 3, 7, 2, 6, 0, 4); } public void testMultimapConstructor() { Multimap<String, Integer> multimap = createSample(); TreeMultimap<String, Integer> copy = TreeMultimap.create(multimap); assertEquals(multimap, copy); } private static final Comparator<Double> KEY_COMPARATOR = Ordering.natural(); private static final Comparator<Double> VALUE_COMPARATOR = Ordering.natural().reverse().nullsFirst(); /** * Test that creating one TreeMultimap from another does not copy the * comparators from the source TreeMultimap. */ public void testCreateFromTreeMultimap() { Multimap<Double, Double> tree = TreeMultimap.create(KEY_COMPARATOR, VALUE_COMPARATOR); tree.put(1.0, 2.0); tree.put(2.0, 3.0); tree.put(3.0, 4.0); tree.put(4.0, 5.0); TreeMultimap<Double, Double> copyFromTree = TreeMultimap.create(tree); assertEquals(tree, copyFromTree); assertSame(Ordering.natural(), copyFromTree.keyComparator()); assertSame(Ordering.natural(), copyFromTree.valueComparator()); assertSame(Ordering.natural(), copyFromTree.get(1.0).comparator()); } /** * Test that creating one TreeMultimap from a non-TreeMultimap * results in natural ordering. */ public void testCreateFromHashMultimap() { Multimap<Double, Double> hash = HashMultimap.create(); hash.put(1.0, 2.0); hash.put(2.0, 3.0); hash.put(3.0, 4.0); hash.put(4.0, 5.0); TreeMultimap<Double, Double> copyFromHash = TreeMultimap.create(hash); assertEquals(hash, copyFromHash); assertEquals(Ordering.natural(), copyFromHash.keyComparator()); assertEquals(Ordering.natural(), copyFromHash.valueComparator()); } /** * Test that creating one TreeMultimap from a SortedSetMultimap uses natural * ordering. 
*/ public void testCreateFromSortedSetMultimap() { SortedSetMultimap<Double, Double> tree = TreeMultimap.create(KEY_COMPARATOR, VALUE_COMPARATOR); tree.put(1.0, 2.0); tree.put(2.0, 3.0); tree.put(3.0, 4.0); tree.put(4.0, 5.0); SortedSetMultimap<Double, Double> sorted = Multimaps.unmodifiableSortedSetMultimap(tree); TreeMultimap<Double, Double> copyFromSorted = TreeMultimap.create(sorted); assertEquals(tree, copyFromSorted); assertSame(Ordering.natural(), copyFromSorted.keyComparator()); assertSame(Ordering.natural(), copyFromSorted.valueComparator()); assertSame(Ordering.natural(), copyFromSorted.get(1.0).comparator()); } public void testComparators() { TreeMultimap<String, Integer> multimap = TreeMultimap.create(); assertEquals(Ordering.natural(), multimap.keyComparator()); assertEquals(Ordering.natural(), multimap.valueComparator()); } @GwtIncompatible("SerializableTester") public void testExplicitComparatorSerialization() { TreeMultimap<String, Integer> multimap = createPopulate(); TreeMultimap<String, Integer> copy = SerializableTester.reserializeAndAssert(multimap); ASSERT.that(copy.values()).hasContentsInOrder(1, 3, 7, 2, 6, 0, 4); ASSERT.that(copy.keySet()).hasContentsInOrder("foo", "google", "tree"); assertEquals(multimap.keyComparator(), copy.keyComparator()); assertEquals(multimap.valueComparator(), copy.valueComparator()); } @GwtIncompatible("SerializableTester") public void testTreeMultimapDerived() { TreeMultimap<DerivedComparable, DerivedComparable> multimap = TreeMultimap.create(); assertEquals(ImmutableMultimap.of(), multimap); multimap.put(new DerivedComparable("foo"), new DerivedComparable("f")); multimap.put(new DerivedComparable("foo"), new DerivedComparable("o")); multimap.put(new DerivedComparable("foo"), new DerivedComparable("o")); multimap.put(new DerivedComparable("bar"), new DerivedComparable("b")); multimap.put(new DerivedComparable("bar"), new DerivedComparable("a")); multimap.put(new DerivedComparable("bar"), new 
DerivedComparable("r")); ASSERT.that(multimap.keySet()).hasContentsInOrder( new DerivedComparable("bar"), new DerivedComparable("foo")); ASSERT.that(multimap.values()).hasContentsInOrder( new DerivedComparable("a"), new DerivedComparable("b"), new DerivedComparable("r"), new DerivedComparable("f"), new DerivedComparable("o")); assertEquals(Ordering.natural(), multimap.keyComparator()); assertEquals(Ordering.natural(), multimap.valueComparator()); SerializableTester.reserializeAndAssert(multimap); } @GwtIncompatible("SerializableTester") public void testTreeMultimapNonGeneric() { TreeMultimap<LegacyComparable, LegacyComparable> multimap = TreeMultimap.create(); assertEquals(ImmutableMultimap.of(), multimap); multimap.put(new LegacyComparable("foo"), new LegacyComparable("f")); multimap.put(new LegacyComparable("foo"), new LegacyComparable("o")); multimap.put(new LegacyComparable("foo"), new LegacyComparable("o")); multimap.put(new LegacyComparable("bar"), new LegacyComparable("b")); multimap.put(new LegacyComparable("bar"), new LegacyComparable("a")); multimap.put(new LegacyComparable("bar"), new LegacyComparable("r")); ASSERT.that(multimap.keySet()).hasContentsInOrder( new LegacyComparable("bar"), new LegacyComparable("foo")); ASSERT.that(multimap.values()).hasContentsInOrder( new LegacyComparable("a"), new LegacyComparable("b"), new LegacyComparable("r"), new LegacyComparable("f"), new LegacyComparable("o")); assertEquals(Ordering.natural(), multimap.keyComparator()); assertEquals(Ordering.natural(), multimap.valueComparator()); SerializableTester.reserializeAndAssert(multimap); } public void testTreeMultimapAsMapSorted() { TreeMultimap<String, Integer> multimap = createPopulate(); SortedMap<String, Collection<Integer>> asMap = multimap.asMap(); assertEquals(Ordering.natural(), asMap.comparator()); assertEquals("foo", asMap.firstKey()); assertEquals("tree", asMap.lastKey()); Set<Integer> fooValues = ImmutableSet.of(1, 3, 7); Set<Integer> googleValues = 
ImmutableSet.of(2, 6);
    Set<Integer> treeValues = ImmutableSet.of(4, 0);
    assertEquals(ImmutableMap.of("google", googleValues, "tree", treeValues),
        asMap.tailMap("g"));
    assertEquals(ImmutableMap.of("google", googleValues, "foo", fooValues),
        asMap.headMap("h"));
    assertEquals(ImmutableMap.of("google", googleValues),
        asMap.subMap("g", "h"));
  }

  /**
   * Clearing a tail-set view of the key set must remove the corresponding
   * entries from the multimap itself.
   */
  public void testTailSetClear() {
    TreeMultimap<String, Integer> multimap = TreeMultimap.create();
    multimap.put("a", 1);
    multimap.put("a", 11);
    multimap.put("b", 2);
    multimap.put("c", 3);
    multimap.put("d", 4);
    multimap.put("e", 5);
    multimap.put("e", 55);

    multimap.keySet().tailSet("d").clear();
    assertEquals(ImmutableSet.of("a", "b", "c"), multimap.keySet());
    assertEquals(4, multimap.size());
    assertEquals(4, multimap.values().size());
    assertEquals(4, multimap.keys().size());
  }

  @GwtIncompatible("reflection")
  public void testKeySetBridgeMethods() {
    for (Method m : TreeMultimap.class.getMethods()) {
      if (m.getName().equals("keySet")
          && m.getReturnType().equals(SortedSet.class)) {
        return;
      }
    }
    fail("No bridge method found");
  }

  @GwtIncompatible("reflection")
  public void testAsMapBridgeMethods() {
    for (Method m : TreeMultimap.class.getMethods()) {
      if (m.getName().equals("asMap")
          && m.getReturnType().equals(SortedMap.class)) {
        return;
      }
    }
    // BUG FIX: this fail() was missing, so the test silently passed even when
    // no asMap() bridge method returning SortedMap existed. The sibling tests
    // testKeySetBridgeMethods/testGetBridgeMethods both fail in this case.
    fail("No bridge method found");
  }

  @GwtIncompatible("reflection")
  public void testGetBridgeMethods() {
    for (Method m : TreeMultimap.class.getMethods()) {
      if (m.getName().equals("get")
          && m.getReturnType().equals(SortedSet.class)) {
        return;
      }
    }
    fail("No bridge method found");
  }
}
apache-2.0
anylineorg/anyline
anyline-qq/src/main/java/org/anyline/qq/mp/util/QQMPUtil.java
2442
package org.anyline.qq.mp.util;

import java.net.URLEncoder;
import java.util.Hashtable;

import org.anyline.entity.DataRow;
import org.anyline.net.HttpUtil;
import org.anyline.util.BasicUtil;
import org.anyline.util.ConfigTable;
import org.anyline.util.regular.RegularUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Helper for the Tencent QQ Connect OAuth 2.0 login flow.
 * <p>
 * Instances are cached per configuration key ("default" when no key is given),
 * each bound to its own {@link QQMPConfig} (app id / api key / redirect URL).
 */
public class QQMPUtil {
	private static final Logger log = LoggerFactory.getLogger(QQMPUtil.class);
	private static final Hashtable<String, QQMPUtil> instances = new Hashtable<String, QQMPUtil>();
	private QQMPConfig config = null;

	/**
	 * Returns the utility bound to the "default" configuration.
	 */
	public static QQMPUtil getInstance() {
		return getInstance("default");
	}

	/**
	 * Returns the utility bound to the configuration registered under the given
	 * key, creating and caching it on first use.
	 *
	 * @param key configuration key; blank/null falls back to "default"
	 */
	public static QQMPUtil getInstance(String key) {
		if (BasicUtil.isEmpty(key)) {
			key = "default";
		}
		QQMPUtil util = instances.get(key);
		if (null == util) {
			// Double-checked creation: the original get/put sequence could
			// create two instances for the same key under concurrent access.
			synchronized (instances) {
				util = instances.get(key);
				if (null == util) {
					util = new QQMPUtil();
					util.config = QQMPConfig.getInstance(key);
					instances.put(key, util);
				}
			}
		}
		return util;
	}

	/**
	 * Exchanges an OAuth authorization code for the user's OPENID and UNIONID.
	 *
	 * @param code authorization code returned by QQ's OAuth redirect
	 * @return a DataRow with keys "OPENID" and "UNIONID" (values may be null if
	 *         the upstream call failed or returned an error payload)
	 */
	public DataRow getOpenId(String code) {
		DataRow row = new DataRow();
		// BUG FIX: previously read OAUTH_REDIRECT_URL from the *default*
		// config (QQMPConfig.getInstance()) even when this instance was built
		// for a different key, while APP_ID/API_KEY below came from this
		// instance's config. Use this instance's config consistently.
		String redirect = config.OAUTH_REDIRECT_URL;
		try {
			redirect = URLEncoder.encode(redirect, "UTF-8");
		} catch (Exception e) {
			// Log with cause instead of printStackTrace(); the un-encoded URL
			// is still usable for plain ASCII redirect URLs.
			log.warn("[QQ redirect url encode fail]", e);
		}
		// Step 1: exchange the authorization code for an access token.
		String url = "https://graph.qq.com/oauth2.0/token?grant_type=authorization_code&client_id="
				+ config.APP_ID + "&client_secret=" + config.API_KEY + "&code=" + code + "&redirect_uri=" + redirect;
		String txt = HttpUtil.get(url).getText();
		if (ConfigTable.isDebug() && log.isWarnEnabled()) {
			log.warn("[QQ登录][get accesstoken][txt:{}]", txt);
		}
		// Response shape: access_token=...&expires_in=...&refresh_token=...
		String accessToken = RegularUtil.cut(txt, "access_token=", "&");
		// Step 2: resolve openid and unionid from the access token.
		url = "https://graph.qq.com/oauth2.0/me?access_token=" + accessToken + "&unionid=1";
		txt = HttpUtil.get(url).getText();
		if (ConfigTable.isDebug() && log.isWarnEnabled()) {
			log.warn("[QQ登录][get openid][txt:{}]", txt);
		}
		// Response shape: callback( {"client_id":"...","openid":"...","unionid":"..."} );
		String openid = RegularUtil.cut(txt, "openid", ":", "\"", "\"");
		String unionid = RegularUtil.cut(txt, "unionid", ":", "\"", "\"");
		row.put("OPENID", openid);
		row.put("UNIONID", unionid);
		return row;
	}

	/**
	 * Alias for {@link #getOpenId(String)}: the same call resolves both ids.
	 */
	public DataRow getUnionId(String code) {
		return getOpenId(code);
	}
}
apache-2.0
tonit/karafonexam2
archetypes/itests/src/test/java/org/apache/karaf/archetypes/CommandArchetypeTest.java
1395
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.karaf.archetypes;

import java.util.Properties;

/**
 * Integration test that generates a project from the "archetypes-command"
 * Karaf archetype and verifies it builds.
 *
 * @author iocanel
 */
public class CommandArchetypeTest extends AbstractArchetypeTest {

    /**
     * Generates the command archetype with a sample scope/command/description
     * and delegates validation to {@link AbstractArchetypeTest}.
     */
    public void testCommand() throws Exception {
        Properties params = new Properties();
        params.setProperty("scope", "testscope");
        params.setProperty("command", "testcommand");
        params.setProperty("description", "testdescription");
        testKarafArchetype("archetypes-command", params);
    }
}
apache-2.0
g9yuayon/RxNetty
rx-netty/src/main/java/io/reactivex/netty/protocol/http/client/HttpClient.java
2476
/*
 * Copyright 2014 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.reactivex.netty.protocol.http.client;

import io.reactivex.netty.client.RxClient;
import rx.Observable;

/**
 * An HTTP client contract on top of {@link RxClient}, pairing typed requests
 * with typed responses.
 *
 * @param <I> The type of the content of request.
 * @param <O> The type of the content of response.
 */
public interface HttpClient<I, O> extends RxClient<HttpClientRequest<I>, HttpClientResponse<O>>{

    /**
     * Submits the given request using this client's default configuration and
     * emits the response as an {@link Observable}.
     */
    Observable<HttpClientResponse<O>> submit(HttpClientRequest<I> request);

    /**
     * Submits the given request, overriding this client's configuration with
     * the supplied per-call {@code config}.
     */
    Observable<HttpClientResponse<O>> submit(HttpClientRequest<I> request, ClientConfig config);

    /**
     * A configuration to be used for this client.
     */
    class HttpClientConfig extends ClientConfig {

        // Default User-Agent header value sent with requests.
        private String userAgent = "RxNetty Client";
        // Boxed so that "unset" (null) is distinguishable from explicit true/false.
        private Boolean followRedirect;

        protected HttpClientConfig() {
            // Only the builder can create this instance, so that we can change the constructor signature at will.
        }

        /** Returns the configured User-Agent header value. */
        public String getUserAgent() {
            return userAgent;
        }

        /** Returns the redirect-following flag; {@code null} means "not configured". */
        public Boolean getFollowRedirect() {
            return followRedirect;
        }

        /**
         * Builder for {@link HttpClientConfig}; the only way to create one.
         */
        public static class Builder extends AbstractBuilder<Builder, HttpClientConfig> {

            /**
             * Starts from {@code defaultConfig}, or a fresh config when
             * {@code null} is passed.
             */
            public Builder(HttpClientConfig defaultConfig) {
                super(null == defaultConfig ? new HttpClientConfig() : defaultConfig);
            }

            public Builder() {
                this(null);
            }

            /** Sets the User-Agent header value for the config being built. */
            public Builder userAgent(String userAgent) {
                config.userAgent = userAgent;
                return returnBuilder();
            }

            /** Sets whether the client should follow HTTP redirects. */
            public Builder setFollowRedirect(boolean value) {
                config.followRedirect = Boolean.valueOf(value);
                return returnBuilder();
            }

            /** Convenience factory for a config with all defaults. */
            public static HttpClientConfig newDefaultConfig() {
                return new Builder().build();
            }
        }
    }
}
apache-2.0
ChinmaySKulkarni/hbase
hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java
13136
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.util.Arrays;
import java.util.Set;

import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Scan.ReadType;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.visibility.Authorizations;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Assert;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;

// TODO: cover more test cases
@Category({ClientTests.class, SmallTests.class})
public class TestScan {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestScan.class);

  /** Attributes must survive a round-trip through the protobuf representation. */
  @Test
  public void testAttributesSerialization() throws IOException {
    Scan scan = new Scan();
    scan.setAttribute("attribute1", Bytes.toBytes("value1"));
    scan.setAttribute("attribute2", Bytes.toBytes("value2"));
    scan.setAttribute("attribute3", Bytes.toBytes("value3"));

    ClientProtos.Scan scanProto = ProtobufUtil.toScan(scan);

    Scan scan2 = ProtobufUtil.toScan(scanProto);

    Assert.assertNull(scan2.getAttribute("absent"));
    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"),
      scan2.getAttribute("attribute1")));
    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"),
      scan2.getAttribute("attribute2")));
    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value3"),
      scan2.getAttribute("attribute3")));
    Assert.assertEquals(3, scan2.getAttributesMap().size());
  }

  /** Building a Scan from a Get must carry over every configured field. */
  @Test
  public void testGetToScan() throws Exception {
    Get get = new Get(Bytes.toBytes(1));
    get.setCacheBlocks(true)
      .setConsistency(Consistency.TIMELINE)
      .setFilter(new FilterList())
      .setId("get")
      .setIsolationLevel(IsolationLevel.READ_COMMITTED)
      .setLoadColumnFamiliesOnDemand(false)
      .setMaxResultsPerColumnFamily(1000)
      .readVersions(9999)
      .setRowOffsetPerColumnFamily(5)
      .setTimeRange(0, 13)
      .setAttribute("att_v0", Bytes.toBytes("att_v0"))
      .setColumnFamilyTimeRange(Bytes.toBytes("cf"), 0, 123)
      .setReplicaId(3)
      .setACL("test_user", new Permission(Permission.Action.READ))
      .setAuthorizations(new Authorizations("test_label"))
      .setPriority(3);

    Scan scan = new Scan(get);
    assertEquals(get.getCacheBlocks(), scan.getCacheBlocks());
    assertEquals(get.getConsistency(), scan.getConsistency());
    assertEquals(get.getFilter(), scan.getFilter());
    assertEquals(get.getId(), scan.getId());
    assertEquals(get.getIsolationLevel(), scan.getIsolationLevel());
    assertEquals(get.getLoadColumnFamiliesOnDemandValue(),
      scan.getLoadColumnFamiliesOnDemandValue());
    assertEquals(get.getMaxResultsPerColumnFamily(), scan.getMaxResultsPerColumnFamily());
    assertEquals(get.getMaxVersions(), scan.getMaxVersions());
    assertEquals(get.getRowOffsetPerColumnFamily(), scan.getRowOffsetPerColumnFamily());
    assertEquals(get.getTimeRange().getMin(), scan.getTimeRange().getMin());
    assertEquals(get.getTimeRange().getMax(), scan.getTimeRange().getMax());
    assertTrue(Bytes.equals(get.getAttribute("att_v0"), scan.getAttribute("att_v0")));
    assertEquals(get.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMin(),
      scan.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMin());
    assertEquals(get.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMax(),
      scan.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMax());
    assertEquals(get.getReplicaId(), scan.getReplicaId());
    // FIX: getACL() returns byte[]; assertEquals on arrays compares references
    // and only passed because Scan(Get) shares the attribute array. Compare
    // contents explicitly.
    assertArrayEquals(get.getACL(), scan.getACL());
    assertEquals(get.getAuthorizations().getLabels(), scan.getAuthorizations().getLabels());
    assertEquals(get.getPriority(), scan.getPriority());
  }

  /** Exercises add / override / remove semantics of scan attributes. */
  @Test
  public void testScanAttributes() {
    Scan scan = new Scan();
    Assert.assertTrue(scan.getAttributesMap().isEmpty());
    Assert.assertNull(scan.getAttribute("absent"));

    // setting a null value for an absent key is a no-op
    scan.setAttribute("absent", null);
    Assert.assertTrue(scan.getAttributesMap().isEmpty());
    Assert.assertNull(scan.getAttribute("absent"));

    // adding attribute
    scan.setAttribute("attribute1", Bytes.toBytes("value1"));
    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), scan.getAttribute("attribute1")));
    Assert.assertEquals(1, scan.getAttributesMap().size());
    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"),
      scan.getAttributesMap().get("attribute1")));

    // overriding attribute value
    scan.setAttribute("attribute1", Bytes.toBytes("value12"));
    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), scan.getAttribute("attribute1")));
    Assert.assertEquals(1, scan.getAttributesMap().size());
    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"),
      scan.getAttributesMap().get("attribute1")));

    // adding another attribute
    scan.setAttribute("attribute2", Bytes.toBytes("value2"));
    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), scan.getAttribute("attribute2")));
    Assert.assertEquals(2, scan.getAttributesMap().size());
    Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"),
      scan.getAttributesMap().get("attribute2")));

    // removing attribute
    scan.setAttribute("attribute2", null);
    Assert.assertNull(scan.getAttribute("attribute2"));
    Assert.assertEquals(1, scan.getAttributesMap().size());
    Assert.assertNull(scan.getAttributesMap().get("attribute2"));

    // removing non-existed attribute
    scan.setAttribute("attribute2", null);
    Assert.assertNull(scan.getAttribute("attribute2"));
    Assert.assertEquals(1, scan.getAttributesMap().size());
    Assert.assertNull(scan.getAttributesMap().get("attribute2"));

    // removing another attribute
    scan.setAttribute("attribute1", null);
    Assert.assertNull(scan.getAttribute("attribute1"));
    Assert.assertTrue(scan.getAttributesMap().isEmpty());
    Assert.assertNull(scan.getAttributesMap().get("attribute1"));
  }

  /** A null qualifier is a valid column specification and must be retained. */
  @Test
  public void testNullQualifier() {
    Scan scan = new Scan();
    byte[] family = Bytes.toBytes("family");
    scan.addColumn(family, null);
    Set<byte[]> qualifiers = scan.getFamilyMap().get(family);
    Assert.assertEquals(1, qualifiers.size());
  }

  /** All of these label strings must be accepted without throwing. */
  @Test
  public void testSetAuthorizations() {
    Scan scan = new Scan();
    try {
      scan.setAuthorizations(new Authorizations("\u002b|\u0029"));
      scan.setAuthorizations(new Authorizations("A", "B", "0123", "A0", "1A1", "_a"));
      scan.setAuthorizations(new Authorizations("A|B"));
      scan.setAuthorizations(new Authorizations("A&B"));
      scan.setAuthorizations(new Authorizations("!B"));
      scan.setAuthorizations(new Authorizations("A", "(A)"));
      scan.setAuthorizations(new Authorizations("A", "{A"));
      scan.setAuthorizations(new Authorizations(" "));
      scan.setAuthorizations(new Authorizations(":B"));
      scan.setAuthorizations(new Authorizations("-B"));
      scan.setAuthorizations(new Authorizations(".B"));
      scan.setAuthorizations(new Authorizations("/B"));
    } catch (IllegalArgumentException e) {
      fail("should not throw exception");
    }
  }

  /** Rows up to MAX_ROW_LENGTH are accepted; longer rows must be rejected. */
  @Test
  public void testSetStartRowAndSetStopRow() {
    Scan scan = new Scan();
    scan.setStartRow(null);
    scan.setStartRow(new byte[1]);
    scan.setStartRow(new byte[HConstants.MAX_ROW_LENGTH]);
    try {
      scan.setStartRow(new byte[HConstants.MAX_ROW_LENGTH+1]);
      fail("should've thrown exception");
    } catch (IllegalArgumentException iae) {
      // expected
    } catch (Exception e) {
      fail("expected IllegalArgumentException to be thrown");
    }

    scan.setStopRow(null);
    scan.setStopRow(new byte[1]);
    scan.setStopRow(new byte[HConstants.MAX_ROW_LENGTH]);
    try {
      scan.setStopRow(new byte[HConstants.MAX_ROW_LENGTH+1]);
      fail("should've thrown exception");
    } catch (IllegalArgumentException iae) {
      // expected
    } catch (Exception e) {
      fail("expected IllegalArgumentException to be thrown");
    }
  }

  /** The copy constructor must duplicate every field of the source Scan. */
  @Test
  public void testScanCopyConstructor() throws Exception {
    Scan scan = new Scan();

    scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"))
        .setACL("test_user", new Permission(Permission.Action.READ))
        .setAllowPartialResults(true)
        .setAsyncPrefetch(false)
        .setAttribute("test_key", Bytes.toBytes("test_value"))
        .setAuthorizations(new Authorizations("test_label"))
        .setBatch(10)
        .setCacheBlocks(false)
        .setCaching(10)
        .setConsistency(Consistency.TIMELINE)
        .setFilter(new FilterList())
        .setId("scan_copy_constructor")
        .setIsolationLevel(IsolationLevel.READ_COMMITTED)
        .setLimit(100)
        .setLoadColumnFamiliesOnDemand(false)
        .setMaxResultSize(100)
        .setMaxResultsPerColumnFamily(1000)
        .readVersions(9999)
        .setMvccReadPoint(5)
        .setNeedCursorResult(true)
        .setPriority(1)
        .setRaw(true)
        .setReplicaId(3)
        .setReversed(true)
        .setRowOffsetPerColumnFamily(5)
        .setRowPrefixFilter(Bytes.toBytes("row_"))
        .setScanMetricsEnabled(true)
        .setSmall(true)
        .setReadType(ReadType.STREAM)
        .withStartRow(Bytes.toBytes("row_1"))
        .withStopRow(Bytes.toBytes("row_2"))
        .setTimeRange(0, 13);

    // create a copy of existing scan object
    Scan scanCopy = new Scan(scan);

    // validate fields of copied scan object match with the original scan object.
    // FIX: byte[]-valued fields (ACL, attribute, family, start/stop row) were
    // compared with assertEquals, which on arrays is reference equality and
    // only passed when the copy shared the array. Compare contents instead.
    assertArrayEquals(scan.getACL(), scanCopy.getACL());
    assertEquals(scan.getAllowPartialResults(), scanCopy.getAllowPartialResults());
    assertArrayEquals(scan.getAttribute("test_key"), scanCopy.getAttribute("test_key"));
    assertEquals(scan.getAttributeSize(), scanCopy.getAttributeSize());
    assertEquals(scan.getAttributesMap(), scanCopy.getAttributesMap());
    assertEquals(scan.getAuthorizations().getLabels(), scanCopy.getAuthorizations().getLabels());
    assertEquals(scan.getBatch(), scanCopy.getBatch());
    assertEquals(scan.getCacheBlocks(), scanCopy.getCacheBlocks());
    assertEquals(scan.getCaching(), scanCopy.getCaching());
    assertEquals(scan.getConsistency(), scanCopy.getConsistency());
    assertEquals(scan.getFamilies().length, scanCopy.getFamilies().length);
    assertArrayEquals(scan.getFamilies()[0], scanCopy.getFamilies()[0]);
    // NOTE(review): family-map keys are byte[]; map equality here relies on the
    // copy sharing key arrays with the source — confirm against Scan(Scan).
    assertEquals(scan.getFamilyMap(), scanCopy.getFamilyMap());
    assertEquals(scan.getFilter(), scanCopy.getFilter());
    assertEquals(scan.getId(), scanCopy.getId());
    assertEquals(scan.getIsolationLevel(), scanCopy.getIsolationLevel());
    assertEquals(scan.getLimit(), scanCopy.getLimit());
    assertEquals(scan.getLoadColumnFamiliesOnDemandValue(),
      scanCopy.getLoadColumnFamiliesOnDemandValue());
    assertEquals(scan.getMaxResultSize(), scanCopy.getMaxResultSize());
    assertEquals(scan.getMaxResultsPerColumnFamily(), scanCopy.getMaxResultsPerColumnFamily());
    assertEquals(scan.getMaxVersions(), scanCopy.getMaxVersions());
    assertEquals(scan.getMvccReadPoint(), scanCopy.getMvccReadPoint());
    assertEquals(scan.getPriority(), scanCopy.getPriority());
    assertEquals(scan.getReadType(), scanCopy.getReadType());
    assertEquals(scan.getReplicaId(), scanCopy.getReplicaId());
    assertEquals(scan.getRowOffsetPerColumnFamily(), scanCopy.getRowOffsetPerColumnFamily());
    assertArrayEquals(scan.getStartRow(), scanCopy.getStartRow());
    assertArrayEquals(scan.getStopRow(), scanCopy.getStopRow());
    assertEquals(scan.getTimeRange(), scanCopy.getTimeRange());

    assertTrue("Make sure copy constructor adds all the fields in the copied object",
      EqualsBuilder.reflectionEquals(scan, scanCopy));
  }
}
apache-2.0
PRImA-Research-Lab/semantic-labelling
src/org/primaresearch/clc/phd/workflow/gui/dialog/CreateActivityDialog.java
4891
package org.primaresearch.clc.phd.workflow.gui.dialog;

import java.awt.BorderLayout;
import java.awt.Color;
import java.awt.FlowLayout;
import java.awt.Font;
import java.awt.Frame;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.awt.event.WindowEvent;

import javax.swing.DefaultListModel;
import javax.swing.JButton;
import javax.swing.JDialog;
import javax.swing.JLabel;
import javax.swing.JList;
import javax.swing.JPanel;
import javax.swing.SwingUtilities;
import javax.swing.border.EmptyBorder;
import javax.swing.border.LineBorder;
import javax.swing.event.ListSelectionEvent;
import javax.swing.event.ListSelectionListener;

import org.primaresearch.clc.phd.repository.search.matching.Matcher;
import org.primaresearch.clc.phd.workflow.Workflow;
import org.primaresearch.clc.phd.workflow.activity.Activity;
import org.primaresearch.clc.phd.workflow.activity.ActivityType;

/**
 * Dialogue for creating a new workflow activity. Shows a list of available activity types.
 * The user either clicks an activity type (dialog closes, result in
 * {@link #getSelectedActivityType()}) or searches a repository for an existing
 * activity (result in {@link #getSelectedActivity()}). At most one of the two
 * results is non-null after the dialog closes.
 *
 * @author clc
 *
 */
public class CreateActivityDialog extends JDialog {

	private static final long serialVersionUID = 1L;
	private JButton btnSearchActivity;
	private JList<ActivityType> activityTypeList;
	// Result when the user picked a type for a NEW activity; null otherwise.
	private ActivityType selectedActivityType = null;
	// Result when the user picked an EXISTING activity from a repository; null otherwise.
	private Activity selectedActivity = null;

	/**
	 * Constructor
	 *
	 * Builds the dialog UI: a prompt label (north), the activity-type list
	 * (centre) and a button row with repository search and cancel (south).
	 *
	 * @param parent owner frame; the dialog is modal with respect to it
	 * @param workflow workflow the new activity will belong to (used by the repository search)
	 * @param parentActivity activity under which the new one is added (used for matching)
	 */
	public CreateActivityDialog(Frame parent, final Workflow workflow, final Activity parentActivity) {
		super(parent, "New Activity", true);
		setTitle("Add Activity");
		setResizable(false);

		JPanel panel = new JPanel();
		panel.setBorder(new EmptyBorder(5, 5, 5, 5));
		getContentPane().add(panel, BorderLayout.CENTER);
		panel.setLayout(new BorderLayout(0, 0));

		JLabel lblSelectActivityType = new JLabel("Click on activity type to create new activity");
		lblSelectActivityType.setFont(new Font("Tahoma", Font.PLAIN, 14));
		panel.add(lblSelectActivityType, BorderLayout.NORTH);

		// Button row (right-aligned) at the bottom of the dialog
		JPanel panel_1 = new JPanel();
		FlowLayout flowLayout = (FlowLayout) panel_1.getLayout();
		flowLayout.setAlignment(FlowLayout.RIGHT);
		panel.add(panel_1, BorderLayout.SOUTH);

		btnSearchActivity = new JButton("Search repository for activity");
		btnSearchActivity.addActionListener(new ActionListener() {
			public void actionPerformed(ActionEvent e) {
				// Open the repository search dialog on the EDT; if the user
				// picked an activity there, adopt it as the result and close.
				SwingUtilities.invokeLater(new Runnable() {
					public void run() {
						FindMatchingActivityDialog dlg = new FindMatchingActivityDialog(workflow, parentActivity,
								Matcher.MATCHING_FOR_ADDING_CHILD);
						dlg.setModal(true);
						dlg.setVisible(true);

						selectedActivity = dlg.getResultActivity();
						if (selectedActivity != null) {
							// Close this dialog as well
							setVisible(false);
							dispatchEvent(new WindowEvent(CreateActivityDialog.this, WindowEvent.WINDOW_CLOSING));
						}
					}
				});
			}
		});
		//btnCreateActivity.setEnabled(false);
		panel_1.add(btnSearchActivity);

		JButton btnCancel = new JButton("Cancel");
		btnCancel.addActionListener(new ActionListener() {
			public void actionPerformed(ActionEvent e) {
				// Close without a result; both selection fields stay null.
				setVisible(false);
				dispatchEvent(new WindowEvent(CreateActivityDialog.this, WindowEvent.WINDOW_CLOSING));
			}
		});
		panel_1.add(btnCancel);

		// Centre area holding the clickable list of activity types
		JPanel panel_2 = new JPanel();
		panel_2.setBorder(new EmptyBorder(5, 0, 5, 0));
		panel.add(panel_2, BorderLayout.CENTER);
		panel_2.setLayout(new BorderLayout(0, 0));

		activityTypeList = new JList<ActivityType>();
		activityTypeList.addListSelectionListener(new ListSelectionListener() {
			public void valueChanged(ListSelectionEvent e) {
				// Selecting a type is the confirmation gesture: record it and close.
				selectedActivityType = activityTypeList.getSelectedValue();
				setVisible(false);
				dispatchEvent(new WindowEvent(CreateActivityDialog.this, WindowEvent.WINDOW_CLOSING));
				//btnCreateActivity.setEnabled(activityTypeList.getSelectedValue() != null);
			}
		});
		activityTypeList.setBorder(new LineBorder(Color.LIGHT_GRAY));
		panel_2.add(activityTypeList);

		activityTypeList.setModel(new ActivityTypeListModel());
	}

	/**
	 * Returns the activity type the user selected (if opted for creating a new activity)
	 * @return Activity type or null (invalid selection)
	 */
	public ActivityType getSelectedActivityType() {
		return selectedActivityType;
	}

	/**
	 * Returns the activity the user selected (if opted for using an existing activity from a repository)
	 * @return Activity type or null (invalid selection)
	 */
	public Activity getSelectedActivity() {
		return selectedActivity;
	}

	/**
	 * List model for activity types.
	 *
	 * @author clc
	 *
	 */
	private static class ActivityTypeListModel extends DefaultListModel<ActivityType> {

		private static final long serialVersionUID = 1L;

		// Fixed set of creatable activity types shown in the dialog.
		public ActivityTypeListModel() {
			addElement(ActivityType.ATOMIC_ACTIVITY);
			addElement(ActivityType.DIRECTED_GRAPH_ACTIVITY);
			addElement(ActivityType.FOR_LOOP_ACTIVITY);
			addElement(ActivityType.IF_ELSE_ACTIVITY);
		}
	}
}
apache-2.0
thunderbird/pungwecms
core/src/main/java/com/pungwe/cms/core/system/services/HtmlWrapperService.java
8206
package com.pungwe.cms.core.system.services;

import com.pungwe.cms.core.element.HeaderRenderedElement;
import com.pungwe.cms.core.element.RenderedElement;
import com.pungwe.cms.core.element.basic.LinkElement;
import com.pungwe.cms.core.element.basic.ScriptElement;
import com.pungwe.cms.core.element.basic.StyleElement;
import com.pungwe.cms.core.element.model.ModelAndViewElement;
import com.pungwe.cms.core.element.services.RenderedElementService;
import com.pungwe.cms.core.system.element.templates.HtmlElement;
import com.pungwe.cms.core.system.element.templates.PageElement;
import com.pungwe.cms.core.utils.services.HookService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.web.servlet.ModelAndView;

import javax.servlet.http.HttpServletRequest;
import java.lang.reflect.InvocationTargetException;
import java.util.*;

/**
 * Wraps a controller's {@link ModelAndView} inside the full HTML page
 * template, populating title, head, CSS, JS and page regions from the
 * registered hooks.
 *
 * Created by ian on 04/03/2016.
 */
@Service
public class HtmlWrapperService {

    @Autowired
    protected RenderedElementService renderedElementService;

    @Autowired
    protected HookService hookService;

    @Autowired
    protected PageBuilderService pageBuilderService;

    /**
     * Replaces the content of {@code modelAndView} with the rendered HTML
     * template that wraps the original view.
     *
     * @param request      current HTTP request, forwarded to the page builder
     * @param modelAndView the controller result; mutated in place to point at
     *                     the wrapping HTML view
     * @throws InvocationTargetException if a hook implementation throws
     * @throws IllegalAccessException    if a hook method is not accessible
     */
    public void wrapModelAndView(HttpServletRequest request, ModelAndView modelAndView)
            throws InvocationTargetException, IllegalAccessException {
        // Snapshot the original model/view so it can become the page content.
        ModelAndView wrappedModelAndView = new ModelAndView();
        wrappedModelAndView.addAllObjects(modelAndView.getModel());
        if (modelAndView.getView() != null) {
            wrappedModelAndView.setView(modelAndView.getView());
        } else {
            wrappedModelAndView.setViewName(modelAndView.getViewName());
        }
        // Preprocess content
        hookService.executeHook("preprocess_template", modelAndView.getViewName(), modelAndView.getModel());
        // Clear the original model; it is re-populated with the wrapper below.
        modelAndView.clear();

        // Collect each region of the html template from its hook.
        final List<String> title = processHook(String.class, "html_title");
        final List<HeaderRenderedElement> head = processHook(HeaderRenderedElement.class, "html_head");
        final List<HeaderRenderedElement> css = processHook(HeaderRenderedElement.class, "html_css");
        final List<ScriptElement> jsTop = processHook(ScriptElement.class, "html_js_top");
        final List<RenderedElement> pageTop = processHook(RenderedElement.class, "html_page_top");
        final List<RenderedElement> pageBottom = processHook(RenderedElement.class, "html_page_bottom");
        final List<ScriptElement> jsBottom = processHook(ScriptElement.class, "html_js_bottom");

        // Create the html element, then execute the preprocess html hook. By
        // this point it should be fairly well populated.
        final HtmlElement htmlElement = new HtmlElement();
        htmlElement.setTitle(title);
        htmlElement.addToHead(head);
        htmlElement.addToCss(css);
        htmlElement.addToJsTop(jsTop);
        htmlElement.addToJsBottom(jsBottom);
        htmlElement.addToPageTop(pageTop);
        htmlElement.addToPageBottom(pageBottom);

        // Hooks don't require parameters, so a hook implemented without the
        // HtmlElement parameter may instead RETURN a map of region values;
        // fold any such map into the element here.
        hookService.executeHook("preprocess_html", (clazz, result) -> {
            if (result instanceof Map) {
                @SuppressWarnings("unchecked")
                Map<String, Object> map = (Map<String, Object>) result;
                processHtmlPreprocessHookResult(map, htmlElement);
            }
        }, htmlElement);

        if (wrappedModelAndView.getModel().containsKey("title")
                && wrappedModelAndView.getModel().get("title") != null) {
            htmlElement.addTitle(wrappedModelAndView.getModel().get("title").toString()); // force to string...
        }

        // The original view becomes the page's content element.
        ModelAndViewElement content = new ModelAndViewElement();
        content.setContent(wrappedModelAndView);

        // Build the page...
        Map<String, Object> model = new HashMap<String, Object>();
        if (wrappedModelAndView.getModel().containsKey("title")) {
            model.put("title", wrappedModelAndView.getModel().get("title"));
        }
        model.put("content", content);
        PageElement page = pageBuilderService.buildPage(request, model);
        htmlElement.addToPageContent(page);

        // Render the html element and point the caller's ModelAndView at it.
        ModelAndView htmlElementModelAndView = renderedElementService.convertToModelAndView(htmlElement);
        // We know it will be a view name here...
        modelAndView.setViewName(htmlElementModelAndView.getViewName());
        modelAndView.addAllObjects(htmlElementModelAndView.getModelMap());
    }

    /**
     * Merges a map returned by a {@code preprocess_html} hook into the html
     * element. Recognised keys: title, head, css, js_top, page_top,
     * page_bottom, js_bottom, attributes, body_attributes; values may be a
     * single element or a collection. Entries with null values or
     * unrecognised keys/value types are ignored.
     *
     * FIX: keys were previously compared with {@code ==} (reference
     * equality), which only works for interned strings; the js_bottom branch
     * tested {@code instanceof HeaderRenderedElement} but cast to
     * {@code ScriptElement}, and its collection branch mistakenly called
     * {@code addToPageBottom} instead of {@code addToJsBottom}.
     */
    @SuppressWarnings("unchecked")
    protected void processHtmlPreprocessHookResult(Map<String, Object> result, HtmlElement element) {
        for (Map.Entry<String, Object> entry : result.entrySet()) {
            Object value = entry.getValue();
            if (value == null) {
                continue; // ignore null values
            }
            switch (entry.getKey()) {
                case "title":
                    element.addTitle(value.toString());
                    break;
                case "head":
                    if (value instanceof HeaderRenderedElement) {
                        element.addToHead((HeaderRenderedElement) value);
                    } else if (value instanceof Collection) {
                        element.addToHead(((Collection<HeaderRenderedElement>) value)
                                .toArray(new HeaderRenderedElement[0]));
                    }
                    break;
                case "css":
                    // CSS entries are placed into the document head.
                    if (value instanceof LinkElement || value instanceof StyleElement) {
                        element.addToHead((HeaderRenderedElement) value);
                    } else if (value instanceof Collection) {
                        element.addToHead(((Collection<HeaderRenderedElement>) value)
                                .toArray(new HeaderRenderedElement[0]));
                    }
                    break;
                case "js_top":
                    if (value instanceof ScriptElement) {
                        element.addToJsTop((ScriptElement) value);
                    } else if (value instanceof Collection) {
                        element.addToJsTop(((Collection<ScriptElement>) value)
                                .toArray(new ScriptElement[0]));
                    }
                    break;
                case "page_top":
                    if (value instanceof RenderedElement) {
                        element.addToPageTop((RenderedElement) value);
                    } else if (value instanceof Collection) {
                        element.addToPageTop(((Collection<RenderedElement>) value)
                                .toArray(new RenderedElement[0]));
                    }
                    break;
                case "page_bottom":
                    if (value instanceof RenderedElement) {
                        element.addToPageBottom((RenderedElement) value);
                    } else if (value instanceof Collection) {
                        element.addToPageBottom(((Collection<RenderedElement>) value)
                                .toArray(new RenderedElement[0]));
                    }
                    break;
                case "js_bottom":
                    // FIX: was `instanceof HeaderRenderedElement` with a ScriptElement cast.
                    if (value instanceof ScriptElement) {
                        element.addToJsBottom((ScriptElement) value);
                    } else if (value instanceof Collection) {
                        // FIX: was addToPageBottom (copy-paste error).
                        element.addToJsBottom(((Collection<ScriptElement>) value)
                                .toArray(new ScriptElement[0]));
                    }
                    break;
                case "attributes":
                    if (value instanceof Map) {
                        element.getAttributes().putAll((Map<String, String>) value);
                    }
                    break;
                case "body_attributes":
                    if (value instanceof Map) {
                        element.getBodyAttributes().putAll((Map<String, String>) value);
                    }
                    break;
                default:
                    break; // unknown key — ignore
            }
        }
    }

    /**
     * Executes the named hook and gathers every non-null callback result
     * assignable to {@code elementType}.
     *
     * @param elementType expected element type of the hook's results
     * @param hook        hook name to execute
     * @return the collected results, possibly empty, never null
     * @throws InvocationTargetException if a hook implementation throws
     * @throws IllegalAccessException    if a hook method is not accessible
     */
    protected <T> List<T> processHook(Class<T> elementType, String hook)
            throws InvocationTargetException, IllegalAccessException {
        List<T> results = new LinkedList<>();
        hookService.executeHook(hook, (clazz, result) -> {
            if (result != null && elementType.isAssignableFrom(result.getClass())) {
                @SuppressWarnings("unchecked")
                T typed = (T) result;
                results.add(typed);
            }
        }, results);
        return results;
    }
}
apache-2.0
callistaenterprise/websocket-labs
ws-one/src/main/java/se/callista/websocketlabs/wsone/amq/Publisher.java
1348
package se.callista.websocketlabs.wsone.amq; import static se.callista.websocketlabs.wsone.server.Constants.*; import javax.jms.DeliveryMode; import javax.jms.JMSException; import javax.jms.MessageProducer; import javax.jms.TextMessage; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class Publisher extends ActiveMQParent { private static final Logger LOG = LoggerFactory.getLogger(Publisher.class); MessageProducer producer = null; public Publisher() { this(DEFAULT_AMQ_URL, DEFAULT_AMQ_NOTIFY_TOPIC); } public Publisher(String activeMqUrl, String topicName) { super(activeMqUrl, topicName); try { producer = getSession().createProducer(getDestination()); } catch (JMSException e) { throw new RuntimeException(e); } } public void publish(String message) { publish(message, DeliveryMode.NON_PERSISTENT); } public void publish(String message, int deliveryMode) { try { TextMessage textMessage = getSession().createTextMessage(message); textMessage.setJMSDeliveryMode(deliveryMode); producer.send(textMessage); LOG.debug("Message sent to subscribers: '{}'", message); } catch (JMSException e) { throw new RuntimeException(e); } } public void close() { try { producer.close(); super.close(); } catch (JMSException e) { throw new RuntimeException(e); } } }
apache-2.0
pdxrunner/geode
geode-core/src/main/java/org/apache/geode/internal/cache/AbstractDiskRegion.java
37218
/* * Licensed to the Apache Software Foundation (ASF) under one or more contributor license * agreements. See the NOTICE file distributed with this work for additional information regarding * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance with the License. You may obtain a * copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ package org.apache.geode.internal.cache; import java.io.PrintStream; import java.util.EnumSet; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import joptsimple.internal.Strings; import org.apache.logging.log4j.Logger; import org.apache.geode.StatisticsFactory; import org.apache.geode.cache.EvictionAction; import org.apache.geode.cache.EvictionAlgorithm; import org.apache.geode.cache.EvictionAttributes; import org.apache.geode.compression.Compressor; import org.apache.geode.internal.ClassPathLoader; import org.apache.geode.internal.CopyOnWriteHashSet; import org.apache.geode.internal.cache.DiskInitFile.DiskRegionFlag; import org.apache.geode.internal.cache.entries.OffHeapRegionEntry; import org.apache.geode.internal.cache.persistence.DiskRegionView; import org.apache.geode.internal.cache.persistence.PersistentMemberID; import org.apache.geode.internal.cache.persistence.PersistentMemberPattern; import org.apache.geode.internal.cache.versions.DiskRegionVersionVector; import org.apache.geode.internal.cache.versions.RegionVersionHolder; import org.apache.geode.internal.cache.versions.RegionVersionVector; import 
org.apache.geode.internal.cache.versions.VersionSource; import org.apache.geode.internal.cache.versions.VersionTag; import org.apache.geode.internal.logging.LogService; import org.apache.geode.internal.logging.log4j.LogMarker; import org.apache.geode.internal.util.concurrent.ConcurrentMapWithReusableEntries; /** * Code shared by both DiskRegion and RecoveredDiskRegion. * * @since GemFire prPersistSprint2 */ public abstract class AbstractDiskRegion implements DiskRegionView { private static final Logger logger = LogService.getLogger(); private final DiskStoreImpl ds; private final long id; private long clearOplogEntryId = DiskStoreImpl.INVALID_ID; private RegionVersionVector clearRVV; private byte lruAlgorithm; private byte lruAction; private int lruLimit; private int concurrencyLevel = 16; private int initialCapacity = 16; private float loadFactor = 0.75f; private boolean statisticsEnabled; private boolean isBucket; /** True if a persistent backup is needed */ private boolean backup; /** Additional flags that are persisted to the meta-data. */ private final EnumSet<DiskRegionFlag> flags; /** * A flag used to indicate that this disk region is being recreated using already existing data on * the disk. */ private boolean isRecreated; private boolean configChanged; private boolean aboutToDestroy; private boolean aboutToDestroyDataStorage; private String partitionName; private int startingBucketId; private String compressorClassName; private Compressor compressor; private boolean offHeap; /** * Records the version vector of what has been persisted to disk. This may lag behind the version * vector of what is in memory, because updates may be written asynchronously to disk. We need to * keep track of exactly what has been written to disk so that we can record a version vector at * the beginning of each oplog. * * The version vector of what is in memory is held in is held in LocalRegion.versionVector. 
*/ private RegionVersionVector versionVector; /** * A flag whether the current version vector accurately represents what has been written to this * members disk. */ private volatile boolean rvvTrusted = true; protected AbstractDiskRegion(DiskStoreImpl ds, String name) { DiskRegionView drv = ds.getDiskInitFile().takeDiskRegionByName(name); if (drv != null) { // if we found one in the initFile then we take it out of it and this // one we are constructing will replace it in the diskStore drMap. this.ds = drv.getDiskStore(); this.id = drv.getId(); this.backup = drv.isBackup(); this.clearOplogEntryId = drv.getClearOplogEntryId(); this.clearRVV = drv.getClearRVV(); this.lruAlgorithm = drv.getLruAlgorithm(); this.lruAction = drv.getLruAction(); this.lruLimit = drv.getLruLimit(); this.concurrencyLevel = drv.getConcurrencyLevel(); this.initialCapacity = drv.getInitialCapacity(); this.loadFactor = drv.getLoadFactor(); this.statisticsEnabled = drv.getStatisticsEnabled(); this.isBucket = drv.isBucket(); this.flags = drv.getFlags(); this.partitionName = drv.getPartitionName(); this.startingBucketId = drv.getStartingBucketId(); this.myInitializingId = drv.getMyInitializingID(); this.myInitializedId = drv.getMyPersistentID(); this.aboutToDestroy = drv.wasAboutToDestroy(); this.aboutToDestroyDataStorage = drv.wasAboutToDestroyDataStorage(); this.onlineMembers = new CopyOnWriteHashSet<PersistentMemberID>(drv.getOnlineMembers()); this.offlineMembers = new CopyOnWriteHashSet<PersistentMemberID>(drv.getOfflineMembers()); this.equalMembers = new CopyOnWriteHashSet<PersistentMemberID>(drv.getOfflineAndEqualMembers()); this.isRecreated = true; // Use the same atomic counters as the previous disk region. 
This ensures that // updates from threads with a reference to the old region update this disk region // See 49943 this.numOverflowOnDisk = ((AbstractDiskRegion) drv).numOverflowOnDisk; this.numEntriesInVM = ((AbstractDiskRegion) drv).numEntriesInVM; this.numOverflowBytesOnDisk = ((AbstractDiskRegion) drv).numOverflowBytesOnDisk; this.entries = drv.getRecoveredEntryMap(); this.readyForRecovery = drv.isReadyForRecovery(); this.recoveredEntryCount = drv.getRecoveredEntryCount(); this.recoveryCompleted = ((AbstractDiskRegion) drv).recoveryCompleted; this.versionVector = drv.getRegionVersionVector(); this.compressorClassName = drv.getCompressorClassName(); this.compressor = drv.getCompressor(); this.offHeap = drv.getOffHeap(); if (drv instanceof PlaceHolderDiskRegion) { this.setRVVTrusted(((PlaceHolderDiskRegion) drv).getRVVTrusted()); } } else { // This is a brand new disk region. this.ds = ds; // { // DiskRegion existingDr = ds.getByName(name); // if (existingDr != null) { // throw new IllegalStateException("DiskRegion named " + name + " already exists with id=" + // existingDr.getId()); // } // } this.id = ds.generateRegionId(); this.flags = EnumSet.noneOf(DiskRegionFlag.class); this.onlineMembers = new CopyOnWriteHashSet<PersistentMemberID>(); this.offlineMembers = new CopyOnWriteHashSet<PersistentMemberID>(); this.equalMembers = new CopyOnWriteHashSet<PersistentMemberID>(); this.isRecreated = false; this.versionVector = new DiskRegionVersionVector(ds.getDiskStoreID()); this.numOverflowOnDisk = new AtomicLong(); this.numEntriesInVM = new AtomicLong(); this.numOverflowBytesOnDisk = new AtomicLong(); } } protected AbstractDiskRegion(DiskStoreImpl ds, long id) { this.ds = ds; this.id = id; this.flags = EnumSet.noneOf(DiskRegionFlag.class); this.onlineMembers = new CopyOnWriteHashSet<PersistentMemberID>(); this.offlineMembers = new CopyOnWriteHashSet<PersistentMemberID>(); this.equalMembers = new CopyOnWriteHashSet<PersistentMemberID>(); this.isRecreated = true; 
this.backup = true; this.versionVector = new DiskRegionVersionVector(ds.getDiskStoreID()); this.numOverflowOnDisk = new AtomicLong(); this.numEntriesInVM = new AtomicLong(); this.numOverflowBytesOnDisk = new AtomicLong(); // We do not initialize the soplog set here. The soplog set needs // to be handled the complete set of recovered soplogs, which is not available // at the time a recovered disk region is first created. } /** * Used to initialize a PlaceHolderDiskRegion for a region that is being closed * * @param drv the region that is being closed */ protected AbstractDiskRegion(DiskRegionView drv) { this.ds = drv.getDiskStore(); this.id = drv.getId(); this.backup = drv.isBackup(); this.clearOplogEntryId = drv.getClearOplogEntryId(); this.clearRVV = drv.getClearRVV(); this.lruAlgorithm = drv.getLruAlgorithm(); this.lruAction = drv.getLruAction(); this.lruLimit = drv.getLruLimit(); this.concurrencyLevel = drv.getConcurrencyLevel(); this.initialCapacity = drv.getInitialCapacity(); this.loadFactor = drv.getLoadFactor(); this.statisticsEnabled = drv.getStatisticsEnabled(); this.isBucket = drv.isBucket(); this.flags = drv.getFlags(); this.partitionName = drv.getPartitionName(); this.startingBucketId = drv.getStartingBucketId(); this.myInitializingId = null; // fixes 43650 this.myInitializedId = drv.getMyPersistentID(); this.aboutToDestroy = false; this.aboutToDestroyDataStorage = false; this.onlineMembers = new CopyOnWriteHashSet<PersistentMemberID>(drv.getOnlineMembers()); this.offlineMembers = new CopyOnWriteHashSet<PersistentMemberID>(drv.getOfflineMembers()); this.equalMembers = new CopyOnWriteHashSet<PersistentMemberID>(drv.getOfflineAndEqualMembers()); this.isRecreated = true; this.numOverflowOnDisk = new AtomicLong(); this.numEntriesInVM = new AtomicLong(); this.numOverflowBytesOnDisk = new AtomicLong(); this.entries = drv.getRecoveredEntryMap(); this.readyForRecovery = drv.isReadyForRecovery(); this.recoveredEntryCount = 0; // fix for bug 41570 
this.recoveryCompleted = ((AbstractDiskRegion) drv).recoveryCompleted; this.versionVector = drv.getRegionVersionVector(); this.compressorClassName = drv.getCompressorClassName(); this.compressor = drv.getCompressor(); this.offHeap = drv.getOffHeap(); } @Override public abstract String getName(); @Override public DiskStoreImpl getDiskStore() { return this.ds; } abstract void beginDestroyRegion(LocalRegion region); public void resetRVV() { this.versionVector = new DiskRegionVersionVector(ds.getDiskStoreID()); } @Override public long getId() { return this.id; } @Override public long getClearOplogEntryId() { return this.clearOplogEntryId; } @Override public void setClearOplogEntryId(long v) { this.clearOplogEntryId = v; } @Override public RegionVersionVector getClearRVV() { return this.clearRVV; } @Override public void setClearRVV(RegionVersionVector rvv) { this.clearRVV = rvv; } @Override public void setConfig(byte lruAlgorithm, byte lruAction, int lruLimit, int concurrencyLevel, int initialCapacity, float loadFactor, boolean statisticsEnabled, boolean isBucket, EnumSet<DiskRegionFlag> flags, String partitionName, int startingBucketId, String compressorClassName, boolean offHeap) { this.lruAlgorithm = lruAlgorithm; this.lruAction = lruAction; this.lruLimit = lruLimit; this.concurrencyLevel = concurrencyLevel; this.initialCapacity = initialCapacity; this.loadFactor = loadFactor; this.statisticsEnabled = statisticsEnabled; this.isBucket = isBucket; if (flags != null && flags != this.flags) { this.flags.clear(); this.flags.addAll(flags); } this.partitionName = partitionName; this.startingBucketId = startingBucketId; this.compressorClassName = compressorClassName; this.offHeap = offHeap; if (!ds.isOffline()) { createCompressorFromClassName(); } } public void createCompressorFromClassName() { if (Strings.isNullOrEmpty(compressorClassName)) { compressor = null; } else { try { @SuppressWarnings("unchecked") Class<Compressor> compressorClass = (Class<Compressor>) 
ClassPathLoader.getLatest().forName(compressorClassName); this.compressor = compressorClass.newInstance(); } catch (ClassNotFoundException e) { throw new IllegalArgumentException( String.format("Unknown Compressor %s found in disk initialization file.", compressorClassName), e); } catch (InstantiationException e) { throw new IllegalArgumentException( String.format("Unknown Compressor %s found in disk initialization file.", compressorClassName), e); } catch (IllegalAccessException e) { throw new IllegalArgumentException( String.format("Unknown Compressor %s found in disk initialization file.", compressorClassName), e); } } } @Override public EvictionAttributes getEvictionAttributes() { return new EvictionAttributesImpl().setAlgorithm(getActualLruAlgorithm()) .setAction(getActualLruAction()).setMaximum(getLruLimit()); } @Override public byte getLruAlgorithm() { return this.lruAlgorithm; } public EvictionAlgorithm getActualLruAlgorithm() { return EvictionAlgorithm.parseValue(getLruAlgorithm()); } @Override public byte getLruAction() { return this.lruAction; } public EvictionAction getActualLruAction() { return EvictionAction.parseValue(getLruAction()); } @Override public int getLruLimit() { return this.lruLimit; } @Override public int getConcurrencyLevel() { return this.concurrencyLevel; } @Override public int getInitialCapacity() { return this.initialCapacity; } @Override public float getLoadFactor() { return this.loadFactor; } @Override public boolean getStatisticsEnabled() { return this.statisticsEnabled; } @Override public boolean isBucket() { return this.isBucket; } @Override public EnumSet<DiskRegionFlag> getFlags() { return this.flags; } @Override public String getPartitionName() { return this.partitionName; } @Override public int getStartingBucketId() { return this.startingBucketId; } public String getPrName() { assert isBucket(); String bn = PartitionedRegionHelper.getBucketName(getName()); return PartitionedRegionHelper.getPRPath(bn); } private 
PersistentMemberID myInitializingId = null; private PersistentMemberID myInitializedId = null; private final CopyOnWriteHashSet<PersistentMemberID> onlineMembers; private final CopyOnWriteHashSet<PersistentMemberID> offlineMembers; private final CopyOnWriteHashSet<PersistentMemberID> equalMembers; @Override public PersistentMemberID addMyInitializingPMID(PersistentMemberID pmid) { PersistentMemberID result = this.myInitializingId; this.myInitializingId = pmid; if (result != null) { this.myInitializedId = result; } return result; } @Override public void markInitialized() { assert this.myInitializingId != null; this.myInitializedId = this.myInitializingId; this.myInitializingId = null; } @Override public boolean addOnlineMember(PersistentMemberID pmid) { return this.onlineMembers.add(pmid); } @Override public boolean addOfflineMember(PersistentMemberID pmid) { return this.offlineMembers.add(pmid); } @Override public boolean addOfflineAndEqualMember(PersistentMemberID pmid) { return this.equalMembers.add(pmid); } @Override public boolean rmOnlineMember(PersistentMemberID pmid) { return this.onlineMembers.remove(pmid); } @Override public boolean rmOfflineMember(PersistentMemberID pmid) { return this.offlineMembers.remove(pmid); } @Override public boolean rmEqualMember(PersistentMemberID pmid) { return this.equalMembers.remove(pmid); } @Override public void markBeginDestroyRegion() { this.aboutToDestroy = true; } @Override public void markBeginDestroyDataStorage() { this.aboutToDestroyDataStorage = true; } @Override public void markEndDestroyRegion() { this.onlineMembers.clear(); this.offlineMembers.clear(); this.equalMembers.clear(); this.myInitializedId = null; this.myInitializingId = null; this.aboutToDestroy = false; this.isRecreated = false; } @Override public void markEndDestroyDataStorage() { this.myInitializedId = null; this.myInitializingId = null; this.aboutToDestroyDataStorage = false; } // PersistentMemberView methods @Override public PersistentMemberID 
getMyInitializingID() { DiskInitFile dif = this.ds.getDiskInitFile(); if (dif == null) return this.myInitializingId; synchronized (dif) { return this.myInitializingId; } } @Override public PersistentMemberID getMyPersistentID() { DiskInitFile dif = this.ds.getDiskInitFile(); if (dif == null) return this.myInitializedId; synchronized (dif) { return this.myInitializedId; } } @Override public Set<PersistentMemberID> getOnlineMembers() { DiskInitFile dif = this.ds.getDiskInitFile(); if (dif == null) return this.onlineMembers.getSnapshot(); synchronized (dif) { return this.onlineMembers.getSnapshot(); } } @Override public Set<PersistentMemberID> getOfflineMembers() { DiskInitFile dif = this.ds.getDiskInitFile(); if (dif == null) return this.offlineMembers.getSnapshot(); synchronized (dif) { return this.offlineMembers.getSnapshot(); } } @Override public Set<PersistentMemberID> getOfflineAndEqualMembers() { DiskInitFile dif = this.ds.getDiskInitFile(); if (dif == null) return this.equalMembers.getSnapshot(); synchronized (dif) { return this.equalMembers.getSnapshot(); } } @Override public Set<PersistentMemberPattern> getRevokedMembers() { DiskInitFile dif = this.ds.getDiskInitFile(); return ds.getRevokedMembers(); } @Override public void memberOffline(PersistentMemberID persistentID) { this.ds.memberOffline(this, persistentID); if (logger.isTraceEnabled(LogMarker.PERSIST_VERBOSE)) { logger.trace(LogMarker.PERSIST_VERBOSE, "PersistentView {} - {} - member offline {}", getDiskStoreID().abbrev(), this.getName(), persistentID); } } @Override public void memberOfflineAndEqual(PersistentMemberID persistentID) { this.ds.memberOfflineAndEqual(this, persistentID); if (logger.isTraceEnabled(LogMarker.PERSIST_VERBOSE)) { logger.trace(LogMarker.PERSIST_VERBOSE, "PersistentView {} - {} - member offline and equal {}", getDiskStoreID().abbrev(), this.getName(), persistentID); } } @Override public void memberOnline(PersistentMemberID persistentID) { this.ds.memberOnline(this, 
persistentID); if (logger.isTraceEnabled(LogMarker.PERSIST_VERBOSE)) { logger.trace(LogMarker.PERSIST_VERBOSE, "PersistentView {} - {} - member online {}", getDiskStoreID().abbrev(), this.getName(), persistentID); } } @Override public void memberRemoved(PersistentMemberID persistentID) { this.ds.memberRemoved(this, persistentID); if (logger.isTraceEnabled(LogMarker.PERSIST_VERBOSE)) { logger.trace(LogMarker.PERSIST_VERBOSE, "PersistentView {} - {} - member removed {}", getDiskStoreID().abbrev(), this.getName(), persistentID); } } @Override public void memberRevoked(PersistentMemberPattern revokedPattern) { this.ds.memberRevoked(revokedPattern); if (logger.isTraceEnabled(LogMarker.PERSIST_VERBOSE)) { logger.trace(LogMarker.PERSIST_VERBOSE, "PersistentView {} - {} - member revoked {}", getDiskStoreID().abbrev(), this.getName(), revokedPattern); } } @Override public void setInitializing(PersistentMemberID newId) { this.ds.setInitializing(this, newId); if (logger.isTraceEnabled(LogMarker.PERSIST_VERBOSE)) { logger.trace(LogMarker.PERSIST_VERBOSE, "PersistentView {} - {} - initializing local id: {}", getDiskStoreID().abbrev(), this.getName(), getMyInitializingID()); } } @Override public void setInitialized() { this.ds.setInitialized(this); if (logger.isTraceEnabled(LogMarker.PERSIST_VERBOSE)) { logger.trace(LogMarker.PERSIST_VERBOSE, "PersistentView {} - {} - initialized local id: {}", getDiskStoreID().abbrev(), this.getName(), getMyPersistentID()); } } @Override public PersistentMemberID generatePersistentID() { return this.ds.generatePersistentID(this); } @Override public boolean isRecreated() { return this.isRecreated; } @Override public boolean hasConfigChanged() { return this.configChanged; } @Override public void setConfigChanged(boolean v) { this.configChanged = v; } @Override public void endDestroy(LocalRegion region) { // Clean up the state if we were ready to recover this region if (isReadyForRecovery()) { ds.updateDiskRegion(this); entriesMapIncompatible = 
false; if (entries != null) { ConcurrentMapWithReusableEntries<Object, Object> other = entries.getCustomEntryConcurrentHashMap(); for (Map.Entry<Object, Object> me : other.entrySetWithReusableEntries()) { RegionEntry oldRe = (RegionEntry) me.getValue(); if (oldRe instanceof OffHeapRegionEntry) { ((OffHeapRegionEntry) oldRe).release(); } else { // no need to keep iterating; they are all either off heap or on heap. break; } } } entries = null; readyForRecovery = false; } if (aboutToDestroyDataStorage) { ds.endDestroyDataStorage(region, (DiskRegion) this); if (logger.isTraceEnabled(LogMarker.PERSIST_VERBOSE)) { logger.trace(LogMarker.PERSIST_VERBOSE, "PersistentView {} - {} - endDestroyDataStorage: {}", getDiskStoreID().abbrev(), this.getName(), getMyPersistentID()); } } else { ds.endDestroyRegion(region, (DiskRegion) this); if (logger.isTraceEnabled(LogMarker.PERSIST_VERBOSE)) { logger.trace(LogMarker.PERSIST_VERBOSE, "PersistentView {} - {} - endDestroy: {}", getDiskStoreID().abbrev(), this.getName(), getMyPersistentID()); } } } /** * Begin the destroy of everything related to this disk region. */ @Override public void beginDestroy(LocalRegion region) { beginDestroyRegion(region); if (logger.isTraceEnabled(LogMarker.PERSIST_VERBOSE)) { logger.trace(LogMarker.PERSIST_VERBOSE, "PersistentView {} - {} - beginDestroy: {}", getDiskStoreID().abbrev(), this.getName(), getMyPersistentID()); } if (this.myInitializedId == null) { endDestroy(region); } } /** * Destroy the data storage this this disk region. Destroying the data storage leaves the * persistent view, but removes the data. 
*/ @Override public void beginDestroyDataStorage() { this.ds.beginDestroyDataStorage((DiskRegion) this); if (logger.isTraceEnabled(LogMarker.PERSIST_VERBOSE)) { logger.trace(LogMarker.PERSIST_VERBOSE, "PersistentView {} - {} - beginDestroyDataStorage: {}", getDiskStoreID().abbrev(), this.getName(), getMyPersistentID()); } } public void createDataStorage() {} @Override public boolean wasAboutToDestroy() { return this.aboutToDestroy; } @Override public boolean wasAboutToDestroyDataStorage() { return this.aboutToDestroyDataStorage; } /** * Set to true once this DiskRegion is ready to be recovered. */ private boolean readyForRecovery; /** * Total number of entries recovered by restoring from backup. Its initialized right after a * recovery but may be updated later as recovered entries go away due to updates and destroys. */ protected int recoveredEntryCount; private boolean entriesMapIncompatible; private RegionMap entries; private AtomicBoolean recoveryCompleted; public void setEntriesMapIncompatible(boolean v) { this.entriesMapIncompatible = v; } @Override public boolean isEntriesMapIncompatible() { return entriesMapIncompatible; } public RegionMap useExistingRegionMap(LocalRegion lr) { RegionMap result = null; if (!this.entriesMapIncompatible) { result = this.entries; // if (result != null) { // result.changeOwner(lr); // } } return result; } private void waitForRecoveryCompletion() { boolean interrupted = Thread.interrupted(); synchronized (this.recoveryCompleted) { try { // @todo also check for shutdown of diskstore? while (!this.recoveryCompleted.get()) { try { this.recoveryCompleted.wait(); } catch (InterruptedException ex) { interrupted = true; } } } finally { if (interrupted) { Thread.currentThread().interrupt(); } } } } @Override public void copyExistingRegionMap(LocalRegion lr) { waitForRecoveryCompletion(); if (this.entriesMapIncompatible) { // Reset the numEntriesInVM. 
It will be incremented when the copy to the new map is done, // down in DiskEntry.Help.initialize. However, the other stats can't be updated // there because we don't have the value length at that point. So leave // those stats alone. this.numEntriesInVM.set(0); lr.initializeStats(this.getNumEntriesInVM(), this.getNumOverflowOnDisk(), this.getNumOverflowBytesOnDisk()); lr.copyRecoveredEntries(this.entries); } else { this.entries.changeOwner(lr); lr.initializeStats(this.getNumEntriesInVM(), this.getNumOverflowOnDisk(), this.getNumOverflowBytesOnDisk()); lr.copyRecoveredEntries(null); } this.entries = null; } public void setRecoveredEntryMap(RegionMap rm) { this.recoveryCompleted = new AtomicBoolean(); this.entries = rm; } @Override public RegionMap getRecoveredEntryMap() { return this.entries; } public void releaseRecoveryData() { this.readyForRecovery = false; } @Override public boolean isReadyForRecovery() { // better name for this method would be isRecovering return this.readyForRecovery; } @Override public void prepareForRecovery() { this.readyForRecovery = true; } /** * gets the number of entries recovered * * @since GemFire 3.2.1 */ @Override public int getRecoveredEntryCount() { return this.recoveredEntryCount; } @Override public void incRecoveredEntryCount() { this.recoveredEntryCount++; } /** * initializes the number of entries recovered */ @Override public void initRecoveredEntryCount() { if (this.recoveryCompleted != null) { synchronized (this.recoveryCompleted) { this.recoveryCompleted.set(true); this.recoveryCompleted.notifyAll(); } } } protected final AtomicLong numOverflowOnDisk; @Override public long getNumOverflowOnDisk() { return this.numOverflowOnDisk.get(); } @Override public void incNumOverflowOnDisk(long delta) { this.numOverflowOnDisk.addAndGet(delta); } protected final AtomicLong numOverflowBytesOnDisk; @Override public long getNumOverflowBytesOnDisk() { return this.numOverflowBytesOnDisk.get(); } @Override public void 
incNumOverflowBytesOnDisk(long delta) { this.numOverflowBytesOnDisk.addAndGet(delta); } protected final AtomicLong numEntriesInVM; @Override public long getNumEntriesInVM() { return this.numEntriesInVM.get(); } @Override public void incNumEntriesInVM(long delta) { this.numEntriesInVM.addAndGet(delta); } /** * Returns true if this region maintains a backup of all its keys and values on disk. Returns * false if only values that will not fit in memory are written to disk. */ @Override public boolean isBackup() { return this.backup; } protected void setBackup(boolean v) { this.backup = v; } public void dump(PrintStream printStream) { String name = getName(); if (isBucket() && !logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) { name = getPrName(); } String msg = name + ":" + " -lru=" + getEvictionAttributes().getAlgorithm(); if (!getEvictionAttributes().getAlgorithm().isNone()) { msg += " -lruAction=" + getEvictionAttributes().getAction(); if (!getEvictionAttributes().getAlgorithm().isLRUHeap()) { msg += " -lruLimit=" + getEvictionAttributes().getMaximum(); } } msg += " -concurrencyLevel=" + getConcurrencyLevel() + " -initialCapacity=" + getInitialCapacity() + " -loadFactor=" + getLoadFactor() + " -offHeap=" + getOffHeap() + " -compressor=" + (getCompressorClassName() == null ? 
"none" : getCompressorClassName()) + " -statisticsEnabled=" + getStatisticsEnabled(); if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) { msg += " drId=" + getId() + " isBucket=" + isBucket() + " clearEntryId=" + getClearOplogEntryId() + " MyInitializingID=<" + getMyInitializingID() + ">" + " MyPersistentID=<" + getMyPersistentID() + ">" + " onlineMembers=" + getOnlineMembers() + " offlineMembers=" + getOfflineMembers() + " equalsMembers=" + getOfflineAndEqualMembers(); } printStream.println(msg); } public String dump2() { final String lineSeparator = System.getProperty("line.separator"); StringBuffer sb = new StringBuffer(); String name = getName(); if (isBucket() && logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) { name = getPrName(); } String msg = name + ":" + " -lru=" + getEvictionAttributes().getAlgorithm(); sb.append(name); sb.append(lineSeparator); sb.append("lru=" + getEvictionAttributes().getAlgorithm()); sb.append(lineSeparator); if (!getEvictionAttributes().getAlgorithm().isNone()) { sb.append("lruAction=" + getEvictionAttributes().getAction()); sb.append(lineSeparator); if (!getEvictionAttributes().getAlgorithm().isLRUHeap()) { sb.append("lruAction=" + getEvictionAttributes().getAction()); sb.append(lineSeparator); } } sb.append("-concurrencyLevel=" + getConcurrencyLevel()); sb.append(lineSeparator); sb.append("-initialCapacity=" + getInitialCapacity()); sb.append(lineSeparator); sb.append("-loadFactor=" + getLoadFactor()); sb.append(lineSeparator); sb.append("-offHeap=" + getOffHeap()); sb.append(lineSeparator); sb.append( "-compressor=" + (getCompressorClassName() == null ? 
"none" : getCompressorClassName())); sb.append(lineSeparator); sb.append("-statisticsEnabled=" + getStatisticsEnabled()); sb.append(lineSeparator); if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) { sb.append("drId=" + getId()); sb.append(lineSeparator); sb.append("isBucket=" + isBucket()); sb.append(lineSeparator); sb.append("clearEntryId=" + getClearOplogEntryId()); sb.append(lineSeparator); sb.append("MyInitializingID=<" + getMyInitializingID() + ">"); sb.append(lineSeparator); sb.append("MyPersistentID=<" + getMyPersistentID() + ">"); sb.append(lineSeparator); sb.append("onlineMembers=" + getOnlineMembers()); sb.append(lineSeparator); sb.append("offlineMembers=" + getOfflineMembers()); sb.append(lineSeparator); sb.append("equalsMembers=" + getOfflineAndEqualMembers()); sb.append(lineSeparator); sb.append("flags=").append(getFlags()); sb.append(lineSeparator); } return sb.toString(); } public void dumpMetadata() { String name = getName(); StringBuilder msg = new StringBuilder(name); dumpCommonAttributes(msg); dumpPersistentView(msg); System.out.println(msg); } /** * Dump the (bucket specific) persistent view to the string builder */ public void dumpPersistentView(StringBuilder msg) { msg.append("\n\tMyInitializingID=<").append(getMyInitializingID()).append(">"); msg.append("\n\tMyPersistentID=<").append(getMyPersistentID()).append(">"); msg.append("\n\tonlineMembers:"); for (PersistentMemberID id : getOnlineMembers()) { msg.append("\n\t\t").append(id); } msg.append("\n\tofflineMembers:"); for (PersistentMemberID id : getOfflineMembers()) { msg.append("\n\t\t").append(id); } msg.append("\n\tequalsMembers:"); for (PersistentMemberID id : getOfflineAndEqualMembers()) { msg.append("\n\t\t").append(id); } } /** * Dump the attributes which are common across the PR to the string builder. 
*/ public void dumpCommonAttributes(StringBuilder msg) { msg.append("\n\tlru=").append(getEvictionAttributes().getAlgorithm()); if (!getEvictionAttributes().getAlgorithm().isNone()) { msg.append("\n\tlruAction=").append(getEvictionAttributes().getAction()); if (!getEvictionAttributes().getAlgorithm().isLRUHeap()) { msg.append("\n\tlruLimit=").append(getEvictionAttributes().getMaximum()); } } msg.append("\n\tconcurrencyLevel=").append(getConcurrencyLevel()); msg.append("\n\tinitialCapacity=").append(getInitialCapacity()); msg.append("\n\tloadFactor=").append(getLoadFactor()); msg.append("\n\toffHeap=").append(getOffHeap()); msg.append("\n\tstatisticsEnabled=").append(getStatisticsEnabled()); msg.append("\n\tdrId=").append(getId()); msg.append("\n\tisBucket=").append(isBucket()); msg.append("\n\tclearEntryId=").append(getClearOplogEntryId()); msg.append("\n\tflags=").append(getFlags()); } /** * This method was added to fix bug 40192. It is like getBytesAndBits except it will return * Token.REMOVE_PHASE1 if the htreeReference has changed (which means a clear was done). 
* * @return an instance of BytesAndBits or Token.REMOVED_PHASE1 */ @Override public Object getRaw(DiskId id) { this.acquireReadLock(); try { return getDiskStore().getRaw(this, id); } finally { this.releaseReadLock(); } } @Override public RegionVersionVector getRegionVersionVector() { return this.versionVector; } public long getVersionForMember(VersionSource member) { return this.versionVector.getVersionForMember(member); } public void recordRecoveredGCVersion(VersionSource member, long gcVersion) { this.versionVector.recordGCVersion(member, gcVersion); } public void recordRecoveredVersonHolder(VersionSource member, RegionVersionHolder versionHolder, boolean latestOplog) { this.versionVector.initRecoveredVersion(member, versionHolder, latestOplog); } public void recordRecoveredVersionTag(VersionTag tag) { this.versionVector.recordVersion(tag.getMemberID(), tag.getRegionVersion()); } /** * Indicate that the current RVV for this disk region does not accurately reflect what has been * recorded on disk. This is true while we are in the middle of a GII, because we record the new * RVV at the beginning of the GII. If we recover in this state, we need to know that the * recovered RVV is not something we can use to do a delta GII. */ public void setRVVTrusted(boolean trusted) { this.rvvTrusted = trusted; } public boolean getRVVTrusted() { return this.rvvTrusted; } public PersistentOplogSet getOplogSet() { return getDiskStore().getPersistentOplogSet(this); } @Override public String getCompressorClassName() { return this.compressorClassName; } @Override public Compressor getCompressor() { return this.compressor; } @Override public boolean getOffHeap() { return this.offHeap; } @Override public CachePerfStats getCachePerfStats() { return this.ds.getCache().getCachePerfStats(); } @Override public void oplogRecovered(long oplogId) { // do nothing. 
Overridden in ExportDiskRegion } @Override public String toString() { return getClass().getSimpleName() + ":" + getName(); } @Override public void incRecentlyUsed() { entries.incRecentlyUsed(); } @Override public StatisticsFactory getStatisticsFactory() { return this.ds.getStatisticsFactory(); } @Override public String getNameForStats() { if (isBucket()) { return getPrName(); } else { return getName(); } } @Override public InternalCache getCache() { return getDiskStore().getCache(); } }
apache-2.0
mrfranta/jop
jop-api/src/main/java/cz/zcu/kiv/jop/annotation/generator/string/RegularExpression.java
1382
package cz.zcu.kiv.jop.annotation.generator.string;

import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

import cz.zcu.kiv.jop.annotation.generator.ValueGeneratorAnnotation;

/**
 * This annotation marks a property for which a random string value will be generated based on a
 * <em>regular expression</em>. The generator transforms the regular expression into an
 * <em><a href="https://en.wikipedia.org/wiki/Finite-state_machine">FSM</a></em>; each transition
 * represents generating the next character of the string, and in each state all outgoing
 * characters have equal probability.
 *
 * @see <a href="https://en.wikipedia.org/wiki/Regular_expression">Regular expression</a>
 *
 * @author Mr.FrAnTA
 * @since 1.0.0
 */
@ValueGeneratorAnnotation
@Documented
@Target({ElementType.FIELD, ElementType.PARAMETER})
@Retention(RetentionPolicy.RUNTIME)
public @interface RegularExpression {

  /**
   * Required parameter: the regular expression from which a random matching string value is
   * generated.
   */
  // NOTE: annotation members are implicitly public; the redundant `public` modifiers were removed.
  String value();

  /**
   * Optional parameter: maximal number of characters in the generated string. The value has to be
   * greater than or equal to 0. Defaults to {@link Integer#MAX_VALUE} (effectively unbounded).
   */
  int maxLen() default Integer.MAX_VALUE;
}
apache-2.0
NationalSecurityAgency/ghidra
Ghidra/Features/FileFormats/src/main/java/ghidra/file/formats/android/dex/format/TypeList.java
1758
/* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ghidra.file.formats.android.dex.format; import ghidra.app.util.bin.BinaryReader; import ghidra.app.util.bin.StructConverter; import ghidra.program.model.data.*; import ghidra.util.exception.DuplicateNameException; import java.io.IOException; import java.util.*; public class TypeList implements StructConverter { private int size; private List<TypeItem> items = new ArrayList<TypeItem>(); public TypeList(BinaryReader reader) throws IOException { size = reader.readNextInt(); for (int i = 0; i < size; ++i) { items.add(new TypeItem(reader)); } } /** * Size of the list, in entries */ public int getSize() { return size; } /** * Elements of the list */ public List<TypeItem> getItems() { return Collections.unmodifiableList(items); } @Override public DataType toDataType() throws DuplicateNameException, IOException { Structure structure = new StructureDataType("type_list" + size, 0); structure.add(DWORD, "size", null); int index = 0; for (TypeItem item : items) { structure.add(item.toDataType(), "item_" + (index++), null); } structure.setCategoryPath(new CategoryPath("/dex")); return structure; } }
apache-2.0
SkyCrawl/pikater-vaadin
src/org/pikater/web/vaadin/gui/server/components/iconbutton/IconButton.java
746
package org.pikater.web.vaadin.gui.server.components.iconbutton;

import com.vaadin.annotations.StyleSheet;
import com.vaadin.event.MouseEvents.ClickListener;
import com.vaadin.server.Resource;
import com.vaadin.ui.CustomComponent;
import com.vaadin.ui.Image;

/**
 * A button rendered as a plain image, styled via {@code iconButton.css}.
 *
 * @author SkyCrawl
 */
@StyleSheet("iconButton.css")
public class IconButton extends CustomComponent {
	private static final long serialVersionUID = 4029033501431550617L;

	/** The image component that visually represents this button. */
	private final Image img;

	public IconButton(Resource source) {
		// Build the image (no caption), apply the button style, and make it this
		// component's composition root.
		Image icon = new Image(null, source);
		icon.setStyleName("icon-button");
		this.img = icon;
		setCompositionRoot(icon);
	}

	/** Forwards click registration to the underlying image. */
	public void addClickListener(ClickListener clickListener) {
		this.img.addClickListener(clickListener);
	}
}
apache-2.0
nyjsl/Nyjsl
javaUtils/src/main/java/org/nyjsl/utils/SingletonUtils.java
565
/**
 * Singleton helper class for lazy initialization.
 *
 * <p>Fixes over the previous version: the cached {@code instance} field is now
 * {@code volatile} — without it, double-checked locking is broken under the Java
 * Memory Model and a second thread may observe a partially constructed object.
 * The lock is now the holder instance itself rather than
 * {@code SingletonUtils.class}, which was a single lock shared by every unrelated
 * singleton in the JVM.
 *
 * @author <a href="http://www.trinea.cn/" target="_blank">Trinea</a>
 *
 * @param <T> type of the lazily created instance
 */
public abstract class SingletonUtils<T> {

    /** Cached instance; volatile is required for safe double-checked locking. */
    private volatile T instance;

    /** Creates the instance; called at most once per holder. */
    protected abstract T newInstance();

    /**
     * Returns the cached instance, creating it on first call. Thread-safe;
     * {@link #newInstance()} is invoked at most once.
     */
    public final T getInstance() {
        T result = instance;                  // single volatile read on the fast path
        if (result == null) {
            synchronized (this) {             // per-holder lock, not a global class lock
                result = instance;
                if (result == null) {
                    instance = result = newInstance();
                }
            }
        }
        return result;
    }
}
apache-2.0
sergmor/plnr
Plnr/endpoint-libs/libmessageEndpoint-v1/messageEndpoint/messageendpoint-v1-generated-source/edu/columbia/cloud/plnr/messageEndpoint/model/Key.java
4251
/*
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
 * in compliance with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License
 * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing permissions and limitations under
 * the License.
 */
/*
 * This code was generated by https://code.google.com/p/google-apis-client-generator/
 * (build: 2013-11-22 19:59:01 UTC)
 * on 2013-12-08 at 00:40:14 UTC
 * Modify at your own risk.
 */

package edu.columbia.cloud.plnr.messageEndpoint.model;

/**
 * Model definition for Key.
 *
 * <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is
 * transmitted over HTTP when working with the messageEndpoint. For a detailed explanation see:
 * <a href="http://code.google.com/p/google-http-java-client/wiki/JSON">http://code.google.com/p/google-http-java-client/wiki/JSON</a>
 * </p>
 *
 * <p>NOTE(review): the field set (appId, kind, numeric id or name, namespace, parent key) looks
 * like an App Engine datastore key mirrored through Cloud Endpoints — confirm against the
 * endpoint definition before relying on those semantics. This file is generated; edits here are
 * lost on regeneration.</p>
 *
 * @author Google, Inc.
 */
@SuppressWarnings("javadoc")
public final class Key extends com.google.api.client.json.GenericJson {

  /**
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.lang.String appId;

  /**
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.lang.Boolean complete;

  /**
   * The value may be {@code null}.
   */
  // @JsonString: the 64-bit id is serialized as a JSON string, avoiding precision loss in
  // consumers that parse JSON numbers as IEEE doubles (e.g. JavaScript).
  @com.google.api.client.util.Key
  @com.google.api.client.json.JsonString
  private java.lang.Long id;

  /**
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.lang.String kind;

  /**
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.lang.String name;

  /**
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.lang.String namespace;

  /**
   * The value may be {@code null}.
   */
  // Self-referential: keys form a chain up to a root ancestor via this field.
  @com.google.api.client.util.Key
  private Key parent;

  /**
   * @return value or {@code null} for none
   */
  public java.lang.String getAppId() {
    return appId;
  }

  /**
   * @param appId appId or {@code null} for none
   */
  public Key setAppId(java.lang.String appId) {
    this.appId = appId;
    return this;
  }

  /**
   * @return value or {@code null} for none
   */
  public java.lang.Boolean getComplete() {
    return complete;
  }

  /**
   * @param complete complete or {@code null} for none
   */
  public Key setComplete(java.lang.Boolean complete) {
    this.complete = complete;
    return this;
  }

  /**
   * @return value or {@code null} for none
   */
  public java.lang.Long getId() {
    return id;
  }

  /**
   * @param id id or {@code null} for none
   */
  public Key setId(java.lang.Long id) {
    this.id = id;
    return this;
  }

  /**
   * @return value or {@code null} for none
   */
  public java.lang.String getKind() {
    return kind;
  }

  /**
   * @param kind kind or {@code null} for none
   */
  public Key setKind(java.lang.String kind) {
    this.kind = kind;
    return this;
  }

  /**
   * @return value or {@code null} for none
   */
  public java.lang.String getName() {
    return name;
  }

  /**
   * @param name name or {@code null} for none
   */
  public Key setName(java.lang.String name) {
    this.name = name;
    return this;
  }

  /**
   * @return value or {@code null} for none
   */
  public java.lang.String getNamespace() {
    return namespace;
  }

  /**
   * @param namespace namespace or {@code null} for none
   */
  public Key setNamespace(java.lang.String namespace) {
    this.namespace = namespace;
    return this;
  }

  /**
   * @return value or {@code null} for none
   */
  public Key getParent() {
    return parent;
  }

  /**
   * @param parent parent or {@code null} for none
   */
  public Key setParent(Key parent) {
    this.parent = parent;
    return this;
  }

  // Covariant overrides: narrow the return type so chained calls keep the Key type.
  @Override
  public Key set(String fieldName, Object value) {
    return (Key) super.set(fieldName, value);
  }

  @Override
  public Key clone() {
    return (Key) super.clone();
  }
}
apache-2.0
flipkart-incubator/flux
runtime/src/main/java/com/flipkart/flux/deploymentunit/DeploymentUnit.java
11118
/* * Copyright 2012-2016, the original author or authors. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.flipkart.flux.deploymentunit; import static com.flipkart.flux.client.constant.ClientConstants._VERSION; import java.io.IOException; import java.lang.annotation.Annotation; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.util.HashMap; import java.util.List; import java.util.Map; import org.apache.commons.configuration.Configuration; import org.apache.commons.io.IOUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import com.flipkart.flux.api.core.FluxError; import com.flipkart.flux.client.intercept.MethodId; import com.flipkart.flux.client.model.Task; import com.flipkart.flux.client.runtime.Stoppable; import com.flipkart.polyguice.config.YamlConfiguration; /** * <code>DeploymentUnit</code> represents a Deployment Unit. * * @author shyam.akirala * @author gaurav.ashok */ public class DeploymentUnit { private static final Logger LOGGER = LogManager.getLogger(DeploymentUnit.class); /** * Key in the config file, which has list of workflow/task class FQNs */ private static final String WORKFLOW_CLASSES = "workflowClasses"; /** * Name of the deployment unit. 
*/ private String name; /** * Version */ private Integer version; /** * Class loader of the deployment unit */ private DeploymentUnitClassLoader deploymentUnitClassLoader; /** * Tasks which belong to the deployment unit */ private Map<String, Method> taskMethods; /** * Object Mapper class instance which used for serialization/deserialization through out this deployment unit. Having it here to avoid multiple instances */ private Object objectMapperInstance; /** * Object of {@link com.google.inject.Injector} which is used to provide instances of classes which are in this deployment unit */ private Object injectorClassInstance; /** * Object of {@link Stoppable} to stop/close all acquired resources */ private Object stoppableInstance; /** * Configuration of this deployment unit which is specified in flux_config.yml file */ private YamlConfiguration configuration; /** * Constructor */ public DeploymentUnit(String name, Integer version, DeploymentUnitClassLoader deploymentUnitClassLoader, YamlConfiguration configuration) { this.name = name; this.version = version; this.deploymentUnitClassLoader = deploymentUnitClassLoader; this.configuration = configuration; this.taskMethods = new HashMap<>(); // populate all methods annotated with {@link Task} populateTaskMethods(); // load ClassLoaderInjector class from app class loader to deployment unit's class loader. loadClassLoaderInjector(); // create object mapper instance per deployment unit which is used for serialization/deserialization of {@link com.flipkart.flux.domain.Event eventData} createObjectMapperInstance(); // load {@link Stoppable} instance so that it can be used later to release resources. 
loadStoppableInstance(); } public void close() { // release all acquired resources if (stoppableInstance != null) { try { Method stopMethod = stoppableInstance.getClass().getMethod("stop"); stopMethod.invoke(stoppableInstance); } catch (NoSuchMethodException | IllegalAccessException e) { LOGGER.error("Unexpected error while calling stop method for deploymentUnit: {}/{}", name, version); } catch (InvocationTargetException e) { LOGGER.error("Exception occurred when stop method called for deploymentUnit: {}/{}", name, version, e); } } // close the classLoader try { deploymentUnitClassLoader.close(); } catch (IOException e) { LOGGER.error("IOexception while closing classLoader", e); } } /** * Loads {@Link ClassLoaderInjector} class into given deployment unit's class loader and returns it. */ private void loadClassLoaderInjector() { Class injectorClass = null; try { //Convert the class into bytes byte[] classBytes = IOUtils.toByteArray(this.getClass().getResourceAsStream("/com/flipkart/flux/deploymentunit/ClassLoaderInjector.class")); injectorClass = deploymentUnitClassLoader.defineClass(ClassLoaderInjector.class.getCanonicalName(), classBytes); } catch (LinkageError le) { // This exception never comes in ideal world. Can occur while unit testing as class is already loaded // (while unit testing we use App classloader as parent for Deployment unit class loader, due to that this class would be already loaded) // TODO: Altering production code for unit testing is not good. Find a workaround. LOGGER.error("End of the world! 
Seems ClassloaderInjector.class is loaded already in this deployment.", le); try { injectorClass = deploymentUnitClassLoader.loadClass("com.flipkart.flux.deploymentunit.ClassLoaderInjector"); } catch (ClassNotFoundException e) { throw new FluxError(FluxError.ErrorType.runtime, "Unable to load class ClassLoaderInjector into deployment unit's class loader.", e); } } catch (IOException e) { throw new FluxError(FluxError.ErrorType.runtime, "Unexpected error while converting ClassLoaderInjector.class to bytes.", e); } try { Class guiceModuleClass = deploymentUnitClassLoader.loadClass("com.google.inject.Module"); String DUModuleClassFQN = String.valueOf(configuration.getProperty("guiceModuleClass")); //check if user has specified any guice module class name in deployment unit configuration file, if not create an empty injector if (DUModuleClassFQN == null || DUModuleClassFQN.trim().isEmpty() || DUModuleClassFQN.equals("null")) { injectorClassInstance = injectorClass.newInstance(); } else { injectorClassInstance = injectorClass.getConstructor(guiceModuleClass).newInstance(deploymentUnitClassLoader.loadClass(DUModuleClassFQN).newInstance()); } } catch (Exception e) { throw new FluxError(FluxError.ErrorType.runtime, "Unable to load class ClassLoaderInjector into deployment unit's class loader.", e); } } /** * Creates an instance of the object mapper for this deployment unit */ private void createObjectMapperInstance() { stoppableInstance = null; try { Method getInstanceMethod = injectorClassInstance.getClass().getMethod("getInstance", Class.class); Class objectMapper = deploymentUnitClassLoader.loadClass("com.fasterxml.jackson.databind.ObjectMapper"); objectMapperInstance = getInstanceMethod.invoke(injectorClassInstance, objectMapper); } catch (Exception e) { throw new FluxError(FluxError.ErrorType.runtime, "Error occurred while creating Object Mapper instance for Deployment Unit: " + name + "/" + version, e); } } /** * Loads {@link Stoppable} instance using the injector */ 
private void loadStoppableInstance() { try { Method getInstanceMethod = injectorClassInstance.getClass().getMethod("getInstance", Class.class); Class stoppableClass = deploymentUnitClassLoader.loadClass("com.flipkart.flux.client.runtime.Stoppable"); stoppableInstance = getInstanceMethod.invoke(injectorClassInstance, stoppableClass); } catch (Exception e) { LOGGER.error("Unable to find/load Stoppable instance for deploymentUnit: {}/{}", name, version, e); } } /** * Given a class loader, retrieves workflow classes names from config file, and returns methods * which are annotated with {@link com.flipkart.flux.client.model.Task} annotation in those classes. */ private void populateTaskMethods() { List<String> classNames = (List<String>) configuration.getProperty(WORKFLOW_CLASSES); try { //loading this class separately in this class loader as the following isAnnotationPresent check returns false, if //we use default class loader's Task, as both class loaders don't have any relation between them. 
Class taskAnnotationClass = deploymentUnitClassLoader.loadClass(Task.class.getCanonicalName()); for (String name : classNames) { Class clazz = deploymentUnitClassLoader.loadClass(name); for (Method method : clazz.getMethods()) { if (method.isAnnotationPresent(taskAnnotationClass)) { Annotation taskAnnotation = method.getAnnotationsByType(taskAnnotationClass)[0]; long version = 0; for (Method annotationMethod : taskAnnotationClass.getDeclaredMethods()) { if (annotationMethod.getName().equals("version")) { version = (Long) annotationMethod.invoke(taskAnnotation); } } MethodId methodId = new MethodId(method); String taskIdentifier = methodId.toString() + _VERSION + version; taskMethods.put(taskIdentifier, method); } } } } catch (Exception e) { throw new FluxError(FluxError.ErrorType.runtime, "Error while getting task methods for deploymentUnit: " + name + "/" + version, e); } } /** * Accessor methods */ public String getName() { return name; } public DeploymentUnitClassLoader getDeploymentUnitClassLoader() { return deploymentUnitClassLoader; } public Map<String, Method> getTaskMethods() { return taskMethods; } public Object getObjectMapperInstance() { return objectMapperInstance; } public Object getInjectorClassInstance() { return injectorClassInstance; } public Configuration getTaskConfiguration() { return configuration.subset("taskConfig"); } public Integer getVersion() { return this.version; } }
apache-2.0
sherl0cks/maven-archetypes
camel-kie-spring-eap-war-source/camel-kie-spring-eap-war-source-test-harness/src/main/java/com/rhc/transactions/MockSpringTransactionManager.java
886
package com.rhc.transactions; import org.springframework.transaction.PlatformTransactionManager; import org.springframework.transaction.TransactionDefinition; import org.springframework.transaction.TransactionException; import org.springframework.transaction.TransactionStatus; import org.springframework.transaction.support.SimpleTransactionStatus; /** * * This is a basic mock trx manager to support mock repositories in camel tests * */ public class MockSpringTransactionManager implements PlatformTransactionManager { @Override public TransactionStatus getTransaction( TransactionDefinition definition ) throws TransactionException { return new SimpleTransactionStatus(); } @Override public void commit( TransactionStatus status ) throws TransactionException { } @Override public void rollback( TransactionStatus status ) throws TransactionException { } }
apache-2.0
Subterranean-Security/Crimson
src/main/java/com/subterranean_security/crimson/viewer/net/ViewerCommands.java
15026
/****************************************************************************** * * * Copyright 2016 Subterranean Security * * * * Licensed under the Apache License, Version 2.0 (the "License"); * * you may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * * * http://www.apache.org/licenses/LICENSE-2.0 * * * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * * limitations under the License. * * * *****************************************************************************/ package com.subterranean_security.crimson.viewer.net; import static com.subterranean_security.crimson.universal.Flags.DEV_MODE; import java.io.File; import java.io.IOException; import java.text.SimpleDateFormat; import java.util.Date; import java.util.List; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.subterranean_security.crimson.core.net.TimeoutConstants; import com.subterranean_security.crimson.core.net.MessageFuture.MessageTimeout; import com.subterranean_security.crimson.core.store.LcvidStore; import com.subterranean_security.crimson.core.store.NetworkStore; import com.subterranean_security.crimson.core.util.FileUtil; import com.subterranean_security.crimson.core.util.IDGen; import com.subterranean_security.crimson.proto.core.Generator.ClientConfig; import com.subterranean_security.crimson.proto.core.Generator.GenReport; import com.subterranean_security.crimson.proto.core.Misc.AuthMethod; import com.subterranean_security.crimson.proto.core.Misc.Outcome; import com.subterranean_security.crimson.proto.core.net.sequences.Chat.RQ_Chat; import com.subterranean_security.crimson.proto.core.net.sequences.ClientAuth.RQ_CreateAuthMethod; import 
com.subterranean_security.crimson.proto.core.net.sequences.ClientAuth.RQ_RemoveAuthMethod; import com.subterranean_security.crimson.proto.core.net.sequences.ClientControl.RQ_ChangeSetting; import com.subterranean_security.crimson.proto.core.net.sequences.Generator.RQ_Generate; import com.subterranean_security.crimson.proto.core.net.sequences.Keylogger.RQ_KeyUpdate; import com.subterranean_security.crimson.proto.core.net.sequences.Log.LogFile; import com.subterranean_security.crimson.proto.core.net.sequences.Log.LogType; import com.subterranean_security.crimson.proto.core.net.sequences.Log.RQ_Logs; import com.subterranean_security.crimson.proto.core.net.sequences.MSG.Message; import com.subterranean_security.crimson.proto.core.net.sequences.Screenshot.RQ_QuickScreenshot; import com.subterranean_security.crimson.proto.core.net.sequences.State.RQ_ChangeClientState; import com.subterranean_security.crimson.proto.core.net.sequences.State.RQ_ChangeServerState; import com.subterranean_security.crimson.proto.core.net.sequences.State.StateType; import com.subterranean_security.crimson.proto.core.net.sequences.Update.RQ_GetClientConfig; import com.subterranean_security.crimson.universal.Universal; import com.subterranean_security.crimson.viewer.store.ViewerProfileStore; import com.subterranean_security.crimson.viewer.ui.screen.generator.Report; import com.subterranean_security.crimson.viewer.ui.screen.main.MainFrame; public final class ViewerCommands { private static final Logger log = LoggerFactory.getLogger(ViewerCommands.class); private ViewerCommands() { } public static Outcome changeServerState(StateType st) { log.debug("Changing server state: {}", st.toString()); Outcome.Builder outcome = Outcome.newBuilder(); try { Message m = NetworkStore.route(Message.newBuilder().setId(IDGen.msg()).setRqChangeServerState( RQ_ChangeServerState.newBuilder().setNewState(st)), TimeoutConstants.DEFAULT); if (m == null) { outcome.setResult(false).setComment("Request timeout"); } else if 
(!m.getRsChangeServerState().getOutcome().getResult()) { outcome.setResult(false).setComment(!m.getRsChangeServerState().getOutcome().getComment().isEmpty() ? m.getRsChangeServerState().getOutcome().getComment() : "no comment"); } else { outcome.setResult(true); } } catch (InterruptedException e) { outcome.setResult(false).setComment("Interrupted"); } catch (MessageTimeout e) { // TODO Auto-generated catch block e.printStackTrace(); } return outcome.build(); } public static Outcome changeClientState(int cid, StateType st) { log.debug("Changing client state: {}", st.toString()); Outcome.Builder outcome = Outcome.newBuilder(); try { Message m = NetworkStore.route(Message.newBuilder().setId(IDGen.msg()).setRid(cid).setSid(LcvidStore.cvid) .setRqChangeClientState(RQ_ChangeClientState.newBuilder().setNewState(st)), TimeoutConstants.DEFAULT); if (m == null) { outcome.setResult(false).setComment("Request timeout"); } else { return m.getRsChangeClientState().getOutcome(); } } catch (InterruptedException e) { outcome.setResult(false).setComment("Interrupted"); } catch (MessageTimeout e) { // TODO Auto-generated catch block e.printStackTrace(); } return outcome.build(); } public static Outcome createAuthMethod(AuthMethod at) { Outcome.Builder outcome = Outcome.newBuilder(); try { Message m = NetworkStore.route(Message.newBuilder().setId(IDGen.msg()).setRqCreateAuthMethod( RQ_CreateAuthMethod.newBuilder().setAuthMethod(at)), TimeoutConstants.DEFAULT); if (m == null) { outcome.setResult(false).setComment("Request timeout"); } else if (!m.getRsCreateAuthMethod().getOutcome().getResult()) { outcome.setResult(false).setComment(!m.getRsCreateAuthMethod().getOutcome().getComment().isEmpty() ? 
m.getRsCreateAuthMethod().getOutcome().getComment() : "no comment"); } else { outcome.setResult(true); } } catch (InterruptedException e) { outcome.setResult(false).setComment("Interrupted"); } catch (MessageTimeout e) { // TODO Auto-generated catch block e.printStackTrace(); } return outcome.build(); } public static Outcome removeAuthMethod(int id) { Outcome.Builder outcome = Outcome.newBuilder(); try { Message m = NetworkStore.route( Message.newBuilder().setRqRemoveAuthMethod(RQ_RemoveAuthMethod.newBuilder().setId(id)), TimeoutConstants.DEFAULT); if (m == null) { outcome.setResult(false).setComment("Request timeout"); } else if (!m.getRsRemoveAuthMethod().getResult()) { outcome.setResult(false).setComment( m.getRsRemoveAuthMethod().hasComment() ? m.getRsRemoveAuthMethod().getComment() : "no comment"); } else { outcome.setResult(true); } } catch (InterruptedException e) { outcome.setResult(false).setComment("Interrupted"); } catch (MessageTimeout e) { // TODO Auto-generated catch block e.printStackTrace(); } return outcome.build(); } public static void generate(ClientConfig config, String output, Date creation) { int id = IDGen.msg(); RQ_Generate.Builder rq = RQ_Generate.newBuilder().setInternalConfig(config); NetworkStore.route(Message.newBuilder().setId(id).setRqGenerate(rq).build()); try { Message rs = NetworkStore.getResponse(0, id, TimeoutConstants.RQ_Generate); // success final GenReport gr = rs.getRsGenerate().getReport(); Runnable r = new Runnable() { public void run() { Report rep = new Report(gr); rep.setVisible(true); } }; if (gr.getResult()) { MainFrame.main.np.addNote("info", "Generation complete!", "Click for report", r); FileUtil.writeFile(rs.getRsGenerate().getInstaller().toByteArray(), new File(output)); } else { MainFrame.main.np.addNote("error", "Generation failed!", "Click for report", r); log.error("Could not generate an installer"); } } catch (InterruptedException e) { log.debug("Generation interrupted"); } catch (IOException e) { 
log.error("Failed to write the installer"); e.printStackTrace(); } catch (MessageTimeout e) { MainFrame.main.np.addNote("error", "Generation Timed Out!"); log.error("Could not generate an installer. Check the network."); } } public static void trigger_key_update(int cid, Date target) { log.debug("Triggering keylog update"); try { Message m = NetworkStore.route( Message.newBuilder().setId(IDGen.msg()).setRqKeyUpdate( RQ_KeyUpdate.newBuilder().setCid(cid).setStartDate(target == null ? 0 : target.getTime())), TimeoutConstants.RQ_KeyUpdate); if (m != null) { log.debug("Update result: " + m.getRsKeyUpdate().getResult()); } } catch (InterruptedException e) { // TODO Auto-generated catch block e.printStackTrace(); } catch (MessageTimeout e) { // TODO Auto-generated catch block e.printStackTrace(); } } public static ClientConfig getClientConfig(int cid) { log.debug("Retrieving client config"); try { Message m = NetworkStore.route(Message.newBuilder().setId(IDGen.msg()).setRid(cid).setSid(LcvidStore.cvid) .setRqGetClientConfig(RQ_GetClientConfig.newBuilder()), TimeoutConstants.DEFAULT); if (m != null) { return m.getRsGetClientConfig().getConfig(); } } catch (InterruptedException e) { // TODO Auto-generated catch block e.printStackTrace(); } catch (MessageTimeout e) { // TODO Auto-generated catch block e.printStackTrace(); } return null; } public static Outcome updateClient(int cid) { Outcome.Builder outcome = Outcome.newBuilder(); ClientConfig client = getClientConfig(cid); if (client == null) { outcome.setResult(false).setComment("Could not obtain client configuration"); } else if (client.getBuildNumber() >= Universal.build && !DEV_MODE) { outcome.setResult(false).setComment("No updated needed"); } else { try { Message m = NetworkStore.route( Message.newBuilder().setId(IDGen.msg()) .setRqGenerate(RQ_Generate.newBuilder().setSendToCid(cid).setInternalConfig(client)), TimeoutConstants.RQ_Generate); if (m == null) { outcome.setResult(false).setComment("No response"); } else { 
GenReport gr = m.getRsGenerate().getReport(); outcome.setResult(gr.getResult()); if (!gr.getComment().isEmpty()) { outcome.setComment(gr.getComment()); } } } catch (InterruptedException e) { outcome.setResult(false).setComment("Interrupted"); } catch (MessageTimeout e) { // TODO Auto-generated catch block e.printStackTrace(); } } return outcome.build(); } private static SimpleDateFormat screenshotDate = new SimpleDateFormat("YYYY-MM-dd hh.mm.ss"); public static Outcome quickScreenshot(int cid) { Outcome.Builder outcome = Outcome.newBuilder(); try { Message m = NetworkStore.route(Message.newBuilder().setId(IDGen.msg()).setRid(cid).setSid(LcvidStore.cvid) .setRqQuickScreenshot(RQ_QuickScreenshot.newBuilder()), TimeoutConstants.DEFAULT); File file = new File( System.getProperty("user.home") + "/Crimson/" + screenshotDate.format(new Date()) + ".jpg"); file.getParentFile().mkdirs(); if (m != null) { outcome.setComment(file.getAbsolutePath()); FileUtil.writeFile(m.getRsQuickScreenshot().getBin().toByteArray(), file); outcome.setResult(file.exists()); } else { outcome.setResult(false).setComment("Request timeout"); } } catch (InterruptedException e) { outcome.setResult(false).setComment("Error: Interrupted"); } catch (IOException e) { outcome.setResult(false).setComment("Error: " + e.getMessage()); } catch (MessageTimeout e) { // TODO Auto-generated catch block e.printStackTrace(); } return outcome.build(); } public static LogFile getLog(int cid, LogType type) { try { Message m = NetworkStore.route(Message.newBuilder().setId(IDGen.msg()).setRid(cid).setSid(LcvidStore.cvid) .setRqLogs(RQ_Logs.newBuilder().setLog(type)), TimeoutConstants.DEFAULT); if (m != null) { return m.getRsLogs().getLog(0); } } catch (InterruptedException e) { } catch (MessageTimeout e) { // TODO Auto-generated catch block e.printStackTrace(); } return null; } public static List<LogFile> getLogs(int cid) { try { Message m = 
NetworkStore.route(Message.newBuilder().setId(IDGen.msg()).setRid(cid).setSid(LcvidStore.cvid) .setRqLogs(RQ_Logs.newBuilder()), TimeoutConstants.DEFAULT); if (m != null) { return m.getRsLogs().getLogList(); } } catch (InterruptedException e) { } catch (MessageTimeout e) { // TODO Auto-generated catch block e.printStackTrace(); } return null; } public static Outcome changeSetting(int cid, RQ_ChangeSetting rq) { Outcome.Builder outcome = Outcome.newBuilder(); try { Message m = NetworkStore.route( Message.newBuilder().setId(IDGen.msg()).setRid(cid).setSid(LcvidStore.cvid).setRqChangeSetting(rq), TimeoutConstants.DEFAULT); if (m == null) { outcome.setResult(false).setComment("Request timed out"); } else { if (m.getRsChangeSetting().getResult().getResult()) { // update profile if (rq.hasKeyloggerState()) { ViewerProfileStore.getClient(cid).setKeyloggerState(rq.getKeyloggerState()); } if (rq.hasFlushMethod()) { ViewerProfileStore.getClient(cid).setKeyloggerTrigger(rq.getFlushMethod()); } if (rq.hasFlushValue()) { ViewerProfileStore.getClient(cid).setKeyloggerTriggerValue(rq.getFlushValue()); } } return m.getRsChangeSetting().getResult(); } } catch (InterruptedException e) { outcome.setResult(false).setComment("Interrupted"); } catch (MessageTimeout e) { // TODO Auto-generated catch block e.printStackTrace(); } return outcome.build(); } public static Outcome openChat(int cid, boolean prompt) { Outcome.Builder outcome = Outcome.newBuilder(); try { Message m = NetworkStore.route(Message.newBuilder().setId(IDGen.msg()).setRid(cid).setSid(LcvidStore.cvid) .setRqChat(RQ_Chat.newBuilder()), TimeoutConstants.DEFAULT); if (m == null) { outcome.setResult(false).setComment("Request timed out"); } else { // TODO open chat outcome.setResult(true); } } catch (InterruptedException e) { outcome.setResult(false).setComment("Interrupted"); } catch (MessageTimeout e) { // TODO Auto-generated catch block e.printStackTrace(); } return outcome.build(); } }
apache-2.0
hopestar720/aioweb
src/com/xhsoft/framework/uam/service/IRoleService.java
206
package com.xhsoft.framework.uam.service;

import com.xhsoft.framework.base.service.IBaseService;
import com.xhsoft.framework.uam.entity.Role;

/**
 * Service-layer contract for {@link Role} entities.
 * <p>
 * Marker interface: all generic CRUD operations are inherited from
 * {@link IBaseService}; no role-specific operations are declared here yet.
 */
public interface IRoleService extends IBaseService<Role> {

}
apache-2.0
spccold/sailfish
sailfish-kernel/src/main/java/sailfish/remoting/channel/EagerExchangeChannel.java
1238
/** * * Copyright 2016-2016 spccold * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package sailfish.remoting.channel; import io.netty.bootstrap.Bootstrap; import sailfish.remoting.exceptions.SailfishException; /** * @author spccold * @version $Id: EagerExchangeChannel.java, v 0.1 2016年11月21日 下午11:17:02 spccold Exp $ */ public final class EagerExchangeChannel extends SingleConnctionExchangeChannel { EagerExchangeChannel(Bootstrap bootstrap, ExchangeChannelGroup parent, int reconnectInterval) throws SailfishException { super(bootstrap, parent, reconnectInterval, true); } @Override public boolean isAvailable() { if (isClosed()) { return false; } return super.isAvailable(); } }
apache-2.0
qjafcunuas/jbromo
jbromo-dao/jbromo-dao-jpa/jbromo-dao-jpa-lib/src/main/java/org/jbromo/dao/jpa/query/jpql/where/predicate/AbstractUniquePredicate.java
3145
/*-
 * Copyright (C) 2013-2014 The JBromo Authors.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
package org.jbromo.dao.jpa.query.jpql.where.predicate;

import java.util.List;

import org.jbromo.common.StringUtil;
import org.jbromo.common.exception.MessageLabelException;
import org.jbromo.dao.common.exception.DataFindException;
import org.jbromo.dao.jpa.query.jpql.where.JpqlWhereBuilder;
import org.jbromo.dao.jpa.query.jpql.where.condition.ICondition;

import lombok.AccessLevel;
import lombok.Getter;

/**
 * Define unique predicate. A unique predicate has only one child (for example the not operator).
 * @author qjafcunuas
 */
public abstract class AbstractUniquePredicate extends AbstractPredicate {

    /**
     * The child condition.
     */
    @Getter(AccessLevel.PROTECTED)
    private ICondition child;

    /**
     * Default constructor.
     * @param where the where builder.
     */
    public AbstractUniquePredicate(final JpqlWhereBuilder where) {
        super(where);
    }

    /**
     * Returns the JPQL operator keyword (e.g. "not") rendered before the child condition.
     */
    @Override
    protected abstract String getOperator();

    /**
     * Attaches the single child condition.
     * @param condition the child condition.
     * @throws MessageLabelException if a child has already been set — a unique
     *         predicate accepts exactly one child.
     */
    @Override
    void add(final ICondition condition) throws MessageLabelException {
        if (this.child != null) {
            throw new DataFindException("Cannot add more than one element on a UniquePredicate");
        }
        this.child = condition;
    }

    /**
     * Renders "OPERATOR ( child ) " into the builder. If the child renders nothing,
     * everything appended here is rolled back so the query is left untouched.
     * @param builder the JPQL string under construction.
     * @param parameters query parameter values, appended to by the child.
     */
    @Override
    public void build(final StringBuilder builder, final List<Object> parameters) {
        if (isEmpty()) {
            return;
        }
        // Remember where we started so the operator/parenthesis can be rolled back.
        final int length = builder.length();
        builder.append(getOperator());
        builder.append(StringUtil.SPACE);
        builder.append(StringUtil.PARENTHESIS_OPEN);
        // Length after the opening parenthesis: used to detect an empty child rendering.
        final int all = builder.length();
        getChild().build(builder, parameters);
        if (all == builder.length()) {
            // Nothing has been added.
            builder.delete(length, all);
            return;
        }
        builder.append(StringUtil.PARENTHESIS_CLOSE);
        builder.append(StringUtil.SPACE);
    }

    /**
     * @return true when there is no child or the child itself is empty.
     */
    @Override
    public boolean isEmpty() {
        return getChild() == null || getChild().isEmpty();
    }
}
apache-2.0
socialmetrix/excel-templater
src/main/java/com/socialmetrix/templater/utils/DAG.java
2197
package com.socialmetrix.templater.utils; import java.util.*; /** * Simplified Directed acyclic graph to find relationship trees. */ public class DAG { private final boolean[][] adjacencyMatrix; public DAG(boolean[][] adjacencyMatrix) { this.adjacencyMatrix = adjacencyMatrix; } /** * Recursively removes relationships of nodes which, transitively can relate to other.<br> * The resulting state is a DAG with many trees. */ public DAG pseudoTransitiveReduction() { // reduce adjMatrix to get multiple trees List<Integer> roots = getRoots(); deleteTransitiveRelationshipsForEach(roots); return this; } private void deleteTransitiveRelationshipsForEach(List<Integer> parents) { for (Integer parent : parents) { List<Integer> children = killNonExclusiveRelationshipsAndRetrieveExclusiveOnes(parent); deleteTransitiveRelationshipsForEach(children); } } private List<Integer> killNonExclusiveRelationshipsAndRetrieveExclusiveOnes(int parent) { List<Integer> result = new ArrayList<Integer>(); for (Integer child : getChildren(parent)) { if (countEdgesTo(child) == 1) { result.add(child); } else { adjacencyMatrix[parent][child] = false; } } return result; } public List<Integer> getChildren(int parent) { List<Integer> result = new ArrayList<Integer>(); for (int child = 0; child < adjacencyMatrix.length; child++) { if (adjacencyMatrix[parent][child]) { result.add(child); } } return result; } public List<Integer> getRoots() { List<Integer> result = new ArrayList<Integer>(); for (int i = 0; i < adjacencyMatrix.length; i++) { if (countEdgesTo(i) == 0) { result.add(i); } } return result; } private int countEdgesTo(int destIndex) { int sum = 0; for (int i = 0; i < adjacencyMatrix.length; i++) { if (adjacencyMatrix[i][destIndex]) { sum++; } } return sum; } @Override public int hashCode() { return Arrays.hashCode(adjacencyMatrix); } @Override public boolean equals(Object obj) { if (obj == null || getClass() != obj.getClass()) return false; DAG other = (DAG) obj; return 
Arrays.deepEquals(adjacencyMatrix, other.adjacencyMatrix); } }
apache-2.0
fedorchuck/jsqlb
src/main/java/com/github/fedorchuck/jsqlb/postgresql/datatypes/UUID.java
448
package com.github.fedorchuck.jsqlb.postgresql.datatypes;

import com.github.fedorchuck.jsqlb.postgresql.PGDataTypes;
import lombok.EqualsAndHashCode;
import lombok.Getter;

/**
 * Universally unique identifier.
 * <p>
 * Maps the PostgreSQL {@code UUID} column type. Getters and
 * equals/hashCode are generated by Lombok.
 *
 * @author <a href="http://vl-fedorchuck.rhcloud.com/">Volodymyr Fedorchuk</a>.
 */
@Getter
@EqualsAndHashCode
public class UUID implements PGDataTypes {
    // SQL type name as it appears in generated DDL/queries.
    private final String name = "UUID";
    // UUID takes no length/precision modifier, hence a capacity of zero.
    private final int capacity = 0;
}
apache-2.0
TonyWang-UMU/TFG-TWang
opencds-parent/opencds-common/src/main/java/org/opencds/common/xml/XmlEntity.java
31433
/** * Copyright 2011 OpenCDS.org * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package org.opencds.common.xml; import java.io.Serializable; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; import java.util.Set; import java.util.TreeSet; import org.opencds.common.utilities.StringUtility; /** * <p>XmlElement represents an XML entity.</p> * <p/> * * @author Kensaku Kawamoto * @version 1.00 */ public class XmlEntity extends Object implements Serializable { /** * */ private static final long serialVersionUID = -4531869042716473060L; protected String myLabel; // The label that identifies the XmlEntity (e.g. "FirstName") protected String myValue; // The value of the XmlEntity (e.g. "Bob") protected boolean myValueIsCDATA; // Whether the value is CDATA (meaning shouldn't be parsed by XML parser) protected MixedContent myValue_mixedContent; // will be populated if entity holds mixed content protected boolean myValueIsMixedContent; protected ArrayList<String> myComments; // Comments associated with (preceding) the XmlEntity. protected ArrayList<XmlProcessingInstruction> myProcessingInstructions; // XmlProcessingInstruction's associated with (preceding) the XmlEntity. protected final String myXmlHeaderString = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"; // The XML header to be added to the outputs of the getAs[Concise/Verbose]XmlStringBuffer methods. 
// NOTE: if changing from "UTF-8", also change XmlConverter.myXmlEncodingCharSet // note: attributes are in HashMap because, by definition, // XML attributes are orderless protected ArrayList<XmlEntity> myChildren; // contains XMLEntity's protected HashMap<String, String> myAttributes; // key = String label, target = String value /** * Creates XmlEntity object with the specified parameters set to indicated values. * * @param label The label of the XmlEntity. * @param value The value of the XmlEntity. * @param valueIsCDATA Whether the value is a CDATA entry. */ public XmlEntity(String label, String value, boolean valueIsCDATA) { initialize(); myLabel = label; myValue = value; myValueIsCDATA = valueIsCDATA; } /** * Creates XmlEntity object with the specified parameters set to indicated values. * * @param label The label of the XmlEntity. */ public XmlEntity(String label) { initialize(); myLabel = label; } /** * Creates new XmlEntity where label == null, value == null, CDATA == false, comments == null. */ public XmlEntity() { initialize(); } protected void initialize() { myValueIsCDATA = false; myComments = new ArrayList<String>(); myProcessingInstructions = new ArrayList<XmlProcessingInstruction>(); myChildren = new ArrayList<XmlEntity>(); myAttributes = new HashMap<String, String>(); myValue_mixedContent = new MixedContent(); myValueIsMixedContent = false; } /** * Sets the label of the XmlEntity to the specified label. * * @param label The label of the XmlEntity. */ public void setLabel(String label) { myLabel = label; } /** * Sets the value of the XmlEntity to the specified value. * * @param value The value of the XmlEntity. */ public void setValue(String value) { myValue = value; } /** * Sets whether the value of the XmlEntity is a CDATA item. * * @param valueIsCDATA Whether the value of the XmlEntity is a CDATA item. 
*/ public void setValueIsCDATA(boolean valueIsCDATA) { myValueIsCDATA = valueIsCDATA; } public void setValue_MixedContent(MixedContent mixedContent) { this.myValue_mixedContent = mixedContent; this.myValueIsMixedContent = true; } public void addMixedContentEntry(String stringMixedContentEntry) { this.myValue_mixedContent.addMixedContentEntry(stringMixedContentEntry); } public void addMixedContentEntry(XmlEntity xmlEntityMixedContentEntry) { this.myValue_mixedContent.addMixedContentEntry(xmlEntityMixedContentEntry); } public void setValueIsMixedContent(boolean valueIsMixedContent) { this.myValueIsMixedContent = valueIsMixedContent; } /** * Adds comment associated with (preceding) the XmlEntity. * * @param comment Comment associated with the XmlEntity. */ public void addComment(String comment) { myComments.add(comment); } /** * Adds processing instruction associated with (preceding) the XmlEntity. * * @param processingInstruction */ public void addProcessingInstruction(XmlProcessingInstruction processingInstruction) { myProcessingInstructions.add(processingInstruction); } /** * Add to this XmlEntity the specified child XmlEntity. * * @param childXmlEntity An XmlEntity which is the child of this XmlEntity. */ public void addChild(XmlEntity childXmlEntity) { myChildren.add(childXmlEntity); } /** * Adds child at the index specified (use position 0 if want to add as first * in list). * * @param childXmlEntity * @param index */ public void addChildAtIndex(XmlEntity childXmlEntity, int index) throws IndexOutOfBoundsException { try { myChildren.add(index, childXmlEntity); } catch (IndexOutOfBoundsException e) { throw e; } } /** * Adds an attribute to this XmlEntity. If attribute already exists, overwrites existing value. * * @param attributeLabel The label of the attribute to be added. * @param attributeValue The value of the attribute to be added. 
*/ public void addAttribute(String attributeLabel, String attributeValue) { myAttributes.put(attributeLabel, attributeValue); } /** * Sets attribute for this XmlEntity. If attribute does not exist, it is created. * * @param attributeLabel The label of the attribute to be added. * @param attributeValue The value of the attribute to be added. */ public void setAttribute(String attributeLabel, String attributeValue) { addAttribute(attributeLabel, attributeValue); } /** * Returns the label of the XmlEntity. * * @return The label of the XmlEntity; may be <code>null</code> if not set. */ public String getLabel() { return myLabel; } /** * Returns the value of the XmlEntity. * * @return The value of the XmlEntity; may be <code>null</code> if not set. */ public String getValue() { return myValue; } /** * Returns whether the value of the XmlEntity is of the CDATA type. * * @return <code>true</code> if XmlEntity's value is of the CDATA type, <code>false</code> otherwise. */ public boolean getValueIsCDATA() { return myValueIsCDATA; } public MixedContent getValue_MixedContent() { return myValue_mixedContent; } public boolean isValueMixedContent() { return myValueIsMixedContent; } /** * Returns any comments associated with the XmlEntity; should be null if no comments. * * @return Comments associated with the XmlEntity; should be <code>null</code> if no comments. */ public ArrayList<String> getComments() { return myComments; } /** * Returns array containing XmlProcessingInstruction objects. * * @return */ public ArrayList<XmlProcessingInstruction> getProcessingInstructions() { return myProcessingInstructions; } /** * Returns the ArrayList of XmlEntity objects that are this XmlEntity's children. * * @return ArrayList containing XmlEntity objects that are this XmlEntity's children. */ public ArrayList<XmlEntity> getChildren() { return myChildren; } /** * Returns the ArrayList of XmlEntity objects that are this XmlEntity's children AND have the specified label. 
* Returns an empty array list if no matches exist. * * @param label The label required among this XmlEntity's children in order for that child to be included in the returned ArrayList. * @return An ArrayList containing the children XmlEntity objects that have the specified label. */ public ArrayList<XmlEntity> getChildrenWithLabel(String label) { ArrayList<XmlEntity> arrayListToReturn = new ArrayList<XmlEntity>(); for (int k = 0; k < myChildren.size(); k++) { XmlEntity child = (XmlEntity) myChildren.get(k); if ((child.getLabel()).equals(label)) { arrayListToReturn.add(child); } } return arrayListToReturn; } /** * Returns XmlEntity which is the first child with the label. Returns null if no such entity exists. * * @param label * @return */ public XmlEntity getFirstChildWithLabel(String label) { for (int k = 0; k < myChildren.size(); k++) { XmlEntity child = (XmlEntity) myChildren.get(k); if (child != null) { if ((child.getLabel()).equals(label)) { return child; } } } return null; } /** * Returns the ArrayList of XmlEntity objects that are this XmlEntity's descendants AND have the specified label. * Returns an empty array list if no matches exist. * * @param label The label required among this XmlEntity's descendants in order for that descendant to be included in the returned ArrayList. * @return An ArrayList containing the descendant XmlEntity objects that have the specified label. */ public ArrayList<XmlEntity> getDescendantsWithLabel(String label) { ArrayList<XmlEntity> arrayListToReturn = new ArrayList<XmlEntity>(); ArrayList<XmlEntity> descendants = getDescendants(); for (int a = 0; a < descendants.size(); a++) { XmlEntity descendant = (XmlEntity) descendants.get(a); if (descendant.getLabel().equals(label)) { arrayListToReturn.add(descendant); } } return arrayListToReturn; } /** * Returns the ArrayList of XmlEntity objects that are this XmlEntity's descendants. * Returns an empty array list if no descendants exists. 
* * @return An ArrayList containing the descendant XmlEntity objects. */ public ArrayList<XmlEntity> getDescendants() { ArrayList<XmlEntity> arrayListToReturn = new ArrayList<XmlEntity>(); if (!myValueIsMixedContent) { for (int a = 0; a < myChildren.size(); a++) { XmlEntity child = (XmlEntity) myChildren.get(a); arrayListToReturn.add(child); // recursively add descendants with label arrayListToReturn.addAll(child.getDescendants()); } } else { ArrayList<?> mixedContentEntryList = myValue_mixedContent.getMixedContentEntryList(); for (int b = 0; b < mixedContentEntryList.size(); b++) { MixedContentEntry mixedContentEntry = (MixedContentEntry) mixedContentEntryList.get(b); if (mixedContentEntry.isXmlEntity()) { XmlEntity mixedContentXmlEntry = mixedContentEntry.getXmlEntityEntry(); arrayListToReturn.add(mixedContentXmlEntry); // recursively add descendants with label arrayListToReturn.addAll(mixedContentXmlEntry.getDescendants()); } } } return arrayListToReturn; } /** * Returns XmlEntity which is the first descendant with the label. Returns null if no such entity exists. * * @param label * @return */ public XmlEntity getFirstDescendantWithLabel(String label) { XmlEntity entityToReturn = null; ArrayList<XmlEntity> descendantsWithLabel = getDescendantsWithLabel(label); if (descendantsWithLabel.size() > 0) { return (XmlEntity) descendantsWithLabel.get(0); } return entityToReturn; } /** * Returns XmlEntity which is the first child with label1. Returns null if no such entity exists, or * if a child with label2 is encountered. Introduced to avoid iteration over entire children list if * not necessary. 
* * @param label1 * @param label2 * @return */ public XmlEntity getFirstChildWithLabel1BeforeLabel2(String label1, String label2) { for (int k = 0; k < myChildren.size(); k++) { XmlEntity child = (XmlEntity) myChildren.get(k); String currentLabel = child.getLabel(); if (currentLabel.equals(label1)) { return child; } else if (currentLabel.equals(label2)) { return null; } } return null; } /** * Returns first child. * * @return */ public XmlEntity getFirstChild() { return (XmlEntity) myChildren.get(0); } /** * Returns the n'th child of this XmlEntity; returns null if n is not valid. * * @param n The number of the child XmlEntity; must be 1 or greater, where 1 (not 0) signifies the first child. * @return The XmlEntity corresponding to the n'th child if n is valid, null otherwise. */ public XmlEntity getNthChild(int n) { // if n is invalid if ((n < 1) || (n > myChildren.size())) { return null; } else { return (XmlEntity) myChildren.get(n - 1); } } /** * Gets the labels of all the XmlEntity's attributes. * * @return A Set (implemented as a TreeSet) of attributeLabels (ie keys to myAttributes). */ public Set<String> getAttributeLabels() { //Set setToReturn = new TreeSet(); Set<String> setToReturn = new TreeSet<String>(myAttributes.keySet()); return setToReturn; } /** * Gets the value of this XmlEntity's attribute with the specified attribute label. * * @param attributeLabel The label of the attribute for which the value is requested. * @return The value of the specified label; null if no such attribute exists. */ public String getAttributeValue(String attributeLabel) { return (String) myAttributes.get(attributeLabel); } /** * Get the contents of the XmlEntity represented as a string, with white space and formatting added * for easier reading by a human, as when printed to screen or saved to a text file. Note that * the XML declaration IS now part of the XML string returned. * * @param indentSpacing Number of spaces to add for each level of indentation in the XML. 
* @param includeComments Whether comments should be included in the output.
 * @return The contents of the XmlEntity represented as a string.
 */
public String getAsVerboseXmlString(int indentSpacing, boolean includeComments, boolean includeProcessingInstructions) {
    StringBuilder buffer = new StringBuilder(10000);
    int numberUnclosedElements = 0;
    boolean isInCDATA = false;
    boolean isInComments = false;
    int indexOfMostRecentStartTag = 0;
    int indexOfMostRecentEndTag = 0;
    // get the concise version first; formatting is applied as a second pass
    StringBuffer xmlBuffer = getAsConciseXmlStringBuffer(includeComments, includeProcessingInstructions, true, 0); // 0 indicates start recursion
    // now format it to be more human readable, by adding in
    // carriage lines and white space as appropriate
    // algorithm:
    // 0) if in a comment section (started by "<!-" and ending with "-->"), then
    //    don't make any formatting changes
    //
    // 1) every time there is a "<[not / or !]", add to the number of unclosed elements.
    //    Then, add a '\n + indentSpacing * x' before the <, where x
    //    is the number of unclosed elements.
    //
    // 2) every time there is a "</", decrease number of unclosed elements by 1
    //
    // 3) if there is a "<!", don't change unclosed elements number
    //
    // 4) if "<![CDATA[" is encountered (deemed to be the case with the presence
    //    of "<![" ), then don't do anything until "]]>" is encountered.
    //
    // 5) if this is the last three characters, add all three characters to the copy buffer
    //
    // 6) except for case 5, add the first character being read to the copy buffer
    //
    // 7) also, if there are 2 endElements in a row (as determined by lack
    //    of new element following endElement, go to new line after end of first
    //    endElement
    //
    // The scan examines a sliding window of three characters ("triad") so that
    // multi-character markers ("<!-", "<![", "-->", "]]>") can be recognized.
    for (int index = 0; index < (xmlBuffer.length()) - 2; index++) {
        String triad = (String) xmlBuffer.substring(index, index + 3);
        String firstCharacter = triad.substring(0, 1);
        String secondCharacter = triad.substring(1, 2);
        String thirdCharacter = triad.substring(2, 3);
        if (index == 0) // take care of special case of first character
        {
            buffer.append(firstCharacter);
        } else if (index == (xmlBuffer.length()) - 3) // take care of special case of this being the last three characters
        {
            // if this is the last triad, add all of the characters to the document
            buffer.append(firstCharacter);
            buffer.append(secondCharacter);
            buffer.append(thirdCharacter);
        } else if (isInComments) {
            // inside "<!-- ... -->": copy verbatim, no reformatting
            buffer.append(firstCharacter);
            // check to see if should move out of the comment section
            if ((firstCharacter.equals("-")) && (secondCharacter.equals("-")) && (thirdCharacter.equals(">"))) {
                isInComments = false;
            }
        } else if (isInCDATA) // next, take care of special case of being inside a CDATA section
        {
            // just append the first character; CDATA content is never reformatted
            buffer.append(firstCharacter);
            // check to see if should move out of CDATA
            if ((firstCharacter.equals("]")) && (secondCharacter.equals("]")) && (thirdCharacter.equals(">"))) {
                isInCDATA = false;
            }
        } else {
            if ((!firstCharacter.equals("<")) && (!firstCharacter.equals("]"))) {
                // ordinary content character: just append it and move on
                buffer.append(firstCharacter);
            } else if (firstCharacter.equals("<")) // if this is a <XX
            {
                if (secondCharacter.equals("!")) // if this is a comment or CDATA start
                {
                    // don't change numberUnclosedElements, but indent as if this were the case
                    if (thirdCharacter.equals("-")) {
                        // assume this is a comment section beginning
                        isInComments = true;
                    }
                    if (thirdCharacter.equals("[")) {
                        // assume this is a CDATA section beginning
                        isInCDATA = true;
                    }
                    // new line
                    buffer.append("\n");
                    // indent one level deeper than the current element nesting
                    for (int k = 0; k < (numberUnclosedElements + 1) * indentSpacing; k++) {
                        buffer.append(" ");
                    }
                } else if (secondCharacter.equals("/")) // if this is an end tag
                {
                    numberUnclosedElements--;
                    // add a new line if this is a second end tag in a row
                    if (indexOfMostRecentEndTag > indexOfMostRecentStartTag) {
                        // new line
                        buffer.append("\n");
                        // indent
                        for (int k = 0; k < (numberUnclosedElements + 1) * indentSpacing; k++) {
                            buffer.append(" ");
                        }
                    }
                    // mark the index of end tag
                    indexOfMostRecentEndTag = index;
                } else {
                    // a start tag: one more element is now open
                    numberUnclosedElements++;
                    // mark the index of start tag
                    indexOfMostRecentStartTag = index;
                    // new line
                    buffer.append("\n");
                    // indent
                    for (int k = 0; k < numberUnclosedElements * indentSpacing; k++) {
                        buffer.append(" ");
                    }
                }
                // in all cases, add first character
                buffer.append(firstCharacter);
            } else {
                // case of end of CDATA section (a ']' seen outside CDATA tracking)
                buffer.append(firstCharacter);
            }
        }
    }
    return new String(buffer);
}

/**
 * Get the contents of the XmlEntity represented as a string, WITHOUT any white space and formatting added.
 * This method should be invoked rather than getAsVerboseXmlStringBuffer when the size of the
 * XML representation should be constrained, as when using the output of this method for
 * system-system communication over HTTP. Note that the XML declaration IS now part
 * of the XML string returned.
 *
 * @param includeComments Whether comments should be included in the output.
 * @param includeProcessingInstructions Whether processing instructions should be included in the output.
 * @param includeXmlHeader Whether to include the <?xml ... > XML header
 * @return The contents of the XmlEntity represented as a string.
*/
public String getAsConciseXmlString(boolean includeComments, boolean includeProcessingInstructions, boolean includeXmlHeader) {
    // StringBuffer.toString() yields the same result as new String(buffer)
    // without the needless extra copy.
    return getAsConciseXmlStringBuffer(includeComments, includeProcessingInstructions, includeXmlHeader, 0).toString();
}

/**
 * Same as above function, but includeXmlHeader set to true.
 *
 * @param includeComments whether comments should be included in the output
 * @param includeProcessingInstructions whether processing instructions should be included
 * @return the contents of the XmlEntity represented as a concise string, with XML header
 */
public String getAsConciseXmlString(boolean includeComments, boolean includeProcessingInstructions) {
    // Delegate to the three-argument overload so the two overloads stay in sync.
    return getAsConciseXmlString(includeComments, includeProcessingInstructions, true);
}

/**
 * Get the contents of the XmlEntity represented as a string, WITHOUT any white space and formatting added.
 * This method is protected because it is meant to act as a helper class to the public function
 * of the same name. This method has been sequestered into a separate class in order to enable
 * construction of the string using recursion, where the XML header is added only
 * at the very beginning of the XML document.
 * NOTE: illegal XML characters in attributes and in non-CDATA element values are replaced with escape characters.
 *
 * @param includeComments Whether comments should be included in the output.
 * @param recursionNumber The current depth in the recursion; initial call should indicate a value of 0.
 * @return The contents of the XmlEntity represented as a string.
*/
protected StringBuffer getAsConciseXmlStringBuffer(boolean includeComments, boolean includeProcessingInstructions, boolean includeXmlHeader, int recursionNumber) {
    StringBuffer buffer = new StringBuffer(10000);
    // The XML declaration is emitted only once, at the root of the recursion.
    if (recursionNumber == 0) {
        if (includeXmlHeader) {
            buffer.append(myXmlHeaderString);
        }
    }
    recursionNumber++;
    // Comments attached to this entity precede its start tag.
    if (includeComments) {
        for (int k = 0; k < myComments.size(); k++) {
            buffer.append("<!-- ");
            buffer.append((String) myComments.get(k));
            buffer.append(" -->");
        }
    }
    // Processing instructions (<?target data?>) also precede the start tag.
    if (includeProcessingInstructions) {
        for (int k = 0; k < myProcessingInstructions.size(); k++) {
            XmlProcessingInstruction processingInstruction = (XmlProcessingInstruction) myProcessingInstructions.get(k);
            buffer.append("<?");
            buffer.append(processingInstruction.getTarget());
            buffer.append(" ");
            buffer.append(processingInstruction.getData());
            buffer.append("?>");
        }
    }
    // Start tag with attributes (sorted, since getAttributeLabels returns a TreeSet).
    buffer.append("<");
    buffer.append(myLabel);
    Set<String> attributeLabels = getAttributeLabels();
    Iterator<String> allLabels = attributeLabels.iterator();
    while (allLabels.hasNext()) {
        String attributeLabel = (String) allLabels.next();
        String attributeValue = (String) myAttributes.get(attributeLabel);
        buffer.append(" ");
        buffer.append(attributeLabel);
        buffer.append("=\"");
        if (attributeValue != null) {
            // Attribute values are escaped; illegal XML characters are replaced.
            buffer.append(StringUtility.getInstance().getStringWithLegalXmlCharacters(attributeValue));
        }
        buffer.append("\"");
    }
    buffer.append(">");
    if (isValueMixedContent() == false) {
        // Element content: optional text value (possibly CDATA-wrapped), then children.
        if (myValueIsCDATA) {
            buffer.append("<![CDATA[");
        }
        if (myValue != null) {
            if (myValueIsCDATA) {
                // CDATA content is emitted verbatim — no escaping.
                buffer.append(myValue);
            } else {
                buffer.append(StringUtility.getInstance().getStringWithLegalXmlCharacters(myValue));
            }
        }
        if (myValueIsCDATA) {
            buffer.append("]]>");
        }
        for (int j = 0; j < myChildren.size(); j++) {
            XmlEntity child = (XmlEntity) myChildren.get(j);
            // Recurse into each child; recursionNumber > 0 suppresses the XML header.
            buffer.append(child.getAsConciseXmlStringBuffer(includeComments, includeProcessingInstructions, true, recursionNumber));
        }
    } else {
        // Mixed content: text fragments and child entities are emitted in stored order.
        MixedContent mixedContent = getValue_MixedContent();
        ArrayList<?> mixedContentEntryList = mixedContent.getMixedContentEntryList();
        for (int k = 0; k < mixedContentEntryList.size(); k++) {
            MixedContentEntry entry = (MixedContentEntry) mixedContentEntryList.get(k);
            if (entry.isXmlEntity()) {
                XmlEntity xmlEntityEntry = entry.getXmlEntityEntry();
                buffer.append(xmlEntityEntry.getAsConciseXmlStringBuffer(includeComments, includeProcessingInstructions, true, recursionNumber));
            } else {
                buffer.append(StringUtility.getInstance().getStringWithLegalXmlCharacters(entry.getStringEntry()));
            }
        }
    }
    // Closing tag.
    buffer.append("</");
    buffer.append(myLabel);
    buffer.append(">");
    return buffer;
}

// removal functions

/**
 * Re-initializes XmlEntity.
 */
public void reInitialize() {
    initialize();
}

/**
 * Removes all attributes from this XmlEntity.
 */
public void removeAttributes() {
    this.myAttributes.clear();
}

/**
 * Removes all children from this XmlEntity.
 */
public void removeChildren() {
    myChildren = null; // get rid of previous children list from memory
    myChildren = new ArrayList<XmlEntity>();
}

/**
 * Removes all children whose label equals the given label; other children are kept in order.
 *
 * @param label the label identifying which children to remove
 */
public void removeChildrenWithLabel(String label) {
    ArrayList<XmlEntity> childrenWithoutLabel = new ArrayList<XmlEntity>();
    for (int k = 0; k < myChildren.size(); k++) {
        XmlEntity child = (XmlEntity) myChildren.get(k);
        if (!(child.getLabel().equals(label))) {
            childrenWithoutLabel.add(child);
        }
    }
    myChildren = null; // get rid of previous children list from memory
    myChildren = childrenWithoutLabel;
}

/**
 * Removes all processing instructions from this XmlEntity.
 */
public void removeProcessingInstructions() {
    myProcessingInstructions = null;
    myProcessingInstructions = new ArrayList<XmlProcessingInstruction>();
}

// hash functions

/**
 * Equality is structural: label, value, mixed content, children and attributes
 * must all be equal. Comments and processing instructions are NOT compared.
 */
public boolean equals(Object o) {
    if (this == o) return true;
    if (!(o instanceof XmlEntity)) return false;
    final XmlEntity xmlEntity = (XmlEntity) o;
    if (myAttributes != null ? !myAttributes.equals(xmlEntity.myAttributes) : xmlEntity.myAttributes != null) return false;
    if (myChildren != null ? !myChildren.equals(xmlEntity.myChildren) : xmlEntity.myChildren != null) return false;
    if (myLabel != null ? !myLabel.equals(xmlEntity.myLabel) : xmlEntity.myLabel != null) return false;
    if (myValue != null ? !myValue.equals(xmlEntity.myValue) : xmlEntity.myValue != null) return false;
    if (myValue_mixedContent != null ? !myValue_mixedContent.equals(xmlEntity.myValue_mixedContent) : xmlEntity.myValue_mixedContent != null) return false;
    return true;
}

/**
 * Hash code over the same five fields used by equals, so the equals/hashCode
 * contract holds.
 */
public int hashCode() {
    int result;
    result = (myLabel != null ? myLabel.hashCode() : 0);
    result = 29 * result + (myValue != null ? myValue.hashCode() : 0);
    result = 29 * result + (myValue_mixedContent != null ? myValue_mixedContent.hashCode() : 0);
    result = 29 * result + (myChildren != null ? myChildren.hashCode() : 0);
    result = 29 * result + (myAttributes != null ? myAttributes.hashCode() : 0);
    return result;
}
}
apache-2.0
TonyWang-UMU/TFG-TWang
opencds-parent/dss-java-stub/src/main/java/org/omg/dss/exception/UnrecognizedLanguageException.java
1719
package org.omg.dss.exception;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlType;

/**
 * The UnrecognizedLanguageException is thrown when the language is unrecognized.
 *
 * <p>Java class for UnrecognizedLanguageException complex type.
 *
 * <p>The following schema fragment specifies the expected content contained within this class.
 *
 * <pre>
 * &lt;complexType name="UnrecognizedLanguageException">
 *   &lt;complexContent>
 *     &lt;extension base="{http://www.omg.org/spec/CDSS/201105/dss}DSSException">
 *       &lt;sequence>
 *         &lt;element name="unrecognizedLanguage" type="{http://www.omg.org/spec/CDSS/201105/dss}Language"/>
 *       &lt;/sequence>
 *     &lt;/extension>
 *   &lt;/complexContent>
 * &lt;/complexType>
 * </pre>
 *
 *
 */
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "UnrecognizedLanguageException", propOrder = {
    "unrecognizedLanguage"
})
public class UnrecognizedLanguageException extends DSSException {

    // Exceptions are Serializable; an explicit serialVersionUID keeps the
    // serialized form stable across recompilation. (JAXB ignores static fields.)
    private static final long serialVersionUID = 1L;

    @XmlElement(required = true)
    protected String unrecognizedLanguage;

    /**
     * Gets the value of the unrecognizedLanguage property.
     *
     * @return
     *     possible object is
     *     {@link String }
     *
     */
    public String getUnrecognizedLanguage() {
        return unrecognizedLanguage;
    }

    /**
     * Sets the value of the unrecognizedLanguage property.
     *
     * @param value
     *     allowed object is
     *     {@link String }
     *
     */
    public void setUnrecognizedLanguage(String value) {
        this.unrecognizedLanguage = value;
    }

}
apache-2.0
scottbez1/embarcadero
app/src/main/java/com/scottbezek/embarcadero/app/model/PathManager.java
12017
package com.scottbezek.embarcadero.app.model;

import android.location.Location;

import com.dropbox.sync.android.DbxDatastore;
import com.dropbox.sync.android.DbxException;
import com.dropbox.sync.android.DbxFields;
import com.dropbox.sync.android.DbxRecord;
import com.dropbox.sync.android.DbxTable;
import com.dropbox.sync.android.DbxTable.QueryResult;
import com.scottbezek.embarcadero.app.model.data.PathCoord;
import com.scottbezek.embarcadero.app.model.data.PathListItem;
import com.scottbezek.embarcadero.app.model.location.LocationUpdateProvider;
import com.scottbezek.embarcadero.app.model.location.LocationUpdateQueue;
import com.scottbezek.embarcadero.app.util.DatastoreUtils;
import com.scottbezek.embarcadero.app.util.DatastoreUtils.AutoSyncingDatastoreWithLock;
import com.scottbezek.embarcadero.app.util.DatastoreUtils.DataStream;
import com.scottbezek.embarcadero.app.util.DatastoreUtils.DatastoreQuery;
import com.scottbezek.embarcadero.app.util.DatastoreUtils.DatastoreRowQuery;
import com.scottbezek.embarcadero.app.util.DatastoreUtils.DatastoreTableQuery;
import com.scottbezek.embarcadero.app.util.DatastoreUtils.DatastoreWithLock;
import com.scottbezek.embarcadero.app.util.DatastoreUtils.DatastoreWithLock.OnSyncListener;
import com.scottbezek.embarcadero.app.util.DatastoreUtils.QueryLoader;
import com.scottbezek.embarcadero.app.util.RefCountedObject;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;

import javax.annotation.CheckForNull;
import javax.annotation.Nonnull;
import javax.annotation.concurrent.Immutable;

import rx.Observable;
import rx.Scheduler;
import rx.Scheduler.Worker;
import rx.Subscriber;
import rx.functions.Action0;
import rx.subjects.BehaviorSubject;
import rx.subscriptions.Subscriptions;

/**
 * Records GPS paths into a Dropbox datastore ("paths" table) on a background
 * thread, and exposes the recorded data as Rx observables / loaders.
 *
 * NOTE(review): mPathRecordThread is read and written without synchronization;
 * this appears to assume startRecording/stopRecording are always invoked from
 * a single thread (e.g. the UI thread) — confirm with callers.
 */
public class PathManager {

    // Ref-counted handle to the shared datastore; acquired/released per use.
    private final RefCountedObject<AutoSyncingDatastoreWithLock> mDatastoreRef;

    // Background thread doing the actual recording; null when not recording.
    private Thread mPathRecordThread = null;

    // Set before interrupting the recorder thread so it can distinguish a
    // requested shutdown from an unexpected interrupt.
    private final AtomicBoolean mShouldStopPathRecording = new AtomicBoolean();

    // BehaviorSubject replays the latest recording state to new subscribers.
    private final BehaviorSubject<RecordingState> mRecordingStateSubject =
            BehaviorSubject.create(new RecordingState(false, null));

    public PathManager(@Nonnull RefCountedObject<AutoSyncingDatastoreWithLock> datastoreRef) {
        mDatastoreRef = datastoreRef;
    }

    /**
     * Immutable snapshot of the current recording state: whether a recording is
     * in progress and, once known, the datastore record id of the active path.
     */
    @Immutable
    public class RecordingState {

        private final boolean mRecording;

        // Null until the path record has been created and synced.
        @CheckForNull
        private final String mPathRecordId;

        public RecordingState(boolean recording, String pathRecordId) {
            mRecording = recording;
            mPathRecordId = pathRecordId;
        }

        public boolean isRecording() {
            return mRecording;
        }

        @CheckForNull
        public String getPathRecordId() {
            return mPathRecordId;
        }
    }

    /**
     * Starts recording location updates into a new "paths" record on a
     * dedicated background thread.
     *
     * @param locationProvider source of location updates for the recording
     * @throws IllegalStateException if a recording is already in progress
     */
    public void startRecording(final LocationUpdateProvider locationProvider) {
        if (mPathRecordThread != null) {
            throw new IllegalStateException("Already recording!");
        }
        // Publish "recording" immediately; the record id follows after the
        // record is created and synced on the background thread.
        mRecordingStateSubject.onNext(new RecordingState(true, null));
        mShouldStopPathRecording.set(false);
        mPathRecordThread = new ThreadWithDatastore(mDatastoreRef) {
            @Override
            protected void runWithDatastore(DatastoreWithLock datastoreWithLock) {
                final DbxDatastore datastore = datastoreWithLock.getDatastore();
                final Object datastoreLock = datastoreWithLock.getLock();
                final DbxTable pathsTable = datastore.getTable("paths");

                final LocationUpdateQueue locationUpdateQueue = new LocationUpdateQueue(locationProvider);

                final Location lastLocation = locationProvider.getLastLocation();
                final PathRecordWriter pathWriter;
                synchronized (datastoreLock) {
                    DbxRecord pathRecord = pathsTable.insert();
                    pathWriter = new PathRecordWriter(pathRecord);
                    pathWriter.setStartTime(System.currentTimeMillis());
                    // If we know our current location, add it immediately so the path starts with at least one coord
                    if (lastLocation != null) {
                        // TODO(sbezek): ignore if last location is too old?
                        pathWriter.addLocation(lastLocation);
                    }
                    // A failed sync aborts the recording before any updates flow.
                    if (!DatastoreUtils.syncQuietly(datastoreWithLock)) {
                        return;
                    }
                    mRecordingStateSubject.onNext(new RecordingState(true, pathRecord.getId()));
                }

                locationUpdateQueue.enableProducer();

                // TODO(sbezek): maybe want to split out a "resumeRecording" method for Service restarts following process death?
                // TODO(sbezek): Aquire wakelocks, start services, and all that jazz!

                // Pull Location updates from the queue and apply them to the PathRecordWriter
                while (true) {
                    try {
                        Location updatedLocation = locationUpdateQueue.take();
                        synchronized (datastoreLock) {
                            pathWriter.addLocation(updatedLocation);
                            // Stop recording if the datastore can no longer sync.
                            if (!DatastoreUtils.syncQuietly(datastoreWithLock)) {
                                break;
                            }
                        }
                    } catch (InterruptedException e) {
                        if (mShouldStopPathRecording.get()) {
                            // We were requested to stop, so shut down cleanly
                            break;
                        } else {
                            // I don't know why we were interrupted, so freak out!
                            throw new RuntimeException(e);
                        }
                    }
                }

                locationUpdateQueue.disableProducer();
                synchronized (datastoreLock) {
                    pathWriter.setStopTime(System.currentTimeMillis());
                    DatastoreUtils.syncQuietly(datastoreWithLock);
                }
            }
        };
        mPathRecordThread.start();
    }

    /**
     * Requests the recording thread to stop (via flag + interrupt) and marks
     * the state as not recording.
     *
     * @throws IllegalStateException if no recording is in progress
     */
    public void stopRecording() {
        if (mPathRecordThread == null) {
            throw new IllegalStateException("Not recording");
        }
        // Flag must be set before the interrupt so the recorder thread treats
        // the interrupt as a clean shutdown request.
        mShouldStopPathRecording.set(true);
        mRecordingStateSubject.onNext(new RecordingState(false, null));
        mPathRecordThread.interrupt();
        mPathRecordThread = null;
    }

    /**
     * Helper for running something in a background thread while holding a reference to the datastore.
     */
    private static abstract class ThreadWithDatastore extends Thread {

        private final RefCountedObject<? extends DatastoreWithLock> mDatastoreRef;

        public ThreadWithDatastore(RefCountedObject<? extends DatastoreWithLock> datastoreRef) {
            mDatastoreRef = datastoreRef;
        }

        @Override
        public final void run() {
            // Acquire/release brackets the entire body so the datastore stays
            // alive for as long as the thread runs.
            final DatastoreWithLock ds = mDatastoreRef.acquire();
            try {
                runWithDatastore(ds);
            } finally {
                mDatastoreRef.release(ds);
            }
        }

        protected abstract void runWithDatastore(DatastoreWithLock datastore);
    }

    // Query producing one PathListItem per record in the "paths" table.
    // Fields other than start_time are optional and default to null/0.
    private static final DatastoreQuery<List<PathListItem>> PATH_LIST_QUERY =
            new DatastoreTableQuery<List<PathListItem>>("paths", new DbxFields()) {
                @Override
                public List<PathListItem> createImmutableSnapshot(QueryResult queryResult) {
                    List<PathListItem> result = new ArrayList<>();
                    for (DbxRecord record : queryResult) {
                        final PathListItem item = new PathListItem(
                                record.getId(),
                                record.hasField("name") ? record.getString("name") : null,
                                record.getLong("start_time"),
                                record.hasField("stop_time") ? record.getLong("stop_time") : null,
                                record.hasField("coord_time") ? record.getList("coord_time").size() : 0);
                        result.add(item);
                    }
                    return result;
                }
            };

    // Builds a single-row query returning the coords of one path record.
    private static DatastoreQuery<List<PathCoord>> getPathCoordsQuery(String pathRecordId) {
        return new DatastoreRowQuery<List<PathCoord>>("paths", pathRecordId) {
            @Override
            public List<PathCoord> createImmutableSnapshot(@CheckForNull DbxRecord result) {
                if (result == null) {
                    // TODO(sbezek): make a useful RuntimeException subclass: RecordNotFoundException?
                    throw new RuntimeException("Record not found");
                } else {
                    return PathCoord.listFrom(result);
                }
            }
        };
    }

    /** Returns a loader-style stream of the path list. */
    public DataStream<List<PathListItem>> getPathListLoader() {
        return new QueryLoader<>(mDatastoreRef, PATH_LIST_QUERY);
    }

    /** Observable of the path list, re-queried on every datastore sync. */
    public Observable<List<PathListItem>> getPathList(Scheduler queryExecutionScheduler) {
        return QueryObservable.createObservable(mDatastoreRef, PATH_LIST_QUERY, queryExecutionScheduler);
    }

    /** Observable of one path's coordinates, re-queried on every datastore sync. */
    public Observable<List<PathCoord>> getPathCoords(String pathRecordId, Scheduler queryExecutionScheduler) {
        return QueryObservable.createObservable(mDatastoreRef, getPathCoordsQuery(pathRecordId), queryExecutionScheduler);
    }

    /** Observable of the recording state; replays the latest state on subscribe. */
    public Observable<RecordingState> getRecordingState() {
        return mRecordingStateSubject.asObservable();
    }

    /**
     * Bridges a DatastoreQuery into an Observable: runs the query once on
     * subscribe and again after every datastore sync, on the given scheduler.
     * The datastore reference is held for the lifetime of the subscription.
     */
    private static class QueryObservable<T> implements Observable.OnSubscribe<T> {

        private final RefCountedObject<? extends DatastoreWithLock> mDatastoreRef;
        private final DatastoreQuery<T> mQuery;
        private final Scheduler mScheduler;

        public QueryObservable(RefCountedObject<? extends DatastoreWithLock> datastoreRef,
                DatastoreQuery<T> query, Scheduler scheduler) {
            mDatastoreRef = datastoreRef;
            mQuery = query;
            mScheduler = scheduler;
        }

        public static <T> Observable<T> createObservable(RefCountedObject<? extends DatastoreWithLock> datastoreRef,
                DatastoreQuery<T> query, Scheduler scheduler) {
            // NOTE(review): raw QueryObservable construction — new QueryObservable<>(...)
            // would avoid the unchecked warning; behavior is unchanged.
            return Observable.create(new QueryObservable(datastoreRef, query, scheduler));
        }

        @Override
        public void call(final Subscriber<? super T> subscriber) {
            final Worker worker = mScheduler.createWorker();
            final DatastoreWithLock datastore = mDatastoreRef.acquire();

            // Register a change listener for recurring loads
            final OnSyncListener requeryTrigger = new OnSyncListener() {
                @Override
                public void onSynced() {
                    // Execute the query off the sync callback, on the scheduler's worker.
                    worker.schedule(new Action0() {
                        @Override
                        public void call() {
                            try {
                                subscriber.onNext(mQuery.executeOnDatastore(datastore));
                            } catch (DbxException e) {
                                subscriber.onError(e);
                                // TODO(sbezek): unregister change listener and stop emitting data?
                            } catch (Throwable e) {
                                subscriber.onError(e);
                            }
                        }
                    });
                }
            };
            datastore.addSyncListener(requeryTrigger);

            // Trigger an initial load...
            requeryTrigger.onSynced();

            // When unsubscribed, unregister the change listener and release the datastore
            subscriber.add(Subscriptions.create(new Action0() {
                @Override
                public void call() {
                    datastore.removeSyncListener(requeryTrigger);
                    mDatastoreRef.release(datastore);
                    worker.unsubscribe();
                }
            }));
        }
    }
}
apache-2.0
wangyusheng/NewBook3
src/com/example/newbook4/bean/ExchangeBookOrderFinish.java
422
package com.example.newbook4.bean;

/**
 * Plain data bean describing a completed book-exchange order.
 * NOTE(review): all fields are public and mutable, and are presumably read/written
 * directly by callers — do not change their access level without auditing usages.
 * Field meanings below are inferred from the names; confirm against the database schema.
 */
public class ExchangeBookOrderFinish {

    // Primary key of this finish record — presumed; confirm against schema.
    public int finish_id;

    // Id of the exchange this finish record belongs to.
    public int exchange_id;

    // User who released (gave away) a book in the exchange.
    public int release_user;

    // User who obtained a book in the exchange.
    public int obtain_user;

    // Book released by release_user.
    public int release_book;

    // Book obtained by obtain_user.
    public int obtain_book;

    // Message left by the releasing side — presumed from name.
    public String release_msg;

    // Message left by the obtaining side — presumed from name.
    public String obtain_msg;

    // Timestamp strings; format is not visible here — confirm with the writer of these fields.
    public String generate_time;

    public String rfinish_time;

    public String ofinish_time;

    public ExchangeBookOrderFinish() {
    }
}
apache-2.0
Luckyion/SpeechIntelligence
SpeechIntelligence/src/me/videa/base/db/entity/Action.java
2879
package me.videa.base.db.entity; import com.lidroid.xutils.db.annotation.Column; import com.lidroid.xutils.db.annotation.Table; @Table(name="ACTIONS") public class Action extends EntityBase{ @Column(column = "ACTION_ID") private String action_id; @Column(column = "ACTION") private String action; @Column(column = "SEMANTIC") private String semantic;//鍔犲瘑璇­涔‰ @Column(column = "ENCRYPT_ROLE") private String encrypt_role;//鍔犲瘑瑙勫垯 @Column(column = "PASSWORD") private String password;//鏁版嵁鑾峰彇瀵嗛挜 @Column(column = "NOTE") private String note;//璁板綍 @Column(column = "MARK") private String mark;//澶囨敞 @Column(column = "CREATE_TIME") private String create_time;//鍒涘缓鏃堕棿 @Column(column = "MODIFY_TIME") private String modify_time; @Column(column = "LOCK_STATUS") private String lock_status;//閿佸畾鐘舵€ public String getAction_id() { return action_id; } public void setAction_id(String action_id) { this.action_id = action_id; } public String getAction() { return action; } public void setAction(String action) { this.action = action; } public String getSemantic() { return semantic; } public void setSemantic(String semantic) { this.semantic = semantic; } public String getEncrypt_role() { return encrypt_role; } public void setEncrypt_role(String encrypt_role) { this.encrypt_role = encrypt_role; } public String getPassword() { return password; } public void setPassword(String password) { this.password = password; } public String getNote() { return note; } public void setNote(String note) { this.note = note; } public String getMark() { return mark; } public void setMark(String mark) { this.mark = mark; } public String getCreate_time() { return create_time; } public void setCreate_time(String create_time) { this.create_time = create_time; } public String getModify_time() { return modify_time; } public void setModify_time(String modify_time) { this.modify_time = modify_time; } public String getLock_status() { return lock_status; } public void setLock_status(String lock_status) { 
this.lock_status = lock_status; } @Override public String toString() { return "ACTIONS{" + ", action_id='" + action_id + '\'' + ", action='" + action + '\'' + ", semantic=" + semantic + ", encrypt_role='" + encrypt_role + '\'' + ", password='" + password + '\'' + ", note='" + note + '\'' + ", mark='" + mark + '\'' + ", create_time='" + create_time + '\'' + ", modify_time='" + modify_time + '\'' + ", lock_status='" + lock_status + '\'' + '}'; } }
apache-2.0
gawkermedia/googleads-java-lib
modules/adwords_appengine/src/main/java/com/google/api/ads/adwords/jaxws/v201601/cm/ConstantDataServiceInterfacegetUserInterestCriterionResponse.java
2078
package com.google.api.ads.adwords.jaxws.v201601.cm;

import java.util.ArrayList;
import java.util.List;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlType;

/**
 * <p>Java class for getUserInterestCriterionResponse element declaration.
 *
 * <p>The following schema fragment specifies the expected content contained within this class.
 *
 * <pre>
 * &lt;element name="getUserInterestCriterionResponse">
 *   &lt;complexType>
 *     &lt;complexContent>
 *       &lt;restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
 *         &lt;sequence>
 *           &lt;element name="rval" type="{https://adwords.google.com/api/adwords/cm/v201601}CriterionUserInterest" maxOccurs="unbounded" minOccurs="0"/>
 *         &lt;/sequence>
 *       &lt;/restriction>
 *     &lt;/complexContent>
 *   &lt;/complexType>
 * &lt;/element>
 * </pre>
 *
 *
 */
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "", propOrder = {
    "rval"
})
@XmlRootElement(name = "getUserInterestCriterionResponse")
public class ConstantDataServiceInterfacegetUserInterestCriterionResponse {

    // Lazily initialized by getRval(); may remain null until first access.
    protected List<CriterionUserInterest> rval;

    /**
     * Gets the value of the rval property.
     *
     * <p>
     * This accessor method returns a reference to the live list,
     * not a snapshot. Therefore any modification you make to the
     * returned list will be present inside the JAXB object.
     * This is why there is not a <CODE>set</CODE> method for the rval property.
     *
     * <p>
     * For example, to add a new item, do as follows:
     * <pre>
     *    getRval().add(newItem);
     * </pre>
     *
     *
     * <p>
     * Objects of the following type(s) are allowed in the list
     * {@link CriterionUserInterest }
     *
     *
     */
    public List<CriterionUserInterest> getRval() {
        // Standard JAXB lazy-init accessor: the field doubles as the storage
        // JAXB unmarshals into, so it must be created on first use.
        if (rval == null) {
            rval = new ArrayList<CriterionUserInterest>();
        }
        return this.rval;
    }

}
apache-2.0
zpzkit/appstore-cms
src/main/java/com/mobvoi/appstore/mapper/WeeklyRecommendMapper.java
429
package com.mobvoi.appstore.mapper; import com.mobvoi.appstore.model.WeeklyRecommend; public interface WeeklyRecommendMapper { int deleteByPrimaryKey(Integer id); int insert(WeeklyRecommend record); int insertSelective(WeeklyRecommend record); WeeklyRecommend selectByPrimaryKey(Integer id); int updateByPrimaryKeySelective(WeeklyRecommend record); int updateByPrimaryKey(WeeklyRecommend record); }
apache-2.0
wildfly-swarm/wildfly-config-api
generator/src/main/java/org/wildfly/swarm/config/generator/model/ResourceDescription.java
6253
/*
 * JBoss, Home of Professional Open Source.
 * Copyright 2010, Red Hat, Inc., and individual contributors
 * as indicated by the @author tags. See the copyright.txt file in the
 * distribution for a full listing of individual contributors.
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * This software is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this software; if not, write to the Free
 * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
 */
package org.wildfly.swarm.config.generator.model;

import org.jboss.dmr.ModelNode;
import org.jboss.dmr.ModelType;
import org.jboss.dmr.Property;

import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

import static org.jboss.as.controller.descriptions.ModelDescriptionConstants.*;

/**
 * Represents the result of a read-resource-description operation for one specific resource.
 *
 * @author Harald Pehl
 */
public class ResourceDescription extends ModelNode {

    /** Shared sentinel returned when a requested child description does not exist. */
    public final static ResourceDescription EMPTY = new ResourceDescription();

    static final String ACCESS_CONTROL = "access-control";
    static final String NOTIFICATIONS = "notifications";

    // Singleton marker state; only populated via setSingletonName().
    private boolean isSingleton;
    private String singletonName;

    public ResourceDescription() {
        super();
    }

    /** Wraps an existing DMR description node. */
    public ResourceDescription(ModelNode description) {
        set(description);
    }

    public boolean hasAttributes() {
        return hasDefined(ATTRIBUTES);
    }

    /**
     * Returns the attribute properties of this description.
     *
     * @return the attributes, or an immutable empty list when none are defined
     */
    public List<Property> getAttributes() {
        // Fixed: use the typed Collections.emptyList() instead of the raw
        // Collections.EMPTY_LIST, which required an unchecked suppression.
        return hasAttributes() ? get(ATTRIBUTES).asPropertyList() : Collections.emptyList();
    }

    public boolean hasAccessControl() {
        return hasDefined(ACCESS_CONTROL);
    }

    public boolean hasChildrenDefined() {
        return hasDefined(CHILDREN) && !get(CHILDREN).keys().isEmpty();
    }

    public boolean hasOperations() {
        return hasDefined(OPERATIONS);
    }

    public boolean hasNotifications() {
        return hasDefined(NOTIFICATIONS);
    }

    /**
     * Returns the types of regular (wildcard-addressed) child resources, i.e. children whose
     * model description contains a "*" key, as opposed to singletons carrying distinct names.
     */
    public Set<String> getChildrenTypes() {
        Set<String> result = new HashSet<>();
        if (hasChildrenDefined()) {
            ModelNode children = get(CHILDREN);
            List<Property> items = children.asPropertyList();
            for (Property item : items) {
                Set<String> keys = item.getValue().get(MODEL_DESCRIPTION).keys();
                if (keys.contains("*")) // regular resources (opposed to singletons, that carry distinct names)
                {
                    result.add(item.getName());
                }
            }
        }
        return result;
    }

    /**
     * Returns the singleton child types, encoded as "type=name" entries for every
     * distinctly-named child (no "*" key in its model description).
     */
    public Set<String> getSingletonChildrenTypes() {
        Set<String> result = new HashSet<>();
        if (hasChildrenDefined()) {
            ModelNode children = get(CHILDREN);
            List<Property> items = children.asPropertyList();
            for (Property item : items) {
                Set<String> keys = item.getValue().get(MODEL_DESCRIPTION).keys();
                if (!keys.contains("*")) // singleton resources
                {
                    result.addAll(keys.stream().map(key -> item.getName() + "=" + key).collect(Collectors.toList()));
                }
            }
        }
        return result;
    }

    /**
     * Looks for the description of a child resource.
     *
     * @param childType The type of the child resource
     * @return the description of the child resource or {@link #EMPTY} if no such resource exists.
     */
    public ResourceDescription getChildDescription(String childType) {
        return getChildDescription(childType, "*");
    }

    /**
     * Looks for the description of a specific child resource.
     *
     * @param type The type of the child resource
     * @param name The name of the instance
     * @return the description of the specific child resource or {@link #EMPTY} if no such resource exists.
     */
    public ResourceDescription getChildDescription(String type, String name) {
        if (hasChildrenDefined()) {
            // Fixed: use the CHILDREN constant for consistency with the rest of the class
            // instead of the duplicated "children" string literal.
            List<Property> children = get(CHILDREN).asPropertyList();
            for (Property child : children) {
                if (type.equals(child.getName()) && child.getValue().hasDefined(MODEL_DESCRIPTION)) {
                    List<Property> modelDescriptions = child.getValue().get(MODEL_DESCRIPTION).asPropertyList();
                    for (Property modelDescription : modelDescriptions) {
                        if (name.equals(modelDescription.getName())) {
                            return new ResourceDescription(modelDescription.getValue());
                        }
                    }
                }
            }
        }
        return EMPTY;
    }

    /**
     * Extracts a description from a management operation response.
     *
     * @param response a DMR response node; must have a successful outcome
     * @return the description; for wildcard addressing (a LIST result) the first entry's result
     * @throws RuntimeException when the response outcome is not "success"
     */
    public static ResourceDescription from(ModelNode response) {
        if (!response.get(OUTCOME).asString().equals(SUCCESS))
            throw new RuntimeException(response.get(FAILURE_DESCRIPTION).asString());

        ModelNode result = response.get(RESULT);

        if (ModelType.LIST == result.getType()) {
            // wildcard addressing
            return new ResourceDescription(result.asList().get(0).get(RESULT));
        } else {
            // specific addressing
            return new ResourceDescription(result);
        }
    }

    /** Returns the human-readable description text of this resource. */
    public String getText() {
        return get(DESCRIPTION).asString();
    }

    public boolean isSingleton() {
        return isSingleton;
    }

    public String getSingletonName() {
        return singletonName;
    }

    /** Marks this description as a singleton child and records its distinct name. */
    public void setSingletonName(String name) {
        this.isSingleton = true;
        this.singletonName = name;
    }
}
apache-2.0
U-QASAR/u-qasar.platform
src/main/java/ro/fortsoft/wicket/dashboard/web/WidgetView.java
1241
/* * Copyright 2012 Decebal Suiu * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this work except in compliance with * the License. You may obtain a copy of the License in the LICENSE file, or at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package ro.fortsoft.wicket.dashboard.web; import org.apache.wicket.markup.html.panel.GenericPanel; import org.apache.wicket.model.IModel; import ro.fortsoft.wicket.dashboard.Widget; /** * @author Decebal Suiu */ public class WidgetView extends GenericPanel<Widget> { private static final long serialVersionUID = 1L; protected WidgetView(String id, IModel<Widget> model) { super(id, model); setOutputMarkupPlaceholderTag(true); } private Widget getWidget() { return getModelObject(); } @Override protected void onConfigure() { super.onConfigure(); setVisible(!getWidget().isCollapsed()); } }
apache-2.0
softindex/datakernel
core-http/src/main/java/io/datakernel/http/HttpUtils.java
9413
/*
 * Copyright (C) 2015-2018 SoftIndex LLC.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.datakernel.http;

import io.datakernel.common.parse.ParseException;
import io.datakernel.common.parse.UnknownFormatException;
import org.jetbrains.annotations.Nullable;

import java.io.UnsupportedEncodingException;
import java.net.InetAddress;
import java.net.URLDecoder;
import java.net.URLEncoder;
import java.net.UnknownHostException;
import java.util.Map;

import static io.datakernel.bytebuf.ByteBufStrings.*;
import static io.datakernel.http.HttpHeaders.HOST;

/**
 * Util for working with {@link HttpRequest}
 */
public final class HttpUtils {
	// Shared, pre-built exception for malformed q-values (avoids per-call allocation).
	public static final ParseException INVALID_Q_VALUE = new ParseException("Value of 'q' should start either from 0 or 1");
	// Initial StringBuilder capacity (32) used by getFullUri(HttpRequest).
	private static final int URI_DEFAULT_CAPACITY = 1 << 5;

	/**
	 * Resolves a host name to an {@link InetAddress}, rethrowing the checked
	 * {@link UnknownHostException} as an unchecked {@link IllegalArgumentException}.
	 * NOTE(review): InetAddress.getByName may perform a blocking DNS lookup.
	 */
	public static InetAddress inetAddress(String host) {
		try {
			return InetAddress.getByName(host);
		} catch (UnknownHostException e) {
			throw new IllegalArgumentException(e);
		}
	}

	// https://url.spec.whatwg.org/
	/**
	 * Syntactic check of whether {@code host} looks like a literal IP address
	 * (bracketed or bare IPv6, or dotted IPv4). A first pass counts dots/colons and
	 * rejects any character that is not a hex digit, '.', ':' or the brackets,
	 * then dispatches to the ipv4/ipv6 checkers.
	 */
	public static boolean isInetAddress(String host) {
		int colons = 0;
		int dots = 0;
		byte[] bytes = encodeAscii(host);

		// expect ipv6 address in bracket form "[...]"
		if (bytes[0] == '[') {
			return bytes[bytes.length - 1] == ']' && checkIpv6(bytes, 1, bytes.length - 1);
		}

		// assume ipv4 could be as oct, bin, dec; ipv6 - hex
		for (byte b : bytes) {
			if (b == '.') {
				dots++;
			} else if (b == ':') {
				if (dots != 0) {
					// colons after dots cannot form a valid literal here
					return false;
				}
				colons++;
			} else if (Character.digit(b, 16) == -1) {
				return false;
			}
		}

		if (dots < 4) {
			if (colons > 0 && colons < 8) {
				return checkIpv6(bytes, 0, bytes.length);
			}
			return checkIpv4(bytes, 0, bytes.length);
		}
		return false;
	}

	/*
	 * Checks only for a dot decimal format (192.168.0.208 for example) more -> https://en.wikipedia.org/wiki/IPv4
	 */
	private static boolean checkIpv4(byte[] bytes, int pos, int length) {
		int start = pos;
		for (int i = pos; i < length; i++) {
			// assume at least one more symbol is present after dot
			if (i == length - 1 && bytes[i] == '.') {
				return false;
			}
			if (bytes[i] == '.' || i == length - 1) {
				int v;
				// reject an empty octet (two consecutive dots), except at the last position
				if (i - start == 0 && i != length - 1) {
					return false;
				}
				try {
					v = trimAndDecodePositiveInt(bytes, start, i - start);
				} catch (ParseException e) {
					return false;
				}
				// each octet must fit in a byte
				if (v < 0 || v > 255) return false;
				start = i + 1;
			}
		}
		return true;
	}

	/*
	 * http://stackoverflow.com/questions/5963199/ipv6-validation
	 * rfc4291
	 *
	 * IPV6 addresses are represented as 8, 4 hex digit groups of numbers
	 * 2001:0db8:11a3:09d7:1f34:8a2e:07a0:765d
	 *
	 * leading zeros are not necessary, however at least one digit should be present
	 *
	 * the null group ':0000:0000:0000'(one or more) could be substituted with '::' once per address
	 *
	 * x:x:x:x:x:x:d.d.d.d - 6 ipv6 + 4 ipv4
	 * ::d.d.d.d
	 */
	private static boolean checkIpv6(byte[] bytes, int pos, int length) {
		boolean shortHand = false;      // denotes usage of ::
		int numCount = 0;               // hex digits seen in the current group (max 4)
		int blocksCount = 0;            // number of ':' separators seen
		int start = 0;                  // index of the last ':' (used for embedded ipv4)
		while (pos < length) {
			if (bytes[pos] == ':') {
				start = pos;
				blocksCount++;
				numCount = 0;
				if (pos > 0 && bytes[pos - 1] == ':') {
					// "::" may appear at most once per address
					if (shortHand) return false;
					else {
						shortHand = true;
					}
				}
			} else if (bytes[pos] == '.') {
				// embedded dotted-decimal ipv4 tail (e.g. ::ffff:192.0.2.1)
				return checkIpv4(bytes, start + 1, length - start + 1);
			} else {
				if (Character.digit(bytes[pos], 16) == -1) {
					return false;
				}
				numCount++;
				if (numCount > 4) {
					// a group holds at most 4 hex digits
					return false;
				}
			}
			pos++;
		}
		// NOTE(review): accepts the address when more than 6 ':' were seen or a "::"
		// shorthand was used — confirm this matches the intended RFC 4291 coverage.
		return blocksCount > 6 || shortHand;
	}

	/** Advances {@code pos} past consecutive ' ' bytes, stopping at {@code end}. */
	public static int skipSpaces(byte[] bytes, int pos, int end) {
		while (pos < end && bytes[pos] == ' ') {
			pos++;
		}
		return pos;
	}

	/**
	 * Parses an RFC 7231 q-value ("1", "1.0", "0.xy", ...) starting at {@code pos},
	 * scaled by 100 (so "1" -> 100, "0.5" -> 50).
	 *
	 * @throws ParseException {@link #INVALID_Q_VALUE} when the value starts with
	 *                        anything other than '0' or '1'
	 */
	public static int parseQ(byte[] bytes, int pos, int length) throws ParseException {
		if (bytes[pos] == '1') {
			return 100;
		} else if (bytes[pos] == '0') {
			if (length == 1) return 0;
			// keep at most two fractional digits ("0." prefix is skipped below)
			length = length > 4 ? 2 : length - 2;
			int q = trimAndDecodePositiveInt(bytes, pos + 2, length);
			// a single fractional digit means tenths, so scale to hundredths
			if (length == 1) q *= 10;
			return q;
		}
		throw INVALID_Q_VALUE;
	}

	/**
	 * Method which creates string with parameters and its value in format URL. Using encoding UTF-8
	 *
	 * @param q map in which keys if name of parameters, value - value of parameters.
	 * @return string with parameters and its value in format URL
	 */
	public static String renderQueryString(Map<String, String> q) {
		return renderQueryString(q, "UTF-8");
	}

	/**
	 * Method which creates string with parameters and its value in format URL
	 *
	 * @param q   map in which keys if name of parameters, value - value of parameters.
	 * @param enc encoding of this string
	 * @return string with parameters and its value in format URL
	 */
	public static String renderQueryString(Map<String, String> q, String enc) {
		StringBuilder sb = new StringBuilder();
		for (Map.Entry<String, String> e : q.entrySet()) {
			String name = urlEncode(e.getKey(), enc);
			sb.append(name);
			if (e.getValue() != null) {
				// null values render as a bare key with no '=' separator
				sb.append('=');
				sb.append(urlEncode(e.getValue(), enc));
			}
			sb.append('&');
		}
		// drop the trailing '&'
		if (sb.length() > 0) sb.setLength(sb.length() - 1);
		return sb.toString();
	}

	/**
	 * Translates a string into application/x-www-form-urlencoded format using a specific encoding scheme.
	 * This method uses the supplied encoding scheme to obtain the bytes for unsafe characters
	 *
	 * @param string string for encoding
	 * @param enc    new encoding
	 * @return the translated String.
	 * @throws IllegalArgumentException when {@code enc} names an unsupported charset
	 */
	public static String urlEncode(String string, String enc) {
		try {
			return URLEncoder.encode(string, enc);
		} catch (UnsupportedEncodingException e) {
			throw new IllegalArgumentException("Can't encode with supplied encoding: " + enc, e);
		}
	}

	/**
	 * Reverse of {@link #urlEncode(String, String)}.
	 *
	 * @throws ParseException         when {@code string} is null
	 * @throws UnknownFormatException when {@code enc} names an unsupported charset
	 */
	public static String urlDecode(@Nullable String string, String enc) throws ParseException {
		if (string == null) {
			throw new ParseException(HttpUtils.class, "No string to decode");
		}
		try {
			return URLDecoder.decode(string, enc);
		} catch (UnsupportedEncodingException e) {
			throw new UnknownFormatException(HttpUtils.class, "Can't encode with supplied encoding: " + enc, e);
		}
	}

	/**
	 * Decodes a positive decimal int from {@code array[pos..pos+len)}, ignoring
	 * leading/trailing spaces and horizontal tabs.
	 */
	public static int trimAndDecodePositiveInt(byte[] array, int pos, int len) throws ParseException {
		int left = trimLeft(array, pos, len);
		pos += left;
		len -= left;
		len -= trimRight(array, pos, len);
		return decodePositiveInt(array, pos, len);
	}

	/** Number of leading SP/HT bytes in the range. */
	private static int trimLeft(byte[] array, int pos, int len) {
		for (int i = 0; i < len; i++) {
			if (array[pos + i] != SP && array[pos + i] != HT) {
				return i;
			}
		}
		// NOTE(review): an all-whitespace range returns 0 (no trim) rather than len;
		// decodePositiveInt then sees the whitespace and is expected to reject it — confirm.
		return 0;
	}

	/** Number of trailing SP/HT bytes in the range. */
	private static int trimRight(byte[] array, int pos, int len) {
		for (int i = len - 1; i >= 0; i--) {
			if (array[pos + i] != SP && array[pos + i] != HT) {
				return len - i - 1;
			}
		}
		return 0;
	}

	/**
	 * (RFC3986) scheme://authority/path/?query#fragment
	 *
	 * Rebuilds the full request URI from the Host header plus the request's
	 * scheme, path, query and fragment.
	 *
	 * @return the full URI, or null when the request carries no Host header
	 */
	@Nullable
	public static String getFullUri(HttpRequest request, int builderCapacity) {
		String host = request.getHeader(HOST);
		if (host == null) {
			return null;
		}

		String query = request.getQuery();
		String fragment = request.getFragment();
		StringBuilder fullUriBuilder = new StringBuilder(builderCapacity)
				.append(request.isHttps() ? "https://" : "http://")
				.append(host)
				.append(request.getPath());

		if (!query.isEmpty()) {
			fullUriBuilder.append("?").append(query);
		}
		if (!fragment.isEmpty()) {
			fullUriBuilder.append("#").append(fragment);
		}
		return fullUriBuilder.toString();
	}

	/** Same as {@link #getFullUri(HttpRequest, int)} with a default builder capacity. */
	@Nullable
	public static String getFullUri(HttpRequest request) {
		return getFullUri(request, URI_DEFAULT_CAPACITY);
	}

	/**
	 * RFC-7231, sections 6.5 and 6.6
	 *
	 * Maps well-known HTTP error codes to a "code. Reason-Phrase" title.
	 */
	public static String getHttpErrorTitle(int code) {
		switch (code) {
			case 400:
				return "400. Bad Request";
			case 402:
				return "402. Payment Required";
			case 403:
				return "403. Forbidden";
			case 404:
				return "404. Not Found";
			case 405:
				return "405. Method Not Allowed";
			case 406:
				return "406. Not Acceptable";
			case 408:
				return "408. Request Timeout";
			case 409:
				return "409. Conflict";
			case 410:
				return "410. Gone";
			case 411:
				return "411. Length Required";
			case 413:
				return "413. Payload Too Large";
			case 414:
				return "414. URI Too Long";
			case 415:
				return "415. Unsupported Media Type";
			case 417:
				return "417. Expectation Failed";
			case 426:
				return "426. Upgrade Required";
			case 500:
				return "500. Internal Server Error";
			case 501:
				return "501. Not Implemented";
			case 502:
				return "502. Bad Gateway";
			case 503:
				return "503. Service Unavailable";
			case 504:
				return "504. Gateway Timeout";
			case 505:
				return "505. HTTP Version Not Supported";
			default:
				return code + ". Unknown HTTP code, returned from an error";
		}
	}
}
apache-2.0
mwsobol/SORCER
core/sorcer-dl/src/main/java/sorcer/service/Evaluation.java
1674
/* * Copyright 2009 the original author or authors. * Copyright 2009 SorcerSoft.org. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package sorcer.service; import sorcer.service.modeling.EvaluationComponent; import sorcer.service.modeling.SupportComponent; import java.rmi.RemoteException; /** * A functionality required by all evaluations in SORCER. * * @author Mike Sobolewski */ public interface Evaluation <T> extends Substitutable, Scopable, Call, EvaluationComponent, SupportComponent { /** * Returns the value of the existing value of this evaluation that might be invalid. * * @return the value as is * @throws EvaluationException * @throws RemoteException */ public T asis() throws EvaluationException, RemoteException; /** * Returns the current value of this evaluation. The current value can be * exiting value with no need to evaluate it if it's still valid. * * @return the current value of this evaluation * @throws EvaluationException * @throws RemoteException */ public T getValue(Arg... entries) throws EvaluationException, RemoteException; public void setNegative(boolean negative); }
apache-2.0
arthurdm/microprofile-open-api
tck/src/main/java/org/eclipse/microprofile/openapi/tck/utils/YamlToJsonFilter.java
2703
/** * Copyright (c) 2018 Contributors to the Eclipse Foundation * <p> * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.eclipse.microprofile.openapi.tck.utils; import org.apache.commons.lang3.exception.ExceptionUtils; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; import io.restassured.builder.ResponseBuilder; import io.restassured.filter.FilterContext; import io.restassured.filter.OrderedFilter; import io.restassured.http.ContentType; import io.restassured.response.Response; import io.restassured.specification.FilterableRequestSpecification; import io.restassured.specification.FilterableResponseSpecification; /** * This filter is a replacement for the original {@code YamlToJsonConverterServlet}. * <p> * Its only purpose is to convert YAML to JSON so that tests can use JsonPath to inspect the response body. 
* * @author Martin Kouba */ public class YamlToJsonFilter implements OrderedFilter { @Override public Response filter(FilterableRequestSpecification requestSpec, FilterableResponseSpecification responseSpec, FilterContext ctx) { if (ContentType.JSON.matches(requestSpec.getContentType())) { // Conversion is not needed return ctx.next(requestSpec, responseSpec); } try { Response response = ctx.next(requestSpec, responseSpec); ObjectMapper yamlReader = new ObjectMapper(new YAMLFactory()); Object obj = yamlReader.readValue(response.getBody().asString(), Object.class); ObjectMapper jsonWriter = new ObjectMapper(); String json = jsonWriter.writeValueAsString(obj); ResponseBuilder builder = new ResponseBuilder(); builder.clone(response); builder.setBody(json); builder.setContentType(ContentType.JSON); return builder.build(); } catch (Exception e) { throw new IllegalStateException("Failed to convert the request: " + ExceptionUtils.getMessage(e), e); } } @Override public int getOrder() { return OrderedFilter.HIGHEST_PRECEDENCE; } }
apache-2.0
mayonghui2112/helloWorld
sourceCode/OnJava8-Examples-master/concurrent/ThrowsChecked.java
1169
// concurrent/ThrowsChecked.java // (c)2017 MindView LLC: see Copyright.txt // We make no guarantees that this code is fit for any purpose. // Visit http://OnJava8.com for more book information. import java.util.stream.*; import java.util.concurrent.*; public class ThrowsChecked { class Checked extends Exception {} static ThrowsChecked nochecked(ThrowsChecked tc) { return tc; } static ThrowsChecked withchecked(ThrowsChecked tc) throws Checked { return tc; } static void testStream() { Stream.of(new ThrowsChecked()) .map(ThrowsChecked::nochecked) // .map(ThrowsChecked::withchecked); // [1] .map(tc -> { try { return withchecked(tc); } catch(Checked e) { throw new RuntimeException(e); } }); } static void testCompletableFuture() { CompletableFuture .completedFuture(new ThrowsChecked()) .thenApply(ThrowsChecked::nochecked) // .thenApply(ThrowsChecked::withchecked); // [2] .thenApply(tc -> { try { return withchecked(tc); } catch(Checked e) { throw new RuntimeException(e); } }); } }
apache-2.0
lbitonti/liquibase-hana
src/test/java/liquibase/sqlgenerator/ext/GetViewDefinitionGeneratorHanaDBTest.java
2000
package liquibase.sqlgenerator.ext; import liquibase.database.Database; import liquibase.database.ext.HanaDBDatabase; import liquibase.sqlgenerator.AbstractSqlGeneratorHanaDBTest; import liquibase.sqlgenerator.SqlGenerator; import liquibase.statement.core.DropViewStatement; import liquibase.statement.core.GetViewDefinitionStatement; import org.junit.Test; import static org.junit.Assert.assertEquals; public class GetViewDefinitionGeneratorHanaDBTest extends AbstractSqlGeneratorHanaDBTest<GetViewDefinitionStatement> { public GetViewDefinitionGeneratorHanaDBTest() throws Exception { this(new GetViewDefinitionGeneratorHanaDB()); } protected GetViewDefinitionGeneratorHanaDBTest(SqlGenerator<GetViewDefinitionStatement> generatorUnderTest) throws Exception { super(generatorUnderTest); } @Override protected GetViewDefinitionStatement createSampleSqlStatement() { Database database = new HanaDBDatabase(); GetViewDefinitionStatement getViewDefinitionStatement = new GetViewDefinitionStatement(null, null, "view_name"); return getViewDefinitionStatement; } @Test public void testGetViewNoSchema() { Database database = new HanaDBDatabase(); GetViewDefinitionStatement statement = new GetViewDefinitionStatement(null, null, "actual_view_name"); assertEquals("SELECT DEFINITION FROM VIEWS WHERE upper(VIEW_NAME)='ACTUAL_VIEW_NAME'", this.generatorUnderTest.generateSql(statement, database, null)[0].toSql()); } @Test public void testGetViewWithSchema() { Database database = new HanaDBDatabase(); GetViewDefinitionStatement statement = new GetViewDefinitionStatement(null, "schema_name", "actual_view_name"); assertEquals("SELECT DEFINITION FROM VIEWS WHERE upper(SCHEMA_NAME)='SCHEMA_NAME' AND upper(VIEW_NAME)='ACTUAL_VIEW_NAME'", this.generatorUnderTest.generateSql(statement, database, null)[0].toSql()); } }
apache-2.0
xunboo/JJCamera
android/src/main/java/com/jjcamera/apps/iosched/util/PlayServicesUtils.java
1945
/* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.jjcamera.apps.iosched.util; import android.app.Activity; import android.app.Dialog; import android.content.DialogInterface; import com.google.android.gms.common.ConnectionResult; import com.google.android.gms.common.GooglePlayServicesUtil; /** * Helper for Google Play services-related operations. */ public class PlayServicesUtils { public static boolean checkGooglePlayServices(final Activity activity) { final int googlePlayServicesCheck = GooglePlayServicesUtil.isGooglePlayServicesAvailable(activity); switch (googlePlayServicesCheck) { case ConnectionResult.SUCCESS: return true; case ConnectionResult.SERVICE_DISABLED: case ConnectionResult.SERVICE_INVALID: case ConnectionResult.SERVICE_MISSING: case ConnectionResult.SERVICE_VERSION_UPDATE_REQUIRED: Dialog dialog = GooglePlayServicesUtil.getErrorDialog(googlePlayServicesCheck, activity, 0); dialog.setOnCancelListener(new DialogInterface.OnCancelListener() { @Override public void onCancel(DialogInterface dialogInterface) { activity.finish(); } }); dialog.show(); } return false; } }
apache-2.0
JFL110/app-base-prender
src/main/java/org/jfl110/prender/api/resources/ServletContextResourceSource.java
365
package org.jfl110.prender.api.resources; /** * A ResourceSource that can be resolved from the ServletContext * * @author JFL110 */ public class ServletContextResourceSource implements ResourceSource { private final String path; ServletContextResourceSource(String path){ this.path = path; } @Override public String getPath() { return path; } }
apache-2.0
aaronphilips/as1
app/src/androidTest/java/ca/as1/MainHabitTrackerActivityTest.java
421
package ca.as1; import android.app.Activity; import android.test.ActivityInstrumentationTestCase2; /** * Created by wz on 14/09/15. */ public class MainHabitTrackerActivityTest extends ActivityInstrumentationTestCase2 { public MainHabitTrackerActivityTest() { super(MainHabitTrackerActivity.class); } public void testStart() throws Exception { Activity activity = getActivity(); } }
apache-2.0
HaStr/kieker
kieker-common/src-gen/kieker/common/record/flow/trace/operation/constructor/object/AfterConstructorFailedObjectEvent.java
6534
package kieker.common.record.flow.trace.operation.constructor.object;

import java.nio.BufferOverflowException;
import java.nio.BufferUnderflowException;
import java.nio.ByteBuffer;

import kieker.common.record.flow.trace.operation.constructor.AfterConstructorFailedEvent;
import kieker.common.util.registry.IRegistry;

import kieker.common.record.flow.IObjectRecord;

/**
 * Generated monitoring record: an "after constructor failed" event extended with the
 * identity hash of the object under construction ({@code objectId}).
 *
 * NOTE: this file lives under {@code src-gen}; it is generated code, so edits here are
 * likely to be overwritten by the generator.
 *
 * @author Jan Waller
 *
 * @since 1.6
 */
public class AfterConstructorFailedObjectEvent extends AfterConstructorFailedEvent implements IObjectRecord {
    private static final long serialVersionUID = -8160283153301963516L;

    /** Descriptive definition of the serialization size of the record. */
    public static final int SIZE = TYPE_SIZE_LONG // IEventRecord.timestamp
             + TYPE_SIZE_LONG // ITraceRecord.traceId
             + TYPE_SIZE_INT // ITraceRecord.orderIndex
             + TYPE_SIZE_STRING // IOperationSignature.operationSignature
             + TYPE_SIZE_STRING // IClassSignature.classSignature
             + TYPE_SIZE_STRING // IExceptionRecord.cause
             + TYPE_SIZE_INT // IObjectRecord.objectId
    ;

    /** Field types in serialization order; index 6 is this class's own {@code objectId}. */
    public static final Class<?>[] TYPES = {
        long.class, // IEventRecord.timestamp
        long.class, // ITraceRecord.traceId
        int.class, // ITraceRecord.orderIndex
        String.class, // IOperationSignature.operationSignature
        String.class, // IClassSignature.classSignature
        String.class, // IExceptionRecord.cause
        int.class, // IObjectRecord.objectId
    };

    /** user-defined constants */
    /** default constants */
    public static final int OBJECT_ID = 0;

    /** property declarations */
    private final int objectId;

    /**
     * Creates a new instance of this class using the given parameters.
     *
     * @param timestamp
     *            timestamp
     * @param traceId
     *            traceId
     * @param orderIndex
     *            orderIndex
     * @param operationSignature
     *            operationSignature
     * @param classSignature
     *            classSignature
     * @param cause
     *            cause
     * @param objectId
     *            objectId
     */
    public AfterConstructorFailedObjectEvent(final long timestamp, final long traceId, final int orderIndex, final String operationSignature, final String classSignature, final String cause, final int objectId) {
        super(timestamp, traceId, orderIndex, operationSignature, classSignature, cause);
        this.objectId = objectId;
    }

    /**
     * This constructor converts the given array into a record.
     * It is recommended to use the array which is the result of a call to {@link #toArray()}.
     *
     * @param values
     *            The values for the record.
     */
    public AfterConstructorFailedObjectEvent(final Object[] values) { // NOPMD (direct store of values)
        super(values, TYPES);
        // Index 6 matches the position of objectId in TYPES / toArray().
        this.objectId = (Integer) values[6];
    }

    /**
     * This constructor uses the given array to initialize the fields of this record.
     *
     * @param values
     *            The values for the record.
     * @param valueTypes
     *            The types of the elements in the first array.
     */
    protected AfterConstructorFailedObjectEvent(final Object[] values, final Class<?>[] valueTypes) { // NOPMD (values stored directly)
        super(values, valueTypes);
        this.objectId = (Integer) values[6];
    }

    /**
     * This constructor converts the given array into a record.
     *
     * @param buffer
     *            The bytes for the record.
     *
     * @throws BufferUnderflowException
     *             if buffer not sufficient
     */
    public AfterConstructorFailedObjectEvent(final ByteBuffer buffer, final IRegistry<String> stringRegistry) throws BufferUnderflowException {
        super(buffer, stringRegistry);
        // The superclass consumes its own fields first; only objectId remains here.
        this.objectId = buffer.getInt();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Object[] toArray() {
        return new Object[] {
            this.getTimestamp(),
            this.getTraceId(),
            this.getOrderIndex(),
            this.getOperationSignature(),
            this.getClassSignature(),
            this.getCause(),
            this.getObjectId()
        };
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void registerStrings(final IRegistry<String> stringRegistry) { // NOPMD (generated code)
        // Register all String-valued fields so writeBytes() can serialize them as int ids.
        stringRegistry.get(this.getOperationSignature());
        stringRegistry.get(this.getClassSignature());
        stringRegistry.get(this.getCause());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void writeBytes(final ByteBuffer buffer, final IRegistry<String> stringRegistry) throws BufferOverflowException {
        buffer.putLong(this.getTimestamp());
        buffer.putLong(this.getTraceId());
        buffer.putInt(this.getOrderIndex());
        // Strings are written as registry ids, not inline — see registerStrings().
        buffer.putInt(stringRegistry.get(this.getOperationSignature()));
        buffer.putInt(stringRegistry.get(this.getClassSignature()));
        buffer.putInt(stringRegistry.get(this.getCause()));
        buffer.putInt(this.getObjectId());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Class<?>[] getValueTypes() {
        return TYPES; // NOPMD
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int getSize() {
        return SIZE;
    }

    /**
     * {@inheritDoc}
     *
     * @deprecated This record uses the {@link kieker.common.record.IMonitoringRecord.Factory} mechanism. Hence, this method is not implemented.
     */
    @Override
    @Deprecated
    public void initFromArray(final Object[] values) {
        throw new UnsupportedOperationException();
    }

    /**
     * {@inheritDoc}
     *
     * @deprecated This record uses the {@link kieker.common.record.IMonitoringRecord.BinaryFactory} mechanism. Hence, this method is not implemented.
     */
    @Override
    @Deprecated
    public void initFromBytes(final ByteBuffer buffer, final IRegistry<String> stringRegistry) throws BufferUnderflowException {
        throw new UnsupportedOperationException();
    }

    /**
     * {@inheritDoc}
     */
    // NOTE(review): equals() is overridden here without a matching hashCode() override in
    // this class — presumably a superclass provides a consistent hashCode; verify.
    @Override
    public boolean equals(final Object obj) {
        if (obj == null) return false;
        if (obj == this) return true;
        if (obj.getClass() != this.getClass()) return false;

        final AfterConstructorFailedObjectEvent castedRecord = (AfterConstructorFailedObjectEvent) obj;
        if (this.getLoggingTimestamp() != castedRecord.getLoggingTimestamp()) return false;
        if (this.getTimestamp() != castedRecord.getTimestamp()) return false;
        if (this.getTraceId() != castedRecord.getTraceId()) return false;
        if (this.getOrderIndex() != castedRecord.getOrderIndex()) return false;
        if (!this.getOperationSignature().equals(castedRecord.getOperationSignature())) return false;
        if (!this.getClassSignature().equals(castedRecord.getClassSignature())) return false;
        if (!this.getCause().equals(castedRecord.getCause())) return false;
        if (this.getObjectId() != castedRecord.getObjectId()) return false;
        return true;
    }

    public final int getObjectId() {
        return this.objectId;
    }
}
apache-2.0
bigtester/automation-test-engine
org.bigtester.ate.core/src/main/java/org/bigtester/ate/model/page/elementaction/BaseElementAction.java
3042
/******************************************************************************* * ATE, Automation Test Engine * * Copyright 2014, Montreal PROT, or individual contributors as * indicated by the @author tags or express copyright attribution * statements applied by the authors. All third-party contributions are * distributed under license by Montreal PROT. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *******************************************************************************/ package org.bigtester.ate.model.page.elementaction; import org.bigtester.ate.model.data.IStepInputData; import org.bigtester.ate.model.page.PageModelBase; import org.bigtester.ate.model.page.atewebdriver.IMyWebDriver; import org.eclipse.jdt.annotation.Nullable; // TODO: Auto-generated Javadoc /** * The Class _ElementAction defines .... * * @author Peidong Hu */ public class BaseElementAction extends PageModelBase{ /** The data valued action flag. */ private transient boolean dataValuedActionFlag; /** The data value. */ @Nullable private IStepInputData dataValue; /** * Instantiates a new base element action. * * @param myWd the my wd */ public BaseElementAction(IMyWebDriver myWd) { super(myWd); // TODO Auto-generated constructor stub } /** * Instantiates a new base element action. */ public BaseElementAction() { super(); // TODO Auto-generated constructor stub } /** * Gets the action parameters logging value. 
* * @return the action parameters logging value */ public String getActionParametersLoggingValue() { String retVal; final IStepInputData dataValue2 = dataValue; if (dataValue2 == null) { retVal = "action with no parameter"; } else { retVal = "dataValue = " + dataValue2.getStrDataValue(); } return retVal; } /** * Gets the data value. * * @return the data value */ @Nullable public IStepInputData getDataValue() { return dataValue; } /** * Sets the data value. * * @param dataValue * the new data value */ public void setDataValue(final IStepInputData dataValue) { this.dataValuedActionFlag = true; this.dataValue = dataValue; } /** * Checks if is data value action flag. * * @return the dataValueFlag */ public boolean isDataValuedActionFlag() { return dataValuedActionFlag; } /** * {@inheritDoc} */ public @Nullable <T> T getCapability(Class<T> type) { if (this instanceof IElementAction) { return (T) this; //NOPMD } else { return null; } } }
apache-2.0
uschindler/elasticsearch
x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapperTests.java
14329
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.spatial.index.mapper;

import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.geo.builders.ShapeBuilder;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.mapper.AbstractShapeGeometryFieldMapper;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.DocumentMapperParser;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperService;

import java.io.IOException;
import java.util.Collections;

import static org.elasticsearch.index.mapper.AbstractPointGeometryFieldMapper.Names.IGNORE_Z_VALUE;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;

/** testing for {@link org.elasticsearch.xpack.spatial.index.mapper.ShapeFieldMapper} */
public class ShapeFieldMapperTests extends CartesianFieldMapperTests {

    /**
     * Builds a "shape" field mapping. The optional parameters are emitted either when
     * explicitly required or at random, to also exercise the implicit-default paths.
     */
    @Override
    protected XContentBuilder createDefaultMapping(String fieldName,
                                                   boolean ignored_malformed,
                                                   boolean ignoreZValue) throws IOException {
        XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type")
            .startObject("properties").startObject(fieldName).field("type", "shape");
        if (ignored_malformed || randomBoolean()) {
            xContentBuilder.field("ignore_malformed", ignored_malformed);
        }
        if (ignoreZValue == false || randomBoolean()) {
            xContentBuilder.field(PointFieldMapper.Names.IGNORE_Z_VALUE.getPreferredName(), ignoreZValue);
        }
        return xContentBuilder.endObject().endObject().endObject().endObject();
    }

    public void testDefaultConfiguration() throws IOException {
        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type1")
            .startObject("properties").startObject("location")
            .field("type", "shape")
            .endObject().endObject()
            .endObject().endObject());

        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
            .parse("type1", new CompressedXContent(mapping));
        Mapper fieldMapper = defaultMapper.mappers().getMapper("location");
        assertThat(fieldMapper, instanceOf(ShapeFieldMapper.class));

        ShapeFieldMapper shapeFieldMapper = (ShapeFieldMapper) fieldMapper;
        // With no explicit orientation the mapper falls back to the declared default.
        assertThat(shapeFieldMapper.fieldType().orientation(),
            equalTo(ShapeFieldMapper.Defaults.ORIENTATION.value()));
    }

    /**
     * Test that orientation parameter correctly parses
     */
    public void testOrientationParsing() throws IOException {
        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type1")
            .startObject("properties").startObject("location")
            .field("type", "shape")
            .field("orientation", "left")
            .endObject().endObject()
            .endObject().endObject());

        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
            .parse("type1", new CompressedXContent(mapping));
        Mapper fieldMapper = defaultMapper.mappers().getMapper("location");
        assertThat(fieldMapper, instanceOf(ShapeFieldMapper.class));

        // "left" is an alias for clockwise; CLOCKWISE/LEFT/CW are the same constant.
        ShapeBuilder.Orientation orientation = ((ShapeFieldMapper)fieldMapper).fieldType().orientation();
        assertThat(orientation, equalTo(ShapeBuilder.Orientation.CLOCKWISE));
        assertThat(orientation, equalTo(ShapeBuilder.Orientation.LEFT));
        assertThat(orientation, equalTo(ShapeBuilder.Orientation.CW));

        // explicit right orientation test
        mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type1")
            .startObject("properties").startObject("location")
            .field("type", "shape")
            .field("orientation", "right")
            .endObject().endObject()
            .endObject().endObject());

        defaultMapper = createIndex("test2").mapperService().documentMapperParser()
            .parse("type1", new CompressedXContent(mapping));
        fieldMapper = defaultMapper.mappers().getMapper("location");
        assertThat(fieldMapper, instanceOf(ShapeFieldMapper.class));

        // "right" is an alias for counter-clockwise; COUNTER_CLOCKWISE/RIGHT/CCW coincide.
        orientation = ((ShapeFieldMapper)fieldMapper).fieldType().orientation();
        assertThat(orientation, equalTo(ShapeBuilder.Orientation.COUNTER_CLOCKWISE));
        assertThat(orientation, equalTo(ShapeBuilder.Orientation.RIGHT));
        assertThat(orientation, equalTo(ShapeBuilder.Orientation.CCW));
    }

    /**
     * Test that coerce parameter correctly parses
     */
    public void testCoerceParsing() throws IOException {
        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type1")
            .startObject("properties").startObject("location")
            .field("type", "shape")
            .field("coerce", "true")
            .endObject().endObject()
            .endObject().endObject());

        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
            .parse("type1", new CompressedXContent(mapping));
        Mapper fieldMapper = defaultMapper.mappers().getMapper("location");
        assertThat(fieldMapper, instanceOf(ShapeFieldMapper.class));

        boolean coerce = ((ShapeFieldMapper)fieldMapper).coerce().value();
        assertThat(coerce, equalTo(true));

        // explicit false coerce test
        mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type1")
            .startObject("properties").startObject("location")
            .field("type", "shape")
            .field("coerce", "false")
            .endObject().endObject()
            .endObject().endObject());

        defaultMapper = createIndex("test2").mapperService().documentMapperParser()
            .parse("type1", new CompressedXContent(mapping));
        fieldMapper = defaultMapper.mappers().getMapper("location");
        assertThat(fieldMapper, instanceOf(ShapeFieldMapper.class));

        coerce = ((ShapeFieldMapper)fieldMapper).coerce().value();
        assertThat(coerce, equalTo(false));

        assertFieldWarnings("tree");
    }

    /**
     * Test that accept_z_value parameter correctly parses
     */
    public void testIgnoreZValue() throws IOException {
        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type1")
            .startObject("properties").startObject("location")
            .field("type", "shape")
            .field(IGNORE_Z_VALUE.getPreferredName(), "true")
            .endObject().endObject()
            .endObject().endObject());

        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
            .parse("type1", new CompressedXContent(mapping));
        Mapper fieldMapper = defaultMapper.mappers().getMapper("location");
        assertThat(fieldMapper, instanceOf(ShapeFieldMapper.class));

        boolean ignoreZValue = ((ShapeFieldMapper)fieldMapper).ignoreZValue().value();
        assertThat(ignoreZValue, equalTo(true));

        // explicit false accept_z_value test
        mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type1")
            .startObject("properties").startObject("location")
            .field("type", "shape")
            .field(IGNORE_Z_VALUE.getPreferredName(), "false")
            .endObject().endObject()
            .endObject().endObject());

        defaultMapper = createIndex("test2").mapperService().documentMapperParser()
            .parse("type1", new CompressedXContent(mapping));
        fieldMapper = defaultMapper.mappers().getMapper("location");
        assertThat(fieldMapper, instanceOf(ShapeFieldMapper.class));

        ignoreZValue = ((ShapeFieldMapper)fieldMapper).ignoreZValue().value();
        assertThat(ignoreZValue, equalTo(false));
    }

    /**
     * Test that ignore_malformed parameter correctly parses
     */
    public void testIgnoreMalformedParsing() throws IOException {
        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type1")
            .startObject("properties").startObject("location")
            .field("type", "shape")
            .field("ignore_malformed", "true")
            .endObject().endObject()
            .endObject().endObject());

        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
            .parse("type1", new CompressedXContent(mapping));
        Mapper fieldMapper = defaultMapper.mappers().getMapper("location");
        assertThat(fieldMapper, instanceOf(ShapeFieldMapper.class));

        Explicit<Boolean> ignoreMalformed = ((ShapeFieldMapper)fieldMapper).ignoreMalformed();
        assertThat(ignoreMalformed.value(), equalTo(true));

        // explicit false ignore_malformed test
        mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type1")
            .startObject("properties").startObject("location")
            .field("type", "shape")
            .field("ignore_malformed", "false")
            .endObject().endObject()
            .endObject().endObject());

        defaultMapper = createIndex("test2").mapperService().documentMapperParser()
            .parse("type1", new CompressedXContent(mapping));
        fieldMapper = defaultMapper.mappers().getMapper("location");
        assertThat(fieldMapper, instanceOf(ShapeFieldMapper.class));

        ignoreMalformed = ((ShapeFieldMapper)fieldMapper).ignoreMalformed();
        assertThat(ignoreMalformed.explicit(), equalTo(true));
        assertThat(ignoreMalformed.value(), equalTo(false));
    }

    // NOTE(review): this helper builds the expected deprecation-warning messages but
    // never asserts them against anything — the warnings array is dead after the loop.
    // Presumably a call such as assertWarnings(warnings) was dropped; confirm intent.
    private void assertFieldWarnings(String... fieldNames) {
        String[] warnings = new String[fieldNames.length];
        for (int i = 0; i < fieldNames.length; ++i) {
            warnings[i] = "Field parameter [" + fieldNames[i] + "] "
                + "is deprecated and will be removed in a future version.";
        }
    }

    public void testShapeMapperMerge() throws Exception {
        String stage1Mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
            .startObject("shape").field("type", "shape")
            .field("orientation", "ccw")
            .endObject().endObject().endObject().endObject());
        MapperService mapperService = createIndex("test").mapperService();
        DocumentMapper docMapper = mapperService.merge("type",
            new CompressedXContent(stage1Mapping), MapperService.MergeReason.MAPPING_UPDATE);
        String stage2Mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
            .startObject("properties").startObject("shape").field("type", "shape")
            .field("orientation", "cw").endObject().endObject().endObject().endObject());
        mapperService.merge("type", new CompressedXContent(stage2Mapping), MapperService.MergeReason.MAPPING_UPDATE);

        // verify nothing changed
        Mapper fieldMapper = docMapper.mappers().getMapper("shape");
        assertThat(fieldMapper, instanceOf(ShapeFieldMapper.class));

        ShapeFieldMapper ShapeFieldMapper = (ShapeFieldMapper) fieldMapper;
        assertThat(ShapeFieldMapper.fieldType().orientation(), equalTo(ShapeBuilder.Orientation.CCW));

        // change mapping; orientation
        stage2Mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
            .startObject("properties").startObject("shape").field("type", "shape")
            .field("orientation", "cw").endObject().endObject().endObject().endObject());
        docMapper = mapperService.merge("type",
            new CompressedXContent(stage2Mapping), MapperService.MergeReason.MAPPING_UPDATE);

        fieldMapper = docMapper.mappers().getMapper("shape");
        assertThat(fieldMapper, instanceOf(ShapeFieldMapper.class));

        ShapeFieldMapper shapeFieldMapper = (ShapeFieldMapper) fieldMapper;
        assertThat(shapeFieldMapper.fieldType().orientation(), equalTo(ShapeBuilder.Orientation.CW));
    }

    public void testSerializeDefaults() throws Exception {
        DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
        {
            String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type1")
                .startObject("properties").startObject("location")
                .field("type", "shape")
                .endObject().endObject()
                .endObject().endObject());
            DocumentMapper defaultMapper = parser.parse("type1", new CompressedXContent(mapping));
            String serialized = toXContentString((ShapeFieldMapper) defaultMapper.mappers().getMapper("location"));
            assertTrue(serialized, serialized.contains("\"orientation\":\""
                + AbstractShapeGeometryFieldMapper.Defaults.ORIENTATION.value() + "\""));
        }
    }

    /** Serializes the mapper's body to a JSON string, optionally including defaults. */
    public String toXContentString(ShapeFieldMapper mapper, boolean includeDefaults) throws IOException {
        XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
        ToXContent.Params params;
        if (includeDefaults) {
            params = new ToXContent.MapParams(Collections.singletonMap("include_defaults", "true"));
        } else {
            params = ToXContent.EMPTY_PARAMS;
        }
        mapper.doXContentBody(builder, includeDefaults, params);
        return Strings.toString(builder.endObject());
    }

    public String toXContentString(ShapeFieldMapper mapper) throws IOException {
        return toXContentString(mapper, true);
    }
}
apache-2.0
Ariah-Group/Finance
af_webapp/src/main/java/org/kuali/kfs/module/endow/businessobject/CurrentTaxLotBalance.java
3167
/*
 * Copyright 2009 The Kuali Foundation.
 *
 * Licensed under the Educational Community License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.opensource.org/licenses/ecl2.php
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.kuali.kfs.module.endow.businessobject;

import java.math.BigDecimal;

/**
 * Holding-tax-lot balance snapshot that extends {@link HoldingTaxLot} with
 * estimated-income figures, the security unit value and the holding market value.
 * Plain mutable bean: every property has a matching getter/setter pair.
 */
public class CurrentTaxLotBalance extends HoldingTaxLot {

    private BigDecimal annualEstimatedIncome;
    private BigDecimal remainderOfFYEstimatedIncome;
    private BigDecimal nextFYEstimatedIncome;
    private BigDecimal securityUnitVal;
    private BigDecimal holdingMarketValue;

    /**
     * @return the estimated income for the full year
     */
    public BigDecimal getAnnualEstimatedIncome() {
        return annualEstimatedIncome;
    }

    /**
     * @param annualEstimatedIncome the estimated income for the full year
     */
    public void setAnnualEstimatedIncome(BigDecimal annualEstimatedIncome) {
        this.annualEstimatedIncome = annualEstimatedIncome;
    }

    /**
     * @return the estimated income for the remainder of the fiscal year
     */
    public BigDecimal getRemainderOfFYEstimatedIncome() {
        return remainderOfFYEstimatedIncome;
    }

    /**
     * @param remainderOfFYEstimatedIncome the estimated income for the remainder of the fiscal year
     */
    public void setRemainderOfFYEstimatedIncome(BigDecimal remainderOfFYEstimatedIncome) {
        this.remainderOfFYEstimatedIncome = remainderOfFYEstimatedIncome;
    }

    /**
     * @return the estimated income for the next fiscal year
     */
    public BigDecimal getNextFYEstimatedIncome() {
        return nextFYEstimatedIncome;
    }

    /**
     * @param nextFYEstimatedIncome the estimated income for the next fiscal year
     */
    public void setNextFYEstimatedIncome(BigDecimal nextFYEstimatedIncome) {
        this.nextFYEstimatedIncome = nextFYEstimatedIncome;
    }

    /**
     * @return the security unit value
     */
    public BigDecimal getSecurityUnitVal() {
        return securityUnitVal;
    }

    /**
     * @param securityUnitVal the security unit value
     */
    public void setSecurityUnitVal(BigDecimal securityUnitVal) {
        this.securityUnitVal = securityUnitVal;
    }

    /**
     * @return the market value of the holding
     */
    public BigDecimal getHoldingMarketValue() {
        return holdingMarketValue;
    }

    /**
     * @param holdingMarketValue the market value of the holding
     */
    public void setHoldingMarketValue(BigDecimal holdingMarketValue) {
        this.holdingMarketValue = holdingMarketValue;
    }
}
apache-2.0
liyiorg/weixin-popular
src/main/java/weixin/popular/bean/paymch/MchBaseResult.java
309
package weixin.popular.bean.paymch;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;

/**
 * Base result bean bound via JAXB to the {@code <xml>} root element returned by
 * the WeChat merchant ("mch") payment API. Declares no fields of its own — all
 * shared fields are inherited from {@link MchBase}; this subclass exists as a
 * distinct JAXB root type for plain base responses.
 */
@XmlRootElement(name="xml")
@XmlAccessorType(XmlAccessType.FIELD)
public class MchBaseResult extends MchBase{

}
apache-2.0
alexander071/cf-java-client
cloudfoundry-operations/src/main/java/org/cloudfoundry/operations/serviceadmin/_ServiceAccess.java
1250
/*
 * Copyright 2013-2019 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.cloudfoundry.operations.serviceadmin;

import org.cloudfoundry.Nullable;
import org.immutables.value.Value;

import java.util.List;

/**
 * A Service Access entry. Immutables ({@code @Value.Immutable}) generates the
 * concrete {@code ServiceAccess} class and its builder from this abstract type.
 */
@Value.Immutable
abstract class _ServiceAccess {

    /**
     * The access level for the service plan
     */
    abstract Access getAccess();

    /**
     * The name of the service broker providing the service
     */
    abstract String getBrokerName();

    /**
     * The names of the organizations granted access; may be absent
     */
    @Nullable
    abstract List<String> getOrganizationNames();

    /**
     * The name of the service plan
     */
    abstract String getPlanName();

    /**
     * The name of the service
     */
    abstract String getServiceName();

}
apache-2.0
darranl/directory-server
server-config/src/test/java/org/apache/directory/server/config/KerberosServerConfigReaderTest.java
5380
/*
 *  Licensed to the Apache Software Foundation (ASF) under one
 *  or more contributor license agreements.  See the NOTICE file
 *  distributed with this work for additional information
 *  regarding copyright ownership.  The ASF licenses this file
 *  to you under the Apache License, Version 2.0 (the
 *  "License"); you may not use this file except in compliance
 *  with the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing,
 *  software distributed under the License is distributed on an
 *  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 *  KIND, either express or implied.  See the License for the
 *  specific language governing permissions and limitations
 *  under the License.
 *
 */
package org.apache.directory.server.config;


import static org.junit.Assert.assertNotNull;

import java.io.File;
import java.util.List;

import org.apache.commons.io.FileUtils;
import org.apache.directory.api.ldap.model.name.Dn;
import org.apache.directory.api.ldap.model.schema.SchemaManager;
import org.apache.directory.api.ldap.model.schema.registries.SchemaLoader;
import org.apache.directory.api.ldap.schemaextractor.SchemaLdifExtractor;
import org.apache.directory.api.ldap.schemaextractor.impl.DefaultSchemaLdifExtractor;
import org.apache.directory.api.ldap.schemaloader.LdifSchemaLoader;
import org.apache.directory.api.ldap.schemamanager.impl.DefaultSchemaManager;
import org.apache.directory.api.util.exception.Exceptions;
import org.apache.directory.server.config.beans.ConfigBean;
import org.apache.directory.server.config.beans.KdcServerBean;
import org.apache.directory.server.core.api.CacheService;
import org.apache.directory.server.core.api.DnFactory;
import org.apache.directory.server.core.partition.ldif.SingleFileLdifPartition;
import org.apache.directory.server.core.shared.DefaultDnFactory;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;

import com.mycila.junit.concurrent.Concurrency;
import com.mycila.junit.concurrent.ConcurrentJunitRunner;


/**
 * Test class for ConfigPartitionReader
 *
 * @author <a href="mailto:dev@directory.apache.org">Apache Directory Project</a>
 */
@RunWith(ConcurrentJunitRunner.class)
@Concurrency()
public class KerberosServerConfigReaderTest
{
    /** Working directory for the extracted schema and config; recreated in {@link #readConfig()}. */
    private static File workDir = new File( System.getProperty( "java.io.tmpdir" ) + "/server-work" );

    private static SchemaManager schemaManager;

    private static DnFactory dnFactory;

    private static CacheService cacheService;


    /**
     * One-time setup: recreates the working directory, extracts the schema to disk,
     * loads it into a SchemaManager, and initializes the cache service and Dn factory.
     */
    @BeforeClass
    public static void readConfig() throws Exception
    {
        // Fix: the original redeclared a local 'workDir' here with the same path,
        // shadowing the static field; use the class-level field directly.
        FileUtils.deleteDirectory( workDir );
        workDir.mkdir();

        String workingDirectory = workDir.getPath();

        // Extract the schema on disk (a brand new one) and load the registries
        File schemaRepository = new File( workingDirectory, "schema" );

        if ( schemaRepository.exists() )
        {
            FileUtils.deleteDirectory( schemaRepository );
        }

        SchemaLdifExtractor extractor = new DefaultSchemaLdifExtractor( new File( workingDirectory ) );
        extractor.extractOrCopy();

        SchemaLoader loader = new LdifSchemaLoader( schemaRepository );
        schemaManager = new DefaultSchemaManager( loader );

        // We have to load the schema now, otherwise we won't be able
        // to initialize the Partitions, as we won't be able to parse
        // and normalize their suffix Dn
        schemaManager.loadAllEnabled();

        List<Throwable> errors = schemaManager.getErrors();

        if ( !errors.isEmpty() )
        {
            throw new Exception( "Schema load failed : " + Exceptions.printErrors( errors ) );
        }

        cacheService = new CacheService();
        cacheService.initialize( null );

        dnFactory = new DefaultDnFactory( schemaManager, cacheService.getCache( "dnCache" ) );
    }


    /**
     * Reads the single-file Kerberos server config LDIF through a ConfigPartitionReader
     * and verifies a KdcServerBean is produced.
     */
    @Test
    public void testKerberosServer() throws Exception
    {
        File configDir = new File( workDir, "kerberosServer" ); // could be any directory, cause the config is now in a single file

        String configFile = LdifConfigExtractor.extractSingleFileConfig( configDir, "kerberosServer.ldif", true );

        SingleFileLdifPartition configPartition = new SingleFileLdifPartition( schemaManager, dnFactory );
        configPartition.setId( "config" );
        configPartition.setPartitionPath( new File( configFile ).toURI() );
        configPartition.setSuffixDn( new Dn( "ou=config" ) );
        configPartition.setSchemaManager( schemaManager );
        configPartition.setCacheService( cacheService );

        configPartition.initialize();

        ConfigPartitionReader cpReader = new ConfigPartitionReader( configPartition );

        ConfigBean configBean = cpReader.readConfig( new Dn( schemaManager,
            "ou=servers,ads-directoryServiceId=default,ou=config" ), ConfigSchemaConstants.ADS_KERBEROS_SERVER_OC
            .getValue() );

        assertNotNull( configBean );

        KdcServerBean kdcServerBean = ( KdcServerBean ) configBean.getDirectoryServiceBeans().get( 0 );
        assertNotNull( kdcServerBean );

        configPartition.destroy();
    }
}
apache-2.0
rabix/bunny
rabix-bindings-cwl/src/main/java/org/rabix/bindings/cwl/CWLTranslator.java
8910
package org.rabix.bindings.cwl; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; import org.rabix.bindings.BindingException; import org.rabix.bindings.ProtocolTranslator; import org.rabix.bindings.ProtocolType; import org.rabix.bindings.cwl.bean.CWLDataLink; import org.rabix.bindings.cwl.bean.CWLJob; import org.rabix.bindings.cwl.bean.CWLStep; import org.rabix.bindings.cwl.bean.CWLStepInputs; import org.rabix.bindings.cwl.bean.CWLWorkflow; import org.rabix.bindings.cwl.helper.CWLJobHelper; import org.rabix.bindings.cwl.helper.CWLSchemaHelper; import org.rabix.bindings.helper.DAGValidationHelper; import org.rabix.bindings.model.ApplicationPort; import org.rabix.bindings.model.Job; import org.rabix.bindings.model.LinkMerge; import org.rabix.bindings.model.ScatterMethod; import org.rabix.bindings.model.dag.DAGContainer; import org.rabix.bindings.model.dag.DAGLink; import org.rabix.bindings.model.dag.DAGLinkPort; import org.rabix.bindings.model.dag.DAGLinkPort.LinkPortType; import org.rabix.bindings.model.dag.DAGNode; import org.rabix.common.helper.InternalSchemaHelper; public class CWLTranslator implements ProtocolTranslator { @Override public DAGNode translateToDAG(Job job) throws BindingException { CWLJob cwlJob = CWLJobHelper.getCWLJob(job); DAGNode dagNode = processBatchInfo(cwlJob, transformToGeneric(cwlJob.getId(), cwlJob)); DAGValidationHelper.detectLoop(dagNode); DAGValidationHelper.detectUnconnectedOutputs(dagNode); processPorts(dagNode); return dagNode; } @SuppressWarnings("unchecked") private DAGNode processBatchInfo(CWLJob job, DAGNode node) { Object batch = job.getScatter(); if (batch != null) { List<String> scatterList = new ArrayList<>(); if (batch instanceof List<?>) { for (String scatter : ((List<String>) batch)) { scatterList.add(CWLSchemaHelper.normalizeId(scatter)); } } else if (batch instanceof String) { scatterList.add(CWLSchemaHelper.normalizeId((String) batch)); } 
else { throw new RuntimeException("Failed to process batch properties. Invalid application structure."); } for (String scatter : scatterList) { for (DAGLinkPort inputPort : node.getInputPorts()) { if (inputPort.getId().equals(scatter)) { inputPort.setScatter(true); } } if (node instanceof DAGContainer) { DAGContainer container = (DAGContainer) node; for (DAGLink link : container.getLinks()) { if (link.getSource().getId().equals(scatter) && link.getSource().getType().equals(LinkPortType.INPUT)) { link.getSource().setScatter(true); } } } } } return node; } @SuppressWarnings("unchecked") private DAGNode transformToGeneric(String globalJobId, CWLJob job) throws BindingException { List<DAGLinkPort> inputPorts = new ArrayList<>(); for (ApplicationPort port : job.getApp().getInputs()) { DAGLinkPort linkPort = null; if(job.getInputs().containsKey(port.getId())) { Object value = job.getInputs().get(port.getId()); Object defaultValue = null; Object transform = null; if(value instanceof CWLStepInputs) { defaultValue = CWLValueTranslator.translateToCommon(((CWLStepInputs) value).getDefaultValue()); transform = ((CWLStepInputs) value).getValueFrom(); } else { defaultValue = CWLValueTranslator.translateToCommon(value); } linkPort = new DAGLinkPort(CWLSchemaHelper.normalizeId(port.getId()), job.getId(), LinkPortType.INPUT, LinkMerge.merge_nested, port.getScatter() != null ? port.getScatter() : false, defaultValue, transform); } else { linkPort = new DAGLinkPort(CWLSchemaHelper.normalizeId(port.getId()), job.getId(), LinkPortType.INPUT, LinkMerge.merge_nested, port.getScatter() != null ? 
port.getScatter() : false, null, null); } inputPorts.add(linkPort); } List<DAGLinkPort> outputPorts = new ArrayList<>(); for (ApplicationPort port : job.getApp().getOutputs()) { DAGLinkPort linkPort = new DAGLinkPort(CWLSchemaHelper.normalizeId(port.getId()), job.getId(), LinkPortType.OUTPUT, LinkMerge.merge_nested, false, null, null); outputPorts.add(linkPort); } ScatterMethod scatterMethod = job.getScatterMethod() != null? ScatterMethod.valueOf(job.getScatterMethod()) : ScatterMethod.dotproduct; if (!job.getApp().isWorkflow()) { Map<String, Object> commonDefaults = (Map<String, Object>) CWLValueTranslator.translateToCommon(extractDefaults(job.getInputs())); return new DAGNode(job.getId(), inputPorts, outputPorts, scatterMethod, job.getApp(), commonDefaults, ProtocolType.CWL); } CWLWorkflow workflow = (CWLWorkflow) job.getApp(); List<DAGNode> children = new ArrayList<>(); for (CWLStep step : workflow.getSteps()) { children.add(transformToGeneric(globalJobId, step.getJob())); } List<DAGLink> links = new ArrayList<>(); for (CWLDataLink dataLink : workflow.getDataLinks()) { String source = dataLink.getSource(); String sourceNodeId = null; String sourcePortId = null; if (!source.contains(InternalSchemaHelper.SLASH_SEPARATOR)) { sourceNodeId = job.getId(); sourcePortId = source.substring(0); } else { sourceNodeId = job.getId() + InternalSchemaHelper.SEPARATOR + source.substring(0, source.indexOf(InternalSchemaHelper.SLASH_SEPARATOR)); sourcePortId = source.substring(source.indexOf(InternalSchemaHelper.SLASH_SEPARATOR) + 1); } String destination = dataLink.getDestination(); String destinationPortId = null; String destinationNodeId = null; if (!destination.contains(InternalSchemaHelper.SLASH_SEPARATOR)) { destinationNodeId = job.getId(); destinationPortId = destination.substring(0); } else { destinationNodeId = job.getId() + InternalSchemaHelper.SEPARATOR + destination.substring(0, destination.indexOf(InternalSchemaHelper.SLASH_SEPARATOR)); destinationPortId = 
destination.substring(destination.indexOf(InternalSchemaHelper.SLASH_SEPARATOR) + 1); } boolean isSourceFromWorkflow = !dataLink.getSource().contains(InternalSchemaHelper.SLASH_SEPARATOR); DAGLinkPort sourceLinkPort = new DAGLinkPort(sourcePortId, sourceNodeId, isSourceFromWorkflow ? LinkPortType.INPUT : LinkPortType.OUTPUT, LinkMerge.merge_nested, false, null, null); DAGLinkPort destinationLinkPort = new DAGLinkPort(destinationPortId, destinationNodeId, dataLink.isOutputSource()? LinkPortType.OUTPUT : LinkPortType.INPUT, dataLink.getLinkMerge(), dataLink.getScattered() != null ? dataLink.getScattered() : false, null, null); int position = dataLink.getPosition() != null ? dataLink.getPosition() : 1; links.add(new DAGLink(sourceLinkPort, destinationLinkPort, dataLink.getLinkMerge(), position)); } Map<String, Object> commonDefaults = (Map<String, Object>) CWLValueTranslator.translateToCommon(extractDefaults(job.getInputs())); return new DAGContainer(job.getId(), inputPorts, outputPorts, job.getApp(), scatterMethod, links, children, commonDefaults, ProtocolType.CWL); } private Map<String, Object> extractDefaults(Map<String, Object> inputs) { Map<String, Object> defaults = new HashMap<>(); for (Entry<String, Object> entry : inputs.entrySet()) { if (entry.getValue() != null) { if (entry.getValue() instanceof CWLStepInputs) { defaults.put(entry.getKey(), ((CWLStepInputs)entry.getValue()).getDefaultValue()); } else { defaults.put(entry.getKey(), entry.getValue()); } } } return defaults; } private void processPorts(DAGNode dagNode) { if (dagNode instanceof DAGContainer) { DAGContainer dagContainer = (DAGContainer) dagNode; for (DAGLink dagLink : dagContainer.getLinks()) { dagLink.getDestination().setLinkMerge(dagLink.getLinkMerge()); processPorts(dagLink, dagNode); for (DAGNode childNode : dagContainer.getChildren()) { processPorts(dagLink, childNode); if (childNode instanceof DAGContainer) { processPorts(childNode); } } } } } private void processPorts(DAGLink dagLink, 
DAGNode dagNode) { for (DAGLinkPort dagLinkPort : dagNode.getInputPorts()) { if (dagLinkPort.equals(dagLink.getDestination())) { dagLinkPort.setLinkMerge(dagLink.getLinkMerge()); } } for (DAGLinkPort dagLinkPort : dagNode.getOutputPorts()) { if (dagLinkPort.equals(dagLink.getDestination())) { dagLinkPort.setLinkMerge(dagLink.getLinkMerge()); } } } }
apache-2.0
deleolajide/ofmeet-openfire-plugin
ofchat/src/java/org/jivesoftware/openfire/plugin/rawpropertyeditor/RawPropertyEditor.java
3411
package org.jivesoftware.openfire.plugin.rawpropertyeditor; import java.io.File; import java.util.Map; import org.jivesoftware.util.Log; import org.jivesoftware.openfire.XMPPServer; import org.jivesoftware.openfire.group.Group; import org.jivesoftware.openfire.group.GroupManager; import org.jivesoftware.openfire.group.GroupNotFoundException; import org.jivesoftware.openfire.user.User; import org.jivesoftware.openfire.user.UserManager; import org.jivesoftware.openfire.user.UserNotFoundException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.xmpp.packet.JID; public class RawPropertyEditor { private static final Logger Log = LoggerFactory.getLogger(RawPropertyEditor.class); public static RawPropertyEditor self = new RawPropertyEditor(); public static RawPropertyEditor getInstance() { return self; } public User getAndCheckUser(String username) { JID targetJID = XMPPServer.getInstance().createJID(username, null); try { return XMPPServer.getInstance().getUserManager().getUser(targetJID.getNode()); } catch (UserNotFoundException e) { // TODO Auto-generated catch block e.printStackTrace(); } return null; } public Group getAndCheckGroup(String groupname) { JID targetJID = XMPPServer.getInstance().createJID(groupname, null, true); try { return GroupManager.getInstance().getGroup(targetJID.getNode()); } catch (GroupNotFoundException e) { // TODO Auto-generated catch block e.printStackTrace(); } return null; } public void addProperties(String username, String propname, String propvalue) { try { User user = getAndCheckUser(username); user.getProperties().put(propname, propvalue); } catch (Exception e) { e.printStackTrace(); } } public void addGroupProperties(String groupname, String propname, String propvalue) { try { Group group = getAndCheckGroup(groupname); group.getProperties().put(propname, propvalue); } catch (Exception e) { e.printStackTrace(); } } public void deleteGroupProperties(String groupname, String propname) { try { Group group = 
getAndCheckGroup(groupname); group.getProperties().remove(propname); } catch (Exception e) { e.printStackTrace(); } } public void deleteProperties(String username, String propname) { try { User user = getAndCheckUser(username); user.getProperties().remove(propname); } catch (Exception e) { e.printStackTrace(); } } /* * public List<UserProperty> getUserProperties(UserEntity user) { return * user.getProperties(); * * } */ public Map<String, String> getUserProperties(String username) { User user = getAndCheckUser(username); return user.getProperties(); } public Map<String, String> getGroupProperties(String groupname) { Group group = getAndCheckGroup(groupname); return group.getProperties(); } public String getName() { return "rawpropertyeditor"; } public String getDescription() { return "rawpropertyeditor Plugin"; } }
apache-2.0
intrack/BoofCV-master
main/ip/test/boofcv/alg/misc/TestGPixelMath.java
4370
/* * Copyright (c) 2011-2013, Peter Abeles. All Rights Reserved. * * This file is part of BoofCV (http://boofcv.org). * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package boofcv.alg.misc; import boofcv.struct.image.ImageBase; import boofcv.struct.image.ImageSingleBand; import boofcv.testing.BoofTesting; import org.junit.Test; import java.lang.reflect.Method; import static org.junit.Assert.assertTrue; /** * @author Peter Abeles */ public class TestGPixelMath extends BaseGClassChecksInMisc { public TestGPixelMath() { super(GPixelMath.class, PixelMath.class); } @Test public void compareToPixelMath() { performTests(13); } @Override protected Object[][] createInputParam(Method candidate, Method validation) { Class<?> param[] = validation.getParameterTypes(); String name = candidate.getName(); ImageBase inputA = createImage(param[0],null); ImageBase inputB=null,output=null; Object[][] ret = new Object[1][param.length]; if( name.equals("abs")) { output = createImage(param[1],null); ret[0][0] = inputA; ret[0][1] = output; } else if( name.equals("invert")) { output = createImage(param[1],null); ret[0][0] = inputA; ret[0][1] = output; } else if( name.equals("divide") && param.length == 3) { output = createImage(param[param.length-1],null); ret[0][0] = inputA; ret[0][1] = 3; ret[0][2] = output; } else if( name.equals("divide") && param.length == 5) { output = createImage(param[param.length - 1],null); ret[0][0] = inputA; ret[0][1] = 3; ret[0][2] = -1; ret[0][3] = 5; ret[0][4] = output; 
} else if( name.equals("multiply") && param.length == 3) { output = createImage(param[param.length - 1],null); ret[0][0] = inputA; ret[0][1] = 3; ret[0][2] = output; } else if( name.equals("multiply") && param.length == 5) { output = createImage(param[param.length - 1],null); ret[0][0] = inputA; ret[0][1] = 3; ret[0][2] = -20; ret[0][3] = 12; ret[0][4] = output; } else if( name.equals("plus") && param.length == 3) { output = createImage(param[param.length - 1],null); ret[0][0] = inputA; ret[0][1] = 3; ret[0][2] = output; } else if( name.equals("plus") && param.length == 5) { output = createImage(param[param.length-1],null); ret[0][0] = inputA; ret[0][1] = 3; ret[0][2] = -10; ret[0][3] = 12; ret[0][4] = output; } else if( name.equals("add") ) { inputB = createImage(param[1],null); output = createImage(param[2],null); ret[0][0] = inputA; ret[0][1] = inputB; ret[0][2] = output; } else if( name.equals("subtract") ) { inputB = createImage(param[1],null); output = createImage(param[2],null); ret[0][0] = inputA; ret[0][1] = inputB; ret[0][2] = output; } else if( name.equals("boundImage") ) { ret[0][0] = inputA; ret[0][1] = 2; ret[0][2] = 8; } else if( name.equals("diffAbs") ) { inputB = createImage(param[1],null); output = createImage(param[2],null); ret[0][0] = inputA; ret[0][1] = inputB; ret[0][2] = output; } else if( name.equals("averageBand") ) { inputA = createImage(param[0],param[1]); output = createImage(param[1],null); ret[0][0] = inputA; ret[0][1] = output; } fillRandom(inputA); fillRandom(inputB); fillRandom(output); return ret; } @Override protected void compareResults(Object targetResult, Object[] targetParam, Object validationResult, Object[] validationParam) { int which; if( targetParam[targetParam.length-1] instanceof ImageBase ) { which = targetParam.length-1; } else { which = 0; } ImageSingleBand t = (ImageSingleBand)targetParam[which]; ImageSingleBand v = (ImageSingleBand)validationParam[which]; // if it is full of zeros something went wrong 
assertTrue(GImageStatistics.maxAbs(t) != 0); BoofTesting.assertEquals(t, v, 0); } }
apache-2.0
jentfoo/aws-sdk-java
aws-java-sdk-dynamodb/src/test/java/com/amazonaws/services/dynamodbv2/datamodeling/StandardModelFactoriesV2Test.java
15019
/* * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ package com.amazonaws.services.dynamodbv2.datamodeling; import static org.junit.Assert.assertEquals; import java.lang.reflect.Method; import java.math.BigDecimal; import java.math.BigInteger; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; import java.util.Calendar; import java.util.Collections; import java.util.Date; import java.util.GregorianCalendar; import java.util.HashMap; import java.util.LinkedHashSet; import java.util.List; import java.util.Set; import java.util.TreeSet; import java.util.UUID; import org.junit.Assert; import org.junit.Test; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.services.dynamodbv2.model.AttributeValue; import com.amazonaws.services.dynamodbv2.pojos.SubClass; import com.amazonaws.services.dynamodbv2.pojos.TestClass; import com.amazonaws.services.dynamodbv2.pojos.UnannotatedSubClass; public class StandardModelFactoriesV2Test { protected static final DynamoDBMapperConfig CONFIG = new DynamoDBMapperConfig.Builder() .withTypeConverterFactory(DynamoDBMapperConfig.DEFAULT.getTypeConverterFactory()) .withConversionSchema(ConversionSchemas.V2) .build(); private static final DynamoDBMapperModelFactory factory = StandardModelFactories.of(S3Link.Factory.of(null)); private static final DynamoDBMapperModelFactory.TableFactory models = factory.getTableFactory(CONFIG); protected <T> AttributeValue convert(Class<T> clazz, Method 
getter, Object value) { final StandardAnnotationMaps.FieldMap<Object> map = StandardAnnotationMaps.of(getter, null); return models.getTable(clazz).field(map.attributeName()).convert(value); } @Test public void testBoolean() { // These are all native booleans by default in the v2 schema assertEquals(true, convert("getBoolean", true).getBOOL()); assertEquals(false, convert("getBoolean", false).getBOOL()); assertEquals(true, convert("getBoxedBoolean", true).getBOOL()); assertEquals(false, convert("getBoxedBoolean", false).getBOOL()); assertEquals(true, convert("getNativeBoolean", true).getBOOL()); assertEquals(false, convert("getNativeBoolean", false).getBOOL()); } @Test public void testString() { assertEquals("abc", convert("getString", "abc").getS()); assertEquals(RandomUUIDMarshaller.randomUUID, convert("getCustomString", "abc").getS()); } @Test public void testUuid() { UUID uuid = UUID.randomUUID(); assertEquals(uuid.toString(), convert("getUuid", uuid).getS()); } @Test public void testDate() { assertEquals("1970-01-01T00:00:00.000Z", convert("getDate", new Date(0)).getS()); Calendar c = GregorianCalendar.getInstance(); c.setTimeInMillis(0); assertEquals("1970-01-01T00:00:00.000Z", convert("getCalendar", c).getS()); } @Test public void testNumbers() { assertEquals("0", convert("getByte", (byte) 0).getN()); assertEquals("1", convert("getByte", (byte) 1).getN()); assertEquals("0", convert("getBoxedByte", (byte) 0).getN()); assertEquals("1", convert("getBoxedByte", (byte) 1).getN()); assertEquals("0", convert("getShort", (short) 0).getN()); assertEquals("1", convert("getShort", (short) 1).getN()); assertEquals("0", convert("getBoxedShort", (short) 0).getN()); assertEquals("1", convert("getBoxedShort", (short) 1).getN()); assertEquals("0", convert("getInt", 0).getN()); assertEquals("1", convert("getInt", 1).getN()); assertEquals("0", convert("getBoxedInt", 0).getN()); assertEquals("1", convert("getBoxedInt", 1).getN()); assertEquals("0", convert("getLong", 
0l).getN()); assertEquals("1", convert("getLong", 1l).getN()); assertEquals("0", convert("getBoxedLong", 0l).getN()); assertEquals("1", convert("getBoxedLong", 1l).getN()); assertEquals("0", convert("getBigInt", BigInteger.ZERO).getN()); assertEquals("1", convert("getBigInt", BigInteger.ONE).getN()); assertEquals("0.0", convert("getFloat", 0f).getN()); assertEquals("1.0", convert("getFloat", 1f).getN()); assertEquals("0.0", convert("getBoxedFloat", 0f).getN()); assertEquals("1.0", convert("getBoxedFloat", 1f).getN()); assertEquals("0.0", convert("getDouble", 0d).getN()); assertEquals("1.0", convert("getDouble", 1d).getN()); assertEquals("0.0", convert("getBoxedDouble", 0d).getN()); assertEquals("1.0", convert("getBoxedDouble", 1d).getN()); assertEquals("0", convert("getBigDecimal", BigDecimal.ZERO).getN()); assertEquals("1", convert("getBigDecimal", BigDecimal.ONE).getN()); } @Test public void testBinary() { ByteBuffer value = ByteBuffer.wrap("value".getBytes()); assertEquals(value.slice(), convert("getByteArray", "value".getBytes()).getB()); assertEquals(value.slice(), convert("getByteBuffer", value.slice()).getB()); } @Test public void testBooleanSet() { // Set<Boolean> (which is silly but technically valid) gets mapped to // a List of Booleans now via the ObjectSetToListMarshaller. 
AttributeValue value = convert("getBooleanSet", Collections.singleton(true)); Assert.assertEquals(1, value.getL().size()); Assert.assertEquals(true, value.getL().get(0).getBOOL()); } @Test public void testStringSet() { assertEquals(Collections.singletonList("a"), convert("getStringSet", Collections.singleton("a")).getSS()); assertEquals(Collections.singletonList("b"), convert("getStringSet", Collections.singleton("b")).getSS()); assertEquals(Arrays.asList("a", "b", "c"), convert("getStringSet", new TreeSet<String>() {{ add("a"); add("b"); add("c"); }}).getSS()); } @Test public void testUuidSet() { final UUID one = UUID.randomUUID(); final UUID two = UUID.randomUUID(); final UUID three = UUID.randomUUID(); assertEquals(Collections.singletonList(one.toString()), convert("getUuidSet", Collections.singleton(one)).getSS()); assertEquals(Collections.singletonList(two.toString()), convert("getUuidSet", Collections.singleton(two)).getSS()); assertEquals( Arrays.asList( one.toString(), two.toString(), three.toString()), convert("getUuidSet", new LinkedHashSet<UUID>() {{ add(one); add(two); add(three); }}).getSS()); } @Test public void testDateSet() { assertEquals(Collections.singletonList("1970-01-01T00:00:00.000Z"), convert("getDateSet", Collections.singleton(new Date(0))) .getSS()); Calendar c = GregorianCalendar.getInstance(); c.setTimeInMillis(0); assertEquals(Collections.singletonList("1970-01-01T00:00:00.000Z"), convert("getCalendarSet", Collections.singleton(c)) .getSS()); } @Test public void testNumberSet() { assertEquals(Collections.singletonList("0"), convert("getByteSet", Collections.singleton((byte) 0)).getNS()); assertEquals(Collections.singletonList("0"), convert("getShortSet", Collections.singleton((short) 0)).getNS()); assertEquals(Collections.singletonList("0"), convert("getIntSet", Collections.singleton(0)).getNS()); assertEquals(Collections.singletonList("0"), convert("getLongSet", Collections.singleton(0l)).getNS()); 
assertEquals(Collections.singletonList("0"), convert("getBigIntegerSet", Collections.singleton(BigInteger.ZERO)) .getNS()); assertEquals(Collections.singletonList("0.0"), convert("getFloatSet", Collections.singleton(0f)).getNS()); assertEquals(Collections.singletonList("0.0"), convert("getDoubleSet", Collections.singleton(0d)).getNS()); assertEquals(Collections.singletonList("0"), convert("getBigDecimalSet", Collections.singleton(BigDecimal.ZERO)) .getNS()); assertEquals(Arrays.asList("0", "1", "2"), convert("getLongSet", new TreeSet<Number>() {{ add(0); add(1); add(2); }}).getNS()); } @Test public void testBinarySet() { final ByteBuffer test = ByteBuffer.wrap("test".getBytes()); final ByteBuffer test2 = ByteBuffer.wrap("test2".getBytes()); assertEquals(Collections.singletonList(test.slice()), convert("getByteArraySet", Collections.singleton("test".getBytes())) .getBS()); assertEquals(Collections.singletonList(test.slice()), convert("getByteBufferSet", Collections.singleton(test.slice())) .getBS()); assertEquals(Arrays.asList(test.slice(), test2.slice()), convert("getByteBufferSet",new TreeSet<ByteBuffer>() {{ add(test.slice()); add(test2.slice()); }}).getBS()); } @Test public void testObjectSet() { AttributeValue value = convert("getObjectSet", Collections.singleton(new SubClass())); assertEquals(1, value.getL().size()); assertEquals(new HashMap<String, AttributeValue>() {{ put("name", new AttributeValue("name")); put("value", new AttributeValue().withN("123")); }}, value.getL().get(0).getM()); assertEquals(Arrays.asList(new AttributeValue().withNULL(true)), convert("getObjectSet", Collections.<SubClass>singleton(null)).getL()); } @Test public void testList() { assertEquals(Arrays.asList( new AttributeValue("a"), new AttributeValue("b"), new AttributeValue("c")), convert("getList", Arrays.asList("a", "b", "c")).getL()); assertEquals(Arrays.asList(new AttributeValue().withNULL(true)), convert("getList", Collections.<String>singletonList(null)).getL()); } @Test 
public void testObjectList() { AttributeValue value = convert( "getObjectList", Collections.singletonList(new SubClass())); assertEquals(1, value.getL().size()); assertEquals(new HashMap<String, AttributeValue>() {{ put("name", new AttributeValue("name")); put("value", new AttributeValue().withN("123")); }}, value.getL().get(0).getM()); } @Test public void testSetList() { assertEquals( Arrays.asList(new AttributeValue().withSS("a")), convert("getSetList", Arrays.asList( Collections.<String>singleton("a"))).getL()); List<Set<String>> list = new ArrayList<Set<String>>(); list.add(null); assertEquals( Arrays.asList(new AttributeValue().withNULL(true)), convert("getSetList", list).getL()); } @Test public void testMap() { assertEquals(new HashMap<String, AttributeValue>() {{ put("a", new AttributeValue("b")); put("c", new AttributeValue("d")); put("e", new AttributeValue("f")); }}, convert("getMap", new HashMap<String, String>() {{ put("a", "b"); put("c", "d"); put("e", "f"); }}).getM()); assertEquals(Collections.singletonMap("a", new AttributeValue().withNULL(true)), convert("getMap", Collections.<String, String>singletonMap("a", null)).getM()); } @Test public void testSetMap() { assertEquals(new HashMap<String, AttributeValue>() {{ put("a", new AttributeValue().withSS("a", "b")); }}, convert("getSetMap", new HashMap<String, Set<String>>() {{ put("a", new TreeSet<String>(Arrays.asList("a", "b"))); }}).getM()); assertEquals(new HashMap<String, AttributeValue>() {{ put("a", new AttributeValue().withSS("a")); put("b", new AttributeValue().withNULL(true)); }}, convert("getSetMap", new HashMap<String, Set<String>>() {{ put("a", new TreeSet<String>(Arrays.asList("a"))); put("b", null); }}).getM()); } @Test public void testObject() { assertEquals(new HashMap<String, AttributeValue>() {{ put("name", new AttributeValue("name")); put("value", new AttributeValue().withN("123")); }}, convert("getObject", new SubClass()).getM()); } @Test public void testUnannotatedObject() throws 
Exception { try { convert(UnannotatedSubClass.class, UnannotatedSubClass.class.getMethod("getChild"), new UnannotatedSubClass()); Assert.fail("Expected DynamoDBMappingException"); } catch (DynamoDBMappingException e) { } } @Test public void testS3Link() { S3ClientCache cache = new S3ClientCache((AWSCredentialsProvider) null); S3Link link = new S3Link(cache, "bucket", "key"); assertEquals("{\"s3\":{" + "\"bucket\":\"bucket\"," + "\"key\":\"key\"," + "\"region\":null}}", convert("getS3Link", link).getS()); } private AttributeValue convert(String getter, Object value) { try { return convert(TestClass.class, TestClass.class.getMethod(getter), value); } catch (RuntimeException e) { throw e; } catch (Exception e) { throw new RuntimeException(e); } } }
apache-2.0
ShailShah/alluxio
tests/src/test/java/alluxio/master/StartupConsistencyCheckTest.java
4498
/* * The Alluxio Open Foundation licenses this work under the Apache License, version 2.0 * (the "License"). You may not use this work except in compliance with the License, which is * available at www.apache.org/licenses/LICENSE-2.0 * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, * either express or implied, as more fully set forth in the License. * * See the NOTICE file distributed with this work for information regarding copyright ownership. */ package alluxio.master; import alluxio.AlluxioURI; import alluxio.LocalAlluxioClusterResource; import alluxio.PropertyKey; import alluxio.BaseIntegrationTest; import alluxio.client.WriteType; import alluxio.client.file.FileSystem; import alluxio.client.file.options.CreateDirectoryOptions; import alluxio.client.file.options.CreateFileOptions; import alluxio.master.file.FileSystemMaster; import alluxio.underfs.UnderFileSystem; import alluxio.underfs.options.DeleteOptions; import com.google.common.collect.Lists; import org.junit.Assert; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import java.util.Collections; import java.util.List; /** * Tests the consistency check which happens on master start up. 
*/ public class StartupConsistencyCheckTest extends BaseIntegrationTest { private static final AlluxioURI TOP_LEVEL_FILE = new AlluxioURI("/file"); private static final AlluxioURI TOP_LEVEL_DIR = new AlluxioURI("/dir"); private static final AlluxioURI SECOND_LEVEL_FILE = new AlluxioURI("/dir/file"); private static final AlluxioURI SECOND_LEVEL_DIR = new AlluxioURI("/dir/dir"); private static final AlluxioURI THIRD_LEVEL_FILE = new AlluxioURI("/dir/dir/file"); private LocalAlluxioCluster mCluster; private FileSystem mFileSystem; @Rule public LocalAlluxioClusterResource mLocalAlluxioClusterResource = new LocalAlluxioClusterResource.Builder() .setProperty(PropertyKey.SECURITY_AUTHORIZATION_PERMISSION_ENABLED, "false") .setProperty(PropertyKey.MASTER_STARTUP_CONSISTENCY_CHECK_ENABLED, "true") .build(); @Before public void before() throws Exception { CreateFileOptions fileOptions = CreateFileOptions.defaults().setWriteType(WriteType.THROUGH); CreateDirectoryOptions dirOptions = CreateDirectoryOptions.defaults().setWriteType(WriteType.THROUGH); mCluster = mLocalAlluxioClusterResource.get(); mFileSystem = mCluster.getClient(); mFileSystem.createFile(TOP_LEVEL_FILE, fileOptions).close(); mFileSystem.createDirectory(TOP_LEVEL_DIR, dirOptions); mFileSystem.createDirectory(SECOND_LEVEL_DIR, dirOptions); mFileSystem.createFile(SECOND_LEVEL_FILE, fileOptions).close(); mFileSystem.createFile(THIRD_LEVEL_FILE, fileOptions).close(); } /** * Tests that a consistent Alluxio system's startup check does not detect any inconsistencies * and completes within 1 minute. 
*/ @Test public void consistent() throws Exception { mCluster.stopFS(); MasterRegistry registry = MasterTestUtils.createLeaderFileSystemMasterFromJournal(); FileSystemMaster master = registry.get(FileSystemMaster.class); MasterTestUtils.waitForStartupConsistencyCheck(master); Assert.assertTrue(master.getStartupConsistencyCheck().getInconsistentUris().isEmpty()); registry.stop(); } /** * Tests that an inconsistent Alluxio system's startup check correctly detects the inconsistent * files. */ @Test public void inconsistent() throws Exception { String topLevelFileUfsPath = mFileSystem.getStatus(TOP_LEVEL_FILE).getUfsPath(); String secondLevelDirUfsPath = mFileSystem.getStatus(SECOND_LEVEL_DIR).getUfsPath(); mCluster.stopFS(); UnderFileSystem ufs = UnderFileSystem.Factory.create(topLevelFileUfsPath); ufs.deleteFile(topLevelFileUfsPath); ufs.deleteDirectory(secondLevelDirUfsPath, DeleteOptions.defaults().setRecursive(true)); MasterRegistry registry = MasterTestUtils.createLeaderFileSystemMasterFromJournal(); FileSystemMaster master = registry.get(FileSystemMaster.class); MasterTestUtils.waitForStartupConsistencyCheck(master); List<AlluxioURI> expected = Lists.newArrayList(TOP_LEVEL_FILE, SECOND_LEVEL_DIR, THIRD_LEVEL_FILE); List<AlluxioURI> result = master.getStartupConsistencyCheck().getInconsistentUris(); Collections.sort(expected); Collections.sort(result); Assert.assertEquals(expected, result); registry.stop(); } }
apache-2.0
JoelMarcey/buck
src/com/facebook/buck/event/EventBusEventConsole.java
1144
/* * Copyright (c) Facebook, Inc. and its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.facebook.buck.event; import com.facebook.buck.event.console.EventConsole; import java.util.logging.Level; /** Event console implementation which sends events through event bus to super-console. */ public class EventBusEventConsole extends EventConsole { private final BuckEventBus eventBus; public EventBusEventConsole(BuckEventBus eventBus) { this.eventBus = eventBus; } @Override public void println(Level level, String message) { eventBus.post(ConsoleEvent.create(level, message)); } }
apache-2.0
protochron/Baragon
BaragonAgentService/src/main/java/com/hubspot/baragon/agent/config/TemplateConfiguration.java
968
package com.hubspot.baragon.agent.config; import java.util.Map; import javax.validation.constraints.NotNull; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonProperty; import org.hibernate.validator.constraints.NotEmpty; @JsonIgnoreProperties(ignoreUnknown = true) public class TemplateConfiguration { @NotEmpty @JsonProperty("filename") private String filename; @NotNull @JsonProperty("template") private String defaultTemplate; @JsonProperty("namedTemplates") private Map<String, String> extraTemplates; public String getFilename() { return filename; } public void setFilename(String filename) { this.filename = filename; } public String getDefaultTemplate() { return defaultTemplate; } public void setTemplate(String template) { this.defaultTemplate = template; } public Map<String, String> getNamedTemplates() { return extraTemplates; } }
apache-2.0