repo_name
stringlengths
5
108
path
stringlengths
6
333
size
stringlengths
1
6
content
stringlengths
4
977k
license
stringclasses
15 values
facebook/presto
presto-spark-base/src/main/java/com/facebook/presto/spark/execution/PrestoSparkDiskPageInput.java
8167
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.facebook.presto.spark.execution;

import com.facebook.airlift.log.Logger;
import com.facebook.presto.common.Page;
import com.facebook.presto.execution.StageId;
import com.facebook.presto.spark.classloader_interface.PrestoSparkStorageHandle;
import com.facebook.presto.spark.classloader_interface.PrestoSparkTaskOutput;
import com.facebook.presto.spi.PrestoException;
import com.facebook.presto.spi.page.PagesSerde;
import com.facebook.presto.spi.page.SerializedPage;
import com.facebook.presto.spi.plan.PlanNodeId;
import com.facebook.presto.spi.storage.TempDataOperationContext;
import com.facebook.presto.spi.storage.TempStorage;
import com.facebook.presto.spi.storage.TempStorageHandle;
import com.google.common.collect.ImmutableList;
import io.airlift.slice.InputStreamSliceInput;

import javax.annotation.concurrent.GuardedBy;

import java.io.IOException;
import java.io.InputStream;
import java.io.UncheckedIOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.zip.CRC32;

import static com.facebook.presto.spark.SparkErrorCode.STORAGE_ERROR;
import static com.facebook.presto.spi.page.PagesSerdeUtil.readSerializedPages;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static java.util.Collections.shuffle;
import static java.util.Objects.requireNonNull;

/**
 * A {@link PrestoSparkPageInput} that reads a broadcast table's pages from
 * temp storage on disk. Pages are loaded lazily on the first call to
 * {@link #getNextPage()}, verified against a per-file CRC32 checksum, and the
 * deserialized result is cached in the {@link PrestoSparkBroadcastTableCacheManager}
 * (keyed by stage and plan node) so other tasks on the same executor can reuse it.
 */
public class PrestoSparkDiskPageInput
        implements PrestoSparkPageInput
{
    private static final Logger log = Logger.get(PrestoSparkDiskPageInput.class);

    private final PagesSerde pagesSerde;
    private final TempStorage tempStorage;
    private final TempDataOperationContext tempDataOperationContext;
    private final PrestoSparkBroadcastTableCacheManager prestoSparkBroadcastTableCacheManager;
    private final StageId stageId;
    private final PlanNodeId planNodeId;
    // Outer list: one entry per producing task; inner list: the storage handles
    // (files) written by that task.
    private final List<List<PrestoSparkStorageHandle>> broadcastTableFilesInfo;

    // Lazily initialized under "this"; one iterator per producing task.
    @GuardedBy("this")
    private List<Iterator<Page>> pageIterators;
    // Index of the iterator currently being drained; advances monotonically.
    @GuardedBy("this")
    private int currentIteratorIndex;

    public PrestoSparkDiskPageInput(
            PagesSerde pagesSerde,
            TempStorage tempStorage,
            TempDataOperationContext tempDataOperationContext,
            PrestoSparkBroadcastTableCacheManager prestoSparkBroadcastTableCacheManager,
            StageId stageId,
            PlanNodeId planNodeId,
            List<List<PrestoSparkStorageHandle>> broadcastTableFilesInfo)
    {
        this.pagesSerde = requireNonNull(pagesSerde, "pagesSerde is null");
        this.tempStorage = requireNonNull(tempStorage, "tempStorage is null");
        this.tempDataOperationContext = requireNonNull(tempDataOperationContext, "tempDataOperationContext is null");
        this.prestoSparkBroadcastTableCacheManager = requireNonNull(prestoSparkBroadcastTableCacheManager, "prestoSparkBroadcastTableCacheManager is null");
        this.stageId = requireNonNull(stageId, "stageId is null");
        this.planNodeId = requireNonNull(planNodeId, "planNodeId is null");
        this.broadcastTableFilesInfo = requireNonNull(broadcastTableFilesInfo, "broadcastTableFilesInfo is null");
    }

    /**
     * Returns the next page, draining each per-task iterator in order, or
     * {@code null} once all iterators are exhausted. Thread-safe: iterator
     * state is only touched while holding the monitor.
     */
    @Override
    public Page getNextPage()
    {
        Page page = null;
        synchronized (this) {
            while (page == null) {
                // All iterators exhausted: end of input.
                if (currentIteratorIndex >= getPageIterators().size()) {
                    return null;
                }
                Iterator<Page> currentIterator = getPageIterators().get(currentIteratorIndex);
                if (currentIterator.hasNext()) {
                    page = currentIterator.next();
                }
                else {
                    // Current iterator drained; move on to the next task's pages.
                    currentIteratorIndex++;
                }
            }
        }
        return page;
    }

    // Lazily loads the page iterators. Only called from getNextPage() while
    // holding the monitor, which guards the pageIterators field.
    private List<Iterator<Page>> getPageIterators()
    {
        if (pageIterators == null) {
            pageIterators = getPages(broadcastTableFilesInfo, tempStorage, tempDataOperationContext, prestoSparkBroadcastTableCacheManager, stageId, planNodeId);
        }
        return pageIterators;
    }

    /**
     * Returns one page iterator per producing task, loading and deserializing
     * the broadcast table from disk on a cache miss and caching the result.
     */
    private List<Iterator<Page>> getPages(
            List<List<PrestoSparkStorageHandle>> broadcastTableFilesInfo,
            TempStorage tempStorage,
            TempDataOperationContext tempDataOperationContext,
            PrestoSparkBroadcastTableCacheManager prestoSparkBroadcastTableCacheManager,
            StageId stageId,
            PlanNodeId planNodeId)
    {
        // Try to get table from cache
        List<List<Page>> pages = prestoSparkBroadcastTableCacheManager.getCachedBroadcastTable(stageId, planNodeId);
        if (pages == null) {
            pages = broadcastTableFilesInfo.stream()
                    .map(tableFiles -> {
                        List<SerializedPage> serializedPages = loadBroadcastTable(tableFiles, tempStorage, tempDataOperationContext);
                        return serializedPages.stream().map(serializedPage -> pagesSerde.deserialize(serializedPage)).collect(toImmutableList());
                    }).collect(toImmutableList());
            // Cache deserialized pages
            prestoSparkBroadcastTableCacheManager.cache(stageId, planNodeId, pages);
        }
        return pages.stream().map(List::iterator).collect(toImmutableList());
    }

    /**
     * Reads all serialized pages produced by one task from temp storage,
     * verifying each file's CRC32 checksum against the value recorded in its
     * storage handle.
     *
     * @throws PrestoException with {@code STORAGE_ERROR} on checksum mismatch
     *         or any I/O failure
     */
    private List<SerializedPage> loadBroadcastTable(
            List<PrestoSparkStorageHandle> broadcastTaskFilesInfo,
            TempStorage tempStorage,
            TempDataOperationContext tempDataOperationContext)
    {
        try {
            CRC32 checksum = new CRC32();
            ImmutableList.Builder<SerializedPage> pages = ImmutableList.builder();
            // Shuffle a copy so concurrent readers don't all hit the same file
            // in the same order; the input list itself is left untouched.
            List<PrestoSparkStorageHandle> broadcastTaskFilesInfoCopy = new ArrayList<>(broadcastTaskFilesInfo);
            shuffle(broadcastTaskFilesInfoCopy);
            for (PrestoSparkTaskOutput taskFileInfo : broadcastTaskFilesInfoCopy) {
                // Checksum is per file: reset before each one.
                checksum.reset();
                PrestoSparkStorageHandle prestoSparkStorageHandle = (PrestoSparkStorageHandle) taskFileInfo;
                TempStorageHandle tempStorageHandle = tempStorage.deserialize(prestoSparkStorageHandle.getSerializedStorageHandle());
                log.info("Reading path: " + tempStorageHandle.toString());
                try (InputStream inputStream = tempStorage.open(tempDataOperationContext, tempStorageHandle);
                        InputStreamSliceInput inputStreamSliceInput = new InputStreamSliceInput(inputStream)) {
                    Iterator<SerializedPage> pagesIterator = readSerializedPages(inputStreamSliceInput);
                    while (pagesIterator.hasNext()) {
                        SerializedPage page = pagesIterator.next();
                        // Fold the raw page bytes into the running checksum.
                        checksum.update(page.getSlice().byteArray(), page.getSlice().byteArrayOffset(), page.getSlice().length());
                        pages.add(page);
                    }
                }
                if (checksum.getValue() != prestoSparkStorageHandle.getChecksum()) {
                    throw new PrestoException(STORAGE_ERROR, "Disk page checksum does not match. " + "Data seems to be corrupted on disk for file " + tempStorageHandle.toString());
                }
            }
            return pages.build();
        }
        catch (UncheckedIOException | IOException e) {
            throw new PrestoException(STORAGE_ERROR, "Unable to read data from disk: ", e);
        }
    }

    /**
     * Size of the cached broadcast table for this stage/plan node, as reported
     * by the cache manager.
     */
    public long getRetainedSizeInBytes()
    {
        return prestoSparkBroadcastTableCacheManager.getBroadcastTableSizeInBytes(stageId, planNodeId);
    }
}
apache-2.0
attila-kiss-it/querydsl
querydsl-jpa/src/test/java/com/querydsl/jpa/IntegrationBase.java
6802
/*
 * Copyright 2015, The Querydsl Team (http://www.querydsl.com/team)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.querydsl.jpa;

import static org.junit.Assert.assertEquals;
import static com.querydsl.jpa.Constants.*;
import static org.junit.Assert.assertNotNull;

import java.util.Arrays;
import java.util.List;

import org.hibernate.query.Query;
import org.hibernate.ScrollMode;
import org.hibernate.ScrollableResults;
import org.hibernate.Session;
import org.junit.Test;
import org.junit.runner.RunWith;

import com.querydsl.core.types.EntityPath;
import com.querydsl.jpa.domain.Cat;
import com.querydsl.jpa.domain.QCat;
import com.querydsl.jpa.hibernate.HibernateDeleteClause;
import com.querydsl.jpa.hibernate.HibernateQuery;
import com.querydsl.jpa.hibernate.HibernateInsertClause;
import com.querydsl.jpa.hibernate.HibernateUpdateClause;
import com.querydsl.jpa.hibernate.HibernateUtil;
import com.querydsl.jpa.testutil.HibernateTestRunner;

import antlr.RecognitionException;
import antlr.TokenStreamException;

/**
 * Integration tests that run the parsing-test queries against a live Hibernate
 * {@link Session} (injected by {@link HibernateTestRunner} via
 * {@link #setSession(Session)}), plus DML round-trips for insert/update/delete.
 */
@RunWith(HibernateTestRunner.class)
public class IntegrationBase extends ParsingTest implements HibernateTest {

    private Session session;

    /**
     * Overrides the parsing-only helper so that parse() serializes the query
     * to HQL and actually executes it against the session, instead of only
     * parsing it.
     */
    @Override
    protected QueryHelper<?> query() {
        return new QueryHelper<Void>(HQLTemplates.DEFAULT) {
            @Override
            public void parse() throws RecognitionException, TokenStreamException {
                try {
                    System.out.println("query : " + toString().replace('\n', ' '));
                    JPQLSerializer serializer = new JPQLSerializer(HQLTemplates.DEFAULT);
                    serializer.serialize(getMetadata(), false, null);
                    // Run the serialized HQL for real; bind constants/params first.
                    Query query = session.createQuery(serializer.toString());
                    HibernateUtil.setConstants(query, serializer.getConstants(), getMetadata().getParams());
                    query.list();
                } catch (Exception e) {
                    e.printStackTrace();
                    throw new RuntimeException(e);
                } finally {
                    System.out.println();
                }
            }
        };
    }

    @Override
    @Test
    public void groupBy() throws Exception {
        // NOTE : commented out, because HQLSDB doesn't support these queries
    }

    @Override
    @Test
    public void groupBy_2() throws Exception {
        // NOTE : commented out, because HQLSDB doesn't support these queries
    }

    @Override
    @Test
    public void orderBy() throws Exception {
        // NOTE : commented out, because HQLSDB doesn't support these queries
    }

    @Override
    @Test
    public void docoExamples910() throws Exception {
        // NOTE : commented out, because HQLSDB doesn't support these queries
    }

    // DML clause factories bound to the runner-injected session.
    private HibernateDeleteClause delete(EntityPath<?> entity) {
        return new HibernateDeleteClause(session, entity);
    }

    private HibernateUpdateClause update(EntityPath<?> entity) {
        return new HibernateUpdateClause(session, entity);
    }

    private HibernateInsertClause insert(EntityPath<?> entity) {
        return new HibernateInsertClause(session, entity);
    }

    @Test
    public void scroll() {
        session.save(new Cat("Bob",10));
        session.save(new Cat("Steve",11));

        QCat cat = QCat.cat;
        HibernateQuery<?> query = new HibernateQuery<Void>(session);
        ScrollableResults results = query.from(cat).select(cat).scroll(ScrollMode.SCROLL_INSENSITIVE);
        while (results.next()) {
            assertNotNull(results.get(0));
        }
        results.close();
    }

    @Test
    public void insert() {
        session.save(new Cat("Bob",10));

        QCat cat = QCat.cat;
        // set(...)-style insert
        long amount = insert(cat)
                .set(cat.name, "Bobby")
                .set(cat.alive, false)
                .execute();
        assertEquals(1, amount);

        assertEquals(1L, query().from(cat).where(cat.name.eq("Bobby")).fetchCount());
    }

    @Test
    public void insert2() {
        session.save(new Cat("Bob",10));

        QCat cat = QCat.cat;
        // columns(...).values(...)-style insert
        long amount = insert(cat).columns(cat.name, cat.alive)
                .values("Bobby", false)
                .execute();
        assertEquals(1, amount);

        assertEquals(1L, query().from(cat).where(cat.name.eq("Bobby")).fetchCount());
    }

    @Test
    public void insert3() {
        session.save(new Cat("Bob",10));

        QCat cat = QCat.cat;
        QCat bob = new QCat("Bob");
        // insert ... select-style insert
        long amount = insert(cat)
                .columns(cat.name, cat.alive)
                .select(JPAExpressions.select(bob.name, bob.alive).from(bob))
                .execute();
        assertEquals(1, amount);

        assertEquals(1L, query().from(cat).where(cat.name.eq("Bobby")).fetchCount());
    }

    @Test
    public void update() {
        session.save(new Cat("Bob",10));
        session.save(new Cat("Steve",11));

        QCat cat = QCat.cat;
        long amount = update(cat).where(cat.name.eq("Bob"))
                .set(cat.name, "Bobby")
                .set(cat.alive, false)
                .execute();
        assertEquals(1, amount);

        // The renamed cat must no longer match its old name.
        assertEquals(0L, query().from(cat).where(cat.name.eq("Bob")).fetchCount());
    }

    @Test
    public void update_with_null() {
        session.save(new Cat("Bob",10));
        session.save(new Cat("Steve",11));

        QCat cat = QCat.cat;
        // Explicit null assignment must be accepted by the update clause.
        long amount = update(cat).where(cat.name.eq("Bob"))
                .set(cat.name, (String) null)
                .set(cat.alive, false)
                .execute();
        assertEquals(1, amount);
    }

    @Test
    public void delete() {
        session.save(new Cat("Bob",10));
        session.save(new Cat("Steve",11));

        QCat cat = QCat.cat;
        long amount = delete(cat).where(cat.name.eq("Bob"))
                .execute();
        assertEquals(1, amount);
    }

    @Test
    public void collection() throws Exception {
        List<Cat> cats = Arrays.asList(new Cat("Bob",10), new Cat("Steve",11));
        for (Cat cat : cats) {
            session.save(cat);
        }

        // "cat" and "kitten" come from the static Constants.* import.
        query().from(cat)
            .innerJoin(cat.kittens, kitten)
            .where(kitten.in(cats))
            .parse();
    }

    @Override
    public void setSession(Session session) {
        this.session = session;
    }
}
apache-2.0
robertgates55/frameworkium
src/test/java/theinternet/pages/JavaScriptAlertsPage.java
2380
package theinternet.pages; import com.frameworkium.core.ui.annotations.Visible; import com.frameworkium.core.ui.pages.BasePage; import io.qameta.allure.Step; import org.openqa.selenium.support.FindBy; import ru.yandex.qatools.htmlelements.annotations.Name; import ru.yandex.qatools.htmlelements.element.Button; import ru.yandex.qatools.htmlelements.element.TextBlock; import static org.openqa.selenium.support.ui.ExpectedConditions.visibilityOf; public class JavaScriptAlertsPage extends BasePage<JavaScriptAlertsPage> { @Visible @Name("JS Alert button") @FindBy(css = "button[onclick='jsAlert()']") private Button jsAlertButton; @Visible @Name("JS Confirm button") @FindBy(css = "button[onclick='jsConfirm()']") private Button jsConfirmButton; @Visible @Name("JS Prompt button") @FindBy(css = "button[onclick='jsPrompt()']") private Button jsPromptButton; @Name("Result area") @FindBy(css = "p#result") private TextBlock resultArea; @Step("Click alert") public JavaScriptAlertsPage clickAlertButtonAndAccept() { jsAlertButton.click(); driver.switchTo().alert().accept(); wait.until(visibilityOf(resultArea)); return this; } @Step("Click alert") public JavaScriptAlertsPage clickAlertButtonAndDismiss() { jsAlertButton.click(); driver.switchTo().alert().dismiss(); wait.until(visibilityOf(resultArea)); return this; } @Step("Click confirm and confirm") public JavaScriptAlertsPage clickConfirmButtonAndAccept() { jsConfirmButton.click(); driver.switchTo().alert().accept(); wait.until(visibilityOf(resultArea)); return this; } @Step("Click confirm and dismiss") public JavaScriptAlertsPage clickConfirmButtonAndDismiss() { jsConfirmButton.click(); driver.switchTo().alert().dismiss(); wait.until(visibilityOf(resultArea)); return this; } @Step("Click prompt") public JavaScriptAlertsPage clickPromptButtonAndEnterPrompt(String textToEnter) { jsPromptButton.click(); driver.switchTo().alert().sendKeys(textToEnter); driver.switchTo().alert().accept(); return this; } @Step("Click prompt") public 
String getResultText() { return resultArea.getText(); } }
apache-2.0
bassages/home-server
src/test/java/nl/homeserver/energie/verbruikkosten/VerbruikKostenOverzichtenTest.java
3882
package nl.homeserver.energie.verbruikkosten;

import org.junit.jupiter.api.Test;

import java.math.BigDecimal;
import java.util.List;

import static org.assertj.core.api.Assertions.assertThat;

/**
 * Tests for averaging a collection of {@link VerbruikKostenOverzicht}
 * (usage/cost overviews) and for summing the total electricity costs.
 */
class VerbruikKostenOverzichtenTest {

    @Test
    void whenAverageToSingleThenSingleVerbuikKostenOverichtWithAveragesReturned() {
        final VerbruikKostenOverzicht verbruikKostenOverzicht1 = VerbruikKostenOverzicht.builder()
                .gasVerbruik(new BigDecimal("42.023"))
                .gasKosten(new BigDecimal("10.000"))
                .stroomVerbruikDal(new BigDecimal("123.000"))
                .stroomKostenDal(new BigDecimal("12.872"))
                .stroomVerbruikNormaal(new BigDecimal("2450.607"))
                .stroomKostenNormaal(new BigDecimal("2312.023"))
                .build();

        final VerbruikKostenOverzicht verbruikKostenOverzicht2 = VerbruikKostenOverzicht.builder()
                .gasVerbruik(new BigDecimal("21.531"))
                .gasKosten(new BigDecimal("34.131"))
                .stroomVerbruikDal(new BigDecimal("134.012"))
                .stroomKostenDal(new BigDecimal("71.325"))
                .stroomVerbruikNormaal(new BigDecimal("2321.242"))
                .stroomKostenNormaal(new BigDecimal("9214.081"))
                .build();

        final VerbruikKostenOverzichten verbruikKostenOverzichten = new VerbruikKostenOverzichten(
                List.of(verbruikKostenOverzicht1, verbruikKostenOverzicht2));

        final VerbruikKostenOverzicht averagedVerbruikKostenOverzicht = verbruikKostenOverzichten.averageToSingle();

        // Usage averages keep 3 decimals: e.g. (42.023 + 21.531) / 2 = 31.777.
        assertThat(averagedVerbruikKostenOverzicht.getGasVerbruik()).isEqualTo(new BigDecimal("31.777"));
        assertThat(averagedVerbruikKostenOverzicht.getStroomVerbruikDal()).isEqualTo(new BigDecimal("128.506"));
        assertThat(averagedVerbruikKostenOverzicht.getStroomVerbruikNormaal()).isEqualTo(new BigDecimal("2385.925"));
        // Cost averages are rounded to 2 decimals: e.g. (10.000 + 34.131) / 2
        // = 22.0655 -> 22.07 (rounding mode defined by averageToSingle).
        assertThat(averagedVerbruikKostenOverzicht.getGasKosten()).isEqualTo(new BigDecimal("22.07"));
        assertThat(averagedVerbruikKostenOverzicht.getStroomKostenDal()).isEqualTo(new BigDecimal("42.10"));
        assertThat(averagedVerbruikKostenOverzicht.getStroomKostenNormaal()).isEqualTo(new BigDecimal("5763.05"));
    }

    @Test
    void givenNotNullNormaalAndNotNullDalWhenGetTotaalStroomKostenThenSumOfNormaalAndDalReturned() {
        final VerbruikKostenOverzicht verbruikKostenOverzicht = VerbruikKostenOverzicht.builder()
                .stroomKostenDal(new BigDecimal(3))
                .stroomKostenNormaal(new BigDecimal(1))
                .build();

        // Both components present: total is their sum.
        assertThat(verbruikKostenOverzicht.getTotaalStroomKosten()).isEqualTo(new BigDecimal(4));
    }

    @Test
    void givenNullNormaalAndNotNullDalWhenGetTotaalStroomKostenThenDalReturned() {
        final VerbruikKostenOverzicht verbruikKostenOverzicht = VerbruikKostenOverzicht.builder()
                .stroomKostenDal(new BigDecimal(3))
                .stroomKostenNormaal(null)
                .build();

        // Missing "normaal" tariff: total falls back to the "dal" value alone.
        assertThat(verbruikKostenOverzicht.getTotaalStroomKosten()).isEqualTo(new BigDecimal(3));
    }

    @Test
    void givenNotNullNormaalAndNullDalWhenGetTotaalStroomKostenThenNormaalReturned() {
        final VerbruikKostenOverzicht verbruikKostenOverzicht = VerbruikKostenOverzicht.builder()
                .stroomKostenDal(null)
                .stroomKostenNormaal(new BigDecimal(3))
                .build();

        // Missing "dal" tariff: total falls back to the "normaal" value alone.
        assertThat(verbruikKostenOverzicht.getTotaalStroomKosten()).isEqualTo(new BigDecimal(3));
    }

    @Test
    void givenNullNormaalAndDalWhenGetTotaalStroomKostenThenNullReturned() {
        final VerbruikKostenOverzicht verbruikKostenOverzicht = VerbruikKostenOverzicht.builder()
                .stroomKostenDal(null)
                .stroomKostenNormaal(null)
                .build();

        // Neither component present: total is null, not zero.
        assertThat(verbruikKostenOverzicht.getTotaalStroomKosten()).isNull();
    }
}
apache-2.0
justinkwony/ermaster-nhit
org.insightech.er/src/org/insightech/er/preference/page/classpath/ExtClassPathPreferencePage.java
6293
package org.insightech.er.preference.page.classpath;

import java.io.File;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.net.URL;
import java.util.List;

import org.eclipse.jface.dialogs.Dialog;
import org.eclipse.jface.preference.DirectoryFieldEditor;
import org.eclipse.jface.preference.PreferencePage;
import org.eclipse.swt.SWT;
import org.eclipse.swt.events.SelectionAdapter;
import org.eclipse.swt.events.SelectionEvent;
import org.eclipse.swt.graphics.Point;
import org.eclipse.swt.layout.GridData;
import org.eclipse.swt.layout.GridLayout;
import org.eclipse.swt.widgets.Button;
import org.eclipse.swt.widgets.Composite;
import org.eclipse.swt.widgets.Control;
import org.eclipse.ui.IWorkbench;
import org.eclipse.ui.IWorkbenchPreferencePage;
import org.insightech.er.ERDiagramActivator;
import org.insightech.er.ResourceString;
import org.insightech.er.common.widgets.CompositeFactory;
import org.insightech.er.editor.view.dialog.common.FileOverrideConfirmDialog;
import org.insightech.er.preference.PreferenceInitializer;
import org.insightech.er.util.io.IOUtils;

/**
 * Preference page for the extended classpath directory: lets the user pick a
 * directory and download the bundled template resources ("html" and
 * "javasource") into it, with per-file override confirmation.
 */
public class ExtClassPathPreferencePage extends PreferencePage implements
        IWorkbenchPreferencePage {

    private DirectoryFieldEditor extDir;

    private Button downloadButton;

    public void init(IWorkbench workbench) {
    }

    /**
     * Builds the page: a directory field editor plus a "Download" button that
     * copies template resources into the chosen directory.
     */
    @Override
    protected Control createContents(Composite parent) {
        Composite composite = new Composite(parent, SWT.NONE);
        GridLayout gridLayout = new GridLayout();
        gridLayout.numColumns = 3;
        composite.setLayout(gridLayout);

        this.extDir = new DirectoryFieldEditor("",
                ResourceString.getResourceString("label.ext.classpath"),
                composite);

        CompositeFactory.filler(composite, 2);

        this.downloadButton = this.createButton(composite, "Download");
        this.downloadButton.addSelectionListener(new SelectionAdapter() {

            /**
             * {@inheritDoc}
             */
            @Override
            public void widgetSelected(SelectionEvent e) {
                download();
            }

        });

        this.extDir.setFocus();

        this.setData();

        return composite;
    }

    /**
     * Copies the bundled "html" and "javasource" template resources into the
     * configured extended-classpath directory. Existing files trigger a
     * confirmation dialog whose result codes mean: 0 = override this file,
     * 2 = override all, 3 = skip all, 4 = cancel the whole download.
     */
    private void download() {
        try {
            String path = PreferenceInitializer.getExtendedClasspath();

            List<URL> urls = ERDiagramActivator.getURLList("html");
            urls.addAll(ERDiagramActivator.getURLList("javasource"));

            // Sticky "yes/no to all" answers carried across the loop.
            boolean overrideYesToAll = false;
            boolean overrideNoToAll = false;

            for (URL url : urls) {
                // Resource paths start with "template/"; strip that prefix to
                // get the output-relative file name.
                String inputFile = url.getFile()
                        .substring("template/".length());

                InputStream in = null;
                Writer out = null;

                try {
                    // Entries ending in "/" are directories; skip them.
                    if (!inputFile.endsWith("/")) {
                        in = url.openStream();

                        File outputFile = new File(path, inputFile);

                        if (outputFile.exists()) {
                            boolean override = false;

                            if (!overrideYesToAll && !overrideNoToAll) {
                                FileOverrideConfirmDialog fileOverrideConfirmDialog = new FileOverrideConfirmDialog(
                                        outputFile.getCanonicalPath());
                                int result = fileOverrideConfirmDialog.open();

                                if (result == 2) {
                                    overrideYesToAll = true;

                                } else if (result == 3) {
                                    overrideNoToAll = true;

                                } else if (result == 0) {
                                    override = true;

                                } else if (result == 4) {
                                    // Cancel: stop downloading entirely.
                                    break;
                                }
                            }

                            if (overrideYesToAll || override) {
                                outputFile.getParentFile().mkdirs();
                                // NOTE(review): copies a raw InputStream into a
                                // UTF-8 Writer — the stream is presumably decoded
                                // with the platform default charset; verify for
                                // non-ASCII templates.
                                out = new OutputStreamWriter(
                                        new FileOutputStream(outputFile),
                                        "UTF-8");
                                IOUtils.copy(in, out);
                            }

                        } else {
                            outputFile.getParentFile().mkdirs();
                            out = new OutputStreamWriter(new FileOutputStream(
                                    outputFile), "UTF-8");
                            IOUtils.copy(in, out);
                        }
                    }

                } finally {
                    // Quiet close in both success and failure paths.
                    IOUtils.closeQuietly(in);
                    IOUtils.closeQuietly(out);
                }
            }

        } catch (Exception e) {
            ERDiagramActivator.showExceptionDialog(e);
        }
    }

    /*
     * private List<URL> getFileList(String rootPath) throws URISyntaxException,
     * IOException { List<URL> urlList = new ArrayList<URL>(); ClassLoader
     * classLoader = ERDiagramActivator.class.getClassLoader();
     *
     * URL dirURL = classLoader.getResource(rootPath);
     *
     * classLoader. if (dirURL.getProtocol().equals("file")) { List<File>
     * fileList = FileUtils .getChildren(new File(dirURL.toURI()));
     *
     * for (File file : fileList) { urlList.add(file.toURI().toURL()); } }
     * ERDiagramActivator.
     * ResourcesPlugin.getPlugin().getBundle().findEntries(arg0, arg1, arg2) if
     * (dirURL.getProtocol().equals("jar")) { String jarPath =
     * dirURL.getPath().substring(5, dirURL.getPath().indexOf("!"));
     *
     * JarFile jar = new JarFile(jarPath);
     *
     * try { Enumeration<JarEntry> entries = jar.entries(); List<String>
     * fileList = new ArrayList<String>();
     *
     * while (entries.hasMoreElements()) { String name =
     * entries.nextElement().getName(); if (name.startsWith(rootPath)) { String
     * entry = name.substring(rootPath.length()); int checkSubdir =
     * entry.indexOf("/"); if (checkSubdir >= 0) { entry = entry.substring(0,
     * checkSubdir); } fileList.add(entry); } }
     *
     * for (String file : fileList) {
     * urlList.add(classLoader.getResource(file)); }
     *
     * } finally { jar.close(); } }
     *
     * return urlList; }
     */

    // Populates the directory editor from the stored preference value.
    private void setData() {
        String path = PreferenceInitializer.getExtendedClasspath();
        this.extDir.setStringValue(path);
    }

    /** Resets the stored path to its default and refreshes the editor. */
    @Override
    protected void performDefaults() {
        PreferenceInitializer.saveExtendedClasspath(null);
        setData();

        super.performDefaults();
    }

    /** Persists the currently entered directory. */
    @Override
    public boolean performOk() {
        PreferenceInitializer.saveExtendedClasspath(this.extDir
                .getStringValue());

        return super.performOk();
    }

    // Creates a dialog-font push button sized to at least the standard
    // button width.
    private Button createButton(Composite parent, String label) {
        int widthHint = convertHorizontalDLUsToPixels(61);

        Button button = new Button(parent, 8);
        button.setText(label);

        Dialog.applyDialogFont(button);

        GridData data = new GridData(256);
        Point minButtonSize = button.computeSize(-1, -1, true);
        data.widthHint = Math.max(widthHint, minButtonSize.x);
        button.setLayoutData(data);

        return button;
    }
}
apache-2.0
podnov/queryinfo
jpa/src/main/java/com/evanzeimet/queryinfo/jpa/attribute/DefaultQueryInfoAttributeContext.java
1955
package com.evanzeimet.queryinfo.jpa.attribute;

/*
 * #%L
 * queryinfo-jpa
 * $Id:$
 * $HeadURL:$
 * %%
 * Copyright (C) 2015 Evan Zeimet
 * %%
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * #L%
 */

import java.util.Map;

import com.evanzeimet.queryinfo.jpa.field.QueryInfoFieldInfo;
import com.evanzeimet.queryinfo.jpa.join.QueryInfoJoinInfo;

/**
 * Default {@link QueryInfoAttributeContext} backed by two plain maps: one for
 * field attributes, one for join attributes. Lookups before the corresponding
 * setter has been called return {@code null}.
 */
public class DefaultQueryInfoAttributeContext implements QueryInfoAttributeContext {

	private Map<String, QueryInfoFieldInfo> fields;
	private Map<String, QueryInfoJoinInfo> joins;

	public DefaultQueryInfoAttributeContext() {

	}

	/**
	 * Looks up a field by attribute name.
	 *
	 * @return the field info, or {@code null} if unknown or fields not yet set
	 */
	@Override
	public QueryInfoFieldInfo getField(String queryInfoFieldAttributeName) {
		QueryInfoFieldInfo result;

		if (fields == null) {
			result = null;
		} else {
			result = fields.get(queryInfoFieldAttributeName);
		}

		return result;
	}

	@Override
	public Map<String, QueryInfoFieldInfo> getFields() {
		return fields;
	}

	@Override
	public void setFields(Map<String, QueryInfoFieldInfo> fields) {
		this.fields = fields;
	}

	/**
	 * Looks up a join by attribute name.
	 *
	 * @return the join info, or {@code null} if unknown or joins not yet set
	 */
	@Override
	public QueryInfoJoinInfo getJoin(String queryInfoJoinAttributeName) {
		QueryInfoJoinInfo result;

		// BUG FIX: this guard previously checked "fields == null" (a copy-paste
		// from getField), which threw an NPE when joins was null while fields
		// was set, and wrongly returned null when fields was null but joins
		// was populated.
		if (joins == null) {
			result = null;
		} else {
			result = joins.get(queryInfoJoinAttributeName);
		}

		return result;
	}

	@Override
	public Map<String, QueryInfoJoinInfo> getJoins() {
		return joins;
	}

	@Override
	public void setJoins(Map<String, QueryInfoJoinInfo> joins) {
		this.joins = joins;
	}
}
apache-2.0
AlexanderBuloichik/pentaho-kettle
engine/src/main/java/org/pentaho/di/job/entries/trans/JobEntryTrans.java
66659
/*! ****************************************************************************** * * Pentaho Data Integration * * Copyright (C) 2002-2017 by Hitachi Vantara : http://www.pentaho.com * ******************************************************************************* * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ******************************************************************************/ package org.pentaho.di.job.entries.trans; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Calendar; import java.util.List; import java.util.Map; import org.pentaho.di.cluster.SlaveServer; import org.pentaho.di.core.CheckResultInterface; import org.pentaho.di.core.Const; import org.pentaho.di.core.extension.ExtensionPointHandler; import org.pentaho.di.core.extension.KettleExtensionPoint; import org.pentaho.di.core.listeners.CurrentDirectoryChangedListener; import org.pentaho.di.core.listeners.impl.EntryCurrentDirectoryChangedListener; import org.pentaho.di.core.util.Utils; import org.pentaho.di.core.ObjectLocationSpecificationMethod; import org.pentaho.di.core.Result; import org.pentaho.di.core.ResultFile; import org.pentaho.di.core.RowMetaAndData; import org.pentaho.di.core.SQLStatement; import org.pentaho.di.core.database.DatabaseMeta; import org.pentaho.di.core.exception.KettleDatabaseException; import org.pentaho.di.core.exception.KettleException; import org.pentaho.di.core.exception.KettleXMLException; import 
org.pentaho.di.core.logging.LogChannelFileWriter; import org.pentaho.di.core.logging.LogLevel; import org.pentaho.di.core.parameters.NamedParams; import org.pentaho.di.core.parameters.NamedParamsDefault; import org.pentaho.di.core.util.CurrentDirectoryResolver; import org.pentaho.di.core.util.FileUtil; import org.pentaho.di.core.variables.VariableSpace; import org.pentaho.di.core.vfs.KettleVFS; import org.pentaho.di.core.xml.XMLHandler; import org.pentaho.di.i18n.BaseMessages; import org.pentaho.di.job.DelegationListener; import org.pentaho.di.job.Job; import org.pentaho.di.job.JobMeta; import org.pentaho.di.job.entry.JobEntryBase; import org.pentaho.di.job.entry.JobEntryInterface; import org.pentaho.di.job.entry.validator.AndValidator; import org.pentaho.di.job.entry.validator.JobEntryValidatorUtils; import org.pentaho.di.repository.HasRepositoryDirectories; import org.pentaho.di.repository.ObjectId; import org.pentaho.di.repository.Repository; import org.pentaho.di.repository.RepositoryDirectory; import org.pentaho.di.repository.RepositoryDirectoryInterface; import org.pentaho.di.repository.RepositoryImportLocation; import org.pentaho.di.repository.RepositoryObject; import org.pentaho.di.repository.RepositoryObjectType; import org.pentaho.di.repository.StringObjectId; import org.pentaho.di.resource.ResourceDefinition; import org.pentaho.di.resource.ResourceEntry; import org.pentaho.di.resource.ResourceEntry.ResourceType; import org.pentaho.di.resource.ResourceNamingInterface; import org.pentaho.di.resource.ResourceReference; import org.pentaho.di.trans.Trans; import org.pentaho.di.trans.TransExecutionConfiguration; import org.pentaho.di.trans.TransMeta; import org.pentaho.di.trans.TransSupplier; import org.pentaho.di.trans.cluster.TransSplitter; import org.pentaho.di.trans.step.StepMeta; import org.pentaho.di.www.SlaveServerTransStatus; import org.pentaho.metastore.api.IMetaStore; import org.w3c.dom.Node; /** * This is the job entry that defines a transformation 
to be run.
 *
 * @author Matt Casters
 * @since 1-Oct-2003, rewritten on 18-June-2004
 */
public class JobEntryTrans extends JobEntryBase implements Cloneable, JobEntryInterface, HasRepositoryDirectories {
  private static Class<?> PKG = JobEntryTrans.class; // for i18n purposes, needed by Translator2!!

  // How the transformation to run is located. Which of these is authoritative is
  // governed by specificationMethod (filename, repository name+directory, or repository object id).
  private String transname;
  private String filename;
  private String directory;
  private ObjectId transObjectId;
  private ObjectLocationSpecificationMethod specificationMethod;

  // Plain (positional) arguments for the transformation; argFromPrevious switches to
  // taking them from the previous entry's result rows instead.
  public String[] arguments;
  public boolean argFromPrevious;
  public boolean paramsFromPrevious;
  // When true, the transformation is executed once per incoming result row.
  public boolean execPerRow;

  // Three parallel arrays describing named parameters: the parameter name, an optional
  // result-row field to read the value from, and a static value used otherwise.
  public String[] parameters;
  public String[] parameterFieldNames;
  public String[] parameterValues;

  public boolean clearResultRows;
  public boolean clearResultFiles;
  public boolean createParentFolder;

  // Log file settings; the effective log file name is assembled by getLogFilename().
  public boolean setLogfile;
  public boolean setAppendLogfile;
  public String logfile, logext;
  public boolean addDate, addTime;
  public LogLevel logFileLevel;

  // Retained only so directory information survives backup/recovery; see getXML().
  private String directoryPath;

  private boolean clustering;

  public boolean waitingToFinish = true;
  public boolean followingAbortRemotely;

  private String remoteSlaveServerName;

  private boolean passingAllParameters = true;

  private boolean loggingRemoteWork;

  private String runConfiguration;

  private Trans trans;

  // Keeps specificationMethod/directory consistent when the job's current directory changes.
  private CurrentDirectoryChangedListener currentDirListener = new EntryCurrentDirectoryChangedListener(
    this::getSpecificationMethod,
    this::getDirectory,
    this::setDirectory );

  public JobEntryTrans( String name ) {
    super( name, "" );
  }

  public JobEntryTrans() {
    this( "" );
    clear();
  }

  /** Allocates the arguments array for the given number of entries. */
  private void allocateArgs( int nrArgs ) {
    arguments = new String[nrArgs];
  }

  /** Allocates the three parallel parameter arrays for the given number of entries. */
  private void allocateParams( int nrParameters ) {
    parameters = new String[nrParameters];
    parameterFieldNames = new String[nrParameters];
    parameterValues = new String[nrParameters];
  }

  /**
   * Clones this entry, deep-copying the argument and parameter arrays so the clone
   * does not share them with this instance.
   */
  @Override
  public Object clone() {
    JobEntryTrans je = (JobEntryTrans) super.clone();
    if ( arguments != null ) {
      int nrArgs = arguments.length;
      je.allocateArgs( nrArgs );
      System.arraycopy( arguments, 0, je.arguments, 0, nrArgs );
    }
    if ( parameters != null ) {
      int nrParameters = parameters.length;
      je.allocateParams( nrParameters );
      System.arraycopy( parameters, 0, je.parameters, 0, nrParameters );
      System.arraycopy( parameterFieldNames, 0, je.parameterFieldNames, 0, nrParameters );
      System.arraycopy( parameterValues, 0, je.parameterValues, 0, nrParameters );
    }
    return je;
  }

  public void setFileName( String n ) {
    filename = n;
  }

  /**
   * @return the filename
   * @deprecated use getFilename() instead
   */
  @Deprecated
  public String getFileName() {
    return filename;
  }

  @Override
  public String getFilename() {
    return filename;
  }

  /** @return the filename with environment variables substituted */
  @Override
  public String getRealFilename() {
    return environmentSubstitute( getFilename() );
  }

  public void setTransname( String transname ) {
    this.transname = transname;
  }

  public String getTransname() {
    return transname;
  }

  public String getDirectory() {
    return directory;
  }

  public void setDirectory( String directory ) {
    this.directory = directory;
  }

  @Override
  public String[] getDirectories() {
    return new String[]{ directory };
  }

  @Override
  public void setDirectories( String[] directories ) {
    this.directory = directories[0];
  }

  /**
   * Builds the log file name from the configured base name, the optional date (yyyyMMdd)
   * and time (HHmmss) suffixes, and the optional extension. Returns an empty string when
   * logging to file is disabled.
   */
  public String getLogFilename() {
    String retval = "";
    if ( setLogfile ) {
      retval += logfile == null ? "" : logfile;
      Calendar cal = Calendar.getInstance();
      if ( addDate ) {
        SimpleDateFormat sdf = new SimpleDateFormat( "yyyyMMdd" );
        retval += "_" + sdf.format( cal.getTime() );
      }
      if ( addTime ) {
        SimpleDateFormat sdf = new SimpleDateFormat( "HHmmss" );
        retval += "_" + sdf.format( cal.getTime() );
      }
      if ( logext != null && logext.length() > 0 ) {
        retval += "." + logext;
      }
    }
    return retval;
  }

  /**
   * Serializes this job entry's settings to a Kettle XML fragment. Tag names here must stay
   * in sync with what loadXML() reads back.
   */
  @Override
  public String getXML() {
    StringBuilder retval = new StringBuilder( 300 );

    retval.append( super.getXML() );

    // specificationMethod
    //
    retval.append( " " ).append( XMLHandler.addTagValue( "specification_method", specificationMethod == null
      ? null : specificationMethod.getCode() ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "trans_object_id", transObjectId == null
      ? null : transObjectId.toString() ) );
    // Export a little bit of extra information regarding the reference since it doesn't really matter outside the same
    // repository.
    //
    if ( rep != null && transObjectId != null ) {
      try {
        RepositoryObject objectInformation = rep.getObjectInformation( transObjectId, RepositoryObjectType.TRANSFORMATION );
        if ( objectInformation != null ) {
          transname = objectInformation.getName();
          directory = objectInformation.getRepositoryDirectory().getPath();
        }
      } catch ( KettleException e ) {
        // Ignore object reference problems. It simply means that the reference is no longer valid.
      }
    }
    retval.append( " " ).append( XMLHandler.addTagValue( "filename", filename ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "transname", transname ) );
    if ( parentJobMeta != null ) {
      parentJobMeta.getNamedClusterEmbedManager().registerUrl( filename );
    }
    if ( directory != null ) {
      retval.append( " " ).append( XMLHandler.addTagValue( "directory", directory ) );
    } else if ( directoryPath != null ) {
      // don't lose this info (backup/recovery)
      //
      retval.append( " " ).append( XMLHandler.addTagValue( "directory", directoryPath ) );
    }
    retval.append( " " ).append( XMLHandler.addTagValue( "arg_from_previous", argFromPrevious ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "params_from_previous", paramsFromPrevious ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "exec_per_row", execPerRow ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "clear_rows", clearResultRows ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "clear_files", clearResultFiles ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "set_logfile", setLogfile ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "logfile", logfile ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "logext", logext ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "add_date", addDate ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "add_time", addTime ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "loglevel", logFileLevel != null ? logFileLevel.getCode() : null ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "cluster", clustering ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "slave_server_name", remoteSlaveServerName ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "set_append_logfile", setAppendLogfile ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "wait_until_finished", waitingToFinish ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "follow_abort_remote", followingAbortRemotely ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "create_parent_folder", createParentFolder ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "logging_remote_work", loggingRemoteWork ) );
    retval.append( " " ).append( XMLHandler.addTagValue( "run_configuration", runConfiguration ) );

    if ( arguments != null ) {
      for ( int i = 0; i < arguments.length; i++ ) {
        // This is a very very bad way of making an XML file, don't use it (or
        // copy it). Sven Boden
        retval.append( " " ).append( XMLHandler.addTagValue( "argument" + i, arguments[ i ] ) );
      }
    }

    if ( parameters != null ) {
      retval.append( " " ).append( XMLHandler.openTag( "parameters" ) ).append( Const.CR );

      retval.append( " " ).append( XMLHandler.addTagValue( "pass_all_parameters", passingAllParameters ) );

      for ( int i = 0; i < parameters.length; i++ ) {
        // This is a better way of making the XML file than the arguments.
        retval.append( " " ).append( XMLHandler.openTag( "parameter" ) ).append( Const.CR );

        retval.append( " " ).append( XMLHandler.addTagValue( "name", parameters[ i ] ) );
        retval.append( " " ).append( XMLHandler.addTagValue( "stream_name", parameterFieldNames[ i ] ) );
        retval.append( " " ).append( XMLHandler.addTagValue( "value", parameterValues[ i ] ) );

        retval.append( " " ).append( XMLHandler.closeTag( "parameter" ) ).append( Const.CR );
      }

      retval.append( " " ).append( XMLHandler.closeTag( "parameters" ) ).append( Const.CR );
    }

    return retval.toString();
  }

  /**
   * Backward compatibility: entries saved before specificationMethod existed derive it
   * from whichever location field is populated (filename, object id, or trans name).
   */
  private void checkObjectLocationSpecificationMethod() {
    if ( specificationMethod == null ) {
      // Backward compatibility
      //
      // Default = Filename
      //
      specificationMethod = ObjectLocationSpecificationMethod.FILENAME;

      if ( !Utils.isEmpty( filename ) ) {
        specificationMethod = ObjectLocationSpecificationMethod.FILENAME;
      } else if ( transObjectId != null ) {
        specificationMethod = ObjectLocationSpecificationMethod.REPOSITORY_BY_REFERENCE;
      } else if ( !Utils.isEmpty( transname ) ) {
        specificationMethod = ObjectLocationSpecificationMethod.REPOSITORY_BY_NAME;
      }
    }
  }

  /**
   * Restores this job entry's settings from the XML produced by getXML().
   *
   * @throws KettleXMLException when any tag cannot be read
   */
  @Override
  public void loadXML( Node entrynode, List<DatabaseMeta> databases, List<SlaveServer> slaveServers,
                       Repository rep, IMetaStore metaStore ) throws KettleXMLException {
    try {
      super.loadXML( entrynode, databases, slaveServers );

      String method = XMLHandler.getTagValue( entrynode, "specification_method" );
      specificationMethod = ObjectLocationSpecificationMethod.getSpecificationMethodByCode( method );

      String transId = XMLHandler.getTagValue( entrynode, "trans_object_id" );
      transObjectId = Utils.isEmpty( transId ) ? null : new StringObjectId( transId );
      filename = XMLHandler.getTagValue( entrynode, "filename" );
      transname = XMLHandler.getTagValue( entrynode, "transname" );
      directory = XMLHandler.getTagValue( entrynode, "directory" );

      // A connected repository with a trans name wins over the other location methods.
      if ( rep != null && rep.isConnected() && !Utils.isEmpty( transname ) ) {
        specificationMethod = ObjectLocationSpecificationMethod.REPOSITORY_BY_NAME;
      }

      // Backward compatibility check for object specification
      //
      checkObjectLocationSpecificationMethod();

      argFromPrevious = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "arg_from_previous" ) );
      paramsFromPrevious = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "params_from_previous" ) );
      execPerRow = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "exec_per_row" ) );
      clearResultRows = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "clear_rows" ) );
      clearResultFiles = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "clear_files" ) );
      setLogfile = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "set_logfile" ) );
      addDate = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "add_date" ) );
      addTime = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "add_time" ) );
      logfile = XMLHandler.getTagValue( entrynode, "logfile" );
      logext = XMLHandler.getTagValue( entrynode, "logext" );
      logFileLevel = LogLevel.getLogLevelForCode( XMLHandler.getTagValue( entrynode, "loglevel" ) );
      clustering = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "cluster" ) );
      createParentFolder = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "create_parent_folder" ) );
      loggingRemoteWork = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "logging_remote_work" ) );
      runConfiguration = XMLHandler.getTagValue( entrynode, "run_configuration" );

      remoteSlaveServerName = XMLHandler.getTagValue( entrynode, "slave_server_name" );

      setAppendLogfile = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "set_append_logfile" ) );
      // Missing tag defaults to waiting (older entries didn't write it).
      String wait = XMLHandler.getTagValue( entrynode, "wait_until_finished" );
      if ( Utils.isEmpty( wait ) ) {
        waitingToFinish = true;
      } else {
        waitingToFinish = "Y".equalsIgnoreCase( wait );
      }

      followingAbortRemotely = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "follow_abort_remote" ) );

      // How many arguments?
      int argnr = 0;
      while ( XMLHandler.getTagValue( entrynode, "argument" + argnr ) != null ) {
        argnr++;
      }
      allocateArgs( argnr );

      // Read them all...
      for ( int a = 0; a < argnr; a++ ) {
        arguments[ a ] = XMLHandler.getTagValue( entrynode, "argument" + a );
      }

      Node parametersNode = XMLHandler.getSubNode( entrynode, "parameters" );

      String passAll = XMLHandler.getTagValue( parametersNode, "pass_all_parameters" );
      passingAllParameters = Utils.isEmpty( passAll ) || "Y".equalsIgnoreCase( passAll );

      int nrParameters = XMLHandler.countNodes( parametersNode, "parameter" );
      allocateParams( nrParameters );

      for ( int i = 0; i < nrParameters; i++ ) {
        Node knode = XMLHandler.getSubNodeByNr( parametersNode, "parameter", i );

        parameters[ i ] = XMLHandler.getTagValue( knode, "name" );
        parameterFieldNames[ i ] = XMLHandler.getTagValue( knode, "stream_name" );
        parameterValues[ i ] = XMLHandler.getTagValue( knode, "value" );
      }
    } catch ( KettleException e ) {
      throw new KettleXMLException( "Unable to load job entry of type 'trans' from XML node", e );
    }
  }

  // Load the jobentry from repository
  //
  @Override
  public void loadRep( Repository rep, IMetaStore metaStore, ObjectId id_jobentry, List<DatabaseMeta> databases,
                       List<SlaveServer> slaveServers ) throws KettleException {
    try {
      String method = rep.getJobEntryAttributeString( id_jobentry, "specification_method" );
      specificationMethod = ObjectLocationSpecificationMethod.getSpecificationMethodByCode( method );
      String transId = rep.getJobEntryAttributeString( id_jobentry, "trans_object_id" );
      transObjectId = Utils.isEmpty( transId ) ? null : new StringObjectId( transId );
      transname = rep.getJobEntryAttributeString( id_jobentry, "name" );
      directory = rep.getJobEntryAttributeString( id_jobentry, "dir_path" );
      filename = rep.getJobEntryAttributeString( id_jobentry, "file_name" );

      // Backward compatibility check for object specification
      //
      checkObjectLocationSpecificationMethod();

      argFromPrevious = rep.getJobEntryAttributeBoolean( id_jobentry, "arg_from_previous" );
      paramsFromPrevious = rep.getJobEntryAttributeBoolean( id_jobentry, "params_from_previous" );
      execPerRow = rep.getJobEntryAttributeBoolean( id_jobentry, "exec_per_row" );
      clearResultRows = rep.getJobEntryAttributeBoolean( id_jobentry, "clear_rows", true );
      clearResultFiles = rep.getJobEntryAttributeBoolean( id_jobentry, "clear_files", true );
      setLogfile = rep.getJobEntryAttributeBoolean( id_jobentry, "set_logfile" );
      addDate = rep.getJobEntryAttributeBoolean( id_jobentry, "add_date" );
      addTime = rep.getJobEntryAttributeBoolean( id_jobentry, "add_time" );
      logfile = rep.getJobEntryAttributeString( id_jobentry, "logfile" );
      logext = rep.getJobEntryAttributeString( id_jobentry, "logext" );
      logFileLevel = LogLevel.getLogLevelForCode( rep.getJobEntryAttributeString( id_jobentry, "loglevel" ) );
      clustering = rep.getJobEntryAttributeBoolean( id_jobentry, "cluster" );
      createParentFolder = rep.getJobEntryAttributeBoolean( id_jobentry, "create_parent_folder" );

      remoteSlaveServerName = rep.getJobEntryAttributeString( id_jobentry, "slave_server_name" );
      setAppendLogfile = rep.getJobEntryAttributeBoolean( id_jobentry, "set_append_logfile" );
      waitingToFinish = rep.getJobEntryAttributeBoolean( id_jobentry, "wait_until_finished", true );
      followingAbortRemotely = rep.getJobEntryAttributeBoolean( id_jobentry, "follow_abort_remote" );
      loggingRemoteWork = rep.getJobEntryAttributeBoolean( id_jobentry, "logging_remote_work" );
      runConfiguration = rep.getJobEntryAttributeString( id_jobentry, "run_configuration" );

      // How many arguments?
      int argnr = rep.countNrJobEntryAttributes( id_jobentry, "argument" );
      allocateArgs( argnr );

      // Read all arguments...
      for ( int a = 0; a < argnr; a++ ) {
        arguments[ a ] = rep.getJobEntryAttributeString( id_jobentry, a, "argument" );
      }

      // How many parameters?
      int parameternr = rep.countNrJobEntryAttributes( id_jobentry, "parameter_name" );
      allocateParams( parameternr );

      // Read all parameters ...
      for ( int a = 0; a < parameternr; a++ ) {
        parameters[ a ] = rep.getJobEntryAttributeString( id_jobentry, a, "parameter_name" );
        parameterFieldNames[ a ] = rep.getJobEntryAttributeString( id_jobentry, a, "parameter_stream_name" );
        parameterValues[ a ] = rep.getJobEntryAttributeString( id_jobentry, a, "parameter_value" );
      }

      passingAllParameters = rep.getJobEntryAttributeBoolean( id_jobentry, "pass_all_parameters", true );

    } catch ( KettleDatabaseException dbe ) {
      throw new KettleException( "Unable to load job entry of type 'trans' from the repository for id_jobentry="
        + id_jobentry, dbe );
    }
  }

  // Save the attributes of this job entry
  //
  @Override
  public void saveRep( Repository rep, IMetaStore metaStore, ObjectId id_job ) throws KettleException {
    try {
      rep.saveJobEntryAttribute( id_job, getObjectId(), "specification_method", specificationMethod == null
        ? null : specificationMethod.getCode() );
      rep.saveJobEntryAttribute( id_job, getObjectId(), "trans_object_id", transObjectId == null
        ? null : transObjectId.toString() );
      rep.saveJobEntryAttribute( id_job, getObjectId(), "name", getTransname() );
      rep.saveJobEntryAttribute( id_job, getObjectId(), "dir_path", getDirectory() != null ? getDirectory() : "" );
      rep.saveJobEntryAttribute( id_job, getObjectId(), "file_name", filename );
      rep.saveJobEntryAttribute( id_job, getObjectId(), "arg_from_previous", argFromPrevious );
      rep.saveJobEntryAttribute( id_job, getObjectId(), "params_from_previous", paramsFromPrevious );
      rep.saveJobEntryAttribute( id_job, getObjectId(), "exec_per_row", execPerRow );
      rep.saveJobEntryAttribute( id_job, getObjectId(), "clear_rows", clearResultRows );
      rep.saveJobEntryAttribute( id_job, getObjectId(), "clear_files", clearResultFiles );
      rep.saveJobEntryAttribute( id_job, getObjectId(), "set_logfile", setLogfile );
      rep.saveJobEntryAttribute( id_job, getObjectId(), "add_date", addDate );
      rep.saveJobEntryAttribute( id_job, getObjectId(), "add_time", addTime );
      rep.saveJobEntryAttribute( id_job, getObjectId(), "logfile", logfile );
      rep.saveJobEntryAttribute( id_job, getObjectId(), "logext", logext );
      rep.saveJobEntryAttribute( id_job, getObjectId(), "loglevel", logFileLevel != null
        ? logFileLevel.getCode() : null );
      rep.saveJobEntryAttribute( id_job, getObjectId(), "cluster", clustering );
      rep.saveJobEntryAttribute( id_job, getObjectId(), "slave_server_name", remoteSlaveServerName );
      rep.saveJobEntryAttribute( id_job, getObjectId(), "set_append_logfile", setAppendLogfile );
      rep.saveJobEntryAttribute( id_job, getObjectId(), "wait_until_finished", waitingToFinish );
      rep.saveJobEntryAttribute( id_job, getObjectId(), "follow_abort_remote", followingAbortRemotely );
      rep.saveJobEntryAttribute( id_job, getObjectId(), "create_parent_folder", createParentFolder );
      rep.saveJobEntryAttribute( id_job, getObjectId(), "logging_remote_work", loggingRemoteWork );
      rep.saveJobEntryAttribute( id_job, getObjectId(), "run_configuration", runConfiguration );

      // Save the arguments...
      if ( arguments != null ) {
        for ( int i = 0; i < arguments.length; i++ ) {
          rep.saveJobEntryAttribute( id_job, getObjectId(), i, "argument", arguments[ i ] );
        }
      }

      // Save the parameters...
      if ( parameters != null ) {
        for ( int i = 0; i < parameters.length; i++ ) {
          rep.saveJobEntryAttribute( id_job, getObjectId(), i, "parameter_name", parameters[ i ] );
          rep.saveJobEntryAttribute( id_job, getObjectId(), i, "parameter_stream_name",
            Const.NVL( parameterFieldNames[ i ], "" ) );
          rep.saveJobEntryAttribute( id_job, getObjectId(), i, "parameter_value",
            Const.NVL( parameterValues[ i ], "" ) );
        }
      }

      rep.saveJobEntryAttribute( id_job, getObjectId(), "pass_all_parameters", passingAllParameters );

    } catch ( KettleDatabaseException dbe ) {
      throw new KettleException( "Unable to save job entry of type 'trans' to the repository for id_job="
        + id_job, dbe );
    }
  }

  /** Resets this entry to its defaults (filename-based lookup, no logging, wait until finished). */
  @Override
  public void clear() {
    super.clear();

    specificationMethod = ObjectLocationSpecificationMethod.FILENAME;
    transname = null;
    filename = null;
    directory = null;
    arguments = null;
    argFromPrevious = false;
    execPerRow = false;
    addDate = false;
    addTime = false;
    logfile = null;
    logext = null;
    setLogfile = false;
    clearResultRows = false;
    clearResultFiles = false;
    remoteSlaveServerName = null;
    setAppendLogfile = false;
    waitingToFinish = true;
    followingAbortRemotely = false; // backward compatibility reasons
    createParentFolder = false;
    logFileLevel = LogLevel.BASIC;
  }

  /**
   * Execute this job entry and return the result. In this case it means, just set the result boolean in the Result
   * class.
   *
   * @param result The result of the previous execution
   * @param nr     the job entry number
   * @return The Result of the execution.
*/ @Override public Result execute( Result result, int nr ) throws KettleException { result.setEntryNr( nr ); LogChannelFileWriter logChannelFileWriter = null; LogLevel transLogLevel = parentJob.getLogLevel(); //Set Embedded NamedCluter MetatStore Provider Key so that it can be passed to VFS if ( parentJobMeta.getNamedClusterEmbedManager() != null ) { parentJobMeta.getNamedClusterEmbedManager() .passEmbeddedMetastoreKey( this, parentJobMeta.getEmbeddedMetastoreProviderKey() ); } String realLogFilename = ""; if ( setLogfile ) { transLogLevel = logFileLevel; realLogFilename = environmentSubstitute( getLogFilename() ); // We need to check here the log filename // if we do not have one, we must fail if ( Utils.isEmpty( realLogFilename ) ) { logError( BaseMessages.getString( PKG, "JobTrans.Exception.LogFilenameMissing" ) ); result.setNrErrors( 1 ); result.setResult( false ); return result; } // create parent folder? if ( !FileUtil.createParentFolder( PKG, realLogFilename, createParentFolder, this.getLogChannel(), this ) ) { result.setNrErrors( 1 ); result.setResult( false ); return result; } try { logChannelFileWriter = new LogChannelFileWriter( this.getLogChannelId(), KettleVFS.getFileObject( realLogFilename, this ), setAppendLogfile ); logChannelFileWriter.startLogging(); } catch ( KettleException e ) { logError( BaseMessages.getString( PKG, "JobTrans.Error.UnableOpenAppender", realLogFilename, e.toString() ) ); logError( Const.getStackTracker( e ) ); result.setNrErrors( 1 ); result.setResult( false ); return result; } } // Open the transformation... 
// switch ( specificationMethod ) { case FILENAME: if ( isDetailed() ) { logDetailed( BaseMessages.getString( PKG, "JobTrans.Log.OpeningTrans", environmentSubstitute( getFilename() ) ) ); } break; case REPOSITORY_BY_NAME: if ( isDetailed() ) { logDetailed( BaseMessages.getString( PKG, "JobTrans.Log.OpeningTransInDirec", environmentSubstitute( getFilename() ), environmentSubstitute( directory ) ) ); } break; case REPOSITORY_BY_REFERENCE: if ( isDetailed() ) { logDetailed( BaseMessages.getString( PKG, "JobTrans.Log.OpeningTransByReference", transObjectId ) ); } break; default: break; } // Load the transformation only once for the complete loop! // Throws an exception if it was not possible to load the transformation. For example, the XML file doesn't exist or // the repository is down. // Log the stack trace and return an error condition from this // TransMeta transMeta = null; try { transMeta = getTransMeta( rep, metaStore, this ); } catch ( KettleException e ) { logError( Const.getStackTracker( e ) ); result.setNrErrors( 1 ); result.setResult( false ); return result; } int iteration = 0; String[] args1 = arguments; if ( args1 == null || args1.length == 0 ) { // No arguments set, look at the parent job. args1 = parentJob.getArguments(); } // initializeVariablesFrom(parentJob); // // For the moment only do variable translation at the start of a job, not // for every input row (if that would be switched on). This is for safety, // the real argument setting is later on. 
// String[] args = null; if ( args1 != null ) { args = new String[ args1.length ]; for ( int idx = 0; idx < args1.length; idx++ ) { args[ idx ] = environmentSubstitute( args1[ idx ] ); } } RowMetaAndData resultRow = null; boolean first = true; List<RowMetaAndData> rows = new ArrayList<RowMetaAndData>( result.getRows() ); while ( ( first && !execPerRow ) || ( execPerRow && rows != null && iteration < rows.size() && result.getNrErrors() == 0 ) && !parentJob.isStopped() ) { // Clear the result rows of the result // Otherwise we double the amount of rows every iteration in the simple cases. // if ( execPerRow ) { result.getRows().clear(); } if ( rows != null && execPerRow ) { resultRow = rows.get( iteration ); } else { resultRow = null; } NamedParams namedParam = new NamedParamsDefault(); if ( parameters != null ) { for ( int idx = 0; idx < parameters.length; idx++ ) { if ( !Utils.isEmpty( parameters[ idx ] ) ) { // We have a parameter // namedParam.addParameterDefinition( parameters[ idx ], "", "Job entry runtime" ); if ( Utils.isEmpty( Const.trim( parameterFieldNames[ idx ] ) ) ) { // There is no field name specified. // String value = Const.NVL( environmentSubstitute( parameterValues[ idx ] ), "" ); namedParam.setParameterValue( parameters[ idx ], value ); } else { // something filled in, in the field column... // String value = ""; if ( resultRow != null ) { value = resultRow.getString( parameterFieldNames[ idx ], "" ); } namedParam.setParameterValue( parameters[ idx ], value ); } } } } first = false; Result previousResult = result; try { if ( isDetailed() ) { logDetailed( BaseMessages.getString( PKG, "JobTrans.StartingTrans", getFilename(), getName(), getDescription() ) ); } if ( clearResultRows ) { previousResult.setRows( new ArrayList<RowMetaAndData>() ); } if ( clearResultFiles ) { previousResult.getResultFiles().clear(); } /* * Set one or more "result" rows on the transformation... 
*/ if ( execPerRow ) { // Execute for each input row if ( argFromPrevious ) { // Copy the input row to the (command line) arguments args = null; if ( resultRow != null ) { args = new String[ resultRow.size() ]; for ( int i = 0; i < resultRow.size(); i++ ) { args[ i ] = resultRow.getString( i, null ); } } } else { // Just pass a single row List<RowMetaAndData> newList = new ArrayList<RowMetaAndData>(); newList.add( resultRow ); // This previous result rows list can be either empty or not. // Depending on the checkbox "clear result rows" // In this case, it would execute the transformation with one extra row each time // Can't figure out a real use-case for it, but hey, who am I to decide that, right? // :-) // previousResult.getRows().addAll( newList ); } if ( paramsFromPrevious ) { // Copy the input the parameters if ( parameters != null ) { for ( int idx = 0; idx < parameters.length; idx++ ) { if ( !Utils.isEmpty( parameters[ idx ] ) ) { // We have a parameter if ( Utils.isEmpty( Const.trim( parameterFieldNames[ idx ] ) ) ) { namedParam.setParameterValue( parameters[ idx ], Const.NVL( environmentSubstitute( parameterValues[ idx ] ), "" ) ); } else { String fieldValue = ""; if ( resultRow != null ) { fieldValue = resultRow.getString( parameterFieldNames[ idx ], "" ); } // Get the value from the input stream namedParam.setParameterValue( parameters[ idx ], Const.NVL( fieldValue, "" ) ); } } } } } } else { if ( argFromPrevious ) { // Only put the first Row on the arguments args = null; if ( resultRow != null ) { args = new String[ resultRow.size() ]; for ( int i = 0; i < resultRow.size(); i++ ) { args[ i ] = resultRow.getString( i, null ); } } } if ( paramsFromPrevious ) { // Copy the input the parameters if ( parameters != null ) { for ( int idx = 0; idx < parameters.length; idx++ ) { if ( !Utils.isEmpty( parameters[ idx ] ) ) { // We have a parameter if ( Utils.isEmpty( Const.trim( parameterFieldNames[ idx ] ) ) ) { namedParam.setParameterValue( parameters[ idx ], 
Const.NVL( environmentSubstitute( parameterValues[ idx ] ), "" ) ); } else { String fieldValue = ""; if ( resultRow != null ) { fieldValue = resultRow.getString( parameterFieldNames[ idx ], "" ); } // Get the value from the input stream namedParam.setParameterValue( parameters[ idx ], Const.NVL( fieldValue, "" ) ); } } } } } } // Handle the parameters... // transMeta.clearParameters(); String[] parameterNames = transMeta.listParameters(); for ( int idx = 0; idx < parameterNames.length; idx++ ) { // Grab the parameter value set in the Trans job entry // String thisValue = namedParam.getParameterValue( parameterNames[ idx ] ); if ( !Utils.isEmpty( thisValue ) ) { // Set the value as specified by the user in the job entry // transMeta.setParameterValue( parameterNames[ idx ], thisValue ); } else { // See if the parameter had a value set in the parent job... // This value should pass down to the transformation if that's what we opted to do. // if ( isPassingAllParameters() ) { String parentValue = parentJob.getParameterValue( parameterNames[ idx ] ); if ( !Utils.isEmpty( parentValue ) ) { transMeta.setParameterValue( parameterNames[ idx ], parentValue ); } } } } boolean doFallback = true; SlaveServer remoteSlaveServer = null; TransExecutionConfiguration executionConfiguration = new TransExecutionConfiguration(); if ( !Utils.isEmpty( runConfiguration ) ) { log.logBasic( BaseMessages.getString( PKG, "JobTrans.RunConfig.Message" ), runConfiguration ); runConfiguration = environmentSubstitute( runConfiguration ); executionConfiguration.setRunConfiguration( runConfiguration ); try { ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.SpoonTransBeforeStart.id, new Object[] { executionConfiguration, parentJob.getJobMeta(), transMeta, rep } ); if ( !executionConfiguration.isExecutingLocally() && !executionConfiguration.isExecutingRemotely() && !executionConfiguration.isExecutingClustered() ) { result.setResult( true ); return result; } clustering = 
executionConfiguration.isExecutingClustered(); remoteSlaveServer = executionConfiguration.getRemoteServer(); doFallback = false; } catch ( KettleException e ) { log.logError( e.getMessage(), getName() ); result.setNrErrors( 1 ); result.setResult( false ); return result; } } if ( doFallback ) { // Figure out the remote slave server... // if ( !Utils.isEmpty( remoteSlaveServerName ) ) { String realRemoteSlaveServerName = environmentSubstitute( remoteSlaveServerName ); remoteSlaveServer = parentJob.getJobMeta().findSlaveServer( realRemoteSlaveServerName ); if ( remoteSlaveServer == null ) { throw new KettleException( BaseMessages.getString( PKG, "JobTrans.Exception.UnableToFindRemoteSlaveServer", realRemoteSlaveServerName ) ); } } } // Execute this transformation across a cluster of servers // if ( clustering ) { executionConfiguration.setClusterPosting( true ); executionConfiguration.setClusterPreparing( true ); executionConfiguration.setClusterStarting( true ); executionConfiguration.setClusterShowingTransformation( false ); executionConfiguration.setSafeModeEnabled( false ); executionConfiguration.setRepository( rep ); executionConfiguration.setLogLevel( transLogLevel ); executionConfiguration.setPreviousResult( previousResult ); // Also pass the variables from the transformation into the execution configuration // That way it can go over the HTTP connection to the slave server. // executionConfiguration.setVariables( transMeta ); // Also set the arguments... // executionConfiguration.setArgumentStrings( args ); if ( parentJob.getJobMeta().isBatchIdPassed() ) { executionConfiguration.setPassedBatchId( parentJob.getPassedBatchId() ); } TransSplitter transSplitter = null; long errors = 0; try { transSplitter = Trans.executeClustered( transMeta, executionConfiguration ); // Monitor the running transformations, wait until they are done. // Also kill them all if anything goes bad // Also clean up afterwards... 
// errors += Trans.monitorClusteredTransformation( log, transSplitter, parentJob ); } catch ( Exception e ) { logError( "Error during clustered execution. Cleaning up clustered execution.", e ); // In case something goes wrong, make sure to clean up afterwards! // errors++; if ( transSplitter != null ) { Trans.cleanupCluster( log, transSplitter ); } else { // Try to clean anyway... // SlaveServer master = null; for ( StepMeta stepMeta : transMeta.getSteps() ) { if ( stepMeta.isClustered() ) { for ( SlaveServer slaveServer : stepMeta.getClusterSchema().getSlaveServers() ) { if ( slaveServer.isMaster() ) { master = slaveServer; break; } } } } if ( master != null ) { master.deAllocateServerSockets( transMeta.getName(), null ); } } } result.clear(); if ( transSplitter != null ) { Result clusterResult = Trans.getClusteredTransformationResult( log, transSplitter, parentJob, executionConfiguration.isLogRemoteExecutionLocally() ); result.add( clusterResult ); } result.setNrErrors( result.getNrErrors() + errors ); } else if ( remoteSlaveServer != null ) { // Execute this transformation remotely // // Make sure we can parameterize the slave server connection // remoteSlaveServer.shareVariablesWith( this ); // Remote execution... 
// executionConfiguration.setPreviousResult( previousResult.clone() ); executionConfiguration.setArgumentStrings( args ); executionConfiguration.setVariables( this ); executionConfiguration.setRemoteServer( remoteSlaveServer ); executionConfiguration.setLogLevel( transLogLevel ); executionConfiguration.setRepository( rep ); executionConfiguration.setLogFileName( realLogFilename ); executionConfiguration.setSetAppendLogfile( setAppendLogfile ); executionConfiguration.setSetLogfile( setLogfile ); Map<String, String> params = executionConfiguration.getParams(); for ( String param : transMeta.listParameters() ) { String value = Const.NVL( transMeta.getParameterValue( param ), Const.NVL( transMeta.getParameterDefault( param ), transMeta.getVariable( param ) ) ); params.put( param, value ); } if ( parentJob.getJobMeta().isBatchIdPassed() ) { executionConfiguration.setPassedBatchId( parentJob.getPassedBatchId() ); } // Send the XML over to the slave server // Also start the transformation over there... // String carteObjectId = Trans.sendToSlaveServer( transMeta, executionConfiguration, rep, metaStore ); // Now start the monitoring... // SlaveServerTransStatus transStatus = null; while ( !parentJob.isStopped() && waitingToFinish ) { try { transStatus = remoteSlaveServer.getTransStatus( transMeta.getName(), carteObjectId, 0 ); if ( !transStatus.isRunning() ) { // The transformation is finished, get the result... // Result remoteResult = transStatus.getResult(); result.clear(); result.add( remoteResult ); // In case you manually stop the remote trans (browser etc), make sure it's marked as an error // if ( remoteResult.isStopped() ) { result.setNrErrors( result.getNrErrors() + 1 ); // } // Make sure to clean up : write a log record etc, close any left-over sockets etc. 
// remoteSlaveServer.cleanupTransformation( transMeta.getName(), carteObjectId ); break; } } catch ( Exception e1 ) { logError( BaseMessages.getString( PKG, "JobTrans.Error.UnableContactSlaveServer", "" + remoteSlaveServer, transMeta.getName() ), e1 ); result.setNrErrors( result.getNrErrors() + 1L ); break; // Stop looking too, chances are too low the server will come back on-line } // sleep for 2 seconds try { Thread.sleep( 2000 ); } catch ( InterruptedException e ) { // Ignore } } if ( parentJob.isStopped() ) { // See if we have a status and if we need to stop the remote execution here... // if ( transStatus == null || transStatus.isRunning() ) { // Try a remote abort ... // remoteSlaveServer.stopTransformation( transMeta.getName(), transStatus.getId() ); // And a cleanup... // remoteSlaveServer.cleanupTransformation( transMeta.getName(), transStatus.getId() ); // Set an error state! // result.setNrErrors( result.getNrErrors() + 1L ); } } } else { // Execute this transformation on the local machine // // Create the transformation from meta-data // //trans = new Trans( transMeta, this ); final TransMeta meta = transMeta; trans = new TransSupplier( transMeta, log, () -> new Trans( meta ) ).get(); trans.setParent( this ); // Pass the socket repository as early as possible... // trans.setSocketRepository( parentJob.getSocketRepository() ); if ( parentJob.getJobMeta().isBatchIdPassed() ) { trans.setPassedBatchId( parentJob.getPassedBatchId() ); } // set the parent job on the transformation, variables are taken from here... 
// trans.setParentJob( parentJob ); trans.setParentVariableSpace( parentJob ); trans.setLogLevel( transLogLevel ); trans.setPreviousResult( previousResult ); trans.setArguments( arguments ); // Mappings need the repository to load from // trans.setRepository( rep ); // inject the metaStore trans.setMetaStore( metaStore ); // First get the root job // Job rootJob = parentJob; while ( rootJob.getParentJob() != null ) { rootJob = rootJob.getParentJob(); } // Get the start and end-date from the root job... // trans.setJobStartDate( rootJob.getStartDate() ); trans.setJobEndDate( rootJob.getEndDate() ); // Inform the parent job we started something here... // for ( DelegationListener delegationListener : parentJob.getDelegationListeners() ) { // TODO: copy some settings in the job execution configuration, not strictly needed // but the execution configuration information is useful in case of a job re-start // delegationListener.transformationDelegationStarted( trans, new TransExecutionConfiguration() ); } try { // Start execution... // trans.execute( args ); // Wait until we're done with it... //TODO is it possible to implement Observer pattern to avoid Thread.sleep here? while ( !trans.isFinished() && trans.getErrors() == 0 ) { if ( parentJob.isStopped() ) { trans.stopAll(); break; } else { try { Thread.sleep( 0, 500 ); } catch ( InterruptedException e ) { // Ignore errors } } } trans.waitUntilFinished(); if ( parentJob.isStopped() || trans.getErrors() != 0 ) { trans.stopAll(); result.setNrErrors( 1 ); } Result newResult = trans.getResult(); result.clear(); // clear only the numbers, NOT the files or rows. result.add( newResult ); // Set the result rows too, if any ... 
if ( !Utils.isEmpty( newResult.getRows() ) ) { result.setRows( newResult.getRows() ); } if ( setLogfile ) { ResultFile resultFile = new ResultFile( ResultFile.FILE_TYPE_LOG, KettleVFS.getFileObject( realLogFilename, this ), parentJob .getJobname(), toString() ); result.getResultFiles().put( resultFile.getFile().toString(), resultFile ); } } catch ( KettleException e ) { logError( BaseMessages.getString( PKG, "JobTrans.Error.UnablePrepareExec" ), e ); result.setNrErrors( 1 ); } } } catch ( Exception e ) { logError( BaseMessages.getString( PKG, "JobTrans.ErrorUnableOpenTrans", e.getMessage() ) ); logError( Const.getStackTracker( e ) ); result.setNrErrors( 1 ); } iteration++; } if ( setLogfile ) { if ( logChannelFileWriter != null ) { logChannelFileWriter.stopLogging(); ResultFile resultFile = new ResultFile( ResultFile.FILE_TYPE_LOG, logChannelFileWriter.getLogFile(), parentJob.getJobname(), getName() ); result.getResultFiles().put( resultFile.getFile().toString(), resultFile ); // See if anything went wrong during file writing... 
// if ( logChannelFileWriter.getException() != null ) { logError( "Unable to open log file [" + getLogFilename() + "] : " ); logError( Const.getStackTracker( logChannelFileWriter.getException() ) ); result.setNrErrors( 1 ); result.setResult( false ); return result; } } } if ( result.getNrErrors() == 0 ) { result.setResult( true ); } else { result.setResult( false ); } return result; } /** * @deprecated use {@link #getTransMeta(Repository, IMetaStore, VariableSpace)} * @param rep * @param space * @return * @throws KettleException */ @Deprecated public TransMeta getTransMeta( Repository rep, VariableSpace space ) throws KettleException { return getTransMeta( rep, null, space ); } public TransMeta getTransMeta( Repository rep, IMetaStore metaStore, VariableSpace space ) throws KettleException { try { TransMeta transMeta = null; CurrentDirectoryResolver r = new CurrentDirectoryResolver(); VariableSpace tmpSpace = r.resolveCurrentDirectory( specificationMethod, space, rep, parentJob, getFilename() ); switch ( specificationMethod ) { case FILENAME: String realFilename = tmpSpace.environmentSubstitute( getFilename() ); if ( rep != null ) { realFilename = r.normalizeSlashes( realFilename ); // need to try to load from the repository try { String dirStr = realFilename.substring( 0, realFilename.lastIndexOf( "/" ) ); String tmpFilename = realFilename.substring( realFilename.lastIndexOf( "/" ) + 1 ); RepositoryDirectoryInterface dir = rep.findDirectory( dirStr ); transMeta = rep.loadTransformation( tmpFilename, dir, null, true, null ); } catch ( KettleException ke ) { // try without extension if ( realFilename.endsWith( Const.STRING_TRANS_DEFAULT_EXT ) ) { try { String tmpFilename = realFilename.substring( realFilename.lastIndexOf( "/" ) + 1, realFilename.indexOf( "." 
+ Const.STRING_TRANS_DEFAULT_EXT ) ); String dirStr = realFilename.substring( 0, realFilename.lastIndexOf( "/" ) ); RepositoryDirectoryInterface dir = rep.findDirectory( dirStr ); transMeta = rep.loadTransformation( tmpFilename, dir, null, true, null ); } catch ( KettleException ke2 ) { // fall back to try loading from file system (transMeta is going to be null) } } } } if ( transMeta == null ) { logBasic( "Loading transformation from XML file [" + realFilename + "]" ); transMeta = new TransMeta( realFilename, metaStore, null, true, this, null ); } break; case REPOSITORY_BY_NAME: String transname = tmpSpace.environmentSubstitute( getTransname() ); String realDirectory = tmpSpace.environmentSubstitute( getDirectory() ); logBasic( BaseMessages.getString( PKG, "JobTrans.Log.LoadingTransRepDirec", transname, realDirectory ) ); if ( rep != null ) { // // It only makes sense to try to load from the repository when the // repository is also filled in. // // It reads last the last revision from the repository. // realDirectory = r.normalizeSlashes( realDirectory ); RepositoryDirectoryInterface repositoryDirectory = rep.findDirectory( realDirectory ); transMeta = rep.loadTransformation( transname, repositoryDirectory, null, true, null ); } else { // rep is null, let's try loading by filename try { transMeta = new TransMeta( realDirectory + "/" + transname, metaStore, null, true, this, null ); } catch ( KettleException ke ) { try { // add .ktr extension and try again transMeta = new TransMeta( realDirectory + "/" + transname + "." 
+ Const.STRING_TRANS_DEFAULT_EXT, metaStore, null, true, this, null ); } catch ( KettleException ke2 ) { throw new KettleException( BaseMessages.getString( PKG, "JobTrans.Exception.NoRepDefined" ), ke2 ); } } } break; case REPOSITORY_BY_REFERENCE: if ( transObjectId == null ) { throw new KettleException( BaseMessages.getString( PKG, "JobTrans.Exception.ReferencedTransformationIdIsNull" ) ); } if ( rep != null ) { // Load the last revision // transMeta = rep.loadTransformation( transObjectId, null ); } break; default: throw new KettleException( "The specified object location specification method '" + specificationMethod + "' is not yet supported in this job entry." ); } if ( transMeta != null ) { // copy parent variables to this loaded variable space. // transMeta.copyVariablesFrom( this ); // set Internal.Entry.Current.Directory again because it was changed transMeta.setInternalKettleVariables(); // Pass repository and metastore references // transMeta.setRepository( rep ); transMeta.setMetaStore( metaStore ); } return transMeta; } catch ( Exception e ) { throw new KettleException( BaseMessages.getString( PKG, "JobTrans.Exception.MetaDataLoad" ), e ); } } @Override public boolean evaluates() { return true; } @Override public boolean isUnconditional() { return true; } @Override public List<SQLStatement> getSQLStatements( Repository repository, IMetaStore metaStore, VariableSpace space ) throws KettleException { this.copyVariablesFrom( space ); TransMeta transMeta = getTransMeta( repository, metaStore, this ); return transMeta.getSQLStatements(); } /** * @return Returns the directoryPath. */ public String getDirectoryPath() { return directoryPath; } /** * @param directoryPath The directoryPath to set. 
*/ public void setDirectoryPath( String directoryPath ) { this.directoryPath = directoryPath; } /** * @return the clustering */ public boolean isClustering() { return clustering; } /** * @param clustering the clustering to set */ public void setClustering( boolean clustering ) { this.clustering = clustering; } @Override public void check( List<CheckResultInterface> remarks, JobMeta jobMeta, VariableSpace space, Repository repository, IMetaStore metaStore ) { if ( setLogfile ) { JobEntryValidatorUtils.andValidator().validate( this, "logfile", remarks, AndValidator.putValidators( JobEntryValidatorUtils.notBlankValidator() ) ); } if ( !Utils.isEmpty( filename ) ) { JobEntryValidatorUtils.andValidator().validate( this, "filename", remarks, AndValidator.putValidators( JobEntryValidatorUtils.notBlankValidator() ) ); } else { JobEntryValidatorUtils.andValidator().validate( this, "transname", remarks, AndValidator.putValidators( JobEntryValidatorUtils.notBlankValidator() ) ); JobEntryValidatorUtils.andValidator().validate( this, "directory", remarks, AndValidator.putValidators( JobEntryValidatorUtils.notNullValidator() ) ); } } @Override public List<ResourceReference> getResourceDependencies( JobMeta jobMeta ) { List<ResourceReference> references = super.getResourceDependencies( jobMeta ); if ( !Utils.isEmpty( filename ) ) { // During this phase, the variable space hasn't been initialized yet - it seems // to happen during the execute. As such, we need to use the job meta's resolution // of the variables. String realFileName = jobMeta.environmentSubstitute( filename ); ResourceReference reference = new ResourceReference( this ); reference.getEntries().add( new ResourceEntry( realFileName, ResourceType.ACTIONFILE ) ); references.add( reference ); } return references; } /** * We're going to load the transformation meta data referenced here. Then we're going to give it a new filename, * modify that filename in this entries. 
The parent caller will have made a copy of it, so it should be OK to do so. * <p/> * Exports the object to a flat-file system, adding content with filename keys to a set of definitions. The supplied * resource naming interface allows the object to name appropriately without worrying about those parts of the * implementation specific details. * * @param space The variable space to resolve (environment) variables with. * @param definitions The map containing the filenames and content * @param namingInterface The resource naming interface allows the object to be named appropriately * @param repository The repository to load resources from * @param metaStore the metaStore to load external metadata from * @return The filename for this object. (also contained in the definitions map) * @throws KettleException in case something goes wrong during the export */ @Override public String exportResources( VariableSpace space, Map<String, ResourceDefinition> definitions, ResourceNamingInterface namingInterface, Repository repository, IMetaStore metaStore ) throws KettleException { // Try to load the transformation from repository or file. // Modify this recursively too... // // AGAIN: there is no need to clone this job entry because the caller is responsible for this. // // First load the transformation metadata... // copyVariablesFrom( space ); TransMeta transMeta = getTransMeta( repository, space ); // Also go down into the transformation and export the files there. (mapping recursively down) // String proposedNewFilename = transMeta.exportResources( transMeta, definitions, namingInterface, repository, metaStore ); // To get a relative path to it, we inject ${Internal.Entry.Current.Directory} // String newFilename = "${" + Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY + "}/" + proposedNewFilename; // Set the correct filename inside the XML. 
// transMeta.setFilename( newFilename ); // exports always reside in the root directory, in case we want to turn this into a file repository... // transMeta.setRepositoryDirectory( new RepositoryDirectory() ); // export to filename ALWAYS (this allows the exported XML to be executed remotely) // setSpecificationMethod( ObjectLocationSpecificationMethod.FILENAME ); // change it in the job entry // filename = newFilename; return proposedNewFilename; } protected String getLogfile() { return logfile; } /** * @return the remote slave server name */ public String getRemoteSlaveServerName() { return remoteSlaveServerName; } /** * @param remoteSlaveServerName the remote slave server name to set */ public void setRemoteSlaveServerName( String remoteSlaveServerName ) { this.remoteSlaveServerName = remoteSlaveServerName; } /** * @return the waitingToFinish */ public boolean isWaitingToFinish() { return waitingToFinish; } /** * @param waitingToFinish the waitingToFinish to set */ public void setWaitingToFinish( boolean waitingToFinish ) { this.waitingToFinish = waitingToFinish; } /** * @return the followingAbortRemotely */ public boolean isFollowingAbortRemotely() { return followingAbortRemotely; } /** * @param followingAbortRemotely the followingAbortRemotely to set */ public void setFollowingAbortRemotely( boolean followingAbortRemotely ) { this.followingAbortRemotely = followingAbortRemotely; } public boolean isLoggingRemoteWork() { return loggingRemoteWork; } public void setLoggingRemoteWork( boolean loggingRemoteWork ) { this.loggingRemoteWork = loggingRemoteWork; } /** * @return the passingAllParameters */ public boolean isPassingAllParameters() { return passingAllParameters; } /** * @param passingAllParameters the passingAllParameters to set */ public void setPassingAllParameters( boolean passingAllParameters ) { this.passingAllParameters = passingAllParameters; } public String getRunConfiguration() { return runConfiguration; } public void setRunConfiguration( String 
runConfiguration ) { this.runConfiguration = runConfiguration; } public Trans getTrans() { return trans; } /** * @return the transObjectId */ public ObjectId getTransObjectId() { return transObjectId; } /** * @param transObjectId the transObjectId to set */ public void setTransObjectId( ObjectId transObjectId ) { this.transObjectId = transObjectId; } /** * @return the specificationMethod */ public ObjectLocationSpecificationMethod getSpecificationMethod() { return specificationMethod; } @Override public ObjectLocationSpecificationMethod[] getSpecificationMethods() { return new ObjectLocationSpecificationMethod[] { specificationMethod }; } /** * @param specificationMethod the specificationMethod to set */ public void setSpecificationMethod( ObjectLocationSpecificationMethod specificationMethod ) { this.specificationMethod = specificationMethod; } @Override public boolean hasRepositoryReferences() { return specificationMethod == ObjectLocationSpecificationMethod.REPOSITORY_BY_REFERENCE; } /** * Look up the references after import * * @param repository the repository to reference. */ @Override public void lookupRepositoryReferences( Repository repository ) throws KettleException { // The correct reference is stored in the trans name and directory attributes... // RepositoryDirectoryInterface repositoryDirectoryInterface = RepositoryImportLocation.getRepositoryImportLocation().findDirectory( directory ); transObjectId = repository.getTransformationID( transname, repositoryDirectoryInterface ); } /** * @return The objects referenced in the step, like a a transformation, a job, a mapper, a reducer, a combiner, ... 
*/ @Override public String[] getReferencedObjectDescriptions() { return new String[] { BaseMessages.getString( PKG, "JobEntryTrans.ReferencedObject.Description" ), }; } private boolean isTransformationDefined() { return !Utils.isEmpty( filename ) || transObjectId != null || ( !Utils.isEmpty( this.directory ) && !Utils.isEmpty( transname ) ); } @Override public boolean[] isReferencedObjectEnabled() { return new boolean[] { isTransformationDefined(), }; } /** * Load the referenced object * * @param index the referenced object index to load (in case there are multiple references) * @param rep the repository * @param metaStore metaStore * @param space the variable space to use * @return the referenced object once loaded * @throws KettleException */ @Override public Object loadReferencedObject( int index, Repository rep, IMetaStore metaStore, VariableSpace space ) throws KettleException { return getTransMeta( rep, metaStore, space ); } @Override public void setParentJobMeta( JobMeta parentJobMeta ) { JobMeta previous = getParentJobMeta(); super.setParentJobMeta( parentJobMeta ); if ( parentJobMeta != null ) { parentJobMeta.addCurrentDirectoryChangedListener( currentDirListener ); variables.setParentVariableSpace( parentJobMeta ); } else if ( previous != null ) { previous.removeCurrentDirectoryChangedListener( currentDirListener ); } } }
apache-2.0
xbib/malva
malva-core/src/main/java/org/xbib/web/util/BytesArray.java
2580
package org.xbib.web.util;

import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

/**
 * A byte array, wrapped in a {@link org.xbib.web.util.BytesReference}.
 *
 * An instance represents the view {@code bytes[offset .. offset + length)};
 * all index parameters of the accessor methods are relative to that view.
 */
public class BytesArray implements BytesReference {

    private static final String EMPTY_STRING = "";

    private byte[] bytes;

    private int offset;

    private int length;

    /**
     * Create {@link BytesArray} from a byte array.
     *
     * @param bytes the byte array
     */
    public BytesArray(byte[] bytes) {
        this.bytes = bytes;
        this.offset = 0;
        this.length = bytes.length;
    }

    /**
     * Create {@link BytesArray} from a part of a byte array.
     *
     * @param bytes the byte array
     * @param offset the offset
     * @param length the length
     */
    public BytesArray(byte[] bytes, int offset, int length) {
        this.bytes = bytes;
        this.offset = offset;
        this.length = length;
    }

    /**
     * Append the given bytes to this array. After this call the instance
     * owns a freshly allocated backing array with offset 0.
     *
     * @param b the bytes to append
     */
    public void write(byte[] b) {
        byte[] c = new byte[length + b.length];
        // bug fix: copy the current view starting at 'offset' (the original
        // copied from index 0, which is wrong for a sliced instance)
        System.arraycopy(bytes, offset, c, 0, length);
        // bug fix: append b right after the copied view; the original used
        // bytes.length as the destination position, which is wrong whenever
        // length != bytes.length (i.e. for any slice)
        System.arraycopy(b, 0, c, length, b.length);
        this.bytes = c;
        this.offset = 0;
        this.length = c.length;
    }

    @Override
    public byte get(int index) {
        return bytes[offset + index];
    }

    @Override
    public int length() {
        return length;
    }

    @Override
    public int indexOf(byte b, int offset, int len) {
        // bug fix: the original checked (offset + this.length) > this.length,
        // which rejected every positive offset, and it scanned the backing
        // array without applying this instance's own offset
        if (offset < 0 || offset + len > length) {
            throw new IllegalArgumentException();
        }
        for (int i = offset; i < offset + len; i++) {
            if (bytes[this.offset + i] == b) {
                return i;
            }
        }
        return -1;
    }

    @Override
    public BytesReference slice(int from, int length) {
        if (from < 0 || (from + length) > this.length) {
            throw new IllegalArgumentException("can't slice a buffer with length [" + this.length
                    + "], with slice parameters from [" + from + "], length [" + length + "]");
        }
        return new BytesArray(bytes, offset + from, length);
    }

    @Override
    public byte[] toBytes() {
        // return the backing array directly only when it exactly matches the view
        if (offset == 0 && bytes.length == length) {
            return bytes;
        }
        return Arrays.copyOfRange(bytes, offset, offset + length);
    }

    @Override
    public String toUtf8() {
        if (length == 0) {
            return EMPTY_STRING;
        }
        return new String(bytes, offset, length, StandardCharsets.UTF_8);
    }
}
apache-2.0
apache/olingo-odata2
odata2-lib/odata-client-core/src/main/java/org/apache/olingo/odata2/client/core/edm/Impl/EdmNavigationPropertyImpl.java
3571
/******************************************************************************* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. ******************************************************************************/ package org.apache.olingo.odata2.client.core.edm.Impl; import org.apache.olingo.odata2.api.edm.EdmAnnotatable; import org.apache.olingo.odata2.api.edm.EdmAnnotations; import org.apache.olingo.odata2.api.edm.EdmAssociation; import org.apache.olingo.odata2.api.edm.EdmException; import org.apache.olingo.odata2.api.edm.EdmMapping; import org.apache.olingo.odata2.api.edm.EdmMultiplicity; import org.apache.olingo.odata2.api.edm.EdmNavigationProperty; import org.apache.olingo.odata2.api.edm.EdmType; import org.apache.olingo.odata2.api.edm.FullQualifiedName; import org.apache.olingo.odata2.api.edm.provider.Mapping; import org.apache.olingo.odata2.client.api.edm.EdmDocumentation; /** * Objects of this class represent EdmNavigationProperty * */ public class EdmNavigationPropertyImpl extends EdmTypedImpl implements EdmNavigationProperty, EdmAnnotatable { private EdmAnnotations annotations; private FullQualifiedName relationship; private String fromRole; private String toRole; private EdmDocumentation documentation; private Mapping mapping; public 
EdmDocumentation getDocumentation() { return documentation; } public void setDocumentation(EdmDocumentation documentation) { this.documentation = documentation; } public void setMapping(Mapping mapping) { this.mapping = mapping; } public void setAnnotations(EdmAnnotations annotations) { this.annotations = annotations; } public void setFromRole(String fromRole) { this.fromRole = fromRole; } public void setToRole(String toRole) { this.toRole = toRole; } @Override public EdmType getType() throws EdmException { return edmType; } @Override public EdmMultiplicity getMultiplicity() throws EdmException { return multiplicity; } @Override public EdmAssociation getRelationship() throws EdmException { return edm.getAssociation(relationship.getNamespace(), relationship.getName()); } public FullQualifiedName getRelationshipName() throws EdmException { return relationship; } public void setRelationshipName( FullQualifiedName relationship){ this.relationship = relationship; } @Override public String getFromRole() throws EdmException { return fromRole; } @Override public String getToRole() throws EdmException { return toRole; } @Override public EdmAnnotations getAnnotations() throws EdmException { return annotations; } @Override public EdmMapping getMapping() throws EdmException { return mapping; } @Override public String toString() { return String.format(name); } }
apache-2.0
johtani/redpen
redpen-core/src/test/java/org/unigram/docvalidator/validator/sentence/InvalidCharacterValidatorTest.java
3585
/**
 * redpen: a text inspection tool
 * Copyright (C) 2014 Recruit Technologies Co., Ltd. and contributors
 * (see CONTRIBUTORS.md)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.unigram.docvalidator.validator.sentence;

import static org.junit.Assert.*;

import java.io.InputStream;
import java.util.List;

import org.apache.commons.io.IOUtils;
import org.junit.Test;
import org.unigram.docvalidator.model.Sentence;
import org.unigram.docvalidator.config.CharacterTable;
import org.unigram.docvalidator.config.CharacterTableLoader;
import org.unigram.docvalidator.ValidationError;

/**
 * Test double exposing the protected character-table setter so tests can
 * inject an in-memory character table.
 */
class InvalidCharacterValidatorForTest extends InvalidCharacterValidator {
  void loadCharacterTable (CharacterTable characterTable) {
    this.setCharacterTable(characterTable);
  }
}

/**
 * Tests for {@link InvalidCharacterValidator}: one error is reported per
 * occurrence of a character listed as invalid in the character table.
 */
public class InvalidCharacterValidatorTest {

  /**
   * Build a validator backed by a character table loaded from the given XML.
   * Extracted because all tests shared this boilerplate verbatim.
   *
   * @param charTableXml the character-table configuration as an XML string
   * @return a validator configured with that table
   */
  private static InvalidCharacterValidatorForTest createValidator(String charTableXml) {
    InputStream stream = IOUtils.toInputStream(charTableXml);
    CharacterTable characterTable = CharacterTableLoader.load(stream);
    InvalidCharacterValidatorForTest validator = new InvalidCharacterValidatorForTest();
    validator.loadCharacterTable(characterTable);
    return validator;
  }

  @Test
  public void testWithInvalidCharacter() {
    // redundant `new String(...)` wrappers removed: a literal is already a String
    String sampleCharTable =
        "<?xml version=\"1.0\"?>"+
        "<character-table>" +
        "<character name=\"EXCLAMATION_MARK\" value=\"!\" invalid-chars=\"!\"/>" +
        "</character-table>";
    InvalidCharacterValidatorForTest validator = createValidator(sampleCharTable);
    // the sentence ends with a full-width '!' which the table marks invalid
    Sentence str = new Sentence("わたしはカラオケが大好き!",0);
    List<ValidationError> errors = validator.validate(str);
    assertEquals(1, errors.size());
  }

  @Test
  public void testWithoutInvalidCharacter() {
    String sampleCharTable =
        "<?xml version=\"1.0\"?>"+
        "<character-table>" +
        "<character name=\"EXCLAMATION_MARK\" value=\"!\" invalid-chars=\"!\"/>" +
        "</character-table>";
    InvalidCharacterValidatorForTest validator = createValidator(sampleCharTable);
    // ASCII-only sentence: contains none of the configured invalid characters
    Sentence str = new Sentence("I like karaoke!",0);
    List<ValidationError> errors = validator.validate(str);
    assertEquals(0, errors.size());
  }

  @Test
  public void testWithoutMultipleInvalidCharacter() {
    String sampleCharTable =
        "<?xml version=\"1.0\"?>"+
        "<character-table>" +
        "<character name=\"EXCLAMATION_MARK\" value=\"!\" invalid-chars=\"!\"/>" +
        "<character name=\"COMMA\" value=\",\" invalid-chars=\"、\"/>" +
        "</character-table>";
    InvalidCharacterValidatorForTest validator = createValidator(sampleCharTable);
    // sentence contains both a full-width comma and a full-width '!': two errors
    Sentence str = new Sentence("わたしは、カラオケが好き!",0);
    List<ValidationError> errors = validator.validate(str);
    assertEquals(2, errors.size());
  }
}
apache-2.0
arenadata/ambari
ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostEntity_.java
2278
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.ambari.server.orm.entities;

import javax.persistence.metamodel.SingularAttribute;

/**
 * This class exists so that JPQL can use static singular attributes that are
 * strongly typed as opposed to Java reflection like HostEntity.get("fieldname")
 */
// JPA canonical metamodel for HostEntity; the persistence provider populates
// these attribute handles at runtime (hence volatile, per the JPA 2 metamodel
// convention). Field names must match the persistent fields of HostEntity.
@javax.persistence.metamodel.StaticMetamodel(HostEntity.class)
public class HostEntity_ {
  // primary key and host identity
  public static volatile SingularAttribute<HostEntity, Long> hostId;
  public static volatile SingularAttribute<HostEntity, String> hostName;
  // network addressing
  public static volatile SingularAttribute<HostEntity, String> ipv4;
  public static volatile SingularAttribute<HostEntity, String> ipv6;
  public static volatile SingularAttribute<HostEntity, String> publicHostName;
  // hardware characteristics reported by the agent
  public static volatile SingularAttribute<HostEntity, Long> totalMem;
  public static volatile SingularAttribute<HostEntity, Integer> cpuCount;
  public static volatile SingularAttribute<HostEntity, Integer> phCpuCount;
  public static volatile SingularAttribute<HostEntity, String> cpuInfo;
  // operating system details
  public static volatile SingularAttribute<HostEntity, String> osArch;
  public static volatile SingularAttribute<HostEntity, String> osInfo;
  // agent registration / discovery state
  public static volatile SingularAttribute<HostEntity, String> discoveryStatus;
  public static volatile SingularAttribute<HostEntity, Long> lastRegistrationTime;
  public static volatile SingularAttribute<HostEntity, String> rackInfo;
  public static volatile SingularAttribute<HostEntity, String> hostAttributes;
}
apache-2.0
yuriBobrik/HPE-Software-Bamboo-Plugin
Bamboo/hp.application.automation/src/main/java/com/hpe/application/automation/bamboo/tasks/TestResultHelperAlm.java
8745
/**
 © Copyright 2015 Hewlett Packard Enterprise Development LP

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
 in the Software without restriction, including without limitation the rights
 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 copies of the Software, and to permit persons to whom the Software is
 furnished to do so, subject to the following conditions:

 The above copyright notice and this permission notice shall be included in
 all copies or substantial portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE
 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.
 */

package com.hpe.application.automation.bamboo.tasks;

import com.atlassian.bamboo.build.LogEntry;
import com.atlassian.bamboo.build.logger.BuildLogger;
import com.atlassian.bamboo.task.TaskContext;
import com.atlassian.bamboo.utils.i18n.I18nBean;
import com.google.common.collect.Lists;
import org.apache.commons.io.FileUtils;

import java.io.*;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.hpe.application.automation.bamboo.tasks.TestResultHelper.getOutputFilePath;

/**
 * Collects ALM run-report links from a Bamboo task's build log / result file
 * and materializes each one as a small HTML redirect artifact so the run log
 * can be opened from the Bamboo build results page.
 *
 * Created by ybobrik on 9/25/2015.
 */
public class TestResultHelperAlm {

	private static final String CAN_NOT_SAVE_RUN_LOG_MESSAGE = "Alm.error.canNotSaveTheRunLog";
	private static final String RUN_LOG_FILE_NAME = "RunLog";
	// HTML template that immediately redirects the browser to the ALM run results.
	// ALM_RUN_RESULTS_LINK_PARAMETER is substituted with the (quoted) report link.
	private static final String RUN_LOG_HTML_TEXT = "<!DOCTYPE html>\n" +
			"<html>\n" +
			" <head>\n" +
			"  <title>Test</title>\n" +
			"  <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\" />\n" +
			"  <script type=\"text/javascript\">\n" +
			"    function codeAddress() {\n" +
			"      window.location = ALM_RUN_RESULTS_LINK_PARAMETER;\n" +
			"    }\n" +
			"    window.onload = codeAddress;\n" +
			"  </script>\n" +
			" </head>\n" +
			" <body>\n" +
			" \n" +
			" </body>\n" +
			"</html>";
	private static final String ALM_RUN_RESULTS_LINK_PARAMETER = "ALM_RUN_RESULTS_LINK_PARAMETER";
	// report link example:
	// td://Automation.AUTOMATION.host:8080/qcbin/TestLabModule-000000003649890581?EntityType=IRun&amp;EntityID=1195091
	// compiled once instead of per findRequiredStringsFromFile call
	private static final Pattern REPORT_LINK_PATTERN = Pattern.compile("td://.+?;EntityID=[0-9]+");

	// NOTE(review): static mutable state shared across tasks of one build; not
	// thread-safe if Bamboo runs tasks concurrently — confirm executor model.
	private static List<String> savedALMRunLogPaths = new ArrayList<String>();
	private static int currentBuildNumber;

	/**
	 * Find the ALM run-report link(s) produced by the given task and write one
	 * HTML redirect file per link into the task's output directory.
	 *
	 * @param taskContext      the executing Bamboo task
	 * @param resultFile       optional result file to scan for report links (Run-from-ALM task)
	 * @param linkSearchFilter substring identifying log lines that carry a link
	 * @param i18nBean         used to resolve task names and error messages
	 */
	protected static void AddALMArtifacts(final TaskContext taskContext, File resultFile, String linkSearchFilter, I18nBean i18nBean) {
		clearSavedALMRunLogPaths(taskContext);
		// NOTE(review): taskName may be null if the configuration key is absent;
		// the equals calls below would then NPE — confirm the key is always set.
		String taskName = taskContext.getConfigurationMap().get(CommonTaskConfigurationProperties.TASK_NAME);
		if(taskName.equals(i18nBean.getText(AlmLabManagementTaskConfigurator.TASK_NAME_VALUE)))
		{
			// Lab Management task: exactly one run log link, taken from the build log
			String taskRunLogPath = findRequiredStringFromLog(taskContext, linkSearchFilter);
			if (com.hpe.application.automation.tools.common.StringUtils.isNullOrEmpty(taskRunLogPath)) {
				taskContext.getBuildLogger().addErrorLogEntry(i18nBean.getText(CAN_NOT_SAVE_RUN_LOG_MESSAGE));
				return;
			}
			createResultFile(taskContext, taskRunLogPath, ".*processRunId=", i18nBean);
		}
		else if(taskName.equals(i18nBean.getText(RunFromAlmTaskConfigurator.TASK_NAME_VALUE))){
			// Run-from-ALM task: possibly many links; prefer the result file,
			// fall back to scanning the build log
			List<String> links = null;
			if (resultFile != null && resultFile.exists()) {
				links = findRequiredStringsFromFile(taskContext.getBuildLogger(), resultFile);
			}
			if (links == null || links.size() < 1) {
				links = findRequiredStringsFromLog(taskContext.getBuildLogger(), linkSearchFilter);
			}
			// fix: was `Integer linksAmount = links.size(); if (linksAmount.equals(0))`
			// — needless boxing; isEmpty() says the same thing directly
			if (links.isEmpty()) {
				taskContext.getBuildLogger().addErrorLogEntry(i18nBean.getText(CAN_NOT_SAVE_RUN_LOG_MESSAGE));
				return;
			}
			for (String link : links) {
				createResultFile(taskContext, link, ".*EntityID=",i18nBean);
			}
		}
	}

	/**
	 * Reset the remembered link list when a new build starts (the list is static
	 * and would otherwise carry links over from the previous build).
	 */
	private static void clearSavedALMRunLogPaths(TaskContext taskContext) {
		int taskBuildNumber = taskContext.getBuildContext().getBuildNumber();
		if(savedALMRunLogPaths.size() > 0 && taskBuildNumber != currentBuildNumber)
		{
			savedALMRunLogPaths.clear();
		}
		currentBuildNumber = taskBuildNumber;
	}

	/**
	 * Scan the build log (newest entry first) for the first not-yet-saved
	 * http(s) link on a line containing the search filter.
	 * Used by the Run-from-ALM-Lab-Management task.
	 *
	 * @return the link, or null if none was found
	 */
	private static String findRequiredStringFromLog(TaskContext taskContext, String searchFilter)
	{
		BuildLogger logger = taskContext.getBuildLogger();
		List<LogEntry> buildLog = Lists.reverse(logger.getBuildLog());
		for(LogEntry logEntry: buildLog){
			String log = logEntry.getLog();
			if(log.contains(searchFilter))
			{
				int pathBegin = log.indexOf("http");
				if(pathBegin > -1)
				{
					log=log.substring(pathBegin);
					if(!savedALMRunLogPaths.contains(log)){
						return log;
					}
				}
			}
		}
		return null;
	}

	/**
	 * Scan the build log (newest entry first) for all not-yet-saved "td:" links
	 * on lines containing the search filter. Used by the Run-from-ALM task.
	 */
	private static List<String> findRequiredStringsFromLog(BuildLogger logger, String searchFilter)
	{
		List<LogEntry> buildLog = Lists.reverse(logger.getBuildLog());
		List<String> results = new ArrayList<String>();
		for(LogEntry logEntry: buildLog){
			String log = logEntry.getLog();
			if(log.contains(searchFilter))
			{
				int pathBegin = log.indexOf("td:");
				if(pathBegin > -1)
				{
					String result = log.substring(pathBegin);
					if(!results.contains(result) && !savedALMRunLogPaths.contains(result)){
						results.add(result);
					}
				}
			}
		}
		return results;
	}

	/**
	 * Extract all ALM report links (td://...EntityID=NNN) from the given result
	 * file. Errors are logged to the build log and yield an empty list.
	 */
	private static List<String> findRequiredStringsFromFile(BuildLogger logger, File resultFile) {
		List<String> results = new ArrayList<String>();
		try {
			StringBuilder sb = new StringBuilder();
			// try-with-resources replaces the original manual try/finally close
			// NOTE(review): FileReader uses the platform default charset; the
			// result file is presumably ASCII/UTF-8 — confirm before changing.
			try (BufferedReader in = new BufferedReader(new FileReader(resultFile.getAbsoluteFile()))) {
				String s;
				while((s = in.readLine()) != null){
					sb.append(s);
				}
			}
			Matcher m = REPORT_LINK_PATTERN.matcher(sb.toString());
			while(m.find()){
				results.add(m.group());
			}
		} catch (Exception e) {
			logger.addBuildLogEntry(e.getMessage());
		}
		return results;
	}

	/**
	 * Write a small HTML file into the task's output directory that redirects
	 * to the given ALM run-report link, and remember the link so it is not
	 * saved twice within the same build.
	 *
	 * @param link     the ALM report link
	 * @param idFilter regex removing everything up to the numeric run/entity id
	 */
	private static void createResultFile(TaskContext taskContext, String link, String idFilter, I18nBean i18nBean){
		savedALMRunLogPaths.add(link);
		String RunReportFileId = link.replaceAll(idFilter, "");
		if(com.hpe.application.automation.tools.common.StringUtils.isNullOrEmpty(RunReportFileId))
		{
			return;
		}
		String RunReportFileName = RUN_LOG_FILE_NAME+RunReportFileId+".html";
		String workingDirectory = getOutputFilePath(taskContext);
		File resultFile = new File(workingDirectory+"/"+RunReportFileName);
		link = "\""+link+"\"";
		// fix: use literal replace() instead of replaceAll() — the link is a
		// regex *replacement* string in replaceAll, so '$' or '\' in the link
		// would corrupt the output or throw
		String parameterizedResultsHtmlText = RUN_LOG_HTML_TEXT.replace(ALM_RUN_RESULTS_LINK_PARAMETER, link);
		try {
			FileUtils.writeStringToFile(resultFile, parameterizedResultsHtmlText);
		}
		catch(Exception ex){
			taskContext.getBuildLogger().addErrorLogEntry(i18nBean.getText(CAN_NOT_SAVE_RUN_LOG_MESSAGE));
		}
	}
}
apache-2.0
noobyang/AndroidStudy
lib/src/main/java/com/lee/lib/support/RecyclerViewActivity.java
1224
package com.lee.lib.support;

import android.os.Bundle;
import android.support.annotation.Nullable;

import com.lee.base.activity.BaseActivity;
import com.lee.lib.R;
import com.lee.lib.support.v7.widget.LinearLayoutManager;
import com.lee.lib.support.v7.widget.RecyclerView;

import java.util.ArrayList;
import java.util.List;

/**
 * Demo activity that renders 100 sample titles in a vertically scrolling RecyclerView.
 * </p>
 * Created by LiYang on 2017/7/15.
 */
public class RecyclerViewActivity extends BaseActivity {

    private RecyclerView recyclerView;

    @Override
    protected void onCreate(@Nullable Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_recycler_view);

        // Lay the rows out top-to-bottom.
        LinearLayoutManager layoutManager = new LinearLayoutManager(this);
        layoutManager.setOrientation(LinearLayoutManager.VERTICAL);

        recyclerView = (RecyclerView) findViewById(R.id.rv_content);
        recyclerView.setLayoutManager(layoutManager);

        // Build the 100 sample rows: "title 0" .. "title 99".
        List<String> titles = new ArrayList<>();
        int index = 0;
        while (index < 100) {
            titles.add("title " + index);
            index++;
        }

        RecyclerViewAdapter listAdapter = new RecyclerViewAdapter(this, titles);
        recyclerView.setAdapter(listAdapter);
    }
}
apache-2.0
haoyanjun21/jstorm
jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerReportError.java
1359
package com.alibaba.jstorm.daemon.worker; import com.alibaba.jstorm.cluster.StormClusterState; import com.alibaba.jstorm.task.error.ErrorConstants; import com.alibaba.jstorm.utils.TimeFormat; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Date; import java.util.Set; /** * @author xiaojian.fxj */ public class WorkerReportError { private static Logger LOG = LoggerFactory.getLogger(WorkerReportError.class); private StormClusterState zkCluster; private String hostName; public WorkerReportError(StormClusterState stormClusterState, String hostName) { this.zkCluster = stormClusterState; this.hostName = hostName; } public void report(String topologyId, Integer workerPort, Set<Integer> tasks, String error, int errorCode) { // Report worker's error to zk try { Date now = new Date(); String nowStr = TimeFormat.getSecond(now); String errorInfo = error + "on " + this.hostName + ":" + workerPort + "," + nowStr; for (Integer task : tasks) { zkCluster.report_task_error(topologyId, task, errorInfo, ErrorConstants.FATAL, errorCode); } } catch (Exception e) { LOG.error("Failed to update errors of port " + workerPort + " to ZK.", e); } } }
apache-2.0
seata/seata
sqlparser/seata-sqlparser-antlr/src/main/java/io/seata/sqlparser/antlr/mysql/visit/InsertStatementSqlVisitor.java
1346
/* * Copyright 1999-2019 Seata.io Group. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.seata.sqlparser.antlr.mysql.visit; import io.seata.sqlparser.antlr.mysql.MySqlContext; import io.seata.sqlparser.antlr.mysql.parser.MySqlParser; import io.seata.sqlparser.antlr.mysql.parser.MySqlParserBaseVisitor; /** * InsertStatementSqlVisitor * * @author zhihou */ public class InsertStatementSqlVisitor extends MySqlParserBaseVisitor<MySqlContext> { private MySqlContext mySqlContext; public InsertStatementSqlVisitor(MySqlContext mySqlContext) { this.mySqlContext = mySqlContext; } @Override public MySqlContext visitInsertStatement(MySqlParser.InsertStatementContext ctx) { return new InsertSpecificationSqlVisitor(this.mySqlContext).visitInsertStatement(ctx); } }
apache-2.0
anwar6953/incubator-tephra
tephra-hbase-compat-1.1-base/src/main/java/org/apache/tephra/hbase/txprune/HBaseTransactionPruningPlugin.java
18153
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.tephra.hbase.txprune; import com.google.common.base.Function; import com.google.common.collect.Iterables; import com.google.common.collect.Maps; import com.google.common.collect.Sets; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.util.Bytes; import org.apache.tephra.TxConstants; import org.apache.tephra.hbase.coprocessor.TransactionProcessor; import org.apache.tephra.txprune.TransactionPruningPlugin; import org.apache.tephra.util.TxUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.SortedSet; import java.util.TreeMap; import java.util.TreeSet; /** * Default implementation of the {@link 
TransactionPruningPlugin} for HBase. * * This plugin determines the prune upper bound for transactional HBase tables that use * coprocessor {@link TransactionProcessor}. * * <h3>State storage:</h3> * * This plugin expects the TransactionProcessor to save the prune upper bound for invalid transactions * after every major compaction of a region. Let's call this <i>(region, prune upper bound)</i>. * In addition, the plugin also persists the following information on a run at time <i>t</i> * <ul> * <li> * <i>(t, set of regions)</i>: Set of transactional regions at time <i>t</i>. * Transactional regions are regions of the tables that have the coprocessor TransactionProcessor * attached to them. * </li> * <li> * <i>(t, inactive transaction bound)</i>: This is the smallest not in-progress transaction that * will not have writes in any HBase regions that are created after time <i>t</i>. * This value is determined by the Transaction Service based on the transaction state at time <i>t</i> * and passed on to the plugin. * </li> * </ul> * * <h3>Computing prune upper bound:</h3> * * In a typical HBase instance, there can be a constant change in the number of regions due to region creations, * splits and merges. At any given time there can always be a region on which a major compaction has not been run. * Since the prune upper bound will get recorded for a region only after a major compaction, * using only the latest set of regions we may not be able to find the * prune upper bounds for all the current regions. Hence we persist the set of regions that exist at that time * of each run of the plugin, and use historical region set for time <i>t</i>, <i>t - 1</i>, etc. * to determine the prune upper bound. * * From the regions saved at time <i>t</i>, <i>t - 1</i>, etc., * the plugin tries to find the latest <i>(t, set of regions)</i> where all regions have been major compacted, * i.e, all regions have prune upper bound recorded in <i>(region, prune upper bound)</i>. 
* <br/> * If such a set is found for time <i>t1</i>, the prune upper bound returned by the plugin is the minimum of * <ul> * <li>Prune upper bounds of regions in set <i>(t1, set of regions)</i></li> * <li>Inactive transaction bound from <i>(t1, inactive transaction bound)</i></li> * </ul> * * <p/> * Above, when we find <i>(t1, set of regions)</i>, there may a region that was created after time <i>t1</i>, * but has a data write from an invalid transaction that is smaller than the prune upper bounds of all * regions in <i>(t1, set of regions)</i>. This is possible because <i>(region, prune upper bound)</i> persisted by * TransactionProcessor is always the latest prune upper bound for a region. * <br/> * However a region created after time <i>t1</i> cannot have writes from an invalid transaction that is smaller than * inactive transaction bound at the time the region was created. * Since we limit the plugin prune upper bound using <i>(t1, inactive transaction bound)</i>, * there should be no invalid transactions smaller than the plugin prune upper bound with writes in any * transactional region of this HBase instance. * * <p/> * Note: If your tables uses a transactional coprocessor other than TransactionProcessor, * then you may need to write a new plugin to compute prune upper bound for those tables. 
*/ @SuppressWarnings("WeakerAccess") public class HBaseTransactionPruningPlugin implements TransactionPruningPlugin { public static final Logger LOG = LoggerFactory.getLogger(HBaseTransactionPruningPlugin.class); protected Configuration conf; protected Connection connection; protected DataJanitorState dataJanitorState; @Override public void initialize(Configuration conf) throws IOException { this.conf = conf; this.connection = ConnectionFactory.createConnection(conf); final TableName stateTable = TableName.valueOf(conf.get(TxConstants.TransactionPruning.PRUNE_STATE_TABLE, TxConstants.TransactionPruning.DEFAULT_PRUNE_STATE_TABLE)); LOG.info("Initializing plugin with state table {}", stateTable.getNameWithNamespaceInclAsString()); createPruneTable(stateTable); this.dataJanitorState = new DataJanitorState(new DataJanitorState.TableSupplier() { @Override public Table get() throws IOException { return connection.getTable(stateTable); } }); } /** * Determines prune upper bound for the data store as mentioned above. */ @Override public long fetchPruneUpperBound(long time, long inactiveTransactionBound) throws IOException { LOG.debug("Fetching prune upper bound for time {} and inactive transaction bound {}", time, inactiveTransactionBound); if (time < 0 || inactiveTransactionBound < 0) { return -1; } // Get all the current transactional regions SortedSet<byte[]> transactionalRegions = getTransactionalRegions(); if (!transactionalRegions.isEmpty()) { LOG.debug("Saving {} transactional regions for time {}", transactionalRegions.size(), time); dataJanitorState.saveRegionsForTime(time, transactionalRegions); // Save inactive transaction bound for time as the final step. 
// We can then use its existence to make sure that the data for a given time is complete or not LOG.debug("Saving inactive transaction bound {} for time {}", inactiveTransactionBound, time); dataJanitorState.saveInactiveTransactionBoundForTime(time, inactiveTransactionBound); } return computePruneUpperBound(new TimeRegions(time, transactionalRegions)); } /** * After invalid list has been pruned, this cleans up state information that is no longer required. * This includes - * <ul> * <li> * <i>(region, prune upper bound)</i> - prune upper bound for regions that are older * than maxPrunedInvalid * </li> * <li> * <i>(t, set of regions) - Regions set that were recorded on or before the start time * of maxPrunedInvalid * </li> * <li> * (t, inactive transaction bound) - Smallest not in-progress transaction without any writes in new regions * information recorded on or before the start time of maxPrunedInvalid * </li> * </ul> */ @Override public void pruneComplete(long time, long maxPrunedInvalid) throws IOException { LOG.debug("Prune complete for time {} and prune upper bound {}", time, maxPrunedInvalid); if (time < 0 || maxPrunedInvalid < 0) { return; } // Get regions for the current time, so as to not delete the prune upper bounds for them. // The prune upper bounds for regions are recorded by TransactionProcessor and the deletion // is done by this class. To avoid update/delete race condition, we only delete prune upper // bounds for the stale regions. 
TimeRegions regionsToExclude = dataJanitorState.getRegionsOnOrBeforeTime(time); if (regionsToExclude != null) { LOG.debug("Deleting prune upper bounds smaller than {} for stale regions", maxPrunedInvalid); dataJanitorState.deletePruneUpperBounds(maxPrunedInvalid, regionsToExclude.getRegions()); } else { LOG.warn("Cannot find saved regions on or before time {}", time); } long pruneTime = TxUtils.getTimestamp(maxPrunedInvalid); LOG.debug("Deleting regions recorded before time {}", pruneTime); dataJanitorState.deleteAllRegionsOnOrBeforeTime(pruneTime); LOG.debug("Deleting inactive transaction bounds recorded on or before time {}", pruneTime); dataJanitorState.deleteInactiveTransactionBoundsOnOrBeforeTime(pruneTime); LOG.debug("Deleting empty regions recorded on or before time {}", pruneTime); dataJanitorState.deleteEmptyRegionsOnOrBeforeTime(pruneTime); } @Override public void destroy() { LOG.info("Stopping plugin..."); try { connection.close(); } catch (IOException e) { LOG.error("Got exception while closing HBase connection", e); } } /** * Create the prune state table given the {@link TableName} if the table doesn't exist already. 
* * @param stateTable prune state table name */ protected void createPruneTable(TableName stateTable) throws IOException { try (Admin admin = this.connection.getAdmin()) { if (admin.tableExists(stateTable)) { LOG.debug("Not creating pruneStateTable {} since it already exists.", stateTable.getNameWithNamespaceInclAsString()); return; } HTableDescriptor htd = new HTableDescriptor(stateTable); htd.addFamily(new HColumnDescriptor(DataJanitorState.FAMILY).setMaxVersions(1)); admin.createTable(htd); LOG.info("Created pruneTable {}", stateTable.getNameWithNamespaceInclAsString()); } catch (TableExistsException ex) { // Expected if the prune state table is being created at the same time by another client LOG.debug("Not creating pruneStateTable {} since it already exists.", stateTable.getNameWithNamespaceInclAsString(), ex); } } /** * Returns whether the table is a transactional table. By default, it is a table is identified as a transactional * table if it has a the coprocessor {@link TransactionProcessor} attached to it. Should be overriden if the users * attach a different coprocessor. * * @param tableDescriptor {@link HTableDescriptor} of the table * @return true if the table is transactional */ protected boolean isTransactionalTable(HTableDescriptor tableDescriptor) { return tableDescriptor.hasCoprocessor(TransactionProcessor.class.getName()); } protected SortedSet<byte[]> getTransactionalRegions() throws IOException { SortedSet<byte[]> regions = new TreeSet<>(Bytes.BYTES_COMPARATOR); try (Admin admin = connection.getAdmin()) { HTableDescriptor[] tableDescriptors = admin.listTables(); LOG.debug("Got {} tables to process", tableDescriptors == null ? 
0 : tableDescriptors.length); if (tableDescriptors != null) { for (HTableDescriptor tableDescriptor : tableDescriptors) { if (isTransactionalTable(tableDescriptor)) { List<HRegionInfo> tableRegions = admin.getTableRegions(tableDescriptor.getTableName()); LOG.debug("Regions for table {}: {}", tableDescriptor.getTableName(), tableRegions); if (tableRegions != null) { for (HRegionInfo region : tableRegions) { regions.add(region.getRegionName()); } } } else { LOG.debug("{} is not a transactional table", tableDescriptor.getTableName()); } } } } return regions; } /** * Try to find the latest set of regions in which all regions have been major compacted, and * compute prune upper bound from them. Starting from newest to oldest, this looks into the * region set that has been saved periodically, and joins it with the prune upper bound data * for a region recorded after a major compaction. * * @param timeRegions the latest set of regions * @return prune upper bound * @throws IOException when not able to talk to HBase */ private long computePruneUpperBound(TimeRegions timeRegions) throws IOException { do { LOG.debug("Computing prune upper bound for {}", timeRegions); SortedSet<byte[]> transactionalRegions = timeRegions.getRegions(); long time = timeRegions.getTime(); long inactiveTransactionBound = dataJanitorState.getInactiveTransactionBoundForTime(time); LOG.debug("Got inactive transaction bound {}", inactiveTransactionBound); // If inactiveTransactionBound is not recorded then that means the data is not complete for these regions if (inactiveTransactionBound == -1) { if (LOG.isDebugEnabled()) { LOG.debug("Ignoring regions for time {} as no inactiveTransactionBound was found for that time, " + "and hence the data must be incomplete", time); } continue; } // Get the prune upper bounds for all the transactional regions Map<byte[], Long> pruneUpperBoundRegions = dataJanitorState.getPruneUpperBoundForRegions(transactionalRegions); 
logPruneUpperBoundRegions(pruneUpperBoundRegions); // Use inactiveTransactionBound as the prune upper bound for the empty regions since the regions that are // recorded as empty after inactiveTransactionBoundTime will not have invalid data // for transactions started on or before inactiveTransactionBoundTime pruneUpperBoundRegions = handleEmptyRegions(inactiveTransactionBound, transactionalRegions, pruneUpperBoundRegions); // If prune upper bounds are found for all the transactional regions, then compute the prune upper bound // across all regions if (!transactionalRegions.isEmpty() && pruneUpperBoundRegions.size() == transactionalRegions.size()) { Long minPruneUpperBoundRegions = Collections.min(pruneUpperBoundRegions.values()); long pruneUpperBound = Math.min(inactiveTransactionBound, minPruneUpperBoundRegions); LOG.debug("Found prune upper bound {} for time {}", pruneUpperBound, time); return pruneUpperBound; } else { if (LOG.isDebugEnabled()) { Sets.SetView<byte[]> difference = Sets.difference(transactionalRegions, pruneUpperBoundRegions.keySet()); LOG.debug("Ignoring regions for time {} because the following regions did not record a pruneUpperBound: {}", time, Iterables.transform(difference, TimeRegions.BYTE_ARR_TO_STRING_FN)); } } timeRegions = dataJanitorState.getRegionsOnOrBeforeTime(time - 1); } while (timeRegions != null); return -1; } private Map<byte[], Long> handleEmptyRegions(long inactiveTransactionBound, SortedSet<byte[]> transactionalRegions, Map<byte[], Long> pruneUpperBoundRegions) throws IOException { long inactiveTransactionBoundTime = TxUtils.getTimestamp(inactiveTransactionBound); SortedSet<byte[]> emptyRegions = dataJanitorState.getEmptyRegionsAfterTime(inactiveTransactionBoundTime, transactionalRegions); LOG.debug("Got empty transactional regions for inactive transaction bound time {}: {}", inactiveTransactionBoundTime, Iterables.transform(emptyRegions, TimeRegions.BYTE_ARR_TO_STRING_FN)); // The regions that are recorded as empty after 
inactiveTransactionBoundTime will not have invalid data // for transactions started before or on inactiveTransactionBoundTime. Hence we can consider the prune upper bound // for these empty regions as inactiveTransactionBound Map<byte[], Long> pubWithEmptyRegions = new TreeMap<>(Bytes.BYTES_COMPARATOR); pubWithEmptyRegions.putAll(pruneUpperBoundRegions); for (byte[] emptyRegion : emptyRegions) { if (!pruneUpperBoundRegions.containsKey(emptyRegion)) { pubWithEmptyRegions.put(emptyRegion, inactiveTransactionBound); } } return Collections.unmodifiableMap(pubWithEmptyRegions); } private void logPruneUpperBoundRegions(Map<byte[], Long> pruneUpperBoundRegions) { if (LOG.isDebugEnabled()) { LOG.debug("Got region - prune upper bound map: {}", Iterables.transform(pruneUpperBoundRegions.entrySet(), new Function<Map.Entry<byte[], Long>, Map.Entry<String, Long>>() { @Override public Map.Entry<String, Long> apply(Map.Entry<byte[], Long> input) { String regionName = TimeRegions.BYTE_ARR_TO_STRING_FN.apply(input.getKey()); return Maps.immutableEntry(regionName, input.getValue()); } })); } } }
apache-2.0
jamesagnew/hapi-fhir
hapi-fhir-server/src/main/java/ca/uhn/fhir/rest/api/server/IPreResourceAccessDetails.java
1181
package ca.uhn.fhir.rest.api.server;

/*-
 * #%L
 * HAPI FHIR - Server Framework
 * %%
 * Copyright (C) 2014 - 2022 Smile CDR, Inc.
 * %%
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * #L%
 */

import org.hl7.fhir.instance.model.api.IBaseResource;

/**
 * This object is an abstraction for a server response that is going to
 * return one or more resources to the user. This can be used by interceptors
 * to make decisions about whether a resource should be visible or not
 * to the user making the request.
 */
public interface IPreResourceAccessDetails {

	/**
	 * Returns the number of resources in this response.
	 */
	int size();

	/**
	 * Returns the resource at the given position.
	 *
	 * @param theIndex zero-based index; expected range is {@code 0 .. size()-1}
	 */
	IBaseResource getResource(int theIndex);

	/**
	 * Marks the resource at the given position so that it is withheld from the
	 * response returned to the user.
	 *
	 * @param theIndex zero-based index of the resource to suppress
	 */
	void setDontReturnResourceAtIndex(int theIndex);
}
apache-2.0
SunghyunLim/easyCompany3
src/main/java/egovframework/rte/tex/com/service/EgovMailService.java
1369
/*
 * Copyright 2011 MOPAS(Ministry of Public Administration and Security).
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package egovframework.rte.tex.com.service;

import egovframework.rte.tex.mbr.service.MemberVO;

/**
 * Defines the interface for the mail feature.
 * @author Shin Hye-yeon, Runtime Environment Development Team
 * @since 2011.06.07
 * @version 1.0
 * @see <pre>
 * == Modification Information ==
 *
 *   Date         Author          Description
 *  -------      --------    ---------------------------
 *   2011.06.07   Shin Hye-yeon   Initial creation
 *
 * </pre>
 */
public interface EgovMailService {

	/**
	 * Sends an email to a member using the member's information.
	 * @param vo member information (recipient details)
	 * @return whether the mail was sent
	 */
	public boolean sendEmailTo(MemberVO vo);
}
apache-2.0
wso2/carbon-data
components/data-services/org.wso2.carbon.dataservices.core/src/main/java/org/wso2/carbon/dataservices/core/description/query/ExpressionQuery.java
15136
/* * Copyright (c) 2015, WSO2 Inc. (http://www.wso2.org) All Rights Reserved. * * WSO2 Inc. licenses this file to you under the Apache License, * Version 2.0 (the "License"); you may not use this file except * in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.wso2.carbon.dataservices.core.description.query; import org.wso2.carbon.dataservices.common.DBConstants; import org.wso2.carbon.dataservices.core.DataServiceFault; import org.wso2.carbon.dataservices.core.description.event.EventTrigger; import org.wso2.carbon.dataservices.core.engine.DataService; import org.wso2.carbon.dataservices.core.engine.InternalParam; import org.wso2.carbon.dataservices.core.engine.InternalParamCollection; import org.wso2.carbon.dataservices.core.engine.ParamValue; import org.wso2.carbon.dataservices.core.engine.QueryParam; import org.wso2.carbon.dataservices.core.engine.Result; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; /** * This class represents Generalized Query Expressions which are similar to SQL and CQL. 
*/ public abstract class ExpressionQuery extends Query { private String query; private List<String> namedParamNames; private int paramCount; private static final String QUESTION_MARK = "?"; public ExpressionQuery(DataService dataService, String queryId, List<QueryParam> queryParams, String query, Result result, String configId, EventTrigger inputEventTrigger, EventTrigger outputEventTrigger, Map<String, String> advancedProperties, String inputNamespace) { super(dataService, queryId, queryParams, result, configId, inputEventTrigger, outputEventTrigger, advancedProperties, inputNamespace); this.query = query; } public String getQuery() { return query; } public int getParamCount() { return paramCount; } /** * Pre-processing of the query * * @param query Query * @throws DataServiceFault */ protected void init(String query) throws DataServiceFault { this.processNamedParams(query); this.query = createPreprocessedQueryFromQueryString(query); paramCount = calculateParamCount(this.query); } private void processNamedParams(String query) { Map<String, QueryParam> paramMap = new HashMap<>(); for (QueryParam param : this.getQueryParams()) { paramMap.put(param.getName(), param); } List<String> paramNames = this.extractParamNames(query, paramMap.keySet()); this.namedParamNames = new ArrayList<>(); QueryParam tmpParam; String tmpParamName; int tmpOrdinal; Set<String> checkedQueryParams = new HashSet<>(); Set<Integer> processedOrdinalsForNamedParams = new HashSet<>(); for (int i = 0; i < paramNames.size(); i++) { String tmp = paramNames.get(i); if (!tmp.equals(QUESTION_MARK)) { tmpParamName = tmp; tmpParam = paramMap.get(tmpParamName); if (tmpParam != null) { if (!checkedQueryParams.contains(tmpParamName)) { tmpParam.clearOrdinals(); checkedQueryParams.add(tmpParamName); } this.namedParamNames.add(tmpParamName); /* ordinals of named params */ tmpOrdinal = i + 1; tmpParam.addOrdinal(tmpOrdinal); processedOrdinalsForNamedParams.add(tmpOrdinal); } } } 
this.cleanupProcessedNamedParams(checkedQueryParams, processedOrdinalsForNamedParams, paramMap); } /** * This method is used to clean up the ordinal in the named parameter * scenario, where the Query may not have all the params as named parameters, * so other non-named parameters ordinals may clash with the processed one. */ private void cleanupProcessedNamedParams(Set<String> checkedQueryParams, Set<Integer> processedOrdinalsForNamedParams, Map<String, QueryParam> paramMap) { QueryParam tmpQueryParam; for (String paramName : paramMap.keySet()) { if (!checkedQueryParams.contains(paramName)) { tmpQueryParam = paramMap.get(paramName); /* unchecked query param can only have one ordinal */ if (processedOrdinalsForNamedParams.contains(tmpQueryParam.getOrdinal())) { /* set to a value that will not clash with valid ordinals */ tmpQueryParam.setOrdinal(0); } } } } private void sortStringsByLength(List<String> values) { Collections.sort(values, new Comparator<String>() { @Override public int compare(String lhs, String rhs) { return lhs.length() - rhs.length(); } }); } private String createPreprocessedQueryFromQueryString(String query) { /* get a copy of the param names */ List<String> values = new ArrayList<>(namedParamNames); /* sort the strings */ this.sortStringsByLength(values); /* * make it from largest to smallest, this is done to make sure, if there * are params like, :abcd,:abc, then the step of replacing :abc doesn't * also initially replace :abcd's substring as well */ Collections.reverse(values); for (String val : values) { /* replace named params with ?'s */ query = query.replaceAll(":" + val, QUESTION_MARK); } return query; } private List<String> extractParamNames(String query, Set<String> queryParams) { boolean doubleQuoteExists = false; boolean singleQuoteExists = false; List<String> paramNames = new ArrayList<>(); String tmpParam; for (int i = 0; i < query.length(); i++) { if (query.charAt(i) == '\'') { singleQuoteExists = !singleQuoteExists; } else if 
(query.charAt(i) == '\"') { doubleQuoteExists = !doubleQuoteExists; } else if (query.charAt(i) == '?' && !(doubleQuoteExists || singleQuoteExists)) { paramNames.add(QUESTION_MARK); } else if (query.charAt(i) == ':' && !(doubleQuoteExists || singleQuoteExists)) { /* check if the string is at the end */ if (i + 1 < query.length()) { /* * split params in situations like ":a,:b", ":a :b", ":a:b", * "(:a,:b)" */ tmpParam = query.substring(i + 1, query.length()).split(" |,|\\)|\\(|:|\\r|\\n|\\.")[0]; if (queryParams.contains(tmpParam)) { /* * only consider this as a parameter if it's in input * mappings */ paramNames.add(tmpParam); } } } } return paramNames; } private int calculateParamCount(String query) { int n = 0; boolean doubleQuoteExists = false; boolean singleQuoteExists = false; for (char ch : query.toCharArray()) { if (ch == '\'') { singleQuoteExists = !singleQuoteExists; } else if (ch == '\"') { doubleQuoteExists = !doubleQuoteExists; } else if (ch == '?' && !(doubleQuoteExists || singleQuoteExists)) { n++; } } return n; } /** * This method checks whether DataTypes.QUERY_STRING type parameters are available in the query * input mappings and returns a boolean value. * * @param params The parameters in the input mappings * @return The boolean value of the isDynamicQuery variable */ protected boolean isDynamicQuery(InternalParamCollection params) { boolean isDynamicQuery = false; InternalParam tmpParam; for (int i = 1; i <= params.getData().size(); i++) { tmpParam = params.getParam(i); if (DBConstants.DataTypes.QUERY_STRING.equals(tmpParam.getSqlType())) { isDynamicQuery = true; break; } } return isDynamicQuery; } /** * Returns the Query manipulated to suite the given parameters, e.g. adding * additional "?"'s for array types. 
*/ protected String createProcessedQuery(String query, InternalParamCollection params, int paramCount) { String currentQuery = query; int start = 0; Object[] vals; InternalParam param; ParamValue value; int count; for (int i = 1; i <= paramCount; i++) { param = params.getParam(i); if (param != null) { value = param.getValue(); /* * value can be null in stored proc OUT params, so it is simply * treated as a single param, because the number of elements in an * array cannot be calculated, since there's no actual value passed * in */ if (value != null && (value.getValueType() == ParamValue.PARAM_VALUE_ARRAY)) { count = (value.getArrayValue()).size(); } else { count = 1; } vals = this.expandQuery(start, count, currentQuery); start = (Integer) vals[0]; currentQuery = (String) vals[1]; } } return currentQuery; } /** * Given the starting position, this method searches for the first occurrence * of "?" and replace it with `count` "?"'s. Returns [0] - end position of * "?"'s, [1] - modified query. */ private Object[] expandQuery(int start, int count, String query) { StringBuilder result = new StringBuilder(); int n = query.length(); boolean doubleQuoteExists = false; boolean singleQuoteExists = false; int end = n; for (int i = start; i < n; i++) { if (query.charAt(i) == '\'') { singleQuoteExists = !singleQuoteExists; } else if (query.charAt(i) == '\"') { doubleQuoteExists = !doubleQuoteExists; } else if (query.charAt(i) == '?' 
&& !(doubleQuoteExists || singleQuoteExists)) { result.append(query.substring(0, i)); result.append(this.generateQuestionMarks(count)); end = result.length() + 1; if (i + 1 < n) { result.append(query.substring(i + 1)); } break; } } return new Object[] { end, result.toString() }; } private String generateQuestionMarks(int n) { StringBuilder builder = new StringBuilder(); for (int i = 0; i < n; i++) { builder.append(QUESTION_MARK); if (i + 1 < n) { builder.append(","); } } return builder.toString(); } /** * Modifies the Query to include the direct value of the parameters of type * "QUERY_STRING"; The Query will be recreated and the other parameters will * be re-organized to point to correct ordinal values. * * @return [0] The updated Query, [1] The updated parameter count */ protected Object[] processDynamicQuery(String query, InternalParamCollection params) { Integer[] paramIndices = this.extractQueryParamIndices(query); Map<String, QueryParam> tempParams = new HashMap<>(); int currentOrdinalDiff = 0; int currentParamIndexDiff = 0; InternalParam tmpParam; int paramIndex; String tmpValue; int resultParamCount = paramCount; for (QueryParam queryParam : this.getQueryParams()) { tempParams.put(queryParam.getName(), queryParam); } for (int ordinal = 1; ordinal <= paramCount; ordinal++) { tmpParam = params.getParam(ordinal); if (tmpParam == null && !(((SQLQuery)this).getSqlQueryType() == SQLQuery.QueryType.UPDATE)) { throw new RuntimeException("Parameters are not Defined Correctly, missing parameter ordinal - " + ordinal); } if (tmpParam == null && !(tempParams.get(namedParamNames.get(ordinal - 1)).isOptional())) { throw new RuntimeException("Parameters are not Defined Correctly, missing parameter ordinal - " + ordinal); } if (tmpParam != null && !(tempParams.get(tmpParam.getName()).isOptional()) && DBConstants.DataTypes.QUERY_STRING.equals(tmpParam.getSqlType())) { paramIndex = paramIndices[ordinal - 1] + currentParamIndexDiff; tmpValue = 
params.getParam(ordinal).getValue().getScalarValue(); currentParamIndexDiff += tmpValue.length() - 1; if (paramIndex + 1 < query.length()) { query = query.substring(0, paramIndex) + tmpValue + query.substring(paramIndex + 1); } else { query = query.substring(0, paramIndex) + tmpValue; } params.remove(ordinal); currentOrdinalDiff++; resultParamCount--; } else { if (params.getParam(ordinal) != null) { params.remove(ordinal); tmpParam.setOrdinal(ordinal - currentOrdinalDiff); params.addParam(tmpParam); } } } return new Object[] { query, resultParamCount }; } private Integer[] extractQueryParamIndices(String query) { List<Integer> result = new ArrayList<>(); boolean doubleQuoteExists = false; boolean singleQuoteExists = false; char[] data = query.toCharArray(); for (int i = 0; i < data.length; i++) { if (data[i] == '\'') { singleQuoteExists = !singleQuoteExists; } else if (data[i] == '\"') { doubleQuoteExists = !doubleQuoteExists; } else if (data[i] == '?' && !(doubleQuoteExists || singleQuoteExists)) { result.add(i); } } return result.toArray(new Integer[result.size()]); } }
apache-2.0
whiskeysierra/riptide
riptide-chaos/src/main/java/org/zalando/riptide/chaos/EmptyInputStream.java
333
package org.zalando.riptide.chaos;

import java.io.IOException;
import java.io.InputStream;

/**
 * An {@link InputStream} that is always at end-of-stream.
 *
 * <p>Stateless and immutable, hence the shared {@link #EMPTY} singleton.
 * The no-arg {@link InputStream#close()} inherited from the superclass is a
 * no-op, so the singleton may be closed safely any number of times.
 */
final class EmptyInputStream extends InputStream {

    /** Shared singleton instance; safe to reuse because the stream holds no state. */
    static final InputStream EMPTY = new EmptyInputStream();

    private EmptyInputStream() {
        // use EMPTY instead of constructing new instances
    }

    /**
     * Always signals end-of-stream.
     *
     * @return {@code -1}, per the {@link InputStream#read()} end-of-stream contract
     */
    @Override
    public int read() {
        // Narrowed from "throws IOException": this implementation can never
        // throw, and overrides may declare fewer checked exceptions. Callers
        // going through the InputStream type are unaffected.
        return -1;
    }
}
apache-2.0
Hurence/log-island
logisland-documentation/src/test/java/com/hurence/logisland/documentation/html/ProcessorDocumentationWriterTest.java
2287
/**
 * Copyright (C) 2016 Hurence (support@hurence.com)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *         http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.hurence.logisland.documentation.html;

import com.hurence.logisland.annotation.documentation.CapabilityDescription;
import com.hurence.logisland.documentation.DocumentationWriter;
import com.hurence.logisland.documentation.example.FullyDocumentedProcessor;
import com.hurence.logisland.documentation.example.NakedProcessor;
import com.hurence.logisland.documentation.rst.RstDocumentationWriter;
import org.junit.Test;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import static org.junit.Assert.assertNotNull;

/**
 * Smoke tests for the RST documentation writer: verify that documentation can
 * be generated for both a fully annotated processor and a bare one without
 * throwing.
 */
public class ProcessorDocumentationWriterTest {

    @Test
    public void testFullyDocumentedProcessor() throws IOException {
        FullyDocumentedProcessor processor = new FullyDocumentedProcessor();
        DocumentationWriter writer = new RstDocumentationWriter();

        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        writer.write(processor, baos);

        // Decode explicitly as UTF-8 instead of the platform default charset.
        String results = new String(baos.toByteArray(), StandardCharsets.UTF_8);
        assertNotNull(results);
        // TODO(review): this test previously computed `results` and a
        // CapabilityDescription annotation without asserting anything; add
        // content assertions (e.g. that the capability description text
        // appears in the generated RST) once the expected output is pinned.
    }

    @Test
    public void testNakedProcessor() throws IOException {
        NakedProcessor processor = new NakedProcessor();
        DocumentationWriter writer = new RstDocumentationWriter();

        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        writer.write(processor, baos);

        String results = new String(baos.toByteArray(), StandardCharsets.UTF_8);
        assertNotNull(results);

        // TODO(review): re-enable the original intent once assertContains is
        // available here:
        // no description
        // assertContains(results, "No description provided.");
        // no tags
        // assertContains(results, "None.");
        // properties
        // assertContains(results, "This component has no required or optional properties.");
    }
}
apache-2.0
sflyphotobooks/crp-batik
sources/org/apache/batik/dom/svg/SVGOMAnimatedEnumeration.java
6722
/*

   Licensed to the Apache Software Foundation (ASF) under one or more
   contributor license agreements.  See the NOTICE file distributed with
   this work for additional information regarding copyright ownership.
   The ASF licenses this file to You under the Apache License, Version 2.0
   (the "License"); you may not use this file except in compliance with
   the License.  You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

 */
package org.apache.batik.dom.svg;

import org.apache.batik.anim.values.AnimatableStringValue;
import org.apache.batik.anim.values.AnimatableValue;
import org.apache.batik.dom.anim.AnimationTarget;

import org.w3c.dom.Attr;
import org.w3c.dom.DOMException;
import org.w3c.dom.svg.SVGAnimatedEnumeration;

/**
 * This class provides an implementation of the {@link
 * SVGAnimatedEnumeration} interface.
 *
 * <p>The base value is parsed lazily from the backing attribute; index 0 of
 * {@link #values} plays the role of the "unknown" entry (it is returned when
 * the attribute text matches no enumeration value).
 *
 * @author <a href="mailto:stephane@hillion.org">Stephane Hillion</a>
 * @version $Id: SVGOMAnimatedEnumeration.java 527382 2007-04-11 04:31:58Z cam $
 */
public class SVGOMAnimatedEnumeration
        extends AbstractSVGAnimatedValue
        implements SVGAnimatedEnumeration {

    /**
     * The values in this enumeration.
     */
    protected String[] values;

    /**
     * The default value, if the attribute is not specified.
     */
    protected short defaultValue;

    /**
     * Whether the current base value is valid.
     */
    protected boolean valid;

    /**
     * The current base value.
     */
    protected short baseVal;

    /**
     * The current animated value.
     */
    protected short animVal;

    /**
     * Whether the value is changing, i.e. whether this object itself is
     * writing the attribute and attribute-mutation callbacks should not
     * invalidate the cached base value.
     */
    protected boolean changing;

    /**
     * Creates a new SVGOMAnimatedEnumeration.
     * @param elt The associated element.
     * @param ns The attribute's namespace URI.
     * @param ln The attribute's local name.
     * @param val The values in this enumeration.
     * @param def The default value to use.
     */
    public SVGOMAnimatedEnumeration(AbstractElement elt,
                                    String ns,
                                    String ln,
                                    String[] val,
                                    short def) {
        super(elt, ns, ln);
        values = val;
        defaultValue = def;
    }

    /**
     * <b>DOM</b>: Implements {@link SVGAnimatedEnumeration#getBaseVal()}.
     */
    public short getBaseVal() {
        if (!valid) {
            update();
        }
        return baseVal;
    }

    /**
     * Returns the base value as a string.
     */
    public String getBaseValAsString() {
        if (!valid) {
            update();
        }
        return values[baseVal];
    }

    /**
     * Updates the base value from the attribute, falling back to the
     * default value when the attribute is absent or empty.
     */
    protected void update() {
        String val = element.getAttributeNS(namespaceURI, localName);
        if (val.length() == 0) {
            baseVal = defaultValue;
        } else {
            baseVal = getEnumerationNumber(val);
        }
        valid = true;
    }

    /**
     * Returns the enumeration number of the specified string, or 0 (the
     * "unknown" entry) when the string matches no enumeration value.
     */
    protected short getEnumerationNumber(String s) {
        for (short i = 0; i < values.length; i++) {
            if (s.equals(values[i])) {
                return i;
            }
        }
        return 0;
    }

    /**
     * <b>DOM</b>: Implements {@link
     * SVGAnimatedEnumeration#setBaseVal(short)}.
     *
     * <p>Out-of-range values are silently ignored. The {@code changing}
     * flag suppresses cache invalidation in the attribute-mutation
     * callbacks while this object writes the attribute itself.
     */
    public void setBaseVal(short baseVal) throws DOMException {
        if (baseVal >= 0 && baseVal < values.length) {
            try {
                this.baseVal = baseVal;
                valid = true;
                changing = true;
                element.setAttributeNS(namespaceURI, localName,
                                       values[baseVal]);
            } finally {
                changing = false;
            }
        }
    }

    /**
     * <b>DOM</b>: Implements {@link SVGAnimatedEnumeration#getAnimVal()}.
     */
    public short getAnimVal() {
        if (hasAnimVal) {
            return animVal;
        }
        if (!valid) {
            update();
        }
        return baseVal;
    }

    /**
     * Gets the current animated value, throwing an exception if the attribute
     * is malformed (i.e. the base value resolved to the "unknown" entry 0).
     */
    public short getCheckedVal() {
        if (hasAnimVal) {
            return animVal;
        }
        if (!valid) {
            update();
        }
        if (baseVal == 0) {
            throw new LiveAttributeException
                (element, localName,
                 LiveAttributeException.ERR_ATTRIBUTE_MALFORMED,
                 getBaseValAsString());
        }
        return baseVal;
    }

    /**
     * Returns the base value of the attribute as an {@link AnimatableValue}.
     */
    public AnimatableValue getUnderlyingValue(AnimationTarget target) {
        return new AnimatableStringValue(target, getBaseValAsString());
    }

    /**
     * Called when an Attr node has been added.
     */
    public void attrAdded(Attr node, String newv) {
        if (!changing) {
            valid = false;
        }
        fireBaseAttributeListeners();
        if (!hasAnimVal) {
            fireAnimatedAttributeListeners();
        }
    }

    /**
     * Updates the animated value with the given {@link AnimatableValue}.
     */
    protected void updateAnimatedValue(AnimatableValue val) {
        if (val == null) {
            hasAnimVal = false;
        } else {
            hasAnimVal = true;
            this.animVal =
                getEnumerationNumber(((AnimatableStringValue) val).getString());
        }
        // Fix: fire exactly once whether the animated value was set or
        // cleared. The previous code also fired inside the else-branch,
        // double-notifying listeners on every non-null update.
        fireAnimatedAttributeListeners();
    }

    /**
     * Called when an Attr node has been modified.
     */
    public void attrModified(Attr node, String oldv, String newv) {
        if (!changing) {
            valid = false;
        }
        fireBaseAttributeListeners();
        if (!hasAnimVal) {
            fireAnimatedAttributeListeners();
        }
    }

    /**
     * Called when an Attr node has been removed.
     */
    public void attrRemoved(Attr node, String oldv) {
        if (!changing) {
            valid = false;
        }
        fireBaseAttributeListeners();
        if (!hasAnimVal) {
            fireAnimatedAttributeListeners();
        }
    }
}
apache-2.0
rmetzger/flink
flink-runtime/src/test/java/org/apache/flink/runtime/source/coordinator/SourceCoordinatorTest.java
20715
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.runtime.source.coordinator;

import org.apache.flink.api.connector.source.Boundedness;
import org.apache.flink.api.connector.source.Source;
import org.apache.flink.api.connector.source.SourceEvent;
import org.apache.flink.api.connector.source.SourceReader;
import org.apache.flink.api.connector.source.SourceReaderContext;
import org.apache.flink.api.connector.source.SplitEnumerator;
import org.apache.flink.api.connector.source.SplitEnumeratorContext;
import org.apache.flink.api.connector.source.mocks.MockSourceSplit;
import org.apache.flink.api.connector.source.mocks.MockSourceSplitSerializer;
import org.apache.flink.api.connector.source.mocks.MockSplitEnumerator;
import org.apache.flink.api.connector.source.mocks.MockSplitEnumeratorCheckpointSerializer;
import org.apache.flink.api.connector.source.mocks.MockSplitEnumeratorContext;
import org.apache.flink.core.io.SimpleVersionedSerializer;
import org.apache.flink.core.memory.DataOutputSerializer;
import org.apache.flink.runtime.jobgraph.OperatorID;
import org.apache.flink.runtime.operators.coordination.MockOperatorCoordinatorContext;
import org.apache.flink.runtime.operators.coordination.OperatorCoordinator;
import org.apache.flink.runtime.source.event.SourceEventWrapper;

import org.junit.Test;

import javax.annotation.Nullable;

import java.net.URL;
import java.net.URLClassLoader;
import java.time.Duration;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.function.Supplier;

import static org.apache.flink.core.testutils.CommonTestUtils.waitUtil;
import static org.apache.flink.runtime.source.coordinator.CoordinatorTestUtils.verifyAssignment;
import static org.apache.flink.runtime.source.coordinator.CoordinatorTestUtils.verifyException;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

/** Unit tests for {@link SourceCoordinator}. */
@SuppressWarnings("serial")
public class SourceCoordinatorTest extends SourceCoordinatorTestBase {

    @Test
    public void testThrowExceptionWhenNotStarted() {
        // The following methods should only be invoked after the source coordinator has started.
        String failureMessage = "Call should fail when source coordinator has not started yet.";
        verifyException(
                () -> sourceCoordinator.notifyCheckpointComplete(100L),
                failureMessage,
                "The coordinator has not started yet.");
        verifyException(
                () -> sourceCoordinator.handleEventFromOperator(0, null),
                failureMessage,
                "The coordinator has not started yet.");
        verifyException(
                () -> sourceCoordinator.subtaskFailed(0, null),
                failureMessage,
                "The coordinator has not started yet.");
        verifyException(
                () -> sourceCoordinator.checkpointCoordinator(100L, new CompletableFuture<>()),
                failureMessage,
                "The coordinator has not started yet.");
    }

    // NOTE(review): method name kept for compatibility; "Rest" is presumably a
    // typo for "Reset".
    @Test
    public void testRestCheckpointAfterCoordinatorStarted() throws Exception {
        // Resetting to a checkpoint is only allowed before the coordinator has started.
        sourceCoordinator.start();
        verifyException(
                () -> sourceCoordinator.resetToCheckpoint(0L, null),
                "Reset to checkpoint should fail after the coordinator has started",
                "The coordinator can only be reset if it was not yet started");
    }

    @Test
    public void testStart() throws Exception {
        sourceCoordinator.start();
        waitForCoordinatorToProcessActions();

        assertTrue(getEnumerator().isStarted());
    }

    @Test
    public void testClosed() throws Exception {
        sourceCoordinator.start();
        sourceCoordinator.close();
        assertTrue(getEnumerator().isClosed());
    }

    @Test
    public void testHandleSourceEvent() throws Exception {
        sourceReady();

        SourceEvent sourceEvent = new SourceEvent() {};
        sourceCoordinator.handleEventFromOperator(0, new SourceEventWrapper(sourceEvent));
        waitForCoordinatorToProcessActions();

        // The wrapped event must reach the enumerator unchanged.
        assertEquals(1, getEnumerator().getHandledSourceEvent().size());
        assertEquals(sourceEvent, getEnumerator().getHandledSourceEvent().get(0));
    }

    @Test
    public void testCheckpointCoordinatorAndRestore() throws Exception {
        sourceReady();
        addTestingSplitSet(6);

        // Assign two of the six splits to reader 0 before taking the checkpoint.
        registerReader(0);
        getEnumerator().executeAssignOneSplit(0);
        getEnumerator().executeAssignOneSplit(0);

        final CompletableFuture<byte[]> checkpointFuture = new CompletableFuture<>();
        sourceCoordinator.checkpointCoordinator(100L, checkpointFuture);
        final byte[] bytes = checkpointFuture.get();

        // restore from the checkpoint.
        SourceCoordinator<?, ?> restoredCoordinator = getNewSourceCoordinator();
        restoredCoordinator.resetToCheckpoint(100L, bytes);
        TestingSplitEnumerator<?> restoredEnumerator =
                (TestingSplitEnumerator<?>) restoredCoordinator.getEnumerator();
        SourceCoordinatorContext<?> restoredContext = restoredCoordinator.getContext();

        assertEquals(
                "2 splits should have been assigned to reader 0",
                4,
                restoredEnumerator.getUnassignedSplits().size());
        assertTrue(restoredEnumerator.getContext().registeredReaders().isEmpty());
        assertEquals(
                "Registered readers should not be recovered by restoring",
                0,
                restoredContext.registeredReaders().size());
    }

    @Test
    public void testSubtaskFailedAndRevertUncompletedAssignments() throws Exception {
        sourceReady();
        addTestingSplitSet(6);

        // two splits pending for checkpoint 100
        registerReader(0);
        getEnumerator().executeAssignOneSplit(0);
        getEnumerator().executeAssignOneSplit(0);
        sourceCoordinator.checkpointCoordinator(100L, new CompletableFuture<>());

        // one more split pending for checkpoint 101
        getEnumerator().addNewSplits(new MockSourceSplit(6));
        getEnumerator().executeAssignOneSplit(0);
        sourceCoordinator.checkpointCoordinator(101L, new CompletableFuture<>());

        // check the state.
        waitForCoordinatorToProcessActions();
        assertEquals(4, getEnumerator().getUnassignedSplits().size());
        assertTrue(splitSplitAssignmentTracker.uncheckpointedAssignments().isEmpty());
        verifyAssignment(
                Arrays.asList("0", "1"),
                splitSplitAssignmentTracker.assignmentsByCheckpointId().get(100L).get(0));
        verifyAssignment(
                Collections.singletonList("2"),
                splitSplitAssignmentTracker.assignmentsByCheckpointId(101L).get(0));

        // none of the checkpoints is confirmed, we fail and revert to the previous one
        sourceCoordinator.subtaskFailed(0, null);
        sourceCoordinator.subtaskReset(0, 99L);
        waitForCoordinatorToProcessActions();

        assertFalse(
                "Reader 0 should have been unregistered.",
                context.registeredReaders().containsKey(0));
        // The tracker should have reverted all the splits assignment to reader 0.
        for (Map<Integer, ?> assignment :
                splitSplitAssignmentTracker.assignmentsByCheckpointId().values()) {
            assertFalse(
                    "Assignment in uncompleted checkpoint should have been reverted.",
                    assignment.containsKey(0));
        }
        assertFalse(splitSplitAssignmentTracker.uncheckpointedAssignments().containsKey(0));
        // The split enumerator should now contain the splits that used to be
        // assigned to reader 0.
        assertEquals(7, getEnumerator().getUnassignedSplits().size());
    }

    @Test
    public void testFailedSubtaskDoNotRevertCompletedCheckpoint() throws Exception {
        sourceReady();
        addTestingSplitSet(6);

        // Assign some splits to reader 0 then take snapshot 100.
        registerReader(0);
        getEnumerator().executeAssignOneSplit(0);
        getEnumerator().executeAssignOneSplit(0);
        sourceCoordinator.checkpointCoordinator(100L, new CompletableFuture<>());

        // Complete checkpoint 100, then fail the subtask: completed assignments
        // must NOT be reverted.
        sourceCoordinator.notifyCheckpointComplete(100L);
        sourceCoordinator.subtaskFailed(0, null);
        waitForCoordinatorToProcessActions();

        assertEquals(100L, (long) getEnumerator().getSuccessfulCheckpoints().get(0));
        assertFalse(context.registeredReaders().containsKey(0));
        assertEquals(4, getEnumerator().getUnassignedSplits().size());
        assertFalse(splitSplitAssignmentTracker.uncheckpointedAssignments().containsKey(0));
        assertTrue(splitSplitAssignmentTracker.assignmentsByCheckpointId().isEmpty());
    }

    @Test
    public void testFailJobWhenExceptionThrownFromStart() throws Exception {
        final RuntimeException failureReason = new RuntimeException("Artificial Exception");
        final SplitEnumerator<MockSourceSplit, Set<MockSourceSplit>> splitEnumerator =
                new MockSplitEnumerator(1, new MockSplitEnumeratorContext<>(1)) {
                    @Override
                    public void start() {
                        throw failureReason;
                    }
                };

        final SourceCoordinator<?, ?> coordinator =
                new SourceCoordinator<>(
                        OPERATOR_NAME,
                        coordinatorExecutor,
                        new EnumeratorCreatingSource<>(() -> splitEnumerator),
                        context);
        coordinator.start();

        waitUtil(
                () -> operatorCoordinatorContext.isJobFailed(),
                Duration.ofSeconds(10),
                "The job should have failed due to the artificial exception.");
        assertEquals(failureReason, operatorCoordinatorContext.getJobFailureReason());
    }

    @Test
    public void testErrorThrownFromSplitEnumerator() throws Exception {
        final Error error = new Error("Test Error");
        final SplitEnumerator<MockSourceSplit, Set<MockSourceSplit>> splitEnumerator =
                new MockSplitEnumerator(1, new MockSplitEnumeratorContext<>(1)) {
                    @Override
                    public void handleSourceEvent(int subtaskId, SourceEvent sourceEvent) {
                        throw error;
                    }
                };

        final SourceCoordinator<?, ?> coordinator =
                new SourceCoordinator<>(
                        OPERATOR_NAME,
                        coordinatorExecutor,
                        new EnumeratorCreatingSource<>(() -> splitEnumerator),
                        context);
        coordinator.start();
        coordinator.handleEventFromOperator(1, new SourceEventWrapper(new SourceEvent() {}));

        waitUtil(
                () -> operatorCoordinatorContext.isJobFailed(),
                Duration.ofSeconds(10),
                "The job should have failed due to the artificial exception.");
        assertEquals(error, operatorCoordinatorContext.getJobFailureReason());
    }

    @Test
    public void testUserClassLoaderWhenCreatingNewEnumerator() throws Exception {
        final ClassLoader testClassLoader = new URLClassLoader(new URL[0]);
        final OperatorCoordinator.Context context =
                new MockOperatorCoordinatorContext(new OperatorID(), testClassLoader);
        final EnumeratorCreatingSource<?, ClassLoaderTestEnumerator> source =
                new EnumeratorCreatingSource<>(ClassLoaderTestEnumerator::new);
        final SourceCoordinatorProvider<?> provider =
                new SourceCoordinatorProvider<>("testOperator", context.getOperatorId(), source, 1);

        final OperatorCoordinator coordinator = provider.getCoordinator(context);
        coordinator.start();

        // Both the enumerator's constructor and its start() thread must see the
        // user class loader as the thread context class loader.
        final ClassLoaderTestEnumerator enumerator = source.createEnumeratorFuture.get();
        assertSame(testClassLoader, enumerator.constructorClassLoader);
        assertSame(testClassLoader, enumerator.threadClassLoader.get());

        // cleanup
        coordinator.close();
    }

    @Test
    public void testUserClassLoaderWhenRestoringEnumerator() throws Exception {
        final ClassLoader testClassLoader = new URLClassLoader(new URL[0]);
        final OperatorCoordinator.Context context =
                new MockOperatorCoordinatorContext(new OperatorID(), testClassLoader);
        final EnumeratorCreatingSource<?, ClassLoaderTestEnumerator> source =
                new EnumeratorCreatingSource<>(ClassLoaderTestEnumerator::new);
        final SourceCoordinatorProvider<?> provider =
                new SourceCoordinatorProvider<>("testOperator", context.getOperatorId(), source, 1);

        final OperatorCoordinator coordinator = provider.getCoordinator(context);
        coordinator.resetToCheckpoint(1L, createEmptyCheckpoint());
        coordinator.start();

        final ClassLoaderTestEnumerator enumerator = source.restoreEnumeratorFuture.get();
        assertSame(testClassLoader, enumerator.constructorClassLoader);
        assertSame(testClassLoader, enumerator.threadClassLoader.get());

        // cleanup
        coordinator.close();
    }

    @Test
    public void testSerdeBackwardCompatibility() throws Exception {
        sourceReady();
        addTestingSplitSet(6);

        // Build checkpoint data with serde version 0
        final TestingSplitEnumerator<MockSourceSplit> enumerator = getEnumerator();
        final Set<MockSourceSplit> splits = new HashSet<>();
        enumerator.runInEnumThreadAndSync(() -> splits.addAll(enumerator.snapshotState(1L)));
        final byte[] checkpointDataForV0Serde = createCheckpointDataWithSerdeV0(splits);

        // Restore from checkpoint data with serde version 0 to test backward compatibility
        SourceCoordinator<?, ?> restoredCoordinator = getNewSourceCoordinator();
        restoredCoordinator.resetToCheckpoint(15213L, checkpointDataForV0Serde);
        TestingSplitEnumerator<?> restoredEnumerator =
                (TestingSplitEnumerator<?>) restoredCoordinator.getEnumerator();
        SourceCoordinatorContext<?> restoredContext = restoredCoordinator.getContext();

        // Check if enumerator is restored correctly
        assertEquals(splits, restoredEnumerator.getUnassignedSplits());
        assertTrue(restoredEnumerator.getHandledSourceEvent().isEmpty());
        assertEquals(0, restoredContext.registeredReaders().size());
    }

    // ------------------------------------------------------------------------
    // test helpers
    // ------------------------------------------------------------------------

    /**
     * Hand-crafts checkpoint bytes in the version-0 wire layout so the current
     * coordinator's backward-compatible deserialization path can be exercised.
     */
    private byte[] createCheckpointDataWithSerdeV0(Set<MockSourceSplit> splits) throws Exception {
        final MockSplitEnumeratorCheckpointSerializer enumChkptSerializer =
                new MockSplitEnumeratorCheckpointSerializer();
        final DataOutputSerializer serializer = new DataOutputSerializer(32);

        serializer.writeInt(SourceCoordinatorSerdeUtils.VERSION_0);
        serializer.writeInt(enumChkptSerializer.getVersion());

        final byte[] serializedEnumChkpt = enumChkptSerializer.serialize(splits);
        serializer.writeInt(serializedEnumChkpt.length);
        serializer.write(serializedEnumChkpt);

        // Version 0 wrote number of reader, see FLINK-21452
        serializer.writeInt(0);
        // Version 0 wrote split assignment tracker:
        serializer.writeInt(0); // SplitSerializer version used in assignment tracker
        serializer.writeInt(0); // Number of checkpoint in assignment tracker

        return serializer.getCopyOfBuffer();
    }

    // NOTE(review): a private helper `check(Runnable)` that submitted the
    // runnable to coordinatorExecutor and failed on exception was removed here:
    // it had no call sites anywhere in this class (dead code).

    private static byte[] createEmptyCheckpoint() throws Exception {
        return SourceCoordinator.writeCheckpointBytes(
                Collections.emptySet(), new MockSplitEnumeratorCheckpointSerializer());
    }

    // ------------------------------------------------------------------------
    // test mocks
    // ------------------------------------------------------------------------

    /**
     * A {@link SplitEnumerator} that records the context class loaders seen at
     * construction time and inside {@code start()}, for the class-loader tests.
     */
    private static final class ClassLoaderTestEnumerator
            implements SplitEnumerator<MockSourceSplit, Set<MockSourceSplit>> {

        final CompletableFuture<ClassLoader> threadClassLoader = new CompletableFuture<>();
        final ClassLoader constructorClassLoader;

        public ClassLoaderTestEnumerator() {
            this.constructorClassLoader = Thread.currentThread().getContextClassLoader();
        }

        @Override
        public void start() {
            threadClassLoader.complete(Thread.currentThread().getContextClassLoader());
        }

        @Override
        public void handleSplitRequest(int subtaskId, @Nullable String requesterHostname) {
            throw new UnsupportedOperationException();
        }

        @Override
        public void addSplitsBack(List<MockSourceSplit> splits, int subtaskId) {
            throw new UnsupportedOperationException();
        }

        @Override
        public void addReader(int subtaskId) {
            throw new UnsupportedOperationException();
        }

        @Override
        public Set<MockSourceSplit> snapshotState(long checkpointId) throws Exception {
            throw new UnsupportedOperationException();
        }

        @Override
        public void close() {}
    }

    /**
     * A {@link Source} whose enumerators come from an injected factory; it
     * exposes futures that complete with the enumerator instance created via
     * the create or restore path, so tests can observe which path was taken.
     */
    private static final class EnumeratorCreatingSource<
                    T, EnumT extends SplitEnumerator<MockSourceSplit, Set<MockSourceSplit>>>
            implements Source<T, MockSourceSplit, Set<MockSourceSplit>> {

        final CompletableFuture<EnumT> createEnumeratorFuture = new CompletableFuture<>();
        final CompletableFuture<EnumT> restoreEnumeratorFuture = new CompletableFuture<>();
        private final Supplier<EnumT> enumeratorFactory;

        public EnumeratorCreatingSource(Supplier<EnumT> enumeratorFactory) {
            this.enumeratorFactory = enumeratorFactory;
        }

        @Override
        public Boundedness getBoundedness() {
            return Boundedness.CONTINUOUS_UNBOUNDED;
        }

        @Override
        public SourceReader<T, MockSourceSplit> createReader(SourceReaderContext readerContext) {
            throw new UnsupportedOperationException();
        }

        @Override
        public SplitEnumerator<MockSourceSplit, Set<MockSourceSplit>> createEnumerator(
                SplitEnumeratorContext<MockSourceSplit> enumContext) {
            final EnumT enumerator = enumeratorFactory.get();
            createEnumeratorFuture.complete(enumerator);
            return enumerator;
        }

        @Override
        public SplitEnumerator<MockSourceSplit, Set<MockSourceSplit>> restoreEnumerator(
                SplitEnumeratorContext<MockSourceSplit> enumContext,
                Set<MockSourceSplit> checkpoint) {
            final EnumT enumerator = enumeratorFactory.get();
            restoreEnumeratorFuture.complete(enumerator);
            return enumerator;
        }

        @Override
        public SimpleVersionedSerializer<MockSourceSplit> getSplitSerializer() {
            return new MockSourceSplitSerializer();
        }

        @Override
        public SimpleVersionedSerializer<Set<MockSourceSplit>>
                getEnumeratorCheckpointSerializer() {
            return new MockSplitEnumeratorCheckpointSerializer();
        }
    }
}
apache-2.0
akirakw/asakusafw
hive-project/core-v2/src/main/java/com/asakusafw/directio/hive/orc/v2/CompatibilityV2.java
8764
/**
 * Copyright 2011-2019 Asakusa Framework Team.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.asakusafw.directio.hive.orc.v2;

import java.io.IOException;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.OptionalInt;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.io.orc.OrcFile;
import org.apache.hadoop.hive.ql.io.orc.Reader;
import org.apache.orc.CompressionKind;
import org.apache.orc.OrcConf;
import org.apache.orc.StripeInformation;

import com.asakusafw.directio.hive.orc.AbstractOrcFileFormat;
import com.asakusafw.directio.hive.orc.Compatibility;
import com.asakusafw.directio.hive.orc.OrcFileInput;
import com.asakusafw.directio.hive.orc.OrcFileOutput;
import com.asakusafw.directio.hive.orc.OrcFormatConfiguration;
import com.asakusafw.directio.hive.serde.DataModelInspector;
import com.asakusafw.directio.hive.serde.DataModelMapping;
import com.asakusafw.directio.hive.util.CompatibilityUtil;
import com.asakusafw.runtime.directio.Counter;
import com.asakusafw.runtime.directio.DirectInputFragment;
import com.asakusafw.runtime.directio.hadoop.BlockMap;
import com.asakusafw.runtime.directio.hadoop.StripedDataFormat.InputContext;
import com.asakusafw.runtime.io.ModelInput;
import com.asakusafw.runtime.io.ModelOutput;

/**
 * compatibility layer for Direct I/O ORC File support.
 * @since 0.10.3
 */
public class CompatibilityV2 extends Compatibility {

    static final Log LOG = LogFactory.getLog(CompatibilityV2.class);

    /**
     * Returns the selection priority of this compatibility layer.
     * Reports {@code 2} when the runtime looks like Hive 2 (either the detected Hive major
     * version is 2, or the Hive-2-era class {@code org.apache.orc.OrcConf} is loadable),
     * and {@code -1} (not applicable) otherwise.
     */
    @Override
    protected int getPriority() {
        OptionalInt version = CompatibilityUtil.getHiveMajorVersion();
        if (version.isPresent()) {
            // an explicit Hive version is available: accept only major version 2
            int v = version.getAsInt();
            return v == 2 ? 2 : -1;
        }
        try {
            // no explicit version: probe for a class that only exists in the Hive 2 line
            Class.forName("org.apache.orc.OrcConf"); //$NON-NLS-1$
            return 2;
        } catch (ClassNotFoundException | LinkageError e) {
            // probe failure just means "this layer does not apply"; log for diagnosis only
            if (LOG.isDebugEnabled()) {
                LOG.debug("error occurred while initializing compatibility class", e); //$NON-NLS-1$
            }
            return -1;
        }
    }

    /**
     * returns extra table property map from the format configuration.
     * Only non-null settings (compression kind, stripe size) are emitted.
     * @param format the format configuration
     * @return the extra table property map
     */
    @Override
    protected Map<String, String> collectPropertyMap(OrcFormatConfiguration format) {
        Map<String, String> properties = new HashMap<>();
        putTableProperty(properties, OrcConf.COMPRESS, format.getCompressionKind());
        putTableProperty(properties, OrcConf.STRIPE_SIZE, format.getStripeSize());
        return properties;
    }

    // adds property.getAttribute() -> value.toString(), skipping null values
    private static void putTableProperty(Map<String, String> results, OrcConf property, Object value) {
        if (value == null) {
            return;
        }
        results.put(property.getAttribute(), value.toString());
    }

    /**
     * Computes one {@link DirectInputFragment} per ORC stripe of each input file,
     * using the file's HDFS block locations to pick preferred owner nodes.
     */
    @Override
    public List<DirectInputFragment> computeInputFragments(
            AbstractOrcFileFormat<?> format,
            InputContext context) throws IOException, InterruptedException {
        // TODO parallel?
        List<DirectInputFragment> results = new ArrayList<>();
        for (FileStatus status : context.getInputFiles()) {
            if (LOG.isInfoEnabled()) {
                LOG.info(MessageFormat.format(
                        Messages.getString("Compatibility.infoLoadMetadata"), //$NON-NLS-1$
                        context.getDataType().getSimpleName(),
                        status.getPath()));
            }
            // reads only the ORC footer/metadata, not the stripe payloads
            Reader orc = OrcFile.createReader(context.getFileSystem(), status.getPath());
            if (LOG.isInfoEnabled()) {
                LOG.info(MessageFormat.format(
                        Messages.getString("Compatibility.infoAnalyzeMetadata"), //$NON-NLS-1$
                        context.getDataType().getSimpleName(),
                        status.getPath(),
                        orc.getNumberOfRows(),
                        orc.getRawDataSize()));
            }
            BlockMap blockMap = BlockMap.create(
                    status.getPath().toString(),
                    status.getLen(),
                    BlockMap.computeBlocks(context.getFileSystem(), status),
                    false);
            // one fragment per stripe; the block map resolves the byte range to data-local hosts
            for (StripeInformation stripe : orc.getStripes()) {
                long begin = stripe.getOffset();
                long end = begin + stripe.getLength();
                DirectInputFragment fragment = blockMap.get(begin, end);
                if (LOG.isDebugEnabled()) {
                    LOG.debug(MessageFormat.format(
                            "Detect ORCFile stripe: path={0}, rows={1}, range={2}+{3}, allocation={4}", //$NON-NLS-1$
                            fragment.getPath(),
                            stripe.getNumberOfRows(),
                            fragment.getOffset(),
                            fragment.getSize(),
                            fragment.getOwnerNodeNames()));
                }
                results.add(fragment);
            }
        }
        return results;
    }

    /**
     * Creates a {@link ModelInput} that reads the given byte range of an ORC file.
     * A negative {@code fragmentSize} means "read to the end of the file"
     * (the actual file length is looked up from the file system).
     * Note: the {@code dataType} argument is not consulted here; the data model
     * comes from {@code format.getDataModelDescriptor()}.
     */
    @Override
    public <T> ModelInput<T> createInput(
            AbstractOrcFileFormat<T> format,
            Class<? extends T> dataType,
            FileSystem fileSystem, Path path,
            long offset, long fragmentSize,
            Counter counter) throws IOException, InterruptedException {
        // copy only the explicitly configured mapping options; leave the rest at their defaults
        DataModelMapping driverConf = new DataModelMapping();
        OrcFormatConfiguration conf = format.getFormatConfiguration();
        if (conf.getFieldMappingStrategy() != null) {
            driverConf.setFieldMappingStrategy(conf.getFieldMappingStrategy());
        }
        if (conf.getOnMissingSource() != null) {
            driverConf.setOnMissingSource(conf.getOnMissingSource());
        }
        if (conf.getOnMissingTarget() != null) {
            driverConf.setOnMissingTarget(conf.getOnMissingTarget());
        }
        if (conf.getOnIncompatibleType() != null) {
            driverConf.setOnIncompatibleType(conf.getOnIncompatibleType());
        }
        long size = fragmentSize;
        if (size < 0L) {
            FileStatus stat = fileSystem.getFileStatus(path);
            size = stat.getLen();
        }
        return new OrcFileInput<>(
                format.getDataModelDescriptor(), driverConf,
                fileSystem, path, offset, size, counter);
    }

    /**
     * Creates a {@link ModelOutput} that writes an ORC file, applying the optional
     * format version / compression kind / stripe size settings from the configuration.
     */
    @Override
    public <T> ModelOutput<T> createOutput(
            AbstractOrcFileFormat<T> format,
            Class<? extends T> dataType,
            FileSystem fileSystem, Path path,
            Counter counter) throws IOException, InterruptedException {
        OrcFormatConfiguration conf = format.getFormatConfiguration();
        OrcFile.WriterOptions options = OrcFile.writerOptions(format.getConf());
        options.fileSystem(fileSystem);
        options.inspector(new DataModelInspector(format.getDataModelDescriptor()));
        org.apache.orc.OrcFile.Version formatVersion = conf.getFormatVersion(org.apache.orc.OrcFile.Version.class);
        if (formatVersion != null) {
            options.version(formatVersion);
        }
        CompressionKind compressionKind = conf.getCompressionKind(getCompressionKindClass());
        if (compressionKind != null) {
            options.compress(compressionKind);
        }
        Long stripeSize = conf.getStripeSize();
        if (stripeSize != null) {
            options.stripeSize(stripeSize);
        }
        return new OrcFileOutput<>(format.getDataModelDescriptor(), path, fileSystem, options, counter);
    }

    /**
     * Resolves an ORC format version by name; empty when the name is unknown
     * (OrcFile.Version.byName throws for unrecognized names, which is mapped to empty here).
     */
    @Override
    public Optional<OrcFile.Version> findVersionId(String name) {
        try {
            return Optional.ofNullable(OrcFile.Version.byName(name));
        } catch (RuntimeException e) {
            return Optional.empty();
        }
    }

    @Override
    public Class<CompressionKind> getCompressionKindClass() {
        return CompressionKind.class;
    }
}
apache-2.0
oscerd/camel
components/camel-cometd/src/main/java/org/apache/camel/component/cometd/CometdEndpoint.java
6642
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.camel.component.cometd;

import java.net.URI;
import java.net.URISyntaxException;
import java.util.Map;

import org.apache.camel.Consumer;
import org.apache.camel.Processor;
import org.apache.camel.Producer;
import org.apache.camel.impl.DefaultEndpoint;
import org.apache.camel.spi.Metadata;
import org.apache.camel.spi.UriEndpoint;
import org.apache.camel.spi.UriParam;
import org.apache.camel.spi.UriPath;
import org.apache.camel.util.ObjectHelper;

/**
 * Endpoint for Camel Cometd.
 */
@UriEndpoint(scheme = "cometd,cometds", title = "CometD", syntax = "cometd:protocol:host:port/channelName",
        consumerClass = CometdConsumer.class, label = "http,websocket")
public class CometdEndpoint extends DefaultEndpoint {

    private CometdComponent component;

    // parsed form of the endpoint URI; getProtocol()/getPort()/getPath() read from this,
    // not from the @UriPath fields below (those exist for URI-syntax documentation/binding)
    private URI uri;
    @UriPath @Metadata(required = "true")
    private String protocol;
    @UriPath @Metadata(required = "true")
    private String host;
    @UriPath @Metadata(required = "true")
    private int port;
    @UriPath @Metadata(required = "true")
    private String channelName;
    @UriParam
    private String baseResource;
    @UriParam(defaultValue = "240000")
    private int timeout = 240000;
    @UriParam
    private int interval;
    @UriParam(defaultValue = "30000")
    private int maxInterval = 30000;
    @UriParam(defaultValue = "1500")
    private int multiFrameInterval = 1500;
    @UriParam(defaultValue = "true")
    private boolean jsonCommented = true;
    @UriParam
    private boolean sessionHeadersEnabled;
    @UriParam(defaultValue = "1")
    private int logLevel = 1;
    @UriParam
    private boolean crossOriginFilterOn;
    @UriParam
    private String allowedOrigins;
    @UriParam
    private String filterPath;
    @UriParam(defaultValue = "true")
    private boolean disconnectLocalSession = true;

    /**
     * Creates the endpoint from the raw endpoint URI; {@code remaining} (the part after
     * the scheme-specific prefix) becomes the channel name.
     * @throws IllegalArgumentException if the URI cannot be parsed
     */
    public CometdEndpoint(CometdComponent component, String uri, String remaining, Map<String, Object> parameters) {
        super(uri, component);
        this.component = component;
        try {
            this.uri = new URI(uri);
            this.protocol = this.uri.getScheme();
            this.host = this.uri.getHost();
            this.port = this.uri.getPort();
            this.channelName = remaining;
        } catch (URISyntaxException e) {
            throw new IllegalArgumentException(e);
        }
    }

    public Producer createProducer() throws Exception {
        ObjectHelper.notNull(component, "component");
        CometdProducer producer = new CometdProducer(this);
        return producer;
    }

    public Consumer createConsumer(Processor processor) throws Exception {
        ObjectHelper.notNull(component, "component");
        CometdConsumer consumer = new CometdConsumer(this, processor);
        configureConsumer(consumer);
        return consumer;
    }

    // connect/disconnect delegate session management to the owning component
    public void connect(CometdProducerConsumer prodcons) throws Exception {
        component.connect(prodcons);
    }

    public void disconnect(CometdProducerConsumer prodcons) throws Exception {
        component.disconnect(prodcons);
    }

    public CometdComponent getComponent() {
        return component;
    }

    // each producer/consumer must get its own endpoint instance
    public boolean isSingleton() {
        return false;
    }

    public String getPath() {
        return uri.getPath();
    }

    /**
     * Returns the port from the endpoint URI, defaulting to 443 for "cometds"
     * and 80 otherwise when the URI does not specify one.
     */
    public int getPort() {
        if (uri.getPort() == -1) {
            if ("cometds".equals(getProtocol())) {
                return 443;
            } else {
                return 80;
            }
        }
        return uri.getPort();
    }

    public String getProtocol() {
        return uri.getScheme();
    }

    public URI getUri() {
        return uri;
    }

    public String getBaseResource() {
        return baseResource;
    }

    public void setBaseResource(String baseResource) {
        this.baseResource = baseResource;
    }

    public int getTimeout() {
        return timeout;
    }

    public void setTimeout(int timeout) {
        this.timeout = timeout;
    }

    public int getInterval() {
        return interval;
    }

    public void setInterval(int interval) {
        this.interval = interval;
    }

    public int getMaxInterval() {
        return maxInterval;
    }

    public void setMaxInterval(int maxInterval) {
        this.maxInterval = maxInterval;
    }

    public int getMultiFrameInterval() {
        return multiFrameInterval;
    }

    public void setMultiFrameInterval(int multiFrameInterval) {
        this.multiFrameInterval = multiFrameInterval;
    }

    public boolean isJsonCommented() {
        return jsonCommented;
    }

    public void setJsonCommented(boolean commented) {
        jsonCommented = commented;
    }

    public void setSessionHeadersEnabled(boolean enable) {
        this.sessionHeadersEnabled = enable;
    }

    public boolean areSessionHeadersEnabled() {
        return sessionHeadersEnabled;
    }

    public int getLogLevel() {
        return logLevel;
    }

    public void setLogLevel(int logLevel) {
        this.logLevel = logLevel;
    }

    public String getAllowedOrigins() {
        return allowedOrigins;
    }

    public void setAllowedOrigins(String allowedOrigins) {
        this.allowedOrigins = allowedOrigins;
    }

    public boolean isCrossOriginFilterOn() {
        return crossOriginFilterOn;
    }

    public void setCrossOriginFilterOn(boolean crossOriginFilterOn) {
        this.crossOriginFilterOn = crossOriginFilterOn;
    }

    public String getFilterPath() {
        return filterPath;
    }

    public void setFilterPath(String filterPath) {
        this.filterPath = filterPath;
    }

    public boolean isDisconnectLocalSession() {
        return disconnectLocalSession;
    }

    public void setDisconnectLocalSession(boolean disconnectLocalSession) {
        this.disconnectLocalSession = disconnectLocalSession;
    }
}
apache-2.0
dbmalkovsky/flowable-engine
modules/flowable-engine/src/main/java/org/flowable/engine/runtime/ProcessInstanceBuilder.java
6563
/* Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.flowable.engine.runtime;

import java.util.Map;

import org.flowable.common.engine.api.FlowableIllegalArgumentException;
import org.flowable.common.engine.api.FlowableObjectNotFoundException;
import org.flowable.form.api.FormInfo;

/**
 * Helper for starting a new ProcessInstance.
 *
 * An instance can be obtained through {@link org.flowable.engine.RuntimeService#createProcessInstanceBuilder()}.
 *
 * Either processDefinitionId or processDefinitionKey should be set before calling {@link #start()}
 * to start a process instance.
 *
 * @author Bassam Al-Sarori
 * @author Joram Barrez
 */
public interface ProcessInstanceBuilder {

    /**
     * Set the id of the process definition.
     **/
    ProcessInstanceBuilder processDefinitionId(String processDefinitionId);

    /**
     * Set the key of the process definition; the latest version of the process definition
     * with the given key is used. If processDefinitionId was set this will be ignored.
     **/
    ProcessInstanceBuilder processDefinitionKey(String processDefinitionKey);

    /**
     * When looking up a process definition by key, first look for a process definition
     * within the given parent deployment, then fall back to the latest process definition
     * with the given key.
     * <p>
     * This is typically needed when the ProcessInstanceBuilder is called, for example,
     * from the case engine to start a process instance and it needs to
     * look up the process definition in the same deployment as the case.
     */
    ProcessInstanceBuilder processDefinitionParentDeploymentId(String parentDeploymentId);

    /**
     * Set the message name used to look up the process definition that starts the process instance.
     */
    ProcessInstanceBuilder messageName(String messageName);

    /**
     * Set the name of the process instance.
     **/
    ProcessInstanceBuilder name(String processInstanceName);

    /**
     * Set the businessKey of the process instance.
     **/
    ProcessInstanceBuilder businessKey(String businessKey);

    /**
     * Set the businessStatus of the process instance.
     **/
    ProcessInstanceBuilder businessStatus(String businessStatus);

    /**
     * Sets the callback identifier of the process instance.
     */
    ProcessInstanceBuilder callbackId(String callbackId);

    /**
     * Sets the callback type of the process instance.
     */
    ProcessInstanceBuilder callbackType(String callbackType);

    /**
     * Sets the reference identifier of the process instance.
     */
    ProcessInstanceBuilder referenceId(String referenceId);

    /**
     * Sets the reference type of the process instance.
     */
    ProcessInstanceBuilder referenceType(String referenceType);

    /**
     * Set the optional instance id of the stage this process instance belongs to,
     * if it runs in the context of a CMMN case.
     */
    ProcessInstanceBuilder stageInstanceId(String stageInstanceId);

    /**
     * Set the tenantId used to look up the process definition.
     **/
    ProcessInstanceBuilder tenantId(String tenantId);

    /**
     * Indicator to override the tenant id of the process definition with the provided value.
     * The tenantId to look up the process definition should still be provided if needed.
     */
    ProcessInstanceBuilder overrideProcessDefinitionTenantId(String tenantId);

    /**
     * When starting a process instance from the CMMN engine process task, the process instance id
     * needs to be known beforehand to store entity links and callback references before the
     * process instance is started.
     */
    ProcessInstanceBuilder predefineProcessInstanceId(String processInstanceId);

    /**
     * Sets the process variables.
     */
    ProcessInstanceBuilder variables(Map<String, Object> variables);

    /**
     * Adds a variable to the process instance.
     **/
    ProcessInstanceBuilder variable(String variableName, Object value);

    /**
     * Sets the transient variables.
     */
    ProcessInstanceBuilder transientVariables(Map<String, Object> transientVariables);

    /**
     * Adds a transient variable to the process instance.
     */
    ProcessInstanceBuilder transientVariable(String variableName, Object value);

    /**
     * Adds variables from a start form to the process instance.
     */
    ProcessInstanceBuilder startFormVariables(Map<String, Object> startFormVariables);

    /**
     * Adds one variable from a start form to the process instance.
     */
    ProcessInstanceBuilder startFormVariable(String variableName, Object value);

    /**
     * Allows to set an outcome for a start form.
     */
    ProcessInstanceBuilder outcome(String outcome);

    /**
     * Start the process instance with the given form variables from the given {@code formInfo}.
     * This is different than {@link #startFormVariables(Map)} and it can be used in addition to that.
     */
    ProcessInstanceBuilder formVariables(Map<String, Object> formVariables, FormInfo formInfo, String formOutcome);

    /**
     * Use the default tenant as a fallback when the process definition was not found
     * by key and tenant id.
     */
    ProcessInstanceBuilder fallbackToDefaultTenant();

    /**
     * Start the process instance.
     *
     * @throws FlowableIllegalArgumentException
     *             if processDefinitionKey and processDefinitionId are null
     * @throws FlowableObjectNotFoundException
     *             when no process definition is deployed with the given processDefinitionKey or processDefinitionId
     **/
    ProcessInstance start();

    /**
     * Start the process instance asynchronously.
     *
     * @throws FlowableIllegalArgumentException
     *             if processDefinitionKey and processDefinitionId are null
     * @throws FlowableObjectNotFoundException
     *             when no process definition is deployed with the given processDefinitionKey or processDefinitionId
     **/
    ProcessInstance startAsync();

}
apache-2.0
smartpcr/aws-sdk-java-resources
aws-resources-iam/src/main/java/com/amazonaws/resources/identitymanagement/SigningCertificate.java
7946
/*
 * Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.resources.identitymanagement;

import java.util.Date;

import com.amazonaws.resources.ResultCapture;
import com.amazonaws.services.identitymanagement.model.DeleteSigningCertificateRequest;
import com.amazonaws.services.identitymanagement.model.UpdateSigningCertificateRequest;

/**
 * The <code>SigningCertificate</code> resource.
 * Each <code>SigningCertificate</code> object is uniquely identified by these
 * identifier(s):
 * <ul>
 *   <li>Id</li>
 * </ul>
 */
public interface SigningCertificate {

    /**
     * Returns true if this resource's attributes have been loaded. If this
     * method returns {@code false}, calls to attribute getter methods on this
     * instance will make an implicit call to {@code load()} to retrieve the
     * value.
     */
    boolean isLoaded();

    /**
     * Gets the value of the Id identifier. This method always directly returns
     * the identifier and never involves a service call.
     */
    String getId();

    /**
     * Gets the value of the UploadDate attribute, implicitly calling
     * {@code load()} first if this resource is not yet loaded.
     */
    Date getUploadDate();

    /**
     * Gets the value of the Status attribute, implicitly calling
     * {@code load()} first if this resource is not yet loaded.
     */
    String getStatus();

    /**
     * Gets the value of the CertificateBody attribute, implicitly calling
     * {@code load()} first if this resource is not yet loaded.
     */
    String getCertificateBody();

    /**
     * Gets the value of the UserName attribute, implicitly calling
     * {@code load()} first if this resource is not yet loaded.
     */
    String getUserName();

    /**
     * Performs the <code>Deactivate</code> action.
     *
     * <p>
     * The following request parameters are populated from the data of this
     * <code>SigningCertificate</code> resource, overriding any conflicting
     * value already set on the request:
     * <ul>
     *   <li><b><code>CertificateId</code></b> - mapped from the <code>Id</code> identifier.</li>
     *   <li><b><code>Status</code></b> - constant value <code>Inactive</code>.</li>
     * </ul>
     *
     * @see UpdateSigningCertificateRequest
     */
    void deactivate(UpdateSigningCertificateRequest request);

    /**
     * Performs the <code>Deactivate</code> action and uses a ResultCapture to
     * retrieve the low-level client response. Request parameters are populated
     * as described in {@code deactivate(UpdateSigningCertificateRequest)}.
     *
     * @see UpdateSigningCertificateRequest
     */
    void deactivate(UpdateSigningCertificateRequest request,
            ResultCapture<Void> extractor);

    /**
     * The convenient method form for the <code>Deactivate</code> action.
     *
     * @see #deactivate(UpdateSigningCertificateRequest)
     */
    void deactivate();

    /**
     * The convenient method form for the <code>Deactivate</code> action.
     *
     * @see #deactivate(UpdateSigningCertificateRequest, ResultCapture)
     */
    void deactivate(ResultCapture<Void> extractor);

    /**
     * Performs the <code>Activate</code> action.
     *
     * <p>
     * The following request parameters are populated from the data of this
     * <code>SigningCertificate</code> resource, overriding any conflicting
     * value already set on the request:
     * <ul>
     *   <li><b><code>CertificateId</code></b> - mapped from the <code>Id</code> identifier.</li>
     *   <li><b><code>Status</code></b> - constant value <code>Active</code>.</li>
     * </ul>
     *
     * @see UpdateSigningCertificateRequest
     */
    void activate(UpdateSigningCertificateRequest request);

    /**
     * Performs the <code>Activate</code> action and uses a ResultCapture to
     * retrieve the low-level client response. Request parameters are populated
     * as described in {@code activate(UpdateSigningCertificateRequest)}.
     *
     * @see UpdateSigningCertificateRequest
     */
    void activate(UpdateSigningCertificateRequest request,
            ResultCapture<Void> extractor);

    /**
     * The convenient method form for the <code>Activate</code> action.
     *
     * @see #activate(UpdateSigningCertificateRequest)
     */
    void activate();

    /**
     * The convenient method form for the <code>Activate</code> action.
     *
     * @see #activate(UpdateSigningCertificateRequest, ResultCapture)
     */
    void activate(ResultCapture<Void> extractor);

    /**
     * Performs the <code>Delete</code> action.
     *
     * <p>
     * The following request parameter is populated from the data of this
     * <code>SigningCertificate</code> resource, overriding any conflicting
     * value already set on the request:
     * <ul>
     *   <li><b><code>CertificateId</code></b> - mapped from the <code>Id</code> identifier.</li>
     * </ul>
     *
     * @see DeleteSigningCertificateRequest
     */
    void delete(DeleteSigningCertificateRequest request);

    /**
     * Performs the <code>Delete</code> action and uses a ResultCapture to
     * retrieve the low-level client response. Request parameters are populated
     * as described in {@code delete(DeleteSigningCertificateRequest)}.
     *
     * @see DeleteSigningCertificateRequest
     */
    void delete(DeleteSigningCertificateRequest request,
            ResultCapture<Void> extractor);

    /**
     * The convenient method form for the <code>Delete</code> action.
     *
     * @see #delete(DeleteSigningCertificateRequest)
     */
    void delete();

    /**
     * The convenient method form for the <code>Delete</code> action.
     *
     * @see #delete(DeleteSigningCertificateRequest, ResultCapture)
     */
    void delete(ResultCapture<Void> extractor);
}
apache-2.0
andrewmains12/hbase
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
55256
/* * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile; import java.io.ByteArrayOutputStream; import java.io.DataInput; import java.io.DataInputStream; import java.io.DataOutput; import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.concurrent.atomic.AtomicReference; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.hfile.HFile.CachingBlockReader; import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; import org.apache.hadoop.io.WritableUtils; import 
org.apache.hadoop.util.StringUtils; /** * Provides functionality to write ({@link BlockIndexWriter}) and read * ({@link BlockIndexReader}) single-level and multi-level block indexes. * * Examples of how to use the block index writer can be found in * {@link org.apache.hadoop.hbase.util.CompoundBloomFilterWriter} and * {@link HFileWriterImpl}. Examples of how to use the reader can be * found in {@link HFileWriterImpl} and TestHFileBlockIndex. */ @InterfaceAudience.Private public class HFileBlockIndex { private static final Log LOG = LogFactory.getLog(HFileBlockIndex.class); static final int DEFAULT_MAX_CHUNK_SIZE = 128 * 1024; /** * The maximum size guideline for index blocks (both leaf, intermediate, and * root). If not specified, <code>DEFAULT_MAX_CHUNK_SIZE</code> is used. */ public static final String MAX_CHUNK_SIZE_KEY = "hfile.index.block.max.size"; /** * The number of bytes stored in each "secondary index" entry in addition to * key bytes in the non-root index block format. The first long is the file * offset of the deeper-level block the entry points to, and the int that * follows is that block's on-disk size without including header. */ static final int SECONDARY_INDEX_ENTRY_OVERHEAD = Bytes.SIZEOF_INT + Bytes.SIZEOF_LONG; /** * Error message when trying to use inline block API in single-level mode. */ private static final String INLINE_BLOCKS_NOT_ALLOWED = "Inline blocks are not allowed in the single-level-only mode"; /** * The size of a meta-data record used for finding the mid-key in a * multi-level index. Consists of the middle leaf-level index block offset * (long), its on-disk size without header included (int), and the mid-key * entry's zero-based index in that leaf index block. */ private static final int MID_KEY_METADATA_SIZE = Bytes.SIZEOF_LONG + 2 * Bytes.SIZEOF_INT; /** * The reader will always hold the root level index in the memory. 
Index * blocks at all other levels will be cached in the LRU cache in practice, * although this API does not enforce that. * * All non-root (leaf and intermediate) index blocks contain what we call a * "secondary index": an array of offsets to the entries within the block. * This allows us to do binary search for the entry corresponding to the * given key without having to deserialize the block. */ public static class BlockIndexReader implements HeapSize { /** Needed doing lookup on blocks. */ private final KVComparator comparator; // Root-level data. private byte[][] blockKeys; private long[] blockOffsets; private int[] blockDataSizes; private int rootCount = 0; // Mid-key metadata. private long midLeafBlockOffset = -1; private int midLeafBlockOnDiskSize = -1; private int midKeyEntry = -1; /** Pre-computed mid-key */ private AtomicReference<byte[]> midKey = new AtomicReference<byte[]>(); /** * The number of levels in the block index tree. One if there is only root * level, two for root and leaf levels, etc. */ private int searchTreeLevel; /** A way to read {@link HFile} blocks at a given offset */ private CachingBlockReader cachingBlockReader; public BlockIndexReader(final KVComparator c, final int treeLevel, final CachingBlockReader cachingBlockReader) { this(c, treeLevel); this.cachingBlockReader = cachingBlockReader; } public BlockIndexReader(final KVComparator c, final int treeLevel) { comparator = c; searchTreeLevel = treeLevel; } /** * @return true if the block index is empty. */ public boolean isEmpty() { return blockKeys.length == 0; } /** * Verifies that the block index is non-empty and throws an * {@link IllegalStateException} otherwise. */ public void ensureNonEmpty() { if (blockKeys.length == 0) { throw new IllegalStateException("Block index is empty or not loaded"); } } /** * Return the data block which contains this key. This function will only * be called when the HFile version is larger than 1. 
* * @param key the key we are looking for * @param currentBlock the current block, to avoid re-reading the same block * @param cacheBlocks * @param pread * @param isCompaction * @param expectedDataBlockEncoding the data block encoding the caller is * expecting the data block to be in, or null to not perform this * check and return the block irrespective of the encoding * @return reader a basic way to load blocks * @throws IOException */ public HFileBlock seekToDataBlock(final Cell key, HFileBlock currentBlock, boolean cacheBlocks, boolean pread, boolean isCompaction, DataBlockEncoding expectedDataBlockEncoding) throws IOException { BlockWithScanInfo blockWithScanInfo = loadDataBlockWithScanInfo(key, currentBlock, cacheBlocks, pread, isCompaction, expectedDataBlockEncoding); if (blockWithScanInfo == null) { return null; } else { return blockWithScanInfo.getHFileBlock(); } } /** * Return the BlockWithScanInfo which contains the DataBlock with other scan * info such as nextIndexedKey. This function will only be called when the * HFile version is larger than 1. * * @param key * the key we are looking for * @param currentBlock * the current block, to avoid re-reading the same block * @param cacheBlocks * @param pread * @param isCompaction * @param expectedDataBlockEncoding the data block encoding the caller is * expecting the data block to be in, or null to not perform this * check and return the block irrespective of the encoding. * @return the BlockWithScanInfo which contains the DataBlock with other * scan info such as nextIndexedKey. 
   * @throws IOException
   */
  public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock,
      boolean cacheBlocks, boolean pread, boolean isCompaction,
      DataBlockEncoding expectedDataBlockEncoding) throws IOException {
    // Locate the root-level entry whose key range could contain the search key.
    int rootLevelIndex = rootBlockContainingKey(key);
    if (rootLevelIndex < 0 || rootLevelIndex >= blockOffsets.length) {
      // Key sorts before the first indexed key: this file cannot contain it.
      return null;
    }

    // the next indexed key
    Cell nextIndexedKey = null;

    // Read the next-level (intermediate or leaf) index block.
    long currentOffset = blockOffsets[rootLevelIndex];
    int currentOnDiskSize = blockDataSizes[rootLevelIndex];

    if (rootLevelIndex < blockKeys.length - 1) {
      nextIndexedKey = new KeyValue.KeyOnlyKeyValue(blockKeys[rootLevelIndex + 1]);
    } else {
      nextIndexedKey = HConstants.NO_NEXT_INDEXED_KEY;
    }

    int lookupLevel = 1; // How many levels deep we are in our lookup.
    int index = -1;

    HFileBlock block;
    while (true) {
      if (currentBlock != null && currentBlock.getOffset() == currentOffset) {
        // Avoid reading the same block again, even with caching turned off.
        // This is crucial for compaction-type workload which might have
        // caching turned off. This is like a one-block cache inside the
        // scanner.
        block = currentBlock;
      } else {
        // Call HFile's caching block reader API. We always cache index
        // blocks, otherwise we might get terrible performance.
        boolean shouldCache = cacheBlocks || (lookupLevel < searchTreeLevel);
        BlockType expectedBlockType;
        if (lookupLevel < searchTreeLevel - 1) {
          expectedBlockType = BlockType.INTERMEDIATE_INDEX;
        } else if (lookupLevel == searchTreeLevel - 1) {
          expectedBlockType = BlockType.LEAF_INDEX;
        } else {
          // this also accounts for ENCODED_DATA
          expectedBlockType = BlockType.DATA;
        }
        block = cachingBlockReader.readBlock(currentOffset,
            currentOnDiskSize, shouldCache, pread, isCompaction, true,
            expectedBlockType, expectedDataBlockEncoding);
      }

      if (block == null) {
        throw new IOException("Failed to read block at offset " +
            currentOffset + ", onDiskSize=" + currentOnDiskSize);
      }

      // Found a data block, break the loop and check our level in the tree.
      if (block.getBlockType().isData()) {
        break;
      }

      // Not a data block. This must be a leaf-level or intermediate-level
      // index block. We don't allow going deeper than searchTreeLevel.
      if (++lookupLevel > searchTreeLevel) {
        throw new IOException("Search Tree Level overflow: lookupLevel="+
            lookupLevel + ", searchTreeLevel=" + searchTreeLevel);
      }

      // Locate the entry corresponding to the given key in the non-root
      // (leaf or intermediate-level) index block.
      ByteBuffer buffer = block.getBufferWithoutHeader();
      index = locateNonRootIndexEntry(buffer, key, comparator);
      if (index == -1) {
        // This has to be changed
        // For now change this to key value
        KeyValue kv = KeyValueUtil.ensureKeyValue(key);
        throw new IOException("The key "
            + Bytes.toStringBinary(kv.getKey(), kv.getKeyOffset(), kv.getKeyLength())
            + " is before the" + " first key of the non-root index block " + block);
      }

      // locateNonRootIndexEntry positioned the buffer at the matching
      // (offset, onDiskSize, firstKey) tuple; descend into that child block.
      currentOffset = buffer.getLong();
      currentOnDiskSize = buffer.getInt();

      // Only update next indexed key if there is a next indexed key in the current level
      byte[] tmpNextIndexedKey = getNonRootIndexedKey(buffer, index + 1);
      if (tmpNextIndexedKey != null) {
        nextIndexedKey = new KeyValue.KeyOnlyKeyValue(tmpNextIndexedKey);
      }
    }

    if (lookupLevel != searchTreeLevel) {
      throw new IOException("Reached a data block at level " + lookupLevel +
          " but the number of levels is " + searchTreeLevel);
    }

    // set the next indexed key for the current block.
    BlockWithScanInfo blockWithScanInfo = new BlockWithScanInfo(block, nextIndexedKey);
    return blockWithScanInfo;
  }

  /**
   * An approximation to the {@link HFile}'s mid-key. Operates on block
   * boundaries, and does not go inside blocks. In other words, returns the
   * first key of the middle block of the file.
   *
   * @return the first key of the middle block
   */
  public byte[] midkey() throws IOException {
    if (rootCount == 0)
      throw new IOException("HFile empty");

    // Return the cached mid-key if it has already been computed.
    byte[] targetMidKey = this.midKey.get();
    if (targetMidKey != null) {
      return targetMidKey;
    }

    if (midLeafBlockOffset >= 0) {
      if (cachingBlockReader == null) {
        throw new IOException("Have to read the middle leaf block but " +
            "no block reader available");
      }

      // Caching, using pread, assuming this is not a compaction.
      HFileBlock midLeafBlock = cachingBlockReader.readBlock(
          midLeafBlockOffset, midLeafBlockOnDiskSize, true, true, false, true,
          BlockType.LEAF_INDEX, null);

      // Decode the (relative offset, length) of the mid-key entry from the
      // leaf block's secondary index, then copy out the key bytes.
      ByteBuffer b = midLeafBlock.getBufferWithoutHeader();
      int numDataBlocks = b.getInt();
      int keyRelOffset = b.getInt(Bytes.SIZEOF_INT * (midKeyEntry + 1));
      int keyLen = b.getInt(Bytes.SIZEOF_INT * (midKeyEntry + 2)) - keyRelOffset;
      int keyOffset = Bytes.SIZEOF_INT * (numDataBlocks + 2) + keyRelOffset
          + SECONDARY_INDEX_ENTRY_OVERHEAD;
      targetMidKey = ByteBufferUtils.toBytes(b, keyOffset, keyLen);
    } else {
      // The middle of the root-level index.
      targetMidKey = blockKeys[rootCount / 2];
    }

    // Cache the computed mid-key for subsequent calls.
    this.midKey.set(targetMidKey);
    return targetMidKey;
  }

  /**
   * @param i from 0 to {@link #getRootBlockCount() - 1}
   */
  public byte[] getRootBlockKey(int i) {
    return blockKeys[i];
  }

  /**
   * @param i from 0 to {@link #getRootBlockCount() - 1}
   */
  public long getRootBlockOffset(int i) {
    return blockOffsets[i];
  }

  /**
   * @param i zero-based index of a root-level block
   * @return the on-disk size of the root-level block for version 2, or the
   *         uncompressed size for version 1
   */
  public int getRootBlockDataSize(int i) {
    return blockDataSizes[i];
  }

  /**
   * @return the number of root-level blocks in this block index
   */
  public int getRootBlockCount() {
    return rootCount;
  }

  /**
   * Finds the root-level index block containing the given key.
   *
   * @param key
   *          Key to find
   * @param offset offset of the key within the byte array
   * @param length length of the key
   * @return Offset of block containing <code>key</code> (between 0 and the
   *         number of blocks - 1) or -1 if this file does not contain the
   *         request.
   */
  public int rootBlockContainingKey(final byte[] key, int offset, int length) {
    int pos = Bytes.binarySearch(blockKeys, key, offset, length, comparator);
    // pos is between -(blockKeys.length + 1) to blockKeys.length - 1, see
    // binarySearch's javadoc.

    if (pos >= 0) {
      // This means this is an exact match with an element of blockKeys.
      assert pos < blockKeys.length;
      return pos;
    }

    // Otherwise, pos = -(i + 1), where blockKeys[i - 1] < key < blockKeys[i],
    // and i is in [0, blockKeys.length]. We are returning j = i - 1 such that
    // blockKeys[j] <= key < blockKeys[j + 1]. In particular, j = -1 if
    // key < blockKeys[0], meaning the file does not contain the given key.

    int i = -pos - 1;
    assert 0 <= i && i <= blockKeys.length;
    return i - 1;
  }

  /**
   * Finds the root-level index block containing the given key.
   *
   * @param key
   *          Key to find
   */
  public int rootBlockContainingKey(final Cell key) {
    int pos = Bytes.binarySearch(blockKeys, key, comparator);
    // pos is between -(blockKeys.length + 1) to blockKeys.length - 1, see
    // binarySearch's javadoc.

    if (pos >= 0) {
      // This means this is an exact match with an element of blockKeys.
      assert pos < blockKeys.length;
      return pos;
    }

    // Otherwise, pos = -(i + 1), where blockKeys[i - 1] < key < blockKeys[i],
    // and i is in [0, blockKeys.length]. We are returning j = i - 1 such that
    // blockKeys[j] <= key < blockKeys[j + 1]. In particular, j = -1 if
    // key < blockKeys[0], meaning the file does not contain the given key.

    int i = -pos - 1;
    assert 0 <= i && i <= blockKeys.length;
    return i - 1;
  }

  /**
   * Adds a new entry in the root block index. Only used when reading.
   *
   * @param key Last key in the block
   * @param offset file offset where the block is stored
   * @param dataSize the uncompressed data size
   */
  private void add(final byte[] key, final long offset, final int dataSize) {
    blockOffsets[rootCount] = offset;
    blockKeys[rootCount] = key;
    blockDataSizes[rootCount] = dataSize;
    rootCount++;
  }

  /**
   * The indexed key at the ith position in the nonRootIndex. The position starts at 0.
   * @param nonRootIndex
   * @param i the ith position
   * @return The indexed key at the ith position in the nonRootIndex.
   */
  private byte[] getNonRootIndexedKey(ByteBuffer nonRootIndex, int i) {
    int numEntries = nonRootIndex.getInt(0);
    if (i < 0 || i >= numEntries) {
      // No such entry at this position in the current level.
      return null;
    }

    // Entries start after the number of entries and the secondary index.
    // The secondary index takes numEntries + 1 ints.
    int entriesOffset = Bytes.SIZEOF_INT * (numEntries + 2);
    // Targetkey's offset relative to the end of secondary index
    int targetKeyRelOffset = nonRootIndex.getInt(
        Bytes.SIZEOF_INT * (i + 1));

    // The offset of the target key in the blockIndex buffer
    int targetKeyOffset = entriesOffset     // Skip secondary index
        + targetKeyRelOffset               // Skip all entries until mid
        + SECONDARY_INDEX_ENTRY_OVERHEAD;  // Skip offset and on-disk-size

    // We subtract the two consecutive secondary index elements, which
    // gives us the size of the whole (offset, onDiskSize, key) tuple. We
    // then need to subtract the overhead of offset and onDiskSize.
    int targetKeyLength = nonRootIndex.getInt(Bytes.SIZEOF_INT * (i + 2)) -
        targetKeyRelOffset - SECONDARY_INDEX_ENTRY_OVERHEAD;

    return ByteBufferUtils.toBytes(nonRootIndex, targetKeyOffset, targetKeyLength);
  }

  /**
   * Performs a binary search over a non-root level index block. Utilizes the
   * secondary index, which records the offsets of (offset, onDiskSize,
   * firstKey) tuples of all entries.
   *
   * @param key
   *          the key we are searching for offsets to individual entries in
   *          the blockIndex buffer
   * @param nonRootIndex
   *          the non-root index block buffer, starting with the secondary
   *          index. The position is ignored.
   * @return the index i in [0, numEntries - 1] such that keys[i] <= key <
   *         keys[i + 1], if keys is the array of all keys being searched, or
   *         -1 otherwise
   */
  static int binarySearchNonRootIndex(Cell key, ByteBuffer nonRootIndex,
      KVComparator comparator) {

    int numEntries = nonRootIndex.getInt(0);
    int low = 0;
    int high = numEntries - 1;
    int mid = 0;

    // Entries start after the number of entries and the secondary index.
    // The secondary index takes numEntries + 1 ints.
    int entriesOffset = Bytes.SIZEOF_INT * (numEntries + 2);

    // If we imagine that keys[-1] = -Infinity and
    // keys[numEntries] = Infinity, then we are maintaining an invariant that
    // keys[low - 1] < key < keys[high + 1] while narrowing down the range.
    KeyValue.KeyOnlyKeyValue nonRootIndexKV = new KeyValue.KeyOnlyKeyValue();
    while (low <= high) {
      mid = (low + high) >>> 1;

      // Midkey's offset relative to the end of secondary index
      int midKeyRelOffset = nonRootIndex.getInt(
          Bytes.SIZEOF_INT * (mid + 1));

      // The offset of the middle key in the blockIndex buffer
      int midKeyOffset = entriesOffset       // Skip secondary index
          + midKeyRelOffset                  // Skip all entries until mid
          + SECONDARY_INDEX_ENTRY_OVERHEAD;  // Skip offset and on-disk-size

      // We subtract the two consecutive secondary index elements, which
      // gives us the size of the whole (offset, onDiskSize, key) tuple. We
      // then need to subtract the overhead of offset and onDiskSize.
      int midLength = nonRootIndex.getInt(Bytes.SIZEOF_INT * (mid + 2)) -
          midKeyRelOffset - SECONDARY_INDEX_ENTRY_OVERHEAD;

      // we have to compare in this order, because the comparator order
      // has special logic when the 'left side' is a special key.
      // TODO make KeyOnlyKeyValue to be Buffer backed and avoid array() call. This has to be
      // done after HBASE-12224 & HBASE-12282
      nonRootIndexKV.setKey(nonRootIndex.array(),
          nonRootIndex.arrayOffset() + midKeyOffset, midLength);
      int cmp = comparator.compareOnlyKeyPortion(key, nonRootIndexKV);

      // key lives above the midpoint
      if (cmp > 0)
        low = mid + 1; // Maintain the invariant that keys[low - 1] < key
      // key lives below the midpoint
      else if (cmp < 0)
        high = mid - 1; // Maintain the invariant that key < keys[high + 1]
      else
        return mid; // exact match
    }

    // As per our invariant, keys[low - 1] < key < keys[high + 1], meaning
    // that low - 1 < high + 1 and (low - high) <= 1. As per the loop break
    // condition, low >= high + 1. Therefore, low = high + 1.

    if (low != high + 1) {
      throw new IllegalStateException("Binary search broken: low=" + low
          + " " + "instead of " + (high + 1));
    }

    // OK, our invariant says that keys[low - 1] < key < keys[low]. We need to
    // return i such that keys[i] <= key < keys[i + 1]. Therefore i = low - 1.
    int i = low - 1;

    // Some extra validation on the result.
    if (i < -1 || i >= numEntries) {
      throw new IllegalStateException("Binary search broken: result is " +
          i + " but expected to be between -1 and (numEntries - 1) = " +
          (numEntries - 1));
    }

    return i;
  }

  /**
   * Search for one key using the secondary index in a non-root block. In case
   * of success, positions the provided buffer at the entry of interest, where
   * the file offset and the on-disk-size can be read.
   *
   * @param nonRootBlock
   *          a non-root block without header. Initial position does not
   *          matter.
   * @param key
   *          the byte array containing the key
   * @return the index position where the given key was found, otherwise
   *         return -1 in the case the given key is before the first key.
   */
  static int locateNonRootIndexEntry(ByteBuffer nonRootBlock, Cell key,
      KVComparator comparator) {
    int entryIndex = binarySearchNonRootIndex(key, nonRootBlock, comparator);

    if (entryIndex != -1) {
      int numEntries = nonRootBlock.getInt(0);

      // The end of secondary index and the beginning of entries themselves.
      int entriesOffset = Bytes.SIZEOF_INT * (numEntries + 2);

      // The offset of the entry we are interested in relative to the end of
      // the secondary index.
      int entryRelOffset = nonRootBlock.getInt(Bytes.SIZEOF_INT * (1 + entryIndex));

      nonRootBlock.position(entriesOffset + entryRelOffset);
    }

    return entryIndex;
  }

  /**
   * Read in the root-level index from the given input stream. Must match
   * what was written into the root level by
   * {@link BlockIndexWriter#writeIndexBlocks(FSDataOutputStream)} at the
   * offset that function returned.
   *
   * @param in the buffered input stream or wrapped byte input stream
   * @param numEntries the number of root-level index entries
   * @throws IOException
   */
  public void readRootIndex(DataInput in, final int numEntries) throws IOException {
    blockOffsets = new long[numEntries];
    blockKeys = new byte[numEntries][];
    blockDataSizes = new int[numEntries];

    // If index size is zero, no index was written.
    if (numEntries > 0) {
      for (int i = 0; i < numEntries; ++i) {
        long offset = in.readLong();
        int dataSize = in.readInt();
        byte[] key = Bytes.readByteArray(in);
        add(key, offset, dataSize);
      }
    }
  }

  /**
   * Read in the root-level index from the given input stream. Must match
   * what was written into the root level by
   * {@link BlockIndexWriter#writeIndexBlocks(FSDataOutputStream)} at the
   * offset that function returned.
   *
   * @param blk the HFile block
   * @param numEntries the number of root-level index entries
   * @return the buffered input stream or wrapped byte input stream
   * @throws IOException
   */
  public DataInputStream readRootIndex(HFileBlock blk, final int numEntries) throws IOException {
    DataInputStream in = blk.getByteStream();
    readRootIndex(in, numEntries);
    return in;
  }

  /**
   * Read the root-level metadata of a multi-level block index. Based on
   * {@link #readRootIndex(DataInput, int)}, but also reads metadata
   * necessary to compute the mid-key in a multi-level index.
   *
   * @param blk the HFile block
   * @param numEntries the number of root-level index entries
   * @throws IOException
   */
  public void readMultiLevelIndexRoot(HFileBlock blk,
      final int numEntries) throws IOException {
    DataInputStream in = readRootIndex(blk, numEntries);
    // after reading the root index the checksum bytes have to
    // be subtracted to know if the mid key exists.
    int checkSumBytes = blk.totalChecksumBytes();
    if ((in.available() - checkSumBytes) < MID_KEY_METADATA_SIZE) {
      // No mid-key metadata available.
      return;
    }
    midLeafBlockOffset = in.readLong();
    midLeafBlockOnDiskSize = in.readInt();
    midKeyEntry = in.readInt();
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append("size=" + rootCount).append("\n");
    for (int i = 0; i < rootCount; i++) {
      sb.append("key=").append(KeyValue.keyToString(blockKeys[i]))
        .append("\n offset=").append(blockOffsets[i])
        .append(", dataSize=" + blockDataSizes[i]).append("\n");
    }
    return sb.toString();
  }

  @Override
  public long heapSize() {
    // Fixed overhead: object header, references and primitive fields.
    long heapSize = ClassSize.align(6 * ClassSize.REFERENCE +
        2 * Bytes.SIZEOF_INT + ClassSize.OBJECT);

    // Mid-key metadata.
    heapSize += MID_KEY_METADATA_SIZE;

    // Calculating the size of blockKeys
    if (blockKeys != null) {
      // Adding array + references overhead
      heapSize += ClassSize.align(ClassSize.ARRAY + blockKeys.length
          * ClassSize.REFERENCE);

      // Adding bytes
      for (byte[] key : blockKeys) {
        heapSize += ClassSize.align(ClassSize.ARRAY + key.length);
      }
    }

    if (blockOffsets != null) {
      heapSize += ClassSize.align(ClassSize.ARRAY + blockOffsets.length
          * Bytes.SIZEOF_LONG);
    }

    if (blockDataSizes != null) {
      heapSize += ClassSize.align(ClassSize.ARRAY + blockDataSizes.length
          * Bytes.SIZEOF_INT);
    }

    return ClassSize.align(heapSize);
  }
}

/**
 * Writes the block index into the output stream. Generate the tree from
 * bottom up. The leaf level is written to disk as a sequence of inline
 * blocks, if it is larger than a certain number of bytes. If the leaf level
 * is not large enough, we write all entries to the root level instead.
 *
 * After all leaf blocks have been written, we end up with an index
 * referencing the resulting leaf index blocks. If that index is larger than
 * the allowed root index size, the writer will break it up into
 * reasonable-size intermediate-level index block chunks write those chunks
 * out, and create another index referencing those chunks. This will be
 * repeated until the remaining index is small enough to become the root
 * index.
However, in most practical cases we will only have leaf-level * blocks and the root index, or just the root index. */ public static class BlockIndexWriter implements InlineBlockWriter { /** * While the index is being written, this represents the current block * index referencing all leaf blocks, with one exception. If the file is * being closed and there are not enough blocks to complete even a single * leaf block, no leaf blocks get written and this contains the entire * block index. After all levels of the index were written by * {@link #writeIndexBlocks(FSDataOutputStream)}, this contains the final * root-level index. */ private BlockIndexChunk rootChunk = new BlockIndexChunk(); /** * Current leaf-level chunk. New entries referencing data blocks get added * to this chunk until it grows large enough to be written to disk. */ private BlockIndexChunk curInlineChunk = new BlockIndexChunk(); /** * The number of block index levels. This is one if there is only root * level (even empty), two if there a leaf level and root level, and is * higher if there are intermediate levels. This is only final after * {@link #writeIndexBlocks(FSDataOutputStream)} has been called. The * initial value accounts for the root level, and will be increased to two * as soon as we find out there is a leaf-level in * {@link #blockWritten(long, int, int)}. */ private int numLevels = 1; private HFileBlock.Writer blockWriter; private byte[] firstKey = null; /** * The total number of leaf-level entries, i.e. entries referenced by * leaf-level blocks. For the data block index this is equal to the number * of data blocks. */ private long totalNumEntries; /** Total compressed size of all index blocks. */ private long totalBlockOnDiskSize; /** Total uncompressed size of all index blocks. */ private long totalBlockUncompressedSize; /** The maximum size guideline of all multi-level index blocks. */ private int maxChunkSize; /** Whether we require this block index to always be single-level. 
*/ private boolean singleLevelOnly; /** CacheConfig, or null if cache-on-write is disabled */ private CacheConfig cacheConf; /** Name to use for computing cache keys */ private String nameForCaching; /** Creates a single-level block index writer */ public BlockIndexWriter() { this(null, null, null); singleLevelOnly = true; } /** * Creates a multi-level block index writer. * * @param blockWriter the block writer to use to write index blocks * @param cacheConf used to determine when and how a block should be cached-on-write. */ public BlockIndexWriter(HFileBlock.Writer blockWriter, CacheConfig cacheConf, String nameForCaching) { if ((cacheConf == null) != (nameForCaching == null)) { throw new IllegalArgumentException("Block cache and file name for " + "caching must be both specified or both null"); } this.blockWriter = blockWriter; this.cacheConf = cacheConf; this.nameForCaching = nameForCaching; this.maxChunkSize = HFileBlockIndex.DEFAULT_MAX_CHUNK_SIZE; } public void setMaxChunkSize(int maxChunkSize) { if (maxChunkSize <= 0) { throw new IllegalArgumentException("Invald maximum index block size"); } this.maxChunkSize = maxChunkSize; } /** * Writes the root level and intermediate levels of the block index into * the output stream, generating the tree from bottom up. Assumes that the * leaf level has been inline-written to the disk if there is enough data * for more than one leaf block. We iterate by breaking the current level * of the block index, starting with the index of all leaf-level blocks, * into chunks small enough to be written to disk, and generate its parent * level, until we end up with a level small enough to become the root * level. * * If the leaf level is not large enough, there is no inline block index * anymore, so we only write that level of block index to disk as the root * level. * * @param out FSDataOutputStream * @return position at which we entered the root-level index. 
   * @throws IOException
   */
  public long writeIndexBlocks(FSDataOutputStream out) throws IOException {
    if (curInlineChunk != null && curInlineChunk.getNumEntries() != 0) {
      throw new IOException("Trying to write a multi-level block index, " +
          "but are " + curInlineChunk.getNumEntries() + " entries in the " +
          "last inline chunk.");
    }

    // We need to get mid-key metadata before we create intermediate
    // indexes and overwrite the root chunk.
    byte[] midKeyMetadata = numLevels > 1 ? rootChunk.getMidKeyMetadata()
        : null;

    if (curInlineChunk != null) {
      // Keep splitting the root chunk into intermediate levels until it is
      // small enough to serve as the root index block.
      while (rootChunk.getRootSize() > maxChunkSize) {
        rootChunk = writeIntermediateLevel(out, rootChunk);
        numLevels += 1;
      }
    }

    // write the root level
    long rootLevelIndexPos = out.getPos();

    {
      DataOutput blockStream = blockWriter.startWriting(BlockType.ROOT_INDEX);
      rootChunk.writeRoot(blockStream);
      if (midKeyMetadata != null)
        blockStream.write(midKeyMetadata);
      blockWriter.writeHeaderAndData(out);
    }

    // Add root index block size
    totalBlockOnDiskSize += blockWriter.getOnDiskSizeWithoutHeader();
    totalBlockUncompressedSize +=
        blockWriter.getUncompressedSizeWithoutHeader();

    if (LOG.isTraceEnabled()) {
      LOG.trace("Wrote a " + numLevels + "-level index with root level at pos "
          + rootLevelIndexPos + ", " + rootChunk.getNumEntries()
          + " root-level entries, " + totalNumEntries + " total entries, "
          + StringUtils.humanReadableInt(this.totalBlockOnDiskSize) +
          " on-disk size, "
          + StringUtils.humanReadableInt(totalBlockUncompressedSize) +
          " total uncompressed size.");
    }
    return rootLevelIndexPos;
  }

  /**
   * Writes the block index data as a single level only. Does not do any
   * block framing.
   *
   * @param out the buffered output stream to write the index to. Typically a
   *          stream writing into an {@link HFile} block.
   * @param description a short description of the index being written. Used
   *          in a log message.
   * @throws IOException
   */
  public void writeSingleLevelIndex(DataOutput out, String description)
      throws IOException {
    expectNumLevels(1);

    if (!singleLevelOnly)
      throw new IOException("Single-level mode is turned off");

    if (rootChunk.getNumEntries() > 0)
      throw new IOException("Root-level entries already added in " +
          "single-level mode");

    // Promote the accumulated inline chunk to the root chunk before writing.
    rootChunk = curInlineChunk;
    curInlineChunk = new BlockIndexChunk();

    if (LOG.isTraceEnabled()) {
      LOG.trace("Wrote a single-level " + description + " index with "
          + rootChunk.getNumEntries() + " entries, " + rootChunk.getRootSize()
          + " bytes");
    }
    rootChunk.writeRoot(out);
  }

  /**
   * Split the current level of the block index into intermediate index
   * blocks of permitted size and write those blocks to disk. Return the next
   * level of the block index referencing those intermediate-level blocks.
   *
   * @param out
   * @param currentLevel the current level of the block index, such as the a
   *          chunk referencing all leaf-level index blocks
   * @return the parent level block index, which becomes the root index after
   *         a few (usually zero) iterations
   * @throws IOException
   */
  private BlockIndexChunk writeIntermediateLevel(FSDataOutputStream out,
      BlockIndexChunk currentLevel) throws IOException {
    // Entries referencing intermediate-level blocks we are about to create.
    BlockIndexChunk parent = new BlockIndexChunk();

    // The current intermediate-level block index chunk.
BlockIndexChunk curChunk = new BlockIndexChunk(); for (int i = 0; i < currentLevel.getNumEntries(); ++i) { curChunk.add(currentLevel.getBlockKey(i), currentLevel.getBlockOffset(i), currentLevel.getOnDiskDataSize(i)); if (curChunk.getRootSize() >= maxChunkSize) writeIntermediateBlock(out, parent, curChunk); } if (curChunk.getNumEntries() > 0) { writeIntermediateBlock(out, parent, curChunk); } return parent; } private void writeIntermediateBlock(FSDataOutputStream out, BlockIndexChunk parent, BlockIndexChunk curChunk) throws IOException { long beginOffset = out.getPos(); DataOutputStream dos = blockWriter.startWriting( BlockType.INTERMEDIATE_INDEX); curChunk.writeNonRoot(dos); byte[] curFirstKey = curChunk.getBlockKey(0); blockWriter.writeHeaderAndData(out); if (cacheConf != null) { HFileBlock blockForCaching = blockWriter.getBlockForCaching(cacheConf); cacheConf.getBlockCache().cacheBlock(new BlockCacheKey(nameForCaching, beginOffset), blockForCaching); } // Add intermediate index block size totalBlockOnDiskSize += blockWriter.getOnDiskSizeWithoutHeader(); totalBlockUncompressedSize += blockWriter.getUncompressedSizeWithoutHeader(); // OFFSET is the beginning offset the chunk of block index entries. // SIZE is the total byte size of the chunk of block index entries // + the secondary index size // FIRST_KEY is the first key in the chunk of block index // entries. parent.add(curFirstKey, beginOffset, blockWriter.getOnDiskSizeWithHeader()); // clear current block index chunk curChunk.clear(); curFirstKey = null; } /** * @return how many block index entries there are in the root level */ public final int getNumRootEntries() { return rootChunk.getNumEntries(); } /** * @return the number of levels in this block index. 
*/ public int getNumLevels() { return numLevels; } private void expectNumLevels(int expectedNumLevels) { if (numLevels != expectedNumLevels) { throw new IllegalStateException("Number of block index levels is " + numLevels + "but is expected to be " + expectedNumLevels); } } /** * Whether there is an inline block ready to be written. In general, we * write an leaf-level index block as an inline block as soon as its size * as serialized in the non-root format reaches a certain threshold. */ @Override public boolean shouldWriteBlock(boolean closing) { if (singleLevelOnly) { throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED); } if (curInlineChunk == null) { throw new IllegalStateException("curInlineChunk is null; has shouldWriteBlock been " + "called with closing=true and then called again?"); } if (curInlineChunk.getNumEntries() == 0) { return false; } // We do have some entries in the current inline chunk. if (closing) { if (rootChunk.getNumEntries() == 0) { // We did not add any leaf-level blocks yet. Instead of creating a // leaf level with one block, move these entries to the root level. expectNumLevels(1); rootChunk = curInlineChunk; curInlineChunk = null; // Disallow adding any more index entries. return false; } return true; } else { return curInlineChunk.getNonRootSize() >= maxChunkSize; } } /** * Write out the current inline index block. Inline blocks are non-root * blocks, so the non-root index format is used. * * @param out */ @Override public void writeInlineBlock(DataOutput out) throws IOException { if (singleLevelOnly) throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED); // Write the inline block index to the output stream in the non-root // index block format. curInlineChunk.writeNonRoot(out); // Save the first key of the inline block so that we can add it to the // parent-level index. 
firstKey = curInlineChunk.getBlockKey(0); // Start a new inline index block curInlineChunk.clear(); } /** * Called after an inline block has been written so that we can add an * entry referring to that block to the parent-level index. */ @Override public void blockWritten(long offset, int onDiskSize, int uncompressedSize) { // Add leaf index block size totalBlockOnDiskSize += onDiskSize; totalBlockUncompressedSize += uncompressedSize; if (singleLevelOnly) throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED); if (firstKey == null) { throw new IllegalStateException("Trying to add second-level index " + "entry with offset=" + offset + " and onDiskSize=" + onDiskSize + "but the first key was not set in writeInlineBlock"); } if (rootChunk.getNumEntries() == 0) { // We are writing the first leaf block, so increase index level. expectNumLevels(1); numLevels = 2; } // Add another entry to the second-level index. Include the number of // entries in all previous leaf-level chunks for mid-key calculation. rootChunk.add(firstKey, offset, onDiskSize, totalNumEntries); firstKey = null; } @Override public BlockType getInlineBlockType() { return BlockType.LEAF_INDEX; } /** * Add one index entry to the current leaf-level block. When the leaf-level * block gets large enough, it will be flushed to disk as an inline block. * * @param firstKey the first key of the data block * @param blockOffset the offset of the data block * @param blockDataSize the on-disk size of the data block ({@link HFile} * format version 2), or the uncompressed size of the data block ( * {@link HFile} format version 1). */ public void addEntry(byte[] firstKey, long blockOffset, int blockDataSize) { curInlineChunk.add(firstKey, blockOffset, blockDataSize); ++totalNumEntries; } /** * @throws IOException if we happened to write a multi-level index. 
   */
  public void ensureSingleLevel() throws IOException {
    if (numLevels > 1) {
      throw new IOException ("Wrote a " + numLevels + "-level index with " +
          rootChunk.getNumEntries() + " root-level entries, but " +
          "this is expected to be a single-level block index.");
    }
  }

  /**
   * @return true if we are using cache-on-write. This is configured by the
   *         caller of the constructor by either passing a valid block cache
   *         or null.
   */
  @Override
  public boolean getCacheOnWrite() {
    return cacheConf != null && cacheConf.shouldCacheIndexesOnWrite();
  }

  /**
   * The total uncompressed size of the root index block, intermediate-level
   * index blocks, and leaf-level index blocks.
   *
   * @return the total uncompressed size of all index blocks
   */
  public long getTotalUncompressedSize() {
    return totalBlockUncompressedSize;
  }

}

/**
 * A single chunk of the block index in the process of writing. The data in
 * this chunk can become a leaf-level, intermediate-level, or root index
 * block.
 */
static class BlockIndexChunk {

  /** First keys of the key range corresponding to each index entry. */
  private final List<byte[]> blockKeys = new ArrayList<byte[]>();

  /** Block offset in backing stream. */
  private final List<Long> blockOffsets = new ArrayList<Long>();

  /** On-disk data sizes of lower-level data or index blocks. */
  private final List<Integer> onDiskDataSizes = new ArrayList<Integer>();

  /**
   * The cumulative number of sub-entries, i.e. entries on deeper-level block
   * index entries. numSubEntriesAt[i] is the number of sub-entries in the
   * blocks corresponding to this chunk's entries #0 through #i inclusively.
   */
  private final List<Long> numSubEntriesAt = new ArrayList<Long>();

  /**
   * The offset of the next entry to be added, relative to the end of the
   * "secondary index" in the "non-root" format representation of this index
   * chunk. This is the next value to be added to the secondary index.
   */
  private int curTotalNonRootEntrySize = 0;

  /**
   * The accumulated size of this chunk if stored in the root index format.
   */
  private int curTotalRootSize = 0;

  /**
   * The "secondary index" used for binary search over variable-length
   * records in a "non-root" format block. These offsets are relative to the
   * end of this secondary index.
   */
  private final List<Integer> secondaryIndexOffsetMarks = new ArrayList<Integer>();

  /**
   * Adds a new entry to this block index chunk.
   *
   * @param firstKey the first key in the block pointed to by this entry
   * @param blockOffset the offset of the next-level block pointed to by this
   *          entry
   * @param onDiskDataSize the on-disk data of the block pointed to by this
   *          entry, including header size
   * @param curTotalNumSubEntries if this chunk is the root index chunk under
   *          construction, this specifies the current total number of
   *          sub-entries in all leaf-level chunks, including the one
   *          corresponding to the second-level entry being added.
   */
  void add(byte[] firstKey, long blockOffset, int onDiskDataSize,
      long curTotalNumSubEntries) {
    // Record the offset for the secondary index
    secondaryIndexOffsetMarks.add(curTotalNonRootEntrySize);
    curTotalNonRootEntrySize += SECONDARY_INDEX_ENTRY_OVERHEAD
        + firstKey.length;

    curTotalRootSize += Bytes.SIZEOF_LONG + Bytes.SIZEOF_INT
        + WritableUtils.getVIntSize(firstKey.length) + firstKey.length;

    blockKeys.add(firstKey);
    blockOffsets.add(blockOffset);
    onDiskDataSizes.add(onDiskDataSize);

    if (curTotalNumSubEntries != -1) {
      numSubEntriesAt.add(curTotalNumSubEntries);

      // Make sure the parallel arrays are in sync.
      if (numSubEntriesAt.size() != blockKeys.size()) {
        throw new IllegalStateException("Only have key/value count " +
            "stats for " + numSubEntriesAt.size() + " block index " +
            "entries out of " + blockKeys.size());
      }
    }
  }

  /**
   * The same as {@link #add(byte[], long, int, long)} but does not take the
   * key/value into account. Used for single-level indexes.
* * @see {@link #add(byte[], long, int, long)} */ public void add(byte[] firstKey, long blockOffset, int onDiskDataSize) { add(firstKey, blockOffset, onDiskDataSize, -1); } public void clear() { blockKeys.clear(); blockOffsets.clear(); onDiskDataSizes.clear(); secondaryIndexOffsetMarks.clear(); numSubEntriesAt.clear(); curTotalNonRootEntrySize = 0; curTotalRootSize = 0; } /** * Finds the entry corresponding to the deeper-level index block containing * the given deeper-level entry (a "sub-entry"), assuming a global 0-based * ordering of sub-entries. * * <p> * <i> Implementation note. </i> We are looking for i such that * numSubEntriesAt[i - 1] <= k < numSubEntriesAt[i], because a deeper-level * block #i (0-based) contains sub-entries # numSubEntriesAt[i - 1]'th * through numSubEntriesAt[i] - 1, assuming a global 0-based ordering of * sub-entries. i is by definition the insertion point of k in * numSubEntriesAt. * * @param k sub-entry index, from 0 to the total number sub-entries - 1 * @return the 0-based index of the entry corresponding to the given * sub-entry */ public int getEntryBySubEntry(long k) { // We define mid-key as the key corresponding to k'th sub-entry // (0-based). int i = Collections.binarySearch(numSubEntriesAt, k); // Exact match: cumulativeWeight[i] = k. This means chunks #0 through // #i contain exactly k sub-entries, and the sub-entry #k (0-based) // is in the (i + 1)'th chunk. if (i >= 0) return i + 1; // Inexact match. Return the insertion point. return -i - 1; } /** * Used when writing the root block index of a multi-level block index. * Serializes additional information allowing to efficiently identify the * mid-key. 
 *
 * @return a few serialized fields for finding the mid-key
 * @throws IOException if could not create metadata for computing mid-key
 */
public byte[] getMidKeyMetadata() throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream(
      MID_KEY_METADATA_SIZE);
  DataOutputStream baosDos = new DataOutputStream(baos);
  long totalNumSubEntries = numSubEntriesAt.get(blockKeys.size() - 1);
  if (totalNumSubEntries == 0) {
    throw new IOException("No leaf-level entries, mid-key unavailable");
  }
  // The mid-key is the key of the middle sub-entry (0-based ordering).
  long midKeySubEntry = (totalNumSubEntries - 1) / 2;
  int midKeyEntry = getEntryBySubEntry(midKeySubEntry);

  // Serialized layout: offset (long), on-disk size (int), then the
  // sub-entry's index within its leaf block (int).
  baosDos.writeLong(blockOffsets.get(midKeyEntry));
  baosDos.writeInt(onDiskDataSizes.get(midKeyEntry));

  long numSubEntriesBefore = midKeyEntry > 0 ?
      numSubEntriesAt.get(midKeyEntry - 1) : 0;
  long subEntryWithinEntry = midKeySubEntry - numSubEntriesBefore;
  if (subEntryWithinEntry < 0 || subEntryWithinEntry > Integer.MAX_VALUE) {
    throw new IOException("Could not identify mid-key index within the "
        + "leaf-level block containing mid-key: out of range ("
        + subEntryWithinEntry + ", numSubEntriesBefore="
        + numSubEntriesBefore + ", midKeySubEntry=" + midKeySubEntry
        + ")");
  }

  baosDos.writeInt((int) subEntryWithinEntry);

  // Sanity check: the layout above must exactly fill the fixed-size record.
  if (baosDos.size() != MID_KEY_METADATA_SIZE) {
    throw new IOException("Could not write mid-key metadata: size=" +
        baosDos.size() + ", correct size: " + MID_KEY_METADATA_SIZE);
  }

  // Close just to be good citizens, although this has no effect.
  baos.close();

  return baos.toByteArray();
}

/**
 * Writes the block index chunk in the non-root index block format. This
 * format contains the number of entries, an index of integer offsets
 * for quick binary search on variable-length records, and tuples of
 * block offset, on-disk block size, and the first key for each entry.
 *
 * @param out the output to write the non-root-format chunk to
 * @throws IOException if the chunk's bookkeeping is corrupted or the
 *         write fails
 */
void writeNonRoot(DataOutput out) throws IOException {
  // The number of entries in the block.
  out.writeInt(blockKeys.size());

  if (secondaryIndexOffsetMarks.size() != blockKeys.size()) {
    throw new IOException("Corrupted block index chunk writer: " +
        blockKeys.size() + " entries but " +
        secondaryIndexOffsetMarks.size() + " secondary index items");
  }

  // For each entry, write a "secondary index" of relative offsets to the
  // entries from the end of the secondary index. This works, because at
  // read time we read the number of entries and know where the secondary
  // index ends.
  for (int currentSecondaryIndex : secondaryIndexOffsetMarks)
    out.writeInt(currentSecondaryIndex);

  // We include one other element in the secondary index to calculate the
  // size of each entry more easily by subtracting secondary index elements.
  out.writeInt(curTotalNonRootEntrySize);

  for (int i = 0; i < blockKeys.size(); ++i) {
    out.writeLong(blockOffsets.get(i));
    out.writeInt(onDiskDataSizes.get(i));
    out.write(blockKeys.get(i));
  }
}

/**
 * @return the size of this chunk if stored in the non-root index block
 *         format
 */
int getNonRootSize() {
  return Bytes.SIZEOF_INT                          // Number of entries
      + Bytes.SIZEOF_INT * (blockKeys.size() + 1)  // Secondary index
      + curTotalNonRootEntrySize;                  // All entries
}

/**
 * Writes this chunk into the given output stream in the root block index
 * format. This format is similar to the {@link HFile} version 1 block
 * index format, except that we store on-disk size of the block instead of
 * its uncompressed size.
 *
 * @param out the data output stream to write the block index to. Typically
 *        a stream writing into an {@link HFile} block.
 * @throws IOException
 */
void writeRoot(DataOutput out) throws IOException {
  // Root format: per entry, a long offset, an int on-disk size and a
  // length-prefixed key (see Bytes.writeByteArray).
  for (int i = 0; i < blockKeys.size(); ++i) {
    out.writeLong(blockOffsets.get(i));
    out.writeInt(onDiskDataSizes.get(i));
    Bytes.writeByteArray(out, blockKeys.get(i));
  }
}

/**
 * @return the size of this chunk if stored in the root index block format
 */
int getRootSize() {
  return curTotalRootSize;
}

/**
 * @return the number of entries in this block index chunk
 */
public int getNumEntries() {
  return blockKeys.size();
}

/** @return the first key of the block pointed to by the i'th entry */
public byte[] getBlockKey(int i) {
  return blockKeys.get(i);
}

/** @return the file offset of the block pointed to by the i'th entry */
public long getBlockOffset(int i) {
  return blockOffsets.get(i);
}

/** @return the on-disk size (including header) of the i'th entry's block */
public int getOnDiskDataSize(int i) {
  return onDiskDataSizes.get(i);
}

/**
 * @return the cumulative sub-entry count recorded at entry i (see
 *         {@link #add(byte[], long, int, long)}), or 0 for a negative index
 */
public long getCumulativeNumKV(int i) {
  if (i < 0)
    return 0;
  return numSubEntriesAt.get(i);
}

}

/** @return the configured maximum index chunk size, or the default */
public static int getMaxChunkSize(Configuration conf) {
  return conf.getInt(MAX_CHUNK_SIZE_KEY, DEFAULT_MAX_CHUNK_SIZE);
}
}
apache-2.0
mpoindexter/teavm
teavm-dom/src/main/java/org/teavm/dom/html/HTMLOptionsCollection.java
1285
/*
 * Copyright 2014 Alexey Andreev.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.teavm.dom.html;

import org.teavm.jso.JSIndexer;
import org.teavm.jso.JSProperty;

/**
 * JavaScript-interop overlay for the browser's {@code HTMLOptionsCollection}
 * (the collection of {@code <option>} elements of a select element).
 * Method bodies are provided by the browser via TeaVM's JSO bridge.
 *
 * @author Alexey Andreev
 */
public interface HTMLOptionsCollection extends HTMLCollection {
    /** Returns the option at the given index (narrowed from HTMLCollection). */
    @Override
    HTMLOptionElement item(int index);

    /** Returns the option with the given name or id (narrowed from HTMLCollection). */
    @Override
    HTMLOptionElement namedItem(String name);

    /** Indexed assignment: {@code collection[index] = element}. */
    @JSIndexer
    void set(int index, HTMLOptionElement element);

    /** Inserts {@code element} before the given element. */
    void add(HTMLOptionElement element, HTMLElement before);

    /** Inserts {@code element} before the given index. */
    void add(HTMLOptionElement element, int before);

    /** Appends {@code element} to the collection. */
    void add(HTMLOptionElement element);

    /** Removes the option at the given index. */
    void remove(int index);

    // NOTE(review): selectedIndex mirrors the DOM property of the owning
    // select element — presumably -1 when nothing is selected; confirm
    // against the DOM specification.
    @JSProperty
    int getSelectedIndex();

    @JSProperty
    void setSelectedIndex(int selectedIndex);
}
apache-2.0
rzeh4n/psp-validator
javaFxModule/src/main/java/nkp/pspValidator/gui/DictionaryUpdateDialogController.java
6762
package nkp.pspValidator.gui;

import javafx.concurrent.Task;
import javafx.event.ActionEvent;
import javafx.event.EventHandler;
import javafx.fxml.FXML;
import javafx.scene.control.Button;
import javafx.scene.control.Label;
import javafx.scene.control.ProgressIndicator;
import javafx.stage.WindowEvent;
import nkp.pspValidator.shared.engine.exceptions.ValidatorConfigurationException;
import nkp.pspValidator.shared.metadataProfile.DictionaryManager;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;

import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.util.Scanner;
import java.util.Set;

/**
 * Controller of the dialog that shows a single dictionary's state (name,
 * number of values, last synchronization date) and lets the user re-download
 * the dictionary content from its synchronization URL on a background thread.
 *
 * Created by Martin Řehánek on 7.1.19.
 */
public class DictionaryUpdateDialogController extends DialogController {

    private String dictionaryName;
    private String syncUrl;
    // True while the background download/replace task runs; blocks closing the dialog.
    private boolean updatingNow = false;

    @FXML
    Label lblDictName;
    @FXML
    Label lblTotalValues;
    @FXML
    Label lblLastUpdated;
    @FXML
    Label lblStatus;
    @FXML
    Button btnUpdate;
    @FXML
    Button btnCloseOrCancel;
    @FXML
    ProgressIndicator progressIndicator;

    private DictionaryManager getDictMgr() {
        return main.getValidationDataManager().getValidatorConfigMgr().getDictionaryManager();
    }

    private ConfigurationManager getConfigMgr() {
        return main.getConfigurationManager();
    }

    @Override
    public void startNow() {
        lblDictName.setText(dictionaryName);
        updateUi();
    }

    @Override
    public EventHandler<WindowEvent> getOnCloseEventHandler() {
        return event -> {
            if (updatingNow) {
                // Ignore the close request while an update is in progress.
                event.consume();
            }
        };
    }

    /**
     * Must be called before the dialog is shown.
     *
     * @param dictionaryName name of the dictionary being displayed/updated
     * @param syncUrl        URL the dictionary content is downloaded from
     */
    public void setData(String dictionaryName, String syncUrl) {
        this.dictionaryName = dictionaryName;
        this.syncUrl = syncUrl;
    }

    public void closeDialog(ActionEvent actionEvent) {
        stage.close();
    }

    /**
     * Downloads the dictionary from {@code syncUrl} on a background thread into
     * a temporary file, atomically replaces the local dictionary file on
     * success, stores the sync date and reloads the dictionary manager.
     * Success/failure is reported through the task's message property.
     */
    public void update(ActionEvent actionEvent) {
        updatingNow = true;
        lblStatus.setText("Aktualizuji ...");
        updateUi();
        //https://docs.oracle.com/javafx/2/api/javafx/concurrent/Task.html
        Task<Void> task = new Task<Void>() {
            @Override
            protected Void call() {
                if (ConfigurationManager.DEV_MODE) {
                    try {
                        Thread.sleep(300);
                    } catch (InterruptedException e) {
                        // Restore the interrupt flag so callers can observe it.
                        Thread.currentThread().interrupt();
                    }
                }
                File tmpFile = null;
                try {
                    // Download into a temporary file first, so the real
                    // dictionary file is only touched on complete success.
                    tmpFile = File.createTempFile(dictionaryName, ".tmp");
                    // try-with-resources closes both streams even on error.
                    // NOTE(review): Scanner/FileWriter use the platform charset
                    // here — confirm dictionaries are encoded accordingly.
                    try (Scanner reader = new Scanner(new URL(syncUrl).openStream());
                         PrintWriter writer = new PrintWriter(new FileWriter(tmpFile))) {
                        // hasNextLine() (not hasNext()) so blank lines are preserved.
                        while (reader.hasNextLine()) {
                            writer.println(reader.nextLine());
                        }
                        writer.flush();
                    }
                    // Replace the dictionary file with the downloaded content.
                    File dictFile = getDictMgr().getDictionaryFile(dictionaryName);
                    Files.copy(Paths.get(tmpFile.toURI()), Paths.get(dictFile.toURI()), StandardCopyOption.REPLACE_EXISTING);
                    updateMessage("finished");
                } catch (IOException ex) {
                    updateMessage("CHYBA: " + ex.getClass().getSimpleName() + ": " + ex.getMessage());
                    ex.printStackTrace();
                } finally {
                    // Best-effort temp-file cleanup, on success and failure alike.
                    if (tmpFile != null) {
                        tmpFile.delete();
                    }
                }
                return null;
            }
        };
        task.messageProperty().addListener((observable, oldValue, newValue) -> {
            System.out.println(newValue);
            updatingNow = false;
            if (newValue.startsWith("CHYBA")) {
                lblStatus.setText(newValue);
            } else {
                lblStatus.setText("Slovník úspěšně aktualizován!");
                //update syncDate property
                main.getConfigurationManager().setString(ConfigurationManager.propDictionarySyncDate(dictionaryName), todayDate());
                //reload dictionary manager
                try {
                    getDictMgr().reload();
                } catch (ValidatorConfigurationException e) {
                    lblStatus.setText("CHYBA: " + e.getMessage());
                    e.printStackTrace();
                }
            }
            updateUi();
        });
        new Thread(task).start();
    }

    /** @return today's date formatted for the sync-date property. */
    private String todayDate() {
        DateTime now = new DateTime();
        DateTimeFormatter fmt = DateTimeFormat.forPattern("dd. MM. yyyy");
        return fmt.print(now);
    }

    /** Refreshes labels, buttons and the progress indicator from current state. */
    private void updateUi() {
        Set<String> values = getDictMgr().getDictionaryValues(dictionaryName);
        String syncDate = getConfigMgr().getStringOrDefault(ConfigurationManager.propDictionarySyncDate(dictionaryName), null);
        if (syncDate == null || syncDate.isEmpty()) {
            lblLastUpdated.setText("-");
        } else {
            lblLastUpdated.setText(syncDate);
        }
        lblTotalValues.setText("" + values.size());
        if (updatingNow) {
            btnUpdate.setDisable(true);
            btnCloseOrCancel.setText("Zrušit");
            progressIndicator.setVisible(true);
        } else {
            btnUpdate.setDisable(false);
            btnCloseOrCancel.setText("Zavřít");
            progressIndicator.setVisible(false);
        }
    }

    public void closeOrCancel(ActionEvent actionEvent) {
        if (!updatingNow) {
            stage.close();
        } else {
            // Cancellation of a running update is not supported (yet).
        }
    }
}
apache-2.0
gstevey/gradle
subprojects/tooling-api/src/main/java/org/gradle/tooling/internal/consumer/DefaultInternalJvmTestRequest.java
1980
/*
 * Copyright 2015 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.gradle.tooling.internal.consumer;

import org.gradle.tooling.internal.protocol.test.InternalJvmTestRequest;

import javax.annotation.Nullable;
import java.util.Objects;

/**
 * Immutable value object identifying a JVM test to execute: a class name plus
 * an optional method name ({@code null} means "all methods of the class").
 * Instances are compared by value so they can be de-duplicated in collections.
 */
public class DefaultInternalJvmTestRequest implements InternalJvmTestRequest {
    private final String className;
    private final String methodName;

    public DefaultInternalJvmTestRequest(String className, @Nullable String methodName) {
        this.className = className;
        this.methodName = methodName;
    }

    public String getClassName() {
        return className;
    }

    @Nullable
    public String getMethodName() {
        return methodName;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        DefaultInternalJvmTestRequest that = (DefaultInternalJvmTestRequest) o;
        // Objects.equals handles the null cases the original spelled out by hand.
        return Objects.equals(className, that.className)
            && Objects.equals(methodName, that.methodName);
    }

    @Override
    public int hashCode() {
        // Same formula (and therefore same values) as the original hand-rolled
        // implementation; Objects.hashCode(x) == (x != null ? x.hashCode() : 0).
        int result = Objects.hashCode(className);
        result = 31 * result + Objects.hashCode(methodName);
        return result;
    }

    @Override
    public String toString() {
        // For diagnostics only; not part of any wire format.
        return "DefaultInternalJvmTestRequest{className='" + className + "', methodName='" + methodName + "'}";
    }
}
apache-2.0
mehant/incubator-calcite
core/src/main/java/org/apache/calcite/sql/validate/SqlValidatorCatalogReader.java
3440
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to you under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.calcite.sql.validate;

import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeField;
import org.apache.calcite.sql.SqlIdentifier;

import java.util.List;

/**
 * Supplies catalog information for {@link SqlValidator}.
 *
 * <p>This interface only provides a thin API to the underlying repository, and
 * this is intentional. By only presenting the repository information of
 * interest to the validator, we reduce the dependency on exact mechanism to
 * implement the repository. It is also possible to construct mock
 * implementations of this interface for testing purposes.
 */
public interface SqlValidatorCatalogReader {
  //~ Methods ----------------------------------------------------------------

  /**
   * Finds a table with the given name, possibly qualified.
   *
   * @param names Qualified name of table
   * @return named table, or null if not found
   */
  SqlValidatorTable getTable(List<String> names);

  /**
   * Finds a user-defined type with the given name, possibly qualified.
   *
   * <p>NOTE jvs 12-Feb-2005: the reason this method is defined here instead
   * of on RelDataTypeFactory is that it has to take into account
   * context-dependent information such as SQL schema path, whereas a type
   * factory is context-independent.
   *
   * @param typeName Name of type
   * @return named type, or null if not found
   */
  RelDataType getNamedType(SqlIdentifier typeName);

  /**
   * Given fully qualified schema name, returns schema object names as
   * specified. They can be schema, table, function, view.
   * When names array is empty, the contents of root schema should be returned.
   *
   * @param names the array contains fully qualified schema name or empty
   *              list for root schema
   * @return the list of all object (schema, table, function,
   *         view) names under the above criteria
   */
  List<SqlMoniker> getAllSchemaObjectNames(List<String> names);

  /**
   * Returns the name of the current schema.
   *
   * @return name of the current schema
   */
  List<String> getSchemaName();

  /**
   * Finds a field with a given name, using the case-sensitivity of the current
   * session.
   */
  RelDataTypeField field(RelDataType rowType, String alias);

  /**
   * Finds the ordinal of a field with a given name, using the case-sensitivity
   * of the current session.
   */
  int fieldOrdinal(RelDataType rowType, String alias);

  /**
   * Finds {@code name} within {@code strings}.
   * NOTE(review): presumably returns the matching index using the session's
   * case-sensitivity, consistent with {@link #fieldOrdinal} — confirm against
   * implementations.
   */
  int match(List<String> strings, String name);

  /**
   * Creates a row type consisting of the named columns projected from
   * {@code type}.
   * NOTE(review): column-order and case-matching semantics are defined by
   * implementations — confirm before relying on them.
   */
  RelDataType createTypeFromProjection(RelDataType type,
      List<String> columnNameList);
}

// End SqlValidatorCatalogReader.java
apache-2.0
cinderella/incubator-cloudstack
plugins/hypervisors/vmware/src/com/cloud/network/dao/CiscoNexusVSMDeviceDaoImpl.java
4737
// Copyright 2012 Citrix Systems, Inc. Licensed under the
// Apache License, Version 2.0 (the "License"); you may not use this
// file except in compliance with the License.  Citrix Systems, Inc.
// reserves all rights not expressly granted by the License.
// You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Automatically generated by addcopyright.py at 04/03/2012
package com.cloud.network.dao;

import java.util.List;

import javax.ejb.Local;

import org.apache.log4j.Logger;

import com.cloud.network.CiscoNexusVSMDeviceVO;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Op;

/**
 * DAO for Cisco Nexus VSM device rows. Each finder delegates to a
 * SearchBuilder prepared once in the constructor.
 */
@Local(value=CiscoNexusVSMDeviceDao.class)
@DB(txn=false)
public class CiscoNexusVSMDeviceDaoImpl extends GenericDaoBase<CiscoNexusVSMDeviceVO, Long> implements CiscoNexusVSMDeviceDao {
    protected static final Logger s_logger = Logger.getLogger(CiscoNexusVSMDeviceDaoImpl.class);

    // Prepared search templates; more can be added here as needed.
    final SearchBuilder<CiscoNexusVSMDeviceVO> mgmtVlanIdSearch;
    final SearchBuilder<CiscoNexusVSMDeviceVO> domainIdSearch;
    final SearchBuilder<CiscoNexusVSMDeviceVO> nameSearch;
    final SearchBuilder<CiscoNexusVSMDeviceVO> ipaddrSearch;
    final SearchBuilder<CiscoNexusVSMDeviceVO> genericVlanIdSearch;
    final SearchBuilder<CiscoNexusVSMDeviceVO> fullTableSearch;

    public CiscoNexusVSMDeviceDaoImpl() {
        super();

        // Exact match on the management VLAN only.
        mgmtVlanIdSearch = createSearchBuilder();
        mgmtVlanIdSearch.and("managementVlan", mgmtVlanIdSearch.entity().getManagementVlan(), Op.EQ);
        mgmtVlanIdSearch.done();

        // Matches a VLAN id against any of the four VLAN columns (OR'ed).
        genericVlanIdSearch = createSearchBuilder();
        genericVlanIdSearch.and("managementVlan", genericVlanIdSearch.entity().getManagementVlan(), Op.EQ);
        genericVlanIdSearch.or("controlVlan", genericVlanIdSearch.entity().getControlVlan(), Op.EQ);
        genericVlanIdSearch.or("packetVlan", genericVlanIdSearch.entity().getPacketVlan(), Op.EQ);
        genericVlanIdSearch.or("storageVlan", genericVlanIdSearch.entity().getStorageVlan(), Op.EQ);
        genericVlanIdSearch.done();

        // Exact match on the VSM switch domain id.
        domainIdSearch = createSearchBuilder();
        domainIdSearch.and("vsmSwitchDomainId", domainIdSearch.entity().getvsmDomainId(), Op.EQ);
        domainIdSearch.done();

        // Exact match on the VSM name.
        nameSearch = createSearchBuilder();
        nameSearch.and("vsmName", nameSearch.entity().getvsmName(), Op.EQ);
        nameSearch.done();

        // Exact match on the VSM IP address.
        ipaddrSearch = createSearchBuilder();
        ipaddrSearch.and("ipaddr", ipaddrSearch.entity().getipaddr(), Op.EQ);
        ipaddrSearch.done();

        // Unconstrained search: returns every row.
        fullTableSearch = createSearchBuilder();
        fullTableSearch.done();
        // Further AND conditions (e.g. accountId) can be added later.
    }

    /** Looks up the single VSM with the given switch domain id. */
    public CiscoNexusVSMDeviceVO getVSMbyDomainId(long domId) {
        SearchCriteria<CiscoNexusVSMDeviceVO> criteria = domainIdSearch.create();
        criteria.setParameters("vsmSwitchDomainId", domId);
        return findOneBy(criteria);
    }

    /** Looks up the single VSM with the given name. */
    public CiscoNexusVSMDeviceVO getVSMbyName(String vsmName) {
        SearchCriteria<CiscoNexusVSMDeviceVO> criteria = nameSearch.create();
        criteria.setParameters("vsmName", vsmName);
        return findOneBy(criteria);
    }

    /** Looks up the single VSM with the given IP address. */
    public CiscoNexusVSMDeviceVO getVSMbyIpaddress(String ipaddress) {
        SearchCriteria<CiscoNexusVSMDeviceVO> criteria = ipaddrSearch.create();
        criteria.setParameters("ipaddr", ipaddress);
        return findOneBy(criteria);
    }

    /** Lists VSMs whose management VLAN equals {@code vlanId}. */
    public List<CiscoNexusVSMDeviceVO> listByMgmtVlan(int vlanId) {
        SearchCriteria<CiscoNexusVSMDeviceVO> criteria = mgmtVlanIdSearch.create();
        criteria.setParameters("managementVlan", vlanId);
        return search(criteria, null);
    }

    /** Lists every VSM in the table. */
    public List<CiscoNexusVSMDeviceVO> listAllVSMs() {
        SearchCriteria<CiscoNexusVSMDeviceVO> criteria = fullTableSearch.create();
        return search(criteria, null);
    }

    /** Lists VSMs where {@code vlanId} matches any of the four VLAN columns. */
    public List<CiscoNexusVSMDeviceVO> listByVlanId(int vlanId) {
        SearchCriteria<CiscoNexusVSMDeviceVO> criteria = genericVlanIdSearch.create();
        // All four parameters feed the same OR'ed criteria; order is irrelevant.
        criteria.setParameters("controlVlan", vlanId);
        criteria.setParameters("packetVlan", vlanId);
        criteria.setParameters("storageVlan", vlanId);
        criteria.setParameters("managementVlan", vlanId);
        return search(criteria, null);
    }
}
apache-2.0
ouyangjie/hadoop
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
36130
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.shortcircuit; import java.io.BufferedOutputStream; import java.io.Closeable; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.IOException; import java.nio.MappedByteBuffer; import java.util.HashMap; import java.util.Map; import java.util.Map.Entry; import java.util.TreeMap; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.ReentrantLock; import org.apache.commons.lang.mutable.MutableBoolean; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.ExtendedBlockId; import org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf; import org.apache.hadoop.hdfs.net.DomainPeer; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.datatransfer.Sender; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; import org.apache.hadoop.hdfs.protocolPB.PBHelperClient; import 
org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
import org.apache.hadoop.hdfs.util.IOUtilsClient;
import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.net.unix.DomainSocketWatcher;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.Waitable;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.ThreadFactoryBuilder;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * The ShortCircuitCache tracks things which the client needs to access
 * HDFS block files via short-circuit.
 *
 * These things include: memory-mapped regions, file descriptors, and shared
 * memory areas for communicating with the DataNode.
 */
@InterfaceAudience.Private
public class ShortCircuitCache implements Closeable {
  public static final Logger LOG = LoggerFactory.getLogger(
      ShortCircuitCache.class);

  /**
   * Expiry thread which makes sure that the file descriptors get closed
   * after a while.
   */
  private class CacheCleaner implements Runnable, Closeable {
    private ScheduledFuture<?> future;

    /**
     * Run the CacheCleaner thread.
     *
     * Whenever a thread requests a ShortCircuitReplica object, we will make
     * sure it gets one.  That ShortCircuitReplica object can then be re-used
     * when another thread requests a ShortCircuitReplica object for the same
     * block.  So in that sense, there is no maximum size to the cache.
     *
     * However, when a ShortCircuitReplica object is unreferenced by the
     * thread(s) that are using it, it becomes evictable.  There are two
     * separate eviction lists-- one for mmaped objects, and another for
     * non-mmaped objects.  We do this in order to avoid having the regular
     * files kick the mmaped files out of the cache too quickly.  Reusing
     * an already-existing mmap gives a huge performance boost, since the
     * page table entries don't have to be re-populated.  Both the mmap
     * and non-mmap evictable lists have maximum sizes and maximum lifespans.
     */
    @Override
    public void run() {
      ShortCircuitCache.this.lock.lock();
      try {
        if (ShortCircuitCache.this.closed) return;
        long curMs = Time.monotonicNow();
        LOG.debug("{}: cache cleaner running at {}", this, curMs);

        int numDemoted = demoteOldEvictableMmaped(curMs);
        int numPurged = 0;
        // Walk the evictable map in insertion-time order, purging every
        // replica whose lifespan has expired; stop at the first one still
        // within its lifespan (the map is keyed by insertion time).
        Long evictionTimeNs = Long.valueOf(0);
        while (true) {
          Entry<Long, ShortCircuitReplica> entry =
              evictable.ceilingEntry(evictionTimeNs);
          if (entry == null) break;
          evictionTimeNs = entry.getKey();
          long evictionTimeMs =
              TimeUnit.MILLISECONDS.convert(evictionTimeNs,
                  TimeUnit.NANOSECONDS);
          if (evictionTimeMs + maxNonMmappedEvictableLifespanMs >= curMs)
            break;
          ShortCircuitReplica replica = entry.getValue();
          if (LOG.isTraceEnabled()) {
            LOG.trace("CacheCleaner: purging " + replica + ": " +
                StringUtils.getStackTrace(Thread.currentThread()));
          }
          purge(replica);
          numPurged++;
        }

        LOG.debug("{}: finishing cache cleaner run started at {}. Demoted {} "
            + "mmapped replicas; purged {} replicas.",
            this, curMs, numDemoted, numPurged);
      } finally {
        ShortCircuitCache.this.lock.unlock();
      }
    }

    @Override
    public void close() throws IOException {
      // Cancel the scheduled periodic run, if one was ever scheduled.
      if (future != null) {
        future.cancel(false);
      }
    }

    public void setFuture(ScheduledFuture<?> future) {
      this.future = future;
    }

    /**
     * Get the rate at which this cleaner thread should be scheduled.
     *
     * We do this by taking the minimum expiration time and dividing by 4.
     *
     * @return the rate in milliseconds at which this thread should be
     *         scheduled.
     */
    public long getRateInMs() {
      long minLifespanMs =
          Math.min(maxNonMmappedEvictableLifespanMs,
              maxEvictableMmapedLifespanMs);
      long sampleTimeMs = minLifespanMs / 4;
      // Never return 0: a non-positive period would be invalid for scheduling.
      return (sampleTimeMs < 1) ? 1 : sampleTimeMs;
    }
  }

  /**
   * A task which asks the DataNode to release a short-circuit shared memory
   * slot.
If successful, this will tell the DataNode to stop monitoring
   * changes to the mlock status of the replica associated with the slot.
   * It will also allow us (the client) to re-use this slot for another
   * replica.  If we can't communicate with the DataNode for some reason,
   * we tear down the shared memory segment to avoid being in an inconsistent
   * state.
   */
  private class SlotReleaser implements Runnable {
    /**
     * The slot that we need to release.
     */
    private final Slot slot;

    SlotReleaser(Slot slot) {
      this.slot = slot;
    }

    @Override
    public void run() {
      LOG.trace("{}: about to release {}", ShortCircuitCache.this, slot);
      final DfsClientShm shm = (DfsClientShm)slot.getShm();
      final DomainSocket shmSock = shm.getPeer().getDomainSocket();
      final String path = shmSock.getPath();
      boolean success = false;
      // Open a fresh domain socket to the DataNode and send the release
      // request; try-with-resources closes both the socket and the stream.
      try (DomainSocket sock = DomainSocket.connect(path);
           DataOutputStream out = new DataOutputStream(
               new BufferedOutputStream(sock.getOutputStream()))) {
        new Sender(out).releaseShortCircuitFds(slot.getSlotId());
        DataInputStream in = new DataInputStream(sock.getInputStream());
        ReleaseShortCircuitAccessResponseProto resp =
            ReleaseShortCircuitAccessResponseProto.parseFrom(
                PBHelperClient.vintPrefixed(in));
        if (resp.getStatus() != Status.SUCCESS) {
          String error = resp.hasError() ? resp.getError() : "(unknown)";
          throw new IOException(resp.getStatus().toString() + ": " + error);
        }
        LOG.trace("{}: released {}", this, slot);
        success = true;
      } catch (IOException e) {
        LOG.error(ShortCircuitCache.this + ": failed to release " +
            "short-circuit shared memory slot " + slot + " by sending " +
            "ReleaseShortCircuitAccessRequestProto to " + path +
            ".  Closing shared memory segment.", e);
      } finally {
        // On success the slot can be recycled; on failure the whole segment
        // is torn down so client and DataNode can't disagree about its state.
        if (success) {
          shmManager.freeSlot(slot);
        } else {
          shm.getEndpointShmManager().shutdown(shm);
        }
      }
    }
  }

  public interface ShortCircuitReplicaCreator {
    /**
     * Attempt to create a ShortCircuitReplica object.
     *
     * This callback will be made without holding any locks.
 *
     * @return a non-null ShortCircuitReplicaInfo object.
     */
    ShortCircuitReplicaInfo createShortCircuitReplicaInfo();
  }

  /**
   * Lock protecting the cache.
   */
  private final ReentrantLock lock = new ReentrantLock();

  /**
   * The executor service that runs the cacheCleaner.
   */
  private final ScheduledThreadPoolExecutor cleanerExecutor
      = new ScheduledThreadPoolExecutor(1, new ThreadFactoryBuilder().
          setDaemon(true).setNameFormat("ShortCircuitCache_Cleaner").
          build());

  /**
   * The executor service that runs the SlotReleaser.
   */
  private final ScheduledThreadPoolExecutor releaserExecutor
      = new ScheduledThreadPoolExecutor(1, new ThreadFactoryBuilder().
          setDaemon(true).setNameFormat("ShortCircuitCache_SlotReleaser").
          build());

  /**
   * A map containing all ShortCircuitReplicaInfo objects, organized by Key.
   * ShortCircuitReplicaInfo objects may contain a replica, or an InvalidToken
   * exception.
   */
  private final HashMap<ExtendedBlockId, Waitable<ShortCircuitReplicaInfo>>
      replicaInfoMap = new HashMap<ExtendedBlockId,
          Waitable<ShortCircuitReplicaInfo>>();

  /**
   * The CacheCleaner.  We don't create this and schedule it until it becomes
   * necessary.
   */
  private CacheCleaner cacheCleaner;

  /**
   * Tree of evictable elements.
   *
   * Maps (unique) insertion time in nanoseconds to the element.
   */
  private final TreeMap<Long, ShortCircuitReplica> evictable =
      new TreeMap<Long, ShortCircuitReplica>();

  /**
   * Maximum total size of the cache, including both mmapped and
   * non-mmapped elements.
   */
  private final int maxTotalSize;

  /**
   * Non-mmaped elements older than this will be closed.
   */
  private long maxNonMmappedEvictableLifespanMs;

  /**
   * Tree of mmaped evictable elements.
   *
   * Maps (unique) insertion time in nanoseconds to the element.
   */
  private final TreeMap<Long, ShortCircuitReplica> evictableMmapped =
      new TreeMap<Long, ShortCircuitReplica>();

  /**
   * Maximum number of mmaped evictable elements.
   */
  private int maxEvictableMmapedSize;

  /**
   * Mmaped elements older than this will be closed.
   */
  private final long maxEvictableMmapedLifespanMs;

  /**
   * The minimum number of milliseconds we'll wait after an unsuccessful
   * mmap attempt before trying again.
   */
  private final long mmapRetryTimeoutMs;

  /**
   * How long we will keep replicas in the cache before declaring them
   * to be stale.
   */
  private final long staleThresholdMs;

  /**
   * True if the ShortCircuitCache is closed.
   */
  private boolean closed = false;

  /**
   * Number of existing mmaps associated with this cache.
   */
  private int outstandingMmapCount = 0;

  /**
   * Manages short-circuit shared memory segments for the client.
   */
  private final DfsClientShmManager shmManager;

  /** Builds a cache configured from the client's short-circuit settings. */
  public static ShortCircuitCache fromConf(ShortCircuitConf conf) {
    return new ShortCircuitCache(
        conf.getShortCircuitStreamsCacheSize(),
        conf.getShortCircuitStreamsCacheExpiryMs(),
        conf.getShortCircuitMmapCacheSize(),
        conf.getShortCircuitMmapCacheExpiryMs(),
        conf.getShortCircuitMmapCacheRetryTimeout(),
        conf.getShortCircuitCacheStaleThresholdMs(),
        conf.getShortCircuitSharedMemoryWatcherInterruptCheckMs());
  }

  public ShortCircuitCache(int maxTotalSize, long maxNonMmappedEvictableLifespanMs,
      int maxEvictableMmapedSize, long maxEvictableMmapedLifespanMs,
      long mmapRetryTimeoutMs, long staleThresholdMs, int shmInterruptCheckMs) {
    Preconditions.checkArgument(maxTotalSize >= 0);
    this.maxTotalSize = maxTotalSize;
    Preconditions.checkArgument(maxNonMmappedEvictableLifespanMs >= 0);
    this.maxNonMmappedEvictableLifespanMs = maxNonMmappedEvictableLifespanMs;
    Preconditions.checkArgument(maxEvictableMmapedSize >= 0);
    this.maxEvictableMmapedSize = maxEvictableMmapedSize;
    Preconditions.checkArgument(maxEvictableMmapedLifespanMs >= 0);
    this.maxEvictableMmapedLifespanMs = maxEvictableMmapedLifespanMs;
    this.mmapRetryTimeoutMs = mmapRetryTimeoutMs;
    this.staleThresholdMs = staleThresholdMs;
    // Shared memory is optional: it requires a positive interrupt-check
    // interval and a usable DomainSocketWatcher on this platform.
    DfsClientShmManager shmManager = null;
    if ((shmInterruptCheckMs > 0) &&
        (DomainSocketWatcher.getLoadingFailureReason() == null)) {
      try {
        shmManager = new DfsClientShmManager(shmInterruptCheckMs);
      } catch (IOException e) {
        LOG.error("failed to create ShortCircuitShmManager", e);
      }
    }
    this.shmManager = shmManager;
  }

  public long getStaleThresholdMs() {
    return staleThresholdMs;
  }

  /**
   * Increment the reference count of a replica, and remove it from any free
   * list it may be in.
   *
   * You must hold the cache lock while calling this function.
   *
   * @param replica The replica we're removing.
   */
  private void ref(ShortCircuitReplica replica) {
    lock.lock();
    try {
      // NOTE(review): Guava's checkArgument only substitutes %s placeholders;
      // the %d below is never expanded in the failure message — consider %s.
      Preconditions.checkArgument(replica.refCount > 0,
          "can't ref %s because its refCount reached %d", replica,
          replica.refCount);
      Long evictableTimeNs = replica.getEvictableTimeNs();
      replica.refCount++;
      // A non-null evictable time means the replica was sitting on one of the
      // eviction lists; re-referencing it must take it off that list.
      if (evictableTimeNs != null) {
        String removedFrom = removeEvictable(replica);
        if (LOG.isTraceEnabled()) {
          LOG.trace(this + ": " + removedFrom +
              " no longer contains " + replica + ".  refCount " +
              (replica.refCount - 1) + " -> " + replica.refCount +
              StringUtils.getStackTrace(Thread.currentThread()));
        }
      } else if (LOG.isTraceEnabled()) {
        LOG.trace(this + ": replica  refCount " +
            (replica.refCount - 1) + " -> " + replica.refCount +
            StringUtils.getStackTrace(Thread.currentThread()));
      }
    } finally {
      lock.unlock();
    }
  }

  /**
   * Unreference a replica.
   *
   * You must hold the cache lock while calling this function.
   *
   * @param replica The replica being unreferenced.
   */
  void unref(ShortCircuitReplica replica) {
    lock.lock();
    try {
      // If the replica is stale or unusable, but we haven't purged it yet,
      // let's do that.  It would be a shame to evict a non-stale replica so
      // that we could put a stale or unusable one into the cache.
      if (!replica.purged) {
        String purgeReason = null;
        if (!replica.getDataStream().getChannel().isOpen()) {
          purgeReason = "purging replica because its data channel is closed.";
        } else if (!replica.getMetaStream().getChannel().isOpen()) {
          purgeReason = "purging replica because its meta channel is closed.";
        } else if (replica.isStale()) {
          purgeReason = "purging replica because it is stale.";
        }
        if (purgeReason != null) {
          LOG.debug("{}: {}", this, purgeReason);
          purge(replica);
        }
      }
      String addedString = "";
      boolean shouldTrimEvictionMaps = false;
      int newRefCount = --replica.refCount;
      if (newRefCount == 0) {
        // Close replica, since there are no remaining references to it.
        Preconditions.checkArgument(replica.purged,
            "Replica %s reached a refCount of 0 without being purged", replica);
        replica.close();
      } else if (newRefCount == 1) {
        // refCount of exactly 1 means only the cache itself holds the replica,
        // so (unless purged) it becomes a candidate for eviction.
        Preconditions.checkState(null == replica.getEvictableTimeNs(),
            "Replica %s had a refCount higher than 1, " +
              "but was still evictable (evictableTimeNs = %d)",
              replica, replica.getEvictableTimeNs());
        if (!replica.purged) {
          // Add the replica to the end of an eviction list.
          // Eviction lists are sorted by time.
          if (replica.hasMmap()) {
            insertEvictable(System.nanoTime(), replica, evictableMmapped);
            addedString = "added to evictableMmapped, ";
          } else {
            insertEvictable(System.nanoTime(), replica, evictable);
            addedString = "added to evictable, ";
          }
          shouldTrimEvictionMaps = true;
        }
      } else {
        Preconditions.checkArgument(replica.refCount >= 0,
            "replica's refCount went negative (refCount = %d" +
            " for %s)", replica.refCount, replica);
      }
      if (LOG.isTraceEnabled()) {
        LOG.trace(this + ": unref replica " + replica +
            ": " + addedString + " refCount " +
            (newRefCount + 1) + " -> " + newRefCount +
            StringUtils.getStackTrace(Thread.currentThread()));
      }
      if (shouldTrimEvictionMaps) {
        trimEvictionMaps();
      }
    } finally {
      lock.unlock();
    }
  }

  /**
   * Demote old evictable mmaps into the regular eviction map.
   *
   * You must hold the cache lock while calling this function.
* * @param now Current time in monotonic milliseconds. * @return Number of replicas demoted. */ private int demoteOldEvictableMmaped(long now) { int numDemoted = 0; boolean needMoreSpace = false; Long evictionTimeNs = Long.valueOf(0); while (true) { Entry<Long, ShortCircuitReplica> entry = evictableMmapped.ceilingEntry(evictionTimeNs); if (entry == null) break; evictionTimeNs = entry.getKey(); long evictionTimeMs = TimeUnit.MILLISECONDS.convert(evictionTimeNs, TimeUnit.NANOSECONDS); if (evictionTimeMs + maxEvictableMmapedLifespanMs >= now) { if (evictableMmapped.size() < maxEvictableMmapedSize) { break; } needMoreSpace = true; } ShortCircuitReplica replica = entry.getValue(); if (LOG.isTraceEnabled()) { String rationale = needMoreSpace ? "because we need more space" : "because it's too old"; LOG.trace("demoteOldEvictable: demoting " + replica + ": " + rationale + ": " + StringUtils.getStackTrace(Thread.currentThread())); } removeEvictable(replica, evictableMmapped); munmap(replica); insertEvictable(evictionTimeNs, replica, evictable); numDemoted++; } return numDemoted; } /** * Trim the eviction lists. */ private void trimEvictionMaps() { long now = Time.monotonicNow(); demoteOldEvictableMmaped(now); while (true) { long evictableSize = evictable.size(); long evictableMmappedSize = evictableMmapped.size(); if (evictableSize + evictableMmappedSize <= maxTotalSize) { return; } ShortCircuitReplica replica; if (evictableSize == 0) { replica = evictableMmapped.firstEntry().getValue(); } else { replica = evictable.firstEntry().getValue(); } if (LOG.isTraceEnabled()) { LOG.trace(this + ": trimEvictionMaps is purging " + replica + StringUtils.getStackTrace(Thread.currentThread())); } purge(replica); } } /** * Munmap a replica, updating outstandingMmapCount. * * @param replica The replica to munmap. */ private void munmap(ShortCircuitReplica replica) { replica.munmap(); outstandingMmapCount--; } /** * Remove a replica from an evictable map. 
* * @param replica The replica to remove. * @return The map it was removed from. */ private String removeEvictable(ShortCircuitReplica replica) { if (replica.hasMmap()) { removeEvictable(replica, evictableMmapped); return "evictableMmapped"; } else { removeEvictable(replica, evictable); return "evictable"; } } /** * Remove a replica from an evictable map. * * @param replica The replica to remove. * @param map The map to remove it from. */ private void removeEvictable(ShortCircuitReplica replica, TreeMap<Long, ShortCircuitReplica> map) { Long evictableTimeNs = replica.getEvictableTimeNs(); Preconditions.checkNotNull(evictableTimeNs); ShortCircuitReplica removed = map.remove(evictableTimeNs); Preconditions.checkState(removed == replica, "failed to make %s unevictable", replica); replica.setEvictableTimeNs(null); } /** * Insert a replica into an evictable map. * * If an element already exists with this eviction time, we add a nanosecond * to it until we find an unused key. * * @param evictionTimeNs The eviction time in absolute nanoseconds. * @param replica The replica to insert. * @param map The map to insert it into. */ private void insertEvictable(Long evictionTimeNs, ShortCircuitReplica replica, TreeMap<Long, ShortCircuitReplica> map) { while (map.containsKey(evictionTimeNs)) { evictionTimeNs++; } Preconditions.checkState(null == replica.getEvictableTimeNs()); replica.setEvictableTimeNs(evictionTimeNs); map.put(evictionTimeNs, replica); } /** * Purge a replica from the cache. * * This doesn't necessarily close the replica, since there may be * outstanding references to it. However, it does mean the cache won't * hand it out to anyone after this. * * You must hold the cache lock while calling this function. * * @param replica The replica being removed. 
*/ private void purge(ShortCircuitReplica replica) { boolean removedFromInfoMap = false; String evictionMapName = null; Preconditions.checkArgument(!replica.purged); replica.purged = true; Waitable<ShortCircuitReplicaInfo> val = replicaInfoMap.get(replica.key); if (val != null) { ShortCircuitReplicaInfo info = val.getVal(); if ((info != null) && (info.getReplica() == replica)) { replicaInfoMap.remove(replica.key); removedFromInfoMap = true; } } Long evictableTimeNs = replica.getEvictableTimeNs(); if (evictableTimeNs != null) { evictionMapName = removeEvictable(replica); } if (LOG.isTraceEnabled()) { StringBuilder builder = new StringBuilder(); builder.append(this).append(": ").append(": purged "). append(replica).append(" from the cache."); if (removedFromInfoMap) { builder.append(" Removed from the replicaInfoMap."); } if (evictionMapName != null) { builder.append(" Removed from ").append(evictionMapName); } LOG.trace(builder.toString()); } unref(replica); } /** * Fetch or create a replica. * * You must hold the cache lock while calling this function. * * @param key Key to use for lookup. * @param creator Replica creator callback. Will be called without * the cache lock being held. * * @return Null if no replica could be found or created. * The replica, otherwise. */ public ShortCircuitReplicaInfo fetchOrCreate(ExtendedBlockId key, ShortCircuitReplicaCreator creator) { Waitable<ShortCircuitReplicaInfo> newWaitable = null; lock.lock(); try { ShortCircuitReplicaInfo info = null; do { if (closed) { LOG.trace("{}: can't fethchOrCreate {} because the cache is closed.", this, key); return null; } Waitable<ShortCircuitReplicaInfo> waitable = replicaInfoMap.get(key); if (waitable != null) { try { info = fetch(key, waitable); } catch (RetriableException e) { LOG.debug("{}: retrying {}", this, e.getMessage()); continue; } } } while (false); if (info != null) return info; // We need to load the replica ourselves. 
newWaitable = new Waitable<ShortCircuitReplicaInfo>(lock.newCondition()); replicaInfoMap.put(key, newWaitable); } finally { lock.unlock(); } return create(key, creator, newWaitable); } /** * Fetch an existing ReplicaInfo object. * * @param key The key that we're using. * @param waitable The waitable object to wait on. * @return The existing ReplicaInfo object, or null if there is * none. * * @throws RetriableException If the caller needs to retry. */ private ShortCircuitReplicaInfo fetch(ExtendedBlockId key, Waitable<ShortCircuitReplicaInfo> waitable) throws RetriableException { // Another thread is already in the process of loading this // ShortCircuitReplica. So we simply wait for it to complete. ShortCircuitReplicaInfo info; try { LOG.trace("{}: found waitable for {}", this, key); info = waitable.await(); } catch (InterruptedException e) { LOG.info(this + ": interrupted while waiting for " + key); Thread.currentThread().interrupt(); throw new RetriableException("interrupted"); } if (info.getInvalidTokenException() != null) { LOG.info(this + ": could not get " + key + " due to InvalidToken " + "exception.", info.getInvalidTokenException()); return info; } ShortCircuitReplica replica = info.getReplica(); if (replica == null) { LOG.warn(this + ": failed to get " + key); return info; } if (replica.purged) { // Ignore replicas that have already been purged from the cache. throw new RetriableException("Ignoring purged replica " + replica + ". Retrying."); } // Check if the replica is stale before using it. // If it is, purge it and retry. if (replica.isStale()) { LOG.info(this + ": got stale replica " + replica + ". Removing " + "this replica from the replicaInfoMap and retrying."); // Remove the cache's reference to the replica. This may or may not // trigger a close. 
purge(replica); throw new RetriableException("ignoring stale replica " + replica); } ref(replica); return info; } private ShortCircuitReplicaInfo create(ExtendedBlockId key, ShortCircuitReplicaCreator creator, Waitable<ShortCircuitReplicaInfo> newWaitable) { // Handle loading a new replica. ShortCircuitReplicaInfo info = null; try { LOG.trace("{}: loading {}", this, key); info = creator.createShortCircuitReplicaInfo(); } catch (RuntimeException e) { LOG.warn(this + ": failed to load " + key, e); } if (info == null) info = new ShortCircuitReplicaInfo(); lock.lock(); try { if (info.getReplica() != null) { // On success, make sure the cache cleaner thread is running. LOG.trace("{}: successfully loaded {}", this, info.getReplica()); startCacheCleanerThreadIfNeeded(); // Note: new ShortCircuitReplicas start with a refCount of 2, // indicating that both this cache and whoever requested the // creation of the replica hold a reference. So we don't need // to increment the reference count here. } else { // On failure, remove the waitable from the replicaInfoMap. 
Waitable<ShortCircuitReplicaInfo> waitableInMap = replicaInfoMap.get(key); if (waitableInMap == newWaitable) replicaInfoMap.remove(key); if (info.getInvalidTokenException() != null) { LOG.info(this + ": could not load " + key + " due to InvalidToken " + "exception.", info.getInvalidTokenException()); } else { LOG.warn(this + ": failed to load " + key); } } newWaitable.provide(info); } finally { lock.unlock(); } return info; } private void startCacheCleanerThreadIfNeeded() { if (cacheCleaner == null) { cacheCleaner = new CacheCleaner(); long rateMs = cacheCleaner.getRateInMs(); ScheduledFuture<?> future = cleanerExecutor.scheduleAtFixedRate(cacheCleaner, rateMs, rateMs, TimeUnit.MILLISECONDS); cacheCleaner.setFuture(future); LOG.debug("{}: starting cache cleaner thread which will run every {} ms", this, rateMs); } } ClientMmap getOrCreateClientMmap(ShortCircuitReplica replica, boolean anchored) { Condition newCond; lock.lock(); try { while (replica.mmapData != null) { if (replica.mmapData instanceof MappedByteBuffer) { ref(replica); MappedByteBuffer mmap = (MappedByteBuffer)replica.mmapData; return new ClientMmap(replica, mmap, anchored); } else if (replica.mmapData instanceof Long) { long lastAttemptTimeMs = (Long)replica.mmapData; long delta = Time.monotonicNow() - lastAttemptTimeMs; if (delta < mmapRetryTimeoutMs) { LOG.trace("{}: can't create client mmap for {} because we failed to" + " create one just {}ms ago.", this, replica, delta); return null; } LOG.trace("{}: retrying client mmap for {}, {} ms after the previous " + "failure.", this, replica, delta); } else if (replica.mmapData instanceof Condition) { Condition cond = (Condition)replica.mmapData; cond.awaitUninterruptibly(); } else { Preconditions.checkState(false, "invalid mmapData type %s", replica.mmapData.getClass().getName()); } } newCond = lock.newCondition(); replica.mmapData = newCond; } finally { lock.unlock(); } MappedByteBuffer map = replica.loadMmapInternal(); lock.lock(); try { if (map == 
null) { replica.mmapData = Long.valueOf(Time.monotonicNow()); newCond.signalAll(); return null; } else { outstandingMmapCount++; replica.mmapData = map; ref(replica); newCond.signalAll(); return new ClientMmap(replica, map, anchored); } } finally { lock.unlock(); } } /** * Close the cache and free all associated resources. */ @Override public void close() { try { lock.lock(); if (closed) return; closed = true; LOG.info(this + ": closing"); maxNonMmappedEvictableLifespanMs = 0; maxEvictableMmapedSize = 0; // Close and join cacheCleaner thread. IOUtilsClient.cleanup(LOG, cacheCleaner); // Purge all replicas. while (true) { Entry<Long, ShortCircuitReplica> entry = evictable.firstEntry(); if (entry == null) break; purge(entry.getValue()); } while (true) { Entry<Long, ShortCircuitReplica> entry = evictableMmapped.firstEntry(); if (entry == null) break; purge(entry.getValue()); } } finally { lock.unlock(); } releaserExecutor.shutdown(); cleanerExecutor.shutdown(); // wait for existing tasks to terminate try { if (!releaserExecutor.awaitTermination(30, TimeUnit.SECONDS)) { LOG.error("Forcing SlotReleaserThreadPool to shutdown!"); releaserExecutor.shutdownNow(); } } catch (InterruptedException e) { releaserExecutor.shutdownNow(); Thread.currentThread().interrupt(); LOG.error("Interrupted while waiting for SlotReleaserThreadPool " + "to terminate", e); } // wait for existing tasks to terminate try { if (!cleanerExecutor.awaitTermination(30, TimeUnit.SECONDS)) { LOG.error("Forcing CleanerThreadPool to shutdown!"); cleanerExecutor.shutdownNow(); } } catch (InterruptedException e) { cleanerExecutor.shutdownNow(); Thread.currentThread().interrupt(); LOG.error("Interrupted while waiting for CleanerThreadPool " + "to terminate", e); } IOUtilsClient.cleanup(LOG, shmManager); } @VisibleForTesting // ONLY for testing public interface CacheVisitor { void visit(int numOutstandingMmaps, Map<ExtendedBlockId, ShortCircuitReplica> replicas, Map<ExtendedBlockId, InvalidToken> failedLoads, 
Map<Long, ShortCircuitReplica> evictable, Map<Long, ShortCircuitReplica> evictableMmapped); } @VisibleForTesting // ONLY for testing public void accept(CacheVisitor visitor) { lock.lock(); try { Map<ExtendedBlockId, ShortCircuitReplica> replicas = new HashMap<ExtendedBlockId, ShortCircuitReplica>(); Map<ExtendedBlockId, InvalidToken> failedLoads = new HashMap<ExtendedBlockId, InvalidToken>(); for (Entry<ExtendedBlockId, Waitable<ShortCircuitReplicaInfo>> entry : replicaInfoMap.entrySet()) { Waitable<ShortCircuitReplicaInfo> waitable = entry.getValue(); if (waitable.hasVal()) { if (waitable.getVal().getReplica() != null) { replicas.put(entry.getKey(), waitable.getVal().getReplica()); } else { // The exception may be null here, indicating a failed load that // isn't the result of an invalid block token. failedLoads.put(entry.getKey(), waitable.getVal().getInvalidTokenException()); } } } LOG.debug("visiting {} with outstandingMmapCount={}, replicas={}, " + "failedLoads={}, evictable={}, evictableMmapped={}", visitor.getClass().getName(), outstandingMmapCount, replicas, failedLoads, evictable, evictableMmapped); visitor.visit(outstandingMmapCount, replicas, failedLoads, evictable, evictableMmapped); } finally { lock.unlock(); } } @Override public String toString() { return "ShortCircuitCache(0x" + Integer.toHexString(System.identityHashCode(this)) + ")"; } /** * Allocate a new shared memory slot. * * @param datanode The datanode to allocate a shm slot with. * @param peer A peer connected to the datanode. * @param usedPeer Will be set to true if we use up the provided peer. * @param blockId The block id and block pool id of the block we're * allocating this slot for. * @param clientName The name of the DFSClient allocating the shared * memory. * @return Null if short-circuit shared memory is disabled; * a short-circuit memory slot otherwise. * @throws IOException An exception if there was an error talking to * the datanode. 
*/ public Slot allocShmSlot(DatanodeInfo datanode, DomainPeer peer, MutableBoolean usedPeer, ExtendedBlockId blockId, String clientName) throws IOException { if (shmManager != null) { return shmManager.allocSlot(datanode, peer, usedPeer, blockId, clientName); } else { return null; } } /** * Free a slot immediately. * * ONLY use this if the DataNode is not yet aware of the slot. * * @param slot The slot to free. */ public void freeSlot(Slot slot) { Preconditions.checkState(shmManager != null); slot.makeInvalid(); shmManager.freeSlot(slot); } /** * Schedule a shared memory slot to be released. * * @param slot The slot to release. */ public void scheduleSlotReleaser(Slot slot) { Preconditions.checkState(shmManager != null); releaserExecutor.execute(new SlotReleaser(slot)); } @VisibleForTesting public DfsClientShmManager getDfsClientShmManager() { return shmManager; } }
apache-2.0
gouravshenoy/airavata
modules/xbaya-gui/src/test/java/org/apache/airavata/xbaya/interpreter/ComplexMathService.java
1229
/** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.airavata.xbaya.interpreter; public class ComplexMathService { public int adder(int param1, int param2) { return param1 + param2; } public int multiplier(int param1, int param2) { return param1 * param2; } public String echo(String str) { return str; } public String concatenate(String str1, String str2) { return str1 + str2; } }
apache-2.0
inloop/Knight
knight-compiler/src/main/java/eu/inloop/knight/builder/module/ScreenModuleBuilder.java
7717
package eu.inloop.knight.builder.module; import android.app.Activity; import android.os.Bundle; import com.squareup.javapoet.AnnotationSpec; import com.squareup.javapoet.ClassName; import com.squareup.javapoet.FieldSpec; import com.squareup.javapoet.MethodSpec; import com.squareup.javapoet.TypeName; import java.util.List; import javax.lang.model.element.ExecutableElement; import javax.lang.model.element.Modifier; import dagger.Provides; import eu.inloop.knight.PresenterPool; import eu.inloop.knight.builder.NamedExtra; import eu.inloop.knight.builder.NavigatorBuilder; import eu.inloop.knight.core.StateManager; import eu.inloop.knight.name.GCN; import eu.inloop.knight.scope.ScreenScope; import eu.inloop.knight.util.ProcessorError; import eu.inloop.knight.util.StringUtils; /** * Class {@link ScreenModuleBuilder} is used for creating Screen Module class file. * * @author FrantisekGazo * @version 2015-10-19 */ public class ScreenModuleBuilder extends BaseModuleBuilder { private static final String METHOD_NAME_INIT = "init"; private static final String FIELD_NAME_STATE_MANAGER = "mStateManager"; private static final String STATEFUL_ID_FORMAT = "[%s]%s"; /** * Constructor * * @param className Class name of Activity for which this module will be generated. */ public ScreenModuleBuilder(ClassName className) throws ProcessorError { super(ScreenScope.class, GCN.SCREEN_MODULE, className); } /** * Constructor * * @param genClassName Name of generated module class. * @param className Class name of Activity for which this module will be generated. 
*/ public ScreenModuleBuilder(GCN genClassName, ClassName className) throws ProcessorError { super(ScreenScope.class, genClassName, className); } @Override protected void addScopeSpecificPart() { // Application attribute FieldSpec stateField = FieldSpec.builder(StateManager.class, FIELD_NAME_STATE_MANAGER, Modifier.PRIVATE, Modifier.FINAL).build(); getBuilder().addField(stateField); // constructor String stateManager = "stateManager"; MethodSpec.Builder constructor = MethodSpec.constructorBuilder() .addModifiers(Modifier.PUBLIC) .addParameter(StateManager.class, stateManager) .addStatement("$N = $N", stateField, stateManager); addToConstructor(constructor); getBuilder().addMethod(constructor.build()); // add provides-method for Presenter Pool addProvidesPresenterPool(); } private void addProvidesPresenterPool() { ClassName className = ClassName.get(PresenterPool.class); MethodSpec.Builder method = MethodSpec.methodBuilder(createProvideMethodName(className.simpleName())) .addAnnotation(Provides.class) .addAnnotation(ScreenScope.class) .addModifiers(Modifier.PUBLIC) .returns(className); method.addStatement("return $N.manage($S, new $T())", FIELD_NAME_STATE_MANAGER, createStateManagerId(method.build()), className); getBuilder().addMethod(method.build()); } /** * Adds additional parameters or statements to constructor. * * @param constructor Constructor builder. */ protected void addToConstructor(MethodSpec.Builder constructor) { String activity = "activity"; constructor.addParameter(Activity.class, activity) .addStatement("$N($N)", METHOD_NAME_INIT, activity); } @Override protected void addProvideStatement(MethodSpec.Builder method, ExecutableElement e, String callFormat, Object... 
args) { MethodSpec m = method.build(); // check if is screen scoped if (m.annotations.contains(AnnotationSpec.builder(ScreenScope.class).build())) { // manage state only if scoped method.addCode("return $N.manage($S, ", FIELD_NAME_STATE_MANAGER, createStateManagerId(m)); addProvideCode(false, method, e, callFormat, args); method.addCode(");\n"); } else { addProvideCode(true, method, e, callFormat, args); } } private String createStateManagerId(MethodSpec m) { return String.format(STATEFUL_ID_FORMAT, m.returnType, m.name); } /** * Adds <code>init</code> method to the generated module. * * @param extraFields List of Extras that will be initialized. */ public void addInitMethod(List<NamedExtra> extraFields) { String activity = "activity"; String extras = "extras"; String action = "action"; MethodSpec.Builder method = MethodSpec.methodBuilder(METHOD_NAME_INIT) .addModifiers(Modifier.PRIVATE) .addParameter(Activity.class, activity); // do not retrieve extras if empty if (!extraFields.isEmpty()) { method.addStatement("$T $N = $N.getIntent().getExtras()", Bundle.class, extras, activity); method.addStatement("$T $N = $N.getIntent().getAction()", String.class, action, activity); method.beginControlFlow("if ($N != null)", extras); for (NamedExtra namedExtra : extraFields) { TypeName typeName = ClassName.get(namedExtra.getElement().asType()); String name = namedExtra.getName(); // add field FieldSpec field = FieldSpec.builder(typeName, createFieldName(name), Modifier.PRIVATE).build(); getBuilder().addField(field); // add statement to init method String extraId = NavigatorBuilder.getExtraId(getArgClassName(), name); method.addStatement("$N = ($T) $N.$N($S)", field, typeName, extras, getExtraGetterName(typeName), extraId); addProvidesField(namedExtra, field); } method.endControlFlow() .beginControlFlow("else if ($N != null && $N.equals($S))", action, action, "android.intent.action.MAIN") .addCode("// MAIN won't have any extras\n") .addStatement("return") .endControlFlow() 
.beginControlFlow("else") .addCode("// throw exception if Intent was wrongly created\n") .addStatement("throw new $T($S)", IllegalStateException.class, "Extras were not set") .endControlFlow(); } getBuilder().addMethod(method.build()); } /** * Adds <code>provides</code> method for given <code>field</code>. * * @param namedExtra Extra that defines the <code>field</code>. * @param field Field. */ private void addProvidesField(NamedExtra namedExtra, FieldSpec field) { MethodSpec.Builder method = MethodSpec.methodBuilder(createProvideMethodName(namedExtra.getName())) .addAnnotation(Provides.class) .addModifiers(Modifier.PUBLIC) .returns(field.type) .addStatement("return $N", field); // add also Qualifier annotations for (AnnotationSpec a : getQualifiers(namedExtra.getElement())) { method.addAnnotation(a); } getBuilder().addMethod(method.build()); } /** * Returns getter for given Extra type. */ private String getExtraGetterName(TypeName typeName) { if (typeName.isPrimitive()) { return String.format("get%s", StringUtils.startUpperCase(typeName.toString())); } else { return "get"; } } }
apache-2.0
Davi-He/sky_lkl
Demo/src/com/imagpay/iMagPayApp.java
1556
package com.imagpay; import java.util.Locale; import android.app.Application; import android.content.Context; import android.content.SharedPreferences; import android.content.SharedPreferences.Editor; import android.content.res.Configuration; import android.util.DisplayMetrics; public final class iMagPayApp extends Application { private final static String TAG = "iMagPayApp"; public final static String LOCAL_EN = "en"; public final static String LOCAL_CN = "zh"; private SharedPreferences _preferences = null; @Override public void onCreate() { super.onCreate(); _preferences = getSharedPreferences(TAG, Context.MODE_PRIVATE); } public String getLanguage() { return get("language", Locale.getDefault().getLanguage()); } public void setLanguage(String language) { if (LOCAL_CN.equalsIgnoreCase(language)) Locale.setDefault(Locale.SIMPLIFIED_CHINESE); else Locale.setDefault(Locale.ENGLISH); Configuration config = getResources().getConfiguration(); config.locale = Locale.getDefault(); DisplayMetrics metrics = getResources().getDisplayMetrics(); getResources().updateConfiguration(config, metrics); set("language", language); } private String get(String key, String defaultValue) { return _preferences.getString(key, defaultValue); } private void set(String key, String value) { commit(key, value); } private void commit(String key, String value) { Editor editor = _preferences.edit(); editor.putString(key, value); editor.commit(); } }
apache-2.0
harishpalk/Jadira
usertype.spi/src/main/java/org/jadira/usertype/spi/repository/JpaBaseRepository.java
4310
/* * Copyright 2012 Christopher Pheby * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jadira.usertype.spi.repository; import java.io.Serializable; import javax.persistence.LockModeType; import org.hibernate.Session; import org.hibernate.SessionFactory; import org.hibernate.jpa.HibernateEntityManagerFactory; import org.hibernate.engine.spi.SessionImplementor; import org.hibernate.metadata.ClassMetadata; import org.jadira.usertype.spi.utils.reflection.TypeHelper; /** * Base implementation of a Repository / type-safe DAO using Hibernate JPA * * @param <T> Entity type that this Repository handles * @param <ID> The type that identifies the ID column for the supported entity type */ public abstract class JpaBaseRepository<T extends Serializable, ID extends Serializable> extends JpaSearchRepository<T, ID> implements BaseRepository<T, ID> { protected JpaBaseRepository() { } /** * Determines the ID for the entity * * @param entity The entity to retrieve the ID for * @return The ID */ protected ID extractId(T entity) { final Class<?> entityClass = TypeHelper.getTypeArguments(JpaBaseRepository.class, this.getClass()).get(0); final SessionFactory sf = ((HibernateEntityManagerFactory) getEntityManager().getEntityManagerFactory()).getSessionFactory(); final ClassMetadata cmd = sf.getClassMetadata(entityClass); final SessionImplementor si = (SessionImplementor)(getEntityManager().getDelegate()); @SuppressWarnings("unchecked") final ID result = (ID) cmd.getIdentifier(entity, si); 
return result; } /** * {@inheritDoc} */ @Override public T persist(T entity) { // Case of new, non-persisted entity if (extractId(entity) == null) { getEntityManager().persist(entity); } else if (!getEntityManager().contains(entity)) { // In the case of an attached entity, we do nothing (it // will be persisted automatically on synchronisation) // But... in the case of an unattached, but persisted entity // we perform a merge to re-attach and persist it entity = getEntityManager().merge(entity); } return entity; } /** * {@inheritDoc} */ @Override public void remove(ID entityId) { T entity = getEntityManager().find(getEntityClass(), entityId); // Case of attached entity - simply remove it if (getEntityManager().contains(entity)) { getEntityManager().remove(entity); } // Case of unattached entity, first it is necessary to perform // a merge, before doing the remove else { entity = getEntityManager().merge(entity); getEntityManager().remove(entity); } } /** * {@inheritDoc} */ @Override public T refresh(T entity) { // Attempting to refresh a non-persisted entity // will result in an exception if (extractId(entity) == null) { // causes exception getEntityManager().refresh(entity); } // Case of attached empty - this gets refreshed else if (getEntityManager().contains(entity)) { getEntityManager().refresh(entity); } // Case of unattached entity, first it is necessary to perform // a merge, before doing the refresh else { entity = getEntityManager().merge(entity); getEntityManager().refresh(entity); } return entity; } /** * {@inheritDoc} */ @Override public T evict(T entity) { Session session = (Session) getEntityManager().getDelegate(); session.evict(entity); return entity; } /** * {@inheritDoc} */ @Override public void flush() { getEntityManager().flush(); } /** * {@inheritDoc} */ @Override public void lock(T entity, LockModeType lockMode) { getEntityManager().lock(entity, lockMode); } }
apache-2.0
fanlehai/CodePractice
java/src/main/java/com/fanlehai/java/atunit/Test.java
192
// The @Test tag. package com.fanlehai.java.atunit; import java.lang.annotation.*; @Target(ElementType.METHOD) @Retention(RetentionPolicy.RUNTIME) public @interface Test { } /// :~
apache-2.0
rhoml/elasticsearch
core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
21473
/* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch licenses this file to you under * the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest; import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.InvalidTypeNameException; import 
org.elasticsearch.percolator.PercolatorService; import java.io.IOException; import java.util.*; /** * Service responsible for submitting mapping changes */ public class MetaDataMappingService extends AbstractComponent { private final ClusterService clusterService; private final IndicesService indicesService; final ClusterStateTaskExecutor<RefreshTask> refreshExecutor = new RefreshTaskExecutor(); final ClusterStateTaskExecutor<PutMappingClusterStateUpdateRequest> putMappingExecutor = new PutMappingExecutor(); private final NodeServicesProvider nodeServicesProvider; @Inject public MetaDataMappingService(Settings settings, ClusterService clusterService, IndicesService indicesService, NodeServicesProvider nodeServicesProvider) { super(settings); this.clusterService = clusterService; this.indicesService = indicesService; this.nodeServicesProvider = nodeServicesProvider; } static class RefreshTask { final String index; final String indexUUID; final String[] types; RefreshTask(String index, final String indexUUID, String[] types) { this.index = index; this.indexUUID = indexUUID; this.types = types; } } class RefreshTaskExecutor implements ClusterStateTaskExecutor<RefreshTask> { @Override public BatchResult<RefreshTask> execute(ClusterState currentState, List<RefreshTask> tasks) throws Exception { ClusterState newClusterState = executeRefresh(currentState, tasks); return BatchResult.<RefreshTask>builder().successes(tasks).build(newClusterState); } } /** * Batch method to apply all the queued refresh or update operations. The idea is to try and batch as much * as possible so we won't create the same index all the time for example for the updates on the same mapping * and generate a single cluster change event out of all of those. 
*/ ClusterState executeRefresh(final ClusterState currentState, final List<RefreshTask> allTasks) throws Exception { if (allTasks.isEmpty()) { return currentState; } // break down to tasks per index, so we can optimize the on demand index service creation // to only happen for the duration of a single index processing of its respective events Map<String, List<RefreshTask>> tasksPerIndex = new HashMap<>(); for (RefreshTask task : allTasks) { if (task.index == null) { logger.debug("ignoring a mapping task of type [{}] with a null index.", task); } tasksPerIndex.computeIfAbsent(task.index, k -> new ArrayList<>()).add(task); } boolean dirty = false; MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); for (Map.Entry<String, List<RefreshTask>> entry : tasksPerIndex.entrySet()) { String index = entry.getKey(); IndexMetaData indexMetaData = mdBuilder.get(index); if (indexMetaData == null) { // index got deleted on us, ignore... logger.debug("[{}] ignoring tasks - index meta data doesn't exist", index); continue; } // the tasks lists to iterate over, filled with the list of mapping tasks, trying to keep // the latest (based on order) update mapping one per node List<RefreshTask> allIndexTasks = entry.getValue(); List<RefreshTask> tasks = new ArrayList<>(); for (RefreshTask task : allIndexTasks) { if (!indexMetaData.isSameUUID(task.indexUUID)) { logger.debug("[{}] ignoring task [{}] - index meta data doesn't match task uuid", index, task); continue; } tasks.add(task); } // construct the actual index if needed, and make sure the relevant mappings are there boolean removeIndex = false; IndexService indexService = indicesService.indexService(index); if (indexService == null) { // we need to create the index here, and add the current mapping to it, so we can merge indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST); removeIndex = true; Set<String> typesToIntroduce = new HashSet<>(); for (RefreshTask task : 
tasks) { Collections.addAll(typesToIntroduce, task.types); } for (String type : typesToIntroduce) { // only add the current relevant mapping (if exists) if (indexMetaData.getMappings().containsKey(type)) { // don't apply the default mapping, it has been applied when the mapping was created indexService.mapperService().merge(type, indexMetaData.getMappings().get(type).source(), false, true); } } } IndexMetaData.Builder builder = IndexMetaData.builder(indexMetaData); try { boolean indexDirty = processIndexMappingTasks(tasks, indexService, builder); if (indexDirty) { mdBuilder.put(builder); dirty = true; } } finally { if (removeIndex) { indicesService.removeIndex(index, "created for mapping processing"); } } } if (!dirty) { return currentState; } return ClusterState.builder(currentState).metaData(mdBuilder).build(); } private boolean processIndexMappingTasks(List<RefreshTask> tasks, IndexService indexService, IndexMetaData.Builder builder) { boolean dirty = false; String index = indexService.index().name(); // keep track of what we already refreshed, no need to refresh it again... 
Set<String> processedRefreshes = new HashSet<>(); for (RefreshTask refreshTask : tasks) { try { List<String> updatedTypes = new ArrayList<>(); for (String type : refreshTask.types) { if (processedRefreshes.contains(type)) { continue; } DocumentMapper mapper = indexService.mapperService().documentMapper(type); if (mapper == null) { continue; } if (!mapper.mappingSource().equals(builder.mapping(type).source())) { updatedTypes.add(type); builder.putMapping(new MappingMetaData(mapper)); } processedRefreshes.add(type); } if (updatedTypes.isEmpty()) { continue; } logger.warn("[{}] re-syncing mappings with cluster state for types [{}]", index, updatedTypes); dirty = true; } catch (Throwable t) { logger.warn("[{}] failed to refresh-mapping in cluster state, types [{}]", index, refreshTask.types); } } return dirty; } /** * Refreshes mappings if they are not the same between original and parsed version */ public void refreshMapping(final String index, final String indexUUID, final String... types) { final RefreshTask refreshTask = new RefreshTask(index, indexUUID, types); clusterService.submitStateUpdateTask("refresh-mapping [" + index + "][" + Arrays.toString(types) + "]", refreshTask, ClusterStateTaskConfig.build(Priority.HIGH), refreshExecutor, (source, t) -> logger.warn("failure during [{}]", t, source) ); } class PutMappingExecutor implements ClusterStateTaskExecutor<PutMappingClusterStateUpdateRequest> { @Override public BatchResult<PutMappingClusterStateUpdateRequest> execute(ClusterState currentState, List<PutMappingClusterStateUpdateRequest> tasks) throws Exception { Set<String> indicesToClose = new HashSet<>(); BatchResult.Builder<PutMappingClusterStateUpdateRequest> builder = BatchResult.builder(); try { // precreate incoming indices; for (PutMappingClusterStateUpdateRequest request : tasks) { // failures here mean something is broken with our cluster state - fail all tasks by letting exceptions bubble up for (String index : request.indices()) { if 
(currentState.metaData().hasIndex(index)) { // if we don't have the index, we will throw exceptions later; if (indicesService.hasIndex(index) == false || indicesToClose.contains(index)) { final IndexMetaData indexMetaData = currentState.metaData().index(index); IndexService indexService; if (indicesService.hasIndex(index) == false) { indicesToClose.add(index); indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST); // make sure to add custom default mapping if exists if (indexMetaData.getMappings().containsKey(MapperService.DEFAULT_MAPPING)) { indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.getMappings().get(MapperService.DEFAULT_MAPPING).source(), false, request.updateAllTypes()); } } else { indexService = indicesService.indexService(index); } // only add the current relevant mapping (if exists and not yet added) if (indexMetaData.getMappings().containsKey(request.type()) && !indexService.mapperService().hasMapping(request.type())) { indexService.mapperService().merge(request.type(), indexMetaData.getMappings().get(request.type()).source(), false, request.updateAllTypes()); } } } } } for (PutMappingClusterStateUpdateRequest request : tasks) { try { currentState = applyRequest(currentState, request); builder.success(request); } catch (Throwable t) { builder.failure(request, t); } } return builder.build(currentState); } finally { for (String index : indicesToClose) { indicesService.removeIndex(index, "created for mapping processing"); } } } private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request) throws IOException { Map<String, DocumentMapper> newMappers = new HashMap<>(); Map<String, DocumentMapper> existingMappers = new HashMap<>(); for (String index : request.indices()) { IndexService indexService = indicesService.indexServiceSafe(index); // try and parse it (no need to add it here) so we can bail early in case of parsing exception 
DocumentMapper newMapper; DocumentMapper existingMapper = indexService.mapperService().documentMapper(request.type()); if (MapperService.DEFAULT_MAPPING.equals(request.type())) { // _default_ types do not go through merging, but we do test the new settings. Also don't apply the old default newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), false); } else { newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), existingMapper == null); if (existingMapper != null) { // first, simulate MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes()); // if we have conflicts, throw an exception if (mergeResult.hasConflicts()) { throw new MergeMappingException(mergeResult.buildConflicts()); } } else { // TODO: can we find a better place for this validation? // The reason this validation is here is that the mapper service doesn't learn about // new types all at once , which can create a false error. // For example in MapperService we can't distinguish between a create index api call // and a put mapping api call, so we don't which type did exist before. // Also the order of the mappings may be backwards. 
if (newMapper.parentFieldMapper().active()) { IndexMetaData indexMetaData = currentState.metaData().index(index); for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) { if (newMapper.parentFieldMapper().type().equals(mapping.value.type())) { throw new IllegalArgumentException("can't add a _parent field that points to an already existing type"); } } } } } newMappers.put(index, newMapper); if (existingMapper != null) { existingMappers.put(index, existingMapper); } } String mappingType = request.type(); if (mappingType == null) { mappingType = newMappers.values().iterator().next().type(); } else if (!mappingType.equals(newMappers.values().iterator().next().type())) { throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition"); } if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') { throw new InvalidTypeNameException("Document mapping type name can't start with '_'"); } final Map<String, MappingMetaData> mappings = new HashMap<>(); for (Map.Entry<String, DocumentMapper> entry : newMappers.entrySet()) { String index = entry.getKey(); // do the actual merge here on the master, and update the mapping source DocumentMapper newMapper = entry.getValue(); IndexService indexService = indicesService.indexService(index); if (indexService == null) { continue; } CompressedXContent existingSource = null; if (existingMappers.containsKey(entry.getKey())) { existingSource = existingMappers.get(entry.getKey()).mappingSource(); } DocumentMapper mergedMapper = indexService.mapperService().merge(newMapper.type(), newMapper.mappingSource(), false, request.updateAllTypes()); CompressedXContent updatedSource = mergedMapper.mappingSource(); if (existingSource != null) { if (existingSource.equals(updatedSource)) { // same source, no changes, ignore it } else { // use the merged mapping source mappings.put(index, new 
MappingMetaData(mergedMapper)); if (logger.isDebugEnabled()) { logger.debug("[{}] update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource); } else if (logger.isInfoEnabled()) { logger.info("[{}] update_mapping [{}]", index, mergedMapper.type()); } } } else { mappings.put(index, new MappingMetaData(mergedMapper)); if (logger.isDebugEnabled()) { logger.debug("[{}] create_mapping [{}] with source [{}]", index, newMapper.type(), updatedSource); } else if (logger.isInfoEnabled()) { logger.info("[{}] create_mapping [{}]", index, newMapper.type()); } } } if (mappings.isEmpty()) { // no changes, return return currentState; } MetaData.Builder builder = MetaData.builder(currentState.metaData()); for (String indexName : request.indices()) { IndexMetaData indexMetaData = currentState.metaData().index(indexName); if (indexMetaData == null) { throw new IndexNotFoundException(indexName); } MappingMetaData mappingMd = mappings.get(indexName); if (mappingMd != null) { builder.put(IndexMetaData.builder(indexMetaData).putMapping(mappingMd)); } } return ClusterState.builder(currentState).metaData(builder).build(); } } public void putMapping(final PutMappingClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) { clusterService.submitStateUpdateTask("put-mapping [" + request.type() + "]", request, ClusterStateTaskConfig.build(Priority.HIGH, request.masterNodeTimeout()), putMappingExecutor, new AckedClusterStateTaskListener() { @Override public void onFailure(String source, Throwable t) { listener.onFailure(t); } @Override public boolean mustAck(DiscoveryNode discoveryNode) { return true; } @Override public void onAllNodesAcked(@Nullable Throwable t) { listener.onResponse(new ClusterStateUpdateResponse(true)); } @Override public void onAckTimeout() { listener.onResponse(new ClusterStateUpdateResponse(false)); } @Override public TimeValue ackTimeout() { return request.ackTimeout(); } }); } }
apache-2.0
GovernmentCommunicationsHeadquarters/Gaffer
core/cache/src/main/java/uk/gov/gchq/gaffer/cache/util/CacheProperties.java
1225
/* * Copyright 2016-2020 Crown Copyright * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package uk.gov.gchq.gaffer.cache.util; /** * System properties used by the Gaffer cache service implementations. */ public final class CacheProperties { private CacheProperties() { // private constructor to prevent instantiation } /** * Name of the system property to use in order to define the cache service class. */ public static final String CACHE_SERVICE_CLASS = "gaffer.cache.service.class"; /** * Name of the system property to use in order to locate the cache config file. */ public static final String CACHE_CONFIG_FILE = "gaffer.cache.config.file"; }
apache-2.0
nknize/elasticsearch
server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java
18544
/* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch licenses this file to you under * the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.elasticsearch.cluster.coordination; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.cluster.coordination.Coordinator.Mode; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.monitor.NodeHealthService; import org.elasticsearch.monitor.StatusInfo; import org.elasticsearch.threadpool.ThreadPool.Names; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportConnectionListener; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequest; import 
org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportRequestOptions.Type; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportResponse.Empty; import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.HashSet; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.Predicate; import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; import static org.elasticsearch.monitor.StatusInfo.Status.UNHEALTHY; /** * The FollowersChecker is responsible for allowing a leader to check that its followers are still connected and healthy. On deciding that a * follower has failed the leader will remove it from the cluster. We are fairly lenient, possibly allowing multiple checks to fail before * considering a follower to be faulty, to allow for a brief network partition or a long GC cycle to occur without triggering the removal of * a node and the consequent shard reallocation. 
*/ public class FollowersChecker { private static final Logger logger = LogManager.getLogger(FollowersChecker.class); public static final String FOLLOWER_CHECK_ACTION_NAME = "internal:coordination/fault_detection/follower_check"; // the time between checks sent to each node public static final Setting<TimeValue> FOLLOWER_CHECK_INTERVAL_SETTING = Setting.timeSetting("cluster.fault_detection.follower_check.interval", TimeValue.timeValueMillis(1000), TimeValue.timeValueMillis(100), Setting.Property.NodeScope); // the timeout for each check sent to each node public static final Setting<TimeValue> FOLLOWER_CHECK_TIMEOUT_SETTING = Setting.timeSetting("cluster.fault_detection.follower_check.timeout", TimeValue.timeValueMillis(10000), TimeValue.timeValueMillis(1), Setting.Property.NodeScope); // the number of failed checks that must happen before the follower is considered to have failed. public static final Setting<Integer> FOLLOWER_CHECK_RETRY_COUNT_SETTING = Setting.intSetting("cluster.fault_detection.follower_check.retry_count", 3, 1, Setting.Property.NodeScope); private final TimeValue followerCheckInterval; private final TimeValue followerCheckTimeout; private final int followerCheckRetryCount; private final BiConsumer<DiscoveryNode, String> onNodeFailure; private final Consumer<FollowerCheckRequest> handleRequestAndUpdateState; private final Object mutex = new Object(); // protects writes to this state; read access does not need sync private final Map<DiscoveryNode, FollowerChecker> followerCheckers = newConcurrentMap(); private final Set<DiscoveryNode> faultyNodes = new HashSet<>(); private final TransportService transportService; private final NodeHealthService nodeHealthService; private volatile FastResponseState fastResponseState; public FollowersChecker(Settings settings, TransportService transportService, Consumer<FollowerCheckRequest> handleRequestAndUpdateState, BiConsumer<DiscoveryNode, String> onNodeFailure, NodeHealthService nodeHealthService) { 
this.transportService = transportService; this.handleRequestAndUpdateState = handleRequestAndUpdateState; this.onNodeFailure = onNodeFailure; this.nodeHealthService = nodeHealthService; followerCheckInterval = FOLLOWER_CHECK_INTERVAL_SETTING.get(settings); followerCheckTimeout = FOLLOWER_CHECK_TIMEOUT_SETTING.get(settings); followerCheckRetryCount = FOLLOWER_CHECK_RETRY_COUNT_SETTING.get(settings); updateFastResponseState(0, Mode.CANDIDATE); transportService.registerRequestHandler(FOLLOWER_CHECK_ACTION_NAME, Names.SAME, false, false, FollowerCheckRequest::new, (request, transportChannel, task) -> handleFollowerCheck(request, transportChannel)); transportService.addConnectionListener(new TransportConnectionListener() { @Override public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connection) { handleDisconnectedNode(node); } }); } /** * Update the set of known nodes, starting to check any new ones and stopping checking any previously-known-but-now-unknown ones. */ public void setCurrentNodes(DiscoveryNodes discoveryNodes) { synchronized (mutex) { final Predicate<DiscoveryNode> isUnknownNode = n -> discoveryNodes.nodeExists(n) == false; followerCheckers.keySet().removeIf(isUnknownNode); faultyNodes.removeIf(isUnknownNode); discoveryNodes.mastersFirstStream().forEach(discoveryNode -> { if (discoveryNode.equals(discoveryNodes.getLocalNode()) == false && followerCheckers.containsKey(discoveryNode) == false && faultyNodes.contains(discoveryNode) == false) { final FollowerChecker followerChecker = new FollowerChecker(discoveryNode); followerCheckers.put(discoveryNode, followerChecker); followerChecker.start(); } }); } } /** * Clear the set of known nodes, stopping all checks. 
*/ public void clearCurrentNodes() { setCurrentNodes(DiscoveryNodes.EMPTY_NODES); } /** * The system is normally in a state in which every follower remains a follower of a stable leader in a single term for an extended * period of time, and therefore our response to every follower check is the same. We handle this case with a single volatile read * entirely on the network thread, and only if the fast path fails do we perform some work in the background, by notifying the * FollowersChecker whenever our term or mode changes here. */ public void updateFastResponseState(final long term, final Mode mode) { fastResponseState = new FastResponseState(term, mode); } private void handleFollowerCheck(FollowerCheckRequest request, TransportChannel transportChannel) throws IOException { final StatusInfo statusInfo = nodeHealthService.getHealth(); if (statusInfo.getStatus() == UNHEALTHY) { final String message = "handleFollowerCheck: node is unhealthy [" + statusInfo.getInfo() + "], rejecting " + statusInfo.getInfo(); logger.debug(message); throw new NodeHealthCheckFailureException(message); } final FastResponseState responder = this.fastResponseState; if (responder.mode == Mode.FOLLOWER && responder.term == request.term) { logger.trace("responding to {} on fast path", request); transportChannel.sendResponse(Empty.INSTANCE); return; } if (request.term < responder.term) { throw new CoordinationStateRejectedException("rejecting " + request + " since local state is " + this); } transportService.getThreadPool().generic().execute(new AbstractRunnable() { @Override protected void doRun() throws IOException { logger.trace("responding to {} on slow path", request); try { handleRequestAndUpdateState.accept(request); } catch (Exception e) { transportChannel.sendResponse(e); return; } transportChannel.sendResponse(Empty.INSTANCE); } @Override public void onFailure(Exception e) { logger.debug(new ParameterizedMessage("exception while responding to {}", request), e); } @Override public 
String toString() { return "slow path response to " + request; } }); } /** * @return nodes in the current cluster state which have failed their follower checks. */ public Set<DiscoveryNode> getFaultyNodes() { synchronized (mutex) { return new HashSet<>(this.faultyNodes); } } @Override public String toString() { return "FollowersChecker{" + "followerCheckInterval=" + followerCheckInterval + ", followerCheckTimeout=" + followerCheckTimeout + ", followerCheckRetryCount=" + followerCheckRetryCount + ", followerCheckers=" + followerCheckers + ", faultyNodes=" + faultyNodes + ", fastResponseState=" + fastResponseState + '}'; } // For assertions FastResponseState getFastResponseState() { return fastResponseState; } // For assertions Set<DiscoveryNode> getKnownFollowers() { synchronized (mutex) { final Set<DiscoveryNode> knownFollowers = new HashSet<>(faultyNodes); knownFollowers.addAll(followerCheckers.keySet()); return knownFollowers; } } private void handleDisconnectedNode(DiscoveryNode discoveryNode) { FollowerChecker followerChecker = followerCheckers.get(discoveryNode); if (followerChecker != null) { followerChecker.failNode("disconnected"); } } static class FastResponseState { final long term; final Mode mode; FastResponseState(final long term, final Mode mode) { this.term = term; this.mode = mode; } @Override public String toString() { return "FastResponseState{" + "term=" + term + ", mode=" + mode + '}'; } } /** * A checker for an individual follower. 
*/
    // Health-checks a single follower node. One instance exists per tracked node; it
    // re-schedules itself after each successful check and, after repeated or fatal
    // failures, removes itself and reports the node as faulty.
    private class FollowerChecker {
        private final DiscoveryNode discoveryNode;
        // Consecutive failed checks since the last success; reset to 0 on a good response.
        private int failureCountSinceLastSuccess;

        FollowerChecker(DiscoveryNode discoveryNode) {
            this.discoveryNode = discoveryNode;
        }

        // A checker only acts while it is still the registered checker for its node;
        // a replaced/removed (stale) instance must become a no-op.
        private boolean running() {
            return this == followerCheckers.get(discoveryNode);
        }

        void start() {
            assert running();
            handleWakeUp();
        }

        // Sends one follower-check request. On success the next check is scheduled;
        // on failure the exception is classified and the node either gets another
        // retry or is marked faulty via failNode().
        private void handleWakeUp() {
            if (running() == false) {
                logger.trace("handleWakeUp: not running");
                return;
            }

            final FollowerCheckRequest request = new FollowerCheckRequest(fastResponseState.term, transportService.getLocalNode());
            logger.trace("handleWakeUp: checking {} with {}", discoveryNode, request);

            transportService.sendRequest(discoveryNode, FOLLOWER_CHECK_ACTION_NAME, request,
                TransportRequestOptions.of(followerCheckTimeout, Type.PING),
                new TransportResponseHandler.Empty() {
                    @Override
                    public void handleResponse(TransportResponse.Empty response) {
                        if (running() == false) {
                            logger.trace("{} no longer running", FollowerChecker.this);
                            return;
                        }
                        failureCountSinceLastSuccess = 0;
                        logger.trace("{} check successful", FollowerChecker.this);
                        scheduleNextWakeUp();
                    }

                    @Override
                    public void handleException(TransportException exp) {
                        if (running() == false) {
                            logger.debug(new ParameterizedMessage("{} no longer running", FollowerChecker.this), exp);
                            return;
                        }
                        failureCountSinceLastSuccess++;
                        final String reason;
                        // Classify the failure: disconnects and failed health checks are
                        // immediately fatal; other errors are retried until the configured
                        // retry count is exhausted.
                        if (exp instanceof ConnectTransportException || exp.getCause() instanceof ConnectTransportException) {
                            logger.debug(() -> new ParameterizedMessage("{} disconnected", FollowerChecker.this), exp);
                            reason = "disconnected";
                        }
                        else if (exp.getCause() instanceof NodeHealthCheckFailureException) {
                            logger.debug(() -> new ParameterizedMessage("{} health check failed", FollowerChecker.this), exp);
                            reason = "health check failed";
                        }
                        else if (failureCountSinceLastSuccess >= followerCheckRetryCount) {
                            logger.debug(() -> new ParameterizedMessage("{} failed too many times", FollowerChecker.this), exp);
                            reason = "followers check retry count exceeded";
                        }
                        else {
                            // Transient failure: try again rather than failing the node.
                            logger.debug(() -> new ParameterizedMessage("{} failed, retrying", FollowerChecker.this), exp);
                            scheduleNextWakeUp();
                            return;
                        }
                        failNode(reason);
                    }
                });
        }

        // Marks this node faulty on the generic thread pool: under the mutex the node is
        // recorded as faulty and this checker is deregistered; the failure callback is
        // then invoked outside the lock.
        void failNode(String reason) {
            transportService.getThreadPool().generic().execute(new Runnable() {
                @Override
                public void run() {
                    synchronized (mutex) {
                        if (running() == false) {
                            logger.trace("{} no longer running, not marking faulty", FollowerChecker.this);
                            return;
                        }
                        logger.debug("{} marking node as faulty", FollowerChecker.this);
                        faultyNodes.add(discoveryNode);
                        followerCheckers.remove(discoveryNode);
                    }
                    // Deliberately outside the mutex to avoid running listener code under the lock.
                    onNodeFailure.accept(discoveryNode, reason);
                }

                @Override
                public String toString() {
                    return "detected failure of " + discoveryNode;
                }
            });
        }

        // Schedules the next handleWakeUp() after the configured check interval.
        private void scheduleNextWakeUp() {
            transportService.getThreadPool().schedule(new Runnable() {
                @Override
                public void run() {
                    handleWakeUp();
                }

                @Override
                public String toString() {
                    return FollowerChecker.this + "::handleWakeUp";
                }
            }, followerCheckInterval, Names.SAME);
        }

        @Override
        public String toString() {
            return "FollowerChecker{" + "discoveryNode=" + discoveryNode
                + ", failureCountSinceLastSuccess=" + failureCountSinceLastSuccess
                + ", [" + FOLLOWER_CHECK_RETRY_COUNT_SETTING.getKey() + "]=" + followerCheckRetryCount + '}';
        }
    }

    // Wire request for a follower check: carries the checking leader's term and identity.
    public static class FollowerCheckRequest extends TransportRequest {

        private final long term;

        private final DiscoveryNode sender;

        public long getTerm() {
            return term;
        }

        public DiscoveryNode getSender() {
            return sender;
        }

        public FollowerCheckRequest(final long term, final DiscoveryNode sender) {
            this.term = term;
            this.sender = sender;
        }

        // Deserialization constructor; field order must mirror writeTo().
        public FollowerCheckRequest(final StreamInput in) throws IOException {
            super(in);
            term = in.readLong();
            sender = new DiscoveryNode(in);
        }

        @Override
        public void writeTo(final StreamOutput out) throws IOException {
            super.writeTo(out);
            out.writeLong(term);
            sender.writeTo(out);
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            FollowerCheckRequest that = (FollowerCheckRequest) o;
            return term == that.term && Objects.equals(sender, that.sender);
        }

        @Override
        public String toString() {
            return "FollowerCheckRequest{" + "term=" + term + ", sender=" + sender + '}';
        }

        @Override
        public int hashCode() {
            return Objects.hash(term, sender);
        }
    }
}
apache-2.0
barnyard/pi
freepastry/src/rice/pastry/transport/TLDeserializer.java
6124
/******************************************************************************* "FreePastry" Peer-to-Peer Application Development Substrate Copyright 2002-2007, Rice University. Copyright 2006-2007, Max Planck Institute for Software Systems. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name of Rice University (RICE), Max Planck Institute for Software Systems (MPI-SWS) nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. This software is provided by RICE, MPI-SWS and the contributors on an "as is" basis, without any representations or warranties of any kind, express or implied including, but not limited to, representations or warranties of non-infringement, merchantability or fitness for a particular purpose. In no event shall RICE, MPI-SWS or contributors be liable for any direct, indirect, incidental, special, exemplary, or consequential damages (including, but not limited to, procurement of substitute goods or services; loss of use, data, or profits; or business interruption) however caused and on any theory of liability, whether in contract, strict liability, or tort (including negligence or otherwise) arising in any way out of the use of this software, even if advised of the possibility of such damage. 
*******************************************************************************/
package rice.pastry.transport;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;

import org.mpisws.p2p.transport.commonapi.RawMessageDeserializer;

import rice.environment.Environment;
import rice.environment.logging.Logger;
import rice.p2p.commonapi.Message;
import rice.p2p.commonapi.NodeHandle;
import rice.p2p.commonapi.rawserialization.InputBuffer;
import rice.p2p.commonapi.rawserialization.MessageDeserializer;
import rice.p2p.commonapi.rawserialization.OutputBuffer;
import rice.p2p.commonapi.rawserialization.RawMessage;
import rice.p2p.util.rawserialization.SimpleInputBuffer;
import rice.pastry.NodeHandleFactory;
import rice.pastry.messaging.PRawMessage;

/**
 * (De)serializes raw Pastry messages for the transport layer.
 *
 * <p>Wire format written/read by this class: 4-byte destination address,
 * 1-byte priority, 2-byte message type, followed by the message body.
 * Deserialization is dispatched to a {@link MessageDeserializer} registered
 * per destination address via {@link #setDeserializer(int, MessageDeserializer)}.
 */
public class TLDeserializer implements RawMessageDeserializer, Deserializer {

  /** Per-address message deserializers; populated via setDeserializer(). */
  final Map<Integer, MessageDeserializer> deserializers;

  final NodeHandleFactory nodeHandleFactory;

  protected Environment environment;
  protected Logger logger;

  public TLDeserializer(NodeHandleFactory nodeHandleFactory, Environment env) {
    this.environment = env;
    this.nodeHandleFactory = nodeHandleFactory;
    this.deserializers = new HashMap<Integer, MessageDeserializer>();
    this.logger = environment.getLogManager().getLogger(TLDeserializer.class, null);
  }

  /**
   * Reads the address/priority/type header and dispatches body deserialization
   * to the deserializer registered for the address.
   *
   * @param buf    buffer positioned at the start of the message header
   * @param sender handle of the node the message came from (passed through to
   *               the registered deserializer)
   * @return the deserialized message, cast to RawMessage
   * @throws IOException if no deserializer is registered for the address, or on read failure
   */
  public RawMessage deserialize(InputBuffer buf, NodeHandle sender) throws IOException {
    int address = buf.readInt();
    byte priority = buf.readByte();
    short type = buf.readShort();

    MessageDeserializer deserializer = getDeserializer(address);
    if (deserializer == null) {
      throw new IOException("Unknown address:" + address); // TODO: Make UnknownAddressException
    }

    Message msg = deserializer.deserialize(buf, type, priority, sender);
    if (msg == null) {
      if (logger.level <= Logger.WARNING) logger.log(
          "Deserialized message to null! d:" + deserializer + " a:" + address
          + " t:" + type + " p:" + priority + " s:" + sender + " b:" + buf);
    }

    if (logger.level <= Logger.FINER) logger.log("deserialize():" + msg);
    return (RawMessage) msg;
  }

  /**
   * Writes the address/priority/type header, then delegates body serialization
   * to the message itself. Mirrors the format read by deserialize().
   *
   * @param m message to serialize; must be a PRawMessage
   * @param o output buffer
   * @throws IllegalStateException if the message's priority does not fit in a byte
   * @throws IOException on write failure
   */
  public void serialize(RawMessage m, OutputBuffer o) throws IOException {
    PRawMessage msg = (PRawMessage) m;

    int address = msg.getDestination();
    o.writeInt(address);

    // Priority travels as a single byte; reject anything out of range up front.
    int priority = msg.getPriority();
    if (priority > Byte.MAX_VALUE || priority < Byte.MIN_VALUE)
      throw new IllegalStateException("Priority must be in the range of " + Byte.MIN_VALUE + " to " + Byte.MAX_VALUE
          + ".  Lower values are higher priority. Priority of " + msg + " was " + priority + ".");
    o.writeByte((byte) priority);

    short type = msg.getType();
    o.writeShort(type);

    msg.serialize(o);
  }

  /** Removes the deserializer registered for the given address, if any. */
  public void clearDeserializer(int address) {
    deserializers.remove(address);
  }

  /** @return the deserializer registered for the address, or null if none. */
  public MessageDeserializer getDeserializer(int address) {
    return deserializers.get(address);
  }

  /** Registers (or replaces) the deserializer for the given address. */
  public void setDeserializer(int address, MessageDeserializer md) {
    deserializers.put(address, md);
  }
}
apache-2.0
StyleTang/incubator-rocketmq-externals
rocketmq-connect-redis/src/main/java/org/apache/rocketmq/connect/redis/parser/HmSetParser.java
1386
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.rocketmq.connect.redis.parser;

import io.openmessaging.connector.api.data.FieldType;
import org.apache.rocketmq.connect.redis.pojo.KVEntry;
import org.apache.rocketmq.connect.redis.pojo.RedisEntry;

/**
 * Parser for the Redis HMSET command:
 * hmset key field value [field value ...]
 */
public class HmSetParser extends AbstractCommandParser {

    /**
     * Creates a MAP-typed entry builder, since HMSET carries
     * multiple field/value pairs for a single key.
     */
    @Override
    public KVEntry createBuilder() {
        return RedisEntry.newEntry(FieldType.MAP);
    }

    /**
     * Delegates parsing of the field/value argument pairs to the
     * shared map-command parser.
     */
    @Override
    public KVEntry handleValue(KVEntry builder, String[] args) {
        return CommonParser.commonMapParser(builder, args);
    }
}
apache-2.0
michaelandrepearce/activemq-artemis
tests/integration-tests/src/test/java/org/apache/activemq/artemis/tests/integration/cluster/distribution/ClusterTestBase.java
74649
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.activemq.artemis.tests.integration.cluster.distribution; import java.io.File; import java.io.PrintWriter; import java.io.StringWriter; import java.net.URI; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import org.apache.activemq.artemis.api.config.ActiveMQDefaultConfiguration; import org.apache.activemq.artemis.api.core.ActiveMQException; import org.apache.activemq.artemis.api.core.BroadcastGroupConfiguration; import org.apache.activemq.artemis.api.core.DiscoveryGroupConfiguration; import org.apache.activemq.artemis.api.core.Message; import org.apache.activemq.artemis.api.core.SimpleString; import org.apache.activemq.artemis.api.core.TransportConfiguration; import org.apache.activemq.artemis.api.core.UDPBroadcastEndpointFactory; import org.apache.activemq.artemis.api.core.client.ActiveMQClient; import org.apache.activemq.artemis.api.core.client.ClientConsumer; import 
org.apache.activemq.artemis.api.core.client.ClientMessage; import org.apache.activemq.artemis.api.core.client.ClientProducer; import org.apache.activemq.artemis.api.core.client.ClientSession; import org.apache.activemq.artemis.api.core.client.ClientSessionFactory; import org.apache.activemq.artemis.api.core.client.ServerLocator; import org.apache.activemq.artemis.core.client.impl.ServerLocatorInternal; import org.apache.activemq.artemis.core.client.impl.Topology; import org.apache.activemq.artemis.core.client.impl.TopologyMemberImpl; import org.apache.activemq.artemis.core.config.ClusterConnectionConfiguration; import org.apache.activemq.artemis.core.config.Configuration; import org.apache.activemq.artemis.core.config.HAPolicyConfiguration; import org.apache.activemq.artemis.core.config.ha.LiveOnlyPolicyConfiguration; import org.apache.activemq.artemis.core.config.ha.ReplicaPolicyConfiguration; import org.apache.activemq.artemis.core.config.ha.ReplicatedPolicyConfiguration; import org.apache.activemq.artemis.core.config.ha.SharedStoreMasterPolicyConfiguration; import org.apache.activemq.artemis.core.config.ha.SharedStoreSlavePolicyConfiguration; import org.apache.activemq.artemis.core.postoffice.Binding; import org.apache.activemq.artemis.core.postoffice.Bindings; import org.apache.activemq.artemis.core.postoffice.PostOffice; import org.apache.activemq.artemis.core.postoffice.QueueBinding; import org.apache.activemq.artemis.core.postoffice.impl.LocalQueueBinding; import org.apache.activemq.artemis.core.protocol.core.impl.CoreProtocolManagerFactory; import org.apache.activemq.artemis.core.remoting.impl.netty.TransportConstants; import org.apache.activemq.artemis.core.server.ActiveMQServer; import org.apache.activemq.artemis.core.server.ActiveMQServers; import org.apache.activemq.artemis.core.server.NodeManager; import org.apache.activemq.artemis.core.server.cluster.ActiveMQServerSideProtocolManagerFactory; import 
org.apache.activemq.artemis.core.server.cluster.ClusterConnection; import org.apache.activemq.artemis.core.server.cluster.ClusterManager; import org.apache.activemq.artemis.core.server.cluster.RemoteQueueBinding; import org.apache.activemq.artemis.core.server.cluster.impl.ClusterConnectionImpl; import org.apache.activemq.artemis.core.server.cluster.impl.MessageLoadBalancingType; import org.apache.activemq.artemis.core.server.cluster.qourum.SharedNothingBackupQuorum; import org.apache.activemq.artemis.core.server.group.GroupingHandler; import org.apache.activemq.artemis.core.server.group.impl.GroupingHandlerConfiguration; import org.apache.activemq.artemis.core.server.impl.InVMNodeManager; import org.apache.activemq.artemis.tests.integration.IntegrationTestLogger; import org.apache.activemq.artemis.tests.util.ActiveMQTestBase; import org.junit.After; import org.junit.Assert; import org.junit.Before; public abstract class ClusterTestBase extends ActiveMQTestBase { private static final IntegrationTestLogger log = IntegrationTestLogger.LOGGER; private static final int[] PORTS = {TransportConstants.DEFAULT_PORT, TransportConstants.DEFAULT_PORT + 1, TransportConstants.DEFAULT_PORT + 2, TransportConstants.DEFAULT_PORT + 3, TransportConstants.DEFAULT_PORT + 4, TransportConstants.DEFAULT_PORT + 5, TransportConstants.DEFAULT_PORT + 6, TransportConstants.DEFAULT_PORT + 7, TransportConstants.DEFAULT_PORT + 8, TransportConstants.DEFAULT_PORT + 9,}; protected int getLargeMessageSize() { return 500; } protected boolean isLargeMessage() { return false; } private static final long TIMEOUT_START_SERVER = 10; private static final SimpleString COUNT_PROP = new SimpleString("count_prop"); protected static final SimpleString FILTER_PROP = new SimpleString("animal"); private static final int MAX_SERVERS = 10; protected ConsumerHolder[] consumers; protected ActiveMQServer[] servers; protected NodeManager[] nodeManagers; protected ClientSessionFactory[] sfs; protected long[] timeStarts; 
protected ServerLocator[] locators; @Override @Before public void setUp() throws Exception { super.setUp(); forceGC(); ActiveMQTestBase.checkFreePort(ClusterTestBase.PORTS); consumers = new ConsumerHolder[ClusterTestBase.MAX_CONSUMERS]; servers = new ActiveMQServer[ClusterTestBase.MAX_SERVERS]; timeStarts = new long[ClusterTestBase.MAX_SERVERS]; sfs = new ClientSessionFactory[ClusterTestBase.MAX_SERVERS]; nodeManagers = new NodeManager[ClusterTestBase.MAX_SERVERS]; for (int i = 0, nodeManagersLength = nodeManagers.length; i < nodeManagersLength; i++) { nodeManagers[i] = new InVMNodeManager(isSharedStore(), new File(getJournalDir(i, true))); } locators = new ServerLocator[ClusterTestBase.MAX_SERVERS]; } /** * Whether the servers share the storage or not. */ protected boolean isSharedStore() { return false; } @Override @After public void tearDown() throws Exception { logTopologyDiagram(); for (int i = 0; i < MAX_SERVERS; i++) { addActiveMQComponent(nodeManagers[i]); } servers = null; sfs = null; consumers = new ConsumerHolder[ClusterTestBase.MAX_CONSUMERS]; nodeManagers = null; super.tearDown(); ActiveMQTestBase.checkFreePort(ClusterTestBase.PORTS); } // Private ------------------------------------------------------------------------------------------------------- private static final int MAX_CONSUMERS = 100; protected static class ConsumerHolder { final ClientConsumer consumer; final ClientSession session; final int id; final int node; public ClientConsumer getConsumer() { return consumer; } public ClientSession getSession() { return session; } public int getId() { return id; } public int getNode() { return node; } ConsumerHolder(final int id, final ClientConsumer consumer, final ClientSession session, int node) { this.id = id; this.node = node; this.consumer = consumer; this.session = session; } void close() { if (consumer != null) { try { consumer.close(); } catch (ActiveMQException e) { // ignore } } if (session != null) { try { session.close(); } catch 
(ActiveMQException e) { // ignore } } } @Override public String toString() { return "id=" + id + ", consumer=" + consumer + ", session=" + session; } } protected ClientConsumer getConsumer(final int node) { return consumers[node].consumer; } protected void waitForFailoverTopology(final int bNode, final int... nodes) throws Exception { ActiveMQServer server = servers[bNode]; log.debug("waiting for " + Arrays.toString(nodes) + " on the topology for server = " + server); long start = System.currentTimeMillis(); final int waitMillis = 2000; final int sleepTime = 50; int nWaits = 0; while (server.getClusterManager() == null && nWaits++ < waitMillis / sleepTime) { Thread.sleep(sleepTime); } Set<ClusterConnection> ccs = server.getClusterManager().getClusterConnections(); if (ccs.size() != 1) { throw new IllegalStateException("You need a single cluster connection on this version of waitForTopology on ServiceTestBase"); } boolean exists = false; for (int node : nodes) { ClusterConnectionImpl clusterConnection = (ClusterConnectionImpl) ccs.iterator().next(); Topology topology = clusterConnection.getTopology(); TransportConfiguration nodeConnector = servers[node].getClusterManager().getClusterConnections().iterator().next().getConnector(); do { Collection<TopologyMemberImpl> members = topology.getMembers(); for (TopologyMemberImpl member : members) { if (member.getConnector().getA() != null && member.getConnector().getA().equals(nodeConnector)) { exists = true; break; } } if (exists) { break; } Thread.sleep(10); } while (System.currentTimeMillis() - start < WAIT_TIMEOUT); if (!exists) { String msg = "Timed out waiting for cluster topology of " + Arrays.toString(nodes) + " (received " + topology.getMembers().size() + ") topology = " + topology + ")"; log.error(msg); logTopologyDiagram(); throw new Exception(msg); } } } private void logTopologyDiagram() { StringBuffer topologyDiagram = new StringBuffer(); for (ActiveMQServer activeMQServer : servers) { if (activeMQServer != 
null) { topologyDiagram.append("\n").append(activeMQServer.getIdentity()).append("\n"); if (activeMQServer.isStarted()) { Set<ClusterConnection> ccs = activeMQServer.getClusterManager().getClusterConnections(); if (ccs.size() >= 1) { ClusterConnectionImpl clusterConnection = (ClusterConnectionImpl) ccs.iterator().next(); Collection<TopologyMemberImpl> members = clusterConnection.getTopology().getMembers(); for (TopologyMemberImpl member : members) { String nodeId = member.getNodeId(); String liveServer = null; String backupServer = null; for (ActiveMQServer server : servers) { if (server != null && server.getNodeID() != null && server.isActive() && server.getNodeID().toString().equals(nodeId)) { if (server.isActive()) { liveServer = server.getIdentity(); if (member.getLive() != null) { liveServer += "(notified)"; } else { liveServer += "(not notified)"; } } else { backupServer = server.getIdentity(); if (member.getBackup() != null) { liveServer += "(notified)"; } else { liveServer += "(not notified)"; } } } } topologyDiagram.append("\t").append("|\n").append("\t->").append(liveServer).append("/").append(backupServer).append("\n"); } } else { topologyDiagram.append("-> no cluster connections\n"); } } else { topologyDiagram.append("-> stopped\n"); } } } topologyDiagram.append("\n"); log.info(topologyDiagram.toString()); } protected void waitForMessages(final int node, final String address, final int count) throws Exception { ActiveMQServer server = servers[node]; if (server == null) { throw new IllegalArgumentException("No server at " + node); } PostOffice po = server.getPostOffice(); long start = System.currentTimeMillis(); int messageCount = 0; do { messageCount = getMessageCount(po, address); if (messageCount == count) { return; } Thread.sleep(10); } while (System.currentTimeMillis() - start < ActiveMQTestBase.WAIT_TIMEOUT); throw new IllegalStateException("Timed out waiting for messages (messageCount = " + messageCount + ", expecting = " + count); } protected 
void waitForServerRestart(final int node) throws Exception {
   // Waits until servers[node] reports activation; with replication (non-shared store) the
   // timeout must exceed QuorumManager.WAIT_TIME_AFTER_FIRST_LIVE_STOPPING_MSG (60 sec).
   long waitTimeout = ActiveMQTestBase.WAIT_TIMEOUT;
   if (!isSharedStore()) {
      waitTimeout = 1000 * (SharedNothingBackupQuorum.WAIT_TIME_AFTER_FIRST_LIVE_STOPPING_MSG + 5);
   }
   if (!servers[node].waitForActivation(waitTimeout, TimeUnit.MILLISECONDS)) {
      String msg = "Timed out waiting for server starting = " + node;
      log.error(msg);
      throw new IllegalStateException(msg);
   }
}

/**
 * Waits until server {@code node} has the expected number of bindings/consumers for
 * {@code address}; on timeout dumps binding diagnostics for every server and fails.
 */
protected void waitForBindings(final int node,
                               final String address,
                               final int expectedBindingCount,
                               final int expectedConsumerCount,
                               final boolean local) throws Exception {
   log.debug("waiting for bindings on node " + node + " address " + address + " expectedBindingCount " + expectedBindingCount + " consumerCount " + expectedConsumerCount + " local " + local);
   ActiveMQServer server = servers[node];
   if (server == null) {
      throw new IllegalArgumentException("No server at " + node);
   }
   long timeout = ActiveMQTestBase.WAIT_TIMEOUT;
   // delegate the actual polling to the base-class overload; success returns early
   if (waitForBindings(server, address, local, expectedBindingCount, expectedConsumerCount, timeout)) {
      return;
   }
   // Timed out: everything below is failure diagnostics.
   PostOffice po = server.getPostOffice();
   Bindings bindings = po.getBindingsForAddress(new SimpleString(address));
   System.out.println("=======================================================================");
   System.out.println("Binding information for address = " + address + " on node " + node);
   for (Binding binding : bindings.getBindings()) {
      if (binding.isConnected() && (binding instanceof LocalQueueBinding && local || binding instanceof RemoteQueueBinding && !local)) {
         QueueBinding qBinding = (QueueBinding) binding;
         System.out.println("Binding = " + qBinding + ", queue=" + qBinding.getQueue());
      }
   }
   StringWriter writer = new StringWriter();
   PrintWriter out = new PrintWriter(writer);
   try {
      // NOTE(review): both loops dump the management-notification address; the first loop
      // looks like it may have been intended to dump `address` instead — confirm upstream.
      for (ActiveMQServer activeMQServer : servers) {
         if (activeMQServer != null) {
            out.println(clusterDescription(activeMQServer));
            out.println(debugBindings(activeMQServer, activeMQServer.getConfiguration().getManagementNotificationAddress().toString()));
         }
      }
      for (ActiveMQServer activeMQServer : servers) {
         out.println("Management bindings on " + activeMQServer);
         if (activeMQServer != null) {
            out.println(debugBindings(activeMQServer, activeMQServer.getConfiguration().getManagementNotificationAddress().toString()));
         }
      }
   } catch (Throwable dontCare) {
      // diagnostics only — never let the dump mask the real failure below
   }
   logAndSystemOut(writer.toString());
   throw new IllegalStateException("Didn't get the expected number of bindings, look at the logging for more information");
}

/**
 * Renders the bindings for {@code address} on {@code server} as a printable string.
 */
protected String debugBindings(final ActiveMQServer server, final String address) throws Exception {
   StringWriter str = new StringWriter();
   PrintWriter out = new PrintWriter(str);
   if (server == null) {
      return "server is shutdown";
   }
   PostOffice po = server.getPostOffice();
   if (po == null) {
      return "server is shutdown";
   }
   Bindings bindings = po.getBindingsForAddress(new SimpleString(address));
   out.println("=======================================================================");
   out.println("Binding information for address = " + address + " on " + server);
   for (Binding binding : bindings.getBindings()) {
      QueueBinding qBinding = (QueueBinding) binding;
      out.println("Binding = " + qBinding + ", queue=" + qBinding.getQueue());
   }
   out.println("=======================================================================");
   return str.toString();
}

/**
 * Creates a queue on node {@code node} without credentials.
 */
protected void createQueue(final int node,
                           final String address,
                           final String queueName,
                           final String filterVal,
                           final boolean durable) throws Exception {
   createQueue(node, address, queueName, filterVal, durable, null, null);
}

/**
 * Creates a queue on node {@code node}, optionally filtered on FILTER_PROP and authenticated.
 */
protected void createQueue(final int node,
                           final String address,
                           final String queueName,
                           final String filterVal,
                           final boolean durable,
                           final String user,
                           final String password) throws Exception {
   ClientSessionFactory sf = sfs[node];
   if (sf == null) {
      throw new IllegalArgumentException("No sf at " + node);
   }
   ClientSession session =
addClientSession(sf.createSession(user, password, false, true, true, ActiveMQClient.DEFAULT_PRE_ACKNOWLEDGE, ActiveMQClient.DEFAULT_ACK_BATCH_SIZE));
String filterString = null;
if (filterVal != null) {
   filterString = ClusterTestBase.FILTER_PROP.toString() + "='" + filterVal + "'";
}
log.info("Creating " + queueName + " , address " + address + " on " + servers[node]);
session.createQueue(address, queueName, filterString, durable);
session.close();
}

/**
 * Deletes a queue through the session factory of node {@code node}.
 */
protected void deleteQueue(final int node, final String queueName) throws Exception {
   ClientSessionFactory sf = sfs[node];
   if (sf == null) {
      throw new IllegalArgumentException("No sf at " + node);
   }
   ClientSession session = sf.createSession(false, true, true);
   session.deleteQueue(queueName);
   session.close();
}

/**
 * Registers a consumer in slot {@code consumerID} on node {@code node} with auto-commit acks.
 */
protected void addConsumer(final int consumerID, final int node, final String queueName, final String filterVal) throws Exception {
   addConsumer(consumerID, node, queueName, filterVal, true);
}

protected void addConsumer(final int consumerID,
                           final int node,
                           final String queueName,
                           final String filterVal,
                           boolean autoCommitAcks) throws Exception {
   addConsumer(consumerID, node, queueName, filterVal, autoCommitAcks, null, null);
}

/**
 * Creates a started session + consumer on node {@code node} and stores it in
 * {@code consumers[consumerID]}. The session is registered for automatic cleanup.
 */
protected void addConsumer(final int consumerID,
                           final int node,
                           final String queueName,
                           final String filterVal,
                           boolean autoCommitAcks,
                           final String user,
                           final String password) throws Exception {
   try {
      if (consumers[consumerID] != null) {
         throw new IllegalArgumentException("Already a consumer at " + node);
      }
      ClientSessionFactory sf = sfs[node];
      if (sf == null) {
         throw new IllegalArgumentException("No sf at " + node);
      }
      ClientSession session = addClientSession(sf.createSession(user, password, false, false, autoCommitAcks, ActiveMQClient.DEFAULT_PRE_ACKNOWLEDGE, ActiveMQClient.DEFAULT_ACK_BATCH_SIZE));
      String filterString = null;
      if (filterVal != null) {
         filterString = ClusterTestBase.FILTER_PROP.toString() + "='" + filterVal + "'";
      }
      ClientConsumer consumer = addClientConsumer(session.createConsumer(queueName, filterString));
      session.start();
      consumers[consumerID] = new ConsumerHolder(consumerID, consumer, session, node);
   } catch (Exception e) {
      // Proxy the failure and print a dump into System.out, so it is captured by Jenkins reports
      e.printStackTrace();
      System.out.println(ActiveMQTestBase.threadDump(" - fired by ClusterTestBase::addConsumer"));
      throw e;
   }
}

/**
 * Closes and clears the consumer at slot {@code consumerID}.
 */
protected void removeConsumer(final int consumerID) {
   ConsumerHolder holder = consumers[consumerID];
   if (holder == null) {
      throw new IllegalArgumentException("No consumer at " + consumerID);
   }
   holder.close();
   consumers[consumerID] = null;
}

/**
 * Closes every registered consumer; safe to call before the array is initialized.
 */
protected void closeAllConsumers() {
   if (consumers == null)
      return;
   for (int i = 0; i < consumers.length; i++) {
      ConsumerHolder holder = consumers[i];
      if (holder != null) {
         holder.close();
         consumers[i] = null;
      }
   }
}

@Override
protected void closeAllSessionFactories() {
   if (sfs != null) {
      for (int i = 0; i < sfs.length; i++) {
         closeSessionFactory(sfs[i]);
         sfs[i] = null;
      }
   }
   super.closeAllSessionFactories();
}

@Override
protected void closeAllServerLocatorsFactories() {
   for (int i = 0; i < locators.length; i++) {
      closeServerLocator(locators[i]);
      locators[i] = null;
   }
   super.closeAllServerLocatorsFactories();
}

/**
 * Closes and clears the session factory at slot {@code node}.
 */
protected void closeSessionFactory(final int node) {
   ClientSessionFactory sf = sfs[node];
   if (sf == null) {
      throw new IllegalArgumentException("No sf at " + node);
   }
   sf.close();
   sfs[node] = null;
}

/**
 * Sends messages numbered [msgStart, msgEnd) to {@code address} from node {@code node}.
 */
protected void sendInRange(final int node,
                           final String address,
                           final int msgStart,
                           final int msgEnd,
                           final boolean durable,
                           final String filterVal) throws Exception {
   sendInRange(node, address, msgStart, msgEnd, durable, filterVal, null);
}

/**
 * Sends messages numbered [msgStart, msgEnd); each carries COUNT_PROP, optionally
 * FILTER_PROP and a duplicate-detection id drawn from {@code duplicateDetectionSeq}.
 */
protected void sendInRange(final int node,
                           final String address,
                           final int msgStart,
                           final int msgEnd,
                           final boolean durable,
                           final String filterVal,
                           final AtomicInteger duplicateDetectionSeq) throws Exception {
   ClientSessionFactory sf = sfs[node];
   if (sf == null) {
      throw new
IllegalArgumentException("No sf at " + node);
}
// non-auto-commit session: messages are committed in batches of 100 below
ClientSession session = sf.createSession(false, false, false);
try {
   ClientProducer producer = session.createProducer(address);
   for (int i = msgStart; i < msgEnd; i++) {
      ClientMessage message = session.createMessage(durable);
      if (filterVal != null) {
         message.putStringProperty(ClusterTestBase.FILTER_PROP, new SimpleString(filterVal));
      }
      if (duplicateDetectionSeq != null) {
         String str = Integer.toString(duplicateDetectionSeq.incrementAndGet());
         message.putStringProperty(Message.HDR_DUPLICATE_DETECTION_ID, new SimpleString(str));
      }
      message.putIntProperty(ClusterTestBase.COUNT_PROP, i);
      if (isLargeMessage()) {
         message.setBodyInputStream(createFakeLargeStream(getLargeMessageSize()));
      }
      producer.send(message);
      if (i % 100 == 0) {
         session.commit();
      }
   }
   session.commit();
} finally {
   session.close();
}
}

/**
 * Sends {@code numMessages} messages each carrying the given string property.
 */
protected void sendWithProperty(final int node,
                                final String address,
                                final int numMessages,
                                final boolean durable,
                                final SimpleString key,
                                final SimpleString val) throws Exception {
   sendInRange(node, address, 0, numMessages, durable, key, val);
}

/**
 * Sends messages numbered [msgStart, msgEnd), each carrying COUNT_PROP and the
 * given {@code key}={@code val} property.
 */
protected void sendInRange(final int node,
                           final String address,
                           final int msgStart,
                           final int msgEnd,
                           final boolean durable,
                           final SimpleString key,
                           final SimpleString val) throws Exception {
   ClientSessionFactory sf = sfs[node];
   if (sf == null) {
      throw new IllegalArgumentException("No sf at " + node);
   }
   ClientSession session = sf.createSession(false, true, true);
   try {
      ClientProducer producer = session.createProducer(address);
      for (int i = msgStart; i < msgEnd; i++) {
         ClientMessage message = session.createMessage(durable);
         if (isLargeMessage()) {
            message.setBodyInputStream(createFakeLargeStream(getLargeMessageSize()));
         }
         message.putStringProperty(key, val);
         message.putIntProperty(ClusterTestBase.COUNT_PROP, i);
         producer.send(message);
      }
   } finally {
      session.close();
   }
}

/**
 * Configures a grouping handler on node {@code node} with a 5s timeout.
 */
protected void setUpGroupHandler(final GroupingHandlerConfiguration.TYPE type, final int node) {
   setUpGroupHandler(type, node, 5000);
}

protected void setUpGroupHandler(final GroupingHandlerConfiguration.TYPE type, final int node, final int timeout) {
   setUpGroupHandler(type, node, timeout, -1, ActiveMQDefaultConfiguration.getDefaultGroupingHandlerReaperPeriod());
}

/**
 * Installs a "grouparbitrator" grouping-handler configuration on node {@code node}
 * for address "queues".
 */
protected void setUpGroupHandler(final GroupingHandlerConfiguration.TYPE type,
                                 final int node,
                                 final int timeout,
                                 final long groupTimeout,
                                 final long reaperPeriod) {
   servers[node].getConfiguration().setGroupingHandlerConfiguration(new GroupingHandlerConfiguration().setName(new SimpleString("grouparbitrator")).setType(type).setAddress(new SimpleString("queues")).setTimeout(timeout).setGroupTimeout(groupTimeout).setReaperPeriod(reaperPeriod));
}

/**
 * Installs a pre-built grouping handler directly on the server instance.
 */
protected void setUpGroupHandler(final GroupingHandler groupingHandler, final int node) {
   servers[node].setGroupingHandler(groupingHandler);
}

protected void send(final int node, final String address, final int numMessages, final boolean durable, final String filterVal) throws Exception {
   send(node, address, numMessages, durable, filterVal, null);
}

protected void send(final int node,
                    final String address,
                    final int numMessages,
                    final boolean durable,
                    final String filterVal,
                    final AtomicInteger duplicateDetectionCounter) throws Exception {
   sendInRange(node, address, 0, numMessages, durable, filterVal, duplicateDetectionCounter);
}

protected void verifyReceiveAllInRange(final boolean ack, final int msgStart, final int msgEnd, final int... consumerIDs) throws Exception {
   verifyReceiveAllInRangeNotBefore(ack, -1, msgStart, msgEnd, consumerIDs);
}

protected void verifyReceiveAllInRange(final int msgStart, final int msgEnd, final int... consumerIDs) throws Exception {
   verifyReceiveAllInRangeNotBefore(false, -1, msgStart, msgEnd, consumerIDs);
}

protected void verifyReceiveAllWithGroupIDRoundRobin(final int msgStart, final int msgEnd, final int...
consumerIDs) throws Exception {
   verifyReceiveAllWithGroupIDRoundRobin(true, -1, msgStart, msgEnd, consumerIDs);
}

protected int verifyReceiveAllOnSingleConsumer(final int msgStart, final int msgEnd, final int... consumerIDs) throws Exception {
   return verifyReceiveAllOnSingleConsumer(true, msgStart, msgEnd, consumerIDs);
}

/**
 * Verifies every consumer receives all messages in [msgStart, msgEnd) and that each
 * HDR_GROUP_ID stays bound to exactly one consumer.
 */
protected void verifyReceiveAllWithGroupIDRoundRobin(final boolean ack,
                                                     final long firstReceiveTime,
                                                     final int msgStart,
                                                     final int msgEnd,
                                                     final int... consumerIDs) throws Exception {
   // maps group id -> index of the consumer that first received it
   HashMap<SimpleString, Integer> groupIdsReceived = new HashMap<>();
   for (int i = 0; i < consumerIDs.length; i++) {
      ConsumerHolder holder = consumers[consumerIDs[i]];
      if (holder == null) {
         throw new IllegalArgumentException("No consumer at " + consumerIDs[i]);
      }
      for (int j = msgStart; j < msgEnd; j++) {
         ClientMessage message = holder.consumer.receive(2000);
         if (message == null) {
            log.info("*** dumping consumers:");
            dumpConsumers();
            Assert.assertNotNull("consumer " + consumerIDs[i] + " did not receive message " + j, message);
         }
         if (ack) {
            message.acknowledge();
         }
         if (firstReceiveTime != -1) {
            Assert.assertTrue("Message received too soon", System.currentTimeMillis() >= firstReceiveTime);
         }
         SimpleString id = (SimpleString) message.getObjectProperty(Message.HDR_GROUP_ID);
         if (groupIdsReceived.get(id) == null) {
            groupIdsReceived.put(id, i);
         } else if (groupIdsReceived.get(id) != i) {
            Assert.fail("consumer " + groupIdsReceived.get(id) + " already bound to groupid " + id + " received on consumer " + i);
         }
      }
   }
}

/**
 * Asserts exactly one of the given consumers received all messages in [msgStart, msgEnd);
 * returns that consumer's index in {@code consumerIDs}, or -1 if none received anything.
 */
protected int verifyReceiveAllOnSingleConsumer(final boolean ack,
                                               final int msgStart,
                                               final int msgEnd,
                                               final int... consumerIDs) throws Exception {
   int groupIdsReceived = -1;
   for (int i = 0; i < consumerIDs.length; i++) {
      ConsumerHolder holder = consumers[consumerIDs[i]];
      if (holder == null) {
         throw new IllegalArgumentException("No consumer at " + consumerIDs[i]);
      }
      ClientMessage message = holder.consumer.receive(2000);
      if (message != null) {
         groupIdsReceived = i;
         // first message already consumed above, so start at msgStart + 1
         for (int j = msgStart + 1; j < msgEnd; j++) {
            message = holder.consumer.receive(2000);
            if (message == null) {
               Assert.fail("consumer " + i + " did not receive all messages");
            }
            if (ack) {
               message.acknowledge();
            }
         }
      }
   }
   return groupIdsReceived;
}

/**
 * Verifies each consumer receives the full COUNT_PROP sequence [msgStart, msgEnd) in order,
 * no earlier than {@code firstReceiveTime} (pass -1 to skip the timing check).
 */
protected void verifyReceiveAllInRangeNotBefore(final boolean ack,
                                                final long firstReceiveTime,
                                                final int msgStart,
                                                final int msgEnd,
                                                final int... consumerIDs) throws Exception {
   boolean outOfOrder = false;
   String firstOutOfOrderMessage = null;
   for (int consumerID : consumerIDs) {
      ConsumerHolder holder = consumers[consumerID];
      if (holder == null) {
         throw new IllegalArgumentException("No consumer at " + consumerID);
      }
      for (int j = msgStart; j < msgEnd; j++) {
         ClientMessage message = holder.consumer.receive(WAIT_TIMEOUT);
         if (message == null) {
            log.info("*** dumping consumers:");
            dumpConsumers();
            Assert.fail("consumer " + consumerID + " did not receive message " + j);
         }
         if (isLargeMessage()) {
            checkMessageBody(message);
         }
         if (ack) {
            message.acknowledge();
         }
         if (firstReceiveTime != -1) {
            Assert.assertTrue("Message received too soon", System.currentTimeMillis() >= firstReceiveTime);
         }
         // record every out-of-order message but keep draining so the log shows the full picture
         if (j != (Integer) message.getObjectProperty(ClusterTestBase.COUNT_PROP)) {
            if (firstOutOfOrderMessage == null) {
               firstOutOfOrderMessage = "expected " + j + " received " + message.getObjectProperty(ClusterTestBase.COUNT_PROP);
            }
            outOfOrder = true;
            System.out.println("Message j=" + j + " was received out of order = " + message.getObjectProperty(ClusterTestBase.COUNT_PROP));
            log.info("Message j=" + j + " was received out of order = " + message.getObjectProperty(ClusterTestBase.COUNT_PROP));
         }
      }
   }
   Assert.assertFalse("Messages were consumed out of order::" + firstOutOfOrderMessage, outOfOrder);
}

/**
 * Drains and logs pending messages on every open consumer (failure diagnostics).
 */
private void dumpConsumers() throws Exception {
   for (int i = 0; i < consumers.length; i++) {
      if (consumers[i] != null && !consumers[i].consumer.isClosed()) {
         log.info("Dumping consumer " + i);
         checkReceive(i);
      }
   }
}

/**
 * Returns a printable description of the server's cluster connections and topology.
 */
protected String clusterDescription(ActiveMQServer server) {
   String br = "-------------------------\n";
   String out = br;
   out += "ActiveMQ Artemis server " + server + "\n";
   ClusterManager clusterManager = server.getClusterManager();
   if (clusterManager == null) {
      out += "N/A";
   } else {
      for (ClusterConnection cc : clusterManager.getClusterConnections()) {
         out += cc.describe() + "\n";
         out += cc.getTopology().describe();
      }
   }
   out += "\n\nfull topology:";
   return out + br;
}

protected void verifyReceiveAll(final boolean ack, final int numMessages, final int... consumerIDs) throws Exception {
   verifyReceiveAllInRange(ack, 0, numMessages, consumerIDs);
}

protected void verifyReceiveAll(final int numMessages, final int... consumerIDs) throws Exception {
   verifyReceiveAllInRange(false, 0, numMessages, consumerIDs);
}

protected void verifyReceiveAllNotBefore(final long firstReceiveTime, final int numMessages, final int... consumerIDs) throws Exception {
   verifyReceiveAllInRangeNotBefore(false, firstReceiveTime, 0, numMessages, consumerIDs);
}

protected void checkReceive(final int...
consumerIDs) throws Exception { for (int consumerID : consumerIDs) { ConsumerHolder holder = consumers[consumerID]; if (holder == null) { throw new IllegalArgumentException("No consumer at " + consumerID); } ClientMessage message; do { message = holder.consumer.receive(500); if (message != null) { log.info("check receive Consumer " + consumerID + " received message " + message.getObjectProperty(ClusterTestBase.COUNT_PROP)); } else { log.info("check receive Consumer " + consumerID + " null message"); } } while (message != null); } } protected void verifyReceiveRoundRobin(final int numMessages, final int... consumerIDs) throws Exception { int count = 0; for (int i = 0; i < numMessages; i++) { // We may use a negative number in some tests to ignore the consumer, case we know the server is down if (consumerIDs[count] >= 0) { ConsumerHolder holder = consumers[consumerIDs[count]]; if (holder == null) { throw new IllegalArgumentException("No consumer at " + consumerIDs[i]); } ClientMessage message = holder.consumer.receive(WAIT_TIMEOUT); Assert.assertNotNull("consumer " + consumerIDs[count] + " did not receive message " + i, message); Assert.assertEquals("consumer " + consumerIDs[count] + " message " + i, i, message.getObjectProperty(ClusterTestBase.COUNT_PROP)); message.acknowledge(); consumers[consumerIDs[count]].session.commit(); } count++; if (count == consumerIDs.length) { count = 0; } } } /* * With some tests we cannot guarantee the order in which the bridges in the cluster startup so the round robin order is not predefined. * In which case we test the messages are round robin'd in any specific order that contains all the consumers */ protected void verifyReceiveRoundRobinInSomeOrder(final int numMessages, final int... 
consumerIDs) throws Exception { if (numMessages < consumerIDs.length) { throw new IllegalStateException("You must send more messages than consumers specified or the algorithm " + "won't work"); } verifyReceiveRoundRobinInSomeOrder(true, numMessages, consumerIDs); } class OrderedConsumerHolder implements Comparable<OrderedConsumerHolder> { ConsumerHolder consumer; int order; @Override public int compareTo(final OrderedConsumerHolder o) { int thisOrder = order; int otherOrder = o.order; return thisOrder < otherOrder ? -1 : thisOrder == otherOrder ? 0 : 1; } } protected void verifyReceiveRoundRobinInSomeOrder(final boolean ack, final int numMessages, final int... consumerIDs) throws Exception { if (numMessages < consumerIDs.length) { throw new IllegalStateException("not enough messages"); } // First get one from each consumer to determine the order, then we sort them in this order List<OrderedConsumerHolder> sorted = new ArrayList<>(); for (int consumerID : consumerIDs) { ConsumerHolder holder = consumers[consumerID]; ClientMessage msg = holder.consumer.receive(10000); Assert.assertNotNull("msg must exist", msg); int count = msg.getIntProperty(ClusterTestBase.COUNT_PROP); OrderedConsumerHolder orderedHolder = new OrderedConsumerHolder(); orderedHolder.consumer = holder; orderedHolder.order = count; sorted.add(orderedHolder); if (ack) { msg.acknowledge(); } } // Now sort them Collections.sort(sorted); // First verify the first lot received are ok int count = 0; for (OrderedConsumerHolder holder : sorted) { if (holder.order != count) { throw new IllegalStateException("Out of order"); } count++; } // Now check the rest are in order too outer: while (count < numMessages) { for (OrderedConsumerHolder holder : sorted) { ClientMessage msg = holder.consumer.consumer.receive(10000); Assert.assertNotNull("msg must exist", msg); int p = msg.getIntProperty(ClusterTestBase.COUNT_PROP); if (p != count) { throw new IllegalStateException("Out of order 2"); } if (ack) { 
msg.acknowledge();
         }
         count++;
         if (count == numMessages) {
            break outer;
         }
      }
   }
}

/**
 * Verifies that the given COUNT_PROP values were received exactly once, distributed
 * round-robin across the consumers, starting from the counts in {@code messageCounts}.
 * NOTE(review): {@code lists} is sized by consumerIDs.length but filled by messageCounts
 * indices — assumes messageCounts.length == consumerIDs.length; confirm with callers.
 */
protected void verifyReceiveRoundRobinInSomeOrderWithCounts(final boolean ack,
                                                            final int[] messageCounts,
                                                            final int... consumerIDs) throws Exception {
   List<LinkedList<Integer>> receivedCounts = new ArrayList<>();
   Set<Integer> counts = new HashSet<>();
   // drain every consumer, recording each COUNT_PROP per consumer and asserting uniqueness
   for (int consumerID : consumerIDs) {
      ConsumerHolder holder = consumers[consumerID];
      if (holder == null) {
         throw new IllegalArgumentException("No consumer at " + consumerID);
      }
      LinkedList<Integer> list = new LinkedList<>();
      receivedCounts.add(list);
      ClientMessage message;
      do {
         message = holder.consumer.receive(1000);
         if (message != null) {
            int count = (Integer) message.getObjectProperty(ClusterTestBase.COUNT_PROP);
            checkMessageBody(message);
            Assert.assertFalse(counts.contains(count));
            counts.add(count);
            list.add(count);
            if (ack) {
               message.acknowledge();
            }
         }
      } while (message != null);
   }
   for (int messageCount : messageCounts) {
      Assert.assertTrue(counts.contains(messageCount));
   }
   // order the per-consumer lists by which starting count they received
   @SuppressWarnings("unchecked")
   LinkedList<Integer>[] lists = new LinkedList[consumerIDs.length];
   for (int i = 0; i < messageCounts.length; i++) {
      for (LinkedList<Integer> list : receivedCounts) {
         int elem = list.get(0);
         if (elem == messageCounts[i]) {
            lists[i] = list;
            break;
         }
      }
   }
   // then verify the rotation: each expected count comes from the next list in turn
   int index = 0;
   for (int messageCount : messageCounts) {
      LinkedList<Integer> list = lists[index];
      Assert.assertNotNull(list);
      int elem = list.poll();
      Assert.assertEquals(messageCount, elem);
      index++;
      if (index == consumerIDs.length) {
         index = 0;
      }
   }
}

/**
 * Asserts the body of a large message matches the expected sample-byte pattern;
 * no-op unless the test runs in large-message mode.
 */
private void checkMessageBody(ClientMessage message) {
   if (isLargeMessage()) {
      for (int posMsg = 0; posMsg < getLargeMessageSize(); posMsg++) {
         assertEquals(getSamplebyte(posMsg), message.getBodyBuffer().readByte());
      }
   }
}

protected void verifyReceiveRoundRobinInSomeOrderNoAck(final int numMessages, final int... consumerIDs) throws Exception {
   if (numMessages < consumerIDs.length) {
      throw new IllegalStateException("You must send more messages than consumers specified or the algorithm " + "won't work");
   }
   verifyReceiveRoundRobinInSomeOrder(false, numMessages, consumerIDs);
}

protected int[] getReceivedOrder(final int consumerID) throws Exception {
   return getReceivedOrder(consumerID, false);
}

/**
 * Drains consumer {@code consumerID} and returns the COUNT_PROP values in receive order.
 */
protected int[] getReceivedOrder(final int consumerID, final boolean ack) throws Exception {
   ConsumerHolder consumer = consumers[consumerID];
   if (consumer == null) {
      throw new IllegalArgumentException("No consumer at " + consumerID);
   }
   List<Integer> ints = new ArrayList<>();
   ClientMessage message = null;
   do {
      message = consumer.consumer.receive(500);
      if (message != null) {
         if (isLargeMessage()) {
            checkMessageBody(message);
         }
         if (ack) {
            message.acknowledge();
         }
         int count = (Integer) message.getObjectProperty(ClusterTestBase.COUNT_PROP);
         ints.add(count);
      }
   } while (message != null);
   int[] res = new int[ints.size()];
   int j = 0;
   for (Integer i : ints) {
      res[j++] = i;
   }
   if (ack) {
      // just to flush acks
      consumers[consumerID].session.commit();
   }
   return res;
}

protected void verifyNotReceive(final int...
consumerIDs) throws Exception {
   // asserts each listed consumer has no message immediately available
   for (int i = 0; i < consumerIDs.length; i++) {
      ConsumerHolder holder = consumers[consumerIDs[i]];
      if (holder == null) {
         throw new IllegalArgumentException("No consumer at " + consumerIDs[i]);
      }
      Assert.assertNull("consumer " + i + " received message", holder.consumer.receiveImmediate());
   }
}

protected void setupSessionFactory(final int node, final boolean netty) throws Exception {
   setupSessionFactory(node, netty, false);
}

protected void setupSessionFactory(final int node, final boolean netty, boolean ha) throws Exception {
   setupSessionFactory(node, netty, ha, null, null);
}

/**
 * Creates a locator + session factory for node {@code node} and stores it in {@code sfs[node]}.
 * A throwaway session is opened and closed to validate credentials/connectivity up front.
 */
protected void setupSessionFactory(final int node,
                                   final boolean netty,
                                   boolean ha,
                                   final String user,
                                   final String password) throws Exception {
   if (sfs[node] != null) {
      throw new IllegalArgumentException("Already a factory at " + node);
   }
   Map<String, Object> params = generateParams(node, netty);
   TransportConfiguration serverTotc;
   if (netty) {
      serverTotc = new TransportConfiguration(ActiveMQTestBase.NETTY_CONNECTOR_FACTORY, params);
   } else {
      serverTotc = new TransportConfiguration(INVM_CONNECTOR_FACTORY, params);
   }
   if (ha) {
      locators[node] = ActiveMQClient.createServerLocatorWithHA(serverTotc);
   } else {
      locators[node] = ActiveMQClient.createServerLocatorWithoutHA(serverTotc);
   }
   locators[node].setProtocolManagerFactory(ActiveMQServerSideProtocolManagerFactory.getInstance(locators[node]));
   locators[node].setBlockOnNonDurableSend(true).setBlockOnDurableSend(true);
   addServerLocator(locators[node]);
   ClientSessionFactory sf = createSessionFactory(locators[node]);
   ClientSession session = sf.createSession(user, password, false, true, true, ActiveMQClient.DEFAULT_PRE_ACKNOWLEDGE, ActiveMQClient.DEFAULT_ACK_BATCH_SIZE);
   session.close();
   sfs[node] = sf;
}

/**
 * Non-HA session factory for node {@code node} with an explicit reconnect-attempts setting.
 */
protected void setupSessionFactory(final int node, final boolean netty, int reconnectAttempts) throws Exception {
   if (sfs[node] != null) {
      throw new IllegalArgumentException("Already a server at " + node);
   }
   Map<String, Object> params = generateParams(node, netty);
   TransportConfiguration serverTotc;
   if (netty) {
      serverTotc = new TransportConfiguration(ActiveMQTestBase.NETTY_CONNECTOR_FACTORY, params);
   } else {
      serverTotc = new TransportConfiguration(INVM_CONNECTOR_FACTORY, params);
   }
   locators[node] = ActiveMQClient.createServerLocatorWithoutHA(serverTotc).setBlockOnNonDurableSend(true).setBlockOnDurableSend(true).setReconnectAttempts(reconnectAttempts);
   addServerLocator(locators[node]);
   ClientSessionFactory sf = createSessionFactory(locators[node]);
   sfs[node] = sf;
}

/**
 * HA session factory for a live/backup pair; retries forever with a 100ms interval.
 */
protected void setupSessionFactory(final int node, final int backupNode, final boolean netty, final boolean blocking) throws Exception {
   if (sfs[node] != null) {
      throw new IllegalArgumentException("Already a server at " + node);
   }
   Map<String, Object> params = generateParams(node, netty);
   TransportConfiguration serverToTC = createTransportConfiguration(netty, false, params);
   locators[node] = addServerLocator(ActiveMQClient.createServerLocatorWithHA(serverToTC)).setRetryInterval(100).setRetryIntervalMultiplier(1d).setReconnectAttempts(-1).setBlockOnNonDurableSend(blocking).setBlockOnDurableSend(blocking);
   final String identity = "TestClientConnector,live=" + node + ",backup=" + backupNode;
   ((ServerLocatorInternal) locators[node]).setIdentity(identity);
   ClientSessionFactory sf = createSessionFactory(locators[node]);
   sfs[node] = sf;
}

protected void setupSessionFactory(final int node, final int backupNode, final boolean netty) throws Exception {
   this.setupSessionFactory(node, backupNode, netty, true);
}

/**
 * Returns the server at slot {@code node}, failing loudly if none was set up.
 */
protected ActiveMQServer getServer(final int node) {
   if (servers[node] == null) {
      throw new IllegalArgumentException("No server at node " + node);
   }
   return servers[node];
}

protected void setupServer(final int node, final boolean fileStorage, final boolean netty) throws Exception {
   setupLiveServer(node, fileStorage, false, netty, false);
}

protected void setupLiveServer(final int node, final boolean fileStorage, final
boolean netty, boolean isLive) throws Exception {
   // NOTE(review): the isLive argument is forwarded as the 5-arg overload's liveOnly
   // parameter — name mismatch; confirm callers' intent.
   setupLiveServer(node, fileStorage, false, netty, isLive);
}

/**
 * Creates (but does not start) a live server at slot {@code node}. HA policy is
 * live-only, shared-store master, or replicated master depending on the flags.
 */
protected void setupLiveServer(final int node,
                               final boolean fileStorage,
                               final boolean sharedStorage,
                               final boolean netty,
                               boolean liveOnly) throws Exception {
   if (servers[node] != null) {
      throw new IllegalArgumentException("Already a server at node " + node);
   }
   HAPolicyConfiguration haPolicyConfiguration = null;
   if (liveOnly) {
      haPolicyConfiguration = new LiveOnlyPolicyConfiguration();
   } else {
      if (sharedStorage)
         haPolicyConfiguration = new SharedStoreMasterPolicyConfiguration();
      else
         haPolicyConfiguration = new ReplicatedPolicyConfiguration();
   }
   Configuration configuration = createBasicConfig(node).setJournalMaxIO_AIO(1000).setThreadPoolMaxSize(10).clearAcceptorConfigurations().addAcceptorConfiguration(createTransportConfiguration(netty, true, generateParams(node, netty))).setHAPolicyConfiguration(haPolicyConfiguration).setResolveProtocols(false);
   ActiveMQServer server;
   if (fileStorage) {
      if (sharedStorage) {
         server = createInVMFailoverServer(true, configuration, nodeManagers[node], node);
      } else {
         server = createServer(configuration);
      }
   } else {
      if (sharedStorage) {
         server = createInVMFailoverServer(false, configuration, nodeManagers[node], node);
      } else {
         server = createServer(false, configuration);
      }
   }
   server.addProtocolManagerFactory(new CoreProtocolManagerFactory());
   server.setIdentity(this.getClass().getSimpleName() + "/Live(" + node + ")");
   servers[node] = addServer(server);
}

/**
 * Server lacks a {@link ClusterConnectionConfiguration} necessary for the remote (replicating)
 * backup case.
 * <br>
 * Use
 * {@link #setupClusterConnectionWithBackups(String, String, org.apache.activemq.artemis.core.server.cluster.impl.MessageLoadBalancingType, int, boolean, int, int[])}
 * to add it.
 *
 * @param node          slot for the backup server
 * @param liveNode      slot of the live server this backup shadows
 * @param fileStorage   whether journal persistence is enabled
 * @param sharedStorage shared-store slave vs. replica HA policy
 * @param netty         netty vs. in-VM transport
 * @throws Exception on setup failure
 */
protected void setupBackupServer(final int node,
                                 final int liveNode,
                                 final boolean fileStorage,
                                 final boolean sharedStorage,
                                 final boolean netty) throws Exception {
   if (servers[node] != null) {
      throw new IllegalArgumentException("Already a server at node " + node);
   }
   TransportConfiguration liveConfig = createTransportConfiguration(netty, false, generateParams(liveNode, netty));
   TransportConfiguration backupConfig = createTransportConfiguration(netty, false, generateParams(node, netty));
   TransportConfiguration acceptorConfig = createTransportConfiguration(netty, true, generateParams(node, netty));
   // shared storage shares the live node's config/journal directory (createBasicConfig(liveNode))
   Configuration configuration = createBasicConfig(sharedStorage ? liveNode : node).clearAcceptorConfigurations().addAcceptorConfiguration(acceptorConfig).addConnectorConfiguration(liveConfig.getName(), liveConfig).addConnectorConfiguration(backupConfig.getName(), backupConfig).setHAPolicyConfiguration(sharedStorage ? new SharedStoreSlavePolicyConfiguration() : new ReplicaPolicyConfiguration());
   ActiveMQServer server;
   if (sharedStorage) {
      server = createInVMFailoverServer(true, configuration, nodeManagers[liveNode], liveNode);
   } else {
      boolean enablePersistency = fileStorage ? true : configuration.isPersistenceEnabled();
      // NOTE(review): this branch calls addServer twice (here and below) — confirm harmless
      server = addServer(ActiveMQServers.newActiveMQServer(configuration, enablePersistency));
   }
   server.setIdentity(this.getClass().getSimpleName() + "/Backup(" + node + " of live " + liveNode + ")");
   servers[node] = addServer(server);
}

/**
 * Creates a live server that announces itself via UDP broadcast/discovery groups
 * instead of static connectors.
 */
protected void setupLiveServerWithDiscovery(final int node,
                                            final String groupAddress,
                                            final int port,
                                            final boolean fileStorage,
                                            final boolean netty,
                                            final boolean sharedStorage) throws Exception {
   if (servers[node] != null) {
      throw new IllegalArgumentException("Already a server at node " + node);
   }
   Map<String, Object> params = generateParams(node, netty);
   TransportConfiguration connector = createTransportConfiguration(netty, false, params);
   List<String> connectorPairs = new ArrayList<>();
   connectorPairs.add(connector.getName());
   UDPBroadcastEndpointFactory endpoint = new UDPBroadcastEndpointFactory().setGroupAddress(groupAddress).setGroupPort(port);
   BroadcastGroupConfiguration bcConfig = new BroadcastGroupConfiguration().setName("bg1").setBroadcastPeriod(200).setConnectorInfos(connectorPairs).setEndpointFactory(endpoint);
   DiscoveryGroupConfiguration dcConfig = new DiscoveryGroupConfiguration().setName("dg1").setRefreshTimeout(1000).setDiscoveryInitialWaitTimeout(1000).setBroadcastEndpointFactory(endpoint);
   Configuration configuration = createBasicConfig(node).setJournalMaxIO_AIO(1000).clearAcceptorConfigurations().addAcceptorConfiguration(createTransportConfiguration(netty, true, params)).addConnectorConfiguration(connector.getName(), connector).addBroadcastGroupConfiguration(bcConfig).addDiscoveryGroupConfiguration(dcConfig.getName(), dcConfig).setHAPolicyConfiguration(sharedStorage ? new SharedStoreMasterPolicyConfiguration() : new ReplicatedPolicyConfiguration());
   ActiveMQServer server;
   if (fileStorage) {
      if (sharedStorage) {
         server = createInVMFailoverServer(true, configuration, nodeManagers[node], node);
      } else {
         server = addServer(ActiveMQServers.newActiveMQServer(configuration));
         server.setIdentity("Server " + node);
      }
   } else {
      if (sharedStorage) {
         server = createInVMFailoverServer(false, configuration, nodeManagers[node], node);
      } else {
         server = addServer(ActiveMQServers.newActiveMQServer(configuration, false));
         server.setIdentity("Server " + node);
      }
   }
   servers[node] = server;
}

/**
 * Creates a backup server that locates its live counterpart via UDP discovery.
 */
protected void setupBackupServerWithDiscovery(final int node,
                                              final int liveNode,
                                              final String groupAddress,
                                              final int port,
                                              final boolean fileStorage,
                                              final boolean netty,
                                              final boolean sharedStorage) throws Exception {
   if (servers[node] != null) {
      throw new IllegalArgumentException("Already a server at node " + node);
   }
   Map<String, Object> params = generateParams(node, netty);
   TransportConfiguration connector = createTransportConfiguration(netty, false, params);
   List<String> connectorPairs = new ArrayList<>();
   connectorPairs.add(connector.getName());
   UDPBroadcastEndpointFactory endpoint = new UDPBroadcastEndpointFactory().setGroupAddress(groupAddress).setGroupPort(port);
   BroadcastGroupConfiguration bcConfig = new BroadcastGroupConfiguration().setName("bg1").setBroadcastPeriod(1000).setConnectorInfos(connectorPairs).setEndpointFactory(endpoint);
   DiscoveryGroupConfiguration dcConfig = new DiscoveryGroupConfiguration().setName("dg1").setRefreshTimeout(5000).setDiscoveryInitialWaitTimeout(5000).setBroadcastEndpointFactory(endpoint);
   Configuration configuration = createBasicConfig(sharedStorage ? liveNode : node).clearAcceptorConfigurations().addAcceptorConfiguration(createTransportConfiguration(netty, true, params)).addConnectorConfiguration(connector.getName(), connector).addBroadcastGroupConfiguration(bcConfig).addDiscoveryGroupConfiguration(dcConfig.getName(), dcConfig).setHAPolicyConfiguration(sharedStorage ? new SharedStoreSlavePolicyConfiguration() : new ReplicatedPolicyConfiguration());
   ActiveMQServer server;
   if (sharedStorage) {
      server = createInVMFailoverServer(fileStorage, configuration, nodeManagers[liveNode], liveNode);
   } else {
      boolean enablePersistency = fileStorage ? configuration.isPersistenceEnabled() : false;
      server = addServer(ActiveMQServers.newActiveMQServer(configuration, enablePersistency));
   }
   servers[node] = server;
}

/**
 * Clears the given server slots; each slot must currently hold a server.
 */
protected void clearServer(final int... nodes) {
   for (int i = 0; i < nodes.length; i++) {
      if (servers[nodes[i]] == null) {
         throw new IllegalArgumentException("No server at node " + nodes[i]);
      }
      servers[nodes[i]] = null;
   }
}

protected void clearAllServers() {
   for (int i = 0; i < servers.length; i++) {
      servers[i] = null;
   }
}

/**
 * Registers a cluster connection named {@code name} from {@code nodeFrom} towards
 * {@code nodeTo} (-1 means no static target).
 */
protected void setupClusterConnection(final String name,
                                      final int nodeFrom,
                                      final int nodeTo,
                                      final String address,
                                      final MessageLoadBalancingType messageLoadBalancingType,
                                      final int maxHops,
                                      final boolean netty,
                                      final boolean allowDirectConnectionsOnly) {
   ActiveMQServer serverFrom = servers[nodeFrom];
   if (serverFrom == null) {
      throw new IllegalStateException("No server at node " + nodeFrom);
   }
   TransportConfiguration connectorFrom = createTransportConfiguration(netty, false, generateParams(nodeFrom, netty));
   serverFrom.getConfiguration().getConnectorConfigurations().put(name, connectorFrom);
   List<String> pairs = null;
   if (nodeTo != -1) {
      TransportConfiguration serverTotc = createTransportConfiguration(netty, false, generateParams(nodeTo, netty));
      serverFrom.getConfiguration().getConnectorConfigurations().put(serverTotc.getName(), serverTotc);
      pairs = new ArrayList<>();
pairs.add(serverTotc.getName()); } Configuration config = serverFrom.getConfiguration(); ClusterConnectionConfiguration clusterConf = new ClusterConnectionConfiguration().setName(name).setAddress(address).setConnectorName(name).setRetryInterval(100).setMessageLoadBalancingType(messageLoadBalancingType).setMaxHops(maxHops).setConfirmationWindowSize(1024).setStaticConnectors(pairs).setAllowDirectConnectionsOnly(allowDirectConnectionsOnly); config.getClusterConfigurations().add(clusterConf); } protected void setupClusterConnection(final String name, final int nodeFrom, final int nodeTo, final String address, final MessageLoadBalancingType messageLoadBalancingType, final int maxHops, final int reconnectAttempts, final long retryInterval, final boolean netty, final boolean allowDirectConnectionsOnly) { ActiveMQServer serverFrom = servers[nodeFrom]; if (serverFrom == null) { throw new IllegalStateException("No server at node " + nodeFrom); } TransportConfiguration connectorFrom = createTransportConfiguration(netty, false, generateParams(nodeFrom, netty)); serverFrom.getConfiguration().getConnectorConfigurations().put(name, connectorFrom); List<String> pairs = null; if (nodeTo != -1) { TransportConfiguration serverTotc = createTransportConfiguration(netty, false, generateParams(nodeTo, netty)); serverFrom.getConfiguration().getConnectorConfigurations().put(serverTotc.getName(), serverTotc); pairs = new ArrayList<>(); pairs.add(serverTotc.getName()); } Configuration config = serverFrom.getConfiguration(); ClusterConnectionConfiguration clusterConf = new ClusterConnectionConfiguration().setName(name).setAddress(address).setConnectorName(name).setReconnectAttempts(reconnectAttempts).setRetryInterval(retryInterval).setMessageLoadBalancingType(messageLoadBalancingType).setMaxHops(maxHops).setConfirmationWindowSize(1024).setStaticConnectors(pairs).setAllowDirectConnectionsOnly(allowDirectConnectionsOnly); config.getClusterConfigurations().add(clusterConf); } protected void 
setupClusterConnection(final String name, final String uri, int server) throws Exception { ActiveMQServer serverFrom = servers[server]; if (serverFrom == null) { throw new IllegalStateException("No server at node " + server); } ClusterConnectionConfiguration configuration = new ClusterConnectionConfiguration(new URI(uri)).setName(name); serverFrom.getConfiguration().addClusterConfiguration(configuration); } protected void setupClusterConnection(final String name, final String address, final MessageLoadBalancingType messageLoadBalancingType, final int maxHops, final boolean netty, final int nodeFrom, final int... nodesTo) { ActiveMQServer serverFrom = servers[nodeFrom]; if (serverFrom == null) { throw new IllegalStateException("No server at node " + nodeFrom); } TransportConfiguration connectorFrom = createTransportConfiguration(netty, false, generateParams(nodeFrom, netty)); serverFrom.getConfiguration().getConnectorConfigurations().put(connectorFrom.getName(), connectorFrom); List<String> pairs = new ArrayList<>(); for (int element : nodesTo) { TransportConfiguration serverTotc = createTransportConfiguration(netty, false, generateParams(element, netty)); serverFrom.getConfiguration().getConnectorConfigurations().put(serverTotc.getName(), serverTotc); pairs.add(serverTotc.getName()); } Configuration config = serverFrom.getConfiguration(); ClusterConnectionConfiguration clusterConf = createClusterConfig(name, address, messageLoadBalancingType, maxHops, connectorFrom, pairs); config.getClusterConfigurations().add(clusterConf); } protected void setupClusterConnection(final String name, final String address, final MessageLoadBalancingType messageLoadBalancingType, final int maxHops, final int reconnectAttempts, final long retryInterval, final boolean netty, final int nodeFrom, final int... 
nodesTo) { ActiveMQServer serverFrom = servers[nodeFrom]; if (serverFrom == null) { throw new IllegalStateException("No server at node " + nodeFrom); } TransportConfiguration connectorFrom = createTransportConfiguration(netty, false, generateParams(nodeFrom, netty)); serverFrom.getConfiguration().getConnectorConfigurations().put(connectorFrom.getName(), connectorFrom); List<String> pairs = new ArrayList<>(); for (int element : nodesTo) { TransportConfiguration serverTotc = createTransportConfiguration(netty, false, generateParams(element, netty)); serverFrom.getConfiguration().getConnectorConfigurations().put(serverTotc.getName(), serverTotc); pairs.add(serverTotc.getName()); } Configuration config = serverFrom.getConfiguration(); ClusterConnectionConfiguration clusterConf = new ClusterConnectionConfiguration().setName(name).setAddress(address).setConnectorName(connectorFrom.getName()).setRetryInterval(retryInterval).setReconnectAttempts(reconnectAttempts).setCallTimeout(100).setCallFailoverTimeout(100).setMessageLoadBalancingType(messageLoadBalancingType).setMaxHops(maxHops).setConfirmationWindowSize(1024).setStaticConnectors(pairs); config.getClusterConfigurations().add(clusterConf); } private ClusterConnectionConfiguration createClusterConfig(final String name, final String address, final MessageLoadBalancingType messageLoadBalancingType, final int maxHops, TransportConfiguration connectorFrom, List<String> pairs) { return new ClusterConnectionConfiguration().setName(name).setAddress(address).setConnectorName(connectorFrom.getName()).setRetryInterval(250).setMessageLoadBalancingType(messageLoadBalancingType).setMaxHops(maxHops).setConfirmationWindowSize(1024).setStaticConnectors(pairs); } protected void setupClusterConnectionWithBackups(final String name, final String address, final MessageLoadBalancingType messageLoadBalancingType, final int maxHops, final boolean netty, final int nodeFrom, final int[] nodesTo) { ActiveMQServer serverFrom = servers[nodeFrom]; 
if (serverFrom == null) { throw new IllegalStateException("No server at node " + nodeFrom); } TransportConfiguration connectorFrom = createTransportConfiguration(netty, false, generateParams(nodeFrom, netty)); serverFrom.getConfiguration().getConnectorConfigurations().put(name, connectorFrom); List<String> pairs = new ArrayList<>(); for (int element : nodesTo) { TransportConfiguration serverTotc = createTransportConfiguration(netty, false, generateParams(element, netty)); serverFrom.getConfiguration().getConnectorConfigurations().put(serverTotc.getName(), serverTotc); pairs.add(serverTotc.getName()); } Configuration config = serverFrom.getConfiguration(); ClusterConnectionConfiguration clusterConf = new ClusterConnectionConfiguration().setName(name).setAddress(address).setConnectorName(name).setRetryInterval(250).setMessageLoadBalancingType(messageLoadBalancingType).setMaxHops(maxHops).setConfirmationWindowSize(1024).setStaticConnectors(pairs); config.getClusterConfigurations().add(clusterConf); } protected void setupDiscoveryClusterConnection(final String name, final int node, final String discoveryGroupName, final String address, final MessageLoadBalancingType messageLoadBalancingType, final int maxHops, final boolean netty) { ActiveMQServer server = servers[node]; if (server == null) { throw new IllegalStateException("No server at node " + node); } TransportConfiguration connectorConfig = createTransportConfiguration(netty, false, generateParams(node, netty)); server.getConfiguration().getConnectorConfigurations().put(name, connectorConfig); Configuration config = server.getConfiguration(); ClusterConnectionConfiguration clusterConf = new ClusterConnectionConfiguration().setName(name).setAddress(address).setConnectorName(name).setRetryInterval(100).setDuplicateDetection(true).setMessageLoadBalancingType(messageLoadBalancingType).setMaxHops(maxHops).setConfirmationWindowSize(1024).setDiscoveryGroupName(discoveryGroupName); List<ClusterConnectionConfiguration> 
clusterConfs = config.getClusterConfigurations(); clusterConfs.add(clusterConf); } protected void startServers(final int... nodes) throws Exception { for (int node : nodes) { log.info("#test start node " + node); final long currentTime = System.currentTimeMillis(); boolean waitForSelf = currentTime - timeStarts[node] < TIMEOUT_START_SERVER; boolean waitForPrevious = node > 0 && currentTime - timeStarts[node - 1] < TIMEOUT_START_SERVER; if (waitForPrevious || waitForSelf) { Thread.sleep(TIMEOUT_START_SERVER); } timeStarts[node] = System.currentTimeMillis(); log.info("starting server " + servers[node]); servers[node].start(); log.info("started server " + servers[node]); waitForServerToStart(servers[node]); } } protected void stopClusterConnections(final int... nodes) throws Exception { for (int node : nodes) { if (servers[node].isStarted()) { for (ClusterConnection cc : servers[node].getClusterManager().getClusterConnections()) { cc.stop(); cc.flushExecutor(); } } } } protected void stopServers(final int... nodes) throws Exception { log.info("Stopping nodes " + Arrays.toString(nodes)); Exception exception = null; for (int node : nodes) { if (servers[node] != null && servers[node].isStarted()) { try { if (System.currentTimeMillis() - timeStarts[node] < TIMEOUT_START_SERVER) { // We can't stop and start a node too fast (faster than what the Topology could realize about this Thread.sleep(TIMEOUT_START_SERVER); } timeStarts[node] = System.currentTimeMillis(); log.info("stopping server " + node); servers[node].stop(); log.info("server " + node + " stopped"); } catch (Exception e) { exception = e; } } } if (exception != null) throw exception; } protected boolean isFileStorage() { return true; } }
apache-2.0
fenik17/netty
codec-http2/src/test/java/io/netty/handler/codec/http2/Http2ClientUpgradeCodecTest.java
3357
/* * Copyright 2017 The Netty Project * * The Netty Project licenses this file to you under the Apache License, version 2.0 (the * "License"); you may not use this file except in compliance with the License. You may obtain a * copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ package io.netty.handler.codec.http2; import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.channel.embedded.EmbeddedChannel; import io.netty.handler.codec.http.DefaultFullHttpRequest; import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.HttpMethod; import io.netty.handler.codec.http.HttpVersion; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import org.junit.Test; public class Http2ClientUpgradeCodecTest { @Test public void testUpgradeToHttp2ConnectionHandler() throws Exception { testUpgrade(new Http2ConnectionHandlerBuilder().server(false).frameListener( new Http2FrameAdapter()).build(), null); } @Test public void testUpgradeToHttp2FrameCodec() throws Exception { testUpgrade(Http2FrameCodecBuilder.forClient().build(), null); } @Test public void testUpgradeToHttp2MultiplexCodec() throws Exception { testUpgrade(Http2MultiplexCodecBuilder.forClient(new HttpInboundHandler()) .withUpgradeStreamHandler(new ChannelInboundHandlerAdapter()).build(), null); } @Test public void testUpgradeToHttp2FrameCodecWithMultiplexer() throws Exception { testUpgrade(Http2FrameCodecBuilder.forClient().build(), new Http2MultiplexHandler(new HttpInboundHandler(), new HttpInboundHandler())); } private static 
void testUpgrade(Http2ConnectionHandler handler, Http2MultiplexHandler multiplexer) throws Exception { FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.OPTIONS, "*"); EmbeddedChannel channel = new EmbeddedChannel(new ChannelInboundHandlerAdapter()); ChannelHandlerContext ctx = channel.pipeline().firstContext(); Http2ClientUpgradeCodec codec; if (multiplexer == null) { codec = new Http2ClientUpgradeCodec("connectionHandler", handler); } else { codec = new Http2ClientUpgradeCodec("connectionHandler", handler, multiplexer); } codec.setUpgradeHeaders(ctx, request); // Flush the channel to ensure we write out all buffered data channel.flush(); codec.upgradeTo(ctx, null); assertNotNull(channel.pipeline().get("connectionHandler")); if (multiplexer != null) { assertNotNull(channel.pipeline().get(Http2MultiplexHandler.class)); } assertTrue(channel.finishAndReleaseAll()); } @ChannelHandler.Sharable private static final class HttpInboundHandler extends ChannelInboundHandlerAdapter { } }
apache-2.0
ericbottard/spring-cloud-dataflow
spring-cloud-starter-dataflow-server-local/src/test/java/org/springframework/cloud/dataflow/server/local/security/LocalServerSecurityWithUsersFileTests.java
35499
/* * Copyright 2016-2017 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.cloud.dataflow.server.local.security; import java.util.Arrays; import java.util.Collection; import java.util.Map; import com.google.common.collect.ImmutableMap; import org.junit.ClassRule; import org.junit.Test; import org.junit.rules.RuleChain; import org.junit.rules.TestRule; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameter; import org.junit.runners.Parameterized.Parameters; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.cloud.dataflow.server.local.LocalDataflowResource; import org.springframework.data.authentication.UserCredentials; import org.springframework.http.HttpMethod; import org.springframework.http.HttpStatus; import org.springframework.test.web.servlet.ResultMatcher; import org.springframework.test.web.servlet.request.MockHttpServletRequestBuilder; import org.springframework.util.CollectionUtils; import static org.springframework.cloud.dataflow.server.local.security.SecurityTestUtils.basicAuthorizationHeader; import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.delete; import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get; import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post; import static 
org.springframework.test.web.servlet.request.MockMvcRequestBuilders.put; import static org.springframework.test.web.servlet.result.MockMvcResultHandlers.print; import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; /** * Tests for security configuration backed by a file-based user list. * * @author Eric Bottard * @author Gunnar Hillert * @author Ilayaperumal Gopinathan */ @RunWith(Parameterized.class) public class LocalServerSecurityWithUsersFileTests { private final static Logger logger = LoggerFactory.getLogger(LocalServerSecurityWithUsersFileTests.class); private final static LocalDataflowResource localDataflowResource = new LocalDataflowResource( "classpath:org/springframework/cloud/dataflow/server/local/security" + "/fileBasedUsers.yml"); @ClassRule public static TestRule springDataflowAndLdapServer = RuleChain.outerRule(localDataflowResource); private static UserCredentials viewOnlyUser = new UserCredentials("bob", "bobspassword"); private static UserCredentials manageOnlyUser = new UserCredentials("alice", "alicepwd"); private static UserCredentials createOnlyUser = new UserCredentials("cartman", "cartmanpwd"); @Parameter(value = 0) public HttpMethod httpMethod; @Parameter(value = 1) public HttpStatus expectedHttpStatus; @Parameter(value = 2) public String url; @Parameter(value = 3) public UserCredentials userCredentials; @Parameter(value = 4) public Map<String, String> urlParameters; @Parameters(name = "Authentication Test {index} - {0} {2} - Returns: {1}") public static Collection<Object[]> data() { return Arrays.asList(new Object[][] { { HttpMethod.GET, HttpStatus.OK, "/", manageOnlyUser, null }, { HttpMethod.GET, HttpStatus.OK, "/", viewOnlyUser, null }, { HttpMethod.GET, HttpStatus.OK, "/", createOnlyUser, null }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/", null, null }, /* AppRegistryController */ { HttpMethod.GET, HttpStatus.FORBIDDEN, "/apps", manageOnlyUser, null }, { HttpMethod.GET, HttpStatus.OK, "/apps", 
viewOnlyUser, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/apps", createOnlyUser, null }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/apps", null, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/apps/task/taskname", manageOnlyUser, null }, { HttpMethod.GET, HttpStatus.NOT_FOUND, "/apps/task/taskname", viewOnlyUser, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/apps/task/taskname", createOnlyUser, null }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/apps/task/taskname", null, null }, { HttpMethod.POST, HttpStatus.FORBIDDEN, "/apps/task/taskname", manageOnlyUser, null }, { HttpMethod.POST, HttpStatus.FORBIDDEN, "/apps/task/taskname", viewOnlyUser, null }, { HttpMethod.POST, HttpStatus.BAD_REQUEST, "/apps/task/taskname", createOnlyUser, null }, { HttpMethod.POST, HttpStatus.CREATED, "/apps/task/taskname", createOnlyUser, ImmutableMap.of("uri", "maven://io.spring.cloud:scdf-sample-app:jar:1.0.0.BUILD-SNAPSHOT", "force", "false") }, { HttpMethod.POST, HttpStatus.UNAUTHORIZED, "/apps/task/taskname", null, null }, { HttpMethod.DELETE, HttpStatus.FORBIDDEN, "/apps/task/taskname", manageOnlyUser, null }, { HttpMethod.DELETE, HttpStatus.FORBIDDEN, "/apps/task/taskname", viewOnlyUser, null }, { HttpMethod.DELETE, HttpStatus.OK, "/apps/task/taskname", createOnlyUser, null }, // Should // be // 404 // - // See https://github.com/spring-cloud/spring-cloud-dataflow/issues/1071 { HttpMethod.DELETE, HttpStatus.UNAUTHORIZED, "/apps/task/taskname", null, null }, { HttpMethod.POST, HttpStatus.FORBIDDEN, "/apps", manageOnlyUser, ImmutableMap.of("uri", "???", "apps", "??", "force", "true") }, { HttpMethod.POST, HttpStatus.FORBIDDEN, "/apps", viewOnlyUser, ImmutableMap.of("uri", "???", "apps", "??", "force", "true") }, { HttpMethod.POST, HttpStatus.CREATED, "/apps", createOnlyUser, ImmutableMap.of("uri", "http://bit" + ".ly/1-0-2-GA-stream-applications-rabbit-maven", "apps", "app=is_ignored", "force", "false") }, // Should be 400 - // See 
https://github.com/spring-cloud/spring-cloud-dataflow/issues/1071 { HttpMethod.POST, HttpStatus.CREATED, "/apps", createOnlyUser, ImmutableMap.of("uri", "http://bit" + ".ly/1-0-2-GA-stream-applications-rabbit-maven", "force", "false") }, { HttpMethod.POST, HttpStatus.INTERNAL_SERVER_ERROR, "/apps", createOnlyUser, ImmutableMap.of("apps", "appTypeMissing=maven://io.spring.cloud:scdf-sample-app:jar:1.0.0.BUILD-SNAPSHOT", "force", "false") }, // Should be 400 - See https://github // .com/spring-cloud/spring-cloud-dataflow/issues/1071 { HttpMethod.POST, HttpStatus.CREATED, "/apps", createOnlyUser, ImmutableMap.of("apps", "task" + ".myCoolApp=maven://io.spring.cloud:scdf-sample-app:jar:1.0.0.BUILD-SNAPSHOT", "force", "false") }, { HttpMethod.POST, HttpStatus.UNAUTHORIZED, "/apps", null, ImmutableMap.of("uri", "???", "apps", "??", "force", "true") }, /* CompletionController */ { HttpMethod.GET, HttpStatus.FORBIDDEN, "/completions/stream", manageOnlyUser, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/completions/stream", viewOnlyUser, null }, { HttpMethod.GET, HttpStatus.BAD_REQUEST, "/completions/stream", createOnlyUser, null }, { HttpMethod.GET, HttpStatus.OK, "/completions/stream", createOnlyUser, ImmutableMap.of("start", "2") }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/completions/stream", null, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/completions/task", manageOnlyUser, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/completions/task", viewOnlyUser, null }, { HttpMethod.GET, HttpStatus.BAD_REQUEST, "/completions/task", createOnlyUser, null }, { HttpMethod.GET, HttpStatus.OK, "/completions/task", createOnlyUser, ImmutableMap.of("start", "2") }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/completions/task", null, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/completions/stream", manageOnlyUser, ImmutableMap.of("detailLevel", "2") }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/completions/stream", viewOnlyUser, ImmutableMap.of("detailLevel", "2") }, 
{ HttpMethod.GET, HttpStatus.OK, "/completions/stream", createOnlyUser, ImmutableMap.of("start", "2", "detailLevel", "2") }, { HttpMethod.GET, HttpStatus.BAD_REQUEST, "/completions/stream", createOnlyUser, ImmutableMap.of("start", "2", "detailLevel", "-123") }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/completions/stream", null, ImmutableMap.of("detailLevel", "2") }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/completions/task", manageOnlyUser, ImmutableMap.of("detailLevel", "2") }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/completions/task", viewOnlyUser, ImmutableMap.of("detailLevel", "2") }, { HttpMethod.GET, HttpStatus.OK, "/completions/task", createOnlyUser, ImmutableMap.of("start", "2", "detailLevel", "2") }, { HttpMethod.GET, HttpStatus.BAD_REQUEST, "/completions/task", createOnlyUser, ImmutableMap.of("start", "2", "detailLevel", "-123") }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/completions/task", null, ImmutableMap.of("detailLevel", "2") }, /* ToolsController */ { HttpMethod.GET, HttpStatus.FORBIDDEN, "/tools/parseTaskTextToGraph", manageOnlyUser, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/tools/parseTaskTextToGraph", viewOnlyUser, null }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/tools/parseTaskTextToGraph", null, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/tools/convertTaskGraphToText", manageOnlyUser, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/tools/convertTaskGraphToText", viewOnlyUser, null }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/tools/convertTaskGraphToText", null, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/tools/parseTaskTextToGraph", manageOnlyUser, ImmutableMap.of("definition", "fooApp") }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/tools/parseTaskTextToGraph", viewOnlyUser, ImmutableMap.of("definition", "fooApp") }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/tools/parseTaskTextToGraph", null, ImmutableMap.of("definition", "fooApp") }, { HttpMethod.GET, HttpStatus.FORBIDDEN, 
"/tools/convertTaskGraphToText", manageOnlyUser, ImmutableMap.of("detailLevel", "2") }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/tools/convertTaskGraphToText", viewOnlyUser, ImmutableMap.of("detailLevel", "2") }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/tools/convertTaskGraphToText", null, ImmutableMap.of("detailLevel", "2") }, /* FeaturesController */ { HttpMethod.GET, HttpStatus.OK, "/features", manageOnlyUser, null }, { HttpMethod.GET, HttpStatus.OK, "/features", viewOnlyUser, null }, { HttpMethod.GET, HttpStatus.OK, "/features", createOnlyUser, null }, { HttpMethod.GET, HttpStatus.OK, "/features", null, null }, /* JobExecutionController */ { HttpMethod.GET, HttpStatus.FORBIDDEN, "/jobs/executions", manageOnlyUser, null }, { HttpMethod.GET, HttpStatus.OK, "/jobs/executions", viewOnlyUser, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/jobs/executions", createOnlyUser, null }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/jobs/executions", null, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/jobs/executions", manageOnlyUser, ImmutableMap.of("page", "0", "size", "10") }, { HttpMethod.GET, HttpStatus.OK, "/jobs/executions", viewOnlyUser, ImmutableMap.of("page", "0", "size", "10") }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/jobs/executions", createOnlyUser, ImmutableMap.of("page", "0", "size", "10") }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/jobs/executions", null, ImmutableMap.of("page", "0", "size", "10") }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/jobs/executions", manageOnlyUser, ImmutableMap.of("name", "myname") }, { HttpMethod.GET, HttpStatus.NOT_FOUND, "/jobs/executions", viewOnlyUser, ImmutableMap.of("name", "myname") }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/jobs/executions", createOnlyUser, ImmutableMap.of("name", "myname") }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/jobs/executions", null, ImmutableMap.of("name", "myname") }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/jobs/executions", manageOnlyUser, ImmutableMap.of("name", 
"myname", "page", "0", "size", "10") }, { HttpMethod.GET, HttpStatus.NOT_FOUND, "/jobs/executions", viewOnlyUser, ImmutableMap.of("name", "myname", "page", "0", "size", "10") }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/jobs/executions", createOnlyUser, ImmutableMap.of("name", "myname", "page", "0", "size", "10") }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/jobs/executions", null, ImmutableMap.of("name", "myname", "page", "0", "size", "10") }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/jobs/executions/123", manageOnlyUser, null }, { HttpMethod.GET, HttpStatus.NOT_FOUND, "/jobs/executions/123", viewOnlyUser, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/jobs/executions/123", createOnlyUser, null }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/jobs/executions/123", null, null }, { HttpMethod.PUT, HttpStatus.FORBIDDEN, "/jobs/executions/123", manageOnlyUser, ImmutableMap.of("stop", "true") }, { HttpMethod.PUT, HttpStatus.FORBIDDEN, "/jobs/executions/123", viewOnlyUser, ImmutableMap.of("stop", "true") }, { HttpMethod.PUT, HttpStatus.NOT_FOUND, "/jobs/executions/123", createOnlyUser, ImmutableMap.of("stop", "true") }, { HttpMethod.PUT, HttpStatus.UNAUTHORIZED, "/jobs/executions/123", null, ImmutableMap.of("stop", "true") }, { HttpMethod.PUT, HttpStatus.FORBIDDEN, "/jobs/executions/123", manageOnlyUser, ImmutableMap.of("restart", "true") }, { HttpMethod.PUT, HttpStatus.FORBIDDEN, "/jobs/executions/123", viewOnlyUser, ImmutableMap.of("restart", "true") }, { HttpMethod.PUT, HttpStatus.NOT_FOUND, "/jobs/executions/123", createOnlyUser, ImmutableMap.of("restart", "true") }, { HttpMethod.PUT, HttpStatus.UNAUTHORIZED, "/jobs/executions/123", null, ImmutableMap.of("restart", "true") }, /* JobInstanceController */ { HttpMethod.GET, HttpStatus.FORBIDDEN, "/jobs/instances", manageOnlyUser, ImmutableMap.of("name", "my-job-name") }, { HttpMethod.GET, HttpStatus.NOT_FOUND, "/jobs/instances", viewOnlyUser, ImmutableMap.of("name", "my-job-name") }, { HttpMethod.GET, 
HttpStatus.FORBIDDEN, "/jobs/instances", createOnlyUser, ImmutableMap.of("name", "my-job-name") }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/jobs/instances", null, ImmutableMap.of("name", "my-job-name") }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/jobs/instances", manageOnlyUser, ImmutableMap.of("name", "my-job-name", "page", "0", "size", "10") }, { HttpMethod.GET, HttpStatus.NOT_FOUND, "/jobs/instances", viewOnlyUser, ImmutableMap.of("name", "my-job-name", "page", "0", "size", "10") }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/jobs/instances", createOnlyUser, ImmutableMap.of("name", "my-job-name", "page", "0", "size", "10") }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/jobs/instances", null, ImmutableMap.of("name", "my-job-name", "page", "0", "size", "10") }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/jobs/instances", manageOnlyUser, null }, { HttpMethod.GET, HttpStatus.BAD_REQUEST, "/jobs/instances", viewOnlyUser, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/jobs/instances", createOnlyUser, null }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/jobs/instances", null, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/jobs/instances/123", manageOnlyUser, null }, { HttpMethod.GET, HttpStatus.NOT_FOUND, "/jobs/instances/123", viewOnlyUser, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/jobs/instances/123", createOnlyUser, null }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/jobs/instances/123", null, null }, /* JobStepExecutionController */ { HttpMethod.GET, HttpStatus.FORBIDDEN, "/jobs/executions/123/steps", manageOnlyUser, null }, { HttpMethod.GET, HttpStatus.NOT_FOUND, "/jobs/executions/123/steps", viewOnlyUser, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/jobs/executions/123/steps", createOnlyUser, null }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/jobs/executions/123/steps", null, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/jobs/executions/abc/steps", manageOnlyUser, null }, { HttpMethod.GET, HttpStatus.BAD_REQUEST, 
"/jobs/executions/abc/steps", viewOnlyUser, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/jobs/executions/abc/steps", createOnlyUser, null }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/jobs/executions/abc/steps", null, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/jobs/executions/123/steps", manageOnlyUser, ImmutableMap.of("name", "my-job-name", "page", "0", "size", "10") }, { HttpMethod.GET, HttpStatus.NOT_FOUND, "/jobs/executions/123/steps", viewOnlyUser, ImmutableMap.of("name", "my-job-name", "page", "0", "size", "10") }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/jobs/executions/123/steps", createOnlyUser, ImmutableMap.of("name", "my-job-name", "page", "0", "size", "10") }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/jobs/executions/123/steps", null, ImmutableMap.of("name", "my-job-name", "page", "0", "size", "10") }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/jobs/executions/123/steps/1", manageOnlyUser, null }, { HttpMethod.GET, HttpStatus.NOT_FOUND, "/jobs/executions/123/steps/1", viewOnlyUser, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/jobs/executions/123/steps/1", createOnlyUser, null }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/jobs/executions/123/steps/1", null, null }, /* JobStepExecutionProgressController */ { HttpMethod.GET, HttpStatus.FORBIDDEN, "/jobs/executions/123/steps/1/progress", manageOnlyUser, null }, { HttpMethod.GET, HttpStatus.NOT_FOUND, "/jobs/executions/123/steps/1/progress", viewOnlyUser, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/jobs/executions/123/steps/1/progress", createOnlyUser, null }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/jobs/executions/123/steps/1/progress", null, null }, /* RuntimeAppsController */ { HttpMethod.GET, HttpStatus.FORBIDDEN, "/runtime/apps", manageOnlyUser, null }, { HttpMethod.GET, HttpStatus.OK, "/runtime/apps", viewOnlyUser, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/runtime/apps", createOnlyUser, null }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/runtime/apps", null, 
null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/runtime/apps/123", manageOnlyUser, null }, { HttpMethod.GET, HttpStatus.NOT_FOUND, "/runtime/apps/123", viewOnlyUser, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/runtime/apps/123", createOnlyUser, null }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/runtime/apps/123", null, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/runtime/apps/123/instances", manageOnlyUser, null }, { HttpMethod.GET, HttpStatus.NOT_FOUND, "/runtime/apps/123/instances", viewOnlyUser, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/runtime/apps/123/instances", createOnlyUser, null }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/runtime/apps/123/instances", null, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/runtime/apps/123/instances/456", manageOnlyUser, null }, { HttpMethod.GET, HttpStatus.NOT_FOUND, "/runtime/apps/123/instances/456", viewOnlyUser, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/runtime/apps/123/instances/456", createOnlyUser, null }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/runtime/apps/123/instances/456", null, null }, /* StreamDefinitionController */ { HttpMethod.GET, HttpStatus.FORBIDDEN, "/streams/definitions", manageOnlyUser, null }, { HttpMethod.GET, HttpStatus.OK, "/streams/definitions", viewOnlyUser, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/streams/definitions", createOnlyUser, null }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/streams/definitions", null, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/streams/definitions", manageOnlyUser, ImmutableMap.of("page", "0", "size", "10") }, { HttpMethod.GET, HttpStatus.OK, "/streams/definitions", viewOnlyUser, ImmutableMap.of("page", "0", "size", "10") }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/streams/definitions", createOnlyUser, ImmutableMap.of("page", "0", "size", "10") }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/streams/definitions", null, ImmutableMap.of("page", "0", "size", "10") }, { HttpMethod.GET, HttpStatus.FORBIDDEN, 
"/streams/definitions", manageOnlyUser, ImmutableMap.of("search", "mysearch") }, { HttpMethod.GET, HttpStatus.OK, "/streams/definitions", viewOnlyUser, ImmutableMap.of("search", "mysearch") }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/streams/definitions", createOnlyUser, ImmutableMap.of("search", "mysearch") }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/streams/definitions", null, ImmutableMap.of("search", "mysearch") }, { HttpMethod.POST, HttpStatus.FORBIDDEN, "/streams/definitions", manageOnlyUser, ImmutableMap.of("name", "myname", "definition", "fooo | baaar") }, { HttpMethod.POST, HttpStatus.FORBIDDEN, "/streams/definitions", viewOnlyUser, ImmutableMap.of("name", "myname", "definition", "fooo | baaar") }, { HttpMethod.POST, HttpStatus.BAD_REQUEST, "/streams/definitions", createOnlyUser, ImmutableMap.of("name", "myname", "definition", "fooo | baaar") }, { HttpMethod.POST, HttpStatus.UNAUTHORIZED, "/streams/definitions", null, ImmutableMap.of("name", "myname", "definition", "fooo | baaar") }, { HttpMethod.POST, HttpStatus.FORBIDDEN, "/streams/definitions", manageOnlyUser, ImmutableMap.of("name", "myname") }, { HttpMethod.POST, HttpStatus.FORBIDDEN, "/streams/definitions", viewOnlyUser, ImmutableMap.of("name", "myname") }, { HttpMethod.POST, HttpStatus.BAD_REQUEST, "/streams/definitions", createOnlyUser, ImmutableMap.of("name", "myname") }, { HttpMethod.POST, HttpStatus.UNAUTHORIZED, "/streams/definitions", null, ImmutableMap.of("name", "myname") }, { HttpMethod.DELETE, HttpStatus.FORBIDDEN, "/streams/definitions/delete-me", manageOnlyUser, null }, { HttpMethod.DELETE, HttpStatus.FORBIDDEN, "/streams/definitions/delete-me", viewOnlyUser, null }, { HttpMethod.DELETE, HttpStatus.NOT_FOUND, "/streams/definitions/delete-me", createOnlyUser, null }, { HttpMethod.DELETE, HttpStatus.UNAUTHORIZED, "/streams/definitions/delete-me", null, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/streams/definitions/my-stream/related", manageOnlyUser, null }, { HttpMethod.GET, 
HttpStatus.NOT_FOUND, "/streams/definitions/my-stream/related", viewOnlyUser, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/streams/definitions/my-stream/related", createOnlyUser, null }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/streams/definitions/my-stream/related", null, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/streams/definitions/my-stream/related", manageOnlyUser, ImmutableMap.of("nested", "wrong-param") }, { HttpMethod.GET, HttpStatus.BAD_REQUEST, "/streams/definitions/my-stream/related", viewOnlyUser, ImmutableMap.of("nested", "wrong-param") }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/streams/definitions/my-stream/related", createOnlyUser, ImmutableMap.of("nested", "wrong-param") }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/streams/definitions/my-stream/related", null, ImmutableMap.of("nested", "wrong-param") }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/streams/definitions/my-stream", manageOnlyUser, null }, { HttpMethod.GET, HttpStatus.NOT_FOUND, "/streams/definitions/my-stream", viewOnlyUser, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/streams/definitions/my-stream", createOnlyUser, null }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/streams/definitions/my-stream", null, null }, { HttpMethod.DELETE, HttpStatus.FORBIDDEN, "/streams/definitions", manageOnlyUser, null }, { HttpMethod.DELETE, HttpStatus.FORBIDDEN, "/streams/definitions", viewOnlyUser, null }, { HttpMethod.DELETE, HttpStatus.OK, "/streams/definitions", createOnlyUser, null }, { HttpMethod.DELETE, HttpStatus.UNAUTHORIZED, "/streams/definitions", null, null }, /* StreamDeploymentController */ { HttpMethod.DELETE, HttpStatus.FORBIDDEN, "/streams/deployments", manageOnlyUser, null }, { HttpMethod.DELETE, HttpStatus.FORBIDDEN, "/streams/deployments", viewOnlyUser, null }, { HttpMethod.DELETE, HttpStatus.OK, "/streams/deployments", createOnlyUser, null }, { HttpMethod.DELETE, HttpStatus.UNAUTHORIZED, "/streams/deployments", null, null }, { HttpMethod.DELETE, 
HttpStatus.FORBIDDEN, "/streams/deployments/my-stream", manageOnlyUser, null }, { HttpMethod.DELETE, HttpStatus.FORBIDDEN, "/streams/deployments/my-stream", viewOnlyUser, null }, { HttpMethod.DELETE, HttpStatus.NOT_FOUND, "/streams/deployments/my-stream", createOnlyUser, null }, { HttpMethod.DELETE, HttpStatus.UNAUTHORIZED, "/streams/deployments/my-stream", null, null }, { HttpMethod.POST, HttpStatus.FORBIDDEN, "/streams/deployments/my-stream", manageOnlyUser, null }, { HttpMethod.POST, HttpStatus.FORBIDDEN, "/streams/deployments/my-stream", viewOnlyUser, null }, { HttpMethod.POST, HttpStatus.NOT_FOUND, "/streams/deployments/my-stream", createOnlyUser, null }, { HttpMethod.POST, HttpStatus.UNAUTHORIZED, "/streams/deployments/my-stream", null, null }, /* TaskDefinitionController */ { HttpMethod.POST, HttpStatus.FORBIDDEN, "/tasks/definitions", manageOnlyUser, ImmutableMap.of("name", "my-name") }, { HttpMethod.POST, HttpStatus.FORBIDDEN, "/tasks/definitions", viewOnlyUser, ImmutableMap.of("name", "my-name") }, { HttpMethod.POST, HttpStatus.BAD_REQUEST, "/tasks/definitions", createOnlyUser, ImmutableMap.of("name", "my-name") }, { HttpMethod.POST, HttpStatus.UNAUTHORIZED, "/tasks/definitions", null, ImmutableMap.of("name", "my-name") }, { HttpMethod.POST, HttpStatus.FORBIDDEN, "/tasks/definitions", manageOnlyUser, ImmutableMap.of("name", "my-name", "definition", "foo") }, { HttpMethod.POST, HttpStatus.FORBIDDEN, "/tasks/definitions", viewOnlyUser, ImmutableMap.of("name", "my-name", "definition", "foo") }, { HttpMethod.POST, HttpStatus.INTERNAL_SERVER_ERROR, "/tasks/definitions", createOnlyUser, ImmutableMap.of("name", "my-name", "definition", "foo") }, // Should // be // a // `400` // error // - // See // also: https://github.com/spring-cloud/spring-cloud-dataflow/issues/1075 { HttpMethod.POST, HttpStatus.UNAUTHORIZED, "/tasks/definitions", null, ImmutableMap.of("name", "my-name", "definition", "foo") }, /* TaskExecutionController */ { HttpMethod.GET, 
HttpStatus.FORBIDDEN, "/tasks/executions", manageOnlyUser, null }, { HttpMethod.GET, HttpStatus.OK, "/tasks/executions", viewOnlyUser, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/tasks/executions", createOnlyUser, null }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/tasks/executions", null, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/tasks/executions", manageOnlyUser, ImmutableMap.of("page", "0", "size", "10") }, { HttpMethod.GET, HttpStatus.OK, "/tasks/executions", viewOnlyUser, ImmutableMap.of("page", "0", "size", "10") }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/tasks/executions", createOnlyUser, ImmutableMap.of("page", "0", "size", "10") }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/tasks/executions", null, ImmutableMap.of("page", "0", "size", "10") }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/tasks/executions", manageOnlyUser, ImmutableMap.of("name", "my-task-name") }, { HttpMethod.GET, HttpStatus.NOT_FOUND, "/tasks/executions", viewOnlyUser, ImmutableMap.of("name", "my-task-name") }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/tasks/executions", createOnlyUser, ImmutableMap.of("name", "my-task-name") }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/tasks/executions", null, ImmutableMap.of("name", "my-task-name") }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/tasks/executions/123", manageOnlyUser, null }, { HttpMethod.GET, HttpStatus.NOT_FOUND, "/tasks/executions/123", viewOnlyUser, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/tasks/executions/123", createOnlyUser, null }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/tasks/executions/123", null, null }, { HttpMethod.DELETE, HttpStatus.FORBIDDEN, "/tasks/executions/123", manageOnlyUser, null }, { HttpMethod.DELETE, HttpStatus.FORBIDDEN, "/tasks/executions/123", viewOnlyUser, null }, { HttpMethod.DELETE, HttpStatus.NOT_FOUND, "/tasks/executions/123", createOnlyUser, null }, { HttpMethod.DELETE, HttpStatus.UNAUTHORIZED, "/tasks/executions/123", null, null }, { HttpMethod.POST, HttpStatus.FORBIDDEN, 
"/tasks/executions", manageOnlyUser, null }, { HttpMethod.POST, HttpStatus.FORBIDDEN, "/tasks/executions", viewOnlyUser, null }, { HttpMethod.POST, HttpStatus.BAD_REQUEST, "/tasks/executions", createOnlyUser, null }, { HttpMethod.POST, HttpStatus.UNAUTHORIZED, "/tasks/executions", null, null }, { HttpMethod.POST, HttpStatus.FORBIDDEN, "/tasks/executions", manageOnlyUser, ImmutableMap.of("name", "my-task-name") }, { HttpMethod.POST, HttpStatus.FORBIDDEN, "/tasks/executions", viewOnlyUser, ImmutableMap.of("name", "my-task-name") }, { HttpMethod.POST, HttpStatus.NOT_FOUND, "/tasks/executions", createOnlyUser, ImmutableMap.of("name", "my-task-name") }, { HttpMethod.POST, HttpStatus.UNAUTHORIZED, "/tasks/executions", null, ImmutableMap.of("name", "my-task-name") }, /* UiController */ { HttpMethod.GET, HttpStatus.FOUND, "/dashboard", manageOnlyUser, null }, { HttpMethod.GET, HttpStatus.FOUND, "/dashboard", viewOnlyUser, null }, { HttpMethod.GET, HttpStatus.FOUND, "/dashboard", createOnlyUser, null }, { HttpMethod.GET, HttpStatus.FOUND, "/dashboard", null, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/about", manageOnlyUser, null }, { HttpMethod.GET, HttpStatus.OK, "/about", viewOnlyUser, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/about", createOnlyUser, null }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/about", null, null }, { HttpMethod.GET, HttpStatus.OK, "/management", manageOnlyUser, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/management", viewOnlyUser, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/management", createOnlyUser, null }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/management", null, null }, { HttpMethod.GET, HttpStatus.OK, "/management/info", manageOnlyUser, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/management/info", viewOnlyUser, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/management/info", createOnlyUser, null }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/management/info", null, null }, { HttpMethod.GET, 
HttpStatus.NOT_FOUND, "/management/does-not-exist", manageOnlyUser, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/management/does-not-exist", viewOnlyUser, null }, { HttpMethod.GET, HttpStatus.FORBIDDEN, "/management/does-not-exist", createOnlyUser, null }, { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/management/does-not-exist", null, null }, // Requires Redis // { HttpMethod.GET, HttpStatus.FORBIDDEN, "/metrics/counters", // manageOnlyUser, null }, // { HttpMethod.GET, HttpStatus.OK, "/metrics/counters", viewOnlyUser, // null }, // { HttpMethod.GET, HttpStatus.FORBIDDEN, "/metrics/counters", // createOnlyUser, null }, // { HttpMethod.GET, HttpStatus.UNAUTHORIZED, "/metrics/counters", null, // null }, /* LoginController */ { HttpMethod.POST, HttpStatus.INTERNAL_SERVER_ERROR, "/authenticate", manageOnlyUser, null }, { HttpMethod.POST, HttpStatus.INTERNAL_SERVER_ERROR, "/authenticate", viewOnlyUser, null }, { HttpMethod.POST, HttpStatus.INTERNAL_SERVER_ERROR, "/authenticate", createOnlyUser, null }, { HttpMethod.POST, HttpStatus.INTERNAL_SERVER_ERROR, "/authenticate", null, null }, /* SecurityController */ { HttpMethod.GET, HttpStatus.OK, "/security/info", manageOnlyUser, null }, { HttpMethod.GET, HttpStatus.OK, "/security/info", viewOnlyUser, null }, { HttpMethod.GET, HttpStatus.OK, "/security/info", createOnlyUser, null }, { HttpMethod.GET, HttpStatus.OK, "/security/info", null, null } }); } @Test public void testEndpointAuthentication() throws Exception { logger.info(String.format( "Using parameters - httpMethod: %s, " + "URL: %s, URL parameters: %s, user credentials: %s", this.httpMethod, this.url, this.urlParameters, userCredentials)); final MockHttpServletRequestBuilder rb; switch (httpMethod) { case GET: rb = get(url); break; case POST: rb = post(url); break; case PUT: rb = put(url); break; case DELETE: rb = delete(url); break; default: throw new IllegalArgumentException("Unsupported Method: " + httpMethod); } if (this.userCredentials != null) { 
rb.header("Authorization", basicAuthorizationHeader(this.userCredentials.getUsername(), this.userCredentials.getPassword())); } if (!CollectionUtils.isEmpty(urlParameters)) { for (Map.Entry<String, String> mapEntry : urlParameters.entrySet()) { rb.param(mapEntry.getKey(), mapEntry.getValue()); } } final ResultMatcher statusResultMatcher; switch (expectedHttpStatus) { case UNAUTHORIZED: statusResultMatcher = status().isUnauthorized(); break; case FORBIDDEN: statusResultMatcher = status().isForbidden(); break; case FOUND: statusResultMatcher = status().isFound(); break; case NOT_FOUND: statusResultMatcher = status().isNotFound(); break; case OK: statusResultMatcher = status().isOk(); break; case CREATED: statusResultMatcher = status().isCreated(); break; case BAD_REQUEST: statusResultMatcher = status().isBadRequest(); break; case INTERNAL_SERVER_ERROR: statusResultMatcher = status().isInternalServerError(); break; default: throw new IllegalArgumentException("Unsupported Status: " + expectedHttpStatus); } try { localDataflowResource.getMockMvc().perform(rb).andDo(print()).andExpect(statusResultMatcher); } catch (AssertionError e) { throw new AssertionError(String.format( "Assertion failed for parameters - httpMethod: %s, " + "URL: %s, URL parameters: %s, user credentials: %s", this.httpMethod, this.url, this.urlParameters, this.userCredentials), e); } } }
apache-2.0
google/physical-web
java/libs/src/test/java/org/physical_web/collection/UrlGroupTest.java
2989
/*
 * Copyright 2015 Google Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.physical_web.collection;

import static org.junit.Assert.*;

import org.junit.Before;
import org.junit.Test;

import java.util.Comparator;

/**
 * UrlGroup unit test class.
 *
 * <p>Exercises {@link UrlGroup} construction and top-pair selection. Note that
 * the comparator used here orders pairs by device ID only; no ranking data is
 * attached to the pairs despite the factory method's name.
 */
public class UrlGroupTest {
  private static final String ID1 = "id1";
  private static final String ID2 = "id2";
  private static final String URL1 = "http://physical-web.org/#a";
  private static final String URL2 = "http://physical-web.org/#b";
  private static final String GROUPID1 = "group1";

  private PwPair mPwPair1;
  private PwPair mPwPair2;

  // Orders pairs lexicographically by device ID (so ID1 sorts before ID2).
  // Rank-based ordering is deliberately not exercised by this suite.
  private static Comparator<PwPair> testComparator = new Comparator<PwPair>() {
    @Override
    public int compare(PwPair lhs, PwPair rhs) {
      return lhs.getUrlDevice().getId().compareTo(rhs.getUrlDevice().getId());
    }
  };

  /**
   * Creates a PwPair whose device and PWS result both use the given URL.
   *
   * @param id device identifier
   * @param url URL used for the device and for both PWS result URLs
   * @param groupId group identifier stored on the PWS result
   * @return a new PwPair wrapping the constructed device and result
   */
  public static PwPair createRankedPair(String id, String url, String groupId) {
    UrlDevice urlDevice = new UrlDevice(id, url);
    PwsResult pwsResult = new PwsResult.Builder(url, url)
        .setTitle("title1")
        .setDescription("description1")
        .setGroupId(groupId)
        .build();
    return new PwPair(urlDevice, pwsResult);
  }

  @Before
  public void setUp() {
    mPwPair1 = createRankedPair(ID1, URL1, GROUPID1);
    mPwPair2 = createRankedPair(ID2, URL2, GROUPID1);
  }

  @Test
  public void constructorCreatesProperObject() {
    UrlGroup urlGroup = new UrlGroup(GROUPID1);
    // JUnit convention: expected value first, actual value second, so
    // failure messages read correctly.
    assertEquals(GROUPID1, urlGroup.getGroupId());
  }

  @Test
  public void addPairAndGetTopPairWorks() {
    UrlGroup urlGroup = new UrlGroup(GROUPID1);
    urlGroup.addPair(mPwPair1);
    PwPair pwPair = urlGroup.getTopPair(testComparator);
    assertEquals(ID1, pwPair.getUrlDevice().getId());
    assertEquals(URL1, pwPair.getUrlDevice().getUrl());
    assertEquals(URL1, pwPair.getPwsResult().getRequestUrl());
    assertEquals(URL1, pwPair.getPwsResult().getSiteUrl());
    assertEquals(GROUPID1, pwPair.getPwsResult().getGroupId());
  }

  @Test
  public void addPairTwiceAndGetTopPairWorks() {
    UrlGroup urlGroup = new UrlGroup(GROUPID1);
    urlGroup.addPair(mPwPair1);
    // Pair 2 sorts after pair 1 under testComparator (ID comparison);
    // the group is still expected to surface pair 1 as the top pair.
    urlGroup.addPair(mPwPair2);
    PwPair pwPair = urlGroup.getTopPair(testComparator);
    assertEquals(ID1, pwPair.getUrlDevice().getId());
  }
}
apache-2.0
jamesagnew/hapi-fhir
hapi-fhir-cli/hapi-fhir-cli-api/src/main/java/ca/uhn/fhir/cli/LoadingValidationSupportR4.java
2088
package ca.uhn.fhir.cli; /*- * #%L * HAPI FHIR - Command Line Client - API * %% * Copyright (C) 2014 - 2022 Smile CDR, Inc. * %% * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ import ca.uhn.fhir.context.FhirContext; import ca.uhn.fhir.context.support.IValidationSupport; import ca.uhn.fhir.rest.client.api.IGenericClient; import ca.uhn.fhir.rest.client.api.ServerValidationModeEnum; import ca.uhn.fhir.rest.server.exceptions.BaseServerResponseException; import org.hl7.fhir.instance.model.api.IBaseResource; public class LoadingValidationSupportR4 implements IValidationSupport { // TODO: Don't use qualified names for loggers in HAPI CLI. 
private static final org.slf4j.Logger ourLog = org.slf4j.LoggerFactory.getLogger(LoadingValidationSupportR4.class); private FhirContext myCtx = FhirContext.forR4(); @Override public <T extends IBaseResource> T fetchResource(Class<T> theClass, String theUri) { String resName = myCtx.getResourceType(theClass); ourLog.info("Attempting to fetch {} at URL: {}", resName, theUri); myCtx.getRestfulClientFactory().setServerValidationMode(ServerValidationModeEnum.NEVER); IGenericClient client = myCtx.newRestfulGenericClient("http://example.com"); T result; try { result = client.read(theClass, theUri); } catch (BaseServerResponseException e) { throw new CommandFailureException("FAILURE: Received HTTP " + e.getStatusCode() + ": " + e.getMessage()); } ourLog.info("Successfully loaded resource"); return result; } @Override public FhirContext getFhirContext() { return myCtx; } }
apache-2.0
apache/logging-log4j2
log4j-core/src/test/java/org/apache/logging/log4j/core/async/AsyncLoggerConfigTest4.java
3325
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache license, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the license for the specific language governing permissions and
 * limitations under the license.
 */
package org.apache.logging.log4j.core.async;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.categories.AsyncLoggers;
import org.apache.logging.log4j.core.CoreLoggerContexts;
import org.apache.logging.log4j.core.config.ConfigurationFactory;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;

import static org.hamcrest.Matchers.containsString;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;

/**
 * Verifies additive async logger config behavior by logging one parameterized
 * message and checking the lines written to AsyncLoggerConfigTest4.log.
 */
@Category(AsyncLoggers.class)
public class AsyncLoggerConfigTest4 {

    @BeforeClass
    public static void beforeClass() {
        // Force non-webapp mode and point log4j at this test's configuration.
        System.setProperty("log4j2.is.webapp", "false");
        System.setProperty(ConfigurationFactory.CONFIGURATION_FILE_PROPERTY, "AsyncLoggerConfigTest4.xml");
    }

    @AfterClass
    public static void afterClass() {
        System.clearProperty("log4j2.is.webapp");
    }

    @Test
    public void testParameters() throws Exception {
        final File file = new File("target", "AsyncLoggerConfigTest4.log");
        assertTrue("Deleted old file before test", !file.exists() || file.delete());

        final Logger log = LogManager.getLogger("com.foo.Bar");
        log.info("Additive logging: {} for the price of {}!", 2, 1);
        CoreLoggerContexts.stopLoggerContext(file); // stop async thread

        // try-with-resources: the original leaked the reader if readLine threw.
        final String[] lines = new String[5];
        try (final BufferedReader reader = new BufferedReader(new FileReader(file))) {
            for (int i = 0; i < lines.length; i++) {
                lines[i] = reader.readLine();
            }
        }
        file.delete();

        // The configuration is expected to produce four identical lines, each
        // showing the raw pattern, the parameter array, and the formatted text.
        for (int i = 0; i < 4; i++) {
            assertThat(lines[i], containsString(
                    "Additive logging: {} for the price of {}! [2,1] Additive logging: 2 for the price of 1!"));
        }
        // Original message said "two lines" but four lines are read and checked
        // above; the message now matches the assertion.
        assertNull("Expected only four lines to be logged", lines[4]);
    }
}
apache-2.0
kikinteractive/maven-plugins
maven-assembly-plugin/src/test/java/org/apache/maven/plugin/assembly/archive/task/AddDirectoryTaskTest.java
5455
package org.apache.maven.plugin.assembly.archive.task;

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

import java.io.File;
import java.io.IOException;
import java.util.Collections;

import junit.framework.TestCase;

import org.apache.maven.plugin.assembly.archive.ArchiveCreationException;
import org.apache.maven.plugin.assembly.testutils.TestFileManager;
import org.codehaus.plexus.archiver.Archiver;
import org.codehaus.plexus.archiver.ArchiverException;
import org.codehaus.plexus.archiver.FileSet;
import org.easymock.classextension.EasyMockSupport;

import static org.easymock.EasyMock.anyObject;
import static org.easymock.EasyMock.expect;

/**
 * Unit tests for {@link AddDirectoryTask}, using an EasyMock {@link Archiver}
 * to observe which file sets and permission modes the task configures.
 */
public class AddDirectoryTaskTest
    extends TestCase
{

    private EasyMockSupport mockManager;

    private TestFileManager fileManager;

    private Archiver archiver;

    public void setUp()
    {
        fileManager = new TestFileManager( "ArchiveAssemblyUtils.test.", "" );
        mockManager = new EasyMockSupport();
        archiver = mockManager.createMock( Archiver.class );
    }

    public void tearDown()
        throws IOException
    {
        // Remove any temp directories created by individual tests.
        fileManager.cleanUp();
    }

    public void testAddDirectory_ShouldNotAddDirectoryIfNonExistent()
        throws ArchiveCreationException
    {
        // Point at a path that is guaranteed not to exist; the archiver must
        // never receive a file set for it, so no addFileSet expectation is set.
        File missingDir = new File( System.getProperty( "java.io.tmpdir" ),
                                    "non-existent." + System.currentTimeMillis() );

        configureModeExpectations( -1, -1, -1, -1, false );

        mockManager.replayAll();

        new AddDirectoryTask( missingDir ).execute( archiver );

        mockManager.verifyAll();
    }

    public void testAddDirectory_ShouldAddDirectory()
        throws ArchiveCreationException
    {
        File tempDir = fileManager.createTempDir();

        expectFileSetAdded();

        configureModeExpectations( -1, -1, -1, -1, false );

        mockManager.replayAll();

        AddDirectoryTask directoryTask = new AddDirectoryTask( tempDir );
        directoryTask.setOutputDirectory( "dir" );
        directoryTask.execute( archiver );

        mockManager.verifyAll();
    }

    public void testAddDirectory_ShouldAddDirectoryWithDirMode()
        throws ArchiveCreationException
    {
        File tempDir = fileManager.createTempDir();

        expectFileSetAdded();

        // 0777 octal == Integer.parseInt( "777", 8 )
        int directoryMode = 0777;
        int fileMode = 0777;

        configureModeExpectations( -1, -1, directoryMode, fileMode, true );

        mockManager.replayAll();

        AddDirectoryTask directoryTask = new AddDirectoryTask( tempDir );
        directoryTask.setDirectoryMode( directoryMode );
        directoryTask.setFileMode( fileMode );
        directoryTask.setOutputDirectory( "dir" );
        directoryTask.execute( archiver );

        mockManager.verifyAll();
    }

    public void testAddDirectory_ShouldAddDirectoryWithIncludesAndExcludes()
        throws ArchiveCreationException
    {
        File tempDir = fileManager.createTempDir();

        expectFileSetAdded();

        configureModeExpectations( -1, -1, -1, -1, false );

        mockManager.replayAll();

        AddDirectoryTask directoryTask = new AddDirectoryTask( tempDir );
        directoryTask.setIncludes( Collections.singletonList( "**/*.txt" ) );
        directoryTask.setExcludes( Collections.singletonList( "**/README.txt" ) );
        directoryTask.setOutputDirectory( "dir" );
        directoryTask.execute( archiver );

        mockManager.verifyAll();
    }

    /**
     * Records the expectation that the archiver receives exactly one file set.
     * ArchiverException is declared on the mocked method, so the checked
     * exception must be swallowed here; it can never actually be thrown while
     * recording.
     */
    private void expectFileSetAdded()
    {
        try
        {
            archiver.addFileSet( (FileSet) anyObject() );
        }
        catch ( ArchiverException e )
        {
            fail( "Should never happen." );
        }
    }

    /**
     * Records the getter/setter expectations for directory and file modes.
     * The task always queries the archiver's current override modes; when it
     * applies custom modes it also restores the defaults afterwards.
     */
    private void configureModeExpectations( int defaultDirMode, int defaultFileMode, int dirMode, int fileMode,
                                            boolean expectTwoSets )
    {
        expect( archiver.getOverrideDirectoryMode() ).andReturn( defaultDirMode );
        expect( archiver.getOverrideFileMode() ).andReturn( defaultFileMode );

        if ( expectTwoSets )
        {
            if ( dirMode > -1 )
            {
                archiver.setDirectoryMode( dirMode );
            }

            if ( fileMode > -1 )
            {
                archiver.setFileMode( fileMode );
            }
        }

        if ( dirMode > -1 )
        {
            archiver.setDirectoryMode( defaultDirMode );
        }

        if ( fileMode > -1 )
        {
            archiver.setFileMode( defaultFileMode );
        }
    }

}
apache-2.0
glubtech/ftpswrap
src/com/glub/secureftp/wrapper/config/QuitCommand.java
569
//***************************************************************************** //* //* (c) Copyright 2002. Glub Tech, Incorporated. All Rights Reserved. //* //* $Id: QuitCommand.java 39 2009-05-11 22:50:09Z gary $ //* //***************************************************************************** package com.glub.secureftp.wrapper.config; public class QuitCommand extends Command { public QuitCommand() { super("5", CommandID.QUIT_COMMAND_ID, "Exit"); } public SecureFTPError doIt() throws CommandException { return new SecureFTPError(); } }
apache-2.0
Fabryprog/camel
components/camel-atmos/src/main/java/org/apache/camel/component/atmos/AtmosConfiguration.java
4780
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.camel.component.atmos;

import java.net.URI;
import java.net.URISyntaxException;

import com.emc.atmos.api.AtmosApi;
import com.emc.atmos.api.AtmosConfig;
import com.emc.atmos.api.jersey.AtmosApiClient;

import org.apache.camel.component.atmos.util.AtmosException;
import org.apache.camel.component.atmos.util.AtmosOperation;
import org.apache.camel.spi.Metadata;
import org.apache.camel.spi.UriParam;
import org.apache.camel.spi.UriParams;
import org.apache.camel.spi.UriPath;

/**
 * URI configuration for the Atmos component: credentials (fullTokenId /
 * secretKey), endpoint uri, the operation to perform, and the local/remote
 * paths it operates on. Also owns the {@link AtmosApi} client instance built
 * from those settings.
 */
@UriParams
public class AtmosConfiguration {

    // Lazily created by createClient(), or injected via setClient().
    private AtmosApi client;
    @UriPath
    private String name;
    @UriPath @Metadata(required = true)
    private AtmosOperation operation;
    @UriParam
    private String secretKey;
    @UriParam
    private String localPath;
    @UriParam
    private String remotePath;
    @UriParam
    private String newRemotePath;
    @UriParam
    private String query;
    @UriParam
    private String fullTokenId;
    @UriParam
    private String uri;
    @UriParam
    private boolean enableSslValidation;

    public void setClient(AtmosApi client) {
        this.client = client;
    }

    public AtmosApi getClient() {
        return client;
    }

    /**
     * Obtain a new instance of AtmosApi client and store it in configuration.
     *
     * @throws AtmosException if the configured Atmos URI is malformed
     */
    public void createClient() throws AtmosException {
        // Redundant null-initialization of the local removed; assignment
        // happens exactly once inside the try or the method throws.
        AtmosConfig config;
        try {
            config = new AtmosConfig(fullTokenId, secretKey, new URI(uri));
        } catch (URISyntaxException use) {
            // Preserve the cause so callers can see the underlying URI problem.
            throw new AtmosException("wrong syntax for Atmos URI!", use);
        }
        if (!enableSslValidation) {
            config.setDisableSslValidation(true);
        }
        this.client = new AtmosApiClient(config);
    }

    public String getName() {
        return name;
    }

    /**
     * Atmos name
     */
    public void setName(String name) {
        this.name = name;
    }

    public String getSecretKey() {
        return secretKey;
    }

    /**
     * Atmos shared secret
     */
    public void setSecretKey(String secretKey) {
        this.secretKey = secretKey;
    }

    public String getLocalPath() {
        return localPath;
    }

    /**
     * Local path to put files
     */
    public void setLocalPath(String localPath) {
        this.localPath = localPath;
    }

    public String getRemotePath() {
        return remotePath;
    }

    /**
     * Where to put files on Atmos
     */
    public void setRemotePath(String remotePath) {
        this.remotePath = remotePath;
    }

    public String getNewRemotePath() {
        return newRemotePath;
    }

    /**
     * New path on Atmos when moving files
     */
    public void setNewRemotePath(String newRemotePath) {
        this.newRemotePath = newRemotePath;
    }

    public String getQuery() {
        return query;
    }

    /**
     * Search query on Atmos
     */
    public void setQuery(String query) {
        this.query = query;
    }

    public String getFullTokenId() {
        return fullTokenId;
    }

    /**
     * Atmos client fullTokenId
     */
    public void setFullTokenId(String fullTokenId) {
        this.fullTokenId = fullTokenId;
    }

    public AtmosOperation getOperation() {
        return operation;
    }

    /**
     * Operation to perform
     */
    public void setOperation(AtmosOperation operation) {
        this.operation = operation;
    }

    /**
     * Atmos server uri
     */
    public void setUri(String uri) {
        this.uri = uri;
    }

    public String getUri() {
        return uri;
    }

    public boolean isEnableSslValidation() {
        return enableSslValidation;
    }

    /**
     * Atmos SSL validation
     */
    public void setEnableSslValidation(boolean enableSslValidation) {
        this.enableSslValidation = enableSslValidation;
    }
}
apache-2.0
jcamachor/hive
serde/src/test/org/apache/hadoop/hive/serde2/avro/TestAvroObjectInspectorGenerator.java
24366
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hive.serde2.avro; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.util.List; import org.apache.avro.Schema; import org.apache.hadoop.hive.serde2.SerDeException; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory; import org.apache.hadoop.hive.serde2.objectinspector.StandardStructObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.StructField; import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; import org.apache.hadoop.hive.serde2.typeinfo.UnionTypeInfo; import org.junit.Test; public class TestAvroObjectInspectorGenerator { private static final TypeInfo STRING = 
TypeInfoFactory.getPrimitiveTypeInfo("string"); private static final TypeInfo INT = TypeInfoFactory.getPrimitiveTypeInfo("int"); private static final TypeInfo BOOLEAN = TypeInfoFactory.getPrimitiveTypeInfo("boolean"); private static final TypeInfo LONG = TypeInfoFactory.getPrimitiveTypeInfo("bigint"); private static final TypeInfo FLOAT = TypeInfoFactory.getPrimitiveTypeInfo("float"); private static final TypeInfo DOUBLE = TypeInfoFactory.getPrimitiveTypeInfo("double"); private static final TypeInfo VOID = TypeInfoFactory.getPrimitiveTypeInfo("void"); // These schemata are used in other tests static public final String MAP_WITH_PRIMITIVE_VALUE_TYPE = "{\n" + " \"namespace\": \"testing\",\n" + " \"name\": \"oneMap\",\n" + " \"type\": \"record\",\n" + " \"fields\": [\n" + " {\n" + " \"name\":\"aMap\",\n" + " \"type\":{\"type\":\"map\",\n" + " \"values\":\"long\"}\n" + "\t}\n" + " ]\n" + "}"; static public final String ARRAY_WITH_PRIMITIVE_ELEMENT_TYPE = "{\n" + " \"namespace\": \"testing\",\n" + " \"name\": \"oneArray\",\n" + " \"type\": \"record\",\n" + " \"fields\": [\n" + " {\n" + " \"name\":\"anArray\",\n" + " \"type\":{\"type\":\"array\",\n" + " \"items\":\"string\"}\n" + "\t}\n" + " ]\n" + "}"; public static final String RECORD_SCHEMA = "{\n" + " \"namespace\": \"testing.test.mctesty\",\n" + " \"name\": \"oneRecord\",\n" + " \"type\": \"record\",\n" + " \"fields\": [\n" + " {\n" + " \"name\":\"aRecord\",\n" + " \"type\":{\"type\":\"record\",\n" + " \"name\":\"recordWithinARecord\",\n" + " \"fields\": [\n" + " {\n" + " \"name\":\"int1\",\n" + " \"type\":\"int\"\n" + " },\n" + " {\n" + " \"name\":\"boolean1\",\n" + " \"type\":\"boolean\"\n" + " },\n" + " {\n" + " \"name\":\"long1\",\n" + " \"type\":\"long\"\n" + " }\n" + " ]}\n" + " }\n" + " ]\n" + "}"; public static final String NULLABLE_RECORD_SCHEMA = "[\"null\", " + RECORD_SCHEMA + "]"; public static final String SINGLE_ITEM_UNION_SCHEMA = "{\n" + " \"namespace\": \"test.a.rossa\",\n" + " \"name\": 
\"oneUnion\",\n" + " \"type\": \"record\",\n" + " \"fields\": [\n" + " {\n" + " \"name\":\"aUnion\",\n" + " \"type\":[\"string\"]\n" + " }\n" + " ]\n" + "}"; public static final String UNION_SCHEMA = "{\n" + " \"namespace\": \"test.a.rossa\",\n" + " \"name\": \"oneUnion\",\n" + " \"type\": \"record\",\n" + " \"fields\": [\n" + " {\n" + " \"name\":\"aUnion\",\n" + " \"type\":[\"int\", \"string\"]\n" + " }\n" + " ]\n" + "}"; public static final String UNION_SCHEMA_2 = "{\n" + " \"namespace\": \"test.a.rossa\",\n" + " \"name\": \"oneUnion\",\n" + " \"type\": \"record\",\n" + " \"fields\": [\n" + " {\n" + " \"name\":\"aUnion\",\n" + " \"type\":[\"null\", \"int\", \"string\"]\n" + " }\n" + " ]\n" + "}"; public static final String UNION_SCHEMA_3 = "{\n" + " \"namespace\": \"test.a.rossa\",\n" + " \"name\": \"oneUnion\",\n" + " \"type\": \"record\",\n" + " \"fields\": [\n" + " {\n" + " \"name\":\"aUnion\",\n" + " \"type\":[\"float\",\"int\"]\n" + " }\n" + " ]\n" + "}"; public static final String UNION_SCHEMA_4 = "{\n" + " \"namespace\": \"test.a.rossa\",\n" + " \"name\": \"oneUnion\",\n" + " \"type\": \"record\",\n" + " \"fields\": [\n" + " {\n" + " \"name\":\"aUnion\",\n" + " \"type\":[\"int\",\"float\",\"long\"]\n" + " }\n" + " ]\n" + "}"; public static final String ENUM_SCHEMA = "{\n" + " \"namespace\": \"clever.namespace.name.in.space\",\n" + " \"name\": \"oneEnum\",\n" + " \"type\": \"record\",\n" + " \"fields\": [\n" + " {\n" + " \"name\":\"baddies\",\n" + " \"type\":{\"type\":\"enum\",\"name\":\"villians\", \"symbols\": " + "[\"DALEKS\", \"CYBERMEN\", \"SLITHEEN\", \"JAGRAFESS\"]}\n" + " \n" + " \n" + " }\n" + " ]\n" + "}"; public static final String FIXED_SCHEMA = "{\n" + " \"namespace\": \"ecapseman\",\n" + " \"name\": \"oneFixed\",\n" + " \"type\": \"record\",\n" + " \"fields\": [\n" + " {\n" + " \"name\":\"hash\",\n" + " \"type\":{\"type\": \"fixed\", \"name\": \"MD5\", \"size\": 16}\n" + " }\n" + " ]\n" + "}"; public static final String NULLABLE_STRING_SCHEMA 
= "{\n" + " \"type\": \"record\", \n" + " \"name\": \"nullableUnionTest\",\n" + " \"fields\" : [\n" + " {\"name\":\"nullableString\", \"type\":[\"null\", \"string\"]}\n" + " ]\n" + "}"; public static final String MAP_WITH_NULLABLE_PRIMITIVE_VALUE_TYPE_SCHEMA = "{\n" + " \"namespace\": \"testing\",\n" + " \"name\": \"mapWithNullableUnionTest\",\n" + " \"type\": \"record\",\n" + " \"fields\": [\n" + " {\n" + " \"name\":\"aMap\",\n" + " \"type\":{\"type\":\"map\",\n" + " \"values\":[\"null\",\"long\"]}\n" + "\t}\n" + " ]\n" + "}"; public static final String NULLABLE_ENUM_SCHEMA = "{\n" + " \"namespace\": \"clever.namespace.name.in.space\",\n" + " \"name\": \"nullableUnionTest\",\n" + " \"type\": \"record\",\n" + " \"fields\": [\n" + " {\n" + " \"name\":\"nullableEnum\",\n" + " \"type\": [\"null\", {\"type\":\"enum\",\"name\":\"villians\", \"symbols\": " + "[\"DALEKS\", \"CYBERMEN\", \"SLITHEEN\", \"JAGRAFESS\"]}]\n" + " \n" + " \n" + " }\n" + " ]\n" + "}"; public static final String BYTES_SCHEMA = "{\n" + " \"type\": \"record\", \n" + " \"name\": \"bytesTest\",\n" + " \"fields\" : [\n" + " {\"name\":\"bytesField\", \"type\":\"bytes\"}\n" + " ]\n" + "}"; public static final String TIMESTAMP_SCHEMA = "{\n" + " \"type\": \"record\", \n" + " \"name\": \"timestampTest\",\n" + " \"fields\" : [\n" + " {\"name\":\"timestampField\", " + " \"type\":\"" + AvroSerDe.AVRO_LONG_TYPE_NAME + "\", " + " \"logicalType\":\"" + AvroSerDe.TIMESTAMP_TYPE_NAME + "\"}" + " ]\n" + "}"; public static final String KITCHEN_SINK_SCHEMA = "{\n" + " \"namespace\": \"org.apache.hadoop.hive\",\n" + " \"name\": \"kitchsink\",\n" + " \"type\": \"record\",\n" + " \"fields\": [\n" + " {\n" + " \"name\":\"string1\",\n" + " \"type\":\"string\"\n" + " },\n" + " {\n" + " \"name\":\"string2\",\n" + " \"type\":\"string\"\n" + " },\n" + " {\n" + " \"name\":\"int1\",\n" + " \"type\":\"int\"\n" + " },\n" + " {\n" + " \"name\":\"boolean1\",\n" + " \"type\":\"boolean\"\n" + " },\n" + " {\n" + " 
\"name\":\"long1\",\n" + " \"type\":\"long\"\n" + " },\n" + " {\n" + " \"name\":\"float1\",\n" + " \"type\":\"float\"\n" + " },\n" + " {\n" + " \"name\":\"double1\",\n" + " \"type\":\"double\"\n" + " },\n" + " {\n" + " \"name\":\"inner_record1\",\n" + " \"type\":{ \"type\":\"record\",\n" + " \"name\":\"inner_record1_impl\",\n" + " \"fields\": [\n" + " {\"name\":\"int_in_inner_record1\",\n" + " \"type\":\"int\"},\n" + " {\"name\":\"string_in_inner_record1\",\n" + " \"type\":\"string\"}\n" + " ]\n" + " }\n" + " },\n" + " {\n" + " \"name\":\"enum1\",\n" + " \"type\":{\"type\":\"enum\", \"name\":\"enum1_values\", " + "\"symbols\":[\"ENUM1_VALUES_VALUE1\",\"ENUM1_VALUES_VALUE2\", \"ENUM1_VALUES_VALUE3\"]}\n" + " },\n" + " {\n" + " \"name\":\"array1\",\n" + " \"type\":{\"type\":\"array\", \"items\":\"string\"}\n" + " },\n" + " {\n" + " \"name\":\"map1\",\n" + " \"type\":{\"type\":\"map\", \"values\":\"string\"}\n" + " },\n" + " {\n" + " \"name\":\"union1\",\n" + " \"type\":[\"float\", \"boolean\", \"string\"]\n" + " },\n" + " {\n" + " \"name\":\"fixed1\",\n" + " \"type\":{\"type\":\"fixed\", \"name\":\"fourbytes\", \"size\":4}\n" + " },\n" + " {\n" + " \"name\":\"null1\",\n" + " \"type\":\"null\"\n" + " },\n" + " {\n" + " \"name\":\"UnionNullInt\",\n" + " \"type\":[\"int\", \"null\"]\n" + " },\n" + " {\n" + " \"name\":\"bytes1\",\n" + " \"type\":\"bytes\"\n" + " }\n" + " ]\n" + "}"; @Test // that we can only process records public void failOnNonRecords() throws Exception { String nonRecordSchema = "{ \"type\": \"enum\",\n" + " \"name\": \"Suit\",\n" + " \"symbols\" : [\"SPADES\", \"HEARTS\", \"DIAMONDS\", \"CLUBS\"]\n" + "}"; Schema s = AvroSerdeUtils.getSchemaFor(nonRecordSchema); try { new AvroObjectInspectorGenerator(s); fail("Should not be able to handle non-record Avro types"); } catch(SerDeException sde) { assertTrue(sde.getMessage().startsWith("Schema for table must be of type RECORD")); } } @Test public void primitiveTypesWorkCorrectly() throws SerDeException { 
final String bunchOfPrimitives = "{\n" + " \"namespace\": \"testing\",\n" + " \"name\": \"PrimitiveTypes\",\n" + " \"type\": \"record\",\n" + " \"fields\": [\n" + " {\n" + " \"name\":\"aString\",\n" + " \"type\":\"string\"\n" + " },\n" + " {\n" + " \"name\":\"anInt\",\n" + " \"type\":\"int\"\n" + " },\n" + " {\n" + " \"name\":\"aBoolean\",\n" + " \"type\":\"boolean\"\n" + " },\n" + " {\n" + " \"name\":\"aLong\",\n" + " \"type\":\"long\"\n" + " },\n" + " {\n" + " \"name\":\"aFloat\",\n" + " \"type\":\"float\"\n" + " },\n" + " {\n" + " \"name\":\"aDouble\",\n" + " \"type\":\"double\"\n" + " },\n" + " {\n" + " \"name\":\"aNull\",\n" + " \"type\":\"null\"\n" + " }\n" + " ]\n" + "}"; AvroObjectInspectorGenerator aoig = new AvroObjectInspectorGenerator(AvroSerdeUtils.getSchemaFor(bunchOfPrimitives)); String [] expectedColumnNames = {"aString", "anInt", "aBoolean", "aLong", "aFloat", "aDouble", "aNull"}; verifyColumnNames(expectedColumnNames, aoig.getColumnNames()); TypeInfo [] expectedColumnTypes = {STRING, INT, BOOLEAN, LONG, FLOAT, DOUBLE, VOID}; verifyColumnTypes(expectedColumnTypes, aoig.getColumnTypes()); // Rip apart the object inspector, making sure we got what we expect. final ObjectInspector oi = aoig.getObjectInspector(); assertTrue(oi instanceof StandardStructObjectInspector); final StandardStructObjectInspector ssoi = (StandardStructObjectInspector)oi; List<? 
extends StructField> structFields = ssoi.getAllStructFieldRefs(); assertEquals(expectedColumnNames.length, structFields.size()); for(int i = 0; i < expectedColumnNames.length;i++) { assertEquals("Column names don't match", expectedColumnNames[i].toLowerCase(), structFields.get(i).getFieldName()); assertEquals("Column types don't match", expectedColumnTypes[i].getTypeName(), structFields.get(i).getFieldObjectInspector().getTypeName()); } } private void verifyColumnTypes(TypeInfo[] expectedColumnTypes, List<TypeInfo> columnTypes) { for(int i = 0; i < expectedColumnTypes.length; i++) { assertEquals(expectedColumnTypes[i], columnTypes.get(i)); } } private void verifyColumnNames(String[] expectedColumnNames, List<String> columnNames) { for(int i = 0; i < expectedColumnNames.length; i++) { assertEquals(expectedColumnNames[i], columnNames.get(i)); } } @Test public void canHandleMapsWithPrimitiveValueTypes() throws SerDeException { Schema s = AvroSerdeUtils.getSchemaFor(MAP_WITH_PRIMITIVE_VALUE_TYPE); AvroObjectInspectorGenerator aoig = new AvroObjectInspectorGenerator(s); verifyMap(aoig, "aMap"); } /** * Check a given AvroObjectInspectorGenerator to verify that it matches our test * schema's expected map. * @param aoig should already have been intitialized, may not be null * @param fieldName name of the contianed column, will always fail if null. 
*/ private void verifyMap(final AvroObjectInspectorGenerator aoig, final String fieldName) { // Column names assertEquals(1, aoig.getColumnNames().size()); assertEquals(fieldName, aoig.getColumnNames().get(0)); // Column types assertEquals(1, aoig.getColumnTypes().size()); TypeInfo typeInfo = aoig.getColumnTypes().get(0); assertEquals(ObjectInspector.Category.MAP, typeInfo.getCategory()); assertTrue(typeInfo instanceof MapTypeInfo); MapTypeInfo mapTypeInfo = (MapTypeInfo)typeInfo; assertEquals("bigint" /* == long in Avro */, mapTypeInfo.getMapValueTypeInfo().getTypeName()); assertEquals("string", mapTypeInfo.getMapKeyTypeInfo().getTypeName()); } @Test public void canHandleArrays() throws SerDeException { Schema s = AvroSerdeUtils.getSchemaFor(ARRAY_WITH_PRIMITIVE_ELEMENT_TYPE); AvroObjectInspectorGenerator aoig = new AvroObjectInspectorGenerator(s); // Column names assertEquals(1, aoig.getColumnNames().size()); assertEquals("anArray", aoig.getColumnNames().get(0)); // Column types assertEquals(1, aoig.getColumnTypes().size()); TypeInfo typeInfo = aoig.getColumnTypes().get(0); assertEquals(ObjectInspector.Category.LIST, typeInfo.getCategory()); assertTrue(typeInfo instanceof ListTypeInfo); ListTypeInfo listTypeInfo = (ListTypeInfo)typeInfo; assertEquals("string", listTypeInfo.getListElementTypeInfo().getTypeName()); } @Test public void canHandleRecords() throws SerDeException { Schema s = AvroSerdeUtils.getSchemaFor(RECORD_SCHEMA); AvroObjectInspectorGenerator aoig = new AvroObjectInspectorGenerator(s); // Column names assertEquals(1, aoig.getColumnNames().size()); assertEquals("aRecord", aoig.getColumnNames().get(0)); // Column types assertEquals(1, aoig.getColumnTypes().size()); TypeInfo typeInfo = aoig.getColumnTypes().get(0); assertEquals(ObjectInspector.Category.STRUCT, typeInfo.getCategory()); assertTrue(typeInfo instanceof StructTypeInfo); StructTypeInfo structTypeInfo = (StructTypeInfo)typeInfo; // Check individual elements of subrecord List<String> 
allStructFieldNames = structTypeInfo.getAllStructFieldNames(); List<TypeInfo> allStructFieldTypeInfos = structTypeInfo.getAllStructFieldTypeInfos(); assertEquals(allStructFieldNames.size(), 3); String[] names = new String[]{"int1", "boolean1", "long1"}; String [] typeInfoStrings = new String [] {"int", "boolean", "bigint"}; for(int i = 0; i < allStructFieldNames.size(); i++) { assertEquals("Fieldname " + allStructFieldNames.get(i) + " doesn't match expected " + names[i], names[i], allStructFieldNames.get(i)); assertEquals("Typeinfo " + allStructFieldTypeInfos.get(i) + " doesn't match expected " + typeInfoStrings[i], typeInfoStrings[i], allStructFieldTypeInfos.get(i).getTypeName()); } } @Test public void canHandleUnions() throws SerDeException { Schema s = AvroSerdeUtils.getSchemaFor(UNION_SCHEMA); AvroObjectInspectorGenerator aoig = new AvroObjectInspectorGenerator(s); // Column names assertEquals(1, aoig.getColumnNames().size()); assertEquals("aUnion", aoig.getColumnNames().get(0)); // Column types assertEquals(1, aoig.getColumnTypes().size()); TypeInfo typeInfo = aoig.getColumnTypes().get(0); assertTrue(typeInfo instanceof UnionTypeInfo); UnionTypeInfo uti = (UnionTypeInfo)typeInfo; // Check that the union has come out unscathed. No scathing of unions allowed. List<TypeInfo> typeInfos = uti.getAllUnionObjectTypeInfos(); assertEquals(2, typeInfos.size()); assertEquals(INT, typeInfos.get(0)); assertEquals(STRING, typeInfos.get(1)); assertEquals("uniontype<int,string>", uti.getTypeName()); } @Test // Enums are one of two Avro types that Hive doesn't have any native support for. 
public void canHandleEnums() throws SerDeException { Schema s = AvroSerdeUtils.getSchemaFor(ENUM_SCHEMA); AvroObjectInspectorGenerator aoig = new AvroObjectInspectorGenerator(s); // Column names - we lose the enumness of this schema assertEquals(1, aoig.getColumnNames().size()); assertEquals("baddies", aoig.getColumnNames().get(0)); // Column types assertEquals(1, aoig.getColumnTypes().size()); assertEquals(STRING, aoig.getColumnTypes().get(0)); } @Test // Hive has no concept of Avro's fixed type. Fixed -> arrays of bytes public void canHandleFixed() throws SerDeException { Schema s = AvroSerdeUtils.getSchemaFor(FIXED_SCHEMA); AvroObjectInspectorGenerator aoig = new AvroObjectInspectorGenerator(s); // Column names assertEquals(1, aoig.getColumnNames().size()); assertEquals("hash", aoig.getColumnNames().get(0)); // Column types assertEquals(1, aoig.getColumnTypes().size()); TypeInfo typeInfo = aoig.getColumnTypes().get(0); assertTrue(typeInfo instanceof PrimitiveTypeInfo); assertEquals(((PrimitiveTypeInfo) typeInfo).getPrimitiveCategory(), PrimitiveCategory.BINARY); } @Test // Avro considers bytes primitive, Hive doesn't. Make them list of tinyint. public void canHandleBytes() throws SerDeException { Schema s = AvroSerdeUtils.getSchemaFor(BYTES_SCHEMA); AvroObjectInspectorGenerator aoig = new AvroObjectInspectorGenerator(s); // Column names assertEquals(1, aoig.getColumnNames().size()); assertEquals("bytesField", aoig.getColumnNames().get(0)); // Column types assertEquals(1, aoig.getColumnTypes().size()); TypeInfo typeInfo = aoig.getColumnTypes().get(0); assertTrue(typeInfo instanceof PrimitiveTypeInfo); assertEquals(((PrimitiveTypeInfo) typeInfo).getPrimitiveCategory(), PrimitiveCategory.BINARY); } @Test // That Union[T, NULL] is converted to just T. 
public void convertsNullableTypes() throws SerDeException { Schema s = AvroSerdeUtils.getSchemaFor(NULLABLE_STRING_SCHEMA); AvroObjectInspectorGenerator aoig = new AvroObjectInspectorGenerator(s); assertEquals(1, aoig.getColumnNames().size()); assertEquals("nullableString", aoig.getColumnNames().get(0)); // Column types assertEquals(1, aoig.getColumnTypes().size()); TypeInfo typeInfo = aoig.getColumnTypes().get(0); assertTrue(typeInfo instanceof PrimitiveTypeInfo); PrimitiveTypeInfo pti = (PrimitiveTypeInfo) typeInfo; // Verify the union has been hidden and just the main type has been returned. assertEquals(PrimitiveObjectInspector.PrimitiveCategory.STRING, pti.getPrimitiveCategory()); } @Test // That Union[T, NULL] is converted to just T, within a Map public void convertsMapsWithNullablePrimitiveTypes() throws SerDeException { Schema s = AvroSerdeUtils.getSchemaFor(MAP_WITH_NULLABLE_PRIMITIVE_VALUE_TYPE_SCHEMA); AvroObjectInspectorGenerator aoig = new AvroObjectInspectorGenerator(s); verifyMap(aoig, "aMap"); } @Test // That Union[T, NULL] is converted to just T. public void convertsNullableEnum() throws SerDeException { Schema s = AvroSerdeUtils.getSchemaFor(NULLABLE_ENUM_SCHEMA); AvroObjectInspectorGenerator aoig = new AvroObjectInspectorGenerator(s); assertEquals(1, aoig.getColumnNames().size()); assertEquals("nullableEnum", aoig.getColumnNames().get(0)); // Column types assertEquals(1, aoig.getColumnTypes().size()); TypeInfo typeInfo = aoig.getColumnTypes().get(0); assertTrue(typeInfo instanceof PrimitiveTypeInfo); PrimitiveTypeInfo pti = (PrimitiveTypeInfo) typeInfo; // Verify the union has been hidden and just the main type has been returned. assertEquals(PrimitiveObjectInspector.PrimitiveCategory.STRING, pti.getPrimitiveCategory()); } @Test public void objectInspectorsAreCached() throws SerDeException { // Verify that Hive is caching the object inspectors for us. 
Schema s = AvroSerdeUtils.getSchemaFor(KITCHEN_SINK_SCHEMA); AvroObjectInspectorGenerator aoig = new AvroObjectInspectorGenerator(s); Schema s2 = AvroSerdeUtils.getSchemaFor(KITCHEN_SINK_SCHEMA); AvroObjectInspectorGenerator aoig2 = new AvroObjectInspectorGenerator(s2); assertEquals(aoig.getObjectInspector(), aoig2.getObjectInspector()); // For once we actually want reference equality in Java. assertTrue(aoig.getObjectInspector() == aoig2.getObjectInspector()); } }
apache-2.0
gingerwizard/elasticsearch
x-pack/plugin/async/src/main/java/org/elasticsearch/xpack/async/AsyncResultsIndexPlugin.java
3589
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.async;

import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.indices.SystemIndexDescriptor;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.SystemIndexPlugin;
import org.elasticsearch.repositories.RepositoriesService;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.watcher.ResourceWatcherService;
import org.elasticsearch.xpack.core.XPackPlugin;
import org.elasticsearch.xpack.core.async.AsyncTaskIndexService;
import org.elasticsearch.xpack.core.async.AsyncTaskMaintenanceService;
import org.elasticsearch.xpack.core.search.action.AsyncSearchResponse;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.function.Supplier;

import static org.elasticsearch.xpack.core.ClientHelper.ASYNC_SEARCH_ORIGIN;

/**
 * Plugin that declares the async-results system index and, on data nodes,
 * contributes the background maintenance service that works against it.
 */
public class AsyncResultsIndexPlugin extends Plugin implements SystemIndexPlugin {

    protected final Settings settings;

    public AsyncResultsIndexPlugin(Settings settings) {
        this.settings = settings;
    }

    @Override
    public Collection<SystemIndexDescriptor> getSystemIndexDescriptors(Settings settings) {
        // Register the async results index as a system index owned by this plugin.
        SystemIndexDescriptor asyncResultsDescriptor =
            new SystemIndexDescriptor(XPackPlugin.ASYNC_RESULTS_INDEX, this.getClass().getSimpleName());
        return Collections.singletonList(asyncResultsDescriptor);
    }

    @Override
    public Collection<Object> createComponents(
        Client client,
        ClusterService clusterService,
        ThreadPool threadPool,
        ResourceWatcherService resourceWatcherService,
        ScriptService scriptService,
        NamedXContentRegistry xContentRegistry,
        Environment environment,
        NodeEnvironment nodeEnvironment,
        NamedWriteableRegistry namedWriteableRegistry,
        IndexNameExpressionResolver indexNameExpressionResolver,
        Supplier<RepositoriesService> repositoriesServiceSupplier
    ) {
        if (DiscoveryNode.isDataNode(environment.settings()) == false) {
            // Only data nodes are eligible to run the maintenance service;
            // every other node type contributes no components.
            return new ArrayList<>();
        }
        AsyncTaskIndexService<AsyncSearchResponse> resultsIndexService = new AsyncTaskIndexService<>(
            XPackPlugin.ASYNC_RESULTS_INDEX,
            clusterService,
            threadPool.getThreadContext(),
            client,
            ASYNC_SEARCH_ORIGIN,
            AsyncSearchResponse::new,
            namedWriteableRegistry
        );
        List<Object> components = new ArrayList<>();
        components.add(
            new AsyncTaskMaintenanceService(clusterService, nodeEnvironment.nodeId(), settings, threadPool, resultsIndexService)
        );
        return components;
    }
}
apache-2.0
Gaduo/hapi-fhir
hapi-fhir-structures-dstu3/src/main/java/org/hl7/fhir/dstu3/model/codesystems/V3SubstanceAdminSubstitutionEnumFactory.java
3959
package org.hl7.fhir.dstu3.model.codesystems; /* Copyright (c) 2011+, HL7, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of HL7 nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ // Generated on Tue, Dec 6, 2016 09:42-0500 for FHIR v1.8.0 import org.hl7.fhir.dstu3.model.EnumFactory; public class V3SubstanceAdminSubstitutionEnumFactory implements EnumFactory<V3SubstanceAdminSubstitution> { public V3SubstanceAdminSubstitution fromCode(String codeString) throws IllegalArgumentException { if (codeString == null || "".equals(codeString)) return null; if ("_ActSubstanceAdminSubstitutionCode".equals(codeString)) return V3SubstanceAdminSubstitution._ACTSUBSTANCEADMINSUBSTITUTIONCODE; if ("E".equals(codeString)) return V3SubstanceAdminSubstitution.E; if ("EC".equals(codeString)) return V3SubstanceAdminSubstitution.EC; if ("BC".equals(codeString)) return V3SubstanceAdminSubstitution.BC; if ("G".equals(codeString)) return V3SubstanceAdminSubstitution.G; if ("TE".equals(codeString)) return V3SubstanceAdminSubstitution.TE; if ("TB".equals(codeString)) return V3SubstanceAdminSubstitution.TB; if ("TG".equals(codeString)) return V3SubstanceAdminSubstitution.TG; if ("F".equals(codeString)) return V3SubstanceAdminSubstitution.F; if ("N".equals(codeString)) return V3SubstanceAdminSubstitution.N; throw new IllegalArgumentException("Unknown V3SubstanceAdminSubstitution code '"+codeString+"'"); } public String toCode(V3SubstanceAdminSubstitution code) { if (code == V3SubstanceAdminSubstitution._ACTSUBSTANCEADMINSUBSTITUTIONCODE) return "_ActSubstanceAdminSubstitutionCode"; if (code == V3SubstanceAdminSubstitution.E) return "E"; if (code == V3SubstanceAdminSubstitution.EC) return "EC"; if (code == V3SubstanceAdminSubstitution.BC) return "BC"; if (code == V3SubstanceAdminSubstitution.G) return "G"; if (code == V3SubstanceAdminSubstitution.TE) return "TE"; if (code == V3SubstanceAdminSubstitution.TB) return "TB"; if (code == V3SubstanceAdminSubstitution.TG) return "TG"; if (code == V3SubstanceAdminSubstitution.F) return "F"; if (code == V3SubstanceAdminSubstitution.N) return "N"; return "?"; } public String toSystem(V3SubstanceAdminSubstitution code) { return 
code.getSystem(); } }
apache-2.0
radicalbit/ambari
ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/web/Home.java
1372
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.ambari.logsearch.web;

import org.apache.ambari.logsearch.domain.StoryDataRegistry;
import org.jbehave.web.selenium.WebDriverProvider;

import java.util.concurrent.TimeUnit;

/**
 * Page object for the Log Search home page, used by the Selenium/JBehave
 * integration tests. Host and port are taken from the shared
 * {@link StoryDataRegistry} singleton populated by the test setup.
 */
public class Home extends AbstractPage {

  public Home(WebDriverProvider driverProvider) {
    super(driverProvider);
  }

  /**
   * Navigates the driver to {@code http://<dockerHost>:<port>/index.html} and
   * configures a 10 second implicit wait so subsequent element lookups poll
   * instead of failing immediately.
   */
  public void open() {
    // get(...) and manage() are inherited from the Selenium page base class.
    get(String.format("http://%s:%d/index.html",
      StoryDataRegistry.INSTANCE.getDockerHost(),
      StoryDataRegistry.INSTANCE.getLogsearchPort()));
    manage().timeouts().implicitlyWait(10, TimeUnit.SECONDS);
  }
}
apache-2.0
jexp/idea2
platform/lang-api/src/com/intellij/lexer/DocCommentLexer.java
5217
/*
 * Copyright 2000-2009 JetBrains s.r.o.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.intellij.lexer;

import com.intellij.psi.tree.IElementType;
import com.intellij.psi.tree.TokenSet;
import com.intellij.util.text.CharArrayUtil;

import java.io.IOException;

/**
 * Lexer for Javadoc-style doc comments. Wraps the generated {@code _JavaDocLexer}
 * in an {@link AsteriskStripperLexer} (which tokenizes the decorative leading
 * asterisks of each comment line separately) and merges adjacent comment-data
 * and whitespace tokens via {@link MergingLexerAdapter}.
 */
public class DocCommentLexer extends MergingLexerAdapter {
  public DocCommentLexer(final DocCommentTokenTypes tokenTypes, final boolean isJdk15Enabled) {
    super(new AsteriskStripperLexer(new _JavaDocLexer(isJdk15Enabled, tokenTypes), tokenTypes),
          TokenSet.create(tokenTypes.commentData(), tokenTypes.space()));
  }

  /**
   * Lexer decorator that separates the leading {@code *} run at the start of each
   * doc-comment line (emitted as {@code commentLeadingAsterisks()}) and the
   * whitespace that follows it, delegating everything else to the flex lexer.
   */
  private static class AsteriskStripperLexer extends LexerBase {
    private final _JavaDocLexer myFlex;            // underlying generated flex lexer
    private final DocCommentTokenTypes myTokenTypes;
    private CharSequence myBuffer;                 // text being lexed
    private int myBufferIndex;                     // start offset of the current token
    private int myBufferEndOffset;                 // end of the lexable region
    private int myTokenEndOffset;                  // end offset of the current token
    private int myState;                           // flex state captured at last flex-produced token
    private IElementType myTokenType;              // current token, null = not yet computed
    private boolean myAfterLineBreak;              // last whitespace token contained a line break
    private boolean myInLeadingSpace;              // currently between leading asterisks and line content

    public AsteriskStripperLexer(final _JavaDocLexer flex, final DocCommentTokenTypes tokenTypes) {
      myFlex = flex;
      myTokenTypes = tokenTypes;
    }

    public final void start(CharSequence buffer, int startOffset, int endOffset, int initialState) {
      myBuffer = buffer;
      myBufferIndex = startOffset;
      myBufferEndOffset = endOffset;
      myTokenType = null;
      myTokenEndOffset = startOffset;
      myFlex.reset(myBuffer, startOffset, endOffset, initialState);
    }

    public int getState() {
      return myState;
    }

    public CharSequence getBufferSequence() {
      return myBuffer;
    }

    public int getBufferEnd() {
      return myBufferEndOffset;
    }

    // Token accessors lazily compute the next token on first access after advance().
    public final IElementType getTokenType() {
      locateToken();
      return myTokenType;
    }

    public final int getTokenStart() {
      locateToken();
      return myBufferIndex;
    }

    public final int getTokenEnd() {
      locateToken();
      return myTokenEndOffset;
    }

    public final void advance() {
      locateToken();
      // Dropping the cached type forces the next accessor call to compute the next token.
      myTokenType = null;
    }

    protected final void locateToken() {
      if (myTokenType != null) return;
      _locateToken();

      // Remember whether this whitespace token spans a line break so the next
      // call knows to strip a leading-asterisk run.
      if (myTokenType == myTokenTypes.space()) {
        myAfterLineBreak = CharArrayUtil.containLineBreaks(myBuffer, getTokenStart(), getTokenEnd());
      }
    }

    private void _locateToken() {
      if (myTokenEndOffset == myBufferEndOffset) {
        // Reached the end of the region: no more tokens.
        myTokenType = null;
        myBufferIndex = myBufferEndOffset;
        return;
      }

      myBufferIndex = myTokenEndOffset;

      if (myAfterLineBreak) {
        myAfterLineBreak = false;
        // Consume the run of '*' characters at line start, but stop before "*/"
        // so the comment terminator is left for the flex lexer.
        while (myTokenEndOffset < myBufferEndOffset && myBuffer.charAt(myTokenEndOffset) == '*' &&
               (myTokenEndOffset + 1 >= myBufferEndOffset || myBuffer.charAt(myTokenEndOffset + 1) != '/')) {
          myTokenEndOffset++;
        }

        myInLeadingSpace = true;
        if (myBufferIndex < myTokenEndOffset) {
          myTokenType = myTokenTypes.commentLeadingAsterisks();
          return;
        }
      }

      if (myInLeadingSpace) {
        myInLeadingSpace = false;
        boolean lf = false;
        // Swallow whitespace following the asterisks, tracking embedded line feeds.
        while (myTokenEndOffset < myBufferEndOffset && Character.isWhitespace(myBuffer.charAt(myTokenEndOffset))) {
          if (myBuffer.charAt(myTokenEndOffset) == '\n') lf = true;
          myTokenEndOffset++;
        }

        final int state = myFlex.yystate();
        // If we are in plain comment data, or the next char starts a tag/markup
        // construct, re-sync the flex lexer to the start-of-comment-data state.
        if (state == _JavaDocLexer.COMMENT_DATA ||
            myTokenEndOffset < myBufferEndOffset && (myBuffer.charAt(myTokenEndOffset) == '@' || myBuffer.charAt(myTokenEndOffset) == '{' || myBuffer.charAt(myTokenEndOffset) == '\"' || myBuffer.charAt(myTokenEndOffset) == '<')) {
          myFlex.yybegin(_JavaDocLexer.COMMENT_DATA_START);
        }

        if (myBufferIndex < myTokenEndOffset) {
          // Whitespace after a line break (or in tag-space states) is a space token;
          // otherwise it is treated as part of the comment data.
          myTokenType = lf || state == _JavaDocLexer.PARAM_TAG_SPACE || state == _JavaDocLexer.TAG_DOC_SPACE || state == _JavaDocLexer.INLINE_TAG_NAME || state == _JavaDocLexer.DOC_TAG_VALUE_IN_PAREN
                        ? myTokenTypes.space()
                        : myTokenTypes.commentData();
          return;
        }
      }

      // Nothing stripped at this position: let the flex lexer produce the token.
      flexLocateToken();
    }

    private void flexLocateToken() {
      try {
        myState = myFlex.yystate();
        myFlex.goTo(myBufferIndex);
        myTokenType = myFlex.advance();
        myTokenEndOffset = myFlex.getTokenEnd();
      }
      catch (IOException e) {
        // Can't be
      }
    }
  }
}
apache-2.0
jackqth/zhebuy
src/main/java/com/yogee/zhebuy/common/persistence/dialect/Dialect.java
899
/** * Copyright &copy; 2012-2016 <a href="https://github.com/yogee/zhebuy">JeeSite</a> All rights reserved. */ package com.yogee.zhebuy.common.persistence.dialect; /** * 类似hibernate的Dialect,但只精简出分页部分 * * @author poplar.yfyang * @version 1.0 2011-11-18 下午12:31 * @since JDK 1.5 */ public interface Dialect { /** * 数据库本身是否支持分页当前的分页查询方式 * 如果数据库不支持的话,则不进行数据库分页 * * @return true:支持当前的分页查询方式 */ public boolean supportsLimit(); /** * 将sql转换为分页SQL,分别调用分页sql * * @param sql SQL语句 * @param offset 开始条数 * @param limit 每页显示多少纪录条数 * @return 分页查询的sql */ public String getLimitString(String sql, int offset, int limit); }
apache-2.0
carloshwa/apps-android-wikipedia
app/src/main/java/org/wikipedia/search/SearchBarHideHandler.java
6798
package org.wikipedia.search;

import android.app.Activity;
import android.content.Context;
import android.content.res.Resources;
import android.graphics.drawable.Drawable;
import android.graphics.drawable.LayerDrawable;
import android.support.annotation.ColorInt;
import android.support.annotation.ColorRes;
import android.support.annotation.NonNull;
import android.support.annotation.Nullable;
import android.view.Gravity;
import android.view.View;

import org.wikipedia.R;
import org.wikipedia.ViewAnimations;
import org.wikipedia.util.DimenUtil;
import org.wikipedia.util.GradientUtil;
import org.wikipedia.views.ObservableWebView;

/**
 * Hides/shows the search bar ("quick return" view) in response to scrolling of an
 * {@link ObservableWebView}: scrolling down slides the bar off-screen, scrolling up
 * brings it back, and near the top of the page the bar's background/shadow/status-bar
 * drawables are faded in proportion to the scroll position (when fading is enabled).
 */
public class SearchBarHideHandler implements ObservableWebView.OnScrollChangeListener,
        ObservableWebView.OnUpOrCancelMotionEventListener, ObservableWebView.OnDownMotionEventListener {
    private static final int FULL_OPACITY = 255;
    @NonNull private final View quickReturnView;    // the search bar container being shown/hidden
    @NonNull private final Context context;
    @Nullable private ObservableWebView webview;    // currently-tracked scroll source, may be unset
    private boolean fadeEnabled;                    // fade near top of article enabled?
    private boolean forceNoFade;                    // temporary override that disables fading
    @NonNull private final Drawable toolbarBackground;
    private Drawable toolbarGradient;               // gradient layer shown while solid background is faded out
    @NonNull private final Drawable toolbarShadow;
    @NonNull private final Drawable statusBar;

    public SearchBarHideHandler(@NonNull Activity activity, @NonNull View quickReturnView) {
        context = activity;
        this.quickReturnView = quickReturnView;
        // mutate() so alpha changes below don't affect other users of the same drawables.
        LayerDrawable toolbarBackgroundLayers = (LayerDrawable) quickReturnView
                .findViewById(R.id.main_toolbar_background_container).getBackground();
        toolbarBackground = toolbarBackgroundLayers.findDrawableByLayerId(R.id.toolbar_background_solid).mutate();
        toolbarShadow = quickReturnView.findViewById(R.id.main_toolbar_shadow).getBackground().mutate();
        initToolbarGradient(toolbarBackgroundLayers);
        statusBar = quickReturnView.findViewById(R.id.empty_status_bar).getBackground().mutate();
    }

    /**
     * Update the WebView based on whose scroll position the search bar will hide itself.
     * @param webView The WebView against which scrolling will be tracked.
     */
    public void setScrollView(@Nullable ObservableWebView webView) {
        webview = webView;
        if (webview != null) {
            webview.addOnScrollChangeListener(this);
            webview.addOnDownMotionEventListener(this);
            webview.addOnUpOrCancelMotionEventListener(this);
        }
    }

    /**
     * Whether to enable fading in/out of the search bar when near the top of the article.
     * @param enabled True to enable fading, false otherwise.
     */
    public void setFadeEnabled(boolean enabled) {
        fadeEnabled = enabled;
        update();
    }

    /**
     * Whether to temporarily disable fading of the search bar, even if fading is enabled otherwise.
     * May be used when displaying a temporary UI element that requires the search bar to be shown
     * fully, e.g. when the ToC is pulled out.
     * @param force True to temporarily disable fading, false otherwise.
     */
    public void setForceNoFade(boolean force) {
        forceNoFade = force;
        update();
    }

    /**
     * Force an update of the appearance of the search bar. Usually it is updated automatically
     * when the associated WebView is scrolled, but this function may be used to manually refresh
     * the appearance of the search bar, e.g. when the WebView is first shown.
     */
    public void update() {
        if (webview == null) {
            return;
        }
        // Re-run the scroll handler with a zero scroll delta to refresh alphas/position.
        onScrollChanged(webview.getScrollY(), webview.getScrollY(), false);
    }

    @Override
    public void onScrollChanged(int oldScrollY, int scrollY, boolean isHumanScroll) {
        if (webview == null) {
            return;
        }
        // Cross-fade: solid background/shadow/status-bar get "opacity", gradient gets the inverse.
        int opacity = calculateScrollOpacity(scrollY);
        toolbarBackground.setAlpha(opacity);
        toolbarShadow.setAlpha(opacity);
        toolbarGradient.setAlpha(FULL_OPACITY - opacity);
        statusBar.setAlpha(opacity);

        if (scrollY <= webview.getHeight()) {
            // For the first screenful, ensure it always exists.
            ViewAnimations.ensureTranslationY(quickReturnView, 0);
            return;
        }
        int animMargin;
        if (oldScrollY > scrollY) {
            // scrolling up: slide the bar back toward fully visible (translationY -> 0).
            int minMargin = 0;
            int scrollDelta = oldScrollY - scrollY;
            int newMargin = (int) quickReturnView.getTranslationY() + scrollDelta;
            animMargin = Math.min(minMargin, newMargin);
        } else {
            // scroll down!
            int scrollDelta = scrollY - oldScrollY;
            if (!isHumanScroll) {
                // we've been scrolled programmatically, probably to go to
                // a specific section, so keep the toolbar shown.
                animMargin = 0;
            } else {
                // slide the bar up, clamped at fully hidden (-height).
                int minMargin = -quickReturnView.getHeight();
                int newMargin = (int) quickReturnView.getTranslationY() - scrollDelta;
                animMargin = Math.max(minMargin, newMargin);
            }
        }
        quickReturnView.setTranslationY(animMargin);
    }

    @Override
    public void onUpOrCancelMotionEvent() {
        // When the user lifts their finger with the bar partially hidden, snap it
        // to whichever end (shown/hidden) is closer.
        int transY = (int) quickReturnView.getTranslationY();
        int height = quickReturnView.getHeight();
        if (transY != 0 && transY > -height) {
            if (transY > -height / 2) {
                // Fully display it
                ViewAnimations.ensureTranslationY(quickReturnView, 0);
            } else {
                // Fully hide it
                ViewAnimations.ensureTranslationY(quickReturnView, -height);
            }
        }
    }

    @Override
    public void onDownMotionEvent() {
        // Don't do anything for now
    }

    private void initToolbarGradient(LayerDrawable toolbarBackgroundLayers) {
        @ColorInt int baseColor = getColor(R.color.lead_gradient_start);
        toolbarGradient = GradientUtil.getCubicGradient(baseColor, Gravity.TOP);
        toolbarBackgroundLayers.setDrawableByLayerId(R.id.toolbar_background_gradient, toolbarGradient);
    }

    /** @return Alpha value between 0 and 0xff.
     */
    private int calculateScrollOpacity(int scrollY) {
        // Fade completes over 256 density-independent pixels of scrolling.
        final int fadeHeight = 256;
        int opacity = FULL_OPACITY;
        if (fadeEnabled && !forceNoFade) {
            opacity = scrollY * FULL_OPACITY / (int) (fadeHeight * DimenUtil.getDensityScalar());
        }
        opacity = Math.max(0, opacity);
        opacity = Math.min(FULL_OPACITY, opacity);
        return opacity;
    }

    @ColorInt private int getColor(@ColorRes int id) {
        return getResources().getColor(id);
    }

    @NonNull private Resources getResources() {
        return context.getResources();
    }
}
apache-2.0
appsw/ShopBKY
app/src/main/java/bai/kang/yun/zxd/mvp/model/api/service/GoodsGridService.java
442
package bai.kang.yun.zxd.mvp.model.api.service;

import bai.kang.yun.zxd.mvp.model.entity.ReturnGoods;
import retrofit2.http.GET;
import rx.Observable;

/**
 * Retrofit service for fetching the recommended-goods grid.
 *
 * Created by Administrator on 2017/4/17 0017.
 */
public interface GoodsGridService {
//    String HEADER_API_VERSION = "Accept: application/vnd.github.v3+json";
//
//    @Headers({HEADER_API_VERSION})
    /**
     * GET /goods/get_recommend — fetches the list of recommended goods.
     *
     * @return an RxJava Observable emitting the ReturnGoods response wrapper
     */
    @GET("/goods/get_recommend")
    Observable<ReturnGoods> getGoodsGrid();
}
apache-2.0
jamesnetherton/camel
camel-core/src/main/java/org/apache/camel/util/StringHelper.java
24585
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.camel.util;

import static org.apache.camel.util.StringQuoteHelper.doubleQuote;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Helper methods for working with Strings.
 */
public final class StringHelper {

    /**
     * Constructor of utility class should be private.
     */
    private StringHelper() {
    }

    /**
     * Ensures that <code>s</code> is friendly for a URL or file system.
     *
     * @param s String to be sanitized.
     * @return sanitized version of <code>s</code>.
     * @throws NullPointerException if <code>s</code> is <code>null</code>.
     */
    public static String sanitize(String s) {
        return s
            .replace(':', '-')
            .replace('_', '-')
            .replace('.', '-')
            .replace('/', '-')
            .replace('\\', '-');
    }

    /**
     * Counts the number of times the given char is in the string
     *
     * @param s  the string
     * @param ch the char
     * @return number of times char is located in the string
     */
    public static int countChar(String s, char ch) {
        if (ObjectHelper.isEmpty(s)) {
            return 0;
        }

        int matches = 0;
        for (int i = 0; i < s.length(); i++) {
            char c = s.charAt(i);
            if (ch == c) {
                matches++;
            }
        }

        return matches;
    }

    /**
     * Limits the length of a string
     *
     * @param s the string
     * @param maxLength the maximum length of the returned string
     * @return s if the length of s is less than maxLength or the first maxLength characters of s
     * @deprecated use {@link #limitLength(String, int)}
     */
    @Deprecated
    public static String limitLenght(String s, int maxLength) {
        return limitLength(s, maxLength);
    }

    /**
     * Limits the length of a string
     *
     * @param s the string
     * @param maxLength the maximum length of the returned string
     * @return s if the length of s is less than maxLength or the first maxLength characters of s
     */
    public static String limitLength(String s, int maxLength) {
        if (ObjectHelper.isEmpty(s)) {
            return s;
        }
        return s.length() <= maxLength ? s : s.substring(0, maxLength);
    }

    /**
     * Removes all quotes (single and double) from the string
     *
     * @param s  the string
     * @return the string without quotes (single and double)
     */
    public static String removeQuotes(String s) {
        if (ObjectHelper.isEmpty(s)) {
            return s;
        }

        s = replaceAll(s, "'", "");
        s = replaceAll(s, "\"", "");
        return s;
    }

    /**
     * Removes all leading and ending quotes (single and double) from the string
     *
     * @param s  the string
     * @return the string without leading and ending quotes (single and double)
     */
    public static String removeLeadingAndEndingQuotes(String s) {
        if (ObjectHelper.isEmpty(s)) {
            return s;
        }

        String copy = s.trim();
        if (copy.startsWith("'") && copy.endsWith("'")) {
            return copy.substring(1, copy.length() - 1);
        }
        if (copy.startsWith("\"") && copy.endsWith("\"")) {
            return copy.substring(1, copy.length() - 1);
        }

        // no quotes, so return as-is
        return s;
    }

    /**
     * Whether the string starts and ends with either single or double quotes.
     *
     * @param s the string
     * @return <tt>true</tt> if the string starts and ends with either single or double quotes.
     */
    public static boolean isQuoted(String s) {
        if (ObjectHelper.isEmpty(s)) {
            return false;
        }

        if (s.startsWith("'") && s.endsWith("'")) {
            return true;
        }
        if (s.startsWith("\"") && s.endsWith("\"")) {
            return true;
        }

        return false;
    }

    /**
     * Encodes the text into safe XML by replacing &lt; &gt; and &amp; with XML tokens
     *
     * @param text the text
     * @return the encoded text
     */
    public static String xmlEncode(String text) {
        if (text == null) {
            return "";
        }
        // must replace amp first, so we dont replace &lt; to amp later
        text = replaceAll(text, "&", "&amp;");
        text = replaceAll(text, "\"", "&quot;");
        text = replaceAll(text, "<", "&lt;");
        text = replaceAll(text, ">", "&gt;");
        return text;
    }

    /**
     * Determines if the string has at least one letter in upper case
     * @param text the text
     * @return <tt>true</tt> if at least one letter is upper case, <tt>false</tt> otherwise
     */
    public static boolean hasUpperCase(String text) {
        if (text == null) {
            return false;
        }

        for (int i = 0; i < text.length(); i++) {
            char ch = text.charAt(i);
            if (Character.isUpperCase(ch)) {
                return true;
            }
        }

        return false;
    }

    /**
     * Determines if the string is a fully qualified class name
     */
    public static boolean isClassName(String text) {
        boolean result = false;
        if (text != null) {
            String[] split = text.split("\\.");
            if (split.length > 0) {
                String lastToken = split[split.length - 1];
                if (lastToken.length() > 0) {
                    result = Character.isUpperCase(lastToken.charAt(0));
                }
            }
        }
        return result;
    }

    /**
     * Does the expression have the language start token?
     *
     * @param expression the expression
     * @param language the name of the language, such as simple
     * @return <tt>true</tt> if the expression contains the start token, <tt>false</tt> otherwise
     */
    public static boolean hasStartToken(String expression, String language) {
        if (expression == null) {
            return false;
        }

        // for the simple language the expression start token could be "${"
        if ("simple".equalsIgnoreCase(language) && expression.contains("${")) {
            return true;
        }

        if (language != null && expression.contains("$" + language + "{")) {
            return true;
        }

        return false;
    }

    /**
     * Replaces all the from tokens in the given input string.
     * <p/>
     * This implementation is not recursive, not does it check for tokens in the replacement string.
     *
     * @param input  the input string
     * @param from   the from string, must <b>not</b> be <tt>null</tt> or empty
     * @param to     the replacement string, must <b>not</b> be empty
     * @return the replaced string, or the input string if no replacement was needed
     * @throws IllegalArgumentException if the input arguments is invalid
     */
    public static String replaceAll(String input, String from, String to) {
        if (ObjectHelper.isEmpty(input)) {
            return input;
        }
        if (from == null) {
            throw new IllegalArgumentException("from cannot be null");
        }
        if (to == null) {
            // to can be empty, so only check for null
            throw new IllegalArgumentException("to cannot be null");
        }

        // fast check if there is any from at all
        if (!input.contains(from)) {
            return input;
        }

        final int len = from.length();
        final int max = input.length();
        StringBuilder sb = new StringBuilder(max);
        for (int i = 0; i < max;) {
            if (i + len <= max) {
                String token = input.substring(i, i + len);
                if (from.equals(token)) {
                    sb.append(to);
                    // fast forward
                    i = i + len;
                    continue;
                }
            }

            // append single char
            sb.append(input.charAt(i));
            // forward to next
            i++;
        }
        return sb.toString();
    }

    /**
     * Creates a json tuple with the given name/value pair.
     *
     * @param name  the name
     * @param value the value
     * @param isMap whether the tuple should be map
     * @return the json
     */
    public static String toJson(String name, String value, boolean isMap) {
        if (isMap) {
            return "{ " + doubleQuote(name) + ": " + doubleQuote(value) + " }";
        } else {
            return doubleQuote(name) + ": " + doubleQuote(value);
        }
    }

    /**
     * Asserts whether the string is <b>not</b> empty.
     *
     * @param value  the string to test
     * @param name   the key that resolved the value
     * @return the passed {@code value} as is
     * @throws IllegalArgumentException is thrown if assertion fails
     */
    public static String notEmpty(String value, String name) {
        if (ObjectHelper.isEmpty(value)) {
            throw new IllegalArgumentException(name + " must be specified and not empty");
        }

        return value;
    }

    /**
     * Asserts whether the string is <b>not</b> empty.
     *
     * @param value  the string to test
     * @param on     additional description to indicate where this problem occurred (appended as toString())
     * @param name   the key that resolved the value
     * @return the passed {@code value} as is
     * @throws IllegalArgumentException is thrown if assertion fails
     */
    public static String notEmpty(String value, String name, Object on) {
        if (on == null) {
            ObjectHelper.notNull(value, name);
        } else if (ObjectHelper.isEmpty(value)) {
            throw new IllegalArgumentException(name + " must be specified and not empty on: " + on);
        }

        return value;
    }

    /**
     * Splits the value on the needle into at most count parts; trailing slots
     * stay <tt>null</tt> when fewer occurrences of the needle exist.
     */
    public static String[] splitOnCharacter(String value, String needle, int count) {
        String[] rc = new String[count];
        rc[0] = value;
        for (int i = 1; i < count; i++) {
            String v = rc[i - 1];
            int p = v.indexOf(needle);
            if (p < 0) {
                return rc;
            }
            rc[i - 1] = v.substring(0, p);
            rc[i] = v.substring(p + 1);
        }
        return rc;
    }

    /**
     * Removes any starting characters on the given text which match the given
     * character
     *
     * @param text  the string
     * @param ch    the initial characters to remove
     * @return either the original string or the new substring
     */
    public static String removeStartingCharacters(String text, char ch) {
        int idx = 0;
        // Fix: guard against running past the end of the string, which previously
        // threw StringIndexOutOfBoundsException when text was empty or consisted
        // solely of the given character.
        while (idx < text.length() && text.charAt(idx) == ch) {
            idx++;
        }
        if (idx > 0) {
            return text.substring(idx);
        }
        return text;
    }

    /**
     * Capitalize the string (upper case first character)
     *
     * @param text the string
     * @return the string capitalized (upper case first character)
     */
    public static String capitalize(String text) {
        if (text == null) {
            return null;
        }
        int length = text.length();
        if (length == 0) {
            return text;
        }
        String answer = text.substring(0, 1).toUpperCase(Locale.ENGLISH);
        if (length > 1) {
            answer += text.substring(1, length);
        }
        return answer;
    }

    /**
     * Returns the string after the given token
     *
     * @param text  the text
     * @param after the token
     * @return the text after the token, or <tt>null</tt> if text does not contain the token
     */
    public static String after(String text, String after) {
        if (!text.contains(after)) {
            return null;
        }
        return text.substring(text.indexOf(after) + after.length());
    }

    /**
     * Returns an object after the given token
     *
     * @param text  the text
     * @param after the token
     * @param mapper a mapping function to convert the string after the token to type T
     * @return an Optional describing the result of applying a mapping function to the text after the token.
     */
    public static <T> Optional<T> after(String text, String after, Function<String, T> mapper) {
        String result = after(text, after);
        if (result == null) {
            return Optional.empty();
        } else {
            return Optional.ofNullable(mapper.apply(result));
        }
    }

    /**
     * Returns the string before the given token
     *
     * @param text the text
     * @param before the token
     * @return the text before the token, or <tt>null</tt> if text does not
     *         contain the token
     */
    public static String before(String text, String before) {
        if (!text.contains(before)) {
            return null;
        }
        return text.substring(0, text.indexOf(before));
    }

    /**
     * Returns an object before the given token
     *
     * @param text  the text
     * @param before the token
     * @param mapper a mapping function to convert the string before the token to type T
     * @return an Optional describing the result of applying a mapping function to the text before the token.
     */
    public static <T> Optional<T> before(String text, String before, Function<String, T> mapper) {
        String result = before(text, before);
        if (result == null) {
            return Optional.empty();
        } else {
            return Optional.ofNullable(mapper.apply(result));
        }
    }

    /**
     * Returns the string between the given tokens
     *
     * @param text  the text
     * @param after the before token
     * @param before the after token
     * @return the text between the tokens, or <tt>null</tt> if text does not contain the tokens
     */
    public static String between(String text, String after, String before) {
        text = after(text, after);
        if (text == null) {
            return null;
        }
        return before(text, before);
    }

    /**
     * Returns an object between the given token
     *
     * @param text  the text
     * @param after the before token
     * @param before the after token
     * @param mapper a mapping function to convert the string between the token to type T
     * @return an Optional describing the result of applying a mapping function to the text between the token.
     */
    public static <T> Optional<T> between(String text, String after, String before, Function<String, T> mapper) {
        String result = between(text, after, before);
        if (result == null) {
            return Optional.empty();
        } else {
            return Optional.ofNullable(mapper.apply(result));
        }
    }

    /**
     * Returns the string between the most outer pair of tokens
     * <p/>
     * The number of token pairs must be evenly, eg there must be same number of before and after tokens, otherwise <tt>null</tt> is returned
     * <p/>
     * This implementation skips matching when the text is either single or double quoted.
     * For example:
     * <tt>${body.matches("foo('bar')")</tt>
     * Will not match the parenthesis from the quoted text.
     *
     * @param text  the text
     * @param after the before token
     * @param before the after token
     * @return the text between the outer most tokens, or <tt>null</tt> if text does not contain the tokens
     */
    public static String betweenOuterPair(String text, char before, char after) {
        if (text == null) {
            return null;
        }

        int pos = -1;
        int pos2 = -1;
        int count = 0;
        int count2 = 0;

        boolean singleQuoted = false;
        boolean doubleQuoted = false;
        for (int i = 0; i < text.length(); i++) {
            char ch = text.charAt(i);
            if (!doubleQuoted && ch == '\'') {
                singleQuoted = !singleQuoted;
            } else if (!singleQuoted && ch == '\"') {
                doubleQuoted = !doubleQuoted;
            }
            if (singleQuoted || doubleQuoted) {
                continue;
            }

            if (ch == before) {
                count++;
            } else if (ch == after) {
                count2++;
            }

            if (ch == before && pos == -1) {
                pos = i;
            } else if (ch == after) {
                pos2 = i;
            }
        }

        if (pos == -1 || pos2 == -1) {
            return null;
        }

        // must be even pairs
        if (count != count2) {
            return null;
        }

        return text.substring(pos + 1, pos2);
    }

    /**
     * Returns an object between the most outer pair of tokens
     *
     * @param text  the text
     * @param after the before token
     * @param before the after token
     * @param mapper a mapping function to convert the string between the most outer pair of tokens to type T
     * @return an Optional describing the result of applying a mapping function to the text between the most outer pair of tokens.
     */
    public static <T> Optional<T> betweenOuterPair(String text, char before, char after, Function<String, T> mapper) {
        String result = betweenOuterPair(text, before, after);
        if (result == null) {
            return Optional.empty();
        } else {
            return Optional.ofNullable(mapper.apply(result));
        }
    }

    /**
     * Returns true if the given name is a valid java identifier
     */
    public static boolean isJavaIdentifier(String name) {
        if (name == null) {
            return false;
        }
        int size = name.length();
        if (size < 1) {
            return false;
        }
        if (Character.isJavaIdentifierStart(name.charAt(0))) {
            for (int i = 1; i < size; i++) {
                if (!Character.isJavaIdentifierPart(name.charAt(i))) {
                    return false;
                }
            }
            return true;
        }
        return false;
    }

    /**
     * Cleans the string to a pure Java identifier so we can use it for loading class names.
     * <p/>
     * Especially from Spring DSL people can have \n \t or other characters that otherwise
     * would result in ClassNotFoundException
     *
     * @param name the class name
     * @return normalized classname that can be load by a class loader.
     */
    public static String normalizeClassName(String name) {
        StringBuilder sb = new StringBuilder(name.length());
        for (char ch : name.toCharArray()) {
            if (ch == '.' || ch == '[' || ch == ']' || ch == '-' || Character.isJavaIdentifierPart(ch)) {
                sb.append(ch);
            }
        }
        return sb.toString();
    }

    /**
     * Compares old and new text content and report back which lines are changed
     *
     * @param oldText the old text
     * @param newText the new text
     * @return a list of line numbers that are changed in the new text
     */
    public static List<Integer> changedLines(String oldText, String newText) {
        if (oldText == null || oldText.equals(newText)) {
            return Collections.emptyList();
        }

        List<Integer> changed = new ArrayList<>();

        String[] oldLines = oldText.split("\n");
        String[] newLines = newText.split("\n");

        for (int i = 0; i < newLines.length; i++) {
            String newLine = newLines[i];
            String oldLine = i < oldLines.length ? oldLines[i] : null;
            if (oldLine == null) {
                changed.add(i);
            } else if (!newLine.equals(oldLine)) {
                changed.add(i);
            }
        }

        return changed;
    }

    /**
     * Removes the leading and trailing whitespace and if the resulting
     * string is empty returns {@code null}. Examples:
     * <p>
     * Examples:
     * <blockquote><pre>
     * trimToNull("abc") -> "abc"
     * trimToNull(" abc") -> "abc"
     * trimToNull(" abc ") -> "abc"
     * trimToNull(" ") -> null
     * trimToNull("") -> null
     * </pre></blockquote>
     */
    public static String trimToNull(final String given) {
        if (given == null) {
            return null;
        }

        final String trimmed = given.trim();

        if (trimmed.isEmpty()) {
            return null;
        }

        return trimmed;
    }

    /**
     * Checks if the src string contains what
     *
     * @param src  is the source string to be checked
     * @param what is the string which will be looked up in the src argument
     * @return true/false
     */
    public static boolean containsIgnoreCase(String src, String what) {
        if (src == null || what == null) {
            return false;
        }

        final int length = what.length();
        if (length == 0) {
            return true; // Empty string is contained
        }

        final char firstLo = Character.toLowerCase(what.charAt(0));
        final char firstUp = Character.toUpperCase(what.charAt(0));

        for (int i = src.length() - length; i >= 0; i--) {
            // Quick check before calling the more expensive regionMatches() method:
            final char ch = src.charAt(i);
            if (ch != firstLo && ch != firstUp) {
                continue;
            }

            if (src.regionMatches(true, i, what, 0, length)) {
                return true;
            }
        }

        return false;
    }

    /**
     * Outputs the bytes in human readable format in units of KB,MB,GB etc.
     *
     * @param locale The locale to apply during formatting. If l is {@code null} then no localization is applied.
     * @param bytes number of bytes
     * @return human readable output
     * @see java.lang.String#format(Locale, String, Object...)
     */
    public static String humanReadableBytes(Locale locale, long bytes) {
        int unit = 1024;
        if (bytes < unit) {
            return bytes + " B";
        }
        int exp = (int) (Math.log(bytes) / Math.log(unit));
        String pre = "KMGTPE".charAt(exp - 1) + "";
        return String.format(locale, "%.1f %sB", bytes / Math.pow(unit, exp), pre);
    }

    /**
     * Outputs the bytes in human readable format in units of KB,MB,GB etc.
     *
     * The locale always used is the one returned by {@link java.util.Locale#getDefault()}.
     *
     * @param bytes number of bytes
     * @return human readable output
     * @see org.apache.camel.util.StringHelper#humanReadableBytes(Locale, long)
     */
    public static String humanReadableBytes(long bytes) {
        return humanReadableBytes(Locale.getDefault(), bytes);
    }

    /**
     * Check for string pattern matching with a number of strategies in the
     * following order:
     *
     * - equals
     * - null pattern always matches
     * - * always matches
     * - Ant style matching
     * - Regexp
     *
     * @param pattern the pattern
     * @param target the string to test
     * @return true if target matches the pattern
     */
    public static boolean matches(String pattern, String target) {
        if (Objects.equals(pattern, target)) {
            return true;
        }

        if (Objects.isNull(pattern)) {
            return true;
        }

        if (Objects.equals("*", pattern)) {
            return true;
        }

        if (AntPathMatcher.INSTANCE.match(pattern, target)) {
            return true;
        }

        Pattern p = Pattern.compile(pattern);
        Matcher m = p.matcher(target);

        return m.matches();
    }
}
apache-2.0
chinayin/udpx
java_src/u14/udpx/frames/SYNFrame.java
2057
/* * Simple Reliable UDP (rudp) * Copyright (c) 2009, Adrian Granados (agranados@ihmc.us) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ package u14.udpx.frames; /* * SYN Frame<br/> * [flag|headerLength|seq|ack] */ public class SYNFrame extends Frame { protected SYNFrame() { } public SYNFrame(int seqn) { init(SYN_FLAG, seqn, HEADER_LEN); } @Override public byte[] getBytes() { return sum(super.getBytes()); } public String type() { return "SYN"; } }
apache-2.0
ksluckow/log2model
modeltool/src/main/java/edu/cmu/sv/modelinference/modeltool/mc/prism/PrismModelGeneratorException.java
1115
/**
 * Copyright 2016 Carnegie Mellon University
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package edu.cmu.sv.modelinference.modeltool.mc.prism;

import edu.cmu.sv.modelinference.modeltool.mc.ModelGeneratorException;

/**
 * Exception raised when generation of a Prism model fails.
 *
 * @author Kasper Luckow
 */
public class PrismModelGeneratorException extends ModelGeneratorException {

  private static final long serialVersionUID = 1L;

  /**
   * @param details description of the failure
   */
  public PrismModelGeneratorException(String details) {
    super(details);
  }

  /**
   * @param cause the underlying cause of the failure
   */
  public PrismModelGeneratorException(Throwable cause) {
    super(cause);
  }

  /**
   * @param cause the underlying cause of the failure
   */
  public PrismModelGeneratorException(Exception cause) {
    super(cause);
  }
}
apache-2.0
mbebenita/closure-compiler
test/com/google/javascript/jscomp/ProcessClosurePrimitivesTest.java
42234
/* * Copyright 2006 The Closure Compiler Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.javascript.jscomp; import static com.google.javascript.jscomp.ProcessClosurePrimitives.BASE_CLASS_ERROR; import static com.google.javascript.jscomp.ProcessClosurePrimitives.CLASS_NAMESPACE_ERROR; import static com.google.javascript.jscomp.ProcessClosurePrimitives.CLOSURE_DEFINES_ERROR; import static com.google.javascript.jscomp.ProcessClosurePrimitives.DUPLICATE_NAMESPACE_ERROR; import static com.google.javascript.jscomp.ProcessClosurePrimitives.EXPECTED_OBJECTLIT_ERROR; import static com.google.javascript.jscomp.ProcessClosurePrimitives.FUNCTION_NAMESPACE_ERROR; import static com.google.javascript.jscomp.ProcessClosurePrimitives.GOOG_BASE_CLASS_ERROR; import static com.google.javascript.jscomp.ProcessClosurePrimitives.INVALID_ARGUMENT_ERROR; import static com.google.javascript.jscomp.ProcessClosurePrimitives.INVALID_CLOSURE_CALL_ERROR; import static com.google.javascript.jscomp.ProcessClosurePrimitives.INVALID_CSS_RENAMING_MAP; import static com.google.javascript.jscomp.ProcessClosurePrimitives.INVALID_DEFINE_NAME_ERROR; import static com.google.javascript.jscomp.ProcessClosurePrimitives.INVALID_PROVIDE_ERROR; import static com.google.javascript.jscomp.ProcessClosurePrimitives.INVALID_STYLE_ERROR; import static com.google.javascript.jscomp.ProcessClosurePrimitives.LATE_PROVIDE_ERROR; import static 
com.google.javascript.jscomp.ProcessClosurePrimitives.MISSING_DEFINE_ANNOTATION; import static com.google.javascript.jscomp.ProcessClosurePrimitives.MISSING_PROVIDE_ERROR; import static com.google.javascript.jscomp.ProcessClosurePrimitives.NON_STRING_PASSED_TO_SET_CSS_NAME_MAPPING_ERROR; import static com.google.javascript.jscomp.ProcessClosurePrimitives.NULL_ARGUMENT_ERROR; import static com.google.javascript.jscomp.ProcessClosurePrimitives.TOO_MANY_ARGUMENTS_ERROR; import static com.google.javascript.jscomp.ProcessClosurePrimitives.WEAK_NAMESPACE_TYPE; import static com.google.javascript.jscomp.ProcessClosurePrimitives.XMODULE_REQUIRE_ERROR; import com.google.javascript.jscomp.CompilerOptions.LanguageMode; import com.google.javascript.rhino.Node; /** * Tests for {@link ProcessClosurePrimitives}. * */ public final class ProcessClosurePrimitivesTest extends Es6CompilerTestCase { private String additionalCode; private String additionalEndCode; private boolean addAdditionalNamespace; private boolean preserveGoogRequires; private boolean banGoogBase; public ProcessClosurePrimitivesTest() { enableLineNumberCheck(true); } @Override protected void setUp() { additionalCode = null; additionalEndCode = null; addAdditionalNamespace = false; preserveGoogRequires = false; banGoogBase = false; compareJsDoc = false; } @Override protected CompilerOptions getOptions() { CompilerOptions options = super.getOptions(); if (banGoogBase) { options.setWarningLevel( DiagnosticGroups.USE_OF_GOOG_BASE, CheckLevel.ERROR); } return options; } @Override public CompilerPass getProcessor(final Compiler compiler) { if ((additionalCode == null) && (additionalEndCode == null)) { return new ProcessClosurePrimitives( compiler, null, CheckLevel.ERROR, preserveGoogRequires); } else { return new CompilerPass() { @Override public void process(Node externs, Node root) { // Process the original code. 
new ProcessClosurePrimitives( compiler, null, CheckLevel.OFF, preserveGoogRequires) .process(externs, root); // Inject additional code at the beginning. if (additionalCode != null) { SourceFile file = SourceFile.fromCode("additionalcode", additionalCode); Node scriptNode = root.getFirstChild(); Node newScriptNode = new CompilerInput(file).getAstRoot(compiler); if (addAdditionalNamespace) { newScriptNode.getFirstChild() .putBooleanProp(Node.IS_NAMESPACE, true); } while (newScriptNode.getLastChild() != null) { Node lastChild = newScriptNode.getLastChild(); newScriptNode.removeChild(lastChild); scriptNode.addChildBefore(lastChild, scriptNode.getFirstChild()); } } // Inject additional code at the end. if (additionalEndCode != null) { SourceFile file = SourceFile.fromCode("additionalendcode", additionalEndCode); Node scriptNode = root.getFirstChild(); Node newScriptNode = new CompilerInput(file).getAstRoot(compiler); if (addAdditionalNamespace) { newScriptNode.getFirstChild() .putBooleanProp(Node.IS_NAMESPACE, true); } while (newScriptNode.getFirstChild() != null) { Node firstChild = newScriptNode.getFirstChild(); newScriptNode.removeChild(firstChild); scriptNode.addChildToBack(firstChild); } } // Process the tree a second time. 
new ProcessClosurePrimitives( compiler, null, CheckLevel.ERROR, preserveGoogRequires) .process(externs, root); } }; } } @Override public int getNumRepetitions() { return 1; } public void testSimpleProvides() { test("goog.provide('foo');", "var foo={};"); test("goog.provide('foo.bar');", "var foo={}; foo.bar={};"); test("goog.provide('foo.bar.baz');", "var foo={}; foo.bar={}; foo.bar.baz={};"); test("goog.provide('foo.bar.baz.boo');", "var foo={}; foo.bar={}; foo.bar.baz={}; foo.bar.baz.boo={};"); test("goog.provide('goog.bar');", "goog.bar={};"); // goog is special-cased } public void testMultipleProvides() { test("goog.provide('foo.bar'); goog.provide('foo.baz');", "var foo={}; foo.bar={}; foo.baz={};"); test("goog.provide('foo.bar.baz'); goog.provide('foo.boo.foo');", "var foo={}; foo.bar={}; foo.bar.baz={}; foo.boo={}; foo.boo.foo={};"); test("goog.provide('foo.bar.baz'); goog.provide('foo.bar.boo');", "var foo={}; foo.bar={}; foo.bar.baz={}; foo.bar.boo={};"); test("goog.provide('foo.bar.baz'); goog.provide('goog.bar.boo');", "var foo={}; foo.bar={}; foo.bar.baz={}; goog.bar={}; " + "goog.bar.boo={};"); } public void testRemovalOfProvidedObjLit() { test("goog.provide('foo'); foo = 0;", "var foo = 0;"); test("goog.provide('foo'); foo = {a: 0};", "var foo = {a: 0};"); test("goog.provide('foo'); foo = function(){};", "var foo = function(){};"); test("goog.provide('foo'); var foo = 0;", "var foo = 0;"); test("goog.provide('foo'); var foo = {a: 0};", "var foo = {a: 0};"); test("goog.provide('foo'); var foo = function(){};", "var foo = function(){};"); test("goog.provide('foo.bar.Baz'); foo.bar.Baz=function(){};", "var foo={}; foo.bar={}; foo.bar.Baz=function(){};"); test("goog.provide('foo.bar.moo'); foo.bar.moo={E:1,S:2};", "var foo={}; foo.bar={}; foo.bar.moo={E:1,S:2};"); test("goog.provide('foo.bar.moo'); foo.bar.moo={E:1}; foo.bar.moo={E:2};", "var foo={}; foo.bar={}; foo.bar.moo={E:1}; foo.bar.moo={E:2};"); testEs6("goog.provide('foo'); var foo = class {}", 
"var foo = class {}"); } public void testProvidedDeclaredFunctionError() { testError("goog.provide('foo'); function foo(){}", FUNCTION_NAMESPACE_ERROR); } public void testProvidedDeclaredClassError() { testErrorEs6("goog.provide('foo'); class foo {}", CLASS_NAMESPACE_ERROR); } public void testRemovalMultipleAssignment1() { test("goog.provide('foo'); foo = 0; foo = 1", "var foo = 0; foo = 1;"); } public void testRemovalMultipleAssignment2() { test("goog.provide('foo'); var foo = 0; foo = 1", "var foo = 0; foo = 1;"); } public void testRemovalMultipleAssignment3() { test("goog.provide('foo'); foo = 0; var foo = 1", "foo = 0; var foo = 1;"); } public void testRemovalMultipleAssignment4() { test("goog.provide('foo.bar'); foo.bar = 0; foo.bar = 1", "var foo = {}; foo.bar = 0; foo.bar = 1"); } public void testNoRemovalFunction1() { test("goog.provide('foo'); function f(){foo = 0}", "var foo = {}; function f(){foo = 0}"); } public void testNoRemovalFunction2() { test("goog.provide('foo'); function f(){var foo = 0}", "var foo = {}; function f(){var foo = 0}"); } public void testRemovalMultipleAssignmentInIf1() { test("goog.provide('foo'); if (true) { var foo = 0 } else { foo = 1 }", "if (true) { var foo = 0 } else { foo = 1 }"); } public void testRemovalMultipleAssignmentInIf2() { test("goog.provide('foo'); if (true) { foo = 0 } else { var foo = 1 }", "if (true) { foo = 0 } else { var foo = 1 }"); } public void testRemovalMultipleAssignmentInIf3() { test("goog.provide('foo'); if (true) { foo = 0 } else { foo = 1 }", "if (true) { var foo = 0 } else { foo = 1 }"); } public void testRemovalMultipleAssignmentInIf4() { test("goog.provide('foo.bar');" + "if (true) { foo.bar = 0 } else { foo.bar = 1 }", "var foo = {}; if (true) { foo.bar = 0 } else { foo.bar = 1 }"); } public void testMultipleDeclarationError1() { String rest = "if (true) { foo.bar = 0 } else { foo.bar = 1 }"; test("goog.provide('foo.bar');" + "var foo = {};" + rest, "var foo = {};" + "var foo = {};" + rest); } 
public void testMultipleDeclarationError2() { test( LINE_JOINER.join( "goog.provide('foo.bar');", "if (true) { var foo = {}; foo.bar = 0 } else { foo.bar = 1 }"), LINE_JOINER.join( "var foo = {};", "if (true) {", " var foo = {}; foo.bar = 0", "} else {", " foo.bar = 1", "}")); } public void testMultipleDeclarationError3() { test( LINE_JOINER.join( "goog.provide('foo.bar');", "if (true) { foo.bar = 0 } else { var foo = {}; foo.bar = 1 }"), LINE_JOINER.join( "var foo = {};", "if (true) {", " foo.bar = 0", "} else {", " var foo = {}; foo.bar = 1", "}")); } public void testProvideAfterDeclarationError() { test("var x = 42; goog.provide('x');", "var x = 42; var x = {}"); } public void testProvideErrorCases() { testError("goog.provide();", NULL_ARGUMENT_ERROR); testError("goog.provide(5);", INVALID_ARGUMENT_ERROR); testError("goog.provide([]);", INVALID_ARGUMENT_ERROR); testError("goog.provide({});", INVALID_ARGUMENT_ERROR); testError("goog.provide('foo', 'bar');", TOO_MANY_ARGUMENTS_ERROR); testError("goog.provide('foo'); goog.provide('foo');", DUPLICATE_NAMESPACE_ERROR); testError("goog.provide('foo.bar'); goog.provide('foo'); goog.provide('foo');", DUPLICATE_NAMESPACE_ERROR); testErrorEs6("goog.provide(`template`);", INVALID_ARGUMENT_ERROR); testErrorEs6("goog.provide(tagged`template`);", INVALID_ARGUMENT_ERROR); testErrorEs6("goog.provide(`${template}Sub`);", INVALID_ARGUMENT_ERROR); } public void testProvideErrorCases2() { test("goog.provide('foo'); /** @type {Object} */ var foo = {};", "var foo={};", null, WEAK_NAMESPACE_TYPE); test("goog.provide('foo'); /** @type {!Object} */ var foo = {};", "var foo={};", null, WEAK_NAMESPACE_TYPE); test("goog.provide('foo.bar'); /** @type {Object} */ foo.bar = {};", "var foo={};foo.bar={};", null, WEAK_NAMESPACE_TYPE); test("goog.provide('foo.bar'); /** @type {!Object} */ foo.bar = {};", "var foo={};foo.bar={};", null, WEAK_NAMESPACE_TYPE); test("goog.provide('foo'); /** @type {Object.<string>} */ var foo = {};", "var foo={};"); 
} public void testProvideValidObjectType() { test("goog.provide('foo'); /** @type {Object.<string>} */ var foo = {};", "var foo={};"); } public void testRemovalOfRequires() { test("goog.provide('foo'); goog.require('foo');", "var foo={};"); test("goog.provide('foo.bar'); goog.require('foo.bar');", "var foo={}; foo.bar={};"); test("goog.provide('foo.bar.baz'); goog.require('foo.bar.baz');", "var foo={}; foo.bar={}; foo.bar.baz={};"); test("goog.provide('foo'); var x = 3; goog.require('foo'); something();", "var foo={}; var x = 3; something();"); testSame("foo.require('foo.bar');"); } public void testPreserveGoogRequires() { preserveGoogRequires = true; test("goog.provide('foo'); goog.require('foo');", "var foo={}; goog.require('foo');"); test("goog.provide('foo'); goog.require('foo'); var a = {};", "var foo = {}; goog.require('foo'); var a = {};"); } public void testRequireErrorCases() { testError("goog.require();", NULL_ARGUMENT_ERROR); testError("goog.require(5);", INVALID_ARGUMENT_ERROR); testError("goog.require([]);", INVALID_ARGUMENT_ERROR); testError("goog.require({});", INVALID_ARGUMENT_ERROR); testErrorEs6("goog.require(`template`);", INVALID_ARGUMENT_ERROR); testErrorEs6("goog.require(tagged`template`);", INVALID_ARGUMENT_ERROR); testErrorEs6("goog.require(`${template}Sub`);", INVALID_ARGUMENT_ERROR); } public void testLateProvides() { testError("goog.require('foo'); goog.provide('foo');", LATE_PROVIDE_ERROR); testError("goog.require('foo.bar'); goog.provide('foo.bar');", LATE_PROVIDE_ERROR); testError("goog.provide('foo.bar'); goog.require('foo'); goog.provide('foo');", LATE_PROVIDE_ERROR); } public void testMissingProvides() { testError("goog.require('foo');", MISSING_PROVIDE_ERROR); testError("goog.provide('foo'); goog.require('Foo');", MISSING_PROVIDE_ERROR); testError("goog.provide('foo'); goog.require('foo.bar');", MISSING_PROVIDE_ERROR); testError("goog.provide('foo'); var EXPERIMENT_FOO = true; " + "if (EXPERIMENT_FOO) {goog.require('foo.bar');}", 
MISSING_PROVIDE_ERROR); } public void testAddDependency() { test("goog.addDependency('x.js', ['A', 'B'], []);", "0"); Compiler compiler = getLastCompiler(); assertTrue(compiler.getTypeRegistry().isForwardDeclaredType("A")); assertTrue(compiler.getTypeRegistry().isForwardDeclaredType("B")); assertFalse(compiler.getTypeRegistry().isForwardDeclaredType("C")); } public void testForwardDeclarations() { test("goog.forwardDeclare('A.B')", ""); Compiler compiler = getLastCompiler(); assertTrue(compiler.getTypeRegistry().isForwardDeclaredType("A.B")); assertFalse(compiler.getTypeRegistry().isForwardDeclaredType("C.D")); testError("goog.forwardDeclare();", ProcessClosurePrimitives.INVALID_FORWARD_DECLARE); testError("goog.forwardDeclare('A.B', 'C.D');", ProcessClosurePrimitives.INVALID_FORWARD_DECLARE); testErrorEs6("goog.forwardDeclare(`template`);", ProcessClosurePrimitives.INVALID_FORWARD_DECLARE); testErrorEs6("goog.forwardDeclare(`${template}Sub`);", ProcessClosurePrimitives.INVALID_FORWARD_DECLARE); } public void testValidSetCssNameMapping() { test("goog.setCssNameMapping({foo:'bar',\"biz\":'baz'});", ""); CssRenamingMap map = getLastCompiler().getCssRenamingMap(); assertNotNull(map); assertEquals("bar", map.get("foo")); assertEquals("baz", map.get("biz")); } public void testValidSetCssNameMappingWithType() { test("goog.setCssNameMapping({foo:'bar',\"biz\":'baz'}, 'BY_PART');", ""); CssRenamingMap map = getLastCompiler().getCssRenamingMap(); assertNotNull(map); assertEquals("bar", map.get("foo")); assertEquals("baz", map.get("biz")); test("goog.setCssNameMapping({foo:'bar',biz:'baz','biz-foo':'baz-bar'}," + " 'BY_WHOLE');", ""); map = getLastCompiler().getCssRenamingMap(); assertNotNull(map); assertEquals("bar", map.get("foo")); assertEquals("baz", map.get("biz")); assertEquals("baz-bar", map.get("biz-foo")); } public void testSetCssNameMappingByShortHand() { testErrorEs6("goog.setCssNameMapping({shortHandFirst, shortHandSecond});", 
NON_STRING_PASSED_TO_SET_CSS_NAME_MAPPING_ERROR); } public void testSetCssNameMappingByTemplate() { testErrorEs6("goog.setCssNameMapping({foo: `bar`});", NON_STRING_PASSED_TO_SET_CSS_NAME_MAPPING_ERROR); testErrorEs6("goog.setCssNameMapping({foo: `${vari}bar`});", NON_STRING_PASSED_TO_SET_CSS_NAME_MAPPING_ERROR); } public void testSetCssNameMappingNonStringValueReturnsError() { // Make sure the argument is an object literal. testError("var BAR = {foo:'bar'}; goog.setCssNameMapping(BAR);", EXPECTED_OBJECTLIT_ERROR); testError("goog.setCssNameMapping([]);", EXPECTED_OBJECTLIT_ERROR); testError("goog.setCssNameMapping(false);", EXPECTED_OBJECTLIT_ERROR); testError("goog.setCssNameMapping(null);", EXPECTED_OBJECTLIT_ERROR); testError("goog.setCssNameMapping(undefined);", EXPECTED_OBJECTLIT_ERROR); // Make sure all values of the object literal are string literals. testError("var BAR = 'bar'; goog.setCssNameMapping({foo:BAR});", NON_STRING_PASSED_TO_SET_CSS_NAME_MAPPING_ERROR); testError("goog.setCssNameMapping({foo:6});", NON_STRING_PASSED_TO_SET_CSS_NAME_MAPPING_ERROR); testError("goog.setCssNameMapping({foo:false});", NON_STRING_PASSED_TO_SET_CSS_NAME_MAPPING_ERROR); testError("goog.setCssNameMapping({foo:null});", NON_STRING_PASSED_TO_SET_CSS_NAME_MAPPING_ERROR); testError("goog.setCssNameMapping({foo:undefined});", NON_STRING_PASSED_TO_SET_CSS_NAME_MAPPING_ERROR); } public void testSetCssNameMappingValidity() { // Make sure that the keys don't have -'s test("goog.setCssNameMapping({'a': 'b', 'a-a': 'c'})", "", null, INVALID_CSS_RENAMING_MAP); // In full mode, we check that map(a-b)=map(a)-map(b) test("goog.setCssNameMapping({'a': 'b', 'a-a': 'c'}, 'BY_WHOLE')", "", null, INVALID_CSS_RENAMING_MAP); // Unknown mapping type testError("goog.setCssNameMapping({foo:'bar'}, 'UNKNOWN');", INVALID_STYLE_ERROR); } public void testBadCrossModuleRequire() { test( createModuleStar( "", "goog.provide('goog.ui');", "goog.require('goog.ui');"), new String[] { "", "goog.ui = {};", 
"" }, null, XMODULE_REQUIRE_ERROR); } public void testGoodCrossModuleRequire1() { test( createModuleStar( "goog.provide('goog.ui');", "", "goog.require('goog.ui');"), new String[] { "goog.ui = {};", "", "", }); } public void testGoodCrossModuleRequire2() { test( createModuleStar( "", "", "goog.provide('goog.ui'); goog.require('goog.ui');"), new String[] { "", "", "goog.ui = {};", }); } // Tests providing additional code with non-overlapping var namespace. public void testSimpleAdditionalProvide() { additionalCode = "goog.provide('b.B'); b.B = {};"; test("goog.provide('a.A'); a.A = {};", "var b={};b.B={};var a={};a.A={};"); } // Same as above, but with the additional code added after the original. public void testSimpleAdditionalProvideAtEnd() { additionalEndCode = "goog.provide('b.B'); b.B = {};"; test("goog.provide('a.A'); a.A = {};", "var a={};a.A={};var b={};b.B={};"); } // Tests providing additional code with non-overlapping dotted namespace. public void testSimpleDottedAdditionalProvide() { additionalCode = "goog.provide('a.b.B'); a.b.B = {};"; test("goog.provide('c.d.D'); c.d.D = {};", "var a={};a.b={};a.b.B={};var c={};c.d={};c.d.D={};"); } // Tests providing additional code with overlapping var namespace. public void testOverlappingAdditionalProvide() { additionalCode = "goog.provide('a.B'); a.B = {};"; test("goog.provide('a.A'); a.A = {};", "var a={};a.B={};a.A={};"); } // Tests providing additional code with overlapping var namespace. public void testOverlappingAdditionalProvideAtEnd() { additionalEndCode = "goog.provide('a.B'); a.B = {};"; test("goog.provide('a.A'); a.A = {};", "var a={};a.A={};a.B={};"); } // Tests providing additional code with overlapping dotted namespace. public void testOverlappingDottedAdditionalProvide() { additionalCode = "goog.provide('a.b.B'); a.b.B = {};"; test("goog.provide('a.b.C'); a.b.C = {};", "var a={};a.b={};a.b.B={};a.b.C={};"); } // Tests that a require of additional code generates no error. 
public void testRequireOfAdditionalProvide() { additionalCode = "goog.provide('b.B'); b.B = {};"; test("goog.require('b.B'); goog.provide('a.A'); a.A = {};", "var b={};b.B={};var a={};a.A={};"); } // Tests that a require not in additional code generates (only) one error. public void testMissingRequireWithAdditionalProvide() { additionalCode = "goog.provide('b.B'); b.B = {};"; testError("goog.require('b.C'); goog.provide('a.A'); a.A = {};", MISSING_PROVIDE_ERROR); } // Tests that a require in additional code generates no error. public void testLateRequire() { additionalEndCode = "goog.require('a.A');"; test("goog.provide('a.A'); a.A = {};", "var a={};a.A={};"); } // Tests a case where code is reordered after processing provides and then // provides are processed again. public void testReorderedProvides() { additionalCode = "a.B = {};"; // as if a.B was after a.A originally addAdditionalNamespace = true; test("goog.provide('a.A'); a.A = {};", "var a={};a.B={};a.A={};"); } // Another version of above. public void testReorderedProvides2() { additionalEndCode = "a.B = {};"; addAdditionalNamespace = true; test("goog.provide('a.A'); a.A = {};", "var a={};a.A={};a.B={};"); } // Provide a name before the definition of the class providing the // parent namespace. public void testProvideOrder1() { additionalEndCode = ""; addAdditionalNamespace = false; // TODO(johnlenz): This test confirms that the constructor (a.b) isn't // improperly removed, but this result isn't really what we want as the // reassign of a.b removes the definition of "a.b.c". test( LINE_JOINER.join( "goog.provide('a.b');", "goog.provide('a.b.c');", "a.b.c;", "a.b = function(x,y) {};"), LINE_JOINER.join( "var a = {};", "a.b = {};", "a.b.c = {};", "a.b.c;", "a.b = function(x,y) {};")); } // Provide a name after the definition of the class providing the // parent namespace. 
public void testProvideOrder2() { additionalEndCode = ""; addAdditionalNamespace = false; // TODO(johnlenz): This test confirms that the constructor (a.b) isn't // improperly removed, but this result isn't really what we want as // namespace placeholders for a.b and a.b.c remain. test( LINE_JOINER.join( "goog.provide('a.b');", "goog.provide('a.b.c');", "a.b = function(x,y) {};", "a.b.c;"), LINE_JOINER.join( "var a = {};", "a.b = {};", "a.b.c = {};", "a.b = function(x,y) {};", "a.b.c;")); } // Provide a name after the definition of the class providing the // parent namespace. public void testProvideOrder3a() { test( LINE_JOINER.join( "goog.provide('a.b');", "a.b = function(x,y) {};", "goog.provide('a.b.c');", "a.b.c;"), LINE_JOINER.join( "var a = {};", "a.b = function(x,y) {};", "a.b.c = {};", "a.b.c;")); } public void testProvideOrder3b() { additionalEndCode = ""; addAdditionalNamespace = false; // This tests a cleanly provided name, below a function namespace. test( LINE_JOINER.join( "goog.provide('a.b');", "a.b = function(x,y) {};", "goog.provide('a.b.c');", "a.b.c;"), LINE_JOINER.join( "var a = {};", "a.b = function(x,y) {};", "a.b.c = {};", "a.b.c;")); } public void testProvideOrder4a() { test( LINE_JOINER.join( "goog.provide('goog.a');", "goog.provide('goog.a.b');", "if (x) {", " goog.a.b = 1;", "} else {", " goog.a.b = 2;", "}"), LINE_JOINER.join( "goog.a={};", "if(x)", " goog.a.b=1;", "else", " goog.a.b=2;")); } public void testProvideOrder4b() { additionalEndCode = ""; addAdditionalNamespace = false; // This tests a cleanly provided name, below a namespace. 
test( LINE_JOINER.join( "goog.provide('goog.a');", "goog.provide('goog.a.b');", "if (x) {", " goog.a.b = 1;", "} else {", " goog.a.b = 2;", "}"), LINE_JOINER.join( "goog.a={};", "if(x)", " goog.a.b=1;", "else", " goog.a.b=2;")); } public void testInvalidProvide() { test("goog.provide('a.class');", "var a = {}; a.class = {};"); testError("goog.provide('class.a');", INVALID_PROVIDE_ERROR); testError("goog.provide('a.class');", INVALID_PROVIDE_ERROR, LanguageMode.ECMASCRIPT3); testError("goog.provide('class.a');", INVALID_PROVIDE_ERROR, LanguageMode.ECMASCRIPT3); } public void testInvalidRequire() { test("goog.provide('a.b'); goog.require('a.b');", "var a = {}; a.b = {};"); testError("goog.provide('a.b'); var x = x || goog.require('a.b');", INVALID_CLOSURE_CALL_ERROR); testError("goog.provide('a.b'); x = goog.require('a.b');", INVALID_CLOSURE_CALL_ERROR); testError("goog.provide('a.b'); function f() { goog.require('a.b'); }", INVALID_CLOSURE_CALL_ERROR); } public void testValidGoogMethod() { testSame("function f() { goog.isDef('a.b'); }"); testSame("function f() { goog.inherits(a, b); }"); testSame("function f() { goog.exportSymbol(a, b); }"); test("function f() { goog.setCssNameMapping({}); }", "function f() {}"); testSame("x || goog.isDef('a.b');"); testSame("x || goog.inherits(a, b);"); testSame("x || goog.exportSymbol(a, b);"); testSame("x || void 0"); } private static final String METHOD_FORMAT = "function Foo() {} Foo.prototype.method = function() { %s };"; private static final String FOO_INHERITS = "goog.inherits(Foo, BaseFoo);"; public void testInvalidGoogBase1() { testError("goog.base(this, 'method');", GOOG_BASE_CLASS_ERROR); } public void testInvalidGoogBase2() { testError("function Foo() {}" + "Foo.method = function() {" + " goog.base(this, 'method');" + "};", GOOG_BASE_CLASS_ERROR); } public void testInvalidGoogBase3() { testError(String.format(METHOD_FORMAT, "goog.base();"), GOOG_BASE_CLASS_ERROR); } public void testInvalidGoogBase4() { 
testError(String.format(METHOD_FORMAT, "goog.base(this, 'bar');"), GOOG_BASE_CLASS_ERROR); } public void testInvalidGoogBase5() { testError(String.format(METHOD_FORMAT, "goog.base('foo', 'method');"), GOOG_BASE_CLASS_ERROR); } public void testInvalidGoogBase6() { testError(String.format(METHOD_FORMAT, "goog.base.call(null, this, 'method');"), GOOG_BASE_CLASS_ERROR); } public void testInvalidGoogBase6b() { testError(String.format(METHOD_FORMAT, "goog.base.call(this, 'method');"), GOOG_BASE_CLASS_ERROR); } public void testInvalidGoogBase7() { testError("function Foo() { goog.base(this); }", GOOG_BASE_CLASS_ERROR); } public void testInvalidGoogBase8() { testError("var Foo = function() { goog.base(this); }", GOOG_BASE_CLASS_ERROR); } public void testInvalidGoogBase9() { testError("var goog = {}; goog.Foo = function() { goog.base(this); }", GOOG_BASE_CLASS_ERROR); } public void testInvalidGoogBase10() { testErrorEs6("class Foo extends BaseFoo { constructor() { goog.base(this); } }", GOOG_BASE_CLASS_ERROR); } public void testInvalidGoogBase11() { testErrorEs6("class Foo extends BaseFoo { someMethod() { goog.base(this, 'someMethod'); } }", GOOG_BASE_CLASS_ERROR); } public void testValidGoogBase1() { test(String.format(METHOD_FORMAT, "goog.base(this, 'method');"), String.format(METHOD_FORMAT, "Foo.superClass_.method.call(this)")); } public void testValidGoogBase2() { test(String.format(METHOD_FORMAT, "goog.base(this, 'method', 1, 2);"), String.format(METHOD_FORMAT, "Foo.superClass_.method.call(this, 1, 2)")); } public void testValidGoogBase3() { test(String.format(METHOD_FORMAT, "return goog.base(this, 'method');"), String.format(METHOD_FORMAT, "return Foo.superClass_.method.call(this)")); } public void testValidGoogBase4() { test("function Foo() { goog.base(this, 1, 2); }" + FOO_INHERITS, "function Foo() { BaseFoo.call(this, 1, 2); } " + FOO_INHERITS); } public void testValidGoogBase5() { test("var Foo = function() { goog.base(this, 1); };" + FOO_INHERITS, "var Foo = 
function() { BaseFoo.call(this, 1); }; " + FOO_INHERITS); } public void testValidGoogBase6() { test("var goog = {}; goog.Foo = function() { goog.base(this); }; " + "goog.inherits(goog.Foo, goog.BaseFoo);", "var goog = {}; goog.Foo = function() { goog.BaseFoo.call(this); }; " + "goog.inherits(goog.Foo, goog.BaseFoo);"); } public void testBanGoogBase() { banGoogBase = true; testError( "function Foo() { goog.base(this, 1, 2); }" + FOO_INHERITS, ProcessClosurePrimitives.USE_OF_GOOG_BASE); } public void testInvalidBase1() { testError( "var Foo = function() {};" + FOO_INHERITS + "Foo.base(this, 'method');", BASE_CLASS_ERROR); } public void testInvalidBase2() { testError("function Foo() {}" + FOO_INHERITS + "Foo.method = function() {" + " Foo.base(this, 'method');" + "};", BASE_CLASS_ERROR); } public void testInvalidBase3() { testError(String.format(FOO_INHERITS + METHOD_FORMAT, "Foo.base();"), BASE_CLASS_ERROR); } public void testInvalidBase4() { testError(String.format(FOO_INHERITS + METHOD_FORMAT, "Foo.base(this, 'bar');"), BASE_CLASS_ERROR); } public void testInvalidBase5() { testError(String.format(FOO_INHERITS + METHOD_FORMAT, "Foo.base('foo', 'method');"), BASE_CLASS_ERROR); } public void testInvalidBase7() { testError("function Foo() { Foo.base(this); };" + FOO_INHERITS, BASE_CLASS_ERROR); } public void testInvalidBase8() { testError("var Foo = function() { Foo.base(this); };" + FOO_INHERITS, BASE_CLASS_ERROR); } public void testInvalidBase9() { testError("var goog = {}; goog.Foo = function() { goog.Foo.base(this); };" + FOO_INHERITS, BASE_CLASS_ERROR); } public void testInvalidBase10() { testError("function Foo() { Foo.base(this); }" + FOO_INHERITS, BASE_CLASS_ERROR); } public void testInvalidBase11() { testError("function Foo() { Foo.base(this, 'method'); }" + FOO_INHERITS, BASE_CLASS_ERROR); } public void testInvalidBase12() { testError("function Foo() { Foo.base(this, 1, 2); }" + FOO_INHERITS, BASE_CLASS_ERROR); } public void testInvalidBase13() { testError( 
"function Bar(){ Bar.base(this, 'constructor'); }" + "goog.inherits(Bar, Goo);" + "function Foo(){ Bar.base(this, 'constructor'); }" + FOO_INHERITS, BASE_CLASS_ERROR); } public void testInvalidGoogBase14() { testErrorEs6("class Foo extends BaseFoo { constructor() { Foo.base(this); } }", GOOG_BASE_CLASS_ERROR); } public void testInvalidGoogBase14b() { testErrorEs6("class Foo extends BaseFoo { method() { Foo.base(this, 'method'); } }", GOOG_BASE_CLASS_ERROR); } public void testValidBase1() { test(FOO_INHERITS + String.format(METHOD_FORMAT, "Foo.base(this, 'method');"), FOO_INHERITS + String.format(METHOD_FORMAT, "Foo.superClass_.method.call(this)")); } public void testValidBase2() { test(FOO_INHERITS + String.format(METHOD_FORMAT, "Foo.base(this, 'method', 1, 2);"), FOO_INHERITS + String.format(METHOD_FORMAT, "Foo.superClass_.method.call(this, 1, 2)")); } public void testValidBase3() { test(FOO_INHERITS + String.format(METHOD_FORMAT, "return Foo.base(this, 'method');"), FOO_INHERITS + String.format(METHOD_FORMAT, "return Foo.superClass_.method.call(this)")); } public void testValidBase4() { test("function Foo() { Foo.base(this, 'constructor', 1, 2); }" + FOO_INHERITS, "function Foo() { BaseFoo.call(this, 1, 2); } " + FOO_INHERITS); } public void testValidBase5() { test("var Foo = function() { Foo.base(this, 'constructor', 1); };" + FOO_INHERITS, "var Foo = function() { BaseFoo.call(this, 1); }; " + FOO_INHERITS); } public void testValidBase6() { test("var goog = {}; goog.Foo = function() {" + "goog.Foo.base(this, 'constructor'); }; " + "goog.inherits(goog.Foo, goog.BaseFoo);", "var goog = {}; goog.Foo = function() { goog.BaseFoo.call(this); }; " + "goog.inherits(goog.Foo, goog.BaseFoo);"); } public void testValidBase7() { // No goog.inherits, so this is probably a different 'base' function. 
testSame("" + "var a = function() {" + " a.base(this, 'constructor');" + "};"); } public void testImplicitAndExplicitProvide() { test("var goog = {}; " + "goog.provide('goog.foo.bar'); goog.provide('goog.foo');", "var goog = {}; goog.foo = {}; goog.foo.bar = {};"); } public void testImplicitProvideInIndependentModules() { testModule( new String[] { "", "goog.provide('apps.A');", "goog.provide('apps.B');" }, new String[] { "var apps = {};", "apps.A = {};", "apps.B = {};", }); } public void testImplicitProvideInIndependentModules2() { testModule( new String[] { "goog.provide('apps');", "goog.provide('apps.foo.A');", "goog.provide('apps.foo.B');" }, new String[] { "var apps = {}; apps.foo = {};", "apps.foo.A = {};", "apps.foo.B = {};", }); } public void testImplicitProvideInIndependentModules3() { testModule( new String[] { "var goog = {};", "goog.provide('goog.foo.A');", "goog.provide('goog.foo.B');" }, new String[] { "var goog = {}; goog.foo = {};", "goog.foo.A = {};", "goog.foo.B = {};", }); } public void testProvideInIndependentModules1() { testModule( new String[] { "goog.provide('apps');", "goog.provide('apps.foo');", "goog.provide('apps.foo.B');" }, new String[] { "var apps = {}; apps.foo = {};", "", "apps.foo.B = {};", }); } public void testProvideInIndependentModules2() { // TODO(nicksantos): Make this an error. testModule( new String[] { "goog.provide('apps');", "goog.provide('apps.foo'); apps.foo = {};", "goog.provide('apps.foo.B');" }, new String[] { "var apps = {};", "apps.foo = {};", "apps.foo.B = {};", }); } public void testProvideInIndependentModules2b() { // TODO(nicksantos): Make this an error. 
testModule( new String[] { "goog.provide('apps');", "goog.provide('apps.foo'); apps.foo = function() {};", "goog.provide('apps.foo.B');" }, new String[] { "var apps = {};", "apps.foo = function() {};", "apps.foo.B = {};", }); } public void testProvideInIndependentModules3() { testModule( new String[] { "goog.provide('apps');", "goog.provide('apps.foo.B');", "goog.provide('apps.foo'); goog.require('apps.foo');" }, new String[] { "var apps = {}; apps.foo = {};", "apps.foo.B = {};", "", }); } public void testProvideInIndependentModules3b() { // TODO(nicksantos): Make this an error. testModule( new String[] { "goog.provide('apps');", "goog.provide('apps.foo.B');", "goog.provide('apps.foo'); apps.foo = function() {}; " + "goog.require('apps.foo');" }, new String[] { "var apps = {};", "apps.foo.B = {};", "apps.foo = function() {};", }); } public void testProvideInIndependentModules4() { // Regression test for bug 261: // http://code.google.com/p/closure-compiler/issues/detail?id=261 testModule( new String[] { "goog.provide('apps');", "goog.provide('apps.foo.bar.B');", "goog.provide('apps.foo.bar.C');" }, new String[] { "var apps = {};apps.foo = {};apps.foo.bar = {}", "apps.foo.bar.B = {};", "apps.foo.bar.C = {};", }); } public void testRequireOfBaseGoog() { testError("goog.require('goog');", MISSING_PROVIDE_ERROR); } public void testSourcePositionPreservation() { test("goog.provide('foo.bar.baz');", "var foo = {};" + "foo.bar = {};" + "foo.bar.baz = {};"); Node root = getLastCompiler().getRoot(); Node fooDecl = findQualifiedNameNode("foo", root); Node fooBarDecl = findQualifiedNameNode("foo.bar", root); Node fooBarBazDecl = findQualifiedNameNode("foo.bar.baz", root); assertEquals(1, fooDecl.getLineno()); assertEquals(14, fooDecl.getCharno()); assertEquals(1, fooBarDecl.getLineno()); assertEquals(18, fooBarDecl.getCharno()); assertEquals(1, fooBarBazDecl.getLineno()); assertEquals(22, fooBarBazDecl.getCharno()); } public void testNoStubForProvidedTypedef() { 
test("goog.provide('x'); /** @typedef {number} */ var x;", "var x;"); } public void testNoStubForProvidedTypedef2() { test("goog.provide('x.y'); /** @typedef {number} */ x.y;", "var x = {}; x.y;"); } public void testNoStubForProvidedTypedef4() { test("goog.provide('x.y.z'); /** @typedef {number} */ x.y.z;", "var x = {}; x.y = {}; x.y.z;"); } public void testProvideRequireSameFile() { test("goog.provide('x');\ngoog.require('x');", "var x = {};"); } public void testDefineCases() { String jsdoc = "/** @define {number} */\n"; test(jsdoc + "goog.define('name', 1);", jsdoc + "var name = 1"); test(jsdoc + "goog.define('ns.name', 1);", jsdoc + "ns.name = 1"); } public void testDefineErrorCases() { String jsdoc = "/** @define {number} */\n"; testError("goog.define('name', 1);", MISSING_DEFINE_ANNOTATION); testError(jsdoc + "goog.define('name.2', 1);", INVALID_DEFINE_NAME_ERROR); testError(jsdoc + "goog.define();", NULL_ARGUMENT_ERROR); testError(jsdoc + "goog.define('value');", NULL_ARGUMENT_ERROR); testError(jsdoc + "goog.define(5);", INVALID_ARGUMENT_ERROR); testErrorEs6(jsdoc + "goog.define(`templateName`, 1);", INVALID_ARGUMENT_ERROR); testErrorEs6(jsdoc + "goog.define(`${template}Name`, 1);", INVALID_ARGUMENT_ERROR); } public void testDefineValues() { testSame("var CLOSURE_DEFINES = {'FOO': 'string'};"); testSame("var CLOSURE_DEFINES = {'FOO': true};"); testSame("var CLOSURE_DEFINES = {'FOO': false};"); testSame("var CLOSURE_DEFINES = {'FOO': 1};"); testSame("var CLOSURE_DEFINES = {'FOO': 0xABCD};"); testSame("var CLOSURE_DEFINES = {'FOO': -1};"); } public void testDefineValuesErrors() { testError("var CLOSURE_DEFINES = {'FOO': a};", CLOSURE_DEFINES_ERROR); testError("var CLOSURE_DEFINES = {'FOO': 0+1};", CLOSURE_DEFINES_ERROR); testError("var CLOSURE_DEFINES = {'FOO': 'value' + 'value'};", CLOSURE_DEFINES_ERROR); testError("var CLOSURE_DEFINES = {'FOO': !true};", CLOSURE_DEFINES_ERROR); testError("var CLOSURE_DEFINES = {'FOO': -true};", CLOSURE_DEFINES_ERROR); 
testErrorEs6("var CLOSURE_DEFINES = {SHORTHAND};", CLOSURE_DEFINES_ERROR); testErrorEs6("var CLOSURE_DEFINES = {'TEMPLATE': `template`};", CLOSURE_DEFINES_ERROR); testErrorEs6("var CLOSURE_DEFINES = {'TEMPLATE': `${template}Sub`};", CLOSURE_DEFINES_ERROR); } }
apache-2.0
xorware/android_frameworks_base
media/java/android/media/midi/MidiDeviceServer.java
13922
/* * Copyright (C) 2014 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package android.media.midi; import android.os.Binder; import android.os.IBinder; import android.os.ParcelFileDescriptor; import android.os.Process; import android.os.RemoteException; import android.system.OsConstants; import android.util.Log; import com.android.internal.midi.MidiDispatcher; import dalvik.system.CloseGuard; import libcore.io.IoUtils; import java.io.Closeable; import java.io.IOException; import java.util.HashMap; import java.util.concurrent.CopyOnWriteArrayList; /** * Internal class used for providing an implementation for a MIDI device. 
 *
 * @hide
 */
public final class MidiDeviceServer implements Closeable {
    private static final String TAG = "MidiDeviceServer";

    private final IMidiManager mMidiManager;

    // MidiDeviceInfo for the device implemented by this server
    private MidiDeviceInfo mDeviceInfo;
    private final int mInputPortCount;
    private final int mOutputPortCount;

    // MidiReceivers for receiving data on our input ports
    private final MidiReceiver[] mInputPortReceivers;

    // MidiDispatchers for sending data on our output ports
    // NOTE(review): only assigned in the constructor — presumably could be final; confirm.
    private MidiDispatcher[] mOutputPortDispatchers;

    // MidiOutputPorts for clients connected to our input ports
    // (also used as the lock guarding per-input-port open state)
    private final MidiOutputPort[] mInputPortOutputPorts;

    // List of all MidiInputPorts we created
    private final CopyOnWriteArrayList<MidiInputPort> mInputPorts
            = new CopyOnWriteArrayList<MidiInputPort>();

    // for reporting device status
    private final boolean[] mInputPortOpen;
    private final int[] mOutputPortOpenCount;

    // warns if close() is never called before finalization
    private final CloseGuard mGuard = CloseGuard.get();
    private boolean mIsClosed;

    private final Callback mCallback;

    public interface Callback {
        /**
         * Called to notify when our device status has changed
         * @param server the {@link MidiDeviceServer} that changed
         * @param status the {@link MidiDeviceStatus} for the device
         */
        public void onDeviceStatusChanged(MidiDeviceServer server, MidiDeviceStatus status);

        /**
         * Called to notify when the device is closed
         */
        public void onClose();
    }

    // Base class tying a connected client to its Binder token: when the client's
    // process dies, binderDied() tears down the associated port.
    abstract private class PortClient implements IBinder.DeathRecipient {
        final IBinder mToken;

        PortClient(IBinder token) {
            mToken = token;

            try {
                token.linkToDeath(this, 0);
            } catch (RemoteException e) {
                // client already dead — release the port immediately
                close();
            }
        }

        abstract void close();

        @Override
        public void binderDied() {
            close();
        }
    }

    // Tracks a client connected to one of OUR input ports via a MidiOutputPort.
    private class InputPortClient extends PortClient {
        private final MidiOutputPort mOutputPort;

        InputPortClient(IBinder token, MidiOutputPort outputPort) {
            super(token);
            mOutputPort = outputPort;
        }

        @Override
        void close() {
            mToken.unlinkToDeath(this, 0);
            synchronized (mInputPortOutputPorts) {
                int portNumber = mOutputPort.getPortNumber();
                mInputPortOutputPorts[portNumber] = null;
                mInputPortOpen[portNumber] = false;
                updateDeviceStatus();
            }
            IoUtils.closeQuietly(mOutputPort);
        }
    }

    // Tracks a client connected to one of OUR output ports via a MidiInputPort.
    private class OutputPortClient extends PortClient {
        private final MidiInputPort mInputPort;

        OutputPortClient(IBinder token, MidiInputPort inputPort) {
            super(token);
            mInputPort = inputPort;
        }

        @Override
        void close() {
            mToken.unlinkToDeath(this, 0);
            int portNumber = mInputPort.getPortNumber();
            MidiDispatcher dispatcher = mOutputPortDispatchers[portNumber];
            synchronized (dispatcher) {
                dispatcher.getSender().disconnect(mInputPort);
                int openCount = dispatcher.getReceiverCount();
                mOutputPortOpenCount[portNumber] = openCount;
                updateDeviceStatus();
            }
            mInputPorts.remove(mInputPort);
            IoUtils.closeQuietly(mInputPort);
        }
    }

    private final HashMap<IBinder, PortClient> mPortClients = new HashMap<IBinder, PortClient>();

    // Binder interface stub for receiving connection requests from clients
    private final IMidiDeviceServer mServer = new IMidiDeviceServer.Stub() {

        @Override
        public ParcelFileDescriptor openInputPort(IBinder token, int portNumber) {
            // private devices may only be opened from within our own UID
            if (mDeviceInfo.isPrivate()) {
                if (Binder.getCallingUid() != Process.myUid()) {
                    throw new SecurityException("Can't access private device from different UID");
                }
            }

            if (portNumber < 0 || portNumber >= mInputPortCount) {
                Log.e(TAG, "portNumber out of range in openInputPort: " + portNumber);
                return null;
            }

            synchronized (mInputPortOutputPorts) {
                // input ports are exclusive: only one client at a time
                if (mInputPortOutputPorts[portNumber] != null) {
                    Log.d(TAG, "port " + portNumber + " already open");
                    return null;
                }

                try {
                    // one socket end stays here wrapped in a MidiOutputPort,
                    // the other end is returned to the client
                    ParcelFileDescriptor[] pair = ParcelFileDescriptor.createSocketPair(
                                                        OsConstants.SOCK_SEQPACKET);
                    MidiOutputPort outputPort = new MidiOutputPort(pair[0], portNumber);
                    mInputPortOutputPorts[portNumber] = outputPort;
                    outputPort.connect(mInputPortReceivers[portNumber]);
                    InputPortClient client = new InputPortClient(token, outputPort);
                    synchronized (mPortClients) {
                        mPortClients.put(token, client);
                    }
                    mInputPortOpen[portNumber] = true;
                    updateDeviceStatus();
                    return pair[1];
                } catch (IOException e) {
                    Log.e(TAG, "unable to create ParcelFileDescriptors in openInputPort");
                    return null;
                }
            }
        }

        @Override
        public ParcelFileDescriptor openOutputPort(IBinder token, int portNumber) {
            if (mDeviceInfo.isPrivate()) {
                if (Binder.getCallingUid() != Process.myUid()) {
                    throw new SecurityException("Can't access private device from different UID");
                }
            }

            if (portNumber < 0 || portNumber >= mOutputPortCount) {
                Log.e(TAG, "portNumber out of range in openOutputPort: " + portNumber);
                return null;
            }

            try {
                // output ports allow multiple clients, fanned out via the dispatcher
                ParcelFileDescriptor[] pair = ParcelFileDescriptor.createSocketPair(
                                                    OsConstants.SOCK_SEQPACKET);
                MidiInputPort inputPort = new MidiInputPort(pair[0], portNumber);
                MidiDispatcher dispatcher = mOutputPortDispatchers[portNumber];
                synchronized (dispatcher) {
                    dispatcher.getSender().connect(inputPort);
                    int openCount = dispatcher.getReceiverCount();
                    mOutputPortOpenCount[portNumber] = openCount;
                    updateDeviceStatus();
                }

                mInputPorts.add(inputPort);
                OutputPortClient client = new OutputPortClient(token, inputPort);
                synchronized (mPortClients) {
                    mPortClients.put(token, client);
                }
                return pair[1];
            } catch (IOException e) {
                Log.e(TAG, "unable to create ParcelFileDescriptors in openOutputPort");
                return null;
            }
        }

        @Override
        public void closePort(IBinder token) {
            synchronized (mPortClients) {
                PortClient client = mPortClients.remove(token);
                if (client != null) {
                    client.close();
                }
            }
        }

        @Override
        public void closeDevice() {
            if (mCallback != null) {
                mCallback.onClose();
            }
            IoUtils.closeQuietly(MidiDeviceServer.this);
        }

        @Override
        public int connectPorts(IBinder token, ParcelFileDescriptor pfd,
                int outputPortNumber) {
            // connect an already-open file descriptor directly to one of our output ports
            MidiInputPort inputPort = new MidiInputPort(pfd, outputPortNumber);
            MidiDispatcher dispatcher = mOutputPortDispatchers[outputPortNumber];
            synchronized (dispatcher) {
                dispatcher.getSender().connect(inputPort);
                int openCount = dispatcher.getReceiverCount();
                mOutputPortOpenCount[outputPortNumber] = openCount;
                updateDeviceStatus();
            }

            mInputPorts.add(inputPort);
            OutputPortClient client = new OutputPortClient(token, inputPort);
            synchronized (mPortClients) {
                mPortClients.put(token, client);
            }
            return Process.myPid();   // for caller to detect same process ID
        }

        @Override
        public MidiDeviceInfo getDeviceInfo() {
            return mDeviceInfo;
        }

        @Override
        public void setDeviceInfo(MidiDeviceInfo deviceInfo) {
            // only MidiService (system UID) may set the device info, and only once
            if (Binder.getCallingUid() != Process.SYSTEM_UID) {
                throw new SecurityException("setDeviceInfo should only be called by MidiService");
            }
            if (mDeviceInfo != null) {
                throw new IllegalStateException("setDeviceInfo should only be called once");
            }
            mDeviceInfo = deviceInfo;
        }
    };

    // Constructor for MidiManager.createDeviceServer()
    /* package */ MidiDeviceServer(IMidiManager midiManager, MidiReceiver[] inputPortReceivers,
            int numOutputPorts, Callback callback) {
        mMidiManager = midiManager;
        mInputPortReceivers = inputPortReceivers;
        mInputPortCount = inputPortReceivers.length;
        mOutputPortCount = numOutputPorts;
        mCallback = callback;

        mInputPortOutputPorts = new MidiOutputPort[mInputPortCount];

        mOutputPortDispatchers = new MidiDispatcher[numOutputPorts];
        for (int i = 0; i < numOutputPorts; i++) {
            mOutputPortDispatchers[i] = new MidiDispatcher();
        }

        mInputPortOpen = new boolean[mInputPortCount];
        mOutputPortOpenCount = new int[numOutputPorts];

        mGuard.open("close");
    }

    // Constructor for MidiDeviceService.onCreate()
    /* package */ MidiDeviceServer(IMidiManager midiManager, MidiReceiver[] inputPortReceivers,
           MidiDeviceInfo deviceInfo, Callback callback) {
        this(midiManager, inputPortReceivers, deviceInfo.getOutputPortCount(), callback);
        mDeviceInfo = deviceInfo;
    }

    /* package */ IMidiDeviceServer getBinderInterface() {
        return mServer;
    }

    public IBinder asBinder() {
        return mServer.asBinder();
    }

    // Pushes the current open-port status to the callback and to MidiService.
    private void updateDeviceStatus() {
        // clear calling identity, since we may be in a Binder call from one of our clients
        long identityToken = Binder.clearCallingIdentity();

        MidiDeviceStatus status = new MidiDeviceStatus(mDeviceInfo, mInputPortOpen,
                mOutputPortOpenCount);
        if (mCallback != null) {
            mCallback.onDeviceStatusChanged(this, status);
        }
        try {
            mMidiManager.setDeviceStatus(mServer, status);
        } catch (RemoteException e) {
            Log.e(TAG, "RemoteException in updateDeviceStatus");
        } finally {
            Binder.restoreCallingIdentity(identityToken);
        }
    }

    @Override
    public void close() throws IOException {
        // idempotent: safe to call more than once
        synchronized (mGuard) {
            if (mIsClosed) return;
            mGuard.close();

            for (int i = 0; i < mInputPortCount; i++) {
                MidiOutputPort outputPort = mInputPortOutputPorts[i];
                if (outputPort != null) {
                    IoUtils.closeQuietly(outputPort);
                    mInputPortOutputPorts[i] = null;
                }
            }
            for (MidiInputPort inputPort : mInputPorts) {
                IoUtils.closeQuietly(inputPort);
            }
            mInputPorts.clear();

            try {
                mMidiManager.unregisterDeviceServer(mServer);
            } catch (RemoteException e) {
                Log.e(TAG, "RemoteException in unregisterDeviceServer");
            }

            mIsClosed = true;
        }
    }

    @Override
    protected void finalize() throws Throwable {
        try {
            // CloseGuard logs a warning if the owner forgot to call close()
            mGuard.warnIfOpen();
            close();
        } finally {
            super.finalize();
        }
    }

    /**
     * Returns an array of {@link MidiReceiver} for the device's output ports.
     * Clients can use these receivers to send data out the device's output ports.
     * @return array of MidiReceivers
     */
    public MidiReceiver[] getOutputPortReceivers() {
        MidiReceiver[] receivers = new MidiReceiver[mOutputPortCount];
        System.arraycopy(mOutputPortDispatchers, 0, receivers, 0, mOutputPortCount);
        return receivers;
    }
}
apache-2.0
prasi-in/geode
geode-spark-connector/geode-spark-demos/basic-demos/src/main/java/demo/RDDSaveJavaDemo.java
3078
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package demo; import org.apache.geode.spark.connector.GeodeConnectionConf; import org.apache.spark.SparkConf; import org.apache.spark.api.java.JavaRDD; import org.apache.spark.api.java.JavaSparkContext; import org.apache.spark.api.java.function.PairFunction; import scala.Tuple2; import java.util.ArrayList; import java.util.List; import static org.apache.geode.spark.connector.javaapi.GeodeJavaUtil.*; /** * This Spark application demonstrates how to save a RDD to Geode using Geode Spark * Connector with Java. * <p/> * In order to run it, you will need to start Geode cluster, and create the following region * with GFSH: * <pre> * gfsh> create region --name=str_int_region --type=REPLICATE \ * --key-constraint=java.lang.String --value-constraint=java.lang.Integer * </pre> * * Once you compile and package the demo, the jar file basic-demos_2.10-0.5.0.jar * should be generated under geode-spark-demos/basic-demos/target/scala-2.10/. 
* Then run the following command to start a Spark job: * <pre> * <path to spark>/bin/spark-submit --master=local[2] --class demo.RDDSaveJavaDemo \ * <path to>/basic-demos_2.10-0.5.0.jar <locator host>:<port> * </pre> * * Verify the data was saved to Geode with GFSH: * <pre>gfsh> query --query="select * from /str_int_region.entrySet" </pre> */ public class RDDSaveJavaDemo { public static void main(String[] argv) { if (argv.length != 1) { System.err.printf("Usage: RDDSaveJavaDemo <locators>\n"); return; } SparkConf conf = new SparkConf().setAppName("RDDSaveJavaDemo"); conf.set(GeodeLocatorPropKey, argv[0]); JavaSparkContext sc = new JavaSparkContext(conf); List<String> data = new ArrayList<String>(); data.add("abcdefg"); data.add("abcdefgh"); data.add("abcdefghi"); JavaRDD<String> rdd = sc.parallelize(data); GeodeConnectionConf connConf = GeodeConnectionConf.apply(conf); PairFunction<String, String, Integer> func = new PairFunction<String, String, Integer>() { @Override public Tuple2<String, Integer> call(String s) throws Exception { return new Tuple2<String, Integer>(s, s.length()); } }; javaFunctions(rdd).saveToGeode("str_int_region", func, connConf); sc.stop(); } }
apache-2.0
cereblanco/usbong-builder
src/main/java/usbong/android/builder/controllers/UtreeController.java
1803
package usbong.android.builder.controllers; import com.activeandroid.query.Select; import rx.Observable; import rx.Observer; import rx.Subscriber; import rx.android.schedulers.AndroidSchedulers; import rx.schedulers.Schedulers; import usbong.android.builder.models.Utree; /** * Created by Rocky Camacho on 6/26/2014. */ public class UtreeController implements Controller { public void fetchUtree(long id, Observer<Utree> observer) { getUtree(id).observeOn(AndroidSchedulers.mainThread()) .subscribe(observer); } private Observable<Utree> getUtree(final long id) { return Observable.create(new Observable.OnSubscribe<Utree>() { @Override public void call(Subscriber<? super Utree> subscriber) { Utree utree = loadUtree(); subscriber.onNext(utree); subscriber.onCompleted(); } private Utree loadUtree() { Utree utree = null; if (id == -1) { utree = new Utree(); } else { utree = new Select().from(Utree.class) .where("id = ?", id) .executeSingle(); } return utree; } }).subscribeOn(Schedulers.io()); } public void save(final Utree utree, Observer<Utree> observer) { Observable.create(new Observable.OnSubscribe<Utree>() { @Override public void call(Subscriber<? super Utree> subscriber) { utree.save(); subscriber.onCompleted(); } }).subscribeOn(Schedulers.io()) .observeOn(AndroidSchedulers.mainThread()) .subscribe(observer); } }
apache-2.0
AndrewKhitrin/dbeaver
plugins/org.jkiss.dbeaver.ui.editors.data/src/org/jkiss/dbeaver/ui/controls/lightgrid/GridColumnRenderer.java
9778
/* * DBeaver - Universal Database Manager * Copyright (C) 2010-2019 Serge Rider (serge@jkiss.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jkiss.dbeaver.ui.controls.lightgrid; import org.eclipse.swt.SWT; import org.eclipse.swt.graphics.Font; import org.eclipse.swt.graphics.GC; import org.eclipse.swt.graphics.Image; import org.eclipse.swt.graphics.Rectangle; import org.jkiss.code.Nullable; import org.jkiss.dbeaver.ui.DBeaverIcons; import org.jkiss.dbeaver.ui.TextUtils; import org.jkiss.dbeaver.ui.UIIcon; import org.jkiss.utils.CommonUtils; /** * Grid column renderer */ class GridColumnRenderer extends AbstractRenderer { public static final int LEFT_MARGIN = 6; public static final int RIGHT_MARGIN = 6; public static final int BOTTOM_MARGIN = 6; public static final int TOP_MARGIN = 6; public static final int ARROW_MARGIN = 6; public static final int IMAGE_SPACING = 3; public static final Image IMAGE_ASTERISK = DBeaverIcons.getImage(UIIcon.SORT_UNKNOWN); public static final Image IMAGE_DESC = DBeaverIcons.getImage(UIIcon.SORT_DECREASE); public static final Image IMAGE_ASC = DBeaverIcons.getImage(UIIcon.SORT_INCREASE); public static final Image IMAGE_FILTER = DBeaverIcons.getImage(UIIcon.FILTER_SMALL); public static final int SORT_WIDTH = IMAGE_DESC.getBounds().width; public static final int FILTER_WIDTH = IMAGE_FILTER.getBounds().width; public GridColumnRenderer(LightGrid grid) { super(grid); } public static Rectangle getSortControlBounds() { return 
IMAGE_DESC.getBounds(); } public static Rectangle getFilterControlBounds() { return IMAGE_FILTER.getBounds(); } @Nullable protected Image getColumnImage(Object element) { return grid.getLabelProvider().getImage(element); } protected String getColumnText(Object element) { return grid.getLabelProvider().getText(element); } protected String getColumnDescription(Object element) { return grid.getLabelProvider().getDescription(element); } protected Font getColumnFont(Object element) { Font font = grid.getLabelProvider().getFont(element); return font != null ? font : grid.normalFont; } public void paint(GC gc, Rectangle bounds, boolean selected, boolean hovering, Object element) { boolean hasFilters = grid.getContentProvider().isElementSupportsFilter(element); //GridColumn col = grid.getColumnByElement(cell.col); //AbstractRenderer arrowRenderer = col.getSortRenderer(); int sortOrder = grid.getContentProvider().getSortOrder(element); final Rectangle sortBounds = getSortControlBounds(); final Rectangle filterBounds = getFilterControlBounds(); // set the font to be used to display the text. 
gc.setFont(getColumnFont(element)); boolean flat = true; boolean drawSelected = false; if (flat && (selected || hovering)) { gc.setBackground(grid.getContentProvider().getCellHeaderSelectionBackground(element)); } else { gc.setBackground(grid.getContentProvider().getCellHeaderBackground(element)); } gc.setForeground(grid.getContentProvider().getCellHeaderForeground(element)); gc.fillRectangle(bounds.x, bounds.y, bounds.width, bounds.height); int pushedDrawingOffset = 0; if (drawSelected) { pushedDrawingOffset = 1; } int x = LEFT_MARGIN; Image columnImage = getColumnImage(element); if (columnImage != null) { int y = bounds.y + pushedDrawingOffset + TOP_MARGIN; gc.drawImage(columnImage, bounds.x + x + pushedDrawingOffset, y); x += columnImage.getBounds().width + IMAGE_SPACING; } int width = bounds.width - x; if (sortOrder == SWT.NONE) { width -= RIGHT_MARGIN; } else { width -= ARROW_MARGIN + sortBounds.width; } if (hasFilters) { width -= filterBounds.width; } //gc.setForeground(getDisplay().getSystemColor(SWT.COLOR_WIDGET_FOREGROUND)); int y = bounds.y + TOP_MARGIN; { // Column name String text = getColumnText(element); text = TextUtils.getShortString(grid.fontMetrics, text, width); gc.setFont(grid.normalFont); gc.drawString(text, bounds.x + x + pushedDrawingOffset, y + pushedDrawingOffset, true); } if (sortOrder != SWT.NONE) { if (drawSelected) { sortBounds.x = bounds.x + bounds.width - ARROW_MARGIN - sortBounds.width + 1; sortBounds.y = y; } else { sortBounds.x = bounds.x + bounds.width - ARROW_MARGIN - sortBounds.width; sortBounds.y = y; } sortBounds.x += IMAGE_SPACING; paintSort(gc, sortBounds, sortOrder); } if (hasFilters) { gc.drawImage(IMAGE_FILTER, bounds.x + bounds.width - filterBounds.width - (sortOrder != SWT.NONE ? 
IMAGE_SPACING + sortBounds.width + 1 : ARROW_MARGIN), y); } { // Draw column description String text = getColumnDescription(element); if (!CommonUtils.isEmpty(text)) { y += TOP_MARGIN + grid.fontMetrics.getHeight(); text = TextUtils.getShortString(grid.fontMetrics, text, width); gc.setFont(grid.normalFont); gc.drawString(text, bounds.x + x + pushedDrawingOffset, y + pushedDrawingOffset, true); } } // Draw border if (!flat) { if (drawSelected) { gc.setForeground(getDisplay().getSystemColor(SWT.COLOR_WIDGET_NORMAL_SHADOW)); } else { gc.setForeground(getDisplay().getSystemColor(SWT.COLOR_WIDGET_HIGHLIGHT_SHADOW)); } gc.drawLine(bounds.x, bounds.y, bounds.x + bounds.width - 1, bounds.y); gc.drawLine(bounds.x, bounds.y, bounds.x, bounds.y + bounds.height - 1); if (!drawSelected) { gc.setForeground(getDisplay().getSystemColor(SWT.COLOR_WIDGET_LIGHT_SHADOW)); gc.drawLine(bounds.x + 1, bounds.y + 1, bounds.x + bounds.width - 2, bounds.y + 1); gc.drawLine(bounds.x + 1, bounds.y + 1, bounds.x + 1, bounds.y + bounds.height - 2); } if (drawSelected) { gc.setForeground(getDisplay().getSystemColor(SWT.COLOR_WIDGET_NORMAL_SHADOW)); } else { gc.setForeground(getDisplay().getSystemColor(SWT.COLOR_WIDGET_DARK_SHADOW)); } gc.drawLine(bounds.x + bounds.width - 1, bounds.y, bounds.x + bounds.width - 1, bounds.y + bounds.height - 1); gc.drawLine(bounds.x, bounds.y + bounds.height - 1, bounds.x + bounds.width - 1, bounds.y + bounds.height - 1); if (!drawSelected) { gc.setForeground(getDisplay().getSystemColor(SWT.COLOR_WIDGET_NORMAL_SHADOW)); gc.drawLine(bounds.x + bounds.width - 2, bounds.y + 1, bounds.x + bounds.width - 2, bounds.y + bounds.height - 2); gc.drawLine(bounds.x + 1, bounds.y + bounds.height - 2, bounds.x + bounds.width - 2, bounds.y + bounds.height - 2); } } else { gc.setForeground(getDisplay().getSystemColor(SWT.COLOR_WIDGET_DARK_SHADOW)); gc.drawLine(bounds.x + bounds.width - 1, bounds.y, bounds.x + bounds.width - 1, bounds.y + bounds.height - 1); gc.drawLine(bounds.x, 
bounds.y + bounds.height - 1, bounds.x + bounds.width - 1, bounds.y + bounds.height - 1); } gc.setFont(grid.normalFont); } public static void paintSort(GC gc, Rectangle bounds, int sort) { switch (sort) { case SWT.DEFAULT: gc.drawImage(IMAGE_ASTERISK, bounds.x, bounds.y); break; case SWT.UP: gc.drawImage(IMAGE_ASC, bounds.x, bounds.y); break; case SWT.DOWN: gc.drawImage(IMAGE_DESC, bounds.x, bounds.y); break; } /* if (isSelected()) { gc.drawLine(bounds.x, bounds.y, bounds.x + 6, bounds.y); gc.drawLine(bounds.x + 1, bounds.y + 1, bounds.x + 5, bounds.y + 1); gc.drawLine(bounds.x + 2, bounds.y + 2, bounds.x + 4, bounds.y + 2); gc.drawPoint(bounds.x + 3, bounds.y + 3); } else { gc.drawPoint(bounds.x + 3, bounds.y); gc.drawLine(bounds.x + 2, bounds.y + 1, bounds.x + 4, bounds.y + 1); gc.drawLine(bounds.x + 1, bounds.y + 2, bounds.x + 5, bounds.y + 2); gc.drawLine(bounds.x, bounds.y + 3, bounds.x + 6, bounds.y + 3); } */ } }
apache-2.0
chirino/activemq-apollo
apollo-openwire/src/main/scala/org/apache/activemq/apollo/openwire/command/ActiveMQStreamMessage.java
25732
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.activemq.apollo.openwire.command;

import org.apache.activemq.apollo.openwire.support.MarshallingSupport;
import org.apache.activemq.apollo.openwire.support.OpenwireException;
import org.apache.activemq.apollo.openwire.support.Settings;
import org.fusesource.hawtbuf.Buffer;
import org.fusesource.hawtbuf.ByteArrayInputStream;
import org.fusesource.hawtbuf.ByteArrayOutputStream;

import java.io.*;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.InflaterInputStream;

/**
 * OpenWire stream message: a sequence of typed primitive values written and
 * read strictly in order. The message is either in write mode (dataOut open)
 * or read mode (dataIn open); {@link #reset()} / {@link #storeContent()}
 * switch between them. Each read* method marks the stream before consuming the
 * type tag so it can reset on a failed type conversion, leaving the current
 * value readable again (JMS StreamMessage semantics).
 *
 * @openwire:marshaller code="27"
 */
public class ActiveMQStreamMessage extends ActiveMQMessage {
    public static final byte DATA_STRUCTURE_TYPE = CommandTypes.ACTIVEMQ_STREAM_MESSAGE;

    // Write-mode state: values are marshalled into bytesOut via dataOut.
    protected transient DataOutputStream dataOut;
    protected transient ByteArrayOutputStream bytesOut;
    // Read-mode state: values are unmarshalled from the stored content.
    protected transient DataInputStream dataIn;
    // Bytes left in a partially-consumed byte[] field; -1 = not mid-array.
    protected transient int remainingBytes = -1;

    /** Deep-copies this message; any pending written data is flushed first. */
    public Message copy() {
        ActiveMQStreamMessage copy = new ActiveMQStreamMessage();
        copy(copy);
        return copy;
    }

    // Flushes pending writes into the content buffer, then copies base state.
    // The copy gets fresh (null) stream state of its own.
    private void copy(ActiveMQStreamMessage copy) {
        storeContent();
        super.copy(copy);
        copy.dataOut = null;
        copy.bytesOut = null;
        copy.dataIn = null;
    }

    /** Called before the message is sent; flushes buffered writes to content. */
    public void onSend() throws OpenwireException {
        super.onSend();
        storeContent();
    }

    // Closes the write stream (finishing any compression) and stores the
    // marshalled bytes as the message content. No-op if not in write mode.
    private void storeContent() {
        if (dataOut != null) {
            try {
                dataOut.close();
                setContent(bytesOut.toBuffer());
                bytesOut = null;
                dataOut = null;
            } catch (IOException ioe) {
                throw new RuntimeException(ioe);
            }
        }
    }

    public byte getDataStructureType() {
        return DATA_STRUCTURE_TYPE;
    }

    public String getJMSXMimeType() {
        return "jms/stream-message";
    }

    /** Clears the body and discards all read/write stream state. */
    public void clearBody() throws OpenwireException {
        super.clearBody();
        this.dataOut = null;
        this.dataIn = null;
        this.bytesOut = null;
        this.remainingBytes = -1;
    }

    /**
     * Reads the next value as a boolean. Accepts BOOLEAN and STRING tags;
     * NULL raises NPE; any other tag resets the stream and fails, leaving the
     * value available for a re-read with the correct type.
     */
    public boolean readBoolean() throws OpenwireException {
        initializeReading();
        try {
            // Mark so we can rewind past the tag byte on a type mismatch.
            this.dataIn.mark(10);
            int type = this.dataIn.read();
            if (type == -1) {
                throw new OpenwireException("reached end of data");
            }
            if (type == MarshallingSupport.BOOLEAN_TYPE) {
                return this.dataIn.readBoolean();
            }
            if (type == MarshallingSupport.STRING_TYPE) {
                return Boolean.valueOf(this.dataIn.readUTF()).booleanValue();
            }
            if (type == MarshallingSupport.NULL) {
                this.dataIn.reset();
                throw new NullPointerException("Cannot convert NULL value to boolean.");
            } else {
                this.dataIn.reset();
                throw new OpenwireException(" not a boolean type");
            }
        } catch (EOFException e) {
            throw new OpenwireException(e);
        } catch (IOException e) {
            throw new OpenwireException(e);
        }
    }

    /** Reads the next value as a byte (accepts BYTE or numeric STRING). */
    public byte readByte() throws OpenwireException {
        initializeReading();
        try {
            this.dataIn.mark(10);
            int type = this.dataIn.read();
            if (type == -1) {
                throw new OpenwireException("reached end of data");
            }
            if (type == MarshallingSupport.BYTE_TYPE) {
                return this.dataIn.readByte();
            }
            if (type == MarshallingSupport.STRING_TYPE) {
                return Byte.valueOf(this.dataIn.readUTF()).byteValue();
            }
            if (type == MarshallingSupport.NULL) {
                this.dataIn.reset();
                throw new NullPointerException("Cannot convert NULL value to byte.");
            } else {
                this.dataIn.reset();
                throw new OpenwireException(" not a byte type");
            }
        } catch (NumberFormatException mfe) {
            // String couldn't be parsed: rewind so the value can be re-read.
            try {
                this.dataIn.reset();
            } catch (IOException ioe) {
                throw new OpenwireException(ioe);
            }
            throw mfe;
        } catch (EOFException e) {
            throw new OpenwireException(e);
        } catch (IOException e) {
            throw new OpenwireException(e);
        }
    }

    /** Reads the next value as a short (widens BYTE; parses STRING). */
    public short readShort() throws OpenwireException {
        initializeReading();
        try {
            this.dataIn.mark(17);
            int type = this.dataIn.read();
            if (type == -1) {
                throw new OpenwireException("reached end of data");
            }
            if (type == MarshallingSupport.SHORT_TYPE) {
                return this.dataIn.readShort();
            }
            if (type == MarshallingSupport.BYTE_TYPE) {
                return this.dataIn.readByte();
            }
            if (type == MarshallingSupport.STRING_TYPE) {
                return Short.valueOf(this.dataIn.readUTF()).shortValue();
            }
            if (type == MarshallingSupport.NULL) {
                this.dataIn.reset();
                throw new NullPointerException("Cannot convert NULL value to short.");
            } else {
                this.dataIn.reset();
                throw new OpenwireException(" not a short type");
            }
        } catch (NumberFormatException mfe) {
            try {
                this.dataIn.reset();
            } catch (IOException ioe) {
                throw new OpenwireException(ioe);
            }
            throw mfe;
        } catch (EOFException e) {
            throw new OpenwireException(e);
        } catch (IOException e) {
            throw new OpenwireException(e);
        }
    }

    /** Reads the next value as a char; only the CHAR tag converts. */
    public char readChar() throws OpenwireException {
        initializeReading();
        try {
            this.dataIn.mark(17);
            int type = this.dataIn.read();
            if (type == -1) {
                throw new OpenwireException("reached end of data");
            }
            if (type == MarshallingSupport.CHAR_TYPE) {
                return this.dataIn.readChar();
            }
            if (type == MarshallingSupport.NULL) {
                this.dataIn.reset();
                throw new NullPointerException("Cannot convert NULL value to char.");
            } else {
                this.dataIn.reset();
                throw new OpenwireException(" not a char type");
            }
        } catch (NumberFormatException mfe) {
            try {
                this.dataIn.reset();
            } catch (IOException ioe) {
                throw new OpenwireException(ioe);
            }
            throw mfe;
        } catch (EOFException e) {
            throw new OpenwireException(e);
        } catch (IOException e) {
            throw new OpenwireException(e);
        }
    }

    /** Reads the next value as an int (widens SHORT/BYTE; parses STRING). */
    public int readInt() throws OpenwireException {
        initializeReading();
        try {
            this.dataIn.mark(33);
            int type = this.dataIn.read();
            if (type == -1) {
                throw new OpenwireException("reached end of data");
            }
            if (type == MarshallingSupport.INTEGER_TYPE) {
                return this.dataIn.readInt();
            }
            if (type == MarshallingSupport.SHORT_TYPE) {
                return this.dataIn.readShort();
            }
            if (type == MarshallingSupport.BYTE_TYPE) {
                return this.dataIn.readByte();
            }
            if (type == MarshallingSupport.STRING_TYPE) {
                return Integer.valueOf(this.dataIn.readUTF()).intValue();
            }
            if (type == MarshallingSupport.NULL) {
                this.dataIn.reset();
                throw new NullPointerException("Cannot convert NULL value to int.");
            } else {
                this.dataIn.reset();
                throw new OpenwireException(" not an int type");
            }
        } catch (NumberFormatException mfe) {
            try {
                this.dataIn.reset();
            } catch (IOException ioe) {
                throw new OpenwireException(ioe);
            }
            throw mfe;
        } catch (EOFException e) {
            throw new OpenwireException(e);
        } catch (IOException e) {
            throw new OpenwireException(e);
        }
    }

    /** Reads the next value as a long (widens INT/SHORT/BYTE; parses STRING). */
    public long readLong() throws OpenwireException {
        initializeReading();
        try {
            this.dataIn.mark(65);
            int type = this.dataIn.read();
            if (type == -1) {
                throw new OpenwireException("reached end of data");
            }
            if (type == MarshallingSupport.LONG_TYPE) {
                return this.dataIn.readLong();
            }
            if (type == MarshallingSupport.INTEGER_TYPE) {
                return this.dataIn.readInt();
            }
            if (type == MarshallingSupport.SHORT_TYPE) {
                return this.dataIn.readShort();
            }
            if (type == MarshallingSupport.BYTE_TYPE) {
                return this.dataIn.readByte();
            }
            if (type == MarshallingSupport.STRING_TYPE) {
                return Long.valueOf(this.dataIn.readUTF()).longValue();
            }
            if (type == MarshallingSupport.NULL) {
                this.dataIn.reset();
                throw new NullPointerException("Cannot convert NULL value to long.");
            } else {
                this.dataIn.reset();
                throw new OpenwireException(" not a long type");
            }
        } catch (NumberFormatException mfe) {
            try {
                this.dataIn.reset();
            } catch (IOException ioe) {
                throw new OpenwireException(ioe);
            }
            throw mfe;
        } catch (EOFException e) {
            throw new OpenwireException(e);
        } catch (IOException e) {
            throw new OpenwireException(e);
        }
    }

    /** Reads the next value as a float (accepts FLOAT or STRING). */
    public float readFloat() throws OpenwireException {
        initializeReading();
        try {
            this.dataIn.mark(33);
            int type = this.dataIn.read();
            if (type == -1) {
                throw new OpenwireException("reached end of data");
            }
            if (type == MarshallingSupport.FLOAT_TYPE) {
                return this.dataIn.readFloat();
            }
            if (type == MarshallingSupport.STRING_TYPE) {
                return Float.valueOf(this.dataIn.readUTF()).floatValue();
            }
            if (type == MarshallingSupport.NULL) {
                this.dataIn.reset();
                throw new NullPointerException("Cannot convert NULL value to float.");
            } else {
                this.dataIn.reset();
                throw new OpenwireException(" not a float type");
            }
        } catch (NumberFormatException mfe) {
            try {
                this.dataIn.reset();
            } catch (IOException ioe) {
                throw new OpenwireException(ioe);
            }
            throw mfe;
        } catch (EOFException e) {
            throw new OpenwireException(e);
        } catch (IOException e) {
            throw new OpenwireException(e);
        }
    }

    /** Reads the next value as a double (widens FLOAT; parses STRING). */
    public double readDouble() throws OpenwireException {
        initializeReading();
        try {
            this.dataIn.mark(65);
            int type = this.dataIn.read();
            if (type == -1) {
                throw new OpenwireException("reached end of data");
            }
            if (type == MarshallingSupport.DOUBLE_TYPE) {
                return this.dataIn.readDouble();
            }
            if (type == MarshallingSupport.FLOAT_TYPE) {
                return this.dataIn.readFloat();
            }
            if (type == MarshallingSupport.STRING_TYPE) {
                return Double.valueOf(this.dataIn.readUTF()).doubleValue();
            }
            if (type == MarshallingSupport.NULL) {
                this.dataIn.reset();
                throw new NullPointerException("Cannot convert NULL value to double.");
            } else {
                this.dataIn.reset();
                throw new OpenwireException(" not a double type");
            }
        } catch (NumberFormatException mfe) {
            try {
                this.dataIn.reset();
            } catch (IOException ioe) {
                throw new OpenwireException(ioe);
            }
            throw mfe;
        } catch (EOFException e) {
            throw new OpenwireException(e);
        } catch (IOException e) {
            throw new OpenwireException(e);
        }
    }

    /**
     * Reads the next value as a String. Every scalar tag converts via
     * toString(); NULL returns null; BYTE_ARRAY does not convert.
     */
    public String readString() throws OpenwireException {
        initializeReading();
        try {
            this.dataIn.mark(65);
            int type = this.dataIn.read();
            if (type == -1) {
                throw new OpenwireException("reached end of data");
            }
            if (type == MarshallingSupport.NULL) {
                return null;
            }
            if (type == MarshallingSupport.BIG_STRING_TYPE) {
                // Strings too large for modified-UTF8 writeUTF use a separate encoding.
                return MarshallingSupport.readUTF8(dataIn);
            }
            if (type == MarshallingSupport.STRING_TYPE) {
                return this.dataIn.readUTF();
            }
            if (type == MarshallingSupport.LONG_TYPE) {
                return new Long(this.dataIn.readLong()).toString();
            }
            if (type == MarshallingSupport.INTEGER_TYPE) {
                return new Integer(this.dataIn.readInt()).toString();
            }
            if (type == MarshallingSupport.SHORT_TYPE) {
                return new Short(this.dataIn.readShort()).toString();
            }
            if (type == MarshallingSupport.BYTE_TYPE) {
                return new Byte(this.dataIn.readByte()).toString();
            }
            if (type == MarshallingSupport.FLOAT_TYPE) {
                return new Float(this.dataIn.readFloat()).toString();
            }
            if (type == MarshallingSupport.DOUBLE_TYPE) {
                return new Double(this.dataIn.readDouble()).toString();
            }
            if (type == MarshallingSupport.BOOLEAN_TYPE) {
                return (this.dataIn.readBoolean() ? Boolean.TRUE : Boolean.FALSE).toString();
            }
            if (type == MarshallingSupport.CHAR_TYPE) {
                return new Character(this.dataIn.readChar()).toString();
            } else {
                this.dataIn.reset();
                throw new OpenwireException(" not a String type");
            }
        } catch (NumberFormatException mfe) {
            try {
                this.dataIn.reset();
            } catch (IOException ioe) {
                throw new OpenwireException(ioe);
            }
            throw mfe;
        } catch (EOFException e) {
            throw new OpenwireException(e);
        } catch (IOException e) {
            throw new OpenwireException(e);
        }
    }

    /**
     * Reads (part of) a byte[] field into {@code value}. May be called
     * repeatedly to drain a large array; {@code remainingBytes} tracks the
     * unread remainder across calls, and -1 is returned once the field is
     * fully consumed.
     */
    public int readBytes(byte[] value) throws OpenwireException {
        initializeReading();
        try {
            if (value == null) {
                throw new NullPointerException();
            }
            if (remainingBytes == -1) {
                // First call for this field: check the tag and read the length.
                this.dataIn.mark(value.length + 1);
                int type = this.dataIn.read();
                if (type == -1) {
                    throw new OpenwireException("reached end of data");
                }
                if (type != MarshallingSupport.BYTE_ARRAY_TYPE) {
                    throw new OpenwireException("Not a byte array");
                }
                remainingBytes = this.dataIn.readInt();
            } else if (remainingBytes == 0) {
                // Field exactly drained on a previous call: signal completion.
                remainingBytes = -1;
                return -1;
            }
            if (value.length <= remainingBytes) {
                // small buffer
                remainingBytes -= value.length;
                this.dataIn.readFully(value);
                return value.length;
            } else {
                // big buffer
                int rc = this.dataIn.read(value, 0, remainingBytes);
                remainingBytes = 0;
                return rc;
            }
        } catch (EOFException e) {
            throw new OpenwireException(e.getMessage(),e);
        } catch (IOException e) {
            throw new OpenwireException(e.getMessage(),e);
        }
    }

    /**
     * Reads the next value in its natural boxed form (String, Long, Integer,
     * Short, Byte, Float, Double, Boolean, Character, byte[] or null).
     */
    public Object readObject() throws OpenwireException {
        initializeReading();
        try {
            this.dataIn.mark(65);
            int type = this.dataIn.read();
            if (type == -1) {
                throw new OpenwireException("reached end of data");
            }
            if (type == MarshallingSupport.NULL) {
                return null;
            }
            if (type == MarshallingSupport.BIG_STRING_TYPE) {
                return MarshallingSupport.readUTF8(dataIn);
            }
            if (type == MarshallingSupport.STRING_TYPE) {
                return this.dataIn.readUTF();
            }
            if (type == MarshallingSupport.LONG_TYPE) {
                return Long.valueOf(this.dataIn.readLong());
            }
            if (type == MarshallingSupport.INTEGER_TYPE) {
                return Integer.valueOf(this.dataIn.readInt());
            }
            if (type == MarshallingSupport.SHORT_TYPE) {
                return Short.valueOf(this.dataIn.readShort());
            }
            if (type == MarshallingSupport.BYTE_TYPE) {
                return Byte.valueOf(this.dataIn.readByte());
            }
            if (type == MarshallingSupport.FLOAT_TYPE) {
                return new Float(this.dataIn.readFloat());
            }
            if (type == MarshallingSupport.DOUBLE_TYPE) {
                return new Double(this.dataIn.readDouble());
            }
            if (type == MarshallingSupport.BOOLEAN_TYPE) {
                return this.dataIn.readBoolean() ? Boolean.TRUE : Boolean.FALSE;
            }
            if (type == MarshallingSupport.CHAR_TYPE) {
                return Character.valueOf(this.dataIn.readChar());
            }
            if (type == MarshallingSupport.BYTE_ARRAY_TYPE) {
                int len = this.dataIn.readInt();
                byte[] value = new byte[len];
                this.dataIn.readFully(value);
                return value;
            } else {
                this.dataIn.reset();
                throw new OpenwireException("unknown type");
            }
        } catch (NumberFormatException mfe) {
            try {
                this.dataIn.reset();
            } catch (IOException ioe) {
                throw new OpenwireException(ioe);
            }
            throw mfe;
        } catch (EOFException e) {
            throw new OpenwireException(e.getMessage(),e);
        } catch (IOException e) {
            throw new OpenwireException(e.getMessage(),e);
        }
    }

    // ---- Write side: each method appends one tagged value to the body. ----

    public void writeBoolean(boolean value) throws OpenwireException {
        initializeWriting();
        try {
            MarshallingSupport.marshalBoolean(dataOut, value);
        } catch (IOException ioe) {
            throw new OpenwireException(ioe);
        }
    }

    public void writeByte(byte value) throws OpenwireException {
        initializeWriting();
        try {
            MarshallingSupport.marshalByte(dataOut, value);
        } catch (IOException ioe) {
            throw new OpenwireException(ioe);
        }
    }

    public void writeShort(short value) throws OpenwireException {
        initializeWriting();
        try {
            MarshallingSupport.marshalShort(dataOut, value);
        } catch (IOException ioe) {
            throw new OpenwireException(ioe);
        }
    }

    public void writeChar(char value) throws OpenwireException {
        initializeWriting();
        try {
            MarshallingSupport.marshalChar(dataOut, value);
        } catch (IOException ioe) {
            throw new OpenwireException(ioe);
        }
    }

    public void writeInt(int value) throws OpenwireException {
        initializeWriting();
        try {
            MarshallingSupport.marshalInt(dataOut, value);
        } catch (IOException ioe) {
            throw new OpenwireException(ioe);
        }
    }

    public void writeLong(long value) throws OpenwireException {
        initializeWriting();
        try {
            MarshallingSupport.marshalLong(dataOut, value);
        } catch (IOException ioe) {
            throw new OpenwireException(ioe);
        }
    }

    public void writeFloat(float value) throws OpenwireException {
        initializeWriting();
        try {
            MarshallingSupport.marshalFloat(dataOut, value);
        } catch (IOException ioe) {
            throw new OpenwireException(ioe);
        }
    }

    public void writeDouble(double value) throws OpenwireException {
        initializeWriting();
        try {
            MarshallingSupport.marshalDouble(dataOut, value);
        } catch (IOException ioe) {
            throw new OpenwireException(ioe);
        }
    }

    /** Writes a String, or a NULL marker when {@code value} is null. */
    public void writeString(String value) throws OpenwireException {
        initializeWriting();
        try {
            if (value == null) {
                MarshallingSupport.marshalNull(dataOut);
            } else {
                MarshallingSupport.marshalString(dataOut, value);
            }
        } catch (IOException ioe) {
            throw new OpenwireException(ioe);
        }
    }

    public void writeBytes(byte[] value) throws OpenwireException {
        writeBytes(value, 0, value.length);
    }

    public void writeBytes(byte[] value, int offset, int length) throws OpenwireException {
        initializeWriting();
        try {
            MarshallingSupport.marshalByteArray(dataOut, value, offset, length);
        } catch (IOException ioe) {
            throw new OpenwireException(ioe);
        }
    }

    /**
     * Writes any supported boxed value by dispatching to the matching
     * primitive writer; rejects unsupported types.
     */
    public void writeObject(Object value) throws OpenwireException {
        initializeWriting();
        if (value == null) {
            try {
                MarshallingSupport.marshalNull(dataOut);
            } catch (IOException ioe) {
                throw new OpenwireException(ioe);
            }
        } else if (value instanceof String) {
            writeString(value.toString());
        } else if (value instanceof Character) {
            writeChar(((Character)value).charValue());
        } else if (value instanceof Boolean) {
            writeBoolean(((Boolean)value).booleanValue());
        } else if (value instanceof Byte) {
            writeByte(((Byte)value).byteValue());
        } else if (value instanceof Short) {
            writeShort(((Short)value).shortValue());
        } else if (value instanceof Integer) {
            writeInt(((Integer)value).intValue());
        } else if (value instanceof Float) {
            writeFloat(((Float)value).floatValue());
        } else if (value instanceof Double) {
            writeDouble(((Double)value).doubleValue());
        } else if (value instanceof byte[]) {
            writeBytes((byte[])value);
        } else if (value instanceof Long) {
            writeLong(((Long)value).longValue());
        } else {
            throw new OpenwireException("Unsupported Object type: " + value.getClass());
        }
    }

    /**
     * Flushes pending writes and puts the message into read-only mode so the
     * body can be read from the beginning (JMS StreamMessage#reset contract).
     */
    public void reset() throws OpenwireException {
        storeContent();
        this.bytesOut = null;
        this.dataIn = null;
        this.dataOut = null;
        this.remainingBytes = -1;
        setReadOnlyBody(true);
    }

    // Lazily opens the write stream, wrapping it in a deflater when the
    // global compression setting is enabled.
    private void initializeWriting() throws OpenwireException {
        checkReadOnlyBody();
        if (this.dataOut == null) {
            this.bytesOut = new ByteArrayOutputStream();
            OutputStream os = bytesOut;
            if (Settings.enable_compression()) {
                compressed = true;
                os = new DeflaterOutputStream(os);
            }
            this.dataOut = new DataOutputStream(os);
        }
    }

    // Guard for the read path: reading is only legal once the body is
    // read-only (after reset()/receive). Mirrors JMS MessageNotReadable
    // semantics, hence the "write-only" wording.
    protected void checkWriteOnlyBody() throws OpenwireException {
        if (!readOnlyBody) {
            throw new OpenwireException("Message body is write-only");
        }
    }

    // Lazily opens the read stream over the stored content, inflating (and
    // buffering, so mark/reset works) when the content was compressed.
    private void initializeReading() throws OpenwireException {
        checkWriteOnlyBody();
        if (this.dataIn == null) {
            Buffer data = getContent();
            if (data == null) {
                data = new Buffer(new byte[] {}, 0, 0);
            }
            InputStream is = new ByteArrayInputStream(data);
            if (isCompressed()) {
                is = new InflaterInputStream(is);
                is = new BufferedInputStream(is);
            }
            this.dataIn = new DataInputStream(is);
        }
    }

    public String toString() {
        return super.toString() + " ActiveMQStreamMessage{ " + "bytesOut = " + bytesOut + ", dataOut = " + dataOut + ", dataIn = " + dataIn + " }";
    }
}
apache-2.0
jonvestal/open-kilda
src-java/base-topology/base-storm-topology/src/main/java/org/openkilda/wfm/error/FlowNotFoundException.java
1066
/* Copyright 2018 Telstra Open Source * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.openkilda.wfm.error; /** * {@code FlowNotFoundException} indicates that a flow can't be found / doesn't exist. */ public class FlowNotFoundException extends Exception { public FlowNotFoundException(String flowId) { super(String.format("Flow %s not found", flowId)); } public FlowNotFoundException(String flowId, String message) { super(String.format("Flow %s not found. %s", flowId, message)); } }
apache-2.0
acartapanis/camel
platforms/spring-boot/components-starter/camel-slack-starter/src/main/java/org/apache/camel/component/slack/springboot/SlackComponentConfiguration.java
2071
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.camel.component.slack.springboot;

import javax.annotation.Generated;
import org.springframework.boot.context.properties.ConfigurationProperties;

// NOTE(review): generated source — to change these properties, update the
// component metadata and re-run camel-package-maven-plugin; do not hand-edit.
/**
 * The slack component allows you to send messages to Slack.
 * 
 * Generated by camel-package-maven-plugin - do not edit this file!
 */
@Generated("org.apache.camel.maven.packaging.SpringBootAutoConfigurationMojo")
@ConfigurationProperties(prefix = "camel.component.slack")
public class SlackComponentConfiguration {

    /**
     * The incoming webhook URL
     */
    private String webhookUrl;
    /**
     * Whether the component should resolve property placeholders on itself when
     * starting. Only properties which are of String type can use property
     * placeholders.
     */
    private Boolean resolvePropertyPlaceholders = true;

    /** Returns the configured incoming webhook URL (may be null). */
    public String getWebhookUrl() {
        return webhookUrl;
    }

    public void setWebhookUrl(String webhookUrl) {
        this.webhookUrl = webhookUrl;
    }

    /** Returns whether property placeholder resolution is enabled (default true). */
    public Boolean getResolvePropertyPlaceholders() {
        return resolvePropertyPlaceholders;
    }

    public void setResolvePropertyPlaceholders(
            Boolean resolvePropertyPlaceholders) {
        this.resolvePropertyPlaceholders = resolvePropertyPlaceholders;
    }
}
apache-2.0
Scauser/j2ee
upwork-jax-ws-client/src/main/java/com/accenture/nes/webservices/ObligationDetails.java
3446
package com.accenture.nes.webservices;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlType;

/**
 * JAXB binding for the {@code obligationDetails} complex type.
 *
 * <p>All four child elements are optional strings. Note that the element
 * name {@code assgimentStatus} (sic) is dictated by the service schema and
 * is referenced by {@code propOrder}, so it must not be renamed here.
 *
 * <pre>
 * &lt;complexType name="obligationDetails">
 *   &lt;complexContent>
 *     &lt;restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
 *       &lt;sequence>
 *         &lt;element name="assgimentStatus" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/>
 *         &lt;element name="assignmentCategory" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/>
 *         &lt;element name="assignmentSubject" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/>
 *         &lt;element name="dueDate" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/>
 *       &lt;/sequence>
 *     &lt;/restriction>
 *   &lt;/complexContent>
 * &lt;/complexType>
 * </pre>
 */
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "obligationDetails", propOrder = {
    "assgimentStatus",
    "assignmentCategory",
    "assignmentSubject",
    "dueDate"
})
public class ObligationDetails {

    // Field names mirror the schema element names (FIELD access type).
    protected String assgimentStatus;
    protected String assignmentCategory;
    protected String assignmentSubject;
    protected String dueDate;

    /**
     * Gets the value of the assgimentStatus property.
     *
     * @return possible object is {@link String }
     */
    public String getAssgimentStatus() {
        return assgimentStatus;
    }

    /**
     * Sets the value of the assgimentStatus property.
     *
     * @param value allowed object is {@link String }
     */
    public void setAssgimentStatus(String value) {
        this.assgimentStatus = value;
    }

    /**
     * Gets the value of the assignmentCategory property.
     *
     * @return possible object is {@link String }
     */
    public String getAssignmentCategory() {
        return assignmentCategory;
    }

    /**
     * Sets the value of the assignmentCategory property.
     *
     * @param value allowed object is {@link String }
     */
    public void setAssignmentCategory(String value) {
        this.assignmentCategory = value;
    }

    /**
     * Gets the value of the assignmentSubject property.
     *
     * @return possible object is {@link String }
     */
    public String getAssignmentSubject() {
        return assignmentSubject;
    }

    /**
     * Sets the value of the assignmentSubject property.
     *
     * @param value allowed object is {@link String }
     */
    public void setAssignmentSubject(String value) {
        this.assignmentSubject = value;
    }

    /**
     * Gets the value of the dueDate property.
     *
     * @return possible object is {@link String }
     */
    public String getDueDate() {
        return dueDate;
    }

    /**
     * Sets the value of the dueDate property.
     *
     * @param value allowed object is {@link String }
     */
    public void setDueDate(String value) {
        this.dueDate = value;
    }
}
apache-2.0
dslam/thoughtsite
src/main/java/com/google/ie/dto/CommentDetail.java
1709
/* Copyright 2010 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS.
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License
 */
package com.google.ie.dto;

import com.google.ie.business.domain.Comment;
import com.google.ie.business.domain.Idea;
import com.google.ie.business.domain.User;

import java.io.Serializable;

/**
 * A data transfer object representing the comment information: the comment
 * itself plus the user who wrote it and the idea it belongs to.
 *
 * @author Charanjeet singh
 */
public class CommentDetail implements Serializable {

    private static final long serialVersionUID = 1L;

    /** Comment object. */
    private Comment comment;
    private User user;
    private Idea idea;

    /** @return the comment */
    public Comment getComment() {
        return comment;
    }

    /** @param comment the comment to set */
    public void setComment(Comment comment) {
        this.comment = comment;
    }

    /** @return the user */
    public User getUser() {
        return user;
    }

    /** @param user the user to set */
    public void setUser(User user) {
        this.user = user;
    }

    /** @return the idea */
    public Idea getIdea() {
        return idea;
    }

    /** @param idea the idea to set */
    public void setIdea(Idea idea) {
        this.idea = idea;
    }
}
apache-2.0
got5/ngCordova-demo
plugins/com.phonegap.plugins.barcodescanner/src/android/LibraryProject/src/com/google/zxing/oned/rss/DataCharacter.java
1029
/* * Copyright 2009 ZXing authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.zxing.oned.rss; public class DataCharacter { private final int value; private final int checksumPortion; public DataCharacter(int value, int checksumPortion) { this.value = value; this.checksumPortion = checksumPortion; } public final int getValue() { return value; } public final int getChecksumPortion() { return checksumPortion; } }
apache-2.0
hermione521/bazel
src/test/java/com/google/devtools/build/lib/rules/cpp/CcCommonTest.java
38880
// Copyright 2015 The Bazel Authors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package com.google.devtools.build.lib.rules.cpp; import static com.google.common.collect.Iterables.getOnlyElement; import static com.google.common.truth.Truth.assertThat; import static com.google.devtools.build.lib.actions.util.ActionsTestUtil.baseArtifactNames; import static com.google.devtools.build.lib.actions.util.ActionsTestUtil.baseNamesOf; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.devtools.build.lib.actions.Action; import com.google.devtools.build.lib.actions.Artifact; import com.google.devtools.build.lib.actions.util.ActionsTestUtil; import com.google.devtools.build.lib.analysis.AnalysisUtils; import com.google.devtools.build.lib.analysis.BaseRuleClasses; import com.google.devtools.build.lib.analysis.ConfiguredRuleClassProvider; import com.google.devtools.build.lib.analysis.ConfiguredTarget; import com.google.devtools.build.lib.analysis.OutputGroupProvider; import com.google.devtools.build.lib.analysis.RuleDefinition; import com.google.devtools.build.lib.analysis.RuleDefinitionEnvironment; import com.google.devtools.build.lib.analysis.config.BuildConfiguration; import com.google.devtools.build.lib.analysis.config.ConfigurationFactory; import 
com.google.devtools.build.lib.analysis.mock.BazelAnalysisMock; import com.google.devtools.build.lib.analysis.util.AnalysisMock; import com.google.devtools.build.lib.analysis.util.BuildViewTestCase; import com.google.devtools.build.lib.bazel.rules.BazelRuleClassProvider; import com.google.devtools.build.lib.bazel.rules.BazelToolchainLookup; import com.google.devtools.build.lib.cmdline.Label; import com.google.devtools.build.lib.cmdline.PackageIdentifier; import com.google.devtools.build.lib.cmdline.RepositoryName; import com.google.devtools.build.lib.flags.InvocationPolicyEnforcer; import com.google.devtools.build.lib.packages.RuleClass; import com.google.devtools.build.lib.rules.ToolchainLookup; import com.google.devtools.build.lib.testutil.MoreAsserts; import com.google.devtools.build.lib.util.FileType; import com.google.devtools.build.lib.util.OsUtils; import com.google.devtools.build.lib.vfs.FileSystemUtils; import com.google.devtools.build.lib.vfs.ModifiedFileSet; import com.google.devtools.build.lib.vfs.PathFragment; import com.google.devtools.build.lib.view.config.crosstool.CrosstoolConfig; import java.util.Arrays; import java.util.List; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; /** A test for {@link CcCommon}. */ @RunWith(JUnit4.class) public class CcCommonTest extends BuildViewTestCase { private static final String STATIC_LIB = "statically/libstatically.a"; @Before public final void createBuildFiles() throws Exception { // Having lots of setUp code leads to bad running time. Don't add anything here! 
scratch.file("empty/BUILD", "cc_library(name = 'emptylib')", "cc_binary(name = 'emptybinary')"); scratch.file("foo/BUILD", "cc_library(name = 'foo',", " srcs = ['foo.cc'])"); scratch.file("bar/BUILD", "cc_library(name = 'bar',", " srcs = ['bar.cc'])"); } @Test public void testSameCcFileTwice() throws Exception { scratch.file( "a/BUILD", "cc_library(name='a', srcs=['a1', 'a2'])", "filegroup(name='a1', srcs=['a.cc'])", "filegroup(name='a2', srcs=['a.cc'])"); reporter.removeHandler(failFastHandler); getConfiguredTarget("//a:a"); assertContainsEvent("Artifact 'a/a.cc' is duplicated"); } @Test public void testSameHeaderFileTwice() throws Exception { scratch.file( "a/BUILD", "package(features=['parse_headers'])", "cc_library(name='a', srcs=['a1', 'a2', 'a.cc'])", "filegroup(name='a1', srcs=['a.h'])", "filegroup(name='a2', srcs=['a.h'])"); reporter.removeHandler(failFastHandler); getConfiguredTarget("//a:a"); assertNoEvents(); } @Test public void testEmptyLibrary() throws Exception { ConfiguredTarget emptylib = getConfiguredTarget("//empty:emptylib"); // We create .a for empty libraries, for simplicity (in Blaze). // But we avoid creating .so files for empty libraries, // because those have a potentially significant run-time startup cost. 
if (emptyShouldOutputStaticLibrary()) { assertEquals("libemptylib.a", baseNamesOf(getFilesToBuild(emptylib))); } else { assertThat(getFilesToBuild(emptylib)).isEmpty(); } assertTrue( emptylib .getProvider(CcExecutionDynamicLibrariesProvider.class) .getExecutionDynamicLibraryArtifacts() .isEmpty()); } protected boolean emptyShouldOutputStaticLibrary() { return !getAnalysisMock().isThisBazel(); } @Test public void testEmptyBinary() throws Exception { ConfiguredTarget emptybin = getConfiguredTarget("//empty:emptybinary"); assertEquals( "emptybinary" + OsUtils.executableExtension(), baseNamesOf(getFilesToBuild(emptybin))); } private List<String> getCopts(String target) throws Exception { ConfiguredTarget cLib = getConfiguredTarget(target); Artifact object = getOnlyElement(getOutputGroup(cLib, OutputGroupProvider.FILES_TO_COMPILE)); CppCompileAction compileAction = (CppCompileAction) getGeneratingAction(object); return compileAction.getCompilerOptions(); } @Test public void testCopts() throws Exception { scratch.file( "copts/BUILD", "cc_library(name = 'c_lib',", " srcs = ['foo.cc'],", " copts = [ '-Wmy-warning', '-frun-faster' ])"); MoreAsserts.assertContainsSublist(getCopts("//copts:c_lib"), "-Wmy-warning", "-frun-faster"); } @Test public void testCoptsTokenization() throws Exception { scratch.file( "copts/BUILD", "cc_library(name = 'c_lib',", " srcs = ['foo.cc'],", " copts = ['-Wmy-warning -frun-faster'])"); List<String> copts = getCopts("//copts:c_lib"); MoreAsserts.assertContainsSublist(copts, "-Wmy-warning", "-frun-faster"); assertContainsEvent("each item in the list should contain only one option"); } @Test public void testCoptsNoTokenization() throws Exception { scratch.file( "copts/BUILD", "package(features = ['no_copts_tokenization'])", "cc_library(name = 'c_lib',", " srcs = ['foo.cc'],", " copts = ['-Wmy-warning -frun-faster'])"); List<String> copts = getCopts("//copts:c_lib"); MoreAsserts.assertContainsSublist(copts, "-Wmy-warning -frun-faster"); } /** * Test 
that we handle ".a" files in cc_library srcs correctly when linking dynamically. In * particular, if srcs contains only the ".a" file for a library, with no corresponding ".so", * then we need to link in the ".a" file even when we're linking dynamically. If srcs contains * both ".a" and ".so" then we should only link in the ".so". */ @Test public void testArchiveInCcLibrarySrcs() throws Exception { useConfiguration("--cpu=k8"); ConfiguredTarget archiveInSrcsTest = scratchConfiguredTarget( "archive_in_srcs", "archive_in_srcs_test", "cc_test(name = 'archive_in_srcs_test',", " srcs = ['archive_in_srcs_test.cc'],", " deps = [':archive_in_srcs_lib'])", "cc_library(name = 'archive_in_srcs_lib',", " srcs = ['libstatic.a', 'libboth.a', 'libboth.so'])"); List<String> artifactNames = baseArtifactNames(getLinkerInputs(archiveInSrcsTest)); assertThat(artifactNames).containsAllOf("libboth.so", "libstatic.a"); assertThat(artifactNames).doesNotContain("libboth.a"); } private Iterable<Artifact> getLinkerInputs(ConfiguredTarget target) { Artifact executable = getExecutable(target); CppLinkAction linkAction = (CppLinkAction) getGeneratingAction(executable); return LinkerInputs.toLibraryArtifacts(linkAction.getLinkCommandLine().getLinkerInputs()); } @Test public void testDylibLibrarySuffixIsStripped() throws Exception { ConfiguredTarget archiveInSrcsTest = scratchConfiguredTarget( "archive_in_src_darwin", "archive_in_srcs", "cc_binary(name = 'archive_in_srcs',", " srcs = ['libarchive.34.dylib'])"); Artifact executable = getExecutable(archiveInSrcsTest); CppLinkAction linkAction = (CppLinkAction) getGeneratingAction(executable); assertThat(linkAction.getLinkCommandLine().toString()).contains(" -larchive.34 "); } @Test public void testLinkStaticStatically() throws Exception { ConfiguredTarget statically = scratchConfiguredTarget( "statically", "statically", "cc_library(name = 'statically',", " srcs = ['statically.cc'],", " linkstatic=1)"); assertTrue( statically 
.getProvider(CcExecutionDynamicLibrariesProvider.class) .getExecutionDynamicLibraryArtifacts() .isEmpty()); Artifact staticallyDotA = getOnlyElement(getFilesToBuild(statically)); assertThat(getGeneratingAction(staticallyDotA)).isInstanceOf(CppLinkAction.class); PathFragment dotAPath = staticallyDotA.getExecPath(); assertThat(dotAPath.getPathString()).endsWith(STATIC_LIB); } @Test public void testIsolatedDefines() throws Exception { ConfiguredTarget isolatedDefines = scratchConfiguredTarget( "isolated_defines", "defineslib", "cc_library(name = 'defineslib',", " srcs = ['defines.cc'],", " defines = ['FOO', 'BAR'])"); assertThat(isolatedDefines.getProvider(CppCompilationContext.class).getDefines()) .containsExactly("FOO", "BAR") .inOrder(); } @Test public void testStartEndLib() throws Exception { getAnalysisMock().ccSupport().setupCrosstool(mockToolsConfig, CrosstoolConfig.CToolchain.newBuilder().setSupportsStartEndLib(true).buildPartial()); useConfiguration( // Prevent Android from trying to setup ARM crosstool by forcing it on system cpu. 
"--fat_apk_cpu=" + CrosstoolConfigurationHelper.defaultCpu(), "--start_end_lib"); scratch.file( "test/BUILD", "cc_library(name='lib',", " srcs=['lib.c'])", "cc_binary(name='bin',", " srcs=['bin.c'])"); ConfiguredTarget target = getConfiguredTarget("//test:bin"); CppLinkAction action = (CppLinkAction) getGeneratingAction(getExecutable(target)); for (Artifact input : action.getInputs()) { String name = input.getFilename(); assertTrue(!CppFileTypes.ARCHIVE.matches(name) && !CppFileTypes.PIC_ARCHIVE.matches(name)); } } @Test public void testTempsWithDifferentExtensions() throws Exception { useConfiguration("--cpu=k8", "--save_temps"); scratch.file( "ananas/BUILD", "cc_library(name='ananas',", " srcs=['1.c', '2.cc', '3.cpp', '4.S', '5.h', '6.hpp'])"); ConfiguredTarget ananas = getConfiguredTarget("//ananas:ananas"); Iterable<String> temps = ActionsTestUtil.baseArtifactNames(getOutputGroup(ananas, OutputGroupProvider.TEMP_FILES)); assertThat(temps) .containsExactly( "1.pic.i", "1.pic.s", "2.pic.ii", "2.pic.s", "3.pic.ii", "3.pic.s"); } @Test public void testTempsForCc() throws Exception { for (String cpu : new String[] {"k8", "piii"}) { useConfiguration("--cpu=" + cpu, "--save_temps"); ConfiguredTarget foo = getConfiguredTarget("//foo:foo"); List<String> temps = ActionsTestUtil.baseArtifactNames(getOutputGroup(foo, OutputGroupProvider.TEMP_FILES)); if (getTargetConfiguration().getFragment(CppConfiguration.class).usePicForBinaries()) { assertThat(temps).named(cpu).containsExactly("foo.pic.ii", "foo.pic.s"); } else { assertThat(temps).named(cpu).containsExactly("foo.ii", "foo.s"); } } } @Test public void testTempsForC() throws Exception { scratch.file("csrc/BUILD", "cc_library(name='csrc', srcs=['foo.c'])"); for (String cpu : new String[] {"k8", "piii"}) { useConfiguration("--cpu=" + cpu, "--save_temps"); // Now try with a .c source file. 
ConfiguredTarget csrc = getConfiguredTarget("//csrc:csrc"); List<String> temps = ActionsTestUtil.baseArtifactNames(getOutputGroup(csrc, OutputGroupProvider.TEMP_FILES)); if (getTargetConfiguration().getFragment(CppConfiguration.class).usePicForBinaries()) { assertThat(temps).named(cpu).containsExactly("foo.pic.i", "foo.pic.s"); } else { assertThat(temps).named(cpu).containsExactly("foo.i", "foo.s"); } } } @Test public void testAlwaysLinkYieldsLo() throws Exception { ConfiguredTarget alwaysLink = scratchConfiguredTarget( "always_link", "always_link", "cc_library(name = 'always_link',", " alwayslink = 1,", " srcs = ['always_link.cc'])"); assertThat(baseNamesOf(getFilesToBuild(alwaysLink))).contains("libalways_link.lo"); } /** * Tests that nocopts= "-fPIC" takes '-fPIC' out of a compile invocation even if the crosstool * requires fPIC compilation (i.e. nocopts overrides crosstool settings on a rule-specific * basis). */ @Test public void testNoCoptfPicOverride() throws Exception { getAnalysisMock().ccSupport().setupCrosstool(mockToolsConfig, CrosstoolConfig.CToolchain.newBuilder().setNeedsPic(true).buildPartial()); useConfiguration( // Prevent Android from trying to setup ARM crosstool by forcing it on system cpu. 
"--fat_apk_cpu=" + CrosstoolConfigurationHelper.defaultCpu()); scratch.file( "a/BUILD", "cc_binary(name = 'pic',", " srcs = [ 'binary.cc' ])", "cc_binary(name = 'libpic.so',", " srcs = [ 'binary.cc' ])", "cc_library(name = 'piclib',", " srcs = [ 'library.cc' ])", "cc_binary(name = 'nopic',", " srcs = [ 'binary.cc' ],", " nocopts = '-fPIC')", "cc_binary(name = 'libnopic.so',", " srcs = [ 'binary.cc' ],", " nocopts = '-fPIC')", "cc_library(name = 'nopiclib',", " srcs = [ 'library.cc' ],", " nocopts = '-fPIC')"); assertThat(getCppCompileAction("//a:pic").getArgv()).contains("-fPIC"); assertThat(getCppCompileAction("//a:libpic.so").getArgv()).contains("-fPIC"); assertThat(getCppCompileAction("//a:piclib").getArgv()).contains("-fPIC"); assertThat(getCppCompileAction("//a:nopic").getArgv()).doesNotContain("-fPIC"); assertThat(getCppCompileAction("//a:libnopic.so").getArgv()).doesNotContain("-fPIC"); assertThat(getCppCompileAction("//a:nopiclib").getArgv()).doesNotContain("-fPIC"); } @Test public void testPicModeAssembly() throws Exception { useConfiguration("--cpu=k8"); scratch.file("a/BUILD", "cc_library(name='preprocess', srcs=['preprocess.S'])"); List<String> argv = getCppCompileAction("//a:preprocess").getArgv(); assertThat(argv).contains("-fPIC"); } private CppCompileAction getCppCompileAction(String label) throws Exception { ConfiguredTarget target = getConfiguredTarget(label); List<CppCompileAction> compilationSteps = actionsTestUtil() .findTransitivePrerequisitesOf( getFilesToBuild(target).iterator().next(), CppCompileAction.class); return compilationSteps.get(0); } @Test public void testIsolatedIncludes() throws Exception { // Tests the (immediate) effect of declaring the includes attribute on a // cc_library. 
scratch.file( "bang/BUILD", "cc_library(name = 'bang',", " srcs = ['bang.cc'],", " includes = ['bang_includes'])"); ConfiguredTarget foo = getConfiguredTarget("//bang:bang"); String includesRoot = "bang/bang_includes"; assertThat(foo.getProvider(CppCompilationContext.class).getSystemIncludeDirs()) .containsAllOf( new PathFragment(includesRoot), targetConfig.getGenfilesFragment().getRelative(includesRoot)); } @Test public void testUseIsystemForIncludes() throws Exception { // Tests the effect of --use_isystem_for_includes. scratch.file( "no_includes/BUILD", "cc_library(name = 'no_includes',", " srcs = ['no_includes.cc'])"); ConfiguredTarget noIncludes = getConfiguredTarget("//no_includes:no_includes"); scratch.file( "bang/BUILD", "cc_library(name = 'bang',", " srcs = ['bang.cc'],", " includes = ['bang_includes'])"); ConfiguredTarget foo = getConfiguredTarget("//bang:bang"); String includesRoot = "bang/bang_includes"; List<PathFragment> expected = new ImmutableList.Builder<PathFragment>() .addAll(noIncludes.getProvider(CppCompilationContext.class).getSystemIncludeDirs()) .add(new PathFragment(includesRoot)) .add(targetConfig.getGenfilesFragment().getRelative(includesRoot)) .build(); assertThat(foo.getProvider(CppCompilationContext.class).getSystemIncludeDirs()) .containsExactlyElementsIn(expected); } @Test public void testCcTestDisallowsAlwaysLink() throws Exception { scratch.file( "cc/common/BUILD", "cc_library(name = 'lib1',", " srcs = ['foo1.cc'],", " deps = ['//left'])", "", "cc_test(name = 'testlib',", " deps = [':lib1'],", " alwayslink=1)"); reporter.removeHandler(failFastHandler); getPackageManager().getPackage(reporter, PackageIdentifier.createInMainRepo("cc/common")); assertContainsEvent( "//cc/common:testlib: no such attribute 'alwayslink'" + " in 'cc_test' rule"); } @Test public void testCcTestBuiltWithFissionHasDwp() throws Exception { // Tests that cc_tests built statically and with Fission will have the .dwp file // in their runfiles. 
useConfiguration("--cpu=k8", "--build_test_dwp", "--dynamic_mode=off", "--fission=yes"); ConfiguredTarget target = scratchConfiguredTarget( "mypackage", "mytest", "cc_test(name = 'mytest', ", " srcs = ['mytest.cc'])"); Iterable<Artifact> runfiles = collectRunfiles(target); assertThat(baseArtifactNames(runfiles)).contains("mytest.dwp"); } @Test public void testCcLibraryBadIncludesWarnedAndIgnored() throws Exception { checkWarning( "badincludes", "flaky_lib", // message: "in includes attribute of cc_library rule //badincludes:flaky_lib: " + "ignoring invalid absolute path '//third_party/procps/proc'", // build file: "cc_library(name = 'flaky_lib',", " srcs = [ 'ok.cc' ],", " includes = [ '//third_party/procps/proc' ])"); } @Test public void testCcLibraryUplevelIncludesWarned() throws Exception { checkWarning( "third_party/uplevel", "lib", // message: "in includes attribute of cc_library rule //third_party/uplevel:lib: '../bar' resolves to " + "'third_party/bar' not below the relative path of its package 'third_party/uplevel'. " + "This will be an error in the future", // build file: "licenses(['unencumbered'])", "cc_library(name = 'lib',", " srcs = ['foo.cc'],", " includes = ['../bar'])"); } @Test public void testCcLibraryNonThirdPartyIncludesWarned() throws Exception { if (getAnalysisMock().isThisBazel()) { return; } checkWarning( "topdir", "lib", // message: "in includes attribute of cc_library rule //topdir:lib: './' resolves to 'topdir' not " + "in 'third_party'. 
This will be an error in the future", // build file: "cc_library(name = 'lib',", " srcs = ['foo.cc'],", " includes = ['./'])"); } @Test public void testCcLibraryThirdPartyIncludesNotWarned() throws Exception { eventCollector.clear(); ConfiguredTarget target = scratchConfiguredTarget( "third_party/pkg", "lib", "licenses(['unencumbered'])", "cc_library(name = 'lib',", " srcs = ['foo.cc'],", " includes = ['./'])"); assertThat(view.hasErrors(target)).isFalse(); assertNoEvents(); } @Test public void testCcLibraryExternalIncludesNotWarned() throws Exception { eventCollector.clear(); FileSystemUtils.appendIsoLatin1( scratch.resolve("WORKSPACE"), "local_repository(", " name = 'pkg',", " path = '/foo')"); getSkyframeExecutor() .invalidateFilesUnderPathForTesting( eventCollector, new ModifiedFileSet.Builder().modify(new PathFragment("WORKSPACE")).build(), rootDirectory); FileSystemUtils.createDirectoryAndParents(scratch.resolve("/foo/bar")); scratch.file("/foo/WORKSPACE", "workspace(name = 'pkg')"); scratch.file( "/foo/bar/BUILD", "cc_library(name = 'lib',", " srcs = ['foo.cc'],", " includes = ['./'])"); Label label = Label.parseAbsolute("@pkg//bar:lib"); ConfiguredTarget target = view.getConfiguredTargetForTesting(reporter, label, targetConfig); assertThat(view.hasErrors(target)).isFalse(); assertNoEvents(); } @Test public void testCcLibraryRootIncludesError() throws Exception { checkError( "third_party/root", "lib", // message: "in includes attribute of cc_library rule //third_party/root:lib: '../..' resolves to the " + "workspace root, which would allow this rule and all of its transitive dependents to " + "include any file in your workspace. 
Please include only what you need", // build file: "licenses(['unencumbered'])", "cc_library(name = 'lib',", " srcs = ['foo.cc'],", " includes = ['../..'])"); } @Test public void testStaticallyLinkedBinaryNeedsSharedObject() throws Exception { scratch.file( "third_party/sophos_av_pua/BUILD", "licenses(['notice'])", "cc_library(name = 'savi',", " srcs = [ 'lib/libsavi.so' ])"); ConfiguredTarget wrapsophos = scratchConfiguredTarget( "quality/malware/support", "wrapsophos", "cc_library(name = 'sophosengine',", " srcs = [ 'sophosengine.cc' ],", " deps = [ '//third_party/sophos_av_pua:savi' ])", "cc_binary(name = 'wrapsophos',", " srcs = [ 'wrapsophos.cc' ],", " deps = [ ':sophosengine' ],", " linkstatic=1)"); List<String> artifactNames = baseArtifactNames(getLinkerInputs(wrapsophos)); assertThat(artifactNames).contains("libsavi.so"); } @Test public void testExpandLabelInLinkoptsAgainstSrc() throws Exception { scratch.file( "coolthing/BUILD", "genrule(name = 'build-that',", " srcs = [ 'foo' ],", " outs = [ 'nicelib.a' ],", " cmd = 'cat $< > $@')"); // In reality the linkopts might contain several externally-provided // '.a' files with cyclic dependencies amongst them, but in this test // it suffices to show that one label in linkopts was resolved. 
scratch.file( "myapp/BUILD", "cc_binary(name = 'myapp',", " srcs = [ '//coolthing:nicelib.a' ],", " linkopts = [ '//coolthing:nicelib.a' ])"); ConfiguredTarget theLib = getConfiguredTarget("//coolthing:build-that"); ConfiguredTarget theApp = getConfiguredTarget("//myapp:myapp"); // make sure we did not print warnings about the linkopt assertNoEvents(); // make sure the binary is dependent on the static lib Action linkAction = getGeneratingAction(getOnlyElement(getFilesToBuild(theApp))); ImmutableList<Artifact> filesToBuild = ImmutableList.copyOf(getFilesToBuild(theLib)); assertTrue(ImmutableSet.copyOf(linkAction.getInputs()).containsAll(filesToBuild)); } @Test public void testMissingLabelInLinkopts() throws Exception { scratch.file( "linklow/BUILD", "genrule(name = 'linklow_linker_script',", " srcs = [ 'default_linker_script' ],", " tools = [ 'default_linker_script' ],", " outs = [ 'linklow.lds' ],", " cmd = 'cat $< > $@')"); checkError( "ocean/scoring2", "ms-ascorer", // error: "could not resolve label '//linklow:linklow_linker_script'", "cc_binary(name = 'ms-ascorer',", " srcs = [ ],", " deps = [ ':ascorer-servlet'],", " linkopts = [ '-static', '-Xlinker', '-script', '//linklow:linklow_linker_script'])", "cc_library(name = 'ascorer-servlet')"); } @Test public void testCcLibraryWithDashStatic() throws Exception { checkWarning( "badlib", "lib_with_dash_static", // message: "in linkopts attribute of cc_library rule //badlib:lib_with_dash_static: " + "Using '-static' here won't work. 
Did you mean to use 'linkstatic=1' instead?", // build file: "cc_library(name = 'lib_with_dash_static',", " srcs = [ 'ok.cc' ],", " linkopts = [ '-static' ])"); } @Test public void testStampTests() throws Exception { scratch.file( "test/BUILD", "cc_test(name ='a', srcs = ['a.cc'])", "cc_test(name ='b', srcs = ['b.cc'], stamp = 0)", "cc_test(name ='c', srcs = ['c.cc'], stamp = 1)", "cc_binary(name ='d', srcs = ['d.cc'])", "cc_binary(name ='e', srcs = ['e.cc'], stamp = 0)", "cc_binary(name ='f', srcs = ['f.cc'], stamp = 1)"); assertStamping(false, "//test:a"); assertStamping(false, "//test:b"); assertStamping(true, "//test:c"); assertStamping(true, "//test:d"); assertStamping(false, "//test:e"); assertStamping(true, "//test:f"); useConfiguration("--stamp"); assertStamping(false, "//test:a"); assertStamping(false, "//test:b"); assertStamping(true, "//test:c"); assertStamping(true, "//test:d"); assertStamping(false, "//test:e"); assertStamping(true, "//test:f"); useConfiguration("--nostamp"); assertStamping(false, "//test:a"); assertStamping(false, "//test:b"); assertStamping(true, "//test:c"); assertStamping(false, "//test:d"); assertStamping(false, "//test:e"); assertStamping(true, "//test:f"); } private void assertStamping(boolean enabled, String label) throws Exception { assertEquals( enabled, AnalysisUtils.isStampingEnabled(getRuleContext(getConfiguredTarget(label)))); } @Test public void testIncludeRelativeHeadersAboveExecRoot() throws Exception { checkError( "test", "bad_relative_include", "Path references a path above the execution root.", "cc_library(name='bad_relative_include', srcs=[], includes=['../..'])"); } @Test public void testIncludeAbsoluteHeaders() throws Exception { checkWarning( "test", "bad_absolute_include", "ignoring invalid absolute path", "cc_library(name='bad_absolute_include', srcs=[], includes=['/usr/include/'])"); } @Test public void testSelectPreferredLibrariesInvariant() { // All combinations of libraries: // a - static+pic+shared // b - 
static+pic // c - static+shared // d - static // e - pic+shared // f - pic // g - shared CcLinkingOutputs linkingOutputs = CcLinkingOutputs.builder() .addStaticLibraries( ImmutableList.copyOf( LinkerInputs.opaqueLibrariesToLink(ArtifactCategory.STATIC_LIBRARY, Arrays.asList( getSourceArtifact("liba.a"), getSourceArtifact("libb.a"), getSourceArtifact("libc.a"), getSourceArtifact("libd.a"))))) .addPicStaticLibraries( ImmutableList.copyOf( LinkerInputs.opaqueLibrariesToLink(ArtifactCategory.STATIC_LIBRARY, Arrays.asList( getSourceArtifact("liba.pic.a"), getSourceArtifact("libb.pic.a"), getSourceArtifact("libe.pic.a"), getSourceArtifact("libf.pic.a"))))) .addDynamicLibraries( ImmutableList.copyOf( LinkerInputs.opaqueLibrariesToLink(ArtifactCategory.DYNAMIC_LIBRARY, Arrays.asList( getSourceArtifact("liba.so"), getSourceArtifact("libc.so"), getSourceArtifact("libe.so"), getSourceArtifact("libg.so"))))) .build(); // Whether linkShared is true or false, this should return the identical results. List<Artifact> sharedLibraries1 = FileType.filterList( LinkerInputs.toLibraryArtifacts(linkingOutputs.getPreferredLibraries(true, false)), CppFileTypes.SHARED_LIBRARY); List<Artifact> sharedLibraries2 = FileType.filterList( LinkerInputs.toLibraryArtifacts(linkingOutputs.getPreferredLibraries(true, true)), CppFileTypes.SHARED_LIBRARY); assertEquals(sharedLibraries1, sharedLibraries2); } /** Tests that shared libraries of the form "libfoo.so.1.2" are permitted within "srcs". 
*/ @Test public void testVersionedSharedLibrarySupport() throws Exception { ConfiguredTarget target = scratchConfiguredTarget( "mypackage", "mybinary", "cc_binary(name = 'mybinary',", " srcs = ['mybinary.cc'],", " deps = [':mylib'])", "cc_library(name = 'mylib',", " srcs = ['libshared.so', 'libshared.so.1.1', 'foo.cc'])"); List<String> artifactNames = baseArtifactNames(getLinkerInputs(target)); assertThat(artifactNames).containsAllOf("libshared.so", "libshared.so.1.1"); } @Test public void testNoHeaderInHdrsWarning() throws Exception { checkWarning( "hdrs_filetypes", "foo", "in hdrs attribute of cc_library rule //hdrs_filetypes:foo: file 'foo.a' " + "from target '//hdrs_filetypes:foo.a' is not allowed in hdrs", "cc_library(name = 'foo',", " srcs = [],", " hdrs = ['foo.a'])"); } @Test public void testLibraryInHdrs() throws Exception { scratchConfiguredTarget("a", "a", "cc_library(name='a', srcs=['a.cc'], hdrs=[':b'])", "cc_library(name='b', srcs=['b.cc'])"); } @Test public void testExpandedLinkopts() throws Exception { scratch.file( "a/BUILD", "genrule(name = 'linker', cmd='generate', outs=['a.lds'])", "cc_binary(", " name='bin',", " srcs=['b.cc'],", " linkopts=['-Wl,@$(location a.lds)'],", " deps=['a.lds'])"); ConfiguredTarget target = getConfiguredTarget("//a:bin"); CppLinkAction action = (CppLinkAction) getGeneratingAction(getOnlyElement(getFilesToBuild(target))); assertThat(action.getLinkCommandLine().getLinkopts()).containsExactly( String.format("-Wl,@%s/genfiles/a/a.lds", getTargetConfiguration().getOutputDirectory( RepositoryName.MAIN).getExecPath().getPathString())); } @Test public void testIncludeManglingSmoke() throws Exception { scratch.file( "third_party/a/BUILD", "licenses(['notice'])", "cc_library(name='a', hdrs=['v1/b/c.h'], strip_include_prefix='v1', include_prefix='lib')"); ConfiguredTarget lib = getConfiguredTarget("//third_party/a"); CppCompilationContext context = lib.getProvider(CppCompilationContext.class); 
assertThat(ActionsTestUtil.prettyArtifactNames(context.getDeclaredIncludeSrcs())) .containsExactly("third_party/a/_virtual_includes/a/lib/b/c.h"); assertThat(context.getIncludeDirs()).containsExactly( getTargetConfiguration().getBinFragment().getRelative("third_party/a/_virtual_includes/a")); } @Test public void testUpLevelReferencesInIncludeMangling() throws Exception { scratch.file( "third_party/a/BUILD", "licenses(['notice'])", "cc_library(name='sip', srcs=['a.h'], strip_include_prefix='a/../b')", "cc_library(name='ip', srcs=['a.h'], include_prefix='a/../b')", "cc_library(name='ipa', srcs=['a.h'], include_prefix='/foo')"); reporter.removeHandler(failFastHandler); getConfiguredTarget("//third_party/a:sip"); assertContainsEvent("should not contain uplevel references"); eventCollector.clear(); getConfiguredTarget("//third_party/a:ip"); assertContainsEvent("should not contain uplevel references"); eventCollector.clear(); getConfiguredTarget("//third_party/a:ipa"); assertContainsEvent("should be a relative path"); } @Test public void testAbsoluteAndRelativeStripPrefix() throws Exception { scratch.file("third_party/a/BUILD", "licenses(['notice'])", "cc_library(name='relative', hdrs=['v1/b.h'], strip_include_prefix='v1')", "cc_library(name='absolute', hdrs=['v1/b.h'], strip_include_prefix='/third_party')"); CppCompilationContext relative = getConfiguredTarget("//third_party/a:relative") .getProvider(CppCompilationContext.class); CppCompilationContext absolute = getConfiguredTarget("//third_party/a:absolute") .getProvider(CppCompilationContext.class); assertThat(ActionsTestUtil.prettyArtifactNames(relative.getDeclaredIncludeSrcs())) .containsExactly("third_party/a/_virtual_includes/relative/b.h"); assertThat(ActionsTestUtil.prettyArtifactNames(absolute.getDeclaredIncludeSrcs())) .containsExactly("third_party/a/_virtual_includes/absolute/a/v1/b.h"); } @Test public void testArtifactNotUnderStripPrefix() throws Exception { scratch.file("third_party/a/BUILD", 
"licenses(['notice'])", "cc_library(name='a', hdrs=['v1/b.h'], strip_include_prefix='v2')"); reporter.removeHandler(failFastHandler); getConfiguredTarget("//third_party/a:a"); assertContainsEvent( "header 'third_party/a/v1/b.h' is not under the specified strip prefix 'third_party/a/v2'"); } /** * A {@code toolchain_lookup} rule for testing that only supports C++. */ public static class OnlyCppToolchainLookup extends ToolchainLookup { public OnlyCppToolchainLookup() { super( ImmutableMap.<Label, Class<? extends BuildConfiguration.Fragment>>of(), ImmutableMap.<Label, ImmutableMap<String, String>>of()); } } /** * A {@code toolchain_lookup} rule for testing that only supports C++. */ public static class OnlyCppToolchainLookupRule implements RuleDefinition { @Override public RuleClass build(RuleClass.Builder builder, RuleDefinitionEnvironment environment) { return builder // This means that *every* toolchain_lookup rule depends on every configuration fragment // that contributes Make variables, regardless of which one it is. .requiresConfigurationFragments(CppConfiguration.class) .removeAttribute("licenses") .removeAttribute("distribs") .build(); } @Override public Metadata getMetadata() { return Metadata.builder() .name("toolchain_lookup") .factoryClass(BazelToolchainLookup.class) .ancestors(BaseRuleClasses.BaseRule.class) .build(); } } /** * Tests for the case where there are only C++ rules defined. 
*/ @RunWith(JUnit4.class) public static class OnlyCppRules extends CcCommonTest { @Override protected AnalysisMock getAnalysisMock() { final AnalysisMock original = BazelAnalysisMock.INSTANCE; return new AnalysisMock.Delegate(original) { @Override public ConfigurationFactory createConfigurationFactory() { return new ConfigurationFactory( createRuleClassProvider().getConfigurationCollectionFactory(), createRuleClassProvider().getConfigurationFragments()); } @Override public ConfiguredRuleClassProvider createRuleClassProvider() { ConfiguredRuleClassProvider.Builder builder = new ConfiguredRuleClassProvider.Builder(); builder.setToolsRepository("@bazel_tools"); BazelRuleClassProvider.BAZEL_SETUP.init(builder); BazelRuleClassProvider.CORE_RULES.init(builder); BazelRuleClassProvider.CORE_WORKSPACE_RULES.init(builder); BazelRuleClassProvider.BASIC_RULES.init(builder); BazelRuleClassProvider.CPP_RULES.init(builder); builder.addRuleDefinition(new OnlyCppToolchainLookupRule()); return builder.build(); } @Override public InvocationPolicyEnforcer getInvocationPolicyEnforcer() { return new InvocationPolicyEnforcer(null); } @Override public boolean isThisBazel() { return true; } }; } @Override public void testNoCoptfPicOverride() throws Exception { // Test sets --fat_apk_cpu, which doesn't exist. } @Override public void testStartEndLib() throws Exception { // Test sets --fat_apk_cpu, which doesn't exist. } } }
apache-2.0
lichongxin/hbase-snapshot
src/test/java/org/apache/hadoop/hbase/regionserver/DisabledTestRegionServerExit.java
7801
/**
 * Copyright 2007 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.Collection;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseClusterTestCase;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.LocalHBaseCluster;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JVMClusterUtil;

/**
 * Tests region server failover when a region server exits both cleanly and
 * when it aborts.
 *
 * Each test kills (or cleanly stops) the region server that is serving the
 * META region, then verifies from a separate thread that META gets reassigned
 * and that previously written user data is still reachable afterwards.
 * The "Disabled" prefix indicates this suite is not run as part of the normal
 * test cycle (noted below as flaky on the build machines).
 */
public class DisabledTestRegionServerExit extends HBaseClusterTestCase {
  final Log LOG = LogFactory.getLog(this.getClass().getName());
  // Handle to the user table created by the test; read later by the
  // verification thread to confirm data survived the failover.
  HTable table;

  /** constructor */
  public DisabledTestRegionServerExit() {
    super(2); // start a mini cluster with two region servers
    conf.setInt("ipc.client.connect.max.retries", 5); // reduce ipc retries
    conf.setInt("ipc.client.timeout", 10000); // and ipc timeout
    conf.setInt("hbase.client.pause", 10000); // increase client timeout
    conf.setInt("hbase.client.retries.number", 10); // increase HBase retries
  }

  /**
   * Test abort of region server.
   * @throws IOException
   */
  public void testAbort() throws IOException {
    // When the META table can be opened, the region servers are running
    new HTable(conf, HConstants.META_TABLE_NAME);
    // Create table and add a row.
    final String tableName = getName();
    byte [] row = createTableAndAddRow(tableName);
    // Start up a new region server to take over serving of root and meta
    // after we shut down the current meta/root host.
    this.cluster.startRegionServer();
    // Now abort the meta region server and wait for it to go down and come back
    stopOrAbortMetaRegionServer(true);
    // Verify that everything is back up.
    LOG.info("Starting up the verification thread for " + getName());
    Thread t = startVerificationThread(tableName, row);
    t.start();
    // threadDumpingJoin (from the base class) joins the thread while
    // periodically dumping stacks, to help diagnose hangs.
    threadDumpingJoin(t);
  }

  /**
   * Test clean exit (as opposed to abort) of region server.
   * Test is flakey up on hudson.  Needs work.
   * @throws IOException
   */
  public void testCleanExit() throws IOException {
    // When the META table can be opened, the region servers are running
    new HTable(this.conf, HConstants.META_TABLE_NAME);
    // Create table and add a row.
    final String tableName = getName();
    byte [] row = createTableAndAddRow(tableName);
    // Start up a new region server to take over serving of root and meta
    // after we shut down the current meta/root host.
    this.cluster.startRegionServer();
    // Now stop (clean shutdown, not abort) the meta region server and wait
    // for the region to come back.
    stopOrAbortMetaRegionServer(false);
    // Verify that everything is back up.
    LOG.info("Starting up the verification thread for " + getName());
    Thread t = startVerificationThread(tableName, row);
    t.start();
    threadDumpingJoin(t);
  }

  /**
   * Creates a table named after the running test and inserts one row whose
   * value is the table name itself.
   *
   * @param tableName name of the table to create
   * @return the key of the inserted row, for later verification
   * @throws IOException on admin/client failure
   */
  private byte [] createTableAndAddRow(final String tableName)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    HBaseAdmin admin = new HBaseAdmin(conf);
    admin.createTable(desc);
    // put some values in the table
    this.table = new HTable(conf, tableName);
    byte [] row = Bytes.toBytes("row1");
    Put put = new Put(row);
    put.add(HConstants.CATALOG_FAMILY, null, Bytes.toBytes(tableName));
    table.put(put);
    return row;
  }

  /**
   * Stop the region server serving the meta region and wait for the meta region
   * to get reassigned. This is always the most problematic case.
   *
   * @param abort set to true if region server should be aborted, if false it
   * is just shut down.
   */
  private void stopOrAbortMetaRegionServer(boolean abort) {
    List<JVMClusterUtil.RegionServerThread> regionThreads =
      cluster.getRegionServerThreads();

    // Scan all live region servers for the one hosting META.
    int server = -1;
    for (int i = 0; i < regionThreads.size() && server == -1; i++) {
      HRegionServer s = regionThreads.get(i).getRegionServer();
      Collection<HRegion> regions = s.getOnlineRegions();
      for (HRegion r : regions) {
        if (Bytes.equals(r.getTableDesc().getName(),
            HConstants.META_TABLE_NAME)) {
          server = i;
        }
      }
    }
    if (server == -1) {
      LOG.fatal("could not find region server serving meta region");
      fail();
    }
    if (abort) {
      this.cluster.abortRegionServer(server);
    } else {
      this.cluster.stopRegionServer(server);
    }
    // waitOnRegionServer blocks until the server thread exits; returns its name.
    LOG.info(this.cluster.waitOnRegionServer(server) + " has been " +
        (abort ? "aborted" : "shut down"));
  }

  /**
   * Run verification in a thread so I can concurrently run a thread-dumper
   * while we're waiting (because in this test sometimes the meta scanner
   * looks to be be stuck).
   * @param tableName Name of table to find.
   * @param row Row we expect to find.
   * @return Verification thread.  Caller needs to calls start on it.
   */
  private Thread startVerificationThread(final String tableName,
      final byte [] row) {
    Runnable runnable = new Runnable() {
      public void run() {
        try {
          // Now try to open a scanner on the meta table. Should stall until
          // meta server comes back up.
          HTable t = new HTable(conf, HConstants.META_TABLE_NAME);
          Scan scan = new Scan();
          scan.addFamily(HConstants.CATALOG_FAMILY);
          ResultScanner s = t.getScanner(scan);
          s.close();
        } catch (IOException e) {
          LOG.fatal("could not re-open meta table because", e);
          fail();
        }
        ResultScanner scanner = null;
        try {
          // Verify that the client can find the data after the region has moved
          // to a different server
          Scan scan = new Scan();
          scan.addFamily(HConstants.CATALOG_FAMILY);
          scanner = table.getScanner(scan);
          LOG.info("Obtained scanner " + scanner);
          // Expect exactly the single row written by createTableAndAddRow,
          // whose value is the table name.
          for (Result r : scanner) {
            assertTrue(Bytes.equals(r.getRow(), row));
            assertEquals(1, r.size());
            byte[] bytes = r.value();
            assertNotNull(bytes);
            assertTrue(tableName.equals(Bytes.toString(bytes)));
          }
          LOG.info("Success!");
        } catch (Exception e) {
          e.printStackTrace();
          fail();
        } finally {
          if (scanner != null) {
            LOG.info("Closing scanner " + scanner);
            scanner.close();
          }
        }
      }
    };
    return new Thread(runnable);
  }
}
apache-2.0
wso2/carbon-identity-framework
components/user-mgt/org.wso2.carbon.identity.user.profile/src/main/java/org/wso2/carbon/identity/user/profile/mgt/listener/ProfileMgtEventListener.java
7537
/*
 * Copyright (c) 2010, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
 *
 * WSO2 Inc. licenses this file to you under the Apache License,
 * Version 2.0 (the "License"); you may not use this file except
 * in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 *
 */

package org.wso2.carbon.identity.user.profile.mgt.listener;

import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.wso2.carbon.identity.base.IdentityValidationUtil;
import org.wso2.carbon.identity.core.AbstractIdentityUserOperationEventListener;
import org.wso2.carbon.identity.core.util.IdentityDatabaseUtil;
import org.wso2.carbon.identity.core.util.IdentityTenantUtil;
import org.wso2.carbon.identity.user.profile.mgt.util.ServiceHodler;
import org.wso2.carbon.user.core.UserCoreConstants;
import org.wso2.carbon.user.core.UserStoreException;
import org.wso2.carbon.user.core.UserStoreManager;
import org.wso2.carbon.user.core.util.UserCoreUtil;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.Map;

/**
 * User-operation event listener for profile management.
 *
 * Hooks into user-store operations to (a) reject profile names containing
 * characters usable for stored-XSS attacks before claims are set, and
 * (b) clean up federated IdP account associations when a local user is
 * deleted.
 */
public class ProfileMgtEventListener extends AbstractIdentityUserOperationEventListener {

    // Keys of IdentityValidationUtil's built-in regex patterns used to
    // whitelist/blacklist profile-name input (see doPreSetUserClaimValues).
    private static final String ALPHANUMERICS_ONLY = "ALPHANUMERICS_ONLY";
    private static final String DIGITS_ONLY = "DIGITS_ONLY";
    private static final String WHITESPACE_EXISTS = "WHITESPACE_EXISTS";
    private static final String URI_RESERVED_EXISTS = "URI_RESERVED_EXISTS";
    private static final String HTML_META_EXISTS = "HTML_META_EXISTS";
    private static final String XML_META_EXISTS = "XML_META_EXISTS";
    private static final String REGEX_META_EXISTS = "REGEX_META_EXISTS";
    private static final String URL = "URL";
    private static final Log log = LogFactory.getLog(ProfileMgtEventListener.class);

    /**
     * Listener execution order; lower values run earlier in the listener chain.
     */
    @Override
    public int getExecutionOrderId() {
        return 110;
    }

    /**
     * Validates the profile name before user claims are stored.
     *
     * @param userName         tenant-aware user name the claims belong to
     * @param claims           claim URI to value map being set (not validated here)
     * @param profileName      profile the claims are stored under; must be
     *                         alphanumeric/digits only
     * @param userStoreManager user store the operation targets
     * @return {@code true} to let the operation proceed
     * @throws UserStoreException if the profile name contains characters outside
     *                            the allowed patterns
     */
    @Override
    public boolean doPreSetUserClaimValues(String userName, Map<String, String> claims, String profileName,
                                           UserStoreManager userStoreManager) throws UserStoreException {

        if (!isEnable()) {
            return true;
        }
        if (log.isDebugEnabled()) {
            String userStoreDomain = UserCoreUtil.getDomainName(userStoreManager.getRealmConfiguration());
            if (StringUtils.isBlank(userStoreDomain)) {
                userStoreDomain = UserCoreConstants.PRIMARY_DEFAULT_DOMAIN_NAME;
            }
            String tenantDomain = IdentityTenantUtil.getTenantDomain(userStoreManager.getTenantId());
            log.debug("doPreSetUserClaimValues method executed in ProfileMgtEventListener for user: " +
                    getFullQualifiedUsername(userName, userStoreDomain, tenantDomain));
        }

        //The following black listed patterns contain possible invalid inputs for profile which could be used for a
        // stored XSS attack.
        String[] whiteListPatternKeys = {ALPHANUMERICS_ONLY, DIGITS_ONLY};
        String[] blackListPatternKeys = {WHITESPACE_EXISTS, URI_RESERVED_EXISTS, HTML_META_EXISTS, XML_META_EXISTS,
                REGEX_META_EXISTS, URL};

        if (!IdentityValidationUtil.isValid(profileName, whiteListPatternKeys, blackListPatternKeys)) {
            throw new UserStoreException("profile name contains invalid characters!");
        }
        return true;
    }

    /**
     * Delete federated user account associations a user has upon deleting the local user account.
     *
     * @param userName         tenant-aware user name being deleted
     * @param userStoreManager user store the user is deleted from; supplies the
     *                         domain and tenant id used to locate associations
     * @return {@code true} to let the delete operation proceed
     * @throws UserStoreException if the association cleanup fails
     */
    @Override
    public boolean doPreDeleteUser(String userName, UserStoreManager userStoreManager) throws UserStoreException {

        if (!isEnable()) {
            return true;
        }
        String userStoreDomain = UserCoreUtil.getDomainName(userStoreManager.getRealmConfiguration());
        // Fall back to PRIMARY when the realm config carries no domain name.
        if (StringUtils.isBlank(userStoreDomain)) {
            userStoreDomain = UserCoreConstants.PRIMARY_DEFAULT_DOMAIN_NAME;
        }
        int tenantId = userStoreManager.getTenantId();
        if (log.isDebugEnabled()) {
            log.debug("doPreDeleteUser method executed in ProfileMgtEventListener for user:" +
                    getFullQualifiedUsername(userName, userStoreDomain, IdentityTenantUtil.getTenantDomain(tenantId)));
        }
        deleteFederatedIdpAccountAssociations(userName, userStoreDomain, tenantId);
        return true;
    }

    /**
     * Delete federated idp account associations from IDN_ASSOCIATED_ID table
     *
     * @param tenantAwareUsername user name without the tenant-domain suffix
     * @param userStoreDomain     user store domain the user belongs to
     * @param tenantId            numeric tenant id of the user
     * @throws UserStoreException if the DELETE fails; the transaction is rolled
     *                            back in that case
     */
    private void deleteFederatedIdpAccountAssociations(String tenantAwareUsername, String userStoreDomain,
                                                       int tenantId) throws UserStoreException {

        // Run this code only if IDN_ASSOCIATED_ID table presents. We are doing this because of this feature can be used
        // by products which does not have the IDN tables.
        if (!ServiceHodler.isIDNTableExist()) {
            return;
        }
        String sql = "DELETE FROM IDN_ASSOCIATED_ID WHERE USER_NAME=? AND DOMAIN_NAME=? AND TENANT_ID=?";
        String tenantDomain = IdentityTenantUtil.getTenantDomain(tenantId);

        // get tenant domain and user store domain appended username for logging
        String fullyQualifiedUsername = getFullQualifiedUsername(tenantAwareUsername, userStoreDomain, tenantDomain);

        if (log.isDebugEnabled()) {
            log.debug("Deleting federated IDP user account associations of user:" + fullyQualifiedUsername);
        }

        try (Connection connection = IdentityDatabaseUtil.getDBConnection()) {
            try (PreparedStatement prepStmt = connection.prepareStatement(sql)) {
                prepStmt.setString(1, tenantAwareUsername);
                prepStmt.setString(2, userStoreDomain);
                prepStmt.setInt(3, tenantId);
                prepStmt.executeUpdate();
                IdentityDatabaseUtil.commitTransaction(connection);
            } catch (SQLException e1) {
                // Roll back the transaction opened by getDBConnection before rethrowing.
                IdentityDatabaseUtil.rollbackTransaction(connection);
                throw new UserStoreException(String.format("Error when trying to delete the federated IDP user " +
                        "account associations of user:%s", fullyQualifiedUsername), e1);
            }
        } catch (SQLException e) {
            String msg = "Error when trying to delete the federated IDP user account associations of user:%s";
            throw new UserStoreException(String.format(msg, fullyQualifiedUsername), e);
        }
    }

    /**
     * Builds "DOMAIN/username@tenantDomain" style name for log messages.
     *
     * @param tenantAwareUsername user name without tenant-domain suffix
     * @param userStoreDomain     user store domain to prepend
     * @param tenantDomain        tenant domain to append
     * @return fully qualified user name
     */
    private String getFullQualifiedUsername(String tenantAwareUsername, String userStoreDomain, String tenantDomain) {

        String fullyQualifiedUsername = UserCoreUtil.addDomainToName(tenantAwareUsername, userStoreDomain);
        fullyQualifiedUsername = UserCoreUtil.addTenantDomainToEntry(fullyQualifiedUsername, tenantDomain);
        return fullyQualifiedUsername;
    }
}
apache-2.0
alibaba/weex
android/sdk/src/main/java/org/apache/weex/ui/view/listview/adapter/RecyclerViewBaseAdapter.java
3296
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.weex.ui.view.listview.adapter; import android.support.v7.widget.RecyclerView; import android.support.v7.widget.StaggeredGridLayoutManager; import android.view.ViewGroup; /** * Adapter for recyclerView */ public class RecyclerViewBaseAdapter<T extends ListBaseViewHolder> extends RecyclerView.Adapter<T> { private IRecyclerAdapterListener iRecyclerAdapterListener; public RecyclerViewBaseAdapter(IRecyclerAdapterListener Listener) { this.iRecyclerAdapterListener = Listener; } @Override public T onCreateViewHolder(ViewGroup parent, int viewType) { if (iRecyclerAdapterListener != null) { return (T) iRecyclerAdapterListener.onCreateViewHolder(parent, viewType); } return null; } @Override public void onViewAttachedToWindow(T holder) { super.onViewAttachedToWindow(holder); if( holder !=null && holder.isFullSpan()){ ViewGroup.LayoutParams lp = holder.itemView.getLayoutParams(); if(lp != null && lp instanceof StaggeredGridLayoutManager.LayoutParams ) { StaggeredGridLayoutManager.LayoutParams p = (StaggeredGridLayoutManager.LayoutParams) lp; p.setFullSpan(true); } } } @Override public void onViewDetachedFromWindow(T holder) { super.onViewDetachedFromWindow(holder); if (holder != null) 
holder.setComponentUsing(false); } @Override public void onBindViewHolder(T viewHolder, int i) { if (iRecyclerAdapterListener != null) { iRecyclerAdapterListener.onBindViewHolder(viewHolder, i); } } @Override public int getItemViewType(int position) { if (iRecyclerAdapterListener != null) { return iRecyclerAdapterListener.getItemViewType(position); } return position; } @Override public long getItemId(int position) { return iRecyclerAdapterListener.getItemId(position); } @Override public int getItemCount() { if (iRecyclerAdapterListener != null) { return iRecyclerAdapterListener.getItemCount(); } return 0; } @Override public void onViewRecycled(T holder) { if (iRecyclerAdapterListener != null) { iRecyclerAdapterListener.onViewRecycled(holder); } super.onViewRecycled(holder); } @Override public boolean onFailedToRecycleView(T holder) { if (iRecyclerAdapterListener != null) { return iRecyclerAdapterListener.onFailedToRecycleView(holder); } return super.onFailedToRecycleView(holder); } }
apache-2.0
USEF-Foundation/ri.usef.energy
usef-build/usef-workflow/usef-dso/src/main/java/energy/usef/dso/workflow/dto/PtuGridMonitoringDto.java
1206
/*
 * Copyright 2015-2016 USEF Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package energy.usef.dso.workflow.dto;

import java.math.BigInteger;

/**
 * Transport object pairing a PTU index with the actual power observed for that
 * PTU by grid monitoring. Plain mutable bean: both properties default to
 * {@code null} until set.
 */
public class PtuGridMonitoringDto {

    private Integer ptuIndex;
    private BigInteger actualPower;

    /**
     * Sets the index of the PTU this measurement belongs to.
     *
     * @param ptuIndex PTU index, may be {@code null}
     */
    public void setPtuIndex(Integer ptuIndex) {
        this.ptuIndex = ptuIndex;
    }

    /**
     * @return the PTU index, or {@code null} if not yet set
     */
    public Integer getPtuIndex() {
        return ptuIndex;
    }

    /**
     * Sets the actual power measured for the PTU.
     *
     * @param actualPower measured power, may be {@code null}
     */
    public void setActualPower(BigInteger actualPower) {
        this.actualPower = actualPower;
    }

    /**
     * @return the measured actual power, or {@code null} if not yet set
     */
    public BigInteger getActualPower() {
        return actualPower;
    }
}
apache-2.0
smanvi-pivotal/geode
geode-core/src/main/java/org/apache/geode/internal/statistics/StatisticsImpl.java
17296
/* * Licensed to the Apache Software Foundation (ASF) under one or more contributor license * agreements. See the NOTICE file distributed with this work for additional information regarding * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance with the License. You may obtain a * copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ package org.apache.geode.internal.statistics; import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.function.DoubleSupplier; import java.util.function.IntSupplier; import java.util.function.LongSupplier; import org.apache.logging.log4j.Logger; import org.apache.geode.StatisticDescriptor; import org.apache.geode.Statistics; import org.apache.geode.StatisticsType; import org.apache.geode.internal.concurrent.Atomics; import org.apache.geode.internal.i18n.LocalizedStrings; import org.apache.geode.internal.logging.LogService; import org.apache.geode.internal.util.concurrent.CopyOnWriteHashMap; // @todo darrel Add statistics instances to archive when they are created. /** * An object that maintains the values of various application-defined statistics. The statistics * themselves are described by an instance of {@link StatisticsType}. * * <P> * * For optimal statistic access, each statistic may be referred to by its {@link #nameToId id} in * the statistics object. 
* * <P> * * @see <A href="package-summary.html#statistics">Package introduction</A> * * * @since GemFire 3.0 */ public abstract class StatisticsImpl implements Statistics { /** logger - not private for tests */ static Logger logger = LogService.getLogger(); /** The type of this statistics instance */ private final StatisticsTypeImpl type; /** The display name of this statistics instance */ private final String textId; /** Numeric information display with these statistics */ private final long numericId; /** Non-zero if stats values come from operating system system calls */ private final int osStatFlags; /** Are these statistics closed? */ private boolean closed; /** Uniquely identifies this instance */ private long uniqueId; /** * Suppliers of int sample values to be sampled every sample-interval */ private final CopyOnWriteHashMap<Integer, IntSupplier> intSuppliers = new CopyOnWriteHashMap<>(); /** * Suppliers of long sample values to be sampled every sample-interval */ private final CopyOnWriteHashMap<Integer, LongSupplier> longSuppliers = new CopyOnWriteHashMap<>(); /** * Suppliers of double sample values to be sampled every sample-interval */ private final CopyOnWriteHashMap<Integer, DoubleSupplier> doubleSuppliers = new CopyOnWriteHashMap<>(); /** * Suppliers that have previously failed. 
Tracked to avoid logging many messages about a failing * supplier */ private final Set<Object> flakySuppliers = new HashSet<Object>(); /////////////////////// Constructors /////////////////////// /** * factory method to create a class that implements Statistics */ public static Statistics createAtomicNoOS(StatisticsType type, String textId, long numericId, long uniqueId, StatisticsManager mgr) { return Atomics.createAtomicStatistics(type, textId, numericId, uniqueId, mgr); } /** * Creates a new statistics instance of the given type and unique id * * @param type A description of the statistics * @param textId Text that helps identifies this instance * @param numericId A number that helps identify this instance * @param uniqueId A number that uniquely identifies this instance * @param osStatFlags Non-zero if stats require system calls to collect them; for internal use * only */ public StatisticsImpl(StatisticsType type, String textId, long numericId, long uniqueId, int osStatFlags) { this.type = (StatisticsTypeImpl) type; this.textId = textId; this.numericId = numericId; this.uniqueId = uniqueId; this.osStatFlags = osStatFlags; closed = false; } ////////////////////// Instance Methods ////////////////////// public boolean usesSystemCalls() { return this.osStatFlags != 0; } public int getOsStatFlags() { return this.osStatFlags; } public int nameToId(String name) { return this.type.nameToId(name); } public StatisticDescriptor nameToDescriptor(String name) { return this.type.nameToDescriptor(name); } public void close() { this.closed = true; } public boolean isClosed() { return this.closed; } public abstract boolean isAtomic(); private boolean isOpen() { // fix for bug 29973 return !this.closed; } //////////////////////// attribute Methods /////////////////////// public StatisticsType getType() { return this.type; } public String getTextId() { return this.textId; } public long getNumericId() { return this.numericId; } /** * Gets the unique id for this resource */ public 
long getUniqueId() { return this.uniqueId; } /** * Sets a unique id for this resource. */ public void setUniqueId(long uid) { this.uniqueId = uid; } //////////////////////// set() Methods /////////////////////// public void setInt(String name, int value) { setInt(nameToDescriptor(name), value); } public void setInt(StatisticDescriptor descriptor, int value) { setInt(getIntId(descriptor), value); } public void setInt(int id, int value) { if (isOpen()) { _setInt(id, value); } } /** * Sets the value of a statistic of type <code>int</code> at the given offset, but performs no * type checking. */ protected abstract void _setInt(int offset, int value); public void setLong(String name, long value) { setLong(nameToDescriptor(name), value); } public void setLong(StatisticDescriptor descriptor, long value) { setLong(getLongId(descriptor), value); } public void setLong(int id, long value) { if (isOpen()) { _setLong(id, value); } } /** * Sets the value of a statistic of type <code>long</code> at the given offset, but performs no * type checking. */ protected abstract void _setLong(int offset, long value); public void setDouble(String name, double value) { setDouble(nameToDescriptor(name), value); } public void setDouble(StatisticDescriptor descriptor, double value) { setDouble(getDoubleId(descriptor), value); } public void setDouble(int id, double value) { if (isOpen()) { _setDouble(id, value); } } /** * Sets the value of a statistic of type <code>double</code> at the given offset, but performs no * type checking. 
*/ protected abstract void _setDouble(int offset, double value); /////////////////////// get() Methods /////////////////////// public int getInt(String name) { return getInt(nameToDescriptor(name)); } public int getInt(StatisticDescriptor descriptor) { return getInt(getIntId(descriptor)); } public int getInt(int id) { if (isOpen()) { return _getInt(id); } else { return 0; } } /** * Returns the value of the statistic of type <code>int</code> at the given offset, but performs * no type checking. */ protected abstract int _getInt(int offset); public long getLong(String name) { return getLong(nameToDescriptor(name)); } public long getLong(StatisticDescriptor descriptor) { return getLong(getLongId(descriptor)); } public long getLong(int id) { if (isOpen()) { return _getLong(id); } else { return 0; } } /** * Returns the value of the statistic of type <code>long</code> at the given offset, but performs * no type checking. */ protected abstract long _getLong(int offset); public double getDouble(String name) { return getDouble(nameToDescriptor(name)); } public double getDouble(StatisticDescriptor descriptor) { return getDouble(getDoubleId(descriptor)); } public double getDouble(int id) { if (isOpen()) { return _getDouble(id); } else { return 0.0; } } /** * Returns the value of the statistic of type <code>double</code> at the given offset, but * performs no type checking. 
*/ protected abstract double _getDouble(int offset); public Number get(StatisticDescriptor descriptor) { if (isOpen()) { return _get((StatisticDescriptorImpl) descriptor); } else { return Integer.valueOf(0); } } public Number get(String name) { return get(nameToDescriptor(name)); } public long getRawBits(StatisticDescriptor descriptor) { if (isOpen()) { return _getRawBits((StatisticDescriptorImpl) descriptor); } else { return 0; } } public long getRawBits(String name) { return getRawBits(nameToDescriptor(name)); } //////////////////////// inc() Methods //////////////////////// public void incInt(String name, int delta) { incInt(nameToDescriptor(name), delta); } public void incInt(StatisticDescriptor descriptor, int delta) { incInt(getIntId(descriptor), delta); } public void incInt(int id, int delta) { if (isOpen()) { _incInt(id, delta); } } /** * Increments the value of the statistic of type <code>int</code> at the given offset by a given * amount, but performs no type checking. */ protected abstract void _incInt(int offset, int delta); public void incLong(String name, long delta) { incLong(nameToDescriptor(name), delta); } public void incLong(StatisticDescriptor descriptor, long delta) { incLong(getLongId(descriptor), delta); } public void incLong(int id, long delta) { if (isOpen()) { _incLong(id, delta); } } /** * Increments the value of the statistic of type <code>long</code> at the given offset by a given * amount, but performs no type checking. */ protected abstract void _incLong(int offset, long delta); public void incDouble(String name, double delta) { incDouble(nameToDescriptor(name), delta); } public void incDouble(StatisticDescriptor descriptor, double delta) { incDouble(getDoubleId(descriptor), delta); } public void incDouble(int id, double delta) { if (isOpen()) { _incDouble(id, delta); } } /** * Increments the value of the statistic of type <code>double</code> at the given offset by a * given amount, but performs no type checking. 
*/ protected abstract void _incDouble(int offset, double delta); /** * For internal use only. Tells the implementation to prepare the data in this instance for * sampling. * * @since GemFire 5.1 */ public void prepareForSample() { // nothing needed in this impl. } /** * Invoke sample suppliers to retrieve the current value for the suppler controlled sets and * update the stats to reflect the supplied values. * * @return the number of callback errors that occurred while sampling stats */ public int invokeSuppliers() { int errors = 0; for (Map.Entry<Integer, IntSupplier> entry : intSuppliers.entrySet()) { try { _setInt(entry.getKey(), entry.getValue().getAsInt()); } catch (Throwable t) { logSupplierError(t, entry.getKey(), entry.getValue()); errors++; } } for (Map.Entry<Integer, LongSupplier> entry : longSuppliers.entrySet()) { try { _setLong(entry.getKey(), entry.getValue().getAsLong()); } catch (Throwable t) { logSupplierError(t, entry.getKey(), entry.getValue()); errors++; } } for (Map.Entry<Integer, DoubleSupplier> entry : doubleSuppliers.entrySet()) { try { _setDouble(entry.getKey(), entry.getValue().getAsDouble()); } catch (Throwable t) { logSupplierError(t, entry.getKey(), entry.getValue()); errors++; } } return errors; } private void logSupplierError(final Throwable t, int statId, Object supplier) { if (flakySuppliers.add(supplier)) { logger.warn("Error invoking supplier for stat {}, id {}", this.getTextId(), statId, t); } } /** * @return the number of statistics that are measured using supplier callbacks */ public int getSupplierCount() { return intSuppliers.size() + doubleSuppliers.size() + longSuppliers.size(); } @Override public IntSupplier setIntSupplier(final int id, final IntSupplier supplier) { if (id >= type.getIntStatCount()) { throw new IllegalArgumentException("Id " + id + " is not in range for stat" + type); } return intSuppliers.put(id, supplier); } @Override public IntSupplier setIntSupplier(final String name, final IntSupplier supplier) { 
return setIntSupplier(nameToId(name), supplier); } @Override public IntSupplier setIntSupplier(final StatisticDescriptor descriptor, final IntSupplier supplier) { return setIntSupplier(getIntId(descriptor), supplier); } @Override public LongSupplier setLongSupplier(final int id, final LongSupplier supplier) { if (id >= type.getLongStatCount()) { throw new IllegalArgumentException("Id " + id + " is not in range for stat" + type); } return longSuppliers.put(id, supplier); } @Override public LongSupplier setLongSupplier(final String name, final LongSupplier supplier) { return setLongSupplier(nameToId(name), supplier); } @Override public LongSupplier setLongSupplier(final StatisticDescriptor descriptor, final LongSupplier supplier) { return setLongSupplier(getLongId(descriptor), supplier); } @Override public DoubleSupplier setDoubleSupplier(final int id, final DoubleSupplier supplier) { if (id >= type.getDoubleStatCount()) { throw new IllegalArgumentException("Id " + id + " is not in range for stat" + type); } return doubleSuppliers.put(id, supplier); } @Override public DoubleSupplier setDoubleSupplier(final String name, final DoubleSupplier supplier) { return setDoubleSupplier(nameToId(name), supplier); } @Override public DoubleSupplier setDoubleSupplier(final StatisticDescriptor descriptor, final DoubleSupplier supplier) { return setDoubleSupplier(getDoubleId(descriptor), supplier); } @Override public int hashCode() { return (int) this.uniqueId; } @Override public boolean equals(Object o) { if (o == null) { return false; } if (!(o instanceof StatisticsImpl)) { return false; } StatisticsImpl other = (StatisticsImpl) o; return this.uniqueId == other.getUniqueId(); } private static int getIntId(StatisticDescriptor descriptor) { return ((StatisticDescriptorImpl) descriptor).checkInt(); } private static int getLongId(StatisticDescriptor descriptor) { return ((StatisticDescriptorImpl) descriptor).checkLong(); } private static int getDoubleId(StatisticDescriptor descriptor) 
{ return ((StatisticDescriptorImpl) descriptor).checkDouble(); } /** * Returns the value of the specified statistic descriptor. */ private Number _get(StatisticDescriptorImpl stat) { switch (stat.getTypeCode()) { case StatisticDescriptorImpl.INT: return Integer.valueOf(_getInt(stat.getId())); case StatisticDescriptorImpl.LONG: return Long.valueOf(_getLong(stat.getId())); case StatisticDescriptorImpl.DOUBLE: return Double.valueOf(_getDouble(stat.getId())); default: throw new RuntimeException( LocalizedStrings.StatisticsImpl_UNEXPECTED_STAT_DESCRIPTOR_TYPE_CODE_0 .toLocalizedString(Byte.valueOf(stat.getTypeCode()))); } } /** * Returns the bits that represent the raw value of the specified statistic descriptor. */ private long _getRawBits(StatisticDescriptorImpl stat) { switch (stat.getTypeCode()) { case StatisticDescriptorImpl.INT: return _getInt(stat.getId()); case StatisticDescriptorImpl.LONG: return _getLong(stat.getId()); case StatisticDescriptorImpl.DOUBLE: return Double.doubleToRawLongBits(_getDouble(stat.getId())); default: throw new RuntimeException( LocalizedStrings.StatisticsImpl_UNEXPECTED_STAT_DESCRIPTOR_TYPE_CODE_0 .toLocalizedString(Byte.valueOf(stat.getTypeCode()))); } } @Override public String toString() { final StringBuilder sb = new StringBuilder(getClass().getName()); sb.append("@").append(System.identityHashCode(this)).append("{"); sb.append("uniqueId=").append(this.uniqueId); sb.append(", numericId=").append(this.numericId); sb.append(", textId=").append(this.textId); sb.append(", type=").append(this.type.getName()); sb.append(", closed=").append(this.closed); sb.append("}"); return sb.toString(); } }
apache-2.0
ezsimple/java
spring/apt/src/main/java/net/ion/oadr2/service/bean/EventSignalInterval.java
1396
/* * Copyright 2014 kt corp. All rights reserved. * This is a proprietary software of kt corp, and you may not use this file except in * compliance with license agreement with kt corp. Any redistribution or use of this * software, with or without modification shall be strictly prohibited without prior written * approval of kt corp, and the copyright notice above does not evidence any actual or * intended publication of such software. */ package net.ion.oadr2.service.bean; public class EventSignalInterval { private transient String intervalId; private String signalId; private int uid = 0; // starts from 0, increments by 1 private long duration; private float value; public EventSignalInterval(){ } public String getIntervalId() { return intervalId; } public void setIntervalId(String intervalId) { this.intervalId = intervalId; } public String getSignalId() { return signalId; } public void setSignalId(String signalId) { this.signalId = signalId; } public int getUid() { return uid; } public void setUid(int uid) { this.uid = uid; } public long getDuration() { return duration; } public void setDuration(long duration) { this.duration = duration; } public float getValue() { return value; } public void setValue(float value) { this.value = value; } }
apache-2.0
RobAltena/deeplearning4j
nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/api/ops/impl/controlflow/WhereNumpy.java
1759
/*******************************************************************************
 * Copyright (c) 2015-2018 Skymind, Inc.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

package org.nd4j.linalg.api.ops.impl.controlflow;

import lombok.NoArgsConstructor;
import org.nd4j.autodiff.samediff.SDVariable;
import org.nd4j.autodiff.samediff.SameDiff;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.api.ops.DynamicCustomOp;

import java.util.List;

/**
 * Thin wrapper binding the native {@code where_np} custom op (numpy-style
 * "where"); all constructors simply delegate to {@link DynamicCustomOp}.
 */
@NoArgsConstructor
public class WhereNumpy extends DynamicCustomOp {

    /** Graph-mode constructor: op over SameDiff variables. */
    public WhereNumpy(SameDiff sameDiff, SDVariable[] args) {
        super(null, sameDiff, args);
    }

    /** Eager constructor with explicit op name and t/i arguments. */
    public WhereNumpy(String opName, INDArray[] inputs, INDArray[] outputs,
                      List<Double> tArguments, List<Integer> iArguments) {
        super(opName, inputs, outputs, tArguments, iArguments);
    }

    /** Eager constructor with pre-allocated output arrays. */
    public WhereNumpy(INDArray[] inputs, INDArray[] outputs) {
        super(null, inputs, outputs);
    }

    /** Graph-mode constructor with optional in-place execution. */
    public WhereNumpy(SameDiff sameDiff, SDVariable[] args, boolean inPlace) {
        super(null, sameDiff, args, inPlace);
    }

    @Override
    public String opName() {
        // Name used to look up the native implementation.
        return "where_np";
    }
}
apache-2.0
minji-kim/calcite
core/src/main/java/org/apache/calcite/plan/RelOptRuleCall.java
7909
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to you under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.calcite.plan;

import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.core.Filter;
import org.apache.calcite.rel.metadata.RelMetadataQuery;
import org.apache.calcite.tools.RelBuilder;
import org.apache.calcite.util.trace.CalciteTrace;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;

import org.slf4j.Logger;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * A <code>RelOptRuleCall</code> is an invocation of a {@link RelOptRule} with a
 * set of {@link RelNode relational expression}s as arguments.
 */
public abstract class RelOptRuleCall {
  //~ Static fields/initializers ---------------------------------------------

  protected static final Logger LOGGER = CalciteTrace.getPlannerTracer();

  /**
   * Generator for {@link #id} values.
   */
  // NOTE(review): unsynchronized static increment; assumes calls are created
  // on a single planner thread or that ids need not be globally unique —
  // confirm before relying on uniqueness across threads.
  private static int nextId = 0;

  //~ Instance fields --------------------------------------------------------

  // Unique (per process, see note on nextId) id of this rule invocation.
  public final int id;
  // Root operand of the rule that matched.
  protected final RelOptRuleOperand operand0;
  // For operands matched with matchAnyChildren, maps a node to its inputs.
  // May start as an immutable (possibly empty) map; see setChildRels.
  protected Map<RelNode, List<RelNode>> nodeInputs;
  // The rule being invoked (derived from operand0).
  public final RelOptRule rule;
  // One matched relational expression per rule operand, in operand order.
  public final RelNode[] rels;
  private final RelOptPlanner planner;
  private final List<RelNode> parents;

  //~ Constructors -----------------------------------------------------------

  /**
   * Creates a RelOptRuleCall.
   *
   * @param planner      Planner
   * @param operand      Root operand
   * @param rels         Array of relational expressions which matched each
   *                     operand
   * @param nodeInputs   For each node which matched with
   *                     {@code matchAnyChildren}=true, a list of the node's
   *                     inputs
   * @param parents      list of parent RelNodes corresponding to the first
   *                     relational expression in the array argument, if known;
   *                     otherwise, null
   */
  protected RelOptRuleCall(
      RelOptPlanner planner,
      RelOptRuleOperand operand,
      RelNode[] rels,
      Map<RelNode, List<RelNode>> nodeInputs,
      List<RelNode> parents) {
    this.id = nextId++;
    this.planner = planner;
    this.operand0 = operand;
    this.nodeInputs = nodeInputs;
    this.rule = operand.getRule();
    this.rels = rels;
    this.parents = parents;
    // Invariant: exactly one matched rel per rule operand.
    assert rels.length == rule.operands.size();
  }

  /** Creates a RelOptRuleCall with no known parents. */
  protected RelOptRuleCall(
      RelOptPlanner planner,
      RelOptRuleOperand operand,
      RelNode[] rels,
      Map<RelNode, List<RelNode>> nodeInputs) {
    this(planner, operand, rels, nodeInputs, null);
  }

  //~ Methods ----------------------------------------------------------------

  /**
   * Returns the root operand matched by this rule.
   *
   * @return root operand
   */
  public RelOptRuleOperand getOperand0() {
    return operand0;
  }

  /**
   * Returns the invoked planner rule.
   *
   * @return planner rule
   */
  public RelOptRule getRule() {
    return rule;
  }

  /**
   * Returns a list of matched relational expressions.
   *
   * @return matched relational expressions
   * @deprecated Use {@link #getRelList()} or {@link #rel(int)}
   */
  @Deprecated // to be removed before 2.0
  public RelNode[] getRels() {
    return rels;
  }

  /**
   * Returns a list of matched relational expressions.
   *
   * @return matched relational expressions
   * @see #rel(int)
   */
  public List<RelNode> getRelList() {
    // Defensive immutable copy; callers cannot mutate the match.
    return ImmutableList.copyOf(rels);
  }

  /**
   * Retrieves the {@code ordinal}th matched relational expression. This
   * corresponds to the {@code ordinal}th operand of the rule.
   *
   * @param ordinal Ordinal
   * @param <T>     Type
   * @return Relational expression
   */
  public <T extends RelNode> T rel(int ordinal) {
    //noinspection unchecked
    return (T) rels[ordinal];
  }

  /**
   * Returns the children of a given relational expression node matched in a
   * rule.
   *
   * <p>If the policy of the operand which caused the match is not
   * {@link org.apache.calcite.plan.RelOptRuleOperandChildPolicy#ANY},
   * the children will have their
   * own operands and therefore be easily available in the array returned by
   * the {@link #getRelList()} method, so this method returns null.
   *
   * <p>This method is for
   * {@link org.apache.calcite.plan.RelOptRuleOperandChildPolicy#ANY},
   * which is generally used when a node can have a variable number of
   * children, and hence where the matched children are not retrievable by any
   * other means.
   *
   * @param rel Relational expression
   * @return Children of relational expression
   */
  public List<RelNode> getChildRels(RelNode rel) {
    return nodeInputs.get(rel);
  }

  /** Assigns the input relational expressions of a given relational expression,
   * as seen by this particular call. Is only called when the operand is
   * {@link RelOptRule#any()}. */
  protected void setChildRels(RelNode rel, List<RelNode> inputs) {
    // The initial map may be a shared immutable empty map; swap in a private
    // mutable map before the first write.
    if (nodeInputs.isEmpty()) {
      nodeInputs = new HashMap<>();
    }
    nodeInputs.put(rel, inputs);
  }

  /**
   * Returns the planner.
   *
   * @return planner
   */
  public RelOptPlanner getPlanner() {
    return planner;
  }

  /**
   * Returns the current RelMetadataQuery, to be used for instance by
   * {@link RelOptRule#onMatch(RelOptRuleCall)}.
   */
  public RelMetadataQuery getMetadataQuery() {
    return rel(0).getCluster().getMetadataQuery();
  }

  /**
   * @return list of parents of the first relational expression
   */
  public List<RelNode> getParents() {
    return parents;
  }

  /**
   * Registers that a rule has produced an equivalent relational expression.
   *
   * <p>Called by the rule whenever it finds a match. The implementation of
   * this method guarantees that the original relational expression (that is,
   * <code>this.rels[0]</code>) has its traits propagated to the new
   * relational expression (<code>rel</code>) and its unregistered children.
   * Any trait not specifically set in the RelTraitSet returned by <code>
   * rel.getTraits()</code> will be copied from <code>
   * this.rels[0].getTraitSet()</code>.
   *
   * @param rel   Relational expression equivalent to the root relational
   *              expression of the rule call, {@code call.rels(0)}
   * @param equiv Map of other equivalences
   */
  public abstract void transformTo(RelNode rel, Map<RelNode, RelNode> equiv);

  /**
   * Registers that a rule has produced an equivalent relational expression,
   * but no other equivalences.
   *
   * @param rel Relational expression equivalent to the root relational
   *            expression of the rule call, {@code call.rels(0)}
   */
  public final void transformTo(RelNode rel) {
    transformTo(rel, ImmutableMap.<RelNode, RelNode>of());
  }

  /** Creates a {@link org.apache.calcite.tools.RelBuilder} to be used by
   * code within the call. The {@link RelOptRule#relBuilderFactory} argument contains policies
   * such as what implementation of {@link Filter} to create. */
  public RelBuilder builder() {
    return rule.relBuilderFactory.create(rel(0).getCluster(), null);
  }
}

// End RelOptRuleCall.java
apache-2.0
xenione/swipe-maker
swipemaker/src/main/java/com/xenione/libs/swipemaker/ScrollerHelper.java
1144
package com.xenione.libs.swipemaker;

import android.widget.OverScroller;

/**
 * Created by Eugeni on 10/04/2016.
 *
 * Convenience wrapper around {@link OverScroller} for horizontal-only
 * scrolling: vertical components are always passed as zero.
 */
public class ScrollerHelper {

    // Underlying scroller; assigned once and never replaced.
    private final OverScroller scroller;

    public ScrollerHelper(OverScroller overScroller) {
        this.scroller = overScroller;
    }

    /**
     * Starts a horizontal scroll from {@code start} to {@code end} with the
     * default duration.
     *
     * @return false (no scroll started) when start and end coincide
     */
    public boolean startScroll(int start, int end) {
        final int distance = end - start;
        if (distance == 0) {
            return false;
        }
        scroller.startScroll(start, 0, distance, 0);
        return true;
    }

    /**
     * Starts a horizontal scroll from {@code start} to {@code end} lasting
     * {@code duration} milliseconds.
     *
     * @return false (no scroll started) when start and end coincide
     */
    public boolean startScroll(int start, int end, int duration) {
        final int distance = end - start;
        if (distance == 0) {
            return false;
        }
        scroller.startScroll(start, 0, distance, 0, duration);
        return true;
    }

    /** Aborts any scroll still in progress; a no-op when already finished. */
    public void finish() {
        if (scroller.isFinished()) {
            return;
        }
        scroller.forceFinished(true);
    }

    /** Returns whether the scroller has finished scrolling. */
    public boolean isFinished() {
        return scroller.isFinished();
    }

    /** Advances the scroll animation; see {@link OverScroller#computeScrollOffset()}. */
    public boolean computeScrollOffset() {
        return scroller.computeScrollOffset();
    }

    /** Returns the current horizontal offset of the scroll. */
    public int getCurrX() {
        return scroller.getCurrX();
    }
}
apache-2.0
pfirmstone/JGDMS
qa/src/org/apache/river/test/impl/outrigger/matching/FieldNotifyTest.java
2337
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.river.test.impl.outrigger.matching;

import java.util.logging.Level;

// Test harness specific classes
import org.apache.river.qa.harness.TestException;
import org.apache.river.qa.harness.QAConfig;

// All other imports
import org.apache.river.qa.harness.Test;
import java.rmi.*;
import java.util.List;
import java.util.Iterator;
import net.jini.core.entry.Entry;
import net.jini.space.JavaSpace;

/**
 * JavaSpaces test that registers a single notification with a null or
 * other match all template, then writes a number of entries.
 */
public class FieldNotifyTest extends MatchTestBase {
    // Helper that registers for notifications and verifies delivery.
    private NotifyTestUtil testUtil;

    // Template used both for the notify registration and for the writes.
    private Entry tmpl;

    /**
     * Sets up the testing environment.
     *
     * @param config Arguments from the runner for construct.
     */
    public Test construct(QAConfig config) throws Exception {
        super.construct(config);
        this.parse();
        return this;
    }

    /**
     * Parses configuration: builds the notify helper and picks the template
     * this run will use.
     */
    protected void parse() throws Exception {
        super.parse();
        testUtil = new NotifyTestUtil(getConfig(), this);
        tmpl = FieldMatchTest.pickTemplate(getConfig());
    }

    /**
     * Runs the test: registers one notification, writes a batch of entries
     * matching the template, then waits and fails if the helper reports a
     * discrepancy.
     *
     * @throws TestException when the notify check reports an error message
     */
    public void run() throws Exception {
        testUtil.init((JavaSpaceAuditor) space);

        // Register for event
        testUtil.registerForNotify(tmpl);
        logger.log(Level.INFO, "Registered for notify");
        spaceSet();
        writeBunch(tmpl);
        // Non-null result is the failure description from the helper.
        final String rslt = testUtil.waitAndCheck();

        if (rslt != null) {
            throw new TestException(rslt);
        }
    }
}
apache-2.0
wanliyang10010/Shop
src/cn/xaut/shop/dao/GoodsDao.java
2971
package cn.xaut.shop.dao;

import java.util.List;

import cn.xaut.common.paging.domain.Page;
import cn.xaut.shop.modules.repository.CrudRepository;
import cn.xaut.shop.pojo.Goods;

/**
 * Data-access interface for {@link Goods}: paged queries by keyword, shop,
 * price, type and state, plus stock and sales-count maintenance.
 */
public interface GoodsDao extends CrudRepository<Goods, Integer>{

    // Paged search by keyword across all goods.
    Page<Goods> findByKey(Page<Goods> page,String key);

    // Paged search by keyword, restricted to one shop.
    Page<Goods> findByKeyShopId(Page<Goods> page,String key,Integer sid);

    // Paged list of "hot" goods for one shop.
    Page<Goods> findHotByShopId(Page<Goods> page,int sid);

    // Paged list of all goods for one shop.
    Page<Goods> queryByShopId(Page<Goods> page,int sid);

    // Paged list of "hot" goods across all shops.
    Page<Goods> findAllHot(Page<Goods> page);

    Page<Goods> queryAllHot(Page<Goods> page);

    // Paged search by keyword and goods type within one shop.
    Page<Goods> findDHByShopId(Page<Goods> page,String key, String stype,Integer sid);

    // Paged search by price (p) and keyword.
    Page<Goods> findByPrice(Page<Goods> page,String p,String keyword);

    // Paged list of discounted goods.
    Page<Goods> queryDiscount(Page<Goods> page);

    // Paged list of one shop's discounted goods for a given date.
    Page<Goods> queryShopDiscount(Page<Goods> page, String date, Integer shopId);

    List<Goods> findByGoodsId(Integer goodsid);

    // Paged search of goods on sale matching a keyword.
    Page<Goods> querySale(Page<Goods> page,String keyword);

    // Paged list of goods of one type.
    Page<Goods> queryType(Page<Goods> page, String type);

    // dwj: query all goods (paged).
    Page<Goods> findByAllGood(Page<Goods> page);

    // dwj: list query by keyword.
    public List<Goods> findGoodsInfoKeyWord (String keyWord);

    // dwj: goods set for one shop.
    public List<Goods> getShopSet(Integer sid);

    /**
     * Decreases stock, for the case where the goods has no type/property.
     */
    public int minGoodAmount(Integer goodsid , final Integer amount);

    /**
     * Decreases stock, for the case where the goods has a type/property.
     *
     * @param property the goods type (property) value
     */
    public int minGoodAmountProperty(int goodsid,final int amount,final String property);

    /**
     * Restores stock after an order is cancelled.
     *
     * @param goodsid the goods id
     * @param amount the quantity of goods to restore
     * @return
     */
    public int rollBackGoodAmount(Integer goodsid , final Integer amount);

    /**
     * Restores stock after an order is cancelled, for goods with a type.
     *
     * @param property the goods type (category) information
     */
    public int rollBackGoodAmountProperty(int goodsid, int amount,String property);

    /**
     * Increases the sales count.
     *
     * @param goodsId the goods id
     * @param sellAmount the quantity sold
     */
    public int increaseSellAmount(Integer goodsId , final Integer sellAmount);

    /**
     * Decreases the sales count. (ywl)
     *
     * @param goodsId the goods id
     * @param sellAmount the quantity to subtract
     */
    public int decreaseSellAmount(Integer goodsId , final Integer sellAmount);

    List<Goods> getGoodsList(Integer shopid, String key);

    // Gets the Goods entity by its goodsid.
    public Goods findGoodsByGoodsId(Integer goodsid);

    List<Goods> findGoodsByTypeId(String gtypeID);

    // Paged search by shop id and keyword.
    Page<Goods> findByShopKey(Page<Goods> page, String shopId, String key);

    // Paged list filtered by goods state.
    Page<Goods> findByState(Page<Goods> page);

    // Paged list of top goods.
    Page<Goods> findTop(Page<Goods> page);

    Page<Goods> queryViewShopId(Page<Goods> page, Integer sid);

    List<Goods> getGoodsTypeList(Integer shopid, String typeid, String key);

    List<Goods> getType(Integer typeid, Integer goodsid);

    // Paged search of second-hand ("shand") goods by keyword.
    Page<Goods> findShandByKey(Page<Goods> page, String keyword);

    // Paged list of second-hand ("shand") goods.
    Page<Goods> findShand(Page<Goods> page);

    // Paged search of second-hand goods by keyword and type.
    Page<Goods> findShandByType(Page<Goods> page, String keyword, String type);
}
apache-2.0
jdeppe-pivotal/geode
geode-junit/src/main/java/org/apache/geode/cache/query/data/Manager.java
1221
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
 * agreements. See the NOTICE file distributed with this work for additional information regarding
 * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the License. You may obtain a
 * copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License
 * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing permissions and limitations under
 * the License.
 */
package org.apache.geode.cache.query.data;

import java.util.Set;

/**
 * Data bean: an {@link Employee} that additionally carries the id of its
 * manager.
 */
public class Manager extends Employee {

  // Id of this employee's manager; public so it can be read directly.
  public int manager_id;

  /**
   * Creates a Manager.
   *
   * @param mgrId id of this employee's manager
   */
  public Manager(String name, int age, int empId, String title, int salary, Set addresses,
      int mgrId) {
    super(name, age, empId, title, salary, addresses);
    this.manager_id = mgrId;
  }

  /** Returns the manager id. */
  public int getManager_id() {
    return this.manager_id;
  }
}
apache-2.0
apache/geronimo
framework/modules/geronimo-shell-base/src/main/java/org/apache/geronimo/shell/deploy/DisconnectCommand.java
1732
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.geronimo.shell.deploy;

import org.apache.karaf.shell.commands.Command;

import org.apache.geronimo.deployment.cli.ServerConnection;

/**
 * Shell command {@code deploy:disconnect}: closes the server connection
 * stored in the session by {@link ConnectCommand}, if any.
 *
 * @version $Rev$ $Date$
 */
@Command(scope = "deploy", name = "disconnect", description = "Disconnect from a Geronimo server")
public class DisconnectCommand extends ConnectCommand {

    /**
     * Closes the session's current {@link ServerConnection} (ignoring close
     * errors) and clears it from the session; prints "Not connected" when no
     * connection is stored.
     *
     * @return always null; output goes to the shell via println
     */
    @Override
    protected Object doExecute() throws Exception {
        // Connection previously stashed in the session by ConnectCommand.
        ServerConnection connection = (ServerConnection) session.get(ConnectCommand.SERVER_CONNECTION);

        if (connection != null) {
            println("Disconnecting from Geronimo server");

            try {
                connection.close();
            } catch (Exception e) {
                // ignore - best-effort close; the session entry is cleared below either way
            }

            session.put(SERVER_CONNECTION, null);
            println("Connection ended");
        } else {
            println("Not connected");
        }

        return null;
    }
}
apache-2.0
keigohtr/apitore-response-parent
rome-response/src/main/java/com/apitore/banana/response/org/rome/sample/Api36FeedsTravelExample.java
1105
package com.apitore.banana.response.org.rome.sample;

import java.util.HashMap;
import java.util.Map;

import org.springframework.web.client.RestTemplate;

import com.apitore.banana.response.org.rome.FeedResponseEntity;
import com.apitore.banana.utils.UrlFormatter;

/**
 * Runnable example: calls the Apitore "feeds/travel" endpoint and prints
 * selected fields of the first feed entry.
 *
 * @author Keigo Hattori
 */
public class Api36FeedsTravelExample {

  // Endpoint of the feeds/travel API.
  static String ENDPOINT = "https://api.apitore.com/api/36/feeds/travel";

  // Replace with a real token before running; the call fails without one.
  static String ACCESS_TOKEN = "YOUR-ACCESS-TOKEN";

  /**
   * Issues a GET with access_token and page=1, then prints the response log,
   * last-updated timestamp, and the first entry's title and link.
   * Performs live network I/O.
   */
  public static void main(String[] args) {
    RestTemplate restTemplate = new RestTemplate();
    Map<String, String> params = new HashMap<String, String>();
    params.put("access_token", ACCESS_TOKEN);
    params.put("page", "1");
    // Expand the query parameters into the endpoint URL.
    String url = UrlFormatter.format(ENDPOINT, params);
    FeedResponseEntity response =
        restTemplate.getForObject(url, FeedResponseEntity.class, params);
    System.out.println(response.getLog());
    System.out.println(response.getLastUpdatedAt());
    // NOTE(review): assumes the feed contains at least one entry — an empty
    // feed would throw IndexOutOfBoundsException here.
    System.out.println(response.getEntries().get(0).getTitle());
    System.out.println(response.getEntries().get(0).getLink());
  }
}
apache-2.0
HonzaKral/elasticsearch
x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchRequest.java
11667
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

package org.elasticsearch.xpack.eql.action;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.AbstractQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.search.searchafter.SearchAfterBuilder;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;

import java.io.IOException;
import java.util.Arrays;
import java.util.Map;
import java.util.Objects;
import java.util.function.Supplier;

import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.xpack.eql.action.RequestDefaults.FETCH_SIZE;
import static org.elasticsearch.xpack.eql.action.RequestDefaults.FIELD_EVENT_CATEGORY;
import static org.elasticsearch.xpack.eql.action.RequestDefaults.FIELD_TIMESTAMP;
import static org.elasticsearch.xpack.eql.action.RequestDefaults.FIELD_IMPLICIT_JOIN_KEY;

/**
 * Request for the EQL search action: carries the EQL query string, target
 * indices, an optional filter, the field names EQL needs (timestamp, event
 * category, implicit join key), a fetch size and an optional search_after
 * cursor. Supports both wire serialization (StreamInput/Output) and
 * XContent parsing/rendering.
 */
public class EqlSearchRequest extends ActionRequest implements IndicesRequest.Replaceable, ToXContent {

    private String[] indices;
    private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false);

    // Optional additional filter applied alongside the EQL query.
    private QueryBuilder filter = null;
    // Field names with project-wide defaults from RequestDefaults.
    private String timestampField = FIELD_TIMESTAMP;
    private String eventCategoryField = FIELD_EVENT_CATEGORY;
    private String implicitJoinKeyField = FIELD_IMPLICIT_JOIN_KEY;
    private int fetchSize = FETCH_SIZE;
    private SearchAfterBuilder searchAfterBuilder;
    private String query;

    // JSON body keys; kept as plain strings so toXContent can reuse them.
    static final String KEY_FILTER = "filter";
    static final String KEY_TIMESTAMP_FIELD = "timestamp_field";
    static final String KEY_EVENT_CATEGORY_FIELD = "event_category_field";
    static final String KEY_IMPLICIT_JOIN_KEY_FIELD = "implicit_join_key_field";
    static final String KEY_SIZE = "size";
    static final String KEY_SEARCH_AFTER = "search_after";
    static final String KEY_QUERY = "query";

    static final ParseField FILTER = new ParseField(KEY_FILTER);
    static final ParseField TIMESTAMP_FIELD = new ParseField(KEY_TIMESTAMP_FIELD);
    static final ParseField EVENT_CATEGORY_FIELD = new ParseField(KEY_EVENT_CATEGORY_FIELD);
    static final ParseField IMPLICIT_JOIN_KEY_FIELD = new ParseField(KEY_IMPLICIT_JOIN_KEY_FIELD);
    static final ParseField SIZE = new ParseField(KEY_SIZE);
    static final ParseField SEARCH_AFTER = new ParseField(KEY_SEARCH_AFTER);
    static final ParseField QUERY = new ParseField(KEY_QUERY);

    private static final ObjectParser<EqlSearchRequest, Void> PARSER = objectParser(EqlSearchRequest::new);

    public EqlSearchRequest() {
        super();
    }

    /**
     * Wire deserialization; field order must mirror {@link #writeTo}.
     */
    public EqlSearchRequest(StreamInput in) throws IOException {
        super(in);
        // NOTE(review): writeTo uses writeStringArrayNullable but this reads
        // with readStringArray — confirm the two tolerate a null indices
        // array symmetrically.
        indices = in.readStringArray();
        indicesOptions = IndicesOptions.readIndicesOptions(in);
        filter = in.readOptionalNamedWriteable(QueryBuilder.class);
        timestampField = in.readString();
        eventCategoryField = in.readString();
        implicitJoinKeyField = in.readString();
        fetchSize = in.readVInt();
        searchAfterBuilder = in.readOptionalWriteable(SearchAfterBuilder::new);
        query = in.readString();
    }

    /**
     * Validates the request: indices (and each element), indicesOptions,
     * query, the three field names, and a positive fetch size. Returns null
     * when everything is valid.
     */
    @Override
    public ActionRequestValidationException validate() {
        ActionRequestValidationException validationException = null;
        if (indices == null) {
            validationException = addValidationError("indices is null", validationException);
        } else {
            for (String index : indices) {
                if (index == null) {
                    validationException = addValidationError("index is null", validationException);
                    break;
                }
            }
        }

        if (indicesOptions == null) {
            validationException = addValidationError("indicesOptions is null", validationException);
        }

        if (query == null || query.isEmpty()) {
            validationException = addValidationError("query is null or empty", validationException);
        }

        if (timestampField == null || timestampField.isEmpty()) {
            validationException = addValidationError("@timestamp field is null or empty", validationException);
        }

        if (eventCategoryField == null || eventCategoryField.isEmpty()) {
            validationException = addValidationError("event category field is null or empty", validationException);
        }

        if (implicitJoinKeyField == null || implicitJoinKeyField.isEmpty()) {
            validationException = addValidationError("implicit join key field is null or empty", validationException);
        }

        if (fetchSize <= 0) {
            validationException = addValidationError("size must be greater than 0", validationException);
        }

        return validationException;
    }

    /**
     * Renders the request body; optional parts (filter, implicit join key,
     * search_after) are emitted only when set.
     */
    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        if (filter != null) {
            builder.field(KEY_FILTER, filter);
        }
        builder.field(KEY_TIMESTAMP_FIELD, timestampField());
        builder.field(KEY_EVENT_CATEGORY_FIELD, eventCategoryField());
        if (implicitJoinKeyField != null) {
            builder.field(KEY_IMPLICIT_JOIN_KEY_FIELD, implicitJoinKeyField());
        }
        builder.field(KEY_SIZE, fetchSize());

        if (searchAfterBuilder != null) {
            builder.array(SEARCH_AFTER.getPreferredName(), searchAfterBuilder.getSortValues());
        }

        builder.field(KEY_QUERY, query);

        return builder;
    }

    /** Parses a request body using the shared {@link #PARSER}. */
    public static EqlSearchRequest fromXContent(XContentParser parser) {
        return PARSER.apply(parser, null);
    }

    /**
     * Builds the body parser; generic over R so subclasses can reuse the
     * same field declarations with their own supplier.
     */
    protected static <R extends EqlSearchRequest> ObjectParser<R, Void> objectParser(Supplier<R> supplier) {
        // Second argument false: unknown fields are rejected.
        ObjectParser<R, Void> parser = new ObjectParser<>("eql/search", false, supplier);
        parser.declareObject(EqlSearchRequest::filter,
            (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), FILTER);
        parser.declareString(EqlSearchRequest::timestampField, TIMESTAMP_FIELD);
        parser.declareString(EqlSearchRequest::eventCategoryField, EVENT_CATEGORY_FIELD);
        parser.declareString(EqlSearchRequest::implicitJoinKeyField, IMPLICIT_JOIN_KEY_FIELD);
        parser.declareInt(EqlSearchRequest::fetchSize, SIZE);
        parser.declareField(EqlSearchRequest::setSearchAfter, SearchAfterBuilder::fromXContent, SEARCH_AFTER,
            ObjectParser.ValueType.OBJECT_ARRAY);
        parser.declareString(EqlSearchRequest::query, QUERY);
        return parser;
    }

    @Override
    public EqlSearchRequest indices(String... indices) {
        this.indices = indices;
        return this;
    }

    public QueryBuilder filter() {
        return this.filter;
    }

    public EqlSearchRequest filter(QueryBuilder filter) {
        this.filter = filter;
        return this;
    }

    public String timestampField() {
        return this.timestampField;
    }

    public EqlSearchRequest timestampField(String timestampField) {
        this.timestampField = timestampField;
        return this;
    }

    public String eventCategoryField() {
        return this.eventCategoryField;
    }

    public EqlSearchRequest eventCategoryField(String eventCategoryField) {
        this.eventCategoryField = eventCategoryField;
        return this;
    }

    public String implicitJoinKeyField() {
        return this.implicitJoinKeyField;
    }

    public EqlSearchRequest implicitJoinKeyField(String implicitJoinKeyField) {
        this.implicitJoinKeyField = implicitJoinKeyField;
        return this;
    }

    public int fetchSize() {
        return this.fetchSize;
    }

    public EqlSearchRequest fetchSize(int size) {
        this.fetchSize = size;
        return this;
    }

    /** Returns the search_after sort values, or null when no cursor is set. */
    public Object[] searchAfter() {
        if (searchAfterBuilder == null) {
            return null;
        }
        return searchAfterBuilder.getSortValues();
    }

    public EqlSearchRequest searchAfter(Object[] values) {
        this.searchAfterBuilder = new SearchAfterBuilder().setSortValues(values);
        return this;
    }

    // Private: used only by the parser declaration above.
    private EqlSearchRequest setSearchAfter(SearchAfterBuilder builder) {
        this.searchAfterBuilder = builder;
        return this;
    }

    public String query() {
        return this.query;
    }

    public EqlSearchRequest query(String query) {
        this.query = query;
        return this;
    }

    /**
     * Wire serialization; field order must mirror the StreamInput
     * constructor.
     */
    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeStringArrayNullable(indices);
        indicesOptions.writeIndicesOptions(out);
        out.writeOptionalNamedWriteable(filter);
        out.writeString(timestampField);
        out.writeString(eventCategoryField);
        out.writeString(implicitJoinKeyField);
        out.writeVInt(fetchSize);
        out.writeOptionalWriteable(searchAfterBuilder);
        out.writeString(query);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        EqlSearchRequest that = (EqlSearchRequest) o;
        return fetchSize == that.fetchSize &&
            Arrays.equals(indices, that.indices) &&
            Objects.equals(indicesOptions, that.indicesOptions) &&
            Objects.equals(filter, that.filter) &&
            Objects.equals(timestampField, that.timestampField) &&
            Objects.equals(eventCategoryField, that.eventCategoryField) &&
            Objects.equals(implicitJoinKeyField, that.implicitJoinKeyField) &&
            Objects.equals(searchAfterBuilder, that.searchAfterBuilder) &&
            Objects.equals(query, that.query);
    }

    @Override
    public int hashCode() {
        // indices is an array, so its content hash is folded in explicitly.
        return Objects.hash(
            Arrays.hashCode(indices),
            indicesOptions,
            filter,
            fetchSize,
            timestampField,
            eventCategoryField,
            implicitJoinKeyField,
            searchAfterBuilder,
            query);
    }

    @Override
    public String[] indices() {
        return indices;
    }

    public EqlSearchRequest indicesOptions(IndicesOptions indicesOptions) {
        this.indicesOptions = indicesOptions;
        return this;
    }

    @Override
    public IndicesOptions indicesOptions() {
        return indicesOptions;
    }

    /**
     * Creates the task representing this search; the lazy description lists
     * the indices and the query text.
     */
    @Override
    public Task createTask(long id, String type, String action, TaskId parentTaskId, Map<String, String> headers) {
        return new EqlSearchTask(id, type, action, () -> {
            StringBuilder sb = new StringBuilder();
            sb.append("indices[");
            Strings.arrayToDelimitedString(indices, ",", sb);
            sb.append("], ");
            sb.append(query);
            return sb.toString();
        }, parentTaskId, headers);
    }
}
apache-2.0
SciGaP/DEPRECATED-Cipres-Airavata-POC
saminda/cipres-airavata/sdk/src/main/java/org/ngbw/sdk/common/util/SSHProcessRunner.java
5258
/**
 *
 * @author Terri Liebowitz Schwartz
 *
 */

/**
 SSHProcessRunner provides a simple way to run a remote command and wait for it
 to complete. You can retrieve the output and error text as strings and get the
 exit code. For example:

     SSHProcessRunner pr = new SSHProcessRunner(true);
     exitCode = pr.run("echo 'fiddlefaddle\nbiddle' > xxx 2>&1; wc -l xxx; cat xxx; test -f xxx");
     System.out.println("exitCode is " + exitCode + ". Output is " + pr.getStdOut());

 If you do a fancy command line with i/o redirection like in the example above
 make sure you use the right shell syntax for the login shell of the
 remote_user@remote_host. You specify only the id of the remote host here, the
 full hostname, username, password/keyfile, are looked up with the id in
 ssl.properties.
*/
package org.ngbw.sdk.common.util;

import java.io.BufferedOutputStream;
import java.io.OutputStream;
import java.util.concurrent.Future;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.ngbw.sdk.core.io.SSLConnectionManager;

import com.trilead.ssh2.ChannelCondition;
import com.trilead.ssh2.Connection;
import com.trilead.ssh2.Session;

/**
 * Runs a single command on a remote host over SSH, either synchronously
 * ({@link #run}) or in start/wait style ({@link #start}, {@link #waitForExit})
 * when stdin must be fed to the remote process.
 *
 * Not thread safe: each instance manages one connection/session at a time.
 */
public class SSHProcessRunner
{
    private static final Log log = LogFactory.getLog(SSHProcessRunner.class.getName());

    /** Exit status of the most recently completed remote command. */
    int exitStatus;

    /** Asynchronously-collected stdout of the remote command. */
    Future<String> stdOut;

    /** Asynchronously-collected stderr of the remote command. */
    Future<String> stdErr;

    // NOTE(review): this field is never assigned in this file; getStdin() builds a
    // fresh stream instead. Kept (package-visible) for source compatibility.
    OutputStream stdin;

    private Connection m_sshConn;
    private String m_hostName;

    // Maximum time to wait for the remote command, in minutes (0 = wait forever).
    private static final int TIMEOUT = 30; // wait no more than minutes.

    // Only used with start()/waitForExit(), not with run().
    private Session m_session;

    /** @return the exit status of the last completed command */
    public int getExitStatus()
    {
        return exitStatus;
    }

    /**
     * @return the command's stdout as a string; never null ("" when output could
     *         not be collected — errors are logged, not thrown)
     */
    public String getStdOut()
    {
        String retval = "";
        try
        {
            retval = stdOut.get();
        }
        catch (Exception e)
        {
            log.error("", e);
        }
        return retval == null ? "" : retval;
    }

    /**
     * @return the command's stderr as a string; never null ("" when output could
     *         not be collected — errors are logged, not thrown)
     */
    public String getStdErr()
    {
        String retval = "";
        try
        {
            retval = stdErr.get();
        }
        catch (Exception e)
        {
            log.error("", e);
        }
        return retval == null ? "" : retval;
    }

    /**
     * @param hostName id of the remote host; connection details are looked up
     *                 via SSLConnectionManager (ssl.properties)
     */
    public SSHProcessRunner(String hostName)
    {
        m_hostName = hostName;
    }

    /** Acquires a Connection for m_hostName and opens a Session on it. */
    private Session openSession() throws Exception
    {
        assert (m_sshConn == null);
        m_sshConn = SSLConnectionManager.getInstance().getConnection(m_hostName);
        if (m_sshConn == null)
        {
            throw new Exception("No connection could be acquired from host: " + m_hostName);
        }
        return m_sshConn.openSession();
    }

    /** Closes the session and releases the underlying connection. */
    private void closeSession(Session session)
    {
        assert (m_sshConn != null);
        session.close();
        m_sshConn.close();
        m_sshConn = null;
    }

    /**
     If you need to send stdin to the process do this:

         SSHProcessRunner runner = new SSHProcessRunner(host);
         runner.start(command);
         OutputStream stdin = runner.getStdin();
         stdin.write(...);
         ...
         stdin.flush();
         stdin.close();
         int exitstatus = runner.waitForExit();
    */
    public void start(String command) throws Exception
    {
        m_session = null;
        try
        {
            m_session = openSession();
            m_session.execCommand(command);
            stdOut = InputStreamCollector.readInputStream(m_session.getStdout());
            stdErr = InputStreamCollector.readInputStream(m_session.getStderr());
        }
        catch (Exception e)
        {
            // BUG FIX: the original rethrew only inside "if (m_session != null)",
            // so a failure in openSession() was silently swallowed and the caller
            // would later NPE on getStdin()/waitForExit(). Always rethrow.
            if (m_session != null)
            {
                closeSession(m_session);
                m_session = null;
            }
            else if (m_sshConn != null)
            {
                // openSession() may have acquired the Connection but failed to
                // open the channel; release it so the next call's
                // assert(m_sshConn == null) still holds.
                m_sshConn.close();
                m_sshConn = null;
            }
            throw e;
        }
    }

    /** @return a buffered stream feeding the remote process's stdin */
    public OutputStream getStdin()
    {
        return new BufferedOutputStream(m_session.getStdin(), 8192);
    }

    /**
     * Waits for the command started with start() to exit.
     *
     * @return the remote exit status
     * @throws java.util.concurrent.TimeoutException if TIMEOUT elapses first
     */
    public int waitForExit() throws Exception
    {
        try
        {
            // The problem is that setting a timeout on the Session doesn't kill the remote job!
            long timeout = TIMEOUT > 0 ? ((TIMEOUT * 60) * 1000) : 0;
            int retval = m_session.waitForCondition(ChannelCondition.EXIT_STATUS, timeout);
            if ((retval & ChannelCondition.TIMEOUT) != 0)
            {
                log.debug("TL Got a timeout.");
                throw new java.util.concurrent.TimeoutException();
            }
            else
            {
                return exitStatus = m_session.getExitStatus();
            }
        }
        finally
        {
            close();
        }
    }

    /** Idempotently closes the start()-style session, if any. */
    public void close()
    {
        if (m_session != null)
        {
            closeSession(m_session);
            m_session = null;
        }
    }

    /**
     Opens an ssh session, runs the command on the remote host.

     Possible outcomes:
         Can throw an exception (if can't open ssh session for example)
         Timeout waiting for remote command to complete - throws TimeoutException
         Remote command completed, exitStatus is valid.
    */
    public int run(String command) throws Exception
    {
        Session session = null;
        try
        {
            session = openSession();
            session.execCommand(command);
            stdOut = InputStreamCollector.readInputStream(session.getStdout());
            stdErr = InputStreamCollector.readInputStream(session.getStderr());

            // trilead Session.waitForCondition specifies timeout in milliseconds. convert from
            // minutes.
            long timeout = TIMEOUT > 0 ? ((TIMEOUT * 60) * 1000) : 0;

            // The problem is that setting a timeout on the Session doesn't kill the remote job!
            int retval = session.waitForCondition(ChannelCondition.EXIT_STATUS, timeout);
            if ((retval & ChannelCondition.TIMEOUT) != 0)
            {
                log.debug("TL Got a timeout.");
                throw new java.util.concurrent.TimeoutException();
            }
            else
            {
                return exitStatus = session.getExitStatus();
            }
        }
        finally
        {
            if (session != null)
            {
                closeSession(session);
            }
            else if (m_sshConn != null)
            {
                // LEAK FIX: openSession() failed after acquiring the Connection;
                // release it so a subsequent run() can acquire a fresh one.
                m_sshConn.close();
                m_sshConn = null;
            }
        }
    }
}
apache-2.0
seanzwx/tmp
seatalk/platform/framework/framework-service/src/main/java/com/sean/service/entity/ActionEntity.java
1880
package com.sean.service.entity;

import com.sean.service.core.Version;
import com.sean.service.enums.ReturnType;
import com.sean.service.worker.Worker;

/**
 * Descriptor for a single Action: its parameter contract (required/optional),
 * return parameters, permission and authentication requirements, return type,
 * implementing class, human-readable description, version and executing worker.
 *
 * All state is assigned once in the constructor and exposed read-only.
 * (Note: "transation" is a long-standing spelling of "transaction" in the
 * public accessor, preserved for caller compatibility.)
 *
 * @author sean
 */
public class ActionEntity {

    // Whether this action runs inside a transaction.
    private boolean transation;
    // Parameters that callers must supply.
    private ParameterEntity[] mustParams;
    // Parameters that callers may supply.
    private ParameterEntity[] optionalParams;
    // Parameters the action returns.
    private ReturnParameterEntity[] returnParams;
    // Permission bit/level required to invoke the action.
    private int permission;
    // Whether the caller must be authenticated.
    private boolean authenticate;
    private ReturnType returnType;
    private String description;
    private Version version;
    // The class implementing the action.
    private Class<?> cls;
    // Worker that executes the action.
    private Worker worker;

    public ActionEntity(boolean transation, ReturnParameterEntity[] returnParams,
            ParameterEntity[] mustParams, ParameterEntity[] optionalParams,
            int permission, boolean authenticate, ReturnType returnType,
            Class<?> cls, String description, Version version, Worker worker) {
        this.transation = transation;
        this.returnParams = returnParams;
        this.mustParams = mustParams;
        this.optionalParams = optionalParams;
        this.permission = permission;
        this.authenticate = authenticate;
        this.returnType = returnType;
        this.cls = cls;
        this.description = description;
        this.version = version;
        this.worker = worker;
    }

    /** @return true when the action must run transactionally */
    public boolean isTransation() {
        return transation;
    }

    /** @return true when the caller must be authenticated */
    public boolean isAuthenticate() {
        return authenticate;
    }

    /** @return the required permission level */
    public int getPermission() {
        return permission;
    }

    /** @return parameters callers must supply */
    public ParameterEntity[] getMustParams() {
        return mustParams;
    }

    /** @return parameters callers may supply */
    public ParameterEntity[] getOptionalParams() {
        return optionalParams;
    }

    /** @return parameters the action returns */
    public ReturnParameterEntity[] getReturnParams() {
        return returnParams;
    }

    /** @return how the result is returned */
    public ReturnType getReturnType() {
        return returnType;
    }

    /** @return the implementing class */
    public Class<?> getCls() {
        return cls;
    }

    /** @return the human-readable description */
    public String getDescription() {
        return description;
    }

    /** @return the action's version */
    public Version getVersion() {
        return version;
    }

    /** @return the worker that executes the action */
    public Worker getWorker() {
        return worker;
    }
}
apache-2.0
miniway/presto
presto-product-tests/src/main/java/io/prestosql/tests/hive/TestAvroSchemaEvolution.java
7401
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.prestosql.tests.hive;

import io.prestodb.tempto.AfterTestWithContext;
import io.prestodb.tempto.BeforeTestWithContext;
import io.prestodb.tempto.ProductTest;
import io.prestodb.tempto.query.QueryExecutor;
import org.testng.annotations.Test;

import static io.prestodb.tempto.assertions.QueryAssert.Row.row;
import static io.prestodb.tempto.assertions.QueryAssert.assertThat;
import static io.prestodb.tempto.context.ThreadLocalTestContextHolder.testContext;
import static io.prestodb.tempto.query.QueryExecutor.query;
import static io.prestosql.tests.TestGroups.AVRO;
import static java.lang.String.format;

/**
 * Product tests for Avro schema evolution: the table's effective columns come
 * from the Avro schema referenced by {@code avro.schema.url}, so swapping that
 * URL (rename / remove / add column, change column type) must be reflected by
 * SHOW COLUMNS and by subsequent reads/writes.
 */
public class TestAvroSchemaEvolution
        extends ProductTest
{
    private static final String TABLE_NAME = "product_tests_avro_table";
    private static final String ORIGINAL_SCHEMA = "file:///docker/volumes/presto-product-tests/avro/original_schema.avsc";
    // The declared dummy_col is irrelevant at runtime; the Avro schema behind
    // avro_schema_url defines the actual columns (string_col, int_col).
    private static final String CREATE_TABLE = format("" +
            "CREATE TABLE %s (dummy_col VARCHAR)" +
            "WITH (" +
            "format='AVRO', " +
            "avro_schema_url='%s'" +
            ")", TABLE_NAME, ORIGINAL_SCHEMA);
    private static final String RENAMED_COLUMN_SCHEMA = "file:///docker/volumes/presto-product-tests/avro/rename_column_schema.avsc";
    private static final String REMOVED_COLUMN_SCHEMA = "file:///docker/volumes/presto-product-tests/avro/remove_column_schema.avsc";
    private static final String ADDED_COLUMN_SCHEMA = "file:///docker/volumes/presto-product-tests/avro/add_column_schema.avsc";
    private static final String CHANGE_COLUMN_TYPE_SCHEMA = "file:///docker/volumes/presto-product-tests/avro/change_column_type_schema.avsc";
    private static final String INCOMPATIBLE_TYPE_SCHEMA = "file:///docker/volumes/presto-product-tests/avro/incompatible_type_schema.avsc";
    private static final String SELECT_STAR = "SELECT * FROM " + TABLE_NAME;
    private static final String COLUMNS_IN_TABLE = "SHOW COLUMNS IN " + TABLE_NAME;

    /** Creates the table with the original schema and seeds one row. */
    @BeforeTestWithContext
    public void createAndLoadTable()
    {
        query(CREATE_TABLE);
        query(format("INSERT INTO %s VALUES ('string0', 0)", TABLE_NAME));
    }

    @AfterTestWithContext
    public void dropTestTable()
    {
        query(format("DROP TABLE IF EXISTS %s", TABLE_NAME));
    }

    @Test(groups = {AVRO})
    public void testSelectTable()
    {
        assertThat(query(format("SELECT string_col FROM %s", TABLE_NAME)))
                .containsExactly(row("string0"));
    }

    @Test(groups = {AVRO})
    public void testInsertAfterSchemaEvolution()
    {
        assertThat(query(SELECT_STAR))
                .containsExactly(row("string0", 0));

        alterTableSchemaTo(ADDED_COLUMN_SCHEMA);
        query(format("INSERT INTO %s VALUES ('string1', 1, 101)", TABLE_NAME));
        // The pre-evolution row reads the added column's schema default (100).
        assertThat(query(SELECT_STAR))
                .containsOnly(
                        row("string0", 0, 100),
                        row("string1", 1, 101));
    }

    @Test(groups = {AVRO})
    public void testSchemaEvolutionWithIncompatibleType()
    {
        assertThat(query(COLUMNS_IN_TABLE))
                .containsExactly(
                        row("string_col", "varchar", "", ""),
                        row("int_col", "integer", "", ""));
        assertThat(query(SELECT_STAR))
                .containsExactly(row("string0", 0));

        alterTableSchemaTo(INCOMPATIBLE_TYPE_SCHEMA);
        assertThat(() -> query(SELECT_STAR))
                .failsWithMessage("Found int, expecting string");
    }

    @Test(groups = {AVRO})
    public void testSchemaEvolution()
    {
        assertThat(query(COLUMNS_IN_TABLE))
                .containsExactly(
                        row("string_col", "varchar", "", ""),
                        row("int_col", "integer", "", ""));
        assertThat(query(SELECT_STAR))
                .containsExactly(row("string0", 0));

        alterTableSchemaTo(CHANGE_COLUMN_TYPE_SCHEMA);
        assertThat(query(COLUMNS_IN_TABLE))
                .containsExactly(
                        row("string_col", "varchar", "", ""),
                        row("int_col", "bigint", "", ""));
        assertThat(query(SELECT_STAR))
                .containsExactly(row("string0", 0));

        alterTableSchemaTo(ADDED_COLUMN_SCHEMA);
        assertThat(query(COLUMNS_IN_TABLE))
                .containsExactly(
                        row("string_col", "varchar", "", ""),
                        row("int_col", "integer", "", ""),
                        row("int_col_added", "integer", "", ""));
        assertThat(query(SELECT_STAR))
                .containsExactly(row("string0", 0, 100));

        alterTableSchemaTo(REMOVED_COLUMN_SCHEMA);
        assertThat(query(COLUMNS_IN_TABLE))
                .containsExactly(row("int_col", "integer", "", ""));
        assertThat(query(SELECT_STAR))
                .containsExactly(row(0));

        alterTableSchemaTo(RENAMED_COLUMN_SCHEMA);
        assertThat(query(COLUMNS_IN_TABLE))
                .containsExactly(
                        row("string_col", "varchar", "", ""),
                        row("int_col_renamed", "integer", "", ""));
        // The renamed column has no data under its new name, hence null.
        assertThat(query(SELECT_STAR))
                .containsExactly(row("string0", null));
    }

    @Test(groups = {AVRO})
    public void testSchemaWhenUrlIsUnset()
    {
        assertThat(query(COLUMNS_IN_TABLE))
                .containsExactly(
                        row("string_col", "varchar", "", ""),
                        row("int_col", "integer", "", ""));
        assertThat(query(SELECT_STAR))
                .containsExactly(row("string0", 0));

        // With avro.schema.url removed, the declared columns become visible.
        executeHiveQuery(format("ALTER TABLE %s UNSET TBLPROPERTIES('avro.schema.url')", TABLE_NAME));
        assertThat(query(COLUMNS_IN_TABLE))
                .containsExactly(
                        row("dummy_col", "varchar", "", ""));
    }

    @Test(groups = {AVRO})
    public void testCreateTableLike()
    {
        String createTableLikeName = "test_avro_like";
        try {
            query(format(
                    "CREATE TABLE %s (LIKE %s INCLUDING PROPERTIES)",
                    createTableLikeName,
                    TABLE_NAME));
            query(format("INSERT INTO %s VALUES ('string0', 0)", createTableLikeName));
            assertThat(query(format("SELECT string_col FROM %s", createTableLikeName)))
                    .containsExactly(row("string0"));
        }
        finally {
            // BUG FIX: the original dropped the table only on the success path,
            // leaking it when an assertion failed and breaking subsequent runs.
            query("DROP TABLE IF EXISTS " + createTableLikeName);
        }
    }

    private void alterTableSchemaTo(String schema)
    {
        executeHiveQuery(format("ALTER TABLE %s SET TBLPROPERTIES('avro.schema.url'='%s')", TABLE_NAME, schema));
    }

    private static void executeHiveQuery(String query)
    {
        testContext().getDependency(QueryExecutor.class, "hive").executeQuery(query);
    }
}
apache-2.0
1Evgeny/java-a-to-z
chapter_010_hibernate/configuration/src/main/java/by.vorokhobko/servlets/package-info.java
161
/**
 * Servlet classes for this configuration module
 * (presumably the web-layer request handlers of the Hibernate exercise —
 * confirm against the module's servlet registrations / web.xml).
 *
 * @author Evgeny Vorokhobko (vorokhobko2011@yandex.ru).
 * @version 1.
 * @since 11.11.2016.
 */
package by.vorokhobko.servlets;
apache-2.0
apache/camel
components/camel-telegram/src/test/java/org/apache/camel/component/telegram/TelegramConsumerMediaVideoTest.java
3546
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.camel.component.telegram; import org.apache.camel.EndpointInject; import org.apache.camel.Exchange; import org.apache.camel.RoutesBuilder; import org.apache.camel.builder.RouteBuilder; import org.apache.camel.component.mock.MockEndpoint; import org.apache.camel.component.telegram.model.IncomingMessage; import org.apache.camel.component.telegram.model.IncomingPhotoSize; import org.apache.camel.component.telegram.model.IncomingVideo; import org.apache.camel.component.telegram.util.TelegramMockRoutes; import org.apache.camel.component.telegram.util.TelegramTestSupport; import org.apache.camel.component.telegram.util.TelegramTestUtil; import org.junit.jupiter.api.Test; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; /** * Tests the reception of messages without text having media content. 
*/ public class TelegramConsumerMediaVideoTest extends TelegramTestSupport { @EndpointInject("mock:telegram") private MockEndpoint endpoint; @Test public void testReceptionOfAMessageWithAVideo() throws Exception { endpoint.expectedMinimumMessageCount(1); endpoint.assertIsSatisfied(5000); Exchange mediaExchange = endpoint.getExchanges().get(0); IncomingMessage msg = mediaExchange.getIn().getBody(IncomingMessage.class); IncomingVideo video = msg.getVideo(); assertNotNull(video); assertEquals(Integer.valueOf(2), video.getDurationSeconds()); assertEquals(Integer.valueOf(360), video.getHeight()); assertEquals(Integer.valueOf(640), video.getWidth()); assertEquals(Long.valueOf(299284), video.getFileSize()); assertEquals("BAADBAADAgADyzvwCC7_4AyvdAXXXX", video.getFileId()); IncomingPhotoSize thumb = video.getThumb(); assertNotNull(thumb); } @Override protected RoutesBuilder[] createRouteBuilders() { return new RoutesBuilder[] { getMockRoutes(), new RouteBuilder() { @Override public void configure() { from("telegram:bots?authorizationToken=mock-token") .to("mock:telegram"); } } }; } @Override protected TelegramMockRoutes createMockRoutes() { return new TelegramMockRoutes(port) .addEndpoint( "getUpdates", "GET", String.class, TelegramTestUtil.stringResource("messages/updates-media-video.json"), TelegramTestUtil.stringResource("messages/updates-empty.json")); } }
apache-2.0
cloudera/cdk
cdk-data/cdk-data-hbase/src/main/java/com/cloudera/cdk/data/hbase/DaoView.java
3792
/**
 * Copyright 2013 Cloudera Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.cloudera.cdk.data.hbase;

import com.cloudera.cdk.data.DatasetReader;
import com.cloudera.cdk.data.DatasetWriter;
import com.cloudera.cdk.data.FieldPartitioner;
import com.cloudera.cdk.data.PartitionKey;
import com.cloudera.cdk.data.PartitionStrategy;
import com.cloudera.cdk.data.View;
import com.cloudera.cdk.data.spi.AbstractRangeView;
import com.cloudera.cdk.data.spi.StorageKey;
import com.cloudera.cdk.data.spi.Marker;
import com.cloudera.cdk.data.spi.MarkerRange;

import java.util.List;

/**
 * A range-restricted view over a {@link DaoDataset}: reads are delegated to a
 * DAO scanner bounded by the view's marker range, and writes are checked
 * against that range before being forwarded to a batch writer.
 */
class DaoView<E> extends AbstractRangeView<E> {

  private final DaoDataset<E> dataset;

  DaoView(DaoDataset<E> dataset) {
    super(dataset);
    this.dataset = dataset;
  }

  private DaoView(DaoView<E> view, MarkerRange range) {
    super(view, range);
    this.dataset = view.dataset;
  }

  @Override
  protected DaoView<E> newLimitedCopy(MarkerRange newRange) {
    return new DaoView<E>(this, newRange);
  }

  @Override
  public DatasetReader<E> newReader() {
    // Translate both range boundaries into partition keys for the DAO scanner.
    PartitionKey startKey = toPartitionKey(range.getStart());
    PartitionKey stopKey = toPartitionKey(range.getEnd());
    return dataset.getDao().getScanner(
        startKey, range.getStart().isInclusive(),
        stopKey, range.getEnd().isInclusive());
  }

  @Override
  public DatasetWriter<E> newWriter() {
    final DatasetWriter<E> delegate = dataset.getDao().newBatch();
    final StorageKey reusableKey =
        new StorageKey(dataset.getDescriptor().getPartitionStrategy());
    // Wrap the batch writer so every entity is range-checked on write.
    return new DatasetWriter<E>() {
      @Override
      public void open() {
        delegate.open();
      }

      @Override
      public void write(E entity) {
        StorageKey key = reusableKey.reuseFor(entity);
        if (!range.contains(key)) {
          throw new IllegalArgumentException("View does not contain entity: " + entity);
        }
        delegate.write(entity);
      }

      @Override
      public void flush() {
        delegate.flush();
      }

      @Override
      public void close() {
        delegate.close();
      }

      @Override
      public boolean isOpen() {
        return delegate.isOpen();
      }
    };
  }

  @Override
  public Iterable<View<E>> getCoveringPartitions() {
    // TODO: use HBase InputFormat to construct splits
    throw new UnsupportedOperationException("getCoveringPartitions is not yet " +
        "supported.");
  }

  /** Converts a range boundary into a PartitionKey; null for an open boundary. */
  @SuppressWarnings("deprecation")
  private PartitionKey toPartitionKey(MarkerRange.Boundary boundary) {
    if (boundary == null || boundary.getBound() == null) {
      return null;
    }
    return keyFor(dataset.getDescriptor().getPartitionStrategy(), boundary.getBound());
  }

  /** Builds a PartitionKey by extracting each partitioner's value from the marker. */
  @Deprecated
  @SuppressWarnings("unchecked")
  static PartitionKey keyFor(PartitionStrategy strategy, Marker marker) {
    final List<FieldPartitioner> fieldPartitioners = strategy.getFieldPartitioners();
    final Object[] keyValues = new Object[fieldPartitioners.size()];
    for (int i = 0; i < fieldPartitioners.size(); i++) {
      keyValues[i] = marker.valueFor(fieldPartitioners.get(i));
    }
    return strategy.partitionKey(keyValues);
  }
}
apache-2.0
awhitford/Resteasy
testsuite/integration-tests/src/test/java/org/jboss/resteasy/test/resource/basic/resource/ResourceLocatorSubresource.java
2291
package org.jboss.resteasy.test.resource.basic.resource;

import java.util.List;

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.UriInfo;

import org.junit.Assert;

/**
 * Sub-resource used by resource-locator tests. Each handler verifies that the
 * JAX-RS runtime reports the expected matched-URI ancestry and matched
 * resource objects for the current request.
 */
public class ResourceLocatorSubresource {

    @GET
    public String doGet(@Context UriInfo uri) {
        System.out.println("Uri Ancesstors for Subresource.doGet():");

        // Two matched URIs are expected: the locator path and the root.
        List<String> uris = uri.getMatchedURIs();
        Assert.assertEquals(2, uris.size());
        Assert.assertEquals("base/1/resources", uris.get(0));
        Assert.assertEquals("", uris.get(1));
        for (String ancestor : uris) {
            System.out.println(" " + ancestor);
        }

        System.out.println("Uri Ancesstors Object for Subresource.doGet():");

        // Matched resource instances mirror the locator chain, innermost first.
        List<Object> resources = uri.getMatchedResources();
        Assert.assertEquals(2, resources.size());
        Assert.assertEquals(ResourceLocatorSubresource.class, resources.get(0).getClass());
        Assert.assertEquals(ResourceLocatorBaseResource.class, resources.get(1).getClass());
        for (Object ancestor : resources) {
            System.out.println(" " + ancestor.getClass().getName());
        }

        return this.getClass().getName();
    }

    @Path("/subresource2")
    public Object getSubresource2(@Context UriInfo uri) {
        System.out.println("Uri Ancesstors for Subresource.getSubresource2():");

        // Three matched URIs: this sub-path, the locator path, and the root.
        List<String> uris = uri.getMatchedURIs();
        Assert.assertEquals(3, uris.size());
        Assert.assertEquals("base/1/resources/subresource2", uris.get(0));
        Assert.assertEquals("base/1/resources", uris.get(1));
        Assert.assertEquals("", uris.get(2));
        for (String ancestor : uris) {
            System.out.println(" " + ancestor);
        }

        System.out.println("Uri Ancesstors Object for Subresource.getSubresource2():");

        List<Object> resources = uri.getMatchedResources();
        Assert.assertEquals(2, resources.size());
        Assert.assertEquals(ResourceLocatorSubresource.class, resources.get(0).getClass());
        Assert.assertEquals(ResourceLocatorBaseResource.class, resources.get(1).getClass());
        for (Object ancestor : resources) {
            System.out.println(" " + ancestor.getClass().getName());
        }

        return new ResourceLocatorSubresource2();
    }
}
apache-2.0
afiantara/apache-wicket-1.5.7
src/wicket-examples/target/classes/org/apache/wicket/examples/forminput/FormInputModel.java
7945
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.wicket.examples.forminput;

import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;

import org.apache.wicket.IClusterable;

/**
 * Simple model object for FormInput example. Has a number of simple properties that can be
 * retrieved and set.
 */
public final class FormInputModel implements IClusterable
{
	/**
	 * Represents a line of text. Hack to get around the fact that strings are immutable.
	 */
	public final class Line implements IClusterable
	{
		private String text;

		/** @param text initial text */
		public Line(String text)
		{
			this.text = text;
		}

		/** @return text */
		public String getText()
		{
			return text;
		}

		/** @param text text */
		public void setText(String text)
		{
			this.text = text;
		}

		@Override
		public String toString()
		{
			return text;
		}
	}

	private Boolean booleanProperty;
	private Double doubleProperty = 20.5;
	private Integer integerInRangeProperty = 50;
	private Integer integerProperty = 100;
	private List<Line> lines = new ArrayList<Line>();
	private Integer multiply = 0;
	private String numberRadioChoice = FormInput.NUMBERS.get(0);
	private final List<String> numbersCheckGroup = new ArrayList<String>();
	private String numbersGroup;

	/** US phone number with mask '(###) ###-####'. */
	private UsPhoneNumber phoneNumberUS = new UsPhoneNumber("(123) 456-1234");

	private Set<String> siteSelection = new HashSet<String>();
	private String stringProperty = "test";
	private URL urlProperty;

	/**
	 * Construct. Seeds the URL property and three example lines.
	 */
	public FormInputModel()
	{
		try
		{
			urlProperty = new URL("http://wicket.apache.org");
		}
		catch (MalformedURLException e)
		{
			// The hard-coded URL is well-formed, so this is unreachable in practice;
			// no logger is available in this example model, hence the stack trace.
			e.printStackTrace();
		}
		lines.add(new Line("line one"));
		lines.add(new Line("line two"));
		lines.add(new Line("line three"));
	}

	/** @return booleanProperty */
	public Boolean getBooleanProperty()
	{
		return booleanProperty;
	}

	/** @return doubleProperty */
	public Double getDoubleProperty()
	{
		return doubleProperty;
	}

	/** @return integerInRangeProperty */
	public Integer getIntegerInRangeProperty()
	{
		return integerInRangeProperty;
	}

	/** @return integerProperty */
	public Integer getIntegerProperty()
	{
		return integerProperty;
	}

	/** @return lines */
	public List<Line> getLines()
	{
		return lines;
	}

	/** @return multiply */
	public Integer getMultiply()
	{
		return multiply;
	}

	/** @return the selected number radio choice */
	public String getNumberRadioChoice()
	{
		return numberRadioChoice;
	}

	/** @return the numbers list */
	public List<String> getNumbersCheckGroup()
	{
		return numbersCheckGroup;
	}

	/** @return the group number */
	public String getNumbersGroup()
	{
		return numbersGroup;
	}

	/** @return the phoneNumberUS */
	public UsPhoneNumber getPhoneNumberUS()
	{
		return phoneNumberUS;
	}

	/** @return selectedSites */
	public Set<String> getSiteSelection()
	{
		return siteSelection;
	}

	/** @return stringProperty */
	public String getStringProperty()
	{
		return stringProperty;
	}

	/** @return urlProperty */
	public URL getUrlProperty()
	{
		return urlProperty;
	}

	/** @param booleanProperty booleanProperty */
	public void setBooleanProperty(Boolean booleanProperty)
	{
		this.booleanProperty = booleanProperty;
	}

	/** @param doubleProperty doubleProperty */
	public void setDoubleProperty(Double doubleProperty)
	{
		this.doubleProperty = doubleProperty;
	}

	/** @param integerInRangeProperty integerInRangeProperty */
	public void setIntegerInRangeProperty(Integer integerInRangeProperty)
	{
		this.integerInRangeProperty = integerInRangeProperty;
	}

	/** @param integerProperty integerProperty */
	public void setIntegerProperty(Integer integerProperty)
	{
		this.integerProperty = integerProperty;
	}

	/** @param lines lines */
	public void setLines(List<Line> lines)
	{
		this.lines = lines;
	}

	/** @param multiply the multiply to set */
	public void setMultiply(Integer multiply)
	{
		this.multiply = multiply;
	}

	/** @param favoriteColor the number radio choice */
	public void setNumberRadioChoice(String favoriteColor)
	{
		numberRadioChoice = favoriteColor;
	}

	/** @param group number */
	public void setNumbersGroup(String group)
	{
		numbersGroup = group;
	}

	/** @param phoneNumberUS the phoneNumberUS to set */
	public void setPhoneNumberUS(UsPhoneNumber phoneNumberUS)
	{
		this.phoneNumberUS = phoneNumberUS;
	}

	/** @param selectedSites selectedSites */
	public void setSiteSelection(Set<String> selectedSites)
	{
		siteSelection = selectedSites;
	}

	/** @param stringProperty stringProperty */
	public void setStringProperty(String stringProperty)
	{
		this.stringProperty = stringProperty;
	}

	/** @param urlProperty urlProperty */
	public void setUrlProperty(URL urlProperty)
	{
		this.urlProperty = urlProperty;
	}

	@Override
	public String toString()
	{
		StringBuilder b = new StringBuilder();
		b.append("[TestInputObject stringProperty = '")
			.append(stringProperty)
			.append("', integerProperty = ")
			.append(integerProperty)
			.append(", doubleProperty = ")
			.append(doubleProperty)
			.append(", booleanProperty = ")
			.append(booleanProperty)
			.append(", integerInRangeProperty = ")
			.append(integerInRangeProperty)
			.append(", urlProperty = ")
			.append(urlProperty)
			.append(", phoneNumberUS = ")
			.append(phoneNumberUS)
			.append(", numberRadioChoice = ")
			.append(numberRadioChoice)
			.append(", numbersCheckgroup ")
			.append(numbersCheckGroup)
			.append(", numberRadioGroup= ")
			.append(numbersGroup);
		b.append(", selected sites {");
		for (Iterator<String> i = siteSelection.iterator(); i.hasNext();)
		{
			b.append(i.next());
			if (i.hasNext())
			{
				b.append(",");
			}
		}
		// BUG FIX: close the '{' opened for the site selection with '}' — the
		// original appended ']' here, producing mismatched delimiters.
		b.append("}");
		b.append(", lines [");
		for (Iterator<Line> i = lines.iterator(); i.hasNext();)
		{
			b.append(i.next());
			if (i.hasNext())
			{
				b.append(", ");
			}
		}
		b.append("]");
		b.append("]");
		return b.toString();
	}
}
apache-2.0
apache/geronimo-yoko
yoko-spec-corba/src/main/java/org/omg/PortableServer/AdapterActivatorPOA.java
5304
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.omg.PortableServer; // // This class is provided only for backward compatibility. // public abstract class AdapterActivatorPOA extends org.omg.PortableServer.Servant implements AdapterActivator { public AdapterActivator _this() { return this; } public AdapterActivator _this(org.omg.CORBA.ORB orb) { return this; } public String[] _all_interfaces(org.omg.PortableServer.POA poa, byte[] objectId) { throw new org.omg.CORBA.NO_IMPLEMENT(); } // // Implementation of org.omg.CORBA.Object methods // public boolean _is_a(String repository_id) { throw new org.omg.CORBA.NO_IMPLEMENT( "operation not supported by local object", 0x4f4d0000 | 3, // MinorNotSupportedByLocalObject org.omg.CORBA.CompletionStatus.COMPLETED_NO); } public boolean _is_equivalent(org.omg.CORBA.Object rhs) { return equals(rhs); } public boolean _non_existent() { return false; } public int _hash(int maximum) { // // Calculate a local hash value // return hashCode() % (maximum + 1); } public org.omg.CORBA.Object _duplicate() { throw new org.omg.CORBA.NO_IMPLEMENT( "operation not supported by local object", 0x4f4d0000 | 3, // MinorNotSupportedByLocalObject org.omg.CORBA.CompletionStatus.COMPLETED_NO); } public void _release() { 
throw new org.omg.CORBA.NO_IMPLEMENT( "operation not supported by local object", 0x4f4d0000 | 3, // MinorNotSupportedByLocalObject org.omg.CORBA.CompletionStatus.COMPLETED_NO); } /** * @deprecated Deprecated by CORBA 2.3. */ public org.omg.CORBA.InterfaceDef _get_interface() { throw new org.omg.CORBA.NO_IMPLEMENT( "operation not supported by local object", 0x4f4d0000 | 3, // MinorNotSupportedByLocalObject org.omg.CORBA.CompletionStatus.COMPLETED_NO); } public org.omg.CORBA.Object _get_interface_def() { throw new org.omg.CORBA.NO_IMPLEMENT( "operation not supported by local object", 0x4f4d0000 | 3, // MinorNotSupportedByLocalObject org.omg.CORBA.CompletionStatus.COMPLETED_NO); } public org.omg.CORBA.Request _request(String operation) { throw new org.omg.CORBA.NO_IMPLEMENT( "DII operation not supported by local object", 0x4f4d0000 | 4, // MinorDIINotSupportedByLocalObject org.omg.CORBA.CompletionStatus.COMPLETED_NO); } public org.omg.CORBA.Request _create_request(org.omg.CORBA.Context ctx, String operation, org.omg.CORBA.NVList arg_list, org.omg.CORBA.NamedValue result) { throw new org.omg.CORBA.NO_IMPLEMENT( "DII operation not supported by local object", 0x4f4d0000 | 4, // MinorDIINotSupportedByLocalObject org.omg.CORBA.CompletionStatus.COMPLETED_NO); } public org.omg.CORBA.Request _create_request(org.omg.CORBA.Context ctx, String operation, org.omg.CORBA.NVList arg_list, org.omg.CORBA.NamedValue result, org.omg.CORBA.ExceptionList excepts, org.omg.CORBA.ContextList contexts) { throw new org.omg.CORBA.NO_IMPLEMENT( "DII operation not supported by local object", 0x4f4d0000 | 4, // MinorDIINotSupportedByLocalObject org.omg.CORBA.CompletionStatus.COMPLETED_NO); } public org.omg.CORBA.Policy _get_policy(int policy_type) { throw new org.omg.CORBA.NO_IMPLEMENT( "operation not supported by local object", 0x4f4d0000 | 3, // MinorNotSupportedByLocalObject org.omg.CORBA.CompletionStatus.COMPLETED_NO); } public org.omg.CORBA.Object _set_policy_override( org.omg.CORBA.Policy[] 
policies, org.omg.CORBA.SetOverrideType set_add) { throw new org.omg.CORBA.NO_IMPLEMENT( "operation not supported by local object", 0x4f4d0000 | 3, // MinorNotSupportedByLocalObject org.omg.CORBA.CompletionStatus.COMPLETED_NO); } public org.omg.CORBA.DomainManager[] _get_domain_managers() { throw new org.omg.CORBA.NO_IMPLEMENT( "operation not supported by local object", 0x4f4d0000 | 3, // MinorNotSupportedByLocalObject org.omg.CORBA.CompletionStatus.COMPLETED_NO); } }
apache-2.0